OpenCores
URL https://opencores.org/ocsvn/open8_urisc/open8_urisc/trunk

Subversion Repositories open8_urisc

open8_urisc/trunk/gnu/binutils/gas/config/tc-arm.c (rev 166)

/* tc-arm.c -- Assemble for the ARM
   Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
   2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.
   Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
        Modified by David Taylor (dtaylor@armltd.co.uk)
        Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
        Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
        Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)

   This file is part of GAS, the GNU Assembler.

   GAS is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GAS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GAS; see the file COPYING.  If not, write to the Free
   Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
   02110-1301, USA.  */

#include "as.h"
#include <limits.h>
#include <stdarg.h>
#define  NO_RELOC 0
#include "safe-ctype.h"
#include "subsegs.h"
#include "obstack.h"
#include "libiberty.h"
#include "opcode/arm.h"

#ifdef OBJ_ELF
#include "elf/arm.h"
#include "dw2gencfi.h"
#endif

#include "dwarf2dbg.h"

#ifdef OBJ_ELF
/* Must be at least the size of the largest unwind opcode (currently two).  */
#define ARM_OPCODE_CHUNK_SIZE 8

/* This structure holds the unwinding state.  */

static struct
{
  symbolS *       proc_start;
  symbolS *       table_entry;
  symbolS *       personality_routine;
  int             personality_index;
  /* The segment containing the function.  */
  segT            saved_seg;
  subsegT         saved_subseg;
  /* Opcodes generated from this function.  */
  unsigned char * opcodes;
  int             opcode_count;
  int             opcode_alloc;
  /* The number of bytes pushed to the stack.  */
  offsetT         frame_size;
  /* We don't add stack adjustment opcodes immediately so that we can merge
     multiple adjustments.  We can also omit the final adjustment
     when using a frame pointer.  */
  offsetT         pending_offset;
  /* These two fields are set by both unwind_movsp and unwind_setfp.  They
     hold the reg+offset to use when restoring sp from a frame pointer.  */
  offsetT         fp_offset;
  int             fp_reg;
  /* Nonzero if an unwind_setfp directive has been seen.  */
  unsigned        fp_used:1;
  /* Nonzero if the last opcode restores sp from fp_reg.  */
  unsigned        sp_restored:1;
} unwind;

#endif /* OBJ_ELF */

/* Results from operand parsing worker functions.  */

typedef enum
{
  PARSE_OPERAND_SUCCESS,
  PARSE_OPERAND_FAIL,
  PARSE_OPERAND_FAIL_NO_BACKTRACK
} parse_operand_result;

enum arm_float_abi
{
  ARM_FLOAT_ABI_HARD,
  ARM_FLOAT_ABI_SOFTFP,
  ARM_FLOAT_ABI_SOFT
};

/* Types of processor to assemble for.  */
#ifndef CPU_DEFAULT
/* The code that was here used to select a default CPU depending on compiler
   pre-defines which were only present when doing native builds, thus
   changing gas' default behaviour depending upon the build host.

   If you have a target that requires a default CPU option then you
   should define CPU_DEFAULT here.  */
#endif

#ifndef FPU_DEFAULT
# ifdef TE_LINUX
#  define FPU_DEFAULT FPU_ARCH_FPA
# elif defined (TE_NetBSD)
#  ifdef OBJ_ELF
#   define FPU_DEFAULT FPU_ARCH_VFP     /* Soft-float, but VFP order.  */
#  else
    /* Legacy a.out format.  */
#   define FPU_DEFAULT FPU_ARCH_FPA     /* Soft-float, but FPA order.  */
#  endif
# elif defined (TE_VXWORKS)
#  define FPU_DEFAULT FPU_ARCH_VFP      /* Soft-float, VFP order.  */
# else
   /* For backwards compatibility, default to FPA.  */
#  define FPU_DEFAULT FPU_ARCH_FPA
# endif
#endif /* ifndef FPU_DEFAULT */

#define streq(a, b)           (strcmp (a, b) == 0)

static arm_feature_set cpu_variant;
static arm_feature_set arm_arch_used;
static arm_feature_set thumb_arch_used;

/* Flags stored in private area of BFD structure.  */
static int uses_apcs_26      = FALSE;
static int atpcs             = FALSE;
static int support_interwork = FALSE;
static int uses_apcs_float   = FALSE;
static int pic_code          = FALSE;
static int fix_v4bx          = FALSE;
/* Warn on using deprecated features.  */
static int warn_on_deprecated = TRUE;


/* Variables that we set while parsing command-line options.  Once all
   options have been read we re-process these values to set the real
   assembly flags.  */
static const arm_feature_set *legacy_cpu = NULL;
static const arm_feature_set *legacy_fpu = NULL;

static const arm_feature_set *mcpu_cpu_opt = NULL;
static const arm_feature_set *mcpu_fpu_opt = NULL;
static const arm_feature_set *march_cpu_opt = NULL;
static const arm_feature_set *march_fpu_opt = NULL;
static const arm_feature_set *mfpu_opt = NULL;
static const arm_feature_set *object_arch = NULL;

/* Constants for known architecture features.  */
static const arm_feature_set fpu_default = FPU_DEFAULT;
static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;

#ifdef CPU_DEFAULT
static const arm_feature_set cpu_default = CPU_DEFAULT;
#endif

static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V1, 0);
static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
static const arm_feature_set arm_ext_v4t_5 =
  ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
static const arm_feature_set arm_ext_v6m = ARM_FEATURE (ARM_EXT_V6M, 0);
static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
static const arm_feature_set arm_ext_v6_dsp = ARM_FEATURE (ARM_EXT_V6_DSP, 0);
static const arm_feature_set arm_ext_barrier = ARM_FEATURE (ARM_EXT_BARRIER, 0);
static const arm_feature_set arm_ext_msr = ARM_FEATURE (ARM_EXT_THUMB_MSR, 0);
static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
static const arm_feature_set arm_ext_v7m = ARM_FEATURE (ARM_EXT_V7M, 0);
static const arm_feature_set arm_ext_m =
  ARM_FEATURE (ARM_EXT_V6M | ARM_EXT_OS | ARM_EXT_V7M, 0);
static const arm_feature_set arm_ext_mp = ARM_FEATURE (ARM_EXT_MP, 0);
static const arm_feature_set arm_ext_sec = ARM_FEATURE (ARM_EXT_SEC, 0);
static const arm_feature_set arm_ext_os = ARM_FEATURE (ARM_EXT_OS, 0);
static const arm_feature_set arm_ext_adiv = ARM_FEATURE (ARM_EXT_ADIV, 0);
static const arm_feature_set arm_ext_virt = ARM_FEATURE (ARM_EXT_VIRT, 0);

static const arm_feature_set arm_arch_any = ARM_ANY;
static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
static const arm_feature_set arm_arch_v6m_only = ARM_ARCH_V6M_ONLY;

static const arm_feature_set arm_cext_iwmmxt2 =
  ARM_FEATURE (0, ARM_CEXT_IWMMXT2);
static const arm_feature_set arm_cext_iwmmxt =
  ARM_FEATURE (0, ARM_CEXT_IWMMXT);
static const arm_feature_set arm_cext_xscale =
  ARM_FEATURE (0, ARM_CEXT_XSCALE);
static const arm_feature_set arm_cext_maverick =
  ARM_FEATURE (0, ARM_CEXT_MAVERICK);
static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
static const arm_feature_set fpu_vfp_ext_v1xd =
  ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
static const arm_feature_set fpu_vfp_ext_v3xd = ARM_FEATURE (0, FPU_VFP_EXT_V3xD);
static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
static const arm_feature_set fpu_vfp_ext_d32 =
  ARM_FEATURE (0, FPU_VFP_EXT_D32);
static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
static const arm_feature_set fpu_vfp_v3_or_neon_ext =
  ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
static const arm_feature_set fpu_vfp_fp16 = ARM_FEATURE (0, FPU_VFP_EXT_FP16);
static const arm_feature_set fpu_neon_ext_fma = ARM_FEATURE (0, FPU_NEON_EXT_FMA);
static const arm_feature_set fpu_vfp_ext_fma = ARM_FEATURE (0, FPU_VFP_EXT_FMA);

static int mfloat_abi_opt = -1;
/* Record user cpu selection for object attributes.  */
static arm_feature_set selected_cpu = ARM_ARCH_NONE;
/* Must be long enough to hold any of the names in arm_cpus.  */
static char selected_cpu_name[16];

/* Return if no cpu was selected on command-line.  */
static bfd_boolean
no_cpu_selected (void)
{
  return selected_cpu.core == arm_arch_none.core
    && selected_cpu.coproc == arm_arch_none.coproc;
}

#ifdef OBJ_ELF
# ifdef EABI_DEFAULT
static int meabi_flags = EABI_DEFAULT;
# else
static int meabi_flags = EF_ARM_EABI_UNKNOWN;
# endif

static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];

bfd_boolean
arm_is_eabi (void)
{
  return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
}
#endif

#ifdef OBJ_ELF
/* Pre-defined "_GLOBAL_OFFSET_TABLE_"  */
symbolS * GOT_symbol;
#endif

/* 0: assemble for ARM,
   1: assemble for Thumb,
   2: assemble for Thumb even though target CPU does not support thumb
      instructions.  */
static int thumb_mode = 0;
/* A value distinct from the possible values for thumb_mode that we
   can use to record whether thumb_mode has been copied into the
   tc_frag_data field of a frag.  */
#define MODE_RECORDED (1 << 4)

/* Specifies the intrinsic IT insn behavior mode.  */
enum implicit_it_mode
{
  IMPLICIT_IT_MODE_NEVER  = 0x00,
  IMPLICIT_IT_MODE_ARM    = 0x01,
  IMPLICIT_IT_MODE_THUMB  = 0x02,
  IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
};
static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;

/* If unified_syntax is true, we are processing the new unified
   ARM/Thumb syntax.  Important differences from the old ARM mode:

     - Immediate operands do not require a # prefix.
     - Conditional affixes always appear at the end of the
       instruction.  (For backward compatibility, those instructions
       that formerly had them in the middle, continue to accept them
       there.)
     - The IT instruction may appear, and if it does is validated
       against subsequent conditional affixes.  It does not generate
       machine code.

   Important differences from the old Thumb mode:

     - Immediate operands do not require a # prefix.
     - Most of the V6T2 instructions are only available in unified mode.
     - The .N and .W suffixes are recognized and honored (it is an error
       if they cannot be honored).
     - All instructions set the flags if and only if they have an 's' affix.
     - Conditional affixes may be used.  They are validated against
       preceding IT instructions.  Unlike ARM mode, you cannot use a
       conditional affix except in the scope of an IT instruction.  */

static bfd_boolean unified_syntax = FALSE;
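/* Editorial example (not part of the original source): the practical effect
   of unified syntax on suffix ordering and immediate prefixes, assuming
   standard UAL mnemonics:

       @ divided (old ARM) syntax        @ unified (UAL) syntax
       addeqs  r0, r0, #1                addseq  r0, r0, #1
       mov     r1, #42                   mov     r1, 42      @ '#' optional

   The older forms remain accepted for backward compatibility, as noted in
   the comment above.  */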

enum neon_el_type
{
  NT_invtype,
  NT_untyped,
  NT_integer,
  NT_float,
  NT_poly,
  NT_signed,
  NT_unsigned
};

struct neon_type_el
{
  enum neon_el_type type;
  unsigned size;
};

#define NEON_MAX_TYPE_ELS 4

struct neon_type
{
  struct neon_type_el el[NEON_MAX_TYPE_ELS];
  unsigned elems;
};

enum it_instruction_type
{
   OUTSIDE_IT_INSN,
   INSIDE_IT_INSN,
   INSIDE_IT_LAST_INSN,
   IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
                              if inside, should be the last one.  */
   NEUTRAL_IT_INSN,        /* This could be either inside or outside,
                              i.e. BKPT and NOP.  */
   IT_INSN                 /* The IT insn has been parsed.  */
};
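/* Editorial example (not part of the original source): a Thumb-2 IT block
   makes up to four following instructions conditional, e.g.

       itte  eq          @ then, then, else
       moveq r0, #1
       moveq r1, #2
       movne r0, #0

   The enum above classifies where an instruction may legitimately sit
   relative to such a block; handle_it_state (), declared further down,
   enforces the classification.  */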

/* The maximum number of operands we need.  */
#define ARM_IT_MAX_OPERANDS 6

struct arm_it
{
  const char *  error;
  unsigned long instruction;
  int           size;
  int           size_req;
  int           cond;
  /* "uncond_value" is set to the value in place of the conditional field in
     unconditional versions of the instruction, or -1 if nothing is
     appropriate.  */
  int           uncond_value;
  struct neon_type vectype;
  /* This does not indicate an actual NEON instruction, only that
     the mnemonic accepts neon-style type suffixes.  */
  int           is_neon;
  /* Set to the opcode if the instruction needs relaxation.
     Zero if the instruction is not relaxed.  */
  unsigned long relax;
  struct
  {
    bfd_reloc_code_real_type type;
    expressionS              exp;
    int                      pc_rel;
  } reloc;

  enum it_instruction_type it_insn_type;

  struct
  {
    unsigned reg;
    signed int imm;
    struct neon_type_el vectype;
    unsigned present    : 1;  /* Operand present.  */
    unsigned isreg      : 1;  /* Operand was a register.  */
    unsigned immisreg   : 1;  /* .imm field is a second register.  */
    unsigned isscalar   : 1;  /* Operand is a (Neon) scalar.  */
    unsigned immisalign : 1;  /* Immediate is an alignment specifier.  */
    unsigned immisfloat : 1;  /* Immediate was parsed as a float.  */
    /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
       instructions. This allows us to disambiguate ARM <-> vector insns.  */
    unsigned regisimm   : 1;  /* 64-bit immediate, reg forms high 32 bits.  */
    unsigned isvec      : 1;  /* Is a single, double or quad VFP/Neon reg.  */
    unsigned isquad     : 1;  /* Operand is Neon quad-precision register.  */
    unsigned issingle   : 1;  /* Operand is VFP single-precision register.  */
    unsigned hasreloc   : 1;  /* Operand has relocation suffix.  */
    unsigned writeback  : 1;  /* Operand has trailing !  */
    unsigned preind     : 1;  /* Preindexed address.  */
    unsigned postind    : 1;  /* Postindexed address.  */
    unsigned negative   : 1;  /* Index register was negated.  */
    unsigned shifted    : 1;  /* Shift applied to operation.  */
    unsigned shift_kind : 3;  /* Shift operation (enum shift_kind).  */
  } operands[ARM_IT_MAX_OPERANDS];
};

static struct arm_it inst;

#define NUM_FLOAT_VALS 8

const char * fp_const[] =
{
  "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
};

/* Number of littlenums required to hold an extended precision number.  */
#define MAX_LITTLENUMS 6

LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
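/* Editorial note (not part of the original source): fp_const lists the
   constants that the FPA instruction set can encode directly as immediate
   operands; fp_values above is presumably filled in at start-up with their
   LITTLENUM expansions so that operand parsing can match against them.  */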

#define FAIL    (-1)
#define SUCCESS (0)

#define SUFF_S 1
#define SUFF_D 2
#define SUFF_E 3
#define SUFF_P 4

#define CP_T_X   0x00008000
#define CP_T_Y   0x00400000

#define CONDS_BIT        0x00100000
#define LOAD_BIT         0x00100000

#define DOUBLE_LOAD_FLAG 0x00000001

struct asm_cond
{
  const char *   template_name;
  unsigned long  value;
};

#define COND_ALWAYS 0xE

struct asm_psr
{
  const char *   template_name;
  unsigned long  field;
};

struct asm_barrier_opt
{
  const char *   template_name;
  unsigned long  value;
};

/* The bit that distinguishes CPSR and SPSR.  */
#define SPSR_BIT   (1 << 22)

/* The individual PSR flag bits.  */
#define PSR_c   (1 << 16)
#define PSR_x   (1 << 17)
#define PSR_s   (1 << 18)
#define PSR_f   (1 << 19)
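/* Editorial example (not part of the original source): these are the MSR/MRS
   field-mask bits, so an instruction such as

       msr  cpsr_fc, r0

   selects PSR_f | PSR_c, while SPSR_BIT distinguishes the "spsr_..." forms
   from the "cpsr_..." ones.  */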

struct reloc_entry
{
  char *                    name;
  bfd_reloc_code_real_type  reloc;
};

enum vfp_reg_pos
{
  VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
  VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
};

enum vfp_ldstm_type
{
  VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
};

/* Bits for DEFINED field in neon_typed_alias.  */
#define NTA_HASTYPE  1
#define NTA_HASINDEX 2

struct neon_typed_alias
{
  unsigned char        defined;
  unsigned char        index;
  struct neon_type_el  eltype;
};

/* ARM register categories.  This includes coprocessor numbers and various
   architecture extensions' registers.  */
enum arm_reg_type
{
  REG_TYPE_RN,
  REG_TYPE_CP,
  REG_TYPE_CN,
  REG_TYPE_FN,
  REG_TYPE_VFS,
  REG_TYPE_VFD,
  REG_TYPE_NQ,
  REG_TYPE_VFSD,
  REG_TYPE_NDQ,
  REG_TYPE_NSDQ,
  REG_TYPE_VFC,
  REG_TYPE_MVF,
  REG_TYPE_MVD,
  REG_TYPE_MVFX,
  REG_TYPE_MVDX,
  REG_TYPE_MVAX,
  REG_TYPE_DSPSC,
  REG_TYPE_MMXWR,
  REG_TYPE_MMXWC,
  REG_TYPE_MMXWCG,
  REG_TYPE_XSCALE,
  REG_TYPE_RNB
};

/* Structure for a hash table entry for a register.
   If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
   information which states whether a vector type or index is specified (for a
   register alias created with .dn or .qn). Otherwise NEON should be NULL.  */
struct reg_entry
{
  const char *               name;
  unsigned int               number;
  unsigned char              type;
  unsigned char              builtin;
  struct neon_typed_alias *  neon;
};

/* Diagnostics used when we don't get a register of the expected type.  */
const char * const reg_expected_msgs[] =
{
  N_("ARM register expected"),
  N_("bad or missing co-processor number"),
  N_("co-processor register expected"),
  N_("FPA register expected"),
  N_("VFP single precision register expected"),
  N_("VFP/Neon double precision register expected"),
  N_("Neon quad precision register expected"),
  N_("VFP single or double precision register expected"),
  N_("Neon double or quad precision register expected"),
  N_("VFP single, double or Neon quad precision register expected"),
  N_("VFP system register expected"),
  N_("Maverick MVF register expected"),
  N_("Maverick MVD register expected"),
  N_("Maverick MVFX register expected"),
  N_("Maverick MVDX register expected"),
  N_("Maverick MVAX register expected"),
  N_("Maverick DSPSC register expected"),
  N_("iWMMXt data register expected"),
  N_("iWMMXt control register expected"),
  N_("iWMMXt scalar register expected"),
  N_("XScale accumulator register expected"),
};

/* Some well known registers that we refer to directly elsewhere.  */
#define REG_R12 12
#define REG_SP  13
#define REG_LR  14
#define REG_PC  15

/* ARM instructions take 4 bytes in the object file, Thumb instructions
   take 2:  */
#define INSN_SIZE       4

struct asm_opcode
{
  /* Basic string to match.  */
  const char * template_name;

  /* Parameters to instruction.  */
  unsigned int operands[8];

  /* Conditional tag - see opcode_lookup.  */
  unsigned int tag : 4;

  /* Basic instruction code.  */
  unsigned int avalue : 28;

  /* Thumb-format instruction code.  */
  unsigned int tvalue;

  /* Which architecture variant provides this instruction.  */
  const arm_feature_set * avariant;
  const arm_feature_set * tvariant;

  /* Function to call to encode instruction in ARM format.  */
  void (* aencode) (void);

  /* Function to call to encode instruction in Thumb format.  */
  void (* tencode) (void);
};

/* Defines for various bits that we will want to toggle.  */
#define INST_IMMEDIATE  0x02000000
#define OFFSET_REG      0x02000000
#define HWOFFSET_IMM    0x00400000
#define SHIFT_BY_REG    0x00000010
#define PRE_INDEX       0x01000000
#define INDEX_UP        0x00800000
#define WRITE_BACK      0x00200000
#define LDM_TYPE_2_OR_3 0x00400000
#define CPSI_MMOD       0x00020000

#define LITERAL_MASK    0xf000f000
#define OPCODE_MASK     0xfe1fffff
#define V4_STR_BIT      0x00000020

#define T2_SUBS_PC_LR   0xf3de8f00

#define DATA_OP_SHIFT   21

#define T2_OPCODE_MASK  0xfe1fffff
#define T2_DATA_OP_SHIFT 21

/* Codes to distinguish the arithmetic instructions.  */
#define OPCODE_AND      0
#define OPCODE_EOR      1
#define OPCODE_SUB      2
#define OPCODE_RSB      3
#define OPCODE_ADD      4
#define OPCODE_ADC      5
#define OPCODE_SBC      6
#define OPCODE_RSC      7
#define OPCODE_TST      8
#define OPCODE_TEQ      9
#define OPCODE_CMP      10
#define OPCODE_CMN      11
#define OPCODE_ORR      12
#define OPCODE_MOV      13
#define OPCODE_BIC      14
#define OPCODE_MVN      15

#define T2_OPCODE_AND   0
#define T2_OPCODE_BIC   1
#define T2_OPCODE_ORR   2
#define T2_OPCODE_ORN   3
#define T2_OPCODE_EOR   4
#define T2_OPCODE_ADD   8
#define T2_OPCODE_ADC   10
#define T2_OPCODE_SBC   11
#define T2_OPCODE_SUB   13
#define T2_OPCODE_RSB   14

#define T_OPCODE_MUL 0x4340
#define T_OPCODE_TST 0x4200
#define T_OPCODE_CMN 0x42c0
#define T_OPCODE_NEG 0x4240
#define T_OPCODE_MVN 0x43c0

#define T_OPCODE_ADD_R3 0x1800
#define T_OPCODE_SUB_R3 0x1a00
#define T_OPCODE_ADD_HI 0x4400
#define T_OPCODE_ADD_ST 0xb000
#define T_OPCODE_SUB_ST 0xb080
#define T_OPCODE_ADD_SP 0xa800
#define T_OPCODE_ADD_PC 0xa000
#define T_OPCODE_ADD_I8 0x3000
#define T_OPCODE_SUB_I8 0x3800
#define T_OPCODE_ADD_I3 0x1c00
#define T_OPCODE_SUB_I3 0x1e00

#define T_OPCODE_ASR_R  0x4100
#define T_OPCODE_LSL_R  0x4080
#define T_OPCODE_LSR_R  0x40c0
#define T_OPCODE_ROR_R  0x41c0
#define T_OPCODE_ASR_I  0x1000
#define T_OPCODE_LSL_I  0x0000
#define T_OPCODE_LSR_I  0x0800

#define T_OPCODE_MOV_I8 0x2000
#define T_OPCODE_CMP_I8 0x2800
#define T_OPCODE_CMP_LR 0x4280
#define T_OPCODE_MOV_HR 0x4600
#define T_OPCODE_CMP_HR 0x4500

#define T_OPCODE_LDR_PC 0x4800
#define T_OPCODE_LDR_SP 0x9800
#define T_OPCODE_STR_SP 0x9000
#define T_OPCODE_LDR_IW 0x6800
#define T_OPCODE_STR_IW 0x6000
#define T_OPCODE_LDR_IH 0x8800
#define T_OPCODE_STR_IH 0x8000
#define T_OPCODE_LDR_IB 0x7800
#define T_OPCODE_STR_IB 0x7000
#define T_OPCODE_LDR_RW 0x5800
#define T_OPCODE_STR_RW 0x5000
#define T_OPCODE_LDR_RH 0x5a00
#define T_OPCODE_STR_RH 0x5200
#define T_OPCODE_LDR_RB 0x5c00
#define T_OPCODE_STR_RB 0x5400

#define T_OPCODE_PUSH   0xb400
#define T_OPCODE_POP    0xbc00

#define T_OPCODE_BRANCH 0xe000

#define THUMB_SIZE      2       /* Size of thumb instruction.  */
#define THUMB_PP_PC_LR 0x0100
#define THUMB_LOAD_BIT 0x0800
#define THUMB2_LOAD_BIT 0x00100000

#define BAD_ARGS        _("bad arguments to instruction")
#define BAD_SP          _("r13 not allowed here")
#define BAD_PC          _("r15 not allowed here")
#define BAD_COND        _("instruction cannot be conditional")
#define BAD_OVERLAP     _("registers may not be the same")
#define BAD_HIREG       _("lo register required")
#define BAD_THUMB32     _("instruction not supported in Thumb16 mode")
#define BAD_ADDR_MODE   _("instruction does not accept this addressing mode");
#define BAD_BRANCH      _("branch must be last instruction in IT block")
#define BAD_NOT_IT      _("instruction not allowed in IT block")
#define BAD_FPU         _("selected FPU does not support instruction")
#define BAD_OUT_IT      _("thumb conditional instruction should be in IT block")
#define BAD_IT_COND     _("incorrect condition in IT block")
#define BAD_IT_IT       _("IT falling in the range of a previous IT block")
#define MISSING_FNSTART _("missing .fnstart before unwinding directive")
#define BAD_PC_ADDRESSING \
        _("cannot use register index with PC-relative addressing")
#define BAD_PC_WRITEBACK \
        _("cannot use writeback with PC-relative addressing")
#define BAD_RANGE     _("branch out of range")

static struct hash_control * arm_ops_hsh;
static struct hash_control * arm_cond_hsh;
static struct hash_control * arm_shift_hsh;
static struct hash_control * arm_psr_hsh;
static struct hash_control * arm_v7m_psr_hsh;
static struct hash_control * arm_reg_hsh;
static struct hash_control * arm_reloc_hsh;
static struct hash_control * arm_barrier_opt_hsh;

/* Stuff needed to resolve the label ambiguity
   As:
     ...
     label:   <insn>
   may differ from:
     ...
     label:
              <insn>  */

symbolS *  last_label_seen;
static int label_is_thumb_function_name = FALSE;

/* Literal pool structure.  Held on a per-section
   and per-sub-section basis.  */

#define MAX_LITERAL_POOL_SIZE 1024
typedef struct literal_pool
{
  expressionS            literals [MAX_LITERAL_POOL_SIZE];
  unsigned int           next_free_entry;
  unsigned int           id;
  symbolS *              symbol;
  segT                   section;
  subsegT                sub_section;
#ifdef OBJ_ELF
  struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
#endif
  struct literal_pool *  next;
} literal_pool;

/* Pointer to a linked list of literal pools.  */
literal_pool * list_of_pools = NULL;
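/* Editorial example (not part of the original source): entries typically
   reach the current pool via the "ldr rX, =expr" pseudo-instruction and are
   flushed by .ltorg/.pool (or at the end of the section), e.g.

       ldr   r0, =0x12345678    @ becomes a PC-relative load from the pool
       .ltorg                   @ the pooled word is emitted here

   with one pool kept per (section, sub-section) pair as described above.  */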

#ifdef OBJ_ELF
#  define now_it seg_info (now_seg)->tc_segment_info_data.current_it
#else
static struct current_it now_it;
#endif

static inline int
now_it_compatible (int cond)
{
  return (cond & ~1) == (now_it.cc & ~1);
}

static inline int
conditional_insn (void)
{
  return inst.cond != COND_ALWAYS;
}

static int in_it_block (void);

static int handle_it_state (void);

static void force_automatic_it_block_close (void);

static void it_fsm_post_encode (void);

#define set_it_insn_type(type)                  \
  do                                            \
    {                                           \
      inst.it_insn_type = type;                 \
      if (handle_it_state () == FAIL)           \
        return;                                 \
    }                                           \
  while (0)

#define set_it_insn_type_nonvoid(type, failret) \
  do                                            \
    {                                           \
      inst.it_insn_type = type;                 \
      if (handle_it_state () == FAIL)           \
        return failret;                         \
    }                                           \
  while(0)

#define set_it_insn_type_last()                         \
  do                                                    \
    {                                                   \
      if (inst.cond == COND_ALWAYS)                     \
        set_it_insn_type (IF_INSIDE_IT_LAST_INSN);      \
      else                                              \
        set_it_insn_type (INSIDE_IT_LAST_INSN);         \
    }                                                   \
  while (0)
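/* Editorial example (not part of the original source): encoder functions are
   expected to declare their IT-block behaviour through these macros before
   emitting anything, e.g. an encoder for an instruction that must terminate
   its IT block might begin with

       set_it_insn_type_last ();

   so that handle_it_state () can raise diagnostics such as BAD_BRANCH
   (see the messages defined above).  */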

/* Pure syntax.  */

/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.  */
const char comment_chars[] = "@";

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.  */
/* Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.  */
/* Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

const char line_separator_chars[] = ";";

/* Chars that can be used to separate mant
   from exp in floating point numbers.  */
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant.  */
/* As in 0f12.456  */
/* or    0d1.2345e12  */

const char FLT_CHARS[] = "rRsSfFdDxXeEpP";

/* Prefix characters that indicate the start of an immediate
   value.  */
#define is_immediate_prefix(C) ((C) == '#' || (C) == '$')

/* Separator character handling.  */

#define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)
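/* Editorial note (not part of the original source): skip_whitespace consumes
   at most one ' ' character, presumably because the input scrubber has
   already collapsed longer runs of whitespace by the time operand parsing
   sees the string.  */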

static inline int
skip_past_char (char ** str, char c)
{
  if (**str == c)
    {
      (*str)++;
      return SUCCESS;
    }
  else
    return FAIL;
}

#define skip_past_comma(str) skip_past_char (str, ',')

/* Arithmetic expressions (possibly involving symbols).  */

/* Return TRUE if anything in the expression is a bignum.  */

static int
walk_no_bignums (symbolS * sp)
{
  if (symbol_get_value_expression (sp)->X_op == O_big)
    return 1;

  if (symbol_get_value_expression (sp)->X_add_symbol)
    {
      return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
              || (symbol_get_value_expression (sp)->X_op_symbol
                  && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
    }

  return 0;
}

static int in_my_get_expression = 0;

/* Third argument to my_get_expression.  */
#define GE_NO_PREFIX 0
#define GE_IMM_PREFIX 1
#define GE_OPT_PREFIX 2
/* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
   immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
#define GE_OPT_PREFIX_BIG 3
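/* Editorial example (not part of the original source): with GE_IMM_PREFIX,
   "#4" parses but a bare "4" is rejected with "immediate expression requires
   a # prefix"; GE_OPT_PREFIX accepts both; and in unified syntax every mode
   except GE_OPT_PREFIX_BIG is relaxed to GE_OPT_PREFIX by
   my_get_expression () below.  */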

static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;
  segT   seg;

  /* In unified syntax, all prefixes are optional.  */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
                  : GE_OPT_PREFIX;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
        {
          inst.error = _("immediate expression requires a # prefix");
          return FAIL;
        }
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
        (*str)++;
      break;
    default: abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = 1;
  seg = expression (ep);
  in_my_get_expression = 0;

  if (ep->X_op == O_illegal || ep->X_op == O_absent)
    {
      /* We found a bad or missing expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
        inst.error = (ep->X_op == O_absent
                      ? _("missing expression") :_("bad expression"));
      return 1;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      inst.error = _("bad segment");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }
#else
  (void) seg;
#endif

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.  Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
          || (ep->X_add_symbol
              && (walk_no_bignums (ep->X_add_symbol)
                  || (ep->X_op_symbol
                      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return 0;
}

/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.

   Note that fp constants aren't represented in the normal way on the ARM.
   In big endian mode, things are as expected.  However, in little endian
   mode fp constants are big-endian word-wise, and little-endian byte-wise
   within the words.  For example, (double) 1.1 in big endian mode is
   the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
   the byte sequence 99 99 f1 3f 9a 99 99 99.

   ??? The format of 12 byte floats is uncertain according to gcc's arm.h.  */

char *
md_atof (int type, char * litP, int * sizeP)
{
  int prec;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *t;
  int i;

  switch (type)
    {
    case 'f':
    case 'F':
    case 's':
    case 'S':
      prec = 2;
      break;

    case 'd':
    case 'D':
    case 'r':
    case 'R':
      prec = 4;
      break;

    case 'x':
    case 'X':
      prec = 5;
      break;

    case 'p':
    case 'P':
      prec = 5;
      break;

    default:
      *sizeP = 0;
      return _("Unrecognized or unsupported floating point constant");
    }

  t = atof_ieee (input_line_pointer, type, words);
  if (t)
    input_line_pointer = t;
  *sizeP = prec * sizeof (LITTLENUM_TYPE);

  if (target_big_endian)
    {
      for (i = 0; i < prec; i++)
        {
          md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
          litP += sizeof (LITTLENUM_TYPE);
        }
    }
  else
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
        for (i = prec - 1; i >= 0; i--)
          {
            md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
            litP += sizeof (LITTLENUM_TYPE);
          }
      else
        /* For a 4 byte float the order of elements in `words' is 1 0.
           For an 8 byte float the order is 1 0 3 2.  */
        for (i = 0; i < prec; i += 2)
          {
            md_number_to_chars (litP, (valueT) words[i + 1],
                                sizeof (LITTLENUM_TYPE));
            md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
                                (valueT) words[i], sizeof (LITTLENUM_TYPE));
            litP += 2 * sizeof (LITTLENUM_TYPE);
          }
    }

  return NULL;
}

/* We handle all bad expressions here, so that we can report the faulty
   instruction in the error message.  */
void
md_operand (expressionS * exp)
{
  if (in_my_get_expression)
    exp->X_op = O_illegal;
}

/* Immediate values.  */

/* Generic immediate-value read function for use in directives.
   Accepts anything that 'expression' can fold to a constant.
   *val receives the number.  */
#ifdef OBJ_ELF
static int
immediate_for_directive (int *val)
{
  expressionS exp;
  exp.X_op = O_illegal;

  if (is_immediate_prefix (*input_line_pointer))
    {
      input_line_pointer++;
      expression (&exp);
    }

  if (exp.X_op != O_constant)
    {
      as_bad (_("expected #constant"));
      ignore_rest_of_line ();
      return FAIL;
    }
  *val = exp.X_add_number;
  return SUCCESS;
}
#endif
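/* Editorial note (not part of the original source): this helper backs the
   EABI unwinding directives that take "#constant" operands, for example

       .pad  #16

   which is why it is only compiled in for OBJ_ELF.  */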

/* Register parsing.  */

/* Generic register parser.  CCP points to what should be the
   beginning of a register name.  If it is indeed a valid register
   name, advance CCP over it and return the reg_entry structure;
   otherwise return NULL.  Does not issue diagnostics.  */

static struct reg_entry *
arm_reg_parse_multi (char **ccp)
{
  char *start = *ccp;
  char *p;
  struct reg_entry *reg;

#ifdef REGISTER_PREFIX
  if (*start != REGISTER_PREFIX)
    return NULL;
  start++;
#endif
#ifdef OPTIONAL_REGISTER_PREFIX
  if (*start == OPTIONAL_REGISTER_PREFIX)
    start++;
#endif

  p = start;
  if (!ISALPHA (*p) || !is_name_beginner (*p))
    return NULL;

  do
    p++;
  while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');

  reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);

  if (!reg)
    return NULL;

  *ccp = p;
  return reg;
}

static int
arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
                    enum arm_reg_type type)
{
  /* Alternative syntaxes are accepted for a few register classes.  */
  switch (type)
    {
    case REG_TYPE_MVF:
    case REG_TYPE_MVD:
    case REG_TYPE_MVFX:
    case REG_TYPE_MVDX:
      /* Generic coprocessor register names are allowed for these.  */
      if (reg && reg->type == REG_TYPE_CN)
        return reg->number;
      break;

    case REG_TYPE_CP:
      /* For backward compatibility, a bare number is valid here.  */
      {
        unsigned long processor = strtoul (start, ccp, 10);
        if (*ccp != start && processor <= 15)
          return processor;
      }

    case REG_TYPE_MMXWC:
      /* WC includes WCG.  ??? I'm not sure this is true for all
         instructions that take WC registers.  */
      if (reg && reg->type == REG_TYPE_MMXWCG)
        return reg->number;
      break;

    default:
      break;
    }

  return FAIL;
}

/* As arm_reg_parse_multi, but the register must be of type TYPE, and the
   return value is the register number or FAIL.  */

static int
arm_reg_parse (char **ccp, enum arm_reg_type type)
{
  char *start = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (ccp);
  int ret;

  /* Do not allow a scalar (reg+index) to parse as a register.  */
  if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
    return FAIL;

  if (reg && reg->type == type)
    return reg->number;

  if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
    return ret;

  *ccp = start;
  return FAIL;
}
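/* Editorial example (not part of the original source): a typical caller
   pattern, as used by parse_reg_list () further below, is

       if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
         first_error (_(reg_expected_msgs[REG_TYPE_RN]));

   i.e. the reg_expected_msgs table is indexed by the same enum arm_reg_type
   value that was requested.  */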

/* Parse a Neon type specifier. *STR should point at the leading '.'
   character. Does no verification at this stage that the type fits the opcode
   properly. E.g.,

     .i32.i32.s16
     .s32.f32
     .u16

   Can all be legally parsed by this function.

   Fills in neon_type struct pointer with parsed information, and updates STR
   to point after the parsed type specifier. Returns SUCCESS if this was a legal
   type, FAIL if not.  */

static int
parse_neon_type (struct neon_type *type, char **str)
{
  char *ptr = *str;

  if (type)
    type->elems = 0;

  while (type->elems < NEON_MAX_TYPE_ELS)
    {
      enum neon_el_type thistype = NT_untyped;
      unsigned thissize = -1u;

      if (*ptr != '.')
        break;

      ptr++;

      /* Just a size without an explicit type.  */
      if (ISDIGIT (*ptr))
        goto parsesize;

      switch (TOLOWER (*ptr))
        {
        case 'i': thistype = NT_integer; break;
        case 'f': thistype = NT_float; break;
        case 'p': thistype = NT_poly; break;
        case 's': thistype = NT_signed; break;
        case 'u': thistype = NT_unsigned; break;
        case 'd':
          thistype = NT_float;
          thissize = 64;
          ptr++;
          goto done;
        default:
          as_bad (_("unexpected character `%c' in type specifier"), *ptr);
          return FAIL;
        }

      ptr++;

      /* .f is an abbreviation for .f32.  */
      if (thistype == NT_float && !ISDIGIT (*ptr))
        thissize = 32;
      else
        {
        parsesize:
          thissize = strtoul (ptr, &ptr, 10);

          if (thissize != 8 && thissize != 16 && thissize != 32
              && thissize != 64)
            {
              as_bad (_("bad size %d in type specifier"), thissize);
              return FAIL;
            }
        }

      done:
      if (type)
        {
          type->el[type->elems].type = thistype;
          type->el[type->elems].size = thissize;
          type->elems++;
        }
    }

  /* Empty/missing type is not a successful parse.  */
  if (type->elems == 0)
    return FAIL;

  *str = ptr;

  return SUCCESS;
}

/* Errors may be set multiple times during parsing or bit encoding
   (particularly in the Neon bits), but usually the earliest error which is set
   will be the most meaningful. Avoid overwriting it with later (cascading)
   errors by calling this function.  */

static void
first_error (const char *err)
{
  if (!inst.error)
    inst.error = err;
}

/* Parse a single type, e.g. ".s32", leading period included.  */
static int
parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
{
  char *str = *ccp;
  struct neon_type optype;

  if (*str == '.')
    {
      if (parse_neon_type (&optype, &str) == SUCCESS)
        {
          if (optype.elems == 1)
            *vectype = optype.el[0];
          else
            {
              first_error (_("only one type should be specified for operand"));
              return FAIL;
            }
        }
      else
        {
          first_error (_("vector type expected"));
          return FAIL;
        }
    }
  else
    return FAIL;

  *ccp = str;

  return SUCCESS;
}

/* Special meanings for indices (which have a range of 0-7), which will fit into
   a 4-bit integer.  */

#define NEON_ALL_LANES          15
#define NEON_INTERLEAVE_LANES   14

/* Parse either a register or a scalar, with an optional type. Return the
   register number, and optionally fill in the actual type of the register
   when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
   type/index information in *TYPEINFO.  */

static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
                           enum arm_reg_type *rtype,
                           struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register. Note these are mutually
     exclusive with the Neon syntax extensions.  */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      if (altreg != FAIL)
        *ccp = str;
      if (typeinfo)
        *typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted.  */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
          && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
          && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
              || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_MMXWC
          && (reg->type == REG_TYPE_MMXWCG)))
    type = (enum arm_reg_type) reg->type;

  if (type != reg->type)
    return FAIL;

  if (reg->neon)
    atype = *reg->neon;

  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
        {
          first_error (_("can't redefine type for operand"));
          return FAIL;
        }
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD)
        {
          first_error (_("only D registers may be indexed"));
          return FAIL;
        }

      if ((atype.defined & NTA_HASINDEX) != 0)
        {
          first_error (_("can't change index for operand"));
          return FAIL;
        }

      atype.defined |= NTA_HASINDEX;

      if (skip_past_char (&str, ']') == SUCCESS)
        atype.index = NEON_ALL_LANES;
      else
        {
          expressionS exp;

          my_get_expression (&exp, &str, GE_NO_PREFIX);

          if (exp.X_op != O_constant)
            {
              first_error (_("constant expression required"));
              return FAIL;
            }

          if (skip_past_char (&str, ']') == FAIL)
            return FAIL;

          atype.index = exp.X_add_number;
        }
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}

/* Like arm_reg_parse, but allow the following extra features:
    - If RTYPE is non-zero, return the (possibly restricted) type of the
      register (e.g. Neon double or quad reg when either has been requested).
    - If this is a Neon vector type with additional type information, fill
      in the struct pointed to by VECTYPE (if non-NULL).
   This function will fault on encountering a scalar.  */

static int
arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
                     enum arm_reg_type *rtype, struct neon_type_el *vectype)
{
  struct neon_typed_alias atype;
  char *str = *ccp;
  int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);

  if (reg == FAIL)
    return FAIL;

  /* Do not allow regname(... to parse as a register.  */
  if (*str == '(')
    return FAIL;

  /* Do not allow a scalar (reg+index) to parse as a register.  */
  if ((atype.defined & NTA_HASINDEX) != 0)
    {
      first_error (_("register operand expected, but got scalar"));
      return FAIL;
    }

  if (vectype)
    *vectype = atype.eltype;

  *ccp = str;

  return reg;
}

#define NEON_SCALAR_REG(X)      ((X) >> 4)
#define NEON_SCALAR_INDEX(X)    ((X) & 15)
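/* Editorial example (not part of the original source): parse_scalar () below
   packs its result as REG * 16 + INDEX, so the operand "d5[2]" yields 82 and

       NEON_SCALAR_REG (82)   == 5
       NEON_SCALAR_INDEX (82) == 2

   recover the register number and the lane index.  */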

/* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
   have enough information to be able to do a good job bounds-checking. So, we
   just do easy checks here, and do further checks later.  */

static int
parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
{
  int reg;
  char *str = *ccp;
  struct neon_typed_alias atype;

  reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);

  if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
    return FAIL;

  if (atype.index == NEON_ALL_LANES)
    {
      first_error (_("scalar must have an index"));
      return FAIL;
    }
  else if (atype.index >= 64 / elsize)
    {
      first_error (_("scalar index out of range"));
      return FAIL;
    }

  if (type)
    *type = atype.eltype;

  *ccp = str;

  return reg * 16 + atype.index;
}

/* Parse an ARM register list.  Returns the bitmask, or FAIL.  */

static long
parse_reg_list (char ** strp)
{
  char * str = * strp;
  long   range = 0;
  int    another_range;

  /* We come back here if we get ranges concatenated by '+' or '|'.  */
  do
    {
      another_range = 0;

      if (*str == '{')
        {
          int in_range = 0;
          int cur_reg = -1;

          str++;
          do
            {
              int reg;

              if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
                {
                  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
                  return FAIL;
                }

              if (in_range)
                {
                  int i;

                  if (reg <= cur_reg)
                    {
                      first_error (_("bad range in register list"));
                      return FAIL;
                    }

                  for (i = cur_reg + 1; i < reg; i++)
                    {
                      if (range & (1 << i))
                        as_tsktsk
                          (_("Warning: duplicated register (r%d) in register list"),
                           i);
                      else
                        range |= 1 << i;
                    }
                  in_range = 0;
                }

              if (range & (1 << reg))
                as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
                           reg);
              else if (reg <= cur_reg)
                as_tsktsk (_("Warning: register range not in ascending order"));

              range |= 1 << reg;
              cur_reg = reg;
            }
          while (skip_past_comma (&str) != FAIL
                 || (in_range = 1, *str++ == '-'));
          str--;

          if (*str++ != '}')
            {
              first_error (_("missing `}'"));
              return FAIL;
            }
        }
      else
        {
          expressionS exp;

          if (my_get_expression (&exp, &str, GE_NO_PREFIX))
            return FAIL;

          if (exp.X_op == O_constant)
            {
              if (exp.X_add_number
                  != (exp.X_add_number & 0x0000ffff))
                {
                  inst.error = _("invalid register mask");
                  return FAIL;
                }

              if ((range & exp.X_add_number) != 0)
                {
                  int regno = range & exp.X_add_number;

                  regno &= -regno;
                  regno = (1 << regno) - 1;
                  as_tsktsk
                    (_("Warning: duplicated register (r%d) in register list"),
                     regno);
                }

              range |= exp.X_add_number;
            }
          else
            {
              if (inst.reloc.type != 0)
                {
                  inst.error = _("expression too complex");
                  return FAIL;
                }

              memcpy (&inst.reloc.exp, &exp, sizeof (expressionS));
              inst.reloc.type = BFD_RELOC_ARM_MULTI;
              inst.reloc.pc_rel = 0;
            }
        }

      if (*str == '|' || *str == '+')
        {
          str++;
          another_range = 1;
        }
    }
  while (another_range);

  *strp = str;
  return range;
}
1674
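/* Illustrative sketch, not part of the GAS sources: how a core register
   list such as "{r0-r3, r5, lr}" maps onto the 16-bit mask returned by
   the parser above -- each register rN simply sets bit N (lr is r14).
   Standalone C, disabled here so it does not affect compilation.  */
#if 0
#include <stdio.h>

int
main (void)
{
  /* The registers named by "{r0-r3, r5, lr}".  */
  static const int regs[] = { 0, 1, 2, 3, 5, 14 };
  unsigned int range = 0;
  unsigned int i;

  for (i = 0; i < sizeof regs / sizeof regs[0]; i++)
    range |= 1u << regs[i];

  printf ("mask = 0x%04x\n", range);    /* Prints "mask = 0x402f".  */
  return 0;
}
#endif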
 
1675
/* Types of registers in a list.  */
1676
 
1677
enum reg_list_els
1678
{
1679
  REGLIST_VFP_S,
1680
  REGLIST_VFP_D,
1681
  REGLIST_NEON_D
1682
};
1683
 
1684
/* Parse a VFP register list.  If the string is invalid return FAIL.
1685
   Otherwise return the number of registers, and set PBASE to the first
1686
   register.  Parses registers of type ETYPE.
1687
   If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1688
     - Q registers can be used to specify pairs of D registers
1689
     - { } can be omitted from around a singleton register list
1690
         FIXME: This is not implemented, as it would require backtracking in
1691
         some cases, e.g.:
1692
           vtbl.8 d3,d4,d5
1693
         This could be done (the meaning isn't really ambiguous), but doesn't
1694
         fit in well with the current parsing framework.
1695
     - 32 D registers may be used (also true for VFPv3).
1696
   FIXME: Types are ignored in these register lists, which is probably a
1697
   bug.  */
1698
 
1699
static int
1700
parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
1701
{
1702
  char *str = *ccp;
1703
  int base_reg;
1704
  int new_base;
1705
  enum arm_reg_type regtype = (enum arm_reg_type) 0;
1706
  int max_regs = 0;
1707
  int count = 0;
1708
  int warned = 0;
1709
  unsigned long mask = 0;
1710
  int i;
1711
 
1712
  if (*str != '{')
1713
    {
1714
      inst.error = _("expecting {");
1715
      return FAIL;
1716
    }
1717
 
1718
  str++;
1719
 
1720
  switch (etype)
1721
    {
1722
    case REGLIST_VFP_S:
1723
      regtype = REG_TYPE_VFS;
1724
      max_regs = 32;
1725
      break;
1726
 
1727
    case REGLIST_VFP_D:
1728
      regtype = REG_TYPE_VFD;
1729
      break;
1730
 
1731
    case REGLIST_NEON_D:
1732
      regtype = REG_TYPE_NDQ;
1733
      break;
1734
    }
1735
 
1736
  if (etype != REGLIST_VFP_S)
1737
    {
1738
      /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant.  */
1739
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
1740
        {
1741
          max_regs = 32;
1742
          if (thumb_mode)
1743
            ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
1744
                                    fpu_vfp_ext_d32);
1745
          else
1746
            ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
1747
                                    fpu_vfp_ext_d32);
1748
        }
1749
      else
1750
        max_regs = 16;
1751
    }
1752
 
1753
  base_reg = max_regs;
1754
 
1755
  do
1756
    {
1757
      int setmask = 1, addregs = 1;
1758
 
1759
      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);
1760
 
1761
      if (new_base == FAIL)
1762
        {
1763
          first_error (_(reg_expected_msgs[regtype]));
1764
          return FAIL;
1765
        }
1766
 
1767
      if (new_base >= max_regs)
1768
        {
1769
          first_error (_("register out of range in list"));
1770
          return FAIL;
1771
        }
1772
 
1773
      /* Note: a value of 2 * n is returned for the register Q<n>.  */
1774
      if (regtype == REG_TYPE_NQ)
1775
        {
1776
          setmask = 3;
1777
          addregs = 2;
1778
        }
1779
 
1780
      if (new_base < base_reg)
1781
        base_reg = new_base;
1782
 
1783
      if (mask & (setmask << new_base))
1784
        {
1785
          first_error (_("invalid register list"));
1786
          return FAIL;
1787
        }
1788
 
1789
      if ((mask >> new_base) != 0 && ! warned)
1790
        {
1791
          as_tsktsk (_("register list not in ascending order"));
1792
          warned = 1;
1793
        }
1794
 
1795
      mask |= setmask << new_base;
1796
      count += addregs;
1797
 
1798
      if (*str == '-') /* We have the start of a range expression */
1799
        {
1800
          int high_range;
1801
 
1802
          str++;
1803
 
1804
          if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
1805
              == FAIL)
1806
            {
1807
              inst.error = gettext (reg_expected_msgs[regtype]);
1808
              return FAIL;
1809
            }
1810
 
1811
          if (high_range >= max_regs)
1812
            {
1813
              first_error (_("register out of range in list"));
1814
              return FAIL;
1815
            }
1816
 
1817
          if (regtype == REG_TYPE_NQ)
1818
            high_range = high_range + 1;
1819
 
1820
          if (high_range <= new_base)
1821
            {
1822
              inst.error = _("register range not in ascending order");
1823
              return FAIL;
1824
            }
1825
 
1826
          for (new_base += addregs; new_base <= high_range; new_base += addregs)
1827
            {
1828
              if (mask & (setmask << new_base))
1829
                {
1830
                  inst.error = _("invalid register list");
1831
                  return FAIL;
1832
                }
1833
 
1834
              mask |= setmask << new_base;
1835
              count += addregs;
1836
            }
1837
        }
1838
    }
1839
  while (skip_past_comma (&str) != FAIL);
1840
 
1841
  str++;
1842
 
1843
  /* Sanity check -- should have raised a parse error above.  */
1844
  if (count == 0 || count > max_regs)
1845
    abort ();
1846
 
1847
  *pbase = base_reg;
1848
 
1849
  /* Final test -- the registers must be consecutive.  */
1850
  mask >>= base_reg;
1851
  for (i = 0; i < count; i++)
1852
    {
1853
      if ((mask & (1u << i)) == 0)
1854
        {
1855
          inst.error = _("non-contiguous register range");
1856
          return FAIL;
1857
        }
1858
    }
1859
 
1860
  *ccp = str;
1861
 
1862
  return count;
1863
}
1864
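/* Illustrative sketch, not part of the GAS sources: how Q registers are
   folded into the D-register mask built by parse_vfp_reg_list above.
   Q<n> is reported as D-number 2n, occupies two mask bits (setmask == 3)
   and counts as two registers (addregs == 2), so "{q1, q2}" covers
   d2-d5.  Standalone C, disabled so it does not affect compilation.  */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned long mask = 0;
  int count = 0;
  int q;

  for (q = 1; q <= 2; q++)       /* The list "{q1, q2}".  */
    {
      int new_base = 2 * q;      /* Q<n> parses as D-number 2n.  */
      mask |= 3ul << new_base;   /* Two adjacent D registers.  */
      count += 2;
    }

  printf ("mask = 0x%02lx, count = %d\n", mask, count);  /* 0x3c, 4  */
  return 0;
}
#endif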
 
1865
/* True if two alias types are the same.  */
1866
 
1867
static bfd_boolean
1868
neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1869
{
1870
  if (!a && !b)
1871
    return TRUE;
1872
 
1873
  if (!a || !b)
1874
    return FALSE;
1875
 
1876
  if (a->defined != b->defined)
1877
    return FALSE;
1878
 
1879
  if ((a->defined & NTA_HASTYPE) != 0
1880
      && (a->eltype.type != b->eltype.type
1881
          || a->eltype.size != b->eltype.size))
1882
    return FALSE;
1883
 
1884
  if ((a->defined & NTA_HASINDEX) != 0
1885
      && (a->index != b->index))
1886
    return FALSE;
1887
 
1888
  return TRUE;
1889
}
1890
 
1891
/* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1892
   The base register is put in *PBASE.
1893
   The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1894
   the return value.
1895
   The register stride (minus one) is put in bit 4 of the return value.
1896
   Bits [6:5] encode the list length (minus one).
1897
   The type of the list elements is put in *ELTYPE, if non-NULL.  */
1898
 
1899
#define NEON_LANE(X)            ((X) & 0xf)
1900
#define NEON_REG_STRIDE(X)      ((((X) >> 4) & 1) + 1)
1901
#define NEON_REGLIST_LENGTH(X)  ((((X) >> 5) & 3) + 1)
1902
 
1903
static int
1904
parse_neon_el_struct_list (char **str, unsigned *pbase,
1905
                           struct neon_type_el *eltype)
1906
{
1907
  char *ptr = *str;
1908
  int base_reg = -1;
1909
  int reg_incr = -1;
1910
  int count = 0;
1911
  int lane = -1;
1912
  int leading_brace = 0;
1913
  enum arm_reg_type rtype = REG_TYPE_NDQ;
1914
  const char *const incr_error = _("register stride must be 1 or 2");
1915
  const char *const type_error = _("mismatched element/structure types in list");
1916
  struct neon_typed_alias firsttype;
1917
 
1918
  if (skip_past_char (&ptr, '{') == SUCCESS)
1919
    leading_brace = 1;
1920
 
1921
  do
1922
    {
1923
      struct neon_typed_alias atype;
1924
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);
1925
 
1926
      if (getreg == FAIL)
1927
        {
1928
          first_error (_(reg_expected_msgs[rtype]));
1929
          return FAIL;
1930
        }
1931
 
1932
      if (base_reg == -1)
1933
        {
1934
          base_reg = getreg;
1935
          if (rtype == REG_TYPE_NQ)
1936
            {
1937
              reg_incr = 1;
1938
            }
1939
          firsttype = atype;
1940
        }
1941
      else if (reg_incr == -1)
1942
        {
1943
          reg_incr = getreg - base_reg;
1944
          if (reg_incr < 1 || reg_incr > 2)
1945
            {
1946
              first_error (_(incr_error));
1947
              return FAIL;
1948
            }
1949
        }
1950
      else if (getreg != base_reg + reg_incr * count)
1951
        {
1952
          first_error (_(incr_error));
1953
          return FAIL;
1954
        }
1955
 
1956
      if (! neon_alias_types_same (&atype, &firsttype))
1957
        {
1958
          first_error (_(type_error));
1959
          return FAIL;
1960
        }
1961
 
1962
      /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
1963
         modes.  */
1964
      if (ptr[0] == '-')
1965
        {
1966
          struct neon_typed_alias htype;
1967
          int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
1968
          if (lane == -1)
1969
            lane = NEON_INTERLEAVE_LANES;
1970
          else if (lane != NEON_INTERLEAVE_LANES)
1971
            {
1972
              first_error (_(type_error));
1973
              return FAIL;
1974
            }
1975
          if (reg_incr == -1)
1976
            reg_incr = 1;
1977
          else if (reg_incr != 1)
1978
            {
1979
              first_error (_("don't use Rn-Rm syntax with non-unit stride"));
1980
              return FAIL;
1981
            }
1982
          ptr++;
1983
          hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
1984
          if (hireg == FAIL)
1985
            {
1986
              first_error (_(reg_expected_msgs[rtype]));
1987
              return FAIL;
1988
            }
1989
          if (! neon_alias_types_same (&htype, &firsttype))
1990
            {
1991
              first_error (_(type_error));
1992
              return FAIL;
1993
            }
1994
          count += hireg + dregs - getreg;
1995
          continue;
1996
        }
1997
 
1998
      /* If we're using Q registers, we can't use [] or [n] syntax.  */
1999
      if (rtype == REG_TYPE_NQ)
2000
        {
2001
          count += 2;
2002
          continue;
2003
        }
2004
 
2005
      if ((atype.defined & NTA_HASINDEX) != 0)
2006
        {
2007
          if (lane == -1)
2008
            lane = atype.index;
2009
          else if (lane != atype.index)
2010
            {
2011
              first_error (_(type_error));
2012
              return FAIL;
2013
            }
2014
        }
2015
      else if (lane == -1)
2016
        lane = NEON_INTERLEAVE_LANES;
2017
      else if (lane != NEON_INTERLEAVE_LANES)
2018
        {
2019
          first_error (_(type_error));
2020
          return FAIL;
2021
        }
2022
      count++;
2023
    }
2024
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);
2025
 
2026
  /* No lane set by [x]. We must be interleaving structures.  */
2027
  if (lane == -1)
2028
    lane = NEON_INTERLEAVE_LANES;
2029
 
2030
  /* Sanity check.  */
2031
  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
2032
      || (count > 1 && reg_incr == -1))
2033
    {
2034
      first_error (_("error parsing element/structure list"));
2035
      return FAIL;
2036
    }
2037
 
2038
  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
2039
    {
2040
      first_error (_("expected }"));
2041
      return FAIL;
2042
    }
2043
 
2044
  if (reg_incr == -1)
2045
    reg_incr = 1;
2046
 
2047
  if (eltype)
2048
    *eltype = firsttype.eltype;
2049
 
2050
  *pbase = base_reg;
2051
  *str = ptr;
2052
 
2053
  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
2054
}
2055
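/* Illustrative sketch, not part of the GAS sources: decoding the packed
   value returned by parse_neon_el_struct_list with the macros defined
   above.  A lane-2, stride-2, two-element list such as "{d0[2], d2[2]}"
   packs as 2 | (1 << 4) | (1 << 5).  Standalone C, disabled so it does
   not affect compilation.  */
#if 0
#include <stdio.h>

#define NEON_LANE(X)            ((X) & 0xf)
#define NEON_REG_STRIDE(X)      ((((X) >> 4) & 1) + 1)
#define NEON_REGLIST_LENGTH(X)  ((((X) >> 5) & 3) + 1)

int
main (void)
{
  int packed = 2 | (1 << 4) | (1 << 5);   /* 0x32  */

  printf ("lane %d, stride %d, length %d\n",
          NEON_LANE (packed),
          NEON_REG_STRIDE (packed),
          NEON_REGLIST_LENGTH (packed));  /* lane 2, stride 2, length 2  */
  return 0;
}
#endif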
 
2056
/* Parse an explicit relocation suffix on an expression.  This is
2057
   either nothing, or a word in parentheses.  Note that if !OBJ_ELF,
2058
   arm_reloc_hsh contains no entries, so this function can only
2059
   succeed if there is no () after the word.  Returns -1 on error,
2060
   BFD_RELOC_UNUSED if there wasn't any suffix.  */
2061 163 khays
 
2062 16 khays
static int
2063
parse_reloc (char **str)
2064
{
2065
  struct reloc_entry *r;
2066
  char *p, *q;
2067
 
2068
  if (**str != '(')
2069
    return BFD_RELOC_UNUSED;
2070
 
2071
  p = *str + 1;
2072
  q = p;
2073
 
2074
  while (*q && *q != ')' && *q != ',')
2075
    q++;
2076
  if (*q != ')')
2077
    return -1;
2078
 
2079
  if ((r = (struct reloc_entry *)
2080
       hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2081
    return -1;
2082
 
2083
  *str = q + 1;
2084
  return r->reloc;
2085
}
2086
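/* Illustrative sketch, not part of the GAS sources: parse_reloc above
   only isolates the word between '(' and ')'; the real lookup then goes
   through arm_reloc_hsh.  For a suffix written as "(GOT)" the span
   handed to the hash table is "GOT".  Standalone C, disabled so it does
   not affect compilation.  */
#if 0
#include <stdio.h>

int
main (void)
{
  const char *str = "(GOT)";
  const char *p, *q;

  if (*str != '(')
    return 1;
  p = str + 1;
  q = p;
  while (*q && *q != ')' && *q != ',')
    q++;
  if (*q != ')')
    return 1;                      /* Malformed suffix.  */

  printf ("reloc name: %.*s\n", (int) (q - p), p);   /* "GOT"  */
  return 0;
}
#endif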
 
2087
/* Directives: register aliases.  */
2088
 
2089
static struct reg_entry *
2090
insert_reg_alias (char *str, unsigned number, int type)
2091
{
2092
  struct reg_entry *new_reg;
2093
  const char *name;
2094
 
2095
  if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2096
    {
2097
      if (new_reg->builtin)
2098
        as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2099
 
2100
      /* Only warn about a redefinition if it's not defined as the
2101
         same register.  */
2102
      else if (new_reg->number != number || new_reg->type != type)
2103
        as_warn (_("ignoring redefinition of register alias '%s'"), str);
2104
 
2105
      return NULL;
2106
    }
2107
 
2108
  name = xstrdup (str);
2109
  new_reg = (struct reg_entry *) xmalloc (sizeof (struct reg_entry));
2110
 
2111
  new_reg->name = name;
2112
  new_reg->number = number;
2113
  new_reg->type = type;
2114
  new_reg->builtin = FALSE;
2115
  new_reg->neon = NULL;
2116
 
2117
  if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2118
    abort ();
2119
 
2120
  return new_reg;
2121
}
2122
 
2123
static void
2124
insert_neon_reg_alias (char *str, int number, int type,
2125
                       struct neon_typed_alias *atype)
2126
{
2127
  struct reg_entry *reg = insert_reg_alias (str, number, type);
2128
 
2129
  if (!reg)
2130
    {
2131
      first_error (_("attempt to redefine typed alias"));
2132
      return;
2133
    }
2134
 
2135
  if (atype)
2136
    {
2137
      reg->neon = (struct neon_typed_alias *)
2138
          xmalloc (sizeof (struct neon_typed_alias));
2139
      *reg->neon = *atype;
2140
    }
2141
}
2142
 
2143
/* Look for the .req directive.  This is of the form:
2144
 
2145
        new_register_name .req existing_register_name
2146
 
2147
   If we find one, or if it looks sufficiently like one that we want to
2148
   handle any error here, return TRUE.  Otherwise return FALSE.  */
2149
 
2150
static bfd_boolean
2151
create_register_alias (char * newname, char *p)
2152
{
2153
  struct reg_entry *old;
2154
  char *oldname, *nbuf;
2155
  size_t nlen;
2156
 
2157
  /* The input scrubber ensures that whitespace after the mnemonic is
2158
     collapsed to single spaces.  */
2159
  oldname = p;
2160
  if (strncmp (oldname, " .req ", 6) != 0)
2161
    return FALSE;
2162
 
2163
  oldname += 6;
2164
  if (*oldname == '\0')
2165
    return FALSE;
2166
 
2167
  old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
2168
  if (!old)
2169
    {
2170
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
2171
      return TRUE;
2172
    }
2173
 
2174
  /* If TC_CASE_SENSITIVE is defined, then newname already points to
2175
     the desired alias name, and p points to its end.  If not, then
2176
     the desired alias name is in the global original_case_string.  */
2177
#ifdef TC_CASE_SENSITIVE
2178
  nlen = p - newname;
2179
#else
2180
  newname = original_case_string;
2181
  nlen = strlen (newname);
2182
#endif
2183
 
2184
  nbuf = (char *) alloca (nlen + 1);
2185
  memcpy (nbuf, newname, nlen);
2186
  nbuf[nlen] = '\0';
2187
 
2188
  /* Create aliases under the new name as stated; an all-lowercase
2189
     version of the new name; and an all-uppercase version of the new
2190
     name.  */
2191
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
2192
    {
2193
      for (p = nbuf; *p; p++)
2194
        *p = TOUPPER (*p);
2195
 
2196
      if (strncmp (nbuf, newname, nlen))
2197
        {
2198
          /* If this attempt to create an additional alias fails, do not bother
2199
             trying to create the all-lower case alias.  We will fail and issue
2200
             a second, duplicate error message.  This situation arises when the
2201
             programmer does something like:
2202
               foo .req r0
2203
               Foo .req r1
2204
             The second .req creates the "Foo" alias but then fails to create
2205
             the artificial FOO alias because it has already been created by the
2206
             first .req.  */
2207
          if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
2208
            return TRUE;
2209
        }
2210
 
2211
      for (p = nbuf; *p; p++)
2212
        *p = TOLOWER (*p);
2213
 
2214
      if (strncmp (nbuf, newname, nlen))
2215
        insert_reg_alias (nbuf, old->number, old->type);
2216
    }
2217
 
2218
  return TRUE;
2219
}
2220
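/* Illustrative sketch, not part of the GAS sources: the alias made by
   ".req" is also entered in all-upper and all-lower case, unless either
   spelling is identical to the name as written.  "Foo .req r1" therefore
   tries to create "Foo", "FOO" and "foo".  Standalone C, disabled so it
   does not affect compilation.  */
#if 0
#include <ctype.h>
#include <stdio.h>
#include <string.h>

int
main (void)
{
  char nbuf[8];
  char *p;

  strcpy (nbuf, "Foo");
  printf ("as written: %s\n", nbuf);

  for (p = nbuf; *p; p++)
    *p = toupper ((unsigned char) *p);
  if (strcmp (nbuf, "Foo") != 0)
    printf ("upper case: %s\n", nbuf);   /* FOO  */

  for (p = nbuf; *p; p++)
    *p = tolower ((unsigned char) *p);
  if (strcmp (nbuf, "Foo") != 0)
    printf ("lower case: %s\n", nbuf);   /* foo  */

  return 0;
}
#endif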
 
2221
/* Create a Neon typed/indexed register alias using directives, e.g.:
2222
     X .dn d5.s32[1]
2223
     Y .qn 6.s16
2224
     Z .dn d7
2225
     T .dn Z[0]
2226
   These typed registers can be used instead of the types specified after the
2227
   Neon mnemonic, so long as all operands given have types. Types can also be
2228
   specified directly, e.g.:
2229
     vadd d0.s32, d1.s32, d2.s32  */
2230
 
2231
static bfd_boolean
2232
create_neon_reg_alias (char *newname, char *p)
2233
{
2234
  enum arm_reg_type basetype;
2235
  struct reg_entry *basereg;
2236
  struct reg_entry mybasereg;
2237
  struct neon_type ntype;
2238
  struct neon_typed_alias typeinfo;
2239
  char *namebuf, *nameend ATTRIBUTE_UNUSED;
2240
  int namelen;
2241
 
2242
  typeinfo.defined = 0;
2243
  typeinfo.eltype.type = NT_invtype;
2244
  typeinfo.eltype.size = -1;
2245
  typeinfo.index = -1;
2246
 
2247
  nameend = p;
2248
 
2249
  if (strncmp (p, " .dn ", 5) == 0)
2250
    basetype = REG_TYPE_VFD;
2251
  else if (strncmp (p, " .qn ", 5) == 0)
2252
    basetype = REG_TYPE_NQ;
2253
  else
2254
    return FALSE;
2255
 
2256
  p += 5;
2257
 
2258
  if (*p == '\0')
2259
    return FALSE;
2260
 
2261
  basereg = arm_reg_parse_multi (&p);
2262
 
2263
  if (basereg && basereg->type != basetype)
2264
    {
2265
      as_bad (_("bad type for register"));
2266
      return FALSE;
2267
    }
2268
 
2269
  if (basereg == NULL)
2270
    {
2271
      expressionS exp;
2272
      /* Try parsing as an integer.  */
2273
      my_get_expression (&exp, &p, GE_NO_PREFIX);
2274
      if (exp.X_op != O_constant)
2275
        {
2276
          as_bad (_("expression must be constant"));
2277
          return FALSE;
2278
        }
2279
      basereg = &mybasereg;
2280
      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
2281
                                                  : exp.X_add_number;
2282
      basereg->neon = 0;
2283
    }
2284
 
2285
  if (basereg->neon)
2286
    typeinfo = *basereg->neon;
2287
 
2288
  if (parse_neon_type (&ntype, &p) == SUCCESS)
2289
    {
2290
      /* We got a type.  */
2291
      if (typeinfo.defined & NTA_HASTYPE)
2292
        {
2293
          as_bad (_("can't redefine the type of a register alias"));
2294
          return FALSE;
2295
        }
2296
 
2297
      typeinfo.defined |= NTA_HASTYPE;
2298
      if (ntype.elems != 1)
2299
        {
2300
          as_bad (_("you must specify a single type only"));
2301
          return FALSE;
2302
        }
2303
      typeinfo.eltype = ntype.el[0];
2304
    }
2305
 
2306
  if (skip_past_char (&p, '[') == SUCCESS)
2307
    {
2308
      expressionS exp;
2309
      /* We got a scalar index.  */
2310
 
2311
      if (typeinfo.defined & NTA_HASINDEX)
2312
        {
2313
          as_bad (_("can't redefine the index of a scalar alias"));
2314
          return FALSE;
2315
        }
2316
 
2317
      my_get_expression (&exp, &p, GE_NO_PREFIX);
2318
 
2319
      if (exp.X_op != O_constant)
2320
        {
2321
          as_bad (_("scalar index must be constant"));
2322
          return FALSE;
2323
        }
2324
 
2325
      typeinfo.defined |= NTA_HASINDEX;
2326
      typeinfo.index = exp.X_add_number;
2327
 
2328
      if (skip_past_char (&p, ']') == FAIL)
2329
        {
2330
          as_bad (_("expecting ]"));
2331
          return FALSE;
2332
        }
2333
    }
2334
 
2335
  /* If TC_CASE_SENSITIVE is defined, then newname already points to
2336
     the desired alias name, and p points to its end.  If not, then
2337
     the desired alias name is in the global original_case_string.  */
2338
#ifdef TC_CASE_SENSITIVE
2339
  namelen = nameend - newname;
2340
#else
2341
  newname = original_case_string;
2342
  namelen = strlen (newname);
2343
#endif
2344
 
2345
  namebuf = (char *) alloca (namelen + 1);
2346
  strncpy (namebuf, newname, namelen);
2347
  namebuf[namelen] = '\0';
2348
 
2349
  insert_neon_reg_alias (namebuf, basereg->number, basetype,
2350
                         typeinfo.defined != 0 ? &typeinfo : NULL);
2351
 
2352
  /* Insert name in all uppercase.  */
2353
  for (p = namebuf; *p; p++)
2354
    *p = TOUPPER (*p);
2355
 
2356
  if (strncmp (namebuf, newname, namelen))
2357
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
2358
                           typeinfo.defined != 0 ? &typeinfo : NULL);
2359
 
2360
  /* Insert name in all lowercase.  */
2361
  for (p = namebuf; *p; p++)
2362
    *p = TOLOWER (*p);
2363
 
2364
  if (strncmp (namebuf, newname, namelen))
2365
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
2366
                           typeinfo.defined != 0 ? &typeinfo : NULL);
2367
 
2368
  return TRUE;
2369
}
2370
 
2371
/* Should never be called, as .req goes between the alias and the
2372
   register name, not at the beginning of the line.  */
2373
 
2374
static void
2375
s_req (int a ATTRIBUTE_UNUSED)
2376
{
2377
  as_bad (_("invalid syntax for .req directive"));
2378
}
2379
 
2380
static void
2381
s_dn (int a ATTRIBUTE_UNUSED)
2382
{
2383
  as_bad (_("invalid syntax for .dn directive"));
2384
}
2385
 
2386
static void
2387
s_qn (int a ATTRIBUTE_UNUSED)
2388
{
2389
  as_bad (_("invalid syntax for .qn directive"));
2390
}
2391
 
2392
/* The .unreq directive deletes an alias which was previously defined
2393
   by .req.  For example:
2394
 
2395
       my_alias .req r11
2396
       .unreq my_alias    */
2397
 
2398
static void
2399
s_unreq (int a ATTRIBUTE_UNUSED)
2400
{
2401
  char * name;
2402
  char saved_char;
2403
 
2404
  name = input_line_pointer;
2405
 
2406
  while (*input_line_pointer != 0
2407
         && *input_line_pointer != ' '
2408
         && *input_line_pointer != '\n')
2409
    ++input_line_pointer;
2410
 
2411
  saved_char = *input_line_pointer;
2412
  *input_line_pointer = 0;
2413
 
2414
  if (!*name)
2415
    as_bad (_("invalid syntax for .unreq directive"));
2416
  else
2417
    {
2418
      struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2419
                                                              name);
2420
 
2421
      if (!reg)
2422
        as_bad (_("unknown register alias '%s'"), name);
2423
      else if (reg->builtin)
2424
        as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2425
                 name);
2426
      else
2427
        {
2428
          char * p;
2429
          char * nbuf;
2430
 
2431
          hash_delete (arm_reg_hsh, name, FALSE);
2432
          free ((char *) reg->name);
2433
          if (reg->neon)
2434
            free (reg->neon);
2435
          free (reg);
2436
 
2437
          /* Also locate the all upper case and all lower case versions.
2438
             Do not complain if we cannot find one or the other as it
2439
             was probably deleted above.  */
2440
 
2441
          nbuf = strdup (name);
2442
          for (p = nbuf; *p; p++)
2443
            *p = TOUPPER (*p);
2444
          reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2445
          if (reg)
2446
            {
2447
              hash_delete (arm_reg_hsh, nbuf, FALSE);
2448
              free ((char *) reg->name);
2449
              if (reg->neon)
2450
                free (reg->neon);
2451
              free (reg);
2452
            }
2453
 
2454
          for (p = nbuf; *p; p++)
2455
            *p = TOLOWER (*p);
2456
          reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2457
          if (reg)
2458
            {
2459
              hash_delete (arm_reg_hsh, nbuf, FALSE);
2460
              free ((char *) reg->name);
2461
              if (reg->neon)
2462
                free (reg->neon);
2463
              free (reg);
2464
            }
2465
 
2466
          free (nbuf);
2467
        }
2468
    }
2469
 
2470
  *input_line_pointer = saved_char;
2471
  demand_empty_rest_of_line ();
2472
}
2473
 
2474
/* Directives: Instruction set selection.  */
2475
 
2476
#ifdef OBJ_ELF
2477
/* This code is to handle mapping symbols as defined in the ARM ELF spec.
2478
   (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2479
   Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
2480
   and $d had type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped.  */
2481
 
2482
/* Create a new mapping symbol for the transition to STATE.  */
2483
 
2484
static void
2485
make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
2486
{
2487
  symbolS * symbolP;
2488
  const char * symname;
2489
  int type;
2490
 
2491
  switch (state)
2492
    {
2493
    case MAP_DATA:
2494
      symname = "$d";
2495
      type = BSF_NO_FLAGS;
2496
      break;
2497
    case MAP_ARM:
2498
      symname = "$a";
2499
      type = BSF_NO_FLAGS;
2500
      break;
2501
    case MAP_THUMB:
2502
      symname = "$t";
2503
      type = BSF_NO_FLAGS;
2504
      break;
2505
    default:
2506
      abort ();
2507
    }
2508
 
2509
  symbolP = symbol_new (symname, now_seg, value, frag);
2510
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
2511
 
2512
  switch (state)
2513
    {
2514
    case MAP_ARM:
2515
      THUMB_SET_FUNC (symbolP, 0);
2516
      ARM_SET_THUMB (symbolP, 0);
2517
      ARM_SET_INTERWORK (symbolP, support_interwork);
2518
      break;
2519
 
2520
    case MAP_THUMB:
2521
      THUMB_SET_FUNC (symbolP, 1);
2522
      ARM_SET_THUMB (symbolP, 1);
2523
      ARM_SET_INTERWORK (symbolP, support_interwork);
2524
      break;
2525
 
2526
    case MAP_DATA:
2527
    default:
2528
      break;
2529
    }
2530
 
2531
  /* Save the mapping symbols for future reference.  Also check that
2532
     we do not place two mapping symbols at the same offset within a
2533
     frag.  We'll handle overlap between frags in
2534
     check_mapping_symbols.
2535
 
2536
     If .fill or other data filling directive generates zero sized data,
2537
     the mapping symbol for the following code will have the same value
2538
     as the one generated for the data filling directive.  In this case,
2539
     we replace the old symbol with the new one at the same address.  */
2540
  if (value == 0)
2541
    {
2542
      if (frag->tc_frag_data.first_map != NULL)
2543
        {
2544
          know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
2545
          symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
2546
        }
2547
      frag->tc_frag_data.first_map = symbolP;
2548
    }
2549
  if (frag->tc_frag_data.last_map != NULL)
2550
    {
2551
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
2552
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
2553
        symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
2554
    }
2555
  frag->tc_frag_data.last_map = symbolP;
2556
}
2557
 
2558
/* We must sometimes convert a region marked as code to data during
2559
   code alignment, if an odd number of bytes have to be padded.  The
2560
   code mapping symbol is pushed to an aligned address.  */
2561
 
2562
static void
2563
insert_data_mapping_symbol (enum mstate state,
2564
                            valueT value, fragS *frag, offsetT bytes)
2565
{
2566
  /* If there was already a mapping symbol, remove it.  */
2567
  if (frag->tc_frag_data.last_map != NULL
2568
      && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
2569
    {
2570
      symbolS *symp = frag->tc_frag_data.last_map;
2571
 
2572
      if (value == 0)
2573
        {
2574
          know (frag->tc_frag_data.first_map == symp);
2575
          frag->tc_frag_data.first_map = NULL;
2576
        }
2577
      frag->tc_frag_data.last_map = NULL;
2578
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
2579
    }
2580
 
2581
  make_mapping_symbol (MAP_DATA, value, frag);
2582
  make_mapping_symbol (state, value + bytes, frag);
2583
}
2584
 
2585
static void mapping_state_2 (enum mstate state, int max_chars);
2586
 
2587
/* Set the mapping state to STATE.  Only call this when about to
2588
   emit some STATE bytes to the file.  */
2589
 
2590
void
2591
mapping_state (enum mstate state)
2592
{
2593
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2594
 
2595
#define TRANSITION(from, to) (mapstate == (from) && state == (to))
2596
 
2597
  if (mapstate == state)
2598
    /* The mapping symbol has already been emitted.
2599
       There is nothing else to do.  */
2600
    return;
2601 160 khays
 
2602
  if (state == MAP_ARM || state == MAP_THUMB)
2603
    /*  PR gas/12931
2604
        All ARM instructions require 4-byte alignment.
2605
        (Almost) all Thumb instructions require 2-byte alignment.
2606
 
2607
        When emitting instructions into any section, mark the section
2608
        appropriately.
2609
 
2610
        Some Thumb instructions are alignment-sensitive modulo 4 bytes,
2611
        but themselves require 2-byte alignment; this applies to some
2612
        PC-relative forms.  However, these cases will involve implicit
2613
        literal pool generation or an explicit .align >=2, both of
2614
        which will cause the section to be marked with sufficient
2615
        alignment.  Thus, we don't handle those cases here.  */
2616
    record_alignment (now_seg, state == MAP_ARM ? 2 : 1);
2617
 
2618
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
2619 16 khays
    /* This case is handled by the MAP_UNDEFINED to MAP_ARM/THUMB case below.  */
2620
    return;
2621
  else if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
2622
          || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
2623
    {
2624
      /* Only add the symbol if the offset is > 0:
2625
         if we're at the first frag, check it's size > 0;
2626
         if we're not at the first frag, then for sure
2627
            the offset is > 0.  */
2628
      struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
2629
      const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);
2630
 
2631
      if (add_symbol)
2632
        make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
2633
    }
2634
 
2635
  mapping_state_2 (state, 0);
2636
#undef TRANSITION
2637
}
2638
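/* Illustrative note, not part of the GAS sources: record_alignment takes
   a log2 value, so "state == MAP_ARM ? 2 : 1" in mapping_state above
   requests 1 << 2 == 4-byte alignment for ARM code and 1 << 1 == 2-byte
   alignment for Thumb code.  Standalone C, disabled so it does not
   affect compilation.  */
#if 0
#include <stdio.h>

int
main (void)
{
  int arm_log2 = 2, thumb_log2 = 1;

  printf ("ARM alignment   = %d bytes\n", 1 << arm_log2);    /* 4  */
  printf ("Thumb alignment = %d bytes\n", 1 << thumb_log2);  /* 2  */
  return 0;
}
#endif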
 
2639
/* Same as mapping_state, but MAX_CHARS bytes have already been
2640
   allocated.  Put the mapping symbol that far back.  */
2641
 
2642
static void
2643
mapping_state_2 (enum mstate state, int max_chars)
2644
{
2645
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2646
 
2647
  if (!SEG_NORMAL (now_seg))
2648
    return;
2649
 
2650
  if (mapstate == state)
2651
    /* The mapping symbol has already been emitted.
2652
       There is nothing else to do.  */
2653
    return;
2654
 
2655
  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
2656
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
2657
}
2658
#else
2659
#define mapping_state(x) ((void)0)
2660
#define mapping_state_2(x, y) ((void)0)
2661
#endif
2662
 
2663
/* Find the real, Thumb encoded start of a Thumb function.  */
2664
 
2665
#ifdef OBJ_COFF
2666
static symbolS *
2667
find_real_start (symbolS * symbolP)
2668
{
2669
  char *       real_start;
2670
  const char * name = S_GET_NAME (symbolP);
2671
  symbolS *    new_target;
2672
 
2673
  /* This definition must agree with the one in gcc/config/arm/thumb.c.  */
2674
#define STUB_NAME ".real_start_of"
2675
 
2676
  if (name == NULL)
2677
    abort ();
2678
 
2679
  /* The compiler may generate BL instructions to local labels because
2680
     it needs to perform a branch to a far away location. These labels
2681
     do not have a corresponding ".real_start_of" label.  We check
2682
     both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2683
     the ".real_start_of" convention for nonlocal branches.  */
2684
  if (S_IS_LOCAL (symbolP) || name[0] == '.')
2685
    return symbolP;
2686
 
2687
  real_start = ACONCAT ((STUB_NAME, name, NULL));
2688
  new_target = symbol_find (real_start);
2689
 
2690
  if (new_target == NULL)
2691
    {
2692
      as_warn (_("Failed to find real start of function: %s\n"), name);
2693
      new_target = symbolP;
2694
    }
2695
 
2696
  return new_target;
2697
}
2698
#endif
2699
 
2700
static void
2701
opcode_select (int width)
2702
{
2703
  switch (width)
2704
    {
2705
    case 16:
2706
      if (! thumb_mode)
2707
        {
2708
          if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2709
            as_bad (_("selected processor does not support THUMB opcodes"));
2710
 
2711
          thumb_mode = 1;
2712
          /* No need to force the alignment, since we will have been
2713
             coming from ARM mode, which is word-aligned.  */
2714
          record_alignment (now_seg, 1);
2715
        }
2716
      break;
2717
 
2718
    case 32:
2719
      if (thumb_mode)
2720
        {
2721
          if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2722
            as_bad (_("selected processor does not support ARM opcodes"));
2723
 
2724
          thumb_mode = 0;
2725
 
2726
          if (!need_pass_2)
2727
            frag_align (2, 0, 0);
2728
 
2729
          record_alignment (now_seg, 1);
2730
        }
2731
      break;
2732
 
2733
    default:
2734
      as_bad (_("invalid instruction size selected (%d)"), width);
2735
    }
2736
}
2737
 
2738
static void
2739
s_arm (int ignore ATTRIBUTE_UNUSED)
2740
{
2741
  opcode_select (32);
2742
  demand_empty_rest_of_line ();
2743
}
2744
 
2745
static void
2746
s_thumb (int ignore ATTRIBUTE_UNUSED)
2747
{
2748
  opcode_select (16);
2749
  demand_empty_rest_of_line ();
2750
}
2751
 
2752
static void
2753
s_code (int unused ATTRIBUTE_UNUSED)
2754
{
2755
  int temp;
2756
 
2757
  temp = get_absolute_expression ();
2758
  switch (temp)
2759
    {
2760
    case 16:
2761
    case 32:
2762
      opcode_select (temp);
2763
      break;
2764
 
2765
    default:
2766
      as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2767
    }
2768
}
2769
 
2770
static void
2771
s_force_thumb (int ignore ATTRIBUTE_UNUSED)
2772
{
2773
  /* If we are not already in thumb mode go into it, EVEN if
2774
     the target processor does not support thumb instructions.
2775
     This is used by gcc/config/arm/lib1funcs.asm for example
2776
     to compile interworking support functions even if the
2777
     target processor does not support interworking.  */
2778
  if (! thumb_mode)
2779
    {
2780
      thumb_mode = 2;
2781
      record_alignment (now_seg, 1);
2782
    }
2783
 
2784
  demand_empty_rest_of_line ();
2785
}
2786
 
2787
static void
2788
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
2789
{
2790
  s_thumb (0);
2791
 
2792
  /* The following label is the name/address of the start of a Thumb function.
2793
     We need to know this for the interworking support.  */
2794
  label_is_thumb_function_name = TRUE;
2795
}
2796
 
2797
/* Perform a .set directive, but also mark the alias as
2798
   being a thumb function.  */
2799
 
2800
static void
2801
s_thumb_set (int equiv)
2802
{
2803
  /* XXX the following is a duplicate of the code for s_set() in read.c
2804
     We cannot just call that code as we need to get at the symbol that
2805
     is created.  */
2806
  char *    name;
2807
  char      delim;
2808
  char *    end_name;
2809
  symbolS * symbolP;
2810
 
2811
  /* Especial apologies for the random logic:
2812
     This just grew, and could be parsed much more simply!
2813
     Dean - in haste.  */
2814
  name      = input_line_pointer;
2815
  delim     = get_symbol_end ();
2816
  end_name  = input_line_pointer;
2817
  *end_name = delim;
2818
 
2819
  if (*input_line_pointer != ',')
2820
    {
2821
      *end_name = 0;
2822
      as_bad (_("expected comma after name \"%s\""), name);
2823
      *end_name = delim;
2824
      ignore_rest_of_line ();
2825
      return;
2826
    }
2827
 
2828
  input_line_pointer++;
2829
  *end_name = 0;
2830
 
2831
  if (name[0] == '.' && name[1] == '\0')
2832
    {
2833
      /* XXX - this should not happen to .thumb_set.  */
2834
      abort ();
2835
    }
2836
 
2837
  if ((symbolP = symbol_find (name)) == NULL
2838
      && (symbolP = md_undefined_symbol (name)) == NULL)
2839
    {
2840
#ifndef NO_LISTING
2841
      /* When doing symbol listings, play games with dummy fragments living
2842
         outside the normal fragment chain to record the file and line info
2843
         for this symbol.  */
2844
      if (listing & LISTING_SYMBOLS)
2845
        {
2846
          extern struct list_info_struct * listing_tail;
2847
          fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));
2848
 
2849
          memset (dummy_frag, 0, sizeof (fragS));
2850
          dummy_frag->fr_type = rs_fill;
2851
          dummy_frag->line = listing_tail;
2852
          symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
2853
          dummy_frag->fr_symbol = symbolP;
2854
        }
2855
      else
2856
#endif
2857
        symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);
2858
 
2859
#ifdef OBJ_COFF
2860
      /* "set" symbols are local unless otherwise specified.  */
2861
      SF_SET_LOCAL (symbolP);
2862
#endif /* OBJ_COFF  */
2863
    }                           /* Make a new symbol.  */
2864
 
2865
  symbol_table_insert (symbolP);
2866
 
2867
  * end_name = delim;
2868
 
2869
  if (equiv
2870
      && S_IS_DEFINED (symbolP)
2871
      && S_GET_SEGMENT (symbolP) != reg_section)
2872
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));
2873
 
2874
  pseudo_set (symbolP);
2875
 
2876
  demand_empty_rest_of_line ();
2877
 
2878
  /* XXX Now we come to the Thumb specific bit of code.  */
2879
 
2880
  THUMB_SET_FUNC (symbolP, 1);
2881
  ARM_SET_THUMB (symbolP, 1);
2882
#if defined OBJ_ELF || defined OBJ_COFF
2883
  ARM_SET_INTERWORK (symbolP, support_interwork);
2884
#endif
2885
}
2886
 
2887
/* Directives: Mode selection.  */
2888
 
2889
/* .syntax [unified|divided] - choose the new unified syntax
2890
   (same for Arm and Thumb encoding, modulo slight differences in what
2891
   can be represented) or the old divergent syntax for each mode.  */
2892
static void
2893
s_syntax (int unused ATTRIBUTE_UNUSED)
2894
{
2895
  char *name, delim;
2896
 
2897
  name = input_line_pointer;
2898
  delim = get_symbol_end ();
2899
 
2900
  if (!strcasecmp (name, "unified"))
2901
    unified_syntax = TRUE;
2902
  else if (!strcasecmp (name, "divided"))
2903
    unified_syntax = FALSE;
2904
  else
2905
    {
2906
      as_bad (_("unrecognized syntax mode \"%s\""), name);
2907
      return;
2908
    }
2909
  *input_line_pointer = delim;
2910
  demand_empty_rest_of_line ();
2911
}
2912
 
2913
/* Directives: sectioning and alignment.  */
2914
 
2915
/* Same as s_align_ptwo but align 0 => align 2.  */
2916
 
2917
static void
2918
s_align (int unused ATTRIBUTE_UNUSED)
2919
{
2920
  int temp;
2921
  bfd_boolean fill_p;
2922
  long temp_fill;
2923
  long max_alignment = 15;
2924
 
2925
  temp = get_absolute_expression ();
2926
  if (temp > max_alignment)
2927
    as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
2928
  else if (temp < 0)
2929
    {
2930
      as_bad (_("alignment negative. 0 assumed."));
2931
      temp = 0;
2932
    }
2933
 
2934
  if (*input_line_pointer == ',')
2935
    {
2936
      input_line_pointer++;
2937
      temp_fill = get_absolute_expression ();
2938
      fill_p = TRUE;
2939
    }
2940
  else
2941
    {
2942
      fill_p = FALSE;
2943
      temp_fill = 0;
2944
    }
2945
 
2946
  if (!temp)
2947
    temp = 2;
2948
 
2949
  /* Only make a frag if we HAVE to.  */
2950
  if (temp && !need_pass_2)
2951
    {
2952
      if (!fill_p && subseg_text_p (now_seg))
2953
        frag_align_code (temp, 0);
2954
      else
2955
        frag_align (temp, (int) temp_fill, 0);
2956
    }
2957
  demand_empty_rest_of_line ();
2958
 
2959
  record_alignment (now_seg, temp);
2960
}
2961
 
2962
static void
2963
s_bss (int ignore ATTRIBUTE_UNUSED)
2964
{
2965
  /* We don't support putting frags in the BSS segment, we fake it by
2966
     marking in_bss, then looking at s_skip for clues.  */
2967
  subseg_set (bss_section, 0);
2968
  demand_empty_rest_of_line ();
2969
 
2970
#ifdef md_elf_section_change_hook
2971
  md_elf_section_change_hook ();
2972
#endif
2973
}
2974
 
2975
static void
2976
s_even (int ignore ATTRIBUTE_UNUSED)
2977
{
2978
  /* Never make frag if expect extra pass.  */
2979
  if (!need_pass_2)
2980
    frag_align (1, 0, 0);
2981
 
2982
  record_alignment (now_seg, 1);
2983
 
2984
  demand_empty_rest_of_line ();
2985
}
2986
 
2987
/* Directives: Literal pools.  */
2988
 
2989
static literal_pool *
2990
find_literal_pool (void)
2991
{
2992
  literal_pool * pool;
2993
 
2994
  for (pool = list_of_pools; pool != NULL; pool = pool->next)
2995
    {
2996
      if (pool->section == now_seg
2997
          && pool->sub_section == now_subseg)
2998
        break;
2999
    }
3000
 
3001
  return pool;
3002
}
3003
 
3004
static literal_pool *
3005
find_or_make_literal_pool (void)
3006
{
3007
  /* Next literal pool ID number.  */
3008
  static unsigned int latest_pool_num = 1;
3009
  literal_pool *      pool;
3010
 
3011
  pool = find_literal_pool ();
3012
 
3013
  if (pool == NULL)
3014
    {
3015
      /* Create a new pool.  */
3016
      pool = (literal_pool *) xmalloc (sizeof (* pool));
3017
      if (! pool)
3018
        return NULL;
3019
 
3020
      pool->next_free_entry = 0;
3021
      pool->section         = now_seg;
3022
      pool->sub_section     = now_subseg;
3023
      pool->next            = list_of_pools;
3024
      pool->symbol          = NULL;
3025
 
3026
      /* Add it to the list.  */
3027
      list_of_pools = pool;
3028
    }
3029
 
3030
  /* New pools, and emptied pools, will have a NULL symbol.  */
3031
  if (pool->symbol == NULL)
3032
    {
3033
      pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3034
                                    (valueT) 0, &zero_address_frag);
3035
      pool->id = latest_pool_num ++;
3036
    }
3037
 
3038
  /* Done.  */
3039
  return pool;
3040
}
3041
 
3042
/* Add the literal in the global 'inst'
3043
   structure to the relevant literal pool.  */
3044
 
3045
static int
3046
add_to_lit_pool (void)
3047
{
3048
  literal_pool * pool;
3049
  unsigned int entry;
3050
 
3051
  pool = find_or_make_literal_pool ();
3052
 
3053
  /* Check if this literal value is already in the pool.  */
3054
  for (entry = 0; entry < pool->next_free_entry; entry ++)
3055
    {
3056
      if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
3057
          && (inst.reloc.exp.X_op == O_constant)
3058
          && (pool->literals[entry].X_add_number
3059
              == inst.reloc.exp.X_add_number)
3060
          && (pool->literals[entry].X_unsigned
3061
              == inst.reloc.exp.X_unsigned))
3062
        break;
3063
 
3064
      if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
3065
          && (inst.reloc.exp.X_op == O_symbol)
3066
          && (pool->literals[entry].X_add_number
3067
              == inst.reloc.exp.X_add_number)
3068
          && (pool->literals[entry].X_add_symbol
3069
              == inst.reloc.exp.X_add_symbol)
3070
          && (pool->literals[entry].X_op_symbol
3071
              == inst.reloc.exp.X_op_symbol))
3072
        break;
3073
    }
3074
 
3075
  /* Do we need to create a new entry?  */
3076
  if (entry == pool->next_free_entry)
3077
    {
3078
      if (entry >= MAX_LITERAL_POOL_SIZE)
3079
        {
3080
          inst.error = _("literal pool overflow");
3081
          return FAIL;
3082
        }
3083
 
3084
      pool->literals[entry] = inst.reloc.exp;
3085 160 khays
#ifdef OBJ_ELF
3086
      /* PR ld/12974: Record the location of the first source line to reference
3087
         this entry in the literal pool.  If it turns out during linking that the
3088
         symbol does not exist we will be able to give an accurate line number for
3089
         the (first use of the) missing reference.  */
3090
      if (debug_type == DEBUG_DWARF2)
3091
        dwarf2_where (pool->locs + entry);
3092
#endif
3093 16 khays
      pool->next_free_entry += 1;
3094
    }
3095
 
3096
  inst.reloc.exp.X_op         = O_symbol;
3097
  inst.reloc.exp.X_add_number = ((int) entry) * 4;
3098
  inst.reloc.exp.X_add_symbol = pool->symbol;
3099
 
3100
  return SUCCESS;
3101
}
3102
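/* Illustrative sketch, not part of the GAS sources: after
   add_to_lit_pool, the instruction's relocation addend is the byte
   offset of the (possibly shared) entry from the pool symbol, i.e.
   entry index * 4, because s_ltorg below emits each entry as a .word.
   Standalone C, disabled so it does not affect compilation.  */
#if 0
#include <stdio.h>

int
main (void)
{
  /* Literal values referenced in order; the third reuses entry 0.  */
  static const unsigned long literals[] =
    { 0x12345678ul, 0xcafef00dul, 0x12345678ul };
  unsigned long pool[8];
  unsigned int nentries = 0;
  unsigned int i, entry;

  for (i = 0; i < 3; i++)
    {
      for (entry = 0; entry < nentries; entry++)
        if (pool[entry] == literals[i])
          break;
      if (entry == nentries)
        pool[nentries++] = literals[i];
      printf ("literal 0x%08lx -> pool offset %u\n",
              literals[i], entry * 4);          /* 0, 4, 0  */
    }
  return 0;
}
#endif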
 
3103
/* Can't use symbol_new here, so have to create a symbol and then at
3104
   a later date assign it a value.  That's what these functions do.  */
3105
 
3106
static void
3107
symbol_locate (symbolS *    symbolP,
3108
               const char * name,       /* It is copied, the caller can modify.  */
3109
               segT         segment,    /* Segment identifier (SEG_<something>).  */
3110
               valueT       valu,       /* Symbol value.  */
3111
               fragS *      frag)       /* Associated fragment.  */
3112
{
3113
  unsigned int name_length;
3114
  char * preserved_copy_of_name;
3115
 
3116
  name_length = strlen (name) + 1;   /* +1 for \0.  */
3117
  obstack_grow (&notes, name, name_length);
3118
  preserved_copy_of_name = (char *) obstack_finish (&notes);
3119
 
3120
#ifdef tc_canonicalize_symbol_name
3121
  preserved_copy_of_name =
3122
    tc_canonicalize_symbol_name (preserved_copy_of_name);
3123
#endif
3124
 
3125
  S_SET_NAME (symbolP, preserved_copy_of_name);
3126
 
3127
  S_SET_SEGMENT (symbolP, segment);
3128
  S_SET_VALUE (symbolP, valu);
3129
  symbol_clear_list_pointers (symbolP);
3130
 
3131
  symbol_set_frag (symbolP, frag);
3132
 
3133
  /* Link to end of symbol chain.  */
3134
  {
3135
    extern int symbol_table_frozen;
3136
 
3137
    if (symbol_table_frozen)
3138
      abort ();
3139
  }
3140
 
3141
  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);
3142
 
3143
  obj_symbol_new_hook (symbolP);
3144
 
3145
#ifdef tc_symbol_new_hook
3146
  tc_symbol_new_hook (symbolP);
3147
#endif
3148
 
3149
#ifdef DEBUG_SYMS
3150
  verify_symbol_chain (symbol_rootP, symbol_lastP);
3151
#endif /* DEBUG_SYMS  */
3152
}
3153
 
3154
 
3155
static void
3156
s_ltorg (int ignored ATTRIBUTE_UNUSED)
3157
{
3158
  unsigned int entry;
3159
  literal_pool * pool;
3160
  char sym_name[20];
3161
 
3162
  pool = find_literal_pool ();
3163
  if (pool == NULL
3164
      || pool->symbol == NULL
3165
      || pool->next_free_entry == 0)
3166
    return;
3167
 
3168
  mapping_state (MAP_DATA);
3169
 
3170
  /* Align the pool, as it is accessed with word loads.
3171
     Only make a frag if we have to.  */
3172
  if (!need_pass_2)
3173
    frag_align (2, 0, 0);
3174
 
3175
  record_alignment (now_seg, 2);
3176
 
3177
  sprintf (sym_name, "$$lit_\002%x", pool->id);
3178
 
3179
  symbol_locate (pool->symbol, sym_name, now_seg,
3180
                 (valueT) frag_now_fix (), frag_now);
3181
  symbol_table_insert (pool->symbol);
3182
 
3183
  ARM_SET_THUMB (pool->symbol, thumb_mode);
3184
 
3185
#if defined OBJ_COFF || defined OBJ_ELF
3186
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
3187
#endif
3188
 
3189
  for (entry = 0; entry < pool->next_free_entry; entry ++)
3190 160 khays
    {
3191
#ifdef OBJ_ELF
3192
      if (debug_type == DEBUG_DWARF2)
3193
        dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
3194
#endif
3195
      /* First output the expression in the instruction to the pool.  */
3196
      emit_expr (&(pool->literals[entry]), 4); /* .word  */
3197
    }
3198 16 khays
 
3199
  /* Mark the pool as empty.  */
3200
  pool->next_free_entry = 0;
3201
  pool->symbol = NULL;
3202
}
3203
 
3204
#ifdef OBJ_ELF
3205
/* Forward declarations for functions below, in the MD interface
3206
   section.  */
3207
static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3208
static valueT create_unwind_entry (int);
3209
static void start_unwind_section (const segT, int);
3210
static void add_unwind_opcode (valueT, int);
3211
static void flush_pending_unwind (void);
3212
 
3213
/* Directives: Data.  */
3214
 
3215
static void
3216
s_arm_elf_cons (int nbytes)
3217
{
3218
  expressionS exp;
3219
 
3220
#ifdef md_flush_pending_output
3221
  md_flush_pending_output ();
3222
#endif
3223
 
3224
  if (is_it_end_of_statement ())
3225
    {
3226
      demand_empty_rest_of_line ();
3227
      return;
3228
    }
3229
 
3230
#ifdef md_cons_align
3231
  md_cons_align (nbytes);
3232
#endif
3233
 
3234
  mapping_state (MAP_DATA);
3235
  do
3236
    {
3237
      int reloc;
3238
      char *base = input_line_pointer;
3239
 
3240
      expression (& exp);
3241
 
3242
      if (exp.X_op != O_symbol)
3243
        emit_expr (&exp, (unsigned int) nbytes);
3244
      else
3245
        {
3246
          char *before_reloc = input_line_pointer;
3247
          reloc = parse_reloc (&input_line_pointer);
3248
          if (reloc == -1)
3249
            {
3250
              as_bad (_("unrecognized relocation suffix"));
3251
              ignore_rest_of_line ();
3252
              return;
3253
            }
3254
          else if (reloc == BFD_RELOC_UNUSED)
3255
            emit_expr (&exp, (unsigned int) nbytes);
3256
          else
3257
            {
3258
              reloc_howto_type *howto = (reloc_howto_type *)
3259
                  bfd_reloc_type_lookup (stdoutput,
3260
                                         (bfd_reloc_code_real_type) reloc);
3261
              int size = bfd_get_reloc_size (howto);
3262
 
3263
              if (reloc == BFD_RELOC_ARM_PLT32)
3264
                {
3265
                  as_bad (_("(plt) is only valid on branch targets"));
3266
                  reloc = BFD_RELOC_UNUSED;
3267
                  size = 0;
3268
                }
3269
 
3270
              if (size > nbytes)
3271
                as_bad (_("%s relocations do not fit in %d bytes"),
3272
                        howto->name, nbytes);
3273
              else
3274
                {
3275
                  /* We've parsed an expression stopping at O_symbol.
3276
                     But there may be more expression left now that we
3277
                     have parsed the relocation marker.  Parse it again.
3278
                     XXX Surely there is a cleaner way to do this.  */
3279
                  char *p = input_line_pointer;
3280
                  int offset;
3281
                  char *save_buf = (char *) alloca (input_line_pointer - base);
3282
                  memcpy (save_buf, base, input_line_pointer - base);
3283
                  memmove (base + (input_line_pointer - before_reloc),
3284
                           base, before_reloc - base);
3285
 
3286
                  input_line_pointer = base + (input_line_pointer-before_reloc);
3287
                  expression (&exp);
3288
                  memcpy (base, save_buf, p - base);
3289
 
3290
                  offset = nbytes - size;
3291
                  p = frag_more ((int) nbytes);
3292
                  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
3293
                               size, &exp, 0, (enum bfd_reloc_code_real) reloc);
3294
                }
3295
            }
3296
        }
3297
    }
3298
  while (*input_line_pointer++ == ',');
3299
 
3300
  /* Put terminator back into stream.  */
3301
  input_line_pointer --;
3302
  demand_empty_rest_of_line ();
3303
}
3304
 
3305
/* Emit an expression containing a 32-bit thumb instruction.
3306
   Implementation based on put_thumb32_insn.  */
3307
 
3308
static void
3309
emit_thumb32_expr (expressionS * exp)
3310
{
3311
  expressionS exp_high = *exp;
3312
 
3313
  exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3314
  emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3315
  exp->X_add_number &= 0xffff;
3316
  emit_expr (exp, (unsigned int) THUMB_SIZE);
3317
}
3318
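/* Illustrative sketch, not part of the GAS sources: a 32-bit Thumb
   instruction is emitted as two 16-bit halfwords, most-significant
   halfword first, which is what emit_thumb32_expr above does.  The
   encoding 0xf3af8000 (NOP.W) is stored as 0xf3af then 0x8000.
   Standalone C, disabled so it does not affect compilation.  */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned long insn = 0xf3af8000ul;
  unsigned int high = (unsigned int) ((insn >> 16) & 0xffffu);
  unsigned int low  = (unsigned int) (insn & 0xffffu);

  printf ("first halfword  0x%04x\n", high);   /* 0xf3af  */
  printf ("second halfword 0x%04x\n", low);    /* 0x8000  */
  return 0;
}
#endif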
 
3319
/*  Guess the instruction size based on the opcode.  */
3320
 
3321
static int
3322
thumb_insn_size (int opcode)
3323
{
3324
  if ((unsigned int) opcode < 0xe800u)
3325
    return 2;
3326
  else if ((unsigned int) opcode >= 0xe8000000u)
3327
    return 4;
3328
  else
3329
    return 0;
3330
}
3331
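/* Illustrative sketch, not part of the GAS sources: how the opcode-based
   size guess above behaves for typical ".inst" operands.  0x4770 (BX LR)
   is a 16-bit encoding, 0xeb010002 has a 32-bit prefix, and a value in
   the ambiguous range (e.g. 0xe800) yields 0, which makes emit_insn ask
   for an explicit .inst.n/.inst.w.  Standalone C, disabled so it does
   not affect compilation.  */
#if 0
#include <stdio.h>

static int
thumb_insn_size_demo (unsigned int opcode)
{
  if (opcode < 0xe800u)
    return 2;
  else if (opcode >= 0xe8000000u)
    return 4;
  else
    return 0;
}

int
main (void)
{
  printf ("%d %d %d\n",
          thumb_insn_size_demo (0x4770u),       /* 2  */
          thumb_insn_size_demo (0xeb010002u),   /* 4  */
          thumb_insn_size_demo (0xe800u));      /* 0  */
  return 0;
}
#endif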
 
3332
static bfd_boolean
3333
emit_insn (expressionS *exp, int nbytes)
3334
{
3335
  int size = 0;
3336
 
3337
  if (exp->X_op == O_constant)
3338
    {
3339
      size = nbytes;
3340
 
3341
      if (size == 0)
3342
        size = thumb_insn_size (exp->X_add_number);
3343
 
3344
      if (size != 0)
3345
        {
3346
          if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
3347
            {
3348
              as_bad (_(".inst.n operand too big. "\
3349
                        "Use .inst.w instead"));
3350
              size = 0;
3351
            }
3352
          else
3353
            {
3354
              if (now_it.state == AUTOMATIC_IT_BLOCK)
3355
                set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
3356
              else
3357
                set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);
3358
 
3359
              if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
3360
                emit_thumb32_expr (exp);
3361
              else
3362
                emit_expr (exp, (unsigned int) size);
3363
 
3364
              it_fsm_post_encode ();
3365
            }
3366
        }
3367
      else
3368
        as_bad (_("cannot determine Thumb instruction size. "   \
3369
                  "Use .inst.n/.inst.w instead"));
3370
    }
3371
  else
3372
    as_bad (_("constant expression required"));
3373
 
3374
  return (size != 0);
3375
}
3376
 
3377
/* Like s_arm_elf_cons but do not use md_cons_align and
3378
   set the mapping state to MAP_ARM/MAP_THUMB.  */
3379
 
3380
static void
3381
s_arm_elf_inst (int nbytes)
3382
{
3383
  if (is_it_end_of_statement ())
3384
    {
3385
      demand_empty_rest_of_line ();
3386
      return;
3387
    }
3388
 
3389
  /* Calling mapping_state () here will not change ARM/THUMB,
3390
     but will ensure not to be in DATA state.  */
3391
 
3392
  if (thumb_mode)
3393
    mapping_state (MAP_THUMB);
3394
  else
3395
    {
3396
      if (nbytes != 0)
3397
        {
3398
          as_bad (_("width suffixes are invalid in ARM mode"));
3399
          ignore_rest_of_line ();
3400
          return;
3401
        }
3402
 
3403
      nbytes = 4;
3404
 
3405
      mapping_state (MAP_ARM);
3406
    }
3407
 
3408
  do
3409
    {
3410
      expressionS exp;
3411
 
3412
      expression (& exp);
3413
 
3414
      if (! emit_insn (& exp, nbytes))
3415
        {
3416
          ignore_rest_of_line ();
3417
          return;
3418
        }
3419
    }
3420
  while (*input_line_pointer++ == ',');
3421
 
3422
  /* Put terminator back into stream.  */
3423
  input_line_pointer --;
3424
  demand_empty_rest_of_line ();
3425
}
3426
 
3427
/* Parse a .rel31 directive.  */
3428
 
3429
static void
3430
s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
3431
{
3432
  expressionS exp;
3433
  char *p;
3434
  valueT highbit;
3435
 
3436
  highbit = 0;
3437
  if (*input_line_pointer == '1')
3438
    highbit = 0x80000000;
3439
  else if (*input_line_pointer != '0')
3440
    as_bad (_("expected 0 or 1"));
3441
 
3442
  input_line_pointer++;
3443
  if (*input_line_pointer != ',')
3444
    as_bad (_("missing comma"));
3445
  input_line_pointer++;
3446
 
3447
#ifdef md_flush_pending_output
3448
  md_flush_pending_output ();
3449
#endif
3450
 
3451
#ifdef md_cons_align
3452
  md_cons_align (4);
3453
#endif
3454
 
3455
  mapping_state (MAP_DATA);
3456
 
3457
  expression (&exp);
3458
 
3459
  p = frag_more (4);
3460
  md_number_to_chars (p, highbit, 4);
3461
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
3462
               BFD_RELOC_ARM_PREL31);
3463
 
3464
  demand_empty_rest_of_line ();
3465
}
3466
 
3467
/* Directives: AEABI stack-unwind tables.  */
3468
 
3469
/* Parse an unwind_fnstart directive.  Simply records the current location.  */
3470
 
3471
static void
3472
s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3473
{
3474
  demand_empty_rest_of_line ();
3475
  if (unwind.proc_start)
3476
    {
3477
      as_bad (_("duplicate .fnstart directive"));
3478
      return;
3479
    }
3480
 
3481
  /* Mark the start of the function.  */
3482
  unwind.proc_start = expr_build_dot ();
3483
 
3484
  /* Reset the rest of the unwind info.  */
3485
  unwind.opcode_count = 0;
3486
  unwind.table_entry = NULL;
3487
  unwind.personality_routine = NULL;
3488
  unwind.personality_index = -1;
3489
  unwind.frame_size = 0;
3490
  unwind.fp_offset = 0;
3491
  unwind.fp_reg = REG_SP;
3492
  unwind.fp_used = 0;
3493
  unwind.sp_restored = 0;
3494
}
3495
 
3496
 
3497
/* Parse a handlerdata directive.  Creates the exception handling table entry
3498
   for the function.  */
3499
 
3500
static void
3501
s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3502
{
3503
  demand_empty_rest_of_line ();
3504
  if (!unwind.proc_start)
3505
    as_bad (MISSING_FNSTART);
3506
 
3507
  if (unwind.table_entry)
3508
    as_bad (_("duplicate .handlerdata directive"));
3509
 
3510
  create_unwind_entry (1);
3511
}
3512
 
3513
/* Parse an unwind_fnend directive.  Generates the index table entry.  */
3514
 
3515
static void
3516
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
3517
{
3518
  long where;
3519
  char *ptr;
3520
  valueT val;
3521
  unsigned int marked_pr_dependency;
3522
 
3523
  demand_empty_rest_of_line ();
3524
 
3525
  if (!unwind.proc_start)
3526
    {
3527
      as_bad (_(".fnend directive without .fnstart"));
3528
      return;
3529
    }
3530
 
3531
  /* Add eh table entry.  */
3532
  if (unwind.table_entry == NULL)
3533
    val = create_unwind_entry (0);
3534
  else
3535
    val = 0;
3536
 
3537
  /* Add index table entry.  This is two words.  */
3538
  start_unwind_section (unwind.saved_seg, 1);
3539
  frag_align (2, 0, 0);
3540
  record_alignment (now_seg, 2);
3541
 
3542
  ptr = frag_more (8);
3543 166 khays
  memset (ptr, 0, 8);
3544 16 khays
  where = frag_now_fix () - 8;
3545
 
3546
  /* Self relative offset of the function start.  */
3547
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
3548
           BFD_RELOC_ARM_PREL31);
3549
 
3550
  /* Indicate dependency on EHABI-defined personality routines to the
3551
     linker, if it hasn't been done already.  */
3552
  marked_pr_dependency
3553
    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
3554
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
3555
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
3556
    {
3557
      static const char *const name[] =
3558
        {
3559
          "__aeabi_unwind_cpp_pr0",
3560
          "__aeabi_unwind_cpp_pr1",
3561
          "__aeabi_unwind_cpp_pr2"
3562
        };
3563
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
3564
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
3565
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
3566
        |= 1 << unwind.personality_index;
3567
    }
3568
 
3569
  if (val)
3570
    /* Inline exception table entry.  */
3571
    md_number_to_chars (ptr + 4, val, 4);
3572
  else
3573
    /* Self relative offset of the table entry.  */
3574
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
3575
             BFD_RELOC_ARM_PREL31);
3576
 
3577
  /* Restore the original section.  */
3578
  subseg_set (unwind.saved_seg, unwind.saved_subseg);
3579
 
3580
  unwind.proc_start = NULL;
3581
}
3582
 
3583
 
3584
/* Parse an unwind_cantunwind directive.  */
3585
 
3586
static void
3587
s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3588
{
3589
  demand_empty_rest_of_line ();
3590
  if (!unwind.proc_start)
3591
    as_bad (MISSING_FNSTART);
3592
 
3593
  if (unwind.personality_routine || unwind.personality_index != -1)
3594
    as_bad (_("personality routine specified for cantunwind frame"));
3595
 
3596
  unwind.personality_index = -2;
3597
}
3598
 
3599
 
3600
/* Parse a personalityindex directive.  */
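/* For example, ".personalityindex 0" selects the standard personality
   routine __aeabi_unwind_cpp_pr0 (see the name table in
   s_arm_unwind_fnend above).  */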
3601
 
3602
static void
3603
s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3604
{
3605
  expressionS exp;
3606
 
3607
  if (!unwind.proc_start)
3608
    as_bad (MISSING_FNSTART);
3609
 
3610
  if (unwind.personality_routine || unwind.personality_index != -1)
3611
    as_bad (_("duplicate .personalityindex directive"));
3612
 
3613
  expression (&exp);
3614
 
3615
  if (exp.X_op != O_constant
3616
      || exp.X_add_number < 0 || exp.X_add_number > 15)
3617
    {
3618
      as_bad (_("bad personality routine number"));
3619
      ignore_rest_of_line ();
3620
      return;
3621
    }
3622
 
3623
  unwind.personality_index = exp.X_add_number;
3624
 
3625
  demand_empty_rest_of_line ();
3626
}
3627
 
3628
 
3629
/* Parse a personality directive.  */
3630
 
3631
static void
3632
s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
3633
{
3634
  char *name, *p, c;
3635
 
3636
  if (!unwind.proc_start)
3637
    as_bad (MISSING_FNSTART);
3638
 
3639
  if (unwind.personality_routine || unwind.personality_index != -1)
3640
    as_bad (_("duplicate .personality directive"));
3641
 
3642
  name = input_line_pointer;
3643
  c = get_symbol_end ();
3644
  p = input_line_pointer;
3645
  unwind.personality_routine = symbol_find_or_make (name);
3646
  *p = c;
3647
  demand_empty_rest_of_line ();
3648
}
3649
 
3650
 
3651
/* Parse a directive saving core registers.  */
3652
 
3653
static void
3654
s_arm_unwind_save_core (void)
3655
{
3656
  valueT op;
3657
  long range;
3658
  int n;
3659
 
3660
  range = parse_reg_list (&input_line_pointer);
3661
  if (range == FAIL)
3662
    {
3663
      as_bad (_("expected register list"));
3664
      ignore_rest_of_line ();
3665
      return;
3666
    }
3667
 
3668
  demand_empty_rest_of_line ();
3669
 
3670
  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3671
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
3672
     ip because it is clobbered by calls.  */
3673
  if (unwind.sp_restored && unwind.fp_reg == 12
3674
      && (range & 0x3000) == 0x1000)
3675
    {
3676
      unwind.opcode_count--;
3677
      unwind.sp_restored = 0;
3678
      range = (range | 0x2000) & ~0x1000;
3679
      unwind.pending_offset = 0;
3680
    }
3681
 
3682
  /* Pop r4-r15.  */
3683
  if (range & 0xfff0)
3684
    {
3685
      /* See if we can use the short opcodes.  These pop a block of up to 8
3686
         registers starting with r4, plus maybe r14.  */
3687
      for (n = 0; n < 8; n++)
3688
        {
3689
          /* Break at the first non-saved register.      */
3690
          if ((range & (1 << (n + 4))) == 0)
3691
            break;
3692
        }
3693
      /* See if there are any other bits set.  */
3694
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
3695
        {
3696
          /* Use the long form.  */
3697
          op = 0x8000 | ((range >> 4) & 0xfff);
3698
          add_unwind_opcode (op, 2);
3699
        }
3700
      else
3701
        {
3702
          /* Use the short form.  */
3703
          if (range & 0x4000)
3704
            op = 0xa8; /* Pop r14.      */
3705
          else
3706
            op = 0xa0; /* Do not pop r14.  */
3707
          op |= (n - 1);
3708
          add_unwind_opcode (op, 1);
3709
        }
3710
    }
3711
 
3712
  /* Pop r0-r3.  */
3713
  if (range & 0xf)
3714
    {
3715
      op = 0xb100 | (range & 0xf);
3716
      add_unwind_opcode (op, 2);
3717
    }
3718
 
3719
  /* Record the number of bytes pushed.  */
3720
  for (n = 0; n < 16; n++)
3721
    {
3722
      if (range & (1 << n))
3723
        unwind.frame_size += 4;
3724
    }
3725
}
3726
 
3727
 
3728
/* Parse a directive saving FPA registers.  */
3729
 
3730
static void
3731
s_arm_unwind_save_fpa (int reg)
3732
{
3733
  expressionS exp;
3734
  int num_regs;
3735
  valueT op;
3736
 
3737
  /* Get Number of registers to transfer.  */
3738
  if (skip_past_comma (&input_line_pointer) != FAIL)
3739
    expression (&exp);
3740
  else
3741
    exp.X_op = O_illegal;
3742
 
3743
  if (exp.X_op != O_constant)
3744
    {
3745
      as_bad (_("expected , <constant>"));
3746
      ignore_rest_of_line ();
3747
      return;
3748
    }
3749
 
3750
  num_regs = exp.X_add_number;
3751
 
3752
  if (num_regs < 1 || num_regs > 4)
3753
    {
3754
      as_bad (_("number of registers must be in the range [1:4]"));
3755
      ignore_rest_of_line ();
3756
      return;
3757
    }
3758
 
3759
  demand_empty_rest_of_line ();
3760
 
3761
  if (reg == 4)
3762
    {
3763
      /* Short form.  */
3764
      op = 0xb4 | (num_regs - 1);
3765
      add_unwind_opcode (op, 1);
3766
    }
3767
  else
3768
    {
3769
      /* Long form.  */
3770
      op = 0xc800 | (reg << 4) | (num_regs - 1);
3771
      add_unwind_opcode (op, 2);
3772
    }
3773
  unwind.frame_size += num_regs * 12;
3774
}
3775
 
3776
 
3777
/* Parse a directive saving VFP registers for ARMv6 and above.  */
3778
 
3779
static void
3780
s_arm_unwind_save_vfp_armv6 (void)
3781
{
3782
  int count;
3783
  unsigned int start;
3784
  valueT op;
3785
  int num_vfpv3_regs = 0;
3786
  int num_regs_below_16;
3787
 
3788
  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
3789
  if (count == FAIL)
3790
    {
3791
      as_bad (_("expected register list"));
3792
      ignore_rest_of_line ();
3793
      return;
3794
    }
3795
 
3796
  demand_empty_rest_of_line ();
3797
 
3798
  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
3799
     than FSTMX/FLDMX-style ones).  */
3800
 
3801
  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
3802
  if (start >= 16)
3803
    num_vfpv3_regs = count;
3804
  else if (start + count > 16)
3805
    num_vfpv3_regs = start + count - 16;
3806
 
3807
  if (num_vfpv3_regs > 0)
3808
    {
3809
      int start_offset = start > 16 ? start - 16 : 0;
3810
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
3811
      add_unwind_opcode (op, 2);
3812
    }
3813
 
3814
  /* Generate opcode for registers numbered in the range 0 .. 15.  */
3815
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
3816
  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
3817
  if (num_regs_below_16 > 0)
3818
    {
3819
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
3820
      add_unwind_opcode (op, 2);
3821
    }
3822
 
3823
  unwind.frame_size += count * 8;
3824
}
3825
 
3826
 
3827
/* Parse a directive saving VFP registers for pre-ARMv6.  */
3828
 
3829
static void
3830
s_arm_unwind_save_vfp (void)
3831
{
3832
  int count;
3833
  unsigned int reg;
3834
  valueT op;
3835
 
3836
  count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
3837
  if (count == FAIL)
3838
    {
3839
      as_bad (_("expected register list"));
3840
      ignore_rest_of_line ();
3841
      return;
3842
    }
3843
 
3844
  demand_empty_rest_of_line ();
3845
 
3846
  if (reg == 8)
3847
    {
3848
      /* Short form.  */
3849
      op = 0xb8 | (count - 1);
3850
      add_unwind_opcode (op, 1);
3851
    }
3852
  else
3853
    {
3854
      /* Long form.  */
3855
      op = 0xb300 | (reg << 4) | (count - 1);
3856
      add_unwind_opcode (op, 2);
3857
    }
3858
  unwind.frame_size += count * 8 + 4;
3859
}
3860
 
3861
 
3862
/* Parse a directive saving iWMMXt data registers.  */
3863
 
3864
static void
3865
s_arm_unwind_save_mmxwr (void)
3866
{
3867
  int reg;
3868
  int hi_reg;
3869
  int i;
3870
  unsigned mask = 0;
3871
  valueT op;
3872
 
3873
  if (*input_line_pointer == '{')
3874
    input_line_pointer++;
3875
 
3876
  do
3877
    {
3878
      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3879
 
3880
      if (reg == FAIL)
3881
        {
3882
          as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
3883
          goto error;
3884
        }
3885
 
3886
      if (mask >> reg)
3887
        as_tsktsk (_("register list not in ascending order"));
3888
      mask |= 1 << reg;
3889
 
3890
      if (*input_line_pointer == '-')
3891
        {
3892
          input_line_pointer++;
3893
          hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3894
          if (hi_reg == FAIL)
3895
            {
3896
              as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
3897
              goto error;
3898
            }
3899
          else if (reg >= hi_reg)
3900
            {
3901
              as_bad (_("bad register range"));
3902
              goto error;
3903
            }
3904
          for (; reg < hi_reg; reg++)
3905
            mask |= 1 << reg;
3906
        }
3907
    }
3908
  while (skip_past_comma (&input_line_pointer) != FAIL);
3909
 
3910
  if (*input_line_pointer == '}')
3911
    input_line_pointer++;
3912
 
3913
  demand_empty_rest_of_line ();
3914
 
3915
  /* Generate any deferred opcodes because we're going to be looking at
3916
     the list.  */
3917
  flush_pending_unwind ();
3918
 
3919
  for (i = 0; i < 16; i++)
3920
    {
3921
      if (mask & (1 << i))
3922
        unwind.frame_size += 8;
3923
    }
3924
 
3925
  /* Attempt to combine with a previous opcode.  We do this because gcc
3926
     likes to output separate unwind directives for a single block of
3927
     registers.  */
3928
  if (unwind.opcode_count > 0)
3929
    {
3930
      i = unwind.opcodes[unwind.opcode_count - 1];
3931
      if ((i & 0xf8) == 0xc0)
3932
        {
3933
          i &= 7;
3934
          /* Only merge if the blocks are contiguous.  */
3935
          if (i < 6)
3936
            {
3937
              if ((mask & 0xfe00) == (1 << 9))
3938
                {
3939
                  mask |= ((1 << (i + 11)) - 1) & 0xfc00;
3940
                  unwind.opcode_count--;
3941
                }
3942
            }
3943
          else if (i == 6 && unwind.opcode_count >= 2)
3944
            {
3945
              i = unwind.opcodes[unwind.opcode_count - 2];
3946
              reg = i >> 4;
3947
              i &= 0xf;
3948
 
3949
              op = 0xffff << (reg - 1);
3950
              if (reg > 0
3951
                  && ((mask & op) == (1u << (reg - 1))))
3952
                {
3953
                  op = (1 << (reg + i + 1)) - 1;
3954
                  op &= ~((1 << reg) - 1);
3955
                  mask |= op;
3956
                  unwind.opcode_count -= 2;
3957
                }
3958
            }
3959
        }
3960
    }
3961
 
3962
  hi_reg = 15;
3963
  /* We want to generate opcodes in the order the registers have been
3964
     saved, ie. descending order.  */
3965
  for (reg = 15; reg >= -1; reg--)
3966
    {
3967
      /* Save registers in blocks.  */
3968
      if (reg < 0
3969
          || !(mask & (1 << reg)))
3970
        {
3971
          /* We found an unsaved reg.  Generate opcodes to save the
3972
             preceding block.   */
3973
          if (reg != hi_reg)
3974
            {
3975
              if (reg == 9)
3976
                {
3977
                  /* Short form.  */
3978
                  op = 0xc0 | (hi_reg - 10);
3979
                  add_unwind_opcode (op, 1);
3980
                }
3981
              else
3982
                {
3983
                  /* Long form.  */
3984
                  op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
3985
                  add_unwind_opcode (op, 2);
3986
                }
3987
            }
3988
          hi_reg = reg - 1;
3989
        }
3990
    }
3991
 
3992
  return;
3993
error:
3994
  ignore_rest_of_line ();
3995
}
3996
 
3997
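/* Parse a directive saving iWMMXt control registers.  */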
static void
3998
s_arm_unwind_save_mmxwcg (void)
3999
{
4000
  int reg;
4001
  int hi_reg;
4002
  unsigned mask = 0;
4003
  valueT op;
4004
 
4005
  if (*input_line_pointer == '{')
4006
    input_line_pointer++;
4007
 
4008
  do
4009
    {
4010
      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4011
 
4012
      if (reg == FAIL)
4013
        {
4014
          as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4015
          goto error;
4016
        }
4017
 
4018
      reg -= 8;
4019
      if (mask >> reg)
4020
        as_tsktsk (_("register list not in ascending order"));
4021
      mask |= 1 << reg;
4022
 
4023
      if (*input_line_pointer == '-')
4024
        {
4025
          input_line_pointer++;
4026
          hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4027
          if (hi_reg == FAIL)
4028
            {
4029
              as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4030
              goto error;
4031
            }
4032
          else if (reg >= hi_reg)
4033
            {
4034
              as_bad (_("bad register range"));
4035
              goto error;
4036
            }
4037
          for (; reg < hi_reg; reg++)
4038
            mask |= 1 << reg;
4039
        }
4040
    }
4041
  while (skip_past_comma (&input_line_pointer) != FAIL);
4042
 
4043
  if (*input_line_pointer == '}')
4044
    input_line_pointer++;
4045
 
4046
  demand_empty_rest_of_line ();
4047
 
4048
  /* Generate any deferred opcodes because we're going to be looking at
4049
     the list.  */
4050
  flush_pending_unwind ();
4051
 
4052
  for (reg = 0; reg < 16; reg++)
4053
    {
4054
      if (mask & (1 << reg))
4055
        unwind.frame_size += 4;
4056
    }
4057
  op = 0xc700 | mask;
4058
  add_unwind_opcode (op, 2);
4059
  return;
4060
error:
4061
  ignore_rest_of_line ();
4062
}
4063
 
4064
 
4065
/* Parse an unwind_save directive.
4066
   If the argument is non-zero, this is a .vsave directive.  */
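/* For example, ".save {r4-r7, lr}" records saved core registers via
   s_arm_unwind_save_core, while ".vsave {d8-d15}" takes the arch_v6
   path and is encoded with the FSTMD/FLDMD-style opcodes above.  */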
4067
 
4068
static void
4069
s_arm_unwind_save (int arch_v6)
4070
{
4071
  char *peek;
4072
  struct reg_entry *reg;
4073
  bfd_boolean had_brace = FALSE;
4074
 
4075
  if (!unwind.proc_start)
4076
    as_bad (MISSING_FNSTART);
4077
 
4078
  /* Figure out what sort of save we have.  */
4079
  peek = input_line_pointer;
4080
 
4081
  if (*peek == '{')
4082
    {
4083
      had_brace = TRUE;
4084
      peek++;
4085
    }
4086
 
4087
  reg = arm_reg_parse_multi (&peek);
4088
 
4089
  if (!reg)
4090
    {
4091
      as_bad (_("register expected"));
4092
      ignore_rest_of_line ();
4093
      return;
4094
    }
4095
 
4096
  switch (reg->type)
4097
    {
4098
    case REG_TYPE_FN:
4099
      if (had_brace)
4100
        {
4101
          as_bad (_("FPA .unwind_save does not take a register list"));
4102
          ignore_rest_of_line ();
4103
          return;
4104
        }
4105
      input_line_pointer = peek;
4106
      s_arm_unwind_save_fpa (reg->number);
4107
      return;
4108
 
4109
    case REG_TYPE_RN:     s_arm_unwind_save_core ();   return;
4110
    case REG_TYPE_VFD:
4111
      if (arch_v6)
4112
        s_arm_unwind_save_vfp_armv6 ();
4113
      else
4114
        s_arm_unwind_save_vfp ();
4115
      return;
4116
    case REG_TYPE_MMXWR:  s_arm_unwind_save_mmxwr ();  return;
4117
    case REG_TYPE_MMXWCG: s_arm_unwind_save_mmxwcg (); return;
4118
 
4119
    default:
4120
      as_bad (_(".unwind_save does not support this kind of register"));
4121
      ignore_rest_of_line ();
4122
    }
4123
}
4124
 
4125
 
4126
/* Parse an unwind_movsp directive.  */
4127
 
4128
static void
4129
s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
4130
{
4131
  int reg;
4132
  valueT op;
4133
  int offset;
4134
 
4135
  if (!unwind.proc_start)
4136
    as_bad (MISSING_FNSTART);
4137
 
4138
  reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4139
  if (reg == FAIL)
4140
    {
4141
      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
4142
      ignore_rest_of_line ();
4143
      return;
4144
    }
4145
 
4146
  /* Optional constant.  */
4147
  if (skip_past_comma (&input_line_pointer) != FAIL)
4148
    {
4149
      if (immediate_for_directive (&offset) == FAIL)
4150
        return;
4151
    }
4152
  else
4153
    offset = 0;
4154
 
4155
  demand_empty_rest_of_line ();
4156
 
4157
  if (reg == REG_SP || reg == REG_PC)
4158
    {
4159
      as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4160
      return;
4161
    }
4162
 
4163
  if (unwind.fp_reg != REG_SP)
4164
    as_bad (_("unexpected .unwind_movsp directive"));
4165
 
4166
  /* Generate opcode to restore the value.  */
4167
  op = 0x90 | reg;
4168
  add_unwind_opcode (op, 1);
4169
 
4170
  /* Record the information for later.  */
4171
  unwind.fp_reg = reg;
4172
  unwind.fp_offset = unwind.frame_size - offset;
4173
  unwind.sp_restored = 1;
4174
}
4175
 
4176
/* Parse an unwind_pad directive.  */
4177
 
4178
static void
4179
s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4180
{
4181
  int offset;
4182
 
4183
  if (!unwind.proc_start)
4184
    as_bad (MISSING_FNSTART);
4185
 
4186
  if (immediate_for_directive (&offset) == FAIL)
4187
    return;
4188
 
4189
  if (offset & 3)
4190
    {
4191
      as_bad (_("stack increment must be multiple of 4"));
4192
      ignore_rest_of_line ();
4193
      return;
4194
    }
4195
 
4196
  /* Don't generate any opcodes, just record the details for later.  */
4197
  unwind.frame_size += offset;
4198
  unwind.pending_offset += offset;
4199
 
4200
  demand_empty_rest_of_line ();
4201
}
4202
 
4203
/* Parse an unwind_setfp directive.  */
4204
 
4205
static void
4206
s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4207
{
4208
  int sp_reg;
4209
  int fp_reg;
4210
  int offset;
4211
 
4212
  if (!unwind.proc_start)
4213
    as_bad (MISSING_FNSTART);
4214
 
4215
  fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4216
  if (skip_past_comma (&input_line_pointer) == FAIL)
4217
    sp_reg = FAIL;
4218
  else
4219
    sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4220
 
4221
  if (fp_reg == FAIL || sp_reg == FAIL)
4222
    {
4223
      as_bad (_("expected <reg>, <reg>"));
4224
      ignore_rest_of_line ();
4225
      return;
4226
    }
4227
 
4228
  /* Optional constant.  */
4229
  if (skip_past_comma (&input_line_pointer) != FAIL)
4230
    {
4231
      if (immediate_for_directive (&offset) == FAIL)
4232
        return;
4233
    }
4234
  else
4235
    offset = 0;
4236
 
4237
  demand_empty_rest_of_line ();
4238
 
4239
  if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4240
    {
4241
      as_bad (_("register must be either sp or set by a previous"
4242
                "unwind_movsp directive"));
4243
      return;
4244
    }
4245
 
4246
  /* Don't generate any opcodes, just record the information for later.  */
4247
  unwind.fp_reg = fp_reg;
4248
  unwind.fp_used = 1;
4249
  if (sp_reg == REG_SP)
4250
    unwind.fp_offset = unwind.frame_size - offset;
4251
  else
4252
    unwind.fp_offset -= offset;
4253
}
4254
 
4255
/* Parse an unwind_raw directive.  */
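/* For example, ".unwind_raw 0, 0xb0" leaves the recorded frame size
   unchanged and appends the raw EHABI "finish" opcode 0xb0.  */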
4256
 
4257
static void
4258
s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4259
{
4260
  expressionS exp;
4261
  /* This is an arbitrary limit.         */
4262
  unsigned char op[16];
4263
  int count;
4264
 
4265
  if (!unwind.proc_start)
4266
    as_bad (MISSING_FNSTART);
4267
 
4268
  expression (&exp);
4269
  if (exp.X_op == O_constant
4270
      && skip_past_comma (&input_line_pointer) != FAIL)
4271
    {
4272
      unwind.frame_size += exp.X_add_number;
4273
      expression (&exp);
4274
    }
4275
  else
4276
    exp.X_op = O_illegal;
4277
 
4278
  if (exp.X_op != O_constant)
4279
    {
4280
      as_bad (_("expected <offset>, <opcode>"));
4281
      ignore_rest_of_line ();
4282
      return;
4283
    }
4284
 
4285
  count = 0;
4286
 
4287
  /* Parse the opcode.  */
4288
  for (;;)
4289
    {
4290
      if (count >= 16)
4291
        {
4292
          as_bad (_("unwind opcode too long"));
4293
          ignore_rest_of_line ();
4294
        }
4295
      if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4296
        {
4297
          as_bad (_("invalid unwind opcode"));
4298
          ignore_rest_of_line ();
4299
          return;
4300
        }
4301
      op[count++] = exp.X_add_number;
4302
 
4303
      /* Parse the next byte.  */
4304
      if (skip_past_comma (&input_line_pointer) == FAIL)
4305
        break;
4306
 
4307
      expression (&exp);
4308
    }
4309
 
4310
  /* Add the opcode bytes in reverse order.  */
4311
  while (count--)
4312
    add_unwind_opcode (op[count], 1);
4313
 
4314
  demand_empty_rest_of_line ();
4315
}
4316
 
4317
 
4318
/* Parse a .eabi_attribute directive.  */
4319
 
4320
static void
4321
s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4322
{
4323
  int tag = s_vendor_attribute (OBJ_ATTR_PROC);
4324
 
4325
  if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4326
    attributes_set_explicitly[tag] = 1;
4327
}
4328
 
4329
/* Emit a tls fix for the symbol.  */
4330
 
4331
static void
4332
s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
4333
{
4334
  char *p;
4335
  expressionS exp;
4336
#ifdef md_flush_pending_output
4337
  md_flush_pending_output ();
4338
#endif
4339
 
4340
#ifdef md_cons_align
4341
  md_cons_align (4);
4342
#endif
4343
 
4344
  /* Since we're just labelling the code, there's no need to define a
4345
     mapping symbol.  */
4346
  expression (&exp);
4347
  p = obstack_next_free (&frchain_now->frch_obstack);
4348
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
4349
               thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
4350
               : BFD_RELOC_ARM_TLS_DESCSEQ);
4351
}
4352
#endif /* OBJ_ELF */
4353
 
4354
static void s_arm_arch (int);
4355
static void s_arm_object_arch (int);
4356
static void s_arm_cpu (int);
4357
static void s_arm_fpu (int);
4358
static void s_arm_arch_extension (int);
4359
 
4360
#ifdef TE_PE
4361
 
4362
static void
4363
pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
4364
{
4365
  expressionS exp;
4366
 
4367
  do
4368
    {
4369
      expression (&exp);
4370
      if (exp.X_op == O_symbol)
4371
        exp.X_op = O_secrel;
4372
 
4373
      emit_expr (&exp, 4);
4374
    }
4375
  while (*input_line_pointer++ == ',');
4376
 
4377
  input_line_pointer--;
4378
  demand_empty_rest_of_line ();
4379
}
4380
#endif /* TE_PE */
4381
 
4382
/* This table describes all the machine specific pseudo-ops the assembler
4383
   has to support.  The fields are:
4384
     pseudo-op name without dot
4385
     function to call to execute this pseudo-op
4386
     Integer arg to pass to the function.  */
4387
 
4388
const pseudo_typeS md_pseudo_table[] =
4389
{
4390
  /* Never called because '.req' does not start a line.  */
4391
  { "req",         s_req,         0 },
4392
  /* Following two are likewise never called.  */
4393
  { "dn",          s_dn,          0 },
4394
  { "qn",          s_qn,          0 },
4395
  { "unreq",       s_unreq,       0 },
4396
  { "bss",         s_bss,         0 },
4397
  { "align",       s_align,       0 },
4398
  { "arm",         s_arm,         0 },
4399
  { "thumb",       s_thumb,       0 },
4400
  { "code",        s_code,        0 },
4401
  { "force_thumb", s_force_thumb, 0 },
4402
  { "thumb_func",  s_thumb_func,  0 },
4403
  { "thumb_set",   s_thumb_set,   0 },
4404
  { "even",        s_even,        0 },
4405
  { "ltorg",       s_ltorg,       0 },
4406
  { "pool",        s_ltorg,       0 },
4407
  { "syntax",      s_syntax,      0 },
4408
  { "cpu",         s_arm_cpu,     0 },
4409
  { "arch",        s_arm_arch,    0 },
4410
  { "object_arch", s_arm_object_arch,   0 },
4411
  { "fpu",         s_arm_fpu,     0 },
4412
  { "arch_extension", s_arm_arch_extension, 0 },
4413
#ifdef OBJ_ELF
4414
  { "word",             s_arm_elf_cons, 4 },
4415
  { "long",             s_arm_elf_cons, 4 },
4416
  { "inst.n",           s_arm_elf_inst, 2 },
4417
  { "inst.w",           s_arm_elf_inst, 4 },
4418
  { "inst",             s_arm_elf_inst, 0 },
4419
  { "rel31",            s_arm_rel31,      0 },
4420
  { "fnstart",          s_arm_unwind_fnstart,   0 },
4421
  { "fnend",            s_arm_unwind_fnend,     0 },
4422
  { "cantunwind",       s_arm_unwind_cantunwind, 0 },
4423
  { "personality",      s_arm_unwind_personality, 0 },
4424
  { "personalityindex", s_arm_unwind_personalityindex, 0 },
4425
  { "handlerdata",      s_arm_unwind_handlerdata, 0 },
4426
  { "save",             s_arm_unwind_save,      0 },
4427
  { "vsave",            s_arm_unwind_save,      1 },
4428
  { "movsp",            s_arm_unwind_movsp,     0 },
4429
  { "pad",              s_arm_unwind_pad,       0 },
4430
  { "setfp",            s_arm_unwind_setfp,     0 },
4431
  { "unwind_raw",       s_arm_unwind_raw,       0 },
4432
  { "eabi_attribute",   s_arm_eabi_attribute,   0 },
4433
  { "tlsdescseq",       s_arm_tls_descseq,      0 },
4434
#else
4435
  { "word",        cons, 4},
4436
 
4437
  /* These are used for dwarf.  */
4438
  {"2byte", cons, 2},
4439
  {"4byte", cons, 4},
4440
  {"8byte", cons, 8},
4441
  /* These are used for dwarf2.  */
4442
  { "file", (void (*) (int)) dwarf2_directive_file, 0 },
4443
  { "loc",  dwarf2_directive_loc,  0 },
4444
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
4445
#endif
4446
  { "extend",      float_cons, 'x' },
4447
  { "ldouble",     float_cons, 'x' },
4448
  { "packed",      float_cons, 'p' },
4449
#ifdef TE_PE
4450
  {"secrel32", pe_directive_secrel, 0},
4451
#endif
4452
  { 0, 0, 0 }
4453
};
4454
 
4455
/* Parser functions used exclusively in instruction operands.  */
4456
 
4457
/* Generic immediate-value read function for use in insn parsing.
4458
   STR points to the beginning of the immediate (the leading #);
4459
   VAL receives the value; if the value is outside [MIN, MAX]
4460
   issue an error.  PREFIX_OPT is true if the immediate prefix is
4461
   optional.  */
4462
 
4463
static int
4464
parse_immediate (char **str, int *val, int min, int max,
4465
                 bfd_boolean prefix_opt)
4466
{
4467
  expressionS exp;
4468
  my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4469
  if (exp.X_op != O_constant)
4470
    {
4471
      inst.error = _("constant expression required");
4472
      return FAIL;
4473
    }
4474
 
4475
  if (exp.X_add_number < min || exp.X_add_number > max)
4476
    {
4477
      inst.error = _("immediate value out of range");
4478
      return FAIL;
4479
    }
4480
 
4481
  *val = exp.X_add_number;
4482
  return SUCCESS;
4483
}
4484
 
4485
/* Less-generic immediate-value read function with the possibility of loading a
4486
   big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4487
   instructions. Puts the result directly in inst.operands[i].  */
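/* For example, a 64-bit Neon immediate such as
   "vmov.i64 d0, #0xff0000ff0000ffff" ends up here: the low 32 bits go
   into .imm, the high 32 bits into .reg, and .regisimm is set.  */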
4488
 
4489
static int
4490
parse_big_immediate (char **str, int i)
4491
{
4492
  expressionS exp;
4493
  char *ptr = *str;
4494
 
4495
  my_get_expression (&exp, &ptr, GE_OPT_PREFIX_BIG);
4496
 
4497
  if (exp.X_op == O_constant)
4498
    {
4499
      inst.operands[i].imm = exp.X_add_number & 0xffffffff;
4500
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
4501
         O_constant.  We have to be careful not to break compilation for
4502
         32-bit X_add_number, though.  */
4503
      if ((exp.X_add_number & ~(offsetT)(0xffffffffU)) != 0)
4504
        {
4505
          /* X >> 32 is illegal if sizeof (exp.X_add_number) == 4.  */
4506
          inst.operands[i].reg = ((exp.X_add_number >> 16) >> 16) & 0xffffffff;
4507
          inst.operands[i].regisimm = 1;
4508
        }
4509
    }
4510
  else if (exp.X_op == O_big
4511
           && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 32)
4512
    {
4513
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
4514
 
4515
      /* Bignums have their least significant bits in
4516
         generic_bignum[0]. Make sure we put 32 bits in imm and
4517
         32 bits in reg,  in a (hopefully) portable way.  */
4518
      gas_assert (parts != 0);
4519
 
4520
      /* Make sure that the number is not too big.
4521
         PR 11972: Bignums can now be sign-extended to the
4522
         size of a .octa so check that the out of range bits
4523
         are all zero or all one.  */
4524
      if (LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 64)
4525
        {
4526
          LITTLENUM_TYPE m = -1;
4527
 
4528
          if (generic_bignum[parts * 2] != 0
4529
              && generic_bignum[parts * 2] != m)
4530
            return FAIL;
4531
 
4532
          for (j = parts * 2 + 1; j < (unsigned) exp.X_add_number; j++)
4533
            if (generic_bignum[j] != generic_bignum[j-1])
4534
              return FAIL;
4535
        }
4536
 
4537
      inst.operands[i].imm = 0;
4538
      for (j = 0; j < parts; j++, idx++)
4539
        inst.operands[i].imm |= generic_bignum[idx]
4540
                                << (LITTLENUM_NUMBER_OF_BITS * j);
4541
      inst.operands[i].reg = 0;
4542
      for (j = 0; j < parts; j++, idx++)
4543
        inst.operands[i].reg |= generic_bignum[idx]
4544
                                << (LITTLENUM_NUMBER_OF_BITS * j);
4545
      inst.operands[i].regisimm = 1;
4546
    }
4547
  else
4548
    return FAIL;
4549
 
4550
  *str = ptr;
4551
 
4552
  return SUCCESS;
4553
}
4554
 
4555
/* Returns the pseudo-register number of an FPA immediate constant,
4556
   or FAIL if there isn't a valid constant here.  */
4557
 
4558
static int
4559
parse_fpa_immediate (char ** str)
4560
{
4561
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
4562
  char *         save_in;
4563
  expressionS    exp;
4564
  int            i;
4565
  int            j;
4566
 
4567
  /* First try to match exact strings; this guarantees that some
4568
     formats will work even for cross assembly.  */
4569
 
4570
  for (i = 0; fp_const[i]; i++)
4571
    {
4572
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
4573
        {
4574
          char *start = *str;
4575
 
4576
          *str += strlen (fp_const[i]);
4577
          if (is_end_of_line[(unsigned char) **str])
4578
            return i + 8;
4579
          *str = start;
4580
        }
4581
    }
4582
 
4583
  /* Just because we didn't get a match doesn't mean that the constant
4584
     isn't valid, just that it is in a format that we don't
4585
     automatically recognize.  Try parsing it with the standard
4586
     expression routines.  */
4587
 
4588
  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));
4589
 
4590
  /* Look for a raw floating point number.  */
4591
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
4592
      && is_end_of_line[(unsigned char) *save_in])
4593
    {
4594
      for (i = 0; i < NUM_FLOAT_VALS; i++)
4595
        {
4596
          for (j = 0; j < MAX_LITTLENUMS; j++)
4597
            {
4598
              if (words[j] != fp_values[i][j])
4599
                break;
4600
            }
4601
 
4602
          if (j == MAX_LITTLENUMS)
4603
            {
4604
              *str = save_in;
4605
              return i + 8;
4606
            }
4607
        }
4608
    }
4609
 
4610
  /* Try to parse a more complex expression; this will probably fail
4611
     unless the code uses a floating point prefix (e.g. "0f").  */
4612
  save_in = input_line_pointer;
4613
  input_line_pointer = *str;
4614
  if (expression (&exp) == absolute_section
4615
      && exp.X_op == O_big
4616
      && exp.X_add_number < 0)
4617
    {
4618
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4619
         Ditto for 15.  */
4620
      if (gen_to_words (words, 5, (long) 15) == 0)
4621
        {
4622
          for (i = 0; i < NUM_FLOAT_VALS; i++)
4623
            {
4624
              for (j = 0; j < MAX_LITTLENUMS; j++)
4625
                {
4626
                  if (words[j] != fp_values[i][j])
4627
                    break;
4628
                }
4629
 
4630
              if (j == MAX_LITTLENUMS)
4631
                {
4632
                  *str = input_line_pointer;
4633
                  input_line_pointer = save_in;
4634
                  return i + 8;
4635
                }
4636
            }
4637
        }
4638
    }
4639
 
4640
  *str = input_line_pointer;
4641
  input_line_pointer = save_in;
4642
  inst.error = _("invalid FPA immediate expression");
4643
  return FAIL;
4644
}
4645
 
4646
/* Returns 1 if a number has "quarter-precision" float format
4647
   0baBbbbbbc defgh000 00000000 00000000.  */
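/* For example, 1.0f (0x3f800000) and -2.0f (0xc0000000) match this
   pattern, whereas 0.1f (0x3dcccccd) does not.  */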
4648
 
4649
static int
4650
is_quarter_float (unsigned imm)
4651
{
4652
  int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
4653
  return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
4654
}
4655
 
4656
/* Parse an 8-bit "quarter-precision" floating point number of the form:
4657
   0baBbbbbbc defgh000 00000000 00000000.
4658
   The zero and minus-zero cases need special handling, since they can't be
4659
   encoded in the "quarter-precision" float format, but can nonetheless be
4660
   loaded as integer constants.  */
4661
 
4662
static unsigned
4663
parse_qfloat_immediate (char **ccp, int *immed)
4664
{
4665
  char *str = *ccp;
4666
  char *fpnum;
4667
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
4668
  int found_fpchar = 0;
4669
 
4670
  skip_past_char (&str, '#');
4671
 
4672
  /* We must not accidentally parse an integer as a floating-point number. Make
4673
     sure that the value we parse is not an integer by checking for special
4674
     characters '.' or 'e'.
4675
     FIXME: This is a horrible hack, but doing better is tricky because type
4676
     information isn't in a very usable state at parse time.  */
4677
  fpnum = str;
4678
  skip_whitespace (fpnum);
4679
 
4680
  if (strncmp (fpnum, "0x", 2) == 0)
4681
    return FAIL;
4682
  else
4683
    {
4684
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
4685
        if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
4686
          {
4687
            found_fpchar = 1;
4688
            break;
4689
          }
4690
 
4691
      if (!found_fpchar)
4692
        return FAIL;
4693
    }
4694
 
4695
  if ((str = atof_ieee (str, 's', words)) != NULL)
4696
    {
4697
      unsigned fpword = 0;
4698
      int i;
4699
 
4700
      /* Our FP word must be 32 bits (single-precision FP).  */
4701
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
4702
        {
4703
          fpword <<= LITTLENUM_NUMBER_OF_BITS;
4704
          fpword |= words[i];
4705
        }
4706
 
4707
      if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
4708
        *immed = fpword;
4709
      else
4710
        return FAIL;
4711
 
4712
      *ccp = str;
4713
 
4714
      return SUCCESS;
4715
    }
4716
 
4717
  return FAIL;
4718
}
4719
 
4720
/* Shift operands.  */
4721
enum shift_kind
4722
{
4723
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
4724
};
4725
 
4726
struct asm_shift_name
4727
{
4728
  const char      *name;
4729
  enum shift_kind  kind;
4730
};
4731
 
4732
/* Third argument to parse_shift.  */
4733
enum parse_shift_mode
4734
{
4735
  NO_SHIFT_RESTRICT,            /* Any kind of shift is accepted.  */
4736
  SHIFT_IMMEDIATE,              /* Shift operand must be an immediate.  */
4737
  SHIFT_LSL_OR_ASR_IMMEDIATE,   /* Shift must be LSL or ASR immediate.  */
4738
  SHIFT_ASR_IMMEDIATE,          /* Shift must be ASR immediate.  */
4739
  SHIFT_LSL_IMMEDIATE,          /* Shift must be LSL immediate.  */
4740
};
4741
 
4742
/* Parse a <shift> specifier on an ARM data processing instruction.
4743
   This has three forms:
4744
 
4745
     (LSL|LSR|ASL|ASR|ROR) Rs
4746
     (LSL|LSR|ASL|ASR|ROR) #imm
4747
     RRX
4748
 
4749
   Note that ASL is assimilated to LSL in the instruction encoding, and
4750
   RRX to ROR #0 (which cannot be written as such).  */
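/* For example, given "add r0, r1, r2, lsl #3" this parser is handed
   "lsl #3" and records SHIFT_LSL with an immediate of 3; a register
   shift such as "lsl r3" is only accepted under NO_SHIFT_RESTRICT.  */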
4751
 
4752
static int
4753
parse_shift (char **str, int i, enum parse_shift_mode mode)
4754
{
4755
  const struct asm_shift_name *shift_name;
4756
  enum shift_kind shift;
4757
  char *s = *str;
4758
  char *p = s;
4759
  int reg;
4760
 
4761
  for (p = *str; ISALPHA (*p); p++)
4762
    ;
4763
 
4764
  if (p == *str)
4765
    {
4766
      inst.error = _("shift expression expected");
4767
      return FAIL;
4768
    }
4769
 
4770
  shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
4771
                                                            p - *str);
4772
 
4773
  if (shift_name == NULL)
4774
    {
4775
      inst.error = _("shift expression expected");
4776
      return FAIL;
4777
    }
4778
 
4779
  shift = shift_name->kind;
4780
 
4781
  switch (mode)
4782
    {
4783
    case NO_SHIFT_RESTRICT:
4784
    case SHIFT_IMMEDIATE:   break;
4785
 
4786
    case SHIFT_LSL_OR_ASR_IMMEDIATE:
4787
      if (shift != SHIFT_LSL && shift != SHIFT_ASR)
4788
        {
4789
          inst.error = _("'LSL' or 'ASR' required");
4790
          return FAIL;
4791
        }
4792
      break;
4793
 
4794
    case SHIFT_LSL_IMMEDIATE:
4795
      if (shift != SHIFT_LSL)
4796
        {
4797
          inst.error = _("'LSL' required");
4798
          return FAIL;
4799
        }
4800
      break;
4801
 
4802
    case SHIFT_ASR_IMMEDIATE:
4803
      if (shift != SHIFT_ASR)
4804
        {
4805
          inst.error = _("'ASR' required");
4806
          return FAIL;
4807
        }
4808
      break;
4809
 
4810
    default: abort ();
4811
    }
4812
 
4813
  if (shift != SHIFT_RRX)
4814
    {
4815
      /* Whitespace can appear here if the next thing is a bare digit.  */
4816
      skip_whitespace (p);
4817
 
4818
      if (mode == NO_SHIFT_RESTRICT
4819
          && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4820
        {
4821
          inst.operands[i].imm = reg;
4822
          inst.operands[i].immisreg = 1;
4823
        }
4824
      else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4825
        return FAIL;
4826
    }
4827
  inst.operands[i].shift_kind = shift;
4828
  inst.operands[i].shifted = 1;
4829
  *str = p;
4830
  return SUCCESS;
4831
}
4832
 
4833
/* Parse a <shifter_operand> for an ARM data processing instruction:
4834
 
4835
      #<immediate>
4836
      #<immediate>, <rotate>
4837
      <Rm>
4838
      <Rm>, <shift>
4839
 
4840
   where <shift> is defined by parse_shift above, and <rotate> is a
4841
   multiple of 2 between 0 and 30.  Validation of immediate operands
4842
   is deferred to md_apply_fix.  */
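/* For example, "#255, 8" denotes the 8-bit constant 255 rotated right
   by 8 bits (0xff000000); the rotation is folded into bits 8-11 of
   .imm by the "value << 7" below.  */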
4843
 
4844
static int
4845
parse_shifter_operand (char **str, int i)
4846
{
4847
  int value;
4848
  expressionS exp;
4849
 
4850
  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
4851
    {
4852
      inst.operands[i].reg = value;
4853
      inst.operands[i].isreg = 1;
4854
 
4855
      /* parse_shift will override this if appropriate */
4856
      inst.reloc.exp.X_op = O_constant;
4857
      inst.reloc.exp.X_add_number = 0;
4858
 
4859
      if (skip_past_comma (str) == FAIL)
4860
        return SUCCESS;
4861
 
4862
      /* Shift operation on register.  */
4863
      return parse_shift (str, i, NO_SHIFT_RESTRICT);
4864
    }
4865
 
4866
  if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
4867
    return FAIL;
4868
 
4869
  if (skip_past_comma (str) == SUCCESS)
4870
    {
4871
      /* #x, y -- ie explicit rotation by Y.  */
4872
      if (my_get_expression (&exp, str, GE_NO_PREFIX))
4873
        return FAIL;
4874
 
4875
      if (exp.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
4876
        {
4877
          inst.error = _("constant expression expected");
4878
          return FAIL;
4879
        }
4880
 
4881
      value = exp.X_add_number;
4882
      if (value < 0 || value > 30 || value % 2 != 0)
4883
        {
4884
          inst.error = _("invalid rotation");
4885
          return FAIL;
4886
        }
4887
      if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
4888
        {
4889
          inst.error = _("invalid constant");
4890
          return FAIL;
4891
        }
4892
 
4893 163 khays
      /* Encode as specified.  */
4894
      inst.operands[i].imm = inst.reloc.exp.X_add_number | value << 7;
4895
      return SUCCESS;
4896 16 khays
    }
4897
 
4898
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
4899
  inst.reloc.pc_rel = 0;
4900
  return SUCCESS;
4901
}
4902
 
4903
/* Group relocation information.  Each entry in the table contains the
4904
   textual name of the relocation as may appear in assembler source
4905
   and must end with a colon.
4906
   Along with this textual name are the relocation codes to be used if
4907
   the corresponding instruction is an ALU instruction (ADD or SUB only),
4908
   an LDR, an LDRS, or an LDC.  */
4909
 
4910
struct group_reloc_table_entry
4911
{
4912
  const char *name;
4913
  int alu_code;
4914
  int ldr_code;
4915
  int ldrs_code;
4916
  int ldc_code;
4917
};
4918
 
4919
typedef enum
4920
{
4921
  /* Varieties of non-ALU group relocation.  */
4922
 
4923
  GROUP_LDR,
4924
  GROUP_LDRS,
4925
  GROUP_LDC
4926
} group_reloc_type;
4927
 
4928
static struct group_reloc_table_entry group_reloc_table[] =
4929
  { /* Program counter relative: */
4930
    { "pc_g0_nc",
4931
      BFD_RELOC_ARM_ALU_PC_G0_NC,       /* ALU */
4932
      0,                         /* LDR */
4933
      0,                         /* LDRS */
4934
      0 },                       /* LDC */
4935
    { "pc_g0",
4936
      BFD_RELOC_ARM_ALU_PC_G0,          /* ALU */
4937
      BFD_RELOC_ARM_LDR_PC_G0,          /* LDR */
4938
      BFD_RELOC_ARM_LDRS_PC_G0,         /* LDRS */
4939
      BFD_RELOC_ARM_LDC_PC_G0 },        /* LDC */
4940
    { "pc_g1_nc",
4941
      BFD_RELOC_ARM_ALU_PC_G1_NC,       /* ALU */
4942
      0,                         /* LDR */
4943
      0,                         /* LDRS */
4944
      0 },                       /* LDC */
4945
    { "pc_g1",
4946
      BFD_RELOC_ARM_ALU_PC_G1,          /* ALU */
4947
      BFD_RELOC_ARM_LDR_PC_G1,          /* LDR */
4948
      BFD_RELOC_ARM_LDRS_PC_G1,         /* LDRS */
4949
      BFD_RELOC_ARM_LDC_PC_G1 },        /* LDC */
4950
    { "pc_g2",
4951
      BFD_RELOC_ARM_ALU_PC_G2,          /* ALU */
4952
      BFD_RELOC_ARM_LDR_PC_G2,          /* LDR */
4953
      BFD_RELOC_ARM_LDRS_PC_G2,         /* LDRS */
4954
      BFD_RELOC_ARM_LDC_PC_G2 },        /* LDC */
4955
    /* Section base relative */
4956
    { "sb_g0_nc",
4957
      BFD_RELOC_ARM_ALU_SB_G0_NC,       /* ALU */
4958
      0,                         /* LDR */
4959
      0,                         /* LDRS */
4960
      0 },                       /* LDC */
4961
    { "sb_g0",
4962
      BFD_RELOC_ARM_ALU_SB_G0,          /* ALU */
4963
      BFD_RELOC_ARM_LDR_SB_G0,          /* LDR */
4964
      BFD_RELOC_ARM_LDRS_SB_G0,         /* LDRS */
4965
      BFD_RELOC_ARM_LDC_SB_G0 },        /* LDC */
4966
    { "sb_g1_nc",
4967
      BFD_RELOC_ARM_ALU_SB_G1_NC,       /* ALU */
4968
      0,                         /* LDR */
4969
      0,                         /* LDRS */
4970
      0 },                       /* LDC */
4971
    { "sb_g1",
4972
      BFD_RELOC_ARM_ALU_SB_G1,          /* ALU */
4973
      BFD_RELOC_ARM_LDR_SB_G1,          /* LDR */
4974
      BFD_RELOC_ARM_LDRS_SB_G1,         /* LDRS */
4975
      BFD_RELOC_ARM_LDC_SB_G1 },        /* LDC */
4976
    { "sb_g2",
4977
      BFD_RELOC_ARM_ALU_SB_G2,          /* ALU */
4978
      BFD_RELOC_ARM_LDR_SB_G2,          /* LDR */
4979
      BFD_RELOC_ARM_LDRS_SB_G2,         /* LDRS */
4980
      BFD_RELOC_ARM_LDC_SB_G2 } };      /* LDC */
4981
 
4982
/* Given the address of a pointer pointing to the textual name of a group
4983
   relocation as may appear in assembler source, attempt to find its details
4984
   in group_reloc_table.  The pointer will be updated to the character after
4985
   the trailing colon.  On failure, FAIL will be returned; SUCCESS
4986
   otherwise.  On success, *entry will be updated to point at the relevant
4987
   group_reloc_table entry. */
4988
 
4989
static int
4990
find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
4991
{
4992
  unsigned int i;
4993
  for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
4994
    {
4995
      int length = strlen (group_reloc_table[i].name);
4996
 
4997
      if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
4998
          && (*str)[length] == ':')
4999
        {
5000
          *out = &group_reloc_table[i];
5001
          *str += (length + 1);
5002
          return SUCCESS;
5003
        }
5004
    }
5005
 
5006
  return FAIL;
5007
}
5008
 
5009
/* Parse a <shifter_operand> for an ARM data processing instruction
5010
   (as for parse_shifter_operand) where group relocations are allowed:
5011
 
5012
      #<immediate>
5013
      #<immediate>, <rotate>
5014
      #:<group_reloc>:<expression>
5015
      <Rm>
5016
      <Rm>, <shift>
5017
 
5018
   where <group_reloc> is one of the strings defined in group_reloc_table.
5019
   The hashes are optional.
5020
 
5021
   Everything else is as for parse_shifter_operand.  */
5022
 
5023
static parse_operand_result
5024
parse_shifter_operand_group_reloc (char **str, int i)
5025
{
5026
  /* Determine if we have the sequence of characters #: or just :
5027
     coming next.  If we do, then we check for a group relocation.
5028
     If we don't, punt the whole lot to parse_shifter_operand.  */
5029
 
5030
  if (((*str)[0] == '#' && (*str)[1] == ':')
5031
      || (*str)[0] == ':')
5032
    {
5033
      struct group_reloc_table_entry *entry;
5034
 
5035
      if ((*str)[0] == '#')
5036
        (*str) += 2;
5037
      else
5038
        (*str)++;
5039
 
5040
      /* Try to parse a group relocation.  Anything else is an error.  */
5041
      if (find_group_reloc_table_entry (str, &entry) == FAIL)
5042
        {
5043
          inst.error = _("unknown group relocation");
5044
          return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5045
        }
5046
 
5047
      /* We now have the group relocation table entry corresponding to
5048
         the name in the assembler source.  Next, we parse the expression.  */
5049
      if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
5050
        return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5051
 
5052
      /* Record the relocation type (always the ALU variant here).  */
5053
      inst.reloc.type = (bfd_reloc_code_real_type) entry->alu_code;
5054
      gas_assert (inst.reloc.type != 0);
5055
 
5056
      return PARSE_OPERAND_SUCCESS;
5057
    }
5058
  else
5059
    return parse_shifter_operand (str, i) == SUCCESS
5060
           ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
5061
 
5062
  /* Never reached.  */
5063
}
5064
 
5065
/* Parse a Neon alignment expression.  Information is written to
5066
   inst.operands[i].  We assume the initial ':' has been skipped.
5067
 
5068
   align        .imm = align << 8, .immisalign=1, .preind=0  */
5069
static parse_operand_result
5070
parse_neon_alignment (char **str, int i)
5071
{
5072
  char *p = *str;
5073
  expressionS exp;
5074
 
5075
  my_get_expression (&exp, &p, GE_NO_PREFIX);
5076
 
5077
  if (exp.X_op != O_constant)
5078
    {
5079
      inst.error = _("alignment must be constant");
5080
      return PARSE_OPERAND_FAIL;
5081
    }
5082
 
5083
  inst.operands[i].imm = exp.X_add_number << 8;
5084
  inst.operands[i].immisalign = 1;
5085
  /* Alignments are not pre-indexes.  */
5086
  inst.operands[i].preind = 0;
5087
 
5088
  *str = p;
5089
  return PARSE_OPERAND_SUCCESS;
5090
}
5091
 
5092
/* Parse all forms of an ARM address expression.  Information is written
5093
   to inst.operands[i] and/or inst.reloc.
5094
 
5095
   Preindexed addressing (.preind=1):
5096
 
5097
   [Rn, #offset]       .reg=Rn .reloc.exp=offset
5098
   [Rn, +/-Rm]         .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5099
   [Rn, +/-Rm, shift]  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5100
                       .shift_kind=shift .reloc.exp=shift_imm
5101
 
5102
   These three may have a trailing ! which causes .writeback to be set also.
5103
 
5104
   Postindexed addressing (.postind=1, .writeback=1):
5105
 
5106
   [Rn], #offset       .reg=Rn .reloc.exp=offset
5107
   [Rn], +/-Rm         .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5108
   [Rn], +/-Rm, shift  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5109
                       .shift_kind=shift .reloc.exp=shift_imm
5110
 
5111
   Unindexed addressing (.preind=0, .postind=0):
5112
 
5113
   [Rn], {option}      .reg=Rn .imm=option .immisreg=0
5114
 
5115
   Other:
5116
 
5117
   [Rn]{!}             shorthand for [Rn,#0]{!}
5118
   =immediate          .isreg=0 .reloc.exp=immediate
5119
   label               .reg=PC .reloc.pc_rel=1 .reloc.exp=label
5120
 
5121
  It is the caller's responsibility to check for addressing modes not
5122
  supported by the instruction, and to set inst.reloc.type.  */
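/* For example, "[r1, r2, lsl #2]!" yields .reg=r1, .imm=r2,
   .immisreg=1, .shift_kind=SHIFT_LSL, .preind=1 and .writeback=1,
   with the shift amount left in inst.reloc.exp.  */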
5123
 
5124
static parse_operand_result
5125
parse_address_main (char **str, int i, int group_relocations,
5126
                    group_reloc_type group_type)
5127
{
5128
  char *p = *str;
5129
  int reg;
5130
 
5131
  if (skip_past_char (&p, '[') == FAIL)
5132
    {
5133
      if (skip_past_char (&p, '=') == FAIL)
5134
        {
5135
          /* Bare address - translate to PC-relative offset.  */
5136
          inst.reloc.pc_rel = 1;
5137
          inst.operands[i].reg = REG_PC;
5138
          inst.operands[i].isreg = 1;
5139
          inst.operands[i].preind = 1;
5140
        }
5141
      /* Otherwise a load-constant pseudo op, no special treatment needed here.  */
5142
 
5143
      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
5144
        return PARSE_OPERAND_FAIL;
5145
 
5146
      *str = p;
5147
      return PARSE_OPERAND_SUCCESS;
5148
    }
5149
 
5150
  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5151
    {
5152
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5153
      return PARSE_OPERAND_FAIL;
5154
    }
5155
  inst.operands[i].reg = reg;
5156
  inst.operands[i].isreg = 1;
5157
 
5158
  if (skip_past_comma (&p) == SUCCESS)
5159
    {
5160
      inst.operands[i].preind = 1;
5161
 
5162
      if (*p == '+') p++;
5163
      else if (*p == '-') p++, inst.operands[i].negative = 1;
5164
 
5165
      if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5166
        {
5167
          inst.operands[i].imm = reg;
5168
          inst.operands[i].immisreg = 1;
5169
 
5170
          if (skip_past_comma (&p) == SUCCESS)
5171
            if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
5172
              return PARSE_OPERAND_FAIL;
5173
        }
5174
      else if (skip_past_char (&p, ':') == SUCCESS)
5175
        {
5176
          /* FIXME: '@' should be used here, but it's filtered out by generic
5177
             code before we get to see it here. This may be subject to
5178
             change.  */
5179
          parse_operand_result result = parse_neon_alignment (&p, i);
5180
 
5181
          if (result != PARSE_OPERAND_SUCCESS)
5182
            return result;
5183
        }
5184
      else
5185
        {
5186
          if (inst.operands[i].negative)
5187
            {
5188
              inst.operands[i].negative = 0;
5189
              p--;
5190
            }
5191
 
5192
          if (group_relocations
5193
              && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
5194
            {
5195
              struct group_reloc_table_entry *entry;
5196
 
5197
              /* Skip over the #: or : sequence.  */
5198
              if (*p == '#')
5199
                p += 2;
5200
              else
5201
                p++;
5202
 
5203
              /* Try to parse a group relocation.  Anything else is an
5204
                 error.  */
5205
              if (find_group_reloc_table_entry (&p, &entry) == FAIL)
5206
                {
5207
                  inst.error = _("unknown group relocation");
5208
                  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5209
                }
5210
 
5211
              /* We now have the group relocation table entry corresponding to
5212
                 the name in the assembler source.  Next, we parse the
5213
                 expression.  */
5214
              if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
5215
                return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5216
 
5217
              /* Record the relocation type.  */
5218
              switch (group_type)
5219
                {
5220
                  case GROUP_LDR:
5221
                    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldr_code;
5222
                    break;
5223
 
5224
                  case GROUP_LDRS:
5225
                    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldrs_code;
5226
                    break;
5227
 
5228
                  case GROUP_LDC:
5229
                    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldc_code;
5230
                    break;
5231
 
5232
                  default:
5233
                    gas_assert (0);
5234
                }
5235
 
5236
              if (inst.reloc.type == 0)
5237
                {
5238
                  inst.error = _("this group relocation is not allowed on this instruction");
5239
                  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5240
                }
5241
            }
5242
          else
5243
            {
5244
              char *q = p;
5245
              if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5246
                return PARSE_OPERAND_FAIL;
5247
              /* If the offset is 0, find out if it's a +0 or -0.  */
5248
              if (inst.reloc.exp.X_op == O_constant
5249
                  && inst.reloc.exp.X_add_number == 0)
5250
                {
5251
                  skip_whitespace (q);
5252
                  if (*q == '#')
5253
                    {
5254
                      q++;
5255
                      skip_whitespace (q);
5256
                    }
5257
                  if (*q == '-')
5258
                    inst.operands[i].negative = 1;
5259
                }
5260
            }
5261
        }
5262
    }
5263
  else if (skip_past_char (&p, ':') == SUCCESS)
5264
    {
5265
      /* FIXME: '@' should be used here, but it's filtered out by generic code
5266
         before we get to see it here. This may be subject to change.  */
5267
      parse_operand_result result = parse_neon_alignment (&p, i);
5268
 
5269
      if (result != PARSE_OPERAND_SUCCESS)
5270
        return result;
5271
    }
5272
 
5273
  if (skip_past_char (&p, ']') == FAIL)
5274
    {
5275
      inst.error = _("']' expected");
5276
      return PARSE_OPERAND_FAIL;
5277
    }
5278
 
5279
  if (skip_past_char (&p, '!') == SUCCESS)
5280
    inst.operands[i].writeback = 1;
5281
 
5282
  else if (skip_past_comma (&p) == SUCCESS)
5283
    {
5284
      if (skip_past_char (&p, '{') == SUCCESS)
5285
        {
5286
          /* [Rn], {expr} - unindexed, with option */
5287
          if (parse_immediate (&p, &inst.operands[i].imm,
5288
                               0, 255, TRUE) == FAIL)
5289
            return PARSE_OPERAND_FAIL;
5290
 
5291
          if (skip_past_char (&p, '}') == FAIL)
5292
            {
5293
              inst.error = _("'}' expected at end of 'option' field");
5294
              return PARSE_OPERAND_FAIL;
5295
            }
5296
          if (inst.operands[i].preind)
5297
            {
5298
              inst.error = _("cannot combine index with option");
5299
              return PARSE_OPERAND_FAIL;
5300
            }
5301
          *str = p;
5302
          return PARSE_OPERAND_SUCCESS;
5303
        }
5304
      else
5305
        {
5306
          inst.operands[i].postind = 1;
5307
          inst.operands[i].writeback = 1;
5308
 
5309
          if (inst.operands[i].preind)
5310
            {
5311
              inst.error = _("cannot combine pre- and post-indexing");
5312
              return PARSE_OPERAND_FAIL;
5313
            }
5314
 
5315
          if (*p == '+') p++;
5316
          else if (*p == '-') p++, inst.operands[i].negative = 1;
5317
 
5318
          if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5319
            {
5320
              /* We might be using the immediate for alignment already. If we
5321
                 are, OR the register number into the low-order bits.  */
5322
              if (inst.operands[i].immisalign)
5323
                inst.operands[i].imm |= reg;
5324
              else
5325
                inst.operands[i].imm = reg;
5326
              inst.operands[i].immisreg = 1;
5327
 
5328
              if (skip_past_comma (&p) == SUCCESS)
5329
                if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
5330
                  return PARSE_OPERAND_FAIL;
5331
            }
5332
          else
5333
            {
5334
              char *q = p;
5335
              if (inst.operands[i].negative)
5336
                {
5337
                  inst.operands[i].negative = 0;
5338
                  p--;
5339
                }
5340
              if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5341
                return PARSE_OPERAND_FAIL;
5342
              /* If the offset is 0, find out if it's a +0 or -0.  */
5343
              if (inst.reloc.exp.X_op == O_constant
5344
                  && inst.reloc.exp.X_add_number == 0)
5345
                {
5346
                  skip_whitespace (q);
5347
                  if (*q == '#')
5348
                    {
5349
                      q++;
5350
                      skip_whitespace (q);
5351
                    }
5352
                  if (*q == '-')
5353
                    inst.operands[i].negative = 1;
5354
                }
5355
            }
5356
        }
5357
    }
5358
 
5359
  /* If at this point neither .preind nor .postind is set, we have a
5360
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
5361
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
5362
    {
5363
      inst.operands[i].preind = 1;
5364
      inst.reloc.exp.X_op = O_constant;
5365
      inst.reloc.exp.X_add_number = 0;
5366
    }
5367
  *str = p;
5368
  return PARSE_OPERAND_SUCCESS;
5369
}
5370
 
5371
static int
5372
parse_address (char **str, int i)
5373
{
5374
  return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
5375
         ? SUCCESS : FAIL;
5376
}
5377
 
5378
static parse_operand_result
5379
parse_address_group_reloc (char **str, int i, group_reloc_type type)
5380
{
5381
  return parse_address_main (str, i, 1, type);
5382
}
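
/* Some representative address operands that parse_address_main and the two
   wrappers above are intended to accept (assuming the usual GAS ARM syntax;
   "sym" is a placeholder symbol):

       ldr     r0, [r1]                  @ bare [Rn], shorthand for [Rn, #0]
       ldr     r0, [r1, #4]              @ immediate offset
       ldr     r0, [r1, r2, lsl #2]      @ register offset with shift
       ldr     r0, [r1, #8]!             @ pre-indexed with writeback
       ldr     r0, [r1], #4              @ post-indexed
       vld1.32 {d0, d1}, [r1:64]         @ Neon alignment uses ':', not '@'
       ldr     r0, [r1, #:pc_g1:(sym)]   @ group relocation (GROUP_LDR)  */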
5383
 
5384
/* Parse an operand for a MOVW or MOVT instruction.  */
5385
static int
5386
parse_half (char **str)
5387
{
5388
  char * p;
5389
 
5390
  p = *str;
5391
  skip_past_char (&p, '#');
5392
  if (strncasecmp (p, ":lower16:", 9) == 0)
5393
    inst.reloc.type = BFD_RELOC_ARM_MOVW;
5394
  else if (strncasecmp (p, ":upper16:", 9) == 0)
5395
    inst.reloc.type = BFD_RELOC_ARM_MOVT;
5396
 
5397
  if (inst.reloc.type != BFD_RELOC_UNUSED)
5398
    {
5399
      p += 9;
5400
      skip_whitespace (p);
5401
    }
5402
 
5403
  if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
5404
    return FAIL;
5405
 
5406
  if (inst.reloc.type == BFD_RELOC_UNUSED)
5407
    {
5408
      if (inst.reloc.exp.X_op != O_constant)
5409
        {
5410
          inst.error = _("constant expression expected");
5411
          return FAIL;
5412
        }
5413
      if (inst.reloc.exp.X_add_number < 0
5414
          || inst.reloc.exp.X_add_number > 0xffff)
5415
        {
5416
          inst.error = _("immediate value out of range");
5417
          return FAIL;
5418
        }
5419
    }
5420
  *str = p;
5421
  return SUCCESS;
5422
}
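
/* Representative MOVW/MOVT operands for parse_half (assuming the usual GAS
   ARM syntax; "sym" is a placeholder symbol):

       movw    r0, #0x1234           @ plain constant, 0 .. 0xffff
       movw    r0, #:lower16:sym     @ BFD_RELOC_ARM_MOVW
       movt    r0, #:upper16:sym     @ BFD_RELOC_ARM_MOVT  */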
5423
 
5424
/* Miscellaneous. */
5425
 
5426
/* Parse a PSR flag operand.  The value returned is FAIL on syntax error,
5427
   or a bitmask suitable to be or-ed into the ARM msr instruction.  */
5428
static int
5429
parse_psr (char **str, bfd_boolean lhs)
5430
{
5431
  char *p;
5432
  unsigned long psr_field;
5433
  const struct asm_psr *psr;
5434
  char *start;
5435
  bfd_boolean is_apsr = FALSE;
5436
  bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);
5437
 
5438
  /* PR gas/12698:  If the user has specified -march=all then m_profile will
5439
     be TRUE, but we want to ignore it in this case as we are building for any
5440
     CPU type, including non-m variants.  */
5441
  if (selected_cpu.core == arm_arch_any.core)
5442
    m_profile = FALSE;
5443
 
5444
  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
5445
     feature for ease of use and backwards compatibility.  */
5446
  p = *str;
5447
  if (strncasecmp (p, "SPSR", 4) == 0)
5448
    {
5449
      if (m_profile)
5450
        goto unsupported_psr;
5451
 
5452
      psr_field = SPSR_BIT;
5453
    }
5454
  else if (strncasecmp (p, "CPSR", 4) == 0)
5455
    {
5456
      if (m_profile)
5457
        goto unsupported_psr;
5458
 
5459
      psr_field = 0;
5460
    }
5461
  else if (strncasecmp (p, "APSR", 4) == 0)
5462
    {
5463
      /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
5464
         and ARMv7-R architecture CPUs.  */
5465
      is_apsr = TRUE;
5466
      psr_field = 0;
5467
    }
5468
  else if (m_profile)
5469
    {
5470
      start = p;
5471
      do
5472
        p++;
5473
      while (ISALNUM (*p) || *p == '_');
5474
 
5475
      if (strncasecmp (start, "iapsr", 5) == 0
5476
          || strncasecmp (start, "eapsr", 5) == 0
5477
          || strncasecmp (start, "xpsr", 4) == 0
5478
          || strncasecmp (start, "psr", 3) == 0)
5479
        p = start + strcspn (start, "rR") + 1;
5480
 
5481
      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
5482
                                                  p - start);
5483
 
5484
      if (!psr)
5485
        return FAIL;
5486
 
5487
      /* If APSR is being written, a bitfield may be specified.  Note that
5488
         APSR itself is handled above.  */
5489
      if (psr->field <= 3)
5490
        {
5491
          psr_field = psr->field;
5492
          is_apsr = TRUE;
5493
          goto check_suffix;
5494
        }
5495
 
5496
      *str = p;
5497
      /* M-profile MSR instructions have the mask field set to "10", except
5498
         *PSR variants which modify APSR, which may use a different mask (and
5499
         have been handled already).  Do that by setting the PSR_f field
5500
         here.  */
5501
      return psr->field | (lhs ? PSR_f : 0);
5502
    }
5503
  else
5504
    goto unsupported_psr;
5505
 
5506
  p += 4;
5507
check_suffix:
5508
  if (*p == '_')
5509
    {
5510
      /* A suffix follows.  */
5511
      p++;
5512
      start = p;
5513
 
5514
      do
5515
        p++;
5516
      while (ISALNUM (*p) || *p == '_');
5517
 
5518
      if (is_apsr)
5519
        {
5520
          /* APSR uses a notation for bits, rather than fields.  */
5521
          unsigned int nzcvq_bits = 0;
5522
          unsigned int g_bit = 0;
5523
          char *bit;
5524
 
5525
          for (bit = start; bit != p; bit++)
5526
            {
5527
              switch (TOLOWER (*bit))
5528
                {
5529
                case 'n':
5530
                  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
5531
                  break;
5532
 
5533
                case 'z':
5534
                  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
5535
                  break;
5536
 
5537
                case 'c':
5538
                  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
5539
                  break;
5540
 
5541
                case 'v':
5542
                  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
5543
                  break;
5544
 
5545
                case 'q':
5546
                  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
5547
                  break;
5548
 
5549
                case 'g':
5550
                  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
5551
                  break;
5552
 
5553
                default:
5554
                  inst.error = _("unexpected bit specified after APSR");
5555
                  return FAIL;
5556
                }
5557
            }
5558
 
5559
          if (nzcvq_bits == 0x1f)
5560
            psr_field |= PSR_f;
5561
 
5562
          if (g_bit == 0x1)
5563
            {
5564
              if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
5565
                {
5566
                  inst.error = _("selected processor does not "
5567
                                 "support DSP extension");
5568
                  return FAIL;
5569
                }
5570
 
5571
              psr_field |= PSR_s;
5572
            }
5573
 
5574
          if ((nzcvq_bits & 0x20) != 0
5575
              || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
5576
              || (g_bit & 0x2) != 0)
5577
            {
5578
              inst.error = _("bad bitmask specified after APSR");
5579
              return FAIL;
5580
            }
5581
        }
5582
      else
5583
        {
5584
          psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
5585
                                                      p - start);
5586
          if (!psr)
5587
            goto error;
5588
 
5589
          psr_field |= psr->field;
5590
        }
5591
    }
5592
  else
5593
    {
5594
      if (ISALNUM (*p))
5595
        goto error;    /* Garbage after "[CS]PSR".  */
5596
 
5597
      /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
5598
         is deprecated, but allow it anyway.  */
5599
      if (is_apsr && lhs)
5600
        {
5601
          psr_field |= PSR_f;
5602
          as_tsktsk (_("writing to APSR without specifying a bitmask is "
5603
                       "deprecated"));
5604
        }
5605
      else if (!m_profile)
5606
        /* These bits are never right for M-profile devices: don't set them
5607
           (only code paths which read/write APSR reach here).  */
5608
        psr_field |= (PSR_c | PSR_f);
5609
    }
5610
  *str = p;
5611
  return psr_field;
5612
 
5613
 unsupported_psr:
5614
  inst.error = _("selected processor does not support requested special "
5615
                 "purpose register");
5616
  return FAIL;
5617
 
5618
 error:
5619
  inst.error = _("flag for {c}psr instruction expected");
5620
  return FAIL;
5621
}
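
/* Representative PSR operands for parse_psr (assuming the usual GAS ARM
   syntax; the M-profile name is only an illustration of what the
   arm_v7m_psr_hsh lookup handles):

       mrs     r0, CPSR              @ A/R profile, whole register
       msr     CPSR_fc, r1           @ flags + control fields
       msr     APSR_nzcvq, r2        @ APSR with an explicit bit list
       msr     APSR_nzcvqg, r3       @ 'g' requires the DSP extension
       msr     PRIMASK, r4           @ M-profile special register  */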
5622
 
5623
/* Parse the flags argument to CPSI[ED].  Returns FAIL on error, or a
5624
   value suitable for splatting into the AIF field of the instruction.  */
5625
 
5626
static int
5627
parse_cps_flags (char **str)
5628
{
5629
  int val = 0;
5630
  int saw_a_flag = 0;
5631
  char *s = *str;
5632
 
5633
  for (;;)
5634
    switch (*s++)
5635
      {
5636
      case '\0': case ',':
5637
        goto done;
5638
 
5639
      case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
5640
      case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
5641
      case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
5642
 
5643
      default:
5644
        inst.error = _("unrecognized CPS flag");
5645
        return FAIL;
5646
      }
5647
 
5648
 done:
5649
  if (saw_a_flag == 0)
5650
    {
5651
      inst.error = _("missing CPS flags");
5652
      return FAIL;
5653
    }
5654
 
5655
  *str = s - 1;
5656
  return val;
5657
}
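
/* Representative CPS flag operands for parse_cps_flags (assuming the usual
   GAS ARM syntax); the values follow the switch above:

       cpsie   i            @ val = 0x2
       cpsid   aif          @ val = 0x7  */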
5658
 
5659
/* Parse an endian specifier ("BE" or "LE", case insensitive);
5660
   returns 0 for big-endian, 1 for little-endian, FAIL for an error.  */
5661
 
5662
static int
5663
parse_endian_specifier (char **str)
5664
{
5665
  int little_endian;
5666
  char *s = *str;
5667
 
5668
  if (strncasecmp (s, "BE", 2))
5669
    little_endian = 0;
5670
  else if (strncasecmp (s, "LE", 2))
5671
    little_endian = 1;
5672
  else
5673
    {
5674
      inst.error = _("valid endian specifiers are be or le");
5675
      return FAIL;
5676
    }
5677
 
5678
  if (ISALNUM (s[2]) || s[2] == '_')
5679
    {
5680
      inst.error = _("valid endian specifiers are be or le");
5681
      return FAIL;
5682
    }
5683
 
5684
  *str = s + 2;
5685
  return little_endian;
5686
}
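
/* Representative uses of the endian specifier parsed above (assuming the
   usual GAS ARM syntax):

       setend  be
       setend  le  */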
5687
 
5688
/* Parse a rotation specifier: ROR #0, #8, #16, #24.  *val receives a
5689
   value suitable for poking into the rotate field of an sxt or sxta
5690
   instruction, or FAIL on error.  */
5691
 
5692
static int
5693
parse_ror (char **str)
5694
{
5695
  int rot;
5696
  char *s = *str;
5697
 
5698
  if (strncasecmp (s, "ROR", 3) == 0)
5699
    s += 3;
5700
  else
5701
    {
5702
      inst.error = _("missing rotation field after comma");
5703
      return FAIL;
5704
    }
5705
 
5706
  if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
5707
    return FAIL;
5708
 
5709
  switch (rot)
5710
    {
5711
    case  0: *str = s; return 0x0;
5712
    case  8: *str = s; return 0x1;
5713
    case 16: *str = s; return 0x2;
5714
    case 24: *str = s; return 0x3;
5715
 
5716
    default:
5717
      inst.error = _("rotation can only be 0, 8, 16, or 24");
5718
      return FAIL;
5719
    }
5720
}
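
/* Representative rotation operands for parse_ror (assuming the usual GAS
   ARM syntax); the return values follow the switch above:

       sxtb    r0, r1, ror #16       @ returns 0x2
       uxtab   r2, r3, r4, ror #8    @ returns 0x1
       sxth    r5, r6                @ rotation omitted (OP_oROR)  */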
5721
 
5722
/* Parse a conditional code (from conds[] below).  The value returned is in the
5723
   range 0 .. 14, or FAIL.  */
5724
static int
5725
parse_cond (char **str)
5726
{
5727
  char *q;
5728
  const struct asm_cond *c;
5729
  int n;
5730
  /* Condition codes are always 2 characters, so matching up to
5731
     3 characters is sufficient.  */
5732
  char cond[3];
5733
 
5734
  q = *str;
5735
  n = 0;
5736
  while (ISALPHA (*q) && n < 3)
5737
    {
5738
      cond[n] = TOLOWER (*q);
5739
      q++;
5740
      n++;
5741
    }
5742
 
5743
  c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
5744
  if (!c)
5745
    {
5746
      inst.error = _("condition required");
5747
      return FAIL;
5748
    }
5749
 
5750
  *str = q;
5751
  return c->value;
5752
}
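
/* Representative condition codes consumed by parse_cond (assuming the usual
   GAS ARM syntax; "loop" is a placeholder label):

       moveq   r0, #0        @ "eq" suffix
       bgt     loop          @ "gt" suffix
       it      ne            @ bare condition operand  */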
5753
 
5754
/* Parse an option for a barrier instruction.  Returns the encoding for the
5755
   option, or FAIL.  */
5756
static int
5757
parse_barrier (char **str)
5758
{
5759
  char *p, *q;
5760
  const struct asm_barrier_opt *o;
5761
 
5762
  p = q = *str;
5763
  while (ISALPHA (*q))
5764
    q++;
5765
 
5766
  o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
5767
                                                    q - p);
5768
  if (!o)
5769
    return FAIL;
5770
 
5771
  *str = q;
5772
  return o->value;
5773
}
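
/* Representative barrier operands for parse_barrier (assuming the usual GAS
   ARM syntax; option names are looked up in arm_barrier_opt_hsh):

       dmb     sy
       dsb     st
       isb     sy            @ ISB only accepts SY (see po_barrier_or_imm)
       dmb     #15           @ numeric form, handled by the immediate path  */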
5774
 
5775
/* Parse the operands of a table branch instruction.  Similar to a memory
5776
   operand.  */
5777
static int
5778
parse_tb (char **str)
5779
{
5780
  char * p = *str;
5781
  int reg;
5782
 
5783
  if (skip_past_char (&p, '[') == FAIL)
5784
    {
5785
      inst.error = _("'[' expected");
5786
      return FAIL;
5787
    }
5788
 
5789
  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5790
    {
5791
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5792
      return FAIL;
5793
    }
5794
  inst.operands[0].reg = reg;
5795
 
5796
  if (skip_past_comma (&p) == FAIL)
5797
    {
5798
      inst.error = _("',' expected");
5799
      return FAIL;
5800
    }
5801
 
5802
  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5803
    {
5804
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5805
      return FAIL;
5806
    }
5807
  inst.operands[0].imm = reg;
5808
 
5809
  if (skip_past_comma (&p) == SUCCESS)
5810
    {
5811
      if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
5812
        return FAIL;
5813
      if (inst.reloc.exp.X_add_number != 1)
5814
        {
5815
          inst.error = _("invalid shift");
5816
          return FAIL;
5817
        }
5818
      inst.operands[0].shifted = 1;
5819
    }
5820
 
5821
  if (skip_past_char (&p, ']') == FAIL)
5822
    {
5823
      inst.error = _("']' expected");
5824
      return FAIL;
5825
    }
5826
  *str = p;
5827
  return SUCCESS;
5828
}
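
/* Representative table-branch operands for parse_tb (assuming the usual GAS
   Thumb-2 syntax):

       tbb     [r0, r1]
       tbh     [r0, r1, lsl #1]      @ the only shift accepted is LSL #1  */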
5829
 
5830
/* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
5831
   information on the types the operands can take and how they are encoded.
5832
   Up to four operands may be read; this function handles setting the
5833
   ".present" field for each read operand itself.
5834
   Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
5835
   else returns FAIL.  */
5836
 
5837
static int
5838
parse_neon_mov (char **str, int *which_operand)
5839
{
5840
  int i = *which_operand, val;
5841
  enum arm_reg_type rtype;
5842
  char *ptr = *str;
5843
  struct neon_type_el optype;
5844
 
5845
  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
5846
    {
5847
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
5848
      inst.operands[i].reg = val;
5849
      inst.operands[i].isscalar = 1;
5850
      inst.operands[i].vectype = optype;
5851
      inst.operands[i++].present = 1;
5852
 
5853
      if (skip_past_comma (&ptr) == FAIL)
5854
        goto wanted_comma;
5855
 
5856
      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5857
        goto wanted_arm;
5858
 
5859
      inst.operands[i].reg = val;
5860
      inst.operands[i].isreg = 1;
5861
      inst.operands[i].present = 1;
5862
    }
5863
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
5864
           != FAIL)
5865
    {
5866
      /* Cases 0, 1, 2, 3, 5 (D only).  */
5867
      if (skip_past_comma (&ptr) == FAIL)
5868
        goto wanted_comma;
5869
 
5870
      inst.operands[i].reg = val;
5871
      inst.operands[i].isreg = 1;
5872
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
5873
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5874
      inst.operands[i].isvec = 1;
5875
      inst.operands[i].vectype = optype;
5876
      inst.operands[i++].present = 1;
5877
 
5878
      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5879
        {
5880
          /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
5881
             Case 13: VMOV <Sd>, <Rm>  */
5882
          inst.operands[i].reg = val;
5883
          inst.operands[i].isreg = 1;
5884
          inst.operands[i].present = 1;
5885
 
5886
          if (rtype == REG_TYPE_NQ)
5887
            {
5888
              first_error (_("can't use Neon quad register here"));
5889
              return FAIL;
5890
            }
5891
          else if (rtype != REG_TYPE_VFS)
5892
            {
5893
              i++;
5894
              if (skip_past_comma (&ptr) == FAIL)
5895
                goto wanted_comma;
5896
              if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5897
                goto wanted_arm;
5898
              inst.operands[i].reg = val;
5899
              inst.operands[i].isreg = 1;
5900
              inst.operands[i].present = 1;
5901
            }
5902
        }
5903
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
5904
                                           &optype)) != FAIL)
5905
        {
5906
          /* Case 0: VMOV<c><q> <Qd>, <Qm>
5907
             Case 1: VMOV<c><q> <Dd>, <Dm>
5908
             Case 8: VMOV.F32 <Sd>, <Sm>
5909
             Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */
5910
 
5911
          inst.operands[i].reg = val;
5912
          inst.operands[i].isreg = 1;
5913
          inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
5914
          inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5915
          inst.operands[i].isvec = 1;
5916
          inst.operands[i].vectype = optype;
5917
          inst.operands[i].present = 1;
5918
 
5919
          if (skip_past_comma (&ptr) == SUCCESS)
5920
            {
5921
              /* Case 15.  */
5922
              i++;
5923
 
5924
              if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5925
                goto wanted_arm;
5926
 
5927
              inst.operands[i].reg = val;
5928
              inst.operands[i].isreg = 1;
5929
              inst.operands[i++].present = 1;
5930
 
5931
              if (skip_past_comma (&ptr) == FAIL)
5932
                goto wanted_comma;
5933
 
5934
              if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5935
                goto wanted_arm;
5936
 
5937
              inst.operands[i].reg = val;
5938
              inst.operands[i].isreg = 1;
5939 166 khays
              inst.operands[i].present = 1;
5940 16 khays
            }
5941
        }
5942
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
5943
          /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
5944
             Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
5945
             Case 10: VMOV.F32 <Sd>, #<imm>
5946
             Case 11: VMOV.F64 <Dd>, #<imm>  */
5947
        inst.operands[i].immisfloat = 1;
5948
      else if (parse_big_immediate (&ptr, i) == SUCCESS)
5949
          /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
5950
             Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
5951
        ;
5952
      else
5953
        {
5954
          first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
5955
          return FAIL;
5956
        }
5957
    }
5958
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5959
    {
5960
      /* Cases 6, 7.  */
5961
      inst.operands[i].reg = val;
5962
      inst.operands[i].isreg = 1;
5963
      inst.operands[i++].present = 1;
5964
 
5965
      if (skip_past_comma (&ptr) == FAIL)
5966
        goto wanted_comma;
5967
 
5968
      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
5969
        {
5970
          /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
5971
          inst.operands[i].reg = val;
5972
          inst.operands[i].isscalar = 1;
5973
          inst.operands[i].present = 1;
5974
          inst.operands[i].vectype = optype;
5975
        }
5976
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5977
        {
5978
          /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
5979
          inst.operands[i].reg = val;
5980
          inst.operands[i].isreg = 1;
5981
          inst.operands[i++].present = 1;
5982
 
5983
          if (skip_past_comma (&ptr) == FAIL)
5984
            goto wanted_comma;
5985
 
5986
          if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
5987
              == FAIL)
5988
            {
5989
              first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
5990
              return FAIL;
5991
            }
5992
 
5993
          inst.operands[i].reg = val;
5994
          inst.operands[i].isreg = 1;
5995
          inst.operands[i].isvec = 1;
5996
          inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5997
          inst.operands[i].vectype = optype;
5998
          inst.operands[i].present = 1;
5999
 
6000
          if (rtype == REG_TYPE_VFS)
6001
            {
6002
              /* Case 14.  */
6003
              i++;
6004
              if (skip_past_comma (&ptr) == FAIL)
6005
                goto wanted_comma;
6006
              if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
6007
                                              &optype)) == FAIL)
6008
                {
6009
                  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
6010
                  return FAIL;
6011
                }
6012
              inst.operands[i].reg = val;
6013
              inst.operands[i].isreg = 1;
6014
              inst.operands[i].isvec = 1;
6015
              inst.operands[i].issingle = 1;
6016
              inst.operands[i].vectype = optype;
6017
              inst.operands[i].present = 1;
6018
            }
6019
        }
6020
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
6021
               != FAIL)
6022
        {
6023
          /* Case 13.  */
6024
          inst.operands[i].reg = val;
6025
          inst.operands[i].isreg = 1;
6026
          inst.operands[i].isvec = 1;
6027
          inst.operands[i].issingle = 1;
6028
          inst.operands[i].vectype = optype;
6029 166 khays
          inst.operands[i].present = 1;
6030 16 khays
        }
6031
    }
6032
  else
6033
    {
6034
      first_error (_("parse error"));
6035
      return FAIL;
6036
    }
6037
 
6038
  /* Successfully parsed the operands. Update args.  */
6039
  *which_operand = i;
6040
  *str = ptr;
6041
  return SUCCESS;
6042
 
6043
 wanted_comma:
6044
  first_error (_("expected comma"));
6045
  return FAIL;
6046
 
6047
 wanted_arm:
6048
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
6049
  return FAIL;
6050
}
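
/* A few concrete forms of the VMOV cases handled above (assuming the usual
   GAS syntax; the register choices are arbitrary):

       vmov.32  d0[1], r2           @ case 4: ARM reg -> scalar
       vmov     d0, r2, r3          @ case 5: two ARM regs -> D reg
       vmov     r2, r3, d0          @ case 7: D reg -> two ARM regs
       vmov.f32 s0, s1              @ case 8
       vmov     s0, s1, r2, r3      @ case 15
       vmov.i32 q0, #0x12           @ case 2: big/Neon immediate
       vmov.f64 d1, #1.0            @ case 11: VFP float immediate  */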
6051
 
6052
/* Use this macro when the operand constraints are different
6053
   for ARM and THUMB (e.g. ldrd).  */
6054
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
6055
        ((arm_operand) | ((thumb_operand) << 16))
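
/* For example, OP_RRnpc_npcsp below is
   MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp): parse_operands uses the low
   16 bits (OP_RRnpc, PC rejected) when assembling ARM code and the high 16
   bits (OP_RRnpcsp, PC and SP rejected) when assembling Thumb code.  */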
6056
 
6057
/* Matcher codes for parse_operands.  */
6058
enum operand_parse_code
6059
{
6060
  OP_stop,      /* end of line */
6061
 
6062
  OP_RR,        /* ARM register */
6063
  OP_RRnpc,     /* ARM register, not r15 */
6064
  OP_RRnpcsp,   /* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
6065
  OP_RRnpcb,    /* ARM register, not r15, in square brackets */
6066
  OP_RRnpctw,   /* ARM register, not r15 in Thumb-state or with writeback,
6067
                   optional trailing ! */
6068
  OP_RRw,       /* ARM register, not r15, optional trailing ! */
6069
  OP_RCP,       /* Coprocessor number */
6070
  OP_RCN,       /* Coprocessor register */
6071
  OP_RF,        /* FPA register */
6072
  OP_RVS,       /* VFP single precision register */
6073
  OP_RVD,       /* VFP double precision register (0..15) */
6074
  OP_RND,       /* Neon double precision register (0..31) */
6075
  OP_RNQ,       /* Neon quad precision register */
6076
  OP_RVSD,      /* VFP single or double precision register */
6077
  OP_RNDQ,      /* Neon double or quad precision register */
6078
  OP_RNSDQ,     /* Neon single, double or quad precision register */
6079
  OP_RNSC,      /* Neon scalar D[X] */
6080
  OP_RVC,       /* VFP control register */
6081
  OP_RMF,       /* Maverick F register */
6082
  OP_RMD,       /* Maverick D register */
6083
  OP_RMFX,      /* Maverick FX register */
6084
  OP_RMDX,      /* Maverick DX register */
6085
  OP_RMAX,      /* Maverick AX register */
6086
  OP_RMDS,      /* Maverick DSPSC register */
6087
  OP_RIWR,      /* iWMMXt wR register */
6088
  OP_RIWC,      /* iWMMXt wC register */
6089
  OP_RIWG,      /* iWMMXt wCG register */
6090
  OP_RXA,       /* XScale accumulator register */
6091
 
6092
  OP_REGLST,    /* ARM register list */
6093
  OP_VRSLST,    /* VFP single-precision register list */
6094
  OP_VRDLST,    /* VFP double-precision register list */
6095
  OP_VRSDLST,   /* VFP single or double-precision register list (& quad) */
6096
  OP_NRDLST,    /* Neon double-precision register list (d0-d31, qN aliases) */
6097
  OP_NSTRLST,   /* Neon element/structure list */
6098
 
6099
  OP_RNDQ_I0,   /* Neon D or Q reg, or immediate zero.  */
6100
  OP_RVSD_I0,   /* VFP S or D reg, or immediate zero.  */
6101
  OP_RR_RNSC,   /* ARM reg or Neon scalar.  */
6102
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
6103
  OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar.  */
6104
  OP_RND_RNSC,  /* Neon D reg, or Neon scalar.  */
6105
  OP_VMOV,      /* Neon VMOV operands.  */
6106
  OP_RNDQ_Ibig, /* Neon D or Q reg, or big immediate for logic and VMVN.  */
6107
  OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift.  */
6108
  OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */
6109
 
6110
  OP_I0,        /* immediate zero */
6111
  OP_I7,        /* immediate value 0 .. 7 */
6112
  OP_I15,       /*                 0 .. 15 */
6113
  OP_I16,       /*                 1 .. 16 */
6114
  OP_I16z,      /*                 0 .. 16 */
6115
  OP_I31,       /*                 0 .. 31 */
6116
  OP_I31w,      /*                 0 .. 31, optional trailing ! */
6117
  OP_I32,       /*                 1 .. 32 */
6118
  OP_I32z,      /*                 0 .. 32 */
6119
  OP_I63,       /*                 0 .. 63 */
6120
  OP_I63s,      /*               -64 .. 63 */
6121
  OP_I64,       /*                 1 .. 64 */
6122
  OP_I64z,      /*                 0 .. 64 */
6123
  OP_I255,      /*                 0 .. 255 */
6124
 
6125
  OP_I4b,       /* immediate, prefix optional, 1 .. 4 */
6126
  OP_I7b,       /*                             0 .. 7 */
6127
  OP_I15b,      /*                             0 .. 15 */
6128
  OP_I31b,      /*                             0 .. 31 */
6129
 
6130
  OP_SH,        /* shifter operand */
6131
  OP_SHG,       /* shifter operand with possible group relocation */
6132
  OP_ADDR,      /* Memory address expression (any mode) */
6133
  OP_ADDRGLDR,  /* Mem addr expr (any mode) with possible LDR group reloc */
6134
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
6135
  OP_ADDRGLDC,  /* Mem addr expr (any mode) with possible LDC group reloc */
6136
  OP_EXP,       /* arbitrary expression */
6137
  OP_EXPi,      /* same, with optional immediate prefix */
6138
  OP_EXPr,      /* same, with optional relocation suffix */
6139
  OP_HALF,      /* 0 .. 65535 or low/high reloc.  */
6140
 
6141
  OP_CPSF,      /* CPS flags */
6142
  OP_ENDI,      /* Endianness specifier */
6143
  OP_wPSR,      /* CPSR/SPSR/APSR mask for msr (writing).  */
6144
  OP_rPSR,      /* CPSR/SPSR/APSR mask for msr (reading).  */
6145
  OP_COND,      /* conditional code */
6146
  OP_TB,        /* Table branch.  */
6147
 
6148
  OP_APSR_RR,   /* ARM register or "APSR_nzcv".  */
6149
 
6150
  OP_RRnpc_I0,  /* ARM register or literal 0 */
6151
  OP_RR_EXr,    /* ARM register or expression with opt. reloc suff. */
6152
  OP_RR_EXi,    /* ARM register or expression with imm prefix */
6153
  OP_RF_IF,     /* FPA register or immediate */
6154
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
6155
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */
6156
 
6157
  /* Optional operands.  */
6158
  OP_oI7b,       /* immediate, prefix optional, 0 .. 7 */
6159
  OP_oI31b,      /*                             0 .. 31 */
6160
  OP_oI32b,      /*                             1 .. 32 */
6161 160 khays
  OP_oI32z,      /*                             0 .. 32 */
6162 16 khays
  OP_oIffffb,    /*                             0 .. 65535 */
6163
  OP_oI255c,     /*       curly-brace enclosed, 0 .. 255 */
6164
 
6165
  OP_oRR,        /* ARM register */
6166
  OP_oRRnpc,     /* ARM register, not the PC */
6167
  OP_oRRnpcsp,   /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
6168
  OP_oRRw,       /* ARM register, not r15, optional trailing ! */
6169
  OP_oRND,       /* Optional Neon double precision register */
6170
  OP_oRNQ,       /* Optional Neon quad precision register */
6171
  OP_oRNDQ,      /* Optional Neon double or quad precision register */
6172
  OP_oRNSDQ,     /* Optional single, double or quad precision vector register */
6173
  OP_oSHll,      /* LSL immediate */
6174
  OP_oSHar,      /* ASR immediate */
6175
  OP_oSHllar,    /* LSL or ASR immediate */
6176
  OP_oROR,       /* ROR 0/8/16/24 */
6177
  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */
6178
 
6179
  /* Some pre-defined mixed (ARM/THUMB) operands.  */
6180
  OP_RR_npcsp           = MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
6181
  OP_RRnpc_npcsp        = MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
6182
  OP_oRRnpc_npcsp       = MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),
6183
 
6184
  OP_FIRST_OPTIONAL = OP_oI7b
6185
};
6186
 
6187
/* Generic instruction operand parser.  This does no encoding and no
6188
   semantic validation; it merely squirrels values away in the inst
6189
   structure.  Returns SUCCESS or FAIL depending on whether the
6190
   specified grammar matched.  */
6191
static int
6192
parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
6193
{
6194
  unsigned const int *upat = pattern;
6195
  char *backtrack_pos = 0;
6196
  const char *backtrack_error = 0;
6197
  int i, val, backtrack_index = 0;
6198
  enum arm_reg_type rtype;
6199
  parse_operand_result result;
6200
  unsigned int op_parse_code;
6201
 
6202
#define po_char_or_fail(chr)                    \
6203
  do                                            \
6204
    {                                           \
6205
      if (skip_past_char (&str, chr) == FAIL)   \
6206
        goto bad_args;                          \
6207
    }                                           \
6208
  while (0)
6209
 
6210
#define po_reg_or_fail(regtype)                                 \
6211
  do                                                            \
6212
    {                                                           \
6213
      val = arm_typed_reg_parse (& str, regtype, & rtype,       \
6214
                                 & inst.operands[i].vectype);   \
6215
      if (val == FAIL)                                          \
6216
        {                                                       \
6217
          first_error (_(reg_expected_msgs[regtype]));          \
6218
          goto failure;                                         \
6219
        }                                                       \
6220
      inst.operands[i].reg = val;                               \
6221
      inst.operands[i].isreg = 1;                               \
6222
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);         \
6223
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);      \
6224
      inst.operands[i].isvec = (rtype == REG_TYPE_VFS           \
6225
                             || rtype == REG_TYPE_VFD           \
6226
                             || rtype == REG_TYPE_NQ);          \
6227
    }                                                           \
6228
  while (0)
6229
 
6230
#define po_reg_or_goto(regtype, label)                          \
6231
  do                                                            \
6232
    {                                                           \
6233
      val = arm_typed_reg_parse (& str, regtype, & rtype,       \
6234
                                 & inst.operands[i].vectype);   \
6235
      if (val == FAIL)                                          \
6236
        goto label;                                             \
6237
                                                                \
6238
      inst.operands[i].reg = val;                               \
6239
      inst.operands[i].isreg = 1;                               \
6240
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);         \
6241
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);      \
6242
      inst.operands[i].isvec = (rtype == REG_TYPE_VFS           \
6243
                             || rtype == REG_TYPE_VFD           \
6244
                             || rtype == REG_TYPE_NQ);          \
6245
    }                                                           \
6246
  while (0)
6247
 
6248
#define po_imm_or_fail(min, max, popt)                          \
6249
  do                                                            \
6250
    {                                                           \
6251
      if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6252
        goto failure;                                           \
6253
      inst.operands[i].imm = val;                               \
6254
    }                                                           \
6255
  while (0)
6256
 
6257
#define po_scalar_or_goto(elsz, label)                                  \
6258
  do                                                                    \
6259
    {                                                                   \
6260
      val = parse_scalar (& str, elsz, & inst.operands[i].vectype);     \
6261
      if (val == FAIL)                                                  \
6262
        goto label;                                                     \
6263
      inst.operands[i].reg = val;                                       \
6264
      inst.operands[i].isscalar = 1;                                    \
6265
    }                                                                   \
6266
  while (0)
6267
 
6268
#define po_misc_or_fail(expr)                   \
6269
  do                                            \
6270
    {                                           \
6271
      if (expr)                                 \
6272
        goto failure;                           \
6273
    }                                           \
6274
  while (0)
6275
 
6276
#define po_misc_or_fail_no_backtrack(expr)              \
6277
  do                                                    \
6278
    {                                                   \
6279
      result = expr;                                    \
6280
      if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)    \
6281
        backtrack_pos = 0;                               \
6282
      if (result != PARSE_OPERAND_SUCCESS)              \
6283
        goto failure;                                   \
6284
    }                                                   \
6285
  while (0)
6286
 
6287
#define po_barrier_or_imm(str)                             \
6288
  do                                                       \
6289
    {                                                      \
6290
      val = parse_barrier (&str);                          \
6291
      if (val == FAIL)                                     \
6292
        {                                                  \
6293
          if (ISALPHA (*str))                              \
6294
              goto failure;                                \
6295
          else                                             \
6296
              goto immediate;                              \
6297
        }                                                  \
6298
      else                                                 \
6299
        {                                                  \
6300
          if ((inst.instruction & 0xf0) == 0x60            \
6301
              && val != 0xf)                               \
6302
            {                                              \
6303
               /* ISB can only take SY as an option.  */   \
6304
               inst.error = _("invalid barrier type");     \
6305
               goto failure;                               \
6306
            }                                              \
6307
        }                                                  \
6308
    }                                                      \
6309
  while (0)
6310
 
6311
  skip_whitespace (str);
6312
 
6313
  for (i = 0; upat[i] != OP_stop; i++)
6314
    {
6315
      op_parse_code = upat[i];
6316
      if (op_parse_code >= 1<<16)
6317
        op_parse_code = thumb ? (op_parse_code >> 16)
6318
                                : (op_parse_code & ((1<<16)-1));
6319
 
6320
      if (op_parse_code >= OP_FIRST_OPTIONAL)
6321
        {
6322
          /* Remember where we are in case we need to backtrack.  */
6323
          gas_assert (!backtrack_pos);
6324
          backtrack_pos = str;
6325
          backtrack_error = inst.error;
6326
          backtrack_index = i;
6327
        }
6328
 
6329
      if (i > 0 && (i > 1 || inst.operands[0].present))
6330
        po_char_or_fail (',');
6331
 
6332
      switch (op_parse_code)
6333
        {
6334
          /* Registers */
6335
        case OP_oRRnpc:
6336
        case OP_oRRnpcsp:
6337
        case OP_RRnpc:
6338
        case OP_RRnpcsp:
6339
        case OP_oRR:
6340
        case OP_RR:    po_reg_or_fail (REG_TYPE_RN);      break;
6341
        case OP_RCP:   po_reg_or_fail (REG_TYPE_CP);      break;
6342
        case OP_RCN:   po_reg_or_fail (REG_TYPE_CN);      break;
6343
        case OP_RF:    po_reg_or_fail (REG_TYPE_FN);      break;
6344
        case OP_RVS:   po_reg_or_fail (REG_TYPE_VFS);     break;
6345
        case OP_RVD:   po_reg_or_fail (REG_TYPE_VFD);     break;
6346
        case OP_oRND:
6347
        case OP_RND:   po_reg_or_fail (REG_TYPE_VFD);     break;
6348
        case OP_RVC:
6349
          po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
6350
          break;
6351
          /* Also accept generic coprocessor regs for unknown registers.  */
6352
          coproc_reg:
6353
          po_reg_or_fail (REG_TYPE_CN);
6354
          break;
6355
        case OP_RMF:   po_reg_or_fail (REG_TYPE_MVF);     break;
6356
        case OP_RMD:   po_reg_or_fail (REG_TYPE_MVD);     break;
6357
        case OP_RMFX:  po_reg_or_fail (REG_TYPE_MVFX);    break;
6358
        case OP_RMDX:  po_reg_or_fail (REG_TYPE_MVDX);    break;
6359
        case OP_RMAX:  po_reg_or_fail (REG_TYPE_MVAX);    break;
6360
        case OP_RMDS:  po_reg_or_fail (REG_TYPE_DSPSC);   break;
6361
        case OP_RIWR:  po_reg_or_fail (REG_TYPE_MMXWR);   break;
6362
        case OP_RIWC:  po_reg_or_fail (REG_TYPE_MMXWC);   break;
6363
        case OP_RIWG:  po_reg_or_fail (REG_TYPE_MMXWCG);  break;
6364
        case OP_RXA:   po_reg_or_fail (REG_TYPE_XSCALE);  break;
6365
        case OP_oRNQ:
6366
        case OP_RNQ:   po_reg_or_fail (REG_TYPE_NQ);      break;
6367
        case OP_oRNDQ:
6368
        case OP_RNDQ:  po_reg_or_fail (REG_TYPE_NDQ);     break;
6369
        case OP_RVSD:  po_reg_or_fail (REG_TYPE_VFSD);    break;
6370
        case OP_oRNSDQ:
6371
        case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ);    break;
6372
 
6373
        /* Neon scalar. Using an element size of 8 means that some invalid
6374
           scalars are accepted here, so deal with those in later code.  */
6375
        case OP_RNSC:  po_scalar_or_goto (8, failure);    break;
6376
 
6377
        case OP_RNDQ_I0:
6378
          {
6379
            po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
6380
            break;
6381
            try_imm0:
6382
            po_imm_or_fail (0, 0, TRUE);
6383
          }
6384
          break;
6385
 
6386
        case OP_RVSD_I0:
6387
          po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
6388
          break;
6389
 
6390
        case OP_RR_RNSC:
6391
          {
6392
            po_scalar_or_goto (8, try_rr);
6393
            break;
6394
            try_rr:
6395
            po_reg_or_fail (REG_TYPE_RN);
6396
          }
6397
          break;
6398
 
6399
        case OP_RNSDQ_RNSC:
6400
          {
6401
            po_scalar_or_goto (8, try_nsdq);
6402
            break;
6403
            try_nsdq:
6404
            po_reg_or_fail (REG_TYPE_NSDQ);
6405
          }
6406
          break;
6407
 
6408
        case OP_RNDQ_RNSC:
6409
          {
6410
            po_scalar_or_goto (8, try_ndq);
6411
            break;
6412
            try_ndq:
6413
            po_reg_or_fail (REG_TYPE_NDQ);
6414
          }
6415
          break;
6416
 
6417
        case OP_RND_RNSC:
6418
          {
6419
            po_scalar_or_goto (8, try_vfd);
6420
            break;
6421
            try_vfd:
6422
            po_reg_or_fail (REG_TYPE_VFD);
6423
          }
6424
          break;
6425
 
6426
        case OP_VMOV:
6427
          /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6428
             not careful then bad things might happen.  */
6429
          po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
6430
          break;
6431
 
6432
        case OP_RNDQ_Ibig:
6433
          {
6434
            po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
6435
            break;
6436
            try_immbig:
6437
            /* There's a possibility of getting a 64-bit immediate here, so
6438
               we need special handling.  */
6439
            if (parse_big_immediate (&str, i) == FAIL)
6440
              {
6441
                inst.error = _("immediate value is out of range");
6442
                goto failure;
6443
              }
6444
          }
6445
          break;
6446
 
6447
        case OP_RNDQ_I63b:
6448
          {
6449
            po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
6450
            break;
6451
            try_shimm:
6452
            po_imm_or_fail (0, 63, TRUE);
6453
          }
6454
          break;
6455
 
6456
        case OP_RRnpcb:
6457
          po_char_or_fail ('[');
6458
          po_reg_or_fail  (REG_TYPE_RN);
6459
          po_char_or_fail (']');
6460
          break;
6461
 
6462
        case OP_RRnpctw:
6463
        case OP_RRw:
6464
        case OP_oRRw:
6465
          po_reg_or_fail (REG_TYPE_RN);
6466
          if (skip_past_char (&str, '!') == SUCCESS)
6467
            inst.operands[i].writeback = 1;
6468
          break;
6469
 
6470
          /* Immediates */
6471
        case OP_I7:      po_imm_or_fail (  0,       7, FALSE);   break;
6472
        case OP_I15:     po_imm_or_fail (  0,      15, FALSE);   break;
6473
        case OP_I16:     po_imm_or_fail (  1,     16, FALSE);   break;
6474
        case OP_I16z:    po_imm_or_fail (  0,     16, FALSE);   break;
6475
        case OP_I31:     po_imm_or_fail (  0,      31, FALSE);   break;
6476
        case OP_I32:     po_imm_or_fail (  1,     32, FALSE);   break;
6477
        case OP_I32z:    po_imm_or_fail (  0,     32, FALSE);   break;
6478
        case OP_I63s:    po_imm_or_fail (-64,     63, FALSE);   break;
6479
        case OP_I63:     po_imm_or_fail (  0,     63, FALSE);   break;
6480
        case OP_I64:     po_imm_or_fail (  1,     64, FALSE);   break;
6481
        case OP_I64z:    po_imm_or_fail (  0,     64, FALSE);   break;
6482
        case OP_I255:    po_imm_or_fail (  0,     255, FALSE);   break;
6483
 
6484
        case OP_I4b:     po_imm_or_fail (  1,      4, TRUE);    break;
6485
        case OP_oI7b:
6486
        case OP_I7b:     po_imm_or_fail (  0,       7, TRUE);    break;
6487
        case OP_I15b:    po_imm_or_fail (  0,      15, TRUE);    break;
6488
        case OP_oI31b:
6489
        case OP_I31b:    po_imm_or_fail (  0,      31, TRUE);    break;
6490
        case OP_oI32b:   po_imm_or_fail (  1,     32, TRUE);    break;
6491 160 khays
        case OP_oI32z:   po_imm_or_fail (  0,     32, TRUE);    break;
6492 16 khays
        case OP_oIffffb: po_imm_or_fail (  0, 0xffff, TRUE);     break;
6493
 
6494
          /* Immediate variants */
6495
        case OP_oI255c:
6496
          po_char_or_fail ('{');
6497
          po_imm_or_fail (0, 255, TRUE);
6498
          po_char_or_fail ('}');
6499
          break;
6500
 
6501
        case OP_I31w:
6502
          /* The expression parser chokes on a trailing !, so we have
6503
             to find it first and zap it.  */
6504
          {
6505
            char *s = str;
6506
            while (*s && *s != ',')
6507
              s++;
6508
            if (s[-1] == '!')
6509
              {
6510
                s[-1] = '\0';
6511
                inst.operands[i].writeback = 1;
6512
              }
6513
            po_imm_or_fail (0, 31, TRUE);
6514
            if (str == s - 1)
6515
              str = s;
6516
          }
6517
          break;
6518
 
6519
          /* Expressions */
6520
        case OP_EXPi:   EXPi:
6521
          po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6522
                                              GE_OPT_PREFIX));
6523
          break;
6524
 
6525
        case OP_EXP:
6526
          po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6527
                                              GE_NO_PREFIX));
6528
          break;
6529
 
6530
        case OP_EXPr:   EXPr:
6531
          po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6532
                                              GE_NO_PREFIX));
6533
          if (inst.reloc.exp.X_op == O_symbol)
6534
            {
6535
              val = parse_reloc (&str);
6536
              if (val == -1)
6537
                {
6538
                  inst.error = _("unrecognized relocation suffix");
6539
                  goto failure;
6540
                }
6541
              else if (val != BFD_RELOC_UNUSED)
6542
                {
6543
                  inst.operands[i].imm = val;
6544
                  inst.operands[i].hasreloc = 1;
6545
                }
6546
            }
6547
          break;
6548
 
6549
          /* Operand for MOVW or MOVT.  */
6550
        case OP_HALF:
6551
          po_misc_or_fail (parse_half (&str));
6552
          break;
6553
 
6554
          /* Register or expression.  */
6555
        case OP_RR_EXr:   po_reg_or_goto (REG_TYPE_RN, EXPr); break;
6556
        case OP_RR_EXi:   po_reg_or_goto (REG_TYPE_RN, EXPi); break;
6557
 
6558
          /* Register or immediate.  */
6559
        case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0);   break;
6560
        I0:               po_imm_or_fail (0, 0, FALSE);         break;
6561
 
6562
        case OP_RF_IF:    po_reg_or_goto (REG_TYPE_FN, IF);   break;
6563
        IF:
6564
          if (!is_immediate_prefix (*str))
6565
            goto bad_args;
6566
          str++;
6567
          val = parse_fpa_immediate (&str);
6568
          if (val == FAIL)
6569
            goto failure;
6570
          /* FPA immediates are encoded as registers 8-15.
6571
             parse_fpa_immediate has already applied the offset.  */
6572
          inst.operands[i].reg = val;
6573
          inst.operands[i].isreg = 1;
6574
          break;
6575
 
6576
        case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
6577
        I32z:             po_imm_or_fail (0, 32, FALSE);   break;
6578
 
6579
          /* Two kinds of register.  */
6580
        case OP_RIWR_RIWC:
6581
          {
6582
            struct reg_entry *rege = arm_reg_parse_multi (&str);
6583
            if (!rege
6584
                || (rege->type != REG_TYPE_MMXWR
6585
                    && rege->type != REG_TYPE_MMXWC
6586
                    && rege->type != REG_TYPE_MMXWCG))
6587
              {
6588
                inst.error = _("iWMMXt data or control register expected");
6589
                goto failure;
6590
              }
6591
            inst.operands[i].reg = rege->number;
6592
            inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
6593
          }
6594
          break;
6595
 
6596
        case OP_RIWC_RIWG:
6597
          {
6598
            struct reg_entry *rege = arm_reg_parse_multi (&str);
6599
            if (!rege
6600
                || (rege->type != REG_TYPE_MMXWC
6601
                    && rege->type != REG_TYPE_MMXWCG))
6602
              {
6603
                inst.error = _("iWMMXt control register expected");
6604
                goto failure;
6605
              }
6606
            inst.operands[i].reg = rege->number;
6607
            inst.operands[i].isreg = 1;
6608
          }
6609
          break;
6610
 
6611
          /* Misc */
6612
        case OP_CPSF:    val = parse_cps_flags (&str);          break;
6613
        case OP_ENDI:    val = parse_endian_specifier (&str);   break;
6614
        case OP_oROR:    val = parse_ror (&str);                break;
6615
        case OP_COND:    val = parse_cond (&str);               break;
6616
        case OP_oBARRIER_I15:
6617
          po_barrier_or_imm (str); break;
6618
          immediate:
6619
          if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
6620
            goto failure;
6621
          break;
6622
 
6623
        case OP_wPSR:
6624
        case OP_rPSR:
6625
          po_reg_or_goto (REG_TYPE_RNB, try_psr);
6626
          if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
6627
            {
6628
              inst.error = _("Banked registers are not available with this "
6629
                             "architecture.");
6630
              goto failure;
6631
            }
6632
          break;
6633
          try_psr:
6634
          val = parse_psr (&str, op_parse_code == OP_wPSR);
6635
          break;
6636
 
6637
        case OP_APSR_RR:
6638
          po_reg_or_goto (REG_TYPE_RN, try_apsr);
6639
          break;
6640
          try_apsr:
6641
          /* Parse "APSR_nzcv" operand (for FMSTAT-equivalent MRS
6642
             instruction).  */
6643
          if (strncasecmp (str, "APSR_", 5) == 0)
6644
            {
6645
              unsigned found = 0;
6646
              str += 5;
6647
              while (found < 15)
6648
                switch (*str++)
6649
                  {
6650
                  case 'c': found = (found & 1) ? 16 : found | 1; break;
6651
                  case 'n': found = (found & 2) ? 16 : found | 2; break;
6652
                  case 'z': found = (found & 4) ? 16 : found | 4; break;
6653
                  case 'v': found = (found & 8) ? 16 : found | 8; break;
6654
                  default: found = 16;
6655
                  }
6656
              if (found != 15)
6657
                goto failure;
6658
              inst.operands[i].isvec = 1;
6659
              /* APSR_nzcv is encoded in instructions as if it were the REG_PC.  */
6660
              inst.operands[i].reg = REG_PC;
6661
            }
6662
          else
6663
            goto failure;
6664
          break;
6665
 
6666
        case OP_TB:
6667
          po_misc_or_fail (parse_tb (&str));
6668
          break;
6669
 
6670
          /* Register lists.  */
6671
        case OP_REGLST:
6672
          val = parse_reg_list (&str);
6673
          if (*str == '^')
6674
            {
6675
              inst.operands[1].writeback = 1;
6676
              str++;
6677
            }
6678
          break;
6679
 
6680
        case OP_VRSLST:
6681
          val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
6682
          break;
6683
 
6684
        case OP_VRDLST:
6685
          val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
6686
          break;
6687
 
6688
        case OP_VRSDLST:
6689
          /* Allow Q registers too.  */
6690
          val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
6691
                                    REGLIST_NEON_D);
6692
          if (val == FAIL)
6693
            {
6694
              inst.error = NULL;
6695
              val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
6696
                                        REGLIST_VFP_S);
6697
              inst.operands[i].issingle = 1;
6698
            }
6699
          break;
6700
 
6701
        case OP_NRDLST:
6702
          val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
6703
                                    REGLIST_NEON_D);
6704
          break;
6705
 
6706
        case OP_NSTRLST:
6707
          val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
6708
                                           &inst.operands[i].vectype);
6709
          break;
6710
 
6711
          /* Addressing modes */
6712
        case OP_ADDR:
6713
          po_misc_or_fail (parse_address (&str, i));
6714
          break;
6715
 
6716
        case OP_ADDRGLDR:
6717
          po_misc_or_fail_no_backtrack (
6718
            parse_address_group_reloc (&str, i, GROUP_LDR));
6719
          break;
6720
 
6721
        case OP_ADDRGLDRS:
6722
          po_misc_or_fail_no_backtrack (
6723
            parse_address_group_reloc (&str, i, GROUP_LDRS));
6724
          break;
6725
 
6726
        case OP_ADDRGLDC:
6727
          po_misc_or_fail_no_backtrack (
6728
            parse_address_group_reloc (&str, i, GROUP_LDC));
6729
          break;
6730
 
6731
        case OP_SH:
6732
          po_misc_or_fail (parse_shifter_operand (&str, i));
6733
          break;
6734
 
6735
        case OP_SHG:
6736
          po_misc_or_fail_no_backtrack (
6737
            parse_shifter_operand_group_reloc (&str, i));
6738
          break;
6739
 
6740
        case OP_oSHll:
6741
          po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
6742
          break;
6743
 
6744
        case OP_oSHar:
6745
          po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
6746
          break;
6747
 
6748
        case OP_oSHllar:
6749
          po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
6750
          break;
6751
 
6752
        default:
6753
          as_fatal (_("unhandled operand code %d"), op_parse_code);
6754
        }
6755
 
6756
      /* Various value-based sanity checks and shared operations.  We
6757
         do not signal immediate failures for the register constraints;
6758
         this allows a syntax error to take precedence.  */
6759
      switch (op_parse_code)
6760
        {
6761
        case OP_oRRnpc:
6762
        case OP_RRnpc:
6763
        case OP_RRnpcb:
6764
        case OP_RRw:
6765
        case OP_oRRw:
6766
        case OP_RRnpc_I0:
6767
          if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
6768
            inst.error = BAD_PC;
6769
          break;
6770
 
6771
        case OP_oRRnpcsp:
6772
        case OP_RRnpcsp:
6773
          if (inst.operands[i].isreg)
6774
            {
6775
              if (inst.operands[i].reg == REG_PC)
6776
                inst.error = BAD_PC;
6777
              else if (inst.operands[i].reg == REG_SP)
6778
                inst.error = BAD_SP;
6779
            }
6780
          break;
6781
 
6782
        case OP_RRnpctw:
6783
          if (inst.operands[i].isreg
6784
              && inst.operands[i].reg == REG_PC
6785
              && (inst.operands[i].writeback || thumb))
6786
            inst.error = BAD_PC;
6787
          break;
6788
 
6789
        case OP_CPSF:
6790
        case OP_ENDI:
6791
        case OP_oROR:
6792
        case OP_wPSR:
6793
        case OP_rPSR:
6794
        case OP_COND:
6795
        case OP_oBARRIER_I15:
6796
        case OP_REGLST:
6797
        case OP_VRSLST:
6798
        case OP_VRDLST:
6799
        case OP_VRSDLST:
6800
        case OP_NRDLST:
6801
        case OP_NSTRLST:
6802
          if (val == FAIL)
6803
            goto failure;
6804
          inst.operands[i].imm = val;
6805
          break;
6806
 
6807
        default:
6808
          break;
6809
        }
6810
 
6811
      /* If we get here, this operand was successfully parsed.  */
6812
      inst.operands[i].present = 1;
6813
      continue;
6814
 
6815
    bad_args:
6816
      inst.error = BAD_ARGS;
6817
 
6818
    failure:
6819
      if (!backtrack_pos)
6820
        {
6821
          /* The parse routine should already have set inst.error, but set a
6822
             default here just in case.  */
6823
          if (!inst.error)
6824
            inst.error = _("syntax error");
6825
          return FAIL;
6826
        }
6827
 
6828
      /* Do not backtrack over a trailing optional argument that
6829
         absorbed some text.  We will only fail again, with the
6830
         'garbage following instruction' error message, which is
6831
         probably less helpful than the current one.  */
6832
      if (backtrack_index == i && backtrack_pos != str
6833
          && upat[i+1] == OP_stop)
6834
        {
6835
          if (!inst.error)
6836
            inst.error = _("syntax error");
6837
          return FAIL;
6838
        }
6839
 
6840
      /* Try again, skipping the optional argument at backtrack_pos.  */
6841
      str = backtrack_pos;
6842
      inst.error = backtrack_error;
6843
      inst.operands[backtrack_index].present = 0;
6844
      i = backtrack_index;
6845
      backtrack_pos = 0;
6846
    }
6847
 
6848
  /* Check that we have parsed all the arguments.  */
6849
  if (*str != '\0' && !inst.error)
6850
    inst.error = _("garbage following instruction");
6851
 
6852
  return inst.error ? FAIL : SUCCESS;
6853
}
6854
 
6855
#undef po_char_or_fail
6856
#undef po_reg_or_fail
6857
#undef po_reg_or_goto
6858
#undef po_imm_or_fail
6859
#undef po_scalar_or_fail
6860
#undef po_barrier_or_imm
6861
 
6862
/* Shorthand macro for instruction encoding functions issuing errors.  */
6863
#define constraint(expr, err)                   \
6864
  do                                            \
6865
    {                                           \
6866
      if (expr)                                 \
6867
        {                                       \
6868
          inst.error = err;                     \
6869
          return;                               \
6870
        }                                       \
6871
    }                                           \
6872
  while (0)
6873
 
6874
/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
6875
   instructions are unpredictable if these registers are used.  This
6876
   is the BadReg predicate in ARM's Thumb-2 documentation.  */
6877
#define reject_bad_reg(reg)                             \
6878
  do                                                    \
6879
   if (reg == REG_SP || reg == REG_PC)                  \
6880
     {                                                  \
6881
       inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC;  \
6882
       return;                                          \
6883
     }                                                  \
6884
  while (0)
6885
 
6886
/* If REG is R13 (the stack pointer), warn that its use is
6887
   deprecated.  */
6888
#define warn_deprecated_sp(reg)                 \
6889
  do                                            \
6890
    if (warn_on_deprecated && reg == REG_SP)    \
6891
       as_warn (_("use of r13 is deprecated")); \
6892
  while (0)
6893
 
6894
/* Functions for operand encoding.  ARM, then Thumb.  */
6895
 
6896
#define rotate_left(v, n) (v << n | v >> (32 - n))
6897
 
6898
/* If VAL can be encoded in the immediate field of an ARM instruction,
6899
   return the encoded form.  Otherwise, return FAIL.  */
6900
 
6901
static unsigned int
6902
encode_arm_immediate (unsigned int val)
6903
{
6904
  unsigned int a, i;
6905
 
6906
  for (i = 0; i < 32; i += 2)
6907
    if ((a = rotate_left (val, i)) <= 0xff)
6908
      return a | (i << 7); /* 12-bit pack: [shift-cnt,const].  */
6909
 
6910
  return FAIL;
6911
}
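/* For illustration, a few hand-worked cases of the rule above (an 8-bit
   value rotated right by an even amount, with the rotation count stored
   in bits 11:8 of the returned 12-bit field):

     encode_arm_immediate (0x000000ab) => 0x0ab   (no rotation)
     encode_arm_immediate (0x0000ab00) => 0xcab   (0xab ror 24; rotate field 12)
     encode_arm_immediate (0x00012300) => FAIL    (needs 9 significant bits)  */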
6912
 
6913
/* If VAL can be encoded in the immediate field of a Thumb32 instruction,
6914
   return the encoded form.  Otherwise, return FAIL.  */
6915
static unsigned int
6916
encode_thumb32_immediate (unsigned int val)
6917
{
6918
  unsigned int a, i;
6919
 
6920
  if (val <= 0xff)
6921
    return val;
6922
 
6923
  for (i = 1; i <= 24; i++)
6924
    {
6925
      a = val >> i;
6926
      if ((val & ~(0xff << i)) == 0)
6927
        return ((val >> i) & 0x7f) | ((32 - i) << 7);
6928
    }
6929
 
6930
  a = val & 0xff;
6931
  if (val == ((a << 16) | a))
6932
    return 0x100 | a;
6933
  if (val == ((a << 24) | (a << 16) | (a << 8) | a))
6934
    return 0x300 | a;
6935
 
6936
  a = val & 0xff00;
6937
  if (val == ((a << 16) | a))
6938
    return 0x200 | (a >> 8);
6939
 
6940
  return FAIL;
6941
}
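/* For illustration, some hand-worked cases of the Thumb-2 modified-immediate
   forms handled above (plain byte, shifted byte, and the replicated
   patterns):

     encode_thumb32_immediate (0x000000ab) => 0x0ab
     encode_thumb32_immediate (0x0000ab00) => 0xc2b   (shifted-byte form)
     encode_thumb32_immediate (0x00ab00ab) => 0x1ab
     encode_thumb32_immediate (0xab00ab00) => 0x2ab
     encode_thumb32_immediate (0xabababab) => 0x3ab
     encode_thumb32_immediate (0x00ab00ac) => FAIL  */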
6942
/* Encode a VFP SP or DP register number into inst.instruction.  */
6943
 
6944
static void
6945
encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
6946
{
6947
  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
6948
      && reg > 15)
6949
    {
6950
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
6951
        {
6952
          if (thumb_mode)
6953
            ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
6954
                                    fpu_vfp_ext_d32);
6955
          else
6956
            ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
6957
                                    fpu_vfp_ext_d32);
6958
        }
6959
      else
6960
        {
6961
          first_error (_("D register out of range for selected VFP version"));
6962
          return;
6963
        }
6964
    }
6965
 
6966
  switch (pos)
6967
    {
6968
    case VFP_REG_Sd:
6969
      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
6970
      break;
6971
 
6972
    case VFP_REG_Sn:
6973
      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
6974
      break;
6975
 
6976
    case VFP_REG_Sm:
6977
      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
6978
      break;
6979
 
6980
    case VFP_REG_Dd:
6981
      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
6982
      break;
6983
 
6984
    case VFP_REG_Dn:
6985
      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
6986
      break;
6987
 
6988
    case VFP_REG_Dm:
6989
      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
6990
      break;
6991
 
6992
    default:
6993
      abort ();
6994
    }
6995
}
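/* For illustration of the split fields above: an Sd operand of s7 puts
   7 >> 1 = 3 into bits 15:12 and the low bit into D (bit 22), while a Dd
   operand of d17 puts 17 & 15 = 1 into bits 15:12 and bit 4 of the register
   number into bit 22; d16-d31 are only accepted when the 32-register bank
   (fpu_vfp_ext_d32) is available.  */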
6996
 
6997
/* Encode a <shift> in an ARM-format instruction.  The immediate,
6998
   if any, is handled by md_apply_fix.   */
6999
static void
7000
encode_arm_shift (int i)
7001
{
7002
  if (inst.operands[i].shift_kind == SHIFT_RRX)
7003
    inst.instruction |= SHIFT_ROR << 5;
7004
  else
7005
    {
7006
      inst.instruction |= inst.operands[i].shift_kind << 5;
7007
      if (inst.operands[i].immisreg)
7008
        {
7009
          inst.instruction |= SHIFT_BY_REG;
7010
          inst.instruction |= inst.operands[i].imm << 8;
7011
        }
7012
      else
7013
        inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7014
    }
7015
}
7016
 
7017
static void
7018
encode_arm_shifter_operand (int i)
7019
{
7020
  if (inst.operands[i].isreg)
7021
    {
7022
      inst.instruction |= inst.operands[i].reg;
7023
      encode_arm_shift (i);
7024
    }
7025
  else
7026 163 khays
    {
7027
      inst.instruction |= INST_IMMEDIATE;
7028
      if (inst.reloc.type != BFD_RELOC_ARM_IMMEDIATE)
7029
        inst.instruction |= inst.operands[i].imm;
7030
    }
7031 16 khays
}
7032
 
7033
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.  */
7034
static void
7035
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
7036
{
7037
  gas_assert (inst.operands[i].isreg);
7038
  inst.instruction |= inst.operands[i].reg << 16;
7039
 
7040
  if (inst.operands[i].preind)
7041
    {
7042
      if (is_t)
7043
        {
7044
          inst.error = _("instruction does not accept preindexed addressing");
7045
          return;
7046
        }
7047
      inst.instruction |= PRE_INDEX;
7048
      if (inst.operands[i].writeback)
7049
        inst.instruction |= WRITE_BACK;
7050
 
7051
    }
7052
  else if (inst.operands[i].postind)
7053
    {
7054
      gas_assert (inst.operands[i].writeback);
7055
      if (is_t)
7056
        inst.instruction |= WRITE_BACK;
7057
    }
7058
  else /* unindexed - only for coprocessor */
7059
    {
7060
      inst.error = _("instruction does not accept unindexed addressing");
7061
      return;
7062
    }
7063
 
7064
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
7065
      && (((inst.instruction & 0x000f0000) >> 16)
7066
          == ((inst.instruction & 0x0000f000) >> 12)))
7067
    as_warn ((inst.instruction & LOAD_BIT)
7068
             ? _("destination register same as write-back base")
7069
             : _("source register same as write-back base"));
7070
}
7071
 
7072
/* inst.operands[i] was set up by parse_address.  Encode it into an
7073
   ARM-format mode 2 load or store instruction.  If is_t is true,
7074
   reject forms that cannot be used with a T instruction (i.e. not
7075
   post-indexed).  */
7076
static void
7077
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
7078
{
7079
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
7080
 
7081
  encode_arm_addr_mode_common (i, is_t);
7082
 
7083
  if (inst.operands[i].immisreg)
7084
    {
7085
      constraint ((inst.operands[i].imm == REG_PC
7086
                   || (is_pc && inst.operands[i].writeback)),
7087
                  BAD_PC_ADDRESSING);
7088
      inst.instruction |= INST_IMMEDIATE;  /* yes, this is backwards */
7089
      inst.instruction |= inst.operands[i].imm;
7090
      if (!inst.operands[i].negative)
7091
        inst.instruction |= INDEX_UP;
7092
      if (inst.operands[i].shifted)
7093
        {
7094
          if (inst.operands[i].shift_kind == SHIFT_RRX)
7095
            inst.instruction |= SHIFT_ROR << 5;
7096
          else
7097
            {
7098
              inst.instruction |= inst.operands[i].shift_kind << 5;
7099
              inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7100
            }
7101
        }
7102
    }
7103
  else /* immediate offset in inst.reloc */
7104
    {
7105
      if (is_pc && !inst.reloc.pc_rel)
7106
        {
7107
          const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);
7108
 
7109
          /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
7110
             cannot use PC in addressing.
7111
             PC cannot be used in writeback addressing, either.  */
7112
          constraint ((is_t || inst.operands[i].writeback),
7113
                      BAD_PC_ADDRESSING);
7114
 
7115
          /* Use of PC in str is deprecated for ARMv7.  */
7116
          if (warn_on_deprecated
7117
              && !is_load
7118
              && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
7119
            as_warn (_("use of PC in this instruction is deprecated"));
7120
        }
7121
 
7122
      if (inst.reloc.type == BFD_RELOC_UNUSED)
7123
        {
7124
          /* Prefer + for zero encoded value.  */
7125
          if (!inst.operands[i].negative)
7126
            inst.instruction |= INDEX_UP;
7127
          inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
7128
        }
7129
    }
7130
}
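/* For illustration, typical mode 2 operands accepted above (assuming the
   usual ARM syntax):

     ldr r0, [r1, #4]          @ immediate offset
     ldr r0, [r1, r2, lsl #2]  @ scaled register offset
     ldr r0, [r1], #-4         @ post-indexed immediate

   PC is rejected as the index register, and a PC base combined with
   writeback is also rejected (BAD_PC_ADDRESSING).  */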
7131
 
7132
/* inst.operands[i] was set up by parse_address.  Encode it into an
7133
   ARM-format mode 3 load or store instruction.  Reject forms that
7134
   cannot be used with such instructions.  If is_t is true, reject
7135
   forms that cannot be used with a T instruction (i.e. not
7136
   post-indexed).  */
7137
static void
7138
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
7139
{
7140
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
7141
    {
7142
      inst.error = _("instruction does not accept scaled register index");
7143
      return;
7144
    }
7145
 
7146
  encode_arm_addr_mode_common (i, is_t);
7147
 
7148
  if (inst.operands[i].immisreg)
7149
    {
7150
      constraint ((inst.operands[i].imm == REG_PC
7151
                   || inst.operands[i].reg == REG_PC),
7152
                  BAD_PC_ADDRESSING);
7153
      inst.instruction |= inst.operands[i].imm;
7154
      if (!inst.operands[i].negative)
7155
        inst.instruction |= INDEX_UP;
7156
    }
7157
  else /* immediate offset in inst.reloc */
7158
    {
7159
      constraint ((inst.operands[i].reg == REG_PC && !inst.reloc.pc_rel
7160
                   && inst.operands[i].writeback),
7161
                  BAD_PC_WRITEBACK);
7162
      inst.instruction |= HWOFFSET_IMM;
7163
      if (inst.reloc.type == BFD_RELOC_UNUSED)
7164
        {
7165
          /* Prefer + for zero encoded value.  */
7166
          if (!inst.operands[i].negative)
7167
            inst.instruction |= INDEX_UP;
7168
 
7169
          inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
7170
        }
7171
    }
7172
}
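/* For illustration (assuming standard syntax): "strh r0, [r1, #6]" and
   "ldrsb r0, [r1, r2]" are accepted above, while "ldrh r0, [r1, r2, lsl #1]"
   is rejected, since mode 3 has no scaled register form.  */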
7173
 
7174
/* inst.operands[i] was set up by parse_address.  Encode it into an
7175
   ARM-format instruction.  Reject all forms which cannot be encoded
7176
   into a coprocessor load/store instruction.  If wb_ok is false,
7177
   reject use of writeback; if unind_ok is false, reject use of
7178
   unindexed addressing.  If reloc_override is not 0, use it instead
7179
   of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
7180
   (in which case it is preserved).  */
7181
 
7182
static int
7183
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
7184
{
7185
  inst.instruction |= inst.operands[i].reg << 16;
7186
 
7187
  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));
7188
 
7189
  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
7190
    {
7191
      gas_assert (!inst.operands[i].writeback);
7192
      if (!unind_ok)
7193
        {
7194
          inst.error = _("instruction does not support unindexed addressing");
7195
          return FAIL;
7196
        }
7197
      inst.instruction |= inst.operands[i].imm;
7198
      inst.instruction |= INDEX_UP;
7199
      return SUCCESS;
7200
    }
7201
 
7202
  if (inst.operands[i].preind)
7203
    inst.instruction |= PRE_INDEX;
7204
 
7205
  if (inst.operands[i].writeback)
7206
    {
7207
      if (inst.operands[i].reg == REG_PC)
7208
        {
7209
          inst.error = _("pc may not be used with write-back");
7210
          return FAIL;
7211
        }
7212
      if (!wb_ok)
7213
        {
7214
          inst.error = _("instruction does not support writeback");
7215
          return FAIL;
7216
        }
7217
      inst.instruction |= WRITE_BACK;
7218
    }
7219
 
7220
  if (reloc_override)
7221
    inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
7222
  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
7223
            || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
7224
           && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
7225
    {
7226
      if (thumb_mode)
7227
        inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
7228
      else
7229
        inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
7230
    }
7231
 
7232
  /* Prefer + for zero encoded value.  */
7233
  if (!inst.operands[i].negative)
7234
    inst.instruction |= INDEX_UP;
7235
 
7236
  return SUCCESS;
7237
}
7238
 
7239
/* inst.reloc.exp describes an "=expr" load pseudo-operation.
7240
   Determine whether it can be performed with a move instruction; if
7241
   it can, convert inst.instruction to that move instruction and
7242
   return TRUE; if it can't, convert inst.instruction to a literal-pool
7243
   load and return FALSE.  If this is not a valid thing to do in the
7244
   current context, set inst.error and return TRUE.
7245
 
7246
   inst.operands[i] describes the destination register.  */
7247
 
7248
static bfd_boolean
7249
move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
7250
{
7251
  unsigned long tbit;
7252
 
7253
  if (thumb_p)
7254
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
7255
  else
7256
    tbit = LOAD_BIT;
7257
 
7258
  if ((inst.instruction & tbit) == 0)
7259
    {
7260
      inst.error = _("invalid pseudo operation");
7261
      return TRUE;
7262
    }
7263
  if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
7264
    {
7265
      inst.error = _("constant expression expected");
7266
      return TRUE;
7267
    }
7268
  if (inst.reloc.exp.X_op == O_constant)
7269
    {
7270
      if (thumb_p)
7271
        {
7272
          if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
7273
            {
7274
              /* This can be done with a mov(1) instruction.  */
7275
              inst.instruction  = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
7276
              inst.instruction |= inst.reloc.exp.X_add_number;
7277
              return TRUE;
7278
            }
7279
        }
7280
      else
7281
        {
7282
          int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
7283
          if (value != FAIL)
7284
            {
7285
              /* This can be done with a mov instruction.  */
7286
              inst.instruction &= LITERAL_MASK;
7287
              inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
7288
              inst.instruction |= value & 0xfff;
7289
              return TRUE;
7290
            }
7291
 
7292
          value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
7293
          if (value != FAIL)
7294
            {
7295
              /* This can be done with a mvn instruction.  */
7296
              inst.instruction &= LITERAL_MASK;
7297
              inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
7298
              inst.instruction |= value & 0xfff;
7299
              return TRUE;
7300
            }
7301
        }
7302
    }
7303
 
7304
  if (add_to_lit_pool () == FAIL)
7305
    {
7306
      inst.error = _("literal pool insertion failed");
7307
      return TRUE;
7308
    }
7309
  inst.operands[1].reg = REG_PC;
7310
  inst.operands[1].isreg = 1;
7311
  inst.operands[1].preind = 1;
7312
  inst.reloc.pc_rel = 1;
7313
  inst.reloc.type = (thumb_p
7314
                     ? BFD_RELOC_ARM_THUMB_OFFSET
7315
                     : (mode_3
7316
                        ? BFD_RELOC_ARM_HWLITERAL
7317
                        : BFD_RELOC_ARM_LITERAL));
7318
  return FALSE;
7319
}
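/* For illustration, in ARM state the "=constant" pseudo-load above resolves
   as follows (a sketch; the exact choice depends on the constant):

     ldr r0, =0xff        ->  mov r0, #0xff       (immediate encodes)
     ldr r0, =0xffffff00  ->  mvn r0, #0xff       (inverted immediate encodes)
     ldr r0, =0x12345678  ->  ldr r0, [pc, #off]  (literal pool entry)  */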
7320
 
7321
/* Functions for instruction encoding, sorted by sub-architecture.
7322
   First some generics; their names are taken from the conventional
7323
   bit positions for register arguments in ARM format instructions.  */
7324
 
7325
static void
7326
do_noargs (void)
7327
{
7328
}
7329
 
7330
static void
7331
do_rd (void)
7332
{
7333
  inst.instruction |= inst.operands[0].reg << 12;
7334
}
7335
 
7336
static void
7337
do_rd_rm (void)
7338
{
7339
  inst.instruction |= inst.operands[0].reg << 12;
7340
  inst.instruction |= inst.operands[1].reg;
7341
}
7342
 
7343
static void
7344
do_rd_rn (void)
7345
{
7346
  inst.instruction |= inst.operands[0].reg << 12;
7347
  inst.instruction |= inst.operands[1].reg << 16;
7348
}
7349
 
7350
static void
7351
do_rn_rd (void)
7352
{
7353
  inst.instruction |= inst.operands[0].reg << 16;
7354
  inst.instruction |= inst.operands[1].reg << 12;
7355
}
7356
 
7357
static void
7358
do_rd_rm_rn (void)
7359
{
7360
  unsigned Rn = inst.operands[2].reg;
7361
  /* Enforce restrictions on SWP instruction.  */
7362
  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
7363
    {
7364
      constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
7365
                  _("Rn must not overlap other operands"));
7366
 
7367
      /* SWP{b} is deprecated for ARMv6* and ARMv7.  */
7368
      if (warn_on_deprecated
7369
          && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
7370
        as_warn (_("swp{b} use is deprecated for this architecture"));
7371
 
7372
    }
7373
  inst.instruction |= inst.operands[0].reg << 12;
7374
  inst.instruction |= inst.operands[1].reg;
7375
  inst.instruction |= Rn << 16;
7376
}
7377
 
7378
static void
7379
do_rd_rn_rm (void)
7380
{
7381
  inst.instruction |= inst.operands[0].reg << 12;
7382
  inst.instruction |= inst.operands[1].reg << 16;
7383
  inst.instruction |= inst.operands[2].reg;
7384
}
7385
 
7386
static void
7387
do_rm_rd_rn (void)
7388
{
7389
  constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
7390
  constraint (((inst.reloc.exp.X_op != O_constant
7391
                && inst.reloc.exp.X_op != O_illegal)
7392
               || inst.reloc.exp.X_add_number != 0),
7393
              BAD_ADDR_MODE);
7394
  inst.instruction |= inst.operands[0].reg;
7395
  inst.instruction |= inst.operands[1].reg << 12;
7396
  inst.instruction |= inst.operands[2].reg << 16;
7397
}
7398
 
7399
static void
7400
do_imm0 (void)
7401
{
7402
  inst.instruction |= inst.operands[0].imm;
7403
}
7404
 
7405
static void
7406
do_rd_cpaddr (void)
7407
{
7408
  inst.instruction |= inst.operands[0].reg << 12;
7409
  encode_arm_cp_address (1, TRUE, TRUE, 0);
7410
}
7411
 
7412
/* ARM instructions, in alphabetical order by function name (except
7413
   that wrapper functions appear immediately after the function they
7414
   wrap).  */
7415
 
7416
/* This is a pseudo-op of the form "adr rd, label" to be converted
7417
   into a relative address of the form "add rd, pc, #label-.-8".  */
7418
 
7419
static void
7420
do_adr (void)
7421
{
7422
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */
7423
 
7424
  /* Frag hacking will turn this into a sub instruction if the offset turns
7425
     out to be negative.  */
7426
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
7427
  inst.reloc.pc_rel = 1;
7428
  inst.reloc.exp.X_add_number -= 8;
7429
}
7430
 
7431
/* This is a pseudo-op of the form "adrl rd, label" to be converted
7432
   into a relative address of the form:
7433
   add rd, pc, #low(label-.-8)
7434
   add rd, rd, #high(label-.-8)  */
7435
 
7436
static void
7437
do_adrl (void)
7438
{
7439
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */
7440
 
7441
  /* Frag hacking will turn this into a sub instruction if the offset turns
7442
     out to be negative.  */
7443
  inst.reloc.type              = BFD_RELOC_ARM_ADRL_IMMEDIATE;
7444
  inst.reloc.pc_rel            = 1;
7445
  inst.size                    = INSN_SIZE * 2;
7446
  inst.reloc.exp.X_add_number -= 8;
7447
}
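/* For illustration: since the PC reads as . + 8 in ARM state, "adr r0, lbl"
   becomes "add r0, pc, #(lbl - . - 8)" (or a sub if the offset turns out to
   be negative), and "adrl r0, lbl" reserves two instructions so the offset
   can be split into two 8-bit-rotated immediates.  */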
7448
 
7449
static void
7450
do_arit (void)
7451
{
7452
  if (!inst.operands[1].present)
7453
    inst.operands[1].reg = inst.operands[0].reg;
7454
  inst.instruction |= inst.operands[0].reg << 12;
7455
  inst.instruction |= inst.operands[1].reg << 16;
7456
  encode_arm_shifter_operand (2);
7457
}
7458
 
7459
static void
7460
do_barrier (void)
7461
{
7462
  if (inst.operands[0].present)
7463
    {
7464
      constraint ((inst.instruction & 0xf0) != 0x40
7465
                  && (inst.operands[0].imm > 0xf
7466
                      || inst.operands[0].imm < 0x0),
7467
                  _("bad barrier type"));
7468
      inst.instruction |= inst.operands[0].imm;
7469
    }
7470
  else
7471
    inst.instruction |= 0xf;
7472
}
7473
 
7474
static void
7475
do_bfc (void)
7476
{
7477
  unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
7478
  constraint (msb > 32, _("bit-field extends past end of register"));
7479
  /* The instruction encoding stores the LSB and MSB,
7480
     not the LSB and width.  */
7481
  inst.instruction |= inst.operands[0].reg << 12;
7482
  inst.instruction |= inst.operands[1].imm << 7;
7483
  inst.instruction |= (msb - 1) << 16;
7484
}
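/* For illustration of the LSB/MSB encoding above: "bfc r0, #8, #4" stores
   lsb = 8 in bits 11:7 and msb = 8 + 4 - 1 = 11 in bits 20:16; do_bfi below
   encodes its immediates the same way.  */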
7485
 
7486
static void
7487
do_bfi (void)
7488
{
7489
  unsigned int msb;
7490
 
7491
  /* #0 in second position is alternative syntax for bfc, which is
7492
     the same instruction but with REG_PC in the Rm field.  */
7493
  if (!inst.operands[1].isreg)
7494
    inst.operands[1].reg = REG_PC;
7495
 
7496
  msb = inst.operands[2].imm + inst.operands[3].imm;
7497
  constraint (msb > 32, _("bit-field extends past end of register"));
7498
  /* The instruction encoding stores the LSB and MSB,
7499
     not the LSB and width.  */
7500
  inst.instruction |= inst.operands[0].reg << 12;
7501
  inst.instruction |= inst.operands[1].reg;
7502
  inst.instruction |= inst.operands[2].imm << 7;
7503
  inst.instruction |= (msb - 1) << 16;
7504
}
7505
 
7506
static void
7507
do_bfx (void)
7508
{
7509
  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
7510
              _("bit-field extends past end of register"));
7511
  inst.instruction |= inst.operands[0].reg << 12;
7512
  inst.instruction |= inst.operands[1].reg;
7513
  inst.instruction |= inst.operands[2].imm << 7;
7514
  inst.instruction |= (inst.operands[3].imm - 1) << 16;
7515
}
7516
 
7517
/* ARM V5 breakpoint instruction (argument parse)
7518
     BKPT <16 bit unsigned immediate>
7519
     Instruction is not conditional.
7520
        The bit pattern given in insns[] has the COND_ALWAYS condition,
7521
        and it is an error if the caller tried to override that.  */
7522
 
7523
static void
7524
do_bkpt (void)
7525
{
7526
  /* Top 12 of 16 bits to bits 19:8.  */
7527
  inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
7528
 
7529
  /* Bottom 4 of 16 bits to bits 3:0.  */
7530
  inst.instruction |= inst.operands[0].imm & 0xf;
7531
}
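/* For illustration: "bkpt 0xabcd" places 0xabc in bits 19:8 and 0xd in
   bits 3:0 of the encoding, per the split above.  */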
7532
 
7533
static void
7534
encode_branch (int default_reloc)
7535
{
7536
  if (inst.operands[0].hasreloc)
7537
    {
7538
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
7539
                  && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
7540
                  _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
7541
      inst.reloc.type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
7542
        ? BFD_RELOC_ARM_PLT32
7543
        : thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
7544
    }
7545
  else
7546
    inst.reloc.type = (bfd_reloc_code_real_type) default_reloc;
7547
  inst.reloc.pc_rel = 1;
7548
}
7549
 
7550
static void
7551
do_branch (void)
7552
{
7553
#ifdef OBJ_ELF
7554
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
7555
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
7556
  else
7557
#endif
7558
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
7559
}
7560
 
7561
static void
7562
do_bl (void)
7563
{
7564
#ifdef OBJ_ELF
7565
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
7566
    {
7567
      if (inst.cond == COND_ALWAYS)
7568
        encode_branch (BFD_RELOC_ARM_PCREL_CALL);
7569
      else
7570
        encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
7571
    }
7572
  else
7573
#endif
7574
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
7575
}
7576
 
7577
/* ARM V5 branch-link-exchange instruction (argument parse)
7578
     BLX <target_addr>          ie BLX(1)
7579
     BLX{<condition>} <Rm>      ie BLX(2)
7580
   Unfortunately, there are two different opcodes for this mnemonic.
7581
   So, the insns[].value is not used, and the code here zaps values
7582
        into inst.instruction.
7583
   Also, the <target_addr> can be 25 bits, hence has its own reloc.  */
7584
 
7585
static void
7586
do_blx (void)
7587
{
7588
  if (inst.operands[0].isreg)
7589
    {
7590
      /* Arg is a register; the opcode provided by insns[] is correct.
7591
         It is not illegal to do "blx pc", just useless.  */
7592
      if (inst.operands[0].reg == REG_PC)
7593
        as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
7594
 
7595
      inst.instruction |= inst.operands[0].reg;
7596
    }
7597
  else
7598
    {
7599
      /* Arg is an address; this instruction cannot be executed
7600
         conditionally, and the opcode must be adjusted.
7601
         We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
7602
         where we generate a BFD_RELOC_ARM_PCREL_CALL instead.  */
7603
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
7604
      inst.instruction = 0xfa000000;
7605
      encode_branch (BFD_RELOC_ARM_PCREL_BLX);
7606
    }
7607
}
7608
 
7609
static void
7610
do_bx (void)
7611
{
7612
  bfd_boolean want_reloc;
7613
 
7614
  if (inst.operands[0].reg == REG_PC)
7615
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
7616
 
7617
  inst.instruction |= inst.operands[0].reg;
7618
  /* Output R_ARM_V4BX relocations if this is an EABI object that looks like
7619
     it is for ARMv4t or earlier.  */
7620
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
7621
  if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
7622
      want_reloc = TRUE;
7623
 
7624
#ifdef OBJ_ELF
7625
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
7626
#endif
7627
    want_reloc = FALSE;
7628
 
7629
  if (want_reloc)
7630
    inst.reloc.type = BFD_RELOC_ARM_V4BX;
7631
}
7632
 
7633
 
7634
/* ARM v5TEJ.  Jump to Jazelle code.  */
7635
 
7636
static void
7637
do_bxj (void)
7638
{
7639
  if (inst.operands[0].reg == REG_PC)
7640
    as_tsktsk (_("use of r15 in bxj is not really useful"));
7641
 
7642
  inst.instruction |= inst.operands[0].reg;
7643
}
7644
 
7645
/* Co-processor data operation:
7646
      CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
7647
      CDP2      <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}  */
7648
static void
7649
do_cdp (void)
7650
{
7651
  inst.instruction |= inst.operands[0].reg << 8;
7652
  inst.instruction |= inst.operands[1].imm << 20;
7653
  inst.instruction |= inst.operands[2].reg << 12;
7654
  inst.instruction |= inst.operands[3].reg << 16;
7655
  inst.instruction |= inst.operands[4].reg;
7656
  inst.instruction |= inst.operands[5].imm << 5;
7657
}
7658
 
7659
static void
7660
do_cmp (void)
7661
{
7662
  inst.instruction |= inst.operands[0].reg << 16;
7663
  encode_arm_shifter_operand (1);
7664
}
7665
 
7666
/* Transfer between coprocessor and ARM registers.
7667
   MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
7668
   MRC2
7669
   MCR{cond}
7670
   MCR2
7671
 
7672
   No special properties.  */
7673
 
7674
static void
7675
do_co_reg (void)
7676
{
7677
  unsigned Rd;
7678
 
7679
  Rd = inst.operands[2].reg;
7680
  if (thumb_mode)
7681
    {
7682
      if (inst.instruction == 0xee000010
7683
          || inst.instruction == 0xfe000010)
7684
        /* MCR, MCR2  */
7685
        reject_bad_reg (Rd);
7686
      else
7687
        /* MRC, MRC2  */
7688
        constraint (Rd == REG_SP, BAD_SP);
7689
    }
7690
  else
7691
    {
7692
      /* MCR */
7693
      if (inst.instruction == 0xe000010)
7694
        constraint (Rd == REG_PC, BAD_PC);
7695
    }
7696
 
7697
 
7698
  inst.instruction |= inst.operands[0].reg << 8;
7699
  inst.instruction |= inst.operands[1].imm << 21;
7700
  inst.instruction |= Rd << 12;
7701
  inst.instruction |= inst.operands[3].reg << 16;
7702
  inst.instruction |= inst.operands[4].reg;
7703
  inst.instruction |= inst.operands[5].imm << 5;
7704
}
7705
 
7706
/* Transfer between coprocessor register and pair of ARM registers.
7707
   MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
7708
   MCRR2
7709
   MRRC{cond}
7710
   MRRC2
7711
 
7712
   Two XScale instructions are special cases of these:
7713
 
7714
     MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
7715
     MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
7716
 
7717
   Result unpredictable if Rd or Rn is R15.  */
7718
 
7719
static void
7720
do_co_reg2c (void)
7721
{
7722
  unsigned Rd, Rn;
7723
 
7724
  Rd = inst.operands[2].reg;
7725
  Rn = inst.operands[3].reg;
7726
 
7727
  if (thumb_mode)
7728
    {
7729
      reject_bad_reg (Rd);
7730
      reject_bad_reg (Rn);
7731
    }
7732
  else
7733
    {
7734
      constraint (Rd == REG_PC, BAD_PC);
7735
      constraint (Rn == REG_PC, BAD_PC);
7736
    }
7737
 
7738
  inst.instruction |= inst.operands[0].reg << 8;
7739
  inst.instruction |= inst.operands[1].imm << 4;
7740
  inst.instruction |= Rd << 12;
7741
  inst.instruction |= Rn << 16;
7742
  inst.instruction |= inst.operands[4].reg;
7743
}
7744
 
7745
static void
7746
do_cpsi (void)
7747
{
7748
  inst.instruction |= inst.operands[0].imm << 6;
7749
  if (inst.operands[1].present)
7750
    {
7751
      inst.instruction |= CPSI_MMOD;
7752
      inst.instruction |= inst.operands[1].imm;
7753
    }
7754
}
7755
 
7756
static void
7757
do_dbg (void)
7758
{
7759
  inst.instruction |= inst.operands[0].imm;
7760
}
7761
 
7762
static void
7763
do_div (void)
7764
{
7765
  unsigned Rd, Rn, Rm;
7766
 
7767
  Rd = inst.operands[0].reg;
7768
  Rn = (inst.operands[1].present
7769
        ? inst.operands[1].reg : Rd);
7770
  Rm = inst.operands[2].reg;
7771
 
7772
  constraint ((Rd == REG_PC), BAD_PC);
7773
  constraint ((Rn == REG_PC), BAD_PC);
7774
  constraint ((Rm == REG_PC), BAD_PC);
7775
 
7776
  inst.instruction |= Rd << 16;
7777
  inst.instruction |= Rn << 0;
7778
  inst.instruction |= Rm << 8;
7779
}
7780
 
7781
static void
7782
do_it (void)
7783
{
7784
  /* There is no IT instruction in ARM mode.  We
7785
     process it to do the validation as if in
7786
     thumb mode, just in case the code gets
7787
     assembled for thumb using the unified syntax.  */
7788
 
7789
  inst.size = 0;
7790
  if (unified_syntax)
7791
    {
7792
      set_it_insn_type (IT_INSN);
7793
      now_it.mask = (inst.instruction & 0xf) | 0x10;
7794
      now_it.cc = inst.operands[0].imm;
7795
    }
7796
}
7797
 
7798
static void
7799
do_ldmstm (void)
7800
{
7801
  int base_reg = inst.operands[0].reg;
7802
  int range = inst.operands[1].imm;
7803
 
7804
  inst.instruction |= base_reg << 16;
7805
  inst.instruction |= range;
7806
 
7807
  if (inst.operands[1].writeback)
7808
    inst.instruction |= LDM_TYPE_2_OR_3;
7809
 
7810
  if (inst.operands[0].writeback)
7811
    {
7812
      inst.instruction |= WRITE_BACK;
7813
      /* Check for unpredictable uses of writeback.  */
7814
      if (inst.instruction & LOAD_BIT)
7815
        {
7816
          /* Not allowed in LDM type 2.  */
7817
          if ((inst.instruction & LDM_TYPE_2_OR_3)
7818
              && ((range & (1 << REG_PC)) == 0))
7819
            as_warn (_("writeback of base register is UNPREDICTABLE"));
7820
          /* Only allowed if base reg not in list for other types.  */
7821
          else if (range & (1 << base_reg))
7822
            as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
7823
        }
7824
      else /* STM.  */
7825
        {
7826
          /* Not allowed for type 2.  */
7827
          if (inst.instruction & LDM_TYPE_2_OR_3)
7828
            as_warn (_("writeback of base register is UNPREDICTABLE"));
7829
          /* Only allowed if base reg not in list, or first in list.  */
7830
          else if ((range & (1 << base_reg))
7831
                   && (range & ((1 << base_reg) - 1)))
7832
            as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
7833
        }
7834
    }
7835
}
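/* For illustration of the writeback checks above: "ldmia r0!, {r0, r1}" and
   "stmia r1!, {r0, r1}" both draw UNPREDICTABLE warnings, while
   "stmia r0!, {r0, r1}" does not, since there the base is the lowest
   register stored.  */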
7836
 
7837
/* ARMv5TE load-consecutive (argument parse)
7838
   Mode is like LDRH.
7839
 
7840
     LDRccD R, mode
7841
     STRccD R, mode.  */
7842
 
7843
static void
7844
do_ldrd (void)
7845
{
7846
  constraint (inst.operands[0].reg % 2 != 0,
7847 148 khays
              _("first transfer register must be even"));
7848 16 khays
  constraint (inst.operands[1].present
7849
              && inst.operands[1].reg != inst.operands[0].reg + 1,
7850 148 khays
              _("can only transfer two consecutive registers"));
7851 16 khays
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
7852
  constraint (!inst.operands[2].isreg, _("'[' expected"));
7853
 
7854
  if (!inst.operands[1].present)
7855
    inst.operands[1].reg = inst.operands[0].reg + 1;
7856
 
7857 148 khays
  /* encode_arm_addr_mode_3 will diagnose overlap between the base
7858
     register and the first register written; we have to diagnose
7859
     overlap between the base and the second register written here.  */
7860 16 khays
 
7861 148 khays
  if (inst.operands[2].reg == inst.operands[1].reg
7862
      && (inst.operands[2].writeback || inst.operands[2].postind))
7863
    as_warn (_("base register written back, and overlaps "
7864
               "second transfer register"));
7865 16 khays
 
7866 148 khays
  if (!(inst.instruction & V4_STR_BIT))
7867
    {
7868 16 khays
      /* For an index-register load, the index register must not overlap the
7869 148 khays
        destination (even if not write-back).  */
7870
      if (inst.operands[2].immisreg
7871
              && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
7872
              || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
7873
        as_warn (_("index register overlaps transfer register"));
7874 16 khays
    }
7875
  inst.instruction |= inst.operands[0].reg << 12;
7876
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
7877
}
7878
 
7879
static void
7880
do_ldrex (void)
7881
{
7882
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
7883
              || inst.operands[1].postind || inst.operands[1].writeback
7884
              || inst.operands[1].immisreg || inst.operands[1].shifted
7885
              || inst.operands[1].negative
7886
              /* This can arise if the programmer has written
7887
                   strex rN, rM, foo
7888
                 or if they have mistakenly used a register name as the last
7889
                 operand,  eg:
7890
                   strex rN, rM, rX
7891
                 It is very difficult to distinguish between these two cases
7892
                 because "rX" might actually be a label. ie the register
7893
                 name has been occluded by a symbol of the same name. So we
7894
                 just generate a general 'bad addressing mode' type error
7895
                 message and leave it up to the programmer to discover the
7896
                 true cause and fix their mistake.  */
7897
              || (inst.operands[1].reg == REG_PC),
7898
              BAD_ADDR_MODE);
7899
 
7900
  constraint (inst.reloc.exp.X_op != O_constant
7901
              || inst.reloc.exp.X_add_number != 0,
7902
              _("offset must be zero in ARM encoding"));
7903
 
7904
  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
7905
 
7906
  inst.instruction |= inst.operands[0].reg << 12;
7907
  inst.instruction |= inst.operands[1].reg << 16;
7908
  inst.reloc.type = BFD_RELOC_UNUSED;
7909
}
7910
 
7911
static void
7912
do_ldrexd (void)
7913
{
7914
  constraint (inst.operands[0].reg % 2 != 0,
7915
              _("even register required"));
7916
  constraint (inst.operands[1].present
7917
              && inst.operands[1].reg != inst.operands[0].reg + 1,
7918
              _("can only load two consecutive registers"));
7919
  /* If op 1 were present and equal to PC, this function wouldn't
7920
     have been called in the first place.  */
7921
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
7922
 
7923
  inst.instruction |= inst.operands[0].reg << 12;
7924
  inst.instruction |= inst.operands[2].reg << 16;
7925
}
7926
 
7927 163 khays
/* In both ARM and thumb state 'ldr pc, #imm'  with an immediate
7928
   which is not a multiple of four is UNPREDICTABLE.  */
7929 16 khays
static void
7930 163 khays
check_ldr_r15_aligned (void)
7931
{
7932
  constraint (!(inst.operands[1].immisreg)
7933
              && (inst.operands[0].reg == REG_PC
7934
              && inst.operands[1].reg == REG_PC
7935
              && (inst.reloc.exp.X_add_number & 0x3)),
7936
              _("ldr to register 15 must be 4-byte alligned"));
7937
}
7938
 
7939
static void
7940 16 khays
do_ldst (void)
7941
{
7942
  inst.instruction |= inst.operands[0].reg << 12;
7943
  if (!inst.operands[1].isreg)
7944
    if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/FALSE))
7945
      return;
7946
  encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
7947 163 khays
  check_ldr_r15_aligned ();
7948 16 khays
}
7949
 
7950
static void
7951
do_ldstt (void)
7952
{
7953
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
7954
     reject [Rn,...].  */
7955
  if (inst.operands[1].preind)
7956
    {
7957
      constraint (inst.reloc.exp.X_op != O_constant
7958
                  || inst.reloc.exp.X_add_number != 0,
7959
                  _("this instruction requires a post-indexed address"));
7960
 
7961
      inst.operands[1].preind = 0;
7962
      inst.operands[1].postind = 1;
7963
      inst.operands[1].writeback = 1;
7964
    }
7965
  inst.instruction |= inst.operands[0].reg << 12;
7966
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
7967
}
7968
 
7969
/* Halfword and signed-byte load/store operations.  */
7970
 
7971
static void
7972
do_ldstv4 (void)
7973
{
7974
  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
7975
  inst.instruction |= inst.operands[0].reg << 12;
7976
  if (!inst.operands[1].isreg)
7977
    if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/TRUE))
7978
      return;
7979
  encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
7980
}
7981
 
7982
static void
7983
do_ldsttv4 (void)
7984
{
7985
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
7986
     reject [Rn,...].  */
7987
  if (inst.operands[1].preind)
7988
    {
7989
      constraint (inst.reloc.exp.X_op != O_constant
7990
                  || inst.reloc.exp.X_add_number != 0,
7991
                  _("this instruction requires a post-indexed address"));
7992
 
7993
      inst.operands[1].preind = 0;
7994
      inst.operands[1].postind = 1;
7995
      inst.operands[1].writeback = 1;
7996
    }
7997
  inst.instruction |= inst.operands[0].reg << 12;
7998
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
7999
}
8000
 
8001
/* Co-processor register load/store.
8002
   Format: <LDC|STC>{cond}[L] CP#,CRd,<address>  */
8003
static void
8004
do_lstc (void)
8005
{
8006
  inst.instruction |= inst.operands[0].reg << 8;
8007
  inst.instruction |= inst.operands[1].reg << 12;
8008
  encode_arm_cp_address (2, TRUE, TRUE, 0);
8009
}
8010
 
8011
static void
8012
do_mlas (void)
8013
{
8014
  /* This restriction does not apply to mls (nor to mla in v6 or later).  */
8015
  if (inst.operands[0].reg == inst.operands[1].reg
8016
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
8017
      && !(inst.instruction & 0x00400000))
8018
    as_tsktsk (_("Rd and Rm should be different in mla"));
8019
 
8020
  inst.instruction |= inst.operands[0].reg << 16;
8021
  inst.instruction |= inst.operands[1].reg;
8022
  inst.instruction |= inst.operands[2].reg << 8;
8023
  inst.instruction |= inst.operands[3].reg << 12;
8024
}
8025
 
8026
static void
8027
do_mov (void)
8028
{
8029
  inst.instruction |= inst.operands[0].reg << 12;
8030
  encode_arm_shifter_operand (1);
8031
}
8032
 
8033
/* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>.  */
8034
static void
8035
do_mov16 (void)
8036
{
8037
  bfd_vma imm;
8038
  bfd_boolean top;
8039
 
8040
  top = (inst.instruction & 0x00400000) != 0;
8041
  constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
8042
              _(":lower16: not allowed this instruction"));
8043
  constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
8044
              _(":upper16: not allowed instruction"));
8045
  inst.instruction |= inst.operands[0].reg << 12;
8046
  if (inst.reloc.type == BFD_RELOC_UNUSED)
8047
    {
8048
      imm = inst.reloc.exp.X_add_number;
8049
      /* The value is in two pieces: 0:11, 16:19.  */
8050
      inst.instruction |= (imm & 0x00000fff);
8051
      inst.instruction |= (imm & 0x0000f000) << 4;
8052
    }
8053
}
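/* For illustration: "movw r0, #0x1234" places 0x234 in bits 11:0 and 0x1 in
   bits 19:16, matching the two-piece split above.  */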
8054
 
8055
static void do_vfp_nsyn_opcode (const char *);
8056
 
8057
static int
8058
do_vfp_nsyn_mrs (void)
8059
{
8060
  if (inst.operands[0].isvec)
8061
    {
8062
      if (inst.operands[1].reg != 1)
8063
        first_error (_("operand 1 must be FPSCR"));
8064
      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
8065
      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
8066
      do_vfp_nsyn_opcode ("fmstat");
8067
    }
8068
  else if (inst.operands[1].isvec)
8069
    do_vfp_nsyn_opcode ("fmrx");
8070
  else
8071
    return FAIL;
8072
 
8073
  return SUCCESS;
8074
}
8075
 
8076
static int
8077
do_vfp_nsyn_msr (void)
8078
{
8079
  if (inst.operands[0].isvec)
8080
    do_vfp_nsyn_opcode ("fmxr");
8081
  else
8082
    return FAIL;
8083
 
8084
  return SUCCESS;
8085
}
8086
 
8087
static void
8088
do_vmrs (void)
8089
{
8090
  unsigned Rt = inst.operands[0].reg;
8091
 
8092
  if (thumb_mode && inst.operands[0].reg == REG_SP)
8093
    {
8094
      inst.error = BAD_SP;
8095
      return;
8096
    }
8097
 
8098
  /* APSR_ sets isvec. All other refs to PC are illegal.  */
8099
  if (!inst.operands[0].isvec && inst.operands[0].reg == REG_PC)
8100
    {
8101
      inst.error = BAD_PC;
8102
      return;
8103
    }
8104
 
8105
  if (inst.operands[1].reg != 1)
8106
    first_error (_("operand 1 must be FPSCR"));
8107
 
8108
  inst.instruction |= (Rt << 12);
8109
}
8110
 
8111
static void
8112
do_vmsr (void)
8113
{
8114
  unsigned Rt = inst.operands[1].reg;
8115
 
8116
  if (thumb_mode)
8117
    reject_bad_reg (Rt);
8118
  else if (Rt == REG_PC)
8119
    {
8120
      inst.error = BAD_PC;
8121
      return;
8122
    }
8123
 
8124
  if (inst.operands[0].reg != 1)
8125
    first_error (_("operand 0 must be FPSCR"));
8126
 
8127
  inst.instruction |= (Rt << 12);
8128
}
8129
 
8130
static void
8131
do_mrs (void)
8132
{
8133
  unsigned br;
8134
 
8135
  if (do_vfp_nsyn_mrs () == SUCCESS)
8136
    return;
8137
 
8138
  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
8139
  inst.instruction |= inst.operands[0].reg << 12;
8140
 
8141
  if (inst.operands[1].isreg)
8142
    {
8143
      br = inst.operands[1].reg;
8144
      if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf000))
8145
        as_bad (_("bad register for mrs"));
8146
    }
8147
  else
8148
    {
8149
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
8150
      constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
8151
                  != (PSR_c|PSR_f),
8152
                  _("'APSR', 'CPSR' or 'SPSR' expected"));
8153
      br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
8154
    }
8155
 
8156
  inst.instruction |= br;
8157
}
8158
 
8159
/* Two possible forms:
8160
      "{C|S}PSR_<field>, Rm",
8161
      "{C|S}PSR_f, #expression".  */
8162
 
8163
static void
8164
do_msr (void)
8165
{
8166
  if (do_vfp_nsyn_msr () == SUCCESS)
8167
    return;
8168
 
8169
  inst.instruction |= inst.operands[0].imm;
8170
  if (inst.operands[1].isreg)
8171
    inst.instruction |= inst.operands[1].reg;
8172
  else
8173
    {
8174
      inst.instruction |= INST_IMMEDIATE;
8175
      inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
8176
      inst.reloc.pc_rel = 0;
8177
    }
8178
}
8179
 
8180
static void
8181
do_mul (void)
8182
{
8183
  constraint (inst.operands[2].reg == REG_PC, BAD_PC);
8184
 
8185
  if (!inst.operands[2].present)
8186
    inst.operands[2].reg = inst.operands[0].reg;
8187
  inst.instruction |= inst.operands[0].reg << 16;
8188
  inst.instruction |= inst.operands[1].reg;
8189
  inst.instruction |= inst.operands[2].reg << 8;
8190
 
8191
  if (inst.operands[0].reg == inst.operands[1].reg
8192
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
8193
    as_tsktsk (_("Rd and Rm should be different in mul"));
8194
}
8195
 
8196
/* Long Multiply Parser
8197
   UMULL RdLo, RdHi, Rm, Rs
8198
   SMULL RdLo, RdHi, Rm, Rs
8199
   UMLAL RdLo, RdHi, Rm, Rs
8200
   SMLAL RdLo, RdHi, Rm, Rs.  */
8201
 
8202
static void
8203
do_mull (void)
8204
{
8205
  inst.instruction |= inst.operands[0].reg << 12;
8206
  inst.instruction |= inst.operands[1].reg << 16;
8207
  inst.instruction |= inst.operands[2].reg;
8208
  inst.instruction |= inst.operands[3].reg << 8;
8209
 
8210
  /* rdhi and rdlo must be different.  */
8211
  if (inst.operands[0].reg == inst.operands[1].reg)
8212
    as_tsktsk (_("rdhi and rdlo must be different"));
8213
 
8214
  /* rdhi, rdlo and rm must all be different before armv6.  */
8215
  if ((inst.operands[0].reg == inst.operands[2].reg
8216
      || inst.operands[1].reg == inst.operands[2].reg)
8217
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
8218
    as_tsktsk (_("rdhi, rdlo and rm must all be different"));
8219
}
8220
 
8221
static void
8222
do_nop (void)
8223
{
8224
  if (inst.operands[0].present
8225
      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
8226
    {
8227
      /* Architectural NOP hints are CPSR sets with no bits selected.  */
8228
      inst.instruction &= 0xf0000000;
8229
      inst.instruction |= 0x0320f000;
8230
      if (inst.operands[0].present)
8231
        inst.instruction |= inst.operands[0].imm;
8232
    }
8233
}
8234
 
8235
/* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
8236
   PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
8237
   Condition defaults to COND_ALWAYS.
8238
   Error if Rd, Rn or Rm are R15.  */
8239
 
8240
static void
8241
do_pkhbt (void)
8242
{
8243
  inst.instruction |= inst.operands[0].reg << 12;
8244
  inst.instruction |= inst.operands[1].reg << 16;
8245
  inst.instruction |= inst.operands[2].reg;
8246
  if (inst.operands[3].present)
8247
    encode_arm_shift (3);
8248
}
8249
 
8250
/* ARM V6 PKHTB (Argument Parse).  */
8251
 
8252
static void
8253
do_pkhtb (void)
8254
{
8255
  if (!inst.operands[3].present)
8256
    {
8257
      /* If the shift specifier is omitted, turn the instruction
8258
         into pkhbt rd, rm, rn. */
8259
      inst.instruction &= 0xfff00010;
8260
      inst.instruction |= inst.operands[0].reg << 12;
8261
      inst.instruction |= inst.operands[1].reg;
8262
      inst.instruction |= inst.operands[2].reg << 16;
8263
    }
8264
  else
8265
    {
8266
      inst.instruction |= inst.operands[0].reg << 12;
8267
      inst.instruction |= inst.operands[1].reg << 16;
8268
      inst.instruction |= inst.operands[2].reg;
8269
      encode_arm_shift (3);
8270
    }
8271
}
8272
 
8273
/* ARMv5TE: Preload-Cache
8274
   MP Extensions: Preload for write
8275
 
8276
    PLD(W) <addr_mode>
8277
 
8278
  Syntactically, like LDR with B=1, W=0, L=1.  */
8279
 
8280
static void
8281
do_pld (void)
8282
{
8283
  constraint (!inst.operands[0].isreg,
8284
              _("'[' expected after PLD mnemonic"));
8285
  constraint (inst.operands[0].postind,
8286
              _("post-indexed expression used in preload instruction"));
8287
  constraint (inst.operands[0].writeback,
8288
              _("writeback used in preload instruction"));
8289
  constraint (!inst.operands[0].preind,
8290
              _("unindexed addressing used in preload instruction"));
8291
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
8292
}
8293
 
8294
/* ARMv7: PLI <addr_mode>  */
8295
static void
8296
do_pli (void)
8297
{
8298
  constraint (!inst.operands[0].isreg,
8299
              _("'[' expected after PLI mnemonic"));
8300
  constraint (inst.operands[0].postind,
8301
              _("post-indexed expression used in preload instruction"));
8302
  constraint (inst.operands[0].writeback,
8303
              _("writeback used in preload instruction"));
8304
  constraint (!inst.operands[0].preind,
8305
              _("unindexed addressing used in preload instruction"));
8306
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
8307
  inst.instruction &= ~PRE_INDEX;
8308
}
8309
 
8310
static void
8311
do_push_pop (void)
8312
{
8313
  inst.operands[1] = inst.operands[0];
8314
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
8315
  inst.operands[0].isreg = 1;
8316
  inst.operands[0].writeback = 1;
8317
  inst.operands[0].reg = REG_SP;
8318
  do_ldmstm ();
8319
}
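/* For illustration: "push {r4, lr}" is handled here as
   "stmdb sp!, {r4, lr}" and "pop {r4, pc}" as "ldmia sp!, {r4, pc}";
   the code above just fabricates the "sp!" base operand with writeback
   and reuses do_ldmstm.  */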
8320
 
8321
/* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
8322
   word at the specified address and the following word
8323
   respectively.
8324
   Unconditionally executed.
8325
   Error if Rn is R15.  */
8326
 
8327
static void
8328
do_rfe (void)
8329
{
8330
  inst.instruction |= inst.operands[0].reg << 16;
8331
  if (inst.operands[0].writeback)
8332
    inst.instruction |= WRITE_BACK;
8333
}
8334
 
8335
/* ARM V6 ssat (argument parse).  */
8336
 
8337
static void
8338
do_ssat (void)
8339
{
8340
  inst.instruction |= inst.operands[0].reg << 12;
8341
  inst.instruction |= (inst.operands[1].imm - 1) << 16;
8342
  inst.instruction |= inst.operands[2].reg;
8343
 
8344
  if (inst.operands[3].present)
8345
    encode_arm_shift (3);
8346
}
8347
 
8348
/* ARM V6 usat (argument parse).  */
8349
 
8350
static void
8351
do_usat (void)
8352
{
8353
  inst.instruction |= inst.operands[0].reg << 12;
8354
  inst.instruction |= inst.operands[1].imm << 16;
8355
  inst.instruction |= inst.operands[2].reg;
8356
 
8357
  if (inst.operands[3].present)
8358
    encode_arm_shift (3);
8359
}
8360
 
8361
/* ARM V6 ssat16 (argument parse).  */
8362
 
8363
static void
8364
do_ssat16 (void)
8365
{
8366
  inst.instruction |= inst.operands[0].reg << 12;
8367
  inst.instruction |= ((inst.operands[1].imm - 1) << 16);
8368
  inst.instruction |= inst.operands[2].reg;
8369
}
8370
 
8371
static void
8372
do_usat16 (void)
8373
{
8374
  inst.instruction |= inst.operands[0].reg << 12;
8375
  inst.instruction |= inst.operands[1].imm << 16;
8376
  inst.instruction |= inst.operands[2].reg;
8377
}
8378
 
8379
/* ARM V6 SETEND (argument parse).  Sets the E bit in the CPSR while
8380
   preserving the other bits.
8381
 
8382
   setend <endian_specifier>, where <endian_specifier> is either
8383
   BE or LE.  */
8384
 
8385
static void
8386
do_setend (void)
8387
{
8388
  if (inst.operands[0].imm)
8389
    inst.instruction |= 0x200;
8390
}
8391
 
8392
static void
8393
do_shift (void)
8394
{
8395
  unsigned int Rm = (inst.operands[1].present
8396
                     ? inst.operands[1].reg
8397
                     : inst.operands[0].reg);
8398
 
8399
  inst.instruction |= inst.operands[0].reg << 12;
8400
  inst.instruction |= Rm;
8401
  if (inst.operands[2].isreg)  /* Rd, {Rm,} Rs */
8402
    {
8403
      inst.instruction |= inst.operands[2].reg << 8;
8404
      inst.instruction |= SHIFT_BY_REG;
8405 148 khays
      /* PR 12854: Error on extraneous shifts.  */
8406
      constraint (inst.operands[2].shifted,
8407
                  _("extraneous shift as part of operand to shift insn"));
8408 16 khays
    }
8409
  else
8410
    inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
8411
}
8412
 
8413
static void
8414
do_smc (void)
8415
{
8416
  inst.reloc.type = BFD_RELOC_ARM_SMC;
8417
  inst.reloc.pc_rel = 0;
8418
}
8419
 
8420
static void
8421
do_hvc (void)
8422
{
8423
  inst.reloc.type = BFD_RELOC_ARM_HVC;
8424
  inst.reloc.pc_rel = 0;
8425
}
8426
 
8427
static void
8428
do_swi (void)
8429
{
8430
  inst.reloc.type = BFD_RELOC_ARM_SWI;
8431
  inst.reloc.pc_rel = 0;
8432
}
8433
 
8434
/* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
8435
   SMLAxy{cond} Rd,Rm,Rs,Rn
8436
   SMLAWy{cond} Rd,Rm,Rs,Rn
8437
   Error if any register is R15.  */
8438
 
8439
static void
8440
do_smla (void)
8441
{
8442
  inst.instruction |= inst.operands[0].reg << 16;
8443
  inst.instruction |= inst.operands[1].reg;
8444
  inst.instruction |= inst.operands[2].reg << 8;
8445
  inst.instruction |= inst.operands[3].reg << 12;
8446
}
8447
 
8448
/* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
8449
   SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
8450
   Error if any register is R15.
8451
   Warning if Rdlo == Rdhi.  */
8452
 
8453
static void
8454
do_smlal (void)
8455
{
8456
  inst.instruction |= inst.operands[0].reg << 12;
8457
  inst.instruction |= inst.operands[1].reg << 16;
8458
  inst.instruction |= inst.operands[2].reg;
8459
  inst.instruction |= inst.operands[3].reg << 8;
8460
 
8461
  if (inst.operands[0].reg == inst.operands[1].reg)
8462
    as_tsktsk (_("rdhi and rdlo must be different"));
8463
}
8464
 
8465
/* ARM V5E (El Segundo) signed-multiply (argument parse)
8466
   SMULxy{cond} Rd,Rm,Rs
8467
   Error if any register is R15.  */
8468
 
8469
static void
8470
do_smul (void)
8471
{
8472
  inst.instruction |= inst.operands[0].reg << 16;
8473
  inst.instruction |= inst.operands[1].reg;
8474
  inst.instruction |= inst.operands[2].reg << 8;
8475
}
8476
 
8477
/* ARM V6 srs (argument parse).  The variable fields in the encoding are
8478
   the same for both ARM and Thumb-2.  */
8479
 
8480
static void
8481
do_srs (void)
8482
{
8483
  int reg;
8484
 
8485
  if (inst.operands[0].present)
8486
    {
8487
      reg = inst.operands[0].reg;
8488
      constraint (reg != REG_SP, _("SRS base register must be r13"));
8489
    }
8490
  else
8491
    reg = REG_SP;
8492
 
8493
  inst.instruction |= reg << 16;
8494
  inst.instruction |= inst.operands[1].imm;
8495
  if (inst.operands[0].writeback || inst.operands[1].writeback)
8496
    inst.instruction |= WRITE_BACK;
8497
}
8498
 
8499
/* ARM V6 strex (argument parse).  */
8500
 
8501
static void
8502
do_strex (void)
8503
{
8504
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
8505
              || inst.operands[2].postind || inst.operands[2].writeback
8506
              || inst.operands[2].immisreg || inst.operands[2].shifted
8507
              || inst.operands[2].negative
8508
              /* See comment in do_ldrex().  */
8509
              || (inst.operands[2].reg == REG_PC),
8510
              BAD_ADDR_MODE);
8511
 
8512
  constraint (inst.operands[0].reg == inst.operands[1].reg
8513
              || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
8514
 
8515
  constraint (inst.reloc.exp.X_op != O_constant
8516
              || inst.reloc.exp.X_add_number != 0,
8517
              _("offset must be zero in ARM encoding"));
8518
 
8519
  inst.instruction |= inst.operands[0].reg << 12;
8520
  inst.instruction |= inst.operands[1].reg;
8521
  inst.instruction |= inst.operands[2].reg << 16;
8522
  inst.reloc.type = BFD_RELOC_UNUSED;
8523
}
8524
 
8525
static void
8526 160 khays
do_t_strexbh (void)
8527
{
8528
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
8529
              || inst.operands[2].postind || inst.operands[2].writeback
8530
              || inst.operands[2].immisreg || inst.operands[2].shifted
8531
              || inst.operands[2].negative,
8532
              BAD_ADDR_MODE);
8533
 
8534
  constraint (inst.operands[0].reg == inst.operands[1].reg
8535
              || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
8536
 
8537
  do_rm_rd_rn ();
8538
}
8539
 
8540
static void
8541 16 khays
do_strexd (void)
8542
{
8543
  constraint (inst.operands[1].reg % 2 != 0,
8544
              _("even register required"));
8545
  constraint (inst.operands[2].present
8546
              && inst.operands[2].reg != inst.operands[1].reg + 1,
8547
              _("can only store two consecutive registers"));
8548
  /* If op 2 were present and equal to PC, this function wouldn't
8549
     have been called in the first place.  */
8550
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));
8551
 
8552
  constraint (inst.operands[0].reg == inst.operands[1].reg
8553
              || inst.operands[0].reg == inst.operands[1].reg + 1
8554
              || inst.operands[0].reg == inst.operands[3].reg,
8555
              BAD_OVERLAP);
8556
 
8557
  inst.instruction |= inst.operands[0].reg << 12;
8558
  inst.instruction |= inst.operands[1].reg;
8559
  inst.instruction |= inst.operands[3].reg << 16;
8560
}
8561
 
8562
/* ARM V6 SXTAH extracts a 16-bit value from a register, sign
8563
   extends it to 32-bits, and adds the result to a value in another
8564
   register.  You can specify a rotation by 0, 8, 16, or 24 bits
8565
   before extracting the 16-bit value.
8566
   SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
8567
   Condition defaults to COND_ALWAYS.
8568
   Error if any register uses R15.  */
8569
 
8570
static void
8571
do_sxtah (void)
8572
{
8573
  inst.instruction |= inst.operands[0].reg << 12;
8574
  inst.instruction |= inst.operands[1].reg << 16;
8575
  inst.instruction |= inst.operands[2].reg;
8576
  inst.instruction |= inst.operands[3].imm << 10;
8577
}
8578
 
8579
/* ARM V6 SXTH.
8580
 
8581
   SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
8582
   Condition defaults to COND_ALWAYS.
8583
   Error if any register uses R15.  */
8584
 
8585
static void
8586
do_sxth (void)
8587
{
8588
  inst.instruction |= inst.operands[0].reg << 12;
8589
  inst.instruction |= inst.operands[1].reg;
8590
  inst.instruction |= inst.operands[2].imm << 10;
8591
}
8592
 
8593
/* VFP instructions.  In a logical order: SP variant first, monad
8594
   before dyad, arithmetic then move then load/store.  */
8595
 
8596
static void
8597
do_vfp_sp_monadic (void)
8598
{
8599
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8600
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
8601
}
8602
 
8603
static void
8604
do_vfp_sp_dyadic (void)
8605
{
8606
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8607
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
8608
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
8609
}
8610
 
8611
static void
8612
do_vfp_sp_compare_z (void)
8613
{
8614
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8615
}
8616
 
8617
static void
8618
do_vfp_dp_sp_cvt (void)
8619
{
8620
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8621
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
8622
}
8623
 
8624
static void
8625
do_vfp_sp_dp_cvt (void)
8626
{
8627
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8628
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
8629
}
8630
 
8631
static void
8632
do_vfp_reg_from_sp (void)
8633
{
8634
  inst.instruction |= inst.operands[0].reg << 12;
8635
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
8636
}
8637
 
8638
static void
8639
do_vfp_reg2_from_sp2 (void)
8640
{
8641
  constraint (inst.operands[2].imm != 2,
8642
              _("only two consecutive VFP SP registers allowed here"));
8643
  inst.instruction |= inst.operands[0].reg << 12;
8644
  inst.instruction |= inst.operands[1].reg << 16;
8645
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
8646
}
8647
 
8648
static void
8649
do_vfp_sp_from_reg (void)
8650
{
8651
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
8652
  inst.instruction |= inst.operands[1].reg << 12;
8653
}
8654
 
8655
static void
8656
do_vfp_sp2_from_reg2 (void)
8657
{
8658
  constraint (inst.operands[0].imm != 2,
8659
              _("only two consecutive VFP SP registers allowed here"));
8660
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
8661
  inst.instruction |= inst.operands[1].reg << 12;
8662
  inst.instruction |= inst.operands[2].reg << 16;
8663
}
8664
 
8665
static void
8666
do_vfp_sp_ldst (void)
8667
{
8668
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8669
  encode_arm_cp_address (1, FALSE, TRUE, 0);
8670
}
8671
 
8672
static void
8673
do_vfp_dp_ldst (void)
8674
{
8675
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8676
  encode_arm_cp_address (1, FALSE, TRUE, 0);
8677
}
8678
 
8679
 
8680
static void
8681
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
8682
{
8683
  if (inst.operands[0].writeback)
8684
    inst.instruction |= WRITE_BACK;
8685
  else
8686
    constraint (ldstm_type != VFP_LDSTMIA,
8687
                _("this addressing mode requires base-register writeback"));
8688
  inst.instruction |= inst.operands[0].reg << 16;
8689
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
8690
  inst.instruction |= inst.operands[1].imm;
8691
}
8692
 
8693
static void
8694
vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
8695
{
8696
  int count;
8697
 
8698
  if (inst.operands[0].writeback)
8699
    inst.instruction |= WRITE_BACK;
8700
  else
8701
    constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
8702
                _("this addressing mode requires base-register writeback"));
8703
 
8704
  inst.instruction |= inst.operands[0].reg << 16;
8705
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
8706
 
8707
  count = inst.operands[1].imm << 1;
8708
  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
8709
    count += 1;
8710
 
8711
  inst.instruction |= count;
8712
}
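
/* Example of the count field set above: "fldmiax r0!, {d1-d3}" names three
   D registers, so count = (3 << 1) + 1 = 7; the extra word is what
   distinguishes the FLDMX/FSTMX format from the plain double-precision
   forms, where the same list would give count = 6.  */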
8713
 
8714
static void
8715
do_vfp_sp_ldstmia (void)
8716
{
8717
  vfp_sp_ldstm (VFP_LDSTMIA);
8718
}
8719
 
8720
static void
8721
do_vfp_sp_ldstmdb (void)
8722
{
8723
  vfp_sp_ldstm (VFP_LDSTMDB);
8724
}
8725
 
8726
static void
8727
do_vfp_dp_ldstmia (void)
8728
{
8729
  vfp_dp_ldstm (VFP_LDSTMIA);
8730
}
8731
 
8732
static void
8733
do_vfp_dp_ldstmdb (void)
8734
{
8735
  vfp_dp_ldstm (VFP_LDSTMDB);
8736
}
8737
 
8738
static void
8739
do_vfp_xp_ldstmia (void)
8740
{
8741
  vfp_dp_ldstm (VFP_LDSTMIAX);
8742
}
8743
 
8744
static void
8745
do_vfp_xp_ldstmdb (void)
8746
{
8747
  vfp_dp_ldstm (VFP_LDSTMDBX);
8748
}
8749
 
8750
static void
8751
do_vfp_dp_rd_rm (void)
8752
{
8753
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8754
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
8755
}
8756
 
8757
static void
8758
do_vfp_dp_rn_rd (void)
8759
{
8760
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
8761
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
8762
}
8763
 
8764
static void
8765
do_vfp_dp_rd_rn (void)
8766
{
8767
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8768
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
8769
}
8770
 
8771
static void
8772
do_vfp_dp_rd_rn_rm (void)
8773
{
8774
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8775
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
8776
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
8777
}
8778
 
8779
static void
8780
do_vfp_dp_rd (void)
8781
{
8782
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8783
}
8784
 
8785
static void
8786
do_vfp_dp_rm_rd_rn (void)
8787
{
8788
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
8789
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
8790
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
8791
}
8792
 
8793
/* VFPv3 instructions.  */
8794
static void
8795
do_vfp_sp_const (void)
8796
{
8797
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8798
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
8799
  inst.instruction |= (inst.operands[1].imm & 0x0f);
8800
}
8801
 
8802
static void
8803
do_vfp_dp_const (void)
8804
{
8805
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8806
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
8807
  inst.instruction |= (inst.operands[1].imm & 0x0f);
8808
}
8809
 
8810
static void
8811
vfp_conv (int srcsize)
8812
{
8813 160 khays
  int immbits = srcsize - inst.operands[1].imm;
8814
 
8815
  if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
8816
    {
8817
      /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
8818
         i.e. immbits must be in range 0 - 16.  */
8819
      inst.error = _("immediate value out of range, expected range [0, 16]");
8820
      return;
8821
    }
8822
  else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
8823
    {
8824
      /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
8825
         i.e. immbits must be in range 0 - 31.  */
8826
      inst.error = _("immediate value out of range, expected range [1, 32]");
8827
      return;
8828
    }
8829
 
8830 16 khays
  inst.instruction |= (immbits & 1) << 5;
8831
  inst.instruction |= (immbits >> 1);
8832
}
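
/* Worked example of the split above: a fixed-point conversion with
   srcsize 32 and #16 fraction bits gives immbits = 32 - 16 = 16, so
   bit 5 receives immbits & 1 = 0 and the low field receives
   immbits >> 1 = 8.  */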
8833
 
8834
static void
8835
do_vfp_sp_conv_16 (void)
8836
{
8837
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8838
  vfp_conv (16);
8839
}
8840
 
8841
static void
8842
do_vfp_dp_conv_16 (void)
8843
{
8844
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8845
  vfp_conv (16);
8846
}
8847
 
8848
static void
8849
do_vfp_sp_conv_32 (void)
8850
{
8851
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8852
  vfp_conv (32);
8853
}
8854
 
8855
static void
8856
do_vfp_dp_conv_32 (void)
8857
{
8858
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8859
  vfp_conv (32);
8860
}
8861
 
8862
/* FPA instructions.  Also in a logical order.  */
8863
 
8864
static void
8865
do_fpa_cmp (void)
8866
{
8867
  inst.instruction |= inst.operands[0].reg << 16;
8868
  inst.instruction |= inst.operands[1].reg;
8869
}
8870
 
8871
static void
8872
do_fpa_ldmstm (void)
8873
{
8874
  inst.instruction |= inst.operands[0].reg << 12;
8875
  switch (inst.operands[1].imm)
8876
    {
8877
    case 1: inst.instruction |= CP_T_X;          break;
8878
    case 2: inst.instruction |= CP_T_Y;          break;
8879
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
8880
    case 4:                                      break;
8881
    default: abort ();
8882
    }
8883
 
8884
  if (inst.instruction & (PRE_INDEX | INDEX_UP))
8885
    {
8886
      /* The instruction specified "ea" or "fd", so we can only accept
8887
         [Rn]{!}.  The instruction does not really support stacking or
8888
         unstacking, so we have to emulate these by setting appropriate
8889
         bits and offsets.  */
8890
      constraint (inst.reloc.exp.X_op != O_constant
8891
                  || inst.reloc.exp.X_add_number != 0,
8892
                  _("this instruction does not support indexing"));
8893
 
8894
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
8895
        inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;
8896
 
8897
      if (!(inst.instruction & INDEX_UP))
8898
        inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;
8899
 
8900
      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
8901
        {
8902
          inst.operands[2].preind = 0;
8903
          inst.operands[2].postind = 1;
8904
        }
8905
    }
8906
 
8907
  encode_arm_cp_address (2, TRUE, TRUE, 0);
8908
}
8909
 
8910
/* iWMMXt instructions: strictly in alphabetical order.  */
8911
 
8912
static void
8913
do_iwmmxt_tandorc (void)
8914
{
8915
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
8916
}
8917
 
8918
static void
8919
do_iwmmxt_textrc (void)
8920
{
8921
  inst.instruction |= inst.operands[0].reg << 12;
8922
  inst.instruction |= inst.operands[1].imm;
8923
}
8924
 
8925
static void
8926
do_iwmmxt_textrm (void)
8927
{
8928
  inst.instruction |= inst.operands[0].reg << 12;
8929
  inst.instruction |= inst.operands[1].reg << 16;
8930
  inst.instruction |= inst.operands[2].imm;
8931
}
8932
 
8933
static void
8934
do_iwmmxt_tinsr (void)
8935
{
8936
  inst.instruction |= inst.operands[0].reg << 16;
8937
  inst.instruction |= inst.operands[1].reg << 12;
8938
  inst.instruction |= inst.operands[2].imm;
8939
}
8940
 
8941
static void
8942
do_iwmmxt_tmia (void)
8943
{
8944
  inst.instruction |= inst.operands[0].reg << 5;
8945
  inst.instruction |= inst.operands[1].reg;
8946
  inst.instruction |= inst.operands[2].reg << 12;
8947
}
8948
 
8949
static void
8950
do_iwmmxt_waligni (void)
8951
{
8952
  inst.instruction |= inst.operands[0].reg << 12;
8953
  inst.instruction |= inst.operands[1].reg << 16;
8954
  inst.instruction |= inst.operands[2].reg;
8955
  inst.instruction |= inst.operands[3].imm << 20;
8956
}
8957
 
8958
static void
8959
do_iwmmxt_wmerge (void)
8960
{
8961
  inst.instruction |= inst.operands[0].reg << 12;
8962
  inst.instruction |= inst.operands[1].reg << 16;
8963
  inst.instruction |= inst.operands[2].reg;
8964
  inst.instruction |= inst.operands[3].imm << 21;
8965
}
8966
 
8967
static void
8968
do_iwmmxt_wmov (void)
8969
{
8970
  /* WMOV rD, rN is an alias for WOR rD, rN, rN.  */
8971
  inst.instruction |= inst.operands[0].reg << 12;
8972
  inst.instruction |= inst.operands[1].reg << 16;
8973
  inst.instruction |= inst.operands[1].reg;
8974
}
8975
 
8976
static void
8977
do_iwmmxt_wldstbh (void)
8978
{
8979
  int reloc;
8980
  inst.instruction |= inst.operands[0].reg << 12;
8981
  if (thumb_mode)
8982
    reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
8983
  else
8984
    reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
8985
  encode_arm_cp_address (1, TRUE, FALSE, reloc);
8986
}
8987
 
8988
static void
8989
do_iwmmxt_wldstw (void)
8990
{
8991
  /* RIWR_RIWC clears .isreg for a control register.  */
8992
  if (!inst.operands[0].isreg)
8993
    {
8994
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
8995
      inst.instruction |= 0xf0000000;
8996
    }
8997
 
8998
  inst.instruction |= inst.operands[0].reg << 12;
8999
  encode_arm_cp_address (1, TRUE, TRUE, 0);
9000
}
9001
 
9002
static void
9003
do_iwmmxt_wldstd (void)
9004
{
9005
  inst.instruction |= inst.operands[0].reg << 12;
9006
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
9007
      && inst.operands[1].immisreg)
9008
    {
9009
      inst.instruction &= ~0x1a000ff;
9010
      inst.instruction |= (0xf << 28);
9011
      if (inst.operands[1].preind)
9012
        inst.instruction |= PRE_INDEX;
9013
      if (!inst.operands[1].negative)
9014
        inst.instruction |= INDEX_UP;
9015
      if (inst.operands[1].writeback)
9016
        inst.instruction |= WRITE_BACK;
9017
      inst.instruction |= inst.operands[1].reg << 16;
9018
      inst.instruction |= inst.reloc.exp.X_add_number << 4;
9019
      inst.instruction |= inst.operands[1].imm;
9020
    }
9021
  else
9022
    encode_arm_cp_address (1, TRUE, FALSE, 0);
9023
}
9024
 
9025
static void
9026
do_iwmmxt_wshufh (void)
9027
{
9028
  inst.instruction |= inst.operands[0].reg << 12;
9029
  inst.instruction |= inst.operands[1].reg << 16;
9030
  inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
9031
  inst.instruction |= (inst.operands[2].imm & 0x0f);
9032
}
9033
 
9034
static void
9035
do_iwmmxt_wzero (void)
9036
{
9037
  /* WZERO reg is an alias for WANDN reg, reg, reg.  */
9038
  inst.instruction |= inst.operands[0].reg;
9039
  inst.instruction |= inst.operands[0].reg << 12;
9040
  inst.instruction |= inst.operands[0].reg << 16;
9041
}
9042
 
9043
static void
9044
do_iwmmxt_wrwrwr_or_imm5 (void)
9045
{
9046
  if (inst.operands[2].isreg)
9047
    do_rd_rn_rm ();
9048
  else {
9049
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
9050
                _("immediate operand requires iWMMXt2"));
9051
    do_rd_rn ();
9052
    if (inst.operands[2].imm == 0)
9053
      {
9054
        switch ((inst.instruction >> 20) & 0xf)
9055
          {
9056
          case 4:
9057
          case 5:
9058
          case 6:
9059
          case 7:
9060
            /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
9061
            inst.operands[2].imm = 16;
9062
            inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
9063
            break;
9064
          case 8:
9065
          case 9:
9066
          case 10:
9067
          case 11:
9068
            /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
9069
            inst.operands[2].imm = 32;
9070
            inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
9071
            break;
9072
          case 12:
9073
          case 13:
9074
          case 14:
9075
          case 15:
9076
            {
9077
              /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
9078
              unsigned long wrn;
9079
              wrn = (inst.instruction >> 16) & 0xf;
9080
              inst.instruction &= 0xff0fff0f;
9081
              inst.instruction |= wrn;
9082
              /* Bail out here; the instruction is now assembled.  */
9083
              return;
9084
            }
9085
          }
9086
      }
9087
    /* Map 32 -> 0, etc.  */
9088
    inst.operands[2].imm &= 0x1f;
9089
    inst.instruction |= (0xf << 28) | ((inst.operands[2].imm & 0x10) << 4)
                        | (inst.operands[2].imm & 0xf);
9090
  }
9091
}
9092
 
9093
/* Cirrus Maverick instructions.  Simple 2-, 3-, and 4-register
9094
   operations first, then control, shift, and load/store.  */
9095
 
9096
/* Insns like "foo X,Y,Z".  */
9097
 
9098
static void
9099
do_mav_triple (void)
9100
{
9101
  inst.instruction |= inst.operands[0].reg << 16;
9102
  inst.instruction |= inst.operands[1].reg;
9103
  inst.instruction |= inst.operands[2].reg << 12;
9104
}
9105
 
9106
/* Insns like "foo W,X,Y,Z".
9107
    where W=MVAX[0:3] and X,Y,Z=MVFX[0:15].  */
9108
 
9109
static void
9110
do_mav_quad (void)
9111
{
9112
  inst.instruction |= inst.operands[0].reg << 5;
9113
  inst.instruction |= inst.operands[1].reg << 12;
9114
  inst.instruction |= inst.operands[2].reg << 16;
9115
  inst.instruction |= inst.operands[3].reg;
9116
}
9117
 
9118
/* cfmvsc32<cond> DSPSC,MVDX[15:0].  */
9119
static void
9120
do_mav_dspsc (void)
9121
{
9122
  inst.instruction |= inst.operands[1].reg << 12;
9123
}
9124
 
9125
/* Maverick shift immediate instructions.
9126
   cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
9127
   cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0].  */
9128
 
9129
static void
9130
do_mav_shift (void)
9131
{
9132
  int imm = inst.operands[2].imm;
9133
 
9134
  inst.instruction |= inst.operands[0].reg << 12;
9135
  inst.instruction |= inst.operands[1].reg << 16;
9136
 
9137
  /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
9138
     Bits 5-7 of the insn should have bits 4-6 of the immediate.
9139
     Bit 4 should be 0.  */
9140
  imm = (imm & 0xf) | ((imm & 0x70) << 1);
9141
 
9142
  inst.instruction |= imm;
9143
}
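
/* Worked example of the remapping above: a shift immediate of 20
   (binary 0010100) becomes (20 & 0xf) | ((20 & 0x70) << 1) = 0x24,
   i.e. bits 0-3 = 4, bit 4 = 0 and bits 5-7 = 1, matching the layout
   described in the comment.  */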
9144
 
9145
/* XScale instructions.  Also sorted arithmetic before move.  */
9146
 
9147
/* Xscale multiply-accumulate (argument parse)
9148
     MIAcc   acc0,Rm,Rs
9149
     MIAPHcc acc0,Rm,Rs
9150
     MIAxycc acc0,Rm,Rs.  */
9151
 
9152
static void
9153
do_xsc_mia (void)
9154
{
9155
  inst.instruction |= inst.operands[1].reg;
9156
  inst.instruction |= inst.operands[2].reg << 12;
9157
}
9158
 
9159
/* Xscale move-accumulator-register (argument parse)
9160
 
9161
     MARcc   acc0,RdLo,RdHi.  */
9162
 
9163
static void
9164
do_xsc_mar (void)
9165
{
9166
  inst.instruction |= inst.operands[1].reg << 12;
9167
  inst.instruction |= inst.operands[2].reg << 16;
9168
}
9169
 
9170
/* Xscale move-register-accumulator (argument parse)
9171
 
9172
     MRAcc   RdLo,RdHi,acc0.  */
9173
 
9174
static void
9175
do_xsc_mra (void)
9176
{
9177
  constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
9178
  inst.instruction |= inst.operands[0].reg << 12;
9179
  inst.instruction |= inst.operands[1].reg << 16;
9180
}
9181
 
9182
/* Encoding functions relevant only to Thumb.  */
9183
 
9184
/* inst.operands[i] is a shifted-register operand; encode
9185
   it into inst.instruction in the format used by Thumb32.  */
9186
 
9187
static void
9188
encode_thumb32_shifted_operand (int i)
9189
{
9190
  unsigned int value = inst.reloc.exp.X_add_number;
9191
  unsigned int shift = inst.operands[i].shift_kind;
9192
 
9193
  constraint (inst.operands[i].immisreg,
9194
              _("shift by register not allowed in thumb mode"));
9195
  inst.instruction |= inst.operands[i].reg;
9196
  if (shift == SHIFT_RRX)
9197
    inst.instruction |= SHIFT_ROR << 4;
9198
  else
9199
    {
9200
      constraint (inst.reloc.exp.X_op != O_constant,
9201
                  _("expression too complex"));
9202
 
9203
      constraint (value > 32
9204
                  || (value == 32 && (shift == SHIFT_LSL
9205
                                      || shift == SHIFT_ROR)),
9206
                  _("shift expression is too large"));
9207
 
9208
      if (value == 0)
9209
        shift = SHIFT_LSL;
9210
      else if (value == 32)
9211
        value = 0;
9212
 
9213
      inst.instruction |= shift << 4;
9214
      inst.instruction |= (value & 0x1c) << 10;
9215
      inst.instruction |= (value & 0x03) << 6;
9216
    }
9217
}
9218
 
9219
 
9220
/* inst.operands[i] was set up by parse_address.  Encode it into a
9221
   Thumb32 format load or store instruction.  Reject forms that cannot
9222
   be used with such instructions.  If is_t is true, reject forms that
9223
   cannot be used with a T instruction; if is_d is true, reject forms
9224
   that cannot be used with a D instruction.  If it is a store insn,
9225
   reject PC in Rn.  */
9226
 
9227
static void
9228
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
9229
{
9230
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
9231
 
9232
  constraint (!inst.operands[i].isreg,
9233
              _("Instruction does not support =N addresses"));
9234
 
9235
  inst.instruction |= inst.operands[i].reg << 16;
9236
  if (inst.operands[i].immisreg)
9237
    {
9238
      constraint (is_pc, BAD_PC_ADDRESSING);
9239
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
9240
      constraint (inst.operands[i].negative,
9241
                  _("Thumb does not support negative register indexing"));
9242
      constraint (inst.operands[i].postind,
9243
                  _("Thumb does not support register post-indexing"));
9244
      constraint (inst.operands[i].writeback,
9245
                  _("Thumb does not support register indexing with writeback"));
9246
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
9247
                  _("Thumb supports only LSL in shifted register indexing"));
9248
 
9249
      inst.instruction |= inst.operands[i].imm;
9250
      if (inst.operands[i].shifted)
9251
        {
9252
          constraint (inst.reloc.exp.X_op != O_constant,
9253
                      _("expression too complex"));
9254
          constraint (inst.reloc.exp.X_add_number < 0
9255
                      || inst.reloc.exp.X_add_number > 3,
9256
                      _("shift out of range"));
9257
          inst.instruction |= inst.reloc.exp.X_add_number << 4;
9258
        }
9259
      inst.reloc.type = BFD_RELOC_UNUSED;
9260
    }
9261
  else if (inst.operands[i].preind)
9262
    {
9263
      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
9264
      constraint (is_t && inst.operands[i].writeback,
9265
                  _("cannot use writeback with this instruction"));
9266
      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0)
9267
                  && !inst.reloc.pc_rel, BAD_PC_ADDRESSING);
9268
 
9269
      if (is_d)
9270
        {
9271
          inst.instruction |= 0x01000000;
9272
          if (inst.operands[i].writeback)
9273
            inst.instruction |= 0x00200000;
9274
        }
9275
      else
9276
        {
9277
          inst.instruction |= 0x00000c00;
9278
          if (inst.operands[i].writeback)
9279
            inst.instruction |= 0x00000100;
9280
        }
9281
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
9282
    }
9283
  else if (inst.operands[i].postind)
9284
    {
9285
      gas_assert (inst.operands[i].writeback);
9286
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
9287
      constraint (is_t, _("cannot use post-indexing with this instruction"));
9288
 
9289
      if (is_d)
9290
        inst.instruction |= 0x00200000;
9291
      else
9292
        inst.instruction |= 0x00000900;
9293
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
9294
    }
9295
  else /* unindexed - only for coprocessor */
9296
    inst.error = _("instruction does not accept unindexed addressing");
9297
}
9298
 
9299
/* Table of Thumb instructions which exist in both 16- and 32-bit
9300
   encodings (the latter only in post-V6T2 cores).  The index is the
9301
   value used in the insns table below.  When there is more than one
9302
   possible 16-bit encoding for the instruction, this table always
9303
   holds variant (1).
9304
   Also contains several pseudo-instructions used during relaxation.  */
9305
#define T16_32_TAB                              \
9306
  X(_adc,   4140, eb400000),                    \
9307
  X(_adcs,  4140, eb500000),                    \
9308
  X(_add,   1c00, eb000000),                    \
9309
  X(_adds,  1c00, eb100000),                    \
9310
  X(_addi,  0000, f1000000),                    \
9311
  X(_addis, 0000, f1100000),                    \
9312
  X(_add_pc,000f, f20f0000),                    \
9313
  X(_add_sp,000d, f10d0000),                    \
9314
  X(_adr,   000f, f20f0000),                    \
9315
  X(_and,   4000, ea000000),                    \
9316
  X(_ands,  4000, ea100000),                    \
9317
  X(_asr,   1000, fa40f000),                    \
9318
  X(_asrs,  1000, fa50f000),                    \
9319
  X(_b,     e000, f000b000),                    \
9320
  X(_bcond, d000, f0008000),                    \
9321
  X(_bic,   4380, ea200000),                    \
9322
  X(_bics,  4380, ea300000),                    \
9323
  X(_cmn,   42c0, eb100f00),                    \
9324
  X(_cmp,   2800, ebb00f00),                    \
9325
  X(_cpsie, b660, f3af8400),                    \
9326
  X(_cpsid, b670, f3af8600),                    \
9327
  X(_cpy,   4600, ea4f0000),                    \
9328
  X(_dec_sp,80dd, f1ad0d00),                    \
9329
  X(_eor,   4040, ea800000),                    \
9330
  X(_eors,  4040, ea900000),                    \
9331
  X(_inc_sp,00dd, f10d0d00),                    \
9332
  X(_ldmia, c800, e8900000),                    \
9333
  X(_ldr,   6800, f8500000),                    \
9334
  X(_ldrb,  7800, f8100000),                    \
9335
  X(_ldrh,  8800, f8300000),                    \
9336
  X(_ldrsb, 5600, f9100000),                    \
9337
  X(_ldrsh, 5e00, f9300000),                    \
9338
  X(_ldr_pc,4800, f85f0000),                    \
9339
  X(_ldr_pc2,4800, f85f0000),                   \
9340
  X(_ldr_sp,9800, f85d0000),                    \
9341
  X(_lsl,   0000, fa00f000),                    \
9342
  X(_lsls,  0000, fa10f000),                    \
9343
  X(_lsr,   0800, fa20f000),                    \
9344
  X(_lsrs,  0800, fa30f000),                    \
9345
  X(_mov,   2000, ea4f0000),                    \
9346
  X(_movs,  2000, ea5f0000),                    \
9347
  X(_mul,   4340, fb00f000),                     \
9348
  X(_muls,  4340, ffffffff), /* no 32b muls */  \
9349
  X(_mvn,   43c0, ea6f0000),                    \
9350
  X(_mvns,  43c0, ea7f0000),                    \
9351
  X(_neg,   4240, f1c00000), /* rsb #0 */       \
9352
  X(_negs,  4240, f1d00000), /* rsbs #0 */      \
9353
  X(_orr,   4300, ea400000),                    \
9354
  X(_orrs,  4300, ea500000),                    \
9355
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */        \
9356
  X(_push,  b400, e92d0000), /* stmdb sp!,... */        \
9357
  X(_rev,   ba00, fa90f080),                    \
9358
  X(_rev16, ba40, fa90f090),                    \
9359
  X(_revsh, bac0, fa90f0b0),                    \
9360
  X(_ror,   41c0, fa60f000),                    \
9361
  X(_rors,  41c0, fa70f000),                    \
9362
  X(_sbc,   4180, eb600000),                    \
9363
  X(_sbcs,  4180, eb700000),                    \
9364
  X(_stmia, c000, e8800000),                    \
9365
  X(_str,   6000, f8400000),                    \
9366
  X(_strb,  7000, f8000000),                    \
9367
  X(_strh,  8000, f8200000),                    \
9368
  X(_str_sp,9000, f84d0000),                    \
9369
  X(_sub,   1e00, eba00000),                    \
9370
  X(_subs,  1e00, ebb00000),                    \
9371
  X(_subi,  8000, f1a00000),                    \
9372
  X(_subis, 8000, f1b00000),                    \
9373
  X(_sxtb,  b240, fa4ff080),                    \
9374
  X(_sxth,  b200, fa0ff080),                    \
9375
  X(_tst,   4200, ea100f00),                    \
9376
  X(_uxtb,  b2c0, fa5ff080),                    \
9377
  X(_uxth,  b280, fa1ff080),                    \
9378
  X(_nop,   bf00, f3af8000),                    \
9379
  X(_yield, bf10, f3af8001),                    \
9380
  X(_wfe,   bf20, f3af8002),                    \
9381
  X(_wfi,   bf30, f3af8003),                    \
9382
  X(_sev,   bf40, f3af8004),
9383
 
9384
/* To catch errors in encoding functions, the codes are all offset by
9385
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
9386
   as 16-bit instructions.  */
9387
#define X(a,b,c) T_MNEM##a
9388
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
9389
#undef X
9390
 
9391
#define X(a,b,c) 0x##b
9392
static const unsigned short thumb_op16[] = { T16_32_TAB };
9393
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
9394
#undef X
9395
 
9396
#define X(a,b,c) 0x##c
9397
static const unsigned int thumb_op32[] = { T16_32_TAB };
9398
#define THUMB_OP32(n)        (thumb_op32[(n) - (T16_32_OFFSET + 1)])
9399
#define THUMB_SETS_FLAGS(n)  (THUMB_OP32 (n) & 0x00100000)
9400
#undef X
9401
#undef T16_32_TAB
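
/* Worked example of the tables above: T_MNEM_adc is the first code after
   T16_32_OFFSET, i.e. 0xF800, so THUMB_OP16 (T_MNEM_adc) indexes
   thumb_op16[0] = 0x4140 and THUMB_OP32 (T_MNEM_adc) = 0xeb400000;
   THUMB_SETS_FLAGS (T_MNEM_adcs) is nonzero because 0xeb500000 has
   bit 20 set.  */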
9402
 
9403
/* Thumb instruction encoders, in alphabetical order.  */
9404
 
9405
/* ADDW or SUBW.  */
9406
 
9407
static void
9408
do_t_add_sub_w (void)
9409
{
9410
  int Rd, Rn;
9411
 
9412
  Rd = inst.operands[0].reg;
9413
  Rn = inst.operands[1].reg;
9414
 
9415
  /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
9416
     is the SP-{plus,minus}-immediate form of the instruction.  */
9417
  if (Rn == REG_SP)
9418
    constraint (Rd == REG_PC, BAD_PC);
9419
  else
9420
    reject_bad_reg (Rd);
9421
 
9422
  inst.instruction |= (Rn << 16) | (Rd << 8);
9423
  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
9424
}
9425
 
9426
/* Parse an add or subtract instruction.  We get here with inst.instruction
9427
   equalling any of THUMB_OPCODE_add, adds, sub, or subs.  */
9428
 
9429
static void
9430
do_t_add_sub (void)
9431
{
9432
  int Rd, Rs, Rn;
9433
 
9434
  Rd = inst.operands[0].reg;
9435
  Rs = (inst.operands[1].present
9436
        ? inst.operands[1].reg    /* Rd, Rs, foo */
9437
        : inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
9438
 
9439
  if (Rd == REG_PC)
9440
    set_it_insn_type_last ();
9441
 
9442
  if (unified_syntax)
9443
    {
9444
      bfd_boolean flags;
9445
      bfd_boolean narrow;
9446
      int opcode;
9447
 
9448
      flags = (inst.instruction == T_MNEM_adds
9449
               || inst.instruction == T_MNEM_subs);
9450
      if (flags)
9451
        narrow = !in_it_block ();
9452
      else
9453
        narrow = in_it_block ();
9454
      if (!inst.operands[2].isreg)
9455
        {
9456
          int add;
9457
 
9458
          constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
9459
 
9460
          add = (inst.instruction == T_MNEM_add
9461
                 || inst.instruction == T_MNEM_adds);
9462
          opcode = 0;
9463
          if (inst.size_req != 4)
9464
            {
9465
              /* Attempt to use a narrow opcode, with relaxation if
9466
                 appropriate.  */
9467
              if (Rd == REG_SP && Rs == REG_SP && !flags)
9468
                opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
9469
              else if (Rd <= 7 && Rs == REG_SP && add && !flags)
9470
                opcode = T_MNEM_add_sp;
9471
              else if (Rd <= 7 && Rs == REG_PC && add && !flags)
9472
                opcode = T_MNEM_add_pc;
9473
              else if (Rd <= 7 && Rs <= 7 && narrow)
9474
                {
9475
                  if (flags)
9476
                    opcode = add ? T_MNEM_addis : T_MNEM_subis;
9477
                  else
9478
                    opcode = add ? T_MNEM_addi : T_MNEM_subi;
9479
                }
9480
              if (opcode)
9481
                {
9482
                  inst.instruction = THUMB_OP16(opcode);
9483
                  inst.instruction |= (Rd << 4) | Rs;
9484
                  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
9485
                  if (inst.size_req != 2)
9486
                    inst.relax = opcode;
9487
                }
9488
              else
9489
                constraint (inst.size_req == 2, BAD_HIREG);
9490
            }
9491
          if (inst.size_req == 4
9492
              || (inst.size_req != 2 && !opcode))
9493
            {
9494
              if (Rd == REG_PC)
9495
                {
9496
                  constraint (add, BAD_PC);
9497
                  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
9498
                             _("only SUBS PC, LR, #const allowed"));
9499
                  constraint (inst.reloc.exp.X_op != O_constant,
9500
                              _("expression too complex"));
9501
                  constraint (inst.reloc.exp.X_add_number < 0
9502
                              || inst.reloc.exp.X_add_number > 0xff,
9503
                             _("immediate value out of range"));
9504
                  inst.instruction = T2_SUBS_PC_LR
9505
                                     | inst.reloc.exp.X_add_number;
9506
                  inst.reloc.type = BFD_RELOC_UNUSED;
9507
                  return;
9508
                }
9509
              else if (Rs == REG_PC)
9510
                {
9511
                  /* Always use addw/subw.  */
9512
                  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
9513
                  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
9514
                }
9515
              else
9516
                {
9517
                  inst.instruction = THUMB_OP32 (inst.instruction);
9518
                  inst.instruction = (inst.instruction & 0xe1ffffff)
9519
                                     | 0x10000000;
9520
                  if (flags)
9521
                    inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9522
                  else
9523
                    inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
9524
                }
9525
              inst.instruction |= Rd << 8;
9526
              inst.instruction |= Rs << 16;
9527
            }
9528
        }
9529
      else
9530
        {
9531 160 khays
          unsigned int value = inst.reloc.exp.X_add_number;
9532
          unsigned int shift = inst.operands[2].shift_kind;
9533
 
9534 16 khays
          Rn = inst.operands[2].reg;
9535
          /* See if we can do this with a 16-bit instruction.  */
9536
          if (!inst.operands[2].shifted && inst.size_req != 4)
9537
            {
9538
              if (Rd > 7 || Rs > 7 || Rn > 7)
9539
                narrow = FALSE;
9540
 
9541
              if (narrow)
9542
                {
9543
                  inst.instruction = ((inst.instruction == T_MNEM_adds
9544
                                       || inst.instruction == T_MNEM_add)
9545
                                      ? T_OPCODE_ADD_R3
9546
                                      : T_OPCODE_SUB_R3);
9547
                  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
9548
                  return;
9549
                }
9550
 
9551
              if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
9552
                {
9553
                  /* Thumb-1 cores (except v6-M) require at least one high
9554
                     register in a narrow, non-flag-setting add.  */
9555
                  if (Rd > 7 || Rn > 7
9556
                      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
9557
                      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
9558
                    {
9559
                      if (Rd == Rn)
9560
                        {
9561
                          Rn = Rs;
9562
                          Rs = Rd;
9563
                        }
9564
                      inst.instruction = T_OPCODE_ADD_HI;
9565
                      inst.instruction |= (Rd & 8) << 4;
9566
                      inst.instruction |= (Rd & 7);
9567
                      inst.instruction |= Rn << 3;
9568
                      return;
9569
                    }
9570
                }
9571
            }
9572
 
9573
          constraint (Rd == REG_PC, BAD_PC);
9574
          constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
9575
          constraint (Rs == REG_PC, BAD_PC);
9576
          reject_bad_reg (Rn);
9577
 
9578
          /* If we get here, it can't be done in 16 bits.  */
9579
          constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
9580
                      _("shift must be constant"));
9581
          inst.instruction = THUMB_OP32 (inst.instruction);
9582
          inst.instruction |= Rd << 8;
9583
          inst.instruction |= Rs << 16;
9584 160 khays
          constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
9585
                      _("shift value over 3 not allowed in thumb mode"));
9586
          constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
9587
                      _("only LSL shift allowed in thumb mode"));
9588 16 khays
          encode_thumb32_shifted_operand (2);
9589
        }
9590
    }
9591
  else
9592
    {
9593
      constraint (inst.instruction == T_MNEM_adds
9594
                  || inst.instruction == T_MNEM_subs,
9595
                  BAD_THUMB32);
9596
 
9597
      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
9598
        {
9599
          constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
9600
                      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
9601
                      BAD_HIREG);
9602
 
9603
          inst.instruction = (inst.instruction == T_MNEM_add
9604
                              ? 0x0000 : 0x8000);
9605
          inst.instruction |= (Rd << 4) | Rs;
9606
          inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
9607
          return;
9608
        }
9609
 
9610
      Rn = inst.operands[2].reg;
9611
      constraint (inst.operands[2].shifted, _("unshifted register required"));
9612
 
9613
      /* We now have Rd, Rs, and Rn set to registers.  */
9614
      if (Rd > 7 || Rs > 7 || Rn > 7)
9615
        {
9616
          /* Can't do this for SUB.      */
9617
          constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
9618
          inst.instruction = T_OPCODE_ADD_HI;
9619
          inst.instruction |= (Rd & 8) << 4;
9620
          inst.instruction |= (Rd & 7);
9621
          if (Rs == Rd)
9622
            inst.instruction |= Rn << 3;
9623
          else if (Rn == Rd)
9624
            inst.instruction |= Rs << 3;
9625
          else
9626
            constraint (1, _("dest must overlap one source register"));
9627
        }
9628
      else
9629
        {
9630
          inst.instruction = (inst.instruction == T_MNEM_add
9631
                              ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
9632
          inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
9633
        }
9634
    }
9635
}
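
/* Illustration of the selection logic above (unified syntax, examples
   only): "add r1, sp, #8" with no size qualifier picks the narrow
   T_MNEM_add_sp form and leaves relaxation enabled; "adds.w r1, r2, #8"
   forces THUMB_OP32 with BFD_RELOC_ARM_T32_IMMEDIATE; and
   "subs pc, lr, #4" is rewritten into the dedicated T2_SUBS_PC_LR
   encoding.  */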
9636
 
9637
static void
9638
do_t_adr (void)
9639
{
9640
  unsigned Rd;
9641
 
9642
  Rd = inst.operands[0].reg;
9643
  reject_bad_reg (Rd);
9644
 
9645
  if (unified_syntax && inst.size_req == 0 && Rd <= 7)
9646
    {
9647
      /* Defer to section relaxation.  */
9648
      inst.relax = inst.instruction;
9649
      inst.instruction = THUMB_OP16 (inst.instruction);
9650
      inst.instruction |= Rd << 4;
9651
    }
9652
  else if (unified_syntax && inst.size_req != 2)
9653
    {
9654
      /* Generate a 32-bit opcode.  */
9655
      inst.instruction = THUMB_OP32 (inst.instruction);
9656
      inst.instruction |= Rd << 8;
9657
      inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
9658
      inst.reloc.pc_rel = 1;
9659
    }
9660
  else
9661
    {
9662
      /* Generate a 16-bit opcode.  */
9663
      inst.instruction = THUMB_OP16 (inst.instruction);
9664
      inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
9665
      inst.reloc.exp.X_add_number -= 4; /* PC relative adjust.  */
9666
      inst.reloc.pc_rel = 1;
9667
 
9668
      inst.instruction |= Rd << 4;
9669
    }
9670
}
9671
 
9672
/* Arithmetic instructions for which there is just one 16-bit
9673
   instruction encoding, and it allows only two low registers.
9674
   For maximal compatibility with ARM syntax, we allow three register
9675
   operands even when Thumb-32 instructions are not available, as long
9676
   as the first two are identical.  For instance, both "sbc r0,r1" and
9677
   "sbc r0,r0,r1" are allowed.  */
9678
static void
9679
do_t_arit3 (void)
9680
{
9681
  int Rd, Rs, Rn;
9682
 
9683
  Rd = inst.operands[0].reg;
9684
  Rs = (inst.operands[1].present
9685
        ? inst.operands[1].reg    /* Rd, Rs, foo */
9686
        : inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
9687
  Rn = inst.operands[2].reg;
9688
 
9689
  reject_bad_reg (Rd);
9690
  reject_bad_reg (Rs);
9691
  if (inst.operands[2].isreg)
9692
    reject_bad_reg (Rn);
9693
 
9694
  if (unified_syntax)
9695
    {
9696
      if (!inst.operands[2].isreg)
9697
        {
9698
          /* For an immediate, we always generate a 32-bit opcode;
9699
             section relaxation will shrink it later if possible.  */
9700
          inst.instruction = THUMB_OP32 (inst.instruction);
9701
          inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9702
          inst.instruction |= Rd << 8;
9703
          inst.instruction |= Rs << 16;
9704
          inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9705
        }
9706
      else
9707
        {
9708
          bfd_boolean narrow;
9709
 
9710
          /* See if we can do this with a 16-bit instruction.  */
9711
          if (THUMB_SETS_FLAGS (inst.instruction))
9712
            narrow = !in_it_block ();
9713
          else
9714
            narrow = in_it_block ();
9715
 
9716
          if (Rd > 7 || Rn > 7 || Rs > 7)
9717
            narrow = FALSE;
9718
          if (inst.operands[2].shifted)
9719
            narrow = FALSE;
9720
          if (inst.size_req == 4)
9721
            narrow = FALSE;
9722
 
9723
          if (narrow
9724
              && Rd == Rs)
9725
            {
9726
              inst.instruction = THUMB_OP16 (inst.instruction);
9727
              inst.instruction |= Rd;
9728
              inst.instruction |= Rn << 3;
9729
              return;
9730
            }
9731
 
9732
          /* If we get here, it can't be done in 16 bits.  */
9733
          constraint (inst.operands[2].shifted
9734
                      && inst.operands[2].immisreg,
9735
                      _("shift must be constant"));
9736
          inst.instruction = THUMB_OP32 (inst.instruction);
9737
          inst.instruction |= Rd << 8;
9738
          inst.instruction |= Rs << 16;
9739
          encode_thumb32_shifted_operand (2);
9740
        }
9741
    }
9742
  else
9743
    {
9744
      /* On its face this is a lie - the instruction does set the
9745
         flags.  However, the only supported mnemonic in this mode
9746
         says it doesn't.  */
9747
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
9748
 
9749
      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
9750
                  _("unshifted register required"));
9751
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
9752
      constraint (Rd != Rs,
9753
                  _("dest and source1 must be the same register"));
9754
 
9755
      inst.instruction = THUMB_OP16 (inst.instruction);
9756
      inst.instruction |= Rd;
9757
      inst.instruction |= Rn << 3;
9758
    }
9759
}
9760
 
9761
/* Similarly, but for instructions where the arithmetic operation is
9762
   commutative, so we can allow either of them to be different from
9763
   the destination operand in a 16-bit instruction.  For instance, all
9764
   three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
9765
   accepted.  */
9766
static void
9767
do_t_arit3c (void)
9768
{
9769
  int Rd, Rs, Rn;
9770
 
9771
  Rd = inst.operands[0].reg;
9772
  Rs = (inst.operands[1].present
9773
        ? inst.operands[1].reg    /* Rd, Rs, foo */
9774
        : inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
9775
  Rn = inst.operands[2].reg;
9776
 
9777
  reject_bad_reg (Rd);
9778
  reject_bad_reg (Rs);
9779
  if (inst.operands[2].isreg)
9780
    reject_bad_reg (Rn);
9781
 
9782
  if (unified_syntax)
9783
    {
9784
      if (!inst.operands[2].isreg)
9785
        {
9786
          /* For an immediate, we always generate a 32-bit opcode;
9787
             section relaxation will shrink it later if possible.  */
9788
          inst.instruction = THUMB_OP32 (inst.instruction);
9789
          inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9790
          inst.instruction |= Rd << 8;
9791
          inst.instruction |= Rs << 16;
9792
          inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9793
        }
9794
      else
9795
        {
9796
          bfd_boolean narrow;
9797
 
9798
          /* See if we can do this with a 16-bit instruction.  */
9799
          if (THUMB_SETS_FLAGS (inst.instruction))
9800
            narrow = !in_it_block ();
9801
          else
9802
            narrow = in_it_block ();
9803
 
9804
          if (Rd > 7 || Rn > 7 || Rs > 7)
9805
            narrow = FALSE;
9806
          if (inst.operands[2].shifted)
9807
            narrow = FALSE;
9808
          if (inst.size_req == 4)
9809
            narrow = FALSE;
9810
 
9811
          if (narrow)
9812
            {
9813
              if (Rd == Rs)
9814
                {
9815
                  inst.instruction = THUMB_OP16 (inst.instruction);
9816
                  inst.instruction |= Rd;
9817
                  inst.instruction |= Rn << 3;
9818
                  return;
9819
                }
9820
              if (Rd == Rn)
9821
                {
9822
                  inst.instruction = THUMB_OP16 (inst.instruction);
9823
                  inst.instruction |= Rd;
9824
                  inst.instruction |= Rs << 3;
9825
                  return;
9826
                }
9827
            }
9828
 
9829
          /* If we get here, it can't be done in 16 bits.  */
9830
          constraint (inst.operands[2].shifted
9831
                      && inst.operands[2].immisreg,
9832
                      _("shift must be constant"));
9833
          inst.instruction = THUMB_OP32 (inst.instruction);
9834
          inst.instruction |= Rd << 8;
9835
          inst.instruction |= Rs << 16;
9836
          encode_thumb32_shifted_operand (2);
9837
        }
9838
    }
9839
  else
9840
    {
9841
      /* On its face this is a lie - the instruction does set the
9842
         flags.  However, the only supported mnemonic in this mode
9843
         says it doesn't.  */
9844
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
9845
 
9846
      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
9847
                  _("unshifted register required"));
9848
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
9849
 
9850
      inst.instruction = THUMB_OP16 (inst.instruction);
9851
      inst.instruction |= Rd;
9852
 
9853
      if (Rd == Rs)
9854
        inst.instruction |= Rn << 3;
9855
      else if (Rd == Rn)
9856
        inst.instruction |= Rs << 3;
9857
      else
9858
        constraint (1, _("dest must overlap one source register"));
9859
    }
9860
}
9861
 
9862
static void
9863
do_t_barrier (void)
9864
{
9865
  if (inst.operands[0].present)
9866
    {
9867
      constraint ((inst.instruction & 0xf0) != 0x40
9868
                  && (inst.operands[0].imm > 0xf
9869
                      || inst.operands[0].imm < 0x0),
9870
                  _("bad barrier type"));
9871
      inst.instruction |= inst.operands[0].imm;
9872
    }
9873
  else
9874
    inst.instruction |= 0xf;
9875
}
9876
 
9877
static void
9878
do_t_bfc (void)
9879
{
9880
  unsigned Rd;
9881
  unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
9882
  constraint (msb > 32, _("bit-field extends past end of register"));
9883
  /* The instruction encoding stores the LSB and MSB,
9884
     not the LSB and width.  */
9885
  Rd = inst.operands[0].reg;
9886
  reject_bad_reg (Rd);
9887
  inst.instruction |= Rd << 8;
9888
  inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
9889
  inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
9890
  inst.instruction |= msb - 1;
9891
}
9892
 
9893
static void
9894
do_t_bfi (void)
9895
{
9896
  int Rd, Rn;
9897
  unsigned int msb;
9898
 
9899
  Rd = inst.operands[0].reg;
9900
  reject_bad_reg (Rd);
9901
 
9902
  /* #0 in second position is alternative syntax for bfc, which is
9903
     the same instruction but with REG_PC in the Rm field.  */
9904
  if (!inst.operands[1].isreg)
9905
    Rn = REG_PC;
9906
  else
9907
    {
9908
      Rn = inst.operands[1].reg;
9909
      reject_bad_reg (Rn);
9910
    }
9911
 
9912
  msb = inst.operands[2].imm + inst.operands[3].imm;
9913
  constraint (msb > 32, _("bit-field extends past end of register"));
9914
  /* The instruction encoding stores the LSB and MSB,
9915
     not the LSB and width.  */
9916
  inst.instruction |= Rd << 8;
9917
  inst.instruction |= Rn << 16;
9918
  inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
9919
  inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
9920
  inst.instruction |= msb - 1;
9921
}
9922
 
9923
static void
9924
do_t_bfx (void)
9925
{
9926
  unsigned Rd, Rn;
9927
 
9928
  Rd = inst.operands[0].reg;
9929
  Rn = inst.operands[1].reg;
9930
 
9931
  reject_bad_reg (Rd);
9932
  reject_bad_reg (Rn);
9933
 
9934
  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
9935
              _("bit-field extends past end of register"));
9936
  inst.instruction |= Rd << 8;
9937
  inst.instruction |= Rn << 16;
9938
  inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
9939
  inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
9940
  inst.instruction |= inst.operands[3].imm - 1;
9941
}
9942
 
9943
/* ARM V5 Thumb BLX (argument parse)
9944
        BLX <target_addr>       which is BLX(1)
9945
        BLX <Rm>                which is BLX(2)
9946
   Unfortunately, there are two different opcodes for this mnemonic.
9947
   So, the insns[].value is not used, and the code here zaps values
9948
        into inst.instruction.
9949
 
9950
   ??? How to take advantage of the additional two bits of displacement
9951
   available in Thumb32 mode?  Need new relocation?  */
9952
 
9953
static void
9954
do_t_blx (void)
9955
{
9956
  set_it_insn_type_last ();
9957
 
9958
  if (inst.operands[0].isreg)
9959
    {
9960
      constraint (inst.operands[0].reg == REG_PC, BAD_PC);
9961
      /* We have a register, so this is BLX(2).  */
9962
      inst.instruction |= inst.operands[0].reg << 3;
9963
    }
9964
  else
9965
    {
9966
      /* No register.  This must be BLX(1).  */
9967
      inst.instruction = 0xf000e800;
9968
      encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
9969
    }
9970
}
9971
 
9972
static void
9973
do_t_branch (void)
9974
{
9975
  int opcode;
9976
  int cond;
9977
  int reloc;
9978
 
9979
  cond = inst.cond;
9980
  set_it_insn_type (IF_INSIDE_IT_LAST_INSN);
9981
 
9982
  if (in_it_block ())
9983
    {
9984
      /* Conditional branches inside IT blocks are encoded as unconditional
9985
         branches.  */
9986
      cond = COND_ALWAYS;
9987
    }
9988
  else
9989
    cond = inst.cond;
9990
 
9991
  if (cond != COND_ALWAYS)
9992
    opcode = T_MNEM_bcond;
9993
  else
9994
    opcode = inst.instruction;
9995
 
9996
  if (unified_syntax
9997
      && (inst.size_req == 4
9998
          || (inst.size_req != 2
9999
              && (inst.operands[0].hasreloc
10000
                  || inst.reloc.exp.X_op == O_constant))))
10001
    {
10002
      inst.instruction = THUMB_OP32(opcode);
10003
      if (cond == COND_ALWAYS)
10004
        reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
10005
      else
10006
        {
10007
          gas_assert (cond != 0xF);
10008
          inst.instruction |= cond << 22;
10009
          reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
10010
        }
10011
    }
10012
  else
10013
    {
10014
      inst.instruction = THUMB_OP16(opcode);
10015
      if (cond == COND_ALWAYS)
10016
        reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
10017
      else
10018
        {
10019
          inst.instruction |= cond << 8;
10020
          reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
10021
        }
10022
      /* Allow section relaxation.  */
10023
      if (unified_syntax && inst.size_req != 2)
10024
        inst.relax = opcode;
10025
    }
10026
  inst.reloc.type = reloc;
10027
  inst.reloc.pc_rel = 1;
10028
}
10029
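/* Sketch of the reloc selection above (ranges are approximate and given
   only for orientation):

     b.w   label    -> BFD_RELOC_THUMB_PCREL_BRANCH25  (roughly +/-16MB)
     beq.w label    -> BFD_RELOC_THUMB_PCREL_BRANCH20  (roughly +/-1MB)
     b.n   label    -> BFD_RELOC_THUMB_PCREL_BRANCH12  (roughly +/-2KB)
     beq.n label    -> BFD_RELOC_THUMB_PCREL_BRANCH9   (roughly +/-256B)

   With no .n/.w suffix under unified syntax, the 16-bit form is typically
   preferred and inst.relax is set so the fragment can later grow to the
   32-bit encoding if the target turns out to be out of range.  */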
 
10030
static void
10031
do_t_bkpt (void)
10032
{
10033
  constraint (inst.cond != COND_ALWAYS,
10034
              _("instruction is always unconditional"));
10035
  if (inst.operands[0].present)
10036
    {
10037
      constraint (inst.operands[0].imm > 255,
10038
                  _("immediate value out of range"));
10039
      inst.instruction |= inst.operands[0].imm;
10040
      set_it_insn_type (NEUTRAL_IT_INSN);
10041
    }
10042
}
10043
 
10044
static void
10045
do_t_branch23 (void)
10046
{
10047
  set_it_insn_type_last ();
10048
  encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);
10049
 
10050
  /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
10051
     this file.  We used to simply ignore the PLT reloc type here --
10052
     the branch encoding is now needed to deal with TLSCALL relocs.
10053
     So if we see a PLT reloc now, put it back to how it used to be to
10054
     keep the preexisting behaviour.  */
10055
  if (inst.reloc.type == BFD_RELOC_ARM_PLT32)
10056
    inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
10057
 
10058
#if defined(OBJ_COFF)
10059
  /* If the destination of the branch is a defined symbol which does not have
10060
     the THUMB_FUNC attribute, then we must be calling a function which has
10061
     the (interfacearm) attribute.  We look for the Thumb entry point to that
10062
     function and change the branch to refer to that function instead.  */
10063
  if (   inst.reloc.exp.X_op == O_symbol
10064
      && inst.reloc.exp.X_add_symbol != NULL
10065
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
10066
      && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
10067
    inst.reloc.exp.X_add_symbol =
10068
      find_real_start (inst.reloc.exp.X_add_symbol);
10069
#endif
10070
}
10071
 
10072
static void
10073
do_t_bx (void)
10074
{
10075
  set_it_insn_type_last ();
10076
  inst.instruction |= inst.operands[0].reg << 3;
10077
  /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.  The reloc
10078
     should cause the alignment to be checked once it is known.  This is
10079
     because BX PC only works if the instruction is word aligned.  */
10080
}
10081
 
10082
static void
10083
do_t_bxj (void)
10084
{
10085
  int Rm;
10086
 
10087
  set_it_insn_type_last ();
10088
  Rm = inst.operands[0].reg;
10089
  reject_bad_reg (Rm);
10090
  inst.instruction |= Rm << 16;
10091
}
10092
 
10093
static void
10094
do_t_clz (void)
10095
{
10096
  unsigned Rd;
10097
  unsigned Rm;
10098
 
10099
  Rd = inst.operands[0].reg;
10100
  Rm = inst.operands[1].reg;
10101
 
10102
  reject_bad_reg (Rd);
10103
  reject_bad_reg (Rm);
10104
 
10105
  inst.instruction |= Rd << 8;
10106
  inst.instruction |= Rm << 16;
10107
  inst.instruction |= Rm;
10108
}
10109
 
10110
static void
10111
do_t_cps (void)
10112
{
10113
  set_it_insn_type (OUTSIDE_IT_INSN);
10114
  inst.instruction |= inst.operands[0].imm;
10115
}
10116
 
10117
static void
10118
do_t_cpsi (void)
10119
{
10120
  set_it_insn_type (OUTSIDE_IT_INSN);
10121
  if (unified_syntax
10122
      && (inst.operands[1].present || inst.size_req == 4)
10123
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
10124
    {
10125
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
10126
      inst.instruction = 0xf3af8000;
10127
      inst.instruction |= imod << 9;
10128
      inst.instruction |= inst.operands[0].imm << 5;
10129
      if (inst.operands[1].present)
10130
        inst.instruction |= 0x100 | inst.operands[1].imm;
10131
    }
10132
  else
10133
    {
10134
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
10135
                  && (inst.operands[0].imm & 4),
10136
                  _("selected processor does not support 'A' form "
10137
                    "of this instruction"));
10138
      constraint (inst.operands[1].present || inst.size_req == 4,
10139
                  _("Thumb does not support the 2-argument "
10140
                    "form of this instruction"));
10141
      inst.instruction |= inst.operands[0].imm;
10142
    }
10143
}
10144
 
10145
/* THUMB CPY instruction (argument parse).  */
10146
 
10147
static void
10148
do_t_cpy (void)
10149
{
10150
  if (inst.size_req == 4)
10151
    {
10152
      inst.instruction = THUMB_OP32 (T_MNEM_mov);
10153
      inst.instruction |= inst.operands[0].reg << 8;
10154
      inst.instruction |= inst.operands[1].reg;
10155
    }
10156
  else
10157
    {
10158
      inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
10159
      inst.instruction |= (inst.operands[0].reg & 0x7);
10160
      inst.instruction |= inst.operands[1].reg << 3;
10161
    }
10162
}
10163
 
10164
static void
10165
do_t_cbz (void)
10166
{
10167
  set_it_insn_type (OUTSIDE_IT_INSN);
10168
  constraint (inst.operands[0].reg > 7, BAD_HIREG);
10169
  inst.instruction |= inst.operands[0].reg;
10170
  inst.reloc.pc_rel = 1;
10171
  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
10172
}
10173
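/* Minimal example of the constraints above:

     cbz  r2, 1f    @ accepted: low register, outside an IT block
     cbz  r9, 1f    @ rejected by the BAD_HIREG constraint

   CBZ/CBNZ can only branch forwards over a short distance (hence the
   BRANCH7 reloc), so longer skips need a separate compare and branch.  */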
 
10174
static void
10175
do_t_dbg (void)
10176
{
10177
  inst.instruction |= inst.operands[0].imm;
10178
}
10179
 
10180
static void
10181
do_t_div (void)
10182
{
10183
  unsigned Rd, Rn, Rm;
10184
 
10185
  Rd = inst.operands[0].reg;
10186
  Rn = (inst.operands[1].present
10187
        ? inst.operands[1].reg : Rd);
10188
  Rm = inst.operands[2].reg;
10189
 
10190
  reject_bad_reg (Rd);
10191
  reject_bad_reg (Rn);
10192
  reject_bad_reg (Rm);
10193
 
10194
  inst.instruction |= Rd << 8;
10195
  inst.instruction |= Rn << 16;
10196
  inst.instruction |= Rm;
10197
}
10198
 
10199
static void
10200
do_t_hint (void)
10201
{
10202
  if (unified_syntax && inst.size_req == 4)
10203
    inst.instruction = THUMB_OP32 (inst.instruction);
10204
  else
10205
    inst.instruction = THUMB_OP16 (inst.instruction);
10206
}
10207
 
10208
static void
10209
do_t_it (void)
10210
{
10211
  unsigned int cond = inst.operands[0].imm;
10212
 
10213
  set_it_insn_type (IT_INSN);
10214
  now_it.mask = (inst.instruction & 0xf) | 0x10;
10215
  now_it.cc = cond;
10216
 
10217
  /* If the condition is a negative condition, invert the mask.  */
10218
  if ((cond & 0x1) == 0x0)
10219
    {
10220
      unsigned int mask = inst.instruction & 0x000f;
10221
 
10222
      if ((mask & 0x7) == 0)
10223
        /* no conversion needed */;
10224
      else if ((mask & 0x3) == 0)
10225
        mask ^= 0x8;
10226
      else if ((mask & 0x1) == 0)
10227
        mask ^= 0xC;
10228
      else
10229
        mask ^= 0xE;
10230
 
10231
      inst.instruction &= 0xfff0;
10232
      inst.instruction |= mask;
10233
    }
10234
 
10235
  inst.instruction |= cond << 4;
10236
}
10237
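/* Worked example of the mask fix-up above, stated informally: in the
   architectural encoding each 'T' after the leading IT contributes
   firstcond[0] to the mask, each 'E' contributes its complement, and a
   trailing 1 terminates the pattern.  The parser builds the mask as if
   the condition were odd, so the code above only flips the pattern bits
   when firstcond[0] is 0.  E.g. for "ite eq" (EQ = 0b0000) the parsed
   mask (assumed here to be 0b0100) becomes 0b1100.  */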
 
10238
/* Helper function used for both push/pop and ldm/stm.  */
10239
static void
10240
encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
10241
{
10242
  bfd_boolean load;
10243
 
10244
  load = (inst.instruction & (1 << 20)) != 0;
10245
 
10246
  if (mask & (1 << 13))
10247
    inst.error =  _("SP not allowed in register list");
10248
 
10249
  if ((mask & (1 << base)) != 0
10250
      && writeback)
10251
    inst.error = _("having the base register in the register list when "
10252
                   "using write back is UNPREDICTABLE");
10253
 
10254
  if (load)
10255
    {
10256
      if (mask & (1 << 15))
10257
        {
10258
          if (mask & (1 << 14))
10259
            inst.error = _("LR and PC should not both be in register list");
10260
          else
10261
            set_it_insn_type_last ();
10262
        }
10263
    }
10264
  else
10265
    {
10266
      if (mask & (1 << 15))
10267
        inst.error = _("PC not allowed in register list");
10268
    }
10269
 
10270
  if ((mask & (mask - 1)) == 0)
10271
    {
10272
      /* Single register transfers implemented as str/ldr.  */
10273
      if (writeback)
10274
        {
10275
          if (inst.instruction & (1 << 23))
10276
            inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
10277
          else
10278
            inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
10279
        }
10280
      else
10281
        {
10282
          if (inst.instruction & (1 << 23))
10283
            inst.instruction = 0x00800000; /* ia -> [base] */
10284
          else
10285
            inst.instruction = 0x00000c04; /* db -> [base, #-4] */
10286
        }
10287
 
10288
      inst.instruction |= 0xf8400000;
10289
      if (load)
10290
        inst.instruction |= 0x00100000;
10291
 
10292
      mask = ffs (mask) - 1;
10293
      mask <<= 12;
10294
    }
10295
  else if (writeback)
10296
    inst.instruction |= WRITE_BACK;
10297
 
10298
  inst.instruction |= mask;
10299
  inst.instruction |= base << 16;
10300
}
10301
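/* Illustration of the single-register special case handled above:

     ldmia.w r0!, {r3}   is emitted as   ldr r3, [r0], #4
     stmdb   r0!, {r3}   is emitted as   str r3, [r0, #-4]!

   i.e. a one-element register list is rewritten into the equivalent
   LDR/STR form rather than a genuine LDM/STM encoding.  */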
 
10302
static void
10303
do_t_ldmstm (void)
10304
{
10305
  /* This really doesn't seem worth it.  */
10306
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
10307
              _("expression too complex"));
10308
  constraint (inst.operands[1].writeback,
10309
              _("Thumb load/store multiple does not support {reglist}^"));
10310
 
10311
  if (unified_syntax)
10312
    {
10313
      bfd_boolean narrow;
10314
      unsigned mask;
10315
 
10316
      narrow = FALSE;
10317
      /* See if we can use a 16-bit instruction.  */
10318
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
10319
          && inst.size_req != 4
10320
          && !(inst.operands[1].imm & ~0xff))
10321
        {
10322
          mask = 1 << inst.operands[0].reg;
10323
 
10324
          if (inst.operands[0].reg <= 7)
10325
            {
10326
              if (inst.instruction == T_MNEM_stmia
10327
                  ? inst.operands[0].writeback
10328
                  : (inst.operands[0].writeback
10329
                     == !(inst.operands[1].imm & mask)))
10330
                {
10331
                  if (inst.instruction == T_MNEM_stmia
10332
                      && (inst.operands[1].imm & mask)
10333
                      && (inst.operands[1].imm & (mask - 1)))
10334
                    as_warn (_("value stored for r%d is UNKNOWN"),
10335
                             inst.operands[0].reg);
10336
 
10337
                  inst.instruction = THUMB_OP16 (inst.instruction);
10338
                  inst.instruction |= inst.operands[0].reg << 8;
10339
                  inst.instruction |= inst.operands[1].imm;
10340
                  narrow = TRUE;
10341
                }
10342
              else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
10343
                {
10344
                     /* This means 1 register in reg list, in one of 3 situations:
10345
                     1. Instruction is stmia, but without writeback.
10346
                     2. ldmia without writeback, but with Rn not in
10347
                        reglist.
10348
                     3. ldmia with writeback, but with Rn in reglist.
10349
                     Case 3 is UNPREDICTABLE behaviour, so we handle
10350
                     case 1 and 2 which can be converted into a 16-bit
10351
                     str or ldr. The SP cases are handled below.  */
10352
                  unsigned long opcode;
10353
                  /* First, record an error for Case 3.  */
10354
                  if (inst.operands[1].imm & mask
10355
                      && inst.operands[0].writeback)
10356
                    inst.error =
10357
                        _("having the base register in the register list when "
10358
                          "using write back is UNPREDICTABLE");
10359
 
10360
                  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
10361
                                                             : T_MNEM_ldr);
10362
                  inst.instruction = THUMB_OP16 (opcode);
10363
                  inst.instruction |= inst.operands[0].reg << 3;
10364
                  inst.instruction |= (ffs (inst.operands[1].imm)-1);
10365
                  narrow = TRUE;
10366
                }
10367
            }
10368
          else if (inst.operands[0].reg == REG_SP)
10369
            {
10370
              if (inst.operands[0].writeback)
10371
                {
10372
                  inst.instruction =
10373
                        THUMB_OP16 (inst.instruction == T_MNEM_stmia
10374
                                    ? T_MNEM_push : T_MNEM_pop);
10375
                  inst.instruction |= inst.operands[1].imm;
10376
                  narrow = TRUE;
10377
                }
10378
              else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
10379
                {
10380
                  inst.instruction =
10381
                        THUMB_OP16 (inst.instruction == T_MNEM_stmia
10382
                                    ? T_MNEM_str_sp : T_MNEM_ldr_sp);
10383
                  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
10384
                  narrow = TRUE;
10385
                }
10386
            }
10387
        }
10388
 
10389
      if (!narrow)
10390
        {
10391
          if (inst.instruction < 0xffff)
10392
            inst.instruction = THUMB_OP32 (inst.instruction);
10393
 
10394
          encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
10395
                                inst.operands[0].writeback);
10396
        }
10397
    }
10398
  else
10399
    {
10400
      constraint (inst.operands[0].reg > 7
10401
                  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
10402
      constraint (inst.instruction != T_MNEM_ldmia
10403
                  && inst.instruction != T_MNEM_stmia,
10404
                  _("Thumb-2 instruction only valid in unified syntax"));
10405
      if (inst.instruction == T_MNEM_stmia)
10406
        {
10407
          if (!inst.operands[0].writeback)
10408
            as_warn (_("this instruction will write back the base register"));
10409
          if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
10410
              && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
10411
            as_warn (_("value stored for r%d is UNKNOWN"),
10412
                     inst.operands[0].reg);
10413
        }
10414
      else
10415
        {
10416
          if (!inst.operands[0].writeback
10417
              && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
10418
            as_warn (_("this instruction will write back the base register"));
10419
          else if (inst.operands[0].writeback
10420
                   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
10421
            as_warn (_("this instruction will not write back the base register"));
10422
        }
10423
 
10424
      inst.instruction = THUMB_OP16 (inst.instruction);
10425
      inst.instruction |= inst.operands[0].reg << 8;
10426
      inst.instruction |= inst.operands[1].imm;
10427
    }
10428
}
10429
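/* A few examples of the narrowing decisions above (low registers,
   unified syntax and no .w suffix assumed):

     stmia r0!, {r1, r2}   -> 16-bit STMIA
     ldmia r0,  {r1}       -> rewritten to the 16-bit "ldr r1, [r0]"
     ldmia sp!, {r0-r3}    -> rewritten to the 16-bit POP
     ldmdb r0,  {r1, r2}   -> always the 32-bit Thumb-2 encoding  */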
 
10430
static void
10431
do_t_ldrex (void)
10432
{
10433
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
10434
              || inst.operands[1].postind || inst.operands[1].writeback
10435
              || inst.operands[1].immisreg || inst.operands[1].shifted
10436
              || inst.operands[1].negative,
10437
              BAD_ADDR_MODE);
10438
 
10439
  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
10440
 
10441
  inst.instruction |= inst.operands[0].reg << 12;
10442
  inst.instruction |= inst.operands[1].reg << 16;
10443
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
10444
}
10445
 
10446
static void
10447
do_t_ldrexd (void)
10448
{
10449
  if (!inst.operands[1].present)
10450
    {
10451
      constraint (inst.operands[0].reg == REG_LR,
10452
                  _("r14 not allowed as first register "
10453
                    "when second register is omitted"));
10454
      inst.operands[1].reg = inst.operands[0].reg + 1;
10455
    }
10456
  constraint (inst.operands[0].reg == inst.operands[1].reg,
10457
              BAD_OVERLAP);
10458
 
10459
  inst.instruction |= inst.operands[0].reg << 12;
10460
  inst.instruction |= inst.operands[1].reg << 8;
10461
  inst.instruction |= inst.operands[2].reg << 16;
10462
}
10463
 
10464
static void
10465
do_t_ldst (void)
10466
{
10467
  unsigned long opcode;
10468
  int Rn;
10469
 
10470
  if (inst.operands[0].isreg
10471
      && !inst.operands[0].preind
10472
      && inst.operands[0].reg == REG_PC)
10473
    set_it_insn_type_last ();
10474
 
10475
  opcode = inst.instruction;
10476
  if (unified_syntax)
10477
    {
10478
      if (!inst.operands[1].isreg)
10479
        {
10480
          if (opcode <= 0xffff)
10481
            inst.instruction = THUMB_OP32 (opcode);
10482
          if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
10483
            return;
10484
        }
10485
      if (inst.operands[1].isreg
10486
          && !inst.operands[1].writeback
10487
          && !inst.operands[1].shifted && !inst.operands[1].postind
10488
          && !inst.operands[1].negative && inst.operands[0].reg <= 7
10489
          && opcode <= 0xffff
10490
          && inst.size_req != 4)
10491
        {
10492
          /* Insn may have a 16-bit form.  */
10493
          Rn = inst.operands[1].reg;
10494
          if (inst.operands[1].immisreg)
10495
            {
10496
              inst.instruction = THUMB_OP16 (opcode);
10497
              /* [Rn, Rm] register-offset form.  */
10498
              if (Rn <= 7 && inst.operands[1].imm <= 7)
10499
                goto op16;
10500
              else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
10501
                reject_bad_reg (inst.operands[1].imm);
10502
            }
10503
          else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
10504
                    && opcode != T_MNEM_ldrsb)
10505
                   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
10506
                   || (Rn == REG_SP && opcode == T_MNEM_str))
10507
            {
10508
              /* [Rn, #const] */
10509
              if (Rn > 7)
10510
                {
10511
                  if (Rn == REG_PC)
10512
                    {
10513
                      if (inst.reloc.pc_rel)
10514
                        opcode = T_MNEM_ldr_pc2;
10515
                      else
10516
                        opcode = T_MNEM_ldr_pc;
10517
                    }
10518
                  else
10519
                    {
10520
                      if (opcode == T_MNEM_ldr)
10521
                        opcode = T_MNEM_ldr_sp;
10522
                      else
10523
                        opcode = T_MNEM_str_sp;
10524
                    }
10525
                  inst.instruction = inst.operands[0].reg << 8;
10526
                }
10527
              else
10528
                {
10529
                  inst.instruction = inst.operands[0].reg;
10530
                  inst.instruction |= inst.operands[1].reg << 3;
10531
                }
10532
              inst.instruction |= THUMB_OP16 (opcode);
10533
              if (inst.size_req == 2)
10534
                inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
10535
              else
10536
                inst.relax = opcode;
10537
              return;
10538
            }
10539
        }
10540
      /* Definitely a 32-bit variant.  */
10541
 
10542
      /* Warning for Erratum 752419.  */
10543
      if (opcode == T_MNEM_ldr
10544
          && inst.operands[0].reg == REG_SP
10545
          && inst.operands[1].writeback == 1
10546
          && !inst.operands[1].immisreg)
10547
        {
10548
          if (no_cpu_selected ()
10549
              || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
10550
                  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
10551
                  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
10552
            as_warn (_("This instruction may be unpredictable "
10553
                       "if executed on M-profile cores "
10554
                       "with interrupts enabled."));
10555
        }
10556
 
10557
      /* Do some validations regarding addressing modes.  */
10558 163 khays
      if (inst.operands[1].immisreg)
10559 16 khays
        reject_bad_reg (inst.operands[1].imm);
10560
 
10561 163 khays
      constraint (inst.operands[1].writeback == 1
10562
                  && inst.operands[0].reg == inst.operands[1].reg,
10563
                  BAD_OVERLAP);
10564
 
10565 16 khays
      inst.instruction = THUMB_OP32 (opcode);
10566
      inst.instruction |= inst.operands[0].reg << 12;
10567
      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
10568 163 khays
      check_ldr_r15_aligned ();
10569 16 khays
      return;
10570
    }
10571
 
10572
  constraint (inst.operands[0].reg > 7, BAD_HIREG);
10573
 
10574
  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
10575
    {
10576
      /* Only [Rn,Rm] is acceptable.  */
10577
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
10578
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
10579
                  || inst.operands[1].postind || inst.operands[1].shifted
10580
                  || inst.operands[1].negative,
10581
                  _("Thumb does not support this addressing mode"));
10582
      inst.instruction = THUMB_OP16 (inst.instruction);
10583
      goto op16;
10584
    }
10585
 
10586
  inst.instruction = THUMB_OP16 (inst.instruction);
10587
  if (!inst.operands[1].isreg)
10588
    if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
10589
      return;
10590
 
10591
  constraint (!inst.operands[1].preind
10592
              || inst.operands[1].shifted
10593
              || inst.operands[1].writeback,
10594
              _("Thumb does not support this addressing mode"));
10595
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
10596
    {
10597
      constraint (inst.instruction & 0x0600,
10598
                  _("byte or halfword not valid for base register"));
10599
      constraint (inst.operands[1].reg == REG_PC
10600
                  && !(inst.instruction & THUMB_LOAD_BIT),
10601
                  _("r15 based store not allowed"));
10602
      constraint (inst.operands[1].immisreg,
10603
                  _("invalid base register for register offset"));
10604
 
10605
      if (inst.operands[1].reg == REG_PC)
10606
        inst.instruction = T_OPCODE_LDR_PC;
10607
      else if (inst.instruction & THUMB_LOAD_BIT)
10608
        inst.instruction = T_OPCODE_LDR_SP;
10609
      else
10610
        inst.instruction = T_OPCODE_STR_SP;
10611
 
10612
      inst.instruction |= inst.operands[0].reg << 8;
10613
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
10614
      return;
10615
    }
10616
 
10617
  constraint (inst.operands[1].reg > 7, BAD_HIREG);
10618
  if (!inst.operands[1].immisreg)
10619
    {
10620
      /* Immediate offset.  */
10621
      inst.instruction |= inst.operands[0].reg;
10622
      inst.instruction |= inst.operands[1].reg << 3;
10623
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
10624
      return;
10625
    }
10626
 
10627
  /* Register offset.  */
10628
  constraint (inst.operands[1].imm > 7, BAD_HIREG);
10629
  constraint (inst.operands[1].negative,
10630
              _("Thumb does not support this addressing mode"));
10631
 
10632
 op16:
10633
  switch (inst.instruction)
10634
    {
10635
    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
10636
    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
10637
    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
10638
    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
10639
    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
10640
    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
10641
    case 0x5600 /* ldrsb */:
10642
    case 0x5e00 /* ldrsh */: break;
10643
    default: abort ();
10644
    }
10645
 
10646
  inst.instruction |= inst.operands[0].reg;
10647
  inst.instruction |= inst.operands[1].reg << 3;
10648
  inst.instruction |= inst.operands[1].imm << 6;
10649
}
10650
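/* A rough sketch of the 16-bit forms selected above (unified syntax,
   no size suffix given):

     ldr r1, [r2, #4]     -> 16-bit immediate-offset form
     ldr r1, [pc, #8]     -> 16-bit literal form (T_MNEM_ldr_pc here)
     str r1, [sp, #4]     -> 16-bit SP-relative form (T_MNEM_str_sp)
     ldr r1, [r2, r3]     -> 16-bit register-offset form

   Anything with writeback, a shifted index, a negative offset or a high
   destination register falls through to the 32-bit encoding path.  */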
 
10651
static void
10652
do_t_ldstd (void)
10653
{
10654
  if (!inst.operands[1].present)
10655
    {
10656
      inst.operands[1].reg = inst.operands[0].reg + 1;
10657
      constraint (inst.operands[0].reg == REG_LR,
10658
                  _("r14 not allowed here"));
10659 166 khays
      constraint (inst.operands[0].reg == REG_R12,
10660
                  _("r12 not allowed here"));
10661 16 khays
    }
10662 166 khays
 
10663
  if (inst.operands[2].writeback
10664
      && (inst.operands[0].reg == inst.operands[2].reg
10665
      || inst.operands[1].reg == inst.operands[2].reg))
10666
    as_warn (_("base register written back, and overlaps "
10667
               "one of transfer registers"));
10668
 
10669 16 khays
  inst.instruction |= inst.operands[0].reg << 12;
10670
  inst.instruction |= inst.operands[1].reg << 8;
10671
  encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
10672
}
10673
 
10674
static void
10675
do_t_ldstt (void)
10676
{
10677
  inst.instruction |= inst.operands[0].reg << 12;
10678
  encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
10679
}
10680
 
10681
static void
10682
do_t_mla (void)
10683
{
10684
  unsigned Rd, Rn, Rm, Ra;
10685
 
10686
  Rd = inst.operands[0].reg;
10687
  Rn = inst.operands[1].reg;
10688
  Rm = inst.operands[2].reg;
10689
  Ra = inst.operands[3].reg;
10690
 
10691
  reject_bad_reg (Rd);
10692
  reject_bad_reg (Rn);
10693
  reject_bad_reg (Rm);
10694
  reject_bad_reg (Ra);
10695
 
10696
  inst.instruction |= Rd << 8;
10697
  inst.instruction |= Rn << 16;
10698
  inst.instruction |= Rm;
10699
  inst.instruction |= Ra << 12;
10700
}
10701
 
10702
static void
10703
do_t_mlal (void)
10704
{
10705
  unsigned RdLo, RdHi, Rn, Rm;
10706
 
10707
  RdLo = inst.operands[0].reg;
10708
  RdHi = inst.operands[1].reg;
10709
  Rn = inst.operands[2].reg;
10710
  Rm = inst.operands[3].reg;
10711
 
10712
  reject_bad_reg (RdLo);
10713
  reject_bad_reg (RdHi);
10714
  reject_bad_reg (Rn);
10715
  reject_bad_reg (Rm);
10716
 
10717
  inst.instruction |= RdLo << 12;
10718
  inst.instruction |= RdHi << 8;
10719
  inst.instruction |= Rn << 16;
10720
  inst.instruction |= Rm;
10721
}
10722
 
10723
static void
10724
do_t_mov_cmp (void)
10725
{
10726
  unsigned Rn, Rm;
10727
 
10728
  Rn = inst.operands[0].reg;
10729
  Rm = inst.operands[1].reg;
10730
 
10731
  if (Rn == REG_PC)
10732
    set_it_insn_type_last ();
10733
 
10734
  if (unified_syntax)
10735
    {
10736
      int r0off = (inst.instruction == T_MNEM_mov
10737
                   || inst.instruction == T_MNEM_movs) ? 8 : 16;
10738
      unsigned long opcode;
10739
      bfd_boolean narrow;
10740
      bfd_boolean low_regs;
10741
 
10742
      low_regs = (Rn <= 7 && Rm <= 7);
10743
      opcode = inst.instruction;
10744
      if (in_it_block ())
10745
        narrow = opcode != T_MNEM_movs;
10746
      else
10747
        narrow = opcode != T_MNEM_movs || low_regs;
10748
      if (inst.size_req == 4
10749
          || inst.operands[1].shifted)
10750
        narrow = FALSE;
10751
 
10752
      /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
10753
      if (opcode == T_MNEM_movs && inst.operands[1].isreg
10754
          && !inst.operands[1].shifted
10755
          && Rn == REG_PC
10756
          && Rm == REG_LR)
10757
        {
10758
          inst.instruction = T2_SUBS_PC_LR;
10759
          return;
10760
        }
10761
 
10762
      if (opcode == T_MNEM_cmp)
10763
        {
10764
          constraint (Rn == REG_PC, BAD_PC);
10765
          if (narrow)
10766
            {
10767
              /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
10768
                 but valid.  */
10769
              warn_deprecated_sp (Rm);
10770
              /* R15 was documented as a valid choice for Rm in ARMv6,
10771
                 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
10772
                 tools reject R15, so we do too.  */
10773
              constraint (Rm == REG_PC, BAD_PC);
10774
            }
10775
          else
10776
            reject_bad_reg (Rm);
10777
        }
10778
      else if (opcode == T_MNEM_mov
10779
               || opcode == T_MNEM_movs)
10780
        {
10781
          if (inst.operands[1].isreg)
10782
            {
10783
              if (opcode == T_MNEM_movs)
10784
                {
10785
                  reject_bad_reg (Rn);
10786
                  reject_bad_reg (Rm);
10787
                }
10788
              else if (narrow)
10789
                {
10790
                  /* This is mov.n.  */
10791
                  if ((Rn == REG_SP || Rn == REG_PC)
10792
                      && (Rm == REG_SP || Rm == REG_PC))
10793
                    {
10794
                      as_warn (_("Use of r%u as a source register is "
10795
                                 "deprecated when r%u is the destination "
10796
                                 "register."), Rm, Rn);
10797
                    }
10798
                }
10799
              else
10800
                {
10801
                  /* This is mov.w.  */
10802
                  constraint (Rn == REG_PC, BAD_PC);
10803
                  constraint (Rm == REG_PC, BAD_PC);
10804
                  constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
10805
                }
10806
            }
10807
          else
10808
            reject_bad_reg (Rn);
10809
        }
10810
 
10811
      if (!inst.operands[1].isreg)
10812
        {
10813
          /* Immediate operand.  */
10814
          if (!in_it_block () && opcode == T_MNEM_mov)
10815
            narrow = 0;
10816
          if (low_regs && narrow)
10817
            {
10818
              inst.instruction = THUMB_OP16 (opcode);
10819
              inst.instruction |= Rn << 8;
10820
              if (inst.size_req == 2)
10821
                inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
10822
              else
10823
                inst.relax = opcode;
10824
            }
10825
          else
10826
            {
10827
              inst.instruction = THUMB_OP32 (inst.instruction);
10828
              inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
10829
              inst.instruction |= Rn << r0off;
10830
              inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10831
            }
10832
        }
10833
      else if (inst.operands[1].shifted && inst.operands[1].immisreg
10834
               && (inst.instruction == T_MNEM_mov
10835
                   || inst.instruction == T_MNEM_movs))
10836
        {
10837
          /* Register shifts are encoded as separate shift instructions.  */
10838
          bfd_boolean flags = (inst.instruction == T_MNEM_movs);
10839
 
10840
          if (in_it_block ())
10841
            narrow = !flags;
10842
          else
10843
            narrow = flags;
10844
 
10845
          if (inst.size_req == 4)
10846
            narrow = FALSE;
10847
 
10848
          if (!low_regs || inst.operands[1].imm > 7)
10849
            narrow = FALSE;
10850
 
10851
          if (Rn != Rm)
10852
            narrow = FALSE;
10853
 
10854
          switch (inst.operands[1].shift_kind)
10855
            {
10856
            case SHIFT_LSL:
10857
              opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
10858
              break;
10859
            case SHIFT_ASR:
10860
              opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
10861
              break;
10862
            case SHIFT_LSR:
10863
              opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
10864
              break;
10865
            case SHIFT_ROR:
10866
              opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
10867
              break;
10868
            default:
10869
              abort ();
10870
            }
10871
 
10872
          inst.instruction = opcode;
10873
          if (narrow)
10874
            {
10875
              inst.instruction |= Rn;
10876
              inst.instruction |= inst.operands[1].imm << 3;
10877
            }
10878
          else
10879
            {
10880
              if (flags)
10881
                inst.instruction |= CONDS_BIT;
10882
 
10883
              inst.instruction |= Rn << 8;
10884
              inst.instruction |= Rm << 16;
10885
              inst.instruction |= inst.operands[1].imm;
10886
            }
10887
        }
10888
      else if (!narrow)
10889
        {
10890
          /* Some mov with immediate shift have narrow variants.
10891
             Register shifts are handled above.  */
10892
          if (low_regs && inst.operands[1].shifted
10893
              && (inst.instruction == T_MNEM_mov
10894
                  || inst.instruction == T_MNEM_movs))
10895
            {
10896
              if (in_it_block ())
10897
                narrow = (inst.instruction == T_MNEM_mov);
10898
              else
10899
                narrow = (inst.instruction == T_MNEM_movs);
10900
            }
10901
 
10902
          if (narrow)
10903
            {
10904
              switch (inst.operands[1].shift_kind)
10905
                {
10906
                case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
10907
                case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
10908
                case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
10909
                default: narrow = FALSE; break;
10910
                }
10911
            }
10912
 
10913
          if (narrow)
10914
            {
10915
              inst.instruction |= Rn;
10916
              inst.instruction |= Rm << 3;
10917
              inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
10918
            }
10919
          else
10920
            {
10921
              inst.instruction = THUMB_OP32 (inst.instruction);
10922
              inst.instruction |= Rn << r0off;
10923
              encode_thumb32_shifted_operand (1);
10924
            }
10925
        }
10926
      else
10927
        switch (inst.instruction)
10928
          {
10929
          case T_MNEM_mov:
10930 166 khays
            /* In v4t or v5t a move of two low registers produces unpredictable
10931
               results. Don't allow this.  */
10932
            if (low_regs)
10933
              {
10934
                constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
10935
                            "MOV Rd, Rs with two low registers is not "
10936
                            "permitted on this architecture");
10937
                ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
10938
                                        arm_ext_v6);
10939
              }
10940
 
10941 16 khays
            inst.instruction = T_OPCODE_MOV_HR;
10942
            inst.instruction |= (Rn & 0x8) << 4;
10943
            inst.instruction |= (Rn & 0x7);
10944
            inst.instruction |= Rm << 3;
10945
            break;
10946
 
10947
          case T_MNEM_movs:
10948
            /* We know we have low registers at this point.
10949
               Generate LSLS Rd, Rs, #0.  */
10950
            inst.instruction = T_OPCODE_LSL_I;
10951
            inst.instruction |= Rn;
10952
            inst.instruction |= Rm << 3;
10953
            break;
10954
 
10955
          case T_MNEM_cmp:
10956
            if (low_regs)
10957
              {
10958
                inst.instruction = T_OPCODE_CMP_LR;
10959
                inst.instruction |= Rn;
10960
                inst.instruction |= Rm << 3;
10961
              }
10962
            else
10963
              {
10964
                inst.instruction = T_OPCODE_CMP_HR;
10965
                inst.instruction |= (Rn & 0x8) << 4;
10966
                inst.instruction |= (Rn & 0x7);
10967
                inst.instruction |= Rm << 3;
10968
              }
10969
            break;
10970
          }
10971
      return;
10972
    }
10973
 
10974
  inst.instruction = THUMB_OP16 (inst.instruction);
10975
 
10976
  /* PR 10443: Do not silently ignore shifted operands.  */
10977
  constraint (inst.operands[1].shifted,
10978
              _("shifts in CMP/MOV instructions are only supported in unified syntax"));
10979
 
10980
  if (inst.operands[1].isreg)
10981
    {
10982
      if (Rn < 8 && Rm < 8)
10983
        {
10984
          /* A move of two low registers is encoded as ADD Rd, Rs, #0
10985
             since a MOV instruction produces unpredictable results.  */
10986
          if (inst.instruction == T_OPCODE_MOV_I8)
10987
            inst.instruction = T_OPCODE_ADD_I3;
10988
          else
10989
            inst.instruction = T_OPCODE_CMP_LR;
10990
 
10991
          inst.instruction |= Rn;
10992
          inst.instruction |= Rm << 3;
10993
        }
10994
      else
10995
        {
10996
          if (inst.instruction == T_OPCODE_MOV_I8)
10997
            inst.instruction = T_OPCODE_MOV_HR;
10998
          else
10999
            inst.instruction = T_OPCODE_CMP_HR;
11000
          do_t_cpy ();
11001
        }
11002
    }
11003
  else
11004
    {
11005
      constraint (Rn > 7,
11006
                  _("only lo regs allowed with immediate"));
11007
      inst.instruction |= Rn << 8;
11008
      inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
11009
    }
11010
}
11011
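/* Two of the less obvious rewrites above, shown informally:

     movs pc, lr           -> encoded as SUBS PC, LR, #0 (T2_SUBS_PC_LR)
     movs r0, r0, lsl r2   -> encoded as the shift insn "lsls r0, r2"

   Register-shifted MOVs always become the corresponding shift
   instruction; only the destination-equals-source, low-register case
   can stay 16 bits.  */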
 
11012
static void
11013
do_t_mov16 (void)
11014
{
11015
  unsigned Rd;
11016
  bfd_vma imm;
11017
  bfd_boolean top;
11018
 
11019
  top = (inst.instruction & 0x00800000) != 0;
11020
  if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
11021
    {
11022
      constraint (top, _(":lower16: not allowed this instruction"));
11023
      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
11024
    }
11025
  else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
11026
    {
11027
      constraint (!top, _(":upper16: not allowed this instruction"));
11028
      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
11029
    }
11030
 
11031
  Rd = inst.operands[0].reg;
11032
  reject_bad_reg (Rd);
11033
 
11034
  inst.instruction |= Rd << 8;
11035
  if (inst.reloc.type == BFD_RELOC_UNUSED)
11036
    {
11037
      imm = inst.reloc.exp.X_add_number;
11038
      inst.instruction |= (imm & 0xf000) << 4;
11039
      inst.instruction |= (imm & 0x0800) << 15;
11040
      inst.instruction |= (imm & 0x0700) << 4;
11041
      inst.instruction |= (imm & 0x00ff);
11042
    }
11043
}
11044
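/* Typical use of the MOVW/MOVT pair handled above, e.g. to build a
   32-bit value in two halves:

     movw r0, #:lower16:some_symbol   @ BFD_RELOC_ARM_THUMB_MOVW
     movt r0, #:upper16:some_symbol   @ BFD_RELOC_ARM_THUMB_MOVT

   (some_symbol is merely a placeholder name for this example.)  */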
 
11045
static void
11046
do_t_mvn_tst (void)
11047
{
11048
  unsigned Rn, Rm;
11049
 
11050
  Rn = inst.operands[0].reg;
11051
  Rm = inst.operands[1].reg;
11052
 
11053
  if (inst.instruction == T_MNEM_cmp
11054
      || inst.instruction == T_MNEM_cmn)
11055
    constraint (Rn == REG_PC, BAD_PC);
11056
  else
11057
    reject_bad_reg (Rn);
11058
  reject_bad_reg (Rm);
11059
 
11060
  if (unified_syntax)
11061
    {
11062
      int r0off = (inst.instruction == T_MNEM_mvn
11063
                   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
11064
      bfd_boolean narrow;
11065
 
11066
      if (inst.size_req == 4
11067
          || inst.instruction > 0xffff
11068
          || inst.operands[1].shifted
11069
          || Rn > 7 || Rm > 7)
11070
        narrow = FALSE;
11071
      else if (inst.instruction == T_MNEM_cmn)
11072
        narrow = TRUE;
11073
      else if (THUMB_SETS_FLAGS (inst.instruction))
11074
        narrow = !in_it_block ();
11075
      else
11076
        narrow = in_it_block ();
11077
 
11078
      if (!inst.operands[1].isreg)
11079
        {
11080
          /* For an immediate, we always generate a 32-bit opcode;
11081
             section relaxation will shrink it later if possible.  */
11082
          if (inst.instruction < 0xffff)
11083
            inst.instruction = THUMB_OP32 (inst.instruction);
11084
          inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
11085
          inst.instruction |= Rn << r0off;
11086
          inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
11087
        }
11088
      else
11089
        {
11090
          /* See if we can do this with a 16-bit instruction.  */
11091
          if (narrow)
11092
            {
11093
              inst.instruction = THUMB_OP16 (inst.instruction);
11094
              inst.instruction |= Rn;
11095
              inst.instruction |= Rm << 3;
11096
            }
11097
          else
11098
            {
11099
              constraint (inst.operands[1].shifted
11100
                          && inst.operands[1].immisreg,
11101
                          _("shift must be constant"));
11102
              if (inst.instruction < 0xffff)
11103
                inst.instruction = THUMB_OP32 (inst.instruction);
11104
              inst.instruction |= Rn << r0off;
11105
              encode_thumb32_shifted_operand (1);
11106
            }
11107
        }
11108
    }
11109
  else
11110
    {
11111
      constraint (inst.instruction > 0xffff
11112
                  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
11113
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
11114
                  _("unshifted register required"));
11115
      constraint (Rn > 7 || Rm > 7,
11116
                  BAD_HIREG);
11117
 
11118
      inst.instruction = THUMB_OP16 (inst.instruction);
11119
      inst.instruction |= Rn;
11120
      inst.instruction |= Rm << 3;
11121
    }
11122
}
11123
 
11124
static void
11125
do_t_mrs (void)
11126
{
11127
  unsigned Rd;
11128
 
11129
  if (do_vfp_nsyn_mrs () == SUCCESS)
11130
    return;
11131
 
11132
  Rd = inst.operands[0].reg;
11133
  reject_bad_reg (Rd);
11134
  inst.instruction |= Rd << 8;
11135
 
11136
  if (inst.operands[1].isreg)
11137
    {
11138
      unsigned br = inst.operands[1].reg;
11139
      if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
11140
        as_bad (_("bad register for mrs"));
11141
 
11142
      inst.instruction |= br & (0xf << 16);
11143
      inst.instruction |= (br & 0x300) >> 4;
11144
      inst.instruction |= (br & SPSR_BIT) >> 2;
11145
    }
11146
  else
11147
    {
11148
      int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
11149
 
11150
      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
11151
        constraint (flags != 0, _("selected processor does not support "
11152
                    "requested special purpose register"));
11153
      else
11154
        /* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
11155
           devices).  */
11156
        constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
11157
                    _("'APSR', 'CPSR' or 'SPSR' expected"));
11158
 
11159
      inst.instruction |= (flags & SPSR_BIT) >> 2;
11160
      inst.instruction |= inst.operands[1].imm & 0xff;
11161
      inst.instruction |= 0xf0000;
11162
    }
11163
}
11164
 
11165
static void
11166
do_t_msr (void)
11167
{
11168
  int flags;
11169
  unsigned Rn;
11170
 
11171
  if (do_vfp_nsyn_msr () == SUCCESS)
11172
    return;
11173
 
11174
  constraint (!inst.operands[1].isreg,
11175
              _("Thumb encoding does not support an immediate here"));
11176
 
11177
  if (inst.operands[0].isreg)
11178
    flags = (int)(inst.operands[0].reg);
11179
  else
11180
    flags = inst.operands[0].imm;
11181
 
11182
  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
11183
    {
11184
      int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
11185
 
11186
      constraint ((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
11187
                   && (bits & ~(PSR_s | PSR_f)) != 0)
11188
                  || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
11189
                      && bits != PSR_f),
11190
                  _("selected processor does not support requested special "
11191
                    "purpose register"));
11192
    }
11193
  else
11194
     constraint ((flags & 0xff) != 0, _("selected processor does not support "
11195
                 "requested special purpose register"));
11196
 
11197
  Rn = inst.operands[1].reg;
11198
  reject_bad_reg (Rn);
11199
 
11200
  inst.instruction |= (flags & SPSR_BIT) >> 2;
11201
  inst.instruction |= (flags & 0xf0000) >> 8;
11202
  inst.instruction |= (flags & 0x300) >> 4;
11203
  inst.instruction |= (flags & 0xff);
11204
  inst.instruction |= Rn << 16;
11205
}
11206
 
11207
static void
11208
do_t_mul (void)
11209
{
11210
  bfd_boolean narrow;
11211
  unsigned Rd, Rn, Rm;
11212
 
11213
  if (!inst.operands[2].present)
11214
    inst.operands[2].reg = inst.operands[0].reg;
11215
 
11216
  Rd = inst.operands[0].reg;
11217
  Rn = inst.operands[1].reg;
11218
  Rm = inst.operands[2].reg;
11219
 
11220
  if (unified_syntax)
11221
    {
11222
      if (inst.size_req == 4
11223
          || (Rd != Rn
11224
              && Rd != Rm)
11225
          || Rn > 7
11226
          || Rm > 7)
11227
        narrow = FALSE;
11228
      else if (inst.instruction == T_MNEM_muls)
11229
        narrow = !in_it_block ();
11230
      else
11231
        narrow = in_it_block ();
11232
    }
11233
  else
11234
    {
11235
      constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
11236
      constraint (Rn > 7 || Rm > 7,
11237
                  BAD_HIREG);
11238
      narrow = TRUE;
11239
    }
11240
 
11241
  if (narrow)
11242
    {
11243
      /* 16-bit MULS/Conditional MUL.  */
11244
      inst.instruction = THUMB_OP16 (inst.instruction);
11245
      inst.instruction |= Rd;
11246
 
11247
      if (Rd == Rn)
11248
        inst.instruction |= Rm << 3;
11249
      else if (Rd == Rm)
11250
        inst.instruction |= Rn << 3;
11251
      else
11252
        constraint (1, _("dest must overlap one source register"));
11253
    }
11254
  else
11255
    {
11256
      constraint (inst.instruction != T_MNEM_mul,
11257
                  _("Thumb-2 MUL must not set flags"));
11258
      /* 32-bit MUL.  */
11259
      inst.instruction = THUMB_OP32 (inst.instruction);
11260
      inst.instruction |= Rd << 8;
11261
      inst.instruction |= Rn << 16;
11262
      inst.instruction |= Rm << 0;
11263
 
11264
      reject_bad_reg (Rd);
11265
      reject_bad_reg (Rn);
11266
      reject_bad_reg (Rm);
11267
    }
11268
}
11269
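/* Examples of the narrow/wide split above:

     muls r0, r1, r0    @ 16-bit: destination overlaps a source, low regs
     mul  r0, r1, r2    @ 32-bit: Rd matches neither source
     muls r0, r1, r2    @ rejected: Thumb-2 MUL must not set flags

   The 16-bit form exists only as MULS outside an IT block (or as a
   conditional MUL inside one).  */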
 
11270
static void
11271
do_t_mull (void)
11272
{
11273
  unsigned RdLo, RdHi, Rn, Rm;
11274
 
11275
  RdLo = inst.operands[0].reg;
11276
  RdHi = inst.operands[1].reg;
11277
  Rn = inst.operands[2].reg;
11278
  Rm = inst.operands[3].reg;
11279
 
11280
  reject_bad_reg (RdLo);
11281
  reject_bad_reg (RdHi);
11282
  reject_bad_reg (Rn);
11283
  reject_bad_reg (Rm);
11284
 
11285
  inst.instruction |= RdLo << 12;
11286
  inst.instruction |= RdHi << 8;
11287
  inst.instruction |= Rn << 16;
11288
  inst.instruction |= Rm;
11289
 
11290
  if (RdLo == RdHi)
11291
    as_tsktsk (_("rdhi and rdlo must be different"));
11292
}
11293
 
11294
static void
11295
do_t_nop (void)
11296
{
11297
  set_it_insn_type (NEUTRAL_IT_INSN);
11298
 
11299
  if (unified_syntax)
11300
    {
11301
      if (inst.size_req == 4 || inst.operands[0].imm > 15)
11302
        {
11303
          inst.instruction = THUMB_OP32 (inst.instruction);
11304
          inst.instruction |= inst.operands[0].imm;
11305
        }
11306
      else
11307
        {
11308
          /* PR9722: Check for Thumb2 availability before
11309
             generating a thumb2 nop instruction.  */
11310
          if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
11311
            {
11312
              inst.instruction = THUMB_OP16 (inst.instruction);
11313
              inst.instruction |= inst.operands[0].imm << 4;
11314
            }
11315
          else
11316
            inst.instruction = 0x46c0;
11317
        }
11318
    }
11319
  else
11320
    {
11321
      constraint (inst.operands[0].present,
11322
                  _("Thumb does not support NOP with hints"));
11323
      inst.instruction = 0x46c0;
11324
    }
11325
}
11326
 
11327
static void
11328
do_t_neg (void)
11329
{
11330
  if (unified_syntax)
11331
    {
11332
      bfd_boolean narrow;
11333
 
11334
      if (THUMB_SETS_FLAGS (inst.instruction))
11335
        narrow = !in_it_block ();
11336
      else
11337
        narrow = in_it_block ();
11338
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
11339
        narrow = FALSE;
11340
      if (inst.size_req == 4)
11341
        narrow = FALSE;
11342
 
11343
      if (!narrow)
11344
        {
11345
          inst.instruction = THUMB_OP32 (inst.instruction);
11346
          inst.instruction |= inst.operands[0].reg << 8;
11347
          inst.instruction |= inst.operands[1].reg << 16;
11348
        }
11349
      else
11350
        {
11351
          inst.instruction = THUMB_OP16 (inst.instruction);
11352
          inst.instruction |= inst.operands[0].reg;
11353
          inst.instruction |= inst.operands[1].reg << 3;
11354
        }
11355
    }
11356
  else
11357
    {
11358
      constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
11359
                  BAD_HIREG);
11360
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
11361
 
11362
      inst.instruction = THUMB_OP16 (inst.instruction);
11363
      inst.instruction |= inst.operands[0].reg;
11364
      inst.instruction |= inst.operands[1].reg << 3;
11365
    }
11366
}
11367
 
11368
static void
11369
do_t_orn (void)
11370
{
11371
  unsigned Rd, Rn;
11372
 
11373
  Rd = inst.operands[0].reg;
11374
  Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;
11375
 
11376
  reject_bad_reg (Rd);
11377
  /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN.  */
11378
  reject_bad_reg (Rn);
11379
 
11380
  inst.instruction |= Rd << 8;
11381
  inst.instruction |= Rn << 16;
11382
 
11383
  if (!inst.operands[2].isreg)
11384
    {
11385
      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
11386
      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
11387
    }
11388
  else
11389
    {
11390
      unsigned Rm;
11391
 
11392
      Rm = inst.operands[2].reg;
11393
      reject_bad_reg (Rm);
11394
 
11395
      constraint (inst.operands[2].shifted
11396
                  && inst.operands[2].immisreg,
11397
                  _("shift must be constant"));
11398
      encode_thumb32_shifted_operand (2);
11399
    }
11400
}
11401
 
11402
static void
11403
do_t_pkhbt (void)
11404
{
11405
  unsigned Rd, Rn, Rm;
11406
 
11407
  Rd = inst.operands[0].reg;
11408
  Rn = inst.operands[1].reg;
11409
  Rm = inst.operands[2].reg;
11410
 
11411
  reject_bad_reg (Rd);
11412
  reject_bad_reg (Rn);
11413
  reject_bad_reg (Rm);
11414
 
11415
  inst.instruction |= Rd << 8;
11416
  inst.instruction |= Rn << 16;
11417
  inst.instruction |= Rm;
11418
  if (inst.operands[3].present)
11419
    {
11420
      unsigned int val = inst.reloc.exp.X_add_number;
11421
      constraint (inst.reloc.exp.X_op != O_constant,
11422
                  _("expression too complex"));
11423
      inst.instruction |= (val & 0x1c) << 10;
11424
      inst.instruction |= (val & 0x03) << 6;
11425
    }
11426
}
11427
 
11428
static void
11429
do_t_pkhtb (void)
11430
{
11431
  if (!inst.operands[3].present)
11432
    {
11433
      unsigned Rtmp;
11434
 
11435
      inst.instruction &= ~0x00000020;
11436
 
11437
      /* PR 10168.  Swap the Rm and Rn registers.  */
11438
      Rtmp = inst.operands[1].reg;
11439
      inst.operands[1].reg = inst.operands[2].reg;
11440
      inst.operands[2].reg = Rtmp;
11441
    }
11442
  do_t_pkhbt ();
11443
}
11444
 
11445
static void
11446
do_t_pld (void)
11447
{
11448
  if (inst.operands[0].immisreg)
11449
    reject_bad_reg (inst.operands[0].imm);
11450
 
11451
  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
11452
}
11453
 
11454
static void
11455
do_t_push_pop (void)
11456
{
11457
  unsigned mask;
11458
 
11459
  constraint (inst.operands[0].writeback,
11460
              _("push/pop do not support {reglist}^"));
11461
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
11462
              _("expression too complex"));
11463
 
11464
  mask = inst.operands[0].imm;
11465
  if ((mask & ~0xff) == 0)
11466
    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
11467
  else if ((inst.instruction == T_MNEM_push
11468
            && (mask & ~0xff) == 1 << REG_LR)
11469
           || (inst.instruction == T_MNEM_pop
11470
               && (mask & ~0xff) == 1 << REG_PC))
11471
    {
11472
      inst.instruction = THUMB_OP16 (inst.instruction);
11473
      inst.instruction |= THUMB_PP_PC_LR;
11474
      inst.instruction |= mask & 0xff;
11475
    }
11476
  else if (unified_syntax)
11477
    {
11478
      inst.instruction = THUMB_OP32 (inst.instruction);
11479
      encode_thumb2_ldmstm (13, mask, TRUE);
11480
    }
11481
  else
11482
    {
11483
      inst.error = _("invalid register list to push/pop instruction");
11484
      return;
11485
    }
11486
}
11487
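/* Encoding choices made above, roughly:

     push {r0-r7}        -> 16-bit encoding (mask fits in 8 bits)
     push {r4-r7, lr}    -> 16-bit encoding with the PC/LR bit set
     pop  {r4-r7, pc}    -> 16-bit encoding with the PC/LR bit set
     push {r4, r8}       -> 32-bit form via encode_thumb2_ldmstm (base 13)  */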
 
11488
static void
11489
do_t_rbit (void)
11490
{
11491
  unsigned Rd, Rm;
11492
 
11493
  Rd = inst.operands[0].reg;
11494
  Rm = inst.operands[1].reg;
11495
 
11496
  reject_bad_reg (Rd);
11497
  reject_bad_reg (Rm);
11498
 
11499
  inst.instruction |= Rd << 8;
11500
  inst.instruction |= Rm << 16;
11501
  inst.instruction |= Rm;
11502
}
11503
 
11504
static void
11505
do_t_rev (void)
11506
{
11507
  unsigned Rd, Rm;
11508
 
11509
  Rd = inst.operands[0].reg;
11510
  Rm = inst.operands[1].reg;
11511
 
11512
  reject_bad_reg (Rd);
11513
  reject_bad_reg (Rm);
11514
 
11515
  if (Rd <= 7 && Rm <= 7
11516
      && inst.size_req != 4)
11517
    {
11518
      inst.instruction = THUMB_OP16 (inst.instruction);
11519
      inst.instruction |= Rd;
11520
      inst.instruction |= Rm << 3;
11521
    }
11522
  else if (unified_syntax)
11523
    {
11524
      inst.instruction = THUMB_OP32 (inst.instruction);
11525
      inst.instruction |= Rd << 8;
11526
      inst.instruction |= Rm << 16;
11527
      inst.instruction |= Rm;
11528
    }
11529
  else
11530
    inst.error = BAD_HIREG;
11531
}
11532
 
11533
static void
11534
do_t_rrx (void)
11535
{
11536
  unsigned Rd, Rm;
11537
 
11538
  Rd = inst.operands[0].reg;
11539
  Rm = inst.operands[1].reg;
11540
 
11541
  reject_bad_reg (Rd);
11542
  reject_bad_reg (Rm);
11543
 
11544
  inst.instruction |= Rd << 8;
11545
  inst.instruction |= Rm;
11546
}
11547
 
11548
static void
11549
do_t_rsb (void)
11550
{
11551
  unsigned Rd, Rs;
11552
 
11553
  Rd = inst.operands[0].reg;
11554
  Rs = (inst.operands[1].present
11555
        ? inst.operands[1].reg    /* Rd, Rs, foo */
11556
        : inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
11557
 
11558
  reject_bad_reg (Rd);
11559
  reject_bad_reg (Rs);
11560
  if (inst.operands[2].isreg)
11561
    reject_bad_reg (inst.operands[2].reg);
11562
 
11563
  inst.instruction |= Rd << 8;
11564
  inst.instruction |= Rs << 16;
11565
  if (!inst.operands[2].isreg)
11566
    {
11567
      bfd_boolean narrow;
11568
 
11569
      if ((inst.instruction & 0x00100000) != 0)
11570
        narrow = !in_it_block ();
11571
      else
11572
        narrow = in_it_block ();
11573
 
11574
      if (Rd > 7 || Rs > 7)
11575
        narrow = FALSE;
11576
 
11577
      if (inst.size_req == 4 || !unified_syntax)
11578
        narrow = FALSE;
11579
 
11580
      if (inst.reloc.exp.X_op != O_constant
11581
          || inst.reloc.exp.X_add_number != 0)
11582
        narrow = FALSE;
11583
 
11584
      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
11585
         relaxation, but it doesn't seem worth the hassle.  */
11586
      if (narrow)
11587
        {
11588
          inst.reloc.type = BFD_RELOC_UNUSED;
11589
          inst.instruction = THUMB_OP16 (T_MNEM_negs);
11590
          inst.instruction |= Rs << 3;
11591
          inst.instruction |= Rd;
11592
        }
11593
      else
11594
        {
11595
          inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
11596
          inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
11597
        }
11598
    }
11599
  else
11600
    encode_thumb32_shifted_operand (2);
11601
}
11602
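/* The "rsb #0 becomes a 16-bit neg" shortcut above, illustrated:

     rsbs r0, r1, #0    -> 16-bit NEGS r0, r1 (low regs, outside IT block)
     rsb  r0, r1, #0    -> 32-bit RSB immediate when not inside an IT block
     rsbs r0, r1, #1    -> 32-bit, since only a zero immediate narrows  */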
 
11603
static void
11604
do_t_setend (void)
11605
{
11606
  set_it_insn_type (OUTSIDE_IT_INSN);
11607
  if (inst.operands[0].imm)
11608
    inst.instruction |= 0x8;
11609
}
11610
 
11611
static void
11612
do_t_shift (void)
11613
{
11614
  if (!inst.operands[1].present)
11615
    inst.operands[1].reg = inst.operands[0].reg;
11616
 
11617
  if (unified_syntax)
11618
    {
11619
      bfd_boolean narrow;
11620
      int shift_kind;
11621
 
11622
      switch (inst.instruction)
11623
        {
11624
        case T_MNEM_asr:
11625
        case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
11626
        case T_MNEM_lsl:
11627
        case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
11628
        case T_MNEM_lsr:
11629
        case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
11630
        case T_MNEM_ror:
11631
        case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
11632
        default: abort ();
11633
        }
11634
 
11635
      if (THUMB_SETS_FLAGS (inst.instruction))
11636
        narrow = !in_it_block ();
11637
      else
11638
        narrow = in_it_block ();
11639
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
11640
        narrow = FALSE;
11641
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
11642
        narrow = FALSE;
11643
      if (inst.operands[2].isreg
11644
          && (inst.operands[1].reg != inst.operands[0].reg
11645
              || inst.operands[2].reg > 7))
11646
        narrow = FALSE;
11647
      if (inst.size_req == 4)
11648
        narrow = FALSE;
11649
 
11650
      reject_bad_reg (inst.operands[0].reg);
11651
      reject_bad_reg (inst.operands[1].reg);
11652
 
11653
      if (!narrow)
11654
        {
11655
          if (inst.operands[2].isreg)
11656
            {
11657
              reject_bad_reg (inst.operands[2].reg);
11658
              inst.instruction = THUMB_OP32 (inst.instruction);
11659
              inst.instruction |= inst.operands[0].reg << 8;
11660
              inst.instruction |= inst.operands[1].reg << 16;
11661
              inst.instruction |= inst.operands[2].reg;
 
11663
              /* PR 12854: Error on extraneous shifts.  */
11664
              constraint (inst.operands[2].shifted,
11665
                          _("extraneous shift as part of operand to shift insn"));
            }
11667
          else
11668
            {
11669
              inst.operands[1].shifted = 1;
11670
              inst.operands[1].shift_kind = shift_kind;
11671
              inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
11672
                                             ? T_MNEM_movs : T_MNEM_mov);
11673
              inst.instruction |= inst.operands[0].reg << 8;
11674
              encode_thumb32_shifted_operand (1);
11675
              /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
11676
              inst.reloc.type = BFD_RELOC_UNUSED;
11677
            }
11678
        }
11679
      else
11680
        {
11681
          if (inst.operands[2].isreg)
11682
            {
11683
              switch (shift_kind)
11684
                {
11685
                case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
11686
                case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
11687
                case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
11688
                case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
11689
                default: abort ();
11690
                }
11691
 
11692
              inst.instruction |= inst.operands[0].reg;
11693
              inst.instruction |= inst.operands[2].reg << 3;
 
11695
              /* PR 12854: Error on extraneous shifts.  */
11696
              constraint (inst.operands[2].shifted,
11697
                          _("extraneous shift as part of operand to shift insn"));
            }
11699
          else
11700
            {
11701
              switch (shift_kind)
11702
                {
11703
                case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
11704
                case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
11705
                case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
11706
                default: abort ();
11707
                }
11708
              inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
11709
              inst.instruction |= inst.operands[0].reg;
11710
              inst.instruction |= inst.operands[1].reg << 3;
11711
            }
11712
        }
11713
    }
11714
  else
11715
    {
11716
      constraint (inst.operands[0].reg > 7
11717
                  || inst.operands[1].reg > 7, BAD_HIREG);
11718
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
11719
 
11720
      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
11721
        {
11722
          constraint (inst.operands[2].reg > 7, BAD_HIREG);
11723
          constraint (inst.operands[0].reg != inst.operands[1].reg,
11724
                      _("source1 and dest must be same register"));
11725
 
11726
          switch (inst.instruction)
11727
            {
11728
            case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
11729
            case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
11730
            case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
11731
            case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
11732
            default: abort ();
11733
            }
11734
 
11735
          inst.instruction |= inst.operands[0].reg;
11736
          inst.instruction |= inst.operands[2].reg << 3;
 
11738
          /* PR 12854: Error on extraneous shifts.  */
11739
          constraint (inst.operands[2].shifted,
11740
                      _("extraneous shift as part of operand to shift insn"));
        }
11742
      else
11743
        {
11744
          switch (inst.instruction)
11745
            {
11746
            case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
11747
            case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
11748
            case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
11749
            case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
11750
            default: abort ();
11751
            }
11752
          inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
11753
          inst.instruction |= inst.operands[0].reg;
11754
          inst.instruction |= inst.operands[1].reg << 3;
11755
        }
11756
    }
11757
}
11758
 
11759
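/* Generic three-register Thumb SIMD operation: encode Rd, Rn and Rm,
   rejecting SP and PC.  */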
static void
11760
do_t_simd (void)
11761
{
11762
  unsigned Rd, Rn, Rm;
11763
 
11764
  Rd = inst.operands[0].reg;
11765
  Rn = inst.operands[1].reg;
11766
  Rm = inst.operands[2].reg;
11767
 
11768
  reject_bad_reg (Rd);
11769
  reject_bad_reg (Rn);
11770
  reject_bad_reg (Rm);
11771
 
11772
  inst.instruction |= Rd << 8;
11773
  inst.instruction |= Rn << 16;
11774
  inst.instruction |= Rm;
11775
}
11776
 
11777
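/* As do_t_simd, but with the two source operands in the opposite order:
   operand 1 is encoded as Rm and operand 2 as Rn.  */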
static void
11778
do_t_simd2 (void)
11779
{
11780
  unsigned Rd, Rn, Rm;
11781
 
11782
  Rd = inst.operands[0].reg;
11783
  Rm = inst.operands[1].reg;
11784
  Rn = inst.operands[2].reg;
11785
 
11786
  reject_bad_reg (Rd);
11787
  reject_bad_reg (Rn);
11788
  reject_bad_reg (Rm);
11789
 
11790
  inst.instruction |= Rd << 8;
11791
  inst.instruction |= Rn << 16;
11792
  inst.instruction |= Rm;
11793
}
11794
 
11795
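/* Thumb SMC (secure monitor call).  Only permitted when the target has the
   v7-A feature; the operand must be a constant, which is scattered across
   the encoding's immediate fields.  */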
static void
11796
do_t_smc (void)
11797
{
11798
  unsigned int value = inst.reloc.exp.X_add_number;
11799
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
11800
              _("SMC is not permitted on this architecture"));
11801
  constraint (inst.reloc.exp.X_op != O_constant,
11802
              _("expression too complex"));
11803
  inst.reloc.type = BFD_RELOC_UNUSED;
11804
  inst.instruction |= (value & 0xf000) >> 12;
11805
  inst.instruction |= (value & 0x0ff0);
11806
  inst.instruction |= (value & 0x000f) << 16;
11807
}
11808
 
11809
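/* Thumb HVC (hypervisor call): a 16-bit constant split across the
   encoding's two immediate fields.  */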
static void
11810
do_t_hvc (void)
11811
{
11812
  unsigned int value = inst.reloc.exp.X_add_number;
11813
 
11814
  inst.reloc.type = BFD_RELOC_UNUSED;
11815
  inst.instruction |= (value & 0x0fff);
11816
  inst.instruction |= (value & 0xf000) << 4;
11817
}
11818
 
11819
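/* Common encoder for Thumb SSAT and USAT.  BIAS is subtracted from the
   saturation width operand before encoding (1 for SSAT, 0 for USAT); an
   optional shift of the source register is encoded if present.  */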
static void
11820
do_t_ssat_usat (int bias)
11821
{
11822
  unsigned Rd, Rn;
11823
 
11824
  Rd = inst.operands[0].reg;
11825
  Rn = inst.operands[2].reg;
11826
 
11827
  reject_bad_reg (Rd);
11828
  reject_bad_reg (Rn);
11829
 
11830
  inst.instruction |= Rd << 8;
11831
  inst.instruction |= inst.operands[1].imm - bias;
11832
  inst.instruction |= Rn << 16;
11833
 
11834
  if (inst.operands[3].present)
11835
    {
11836
      offsetT shift_amount = inst.reloc.exp.X_add_number;
11837
 
11838
      inst.reloc.type = BFD_RELOC_UNUSED;
11839
 
11840
      constraint (inst.reloc.exp.X_op != O_constant,
11841
                  _("expression too complex"));
11842
 
11843
      if (shift_amount != 0)
11844
        {
11845
          constraint (shift_amount > 31,
11846
                      _("shift expression is too large"));
11847
 
11848
          if (inst.operands[3].shift_kind == SHIFT_ASR)
11849
            inst.instruction |= 0x00200000;  /* sh bit.  */
11850
 
11851
          inst.instruction |= (shift_amount & 0x1c) << 10;
11852
          inst.instruction |= (shift_amount & 0x03) << 6;
11853
        }
11854
    }
11855
}
11856
 
11857
static void
11858
do_t_ssat (void)
11859
{
11860
  do_t_ssat_usat (1);
11861
}
11862
 
11863
static void
11864
do_t_ssat16 (void)
11865
{
11866
  unsigned Rd, Rn;
11867
 
11868
  Rd = inst.operands[0].reg;
11869
  Rn = inst.operands[2].reg;
11870
 
11871
  reject_bad_reg (Rd);
11872
  reject_bad_reg (Rn);
11873
 
11874
  inst.instruction |= Rd << 8;
11875
  inst.instruction |= inst.operands[1].imm - 1;
11876
  inst.instruction |= Rn << 16;
11877
}
11878
 
11879
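/* Thumb STREX.  The address must be a base register with at most an
   immediate offset: no post-indexing, writeback, register offset or
   shift.  */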
static void
11880
do_t_strex (void)
11881
{
11882
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
11883
              || inst.operands[2].postind || inst.operands[2].writeback
11884
              || inst.operands[2].immisreg || inst.operands[2].shifted
11885
              || inst.operands[2].negative,
11886
              BAD_ADDR_MODE);
11887
 
11888
  constraint (inst.operands[2].reg == REG_PC, BAD_PC);
11889
 
11890
  inst.instruction |= inst.operands[0].reg << 8;
11891
  inst.instruction |= inst.operands[1].reg << 12;
11892
  inst.instruction |= inst.operands[2].reg << 16;
11893
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
11894
}
11895
 
11896
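/* Thumb STREXD.  If the second source register is omitted it defaults to
   the register after the first, and the status register must not overlap
   any of the other operands.  */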
static void
11897
do_t_strexd (void)
11898
{
11899
  if (!inst.operands[2].present)
11900
    inst.operands[2].reg = inst.operands[1].reg + 1;
11901
 
11902
  constraint (inst.operands[0].reg == inst.operands[1].reg
11903
              || inst.operands[0].reg == inst.operands[2].reg
11904
              || inst.operands[0].reg == inst.operands[3].reg,
11905
              BAD_OVERLAP);
11906
 
11907
  inst.instruction |= inst.operands[0].reg;
11908
  inst.instruction |= inst.operands[1].reg << 12;
11909
  inst.instruction |= inst.operands[2].reg << 8;
11910
  inst.instruction |= inst.operands[3].reg << 16;
11911
}
11912
 
11913
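/* Thumb extend-and-add instructions (SXTAH and friends): Rd, Rn, Rm with an
   optional rotation of Rm.  */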
static void
11914
do_t_sxtah (void)
11915
{
11916
  unsigned Rd, Rn, Rm;
11917
 
11918
  Rd = inst.operands[0].reg;
11919
  Rn = inst.operands[1].reg;
11920
  Rm = inst.operands[2].reg;
11921
 
11922
  reject_bad_reg (Rd);
11923
  reject_bad_reg (Rn);
11924
  reject_bad_reg (Rm);
11925
 
11926
  inst.instruction |= Rd << 8;
11927
  inst.instruction |= Rn << 16;
11928
  inst.instruction |= Rm;
11929
  inst.instruction |= inst.operands[3].imm << 4;
11930
}
11931
 
11932
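/* Thumb register-extend instructions (SXTH and friends).  The 16-bit
   encoding is used when both registers are low, no rotation is given and a
   32-bit encoding was not explicitly requested.  */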
static void
11933
do_t_sxth (void)
11934
{
11935
  unsigned Rd, Rm;
11936
 
11937
  Rd = inst.operands[0].reg;
11938
  Rm = inst.operands[1].reg;
11939
 
11940
  reject_bad_reg (Rd);
11941
  reject_bad_reg (Rm);
11942
 
11943
  if (inst.instruction <= 0xffff
11944
      && inst.size_req != 4
11945
      && Rd <= 7 && Rm <= 7
11946
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
11947
    {
11948
      inst.instruction = THUMB_OP16 (inst.instruction);
11949
      inst.instruction |= Rd;
11950
      inst.instruction |= Rm << 3;
11951
    }
11952
  else if (unified_syntax)
11953
    {
11954
      if (inst.instruction <= 0xffff)
11955
        inst.instruction = THUMB_OP32 (inst.instruction);
11956
      inst.instruction |= Rd << 8;
11957
      inst.instruction |= Rm;
11958
      inst.instruction |= inst.operands[2].imm << 4;
11959
    }
11960
  else
11961
    {
11962
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
11963
                  _("Thumb encoding does not support rotation"));
11964
      constraint (1, BAD_HIREG);
11965
    }
11966
}
11967
 
11968
static void
11969
do_t_swi (void)
11970
{
11971
  /* We have to do the following check manually as ARM_EXT_OS only applies
11972
     to ARM_EXT_V6M.  */
11973
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6m))
11974
    {
11975
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_os)
11976
          /* This only applies to the v6m howver, not later architectures.  */
11977
          && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7))
11978
        as_bad (_("SVC is not permitted on this architecture"));
11979
      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, arm_ext_os);
11980
    }
11981
 
11982
  inst.reloc.type = BFD_RELOC_ARM_SWI;
11983
}
11984
 
11985
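/* TBB/TBH table branch.  The index must be a register ([Rn, Rm] form); Rn
   may not be SP, Rm may not be SP or PC, a shifted index is only allowed for
   TBH, and the instruction must be the last one in any IT block containing
   it.  */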
static void
11986
do_t_tb (void)
11987
{
11988
  unsigned Rn, Rm;
11989
  int half;
11990
 
11991
  half = (inst.instruction & 0x10) != 0;
11992
  set_it_insn_type_last ();
11993
  constraint (inst.operands[0].immisreg,
11994
              _("instruction requires register index"));
11995
 
11996
  Rn = inst.operands[0].reg;
11997
  Rm = inst.operands[0].imm;
11998
 
11999
  constraint (Rn == REG_SP, BAD_SP);
12000
  reject_bad_reg (Rm);
12001
 
12002
  constraint (!half && inst.operands[0].shifted,
12003
              _("instruction does not allow shifted index"));
12004
  inst.instruction |= (Rn << 16) | Rm;
12005
}
12006
 
12007
static void
12008
do_t_usat (void)
12009
{
12010
  do_t_ssat_usat (0);
12011
}
12012
 
12013
static void
12014
do_t_usat16 (void)
12015
{
12016
  unsigned Rd, Rn;
12017
 
12018
  Rd = inst.operands[0].reg;
12019
  Rn = inst.operands[2].reg;
12020
 
12021
  reject_bad_reg (Rd);
12022
  reject_bad_reg (Rn);
12023
 
12024
  inst.instruction |= Rd << 8;
12025
  inst.instruction |= inst.operands[1].imm;
12026
  inst.instruction |= Rn << 16;
12027
}
12028
 
12029
/* Neon instruction encoder helpers.  */
12030
 
12031
/* Encodings for the different types for various Neon opcodes.  */
12032
 
12033
/* An "invalid" code for the following tables.  */
12034
#define N_INV -1u
12035
 
12036
struct neon_tab_entry
12037
{
12038
  unsigned integer;
12039
  unsigned float_or_poly;
12040
  unsigned scalar_or_imm;
12041
};
12042
 
12043
/* Map overloaded Neon opcodes to their respective encodings.  */
12044
#define NEON_ENC_TAB                                    \
12045
  X(vabd,       0x0000700, 0x1200d00, N_INV),           \
12046
  X(vmax,       0x0000600, 0x0000f00, N_INV),           \
12047
  X(vmin,       0x0000610, 0x0200f00, N_INV),           \
12048
  X(vpadd,      0x0000b10, 0x1000d00, N_INV),           \
12049
  X(vpmax,      0x0000a00, 0x1000f00, N_INV),           \
12050
  X(vpmin,      0x0000a10, 0x1200f00, N_INV),           \
12051
  X(vadd,       0x0000800, 0x0000d00, N_INV),           \
12052
  X(vsub,       0x1000800, 0x0200d00, N_INV),           \
12053
  X(vceq,       0x1000810, 0x0000e00, 0x1b10100),       \
12054
  X(vcge,       0x0000310, 0x1000e00, 0x1b10080),       \
12055
  X(vcgt,       0x0000300, 0x1200e00, 0x1b10000),       \
12056
  /* Register variants of the following two instructions are encoded as
12057
     vcge / vcgt with the operands reversed.  */        \
12058
  X(vclt,       0x0000300, 0x1200e00, 0x1b10200),       \
12059
  X(vcle,       0x0000310, 0x1000e00, 0x1b10180),       \
12060
  X(vfma,       N_INV, 0x0000c10, N_INV),               \
12061
  X(vfms,       N_INV, 0x0200c10, N_INV),               \
12062
  X(vmla,       0x0000900, 0x0000d10, 0x0800040),       \
12063
  X(vmls,       0x1000900, 0x0200d10, 0x0800440),       \
12064
  X(vmul,       0x0000910, 0x1000d10, 0x0800840),       \
12065
  X(vmull,      0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
12066
  X(vmlal,      0x0800800, N_INV,     0x0800240),       \
12067
  X(vmlsl,      0x0800a00, N_INV,     0x0800640),       \
12068
  X(vqdmlal,    0x0800900, N_INV,     0x0800340),       \
12069
  X(vqdmlsl,    0x0800b00, N_INV,     0x0800740),       \
12070
  X(vqdmull,    0x0800d00, N_INV,     0x0800b40),       \
12071
  X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),       \
12072
  X(vqrdmulh,   0x1000b00, N_INV,     0x0800d40),       \
12073
  X(vshl,       0x0000400, N_INV,     0x0800510),       \
12074
  X(vqshl,      0x0000410, N_INV,     0x0800710),       \
12075
  X(vand,       0x0000110, N_INV,     0x0800030),       \
12076
  X(vbic,       0x0100110, N_INV,     0x0800030),       \
12077
  X(veor,       0x1000110, N_INV,     N_INV),           \
12078
  X(vorn,       0x0300110, N_INV,     0x0800010),       \
12079
  X(vorr,       0x0200110, N_INV,     0x0800010),       \
12080
  X(vmvn,       0x1b00580, N_INV,     0x0800030),       \
12081
  X(vshll,      0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
12082
  X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
12083
  X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
12084
  X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
12085
  X(vst1,       0x0000000, 0x0800000, N_INV),           \
12086
  X(vld2,       0x0200100, 0x0a00100, 0x0a00d00),       \
12087
  X(vst2,       0x0000100, 0x0800100, N_INV),           \
12088
  X(vld3,       0x0200200, 0x0a00200, 0x0a00e00),       \
12089
  X(vst3,       0x0000200, 0x0800200, N_INV),           \
12090
  X(vld4,       0x0200300, 0x0a00300, 0x0a00f00),       \
12091
  X(vst4,       0x0000300, 0x0800300, N_INV),           \
12092
  X(vmovn,      0x1b20200, N_INV,     N_INV),           \
12093
  X(vtrn,       0x1b20080, N_INV,     N_INV),           \
12094
  X(vqmovn,     0x1b20200, N_INV,     N_INV),           \
12095
  X(vqmovun,    0x1b20240, N_INV,     N_INV),           \
12096
  X(vnmul,      0xe200a40, 0xe200b40, N_INV),           \
12097
  X(vnmla,      0xe100a40, 0xe100b40, N_INV),           \
12098
  X(vnmls,      0xe100a00, 0xe100b00, N_INV),           \
12099
  X(vfnma,      0xe900a40, 0xe900b40, N_INV),           \
12100
  X(vfnms,      0xe900a00, 0xe900b00, N_INV),           \
12101
  X(vcmp,       0xeb40a40, 0xeb40b40, N_INV),           \
12102
  X(vcmpz,      0xeb50a40, 0xeb50b40, N_INV),           \
12103
  X(vcmpe,      0xeb40ac0, 0xeb40bc0, N_INV),           \
12104
  X(vcmpez,     0xeb50ac0, 0xeb50bc0, N_INV)
12105
 
12106
enum neon_opc
12107
{
12108
#define X(OPC,I,F,S) N_MNEM_##OPC
12109
NEON_ENC_TAB
12110
#undef X
12111
};
12112
 
12113
static const struct neon_tab_entry neon_enc_tab[] =
12114
{
12115
#define X(OPC,I,F,S) { (I), (F), (S) }
12116
NEON_ENC_TAB
12117
#undef X
12118
};
12119
 
12120
/* Do not use these macros; instead, use NEON_ENCODE defined below.  */
12121
#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
12122
#define NEON_ENC_ARMREG_(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
12123
#define NEON_ENC_POLY_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
12124
#define NEON_ENC_FLOAT_(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
12125
#define NEON_ENC_SCALAR_(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
12126
#define NEON_ENC_IMMED_(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
12127
#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
12128
#define NEON_ENC_LANE_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
12129
#define NEON_ENC_DUP_(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
12130
#define NEON_ENC_SINGLE_(X) \
12131
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
12132
#define NEON_ENC_DOUBLE_(X) \
12133
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
12134
 
12135
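/* Select the encoding variant named by TYPE (INTEGER, FLOAT, SCALAR, ...)
   for the current mnemonic from neon_enc_tab and flag the instruction as
   Neon.  */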
#define NEON_ENCODE(type, inst)                                 \
12136
  do                                                            \
12137
    {                                                           \
12138
      inst.instruction = NEON_ENC_##type##_ (inst.instruction); \
12139
      inst.is_neon = 1;                                         \
12140
    }                                                           \
12141
  while (0)
12142
 
12143
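/* Emit an error and bail out of the calling encoder if a Neon type suffix
   was supplied but the instruction did not turn out to be a Neon one.  */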
#define check_neon_suffixes                                             \
12144
  do                                                                    \
12145
    {                                                                   \
12146
      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon)        \
12147
        {                                                               \
12148
          as_bad (_("invalid neon suffix for non neon instruction"));   \
12149
          return;                                                       \
12150
        }                                                               \
12151
    }                                                                   \
12152
  while (0)
12153
 
12154
/* Define shapes for instruction operands. The following mnemonic characters
12155
   are used in this table:
12156
 
12157
     F - VFP S<n> register
12158
     D - Neon D<n> register
12159
     Q - Neon Q<n> register
12160
     I - Immediate
12161
     S - Scalar
12162
     R - ARM register
12163
     L - D<n> register list
12164
 
12165
   This table is used to generate various data:
12166
     - enumerations of the form NS_DDR to be used as arguments to
12167
       neon_select_shape.
12168
     - a table classifying shapes into single, double, quad, mixed.
12169
     - a table used to drive neon_select_shape.  */
12170
 
12171
#define NEON_SHAPE_DEF                  \
12172
  X(3, (D, D, D), DOUBLE),              \
12173
  X(3, (Q, Q, Q), QUAD),                \
12174
  X(3, (D, D, I), DOUBLE),              \
12175
  X(3, (Q, Q, I), QUAD),                \
12176
  X(3, (D, D, S), DOUBLE),              \
12177
  X(3, (Q, Q, S), QUAD),                \
12178
  X(2, (D, D), DOUBLE),                 \
12179
  X(2, (Q, Q), QUAD),                   \
12180
  X(2, (D, S), DOUBLE),                 \
12181
  X(2, (Q, S), QUAD),                   \
12182
  X(2, (D, R), DOUBLE),                 \
12183
  X(2, (Q, R), QUAD),                   \
12184
  X(2, (D, I), DOUBLE),                 \
12185
  X(2, (Q, I), QUAD),                   \
12186
  X(3, (D, L, D), DOUBLE),              \
12187
  X(2, (D, Q), MIXED),                  \
12188
  X(2, (Q, D), MIXED),                  \
12189
  X(3, (D, Q, I), MIXED),               \
12190
  X(3, (Q, D, I), MIXED),               \
12191
  X(3, (Q, D, D), MIXED),               \
12192
  X(3, (D, Q, Q), MIXED),               \
12193
  X(3, (Q, Q, D), MIXED),               \
12194
  X(3, (Q, D, S), MIXED),               \
12195
  X(3, (D, Q, S), MIXED),               \
12196
  X(4, (D, D, D, I), DOUBLE),           \
12197
  X(4, (Q, Q, Q, I), QUAD),             \
12198
  X(2, (F, F), SINGLE),                 \
12199
  X(3, (F, F, F), SINGLE),              \
12200
  X(2, (F, I), SINGLE),                 \
12201
  X(2, (F, D), MIXED),                  \
12202
  X(2, (D, F), MIXED),                  \
12203
  X(3, (F, F, I), MIXED),               \
12204
  X(4, (R, R, F, F), SINGLE),           \
12205
  X(4, (F, F, R, R), SINGLE),           \
12206
  X(3, (D, R, R), DOUBLE),              \
12207
  X(3, (R, R, D), DOUBLE),              \
12208
  X(2, (S, R), SINGLE),                 \
12209
  X(2, (R, S), SINGLE),                 \
12210
  X(2, (F, R), SINGLE),                 \
12211
  X(2, (R, F), SINGLE)
12212
 
12213
#define S2(A,B)         NS_##A##B
12214
#define S3(A,B,C)       NS_##A##B##C
12215
#define S4(A,B,C,D)     NS_##A##B##C##D
12216
 
12217
#define X(N, L, C) S##N L
12218
 
12219
enum neon_shape
12220
{
12221
  NEON_SHAPE_DEF,
12222
  NS_NULL
12223
};
12224
 
12225
#undef X
12226
#undef S2
12227
#undef S3
12228
#undef S4
12229
 
12230
enum neon_shape_class
12231
{
12232
  SC_SINGLE,
12233
  SC_DOUBLE,
12234
  SC_QUAD,
12235
  SC_MIXED
12236
};
12237
 
12238
#define X(N, L, C) SC_##C
12239
 
12240
static enum neon_shape_class neon_shape_class[] =
12241
{
12242
  NEON_SHAPE_DEF
12243
};
12244
 
12245
#undef X
12246
 
12247
enum neon_shape_el
12248
{
12249
  SE_F,
12250
  SE_D,
12251
  SE_Q,
12252
  SE_I,
12253
  SE_S,
12254
  SE_R,
12255
  SE_L
12256
};
12257
 
12258
/* Register widths of above.  */
12259
static unsigned neon_shape_el_size[] =
12260
{
12261
  32,
12262
  64,
12263
  128,
12264
  0,
12265
  32,
12266
  32,
12267
  0
12268
};
12269
 
12270
struct neon_shape_info
12271
{
12272
  unsigned els;
12273
  enum neon_shape_el el[NEON_MAX_TYPE_ELS];
12274
};
12275
 
12276
#define S2(A,B)         { SE_##A, SE_##B }
12277
#define S3(A,B,C)       { SE_##A, SE_##B, SE_##C }
12278
#define S4(A,B,C,D)     { SE_##A, SE_##B, SE_##C, SE_##D }
12279
 
12280
#define X(N, L, C) { N, S##N L }
12281
 
12282
static struct neon_shape_info neon_shape_tab[] =
12283
{
12284
  NEON_SHAPE_DEF
12285
};
12286
 
12287
#undef X
12288
#undef S2
12289
#undef S3
12290
#undef S4
12291
 
12292
/* Bit masks used in type checking given instructions.
12293
  'N_EQK' means the type must be the same as (or based on in some way) the key
12294
   type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
12295
   set, various other bits can be set as well in order to modify the meaning of
12296
   the type constraint.  */
12297
 
12298
enum neon_type_mask
12299
{
12300
  N_S8   = 0x0000001,
12301
  N_S16  = 0x0000002,
12302
  N_S32  = 0x0000004,
12303
  N_S64  = 0x0000008,
12304
  N_U8   = 0x0000010,
12305
  N_U16  = 0x0000020,
12306
  N_U32  = 0x0000040,
12307
  N_U64  = 0x0000080,
12308
  N_I8   = 0x0000100,
12309
  N_I16  = 0x0000200,
12310
  N_I32  = 0x0000400,
12311
  N_I64  = 0x0000800,
12312
  N_8    = 0x0001000,
12313
  N_16   = 0x0002000,
12314
  N_32   = 0x0004000,
12315
  N_64   = 0x0008000,
12316
  N_P8   = 0x0010000,
12317
  N_P16  = 0x0020000,
12318
  N_F16  = 0x0040000,
12319
  N_F32  = 0x0080000,
12320
  N_F64  = 0x0100000,
12321
  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
12322
  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
12323
  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
12324
  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
12325
  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
12326
  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
12327
  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
12328
  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
12329
  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
12330
  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
12331
  N_UTYP = 0,
12332
  N_MAX_NONSPECIAL = N_F64
12333
};
12334
 
12335
#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
12336
 
12337
#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
12338
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
12339
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
12340
#define N_SUF_32   (N_SU_32 | N_F32)
12341
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
12342
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F32)
12343
 
12344
/* Pass this as the first type argument to neon_check_type to ignore types
12345
   altogether.  */
12346
#define N_IGNORE_TYPE (N_KEY | N_EQK)
12347
 
12348
/* Select a "shape" for the current instruction (describing register types or
12349
   sizes) from a list of alternatives. Return NS_NULL if the current instruction
12350
   doesn't fit. For non-polymorphic shapes, checking is usually done as a
12351
   function of operand parsing, so this function doesn't need to be called.
12352
   Shapes should be listed in order of decreasing length.  */
12353
 
12354
static enum neon_shape
12355
neon_select_shape (enum neon_shape shape, ...)
12356
{
12357
  va_list ap;
12358
  enum neon_shape first_shape = shape;
12359
 
12360
  /* Fix missing optional operands. FIXME: we don't know at this point how
12361
     many arguments we should have, so this makes the assumption that we have
12362
     > 1. This is true of all current Neon opcodes, I think, but may not be
12363
     true in the future.  */
12364
  if (!inst.operands[1].present)
12365
    inst.operands[1] = inst.operands[0];
12366
 
12367
  va_start (ap, shape);
12368
 
12369
  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
12370
    {
12371
      unsigned j;
12372
      int matches = 1;
12373
 
12374
      for (j = 0; j < neon_shape_tab[shape].els; j++)
12375
        {
12376
          if (!inst.operands[j].present)
12377
            {
12378
              matches = 0;
12379
              break;
12380
            }
12381
 
12382
          switch (neon_shape_tab[shape].el[j])
12383
            {
12384
            case SE_F:
12385
              if (!(inst.operands[j].isreg
12386
                    && inst.operands[j].isvec
12387
                    && inst.operands[j].issingle
12388
                    && !inst.operands[j].isquad))
12389
                matches = 0;
12390
              break;
12391
 
12392
            case SE_D:
12393
              if (!(inst.operands[j].isreg
12394
                    && inst.operands[j].isvec
12395
                    && !inst.operands[j].isquad
12396
                    && !inst.operands[j].issingle))
12397
                matches = 0;
12398
              break;
12399
 
12400
            case SE_R:
12401
              if (!(inst.operands[j].isreg
12402
                    && !inst.operands[j].isvec))
12403
                matches = 0;
12404
              break;
12405
 
12406
            case SE_Q:
12407
              if (!(inst.operands[j].isreg
12408
                    && inst.operands[j].isvec
12409
                    && inst.operands[j].isquad
12410
                    && !inst.operands[j].issingle))
12411
                matches = 0;
12412
              break;
12413
 
12414
            case SE_I:
12415
              if (!(!inst.operands[j].isreg
12416
                    && !inst.operands[j].isscalar))
12417
                matches = 0;
12418
              break;
12419
 
12420
            case SE_S:
12421
              if (!(!inst.operands[j].isreg
12422
                    && inst.operands[j].isscalar))
12423
                matches = 0;
12424
              break;
12425
 
12426
            case SE_L:
12427
              break;
12428
            }
12429
          if (!matches)
12430
            break;
12431
        }
      if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
12433
        /* We've matched all the entries in the shape table, and we don't
12434
           have any left over operands which have not been matched.  */
        break;
12436
    }
12437
 
12438
  va_end (ap);
12439
 
12440
  if (shape == NS_NULL && first_shape != NS_NULL)
12441
    first_error (_("invalid instruction shape"));
12442
 
12443
  return shape;
12444
}
12445
 
12446
/* True if SHAPE is predominantly a quadword operation (most of the time, this
12447
   means the Q bit should be set).  */
12448
 
12449
static int
12450
neon_quad (enum neon_shape shape)
12451
{
12452
  return neon_shape_class[shape] == SC_QUAD;
12453
}
12454
 
12455
static void
12456
neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
12457
                       unsigned *g_size)
12458
{
12459
  /* Allow modification to be made to types which are constrained to be
12460
     based on the key element, based on bits set alongside N_EQK.  */
12461
  if ((typebits & N_EQK) != 0)
12462
    {
12463
      if ((typebits & N_HLF) != 0)
12464
        *g_size /= 2;
12465
      else if ((typebits & N_DBL) != 0)
12466
        *g_size *= 2;
12467
      if ((typebits & N_SGN) != 0)
12468
        *g_type = NT_signed;
12469
      else if ((typebits & N_UNS) != 0)
12470
        *g_type = NT_unsigned;
12471
      else if ((typebits & N_INT) != 0)
12472
        *g_type = NT_integer;
12473
      else if ((typebits & N_FLT) != 0)
12474
        *g_type = NT_float;
12475
      else if ((typebits & N_SIZ) != 0)
12476
        *g_type = NT_untyped;
12477
    }
12478
}
12479
 
12480
/* Return a copy of KEY promoted according to the bits set in THISARG.  KEY is
   the "key" operand type, i.e. the single type specified in a Neon instruction
   when only one is given.  */
12483
 
12484
static struct neon_type_el
12485
neon_type_promote (struct neon_type_el *key, unsigned thisarg)
12486
{
12487
  struct neon_type_el dest = *key;
12488
 
12489
  gas_assert ((thisarg & N_EQK) != 0);
12490
 
12491
  neon_modify_type_size (thisarg, &dest.type, &dest.size);
12492
 
12493
  return dest;
12494
}
12495
 
12496
/* Convert Neon type and size into compact bitmask representation.  */
12497
 
12498
static enum neon_type_mask
12499
type_chk_of_el_type (enum neon_el_type type, unsigned size)
12500
{
12501
  switch (type)
12502
    {
12503
    case NT_untyped:
12504
      switch (size)
12505
        {
12506
        case 8:  return N_8;
12507
        case 16: return N_16;
12508
        case 32: return N_32;
12509
        case 64: return N_64;
12510
        default: ;
12511
        }
12512
      break;
12513
 
12514
    case NT_integer:
12515
      switch (size)
12516
        {
12517
        case 8:  return N_I8;
12518
        case 16: return N_I16;
12519
        case 32: return N_I32;
12520
        case 64: return N_I64;
12521
        default: ;
12522
        }
12523
      break;
12524
 
12525
    case NT_float:
12526
      switch (size)
12527
        {
12528
        case 16: return N_F16;
12529
        case 32: return N_F32;
12530
        case 64: return N_F64;
12531
        default: ;
12532
        }
12533
      break;
12534
 
12535
    case NT_poly:
12536
      switch (size)
12537
        {
12538
        case 8:  return N_P8;
12539
        case 16: return N_P16;
12540
        default: ;
12541
        }
12542
      break;
12543
 
12544
    case NT_signed:
12545
      switch (size)
12546
        {
12547
        case 8:  return N_S8;
12548
        case 16: return N_S16;
12549
        case 32: return N_S32;
12550
        case 64: return N_S64;
12551
        default: ;
12552
        }
12553
      break;
12554
 
12555
    case NT_unsigned:
12556
      switch (size)
12557
        {
12558
        case 8:  return N_U8;
12559
        case 16: return N_U16;
12560
        case 32: return N_U32;
12561
        case 64: return N_U64;
12562
        default: ;
12563
        }
12564
      break;
12565
 
12566
    default: ;
12567
    }
12568
 
12569
  return N_UTYP;
12570
}
12571
 
12572
/* Convert compact Neon bitmask type representation to a type and size. Only
12573
   handles the case where a single bit is set in the mask.  */
12574
 
12575
static int
12576
el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
12577
                     enum neon_type_mask mask)
12578
{
12579
  if ((mask & N_EQK) != 0)
12580
    return FAIL;
12581
 
12582
  if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
12583
    *size = 8;
12584
  else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_P16)) != 0)
12585
    *size = 16;
12586
  else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
12587
    *size = 32;
12588
  else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64)) != 0)
12589
    *size = 64;
12590
  else
12591
    return FAIL;
12592
 
12593
  if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
12594
    *type = NT_signed;
12595
  else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
12596
    *type = NT_unsigned;
12597
  else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
12598
    *type = NT_integer;
12599
  else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
12600
    *type = NT_untyped;
12601
  else if ((mask & (N_P8 | N_P16)) != 0)
12602
    *type = NT_poly;
12603
  else if ((mask & (N_F32 | N_F64)) != 0)
12604
    *type = NT_float;
12605
  else
12606
    return FAIL;
12607
 
12608
  return SUCCESS;
12609
}
12610
 
12611
/* Modify a bitmask of allowed types. This is only needed for type
12612
   relaxation.  */
12613
 
12614
static unsigned
12615
modify_types_allowed (unsigned allowed, unsigned mods)
12616
{
12617
  unsigned size;
12618
  enum neon_el_type type;
12619
  unsigned destmask;
12620
  int i;
12621
 
12622
  destmask = 0;
12623
 
12624
  for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
12625
    {
12626
      if (el_type_of_type_chk (&type, &size,
12627
                               (enum neon_type_mask) (allowed & i)) == SUCCESS)
12628
        {
12629
          neon_modify_type_size (mods, &type, &size);
12630
          destmask |= type_chk_of_el_type (type, size);
12631
        }
12632
    }
12633
 
12634
  return destmask;
12635
}
12636
 
12637
/* Check type and return type classification.
12638
   The manual states (paraphrase): If one datatype is given, it indicates the
12639
   type given in:
12640
    - the second operand, if there is one
12641
    - the operand, if there is no second operand
12642
    - the result, if there are no operands.
12643
   This isn't quite good enough though, so we use a concept of a "key" datatype
12644
   which is set on a per-instruction basis, which is the one which matters when
12645
   only one data type is written.
12646
   Note: this function has side-effects (e.g. filling in missing operands). All
12647
   Neon instructions should call it before performing bit encoding.  */
12648
 
12649
static struct neon_type_el
12650
neon_check_type (unsigned els, enum neon_shape ns, ...)
12651
{
12652
  va_list ap;
12653
  unsigned i, pass, key_el = 0;
12654
  unsigned types[NEON_MAX_TYPE_ELS];
12655
  enum neon_el_type k_type = NT_invtype;
12656
  unsigned k_size = -1u;
12657
  struct neon_type_el badtype = {NT_invtype, -1};
12658
  unsigned key_allowed = 0;
12659
 
12660
  /* Optional registers in Neon instructions are always (not) in operand 1.
12661
     Fill in the missing operand here, if it was omitted.  */
12662
  if (els > 1 && !inst.operands[1].present)
12663
    inst.operands[1] = inst.operands[0];
12664
 
12665
  /* Suck up all the varargs.  */
12666
  va_start (ap, ns);
12667
  for (i = 0; i < els; i++)
12668
    {
12669
      unsigned thisarg = va_arg (ap, unsigned);
12670
      if (thisarg == N_IGNORE_TYPE)
12671
        {
12672
          va_end (ap);
12673
          return badtype;
12674
        }
12675
      types[i] = thisarg;
12676
      if ((thisarg & N_KEY) != 0)
12677
        key_el = i;
12678
    }
12679
  va_end (ap);
12680
 
12681
  if (inst.vectype.elems > 0)
12682
    for (i = 0; i < els; i++)
12683
      if (inst.operands[i].vectype.type != NT_invtype)
12684
        {
12685
          first_error (_("types specified in both the mnemonic and operands"));
12686
          return badtype;
12687
        }
12688
 
12689
  /* Duplicate inst.vectype elements here as necessary.
12690
     FIXME: No idea if this is exactly the same as the ARM assembler,
12691
     particularly when an insn takes one register and one non-register
12692
     operand. */
12693
  if (inst.vectype.elems == 1 && els > 1)
12694
    {
12695
      unsigned j;
12696
      inst.vectype.elems = els;
12697
      inst.vectype.el[key_el] = inst.vectype.el[0];
12698
      for (j = 0; j < els; j++)
12699
        if (j != key_el)
12700
          inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
12701
                                                  types[j]);
12702
    }
12703
  else if (inst.vectype.elems == 0 && els > 0)
12704
    {
12705
      unsigned j;
12706
      /* No types were given after the mnemonic, so look for types specified
12707
         after each operand. We allow some flexibility here; as long as the
12708
         "key" operand has a type, we can infer the others.  */
12709
      for (j = 0; j < els; j++)
12710
        if (inst.operands[j].vectype.type != NT_invtype)
12711
          inst.vectype.el[j] = inst.operands[j].vectype;
12712
 
12713
      if (inst.operands[key_el].vectype.type != NT_invtype)
12714
        {
12715
          for (j = 0; j < els; j++)
12716
            if (inst.operands[j].vectype.type == NT_invtype)
12717
              inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
12718
                                                      types[j]);
12719
        }
12720
      else
12721
        {
12722
          first_error (_("operand types can't be inferred"));
12723
          return badtype;
12724
        }
12725
    }
12726
  else if (inst.vectype.elems != els)
12727
    {
12728
      first_error (_("type specifier has the wrong number of parts"));
12729
      return badtype;
12730
    }
12731
 
12732
  for (pass = 0; pass < 2; pass++)
12733
    {
12734
      for (i = 0; i < els; i++)
12735
        {
12736
          unsigned thisarg = types[i];
12737
          unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
12738
            ? modify_types_allowed (key_allowed, thisarg) : thisarg;
12739
          enum neon_el_type g_type = inst.vectype.el[i].type;
12740
          unsigned g_size = inst.vectype.el[i].size;
12741
 
12742
          /* Decay more-specific signed & unsigned types to sign-insensitive
12743
             integer types if sign-specific variants are unavailable.  */
12744
          if ((g_type == NT_signed || g_type == NT_unsigned)
12745
              && (types_allowed & N_SU_ALL) == 0)
12746
            g_type = NT_integer;
12747
 
12748
          /* If only untyped args are allowed, decay any more specific types to
12749
             them. Some instructions only care about signs for some element
12750
             sizes, so handle that properly.  */
12751
          if ((g_size == 8 && (types_allowed & N_8) != 0)
12752
              || (g_size == 16 && (types_allowed & N_16) != 0)
12753
              || (g_size == 32 && (types_allowed & N_32) != 0)
12754
              || (g_size == 64 && (types_allowed & N_64) != 0))
12755
            g_type = NT_untyped;
12756
 
12757
          if (pass == 0)
12758
            {
12759
              if ((thisarg & N_KEY) != 0)
12760
                {
12761
                  k_type = g_type;
12762
                  k_size = g_size;
12763
                  key_allowed = thisarg & ~N_KEY;
12764
                }
12765
            }
12766
          else
12767
            {
12768
              if ((thisarg & N_VFP) != 0)
12769
                {
12770
                  enum neon_shape_el regshape;
12771
                  unsigned regwidth, match;
12772
 
12773
                  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
12774
                  if (ns == NS_NULL)
12775
                    {
12776
                      first_error (_("invalid instruction shape"));
12777
                      return badtype;
12778
                    }
12779
                  regshape = neon_shape_tab[ns].el[i];
12780
                  regwidth = neon_shape_el_size[regshape];
12781
 
12782
                  /* In VFP mode, operands must match register widths. If we
12783
                     have a key operand, use its width, else use the width of
12784
                     the current operand.  */
12785
                  if (k_size != -1u)
12786
                    match = k_size;
12787
                  else
12788
                    match = g_size;
12789
 
12790
                  if (regwidth != match)
12791
                    {
12792
                      first_error (_("operand size must match register width"));
12793
                      return badtype;
12794
                    }
12795
                }
12796
 
12797
              if ((thisarg & N_EQK) == 0)
12798
                {
12799
                  unsigned given_type = type_chk_of_el_type (g_type, g_size);
12800
 
12801
                  if ((given_type & types_allowed) == 0)
12802
                    {
12803
                      first_error (_("bad type in Neon instruction"));
12804
                      return badtype;
12805
                    }
12806
                }
12807
              else
12808
                {
12809
                  enum neon_el_type mod_k_type = k_type;
12810
                  unsigned mod_k_size = k_size;
12811
                  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
12812
                  if (g_type != mod_k_type || g_size != mod_k_size)
12813
                    {
12814
                      first_error (_("inconsistent types in Neon instruction"));
12815
                      return badtype;
12816
                    }
12817
                }
12818
            }
12819
        }
12820
    }
12821
 
12822
  return inst.vectype.el[key_el];
12823
}
12824
 
12825
/* Neon-style VFP instruction forwarding.  */
12826
 
12827
/* Thumb VFP instructions have 0xE in the condition field.  */
12828
 
12829
static void
12830
do_vfp_cond_or_thumb (void)
12831
{
12832
  inst.is_neon = 1;
12833
 
12834
  if (thumb_mode)
12835
    inst.instruction |= 0xe0000000;
12836
  else
12837
    inst.instruction |= inst.cond << 28;
12838
}
12839
 
12840
/* Look up and encode a simple mnemonic, for use as a helper function for the
12841
   Neon-style VFP syntax.  This avoids duplication of bits of the insns table,
12842
   etc.  It is assumed that operand parsing has already been done, and that the
12843
   operands are in the form expected by the given opcode (this isn't necessarily
12844
   the same as the form in which they were parsed, hence some massaging must
12845
   take place before this function is called).
12846
   Checks current arch version against that in the looked-up opcode.  */
12847
 
12848
static void
12849
do_vfp_nsyn_opcode (const char *opname)
12850
{
12851
  const struct asm_opcode *opcode;
12852
 
12853
  opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);
12854
 
12855
  if (!opcode)
12856
    abort ();
12857
 
12858
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
12859
                thumb_mode ? *opcode->tvariant : *opcode->avariant),
12860
              _(BAD_FPU));
12861
 
12862
  inst.is_neon = 1;
12863
 
12864
  if (thumb_mode)
12865
    {
12866
      inst.instruction = opcode->tvalue;
12867
      opcode->tencode ();
12868
    }
12869
  else
12870
    {
12871
      inst.instruction = (inst.cond << 28) | opcode->avalue;
12872
      opcode->aencode ();
12873
    }
12874
}
12875
 
12876
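/* VADD/VSUB in the Neon-style VFP syntax: forward to the fadds/fsubs
   (single) or faddd/fsubd (double) encoders according to the shape.  */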
static void
12877
do_vfp_nsyn_add_sub (enum neon_shape rs)
12878
{
12879
  int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
12880
 
12881
  if (rs == NS_FFF)
12882
    {
12883
      if (is_add)
12884
        do_vfp_nsyn_opcode ("fadds");
12885
      else
12886
        do_vfp_nsyn_opcode ("fsubs");
12887
    }
12888
  else
12889
    {
12890
      if (is_add)
12891
        do_vfp_nsyn_opcode ("faddd");
12892
      else
12893
        do_vfp_nsyn_opcode ("fsubd");
12894
    }
12895
}
12896
 
12897
/* Check operand types to see if this is a VFP instruction, and if so call
12898
   PFN ().  */
12899
 
12900
static int
12901
try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
12902
{
12903
  enum neon_shape rs;
12904
  struct neon_type_el et;
12905
 
12906
  switch (args)
12907
    {
12908
    case 2:
12909
      rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
12910
      et = neon_check_type (2, rs,
12911
        N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
12912
      break;
12913
 
12914
    case 3:
12915
      rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
12916
      et = neon_check_type (3, rs,
12917
        N_EQK | N_VFP, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
12918
      break;
12919
 
12920
    default:
12921
      abort ();
12922
    }
12923
 
12924
  if (et.type != NT_invtype)
12925
    {
12926
      pfn (rs);
12927
      return SUCCESS;
12928
    }
12929
 
12930
  inst.error = NULL;
12931
  return FAIL;
12932
}
12933
 
12934
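/* VMLA/VMLS in the Neon-style VFP syntax: forward to fmacs/fnmacs or
   fmacd/fnmacd.  */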
static void
12935
do_vfp_nsyn_mla_mls (enum neon_shape rs)
12936
{
12937
  int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
12938
 
12939
  if (rs == NS_FFF)
12940
    {
12941
      if (is_mla)
12942
        do_vfp_nsyn_opcode ("fmacs");
12943
      else
12944
        do_vfp_nsyn_opcode ("fnmacs");
12945
    }
12946
  else
12947
    {
12948
      if (is_mla)
12949
        do_vfp_nsyn_opcode ("fmacd");
12950
      else
12951
        do_vfp_nsyn_opcode ("fnmacd");
12952
    }
12953
}
12954
 
12955
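/* VFMA/VFMS in the Neon-style VFP syntax: forward to ffmas/ffnmas or
   ffmad/ffnmad.  */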
static void
12956
do_vfp_nsyn_fma_fms (enum neon_shape rs)
12957
{
12958
  int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
12959
 
12960
  if (rs == NS_FFF)
12961
    {
12962
      if (is_fma)
12963
        do_vfp_nsyn_opcode ("ffmas");
12964
      else
12965
        do_vfp_nsyn_opcode ("ffnmas");
12966
    }
12967
  else
12968
    {
12969
      if (is_fma)
12970
        do_vfp_nsyn_opcode ("ffmad");
12971
      else
12972
        do_vfp_nsyn_opcode ("ffnmad");
12973
    }
12974
}
12975
 
12976
static void
12977
do_vfp_nsyn_mul (enum neon_shape rs)
12978
{
12979
  if (rs == NS_FFF)
12980
    do_vfp_nsyn_opcode ("fmuls");
12981
  else
12982
    do_vfp_nsyn_opcode ("fmuld");
12983
}
12984
 
12985
static void
12986
do_vfp_nsyn_abs_neg (enum neon_shape rs)
12987
{
12988
  int is_neg = (inst.instruction & 0x80) != 0;
12989
  neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY);
12990
 
12991
  if (rs == NS_FF)
12992
    {
12993
      if (is_neg)
12994
        do_vfp_nsyn_opcode ("fnegs");
12995
      else
12996
        do_vfp_nsyn_opcode ("fabss");
12997
    }
12998
  else
12999
    {
13000
      if (is_neg)
13001
        do_vfp_nsyn_opcode ("fnegd");
13002
      else
13003
        do_vfp_nsyn_opcode ("fabsd");
13004
    }
13005
}
13006
 
13007
/* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
13008
   insns belong to Neon, and are handled elsewhere.  */
13009
 
13010
static void
13011
do_vfp_nsyn_ldm_stm (int is_dbmode)
13012
{
13013
  int is_ldm = (inst.instruction & (1 << 20)) != 0;
13014
  if (is_ldm)
13015
    {
13016
      if (is_dbmode)
13017
        do_vfp_nsyn_opcode ("fldmdbs");
13018
      else
13019
        do_vfp_nsyn_opcode ("fldmias");
13020
    }
13021
  else
13022
    {
13023
      if (is_dbmode)
13024
        do_vfp_nsyn_opcode ("fstmdbs");
13025
      else
13026
        do_vfp_nsyn_opcode ("fstmias");
13027
    }
13028
}
13029
 
13030
static void
13031
do_vfp_nsyn_sqrt (void)
13032
{
13033
  enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
13034
  neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13035
 
13036
  if (rs == NS_FF)
13037
    do_vfp_nsyn_opcode ("fsqrts");
13038
  else
13039
    do_vfp_nsyn_opcode ("fsqrtd");
13040
}
13041
 
13042
static void
13043
do_vfp_nsyn_div (void)
13044
{
13045
  enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
13046
  neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
13047
    N_F32 | N_F64 | N_KEY | N_VFP);
13048
 
13049
  if (rs == NS_FFF)
13050
    do_vfp_nsyn_opcode ("fdivs");
13051
  else
13052
    do_vfp_nsyn_opcode ("fdivd");
13053
}
13054
 
13055
static void
13056
do_vfp_nsyn_nmul (void)
13057
{
13058
  enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
13059
  neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
13060
    N_F32 | N_F64 | N_KEY | N_VFP);
13061
 
13062
  if (rs == NS_FFF)
13063
    {
13064
      NEON_ENCODE (SINGLE, inst);
13065
      do_vfp_sp_dyadic ();
13066
    }
13067
  else
13068
    {
13069
      NEON_ENCODE (DOUBLE, inst);
13070
      do_vfp_dp_rd_rn_rm ();
13071
    }
13072
  do_vfp_cond_or_thumb ();
13073
}
13074
 
13075
static void
13076
do_vfp_nsyn_cmp (void)
13077
{
13078
  if (inst.operands[1].isreg)
13079
    {
13080
      enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
13081
      neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13082
 
13083
      if (rs == NS_FF)
13084
        {
13085
          NEON_ENCODE (SINGLE, inst);
13086
          do_vfp_sp_monadic ();
13087
        }
13088
      else
13089
        {
13090
          NEON_ENCODE (DOUBLE, inst);
13091
          do_vfp_dp_rd_rm ();
13092
        }
13093
    }
13094
  else
13095
    {
13096
      enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL);
13097
      neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK);
13098
 
13099
      switch (inst.instruction & 0x0fffffff)
13100
        {
13101
        case N_MNEM_vcmp:
13102
          inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
13103
          break;
13104
        case N_MNEM_vcmpe:
13105
          inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
13106
          break;
13107
        default:
13108
          abort ();
13109
        }
13110
 
13111
      if (rs == NS_FI)
13112
        {
13113
          NEON_ENCODE (SINGLE, inst);
13114
          do_vfp_sp_compare_z ();
13115
        }
13116
      else
13117
        {
13118
          NEON_ENCODE (DOUBLE, inst);
13119
          do_vfp_dp_rd ();
13120
        }
13121
    }
13122
  do_vfp_cond_or_thumb ();
13123
}
13124
 
13125
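/* Insert an implicit SP-with-writeback base operand as operand 0, moving the
   original first operand to operand 1.  Used when expanding the vpush/vpop
   style mnemonics.  */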
static void
13126
nsyn_insert_sp (void)
13127
{
13128
  inst.operands[1] = inst.operands[0];
13129
  memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
13130
  inst.operands[0].reg = REG_SP;
13131
  inst.operands[0].isreg = 1;
13132
  inst.operands[0].writeback = 1;
13133
  inst.operands[0].present = 1;
13134
}
13135
 
13136
static void
13137
do_vfp_nsyn_push (void)
13138
{
13139
  nsyn_insert_sp ();
13140
  if (inst.operands[1].issingle)
13141
    do_vfp_nsyn_opcode ("fstmdbs");
13142
  else
13143
    do_vfp_nsyn_opcode ("fstmdbd");
13144
}
13145
 
13146
static void
13147
do_vfp_nsyn_pop (void)
13148
{
13149
  nsyn_insert_sp ();
13150
  if (inst.operands[1].issingle)
13151
    do_vfp_nsyn_opcode ("fldmias");
13152
  else
13153
    do_vfp_nsyn_opcode ("fldmiad");
13154
}
13155
 
13156
/* Fix up Neon data-processing instructions, ORing in the correct bits for
13157
   ARM mode or Thumb mode and moving the encoded bit 24 to bit 28.  */
13158
 
13159
static void
13160
neon_dp_fixup (struct arm_it* insn)
13161
{
13162
  unsigned int i = insn->instruction;
13163
  insn->is_neon = 1;
13164
 
13165
  if (thumb_mode)
13166
    {
13167
      /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode.  */
13168
      if (i & (1 << 24))
13169
        i |= 1 << 28;
13170
 
13171
      i &= ~(1 << 24);
13172
 
13173
      i |= 0xef000000;
13174
    }
13175
  else
13176
    i |= 0xf2000000;
13177
 
13178
  insn->instruction = i;
13179
}
13180
 
13181
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
13182
   (0, 1, 2, 3).  */
13183
 
13184
static unsigned
13185
neon_logbits (unsigned x)
13186
{
13187
  return ffs (x) - 4;
13188
}
13189
 
13190
#define LOW4(R) ((R) & 0xf)
13191
#define HI1(R) (((R) >> 4) & 1)
13192
 
13193
/* Encode insns with bit pattern:
13194
 
13195
  |28/24|23|22 |21 20|19 16|15 12|11    8|7|6|5|4|3  0|
13196
  |  U  |x |D  |size | Rn  | Rd  |x x x x|N|Q|M|x| Rm |
13197
 
13198
  SIZE is passed in bits. -1 means size field isn't changed, in case it has a
13199
  different meaning for some instruction.  */
13200
 
13201
static void
13202
neon_three_same (int isquad, int ubit, int size)
13203
{
13204
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13205
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13206
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
13207
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
13208
  inst.instruction |= LOW4 (inst.operands[2].reg);
13209
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
13210
  inst.instruction |= (isquad != 0) << 6;
13211
  inst.instruction |= (ubit != 0) << 24;
13212
  if (size != -1)
13213
    inst.instruction |= neon_logbits (size) << 20;
13214
 
13215
  neon_dp_fixup (&inst);
13216
}
13217
 
13218
/* Encode instructions of the form:
13219
 
13220
  |28/24|23|22|21 20|19 18|17 16|15 12|11      7|6|5|4|3  0|
13221
  |  U  |x |D |x  x |size |x  x | Rd  |x x x x x|Q|M|x| Rm |
13222
 
13223
  Don't write size if SIZE == -1.  */
13224
 
13225
static void
13226
neon_two_same (int qbit, int ubit, int size)
13227
{
13228
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13229
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13230
  inst.instruction |= LOW4 (inst.operands[1].reg);
13231
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13232
  inst.instruction |= (qbit != 0) << 6;
13233
  inst.instruction |= (ubit != 0) << 24;
13234
 
13235
  if (size != -1)
13236
    inst.instruction |= neon_logbits (size) << 18;
13237
 
13238
  neon_dp_fixup (&inst);
13239
}
13240
 
13241
/* Neon instruction encoders, in approximate order of appearance.  */
13242
 
13243
static void
13244
do_neon_dyadic_i_su (void)
13245
{
13246
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13247
  struct neon_type_el et = neon_check_type (3, rs,
13248
    N_EQK, N_EQK, N_SU_32 | N_KEY);
13249
  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
13250
}
13251
 
13252
static void
13253
do_neon_dyadic_i64_su (void)
13254
{
13255
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13256
  struct neon_type_el et = neon_check_type (3, rs,
13257
    N_EQK, N_EQK, N_SU_ALL | N_KEY);
13258
  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
13259
}
13260
 
13261
static void
13262
neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
13263
                unsigned immbits)
13264
{
13265
  unsigned size = et.size >> 3;
13266
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13267
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13268
  inst.instruction |= LOW4 (inst.operands[1].reg);
13269
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13270
  inst.instruction |= (isquad != 0) << 6;
13271
  inst.instruction |= immbits << 16;
13272
  inst.instruction |= (size >> 3) << 7;
13273
  inst.instruction |= (size & 0x7) << 19;
13274
  if (write_ubit)
13275
    inst.instruction |= (uval != 0) << 24;
13276
 
13277
  neon_dp_fixup (&inst);
13278
}
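/* Illustrative note (not from the original sources): ET.SIZE is converted
   to bytes above, so bit 7 (the "L" bit) is set only for 64-bit elements,
   while the byte count (1, 2 or 4) is ORed into bits 21-19 on top of
   IMMBITS in bits 21-16.  */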
13279
 
13280
static void
13281
do_neon_shl_imm (void)
13282
{
13283
  if (!inst.operands[2].isreg)
13284
    {
13285
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
13286
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
13287
      NEON_ENCODE (IMMED, inst);
13288
      neon_imm_shift (FALSE, 0, neon_quad (rs), et, inst.operands[2].imm);
13289
    }
13290
  else
13291
    {
13292
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13293
      struct neon_type_el et = neon_check_type (3, rs,
13294
        N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
13295
      unsigned int tmp;
13296
 
13297
      /* VSHL/VQSHL 3-register variants have syntax such as:
13298
           vshl.xx Dd, Dm, Dn
13299
         whereas other 3-register operations encoded by neon_three_same have
13300
         syntax like:
13301
           vadd.xx Dd, Dn, Dm
13302
         (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
13303
         here.  */
13304
      tmp = inst.operands[2].reg;
13305
      inst.operands[2].reg = inst.operands[1].reg;
13306
      inst.operands[1].reg = tmp;
13307
      NEON_ENCODE (INTEGER, inst);
13308
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
13309
    }
13310
}
13311
 
13312
static void
13313
do_neon_qshl_imm (void)
13314
{
13315
  if (!inst.operands[2].isreg)
13316
    {
13317
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
13318
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
13319
 
13320
      NEON_ENCODE (IMMED, inst);
13321
      neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
13322
                      inst.operands[2].imm);
13323
    }
13324
  else
13325
    {
13326
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13327
      struct neon_type_el et = neon_check_type (3, rs,
13328
        N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
13329
      unsigned int tmp;
13330
 
13331
      /* See note in do_neon_shl_imm.  */
13332
      tmp = inst.operands[2].reg;
13333
      inst.operands[2].reg = inst.operands[1].reg;
13334
      inst.operands[1].reg = tmp;
13335
      NEON_ENCODE (INTEGER, inst);
13336
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
13337
    }
13338
}
13339
 
13340
static void
13341
do_neon_rshl (void)
13342
{
13343
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13344
  struct neon_type_el et = neon_check_type (3, rs,
13345
    N_EQK, N_EQK, N_SU_ALL | N_KEY);
13346
  unsigned int tmp;
13347
 
13348
  tmp = inst.operands[2].reg;
13349
  inst.operands[2].reg = inst.operands[1].reg;
13350
  inst.operands[1].reg = tmp;
13351
  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
13352
}
13353
 
13354
static int
13355
neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
13356
{
13357
  /* Handle .I8 pseudo-instructions.  */
13358
  if (size == 8)
13359
    {
13360
      /* Unfortunately, this will make everything apart from zero out-of-range.
13361
         FIXME: is this the intended semantics?  There doesn't seem to be much
13362
         point in accepting .I8 if so.  */
13363
      immediate |= immediate << 8;
13364
      size = 16;
13365
    }
13366
 
13367
  if (size >= 32)
13368
    {
13369
      if (immediate == (immediate & 0x000000ff))
13370
        {
13371
          *immbits = immediate;
13372
          return 0x1;
13373
        }
13374
      else if (immediate == (immediate & 0x0000ff00))
13375
        {
13376
          *immbits = immediate >> 8;
13377
          return 0x3;
13378
        }
13379
      else if (immediate == (immediate & 0x00ff0000))
13380
        {
13381
          *immbits = immediate >> 16;
13382
          return 0x5;
13383
        }
13384
      else if (immediate == (immediate & 0xff000000))
13385
        {
13386
          *immbits = immediate >> 24;
13387
          return 0x7;
13388
        }
13389
      if ((immediate & 0xffff) != (immediate >> 16))
13390
        goto bad_immediate;
13391
      immediate &= 0xffff;
13392
    }
13393
 
13394
  if (immediate == (immediate & 0x000000ff))
13395
    {
13396
      *immbits = immediate;
13397
      return 0x9;
13398
    }
13399
  else if (immediate == (immediate & 0x0000ff00))
13400
    {
13401
      *immbits = immediate >> 8;
13402
      return 0xb;
13403
    }
13404
 
13405
  bad_immediate:
13406
  first_error (_("immediate value out of range"));
13407
  return FAIL;
13408
}
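/* Worked examples (illustrative, not from the original sources): with
   SIZE == 32, IMMEDIATE == 0x0000ab00 sets *IMMBITS to 0xab and returns
   cmode 0x3; with SIZE == 16, IMMEDIATE == 0x00ab sets *IMMBITS to 0xab
   and returns cmode 0x9.  */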
13409
 
13410
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
13411
   A, B, C, D.  */
13412
 
13413
static int
13414
neon_bits_same_in_bytes (unsigned imm)
13415
{
13416
  return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
13417
         && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
13418
         && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
13419
         && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
13420
}
13421
 
13422
/* For immediate of above form, return 0bABCD.  */
13423
 
13424
static unsigned
13425
neon_squash_bits (unsigned imm)
13426
{
13427
  return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
13428
         | ((imm & 0x01000000) >> 21);
13429
}
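/* Worked example (illustrative, not from the original sources): the
   immediate 0xff00ff00 passes neon_bits_same_in_bytes and squashes to
   0b1010 == 0xa, since only bytes 1 and 3 are all-ones.  */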
13430
 
13431
/* Compress quarter-float representation to 0b...000 abcdefgh.  */
13432
 
13433
static unsigned
13434
neon_qfloat_bits (unsigned imm)
13435
{
13436
  return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
13437
}
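/* Worked example (illustrative, not from the original sources): the IEEE
   single 1.0f (0x3f800000) compresses to 0x70, which is the standard
   abcdefgh immediate for VMOV.F32 #1.0.  */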
13438
 
13439
/* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
13440
   the instruction. *OP is passed as the initial value of the op field, and
13441
   may be set to a different value depending on the constant (i.e.
13442
   "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
13443
   MVN).  If the immediate looks like a repeated pattern then also
13444
   try smaller element sizes.  */
13445
 
13446
static int
13447
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
13448
                         unsigned *immbits, int *op, int size,
13449
                         enum neon_el_type type)
13450
{
13451
  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
13452
     float.  */
13453
  if (type == NT_float && !float_p)
13454
    return FAIL;
13455
 
13456
  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
13457
    {
13458
      if (size != 32 || *op == 1)
13459
        return FAIL;
13460
      *immbits = neon_qfloat_bits (immlo);
13461
      return 0xf;
13462
    }
13463
 
13464
  if (size == 64)
13465
    {
13466
      if (neon_bits_same_in_bytes (immhi)
13467
          && neon_bits_same_in_bytes (immlo))
13468
        {
13469
          if (*op == 1)
13470
            return FAIL;
13471
          *immbits = (neon_squash_bits (immhi) << 4)
13472
                     | neon_squash_bits (immlo);
13473
          *op = 1;
13474
          return 0xe;
13475
        }
13476
 
13477
      if (immhi != immlo)
13478
        return FAIL;
13479
    }
13480
 
13481
  if (size >= 32)
13482
    {
13483
      if (immlo == (immlo & 0x000000ff))
13484
        {
13485
          *immbits = immlo;
13486
          return 0x0;
13487
        }
13488
      else if (immlo == (immlo & 0x0000ff00))
13489
        {
13490
          *immbits = immlo >> 8;
13491
          return 0x2;
13492
        }
13493
      else if (immlo == (immlo & 0x00ff0000))
13494
        {
13495
          *immbits = immlo >> 16;
13496
          return 0x4;
13497
        }
13498
      else if (immlo == (immlo & 0xff000000))
13499
        {
13500
          *immbits = immlo >> 24;
13501
          return 0x6;
13502
        }
13503
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
13504
        {
13505
          *immbits = (immlo >> 8) & 0xff;
13506
          return 0xc;
13507
        }
13508
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
13509
        {
13510
          *immbits = (immlo >> 16) & 0xff;
13511
          return 0xd;
13512
        }
13513
 
13514
      if ((immlo & 0xffff) != (immlo >> 16))
13515
        return FAIL;
13516
      immlo &= 0xffff;
13517
    }
13518
 
13519
  if (size >= 16)
13520
    {
13521
      if (immlo == (immlo & 0x000000ff))
13522
        {
13523
          *immbits = immlo;
13524
          return 0x8;
13525
        }
13526
      else if (immlo == (immlo & 0x0000ff00))
13527
        {
13528
          *immbits = immlo >> 8;
13529
          return 0xa;
13530
        }
13531
 
13532
      if ((immlo & 0xff) != (immlo >> 8))
13533
        return FAIL;
13534
      immlo &= 0xff;
13535
    }
13536
 
13537
  if (immlo == (immlo & 0x000000ff))
13538
    {
13539
      /* Don't allow MVN with 8-bit immediate.  */
13540
      if (*op == 1)
13541
        return FAIL;
13542
      *immbits = immlo;
13543
      return 0xe;
13544
    }
13545
 
13546
  return FAIL;
13547
}
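/* Worked example (illustrative, not from the original sources): for
   VMOV.I32 with immediate 0x00ff0000 the function returns cmode 0x4 with
   *IMMBITS == 0xff, while a 32-bit float operand such as 1.0f takes the
   quarter-float path near the top and returns cmode 0xf.  */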
13548
 
13549
/* Write immediate bits [7:0] to the following locations:
13550
 
13551
  |28/24|23     19|18 16|15                    4|3     0|
13552
  |  a  |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
13553
 
13554
  This function is used by VMOV/VMVN/VORR/VBIC.  */
13555
 
13556
static void
13557
neon_write_immbits (unsigned immbits)
13558
{
13559
  inst.instruction |= immbits & 0xf;
13560
  inst.instruction |= ((immbits >> 4) & 0x7) << 16;
13561
  inst.instruction |= ((immbits >> 7) & 0x1) << 24;
13562
}
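/* Worked example (illustrative, not from the original sources): IMMBITS
   0xab (0b10101011) is scattered as efgh = 0b1011 into bits 3-0,
   bcd = 0b010 into bits 18-16 and a = 1 into bit 24.  */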
13563
 
13564
/* Invert low-order SIZE bits of XHI:XLO.  */
13565
 
13566
static void
13567
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
13568
{
13569
  unsigned immlo = xlo ? *xlo : 0;
13570
  unsigned immhi = xhi ? *xhi : 0;
13571
 
13572
  switch (size)
13573
    {
13574
    case 8:
13575
      immlo = (~immlo) & 0xff;
13576
      break;
13577
 
13578
    case 16:
13579
      immlo = (~immlo) & 0xffff;
13580
      break;
13581
 
13582
    case 64:
13583
      immhi = (~immhi) & 0xffffffff;
13584
      /* fall through.  */
13585
 
13586
    case 32:
13587
      immlo = (~immlo) & 0xffffffff;
13588
      break;
13589
 
13590
    default:
13591
      abort ();
13592
    }
13593
 
13594
  if (xlo)
13595
    *xlo = immlo;
13596
 
13597
  if (xhi)
13598
    *xhi = immhi;
13599
}
13600
 
13601
static void
13602
do_neon_logic (void)
13603
{
13604
  if (inst.operands[2].present && inst.operands[2].isreg)
13605
    {
13606
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13607
      neon_check_type (3, rs, N_IGNORE_TYPE);
13608
      /* U bit and size field were set as part of the bitmask.  */
13609
      NEON_ENCODE (INTEGER, inst);
13610
      neon_three_same (neon_quad (rs), 0, -1);
13611
    }
13612
  else
13613
    {
13614
      const int three_ops_form = (inst.operands[2].present
13615
                                  && !inst.operands[2].isreg);
13616
      const int immoperand = (three_ops_form ? 2 : 1);
13617
      enum neon_shape rs = (three_ops_form
13618
                            ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
13619
                            : neon_select_shape (NS_DI, NS_QI, NS_NULL));
13620
      struct neon_type_el et = neon_check_type (2, rs,
13621
        N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
13622
      enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
13623
      unsigned immbits;
13624
      int cmode;
13625
 
13626
      if (et.type == NT_invtype)
13627
        return;
13628
 
13629
      if (three_ops_form)
13630
        constraint (inst.operands[0].reg != inst.operands[1].reg,
13631
                    _("first and second operands shall be the same register"));
13632
 
13633
      NEON_ENCODE (IMMED, inst);
13634
 
13635
      immbits = inst.operands[immoperand].imm;
13636
      if (et.size == 64)
13637
        {
13638
          /* .i64 is a pseudo-op, so the immediate must be a repeating
13639
             pattern.  */
13640
          if (immbits != (inst.operands[immoperand].regisimm ?
13641
                          inst.operands[immoperand].reg : 0))
13642
            {
13643
              /* Set immbits to an invalid constant.  */
13644
              immbits = 0xdeadbeef;
13645
            }
13646
        }
13647
 
13648
      switch (opcode)
13649
        {
13650
        case N_MNEM_vbic:
13651
          cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
13652
          break;
13653
 
13654
        case N_MNEM_vorr:
13655
          cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
13656
          break;
13657
 
13658
        case N_MNEM_vand:
13659
          /* Pseudo-instruction for VBIC.  */
13660
          neon_invert_size (&immbits, 0, et.size);
13661
          cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
13662
          break;
13663
 
13664
        case N_MNEM_vorn:
13665
          /* Pseudo-instruction for VORR.  */
13666
          neon_invert_size (&immbits, 0, et.size);
13667
          cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
13668
          break;
13669
 
13670
        default:
13671
          abort ();
13672
        }
13673
 
13674
      if (cmode == FAIL)
13675
        return;
13676
 
13677
      inst.instruction |= neon_quad (rs) << 6;
13678
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13679
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13680
      inst.instruction |= cmode << 8;
13681
      neon_write_immbits (immbits);
13682
 
13683
      neon_dp_fixup (&inst);
13684
    }
13685
}
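/* Illustrative note (not from the original sources): the VAND/VORN
   immediate forms above are assembled as VBIC/VORR of the bitwise-inverted
   immediate, e.g. "vand.i32 d0, #0xffffff00" inverts to 0x000000ff and is
   encoded like "vbic.i32 d0, #0x000000ff" (cmode 0x1, immbits 0xff).  */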
13686
 
13687
static void
13688
do_neon_bitfield (void)
13689
{
13690
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13691
  neon_check_type (3, rs, N_IGNORE_TYPE);
13692
  neon_three_same (neon_quad (rs), 0, -1);
13693
}
13694
 
13695
static void
13696
neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
13697
                  unsigned destbits)
13698
{
13699
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13700
  struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
13701
                                            types | N_KEY);
13702
  if (et.type == NT_float)
13703
    {
13704
      NEON_ENCODE (FLOAT, inst);
13705
      neon_three_same (neon_quad (rs), 0, -1);
13706
    }
13707
  else
13708
    {
13709
      NEON_ENCODE (INTEGER, inst);
13710
      neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
13711
    }
13712
}
13713
 
13714
static void
13715
do_neon_dyadic_if_su (void)
13716
{
13717
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
13718
}
13719
 
13720
static void
13721
do_neon_dyadic_if_su_d (void)
13722
{
13723
  /* This version only allows D registers, but that constraint is enforced
13724
     during operand parsing, so we don't need to do anything extra here.  */
13725
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
13726
}
13727
 
13728
static void
13729
do_neon_dyadic_if_i_d (void)
13730
{
13731
  /* The "untyped" case can't happen. Do this to stop the "U" bit being
13732
     affected if we specify unsigned args.  */
13733
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
13734
}
13735
 
13736
enum vfp_or_neon_is_neon_bits
13737
{
13738
  NEON_CHECK_CC = 1,
13739
  NEON_CHECK_ARCH = 2
13740
};
13741
 
13742
/* Call this function when an instruction that could belong to either the VFP
13743
   or Neon instruction sets turns out to be a Neon instruction (due to the
13744
   operand types involved, etc.).  We have to check and/or fix up a couple of
13745
   things:
13746
 
13747
     - Make sure the user hasn't attempted to make a Neon instruction
13748
       conditional.
13749
     - Alter the value in the condition code field if necessary.
13750
     - Make sure that the arch supports Neon instructions.
13751
 
13752
   Which of these operations take place depends on bits from enum
13753
   vfp_or_neon_is_neon_bits.
13754
 
13755
   WARNING: This function has side effects! If NEON_CHECK_CC is used and the
13756
   current instruction's condition is COND_ALWAYS, the condition field is
13757
   changed to inst.uncond_value. This is necessary because instructions shared
13758
   between VFP and Neon may be conditional for the VFP variants only, and the
13759
   unconditional Neon version must have, e.g., 0xF in the condition field.  */
13760
 
13761
static int
13762
vfp_or_neon_is_neon (unsigned check)
13763
{
13764
  /* Conditions are always legal in Thumb mode (IT blocks).  */
13765
  if (!thumb_mode && (check & NEON_CHECK_CC))
13766
    {
13767
      if (inst.cond != COND_ALWAYS)
13768
        {
13769
          first_error (_(BAD_COND));
13770
          return FAIL;
13771
        }
13772
      if (inst.uncond_value != -1)
13773
        inst.instruction |= inst.uncond_value << 28;
13774
    }
13775
 
13776
  if ((check & NEON_CHECK_ARCH)
13777
      && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
13778
    {
13779
      first_error (_(BAD_FPU));
13780
      return FAIL;
13781
    }
13782
 
13783
  return SUCCESS;
13784
}
13785
 
13786
static void
13787
do_neon_addsub_if_i (void)
13788
{
13789
  if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
13790
    return;
13791
 
13792
  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
13793
    return;
13794
 
13795
  /* The "untyped" case can't happen. Do this to stop the "U" bit being
13796
     affected if we specify unsigned args.  */
13797
  neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
13798
}
13799
 
13800
/* Swaps operands 1 and 2.  If operand 1 (the optional argument) was omitted,
13801
   we want the two-operand form:
13802
     V<op> A,B     (A is operand 0, B is operand 2)
13803
   to mean:
13804
     V<op> A,B,A
13805
   and not:
13806
     V<op> A,B,B
13807
   so handle that case specially.  */
13808
 
13809
static void
13810
neon_exchange_operands (void)
13811
{
13812
  void *scratch = alloca (sizeof (inst.operands[0]));
13813
  if (inst.operands[1].present)
13814
    {
13815
      /* Swap operands[1] and operands[2].  */
13816
      memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
13817
      inst.operands[1] = inst.operands[2];
13818
      memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
13819
    }
13820
  else
13821
    {
13822
      inst.operands[1] = inst.operands[2];
13823
      inst.operands[2] = inst.operands[0];
13824
    }
13825
}
13826
 
13827
static void
13828
neon_compare (unsigned regtypes, unsigned immtypes, int invert)
13829
{
13830
  if (inst.operands[2].isreg)
13831
    {
13832
      if (invert)
13833
        neon_exchange_operands ();
13834
      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
13835
    }
13836
  else
13837
    {
13838
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
13839
      struct neon_type_el et = neon_check_type (2, rs,
13840
        N_EQK | N_SIZ, immtypes | N_KEY);
13841
 
13842
      NEON_ENCODE (IMMED, inst);
13843
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13844
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13845
      inst.instruction |= LOW4 (inst.operands[1].reg);
13846
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13847
      inst.instruction |= neon_quad (rs) << 6;
13848
      inst.instruction |= (et.type == NT_float) << 10;
13849
      inst.instruction |= neon_logbits (et.size) << 18;
13850
 
13851
      neon_dp_fixup (&inst);
13852
    }
13853
}
13854
 
13855
static void
13856
do_neon_cmp (void)
13857
{
13858
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
13859
}
13860
 
13861
static void
13862
do_neon_cmp_inv (void)
13863
{
13864
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
13865
}
13866
 
13867
static void
13868
do_neon_ceq (void)
13869
{
13870
  neon_compare (N_IF_32, N_IF_32, FALSE);
13871
}
13872
 
13873
/* For multiply instructions, we have the possibility of 16-bit or 32-bit
13874
   scalars, which are encoded in 5 bits, M : Rm.
13875
   For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
13876
   M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
13877
   index in M.  */
13878
 
13879
static unsigned
13880
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
13881
{
13882
  unsigned regno = NEON_SCALAR_REG (scalar);
13883
  unsigned elno = NEON_SCALAR_INDEX (scalar);
13884
 
13885
  switch (elsize)
13886
    {
13887
    case 16:
13888
      if (regno > 7 || elno > 3)
13889
        goto bad_scalar;
13890
      return regno | (elno << 3);
13891
 
13892
    case 32:
13893
      if (regno > 15 || elno > 1)
13894
        goto bad_scalar;
13895
      return regno | (elno << 4);
13896
 
13897
    default:
13898
    bad_scalar:
13899
      first_error (_("scalar out of range for multiply instruction"));
13900
    }
13901
 
13902
  return 0;
13903
}
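/* Worked examples (illustrative, not from the original sources): the
   16-bit scalar d5[2] encodes as 5 | (2 << 3) == 0x15, and the 32-bit
   scalar d3[1] encodes as 3 | (1 << 4) == 0x13, filling the five-bit
   M:Rm field described above.  */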
13904
 
13905
/* Encode multiply / multiply-accumulate scalar instructions.  */
13906
 
13907
static void
13908
neon_mul_mac (struct neon_type_el et, int ubit)
13909
{
13910
  unsigned scalar;
13911
 
13912
  /* Give a more helpful error message if we have an invalid type.  */
13913
  if (et.type == NT_invtype)
13914
    return;
13915
 
13916
  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
13917
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13918
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13919
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
13920
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
13921
  inst.instruction |= LOW4 (scalar);
13922
  inst.instruction |= HI1 (scalar) << 5;
13923
  inst.instruction |= (et.type == NT_float) << 8;
13924
  inst.instruction |= neon_logbits (et.size) << 20;
13925
  inst.instruction |= (ubit != 0) << 24;
13926
 
13927
  neon_dp_fixup (&inst);
13928
}
13929
 
13930
static void
13931
do_neon_mac_maybe_scalar (void)
13932
{
13933
  if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
13934
    return;
13935
 
13936
  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
13937
    return;
13938
 
13939
  if (inst.operands[2].isscalar)
13940
    {
13941
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
13942
      struct neon_type_el et = neon_check_type (3, rs,
13943
        N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
13944
      NEON_ENCODE (SCALAR, inst);
13945
      neon_mul_mac (et, neon_quad (rs));
13946
    }
13947
  else
13948
    {
13949
      /* The "untyped" case can't happen.  Do this to stop the "U" bit being
13950
         affected if we specify unsigned args.  */
13951
      neon_dyadic_misc (NT_untyped, N_IF_32, 0);
13952
    }
13953
}
13954
 
13955
static void
13956
do_neon_fmac (void)
13957
{
13958
  if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
13959
    return;
13960
 
13961
  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
13962
    return;
13963
 
13964
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
13965
}
13966
 
13967
static void
13968
do_neon_tst (void)
13969
{
13970
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13971
  struct neon_type_el et = neon_check_type (3, rs,
13972
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
13973
  neon_three_same (neon_quad (rs), 0, et.size);
13974
}
13975
 
13976
/* VMUL with 3 registers allows the P8 type. The scalar version supports the
13977
   same types as the MAC equivalents. The polynomial type for this instruction
13978
   is encoded the same as the integer type.  */
13979
 
13980
static void
13981
do_neon_mul (void)
13982
{
13983
  if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
13984
    return;
13985
 
13986
  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
13987
    return;
13988
 
13989
  if (inst.operands[2].isscalar)
13990
    do_neon_mac_maybe_scalar ();
13991
  else
13992
    neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
13993
}
13994
 
13995
static void
13996
do_neon_qdmulh (void)
13997
{
13998
  if (inst.operands[2].isscalar)
13999
    {
14000
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
14001
      struct neon_type_el et = neon_check_type (3, rs,
14002
        N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
14003
      NEON_ENCODE (SCALAR, inst);
14004
      neon_mul_mac (et, neon_quad (rs));
14005
    }
14006
  else
14007
    {
14008
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14009
      struct neon_type_el et = neon_check_type (3, rs,
14010
        N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
14011
      NEON_ENCODE (INTEGER, inst);
14012
      /* The U bit (rounding) comes from bit mask.  */
14013
      neon_three_same (neon_quad (rs), 0, et.size);
14014
    }
14015
}
14016
 
14017
static void
14018
do_neon_fcmp_absolute (void)
14019
{
14020
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14021
  neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
14022
  /* Size field comes from bit mask.  */
14023
  neon_three_same (neon_quad (rs), 1, -1);
14024
}
14025
 
14026
static void
14027
do_neon_fcmp_absolute_inv (void)
14028
{
14029
  neon_exchange_operands ();
14030
  do_neon_fcmp_absolute ();
14031
}
14032
 
14033
static void
14034
do_neon_step (void)
14035
{
14036
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14037
  neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
14038
  neon_three_same (neon_quad (rs), 0, -1);
14039
}
14040
 
14041
static void
14042
do_neon_abs_neg (void)
14043
{
14044
  enum neon_shape rs;
14045
  struct neon_type_el et;
14046
 
14047
  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
14048
    return;
14049
 
14050
  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14051
    return;
14052
 
14053
  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14054
  et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);
14055
 
14056
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14057
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14058
  inst.instruction |= LOW4 (inst.operands[1].reg);
14059
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14060
  inst.instruction |= neon_quad (rs) << 6;
14061
  inst.instruction |= (et.type == NT_float) << 10;
14062
  inst.instruction |= neon_logbits (et.size) << 18;
14063
 
14064
  neon_dp_fixup (&inst);
14065
}
14066
 
14067
static void
14068
do_neon_sli (void)
14069
{
14070
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14071
  struct neon_type_el et = neon_check_type (2, rs,
14072
    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
14073
  int imm = inst.operands[2].imm;
14074
  constraint (imm < 0 || (unsigned)imm >= et.size,
14075
              _("immediate out of range for insert"));
14076
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
14077
}
14078
 
14079
static void
14080
do_neon_sri (void)
14081
{
14082
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14083
  struct neon_type_el et = neon_check_type (2, rs,
14084
    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
14085
  int imm = inst.operands[2].imm;
14086
  constraint (imm < 1 || (unsigned)imm > et.size,
14087
              _("immediate out of range for insert"));
14088
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
14089
}
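/* Illustrative note (not from the original sources): right-shift style
   insertions encode ET.SIZE - IMM, so "vsri.32 d0, d1, #1" passes 31 to
   neon_imm_shift above, whereas do_neon_sli passes the shift amount
   through unchanged.  */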
14090
 
14091
static void
14092
do_neon_qshlu_imm (void)
14093
{
14094
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14095
  struct neon_type_el et = neon_check_type (2, rs,
14096
    N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
14097
  int imm = inst.operands[2].imm;
14098
  constraint (imm < 0 || (unsigned)imm >= et.size,
14099
              _("immediate out of range for shift"));
14100
  /* Only encodes the 'U present' variant of the instruction.
14101
     In this case, signed types have OP (bit 8) set to 0.
14102
     Unsigned types have OP set to 1.  */
14103
  inst.instruction |= (et.type == NT_unsigned) << 8;
14104
  /* The rest of the bits are the same as other immediate shifts.  */
14105
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
14106
}
14107
 
14108
static void
14109
do_neon_qmovn (void)
14110
{
14111
  struct neon_type_el et = neon_check_type (2, NS_DQ,
14112
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
14113
  /* Saturating move where operands can be signed or unsigned, and the
14114
     destination has the same signedness.  */
14115
  NEON_ENCODE (INTEGER, inst);
14116
  if (et.type == NT_unsigned)
14117
    inst.instruction |= 0xc0;
14118
  else
14119
    inst.instruction |= 0x80;
14120
  neon_two_same (0, 1, et.size / 2);
14121
}
14122
 
14123
static void
14124
do_neon_qmovun (void)
14125
{
14126
  struct neon_type_el et = neon_check_type (2, NS_DQ,
14127
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
14128
  /* Saturating move with unsigned results. Operands must be signed.  */
14129
  NEON_ENCODE (INTEGER, inst);
14130
  neon_two_same (0, 1, et.size / 2);
14131
}
14132
 
14133
static void
14134
do_neon_rshift_sat_narrow (void)
14135
{
14136
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
14137
     or unsigned. If operands are unsigned, results must also be unsigned.  */
14138
  struct neon_type_el et = neon_check_type (2, NS_DQI,
14139
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
14140
  int imm = inst.operands[2].imm;
14141
  /* This gets the bounds check, size encoding and immediate bits calculation
14142
     right.  */
14143
  et.size /= 2;
14144
 
14145
  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
14146
     VQMOVN.I<size> <Dd>, <Qm>.  */
14147
  if (imm == 0)
14148
    {
14149
      inst.operands[2].present = 0;
14150
      inst.instruction = N_MNEM_vqmovn;
14151
      do_neon_qmovn ();
14152
      return;
14153
    }
14154
 
14155
  constraint (imm < 1 || (unsigned)imm > et.size,
14156
              _("immediate out of range"));
14157
  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
14158
}
14159
 
14160
static void
14161
do_neon_rshift_sat_narrow_u (void)
14162
{
14163
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
14164
     or unsigned. If operands are unsigned, results must also be unsigned.  */
14165
  struct neon_type_el et = neon_check_type (2, NS_DQI,
14166
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
14167
  int imm = inst.operands[2].imm;
14168
  /* This gets the bounds check, size encoding and immediate bits calculation
14169
     right.  */
14170
  et.size /= 2;
14171
 
14172
  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
14173
     VQMOVUN.I<size> <Dd>, <Qm>.  */
14174
  if (imm == 0)
14175
    {
14176
      inst.operands[2].present = 0;
14177
      inst.instruction = N_MNEM_vqmovun;
14178
      do_neon_qmovun ();
14179
      return;
14180
    }
14181
 
14182
  constraint (imm < 1 || (unsigned)imm > et.size,
14183
              _("immediate out of range"));
14184
  /* FIXME: The manual is kind of unclear about what value U should have in
14185
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
14186
     must be 1.  */
14187
  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
14188
}
14189
 
14190
static void
14191
do_neon_movn (void)
14192
{
14193
  struct neon_type_el et = neon_check_type (2, NS_DQ,
14194
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
14195
  NEON_ENCODE (INTEGER, inst);
14196
  neon_two_same (0, 1, et.size / 2);
14197
}
14198
 
14199
static void
14200
do_neon_rshift_narrow (void)
14201
{
14202
  struct neon_type_el et = neon_check_type (2, NS_DQI,
14203
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
14204
  int imm = inst.operands[2].imm;
14205
  /* This gets the bounds check, size encoding and immediate bits calculation
14206
     right.  */
14207
  et.size /= 2;
14208
 
14209
  /* If the immediate is zero then this is a pseudo-instruction for
14210
     VMOVN.I<size> <Dd>, <Qm>.  */
14211
  if (imm == 0)
14212
    {
14213
      inst.operands[2].present = 0;
14214
      inst.instruction = N_MNEM_vmovn;
14215
      do_neon_movn ();
14216
      return;
14217
    }
14218
 
14219
  constraint (imm < 1 || (unsigned)imm > et.size,
14220
              _("immediate out of range for narrowing operation"));
14221
  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
14222
}
14223
 
14224
static void
14225
do_neon_shll (void)
14226
{
14227
  /* FIXME: Type checking when lengthening.  */
14228
  struct neon_type_el et = neon_check_type (2, NS_QDI,
14229
    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
14230
  unsigned imm = inst.operands[2].imm;
14231
 
14232
  if (imm == et.size)
14233
    {
14234
      /* Maximum shift variant.  */
14235
      NEON_ENCODE (INTEGER, inst);
14236
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14237
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14238
      inst.instruction |= LOW4 (inst.operands[1].reg);
14239
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14240
      inst.instruction |= neon_logbits (et.size) << 18;
14241
 
14242
      neon_dp_fixup (&inst);
14243
    }
14244
  else
14245
    {
14246
      /* A more-specific type check for non-max versions.  */
14247
      et = neon_check_type (2, NS_QDI,
14248
        N_EQK | N_DBL, N_SU_32 | N_KEY);
14249
      NEON_ENCODE (IMMED, inst);
14250
      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
14251
    }
14252
}
14253
 
14254
/* Check the various types for the VCVT instruction, and return which version
14255
   the current instruction is.  */
14256
 
14257
static int
14258
neon_cvt_flavour (enum neon_shape rs)
14259
{
14260
#define CVT_VAR(C,X,Y)                                                  \
14261
  et = neon_check_type (2, rs, whole_reg | (X), whole_reg | (Y));       \
14262
  if (et.type != NT_invtype)                                            \
14263
    {                                                                   \
14264
      inst.error = NULL;                                                \
14265
      return (C);                                                       \
14266
    }
14267
  struct neon_type_el et;
14268
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
14269
                        || rs == NS_FF) ? N_VFP : 0;
14270
  /* The instruction versions which take an immediate take one register
14271
     argument, which is extended to the width of the full register. Thus the
14272
     "source" and "destination" registers must have the same width.  Hack that
14273
     here by making the size equal to the key (wider, in this case) operand.  */
14274
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;
14275
 
14276
  CVT_VAR (0, N_S32, N_F32);
14277
  CVT_VAR (1, N_U32, N_F32);
14278
  CVT_VAR (2, N_F32, N_S32);
14279
  CVT_VAR (3, N_F32, N_U32);
14280
  /* Half-precision conversions.  */
14281
  CVT_VAR (4, N_F32, N_F16);
14282
  CVT_VAR (5, N_F16, N_F32);
14283
 
14284
  whole_reg = N_VFP;
14285
 
14286
  /* VFP instructions.  */
14287
  CVT_VAR (6, N_F32, N_F64);
14288
  CVT_VAR (7, N_F64, N_F32);
14289
  CVT_VAR (8, N_S32, N_F64 | key);
14290
  CVT_VAR (9, N_U32, N_F64 | key);
14291
  CVT_VAR (10, N_F64 | key, N_S32);
14292
  CVT_VAR (11, N_F64 | key, N_U32);
14293
  /* VFP instructions with bitshift.  */
14294
  CVT_VAR (12, N_F32 | key, N_S16);
14295
  CVT_VAR (13, N_F32 | key, N_U16);
14296
  CVT_VAR (14, N_F64 | key, N_S16);
14297
  CVT_VAR (15, N_F64 | key, N_U16);
14298
  CVT_VAR (16, N_S16, N_F32 | key);
14299
  CVT_VAR (17, N_U16, N_F32 | key);
14300
  CVT_VAR (18, N_S16, N_F64 | key);
14301
  CVT_VAR (19, N_U16, N_F64 | key);
14302
 
14303
  return -1;
14304
#undef CVT_VAR
14305
}
14306
 
14307
/* Neon-syntax VFP conversions.  */
14308
 
14309
static void
14310
do_vfp_nsyn_cvt (enum neon_shape rs, int flavour)
14311
{
14312
  const char *opname = 0;
14313
 
14314
  if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI)
14315
    {
14316
      /* Conversions with immediate bitshift.  */
14317
      const char *enc[] =
14318
        {
14319
          "ftosls",
14320
          "ftouls",
14321
          "fsltos",
14322
          "fultos",
14323
          NULL,
14324
          NULL,
14325
          NULL,
14326
          NULL,
14327
          "ftosld",
14328
          "ftould",
14329
          "fsltod",
14330
          "fultod",
14331
          "fshtos",
14332
          "fuhtos",
14333
          "fshtod",
14334
          "fuhtod",
14335
          "ftoshs",
14336
          "ftouhs",
14337
          "ftoshd",
14338
          "ftouhd"
14339
        };
14340
 
14341
      if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
14342
        {
14343
          opname = enc[flavour];
14344
          constraint (inst.operands[0].reg != inst.operands[1].reg,
14345
                      _("operands 0 and 1 must be the same register"));
14346
          inst.operands[1] = inst.operands[2];
14347
          memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
14348
        }
14349
    }
14350
  else
14351
    {
14352
      /* Conversions without bitshift.  */
14353
      const char *enc[] =
14354
        {
14355
          "ftosis",
14356
          "ftouis",
14357
          "fsitos",
14358
          "fuitos",
14359
          NULL,
14360
          NULL,
14361
          "fcvtsd",
14362
          "fcvtds",
14363
          "ftosid",
14364
          "ftouid",
14365
          "fsitod",
14366
          "fuitod"
14367
        };
14368
 
14369
      if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
14370
        opname = enc[flavour];
14371
    }
14372
 
14373
  if (opname)
14374
    do_vfp_nsyn_opcode (opname);
14375
}
14376
 
14377
static void
14378
do_vfp_nsyn_cvtz (void)
14379
{
14380
  enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_NULL);
14381
  int flavour = neon_cvt_flavour (rs);
14382
  const char *enc[] =
14383
    {
14384
      "ftosizs",
14385
      "ftouizs",
14386
      NULL,
14387
      NULL,
14388
      NULL,
14389
      NULL,
14390
      NULL,
14391
      NULL,
14392
      "ftosizd",
14393
      "ftouizd"
14394
    };
14395
 
14396
  if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
14397
    do_vfp_nsyn_opcode (enc[flavour]);
14398
}
14399
 
14400
static void
14401
do_neon_cvt_1 (bfd_boolean round_to_zero ATTRIBUTE_UNUSED)
14402
{
14403
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
14404
    NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ, NS_NULL);
14405
  int flavour = neon_cvt_flavour (rs);
14406
 
14407
  /* PR11109: Handle round-to-zero for VCVT conversions.  */
14408
  if (round_to_zero
14409
      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
14410
      && (flavour == 0 || flavour == 1 || flavour == 8 || flavour == 9)
14411
      && (rs == NS_FD || rs == NS_FF))
14412
    {
14413
      do_vfp_nsyn_cvtz ();
14414
      return;
14415
    }
14416
 
14417
  /* VFP rather than Neon conversions.  */
14418
  if (flavour >= 6)
14419
    {
14420
      do_vfp_nsyn_cvt (rs, flavour);
14421
      return;
14422
    }
14423
 
14424
  switch (rs)
14425
    {
14426
    case NS_DDI:
14427
    case NS_QQI:
14428
      {
14429
        unsigned immbits;
14430
        unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
14431
 
14432
        if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14433
          return;
14434
 
14435
        /* Fixed-point conversion with #0 immediate is encoded as an
14436
           integer conversion.  */
14437
        if (inst.operands[2].present && inst.operands[2].imm == 0)
14438
          goto int_encode;
14439
        immbits = 32 - inst.operands[2].imm;
14440
        NEON_ENCODE (IMMED, inst);
14441
        if (flavour != -1)
14442
          inst.instruction |= enctab[flavour];
14443
        inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14444
        inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14445
        inst.instruction |= LOW4 (inst.operands[1].reg);
14446
        inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14447
        inst.instruction |= neon_quad (rs) << 6;
14448
        inst.instruction |= 1 << 21;
14449
        inst.instruction |= immbits << 16;
14450
 
14451
        neon_dp_fixup (&inst);
14452
      }
14453
      break;
14454
 
14455
    case NS_DD:
14456
    case NS_QQ:
14457
    int_encode:
14458
      {
14459
        unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };
14460
 
14461
        NEON_ENCODE (INTEGER, inst);
14462
 
14463
        if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14464
          return;
14465
 
14466
        if (flavour != -1)
14467
          inst.instruction |= enctab[flavour];
14468
 
14469
        inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14470
        inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14471
        inst.instruction |= LOW4 (inst.operands[1].reg);
14472
        inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14473
        inst.instruction |= neon_quad (rs) << 6;
14474
        inst.instruction |= 2 << 18;
14475
 
14476
        neon_dp_fixup (&inst);
14477
      }
14478
    break;
14479
 
14480
    /* Half-precision conversions for Advanced SIMD -- neon.  */
14481
    case NS_QD:
14482
    case NS_DQ:
14483
 
14484
      if ((rs == NS_DQ)
14485
          && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
14486
          {
14487
            as_bad (_("operand size must match register width"));
14488
            break;
14489
          }
14490
 
14491
      if ((rs == NS_QD)
14492
          && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
14493
          {
14494
            as_bad (_("operand size must match register width"));
14495
            break;
14496
          }
14497
 
14498
      if (rs == NS_DQ)
14499
        inst.instruction = 0x3b60600;
14500
      else
14501
        inst.instruction = 0x3b60700;
14502
 
14503
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14504
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14505
      inst.instruction |= LOW4 (inst.operands[1].reg);
14506
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14507
      neon_dp_fixup (&inst);
14508
      break;
14509
 
14510
    default:
14511
      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
14512
      do_vfp_nsyn_cvt (rs, flavour);
14513
    }
14514
}
14515
 
14516
static void
14517
do_neon_cvtr (void)
14518
{
14519
  do_neon_cvt_1 (FALSE);
14520
}
14521
 
14522
static void
14523
do_neon_cvt (void)
14524
{
14525
  do_neon_cvt_1 (TRUE);
14526
}
14527
 
14528
static void
14529
do_neon_cvtb (void)
14530
{
14531
  inst.instruction = 0xeb20a40;
14532
 
14533
  /* The sizes are attached to the mnemonic.  */
14534
  if (inst.vectype.el[0].type != NT_invtype
14535
      && inst.vectype.el[0].size == 16)
14536
    inst.instruction |= 0x00010000;
14537
 
14538
  /* Programmer's syntax: the sizes are attached to the operands.  */
14539
  else if (inst.operands[0].vectype.type != NT_invtype
14540
           && inst.operands[0].vectype.size == 16)
14541
    inst.instruction |= 0x00010000;
14542
 
14543
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
14544
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
14545
  do_vfp_cond_or_thumb ();
14546
}
14547
 
14548
 
14549
static void
14550
do_neon_cvtt (void)
14551
{
14552
  do_neon_cvtb ();
14553
  inst.instruction |= 0x80;
14554
}
14555
 
14556
static void
14557
neon_move_immediate (void)
14558
{
14559
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
14560
  struct neon_type_el et = neon_check_type (2, rs,
14561
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
14562
  unsigned immlo, immhi = 0, immbits;
14563
  int op, cmode, float_p;
14564
 
14565
  constraint (et.type == NT_invtype,
14566
              _("operand size must be specified for immediate VMOV"));
14567
 
14568
  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
14569
  op = (inst.instruction & (1 << 5)) != 0;
14570
 
14571
  immlo = inst.operands[1].imm;
14572
  if (inst.operands[1].regisimm)
14573
    immhi = inst.operands[1].reg;
14574
 
14575
  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
14576
              _("immediate has bits set outside the operand size"));
14577
 
14578
  float_p = inst.operands[1].immisfloat;
14579
 
14580
  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
14581
                                        et.size, et.type)) == FAIL)
14582
    {
14583
      /* Invert relevant bits only.  */
14584
      neon_invert_size (&immlo, &immhi, et.size);
14585
      /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
14586
         with one or the other; those cases are caught by
14587
         neon_cmode_for_move_imm.  */
14588
      op = !op;
14589
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
14590
                                            &op, et.size, et.type)) == FAIL)
14591
        {
14592
          first_error (_("immediate out of range"));
14593
          return;
14594
        }
14595
    }
14596
 
14597
  inst.instruction &= ~(1 << 5);
14598
  inst.instruction |= op << 5;
14599
 
14600
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14601
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14602
  inst.instruction |= neon_quad (rs) << 6;
14603
  inst.instruction |= cmode << 8;
14604
 
14605
  neon_write_immbits (immbits);
14606
}
14607
 
14608
static void
14609
do_neon_mvn (void)
14610
{
14611
  if (inst.operands[1].isreg)
14612
    {
14613
      enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14614
 
14615
      NEON_ENCODE (INTEGER, inst);
14616
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14617
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14618
      inst.instruction |= LOW4 (inst.operands[1].reg);
14619
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14620
      inst.instruction |= neon_quad (rs) << 6;
14621
    }
14622
  else
14623
    {
14624
      NEON_ENCODE (IMMED, inst);
14625
      neon_move_immediate ();
14626
    }
14627
 
14628
  neon_dp_fixup (&inst);
14629
}
14630
 
14631
/* Encode instructions of form:
14632
 
14633
  |28/24|23|22|21 20|19 16|15 12|11    8|7|6|5|4|3  0|
14634
  |  U  |x |D |size | Rn  | Rd  |x x x x|N|x|M|x| Rm |  */
14635
 
14636
static void
14637
neon_mixed_length (struct neon_type_el et, unsigned size)
14638
{
14639
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14640
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14641
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14642
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14643
  inst.instruction |= LOW4 (inst.operands[2].reg);
14644
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
14645
  inst.instruction |= (et.type == NT_unsigned) << 24;
14646
  inst.instruction |= neon_logbits (size) << 20;
14647
 
14648
  neon_dp_fixup (&inst);
14649
}
14650
 
14651
static void
14652
do_neon_dyadic_long (void)
14653
{
14654
  /* FIXME: Type checking for lengthening op.  */
14655
  struct neon_type_el et = neon_check_type (3, NS_QDD,
14656
    N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
14657
  neon_mixed_length (et, et.size);
14658
}
14659
 
14660
static void
14661
do_neon_abal (void)
14662
{
14663
  struct neon_type_el et = neon_check_type (3, NS_QDD,
14664
    N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
14665
  neon_mixed_length (et, et.size);
14666
}
14667
 
14668
static void
14669
neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
14670
{
14671
  if (inst.operands[2].isscalar)
14672
    {
14673
      struct neon_type_el et = neon_check_type (3, NS_QDS,
14674
        N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
14675
      NEON_ENCODE (SCALAR, inst);
14676
      neon_mul_mac (et, et.type == NT_unsigned);
14677
    }
14678
  else
14679
    {
14680
      struct neon_type_el et = neon_check_type (3, NS_QDD,
14681
        N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
14682
      NEON_ENCODE (INTEGER, inst);
14683
      neon_mixed_length (et, et.size);
14684
    }
14685
}
14686
 
14687
static void
14688
do_neon_mac_maybe_scalar_long (void)
14689
{
14690
  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
14691
}
14692
 
14693
static void
14694
do_neon_dyadic_wide (void)
14695
{
14696
  struct neon_type_el et = neon_check_type (3, NS_QQD,
14697
    N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
14698
  neon_mixed_length (et, et.size);
14699
}
14700
 
14701
static void
14702
do_neon_dyadic_narrow (void)
14703
{
14704
  struct neon_type_el et = neon_check_type (3, NS_QDD,
14705
    N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
14706
  /* Operand sign is unimportant, and the U bit is part of the opcode,
14707
     so force the operand type to integer.  */
14708
  et.type = NT_integer;
14709
  neon_mixed_length (et, et.size / 2);
14710
}
14711
 
14712
static void
14713
do_neon_mul_sat_scalar_long (void)
14714
{
14715
  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
14716
}
14717
 
14718
static void
14719
do_neon_vmull (void)
14720
{
14721
  if (inst.operands[2].isscalar)
14722
    do_neon_mac_maybe_scalar_long ();
14723
  else
14724
    {
14725
      struct neon_type_el et = neon_check_type (3, NS_QDD,
14726
        N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_KEY);
14727
      if (et.type == NT_poly)
14728
        NEON_ENCODE (POLY, inst);
14729
      else
14730
        NEON_ENCODE (INTEGER, inst);
14731
      /* For polynomial encoding, size field must be 0b00 and the U bit must be
14732
         zero. Should be OK as-is.  */
14733
      neon_mixed_length (et, et.size);
14734
    }
14735
}
14736
 
14737
static void
14738
do_neon_ext (void)
14739
{
14740
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
14741
  struct neon_type_el et = neon_check_type (3, rs,
14742
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
14743
  unsigned imm = (inst.operands[3].imm * et.size) / 8;
14744
 
14745
  constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
14746
              _("shift out of range"));
14747
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14748
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14749
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14750
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14751
  inst.instruction |= LOW4 (inst.operands[2].reg);
14752
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
14753
  inst.instruction |= neon_quad (rs) << 6;
14754
  inst.instruction |= imm << 8;
14755
 
14756
  neon_dp_fixup (&inst);
14757
}
14758
 
14759
static void
14760
do_neon_rev (void)
14761
{
14762
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14763
  struct neon_type_el et = neon_check_type (2, rs,
14764
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
14765
  unsigned op = (inst.instruction >> 7) & 3;
14766
  /* N (width of reversed regions) is encoded as part of the bitmask. We
14767
     extract it here to check the elements to be reversed are smaller.
14768
     Otherwise we'd get a reserved instruction.  */
14769
  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
14770
  gas_assert (elsize != 0);
14771
  constraint (et.size >= elsize,
14772
              _("elements must be smaller than reversal region"));
14773
  neon_two_same (neon_quad (rs), 1, et.size);
14774
}
14775
 
14776
static void
14777
do_neon_dup (void)
14778
{
14779
  if (inst.operands[1].isscalar)
14780
    {
14781
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
14782
      struct neon_type_el et = neon_check_type (2, rs,
14783
        N_EQK, N_8 | N_16 | N_32 | N_KEY);
14784
      unsigned sizebits = et.size >> 3;
14785
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
14786
      int logsize = neon_logbits (et.size);
14787
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;
14788
 
14789
      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
14790
        return;
14791
 
14792
      NEON_ENCODE (SCALAR, inst);
14793
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14794
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14795
      inst.instruction |= LOW4 (dm);
14796
      inst.instruction |= HI1 (dm) << 5;
14797
      inst.instruction |= neon_quad (rs) << 6;
14798
      inst.instruction |= x << 17;
14799
      inst.instruction |= sizebits << 16;
14800
 
14801
      neon_dp_fixup (&inst);
14802
    }
14803
  else
14804
    {
14805
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
14806
      struct neon_type_el et = neon_check_type (2, rs,
14807
        N_8 | N_16 | N_32 | N_KEY, N_EQK);
14808
      /* Duplicate ARM register to lanes of vector.  */
14809
      NEON_ENCODE (ARMREG, inst);
14810
      switch (et.size)
14811
        {
14812
        case 8:  inst.instruction |= 0x400000; break;
14813
        case 16: inst.instruction |= 0x000020; break;
14814
        case 32: inst.instruction |= 0x000000; break;
14815
        default: break;
14816
        }
14817
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
14818
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
14819
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
14820
      inst.instruction |= neon_quad (rs) << 21;
14821
      /* The encoding for this instruction is identical for the ARM and Thumb
14822
         variants, except for the condition field.  */
14823
      do_vfp_cond_or_thumb ();
14824
    }
14825
}
14826
 
14827
/* VMOV has particularly many variations. It can be one of:
14828
     0. VMOV<c><q> <Qd>, <Qm>
14829
     1. VMOV<c><q> <Dd>, <Dm>
14830
   (Register operations, which are VORR with Rm = Rn.)
14831
     2. VMOV<c><q>.<dt> <Qd>, #<imm>
14832
     3. VMOV<c><q>.<dt> <Dd>, #<imm>
14833
   (Immediate loads.)
14834
     4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
14835
   (ARM register to scalar.)
14836
     5. VMOV<c><q> <Dm>, <Rd>, <Rn>
14837
   (Two ARM registers to vector.)
14838
     6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
14839
   (Scalar to ARM register.)
14840
     7. VMOV<c><q> <Rd>, <Rn>, <Dm>
14841
   (Vector to two ARM registers.)
14842
     8. VMOV.F32 <Sd>, <Sm>
14843
     9. VMOV.F64 <Dd>, <Dm>
14844
   (VFP register moves.)
14845
    10. VMOV.F32 <Sd>, #imm
14846
    11. VMOV.F64 <Dd>, #imm
14847
   (VFP float immediate load.)
14848
    12. VMOV <Rd>, <Sm>
14849
   (VFP single to ARM reg.)
14850
    13. VMOV <Sd>, <Rm>
14851
   (ARM reg to VFP single.)
14852
    14. VMOV <Rd>, <Re>, <Sn>, <Sm>
14853
   (Two ARM regs to two VFP singles.)
14854
    15. VMOV <Sd>, <Se>, <Rn>, <Rm>
14855
   (Two VFP singles to two ARM regs.)
14856
 
14857
   These cases can be disambiguated using neon_select_shape, except cases 1/9
14858
   and 3/11 which depend on the operand type too.
14859
 
14860
   All the encoded bits are hardcoded by this function.
14861
 
14862
   Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
14863
   Cases 5, 7 may be used with VFPv2 and above.
14864
 
14865
   FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
14866
   can specify a type where it doesn't make sense to, and is ignored).  */
14867
 
14868
static void
14869
do_neon_mov (void)
14870
{
14871
  enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
14872
    NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
14873
    NS_NULL);
14874
  struct neon_type_el et;
14875
  const char *ldconst = 0;
14876
 
14877
  switch (rs)
14878
    {
14879
    case NS_DD:  /* case 1/9.  */
14880
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
14881
      /* It is not an error here if no type is given.  */
14882
      inst.error = NULL;
14883
      if (et.type == NT_float && et.size == 64)
14884
        {
14885
          do_vfp_nsyn_opcode ("fcpyd");
14886
          break;
14887
        }
14888
      /* fall through.  */
14889
 
14890
    case NS_QQ:  /* case 0/1.  */
14891
      {
14892
        if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14893
          return;
14894
        /* The architecture manual I have doesn't explicitly state which
14895
           value the U bit should have for register->register moves, but
14896
           the equivalent VORR instruction has U = 0, so do that.  */
14897
        inst.instruction = 0x0200110;
14898
        inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14899
        inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14900
        inst.instruction |= LOW4 (inst.operands[1].reg);
14901
        inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14902
        inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14903
        inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14904
        inst.instruction |= neon_quad (rs) << 6;
14905
 
14906
        neon_dp_fixup (&inst);
14907
      }
14908
      break;
14909
 
14910
    case NS_DI:  /* case 3/11.  */
14911
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
14912
      inst.error = NULL;
14913
      if (et.type == NT_float && et.size == 64)
14914
        {
14915
          /* case 11 (fconstd).  */
14916
          ldconst = "fconstd";
14917
          goto encode_fconstd;
14918
        }
14919
      /* fall through.  */
14920
 
14921
    case NS_QI:  /* case 2/3.  */
14922
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14923
        return;
14924
      inst.instruction = 0x0800010;
14925
      neon_move_immediate ();
14926
      neon_dp_fixup (&inst);
14927
      break;
14928
 
14929
    case NS_SR:  /* case 4.  */
14930
      {
14931
        unsigned bcdebits = 0;
14932
        int logsize;
14933
        unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
14934
        unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);
14935
 
14936
        et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
14937
        logsize = neon_logbits (et.size);
14938
 
14939
        constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
14940
                    _(BAD_FPU));
14941
        constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
14942
                    && et.size != 32, _(BAD_FPU));
14943
        constraint (et.type == NT_invtype, _("bad type for scalar"));
14944
        constraint (x >= 64 / et.size, _("scalar index out of range"));
14945
 
14946
        switch (et.size)
14947
          {
14948
          case 8:  bcdebits = 0x8; break;
14949
          case 16: bcdebits = 0x1; break;
14950
          case 32: bcdebits = 0x0; break;
14951
          default: ;
14952
          }
14953
 
14954
        bcdebits |= x << logsize;
14955
 
14956
        inst.instruction = 0xe000b10;
14957
        do_vfp_cond_or_thumb ();
14958
        inst.instruction |= LOW4 (dn) << 16;
14959
        inst.instruction |= HI1 (dn) << 7;
14960
        inst.instruction |= inst.operands[1].reg << 12;
14961
        inst.instruction |= (bcdebits & 3) << 5;
14962
        inst.instruction |= (bcdebits >> 2) << 21;
14963
      }
14964
      break;
14965
 
14966
    case NS_DRR:  /* case 5 (fmdrr).  */
14967
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
14968
                  _(BAD_FPU));
14969
 
14970
      inst.instruction = 0xc400b10;
14971
      do_vfp_cond_or_thumb ();
14972
      inst.instruction |= LOW4 (inst.operands[0].reg);
14973
      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
14974
      inst.instruction |= inst.operands[1].reg << 12;
14975
      inst.instruction |= inst.operands[2].reg << 16;
14976
      break;
14977
 
14978
    case NS_RS:  /* case 6.  */
14979
      {
14980
        unsigned logsize;
14981
        unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
14982
        unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
14983
        unsigned abcdebits = 0;
14984
 
14985
        et = neon_check_type (2, NS_NULL,
14986
                              N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
14987
        logsize = neon_logbits (et.size);
14988
 
14989
        constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
14990
                    _(BAD_FPU));
14991
        constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
14992
                    && et.size != 32, _(BAD_FPU));
14993
        constraint (et.type == NT_invtype, _("bad type for scalar"));
14994
        constraint (x >= 64 / et.size, _("scalar index out of range"));
14995
 
14996
        switch (et.size)
14997
          {
14998
          case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
14999
          case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
15000
          case 32: abcdebits = 0x00; break;
15001
          default: ;
15002
          }
15003
 
15004
        abcdebits |= x << logsize;
15005
        inst.instruction = 0xe100b10;
15006
        do_vfp_cond_or_thumb ();
15007
        inst.instruction |= LOW4 (dn) << 16;
15008
        inst.instruction |= HI1 (dn) << 7;
15009
        inst.instruction |= inst.operands[0].reg << 12;
15010
        inst.instruction |= (abcdebits & 3) << 5;
15011
        inst.instruction |= (abcdebits >> 2) << 21;
15012
      }
15013
      break;
15014
 
15015
    case NS_RRD:  /* case 7 (fmrrd).  */
15016
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
15017
                  _(BAD_FPU));
15018
 
15019
      inst.instruction = 0xc500b10;
15020
      do_vfp_cond_or_thumb ();
15021
      inst.instruction |= inst.operands[0].reg << 12;
15022
      inst.instruction |= inst.operands[1].reg << 16;
15023
      inst.instruction |= LOW4 (inst.operands[2].reg);
15024
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
15025
      break;
15026
 
15027
    case NS_FF:  /* case 8 (fcpys).  */
15028
      do_vfp_nsyn_opcode ("fcpys");
15029
      break;
15030
 
15031
    case NS_FI:  /* case 10 (fconsts).  */
15032
      ldconst = "fconsts";
15033
      encode_fconstd:
15034
      if (is_quarter_float (inst.operands[1].imm))
15035
        {
15036
          inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
15037
          do_vfp_nsyn_opcode (ldconst);
15038
        }
15039
      else
15040
        first_error (_("immediate out of range"));
15041
      break;
15042
 
15043
    case NS_RF:  /* case 12 (fmrs).  */
15044
      do_vfp_nsyn_opcode ("fmrs");
15045
      break;
15046
 
15047
    case NS_FR:  /* case 13 (fmsr).  */
15048
      do_vfp_nsyn_opcode ("fmsr");
15049
      break;
15050
 
15051
    /* The encoders for the fmrrs and fmsrr instructions expect three operands
15052
       (one of which is a list), but we have parsed four.  Do some fiddling to
15053
       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
15054
       expect.  */
15055
    case NS_RRFF:  /* case 14 (fmrrs).  */
15056
      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
15057
                  _("VFP registers must be adjacent"));
15058
      inst.operands[2].imm = 2;
15059
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
15060
      do_vfp_nsyn_opcode ("fmrrs");
15061
      break;
15062
 
15063
    case NS_FFRR:  /* case 15 (fmsrr).  */
15064
      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
15065
                  _("VFP registers must be adjacent"));
15066
      inst.operands[1] = inst.operands[2];
15067
      inst.operands[2] = inst.operands[3];
15068
      inst.operands[0].imm = 2;
15069
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
15070
      do_vfp_nsyn_opcode ("fmsrr");
15071
      break;
15072
 
15073
    default:
15074
      abort ();
15075
    }
15076
}
15077
 
15078
static void
15079
do_neon_rshift_round_imm (void)
15080
{
15081
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15082
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
15083
  int imm = inst.operands[2].imm;
15084
 
15085
  /* imm == 0 case is encoded as VMOV for V{R}SHR.  */
15086
  if (imm == 0)
15087
    {
15088
      inst.operands[2].present = 0;
15089
      do_neon_mov ();
15090
      return;
15091
    }
15092
 
15093
  constraint (imm < 1 || (unsigned)imm > et.size,
15094
              _("immediate out of range for shift"));
15095
  neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
15096
                  et.size - imm);
15097
}
15098
 
15099
static void
15100
do_neon_movl (void)
15101
{
15102
  struct neon_type_el et = neon_check_type (2, NS_QD,
15103
    N_EQK | N_DBL, N_SU_32 | N_KEY);
15104
  unsigned sizebits = et.size >> 3;
15105
  inst.instruction |= sizebits << 19;
15106
  neon_two_same (0, et.type == NT_unsigned, -1);
15107
}
15108
 
15109
static void
15110
do_neon_trn (void)
15111
{
15112
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15113
  struct neon_type_el et = neon_check_type (2, rs,
15114
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
15115
  NEON_ENCODE (INTEGER, inst);
15116
  neon_two_same (neon_quad (rs), 1, et.size);
15117
}
15118
 
15119
static void
15120
do_neon_zip_uzp (void)
15121
{
15122
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15123
  struct neon_type_el et = neon_check_type (2, rs,
15124
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
15125
  if (rs == NS_DD && et.size == 32)
15126
    {
15127
      /* Special case: encode as VTRN.32 <Dd>, <Dm>.  */
15128
      inst.instruction = N_MNEM_vtrn;
15129
      do_neon_trn ();
15130
      return;
15131
    }
15132
  neon_two_same (neon_quad (rs), 1, et.size);
15133
}
15134
 
15135
static void
15136
do_neon_sat_abs_neg (void)
15137
{
15138
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15139
  struct neon_type_el et = neon_check_type (2, rs,
15140
    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
15141
  neon_two_same (neon_quad (rs), 1, et.size);
15142
}
15143
 
15144
static void
15145
do_neon_pair_long (void)
15146
{
15147
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15148
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
15149
  /* Unsigned is encoded in OP field (bit 7) for these instructions.  */
15150
  inst.instruction |= (et.type == NT_unsigned) << 7;
15151
  neon_two_same (neon_quad (rs), 1, et.size);
15152
}
15153
 
15154
static void
15155
do_neon_recip_est (void)
15156
{
15157
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15158
  struct neon_type_el et = neon_check_type (2, rs,
15159
    N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
15160
  inst.instruction |= (et.type == NT_float) << 8;
15161
  neon_two_same (neon_quad (rs), 1, et.size);
15162
}
15163
 
15164
static void
15165
do_neon_cls (void)
15166
{
15167
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15168
  struct neon_type_el et = neon_check_type (2, rs,
15169
    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
15170
  neon_two_same (neon_quad (rs), 1, et.size);
15171
}
15172
 
15173
static void
15174
do_neon_clz (void)
15175
{
15176
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15177
  struct neon_type_el et = neon_check_type (2, rs,
15178
    N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
15179
  neon_two_same (neon_quad (rs), 1, et.size);
15180
}
15181
 
15182
static void
15183
do_neon_cnt (void)
15184
{
15185
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15186
  struct neon_type_el et = neon_check_type (2, rs,
15187
    N_EQK | N_INT, N_8 | N_KEY);
15188
  neon_two_same (neon_quad (rs), 1, et.size);
15189
}
15190
 
15191
static void
15192
do_neon_swp (void)
15193
{
15194
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15195
  neon_two_same (neon_quad (rs), 1, -1);
15196
}
15197
 
15198
static void
15199
do_neon_tbl_tbx (void)
15200
{
15201
  unsigned listlenbits;
15202
  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
15203
 
15204
  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
15205
    {
15206
      first_error (_("bad list length for table lookup"));
15207
      return;
15208
    }
15209
 
15210
  listlenbits = inst.operands[1].imm - 1;
15211
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15212
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15213
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
15214
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
15215
  inst.instruction |= LOW4 (inst.operands[2].reg);
15216
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
15217
  inst.instruction |= listlenbits << 8;
15218
 
15219
  neon_dp_fixup (&inst);
15220
}
15221
 
15222
static void
15223
do_neon_ldm_stm (void)
15224
{
15225
  /* P, U and L bits are part of the bitmask.  */
15226
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
15227
  unsigned offsetbits = inst.operands[1].imm * 2;
15228
 
15229
  if (inst.operands[1].issingle)
15230
    {
15231
      do_vfp_nsyn_ldm_stm (is_dbmode);
15232
      return;
15233
    }
15234
 
15235
  constraint (is_dbmode && !inst.operands[0].writeback,
15236
              _("writeback (!) must be used for VLDMDB and VSTMDB"));
15237
 
15238
  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
15239
              _("register list must contain at least 1 and at most 16 "
15240
                "registers"));
15241
 
15242
  inst.instruction |= inst.operands[0].reg << 16;
15243
  inst.instruction |= inst.operands[0].writeback << 21;
15244
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
15245
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;
15246
 
15247
  inst.instruction |= offsetbits;
15248
 
15249
  do_vfp_cond_or_thumb ();
15250
}
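
/* Illustrative sketch, not part of the assembler: VLDM/VSTM count the
   transfer size in 32-bit words, so a list of N double (D) registers is
   encoded as 2 * N, which is what the offsetbits computation above does.
   The helper name is hypothetical and for exposition only.  */
static unsigned int
example_vldm_dreg_offset (unsigned int d_reg_count)
{
  /* e.g. {d0-d3} is 4 registers, encoded as 8 words.  */
  return d_reg_count * 2;
}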
15251
 
15252
static void
15253
do_neon_ldr_str (void)
15254
{
15255
  int is_ldr = (inst.instruction & (1 << 20)) != 0;
15256
 
15257
  /* Use of PC in vstr in ARM mode is deprecated in ARMv7,
15258
     and is UNPREDICTABLE in Thumb mode.  */
15259
  if (!is_ldr
15260
      && inst.operands[1].reg == REG_PC
15261
      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
15262
    {
15263
      if (!thumb_mode && warn_on_deprecated)
15264
        as_warn (_("Use of PC here is deprecated"));
15265
      else
15266
        inst.error = _("Use of PC here is UNPREDICTABLE");
15267
    }
15268
 
15269
  if (inst.operands[0].issingle)
15270
    {
15271
      if (is_ldr)
15272
        do_vfp_nsyn_opcode ("flds");
15273
      else
15274
        do_vfp_nsyn_opcode ("fsts");
15275
    }
15276
  else
15277
    {
15278
      if (is_ldr)
15279
        do_vfp_nsyn_opcode ("fldd");
15280
      else
15281
        do_vfp_nsyn_opcode ("fstd");
15282
    }
15283
}
15284
 
15285
/* "interleave" version also handles non-interleaving register VLD1/VST1
15286
   instructions.  */
15287
 
15288
static void
15289
do_neon_ld_st_interleave (void)
15290
{
15291
  struct neon_type_el et = neon_check_type (1, NS_NULL,
15292
                                            N_8 | N_16 | N_32 | N_64);
15293
  unsigned alignbits = 0;
15294
  unsigned idx;
15295
  /* The bits in this table go:
15296
     0: register stride of one (0) or two (1)
15297
     1,2: register list length, minus one (1, 2, 3, 4).
15298
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
15299
     We use -1 for invalid entries.  */
15300
  const int typetable[] =
15301
    {
15302
      0x7,  -1, 0xa,  -1, 0x6,  -1, 0x2,  -1, /* VLD1 / VST1.  */
15303
       -1,  -1, 0x8, 0x9,  -1,  -1, 0x3,  -1, /* VLD2 / VST2.  */
15304
       -1,  -1,  -1,  -1, 0x4, 0x5,  -1,  -1, /* VLD3 / VST3.  */
15305
       -1,  -1,  -1,  -1,  -1,  -1, 0x0, 0x1  /* VLD4 / VST4.  */
15306
    };
15307
  int typebits;
15308
 
15309
  if (et.type == NT_invtype)
15310
    return;
15311
 
15312
  if (inst.operands[1].immisalign)
15313
    switch (inst.operands[1].imm >> 8)
15314
      {
15315
      case 64: alignbits = 1; break;
15316
      case 128:
15317
        if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
15318
            && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
15319
          goto bad_alignment;
15320
        alignbits = 2;
15321
        break;
15322
      case 256:
15323
        if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
15324
          goto bad_alignment;
15325
        alignbits = 3;
15326
        break;
15327
      default:
15328
      bad_alignment:
15329
        first_error (_("bad alignment"));
15330
        return;
15331
      }
15332
 
15333
  inst.instruction |= alignbits << 4;
15334
  inst.instruction |= neon_logbits (et.size) << 6;
15335
 
15336
  /* Bits [4:6] of the immediate in a list specifier encode register stride
15337
     (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
15338
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
15339
     up the right value for "type" in a table based on this value and the given
15340
     list style, then stick it back.  */
15341
  idx = ((inst.operands[0].imm >> 4) & 7)
15342
        | (((inst.instruction >> 8) & 3) << 3);
15343
 
15344
  typebits = typetable[idx];
15345
 
15346
  constraint (typebits == -1, _("bad list type for instruction"));
15347
 
15348
  inst.instruction &= ~0xf00;
15349
  inst.instruction |= typebits << 8;
15350
}
15351
 
15352
/* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
15353
   *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
15354
   otherwise. The variable arguments are a list of pairs of legal (size, align)
15355
   values, terminated with -1.  */
15356
 
15357
static int
15358
neon_alignment_bit (int size, int align, int *do_align, ...)
15359
{
15360
  va_list ap;
15361
  int result = FAIL, thissize, thisalign;
15362
 
15363
  if (!inst.operands[1].immisalign)
15364
    {
15365
      *do_align = 0;
15366
      return SUCCESS;
15367
    }
15368
 
15369
  va_start (ap, do_align);
15370
 
15371
  do
15372
    {
15373
      thissize = va_arg (ap, int);
15374
      if (thissize == -1)
15375
        break;
15376
      thisalign = va_arg (ap, int);
15377
 
15378
      if (size == thissize && align == thisalign)
15379
        result = SUCCESS;
15380
    }
15381
  while (result != SUCCESS);
15382
 
15383
  va_end (ap);
15384
 
15385
  if (result == SUCCESS)
15386
    *do_align = 1;
15387
  else
15388
    first_error (_("unsupported alignment for instruction"));
15389
 
15390
  return result;
15391
}
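
/* Illustrative sketch, not part of the assembler: the same "(size, align)
   pairs terminated by -1" convention that neon_alignment_bit consumes,
   written as a standalone scan so the termination rule is explicit.  The
   helper name is hypothetical; <stdarg.h> is already included above.  */
static int
example_pair_match (int size, int align, ...)
{
  va_list ap;
  int thissize, thisalign, found = 0;

  va_start (ap, align);
  while ((thissize = va_arg (ap, int)) != -1)
    {
      thisalign = va_arg (ap, int);
      if (thissize == size && thisalign == align)
        {
          found = 1;
          break;
        }
    }
  va_end (ap);

  /* e.g. example_pair_match (32, 32, 16, 16, 32, 32, -1) returns 1.  */
  return found;
}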
15392
 
15393
static void
15394
do_neon_ld_st_lane (void)
15395
{
15396
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
15397
  int align_good, do_align = 0;
15398
  int logsize = neon_logbits (et.size);
15399
  int align = inst.operands[1].imm >> 8;
15400
  int n = (inst.instruction >> 8) & 3;
15401
  int max_el = 64 / et.size;
15402
 
15403
  if (et.type == NT_invtype)
15404
    return;
15405
 
15406
  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
15407
              _("bad list length"));
15408
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
15409
              _("scalar index out of range"));
15410
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
15411
              && et.size == 8,
15412
              _("stride of 2 unavailable when element size is 8"));
15413
 
15414
  switch (n)
15415
    {
15416
    case 0:  /* VLD1 / VST1.  */
15417
      align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
15418
                                       32, 32, -1);
15419
      if (align_good == FAIL)
15420
        return;
15421
      if (do_align)
15422
        {
15423
          unsigned alignbits = 0;
15424
          switch (et.size)
15425
            {
15426
            case 16: alignbits = 0x1; break;
15427
            case 32: alignbits = 0x3; break;
15428
            default: ;
15429
            }
15430
          inst.instruction |= alignbits << 4;
15431
        }
15432
      break;
15433
 
15434
    case 1:  /* VLD2 / VST2.  */
15435
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
15436
                                       32, 64, -1);
15437
      if (align_good == FAIL)
15438
        return;
15439
      if (do_align)
15440
        inst.instruction |= 1 << 4;
15441
      break;
15442
 
15443
    case 2:  /* VLD3 / VST3.  */
15444
      constraint (inst.operands[1].immisalign,
15445
                  _("can't use alignment with this instruction"));
15446
      break;
15447
 
15448
    case 3:  /* VLD4 / VST4.  */
15449
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
15450
                                       16, 64, 32, 64, 32, 128, -1);
15451
      if (align_good == FAIL)
15452
        return;
15453
      if (do_align)
15454
        {
15455
          unsigned alignbits = 0;
15456
          switch (et.size)
15457
            {
15458
            case 8:  alignbits = 0x1; break;
15459
            case 16: alignbits = 0x1; break;
15460
            case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
15461
            default: ;
15462
            }
15463
          inst.instruction |= alignbits << 4;
15464
        }
15465
      break;
15466
 
15467
    default: ;
15468
    }
15469
 
15470
  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
15471
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
15472
    inst.instruction |= 1 << (4 + logsize);
15473
 
15474
  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
15475
  inst.instruction |= logsize << 10;
15476
}
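
/* Illustrative sketch, not part of the assembler: where the lane index
   lands in the single-lane VLD<n>/VST<n> encoding for each element size,
   mirroring the shift used above (logsize is 0, 1 or 2 for 8-, 16- and
   32-bit elements respectively).  The helper name is hypothetical.  */
static unsigned int
example_lane_bits (unsigned int lane, int logsize)
{
  /* 8-bit elements: lane in bits [7:5]; 16-bit: bits [7:6]; 32-bit: bit 7.  */
  return lane << (logsize + 5);
}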
15477
 
15478
/* Encode single n-element structure to all lanes VLD<n> instructions.  */
15479
 
15480
static void
15481
do_neon_ld_dup (void)
15482
{
15483
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
15484
  int align_good, do_align = 0;
15485
 
15486
  if (et.type == NT_invtype)
15487
    return;
15488
 
15489
  switch ((inst.instruction >> 8) & 3)
15490
    {
15491
    case 0:  /* VLD1.  */
15492
      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
15493
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
15494
                                       &do_align, 16, 16, 32, 32, -1);
15495
      if (align_good == FAIL)
15496
        return;
15497
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
15498
        {
15499
        case 1: break;
15500
        case 2: inst.instruction |= 1 << 5; break;
15501
        default: first_error (_("bad list length")); return;
15502
        }
15503
      inst.instruction |= neon_logbits (et.size) << 6;
15504
      break;
15505
 
15506
    case 1:  /* VLD2.  */
15507
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
15508
                                       &do_align, 8, 16, 16, 32, 32, 64, -1);
15509
      if (align_good == FAIL)
15510
        return;
15511
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
15512
                  _("bad list length"));
15513
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
15514
        inst.instruction |= 1 << 5;
15515
      inst.instruction |= neon_logbits (et.size) << 6;
15516
      break;
15517
 
15518
    case 2:  /* VLD3.  */
15519
      constraint (inst.operands[1].immisalign,
15520
                  _("can't use alignment with this instruction"));
15521
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
15522
                  _("bad list length"));
15523
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
15524
        inst.instruction |= 1 << 5;
15525
      inst.instruction |= neon_logbits (et.size) << 6;
15526
      break;
15527
 
15528
    case 3:  /* VLD4.  */
15529
      {
15530
        int align = inst.operands[1].imm >> 8;
15531
        align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
15532
                                         16, 64, 32, 64, 32, 128, -1);
15533
        if (align_good == FAIL)
15534
          return;
15535
        constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
15536
                    _("bad list length"));
15537
        if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
15538
          inst.instruction |= 1 << 5;
15539
        if (et.size == 32 && align == 128)
15540
          inst.instruction |= 0x3 << 6;
15541
        else
15542
          inst.instruction |= neon_logbits (et.size) << 6;
15543
      }
15544
      break;
15545
 
15546
    default: ;
15547
    }
15548
 
15549
  inst.instruction |= do_align << 4;
15550
}
15551
 
15552
/* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
15553
   apart from bits [11:4]).  */
15554
 
15555
static void
15556
do_neon_ldx_stx (void)
15557
{
15558
  if (inst.operands[1].isreg)
15559
    constraint (inst.operands[1].reg == REG_PC, BAD_PC);
15560
 
15561
  switch (NEON_LANE (inst.operands[0].imm))
15562
    {
15563
    case NEON_INTERLEAVE_LANES:
15564
      NEON_ENCODE (INTERLV, inst);
15565
      do_neon_ld_st_interleave ();
15566
      break;
15567
 
15568
    case NEON_ALL_LANES:
15569
      NEON_ENCODE (DUP, inst);
15570
      do_neon_ld_dup ();
15571
      break;
15572
 
15573
    default:
15574
      NEON_ENCODE (LANE, inst);
15575
      do_neon_ld_st_lane ();
15576
    }
15577
 
15578
  /* L bit comes from bit mask.  */
15579
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15580
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15581
  inst.instruction |= inst.operands[1].reg << 16;
15582
 
15583
  if (inst.operands[1].postind)
15584
    {
15585
      int postreg = inst.operands[1].imm & 0xf;
15586
      constraint (!inst.operands[1].immisreg,
15587
                  _("post-index must be a register"));
15588
      constraint (postreg == 0xd || postreg == 0xf,
15589
                  _("bad register for post-index"));
15590
      inst.instruction |= postreg;
15591
    }
15592
  else if (inst.operands[1].writeback)
15593
    {
15594
      inst.instruction |= 0xd;
15595
    }
15596
  else
15597
    inst.instruction |= 0xf;
15598
 
15599
  if (thumb_mode)
15600
    inst.instruction |= 0xf9000000;
15601
  else
15602
    inst.instruction |= 0xf4000000;
15603
}
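
/* Illustrative sketch, not part of the assembler: the Rm field values
   chosen above for the three VLD<n>/VST<n> addressing modes.  0xf means
   no writeback, 0xd means base writeback ("!"), and any other value is
   the post-index register.  The helper name is hypothetical.  */
static unsigned int
example_vldst_rm_field (int postind, int writeback, unsigned int postreg)
{
  if (postind)
    return postreg & 0xf;       /* [Rn], Rm  */
  if (writeback)
    return 0xd;                 /* [Rn]!     */
  return 0xf;                   /* [Rn]      */
}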
15604
 
15605
/* Overall per-instruction processing.  */
15606
 
15607
/* We need to be able to fix up arbitrary expressions in some statements.
15608
   This is so that we can handle symbols that are an arbitrary distance from
15609
   the pc.  The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
15610
   which returns part of an address in a form which will be valid for
15611
   a data instruction.  We do this by pushing the expression into a symbol
15612
   in the expr_section, and creating a fix for that.  */
15613
 
15614
static void
15615
fix_new_arm (fragS *       frag,
15616
             int           where,
15617
             short int     size,
15618
             expressionS * exp,
15619
             int           pc_rel,
15620
             int           reloc)
15621
{
15622
  fixS *           new_fix;
15623
 
15624
  switch (exp->X_op)
15625
    {
15626
    case O_constant:
15627
      if (pc_rel)
15628
        {
15629
          /* Create an absolute valued symbol, so we have something to
15630
             refer to in the object file.  Unfortunately for us, gas's
15631
             generic expression parsing will already have folded out
15632
             any use of .set foo/.type foo %function that may have
15633
             been used to set type information on the target location
15634
             that is being specified symbolically.  We have to presume
15635
             the user knows what they are doing.  */
15636
          char name[16 + 8];
15637
          symbolS *symbol;
15638
 
15639
          sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);
15640
 
15641
          symbol = symbol_find_or_make (name);
15642
          S_SET_SEGMENT (symbol, absolute_section);
15643
          symbol_set_frag (symbol, &zero_address_frag);
15644
          S_SET_VALUE (symbol, exp->X_add_number);
15645
          exp->X_op = O_symbol;
15646
          exp->X_add_symbol = symbol;
15647
          exp->X_add_number = 0;
15648
        }
15649
      /* FALLTHROUGH */
15650
    case O_symbol:
15651
    case O_add:
15652
    case O_subtract:
15653
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
15654
                             (enum bfd_reloc_code_real) reloc);
15655
      break;
15656
 
15657
    default:
15658
      new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
15659
                                  pc_rel, (enum bfd_reloc_code_real) reloc);
15660
      break;
15661
    }
15662
 
15663
  /* Mark whether the fix is to a THUMB instruction, or an ARM
15664
     instruction.  */
15665
  new_fix->tc_fix_data = thumb_mode;
15666
}
15667
 
15668
/* Create a frag for an instruction requiring relaxation.  */
15669
static void
15670
output_relax_insn (void)
15671
{
15672
  char * to;
15673
  symbolS *sym;
15674
  int offset;
15675
 
15676
  /* The size of the instruction is unknown, so tie the debug info to the
15677
     start of the instruction.  */
15678
  dwarf2_emit_insn (0);
15679
 
15680
  switch (inst.reloc.exp.X_op)
15681
    {
15682
    case O_symbol:
15683
      sym = inst.reloc.exp.X_add_symbol;
15684
      offset = inst.reloc.exp.X_add_number;
15685
      break;
15686
    case O_constant:
15687
      sym = NULL;
15688
      offset = inst.reloc.exp.X_add_number;
15689
      break;
15690
    default:
15691
      sym = make_expr_symbol (&inst.reloc.exp);
15692
      offset = 0;
15693
      break;
15694
  }
15695
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
15696
                 inst.relax, sym, offset, NULL/*offset, opcode*/);
15697
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
15698
}
15699
 
15700
/* Write a 32-bit thumb instruction to buf.  */
15701
static void
15702
put_thumb32_insn (char * buf, unsigned long insn)
15703
{
15704
  md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
15705
  md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
15706
}
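
/* Illustrative sketch, not part of the assembler: a 32-bit Thumb-2
   instruction is stored as two 16-bit units with the most significant
   halfword first, which is the order put_thumb32_insn emits them in;
   e.g. 0xf3af8000 (NOP.W) is written as 0xf3af then 0x8000.  The helper
   name is hypothetical.  */
static void
example_split_thumb32 (unsigned long insn,
                       unsigned int *first_halfword,
                       unsigned int *second_halfword)
{
  *first_halfword  = (insn >> 16) & 0xffff;   /* written first   */
  *second_halfword = insn & 0xffff;           /* written second  */
}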
15707
 
15708
static void
15709
output_inst (const char * str)
15710
{
15711
  char * to = NULL;
15712
 
15713
  if (inst.error)
15714
    {
15715
      as_bad ("%s -- `%s'", inst.error, str);
15716
      return;
15717
    }
15718
  if (inst.relax)
15719
    {
15720
      output_relax_insn ();
15721
      return;
15722
    }
15723
  if (inst.size == 0)
15724
    return;
15725
 
15726
  to = frag_more (inst.size);
15727
  /* PR 9814: Record the thumb mode into the current frag so that we know
15728
     what type of NOP padding to use, if necessary.  We override any previous
15729
     setting so that if the mode has changed then the NOPS that we use will
15730
     match the encoding of the last instruction in the frag.  */
15731
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
15732
 
15733
  if (thumb_mode && (inst.size > THUMB_SIZE))
15734
    {
15735
      gas_assert (inst.size == (2 * THUMB_SIZE));
15736
      put_thumb32_insn (to, inst.instruction);
15737
    }
15738
  else if (inst.size > INSN_SIZE)
15739
    {
15740
      gas_assert (inst.size == (2 * INSN_SIZE));
15741
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
15742
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
15743
    }
15744
  else
15745
    md_number_to_chars (to, inst.instruction, inst.size);
15746
 
15747
  if (inst.reloc.type != BFD_RELOC_UNUSED)
15748
    fix_new_arm (frag_now, to - frag_now->fr_literal,
15749
                 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
15750
                 inst.reloc.type);
15751
 
15752
  dwarf2_emit_insn (inst.size);
15753
}
15754
 
15755
static char *
15756
output_it_inst (int cond, int mask, char * to)
15757
{
15758
  unsigned long instruction = 0xbf00;
15759
 
15760
  mask &= 0xf;
15761
  instruction |= mask;
15762
  instruction |= cond << 4;
15763
 
15764
  if (to == NULL)
15765
    {
15766
      to = frag_more (2);
15767
#ifdef OBJ_ELF
15768
      dwarf2_emit_insn (2);
15769
#endif
15770
    }
15771
 
15772
  md_number_to_chars (to, instruction, 2);
15773
 
15774
  return to;
15775
}
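
/* Illustrative sketch, not part of the assembler: the 16-bit IT
   instruction is 0xbf00 with the first condition in bits [7:4] and the
   mask in bits [3:0], exactly as composed above.  For example, "IT EQ"
   (cond 0x0, mask 0x8) is 0xbf08 and "ITE EQ" (mask 0xc) is 0xbf0c.
   The helper name is hypothetical.  */
static unsigned int
example_it_opcode (int cond, int mask)
{
  return 0xbf00u | ((cond & 0xf) << 4) | (mask & 0xf);
}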
15776
 
15777
/* Tag values used in struct asm_opcode's tag field.  */
15778
enum opcode_tag
15779
{
15780
  OT_unconditional,     /* Instruction cannot be conditionalized.
15781
                           The ARM condition field is still 0xE.  */
15782
  OT_unconditionalF,    /* Instruction cannot be conditionalized
15783
                           and carries 0xF in its ARM condition field.  */
15784
  OT_csuffix,           /* Instruction takes a conditional suffix.  */
15785
  OT_csuffixF,          /* Some forms of the instruction take a conditional
15786
                           suffix, others place 0xF where the condition field
15787
                           would be.  */
15788
  OT_cinfix3,           /* Instruction takes a conditional infix,
15789
                           beginning at character index 3.  (In
15790
                           unified mode, it becomes a suffix.)  */
15791
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
15792
                            tsts, cmps, cmns, and teqs. */
15793
  OT_cinfix3_legacy,    /* Legacy instruction takes a conditional infix at
15794
                           character index 3, even in unified mode.  Used for
15795
                           legacy instructions where suffix and infix forms
15796
                           may be ambiguous.  */
15797
  OT_csuf_or_in3,       /* Instruction takes either a conditional
15798
                           suffix or an infix at character index 3.  */
15799
  OT_odd_infix_unc,     /* This is the unconditional variant of an
15800
                           instruction that takes a conditional infix
15801
                           at an unusual position.  In unified mode,
15802
                           this variant will accept a suffix.  */
15803
  OT_odd_infix_0        /* Values greater than or equal to OT_odd_infix_0
15804
                           are the conditional variants of instructions that
15805
                           take conditional infixes in unusual positions.
15806
                           The infix appears at character index
15807
                           (tag - OT_odd_infix_0).  These are not accepted
15808
                           in unified mode.  */
15809
};
15810
 
15811
/* Subroutine of md_assemble, responsible for looking up the primary
15812
   opcode from the mnemonic the user wrote.  STR points to the
15813
   beginning of the mnemonic.
15814
 
15815
   This is not simply a hash table lookup, because of conditional
15816
   variants.  Most instructions have conditional variants, which are
15817
   expressed with a _conditional affix_ to the mnemonic.  If we were
15818
   to encode each conditional variant as a literal string in the opcode
15819
   table, it would have approximately 20,000 entries.
15820
 
15821
   Most mnemonics take this affix as a suffix, and in unified syntax,
15822
   'most' is upgraded to 'all'.  However, in the divided syntax, some
15823
   instructions take the affix as an infix, notably the s-variants of
15824
   the arithmetic instructions.  Of those instructions, all but six
15825
   have the infix appear after the third character of the mnemonic.
15826
 
15827
   Accordingly, the algorithm for looking up primary opcodes given
15828
   an identifier is:
15829
 
15830
   1. Look up the identifier in the opcode table.
15831
      If we find a match, go to step U.
15832
 
15833
   2. Look up the last two characters of the identifier in the
15834
      conditions table.  If we find a match, look up the first N-2
15835
      characters of the identifier in the opcode table.  If we
15836
      find a match, go to step CE.
15837
 
15838
   3. Look up the fourth and fifth characters of the identifier in
15839
      the conditions table.  If we find a match, extract those
15840
      characters from the identifier, and look up the remaining
15841
      characters in the opcode table.  If we find a match, go
15842
      to step CM.
15843
 
15844
   4. Fail.
15845
 
15846
   U. Examine the tag field of the opcode structure, in case this is
15847
      one of the six instructions with its conditional infix in an
15848
      unusual place.  If it is, the tag tells us where to find the
15849
      infix; look it up in the conditions table and set inst.cond
15850
      accordingly.  Otherwise, this is an unconditional instruction.
15851
      Again set inst.cond accordingly.  Return the opcode structure.
15852
 
15853
  CE. Examine the tag field to make sure this is an instruction that
15854
      should receive a conditional suffix.  If it is not, fail.
15855
      Otherwise, set inst.cond from the suffix we already looked up,
15856
      and return the opcode structure.
15857
 
15858
  CM. Examine the tag field to make sure this is an instruction that
15859
      should receive a conditional infix after the third character.
15860
      If it is not, fail.  Otherwise, undo the edits to the current
15861
      line of input and proceed as for case CE.  */
15862
 
15863
static const struct asm_opcode *
15864
opcode_lookup (char **str)
15865
{
15866
  char *end, *base;
15867
  char *affix;
15868
  const struct asm_opcode *opcode;
15869
  const struct asm_cond *cond;
15870
  char save[2];
15871
 
15872
  /* Scan up to the end of the mnemonic, which must end in white space,
15873
     '.' (in unified mode, or for Neon/VFP instructions), or end of string.  */
15874
  for (base = end = *str; *end != '\0'; end++)
15875
    if (*end == ' ' || *end == '.')
15876
      break;
15877
 
15878
  if (end == base)
15879
    return NULL;
15880
 
15881
  /* Handle a possible width suffix and/or Neon type suffix.  */
15882
  if (end[0] == '.')
15883
    {
15884
      int offset = 2;
15885
 
15886
      /* The .w and .n suffixes are only valid if the unified syntax is in
15887
         use.  */
15888
      if (unified_syntax && end[1] == 'w')
15889
        inst.size_req = 4;
15890
      else if (unified_syntax && end[1] == 'n')
15891
        inst.size_req = 2;
15892
      else
15893
        offset = 0;
15894
 
15895
      inst.vectype.elems = 0;
15896
 
15897
      *str = end + offset;
15898
 
15899
      if (end[offset] == '.')
15900
        {
15901
          /* See if we have a Neon type suffix (possible in either unified or
15902
             non-unified ARM syntax mode).  */
15903
          if (parse_neon_type (&inst.vectype, str) == FAIL)
15904
            return NULL;
15905
        }
15906
      else if (end[offset] != '\0' && end[offset] != ' ')
15907
        return NULL;
15908
    }
15909
  else
15910
    *str = end;
15911
 
15912
  /* Look for unaffixed or special-case affixed mnemonic.  */
15913
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
15914
                                                    end - base);
15915
  if (opcode)
15916
    {
15917
      /* step U */
15918
      if (opcode->tag < OT_odd_infix_0)
15919
        {
15920
          inst.cond = COND_ALWAYS;
15921
          return opcode;
15922
        }
15923
 
15924
      if (warn_on_deprecated && unified_syntax)
15925
        as_warn (_("conditional infixes are deprecated in unified syntax"));
15926
      affix = base + (opcode->tag - OT_odd_infix_0);
15927
      cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
15928
      gas_assert (cond);
15929
 
15930
      inst.cond = cond->value;
15931
      return opcode;
15932
    }
15933
 
15934
  /* Cannot have a conditional suffix on a mnemonic of less than two
15935
     characters.  */
15936
  if (end - base < 3)
15937
    return NULL;
15938
 
15939
  /* Look for suffixed mnemonic.  */
15940
  affix = end - 2;
15941
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
15942
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
15943
                                                    affix - base);
15944
  if (opcode && cond)
15945
    {
15946
      /* step CE */
15947
      switch (opcode->tag)
15948
        {
15949
        case OT_cinfix3_legacy:
15950
          /* Ignore conditional suffixes matched on infix only mnemonics.  */
15951
          break;
15952
 
15953
        case OT_cinfix3:
15954
        case OT_cinfix3_deprecated:
15955
        case OT_odd_infix_unc:
15956
          if (!unified_syntax)
15957
            return 0;
15958
          /* else fall through */
15959
 
15960
        case OT_csuffix:
15961
        case OT_csuffixF:
15962
        case OT_csuf_or_in3:
15963
          inst.cond = cond->value;
15964
          return opcode;
15965
 
15966
        case OT_unconditional:
15967
        case OT_unconditionalF:
15968
          if (thumb_mode)
15969
            inst.cond = cond->value;
15970
          else
15971
            {
15972
              /* Delayed diagnostic.  */
15973
              inst.error = BAD_COND;
15974
              inst.cond = COND_ALWAYS;
15975
            }
15976
          return opcode;
15977
 
15978
        default:
15979
          return NULL;
15980
        }
15981
    }
15982
 
15983
  /* Cannot have a usual-position infix on a mnemonic of less than
15984
     six characters (five would be a suffix).  */
15985
  if (end - base < 6)
15986
    return NULL;
15987
 
15988
  /* Look for infixed mnemonic in the usual position.  */
15989
  affix = base + 3;
15990
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
15991
  if (!cond)
15992
    return NULL;
15993
 
15994
  memcpy (save, affix, 2);
15995
  memmove (affix, affix + 2, (end - affix) - 2);
15996
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
15997
                                                    (end - base) - 2);
15998
  memmove (affix + 2, affix, (end - affix) - 2);
15999
  memcpy (affix, save, 2);
16000
 
16001
  if (opcode
16002
      && (opcode->tag == OT_cinfix3
16003
          || opcode->tag == OT_cinfix3_deprecated
16004
          || opcode->tag == OT_csuf_or_in3
16005
          || opcode->tag == OT_cinfix3_legacy))
16006
    {
16007
      /* Step CM.  */
16008
      if (warn_on_deprecated && unified_syntax
16009
          && (opcode->tag == OT_cinfix3
16010
              || opcode->tag == OT_cinfix3_deprecated))
16011
        as_warn (_("conditional infixes are deprecated in unified syntax"));
16012
 
16013
      inst.cond = cond->value;
16014
      return opcode;
16015
    }
16016
 
16017
  return NULL;
16018
}
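
/* Illustrative sketch, not part of the assembler: step 2 of the lookup
   algorithm described above splits a candidate mnemonic into a base and
   a trailing two-character condition, e.g. "addeq" -> "add" + "eq".
   The real code consults the arm_ops_hsh and arm_cond_hsh hash tables;
   this hypothetical helper only shows the split (memcpy is already
   available in this file).  */
static int
example_split_cond_suffix (const char *mnem, int len,
                           char *base /* at least len - 1 bytes */,
                           char cond[3])
{
  if (len < 3)
    return 0;                   /* too short to carry a suffix */
  memcpy (base, mnem, len - 2); /* first len - 2 characters    */
  base[len - 2] = '\0';
  cond[0] = mnem[len - 2];      /* last two characters         */
  cond[1] = mnem[len - 1];
  cond[2] = '\0';
  return 1;
}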
16019
 
16020
/* This function generates an initial IT instruction, leaving its block
16021
   virtually open for the new instructions. Eventually,
16022
   the mask will be updated by now_it_add_mask () each time
16023
   a new instruction needs to be included in the IT block.
16024
   Finally, the block is closed with close_automatic_it_block ().
16025
   The block closure can be requested either from md_assemble (),
16026
   a tencode (), or due to a label hook.  */
16027
 
16028
static void
16029
new_automatic_it_block (int cond)
16030
{
16031
  now_it.state = AUTOMATIC_IT_BLOCK;
16032
  now_it.mask = 0x18;
16033
  now_it.cc = cond;
16034
  now_it.block_length = 1;
16035
  mapping_state (MAP_THUMB);
16036
  now_it.insn = output_it_inst (cond, now_it.mask, NULL);
16037
}
16038
 
16039
/* Close an automatic IT block.
16040
   See comments in new_automatic_it_block ().  */
16041
 
16042
static void
16043
close_automatic_it_block (void)
16044
{
16045
  now_it.mask = 0x10;
16046
  now_it.block_length = 0;
16047
}
16048
 
16049
/* Update the mask of the current automatically-generated IT
16050
   instruction. See comments in new_automatic_it_block ().  */
16051
 
16052
static void
16053
now_it_add_mask (int cond)
16054
{
16055
#define CLEAR_BIT(value, nbit)  ((value) & ~(1 << (nbit)))
16056
#define SET_BIT_VALUE(value, bitvalue, nbit)  (CLEAR_BIT (value, nbit) \
16057
                                              | ((bitvalue) << (nbit)))
16058
  const int resulting_bit = (cond & 1);
16059
 
16060
  now_it.mask &= 0xf;
16061
  now_it.mask = SET_BIT_VALUE (now_it.mask,
16062
                                   resulting_bit,
16063
                                  (5 - now_it.block_length));
16064
  now_it.mask = SET_BIT_VALUE (now_it.mask,
16065
                                   1,
16066
                                   ((5 - now_it.block_length) - 1) );
16067
  output_it_inst (now_it.cc, now_it.mask, now_it.insn);
16068
 
16069
#undef CLEAR_BIT
16070
#undef SET_BIT_VALUE
16071
}
16072
 
16073
/* The IT block handling machinery is accessed through these functions:
16074
     it_fsm_pre_encode ()               from md_assemble ()
16075
     set_it_insn_type ()                optional, from the tencode functions
16076
     set_it_insn_type_last ()           ditto
16077
     in_it_block ()                     ditto
16078
     it_fsm_post_encode ()              from md_assemble ()
16079
     force_automatic_it_block_close ()  from label handling functions
16080
 
16081
   Rationale:
16082
     1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
16083
        initializing the IT insn type with a generic initial value depending
16084
        on the inst.condition.
16085
     2) During the tencode function, two things may happen:
16086
        a) The tencode function overrides the IT insn type by
16087
           calling either set_it_insn_type (type) or set_it_insn_type_last ().
16088
        b) The tencode function queries the IT block state by
16089
           calling in_it_block () (i.e. to determine narrow/not narrow mode).
16090
 
16091
        Both set_it_insn_type and in_it_block run the internal FSM state
16092
        handling function (handle_it_state), because: a) setting the IT insn
16093
        type may result in an invalid state (exiting the function),
16094
        and b) querying the state requires the FSM to be updated.
16095
        Specifically we want to avoid creating an IT block for conditional
16096
        branches, so it_fsm_pre_encode is actually a guess and we can't
16097
        determine whether an IT block is required until the tencode () routine
16098
        has decided what type of instruction this actually is.
16099
        Because of this, if set_it_insn_type and in_it_block have to be used,
16100
        set_it_insn_type has to be called first.
16101
 
16102
        set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
16103
        determines the insn IT type depending on the inst.cond code.
16104
        When a tencode () routine encodes an instruction that can be
16105
        either outside an IT block, or, in the case of being inside, has to be
16106
        the last one, set_it_insn_type_last () will determine the proper
16107
        IT instruction type based on the inst.cond code. Otherwise,
16108
        set_it_insn_type can be called for overriding that logic or
16109
        for covering other cases.
16110
 
16111
        Calling handle_it_state () may not transition the IT block state to
16112
        OUTSIDE_IT_BLOCK immediately, since the (current) state could still
16113
        be queried.  Instead, if the FSM determines that the state should
16114
        be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
16115
        after the tencode () function: that's what it_fsm_post_encode () does.
16116
 
16117
        Since in_it_block () calls the state handling function to get an
16118
        updated state, an error may occur (due to invalid insns combination).
16119
        In that case, inst.error is set.
16120
        Therefore, inst.error has to be checked after the execution of
16121
        the tencode () routine.
16122
 
16123
     3) Back in md_assemble(), it_fsm_post_encode () is called to commit
16124
        any pending state change (if any) that didn't take place in
16125
        handle_it_state () as explained above.  */
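
/* Illustrative sketch, not part of the assembler proper: the calling
   sequence described above, roughly as md_assemble () (further below)
   uses it.  The function names are real; the surrounding control flow is
   schematic only, hence the #if 0 guard.  */
#if 0
  it_fsm_pre_encode ();      /* guess the IT insn type from inst.cond      */
  opcode->tencode ();        /* may call set_it_insn_type / in_it_block    */
  it_fsm_post_encode ();     /* commit any pending OUTSIDE_IT_BLOCK change */
  if (inst.error)
    as_bad ("%s -- `%s'", inst.error, str);   /* check only after tencode  */
#endif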
16126
 
16127
static void
16128
it_fsm_pre_encode (void)
16129
{
16130
  if (inst.cond != COND_ALWAYS)
16131
    inst.it_insn_type = INSIDE_IT_INSN;
16132
  else
16133
    inst.it_insn_type = OUTSIDE_IT_INSN;
16134
 
16135
  now_it.state_handled = 0;
16136
}
16137
 
16138
/* IT state FSM handling function.  */
16139
 
16140
static int
16141
handle_it_state (void)
16142
{
16143
  now_it.state_handled = 1;
16144
 
16145
  switch (now_it.state)
16146
    {
16147
    case OUTSIDE_IT_BLOCK:
16148
      switch (inst.it_insn_type)
16149
        {
16150
        case OUTSIDE_IT_INSN:
16151
          break;
16152
 
16153
        case INSIDE_IT_INSN:
16154
        case INSIDE_IT_LAST_INSN:
16155
          if (thumb_mode == 0)
16156
            {
16157
              if (unified_syntax
16158
                  && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
16159
                as_tsktsk (_("Warning: conditional outside an IT block"\
16160
                             " for Thumb."));
16161
            }
16162
          else
16163
            {
16164
              if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
16165
                  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2))
16166
                {
16167
                  /* Automatically generate the IT instruction.  */
16168
                  new_automatic_it_block (inst.cond);
16169
                  if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
16170
                    close_automatic_it_block ();
16171
                }
16172
              else
16173
                {
16174
                  inst.error = BAD_OUT_IT;
16175
                  return FAIL;
16176
                }
16177
            }
16178
          break;
16179
 
16180
        case IF_INSIDE_IT_LAST_INSN:
16181
        case NEUTRAL_IT_INSN:
16182
          break;
16183
 
16184
        case IT_INSN:
16185
          now_it.state = MANUAL_IT_BLOCK;
16186
          now_it.block_length = 0;
16187
          break;
16188
        }
16189
      break;
16190
 
16191
    case AUTOMATIC_IT_BLOCK:
16192
      /* Three things may happen now:
16193
         a) We should increment current it block size;
16194
         b) We should close current it block (closing insn or 4 insns);
16195
         c) We should close current it block and start a new one (due
16196
         to incompatible conditions or
16197
         4 insns-length block reached).  */
16198
 
16199
      switch (inst.it_insn_type)
16200
        {
16201
        case OUTSIDE_IT_INSN:
16202
          /* The closure of the block shall happen immediately,
16203
             so any in_it_block () call reports the block as closed.  */
16204
          force_automatic_it_block_close ();
16205
          break;
16206
 
16207
        case INSIDE_IT_INSN:
16208
        case INSIDE_IT_LAST_INSN:
16209
        case IF_INSIDE_IT_LAST_INSN:
16210
          now_it.block_length++;
16211
 
16212
          if (now_it.block_length > 4
16213
              || !now_it_compatible (inst.cond))
16214
            {
16215
              force_automatic_it_block_close ();
16216
              if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
16217
                new_automatic_it_block (inst.cond);
16218
            }
16219
          else
16220
            {
16221
              now_it_add_mask (inst.cond);
16222
            }
16223
 
16224
          if (now_it.state == AUTOMATIC_IT_BLOCK
16225
              && (inst.it_insn_type == INSIDE_IT_LAST_INSN
16226
                  || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
16227
            close_automatic_it_block ();
16228
          break;
16229
 
16230
        case NEUTRAL_IT_INSN:
16231
          now_it.block_length++;
16232
 
16233
          if (now_it.block_length > 4)
16234
            force_automatic_it_block_close ();
16235
          else
16236
            now_it_add_mask (now_it.cc & 1);
16237
          break;
16238
 
16239
        case IT_INSN:
16240
          close_automatic_it_block ();
16241
          now_it.state = MANUAL_IT_BLOCK;
16242
          break;
16243
        }
16244
      break;
16245
 
16246
    case MANUAL_IT_BLOCK:
16247
      {
16248
        /* Check conditional suffixes.  */
16249
        const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
16250
        int is_last;
16251
        now_it.mask <<= 1;
16252
        now_it.mask &= 0x1f;
16253
        is_last = (now_it.mask == 0x10);
16254
 
16255
        switch (inst.it_insn_type)
16256
          {
16257
          case OUTSIDE_IT_INSN:
16258
            inst.error = BAD_NOT_IT;
16259
            return FAIL;
16260
 
16261
          case INSIDE_IT_INSN:
16262
            if (cond != inst.cond)
16263
              {
16264
                inst.error = BAD_IT_COND;
16265
                return FAIL;
16266
              }
16267
            break;
16268
 
16269
          case INSIDE_IT_LAST_INSN:
16270
          case IF_INSIDE_IT_LAST_INSN:
16271
            if (cond != inst.cond)
16272
              {
16273
                inst.error = BAD_IT_COND;
16274
                return FAIL;
16275
              }
16276
            if (!is_last)
16277
              {
16278
                inst.error = BAD_BRANCH;
16279
                return FAIL;
16280
              }
16281
            break;
16282
 
16283
          case NEUTRAL_IT_INSN:
16284
            /* The BKPT instruction is unconditional even in an IT block.  */
16285
            break;
16286
 
16287
          case IT_INSN:
16288
            inst.error = BAD_IT_IT;
16289
            return FAIL;
16290
          }
16291
      }
16292
      break;
16293
    }
16294
 
16295
  return SUCCESS;
16296
}
16297
 
16298
static void
16299
it_fsm_post_encode (void)
16300
{
16301
  int is_last;
16302
 
16303
  if (!now_it.state_handled)
16304
    handle_it_state ();
16305
 
16306
  is_last = (now_it.mask == 0x10);
16307
  if (is_last)
16308
    {
16309
      now_it.state = OUTSIDE_IT_BLOCK;
16310
      now_it.mask = 0;
16311
    }
16312
}
16313
 
16314
static void
16315
force_automatic_it_block_close (void)
16316
{
16317
  if (now_it.state == AUTOMATIC_IT_BLOCK)
16318
    {
16319
      close_automatic_it_block ();
16320
      now_it.state = OUTSIDE_IT_BLOCK;
16321
      now_it.mask = 0;
16322
    }
16323
}
16324
 
16325
static int
16326
in_it_block (void)
16327
{
16328
  if (!now_it.state_handled)
16329
    handle_it_state ();
16330
 
16331
  return now_it.state != OUTSIDE_IT_BLOCK;
16332
}
16333
 
16334
void
16335
md_assemble (char *str)
16336
{
16337
  char *p = str;
16338
  const struct asm_opcode * opcode;
16339
 
16340
  /* Align the previous label if needed.  */
16341
  if (last_label_seen != NULL)
16342
    {
16343
      symbol_set_frag (last_label_seen, frag_now);
16344
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
16345
      S_SET_SEGMENT (last_label_seen, now_seg);
16346
    }
16347
 
16348
  memset (&inst, '\0', sizeof (inst));
16349
  inst.reloc.type = BFD_RELOC_UNUSED;
16350
 
16351
  opcode = opcode_lookup (&p);
16352
  if (!opcode)
16353
    {
16354
      /* It wasn't an instruction, but it might be a register alias of
16355
         the form alias .req reg, or a Neon .dn/.qn directive.  */
16356
      if (! create_register_alias (str, p)
16357
          && ! create_neon_reg_alias (str, p))
16358
        as_bad (_("bad instruction `%s'"), str);
16359
 
16360
      return;
16361
    }
16362
 
16363
  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
16364
    as_warn (_("s suffix on comparison instruction is deprecated"));
16365
 
16366
  /* The value which unconditional instructions should have in place of the
16367
     condition field.  */
16368
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;
16369
 
16370
  if (thumb_mode)
16371
    {
16372
      arm_feature_set variant;
16373
 
16374
      variant = cpu_variant;
16375
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
16376
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
16377
        ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
16378
      /* Check that this instruction is supported for this CPU.  */
16379
      if (!opcode->tvariant
16380
          || (thumb_mode == 1
16381
              && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
16382
        {
16383
          as_bad (_("selected processor does not support Thumb mode `%s'"), str);
16384
          return;
16385
        }
16386
      if (inst.cond != COND_ALWAYS && !unified_syntax
16387
          && opcode->tencode != do_t_branch)
16388
        {
16389
          as_bad (_("Thumb does not support conditional execution"));
16390
          return;
16391
        }
16392
 
16393
      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2))
16394
        {
16395
          if (opcode->tencode != do_t_blx && opcode->tencode != do_t_branch23
16396
              && !(ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_msr)
16397
                   || ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_barrier)))
16398
            {
16399
              /* Two things are addressed here.
16400
                 1) Implicitly require narrow instructions on Thumb-1.
16401
                    This avoids relaxation accidentally introducing Thumb-2
16402
                     instructions.
16403
                 2) Reject wide instructions in non-Thumb-2 cores.  */
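               /* For example, on a Thumb-1-only or v6-M core a plain
                  "adds r0, r1" is forced to its 16-bit encoding here, while
                  an explicit "adds.w r0, r0, r1" (size_req == 4) is
                  rejected just below.  */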
16404
              if (inst.size_req == 0)
16405
                inst.size_req = 2;
16406
              else if (inst.size_req == 4)
16407
                {
16408
                  as_bad (_("selected processor does not support Thumb-2 mode `%s'"), str);
16409
                  return;
16410
                }
16411
            }
16412
        }
16413
 
16414
      inst.instruction = opcode->tvalue;
16415
 
16416
      if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
16417
        {
16418
          /* Prepare the it_insn_type for those encodings that don't set
16419
             it.  */
16420
          it_fsm_pre_encode ();
16421
 
16422
          opcode->tencode ();
16423
 
16424
          it_fsm_post_encode ();
16425
        }
16426
 
16427
      if (!(inst.error || inst.relax))
16428
        {
16429
          gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
16430
          inst.size = (inst.instruction > 0xffff ? 4 : 2);
16431
          if (inst.size_req && inst.size_req != inst.size)
16432
            {
16433
              as_bad (_("cannot honor width suffix -- `%s'"), str);
16434
              return;
16435
            }
16436
        }
16437
 
16438
      /* Something has gone badly wrong if we try to relax a fixed size
16439
         instruction.  */
16440
      gas_assert (inst.size_req == 0 || !inst.relax);
16441
 
16442
      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
16443
                              *opcode->tvariant);
16444
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
16445
         set those bits when Thumb-2 32-bit instructions are seen.  i.e.
16446
         anything other than bl/blx and v6-M instructions.
16447
         This is overly pessimistic for relaxable instructions.  */
16448
      if (((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
16449
           || inst.relax)
16450
          && !(ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
16451
               || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier)))
16452
        ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
16453
                                arm_ext_v6t2);
16454
 
16455
      check_neon_suffixes;
16456
 
16457
      if (!inst.error)
16458
        {
16459
          mapping_state (MAP_THUMB);
16460
        }
16461
    }
16462
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
16463
    {
16464
      bfd_boolean is_bx;
16465
 
16466
      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
16467
      is_bx = (opcode->aencode == do_bx);
16468
 
16469
      /* Check that this instruction is supported for this CPU.  */
16470
      if (!(is_bx && fix_v4bx)
16471
          && !(opcode->avariant &&
16472
               ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
16473
        {
16474
          as_bad (_("selected processor does not support ARM mode `%s'"), str);
16475
          return;
16476
        }
16477
      if (inst.size_req)
16478
        {
16479
          as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
16480
          return;
16481
        }
16482
 
16483
      inst.instruction = opcode->avalue;
16484
      if (opcode->tag == OT_unconditionalF)
16485
        inst.instruction |= 0xF << 28;
16486
      else
16487
        inst.instruction |= inst.cond << 28;
16488
      inst.size = INSN_SIZE;
16489
      if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
16490
        {
16491
          it_fsm_pre_encode ();
16492
          opcode->aencode ();
16493
          it_fsm_post_encode ();
16494
        }
16495
      /* Arm mode bx is marked as both v4T and v5 because it's still required
16496
         on a hypothetical non-thumb v5 core.  */
16497
      if (is_bx)
16498
        ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
16499
      else
16500
        ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
16501
                                *opcode->avariant);
16502
 
16503
      check_neon_suffixes;
16504
 
16505
      if (!inst.error)
16506
        {
16507
          mapping_state (MAP_ARM);
16508
        }
16509
    }
16510
  else
16511
    {
16512
      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
16513
                "-- `%s'"), str);
16514
      return;
16515
    }
16516
  output_inst (str);
16517
}
16518
 
16519
static void
16520
check_it_blocks_finished (void)
16521
{
16522
#ifdef OBJ_ELF
16523
  asection *sect;
16524
 
16525
  for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
16526
    if (seg_info (sect)->tc_segment_info_data.current_it.state
16527
        == MANUAL_IT_BLOCK)
16528
      {
16529
        as_warn (_("section '%s' finished with an open IT block."),
16530
                 sect->name);
16531
      }
16532
#else
16533
  if (now_it.state == MANUAL_IT_BLOCK)
16534
    as_warn (_("file finished with an open IT block."));
16535
#endif
16536
}
16537
 
16538
/* Various frobbings of labels and their addresses.  */
16539
 
16540
void
16541
arm_start_line_hook (void)
16542
{
16543
  last_label_seen = NULL;
16544
}
16545
 
16546
void
16547
arm_frob_label (symbolS * sym)
16548
{
16549
  last_label_seen = sym;
16550
 
16551
  ARM_SET_THUMB (sym, thumb_mode);
16552
 
16553
#if defined OBJ_COFF || defined OBJ_ELF
16554
  ARM_SET_INTERWORK (sym, support_interwork);
16555
#endif
16556
 
16557
  force_automatic_it_block_close ();
16558
 
16559
  /* Note - do not allow local symbols (.Lxxx) to be labelled
16560
     as Thumb functions.  This is because these labels, whilst
16561
     they exist inside Thumb code, are not the entry points for
16562
     possible ARM->Thumb calls.  Also, these labels can be used
16563
     as part of a computed goto or switch statement.  e.g. gcc
16564
     can generate code that looks like this:
16565
 
16566
                ldr  r2, [pc, .Laaa]
16567
                lsl  r3, r3, #2
16568
                ldr  r2, [r3, r2]
16569
                mov  pc, r2
16570
 
16571
       .Lbbb:  .word .Lxxx
16572
       .Lccc:  .word .Lyyy
16573
       ..etc...
16574
       .Laaa:   .word .Lbbb
16575
 
16576
     The first instruction loads the address of the jump table.
16577
     The second instruction converts a table index into a byte offset.
16578
     The third instruction gets the jump address out of the table.
16579
     The fourth instruction performs the jump.
16580
 
16581
     If the address stored at .Laaa is that of a symbol which has the
16582
     Thumb_Func bit set, then the linker will arrange for this address
16583
     to have the bottom bit set, which in turn would mean that the
16584
     address computation performed by the third instruction would end
16585
     up with the bottom bit set.  Since the ARM is capable of unaligned
16586
     word loads, the instruction would then load the incorrect address
16587
     out of the jump table, and chaos would ensue.  */
16588
  if (label_is_thumb_function_name
16589
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
16590
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
16591
    {
16592
      /* When the address of a Thumb function is taken the bottom
16593
         bit of that address should be set.  This will allow
16594
         interworking between Arm and Thumb functions to work
16595
         correctly.  */
16596
 
16597
      THUMB_SET_FUNC (sym, 1);
16598
 
16599
      label_is_thumb_function_name = FALSE;
16600
    }
16601
 
16602
  dwarf2_emit_label (sym);
16603
}
16604
 
16605
bfd_boolean
16606
arm_data_in_code (void)
16607
{
16608
  if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
16609
    {
16610
      *input_line_pointer = '/';
16611
      input_line_pointer += 5;
16612
      *input_line_pointer = 0;
16613
      return TRUE;
16614
    }
16615
 
16616
  return FALSE;
16617
}
16618
 
16619
char *
16620
arm_canonicalize_symbol_name (char * name)
16621
{
16622
  int len;
16623
 
16624
  if (thumb_mode && (len = strlen (name)) > 5
16625
      && streq (name + len - 5, "/data"))
16626
    *(name + len - 5) = 0;
16627
 
16628
  return name;
16629
}
16630
 
16631
/* Table of all register names defined by default.  The user can
16632
   define additional names with .req.  Note that all register names
16633
   should appear in both upper and lowercase variants.  Some registers
16634
   also have mixed-case names.  */
16635
 
16636
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
16637
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
16638
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
16639
#define REGSET(p,t) \
16640
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
16641
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
16642
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
16643
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
16644
#define REGSETH(p,t) \
16645
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
16646
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
16647
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
16648
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
16649
#define REGSET2(p,t) \
16650
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
16651
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
16652
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
16653
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
16654
#define SPLRBANK(base,bank,t) \
16655
  REGDEF(lr_##bank, 768|((base+0)<<16), t), \
16656
  REGDEF(sp_##bank, 768|((base+1)<<16), t), \
16657
  REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
16658
  REGDEF(LR_##bank, 768|((base+0)<<16), t), \
16659
  REGDEF(SP_##bank, 768|((base+1)<<16), t), \
16660
  REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
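/* As an illustration of the macros above: REGSET(r, RN) produces sixteen
   REGNUM entries, and REGNUM(r, 0, RN) in turn becomes REGDEF(r0, 0, RN),
   i.e. the initializer { "r0", 0, REG_TYPE_RN, TRUE, 0 }: the register's
   name, its number, its type, a flag marking it as one of the built-in
   names rather than a user .req alias, and a trailing 0 that leaves the
   per-register Neon type information unset.  */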
16661
 
16662
static const struct reg_entry reg_names[] =
16663
{
16664
  /* ARM integer registers.  */
16665
  REGSET(r, RN), REGSET(R, RN),
16666
 
16667
  /* ATPCS synonyms.  */
16668
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
16669
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
16670
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),
16671
 
16672
  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
16673
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
16674
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),
16675
 
16676
  /* Well-known aliases.  */
16677
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
16678
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),
16679
 
16680
  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
16681
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),
16682
 
16683
  /* Coprocessor numbers.  */
16684
  REGSET(p, CP), REGSET(P, CP),
16685
 
16686
  /* Coprocessor register numbers.  The "cr" variants are for backward
16687
     compatibility.  */
16688
  REGSET(c,  CN), REGSET(C, CN),
16689
  REGSET(cr, CN), REGSET(CR, CN),
16690
 
16691
  /* ARM banked registers.  */
16692
  REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
16693
  REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
16694
  REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
16695
  REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
16696
  REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
16697
  REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
16698
  REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),
16699
 
16700
  REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
16701
  REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
16702
  REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
16703
  REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
16704
  REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
16705
  REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
16706
  REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
16707
  REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),
16708
 
16709
  SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
16710
  SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
16711
  SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
16712
  SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
16713
  SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
16714
  REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
16715
  REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
16716
  REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
16717
  REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),
16718
 
16719
  /* FPA registers.  */
16720
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
16721
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),
16722
 
16723
  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
16724
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),
16725
 
16726
  /* VFP SP registers.  */
16727
  REGSET(s,VFS),  REGSET(S,VFS),
16728
  REGSETH(s,VFS), REGSETH(S,VFS),
16729
 
16730
  /* VFP DP Registers.  */
16731
  REGSET(d,VFD),  REGSET(D,VFD),
16732
  /* Extra Neon DP registers.  */
16733
  REGSETH(d,VFD), REGSETH(D,VFD),
16734
 
16735
  /* Neon QP registers.  */
16736
  REGSET2(q,NQ),  REGSET2(Q,NQ),
16737
 
16738
  /* VFP control registers.  */
16739
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
16740
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
16741
  REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
16742
  REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
16743
  REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
16744
  REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
16745
 
16746
  /* Maverick DSP coprocessor registers.  */
16747
  REGSET(mvf,MVF),  REGSET(mvd,MVD),  REGSET(mvfx,MVFX),  REGSET(mvdx,MVDX),
16748
  REGSET(MVF,MVF),  REGSET(MVD,MVD),  REGSET(MVFX,MVFX),  REGSET(MVDX,MVDX),
16749
 
16750
  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
16751
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
16752
  REGDEF(dspsc,0,DSPSC),
16753
 
16754
  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
16755
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
16756
  REGDEF(DSPSC,0,DSPSC),
16757
 
16758
  /* iWMMXt data registers - p0, c0-15.  */
16759
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),
16760
 
16761
  /* iWMMXt control registers - p1, c0-3.  */
16762
  REGDEF(wcid,  0,MMXWC),  REGDEF(wCID,   0,MMXWC),  REGDEF(WCID,  0,MMXWC),
16763
  REGDEF(wcon,  1,MMXWC),  REGDEF(wCon,  1,MMXWC),  REGDEF(WCON,  1,MMXWC),
16764
  REGDEF(wcssf, 2,MMXWC),  REGDEF(wCSSF, 2,MMXWC),  REGDEF(WCSSF, 2,MMXWC),
16765
  REGDEF(wcasf, 3,MMXWC),  REGDEF(wCASF, 3,MMXWC),  REGDEF(WCASF, 3,MMXWC),
16766
 
16767
  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
16768
  REGDEF(wcgr0, 8,MMXWCG),  REGDEF(wCGR0, 8,MMXWCG),  REGDEF(WCGR0, 8,MMXWCG),
16769
  REGDEF(wcgr1, 9,MMXWCG),  REGDEF(wCGR1, 9,MMXWCG),  REGDEF(WCGR1, 9,MMXWCG),
16770
  REGDEF(wcgr2,10,MMXWCG),  REGDEF(wCGR2,10,MMXWCG),  REGDEF(WCGR2,10,MMXWCG),
16771
  REGDEF(wcgr3,11,MMXWCG),  REGDEF(wCGR3,11,MMXWCG),  REGDEF(WCGR3,11,MMXWCG),
16772
 
16773
  /* XScale accumulator registers.  */
16774
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
16775
};
16776
#undef REGDEF
16777
#undef REGNUM
16778
#undef REGSET
16779
 
16780
/* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
16781
   within psr_required_here.  */
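/* For example, "msr CPSR_fc, r0" looks the "fc" suffix up in this table,
   yielding PSR_f | PSR_c, i.e. write only the flags and control fields of
   the CPSR.  */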
16782
static const struct asm_psr psrs[] =
16783
{
16784
  /* Backward compatibility notation.  Note that "all" is no longer
16785
     truly all possible PSR bits.  */
16786
  {"all",  PSR_c | PSR_f},
16787
  {"flg",  PSR_f},
16788
  {"ctl",  PSR_c},
16789
 
16790
  /* Individual flags.  */
16791
  {"f",    PSR_f},
16792
  {"c",    PSR_c},
16793
  {"x",    PSR_x},
16794
  {"s",    PSR_s},
16795
 
16796
  /* Combinations of flags.  */
16797
  {"fs",   PSR_f | PSR_s},
16798
  {"fx",   PSR_f | PSR_x},
16799
  {"fc",   PSR_f | PSR_c},
16800
  {"sf",   PSR_s | PSR_f},
16801
  {"sx",   PSR_s | PSR_x},
16802
  {"sc",   PSR_s | PSR_c},
16803
  {"xf",   PSR_x | PSR_f},
16804
  {"xs",   PSR_x | PSR_s},
16805
  {"xc",   PSR_x | PSR_c},
16806
  {"cf",   PSR_c | PSR_f},
16807
  {"cs",   PSR_c | PSR_s},
16808
  {"cx",   PSR_c | PSR_x},
16809
  {"fsx",  PSR_f | PSR_s | PSR_x},
16810
  {"fsc",  PSR_f | PSR_s | PSR_c},
16811
  {"fxs",  PSR_f | PSR_x | PSR_s},
16812
  {"fxc",  PSR_f | PSR_x | PSR_c},
16813
  {"fcs",  PSR_f | PSR_c | PSR_s},
16814
  {"fcx",  PSR_f | PSR_c | PSR_x},
16815
  {"sfx",  PSR_s | PSR_f | PSR_x},
16816
  {"sfc",  PSR_s | PSR_f | PSR_c},
16817
  {"sxf",  PSR_s | PSR_x | PSR_f},
16818
  {"sxc",  PSR_s | PSR_x | PSR_c},
16819
  {"scf",  PSR_s | PSR_c | PSR_f},
16820
  {"scx",  PSR_s | PSR_c | PSR_x},
16821
  {"xfs",  PSR_x | PSR_f | PSR_s},
16822
  {"xfc",  PSR_x | PSR_f | PSR_c},
16823
  {"xsf",  PSR_x | PSR_s | PSR_f},
16824
  {"xsc",  PSR_x | PSR_s | PSR_c},
16825
  {"xcf",  PSR_x | PSR_c | PSR_f},
16826
  {"xcs",  PSR_x | PSR_c | PSR_s},
16827
  {"cfs",  PSR_c | PSR_f | PSR_s},
16828
  {"cfx",  PSR_c | PSR_f | PSR_x},
16829
  {"csf",  PSR_c | PSR_s | PSR_f},
16830
  {"csx",  PSR_c | PSR_s | PSR_x},
16831
  {"cxf",  PSR_c | PSR_x | PSR_f},
16832
  {"cxs",  PSR_c | PSR_x | PSR_s},
16833
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
16834
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
16835
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
16836
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
16837
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
16838
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
16839
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
16840
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
16841
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
16842
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
16843
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
16844
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
16845
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
16846
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
16847
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
16848
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
16849
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
16850
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
16851
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
16852
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
16853
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
16854
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
16855
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
16856
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
16857
};
16858
 
16859
/* Table of V7M psr names.  */
16860
static const struct asm_psr v7m_psrs[] =
16861
{
16862
  {"apsr",        0 }, {"APSR",          0 },
16863
  {"iapsr",       1 }, {"IAPSR",        1 },
16864
  {"eapsr",       2 }, {"EAPSR",        2 },
16865
  {"psr",         3 }, {"PSR",          3 },
16866
  {"xpsr",        3 }, {"XPSR",         3 }, {"xPSR",     3 },
16867
  {"ipsr",        5 }, {"IPSR",         5 },
16868
  {"epsr",        6 }, {"EPSR",         6 },
16869
  {"iepsr",       7 }, {"IEPSR",        7 },
16870
  {"msp",         8 }, {"MSP",          8 },
16871
  {"psp",         9 }, {"PSP",          9 },
16872
  {"primask",     16}, {"PRIMASK",      16},
16873
  {"basepri",     17}, {"BASEPRI",      17},
16874
  {"basepri_max", 18}, {"BASEPRI_MAX",  18},
16875
  {"basepri_max", 18}, {"BASEPRI_MASK", 18}, /* Typo, preserved for backwards compatibility.  */
16876
  {"faultmask",   19}, {"FAULTMASK",    19},
16877
  {"control",     20}, {"CONTROL",      20}
16878
};
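/* The values above correspond to the SYSm special-register numbers used by
   the M-profile MRS/MSR encodings; e.g. "mrs r0, PRIMASK" selects special
   register number 16.  */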
16879
 
16880
/* Table of all shift-in-operand names.  */
16881
static const struct asm_shift_name shift_names [] =
16882
{
16883
  { "asl", SHIFT_LSL },  { "ASL", SHIFT_LSL },
16884
  { "lsl", SHIFT_LSL },  { "LSL", SHIFT_LSL },
16885
  { "lsr", SHIFT_LSR },  { "LSR", SHIFT_LSR },
16886
  { "asr", SHIFT_ASR },  { "ASR", SHIFT_ASR },
16887
  { "ror", SHIFT_ROR },  { "ROR", SHIFT_ROR },
16888
  { "rrx", SHIFT_RRX },  { "RRX", SHIFT_RRX }
16889
};
16890
 
16891
/* Table of all explicit relocation names.  */
16892
#ifdef OBJ_ELF
16893
static struct reloc_entry reloc_names[] =
16894
{
16895
  { "got",     BFD_RELOC_ARM_GOT32   },  { "GOT",     BFD_RELOC_ARM_GOT32   },
16896
  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },  { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
16897
  { "plt",     BFD_RELOC_ARM_PLT32   },  { "PLT",     BFD_RELOC_ARM_PLT32   },
16898
  { "target1", BFD_RELOC_ARM_TARGET1 },  { "TARGET1", BFD_RELOC_ARM_TARGET1 },
16899
  { "target2", BFD_RELOC_ARM_TARGET2 },  { "TARGET2", BFD_RELOC_ARM_TARGET2 },
16900
  { "sbrel",   BFD_RELOC_ARM_SBREL32 },  { "SBREL",   BFD_RELOC_ARM_SBREL32 },
16901
  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},  { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
16902
  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
16903
  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
16904
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},  { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
16905
  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},  { "TPOFF",   BFD_RELOC_ARM_TLS_LE32},
16906
  { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
16907
  { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
16908
        { "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
16909
  { "tlscall", BFD_RELOC_ARM_TLS_CALL},
16910
        { "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
16911
  { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
16912
        { "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ}
16913
};
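/* For example, a data directive such as ".word sym(GOTOFF)" is matched
   against this table and emits its value with BFD_RELOC_ARM_GOTOFF.  */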
16914
#endif
16915
 
16916
/* Table of all conditional affixes.  0xF is not defined as a condition code.  */
16917
static const struct asm_cond conds[] =
16918
{
16919
  {"eq", 0x0},
16920
  {"ne", 0x1},
16921
  {"cs", 0x2}, {"hs", 0x2},
16922
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
16923
  {"mi", 0x4},
16924
  {"pl", 0x5},
16925
  {"vs", 0x6},
16926
  {"vc", 0x7},
16927
  {"hi", 0x8},
16928
  {"ls", 0x9},
16929
  {"ge", 0xa},
16930
  {"lt", 0xb},
16931
  {"gt", 0xc},
16932
  {"le", 0xd},
16933
  {"al", 0xe}
16934
};
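/* The value is what md_assemble above places in bits 31..28 of an ARM
   instruction (inst.cond << 28); e.g. "addeq" carries condition 0x0.  */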
16935
 
16936
static struct asm_barrier_opt barrier_opt_names[] =
16937
{
16938
  { "sy",    0xf }, { "SY",    0xf },
16939
  { "un",    0x7 }, { "UN",    0x7 },
16940
  { "st",    0xe }, { "ST",    0xe },
16941
  { "unst",  0x6 }, { "UNST",  0x6 },
16942
  { "ish",   0xb }, { "ISH",   0xb },
16943
  { "sh",    0xb }, { "SH",    0xb },
16944
  { "ishst", 0xa }, { "ISHST", 0xa },
16945
  { "shst",  0xa }, { "SHST",  0xa },
16946
  { "nsh",   0x7 }, { "NSH",   0x7 },
16947
  { "nshst", 0x6 }, { "NSHST", 0x6 },
16948
  { "osh",   0x3 }, { "OSH",   0x3 },
16949
  { "oshst", 0x2 }, { "OSHST", 0x2 }
16950
};
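/* These names follow the barrier instructions, e.g. "dsb ish"; the value
   fills the 4-bit option field of the encoding (0xf being full-system).  */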
16951
 
16952
/* Table of ARM-format instructions.    */
16953
 
16954
/* Macros for gluing together operand strings.  N.B. In all cases
16955
   other than OPS0, the trailing OP_stop comes from default
16956
   zero-initialization of the unspecified elements of the array.  */
16957
#define OPS0()            { OP_stop, }
16958
#define OPS1(a)           { OP_##a, }
16959
#define OPS2(a,b)         { OP_##a,OP_##b, }
16960
#define OPS3(a,b,c)       { OP_##a,OP_##b,OP_##c, }
16961
#define OPS4(a,b,c,d)     { OP_##a,OP_##b,OP_##c,OP_##d, }
16962
#define OPS5(a,b,c,d,e)   { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
16963
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
16964
 
16965
/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
16966
   This is useful when mixing operands for ARM and THUMB, i.e. using the
16967
   MIX_ARM_THUMB_OPERANDS macro.
16968
   In order to use these macros, prefix the number of operands with _
16969
   e.g. _3.  */
16970
#define OPS_1(a)           { a, }
16971
#define OPS_2(a,b)         { a,b, }
16972
#define OPS_3(a,b,c)       { a,b,c, }
16973
#define OPS_4(a,b,c,d)     { a,b,c,d, }
16974
#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
16975
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
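/* For instance, OPS3 (RR, oRR, SH) expands to { OP_RR, OP_oRR, OP_SH, };
   the remaining elements default to zero, which is OP_stop.  */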
16976
 
16977
/* These macros abstract out the exact format of the mnemonic table and
16978
   save some repeated characters.  */
16979
 
16980
/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
16981
#define TxCE(mnem, op, top, nops, ops, ae, te) \
16982
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
16983
    THUMB_VARIANT, do_##ae, do_##te }
16984
 
16985
/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
16986
   a T_MNEM_xyz enumerator.  */
16987
#define TCE(mnem, aop, top, nops, ops, ae, te) \
16988
      TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
16989
#define tCE(mnem, aop, top, nops, ops, ae, te) \
16990
      TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)
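/* For example, the insns[] entry
     tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c)
   expands to
     { "and", { OP_RR, OP_oRR, OP_SH, }, OT_csuffix, 0x0000000, T_MNEM_and,
       ARM_VARIANT, THUMB_VARIANT, do_arit, do_t_arit3c }
   with ARM_VARIANT and THUMB_VARIANT taking whatever values they are
   #defined to at that point in the table.  */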
16991
 
16992
/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
16993
   infix after the third character.  */
16994
#define TxC3(mnem, op, top, nops, ops, ae, te) \
16995
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
16996
    THUMB_VARIANT, do_##ae, do_##te }
16997
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
16998
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
16999
    THUMB_VARIANT, do_##ae, do_##te }
17000
#define TC3(mnem, aop, top, nops, ops, ae, te) \
17001
      TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
17002
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
17003
      TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
17004
#define tC3(mnem, aop, top, nops, ops, ae, te) \
17005
      TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
17006
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
17007
      TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)
17008
 
17009
/* Mnemonic with a conditional infix in an unusual place.  Each and every variant has to
17010
   appear in the condition table.  */
17011
#define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te)   \
17012
  { m1 #m2 m3, OPS##nops ops, sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
17013
    0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }
17014
 
17015
#define TxCM(m1, m2, op, top, nops, ops, ae, te)        \
17016
  TxCM_ (m1,   , m2, op, top, nops, ops, ae, te),       \
17017
  TxCM_ (m1, eq, m2, op, top, nops, ops, ae, te),       \
17018
  TxCM_ (m1, ne, m2, op, top, nops, ops, ae, te),       \
17019
  TxCM_ (m1, cs, m2, op, top, nops, ops, ae, te),       \
17020
  TxCM_ (m1, hs, m2, op, top, nops, ops, ae, te),       \
17021
  TxCM_ (m1, cc, m2, op, top, nops, ops, ae, te),       \
17022
  TxCM_ (m1, ul, m2, op, top, nops, ops, ae, te),       \
17023
  TxCM_ (m1, lo, m2, op, top, nops, ops, ae, te),       \
17024
  TxCM_ (m1, mi, m2, op, top, nops, ops, ae, te),       \
17025
  TxCM_ (m1, pl, m2, op, top, nops, ops, ae, te),       \
17026
  TxCM_ (m1, vs, m2, op, top, nops, ops, ae, te),       \
17027
  TxCM_ (m1, vc, m2, op, top, nops, ops, ae, te),       \
17028
  TxCM_ (m1, hi, m2, op, top, nops, ops, ae, te),       \
17029
  TxCM_ (m1, ls, m2, op, top, nops, ops, ae, te),       \
17030
  TxCM_ (m1, ge, m2, op, top, nops, ops, ae, te),       \
17031
  TxCM_ (m1, lt, m2, op, top, nops, ops, ae, te),       \
17032
  TxCM_ (m1, gt, m2, op, top, nops, ops, ae, te),       \
17033
  TxCM_ (m1, le, m2, op, top, nops, ops, ae, te),       \
17034
  TxCM_ (m1, al, m2, op, top, nops, ops, ae, te)
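/* Thus tCM("ld","sh", ...) below generates "ldsh", "ldeqsh", "ldnesh" and
   so on, with the condition infixed between "ld" and "sh".  */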
17035
 
17036
#define TCM(m1,m2, aop, top, nops, ops, ae, te)         \
17037
      TxCM (m1,m2, aop, 0x##top, nops, ops, ae, te)
17038
#define tCM(m1,m2, aop, top, nops, ops, ae, te)         \
17039
      TxCM (m1,m2, aop, T_MNEM##top, nops, ops, ae, te)
17040
 
17041
/* Mnemonic that cannot be conditionalized.  The ARM condition-code
17042
   field is still 0xE.  Many of the Thumb variants can be executed
17043
   conditionally, so this is checked separately.  */
17044
#define TUE(mnem, op, top, nops, ops, ae, te)                           \
17045
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
17046
    THUMB_VARIANT, do_##ae, do_##te }
17047
 
17048
/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
17049
   condition code field.  */
17050
#define TUF(mnem, op, top, nops, ops, ae, te)                           \
17051
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
17052
    THUMB_VARIANT, do_##ae, do_##te }
17053
 
17054
/* ARM-only variants of all the above.  */
17055
#define CE(mnem,  op, nops, ops, ae)    \
17056
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
17057
 
17058
#define C3(mnem, op, nops, ops, ae)     \
17059
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
17060
 
17061
/* Legacy mnemonics that always have conditional infix after the third
17062
   character.  */
17063
#define CL(mnem, op, nops, ops, ae)     \
17064
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
17065
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
17066
 
17067
/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  */
17068
#define cCE(mnem,  op, nops, ops, ae)   \
17069
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
17070
 
17071
/* Legacy coprocessor instructions where conditional infix and conditional
17072
   suffix are ambiguous.  For consistency this includes all FPA instructions,
17073
   not just the potentially ambiguous ones.  */
17074
#define cCL(mnem, op, nops, ops, ae)    \
17075
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
17076
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
17077
 
17078
/* Coprocessor, takes either a suffix or a position-3 infix
17079
   (for an FPA corner case). */
17080
#define C3E(mnem, op, nops, ops, ae) \
17081
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
17082
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
17083
 
17084
#define xCM_(m1, m2, m3, op, nops, ops, ae)     \
17085
  { m1 #m2 m3, OPS##nops ops, \
17086
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
17087
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
17088
 
17089
#define CM(m1, m2, op, nops, ops, ae)   \
17090
  xCM_ (m1,   , m2, op, nops, ops, ae), \
17091
  xCM_ (m1, eq, m2, op, nops, ops, ae), \
17092
  xCM_ (m1, ne, m2, op, nops, ops, ae), \
17093
  xCM_ (m1, cs, m2, op, nops, ops, ae), \
17094
  xCM_ (m1, hs, m2, op, nops, ops, ae), \
17095
  xCM_ (m1, cc, m2, op, nops, ops, ae), \
17096
  xCM_ (m1, ul, m2, op, nops, ops, ae), \
17097
  xCM_ (m1, lo, m2, op, nops, ops, ae), \
17098
  xCM_ (m1, mi, m2, op, nops, ops, ae), \
17099
  xCM_ (m1, pl, m2, op, nops, ops, ae), \
17100
  xCM_ (m1, vs, m2, op, nops, ops, ae), \
17101
  xCM_ (m1, vc, m2, op, nops, ops, ae), \
17102
  xCM_ (m1, hi, m2, op, nops, ops, ae), \
17103
  xCM_ (m1, ls, m2, op, nops, ops, ae), \
17104
  xCM_ (m1, ge, m2, op, nops, ops, ae), \
17105
  xCM_ (m1, lt, m2, op, nops, ops, ae), \
17106
  xCM_ (m1, gt, m2, op, nops, ops, ae), \
17107
  xCM_ (m1, le, m2, op, nops, ops, ae), \
17108
  xCM_ (m1, al, m2, op, nops, ops, ae)
17109
 
17110
#define UE(mnem, op, nops, ops, ae)     \
17111
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
17112
 
17113
#define UF(mnem, op, nops, ops, ae)     \
17114
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
17115
 
17116
/* Neon data-processing. ARM versions are unconditional with cond=0xf.
17117
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
17118
   use the same encoding function for each.  */
17119
#define NUF(mnem, op, nops, ops, enc)                                   \
17120
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,            \
17121
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
17122
 
17123
/* Neon data processing, version which indirects through neon_enc_tab for
17124
   the various overloaded versions of opcodes.  */
17125
#define nUF(mnem, op, nops, ops, enc)                                   \
17126
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,    \
17127
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
17128
 
17129
/* Neon insn with conditional suffix for the ARM version, non-overloaded
17130
   version.  */
17131
#define NCE_tag(mnem, op, nops, ops, enc, tag)                          \
17132
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT,             \
17133
    THUMB_VARIANT, do_##enc, do_##enc }
17134
 
17135
#define NCE(mnem, op, nops, ops, enc)                                   \
17136
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
17137
 
17138
#define NCEF(mnem, op, nops, ops, enc)                                  \
17139
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
17140
 
17141
/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
17142
#define nCE_tag(mnem, op, nops, ops, enc, tag)                          \
17143
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op,          \
17144
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
17145
 
17146
#define nCE(mnem, op, nops, ops, enc)                                   \
17147
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
17148
 
17149
#define nCEF(mnem, op, nops, ops, enc)                                  \
17150
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
17151
 
17152
#define do_0 0
17153
 
17154
static const struct asm_opcode insns[] =
17155
{
17156
#define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions.  */
17157
#define THUMB_VARIANT &arm_ext_v4t
17158
 tCE("and",     0000000, _and,     3, (RR, oRR, SH), arit, t_arit3c),
17159
 tC3("ands",    0100000, _ands,    3, (RR, oRR, SH), arit, t_arit3c),
17160
 tCE("eor",     0200000, _eor,     3, (RR, oRR, SH), arit, t_arit3c),
17161
 tC3("eors",    0300000, _eors,    3, (RR, oRR, SH), arit, t_arit3c),
17162
 tCE("sub",     0400000, _sub,     3, (RR, oRR, SH), arit, t_add_sub),
17163
 tC3("subs",    0500000, _subs,    3, (RR, oRR, SH), arit, t_add_sub),
17164
 tCE("add",     0800000, _add,     3, (RR, oRR, SHG), arit, t_add_sub),
17165
 tC3("adds",    0900000, _adds,    3, (RR, oRR, SHG), arit, t_add_sub),
17166
 tCE("adc",     0a00000, _adc,     3, (RR, oRR, SH), arit, t_arit3c),
17167
 tC3("adcs",    0b00000, _adcs,    3, (RR, oRR, SH), arit, t_arit3c),
17168
 tCE("sbc",     0c00000, _sbc,     3, (RR, oRR, SH), arit, t_arit3),
17169
 tC3("sbcs",    0d00000, _sbcs,    3, (RR, oRR, SH), arit, t_arit3),
17170
 tCE("orr",     1800000, _orr,     3, (RR, oRR, SH), arit, t_arit3c),
17171
 tC3("orrs",    1900000, _orrs,    3, (RR, oRR, SH), arit, t_arit3c),
17172
 tCE("bic",     1c00000, _bic,     3, (RR, oRR, SH), arit, t_arit3),
17173
 tC3("bics",    1d00000, _bics,    3, (RR, oRR, SH), arit, t_arit3),
17174
 
17175
 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
17176
    for setting PSR flag bits.  They are obsolete in V6 and do not
17177
    have Thumb equivalents. */
17178
 tCE("tst",     1100000, _tst,     2, (RR, SH),      cmp,  t_mvn_tst),
17179
 tC3w("tsts",   1100000, _tst,     2, (RR, SH),      cmp,  t_mvn_tst),
17180
  CL("tstp",    110f000,           2, (RR, SH),      cmp),
17181
 tCE("cmp",     1500000, _cmp,     2, (RR, SH),      cmp,  t_mov_cmp),
17182
 tC3w("cmps",   1500000, _cmp,     2, (RR, SH),      cmp,  t_mov_cmp),
17183
  CL("cmpp",    150f000,           2, (RR, SH),      cmp),
17184
 tCE("cmn",     1700000, _cmn,     2, (RR, SH),      cmp,  t_mvn_tst),
17185
 tC3w("cmns",   1700000, _cmn,     2, (RR, SH),      cmp,  t_mvn_tst),
17186
  CL("cmnp",    170f000,           2, (RR, SH),      cmp),
17187
 
17188
 tCE("mov",     1a00000, _mov,     2, (RR, SH),      mov,  t_mov_cmp),
17189
 tC3("movs",    1b00000, _movs,    2, (RR, SH),      mov,  t_mov_cmp),
17190
 tCE("mvn",     1e00000, _mvn,     2, (RR, SH),      mov,  t_mvn_tst),
17191
 tC3("mvns",    1f00000, _mvns,    2, (RR, SH),      mov,  t_mvn_tst),
17192
 
17193
 tCE("ldr",     4100000, _ldr,     2, (RR, ADDRGLDR),ldst, t_ldst),
17194
 tC3("ldrb",    4500000, _ldrb,    2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
17195
 tCE("str",     4000000, _str,     _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
17196
                                                                OP_RRnpc),
17197
                                        OP_ADDRGLDR),ldst, t_ldst),
17198
 tC3("strb",    4400000, _strb,    2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
17199
 
17200
 tCE("stm",     8800000, _stmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
17201
 tC3("stmia",   8800000, _stmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
17202
 tC3("stmea",   8800000, _stmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
17203
 tCE("ldm",     8900000, _ldmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
17204
 tC3("ldmia",   8900000, _ldmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
17205
 tC3("ldmfd",   8900000, _ldmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
17206
 
17207
 TCE("swi",     f000000, df00,     1, (EXPi),        swi, t_swi),
17208
 TCE("svc",     f000000, df00,     1, (EXPi),        swi, t_swi),
17209
 tCE("b",       a000000, _b,       1, (EXPr),        branch, t_branch),
17210
 TCE("bl",      b000000, f000f800, 1, (EXPr),        bl, t_branch23),
17211
 
17212
  /* Pseudo ops.  */
17213
 tCE("adr",     28f0000, _adr,     2, (RR, EXP),     adr,  t_adr),
17214
  C3(adrl,      28f0000,           2, (RR, EXP),     adrl),
17215
 tCE("nop",     1a00000, _nop,     1, (oI255c),      nop,  t_nop),
17216
 
17217
  /* Thumb-compatibility pseudo ops.  */
17218
 tCE("lsl",     1a00000, _lsl,     3, (RR, oRR, SH), shift, t_shift),
17219
 tC3("lsls",    1b00000, _lsls,    3, (RR, oRR, SH), shift, t_shift),
17220
 tCE("lsr",     1a00020, _lsr,     3, (RR, oRR, SH), shift, t_shift),
17221
 tC3("lsrs",    1b00020, _lsrs,    3, (RR, oRR, SH), shift, t_shift),
17222
 tCE("asr",     1a00040, _asr,     3, (RR, oRR, SH), shift, t_shift),
17223
 tC3("asrs",      1b00040, _asrs,     3, (RR, oRR, SH), shift, t_shift),
17224
 tCE("ror",     1a00060, _ror,     3, (RR, oRR, SH), shift, t_shift),
17225
 tC3("rors",    1b00060, _rors,    3, (RR, oRR, SH), shift, t_shift),
17226
 tCE("neg",     2600000, _neg,     2, (RR, RR),      rd_rn, t_neg),
17227
 tC3("negs",    2700000, _negs,    2, (RR, RR),      rd_rn, t_neg),
17228
 tCE("push",    92d0000, _push,     1, (REGLST),             push_pop, t_push_pop),
17229
 tCE("pop",     8bd0000, _pop,     1, (REGLST),      push_pop, t_push_pop),
17230
 
17231
 /* These may simplify to neg.  */
17232
 TCE("rsb",     0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
17233
 TC3("rsbs",    0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
17234
 
17235
#undef  THUMB_VARIANT
17236
#define THUMB_VARIANT  & arm_ext_v6
17237
 
17238
 TCE("cpy",       1a00000, 4600,     2, (RR, RR),      rd_rm, t_cpy),
17239
 
17240
 /* V1 instructions with no Thumb analogue prior to V6T2.  */
17241
#undef  THUMB_VARIANT
17242
#define THUMB_VARIANT  & arm_ext_v6t2
17243
 
17244
 TCE("teq",     1300000, ea900f00, 2, (RR, SH),      cmp,  t_mvn_tst),
17245
 TC3w("teqs",   1300000, ea900f00, 2, (RR, SH),      cmp,  t_mvn_tst),
17246
  CL("teqp",    130f000,           2, (RR, SH),      cmp),
17247
 
17248
 TC3("ldrt",    4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
17249
 TC3("ldrbt",   4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
17250
 TC3("strt",    4200000, f8400e00, 2, (RR_npcsp, ADDR),   ldstt, t_ldstt),
17251
 TC3("strbt",   4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
17252
 
17253
 TC3("stmdb",   9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
17254
 TC3("stmfd",     9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
17255
 
17256
 TC3("ldmdb",   9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
17257
 TC3("ldmea",   9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
17258
 
17259
 /* V1 instructions with no Thumb analogue at all.  */
17260
  CE("rsc",     0e00000,           3, (RR, oRR, SH), arit),
17261
  C3(rscs,      0f00000,           3, (RR, oRR, SH), arit),
17262
 
17263
  C3(stmib,     9800000,           2, (RRw, REGLST), ldmstm),
17264
  C3(stmfa,     9800000,           2, (RRw, REGLST), ldmstm),
17265
  C3(stmda,     8000000,           2, (RRw, REGLST), ldmstm),
17266
  C3(stmed,     8000000,           2, (RRw, REGLST), ldmstm),
17267
  C3(ldmib,     9900000,           2, (RRw, REGLST), ldmstm),
17268
  C3(ldmed,     9900000,           2, (RRw, REGLST), ldmstm),
17269
  C3(ldmda,     8100000,           2, (RRw, REGLST), ldmstm),
17270
  C3(ldmfa,     8100000,           2, (RRw, REGLST), ldmstm),
17271
 
17272
#undef  ARM_VARIANT
17273
#define ARM_VARIANT    & arm_ext_v2     /* ARM 2 - multiplies.  */
17274
#undef  THUMB_VARIANT
17275
#define THUMB_VARIANT  & arm_ext_v4t
17276
 
17277
 tCE("mul",     0000090, _mul,     3, (RRnpc, RRnpc, oRR), mul, t_mul),
17278
 tC3("muls",    0100090, _muls,    3, (RRnpc, RRnpc, oRR), mul, t_mul),
17279
 
17280
#undef  THUMB_VARIANT
17281
#define THUMB_VARIANT  & arm_ext_v6t2
17282
 
17283
 TCE("mla",     0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
17284
  C3(mlas,      0300090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
17285
 
17286
  /* Generic coprocessor instructions.  */
17287
 TCE("cdp",     e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp,    cdp),
17288
 TCE("ldc",     c100000, ec100000, 3, (RCP, RCN, ADDRGLDC),             lstc,   lstc),
17289
 TC3("ldcl",    c500000, ec500000, 3, (RCP, RCN, ADDRGLDC),             lstc,   lstc),
17290
 TCE("stc",     c000000, ec000000, 3, (RCP, RCN, ADDRGLDC),             lstc,   lstc),
17291
 TC3("stcl",    c400000, ec400000, 3, (RCP, RCN, ADDRGLDC),             lstc,   lstc),
17292
 TCE("mcr",     e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b),   co_reg, co_reg),
17293
 TCE("mrc",     e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b),   co_reg, co_reg),
17294
 
17295
#undef  ARM_VARIANT
17296
#define ARM_VARIANT  & arm_ext_v2s /* ARM 3 - swp instructions.  */
17297
 
17298
  CE("swp",     1000090,           3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
17299
  C3(swpb,      1400090,           3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
17300
 
17301
#undef  ARM_VARIANT
17302
#define ARM_VARIANT    & arm_ext_v3     /* ARM 6 Status register instructions.  */
17303
#undef  THUMB_VARIANT
17304
#define THUMB_VARIANT  & arm_ext_msr
17305
 
17306
 TCE("mrs",     1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
17307
 TCE("msr",     120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
17308
 
17309
#undef  ARM_VARIANT
17310
#define ARM_VARIANT    & arm_ext_v3m     /* ARM 7M long multiplies.  */
17311
#undef  THUMB_VARIANT
17312
#define THUMB_VARIANT  & arm_ext_v6t2
17313
 
17314
 TCE("smull",   0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
17315
  CM("smull","s",       0d00090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
17316
 TCE("umull",   0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
17317
  CM("umull","s",       0900090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
17318
 TCE("smlal",   0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
17319
  CM("smlal","s",       0f00090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
17320
 TCE("umlal",   0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
17321
  CM("umlal","s",       0b00090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
17322
 
17323
#undef  ARM_VARIANT
17324
#define ARM_VARIANT    & arm_ext_v4     /* ARM Architecture 4.  */
17325
#undef  THUMB_VARIANT
17326
#define THUMB_VARIANT  & arm_ext_v4t
17327
 
17328
 tC3("ldrh",    01000b0, _ldrh,     2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
17329
 tC3("strh",    00000b0, _strh,     2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
17330
 tC3("ldrsh",   01000f0, _ldrsh,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
17331
 tC3("ldrsb",   01000d0, _ldrsb,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
17332
 tCM("ld","sh", 01000f0, _ldrsh,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
17333
 tCM("ld","sb", 01000d0, _ldrsb,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
17334
 
17335
#undef  ARM_VARIANT
17336
#define ARM_VARIANT  & arm_ext_v4t_5
17337
 
17338
  /* ARM Architecture 4T.  */
17339
  /* Note: bx (and blx) are required on V5, even if the processor does
17340
     not support Thumb.  */
17341
 TCE("bx",      12fff10, 4700, 1, (RR), bx, t_bx),
17342
 
17343
#undef  ARM_VARIANT
17344
#define ARM_VARIANT    & arm_ext_v5 /*  ARM Architecture 5T.     */
17345
#undef  THUMB_VARIANT
17346
#define THUMB_VARIANT  & arm_ext_v5t
17347
 
17348
  /* Note: blx has 2 variants; the .value coded here is for
17349
     BLX(2).  Only this variant has conditional execution.  */
17350
 TCE("blx",     12fff30, 4780, 1, (RR_EXr),                         blx,  t_blx),
17351
 TUE("bkpt",    1200070, be00, 1, (oIffffb),                        bkpt, t_bkpt),
17352
 
17353
#undef  THUMB_VARIANT
17354
#define THUMB_VARIANT  & arm_ext_v6t2
17355
 
17356
 TCE("clz",     16f0f10, fab0f080, 2, (RRnpc, RRnpc),                   rd_rm,  t_clz),
17357
 TUF("ldc2",    c100000, fc100000, 3, (RCP, RCN, ADDRGLDC),             lstc,   lstc),
17358
 TUF("ldc2l",   c500000, fc500000, 3, (RCP, RCN, ADDRGLDC),                     lstc,   lstc),
17359
 TUF("stc2",    c000000, fc000000, 3, (RCP, RCN, ADDRGLDC),             lstc,   lstc),
17360
 TUF("stc2l",   c400000, fc400000, 3, (RCP, RCN, ADDRGLDC),                     lstc,   lstc),
17361
 TUF("cdp2",    e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp,    cdp),
17362
 TUF("mcr2",    e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b),   co_reg, co_reg),
17363
 TUF("mrc2",    e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b),   co_reg, co_reg),
17364
 
17365
#undef  ARM_VARIANT
17366
#define ARM_VARIANT  & arm_ext_v5exp /*  ARM Architecture 5TExP.  */
17367
#undef THUMB_VARIANT
17368
#define THUMB_VARIANT &arm_ext_v5exp
17369
 
17370
 TCE("smlabb",  1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
17371
 TCE("smlatb",  10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
17372
 TCE("smlabt",  10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
17373
 TCE("smlatt",  10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
17374
 
17375
 TCE("smlawb",  1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
17376
 TCE("smlawt",  12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
17377
 
17378
 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
17379
 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
17380
 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
17381
 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
17382
 
17383
 TCE("smulbb",  1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc),        smul, t_simd),
17384
 TCE("smultb",  16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc),        smul, t_simd),
17385
 TCE("smulbt",  16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc),        smul, t_simd),
17386
 TCE("smultt",  16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc),        smul, t_simd),
17387
 
17388
 TCE("smulwb",  12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc),        smul, t_simd),
17389
 TCE("smulwt",  12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc),        smul, t_simd),
17390
 
17391
 TCE("qadd",    1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc),        rd_rm_rn, t_simd2),
17392
 TCE("qdadd",   1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc),        rd_rm_rn, t_simd2),
17393
 TCE("qsub",    1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc),        rd_rm_rn, t_simd2),
17394
 TCE("qdsub",   1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc),        rd_rm_rn, t_simd2),
17395
 
17396
#undef  ARM_VARIANT
17397
#define ARM_VARIANT  & arm_ext_v5e /*  ARM Architecture 5TE.  */
17398
#undef THUMB_VARIANT
17399
#define THUMB_VARIANT &arm_ext_v6t2
17400
 
17401
 TUF("pld",     450f000, f810f000, 1, (ADDR),                pld,  t_pld),
17402
 TC3("ldrd",    00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
17403
     ldrd, t_ldstd),
17404
 TC3("strd",    00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
17405
                                       ADDRGLDRS), ldrd, t_ldstd),
17406
 
17407
 TCE("mcrr",    c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
17408
 TCE("mrrc",    c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
17409
 
17410
#undef  ARM_VARIANT
17411
#define ARM_VARIANT  & arm_ext_v5j /*  ARM Architecture 5TEJ.  */
17412
 
17413
 TCE("bxj",     12fff20, f3c08f00, 1, (RR),                       bxj, t_bxj),
17414
 
17415
#undef  ARM_VARIANT
17416
#define ARM_VARIANT    & arm_ext_v6 /*  ARM V6.  */
17417
#undef  THUMB_VARIANT
17418
#define THUMB_VARIANT  & arm_ext_v6
17419
 
17420
 TUF("cpsie",     1080000, b660,     2, (CPSF, oI31b),              cpsi,   t_cpsi),
17421
 TUF("cpsid",     10c0000, b670,     2, (CPSF, oI31b),              cpsi,   t_cpsi),
17422
 tCE("rev",       6bf0f30, _rev,      2, (RRnpc, RRnpc),             rd_rm,  t_rev),
17423
 tCE("rev16",     6bf0fb0, _rev16,    2, (RRnpc, RRnpc),             rd_rm,  t_rev),
17424
 tCE("revsh",     6ff0fb0, _revsh,    2, (RRnpc, RRnpc),             rd_rm,  t_rev),
17425
 tCE("sxth",      6bf0070, _sxth,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
17426
 tCE("uxth",      6ff0070, _uxth,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
17427
 tCE("sxtb",      6af0070, _sxtb,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
17428
 tCE("uxtb",      6ef0070, _uxtb,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
17429
 TUF("setend",    1010000, b650,     1, (ENDI),                     setend, t_setend),
17430
 
17431
#undef  THUMB_VARIANT
17432
#define THUMB_VARIANT  & arm_ext_v6t2
17433
 
17434
 TCE("ldrex",   1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR),        ldrex, t_ldrex),
17435
 TCE("strex",   1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
17436
                                      strex,  t_strex),
17437
 TUF("mcrr2",   c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
17438
 TUF("mrrc2",   c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
17439
 
17440
 TCE("ssat",    6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat,   t_ssat),
17441
 TCE("usat",    6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat,   t_usat),
17442
 
17443
/*  ARM V6 not included in V7M.  */
17444
#undef  THUMB_VARIANT
17445
#define THUMB_VARIANT  & arm_ext_v6_notm
17446
 TUF("rfeia",   8900a00, e990c000, 1, (RRw),                       rfe, rfe),
17447
  UF(rfeib,     9900a00,           1, (RRw),                       rfe),
17448
  UF(rfeda,     8100a00,           1, (RRw),                       rfe),
17449
 TUF("rfedb",   9100a00, e810c000, 1, (RRw),                       rfe, rfe),
17450
 TUF("rfefd",   8900a00, e990c000, 1, (RRw),                       rfe, rfe),
17451
  UF(rfefa,     9900a00,           1, (RRw),                       rfe),
17452
  UF(rfeea,     8100a00,           1, (RRw),                       rfe),
17453
 TUF("rfeed",   9100a00, e810c000, 1, (RRw),                       rfe, rfe),
17454
 TUF("srsia",   8c00500, e980c000, 2, (oRRw, I31w),                srs,  srs),
17455
  UF(srsib,     9c00500,           2, (oRRw, I31w),                srs),
17456
  UF(srsda,     8400500,           2, (oRRw, I31w),                srs),
17457
 TUF("srsdb",   9400500, e800c000, 2, (oRRw, I31w),                srs,  srs),
17458
 
17459
/*  ARM V6 not included in V7M (eg. integer SIMD).  */
17460
#undef  THUMB_VARIANT
17461
#define THUMB_VARIANT  & arm_ext_v6_dsp
17462
 TUF("cps",     1020000, f3af8100, 1, (I31b),                     imm0, t_cps),
17463
 TCE("pkhbt",   6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll),   pkhbt, t_pkhbt),
17464
 TCE("pkhtb",   6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar),   pkhtb, t_pkhtb),
17465
 TCE("qadd16",  6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17466
 TCE("qadd8",   6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17467
 TCE("qasx",    6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17468
 /* Old name for QASX.  */
17469
 TCE("qaddsubx",        6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17470
 TCE("qsax",    6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17471
 /* Old name for QSAX.  */
17472
 TCE("qsubaddx",        6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17473
 TCE("qsub16",  6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17474
 TCE("qsub8",   6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17475
 TCE("sadd16",  6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17476
 TCE("sadd8",   6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17477
 TCE("sasx",    6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17478
 /* Old name for SASX.  */
17479
 TCE("saddsubx",        6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17480
 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17481
 TCE("shadd8",  6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17482
 TCE("shasx",     6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc),     rd_rn_rm, t_simd),
17483
 /* Old name for SHASX.  */
17484
 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc),     rd_rn_rm, t_simd),
17485
 TCE("shsax",      6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc),    rd_rn_rm, t_simd),
17486
 /* Old name for SHSAX.  */
17487
 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc),     rd_rn_rm, t_simd),
17488
 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17489
 TCE("shsub8",  6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17490
 TCE("ssax",    6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17491
 /* Old name for SSAX.  */
17492
 TCE("ssubaddx",        6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17493
 TCE("ssub16",  6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17494
 TCE("ssub8",   6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17495
 TCE("uadd16",  6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17496
 TCE("uadd8",   6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17497
 TCE("uasx",    6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17498
 /* Old name for UASX.  */
17499
 TCE("uaddsubx",        6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17500
 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17501
 TCE("uhadd8",  6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17502
 TCE("uhasx",     6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc),     rd_rn_rm, t_simd),
17503
 /* Old name for UHASX.  */
17504
 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc),     rd_rn_rm, t_simd),
17505
 TCE("uhsax",     6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc),     rd_rn_rm, t_simd),
17506
 /* Old name for UHSAX.  */
17507
 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc),     rd_rn_rm, t_simd),
17508
 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17509
 TCE("uhsub8",  6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17510
 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17511
 TCE("uqadd8",  6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17512
 TCE("uqasx",     6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc),     rd_rn_rm, t_simd),
17513
 /* Old name for UQASX.  */
17514
 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc),     rd_rn_rm, t_simd),
17515
 TCE("uqsax",     6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc),     rd_rn_rm, t_simd),
17516
 /* Old name for UQSAX.  */
17517
 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc),     rd_rn_rm, t_simd),
17518
 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17519
 TCE("uqsub8",  6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17520
 TCE("usub16",  6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17521
 TCE("usax",    6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17522
 /* Old name for USAX.  */
17523
 TCE("usubaddx",        6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17524
 TCE("usub8",   6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17525
 TCE("sxtah",   6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
17526
 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
17527
 TCE("sxtab",   6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
17528
 TCE("sxtb16",  68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR),        sxth,  t_sxth),
17529
 TCE("uxtah",   6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
17530
 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
17531
 TCE("uxtab",   6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
17532
 TCE("uxtb16",  6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR),        sxth,  t_sxth),
17533
 TCE("sel",     6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17534
 TCE("smlad",   7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17535
 TCE("smladx",  7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17536
 TCE("smlald",  7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
17537
 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
17538
 TCE("smlsd",   7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17539
 TCE("smlsdx",  7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17540
 TCE("smlsld",  7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
17541
 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
17542
 TCE("smmla",   7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17543
 TCE("smmlar",  7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17544
 TCE("smmls",   75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17545
 TCE("smmlsr",  75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17546
 TCE("smmul",   750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc),       smul, t_simd),
17547
 TCE("smmulr",  750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc),       smul, t_simd),
17548
 TCE("smuad",   700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc),       smul, t_simd),
17549
 TCE("smuadx",  700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc),       smul, t_simd),
17550
 TCE("smusd",   700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc),       smul, t_simd),
17551
 TCE("smusdx",  700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc),       smul, t_simd),
17552
 TCE("ssat16",  6a00f30, f3200000, 3, (RRnpc, I16, RRnpc),         ssat16, t_ssat16),
17553
 TCE("umaal",   0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,  t_mlal),
17554
 TCE("usad8",   780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc),       smul,   t_simd),
17555
 TCE("usada8",  7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla,   t_mla),
17556
 TCE("usat16",  6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc),         usat16, t_usat16),
17557
 
17558
#undef  ARM_VARIANT
17559
#define ARM_VARIANT   & arm_ext_v6k
17560
#undef  THUMB_VARIANT
17561
#define THUMB_VARIANT & arm_ext_v6k
17562
 
17563
 tCE("yield",   320f001, _yield,    0, (), noargs, t_hint),
17564
 tCE("wfe",     320f002, _wfe,      0, (), noargs, t_hint),
17565
 tCE("wfi",     320f003, _wfi,      0, (), noargs, t_hint),
17566
 tCE("sev",     320f004, _sev,      0, (), noargs, t_hint),
17567
 
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_v6_notm
 TCE("ldrexd",  1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
                                      ldrexd, t_ldrexd),
 TCE("strexd",  1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
                                       RRnpcb), strexd, t_strexd),

#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_v6t2
 TCE("ldrexb",  1d00f9f, e8d00f4f, 2, (RRnpc_npcsp, RRnpcb),
     rd_rn,  rd_rn),
 TCE("ldrexh",  1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
     rd_rn,  rd_rn),
 TCE("strexb",  1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
     strex, t_strexbh),
 TCE("strexh",  1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
     strex, t_strexbh),
 TUF("clrex",   57ff01f, f3bf8f2f, 0, (),                              noargs, noargs),
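 /* Informative examples only (not taken from this table) of the
    exclusive-access forms defined above:
        ldrexd  r0, r1, [r2]      @ doubleword load-exclusive
        strexb  r3, r0, [r2]      @ byte store-exclusive, status -> r3
        clrex                     @ clear the local exclusive monitor  */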
 
17587
#undef  ARM_VARIANT
17588
#define ARM_VARIANT    & arm_ext_sec
17589
#undef THUMB_VARIANT
17590
#define THUMB_VARIANT  & arm_ext_sec
17591
 
17592
 TCE("smc",     1600070, f7f08000, 1, (EXPi), smc, t_smc),
17593
 
17594
#undef  ARM_VARIANT
17595
#define ARM_VARIANT    & arm_ext_virt
17596
#undef  THUMB_VARIANT
17597
#define THUMB_VARIANT    & arm_ext_virt
17598
 
17599
 TCE("hvc",     1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
17600
 TCE("eret",    160006e, f3de8f00, 0, (), noargs, noargs),
17601
 
17602
#undef  ARM_VARIANT
17603
#define ARM_VARIANT  & arm_ext_v6t2
17604
#undef  THUMB_VARIANT
17605
#define THUMB_VARIANT  & arm_ext_v6t2
17606
 
 TCE("bfc",     7c0001f, f36f0000, 3, (RRnpc, I31, I32),           bfc, t_bfc),
 TCE("bfi",     7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
 TCE("sbfx",    7a00050, f3400000, 4, (RR, RR, I31, I32),          bfx, t_bfx),
 TCE("ubfx",    7e00050, f3c00000, 4, (RR, RR, I31, I32),          bfx, t_bfx),
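 /* Informative examples only; the immediate operands are <lsb>, <width>:
        bfi   r0, r1, #8, #4      @ r0[11:8] = r1[3:0]
        ubfx  r2, r3, #4, #8      @ r2 = zero-extended r3[11:4]  */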
 
17612
 TCE("mls",     0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
17613
 TCE("movw",    3000000, f2400000, 2, (RRnpc, HALF),                mov16, t_mov16),
17614
 TCE("movt",    3400000, f2c00000, 2, (RRnpc, HALF),                mov16, t_mov16),
17615
 TCE("rbit",    6ff0f30, fa90f0a0, 2, (RR, RR),                     rd_rm, t_rbit),
17616
 
17617
 TC3("ldrht",   03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
17618
 TC3("ldrsht",  03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
17619
 TC3("ldrsbt",  03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
17620
 TC3("strht",   02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
17621
 
17622
 /* Thumb-only instructions.  */
#undef ARM_VARIANT
#define ARM_VARIANT NULL
  TUE("cbnz",     0,           b900,     2, (RR, EXP), 0, t_cbz),
  TUE("cbz",      0,           b100,     2, (RR, EXP), 0, t_cbz),

 /* ARM does not really have an IT instruction, so always allow it.
    The opcode is copied from Thumb in order to allow warnings in
    -mimplicit-it=[never | arm] modes.  */
#undef  ARM_VARIANT
#define ARM_VARIANT  & arm_ext_v1

 TUE("it",        bf08,        bf08,     1, (COND),   it,    t_it),
 TUE("itt",       bf0c,        bf0c,     1, (COND),   it,    t_it),
 TUE("ite",       bf04,        bf04,     1, (COND),   it,    t_it),
 TUE("ittt",      bf0e,        bf0e,     1, (COND),   it,    t_it),
 TUE("itet",      bf06,        bf06,     1, (COND),   it,    t_it),
 TUE("itte",      bf0a,        bf0a,     1, (COND),   it,    t_it),
 TUE("itee",      bf02,        bf02,     1, (COND),   it,    t_it),
 TUE("itttt",     bf0f,        bf0f,     1, (COND),   it,    t_it),
 TUE("itett",     bf07,        bf07,     1, (COND),   it,    t_it),
 TUE("ittet",     bf0b,        bf0b,     1, (COND),   it,    t_it),
 TUE("iteet",     bf03,        bf03,     1, (COND),   it,    t_it),
 TUE("ittte",     bf0d,        bf0d,     1, (COND),   it,    t_it),
 TUE("itete",     bf05,        bf05,     1, (COND),   it,    t_it),
 TUE("ittee",     bf09,        bf09,     1, (COND),   it,    t_it),
 TUE("iteee",     bf01,        bf01,     1, (COND),   it,    t_it),
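 /* Informative example (assumed typical use) of a unified-syntax IT block in
    Thumb-2:
        ite     eq
        moveq   r0, #1
        movne   r0, #0
    In ARM state the mnemonic is accepted, as explained above, so that
    -mimplicit-it= checking behaves consistently in both instruction sets.  */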
 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent.  */
17650
 TC3("rrx",       01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
17651
 TC3("rrxs",      01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
17652
 
 /* Thumb2 only instructions.  */
#undef  ARM_VARIANT
#define ARM_VARIANT  NULL

 TCE("addw",    0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
 TCE("subw",    0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
 TCE("orn",       0, ea600000, 3, (RR, oRR, SH),  0, t_orn),
 TCE("orns",      0, ea700000, 3, (RR, oRR, SH),  0, t_orn),
 TCE("tbb",       0, e8d0f000, 1, (TB), 0, t_tb),
 TCE("tbh",       0, e8d0f010, 1, (TB), 0, t_tb),
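 /* Informative examples only (assumed typical uses):
        addw  r0, r1, #4095       @ plain 12-bit immediate, flags unchanged
        tbb   [pc, r0]            @ byte-offset table branch
        tbh   [pc, r0, lsl #1]    @ halfword-offset table branch  */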
 
 /* Hardware division instructions.  */
#undef  ARM_VARIANT
#define ARM_VARIANT    & arm_ext_adiv
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_div

 TCE("sdiv",    710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
 TCE("udiv",    730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
 
 /* ARM V6M/V7 instructions.  */
#undef  ARM_VARIANT
#define ARM_VARIANT    & arm_ext_barrier
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_barrier

 TUF("dmb",     57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier,  t_barrier),
 TUF("dsb",     57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier,  t_barrier),
 TUF("isb",     57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier,  t_barrier),
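 /* The barrier operand is optional (oBARRIER_I15): "dmb", "dmb sy" and
    "dmb ish" are all accepted, as is a small immediate.  Examples are
    informative only.  */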
 
17683
 /* ARM V7 instructions.  */
17684
#undef  ARM_VARIANT
17685
#define ARM_VARIANT    & arm_ext_v7
17686
#undef  THUMB_VARIANT
17687
#define THUMB_VARIANT  & arm_ext_v7
17688
 
17689
 TUF("pli",     450f000, f910f000, 1, (ADDR),     pli,      t_pld),
17690
 TCE("dbg",     320f0f0, f3af80f0, 1, (I15),      dbg,      t_dbg),
17691
 
17692
#undef ARM_VARIANT
17693
#define ARM_VARIANT    & arm_ext_mp
17694
#undef THUMB_VARIANT
17695
#define THUMB_VARIANT  & arm_ext_mp
17696
 
17697
 TUF("pldw",    410f000, f830f000, 1, (ADDR),   pld,    t_pld),
17698
 
17699
#undef  ARM_VARIANT
17700
#define ARM_VARIANT  & fpu_fpa_ext_v1  /* Core FPA instruction set (V1).  */
17701
 
17702
 cCE("wfs",     e200110, 1, (RR),            rd),
17703
 cCE("rfs",     e300110, 1, (RR),            rd),
17704
 cCE("wfc",     e400110, 1, (RR),            rd),
17705
 cCE("rfc",     e500110, 1, (RR),            rd),
17706
 
17707
 cCL("ldfs",    c100100, 2, (RF, ADDRGLDC),  rd_cpaddr),
17708
 cCL("ldfd",    c108100, 2, (RF, ADDRGLDC),  rd_cpaddr),
17709
 cCL("ldfe",    c500100, 2, (RF, ADDRGLDC),  rd_cpaddr),
17710
 cCL("ldfp",    c508100, 2, (RF, ADDRGLDC),  rd_cpaddr),
17711
 
17712
 cCL("stfs",    c000100, 2, (RF, ADDRGLDC),  rd_cpaddr),
17713
 cCL("stfd",    c008100, 2, (RF, ADDRGLDC),  rd_cpaddr),
17714
 cCL("stfe",    c400100, 2, (RF, ADDRGLDC),  rd_cpaddr),
17715
 cCL("stfp",    c408100, 2, (RF, ADDRGLDC),  rd_cpaddr),
17716
 
17717
 cCL("mvfs",    e008100, 2, (RF, RF_IF),     rd_rm),
17718
 cCL("mvfsp",   e008120, 2, (RF, RF_IF),     rd_rm),
17719
 cCL("mvfsm",   e008140, 2, (RF, RF_IF),     rd_rm),
17720
 cCL("mvfsz",   e008160, 2, (RF, RF_IF),     rd_rm),
17721
 cCL("mvfd",    e008180, 2, (RF, RF_IF),     rd_rm),
17722
 cCL("mvfdp",   e0081a0, 2, (RF, RF_IF),     rd_rm),
17723
 cCL("mvfdm",   e0081c0, 2, (RF, RF_IF),     rd_rm),
17724
 cCL("mvfdz",   e0081e0, 2, (RF, RF_IF),     rd_rm),
17725
 cCL("mvfe",    e088100, 2, (RF, RF_IF),     rd_rm),
17726
 cCL("mvfep",   e088120, 2, (RF, RF_IF),     rd_rm),
17727
 cCL("mvfem",   e088140, 2, (RF, RF_IF),     rd_rm),
17728
 cCL("mvfez",   e088160, 2, (RF, RF_IF),     rd_rm),
17729
 
17730
 cCL("mnfs",    e108100, 2, (RF, RF_IF),     rd_rm),
17731
 cCL("mnfsp",   e108120, 2, (RF, RF_IF),     rd_rm),
17732
 cCL("mnfsm",   e108140, 2, (RF, RF_IF),     rd_rm),
17733
 cCL("mnfsz",   e108160, 2, (RF, RF_IF),     rd_rm),
17734
 cCL("mnfd",    e108180, 2, (RF, RF_IF),     rd_rm),
17735
 cCL("mnfdp",   e1081a0, 2, (RF, RF_IF),     rd_rm),
17736
 cCL("mnfdm",   e1081c0, 2, (RF, RF_IF),     rd_rm),
17737
 cCL("mnfdz",   e1081e0, 2, (RF, RF_IF),     rd_rm),
17738
 cCL("mnfe",    e188100, 2, (RF, RF_IF),     rd_rm),
17739
 cCL("mnfep",   e188120, 2, (RF, RF_IF),     rd_rm),
17740
 cCL("mnfem",   e188140, 2, (RF, RF_IF),     rd_rm),
17741
 cCL("mnfez",   e188160, 2, (RF, RF_IF),     rd_rm),
17742
 
17743
 cCL("abss",    e208100, 2, (RF, RF_IF),     rd_rm),
17744
 cCL("abssp",   e208120, 2, (RF, RF_IF),     rd_rm),
17745
 cCL("abssm",   e208140, 2, (RF, RF_IF),     rd_rm),
17746
 cCL("abssz",   e208160, 2, (RF, RF_IF),     rd_rm),
17747
 cCL("absd",    e208180, 2, (RF, RF_IF),     rd_rm),
17748
 cCL("absdp",   e2081a0, 2, (RF, RF_IF),     rd_rm),
17749
 cCL("absdm",   e2081c0, 2, (RF, RF_IF),     rd_rm),
17750
 cCL("absdz",   e2081e0, 2, (RF, RF_IF),     rd_rm),
17751
 cCL("abse",    e288100, 2, (RF, RF_IF),     rd_rm),
17752
 cCL("absep",   e288120, 2, (RF, RF_IF),     rd_rm),
17753
 cCL("absem",   e288140, 2, (RF, RF_IF),     rd_rm),
17754
 cCL("absez",   e288160, 2, (RF, RF_IF),     rd_rm),
17755
 
17756
 cCL("rnds",    e308100, 2, (RF, RF_IF),     rd_rm),
17757
 cCL("rndsp",   e308120, 2, (RF, RF_IF),     rd_rm),
17758
 cCL("rndsm",   e308140, 2, (RF, RF_IF),     rd_rm),
17759
 cCL("rndsz",   e308160, 2, (RF, RF_IF),     rd_rm),
17760
 cCL("rndd",    e308180, 2, (RF, RF_IF),     rd_rm),
17761
 cCL("rnddp",   e3081a0, 2, (RF, RF_IF),     rd_rm),
17762
 cCL("rnddm",   e3081c0, 2, (RF, RF_IF),     rd_rm),
17763
 cCL("rnddz",   e3081e0, 2, (RF, RF_IF),     rd_rm),
17764
 cCL("rnde",    e388100, 2, (RF, RF_IF),     rd_rm),
17765
 cCL("rndep",   e388120, 2, (RF, RF_IF),     rd_rm),
17766
 cCL("rndem",   e388140, 2, (RF, RF_IF),     rd_rm),
17767
 cCL("rndez",   e388160, 2, (RF, RF_IF),     rd_rm),
17768
 
17769
 cCL("sqts",    e408100, 2, (RF, RF_IF),     rd_rm),
17770
 cCL("sqtsp",   e408120, 2, (RF, RF_IF),     rd_rm),
17771
 cCL("sqtsm",   e408140, 2, (RF, RF_IF),     rd_rm),
17772
 cCL("sqtsz",   e408160, 2, (RF, RF_IF),     rd_rm),
17773
 cCL("sqtd",    e408180, 2, (RF, RF_IF),     rd_rm),
17774
 cCL("sqtdp",   e4081a0, 2, (RF, RF_IF),     rd_rm),
17775
 cCL("sqtdm",   e4081c0, 2, (RF, RF_IF),     rd_rm),
17776
 cCL("sqtdz",   e4081e0, 2, (RF, RF_IF),     rd_rm),
17777
 cCL("sqte",    e488100, 2, (RF, RF_IF),     rd_rm),
17778
 cCL("sqtep",   e488120, 2, (RF, RF_IF),     rd_rm),
17779
 cCL("sqtem",   e488140, 2, (RF, RF_IF),     rd_rm),
17780
 cCL("sqtez",   e488160, 2, (RF, RF_IF),     rd_rm),
17781
 
17782
 cCL("logs",    e508100, 2, (RF, RF_IF),     rd_rm),
17783
 cCL("logsp",   e508120, 2, (RF, RF_IF),     rd_rm),
17784
 cCL("logsm",   e508140, 2, (RF, RF_IF),     rd_rm),
17785
 cCL("logsz",   e508160, 2, (RF, RF_IF),     rd_rm),
17786
 cCL("logd",    e508180, 2, (RF, RF_IF),     rd_rm),
17787
 cCL("logdp",   e5081a0, 2, (RF, RF_IF),     rd_rm),
17788
 cCL("logdm",   e5081c0, 2, (RF, RF_IF),     rd_rm),
17789
 cCL("logdz",   e5081e0, 2, (RF, RF_IF),     rd_rm),
17790
 cCL("loge",    e588100, 2, (RF, RF_IF),     rd_rm),
17791
 cCL("logep",   e588120, 2, (RF, RF_IF),     rd_rm),
17792
 cCL("logem",   e588140, 2, (RF, RF_IF),     rd_rm),
17793
 cCL("logez",   e588160, 2, (RF, RF_IF),     rd_rm),
17794
 
17795
 cCL("lgns",    e608100, 2, (RF, RF_IF),     rd_rm),
17796
 cCL("lgnsp",   e608120, 2, (RF, RF_IF),     rd_rm),
17797
 cCL("lgnsm",   e608140, 2, (RF, RF_IF),     rd_rm),
17798
 cCL("lgnsz",   e608160, 2, (RF, RF_IF),     rd_rm),
17799
 cCL("lgnd",    e608180, 2, (RF, RF_IF),     rd_rm),
17800
 cCL("lgndp",   e6081a0, 2, (RF, RF_IF),     rd_rm),
17801
 cCL("lgndm",   e6081c0, 2, (RF, RF_IF),     rd_rm),
17802
 cCL("lgndz",   e6081e0, 2, (RF, RF_IF),     rd_rm),
17803
 cCL("lgne",    e688100, 2, (RF, RF_IF),     rd_rm),
17804
 cCL("lgnep",   e688120, 2, (RF, RF_IF),     rd_rm),
17805
 cCL("lgnem",   e688140, 2, (RF, RF_IF),     rd_rm),
17806
 cCL("lgnez",   e688160, 2, (RF, RF_IF),     rd_rm),
17807
 
17808
 cCL("exps",    e708100, 2, (RF, RF_IF),     rd_rm),
17809
 cCL("expsp",   e708120, 2, (RF, RF_IF),     rd_rm),
17810
 cCL("expsm",   e708140, 2, (RF, RF_IF),     rd_rm),
17811
 cCL("expsz",   e708160, 2, (RF, RF_IF),     rd_rm),
17812
 cCL("expd",    e708180, 2, (RF, RF_IF),     rd_rm),
17813
 cCL("expdp",   e7081a0, 2, (RF, RF_IF),     rd_rm),
17814
 cCL("expdm",   e7081c0, 2, (RF, RF_IF),     rd_rm),
17815
 cCL("expdz",   e7081e0, 2, (RF, RF_IF),     rd_rm),
17816
 cCL("expe",    e788100, 2, (RF, RF_IF),     rd_rm),
17817
 cCL("expep",   e788120, 2, (RF, RF_IF),     rd_rm),
17818
 cCL("expem",   e788140, 2, (RF, RF_IF),     rd_rm),
17819
 cCL("expdz",   e788160, 2, (RF, RF_IF),     rd_rm),
17820
 
17821
 cCL("sins",    e808100, 2, (RF, RF_IF),     rd_rm),
17822
 cCL("sinsp",   e808120, 2, (RF, RF_IF),     rd_rm),
17823
 cCL("sinsm",   e808140, 2, (RF, RF_IF),     rd_rm),
17824
 cCL("sinsz",   e808160, 2, (RF, RF_IF),     rd_rm),
17825
 cCL("sind",    e808180, 2, (RF, RF_IF),     rd_rm),
17826
 cCL("sindp",   e8081a0, 2, (RF, RF_IF),     rd_rm),
17827
 cCL("sindm",   e8081c0, 2, (RF, RF_IF),     rd_rm),
17828
 cCL("sindz",   e8081e0, 2, (RF, RF_IF),     rd_rm),
17829
 cCL("sine",    e888100, 2, (RF, RF_IF),     rd_rm),
17830
 cCL("sinep",   e888120, 2, (RF, RF_IF),     rd_rm),
17831
 cCL("sinem",   e888140, 2, (RF, RF_IF),     rd_rm),
17832
 cCL("sinez",   e888160, 2, (RF, RF_IF),     rd_rm),
17833
 
17834
 cCL("coss",    e908100, 2, (RF, RF_IF),     rd_rm),
17835
 cCL("cossp",   e908120, 2, (RF, RF_IF),     rd_rm),
17836
 cCL("cossm",   e908140, 2, (RF, RF_IF),     rd_rm),
17837
 cCL("cossz",   e908160, 2, (RF, RF_IF),     rd_rm),
17838
 cCL("cosd",    e908180, 2, (RF, RF_IF),     rd_rm),
17839
 cCL("cosdp",   e9081a0, 2, (RF, RF_IF),     rd_rm),
17840
 cCL("cosdm",   e9081c0, 2, (RF, RF_IF),     rd_rm),
17841
 cCL("cosdz",   e9081e0, 2, (RF, RF_IF),     rd_rm),
17842
 cCL("cose",    e988100, 2, (RF, RF_IF),     rd_rm),
17843
 cCL("cosep",   e988120, 2, (RF, RF_IF),     rd_rm),
17844
 cCL("cosem",   e988140, 2, (RF, RF_IF),     rd_rm),
17845
 cCL("cosez",   e988160, 2, (RF, RF_IF),     rd_rm),
17846
 
17847
 cCL("tans",    ea08100, 2, (RF, RF_IF),     rd_rm),
17848
 cCL("tansp",   ea08120, 2, (RF, RF_IF),     rd_rm),
17849
 cCL("tansm",   ea08140, 2, (RF, RF_IF),     rd_rm),
17850
 cCL("tansz",   ea08160, 2, (RF, RF_IF),     rd_rm),
17851
 cCL("tand",    ea08180, 2, (RF, RF_IF),     rd_rm),
17852
 cCL("tandp",   ea081a0, 2, (RF, RF_IF),     rd_rm),
17853
 cCL("tandm",   ea081c0, 2, (RF, RF_IF),     rd_rm),
17854
 cCL("tandz",   ea081e0, 2, (RF, RF_IF),     rd_rm),
17855
 cCL("tane",    ea88100, 2, (RF, RF_IF),     rd_rm),
17856
 cCL("tanep",   ea88120, 2, (RF, RF_IF),     rd_rm),
17857
 cCL("tanem",   ea88140, 2, (RF, RF_IF),     rd_rm),
17858
 cCL("tanez",   ea88160, 2, (RF, RF_IF),     rd_rm),
17859
 
17860
 cCL("asns",    eb08100, 2, (RF, RF_IF),     rd_rm),
17861
 cCL("asnsp",   eb08120, 2, (RF, RF_IF),     rd_rm),
17862
 cCL("asnsm",   eb08140, 2, (RF, RF_IF),     rd_rm),
17863
 cCL("asnsz",   eb08160, 2, (RF, RF_IF),     rd_rm),
17864
 cCL("asnd",    eb08180, 2, (RF, RF_IF),     rd_rm),
17865
 cCL("asndp",   eb081a0, 2, (RF, RF_IF),     rd_rm),
17866
 cCL("asndm",   eb081c0, 2, (RF, RF_IF),     rd_rm),
17867
 cCL("asndz",   eb081e0, 2, (RF, RF_IF),     rd_rm),
17868
 cCL("asne",    eb88100, 2, (RF, RF_IF),     rd_rm),
17869
 cCL("asnep",   eb88120, 2, (RF, RF_IF),     rd_rm),
17870
 cCL("asnem",   eb88140, 2, (RF, RF_IF),     rd_rm),
17871
 cCL("asnez",   eb88160, 2, (RF, RF_IF),     rd_rm),
17872
 
17873
 cCL("acss",    ec08100, 2, (RF, RF_IF),     rd_rm),
17874
 cCL("acssp",   ec08120, 2, (RF, RF_IF),     rd_rm),
17875
 cCL("acssm",   ec08140, 2, (RF, RF_IF),     rd_rm),
17876
 cCL("acssz",   ec08160, 2, (RF, RF_IF),     rd_rm),
17877
 cCL("acsd",    ec08180, 2, (RF, RF_IF),     rd_rm),
17878
 cCL("acsdp",   ec081a0, 2, (RF, RF_IF),     rd_rm),
17879
 cCL("acsdm",   ec081c0, 2, (RF, RF_IF),     rd_rm),
17880
 cCL("acsdz",   ec081e0, 2, (RF, RF_IF),     rd_rm),
17881
 cCL("acse",    ec88100, 2, (RF, RF_IF),     rd_rm),
17882
 cCL("acsep",   ec88120, 2, (RF, RF_IF),     rd_rm),
17883
 cCL("acsem",   ec88140, 2, (RF, RF_IF),     rd_rm),
17884
 cCL("acsez",   ec88160, 2, (RF, RF_IF),     rd_rm),
17885
 
17886
 cCL("atns",    ed08100, 2, (RF, RF_IF),     rd_rm),
17887
 cCL("atnsp",   ed08120, 2, (RF, RF_IF),     rd_rm),
17888
 cCL("atnsm",   ed08140, 2, (RF, RF_IF),     rd_rm),
17889
 cCL("atnsz",   ed08160, 2, (RF, RF_IF),     rd_rm),
17890
 cCL("atnd",    ed08180, 2, (RF, RF_IF),     rd_rm),
17891
 cCL("atndp",   ed081a0, 2, (RF, RF_IF),     rd_rm),
17892
 cCL("atndm",   ed081c0, 2, (RF, RF_IF),     rd_rm),
17893
 cCL("atndz",   ed081e0, 2, (RF, RF_IF),     rd_rm),
17894
 cCL("atne",    ed88100, 2, (RF, RF_IF),     rd_rm),
17895
 cCL("atnep",   ed88120, 2, (RF, RF_IF),     rd_rm),
17896
 cCL("atnem",   ed88140, 2, (RF, RF_IF),     rd_rm),
17897
 cCL("atnez",   ed88160, 2, (RF, RF_IF),     rd_rm),
17898
 
17899
 cCL("urds",    ee08100, 2, (RF, RF_IF),     rd_rm),
17900
 cCL("urdsp",   ee08120, 2, (RF, RF_IF),     rd_rm),
17901
 cCL("urdsm",   ee08140, 2, (RF, RF_IF),     rd_rm),
17902
 cCL("urdsz",   ee08160, 2, (RF, RF_IF),     rd_rm),
17903
 cCL("urdd",    ee08180, 2, (RF, RF_IF),     rd_rm),
17904
 cCL("urddp",   ee081a0, 2, (RF, RF_IF),     rd_rm),
17905
 cCL("urddm",   ee081c0, 2, (RF, RF_IF),     rd_rm),
17906
 cCL("urddz",   ee081e0, 2, (RF, RF_IF),     rd_rm),
17907
 cCL("urde",    ee88100, 2, (RF, RF_IF),     rd_rm),
17908
 cCL("urdep",   ee88120, 2, (RF, RF_IF),     rd_rm),
17909
 cCL("urdem",   ee88140, 2, (RF, RF_IF),     rd_rm),
17910
 cCL("urdez",   ee88160, 2, (RF, RF_IF),     rd_rm),
17911
 
17912
 cCL("nrms",    ef08100, 2, (RF, RF_IF),     rd_rm),
17913
 cCL("nrmsp",   ef08120, 2, (RF, RF_IF),     rd_rm),
17914
 cCL("nrmsm",   ef08140, 2, (RF, RF_IF),     rd_rm),
17915
 cCL("nrmsz",   ef08160, 2, (RF, RF_IF),     rd_rm),
17916
 cCL("nrmd",    ef08180, 2, (RF, RF_IF),     rd_rm),
17917
 cCL("nrmdp",   ef081a0, 2, (RF, RF_IF),     rd_rm),
17918
 cCL("nrmdm",   ef081c0, 2, (RF, RF_IF),     rd_rm),
17919
 cCL("nrmdz",   ef081e0, 2, (RF, RF_IF),     rd_rm),
17920
 cCL("nrme",    ef88100, 2, (RF, RF_IF),     rd_rm),
17921
 cCL("nrmep",   ef88120, 2, (RF, RF_IF),     rd_rm),
17922
 cCL("nrmem",   ef88140, 2, (RF, RF_IF),     rd_rm),
17923
 cCL("nrmez",   ef88160, 2, (RF, RF_IF),     rd_rm),
17924
 
17925
 cCL("adfs",    e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
17926
 cCL("adfsp",   e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
17927
 cCL("adfsm",   e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
17928
 cCL("adfsz",   e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
17929
 cCL("adfd",    e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
17930
 cCL("adfdp",   e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17931
 cCL("adfdm",   e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17932
 cCL("adfdz",   e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17933
 cCL("adfe",    e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
17934
 cCL("adfep",   e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
17935
 cCL("adfem",   e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
17936
 cCL("adfez",   e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
17937
 
17938
 cCL("sufs",    e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
17939
 cCL("sufsp",   e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
17940
 cCL("sufsm",   e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
17941
 cCL("sufsz",   e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
17942
 cCL("sufd",    e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
17943
 cCL("sufdp",   e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17944
 cCL("sufdm",   e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17945
 cCL("sufdz",   e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17946
 cCL("sufe",    e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
17947
 cCL("sufep",   e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
17948
 cCL("sufem",   e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
17949
 cCL("sufez",   e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
17950
 
17951
 cCL("rsfs",    e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
17952
 cCL("rsfsp",   e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
17953
 cCL("rsfsm",   e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
17954
 cCL("rsfsz",   e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
17955
 cCL("rsfd",    e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
17956
 cCL("rsfdp",   e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17957
 cCL("rsfdm",   e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17958
 cCL("rsfdz",   e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17959
 cCL("rsfe",    e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
17960
 cCL("rsfep",   e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
17961
 cCL("rsfem",   e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
17962
 cCL("rsfez",   e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
17963
 
17964
 cCL("mufs",    e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
17965
 cCL("mufsp",   e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
17966
 cCL("mufsm",   e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
17967
 cCL("mufsz",   e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
17968
 cCL("mufd",    e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
17969
 cCL("mufdp",   e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17970
 cCL("mufdm",   e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17971
 cCL("mufdz",   e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17972
 cCL("mufe",    e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
17973
 cCL("mufep",   e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
17974
 cCL("mufem",   e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
17975
 cCL("mufez",   e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
17976
 
17977
 cCL("dvfs",    e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
17978
 cCL("dvfsp",   e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
17979
 cCL("dvfsm",   e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
17980
 cCL("dvfsz",   e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
17981
 cCL("dvfd",    e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
17982
 cCL("dvfdp",   e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17983
 cCL("dvfdm",   e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17984
 cCL("dvfdz",   e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17985
 cCL("dvfe",    e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
17986
 cCL("dvfep",   e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
17987
 cCL("dvfem",   e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
17988
 cCL("dvfez",   e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
17989
 
17990
 cCL("rdfs",    e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
17991
 cCL("rdfsp",   e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
17992
 cCL("rdfsm",   e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
17993
 cCL("rdfsz",   e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
17994
 cCL("rdfd",    e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
17995
 cCL("rdfdp",   e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17996
 cCL("rdfdm",   e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17997
 cCL("rdfdz",   e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17998
 cCL("rdfe",    e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
17999
 cCL("rdfep",   e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
18000
 cCL("rdfem",   e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
18001
 cCL("rdfez",   e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
18002
 
18003
 cCL("pows",    e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
18004
 cCL("powsp",   e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
18005
 cCL("powsm",   e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
18006
 cCL("powsz",   e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
18007
 cCL("powd",    e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
18008
 cCL("powdp",   e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18009
 cCL("powdm",   e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18010
 cCL("powdz",   e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18011
 cCL("powe",    e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
18012
 cCL("powep",   e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
18013
 cCL("powem",   e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
18014
 cCL("powez",   e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
18015
 
18016
 cCL("rpws",    e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
18017
 cCL("rpwsp",   e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
18018
 cCL("rpwsm",   e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
18019
 cCL("rpwsz",   e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
18020
 cCL("rpwd",    e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
18021
 cCL("rpwdp",   e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18022
 cCL("rpwdm",   e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18023
 cCL("rpwdz",   e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18024
 cCL("rpwe",    e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
18025
 cCL("rpwep",   e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
18026
 cCL("rpwem",   e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
18027
 cCL("rpwez",   e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
18028
 
18029
 cCL("rmfs",    e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
18030
 cCL("rmfsp",   e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
18031
 cCL("rmfsm",   e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
18032
 cCL("rmfsz",   e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
18033
 cCL("rmfd",    e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
18034
 cCL("rmfdp",   e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18035
 cCL("rmfdm",   e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18036
 cCL("rmfdz",   e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18037
 cCL("rmfe",    e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
18038
 cCL("rmfep",   e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
18039
 cCL("rmfem",   e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
18040
 cCL("rmfez",   e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
18041
 
18042
 cCL("fmls",    e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
18043
 cCL("fmlsp",   e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
18044
 cCL("fmlsm",   e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
18045
 cCL("fmlsz",   e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
18046
 cCL("fmld",    e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
18047
 cCL("fmldp",   e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18048
 cCL("fmldm",   e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18049
 cCL("fmldz",   e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18050
 cCL("fmle",    e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
18051
 cCL("fmlep",   e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
18052
 cCL("fmlem",   e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
18053
 cCL("fmlez",   e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
18054
 
18055
 cCL("fdvs",    ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
18056
 cCL("fdvsp",   ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
18057
 cCL("fdvsm",   ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
18058
 cCL("fdvsz",   ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
18059
 cCL("fdvd",    ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
18060
 cCL("fdvdp",   ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18061
 cCL("fdvdm",   ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18062
 cCL("fdvdz",   ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18063
 cCL("fdve",    ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
18064
 cCL("fdvep",   ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
18065
 cCL("fdvem",   ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
18066
 cCL("fdvez",   ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
18067
 
18068
 cCL("frds",    eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
18069
 cCL("frdsp",   eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
18070
 cCL("frdsm",   eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
18071
 cCL("frdsz",   eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
18072
 cCL("frdd",    eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
18073
 cCL("frddp",   eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18074
 cCL("frddm",   eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18075
 cCL("frddz",   eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18076
 cCL("frde",    eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
18077
 cCL("frdep",   eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
18078
 cCL("frdem",   eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
18079
 cCL("frdez",   eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
18080
 
18081
 cCL("pols",    ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
18082
 cCL("polsp",   ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
18083
 cCL("polsm",   ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
18084
 cCL("polsz",   ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
18085
 cCL("pold",    ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
18086
 cCL("poldp",   ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18087
 cCL("poldm",   ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18088
 cCL("poldz",   ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18089
 cCL("pole",    ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
18090
 cCL("polep",   ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
18091
 cCL("polem",   ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
18092
 cCL("polez",   ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
18093
 
18094
 cCE("cmf",     e90f110, 2, (RF, RF_IF),     fpa_cmp),
18095
 C3E("cmfe",    ed0f110, 2, (RF, RF_IF),     fpa_cmp),
18096
 cCE("cnf",     eb0f110, 2, (RF, RF_IF),     fpa_cmp),
18097
 C3E("cnfe",    ef0f110, 2, (RF, RF_IF),     fpa_cmp),
18098
 
18099
 cCL("flts",    e000110, 2, (RF, RR),        rn_rd),
18100
 cCL("fltsp",   e000130, 2, (RF, RR),        rn_rd),
18101
 cCL("fltsm",   e000150, 2, (RF, RR),        rn_rd),
18102
 cCL("fltsz",   e000170, 2, (RF, RR),        rn_rd),
18103
 cCL("fltd",    e000190, 2, (RF, RR),        rn_rd),
18104
 cCL("fltdp",   e0001b0, 2, (RF, RR),        rn_rd),
18105
 cCL("fltdm",   e0001d0, 2, (RF, RR),        rn_rd),
18106
 cCL("fltdz",   e0001f0, 2, (RF, RR),        rn_rd),
18107
 cCL("flte",    e080110, 2, (RF, RR),        rn_rd),
18108
 cCL("fltep",   e080130, 2, (RF, RR),        rn_rd),
18109
 cCL("fltem",   e080150, 2, (RF, RR),        rn_rd),
18110
 cCL("fltez",   e080170, 2, (RF, RR),        rn_rd),
18111
 
  /* The implementation of the FIX instruction is broken on some
     assemblers, in that it accepts a precision specifier as well as a
     rounding specifier, despite the fact that this is meaningless.
     To be more compatible, we accept it as well, though of course it
     does not set any bits.  */
 cCE("fix",     e100110, 2, (RR, RF),        rd_rm),
 cCL("fixp",    e100130, 2, (RR, RF),        rd_rm),
 cCL("fixm",    e100150, 2, (RR, RF),        rd_rm),
 cCL("fixz",    e100170, 2, (RR, RF),        rd_rm),
 cCL("fixsp",   e100130, 2, (RR, RF),        rd_rm),
 cCL("fixsm",   e100150, 2, (RR, RF),        rd_rm),
 cCL("fixsz",   e100170, 2, (RR, RF),        rd_rm),
 cCL("fixdp",   e100130, 2, (RR, RF),        rd_rm),
 cCL("fixdm",   e100150, 2, (RR, RF),        rd_rm),
 cCL("fixdz",   e100170, 2, (RR, RF),        rd_rm),
 cCL("fixep",   e100130, 2, (RR, RF),        rd_rm),
 cCL("fixem",   e100150, 2, (RR, RF),        rd_rm),
 cCL("fixez",   e100170, 2, (RR, RF),        rd_rm),
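 /* Informative example: because the precision letter is ignored,
    "fixsz r0, f1" and "fixz r0, f1" assemble identically -- note the
    identical opcode values (e100170) in the entries above.  */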
 
18131
  /* Instructions that were new with the real FPA, call them V2.  */
18132
#undef  ARM_VARIANT
18133
#define ARM_VARIANT  & fpu_fpa_ext_v2
18134
 
18135
 cCE("lfm",     c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
18136
 cCL("lfmfd",   c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
18137
 cCL("lfmea",   d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
18138
 cCE("sfm",     c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
18139
 cCL("sfmfd",   d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
18140
 cCL("sfmea",   c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
18141
 
18142
#undef  ARM_VARIANT
18143
#define ARM_VARIANT  & fpu_vfp_ext_v1xd  /* VFP V1xD (single precision).  */
18144
 
18145
  /* Moves and type conversions.  */
18146
 cCE("fcpys",   eb00a40, 2, (RVS, RVS),       vfp_sp_monadic),
18147
 cCE("fmrs",    e100a10, 2, (RR, RVS),        vfp_reg_from_sp),
18148
 cCE("fmsr",    e000a10, 2, (RVS, RR),        vfp_sp_from_reg),
18149
 cCE("fmstat",  ef1fa10, 0, (),                noargs),
18150
 cCE("vmrs",    ef10a10, 2, (APSR_RR, RVC),   vmrs),
18151
 cCE("vmsr",    ee10a10, 2, (RVC, RR),        vmsr),
18152
 cCE("fsitos",  eb80ac0, 2, (RVS, RVS),       vfp_sp_monadic),
18153
 cCE("fuitos",  eb80a40, 2, (RVS, RVS),       vfp_sp_monadic),
18154
 cCE("ftosis",  ebd0a40, 2, (RVS, RVS),       vfp_sp_monadic),
18155
 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS),       vfp_sp_monadic),
18156
 cCE("ftouis",  ebc0a40, 2, (RVS, RVS),       vfp_sp_monadic),
18157
 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS),       vfp_sp_monadic),
18158
 cCE("fmrx",    ef00a10, 2, (RR, RVC),        rd_rn),
18159
 cCE("fmxr",    ee00a10, 2, (RVC, RR),        rn_rd),
18160
 
18161
  /* Memory operations.  */
18162
 cCE("flds",    d100a00, 2, (RVS, ADDRGLDC),  vfp_sp_ldst),
18163
 cCE("fsts",    d000a00, 2, (RVS, ADDRGLDC),  vfp_sp_ldst),
18164
 cCE("fldmias", c900a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
18165
 cCE("fldmfds", c900a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
18166
 cCE("fldmdbs", d300a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
18167
 cCE("fldmeas", d300a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
18168
 cCE("fldmiax", c900b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
18169
 cCE("fldmfdx", c900b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
18170
 cCE("fldmdbx", d300b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
18171
 cCE("fldmeax", d300b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
18172
 cCE("fstmias", c800a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
18173
 cCE("fstmeas", c800a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
18174
 cCE("fstmdbs", d200a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
18175
 cCE("fstmfds", d200a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
18176
 cCE("fstmiax", c800b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
18177
 cCE("fstmeax", c800b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
18178
 cCE("fstmdbx", d200b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
18179
 cCE("fstmfdx", d200b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
18180
 
18181
  /* Monadic operations.  */
18182
 cCE("fabss",   eb00ac0, 2, (RVS, RVS),       vfp_sp_monadic),
18183
 cCE("fnegs",   eb10a40, 2, (RVS, RVS),       vfp_sp_monadic),
18184
 cCE("fsqrts",  eb10ac0, 2, (RVS, RVS),       vfp_sp_monadic),
18185
 
18186
  /* Dyadic operations.  */
18187
 cCE("fadds",   e300a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
18188
 cCE("fsubs",   e300a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
18189
 cCE("fmuls",   e200a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
18190
 cCE("fdivs",   e800a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
18191
 cCE("fmacs",   e000a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
18192
 cCE("fmscs",   e100a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
18193
 cCE("fnmuls",  e200a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
18194
 cCE("fnmacs",  e000a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
18195
 cCE("fnmscs",  e100a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
18196
 
18197
  /* Comparisons.  */
18198
 cCE("fcmps",   eb40a40, 2, (RVS, RVS),       vfp_sp_monadic),
18199
 cCE("fcmpzs",  eb50a40, 1, (RVS),            vfp_sp_compare_z),
18200
 cCE("fcmpes",  eb40ac0, 2, (RVS, RVS),       vfp_sp_monadic),
18201
 cCE("fcmpezs", eb50ac0, 1, (RVS),            vfp_sp_compare_z),
18202
 
18203
 /* Double precision load/store are still present on single precision
18204
    implementations.  */
18205
 cCE("fldd",    d100b00, 2, (RVD, ADDRGLDC),  vfp_dp_ldst),
18206
 cCE("fstd",    d000b00, 2, (RVD, ADDRGLDC),  vfp_dp_ldst),
18207
 cCE("fldmiad", c900b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
18208
 cCE("fldmfdd", c900b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
18209
 cCE("fldmdbd", d300b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
18210
 cCE("fldmead", d300b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
18211
 cCE("fstmiad", c800b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
18212
 cCE("fstmead", c800b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
18213
 cCE("fstmdbd", d200b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
18214
 cCE("fstmfdd", d200b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
18215
 
18216
#undef  ARM_VARIANT
18217
#define ARM_VARIANT  & fpu_vfp_ext_v1 /* VFP V1 (Double precision).  */
18218
 
18219
  /* Moves and type conversions.  */
18220
 cCE("fcpyd",   eb00b40, 2, (RVD, RVD),       vfp_dp_rd_rm),
18221
 cCE("fcvtds",  eb70ac0, 2, (RVD, RVS),       vfp_dp_sp_cvt),
18222
 cCE("fcvtsd",  eb70bc0, 2, (RVS, RVD),       vfp_sp_dp_cvt),
18223
 cCE("fmdhr",   e200b10, 2, (RVD, RR),        vfp_dp_rn_rd),
18224
 cCE("fmdlr",   e000b10, 2, (RVD, RR),        vfp_dp_rn_rd),
18225
 cCE("fmrdh",   e300b10, 2, (RR, RVD),        vfp_dp_rd_rn),
18226
 cCE("fmrdl",   e100b10, 2, (RR, RVD),        vfp_dp_rd_rn),
18227
 cCE("fsitod",  eb80bc0, 2, (RVD, RVS),       vfp_dp_sp_cvt),
18228
 cCE("fuitod",  eb80b40, 2, (RVD, RVS),       vfp_dp_sp_cvt),
18229
 cCE("ftosid",  ebd0b40, 2, (RVS, RVD),       vfp_sp_dp_cvt),
18230
 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD),       vfp_sp_dp_cvt),
18231
 cCE("ftouid",  ebc0b40, 2, (RVS, RVD),       vfp_sp_dp_cvt),
18232
 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD),       vfp_sp_dp_cvt),
18233
 
18234
  /* Monadic operations.  */
18235
 cCE("fabsd",   eb00bc0, 2, (RVD, RVD),       vfp_dp_rd_rm),
18236
 cCE("fnegd",   eb10b40, 2, (RVD, RVD),       vfp_dp_rd_rm),
18237
 cCE("fsqrtd",  eb10bc0, 2, (RVD, RVD),       vfp_dp_rd_rm),
18238
 
18239
  /* Dyadic operations.  */
18240
 cCE("faddd",   e300b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
18241
 cCE("fsubd",   e300b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
18242
 cCE("fmuld",   e200b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
18243
 cCE("fdivd",   e800b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
18244
 cCE("fmacd",   e000b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
18245
 cCE("fmscd",   e100b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
18246
 cCE("fnmuld",  e200b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
18247
 cCE("fnmacd",  e000b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
18248
 cCE("fnmscd",  e100b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
18249
 
18250
  /* Comparisons.  */
18251
 cCE("fcmpd",   eb40b40, 2, (RVD, RVD),       vfp_dp_rd_rm),
18252
 cCE("fcmpzd",  eb50b40, 1, (RVD),            vfp_dp_rd),
18253
 cCE("fcmped",  eb40bc0, 2, (RVD, RVD),       vfp_dp_rd_rm),
18254
 cCE("fcmpezd", eb50bc0, 1, (RVD),            vfp_dp_rd),
18255
 
18256
#undef  ARM_VARIANT
18257
#define ARM_VARIANT  & fpu_vfp_ext_v2
18258
 
18259
 cCE("fmsrr",   c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
18260
 cCE("fmrrs",   c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
18261
 cCE("fmdrr",   c400b10, 3, (RVD, RR, RR),    vfp_dp_rm_rd_rn),
18262
 cCE("fmrrd",   c500b10, 3, (RR, RR, RVD),    vfp_dp_rd_rn_rm),
18263
 
18264
/* Instructions which may belong to either the Neon or VFP instruction sets.
18265
   Individual encoder functions perform additional architecture checks.  */
18266
#undef  ARM_VARIANT
18267
#define ARM_VARIANT    & fpu_vfp_ext_v1xd
18268
#undef  THUMB_VARIANT
18269
#define THUMB_VARIANT  & fpu_vfp_ext_v1xd
18270
 
18271
  /* These mnemonics are unique to VFP.  */
18272
 NCE(vsqrt,     0,       2, (RVSD, RVSD),       vfp_nsyn_sqrt),
18273
 NCE(vdiv,      0,       3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
18274
 nCE(vnmul,     _vnmul,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
18275
 nCE(vnmla,     _vnmla,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
18276
 nCE(vnmls,     _vnmls,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
18277
 nCE(vcmp,      _vcmp,    2, (RVSD, RVSD_I0),    vfp_nsyn_cmp),
18278
 nCE(vcmpe,     _vcmpe,   2, (RVSD, RVSD_I0),    vfp_nsyn_cmp),
18279
 NCE(vpush,     0,       1, (VRSDLST),          vfp_nsyn_push),
18280
 NCE(vpop,      0,       1, (VRSDLST),          vfp_nsyn_pop),
18281
 NCE(vcvtz,     0,       2, (RVSD, RVSD),       vfp_nsyn_cvtz),
18282
 
18283
  /* Mnemonics shared by Neon and VFP.  */
18284
 nCEF(vmul,     _vmul,    3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
18285
 nCEF(vmla,     _vmla,    3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
18286
 nCEF(vmls,     _vmls,    3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
18287
 
18288
 nCEF(vadd,     _vadd,    3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
18289
 nCEF(vsub,     _vsub,    3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
18290
 
18291
 NCEF(vabs,     1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
18292
 NCEF(vneg,     1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
18293
 
18294
 NCE(vldm,      c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
18295
 NCE(vldmia,    c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
18296
 NCE(vldmdb,    d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
18297
 NCE(vstm,      c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
18298
 NCE(vstmia,    c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
18299
 NCE(vstmdb,    d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
18300
 NCE(vldr,      d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
18301
 NCE(vstr,      d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
18302
 
 nCEF(vcvt,     _vcvt,   3, (RNSDQ, RNSDQ, oI32z), neon_cvt),
 nCEF(vcvtr,    _vcvt,   2, (RNSDQ, RNSDQ), neon_cvtr),
 nCEF(vcvtb,    _vcvt,   2, (RVS, RVS), neon_cvtb),
 nCEF(vcvtt,    _vcvt,   2, (RVS, RVS), neon_cvtt),
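 /* Informative examples only (assumed typical uses; availability depends on
    the selected FPU):
        vcvt.s32.f32  s0, s0          @ float -> signed integer
        vcvt.f32.s32  d0, d0, #16     @ fixed-point variant, optional #fbits
        vcvtb.f32.f16 s0, s1          @ half-precision, bottom halfword  */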
 

  /* NOTE: All VMOV encoding is special-cased!  */
 NCE(vmov,      0,       1, (VMOV), neon_mov),
 NCE(vmovq,     0,       1, (VMOV), neon_mov),
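 /* Informative examples of forms the special-cased VMOV parser handles
    (assumed typical uses, not an exhaustive list):
        vmov.i32  q0, #0          @ Neon immediate
        vmov      r0, s1          @ core <-> single-precision register
        vmov      d0, r0, r1      @ core register pair <-> 64-bit register
        vmov.f32  s0, s1          @ register copy  */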
 
18313
#undef  THUMB_VARIANT
18314
#define THUMB_VARIANT  & fpu_neon_ext_v1
18315
#undef  ARM_VARIANT
18316
#define ARM_VARIANT    & fpu_neon_ext_v1
18317
 
18318
  /* Data processing with three registers of the same length.  */
18319
  /* integer ops, valid types S8 S16 S32 U8 U16 U32.  */
18320
 NUF(vaba,      0000710, 3, (RNDQ, RNDQ,  RNDQ), neon_dyadic_i_su),
18321
 NUF(vabaq,     0000710, 3, (RNQ,  RNQ,   RNQ),  neon_dyadic_i_su),
18322
 NUF(vhadd,     0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
18323
 NUF(vhaddq,    0000000, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i_su),
18324
 NUF(vrhadd,    0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
18325
 NUF(vrhaddq,   0000100, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i_su),
18326
 NUF(vhsub,     0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
18327
 NUF(vhsubq,    0000200, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i_su),
18328
  /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64.  */
18329
 NUF(vqadd,     0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
18330
 NUF(vqaddq,    0000010, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i64_su),
18331
 NUF(vqsub,     0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
18332
 NUF(vqsubq,    0000210, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i64_su),
18333
 NUF(vrshl,     0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
18334
 NUF(vrshlq,    0000500, 3, (RNQ,  oRNQ,  RNQ),  neon_rshl),
18335
 NUF(vqrshl,    0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
18336
 NUF(vqrshlq,   0000510, 3, (RNQ,  oRNQ,  RNQ),  neon_rshl),
18337
  /* If not immediate, fall back to neon_dyadic_i64_su.
     shl_imm should accept I8 I16 I32 I64,
     qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64.  */
 nUF(vshl,      _vshl,    3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
 nUF(vshlq,     _vshl,    3, (RNQ,  oRNQ,  RNDQ_I63b), neon_shl_imm),
 nUF(vqshl,     _vqshl,   3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
 nUF(vqshlq,    _vqshl,   3, (RNQ,  oRNQ,  RNDQ_I63b), neon_qshl_imm),
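 /* Informative examples of the two forms distinguished above:
        vshl.i32  d0, d1, #3      @ immediate form, handled as shl_imm
        vshl.s32  d0, d1, d2      @ register form, falls back as noted  */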
  /* Logic ops, types optional & ignored.  */
18345
 nUF(vand,      _vand,    3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
18346
 nUF(vandq,     _vand,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
18347
 nUF(vbic,      _vbic,    3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
18348
 nUF(vbicq,     _vbic,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
18349
 nUF(vorr,      _vorr,    3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
18350
 nUF(vorrq,     _vorr,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
18351
 nUF(vorn,      _vorn,    3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
18352
 nUF(vornq,     _vorn,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
18353
 nUF(veor,      _veor,    3, (RNDQ, oRNDQ, RNDQ),      neon_logic),
18354
 nUF(veorq,     _veor,    3, (RNQ,  oRNQ,  RNQ),       neon_logic),
18355
  /* Bitfield ops, untyped.  */
18356
 NUF(vbsl,      1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
18357
 NUF(vbslq,     1100110, 3, (RNQ,  RNQ,  RNQ),  neon_bitfield),
18358
 NUF(vbit,      1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
18359
 NUF(vbitq,     1200110, 3, (RNQ,  RNQ,  RNQ),  neon_bitfield),
18360
 NUF(vbif,      1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
18361
 NUF(vbifq,     1300110, 3, (RNQ,  RNQ,  RNQ),  neon_bitfield),
18362
  /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32.  */
18363
 nUF(vabd,      _vabd,    3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
18364
 nUF(vabdq,     _vabd,    3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_if_su),
18365
 nUF(vmax,      _vmax,    3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
18366
 nUF(vmaxq,     _vmax,    3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_if_su),
18367
 nUF(vmin,      _vmin,    3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
18368
 nUF(vminq,     _vmin,    3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_if_su),
18369
  /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
18370
     back to neon_dyadic_if_su.  */
18371
 nUF(vcge,      _vcge,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
18372
 nUF(vcgeq,     _vcge,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp),
18373
 nUF(vcgt,      _vcgt,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
18374
 nUF(vcgtq,     _vcgt,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp),
18375
 nUF(vclt,      _vclt,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
18376
 nUF(vcltq,     _vclt,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp_inv),
18377
 nUF(vcle,      _vcle,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
18378
 nUF(vcleq,     _vcle,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp_inv),
18379
  /* Comparison. Type I8 I16 I32 F32.  */
18380
 nUF(vceq,      _vceq,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
18381
 nUF(vceqq,     _vceq,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_ceq),
18382
  /* As above, D registers only.  */
18383
 nUF(vpmax,     _vpmax,   3, (RND, oRND, RND), neon_dyadic_if_su_d),
18384
 nUF(vpmin,     _vpmin,   3, (RND, oRND, RND), neon_dyadic_if_su_d),
18385
  /* Int and float variants, signedness unimportant.  */
18386
 nUF(vmlaq,     _vmla,    3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_mac_maybe_scalar),
18387
 nUF(vmlsq,     _vmls,    3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_mac_maybe_scalar),
18388
 nUF(vpadd,     _vpadd,   3, (RND,  oRND,  RND),       neon_dyadic_if_i_d),
18389
  /* Add/sub take types I8 I16 I32 I64 F32.  */
18390
 nUF(vaddq,     _vadd,    3, (RNQ,  oRNQ,  RNQ),  neon_addsub_if_i),
18391
 nUF(vsubq,     _vsub,    3, (RNQ,  oRNQ,  RNQ),  neon_addsub_if_i),
18392
  /* vtst takes sizes 8, 16, 32.  */
18393
 NUF(vtst,      0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
18394
 NUF(vtstq,     0000810, 3, (RNQ,  oRNQ,  RNQ),  neon_tst),
18395
  /* VMUL takes I8 I16 I32 F32 P8.  */
18396
 nUF(vmulq,     _vmul,     3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_mul),
18397
  /* VQD{R}MULH takes S16 S32.  */
18398
 nUF(vqdmulh,   _vqdmulh,  3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
18399
 nUF(vqdmulhq,  _vqdmulh,  3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_qdmulh),
18400
 nUF(vqrdmulh,  _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
18401
 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_qdmulh),
18402
 NUF(vacge,     0000e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
18403
 NUF(vacgeq,    0000e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute),
18404
 NUF(vacgt,     0200e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
18405
 NUF(vacgtq,    0200e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute),
18406
 NUF(vaclt,     0200e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
18407
 NUF(vacltq,    0200e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute_inv),
18408
 NUF(vacle,     0000e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
18409
 NUF(vacleq,    0000e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute_inv),
18410
 NUF(vrecps,    0000f10,  3, (RNDQ, oRNDQ, RNDQ), neon_step),
18411
 NUF(vrecpsq,   0000f10,  3, (RNQ,  oRNQ,  RNQ),  neon_step),
18412
 NUF(vrsqrts,   0200f10,  3, (RNDQ, oRNDQ, RNDQ), neon_step),
18413
 NUF(vrsqrtsq,  0200f10,  3, (RNQ,  oRNQ,  RNQ),  neon_step),
18414
 
18415
  /* Two address, int/float. Types S8 S16 S32 F32.  */
18416
 NUF(vabsq,     1b10300, 2, (RNQ,  RNQ),      neon_abs_neg),
18417
 NUF(vnegq,     1b10380, 2, (RNQ,  RNQ),      neon_abs_neg),
18418
 
18419
  /* Data processing with two registers and a shift amount.  */
18420
  /* Right shifts, and variants with rounding.
18421
     Types accepted S8 S16 S32 S64 U8 U16 U32 U64.  */
18422
 NUF(vshr,      0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
18423
 NUF(vshrq,     0800010, 3, (RNQ,  oRNQ,  I64z), neon_rshift_round_imm),
18424
 NUF(vrshr,     0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
18425
 NUF(vrshrq,    0800210, 3, (RNQ,  oRNQ,  I64z), neon_rshift_round_imm),
18426
 NUF(vsra,      0800110, 3, (RNDQ, oRNDQ, I64),  neon_rshift_round_imm),
18427
 NUF(vsraq,     0800110, 3, (RNQ,  oRNQ,  I64),  neon_rshift_round_imm),
18428
 NUF(vrsra,     0800310, 3, (RNDQ, oRNDQ, I64),  neon_rshift_round_imm),
18429
 NUF(vrsraq,    0800310, 3, (RNQ,  oRNQ,  I64),  neon_rshift_round_imm),
18430
  /* Shift and insert. Sizes accepted 8 16 32 64.  */
18431
 NUF(vsli,      1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
18432
 NUF(vsliq,     1800510, 3, (RNQ,  oRNQ,  I63), neon_sli),
18433
 NUF(vsri,      1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
18434
 NUF(vsriq,     1800410, 3, (RNQ,  oRNQ,  I64), neon_sri),
18435
  /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64.  */
18436
 NUF(vqshlu,    1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
18437
 NUF(vqshluq,   1800610, 3, (RNQ,  oRNQ,  I63), neon_qshlu_imm),
18438
  /* Right shift immediate, saturating & narrowing, with rounding variants.
18439
     Types accepted S16 S32 S64 U16 U32 U64.  */
18440
 NUF(vqshrn,    0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
18441
 NUF(vqrshrn,   0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
18442
  /* As above, unsigned. Types accepted S16 S32 S64.  */
18443
 NUF(vqshrun,   0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
18444
 NUF(vqrshrun,  0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
18445
  /* Right shift narrowing. Types accepted I16 I32 I64.  */
18446
 NUF(vshrn,     0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
18447
 NUF(vrshrn,    0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
18448
  /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant.  */
18449
 nUF(vshll,     _vshll,   3, (RNQ, RND, I32),  neon_shll),
18450
  /* CVT with optional immediate for fixed-point variant.  */
18451
 nUF(vcvtq,     _vcvt,    3, (RNQ, RNQ, oI32b), neon_cvt),
18452
 
18453
 nUF(vmvn,      _vmvn,    2, (RNDQ, RNDQ_Ibig), neon_mvn),
18454
 nUF(vmvnq,     _vmvn,    2, (RNQ,  RNDQ_Ibig), neon_mvn),
18455
 
18456
  /* Data processing, three registers of different lengths.  */
18457
  /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32.  */
18458
 NUF(vabal,     0800500, 3, (RNQ, RND, RND),  neon_abal),
18459
 NUF(vabdl,     0800700, 3, (RNQ, RND, RND),  neon_dyadic_long),
18460
 NUF(vaddl,     0800000, 3, (RNQ, RND, RND),  neon_dyadic_long),
18461
 NUF(vsubl,     0800200, 3, (RNQ, RND, RND),  neon_dyadic_long),
18462
  /* If not scalar, fall back to neon_dyadic_long.
18463
     Vector types as above, scalar types S16 S32 U16 U32.  */
18464
 nUF(vmlal,     _vmlal,   3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
18465
 nUF(vmlsl,     _vmlsl,   3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
18466
  /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32.  */
18467
 NUF(vaddw,     0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
18468
 NUF(vsubw,     0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
18469
  /* Dyadic, narrowing insns. Types I16 I32 I64.  */
18470
 NUF(vaddhn,    0800400, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
18471
 NUF(vraddhn,   1800400, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
18472
 NUF(vsubhn,    0800600, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
18473
 NUF(vrsubhn,   1800600, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
18474
  /* Saturating doubling multiplies. Types S16 S32.  */
18475
 nUF(vqdmlal,   _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
18476
 nUF(vqdmlsl,   _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
18477
 nUF(vqdmull,   _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
18478
  /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
18479
     S16 S32 U16 U32.  */
18480
 nUF(vmull,     _vmull,   3, (RNQ, RND, RND_RNSC), neon_vmull),
18481
 
18482
  /* Extract. Size 8.  */
18483
 NUF(vext,      0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
18484
 NUF(vextq,     0b00000, 4, (RNQ,  oRNQ,  RNQ,  I15), neon_ext),
18485
 
18486
  /* Two registers, miscellaneous.  */
18487
  /* Reverse. Sizes 8 16 32 (must be < size in opcode).  */
18488
 NUF(vrev64,    1b00000, 2, (RNDQ, RNDQ),     neon_rev),
18489
 NUF(vrev64q,   1b00000, 2, (RNQ,  RNQ),      neon_rev),
18490
 NUF(vrev32,    1b00080, 2, (RNDQ, RNDQ),     neon_rev),
18491
 NUF(vrev32q,   1b00080, 2, (RNQ,  RNQ),      neon_rev),
18492
 NUF(vrev16,    1b00100, 2, (RNDQ, RNDQ),     neon_rev),
18493
 NUF(vrev16q,   1b00100, 2, (RNQ,  RNQ),      neon_rev),
18494
  /* Vector replicate. Sizes 8 16 32.  */
18495
 nCE(vdup,      _vdup,    2, (RNDQ, RR_RNSC),  neon_dup),
18496
 nCE(vdupq,     _vdup,    2, (RNQ,  RR_RNSC),  neon_dup),
18497
  /* VMOVL. Types S8 S16 S32 U8 U16 U32.  */
18498
 NUF(vmovl,     0800a10, 2, (RNQ, RND),       neon_movl),
18499
  /* VMOVN. Types I16 I32 I64.  */
18500
 nUF(vmovn,     _vmovn,   2, (RND, RNQ),       neon_movn),
18501
  /* VQMOVN. Types S16 S32 S64 U16 U32 U64.  */
18502
 nUF(vqmovn,    _vqmovn,  2, (RND, RNQ),       neon_qmovn),
18503
  /* VQMOVUN. Types S16 S32 S64.  */
18504
 nUF(vqmovun,   _vqmovun, 2, (RND, RNQ),       neon_qmovun),
18505
  /* VZIP / VUZP. Sizes 8 16 32.  */
18506
 NUF(vzip,      1b20180, 2, (RNDQ, RNDQ),     neon_zip_uzp),
18507
 NUF(vzipq,     1b20180, 2, (RNQ,  RNQ),      neon_zip_uzp),
18508
 NUF(vuzp,      1b20100, 2, (RNDQ, RNDQ),     neon_zip_uzp),
18509
 NUF(vuzpq,     1b20100, 2, (RNQ,  RNQ),      neon_zip_uzp),
18510
  /* VQABS / VQNEG. Types S8 S16 S32.  */
18511
 NUF(vqabs,     1b00700, 2, (RNDQ, RNDQ),     neon_sat_abs_neg),
18512
 NUF(vqabsq,    1b00700, 2, (RNQ,  RNQ),      neon_sat_abs_neg),
18513
 NUF(vqneg,     1b00780, 2, (RNDQ, RNDQ),     neon_sat_abs_neg),
18514
 NUF(vqnegq,    1b00780, 2, (RNQ,  RNQ),      neon_sat_abs_neg),
18515
  /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32.  */
18516
 NUF(vpadal,    1b00600, 2, (RNDQ, RNDQ),     neon_pair_long),
18517
 NUF(vpadalq,   1b00600, 2, (RNQ,  RNQ),      neon_pair_long),
18518
 NUF(vpaddl,    1b00200, 2, (RNDQ, RNDQ),     neon_pair_long),
18519
 NUF(vpaddlq,   1b00200, 2, (RNQ,  RNQ),      neon_pair_long),
18520
  /* Reciprocal estimates. Types U32 F32.  */
18521
 NUF(vrecpe,    1b30400, 2, (RNDQ, RNDQ),     neon_recip_est),
18522
 NUF(vrecpeq,   1b30400, 2, (RNQ,  RNQ),      neon_recip_est),
18523
 NUF(vrsqrte,   1b30480, 2, (RNDQ, RNDQ),     neon_recip_est),
18524
 NUF(vrsqrteq,  1b30480, 2, (RNQ,  RNQ),      neon_recip_est),
18525
  /* VCLS. Types S8 S16 S32.  */
18526
 NUF(vcls,      1b00400, 2, (RNDQ, RNDQ),     neon_cls),
18527
 NUF(vclsq,     1b00400, 2, (RNQ,  RNQ),      neon_cls),
18528
  /* VCLZ. Types I8 I16 I32.  */
18529
 NUF(vclz,      1b00480, 2, (RNDQ, RNDQ),     neon_clz),
18530
 NUF(vclzq,     1b00480, 2, (RNQ,  RNQ),      neon_clz),
18531
  /* VCNT. Size 8.  */
18532
 NUF(vcnt,      1b00500, 2, (RNDQ, RNDQ),     neon_cnt),
18533
 NUF(vcntq,     1b00500, 2, (RNQ,  RNQ),      neon_cnt),
18534
  /* Two address, untyped.  */
18535
 NUF(vswp,      1b20000, 2, (RNDQ, RNDQ),     neon_swp),
18536
 NUF(vswpq,     1b20000, 2, (RNQ,  RNQ),      neon_swp),
18537
  /* VTRN. Sizes 8 16 32.  */
18538
 nUF(vtrn,      _vtrn,    2, (RNDQ, RNDQ),     neon_trn),
18539
 nUF(vtrnq,     _vtrn,    2, (RNQ,  RNQ),      neon_trn),
18540
 
18541
  /* Table lookup. Size 8.  */
18542
 NUF(vtbl,      1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
18543
 NUF(vtbx,      1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
18544
 
18545
#undef  THUMB_VARIANT
18546
#define THUMB_VARIANT  & fpu_vfp_v3_or_neon_ext
18547
#undef  ARM_VARIANT
18548
#define ARM_VARIANT    & fpu_vfp_v3_or_neon_ext
18549
 
18550
  /* Neon element/structure load/store.  */
18551
 nUF(vld1,      _vld1,    2, (NSTRLST, ADDR),  neon_ldx_stx),
18552
 nUF(vst1,      _vst1,    2, (NSTRLST, ADDR),  neon_ldx_stx),
18553
 nUF(vld2,      _vld2,    2, (NSTRLST, ADDR),  neon_ldx_stx),
18554
 nUF(vst2,      _vst2,    2, (NSTRLST, ADDR),  neon_ldx_stx),
18555
 nUF(vld3,      _vld3,    2, (NSTRLST, ADDR),  neon_ldx_stx),
18556
 nUF(vst3,      _vst3,    2, (NSTRLST, ADDR),  neon_ldx_stx),
18557
 nUF(vld4,      _vld4,    2, (NSTRLST, ADDR),  neon_ldx_stx),
18558
 nUF(vst4,      _vst4,    2, (NSTRLST, ADDR),  neon_ldx_stx),
18559
 
18560
#undef  THUMB_VARIANT
18561
#define THUMB_VARIANT &fpu_vfp_ext_v3xd
18562
#undef ARM_VARIANT
18563
#define ARM_VARIANT &fpu_vfp_ext_v3xd
18564
 cCE("fconsts",   eb00a00, 2, (RVS, I255),      vfp_sp_const),
18565
 cCE("fshtos",    eba0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
18566
 cCE("fsltos",    eba0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
18567
 cCE("fuhtos",    ebb0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
18568
 cCE("fultos",    ebb0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
18569
 cCE("ftoshs",    ebe0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
18570
 cCE("ftosls",    ebe0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
18571
 cCE("ftouhs",    ebf0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
18572
 cCE("ftouls",    ebf0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
18573
 
18574
#undef THUMB_VARIANT
18575
#define THUMB_VARIANT  & fpu_vfp_ext_v3
18576
#undef  ARM_VARIANT
18577
#define ARM_VARIANT    & fpu_vfp_ext_v3
18578
 
18579
 cCE("fconstd",   eb00b00, 2, (RVD, I255),      vfp_dp_const),
18580
 cCE("fshtod",    eba0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
18581
 cCE("fsltod",    eba0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
18582
 cCE("fuhtod",    ebb0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
18583
 cCE("fultod",    ebb0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
18584
 cCE("ftoshd",    ebe0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
18585
 cCE("ftosld",    ebe0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
18586
 cCE("ftouhd",    ebf0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
18587
 cCE("ftould",    ebf0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
18588
 
18589
#undef ARM_VARIANT
18590
#define ARM_VARIANT &fpu_vfp_ext_fma
18591
#undef THUMB_VARIANT
18592
#define THUMB_VARIANT &fpu_vfp_ext_fma
18593
 /* Mnemonics shared by Neon and VFP.  These are included in the
18594
    VFP FMA variant; NEON and VFP FMA always include the NEON
18595
    FMA instructions.  */
18596
 nCEF(vfma,     _vfma,    3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
18597
 nCEF(vfms,     _vfms,    3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
18598
 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
18599
    the v form should always be used.  */
18600
 cCE("ffmas",   ea00a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
18601
 cCE("ffnmas",  ea00a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
18602
 cCE("ffmad",   ea00b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
18603
 cCE("ffnmad",  ea00b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
18604
 nCE(vfnma,     _vfnma,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
18605
 nCE(vfnms,     _vfnms,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
18606
 
18607
#undef THUMB_VARIANT
18608
#undef  ARM_VARIANT
18609
#define ARM_VARIANT  & arm_cext_xscale /* Intel XScale extensions.  */
18610
 
18611
 cCE("mia",     e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
18612
 cCE("miaph",   e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
18613
 cCE("miabb",   e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
18614
 cCE("miabt",   e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
18615
 cCE("miatb",   e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
18616
 cCE("miatt",   e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
18617
 cCE("mar",     c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
18618
 cCE("mra",     c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
18619
 
18620
#undef  ARM_VARIANT
18621
#define ARM_VARIANT  & arm_cext_iwmmxt /* Intel Wireless MMX technology.  */
18622
 
18623
 cCE("tandcb",  e13f130, 1, (RR),                   iwmmxt_tandorc),
18624
 cCE("tandch",  e53f130, 1, (RR),                   iwmmxt_tandorc),
18625
 cCE("tandcw",  e93f130, 1, (RR),                   iwmmxt_tandorc),
18626
 cCE("tbcstb",  e400010, 2, (RIWR, RR),             rn_rd),
18627
 cCE("tbcsth",  e400050, 2, (RIWR, RR),             rn_rd),
18628
 cCE("tbcstw",  e400090, 2, (RIWR, RR),             rn_rd),
18629
 cCE("textrcb", e130170, 2, (RR, I7),               iwmmxt_textrc),
18630
 cCE("textrch", e530170, 2, (RR, I7),               iwmmxt_textrc),
18631
 cCE("textrcw", e930170, 2, (RR, I7),               iwmmxt_textrc),
18632
 cCE("textrmub",        e100070, 3, (RR, RIWR, I7),         iwmmxt_textrm),
18633
 cCE("textrmuh",        e500070, 3, (RR, RIWR, I7),         iwmmxt_textrm),
18634
 cCE("textrmuw",        e900070, 3, (RR, RIWR, I7),         iwmmxt_textrm),
18635
 cCE("textrmsb",        e100078, 3, (RR, RIWR, I7),         iwmmxt_textrm),
18636
 cCE("textrmsh",        e500078, 3, (RR, RIWR, I7),         iwmmxt_textrm),
18637
 cCE("textrmsw",        e900078, 3, (RR, RIWR, I7),         iwmmxt_textrm),
18638
 cCE("tinsrb",  e600010, 3, (RIWR, RR, I7),         iwmmxt_tinsr),
18639
 cCE("tinsrh",  e600050, 3, (RIWR, RR, I7),         iwmmxt_tinsr),
18640
 cCE("tinsrw",  e600090, 3, (RIWR, RR, I7),         iwmmxt_tinsr),
18641
 cCE("tmcr",    e000110, 2, (RIWC_RIWG, RR),        rn_rd),
18642
 cCE("tmcrr",   c400000, 3, (RIWR, RR, RR),         rm_rd_rn),
18643
 cCE("tmia",    e200010, 3, (RIWR, RR, RR),         iwmmxt_tmia),
18644
 cCE("tmiaph",  e280010, 3, (RIWR, RR, RR),         iwmmxt_tmia),
18645
 cCE("tmiabb",  e2c0010, 3, (RIWR, RR, RR),         iwmmxt_tmia),
18646
 cCE("tmiabt",  e2d0010, 3, (RIWR, RR, RR),         iwmmxt_tmia),
18647
 cCE("tmiatb",  e2e0010, 3, (RIWR, RR, RR),         iwmmxt_tmia),
18648
 cCE("tmiatt",  e2f0010, 3, (RIWR, RR, RR),         iwmmxt_tmia),
18649
 cCE("tmovmskb",        e100030, 2, (RR, RIWR),             rd_rn),
18650
 cCE("tmovmskh",        e500030, 2, (RR, RIWR),             rd_rn),
18651
 cCE("tmovmskw",        e900030, 2, (RR, RIWR),             rd_rn),
18652
 cCE("tmrc",    e100110, 2, (RR, RIWC_RIWG),        rd_rn),
18653
 cCE("tmrrc",   c500000, 3, (RR, RR, RIWR),         rd_rn_rm),
18654
 cCE("torcb",   e13f150, 1, (RR),                   iwmmxt_tandorc),
18655
 cCE("torch",   e53f150, 1, (RR),                   iwmmxt_tandorc),
18656
 cCE("torcw",   e93f150, 1, (RR),                   iwmmxt_tandorc),
18657
 cCE("waccb",   e0001c0, 2, (RIWR, RIWR),           rd_rn),
18658
 cCE("wacch",   e4001c0, 2, (RIWR, RIWR),           rd_rn),
18659
 cCE("waccw",   e8001c0, 2, (RIWR, RIWR),           rd_rn),
18660
 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18661
 cCE("waddb",   e000180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18662
 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18663
 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18664
 cCE("waddh",   e400180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18665
 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18666
 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18667
 cCE("waddw",   e800180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18668
 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18669
 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
18670
 cCE("walignr0",        e800020, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18671
 cCE("walignr1",        e900020, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18672
 cCE("walignr2",        ea00020, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18673
 cCE("walignr3",        eb00020, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18674
 cCE("wand",    e200000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18675
 cCE("wandn",   e300000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18676
 cCE("wavg2b",  e800000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18677
 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18678
 cCE("wavg2h",  ec00000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18679
 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18680
 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18681
 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18682
 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18683
 cCE("wcmpgtub",        e100060, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18684
 cCE("wcmpgtuh",        e500060, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18685
 cCE("wcmpgtuw",        e900060, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18686
 cCE("wcmpgtsb",        e300060, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18687
 cCE("wcmpgtsh",        e700060, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18688
 cCE("wcmpgtsw",        eb00060, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18689
 cCE("wldrb",   c100000, 2, (RIWR, ADDR),           iwmmxt_wldstbh),
18690
 cCE("wldrh",   c500000, 2, (RIWR, ADDR),           iwmmxt_wldstbh),
18691
 cCE("wldrw",   c100100, 2, (RIWR_RIWC, ADDR),      iwmmxt_wldstw),
18692
 cCE("wldrd",   c500100, 2, (RIWR, ADDR),           iwmmxt_wldstd),
18693
 cCE("wmacs",   e600100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18694
 cCE("wmacsz",  e700100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18695
 cCE("wmacu",   e400100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18696
 cCE("wmacuz",  e500100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18697
 cCE("wmadds",  ea00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18698
 cCE("wmaddu",  e800100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18699
 cCE("wmaxsb",  e200160, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18700
 cCE("wmaxsh",  e600160, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18701
 cCE("wmaxsw",  ea00160, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18702
 cCE("wmaxub",  e000160, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18703
 cCE("wmaxuh",  e400160, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18704
 cCE("wmaxuw",  e800160, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18705
 cCE("wminsb",  e300160, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18706
 cCE("wminsh",  e700160, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18707
 cCE("wminsw",  eb00160, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18708
 cCE("wminub",  e100160, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18709
 cCE("wminuh",  e500160, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18710
 cCE("wminuw",  e900160, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18711
 cCE("wmov",    e000000, 2, (RIWR, RIWR),           iwmmxt_wmov),
18712
 cCE("wmulsm",  e300100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18713
 cCE("wmulsl",  e200100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18714
 cCE("wmulum",  e100100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18715
 cCE("wmulul",  e000100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18716
 cCE("wor",     e000000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18717
 cCE("wpackhss",        e700080, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18718
 cCE("wpackhus",        e500080, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18719
 cCE("wpackwss",        eb00080, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18720
 cCE("wpackwus",        e900080, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18721
 cCE("wpackdss",        ef00080, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18722
 cCE("wpackdus",        ed00080, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18723
 cCE("wrorh",   e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18724
 cCE("wrorhg",  e700148, 3, (RIWR, RIWR, RIWG),     rd_rn_rm),
18725
 cCE("wrorw",   eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18726
 cCE("wrorwg",  eb00148, 3, (RIWR, RIWR, RIWG),     rd_rn_rm),
18727
 cCE("wrord",   ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18728
 cCE("wrordg",  ef00148, 3, (RIWR, RIWR, RIWG),     rd_rn_rm),
18729
 cCE("wsadb",   e000120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18730
 cCE("wsadbz",  e100120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18731
 cCE("wsadh",   e400120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18732
 cCE("wsadhz",  e500120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18733
 cCE("wshufh",  e0001e0, 3, (RIWR, RIWR, I255),     iwmmxt_wshufh),
18734
 cCE("wsllh",   e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18735
 cCE("wsllhg",  e500148, 3, (RIWR, RIWR, RIWG),     rd_rn_rm),
18736
 cCE("wsllw",   e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18737
 cCE("wsllwg",  e900148, 3, (RIWR, RIWR, RIWG),     rd_rn_rm),
18738
 cCE("wslld",   ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18739
 cCE("wslldg",  ed00148, 3, (RIWR, RIWR, RIWG),     rd_rn_rm),
18740
 cCE("wsrah",   e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18741
 cCE("wsrahg",  e400148, 3, (RIWR, RIWR, RIWG),     rd_rn_rm),
18742
 cCE("wsraw",   e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18743
 cCE("wsrawg",  e800148, 3, (RIWR, RIWR, RIWG),     rd_rn_rm),
18744
 cCE("wsrad",   ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18745
 cCE("wsradg",  ec00148, 3, (RIWR, RIWR, RIWG),     rd_rn_rm),
18746
 cCE("wsrlh",   e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18747
 cCE("wsrlhg",  e600148, 3, (RIWR, RIWR, RIWG),     rd_rn_rm),
18748
 cCE("wsrlw",   ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18749
 cCE("wsrlwg",  ea00148, 3, (RIWR, RIWR, RIWG),     rd_rn_rm),
18750
 cCE("wsrld",   ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18751
 cCE("wsrldg",  ee00148, 3, (RIWR, RIWR, RIWG),     rd_rn_rm),
18752
 cCE("wstrb",   c000000, 2, (RIWR, ADDR),           iwmmxt_wldstbh),
18753
 cCE("wstrh",   c400000, 2, (RIWR, ADDR),           iwmmxt_wldstbh),
18754
 cCE("wstrw",   c000100, 2, (RIWR_RIWC, ADDR),      iwmmxt_wldstw),
18755
 cCE("wstrd",   c400100, 2, (RIWR, ADDR),           iwmmxt_wldstd),
18756
 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18757
 cCE("wsubb",   e0001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18758
 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18759
 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18760
 cCE("wsubh",   e4001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18761
 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18762
 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18763
 cCE("wsubw",   e8001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18764
 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18765
 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR),         rd_rn),
18766
 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR),         rd_rn),
18767
 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR),         rd_rn),
18768
 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR),         rd_rn),
18769
 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR),         rd_rn),
18770
 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR),         rd_rn),
18771
 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR),           rd_rn_rm),
18772
 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR),           rd_rn_rm),
18773
 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR),           rd_rn_rm),
18774
 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR),         rd_rn),
18775
 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR),         rd_rn),
18776
 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR),         rd_rn),
18777
 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR),         rd_rn),
18778
 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR),         rd_rn),
18779
 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR),         rd_rn),
18780
 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR),           rd_rn_rm),
18781
 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR),           rd_rn_rm),
18782
 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR),           rd_rn_rm),
18783
 cCE("wxor",    e100000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18784
 cCE("wzero",   e300000, 1, (RIWR),                 iwmmxt_wzero),
18785
 
18786
#undef  ARM_VARIANT
18787
#define ARM_VARIANT  & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2.  */
18788
 
18789
 cCE("torvscb",   e12f190, 1, (RR),                 iwmmxt_tandorc),
18790
 cCE("torvsch",   e52f190, 1, (RR),                 iwmmxt_tandorc),
18791
 cCE("torvscw",   e92f190, 1, (RR),                 iwmmxt_tandorc),
18792
 cCE("wabsb",     e2001c0, 2, (RIWR, RIWR),           rd_rn),
18793
 cCE("wabsh",     e6001c0, 2, (RIWR, RIWR),           rd_rn),
18794
 cCE("wabsw",     ea001c0, 2, (RIWR, RIWR),           rd_rn),
18795
 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18796
 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18797
 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18798
 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18799
 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18800
 cCE("waddhc",    e600180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18801
 cCE("waddwc",    ea00180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18802
 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18803
 cCE("wavg4",   e400000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18804
 cCE("wavg4r",    e500000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18805
 cCE("wmaddsn",   ee00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18806
 cCE("wmaddsx",   eb00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18807
 cCE("wmaddun",   ec00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18808
 cCE("wmaddux",   e900100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18809
 cCE("wmerge",    e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
18810
 cCE("wmiabb",    e0000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18811
 cCE("wmiabt",    e1000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18812
 cCE("wmiatb",    e2000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18813
 cCE("wmiatt",    e3000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18814
 cCE("wmiabbn",   e4000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18815
 cCE("wmiabtn",   e5000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18816
 cCE("wmiatbn",   e6000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18817
 cCE("wmiattn",   e7000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18818
 cCE("wmiawbb",   e800120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18819
 cCE("wmiawbt",   e900120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18820
 cCE("wmiawtb",   ea00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18821
 cCE("wmiawtt",   eb00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18822
 cCE("wmiawbbn",  ec00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18823
 cCE("wmiawbtn",  ed00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18824
 cCE("wmiawtbn",  ee00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18825
 cCE("wmiawttn",  ef00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18826
 cCE("wmulsmr",   ef00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18827
 cCE("wmulumr",   ed00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18828
 cCE("wmulwumr",  ec000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18829
 cCE("wmulwsmr",  ee000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18830
 cCE("wmulwum",   ed000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18831
 cCE("wmulwsm",   ef000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18832
 cCE("wmulwl",    eb000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18833
 cCE("wqmiabb",   e8000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18834
 cCE("wqmiabt",   e9000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18835
 cCE("wqmiatb",   ea000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18836
 cCE("wqmiatt",   eb000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18837
 cCE("wqmiabbn",  ec000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18838
 cCE("wqmiabtn",  ed000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18839
 cCE("wqmiatbn",  ee000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18840
 cCE("wqmiattn",  ef000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18841
 cCE("wqmulm",    e100080, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18842
 cCE("wqmulmr",   e300080, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18843
 cCE("wqmulwm",   ec000e0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18844
 cCE("wqmulwmr",  ee000e0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18845
 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18846
 
18847
#undef  ARM_VARIANT
18848
#define ARM_VARIANT  & arm_cext_maverick /* Cirrus Maverick instructions.  */
18849
 
18850
 cCE("cfldrs",  c100400, 2, (RMF, ADDRGLDC),          rd_cpaddr),
18851
 cCE("cfldrd",  c500400, 2, (RMD, ADDRGLDC),          rd_cpaddr),
18852
 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC),         rd_cpaddr),
18853
 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC),         rd_cpaddr),
18854
 cCE("cfstrs",  c000400, 2, (RMF, ADDRGLDC),          rd_cpaddr),
18855
 cCE("cfstrd",  c400400, 2, (RMD, ADDRGLDC),          rd_cpaddr),
18856
 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC),         rd_cpaddr),
18857
 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC),         rd_cpaddr),
18858
 cCE("cfmvsr",  e000450, 2, (RMF, RR),                rn_rd),
18859
 cCE("cfmvrs",  e100450, 2, (RR, RMF),                rd_rn),
18860
 cCE("cfmvdlr", e000410, 2, (RMD, RR),                rn_rd),
18861
 cCE("cfmvrdl", e100410, 2, (RR, RMD),                rd_rn),
18862
 cCE("cfmvdhr", e000430, 2, (RMD, RR),                rn_rd),
18863
 cCE("cfmvrdh", e100430, 2, (RR, RMD),                rd_rn),
18864
 cCE("cfmv64lr",        e000510, 2, (RMDX, RR),               rn_rd),
18865
 cCE("cfmvr64l",        e100510, 2, (RR, RMDX),               rd_rn),
18866
 cCE("cfmv64hr",        e000530, 2, (RMDX, RR),               rn_rd),
18867
 cCE("cfmvr64h",        e100530, 2, (RR, RMDX),               rd_rn),
18868
 cCE("cfmval32",        e200440, 2, (RMAX, RMFX),             rd_rn),
18869
 cCE("cfmv32al",        e100440, 2, (RMFX, RMAX),             rd_rn),
18870
 cCE("cfmvam32",        e200460, 2, (RMAX, RMFX),             rd_rn),
18871
 cCE("cfmv32am",        e100460, 2, (RMFX, RMAX),             rd_rn),
18872
 cCE("cfmvah32",        e200480, 2, (RMAX, RMFX),             rd_rn),
18873
 cCE("cfmv32ah",        e100480, 2, (RMFX, RMAX),             rd_rn),
18874
 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX),             rd_rn),
18875
 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX),             rd_rn),
18876
 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX),             rd_rn),
18877
 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX),             rd_rn),
18878
 cCE("cfmvsc32",        e2004e0, 2, (RMDS, RMDX),             mav_dspsc),
18879
 cCE("cfmv32sc",        e1004e0, 2, (RMDX, RMDS),             rd),
18880
 cCE("cfcpys",  e000400, 2, (RMF, RMF),               rd_rn),
18881
 cCE("cfcpyd",  e000420, 2, (RMD, RMD),               rd_rn),
18882
 cCE("cfcvtsd", e000460, 2, (RMD, RMF),               rd_rn),
18883
 cCE("cfcvtds", e000440, 2, (RMF, RMD),               rd_rn),
18884
 cCE("cfcvt32s",        e000480, 2, (RMF, RMFX),              rd_rn),
18885
 cCE("cfcvt32d",        e0004a0, 2, (RMD, RMFX),              rd_rn),
18886
 cCE("cfcvt64s",        e0004c0, 2, (RMF, RMDX),              rd_rn),
18887
 cCE("cfcvt64d",        e0004e0, 2, (RMD, RMDX),              rd_rn),
18888
 cCE("cfcvts32",        e100580, 2, (RMFX, RMF),              rd_rn),
18889
 cCE("cfcvtd32",        e1005a0, 2, (RMFX, RMD),              rd_rn),
18890
 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF),            rd_rn),
18891
 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD),            rd_rn),
18892
 cCE("cfrshl32",        e000550, 3, (RMFX, RMFX, RR),         mav_triple),
18893
 cCE("cfrshl64",        e000570, 3, (RMDX, RMDX, RR),         mav_triple),
18894
 cCE("cfsh32",  e000500, 3, (RMFX, RMFX, I63s),       mav_shift),
18895
 cCE("cfsh64",  e200500, 3, (RMDX, RMDX, I63s),       mav_shift),
18896
 cCE("cfcmps",  e100490, 3, (RR, RMF, RMF),           rd_rn_rm),
18897
 cCE("cfcmpd",  e1004b0, 3, (RR, RMD, RMD),           rd_rn_rm),
18898
 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX),         rd_rn_rm),
18899
 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX),         rd_rn_rm),
18900
 cCE("cfabss",  e300400, 2, (RMF, RMF),               rd_rn),
18901
 cCE("cfabsd",  e300420, 2, (RMD, RMD),               rd_rn),
18902
 cCE("cfnegs",  e300440, 2, (RMF, RMF),               rd_rn),
18903
 cCE("cfnegd",  e300460, 2, (RMD, RMD),               rd_rn),
18904
 cCE("cfadds",  e300480, 3, (RMF, RMF, RMF),          rd_rn_rm),
18905
 cCE("cfaddd",  e3004a0, 3, (RMD, RMD, RMD),          rd_rn_rm),
18906
 cCE("cfsubs",  e3004c0, 3, (RMF, RMF, RMF),          rd_rn_rm),
18907
 cCE("cfsubd",  e3004e0, 3, (RMD, RMD, RMD),          rd_rn_rm),
18908
 cCE("cfmuls",  e100400, 3, (RMF, RMF, RMF),          rd_rn_rm),
18909
 cCE("cfmuld",  e100420, 3, (RMD, RMD, RMD),          rd_rn_rm),
18910
 cCE("cfabs32", e300500, 2, (RMFX, RMFX),             rd_rn),
18911
 cCE("cfabs64", e300520, 2, (RMDX, RMDX),             rd_rn),
18912
 cCE("cfneg32", e300540, 2, (RMFX, RMFX),             rd_rn),
18913
 cCE("cfneg64", e300560, 2, (RMDX, RMDX),             rd_rn),
18914
 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX),       rd_rn_rm),
18915
 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX),       rd_rn_rm),
18916
 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX),       rd_rn_rm),
18917
 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX),       rd_rn_rm),
18918
 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX),       rd_rn_rm),
18919
 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX),       rd_rn_rm),
18920
 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX),       rd_rn_rm),
18921
 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX),       rd_rn_rm),
18922
 cCE("cfmadd32",        e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
18923
 cCE("cfmsub32",        e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
18924
 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
18925
 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
18926
};
18927
#undef ARM_VARIANT
18928
#undef THUMB_VARIANT
18929
#undef TCE
18930
#undef TCM
18931
#undef TUE
18932
#undef TUF
18933
#undef TCC
18934
#undef cCE
18935
#undef cCL
18936
#undef C3E
18937
#undef CE
18938
#undef CM
18939
#undef UE
18940
#undef UF
18941
#undef UT
18942
#undef NUF
18943
#undef nUF
18944
#undef NCE
18945
#undef nCE
18946
#undef OPS0
18947
#undef OPS1
18948
#undef OPS2
18949
#undef OPS3
18950
#undef OPS4
18951
#undef OPS5
18952
#undef OPS6
18953
#undef do_0
18954
 
18955
/* MD interface: bits in the object file.  */
18956
 
18957
/* Turn an integer of n bytes (in val) into a stream of bytes appropriate
18958
   for use in the a.out file, and store them in the array pointed to by buf.
18959
   This knows about the endian-ness of the target machine and does
18960
   THE RIGHT THING, whatever it is.  Possible values for n are 1 (byte),
18961
   2 (short) and 4 (long).  Floating numbers are put out as a series of
18962
   LITTLENUMS (shorts, here at least).  */
18963
 
18964
void
18965
md_number_to_chars (char * buf, valueT val, int n)
18966
{
18967
  if (target_big_endian)
18968
    number_to_chars_bigendian (buf, val, n);
18969
  else
18970
    number_to_chars_littleendian (buf, val, n);
18971
}
18972
 
18973
static valueT
18974
md_chars_to_number (char * buf, int n)
18975
{
18976
  valueT result = 0;
18977
  unsigned char * where = (unsigned char *) buf;
18978
 
18979
  if (target_big_endian)
18980
    {
18981
      while (n--)
18982
        {
18983
          result <<= 8;
18984
          result |= (*where++ & 255);
18985
        }
18986
    }
18987
  else
18988
    {
18989
      while (n--)
18990
        {
18991
          result <<= 8;
18992
          result |= (where[n] & 255);
18993
        }
18994
    }
18995
 
18996
  return result;
18997
}
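/* Illustrative sketch (not part of the assembler): the two routines
   above are inverses.  For val == 0x12345678 and n == 4,
   md_number_to_chars writes {0x78, 0x56, 0x34, 0x12} on a little-endian
   target and {0x12, 0x34, 0x56, 0x78} on a big-endian one, and
   md_chars_to_number recovers 0x12345678 from either buffer.  The
   hypothetical round-trip check below uses only the two functions above
   and is compiled out.  */
#if 0
static void
example_md_number_round_trip (void)
{
  char buf[4];

  md_number_to_chars (buf, (valueT) 0x12345678, 4);
  gas_assert (md_chars_to_number (buf, 4) == (valueT) 0x12345678);
}
#endif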
18998
 
18999
/* MD interface: Sections.  */
19000
 
19001
/* Estimate the size of a frag before relaxing.  Assume everything fits in
19002
   2 bytes.  */
19003
 
19004
int
19005
md_estimate_size_before_relax (fragS * fragp,
19006
                               segT    segtype ATTRIBUTE_UNUSED)
19007
{
19008
  fragp->fr_var = 2;
19009
  return 2;
19010
}
19011
 
19012
/* Convert a machine dependent frag.  */
19013
 
19014
void
19015
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
19016
{
19017
  unsigned long insn;
19018
  unsigned long old_op;
19019
  char *buf;
19020
  expressionS exp;
19021
  fixS *fixp;
19022
  int reloc_type;
19023
  int pc_rel;
19024
  int opcode;
19025
 
19026
  buf = fragp->fr_literal + fragp->fr_fix;
19027
 
19028
  old_op = bfd_get_16(abfd, buf);
19029
  if (fragp->fr_symbol)
19030
    {
19031
      exp.X_op = O_symbol;
19032
      exp.X_add_symbol = fragp->fr_symbol;
19033
    }
19034
  else
19035
    {
19036
      exp.X_op = O_constant;
19037
    }
19038
  exp.X_add_number = fragp->fr_offset;
19039
  opcode = fragp->fr_subtype;
19040
  switch (opcode)
19041
    {
19042
    case T_MNEM_ldr_pc:
19043
    case T_MNEM_ldr_pc2:
19044
    case T_MNEM_ldr_sp:
19045
    case T_MNEM_str_sp:
19046
    case T_MNEM_ldr:
19047
    case T_MNEM_ldrb:
19048
    case T_MNEM_ldrh:
19049
    case T_MNEM_str:
19050
    case T_MNEM_strb:
19051
    case T_MNEM_strh:
19052
      if (fragp->fr_var == 4)
19053
        {
19054
          insn = THUMB_OP32 (opcode);
19055
          if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
19056
            {
19057
              insn |= (old_op & 0x700) << 4;
19058
            }
19059
          else
19060
            {
19061
              insn |= (old_op & 7) << 12;
19062
              insn |= (old_op & 0x38) << 13;
19063
            }
19064
          insn |= 0x00000c00;
19065
          put_thumb32_insn (buf, insn);
19066
          reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
19067
        }
19068
      else
19069
        {
19070
          reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
19071
        }
19072
      pc_rel = (opcode == T_MNEM_ldr_pc2);
19073
      break;
19074
    case T_MNEM_adr:
19075
      if (fragp->fr_var == 4)
19076
        {
19077
          insn = THUMB_OP32 (opcode);
19078
          insn |= (old_op & 0xf0) << 4;
19079
          put_thumb32_insn (buf, insn);
19080
          reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
19081
        }
19082
      else
19083
        {
19084
          reloc_type = BFD_RELOC_ARM_THUMB_ADD;
19085
          exp.X_add_number -= 4;
19086
        }
19087
      pc_rel = 1;
19088
      break;
19089
    case T_MNEM_mov:
19090
    case T_MNEM_movs:
19091
    case T_MNEM_cmp:
19092
    case T_MNEM_cmn:
19093
      if (fragp->fr_var == 4)
19094
        {
19095
          int r0off = (opcode == T_MNEM_mov
19096
                       || opcode == T_MNEM_movs) ? 0 : 8;
19097
          insn = THUMB_OP32 (opcode);
19098
          insn = (insn & 0xe1ffffff) | 0x10000000;
19099
          insn |= (old_op & 0x700) << r0off;
19100
          put_thumb32_insn (buf, insn);
19101
          reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
19102
        }
19103
      else
19104
        {
19105
          reloc_type = BFD_RELOC_ARM_THUMB_IMM;
19106
        }
19107
      pc_rel = 0;
19108
      break;
19109
    case T_MNEM_b:
19110
      if (fragp->fr_var == 4)
19111
        {
19112
          insn = THUMB_OP32(opcode);
19113
          put_thumb32_insn (buf, insn);
19114
          reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
19115
        }
19116
      else
19117
        reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
19118
      pc_rel = 1;
19119
      break;
19120
    case T_MNEM_bcond:
19121
      if (fragp->fr_var == 4)
19122
        {
19123
          insn = THUMB_OP32(opcode);
19124
          insn |= (old_op & 0xf00) << 14;
19125
          put_thumb32_insn (buf, insn);
19126
          reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
19127
        }
19128
      else
19129
        reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
19130
      pc_rel = 1;
19131
      break;
19132
    case T_MNEM_add_sp:
19133
    case T_MNEM_add_pc:
19134
    case T_MNEM_inc_sp:
19135
    case T_MNEM_dec_sp:
19136
      if (fragp->fr_var == 4)
19137
        {
19138
          /* ??? Choose between add and addw.  */
19139
          insn = THUMB_OP32 (opcode);
19140
          insn |= (old_op & 0xf0) << 4;
19141
          put_thumb32_insn (buf, insn);
19142
          if (opcode == T_MNEM_add_pc)
19143
            reloc_type = BFD_RELOC_ARM_T32_IMM12;
19144
          else
19145
            reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
19146
        }
19147
      else
19148
        reloc_type = BFD_RELOC_ARM_THUMB_ADD;
19149
      pc_rel = 0;
19150
      break;
19151
 
19152
    case T_MNEM_addi:
19153
    case T_MNEM_addis:
19154
    case T_MNEM_subi:
19155
    case T_MNEM_subis:
19156
      if (fragp->fr_var == 4)
19157
        {
19158
          insn = THUMB_OP32 (opcode);
19159
          insn |= (old_op & 0xf0) << 4;
19160
          insn |= (old_op & 0xf) << 16;
19161
          put_thumb32_insn (buf, insn);
19162
          if (insn & (1 << 20))
19163
            reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
19164
          else
19165
            reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
19166
        }
19167
      else
19168
        reloc_type = BFD_RELOC_ARM_THUMB_ADD;
19169
      pc_rel = 0;
19170
      break;
19171
    default:
19172
      abort ();
19173
    }
19174
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
19175
                      (enum bfd_reloc_code_real) reloc_type);
19176
  fixp->fx_file = fragp->fr_file;
19177
  fixp->fx_line = fragp->fr_line;
19178
  fragp->fr_fix += fragp->fr_var;
19179
}
19180
 
19181
/* Return the size of a relaxable immediate operand instruction.
19182
   SHIFT and SIZE specify the form of the allowable immediate.  */
19183
static int
19184
relax_immediate (fragS *fragp, int size, int shift)
19185
{
19186
  offsetT offset;
19187
  offsetT mask;
19188
  offsetT low;
19189
 
19190
  /* ??? Should be able to do better than this.  */
19191
  if (fragp->fr_symbol)
19192
    return 4;
19193
 
19194
  low = (1 << shift) - 1;
19195
  mask = (1 << (shift + size)) - (1 << shift);
19196
  offset = fragp->fr_offset;
19197
  /* Force misaligned offsets to 32-bit variant.  */
19198
  if (offset & low)
19199
    return 4;
19200
  if (offset & ~mask)
19201
    return 4;
19202
  return 2;
19203
}
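/* Illustrative sketch (not part of the assembler): arm_relax_frag below
   calls relax_immediate with SIZE == 5 and SHIFT == 2 for a plain Thumb
   "ldr"/"str", giving low == 3 and mask == 0x7c.  The 16-bit encoding
   therefore survives only for constant offsets that are multiples of 4
   in the range 0..124; anything else, or a symbolic offset, forces the
   32-bit form.  Compiled-out restatement of that arithmetic:  */
#if 0
static int
example_narrow_ldr_offset_ok (offsetT offset)
{
  /* Multiple of 4 and no bits outside the imm5 << 2 field.  */
  return (offset & 3) == 0 && (offset & ~(offsetT) 0x7c) == 0;
}
#endif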
19204
 
19205
/* Get the address of a symbol during relaxation.  */
19206
static addressT
19207
relaxed_symbol_addr (fragS *fragp, long stretch)
19208
{
19209
  fragS *sym_frag;
19210
  addressT addr;
19211
  symbolS *sym;
19212
 
19213
  sym = fragp->fr_symbol;
19214
  sym_frag = symbol_get_frag (sym);
19215
  know (S_GET_SEGMENT (sym) != absolute_section
19216
        || sym_frag == &zero_address_frag);
19217
  addr = S_GET_VALUE (sym) + fragp->fr_offset;
19218
 
19219
  /* If frag has yet to be reached on this pass, assume it will
19220
     move by STRETCH just as we did.  If this is not so, it will
19221
     be because some frag in between grows, and that will force
19222
     another pass.  */
19223
 
19224
  if (stretch != 0
19225
      && sym_frag->relax_marker != fragp->relax_marker)
19226
    {
19227
      fragS *f;
19228
 
19229
      /* Adjust stretch for any alignment frag.  Note that if we have
19230
         been expanding the earlier code, the symbol may be
19231
         defined in what appears to be an earlier frag.  FIXME:
19232
         This doesn't handle the fr_subtype field, which specifies
19233
         a maximum number of bytes to skip when doing an
19234
         alignment.  */
19235
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
19236
        {
19237
          if (f->fr_type == rs_align || f->fr_type == rs_align_code)
19238
            {
19239
              if (stretch < 0)
19240
                stretch = - ((- stretch)
19241
                             & ~ ((1 << (int) f->fr_offset) - 1));
19242
              else
19243
                stretch &= ~ ((1 << (int) f->fr_offset) - 1);
19244
              if (stretch == 0)
19245
                break;
19246
            }
19247
        }
19248
      if (f != NULL)
19249
        addr += stretch;
19250
    }
19251
 
19252
  return addr;
19253
}
19254
 
19255
/* Return the size of a relaxable adr pseudo-instruction or PC-relative
19256
   load.  */
19257
static int
19258
relax_adr (fragS *fragp, asection *sec, long stretch)
19259
{
19260
  addressT addr;
19261
  offsetT val;
19262
 
19263
  /* Assume worst case for symbols not known to be in the same section.  */
19264
  if (fragp->fr_symbol == NULL
19265
      || !S_IS_DEFINED (fragp->fr_symbol)
19266
      || sec != S_GET_SEGMENT (fragp->fr_symbol)
19267
      || S_IS_WEAK (fragp->fr_symbol))
19268
    return 4;
19269
 
19270
  val = relaxed_symbol_addr (fragp, stretch);
19271
  addr = fragp->fr_address + fragp->fr_fix;
19272
  addr = (addr + 4) & ~3;
19273
  /* Force misaligned targets to 32-bit variant.  */
19274
  if (val & 3)
19275
    return 4;
19276
  val -= addr;
19277
  if (val < 0 || val > 1020)
19278
    return 4;
19279
  return 2;
19280
}
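/* Illustrative sketch (not part of the assembler): for the 16-bit "adr"
   and PC-relative load forms the base is the instruction address plus 4
   rounded down to a word boundary, and the immediate is an unsigned
   multiple of 4, which is exactly the 0..1020 window computed above.
   Compiled-out restatement:  */
#if 0
static int
example_narrow_adr_ok (addressT insn_addr, addressT target)
{
  addressT base = (insn_addr + 4) & ~(addressT) 3;

  return (target & 3) == 0 && target >= base && target - base <= 1020;
}
#endif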
19281
 
19282
/* Return the size of a relaxable add/sub immediate instruction.  */
19283
static int
19284
relax_addsub (fragS *fragp, asection *sec)
19285
{
19286
  char *buf;
19287
  int op;
19288
 
19289
  buf = fragp->fr_literal + fragp->fr_fix;
19290
  op = bfd_get_16(sec->owner, buf);
19291
  if ((op & 0xf) == ((op >> 4) & 0xf))
19292
    return relax_immediate (fragp, 8, 0);
19293
  else
19294
    return relax_immediate (fragp, 3, 0);
19295
}
19296
 
19297
 
19298
/* Return the size of a relaxable branch instruction.  BITS is the
19299
   size of the offset field in the narrow instruction.  */
19300
 
19301
static int
19302
relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
19303
{
19304
  addressT addr;
19305
  offsetT val;
19306
  offsetT limit;
19307
 
19308
  /* Assume worst case for symbols not known to be in the same section.  */
19309
  if (!S_IS_DEFINED (fragp->fr_symbol)
19310
      || sec != S_GET_SEGMENT (fragp->fr_symbol)
19311
      || S_IS_WEAK (fragp->fr_symbol))
19312
    return 4;
19313
 
19314
#ifdef OBJ_ELF
19315
  if (S_IS_DEFINED (fragp->fr_symbol)
19316
      && ARM_IS_FUNC (fragp->fr_symbol))
19317
      return 4;
19318
 
19319
  /* PR 12532.  Global symbols with default visibility might
19320
     be preempted, so do not relax relocations to them.  */
19321
  if ((ELF_ST_VISIBILITY (S_GET_OTHER (fragp->fr_symbol)) == STV_DEFAULT)
19322
      && (! S_IS_LOCAL (fragp->fr_symbol)))
19323
    return 4;
19324
#endif
19325
 
19326
  val = relaxed_symbol_addr (fragp, stretch);
19327
  addr = fragp->fr_address + fragp->fr_fix + 4;
19328
  val -= addr;
19329
 
19330
  /* The offset is encoded as a signed value scaled by 2, so the
     reachable byte range is +/- (1 << BITS).  */
19331
  limit = 1 << bits;
19332
  if (val >= limit || val < -limit)
19333
    return 4;
19334
  return 2;
19335
}
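/* Illustrative sketch (not part of the assembler): arm_relax_frag below
   uses BITS == 11 for an unconditional Thumb "b" and BITS == 8 for a
   conditional branch, so the byte displacement from PC + 4 must fall in
   [-2048, 2048) or [-256, 256) respectively for the 16-bit encoding to
   survive (Thumb displacements are always even, so the open upper bound
   costs nothing).  Compiled-out restatement:  */
#if 0
static int
example_narrow_branch_ok (offsetT displacement, int bits)
{
  offsetT limit = (offsetT) 1 << bits;

  return displacement >= -limit && displacement < limit;
}
#endif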
19336
 
19337
 
19338
/* Relax a machine dependent frag.  This returns the amount by which
19339
   the current size of the frag should change.  */
19340
 
19341
int
19342
arm_relax_frag (asection *sec, fragS *fragp, long stretch)
19343
{
19344
  int oldsize;
19345
  int newsize;
19346
 
19347
  oldsize = fragp->fr_var;
19348
  switch (fragp->fr_subtype)
19349
    {
19350
    case T_MNEM_ldr_pc2:
19351
      newsize = relax_adr (fragp, sec, stretch);
19352
      break;
19353
    case T_MNEM_ldr_pc:
19354
    case T_MNEM_ldr_sp:
19355
    case T_MNEM_str_sp:
19356
      newsize = relax_immediate (fragp, 8, 2);
19357
      break;
19358
    case T_MNEM_ldr:
19359
    case T_MNEM_str:
19360
      newsize = relax_immediate (fragp, 5, 2);
19361
      break;
19362
    case T_MNEM_ldrh:
19363
    case T_MNEM_strh:
19364
      newsize = relax_immediate (fragp, 5, 1);
19365
      break;
19366
    case T_MNEM_ldrb:
19367
    case T_MNEM_strb:
19368
      newsize = relax_immediate (fragp, 5, 0);
19369
      break;
19370
    case T_MNEM_adr:
19371
      newsize = relax_adr (fragp, sec, stretch);
19372
      break;
19373
    case T_MNEM_mov:
19374
    case T_MNEM_movs:
19375
    case T_MNEM_cmp:
19376
    case T_MNEM_cmn:
19377
      newsize = relax_immediate (fragp, 8, 0);
19378
      break;
19379
    case T_MNEM_b:
19380
      newsize = relax_branch (fragp, sec, 11, stretch);
19381
      break;
19382
    case T_MNEM_bcond:
19383
      newsize = relax_branch (fragp, sec, 8, stretch);
19384
      break;
19385
    case T_MNEM_add_sp:
19386
    case T_MNEM_add_pc:
19387
      newsize = relax_immediate (fragp, 8, 2);
19388
      break;
19389
    case T_MNEM_inc_sp:
19390
    case T_MNEM_dec_sp:
19391
      newsize = relax_immediate (fragp, 7, 2);
19392
      break;
19393
    case T_MNEM_addi:
19394
    case T_MNEM_addis:
19395
    case T_MNEM_subi:
19396
    case T_MNEM_subis:
19397
      newsize = relax_addsub (fragp, sec);
19398
      break;
19399
    default:
19400
      abort ();
19401
    }
19402
 
19403
  fragp->fr_var = newsize;
19404
  /* Freeze wide instructions that are at or before the same location as
19405
     in the previous pass.  This avoids infinite loops.
19406
     Don't freeze them unconditionally because targets may be artificially
19407
     misaligned by the expansion of preceding frags.  */
19408
  if (stretch <= 0 && newsize > 2)
19409
    {
19410
      md_convert_frag (sec->owner, sec, fragp);
19411
      frag_wane (fragp);
19412
    }
19413
 
19414
  return newsize - oldsize;
19415
}
19416
 
19417
/* Round up a section size to the appropriate boundary.  */
19418
 
19419
valueT
19420
md_section_align (segT   segment ATTRIBUTE_UNUSED,
19421
                  valueT size)
19422
{
19423
#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
19424
  if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
19425
    {
19426
      /* For a.out, force the section size to be aligned.  If we don't do
19427
         this, BFD will align it for us, but it will not write out the
19428
         final bytes of the section.  This may be a bug in BFD, but it is
19429
         easier to fix it here since that is how the other a.out targets
19430
         work.  */
19431
      int align;
19432
 
19433
      align = bfd_get_section_alignment (stdoutput, segment);
19434
      size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
19435
    }
19436
#endif
19437
 
19438
  return size;
19439
}
19440
 
19441
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
19442
   of an rs_align_code fragment.  */
19443
 
19444
void
19445
arm_handle_align (fragS * fragP)
19446
{
19447
  static char const arm_noop[2][2][4] =
19448
    {
19449
      {  /* ARMv1 */
19450
        {0x00, 0x00, 0xa0, 0xe1},  /* LE */
19451
        {0xe1, 0xa0, 0x00, 0x00},  /* BE */
19452
      },
19453
      {  /* ARMv6k */
19454
        {0x00, 0xf0, 0x20, 0xe3},  /* LE */
19455
        {0xe3, 0x20, 0xf0, 0x00},  /* BE */
19456
      },
19457
    };
19458
  static char const thumb_noop[2][2][2] =
19459
    {
19460
      {  /* Thumb-1 */
19461
        {0xc0, 0x46},  /* LE */
19462
        {0x46, 0xc0},  /* BE */
19463
      },
19464
      {  /* Thumb-2 */
19465
        {0x00, 0xbf},  /* LE */
19466
        {0xbf, 0x00}   /* BE */
19467
      }
19468
    };
19469
  static char const wide_thumb_noop[2][4] =
19470
    {  /* Wide Thumb-2 */
19471
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
19472
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
19473
    };
19474
 
19475
  unsigned bytes, fix, noop_size;
19476
  char * p;
19477
  const char * noop;
19478
  const char *narrow_noop = NULL;
19479
#ifdef OBJ_ELF
19480
  enum mstate state;
19481
#endif
19482
 
19483
  if (fragP->fr_type != rs_align_code)
19484
    return;
19485
 
19486
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
19487
  p = fragP->fr_literal + fragP->fr_fix;
19488
  fix = 0;
19489
 
19490
  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
19491
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
19492
 
19493
  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);
19494
 
19495
  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
19496
    {
19497
      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
19498
        {
19499
          narrow_noop = thumb_noop[1][target_big_endian];
19500
          noop = wide_thumb_noop[target_big_endian];
19501
        }
19502
      else
19503
        noop = thumb_noop[0][target_big_endian];
19504
      noop_size = 2;
19505
#ifdef OBJ_ELF
19506
      state = MAP_THUMB;
19507
#endif
19508
    }
19509
  else
19510
    {
19511
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k) != 0]
19512
                     [target_big_endian];
19513
      noop_size = 4;
19514
#ifdef OBJ_ELF
19515
      state = MAP_ARM;
19516
#endif
19517
    }
19518
 
19519
  fragP->fr_var = noop_size;
19520
 
19521
  if (bytes & (noop_size - 1))
19522
    {
19523
      fix = bytes & (noop_size - 1);
19524
#ifdef OBJ_ELF
19525
      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
19526
#endif
19527
      memset (p, 0, fix);
19528
      p += fix;
19529
      bytes -= fix;
19530
    }
19531
 
19532
  if (narrow_noop)
19533
    {
19534
      if (bytes & noop_size)
19535
        {
19536
          /* Insert a narrow noop.  */
19537
          memcpy (p, narrow_noop, noop_size);
19538
          p += noop_size;
19539
          bytes -= noop_size;
19540
          fix += noop_size;
19541
        }
19542
 
19543
      /* Use wide noops for the remainder.  */
19544
      noop_size = 4;
19545
    }
19546
 
19547
  while (bytes >= noop_size)
19548
    {
19549
      memcpy (p, noop, noop_size);
19550
      p += noop_size;
19551
      bytes -= noop_size;
19552
      fix += noop_size;
19553
    }
19554
 
19555
  fragP->fr_fix += fix;
19556
}
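/* Illustrative note (not part of the assembler): on a Thumb-2 capable
   target a 6-byte alignment gap is filled by the code above with one
   narrow "nop" (0xbf00) followed by one wide "nop.w" (0xf3af 0x8000),
   while an odd-sized gap first receives a zero pad byte that is marked
   with a data mapping symbol so the noops themselves stay aligned.  */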
19557
 
19558
/* Called from md_do_align.  Used to create an alignment
19559
   frag in a code section.  */
19560
 
19561
void
19562
arm_frag_align_code (int n, int max)
19563
{
19564
  char * p;
19565
 
19566
  /* We assume that there will never be a requirement
19567
     to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes.  */
19568
  if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
19569
    {
19570
      char err_msg[128];
19571
 
19572
      sprintf (err_msg,
19573
        _("alignments greater than %d bytes not supported in .text sections."),
19574
        MAX_MEM_FOR_RS_ALIGN_CODE + 1);
19575
      as_fatal ("%s", err_msg);
19576
    }
19577
 
19578
  p = frag_var (rs_align_code,
19579
                MAX_MEM_FOR_RS_ALIGN_CODE,
19580
                1,
19581
                (relax_substateT) max,
19582
                (symbolS *) NULL,
19583
                (offsetT) n,
19584
                (char *) NULL);
19585
  *p = 0;
19586
}
19587
 
19588
/* Perform target specific initialisation of a frag.
19589
   Note - despite the name this initialisation is not done when the frag
19590
   is created, but only when its type is assigned.  A frag can be created
19591
   and used a long time before its type is set, so beware of assuming that
19592
   this initialisation is performed first.  */
19593
 
19594
#ifndef OBJ_ELF
19595
void
19596
arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
19597
{
19598
  /* Record whether this frag is in an ARM or a THUMB area.  */
19599
  fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
19600
}
19601
 
19602
#else /* OBJ_ELF is defined.  */
19603
void
19604
arm_init_frag (fragS * fragP, int max_chars)
19605
{
19606
  /* If the current ARM vs THUMB mode has not already
19607
     been recorded into this frag then do so now.  */
19608
  if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
19609
    {
19610
      fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
19611
 
19612
      /* Record a mapping symbol for alignment frags.  We will delete this
19613
         later if the alignment ends up empty.  */
19614
      switch (fragP->fr_type)
19615
        {
19616
          case rs_align:
19617
          case rs_align_test:
19618
          case rs_fill:
19619
            mapping_state_2 (MAP_DATA, max_chars);
19620
            break;
19621
          case rs_align_code:
19622
            mapping_state_2 (thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
19623
            break;
19624
          default:
19625
            break;
19626
        }
19627
    }
19628
}
19629
 
19630
/* When we change sections we need to issue a new mapping symbol.  */
19631
 
19632
void
19633
arm_elf_change_section (void)
19634
{
19635
  /* Link an unlinked unwind index table section to the .text section.  */
19636
  if (elf_section_type (now_seg) == SHT_ARM_EXIDX
19637
      && elf_linked_to_section (now_seg) == NULL)
19638
    elf_linked_to_section (now_seg) = text_section;
19639
}
19640
 
19641
int
19642
arm_elf_section_type (const char * str, size_t len)
19643
{
19644
  if (len == 5 && strncmp (str, "exidx", 5) == 0)
19645
    return SHT_ARM_EXIDX;
19646
 
19647
  return -1;
19648
}
19649
 
19650
/* Code to deal with unwinding tables.  */
19651
 
19652
static void add_unwind_adjustsp (offsetT);
19653
 
19654
/* Generate any deferred unwind frame offset.  */
19655
 
19656
static void
19657
flush_pending_unwind (void)
19658
{
19659
  offsetT offset;
19660
 
19661
  offset = unwind.pending_offset;
19662
  unwind.pending_offset = 0;
19663
  if (offset != 0)
19664
    add_unwind_adjustsp (offset);
19665
}
19666
 
19667
/* Add an opcode to this list for this function.  Two-byte opcodes should
19668
   be passed as op[0] << 8 | op[1].  The list of opcodes is built in reverse
19669
   order.  */
19670
 
19671
static void
19672
add_unwind_opcode (valueT op, int length)
19673
{
19674
  /* Add any deferred stack adjustment.  */
19675
  if (unwind.pending_offset)
19676
    flush_pending_unwind ();
19677
 
19678
  unwind.sp_restored = 0;
19679
 
19680
  if (unwind.opcode_count + length > unwind.opcode_alloc)
19681
    {
19682
      unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
19683
      if (unwind.opcodes)
19684
        unwind.opcodes = (unsigned char *) xrealloc (unwind.opcodes,
19685
                                                     unwind.opcode_alloc);
19686
      else
19687
        unwind.opcodes = (unsigned char *) xmalloc (unwind.opcode_alloc);
19688
    }
19689
  while (length > 0)
19690
    {
19691
      length--;
19692
      unwind.opcodes[unwind.opcode_count] = op & 0xff;
19693
      op >>= 8;
19694
      unwind.opcode_count++;
19695
    }
19696
}
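/* Illustrative sketch (not part of the assembler): because the array is
   filled in reverse, a two-byte opcode passed as op[0] << 8 | op[1] is
   stored low byte first; when the finished list is later emitted
   back-to-front the stream reads op[0] then op[1], as the comment above
   requires.  Compiled-out restatement of the byte split:  */
#if 0
static void
example_split_two_byte_opcode (valueT op, unsigned char out[2])
{
  out[0] = op & 0xff;         /* Appended first: op[1], the low byte.  */
  out[1] = (op >> 8) & 0xff;  /* Appended second: op[0], the high byte.  */
}
#endif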
19697
 
19698
/* Add unwind opcodes to adjust the stack pointer.  */
19699
 
19700
static void
19701
add_unwind_adjustsp (offsetT offset)
19702
{
19703
  valueT op;
19704
 
19705
  if (offset > 0x200)
19706
    {
19707
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
19708
      char bytes[5];
19709
      int n;
19710
      valueT o;
19711
 
19712
      /* Long form: 0xb2, uleb128.  */
19713
      /* This might not fit in a word so add the individual bytes,
19714
         remembering the list is built in reverse order.  */
19715
      o = (valueT) ((offset - 0x204) >> 2);
19716
      if (o == 0)
19717
        add_unwind_opcode (0, 1);
19718
 
19719
      /* Calculate the uleb128 encoding of the offset.  */
19720
      n = 0;
19721
      while (o)
19722
        {
19723
          bytes[n] = o & 0x7f;
19724
          o >>= 7;
19725
          if (o)
19726
            bytes[n] |= 0x80;
19727
          n++;
19728
        }
19729
      /* Add the insn.  */
19730
      for (; n; n--)
19731
        add_unwind_opcode (bytes[n - 1], 1);
19732
      add_unwind_opcode (0xb2, 1);
19733
    }
19734
  else if (offset > 0x100)
19735
    {
19736
      /* Two short opcodes.  */
19737
      add_unwind_opcode (0x3f, 1);
19738
      op = (offset - 0x104) >> 2;
19739
      add_unwind_opcode (op, 1);
19740
    }
19741
  else if (offset > 0)
19742
    {
19743
      /* Short opcode.  */
19744
      op = (offset - 4) >> 2;
19745
      add_unwind_opcode (op, 1);
19746
    }
19747
  else if (offset < 0)
19748
    {
19749
      offset = -offset;
19750
      while (offset > 0x100)
19751
        {
19752
          add_unwind_opcode (0x7f, 1);
19753
          offset -= 0x100;
19754
        }
19755
      op = ((offset - 4) >> 2) | 0x40;
19756
      add_unwind_opcode (op, 1);
19757
    }
19758
}
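/* Illustrative sketch (not part of the assembler): a frame that pushes
   0x300 bytes takes the long form above.  The operand is
   (0x300 - 0x204) >> 2 == 0x3f, whose uleb128 encoding is the single
   byte 0x3f; because the opcode list is built in reverse, 0x3f is added
   before 0xb2, so the emitted stream reads 0xb2 0x3f and an unwinder
   recovers 0x204 + (0x3f << 2) == 0x300.  Compiled-out sketch of the
   uleb128 step:  */
#if 0
static int
example_uleb128_encode (valueT o, unsigned char bytes[5])
{
  int n = 0;

  do
    {
      bytes[n] = o & 0x7f;
      o >>= 7;
      if (o)
        bytes[n] |= 0x80;       /* Continuation bit on all but the last.  */
      n++;
    }
  while (o);

  return n;                     /* 1..5 bytes for a 32-bit value.  */
}
#endif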
19759
 
19760
/* Finish the list of unwind opcodes for this function.  */
19761
static void
19762
finish_unwind_opcodes (void)
19763
{
19764
  valueT op;
19765
 
19766
  if (unwind.fp_used)
19767
    {
19768
      /* Adjust sp as necessary.  */
19769
      unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
19770
      flush_pending_unwind ();
19771
 
19772
      /* After restoring sp from the frame pointer.  */
19773
      op = 0x90 | unwind.fp_reg;
19774
      add_unwind_opcode (op, 1);
19775
    }
19776
  else
19777
    flush_pending_unwind ();
19778
}
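/* Illustrative note (not part of the assembler): with the usual ARM
   frame pointer in r11 the byte appended above is 0x90 | 11 == 0x9b,
   the EHABI opcode meaning "vsp = r11"; because the list is built in
   reverse, it precedes the flushed stack adjustment in the emitted
   unwind stream.  */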
19779
 
19780
 
19781
/* Start an exception table entry.  If idx is nonzero this is an index table
19782
   entry.  */
19783
 
19784
static void
19785
start_unwind_section (const segT text_seg, int idx)
19786
{
19787
  const char * text_name;
19788
  const char * prefix;
19789
  const char * prefix_once;
19790
  const char * group_name;
19791
  size_t prefix_len;
19792
  size_t text_len;
19793
  char * sec_name;
19794
  size_t sec_name_len;
19795
  int type;
19796
  int flags;
19797
  int linkonce;
19798
 
19799
  if (idx)
19800
    {
19801
      prefix = ELF_STRING_ARM_unwind;
19802
      prefix_once = ELF_STRING_ARM_unwind_once;
19803
      type = SHT_ARM_EXIDX;
19804
    }
19805
  else
19806
    {
19807
      prefix = ELF_STRING_ARM_unwind_info;
19808
      prefix_once = ELF_STRING_ARM_unwind_info_once;
19809
      type = SHT_PROGBITS;
19810
    }
19811
 
19812
  text_name = segment_name (text_seg);
19813
  if (streq (text_name, ".text"))
19814
    text_name = "";
19815
 
19816
  if (strncmp (text_name, ".gnu.linkonce.t.",
19817
               strlen (".gnu.linkonce.t.")) == 0)
19818
    {
19819
      prefix = prefix_once;
19820
      text_name += strlen (".gnu.linkonce.t.");
19821
    }
19822
 
19823
  prefix_len = strlen (prefix);
19824
  text_len = strlen (text_name);
19825
  sec_name_len = prefix_len + text_len;
19826
  sec_name = (char *) xmalloc (sec_name_len + 1);
19827
  memcpy (sec_name, prefix, prefix_len);
19828
  memcpy (sec_name + prefix_len, text_name, text_len);
19829
  sec_name[prefix_len + text_len] = '\0';
19830
 
19831
  flags = SHF_ALLOC;
19832
  linkonce = 0;
19833
  group_name = 0;
19834
 
19835
  /* Handle COMDAT group.  */
19836
  if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
19837
    {
19838
      group_name = elf_group_name (text_seg);
19839
      if (group_name == NULL)
19840
        {
19841
          as_bad (_("Group section `%s' has no group signature"),
19842
                  segment_name (text_seg));
19843
          ignore_rest_of_line ();
19844
          return;
19845
        }
19846
      flags |= SHF_GROUP;
19847
      linkonce = 1;
19848
    }
19849
 
19850
  obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);
19851
 
19852
  /* Set the section link for index tables.  */
19853
  if (idx)
19854
    elf_linked_to_section (now_seg) = text_seg;
19855
}
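/* Naming sketch, assuming the usual definitions of ELF_STRING_ARM_unwind
   and ELF_STRING_ARM_unwind_info in elf/arm.h (".ARM.exidx" and
   ".ARM.extab"): a function in ".text" gets sections ".ARM.exidx" and
   ".ARM.extab", while one in ".text.foo" gets ".ARM.exidx.text.foo" and
   ".ARM.extab.text.foo"; a ".gnu.linkonce.t." text section is mapped to
   the _once prefixes instead.  */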
19856
 
19857
 
19858
/* Start an unwind table entry.  HAVE_DATA is nonzero if we have additional
19859
   personality routine data.  Returns zero, or the index table value for
19860
   an inline entry.  */
19861
 
19862
static valueT
19863
create_unwind_entry (int have_data)
19864
{
19865
  int size;
19866
  addressT where;
19867
  char *ptr;
19868
  /* The current word of data.  */
19869
  valueT data;
19870
  /* The number of bytes left in this word.  */
19871
  int n;
19872
 
19873
  finish_unwind_opcodes ();
19874
 
19875
  /* Remember the current text section.  */
19876
  unwind.saved_seg = now_seg;
19877
  unwind.saved_subseg = now_subseg;
19878
 
19879
  start_unwind_section (now_seg, 0);
19880
 
19881
  if (unwind.personality_routine == NULL)
19882
    {
19883
      if (unwind.personality_index == -2)
19884
        {
19885
          if (have_data)
19886
            as_bad (_("handlerdata in cantunwind frame"));
19887
          return 1; /* EXIDX_CANTUNWIND.  */
19888
        }
19889
 
19890
      /* Use a default personality routine if none is specified.  */
19891
      if (unwind.personality_index == -1)
19892
        {
19893
          if (unwind.opcode_count > 3)
19894
            unwind.personality_index = 1;
19895
          else
19896
            unwind.personality_index = 0;
19897
        }
19898
 
19899
      /* Space for the personality routine entry.  */
19900
      if (unwind.personality_index == 0)
19901
        {
19902
          if (unwind.opcode_count > 3)
19903
            as_bad (_("too many unwind opcodes for personality routine 0"));
19904
 
19905
          if (!have_data)
19906
            {
19907
              /* All the data is inline in the index table.  */
19908
              data = 0x80;
19909
              n = 3;
19910
              while (unwind.opcode_count > 0)
19911
                {
19912
                  unwind.opcode_count--;
19913
                  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
19914
                  n--;
19915
                }
19916
 
19917
              /* Pad with "finish" opcodes.  */
19918
              while (n--)
19919
                data = (data << 8) | 0xb0;
19920
 
19921
              return data;
19922
            }
19923
          size = 0;
19924
        }
19925
      else
19926
        /* We get two opcodes "free" in the first word.  */
19927
        size = unwind.opcode_count - 2;
19928
    }
19929
  else
19930 166 khays
    {
19931
      gas_assert (unwind.personality_index == -1);
19932 16 khays
 
19933 166 khays
      /* An extra byte is required for the opcode count.  */
19934
      size = unwind.opcode_count + 1;
19935
    }
19936
 
19937 16 khays
  size = (size + 3) >> 2;
19938
  if (size > 0xff)
19939
    as_bad (_("too many unwind opcodes"));
19940
 
19941
  frag_align (2, 0, 0);
19942
  record_alignment (now_seg, 2);
19943
  unwind.table_entry = expr_build_dot ();
19944
 
19945
  /* Allocate the table entry.  */
19946
  ptr = frag_more ((size << 2) + 4);
19947 166 khays
  /* PR 13449: Zero the table entries in case some of them are not used.  */
19948
  memset (ptr, 0, (size << 2) + 4);
19949 16 khays
  where = frag_now_fix () - ((size << 2) + 4);
19950
 
19951
  switch (unwind.personality_index)
19952
    {
19953
    case -1:
19954
      /* ??? Should this be a PLT generating relocation?  */
19955
      /* Custom personality routine.  */
19956
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
19957
               BFD_RELOC_ARM_PREL31);
19958
 
19959
      where += 4;
19960
      ptr += 4;
19961
 
19962
      /* Set the first byte to the number of additional words.  */
19963 166 khays
      data = size > 0 ? size - 1 : 0;
19964 16 khays
      n = 3;
19965
      break;
19966
 
19967
    /* ABI defined personality routines.  */
19968
    case 0:
19969
      /* Three opcode bytes are packed into the first word.  */
19970
      data = 0x80;
19971
      n = 3;
19972
      break;
19973
 
19974
    case 1:
19975
    case 2:
19976
      /* The size and first two opcode bytes go in the first word.  */
19977
      data = ((0x80 + unwind.personality_index) << 8) | size;
19978
      n = 2;
19979
      break;
19980
 
19981
    default:
19982
      /* Should never happen.  */
19983
      abort ();
19984
    }
19985
 
19986
  /* Pack the opcodes into words (MSB first), reversing the list at the same
19987
     time.  */
19988
  while (unwind.opcode_count > 0)
19989
    {
19990
      if (n == 0)
19991
        {
19992
          md_number_to_chars (ptr, data, 4);
19993
          ptr += 4;
19994
          n = 4;
19995
          data = 0;
19996
        }
19997
      unwind.opcode_count--;
19998
      n--;
19999
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
20000
    }
20001
 
20002
  /* Finish off the last word.  */
20003
  if (n < 4)
20004
    {
20005
      /* Pad with "finish" opcodes.  */
20006
      while (n--)
20007
        data = (data << 8) | 0xb0;
20008
 
20009
      md_number_to_chars (ptr, data, 4);
20010
    }
20011
 
20012
  if (!have_data)
20013
    {
20014
      /* Add an empty descriptor if there is no user-specified data.   */
20015
      ptr = frag_more (4);
20016
      md_number_to_chars (ptr, 0, 4);
20017
    }
20018
 
20019
  return 0;
20020
}
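/* Worked example (sketch) of the inline case above: if the only unwind
   opcode is 0x03 ("vsp = vsp + 16") and there is no handler data, the
   personality index defaults to 0 and the packing loop forms
   data = (0x80 << 8) | 0x03, then pads with two 0xb0 ("finish") opcodes,
   returning 0x8003b0b0 as the value to place directly in the index
   table.  */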
20021
 
20022
 
20023
/* Initialize the DWARF-2 unwind information for this procedure.  */
20024
 
20025
void
20026
tc_arm_frame_initial_instructions (void)
20027
{
20028
  cfi_add_CFA_def_cfa (REG_SP, 0);
20029
}
20030
#endif /* OBJ_ELF */
20031
 
20032
/* Convert REGNAME to a DWARF-2 register number.  */
20033
 
20034
int
20035
tc_arm_regname_to_dw2regnum (char *regname)
20036
{
20037
  int reg = arm_reg_parse (&regname, REG_TYPE_RN);
20038
 
20039
  if (reg == FAIL)
20040
    return -1;
20041
 
20042
  return reg;
20043
}
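/* Example mapping (sketch): for the ARM core registers the DWARF numbers
   equal the architectural numbers, so "r0".."r15" map to 0..15 and the
   usual aliases resolve the same way ("fp" -> 11, "sp" -> 13, "lr" -> 14,
   "pc" -> 15); anything arm_reg_parse rejects yields -1.  */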
20044
 
20045
#ifdef TE_PE
20046
void
20047
tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
20048
{
20049
  expressionS exp;
20050
 
20051
  exp.X_op = O_secrel;
20052
  exp.X_add_symbol = symbol;
20053
  exp.X_add_number = 0;
20054
  emit_expr (&exp, size);
20055
}
20056
#endif
20057
 
20058
/* MD interface: Symbol and relocation handling.  */
20059
 
20060
/* Return the address within the segment that a PC-relative fixup is
20061
   relative to.  For ARM, PC-relative fixups applied to instructions
20062
   are generally relative to the location of the fixup plus 8 bytes.
20063
   Thumb branches are offset by 4, and Thumb loads relative to PC
20064
   require special handling.  */
20065
 
20066
long
20067
md_pcrel_from_section (fixS * fixP, segT seg)
20068
{
20069
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
20070
 
20071
  /* If this is pc-relative and we are going to emit a relocation
20072
     then we just want to put out any pipeline compensation that the linker
20073
     will need.  Otherwise we want to use the calculated base.
20074
     For WinCE we skip the bias for externals as well, since this
20075
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
20076
  if (fixP->fx_pcrel
20077
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
20078
          || (arm_force_relocation (fixP)
20079
#ifdef TE_WINCE
20080
              && !S_IS_EXTERNAL (fixP->fx_addsy)
20081
#endif
20082
              )))
20083
    base = 0;
20084
 
20085
 
20086
  switch (fixP->fx_r_type)
20087
    {
20088
      /* PC relative addressing on the Thumb is slightly odd as the
20089
         bottom two bits of the PC are forced to zero for the
20090
         calculation.  This happens *after* application of the
20091
         pipeline offset.  However, Thumb adrl already adjusts for
20092
         this, so we need not do it again.  */
20093
    case BFD_RELOC_ARM_THUMB_ADD:
20094
      return base & ~3;
20095
 
20096
    case BFD_RELOC_ARM_THUMB_OFFSET:
20097
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
20098
    case BFD_RELOC_ARM_T32_ADD_PC12:
20099
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
20100
      return (base + 4) & ~3;
20101
 
20102
      /* Thumb branches are simply offset by +4.  */
20103
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
20104
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
20105
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
20106
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
20107
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
20108
      return base + 4;
20109
 
20110
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
20111
      if (fixP->fx_addsy
20112
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20113
          && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
20114
          && ARM_IS_FUNC (fixP->fx_addsy)
20115
          && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
20116
        base = fixP->fx_where + fixP->fx_frag->fr_address;
20117
       return base + 4;
20118
 
20119
      /* BLX is like branches above, but forces the low two bits of PC to
20120
         zero.  */
20121
    case BFD_RELOC_THUMB_PCREL_BLX:
20122
      if (fixP->fx_addsy
20123
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20124
          && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
20125
          && THUMB_IS_FUNC (fixP->fx_addsy)
20126
          && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
20127
        base = fixP->fx_where + fixP->fx_frag->fr_address;
20128
      return (base + 4) & ~3;
20129
 
20130
      /* ARM mode branches are offset by +8.  However, the Windows CE
20131
         loader expects the relocation not to take this into account.  */
20132
    case BFD_RELOC_ARM_PCREL_BLX:
20133
      if (fixP->fx_addsy
20134
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20135
          && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
20136
          && ARM_IS_FUNC (fixP->fx_addsy)
20137
          && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
20138
        base = fixP->fx_where + fixP->fx_frag->fr_address;
20139
      return base + 8;
20140
 
20141
    case BFD_RELOC_ARM_PCREL_CALL:
20142
      if (fixP->fx_addsy
20143
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20144
          && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
20145
          && THUMB_IS_FUNC (fixP->fx_addsy)
20146
          && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
20147
        base = fixP->fx_where + fixP->fx_frag->fr_address;
20148
      return base + 8;
20149
 
20150
    case BFD_RELOC_ARM_PCREL_BRANCH:
20151
    case BFD_RELOC_ARM_PCREL_JUMP:
20152
    case BFD_RELOC_ARM_PLT32:
20153
#ifdef TE_WINCE
20154
      /* When handling fixups immediately, because we have already
20155
         discovered the value of a symbol, or the address of the frag involved,
20156
         we must account for the +8 offset ourselves, as the OS loader will never see the reloc;
20157
         see fixup_segment() in write.c
20158
         The S_IS_EXTERNAL test handles the case of global symbols.
20159
         Those need the calculated base, not just the pipeline compensation the linker will need.  */
20160
      if (fixP->fx_pcrel
20161
          && fixP->fx_addsy != NULL
20162
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20163
          && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
20164
        return base + 8;
20165
      return base;
20166
#else
20167
      return base + 8;
20168
#endif
20169
 
20170
 
20171
      /* ARM mode loads relative to PC are also offset by +8.  Unlike
20172
         branches, the Windows CE loader *does* expect the relocation
20173
         to take this into account.  */
20174
    case BFD_RELOC_ARM_OFFSET_IMM:
20175
    case BFD_RELOC_ARM_OFFSET_IMM8:
20176
    case BFD_RELOC_ARM_HWLITERAL:
20177
    case BFD_RELOC_ARM_LITERAL:
20178
    case BFD_RELOC_ARM_CP_OFF_IMM:
20179
      return base + 8;
20180
 
20181
 
20182
      /* Other PC-relative relocations are un-offset.  */
20183
    default:
20184
      return base;
20185
    }
20186
}
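/* Worked example (sketch): for an ARM-state "b" assembled at address
   0x1000 whose target is a local label at 0x1010, this returns
   0x1000 + 8 = 0x1008, so the fixup value becomes 0x1010 - 0x1008 = 8 and
   md_apply_fix later stores 8 >> 2 = 2 in the 24-bit branch field.  */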
20187
 
20188
/* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
20189
   Otherwise we have no need to default values of symbols.  */
20190
 
20191
symbolS *
20192
md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
20193
{
20194
#ifdef OBJ_ELF
20195
  if (name[0] == '_' && name[1] == 'G'
20196
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
20197
    {
20198
      if (!GOT_symbol)
20199
        {
20200
          if (symbol_find (name))
20201
            as_bad (_("GOT already in the symbol table"));
20202
 
20203
          GOT_symbol = symbol_new (name, undefined_section,
20204
                                   (valueT) 0, & zero_address_frag);
20205
        }
20206
 
20207
      return GOT_symbol;
20208
    }
20209
#endif
20210
 
20211
  return NULL;
20212
}
20213
 
20214
/* Subroutine of md_apply_fix.   Check to see if an immediate can be
20215
   computed as two separate immediate values, added together.  We
20216
   already know that this value cannot be computed by just one ARM
20217
   instruction.  */
20218
 
20219
static unsigned int
20220
validate_immediate_twopart (unsigned int   val,
20221
                            unsigned int * highpart)
20222
{
20223
  unsigned int a;
20224
  unsigned int i;
20225
 
20226
  for (i = 0; i < 32; i += 2)
20227
    if (((a = rotate_left (val, i)) & 0xff) != 0)
20228
      {
20229
        if (a & 0xff00)
20230
          {
20231
            if (a & ~ 0xffff)
20232
              continue;
20233
            * highpart = (a  >> 8) | ((i + 24) << 7);
20234
          }
20235
        else if (a & 0xff0000)
20236
          {
20237
            if (a & 0xff000000)
20238
              continue;
20239
            * highpart = (a >> 16) | ((i + 16) << 7);
20240
          }
20241
        else
20242
          {
20243
            gas_assert (a & 0xff000000);
20244
            * highpart = (a >> 24) | ((i + 8) << 7);
20245
          }
20246
 
20247
        return (a & 0xff) | (i << 7);
20248
      }
20249
 
20250
  return FAIL;
20251
}
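/* Worked example (sketch): 0x10001 is not a valid single ARM immediate,
   but with i == 0 the low byte gives the return value 0x001 (the constant
   1) and *highpart becomes (0x10001 >> 16) | ((0 + 16) << 7) == 0x801,
   i.e. 1 rotated right by 16 == 0x10000; the two encodings add back up to
   0x10001, which is how the ADRL expansion below uses them.  */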
20252
 
20253
static int
20254
validate_offset_imm (unsigned int val, int hwse)
20255
{
20256
  if ((hwse && val > 255) || val > 4095)
20257
    return FAIL;
20258
  return val;
20259
}
20260
 
20261
/* Subroutine of md_apply_fix.   Do those data_ops which can take a
20262
   negative immediate constant by altering the instruction.  A bit of
20263
   a hack really.
20264
        MOV <-> MVN
20265
        AND <-> BIC
20266
        ADC <-> SBC
20267
        by inverting the second operand, and
20268
        ADD <-> SUB
20269
        CMP <-> CMN
20270
        by negating the second operand.  */
20271
 
20272
static int
20273
negate_data_op (unsigned long * instruction,
20274
                unsigned long   value)
20275
{
20276
  int op, new_inst;
20277
  unsigned long negated, inverted;
20278
 
20279
  negated = encode_arm_immediate (-value);
20280
  inverted = encode_arm_immediate (~value);
20281
 
20282
  op = (*instruction >> DATA_OP_SHIFT) & 0xf;
20283
  switch (op)
20284
    {
20285
      /* First negates.  */
20286
    case OPCODE_SUB:             /* ADD <-> SUB  */
20287
      new_inst = OPCODE_ADD;
20288
      value = negated;
20289
      break;
20290
 
20291
    case OPCODE_ADD:
20292
      new_inst = OPCODE_SUB;
20293
      value = negated;
20294
      break;
20295
 
20296
    case OPCODE_CMP:             /* CMP <-> CMN  */
20297
      new_inst = OPCODE_CMN;
20298
      value = negated;
20299
      break;
20300
 
20301
    case OPCODE_CMN:
20302
      new_inst = OPCODE_CMP;
20303
      value = negated;
20304
      break;
20305
 
20306
      /* Now Inverted ops.  */
20307
    case OPCODE_MOV:             /* MOV <-> MVN  */
20308
      new_inst = OPCODE_MVN;
20309
      value = inverted;
20310
      break;
20311
 
20312
    case OPCODE_MVN:
20313
      new_inst = OPCODE_MOV;
20314
      value = inverted;
20315
      break;
20316
 
20317
    case OPCODE_AND:             /* AND <-> BIC  */
20318
      new_inst = OPCODE_BIC;
20319
      value = inverted;
20320
      break;
20321
 
20322
    case OPCODE_BIC:
20323
      new_inst = OPCODE_AND;
20324
      value = inverted;
20325
      break;
20326
 
20327
    case OPCODE_ADC:              /* ADC <-> SBC  */
20328
      new_inst = OPCODE_SBC;
20329
      value = inverted;
20330
      break;
20331
 
20332
    case OPCODE_SBC:
20333
      new_inst = OPCODE_ADC;
20334
      value = inverted;
20335
      break;
20336
 
20337
      /* We cannot do anything.  */
20338
    default:
20339
      return FAIL;
20340
    }
20341
 
20342
  if (value == (unsigned) FAIL)
20343
    return FAIL;
20344
 
20345
  *instruction &= OPCODE_MASK;
20346
  *instruction |= new_inst << DATA_OP_SHIFT;
20347
  return value;
20348
}
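/* Examples (sketch): "mov rd, #0xffffffff" cannot be encoded directly, but
   ~value == 0 can, so the instruction is rewritten as "mvn rd, #0";
   similarly an ADD with immediate -4 becomes a SUB with immediate 4 via
   the negated form.  */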
20349
 
20350
/* Like negate_data_op, but for Thumb-2.   */
20351
 
20352
static unsigned int
20353
thumb32_negate_data_op (offsetT *instruction, unsigned int value)
20354
{
20355
  int op, new_inst;
20356
  int rd;
20357
  unsigned int negated, inverted;
20358
 
20359
  negated = encode_thumb32_immediate (-value);
20360
  inverted = encode_thumb32_immediate (~value);
20361
 
20362
  rd = (*instruction >> 8) & 0xf;
20363
  op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
20364
  switch (op)
20365
    {
20366
      /* ADD <-> SUB.  Includes CMP <-> CMN.  */
20367
    case T2_OPCODE_SUB:
20368
      new_inst = T2_OPCODE_ADD;
20369
      value = negated;
20370
      break;
20371
 
20372
    case T2_OPCODE_ADD:
20373
      new_inst = T2_OPCODE_SUB;
20374
      value = negated;
20375
      break;
20376
 
20377
      /* ORR <-> ORN.  Includes MOV <-> MVN.  */
20378
    case T2_OPCODE_ORR:
20379
      new_inst = T2_OPCODE_ORN;
20380
      value = inverted;
20381
      break;
20382
 
20383
    case T2_OPCODE_ORN:
20384
      new_inst = T2_OPCODE_ORR;
20385
      value = inverted;
20386
      break;
20387
 
20388
      /* AND <-> BIC.  TST has no inverted equivalent.  */
20389
    case T2_OPCODE_AND:
20390
      new_inst = T2_OPCODE_BIC;
20391
      if (rd == 15)
20392
        value = FAIL;
20393
      else
20394
        value = inverted;
20395
      break;
20396
 
20397
    case T2_OPCODE_BIC:
20398
      new_inst = T2_OPCODE_AND;
20399
      value = inverted;
20400
      break;
20401
 
20402
      /* ADC <-> SBC  */
20403
    case T2_OPCODE_ADC:
20404
      new_inst = T2_OPCODE_SBC;
20405
      value = inverted;
20406
      break;
20407
 
20408
    case T2_OPCODE_SBC:
20409
      new_inst = T2_OPCODE_ADC;
20410
      value = inverted;
20411
      break;
20412
 
20413
      /* We cannot do anything.  */
20414
    default:
20415
      return FAIL;
20416
    }
20417
 
20418
  if (value == (unsigned int)FAIL)
20419
    return FAIL;
20420
 
20421
  *instruction &= T2_OPCODE_MASK;
20422
  *instruction |= new_inst << T2_DATA_OP_SHIFT;
20423
  return value;
20424
}
20425
 
20426
/* Read a 32-bit thumb instruction from buf.  */
20427
static unsigned long
20428
get_thumb32_insn (char * buf)
20429
{
20430
  unsigned long insn;
20431
  insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
20432
  insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
20433
 
20434
  return insn;
20435
}
20436
 
20437
 
20438
/* We usually want to set the low bit on the address of thumb function
20439
   symbols.  In particular .word foo - . should have the low bit set.
20440
   Generic code tries to fold the difference of two symbols to
20441
   a constant.  Prevent this and force a relocation when the first symbol
20442
   is a thumb function.  */
20443
 
20444
bfd_boolean
20445
arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
20446
{
20447
  if (op == O_subtract
20448
      && l->X_op == O_symbol
20449
      && r->X_op == O_symbol
20450
      && THUMB_IS_FUNC (l->X_add_symbol))
20451
    {
20452
      l->X_op = O_subtract;
20453
      l->X_op_symbol = r->X_add_symbol;
20454
      l->X_add_number -= r->X_add_number;
20455
      return TRUE;
20456
    }
20457
 
20458
  /* Process as normal.  */
20459
  return FALSE;
20460
}
20461
 
20462
/* Encode Thumb2 unconditional branches and calls. The encoding
20463
   of the immediate values is identical for the two.  */
20464
 
20465
static void
20466
encode_thumb2_b_bl_offset (char * buf, offsetT value)
20467
{
20468
#define T2I1I2MASK  ((1 << 13) | (1 << 11))
20469
  offsetT newval;
20470
  offsetT newval2;
20471
  addressT S, I1, I2, lo, hi;
20472
 
20473
  S = (value >> 24) & 0x01;
20474
  I1 = (value >> 23) & 0x01;
20475
  I2 = (value >> 22) & 0x01;
20476
  hi = (value >> 12) & 0x3ff;
20477
  lo = (value >> 1) & 0x7ff;
20478
  newval   = md_chars_to_number (buf, THUMB_SIZE);
20479
  newval2  = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
20480
  newval  |= (S << 10) | hi;
20481
  newval2 &=  ~T2I1I2MASK;
20482
  newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
20483
  md_number_to_chars (buf, newval, THUMB_SIZE);
20484
  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
20485
}
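/* Worked example (sketch), assuming the unrelocated BL template halfwords
   0xf000 0xd000: for a byte offset of 0x1000, S = I1 = I2 = 0, hi = 1 and
   lo = 0, and since J1 = ~(I1 ^ S) and J2 = ~(I2 ^ S) (the XOR with
   T2I1I2MASK above), the halfwords become 0xf001 0xf800, which decodes
   back to a forward branch of 0x1000 bytes.  */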
20486
 
20487
void
20488
md_apply_fix (fixS *    fixP,
20489
               valueT * valP,
20490
               segT     seg)
20491
{
20492
  offsetT        value = * valP;
20493
  offsetT        newval;
20494
  unsigned int   newimm;
20495
  unsigned long  temp;
20496
  int            sign;
20497
  char *         buf = fixP->fx_where + fixP->fx_frag->fr_literal;
20498
 
20499
  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
20500
 
20501
  /* Note whether this will delete the relocation.  */
20502
 
20503
  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
20504
    fixP->fx_done = 1;
20505
 
20506
  /* On a 64-bit host, silently truncate 'value' to 32 bits for
20507
     consistency with the behaviour on 32-bit hosts.  Remember value
20508
     for emit_reloc.  */
20509
  value &= 0xffffffff;
20510
  value ^= 0x80000000;
20511
  value -= 0x80000000;
20512
 
20513
  *valP = value;
20514
  fixP->fx_addnumber = value;
20515
 
20516
  /* Same treatment for fixP->fx_offset.  */
20517
  fixP->fx_offset &= 0xffffffff;
20518
  fixP->fx_offset ^= 0x80000000;
20519
  fixP->fx_offset -= 0x80000000;
20520
 
20521
  switch (fixP->fx_r_type)
20522
    {
20523
    case BFD_RELOC_NONE:
20524
      /* This will need to go in the object file.  */
20525
      fixP->fx_done = 0;
20526
      break;
20527
 
20528
    case BFD_RELOC_ARM_IMMEDIATE:
20529
      /* We claim that this fixup has been processed here,
20530
         even if in fact we generate an error because we do
20531
         not have a reloc for it, so tc_gen_reloc will reject it.  */
20532
      fixP->fx_done = 1;
20533
 
20534
      if (fixP->fx_addsy)
20535
        {
20536
          const char *msg = 0;
20537
 
20538
          if (! S_IS_DEFINED (fixP->fx_addsy))
20539
            msg = _("undefined symbol %s used as an immediate value");
20540
          else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
20541
            msg = _("symbol %s is in a different section");
20542
          else if (S_IS_WEAK (fixP->fx_addsy))
20543
            msg = _("symbol %s is weak and may be overridden later");
20544
 
20545
          if (msg)
20546
            {
20547
              as_bad_where (fixP->fx_file, fixP->fx_line,
20548
                            msg, S_GET_NAME (fixP->fx_addsy));
20549
              break;
20550
            }
20551
        }
20552
 
20553
      newimm = encode_arm_immediate (value);
20554
      temp = md_chars_to_number (buf, INSN_SIZE);
20555
 
20556
      /* If the instruction will fail, see if we can fix things up by
20557
         changing the opcode.  */
20558
      if (newimm == (unsigned int) FAIL
20559
          && (newimm = negate_data_op (&temp, value)) == (unsigned int) FAIL)
20560
        {
20561
          as_bad_where (fixP->fx_file, fixP->fx_line,
20562
                        _("invalid constant (%lx) after fixup"),
20563
                        (unsigned long) value);
20564
          break;
20565
        }
20566
 
20567
      newimm |= (temp & 0xfffff000);
20568
      md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
20569
      break;
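      /* Worked example (sketch): value 0x104 encodes as 0x41 rotated right
         by 30, i.e. newimm == 0xf41, which is OR-ed into the preserved
         cond/opcode/Rn/Rd bits; a value such as 0xffffff00 fails
         encode_arm_immediate but succeeds via negate_data_op, turning a
         MOV into an MVN with immediate 0xff.  */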
20570
 
20571
    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
20572
      {
20573
        unsigned int highpart = 0;
20574
        unsigned int newinsn  = 0xe1a00000; /* nop.  */
20575
 
20576
        if (fixP->fx_addsy)
20577
          {
20578
            const char *msg = 0;
20579
 
20580
            if (! S_IS_DEFINED (fixP->fx_addsy))
20581
              msg = _("undefined symbol %s used as an immediate value");
20582
            else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
20583
              msg = _("symbol %s is in a different section");
20584
            else if (S_IS_WEAK (fixP->fx_addsy))
20585
              msg = _("symbol %s is weak and may be overridden later");
20586
 
20587
            if (msg)
20588
              {
20589
                as_bad_where (fixP->fx_file, fixP->fx_line,
20590
                              msg, S_GET_NAME (fixP->fx_addsy));
20591
                break;
20592
              }
20593
          }
20594
 
20595
        newimm = encode_arm_immediate (value);
20596
        temp = md_chars_to_number (buf, INSN_SIZE);
20597
 
20598
        /* If the instruction will fail, see if we can fix things up by
20599
           changing the opcode.  */
20600
        if (newimm == (unsigned int) FAIL
20601
            && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
20602
          {
20603
            /* No ?  OK - try using two ADD instructions to generate
20604
               the value.  */
20605
            newimm = validate_immediate_twopart (value, & highpart);
20606
 
20607
            /* Yes - then make sure that the second instruction is
20608
               also an add.  */
20609
            if (newimm != (unsigned int) FAIL)
20610
              newinsn = temp;
20611
            /* Still No ?  Try using a negated value.  */
20612
            else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
20613
              temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
20614
            /* Otherwise - give up.  */
20615
            else
20616
              {
20617
                as_bad_where (fixP->fx_file, fixP->fx_line,
20618
                              _("unable to compute ADRL instructions for PC offset of 0x%lx"),
20619
                              (long) value);
20620
                break;
20621
              }
20622
 
20623
            /* Replace the first operand in the 2nd instruction (which
20624
               is the PC) with the destination register.  We have
20625
               already added in the PC in the first instruction and we
20626
               do not want to do it again.  */
20627
            newinsn &= ~ 0xf0000;
20628
            newinsn |= ((newinsn & 0x0f000) << 4);
20629
          }
20630
 
20631
        newimm |= (temp & 0xfffff000);
20632
        md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
20633
 
20634
        highpart |= (newinsn & 0xfffff000);
20635
        md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
20636
      }
20637
      break;
20638
 
20639
    case BFD_RELOC_ARM_OFFSET_IMM:
20640
      if (!fixP->fx_done && seg->use_rela_p)
20641
        value = 0;
20642
 
20643
    case BFD_RELOC_ARM_LITERAL:
20644
      sign = value > 0;
20645
 
20646
      if (value < 0)
20647
        value = - value;
20648
 
20649
      if (validate_offset_imm (value, 0) == FAIL)
20650
        {
20651
          if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
20652
            as_bad_where (fixP->fx_file, fixP->fx_line,
20653
                          _("invalid literal constant: pool needs to be closer"));
20654
          else
20655
            as_bad_where (fixP->fx_file, fixP->fx_line,
20656
                          _("bad immediate value for offset (%ld)"),
20657
                          (long) value);
20658
          break;
20659
        }
20660
 
20661
      newval = md_chars_to_number (buf, INSN_SIZE);
20662
      if (value == 0)
20663
        newval &= 0xfffff000;
20664
      else
20665
        {
20666
          newval &= 0xff7ff000;
20667
          newval |= value | (sign ? INDEX_UP : 0);
20668
        }
20669
      md_number_to_chars (buf, newval, INSN_SIZE);
20670
      break;
20671
 
20672
    case BFD_RELOC_ARM_OFFSET_IMM8:
20673
    case BFD_RELOC_ARM_HWLITERAL:
20674
      sign = value > 0;
20675
 
20676
      if (value < 0)
20677
        value = - value;
20678
 
20679
      if (validate_offset_imm (value, 1) == FAIL)
20680
        {
20681
          if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
20682
            as_bad_where (fixP->fx_file, fixP->fx_line,
20683
                          _("invalid literal constant: pool needs to be closer"));
20684
          else
20685
            as_bad (_("bad immediate value for 8-bit offset (%ld)"),
20686
                    (long) value);
20687
          break;
20688
        }
20689
 
20690
      newval = md_chars_to_number (buf, INSN_SIZE);
20691
      if (value == 0)
20692
        newval &= 0xfffff0f0;
20693
      else
20694
        {
20695
          newval &= 0xff7ff0f0;
20696
          newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
20697
        }
20698
      md_number_to_chars (buf, newval, INSN_SIZE);
20699
      break;
20700
 
20701
    case BFD_RELOC_ARM_T32_OFFSET_U8:
20702
      if (value < 0 || value > 1020 || value % 4 != 0)
20703
        as_bad_where (fixP->fx_file, fixP->fx_line,
20704
                      _("bad immediate value for offset (%ld)"), (long) value);
20705
      value /= 4;
20706
 
20707
      newval = md_chars_to_number (buf+2, THUMB_SIZE);
20708
      newval |= value;
20709
      md_number_to_chars (buf+2, newval, THUMB_SIZE);
20710
      break;
20711
 
20712
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
20713
      /* This is a complicated relocation used for all varieties of Thumb32
20714
         load/store instructions with immediate offset:
20715
 
20716
         1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
20717
                                                   *4, optional writeback(W)
20718
                                                   (doubleword load/store)
20719
 
20720
         1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
20721
         1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
20722
         1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
20723
         1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
20724
         1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
20725
 
20726
         Uppercase letters indicate bits that are already encoded at
20727
         this point.  Lowercase letters are our problem.  For the
20728
         second block of instructions, the secondary opcode nybble
20729
         (bits 8..11) is present, and bit 23 is zero, even if this is
20730
         a PC-relative operation.  */
20731
      newval = md_chars_to_number (buf, THUMB_SIZE);
20732
      newval <<= 16;
20733
      newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
20734
 
20735
      if ((newval & 0xf0000000) == 0xe0000000)
20736
        {
20737
          /* Doubleword load/store: 8-bit offset, scaled by 4.  */
20738
          if (value >= 0)
20739
            newval |= (1 << 23);
20740
          else
20741
            value = -value;
20742
          if (value % 4 != 0)
20743
            {
20744
              as_bad_where (fixP->fx_file, fixP->fx_line,
20745
                            _("offset not a multiple of 4"));
20746
              break;
20747
            }
20748
          value /= 4;
20749
          if (value > 0xff)
20750
            {
20751
              as_bad_where (fixP->fx_file, fixP->fx_line,
20752
                            _("offset out of range"));
20753
              break;
20754
            }
20755
          newval &= ~0xff;
20756
        }
20757
      else if ((newval & 0x000f0000) == 0x000f0000)
20758
        {
20759
          /* PC-relative, 12-bit offset.  */
20760
          if (value >= 0)
20761
            newval |= (1 << 23);
20762
          else
20763
            value = -value;
20764
          if (value > 0xfff)
20765
            {
20766
              as_bad_where (fixP->fx_file, fixP->fx_line,
20767
                            _("offset out of range"));
20768
              break;
20769
            }
20770
          newval &= ~0xfff;
20771
        }
20772
      else if ((newval & 0x00000100) == 0x00000100)
20773
        {
20774
          /* Writeback: 8-bit, +/- offset.  */
20775
          if (value >= 0)
20776
            newval |= (1 << 9);
20777
          else
20778
            value = -value;
20779
          if (value > 0xff)
20780
            {
20781
              as_bad_where (fixP->fx_file, fixP->fx_line,
20782
                            _("offset out of range"));
20783
              break;
20784
            }
20785
          newval &= ~0xff;
20786
        }
20787
      else if ((newval & 0x00000f00) == 0x00000e00)
20788
        {
20789
          /* T-instruction: positive 8-bit offset.  */
20790
          if (value < 0 || value > 0xff)
20791
            {
20792
              as_bad_where (fixP->fx_file, fixP->fx_line,
20793
                            _("offset out of range"));
20794
              break;
20795
            }
20796
          newval &= ~0xff;
20797
          newval |= value;
20798
        }
20799
      else
20800
        {
20801
          /* Positive 12-bit or negative 8-bit offset.  */
20802
          int limit;
20803
          if (value >= 0)
20804
            {
20805
              newval |= (1 << 23);
20806
              limit = 0xfff;
20807
            }
20808
          else
20809
            {
20810
              value = -value;
20811
              limit = 0xff;
20812
            }
20813
          if (value > limit)
20814
            {
20815
              as_bad_where (fixP->fx_file, fixP->fx_line,
20816
                            _("offset out of range"));
20817
              break;
20818
            }
20819
          newval &= ~limit;
20820
        }
20821
 
20822
      newval |= value;
20823
      md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
20824
      md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
20825
      break;
20826
 
20827
    case BFD_RELOC_ARM_SHIFT_IMM:
20828
      newval = md_chars_to_number (buf, INSN_SIZE);
20829
      if (((unsigned long) value) > 32
20830
          || (value == 32
20831
              && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
20832
        {
20833
          as_bad_where (fixP->fx_file, fixP->fx_line,
20834
                        _("shift expression is too large"));
20835
          break;
20836
        }
20837
 
20838
      if (value == 0)
20839
        /* Shifts of zero must be done as lsl.  */
20840
        newval &= ~0x60;
20841
      else if (value == 32)
20842
        value = 0;
20843
      newval &= 0xfffff07f;
20844
      newval |= (value & 0x1f) << 7;
20845
      md_number_to_chars (buf, newval, INSN_SIZE);
20846
      break;
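      /* Example (sketch): a shift expression resolving to LSR #32 reaches
         here with value 32 and shift-type bits (newval & 0x60) == 0x20, so
         it is accepted and the immediate field is written as 0, the
         architectural encoding of LSR #32; LSL #32 and ROR #32 are
         rejected above because a zero field in those encodings means
         LSL #0 and RRX.  */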
20847
 
20848
    case BFD_RELOC_ARM_T32_IMMEDIATE:
20849
    case BFD_RELOC_ARM_T32_ADD_IMM:
20850
    case BFD_RELOC_ARM_T32_IMM12:
20851
    case BFD_RELOC_ARM_T32_ADD_PC12:
20852
      /* We claim that this fixup has been processed here,
20853
         even if in fact we generate an error because we do
20854
         not have a reloc for it, so tc_gen_reloc will reject it.  */
20855
      fixP->fx_done = 1;
20856
 
20857
      if (fixP->fx_addsy
20858
          && ! S_IS_DEFINED (fixP->fx_addsy))
20859
        {
20860
          as_bad_where (fixP->fx_file, fixP->fx_line,
20861
                        _("undefined symbol %s used as an immediate value"),
20862
                        S_GET_NAME (fixP->fx_addsy));
20863
          break;
20864
        }
20865
 
20866
      newval = md_chars_to_number (buf, THUMB_SIZE);
20867
      newval <<= 16;
20868
      newval |= md_chars_to_number (buf+2, THUMB_SIZE);
20869
 
20870
      newimm = FAIL;
20871
      if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
20872
          || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
20873
        {
20874
          newimm = encode_thumb32_immediate (value);
20875
          if (newimm == (unsigned int) FAIL)
20876
            newimm = thumb32_negate_data_op (&newval, value);
20877
        }
20878
      if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE
20879
          && newimm == (unsigned int) FAIL)
20880
        {
20881
          /* Turn add/sub into addw/subw.  */
20882
          if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
20883
            newval = (newval & 0xfeffffff) | 0x02000000;
20884
          /* No flat 12-bit imm encoding for addsw/subsw.  */
20885
          if ((newval & 0x00100000) == 0)
20886
            {
20887
              /* 12 bit immediate for addw/subw.  */
20888
              if (value < 0)
20889
                {
20890
                  value = -value;
20891
                  newval ^= 0x00a00000;
20892
                }
20893
              if (value > 0xfff)
20894
                newimm = (unsigned int) FAIL;
20895
              else
20896
                newimm = value;
20897
            }
20898
        }
20899
 
20900
      if (newimm == (unsigned int)FAIL)
20901
        {
20902
          as_bad_where (fixP->fx_file, fixP->fx_line,
20903
                        _("invalid constant (%lx) after fixup"),
20904
                        (unsigned long) value);
20905
          break;
20906
        }
20907
 
20908
      newval |= (newimm & 0x800) << 15;
20909
      newval |= (newimm & 0x700) << 4;
20910
      newval |= (newimm & 0x0ff);
20911
 
20912
      md_number_to_chars (buf,   (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
20913
      md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
20914
      break;
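      /* Field-placement sketch: a Thumb-2 modified immediate is the 12-bit
         quantity i:imm3:imm8, and the three OR operations above scatter it
         as i -> bit 26 (bit 10 of the first halfword), imm3 -> bits 14..12
         of the second halfword and imm8 -> its low byte; e.g. the constant
         0x00ab00ab encodes as newimm == 0x1ab, giving i = 0, imm3 = 1,
         imm8 = 0xab.  */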
20915
 
20916
    case BFD_RELOC_ARM_SMC:
20917
      if (((unsigned long) value) > 0xffff)
20918
        as_bad_where (fixP->fx_file, fixP->fx_line,
20919
                      _("invalid smc expression"));
20920
      newval = md_chars_to_number (buf, INSN_SIZE);
20921
      newval |= (value & 0xf) | ((value & 0xfff0) << 4);
20922
      md_number_to_chars (buf, newval, INSN_SIZE);
20923
      break;
20924
 
20925
    case BFD_RELOC_ARM_HVC:
20926
      if (((unsigned long) value) > 0xffff)
20927
        as_bad_where (fixP->fx_file, fixP->fx_line,
20928
                      _("invalid hvc expression"));
20929
      newval = md_chars_to_number (buf, INSN_SIZE);
20930
      newval |= (value & 0xf) | ((value & 0xfff0) << 4);
20931
      md_number_to_chars (buf, newval, INSN_SIZE);
20932
      break;
20933
 
20934
    case BFD_RELOC_ARM_SWI:
20935
      if (fixP->tc_fix_data != 0)
20936
        {
20937
          if (((unsigned long) value) > 0xff)
20938
            as_bad_where (fixP->fx_file, fixP->fx_line,
20939
                          _("invalid swi expression"));
20940
          newval = md_chars_to_number (buf, THUMB_SIZE);
20941
          newval |= value;
20942
          md_number_to_chars (buf, newval, THUMB_SIZE);
20943
        }
20944
      else
20945
        {
20946
          if (((unsigned long) value) > 0x00ffffff)
20947
            as_bad_where (fixP->fx_file, fixP->fx_line,
20948
                          _("invalid swi expression"));
20949
          newval = md_chars_to_number (buf, INSN_SIZE);
20950
          newval |= value;
20951
          md_number_to_chars (buf, newval, INSN_SIZE);
20952
        }
20953
      break;
20954
 
20955
    case BFD_RELOC_ARM_MULTI:
20956
      if (((unsigned long) value) > 0xffff)
20957
        as_bad_where (fixP->fx_file, fixP->fx_line,
20958
                      _("invalid expression in load/store multiple"));
20959
      newval = value | md_chars_to_number (buf, INSN_SIZE);
20960
      md_number_to_chars (buf, newval, INSN_SIZE);
20961
      break;
20962
 
20963
#ifdef OBJ_ELF
20964
    case BFD_RELOC_ARM_PCREL_CALL:
20965
 
20966
      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
20967
          && fixP->fx_addsy
20968
          && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
20969
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20970
          && THUMB_IS_FUNC (fixP->fx_addsy))
20971
        /* Flip the bl to blx.  This is a simple bit
20972
           flip here because we generate PCREL_CALL for
20973
           unconditional bls.  */
20974
        {
20975
          newval = md_chars_to_number (buf, INSN_SIZE);
20976
          newval = newval | 0x10000000;
20977
          md_number_to_chars (buf, newval, INSN_SIZE);
20978
          temp = 1;
20979
          fixP->fx_done = 1;
20980
        }
20981
      else
20982
        temp = 3;
20983
      goto arm_branch_common;
20984
 
20985
    case BFD_RELOC_ARM_PCREL_JUMP:
20986
      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
20987
          && fixP->fx_addsy
20988
          && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
20989
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20990
          && THUMB_IS_FUNC (fixP->fx_addsy))
20991
        {
20992
          /* This would map to a bl<cond>, b<cond>,
20993
             b<always> to a Thumb function. We
20994
             need to force a relocation for this particular
20995
             case.  */
20996
          newval = md_chars_to_number (buf, INSN_SIZE);
20997
          fixP->fx_done = 0;
20998
        }
20999
 
21000
    case BFD_RELOC_ARM_PLT32:
21001
#endif
21002
    case BFD_RELOC_ARM_PCREL_BRANCH:
21003
      temp = 3;
21004
      goto arm_branch_common;
21005
 
21006
    case BFD_RELOC_ARM_PCREL_BLX:
21007
 
21008
      temp = 1;
21009
      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
21010
          && fixP->fx_addsy
21011
          && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21012
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21013
          && ARM_IS_FUNC (fixP->fx_addsy))
21014
        {
21015
          /* Flip the blx to a bl and warn.  */
21016
          const char *name = S_GET_NAME (fixP->fx_addsy);
21017
          newval = 0xeb000000;
21018
          as_warn_where (fixP->fx_file, fixP->fx_line,
21019
                         _("blx to '%s' an ARM ISA state function changed to bl"),
21020
                          name);
21021
          md_number_to_chars (buf, newval, INSN_SIZE);
21022
          temp = 3;
21023
          fixP->fx_done = 1;
21024
        }
21025
 
21026
#ifdef OBJ_ELF
21027
       if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
21028
         fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
21029
#endif
21030
 
21031
    arm_branch_common:
21032
      /* We are going to store value (shifted right by two) in the
21033
         instruction, in a 24-bit signed field.  Bits 25 through 31 must be either
21034
         all clear or all set and bit 0 must be clear.  For B/BL bit 1 must
21035
         also be clear.  */
21036
      if (value & temp)
21037
        as_bad_where (fixP->fx_file, fixP->fx_line,
21038
                      _("misaligned branch destination"));
21039
      if ((value & (offsetT)0xfe000000) != (offsetT)0
21040
          && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
21041 160 khays
        as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
21042 16 khays
 
21043
      if (fixP->fx_done || !seg->use_rela_p)
21044
        {
21045
          newval = md_chars_to_number (buf, INSN_SIZE);
21046
          newval |= (value >> 2) & 0x00ffffff;
21047
          /* Set the H bit on BLX instructions.  */
21048
          if (temp == 1)
21049
            {
21050
              if (value & 2)
21051
                newval |= 0x01000000;
21052
              else
21053
                newval &= ~0x01000000;
21054
            }
21055
          md_number_to_chars (buf, newval, INSN_SIZE);
21056
        }
21057
      break;
21058
 
21059
    case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
21060
      /* CBZ can only branch forward.  */
21061
 
21062
      /* Attempts to use CBZ to branch to the next instruction
21063
         (which, strictly speaking, are prohibited) will be turned into
21064
         no-ops.
21065
 
21066
         FIXME: It may be better to remove the instruction completely and
21067
         perform relaxation.  */
21068
      if (value == -2)
21069
        {
21070
          newval = md_chars_to_number (buf, THUMB_SIZE);
21071
          newval = 0xbf00; /* NOP encoding T1 */
21072
          md_number_to_chars (buf, newval, THUMB_SIZE);
21073
        }
21074
      else
21075
        {
21076
          if (value & ~0x7e)
21077 160 khays
            as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
21078 16 khays
 
21079
          if (fixP->fx_done || !seg->use_rela_p)
21080
            {
21081
              newval = md_chars_to_number (buf, THUMB_SIZE);
21082
              newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
21083
              md_number_to_chars (buf, newval, THUMB_SIZE);
21084
            }
21085
        }
21086
      break;
21087
 
21088
    case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch.  */
21089
      if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
21090 160 khays
        as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
21091 16 khays
 
21092
      if (fixP->fx_done || !seg->use_rela_p)
21093
        {
21094
          newval = md_chars_to_number (buf, THUMB_SIZE);
21095
          newval |= (value & 0x1ff) >> 1;
21096
          md_number_to_chars (buf, newval, THUMB_SIZE);
21097
        }
21098
      break;
21099
 
21100
    case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch.  */
21101
      if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
21102 160 khays
        as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
21103 16 khays
 
21104
      if (fixP->fx_done || !seg->use_rela_p)
21105
        {
21106
          newval = md_chars_to_number (buf, THUMB_SIZE);
21107
          newval |= (value & 0xfff) >> 1;
21108
          md_number_to_chars (buf, newval, THUMB_SIZE);
21109
        }
21110
      break;
21111
 
21112
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
21113
      if (fixP->fx_addsy
21114
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21115
          && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21116
          && ARM_IS_FUNC (fixP->fx_addsy)
21117
          && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
21118
        {
21119
          /* Force a relocation for a branch 20 bits wide.  */
21120
          fixP->fx_done = 0;
21121
        }
21122 160 khays
      if ((value & ~0x0fffff) && ((value & ~0x0fffff) != ~0x0fffff))
21123 16 khays
        as_bad_where (fixP->fx_file, fixP->fx_line,
21124
                      _("conditional branch out of range"));
21125
 
21126
      if (fixP->fx_done || !seg->use_rela_p)
21127
        {
21128
          offsetT newval2;
21129
          addressT S, J1, J2, lo, hi;
21130
 
21131
          S  = (value & 0x00100000) >> 20;
21132
          J2 = (value & 0x00080000) >> 19;
21133
          J1 = (value & 0x00040000) >> 18;
21134
          hi = (value & 0x0003f000) >> 12;
21135
          lo = (value & 0x00000ffe) >> 1;
21136
 
21137
          newval   = md_chars_to_number (buf, THUMB_SIZE);
21138
          newval2  = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
21139
          newval  |= (S << 10) | hi;
21140
          newval2 |= (J1 << 13) | (J2 << 11) | lo;
21141
          md_number_to_chars (buf, newval, THUMB_SIZE);
21142
          md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
21143
        }
21144
      break;
21145
 
21146
    case BFD_RELOC_THUMB_PCREL_BLX:
21147
      /* If there is a blx from a thumb state function to
21148
         another thumb function, flip this to a bl and warn
21149
         about it.  */
21150
 
21151
      if (fixP->fx_addsy
21152
          && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21153
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21154
          && THUMB_IS_FUNC (fixP->fx_addsy))
21155
        {
21156
          const char *name = S_GET_NAME (fixP->fx_addsy);
21157
          as_warn_where (fixP->fx_file, fixP->fx_line,
21158
                         _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
21159
                         name);
21160
          newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
21161
          newval = newval | 0x1000;
21162
          md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
21163
          fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
21164
          fixP->fx_done = 1;
21165
        }
21166
 
21167
 
21168
      goto thumb_bl_common;
21169
 
21170
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
21171
      /* A bl from Thumb state ISA to an internal ARM state function
21172
         is converted to a blx.  */
21173
      if (fixP->fx_addsy
21174
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21175
          && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21176
          && ARM_IS_FUNC (fixP->fx_addsy)
21177
          && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
21178
        {
21179
          newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
21180
          newval = newval & ~0x1000;
21181
          md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
21182
          fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
21183
          fixP->fx_done = 1;
21184
        }
21185
 
21186
    thumb_bl_common:
21187
 
21188
#ifdef OBJ_ELF
21189
       if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4 &&
21190
           fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
21191
         fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
21192
#endif
21193
 
21194
      if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
21195
        /* For a BLX instruction, make sure that the relocation is rounded up
21196
           to a word boundary.  This follows the semantics of the instruction
21197
           which specifies that bit 1 of the target address will come from bit
21198
           1 of the base address.  */
21199
        value = (value + 1) & ~ 1;
21200
 
21201
       if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
21202 160 khays
         {
21203
           if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2)))
21204
             as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
21205
           else if ((value & ~0x1ffffff)
21206
                    && ((value & ~0x1ffffff) != ~0x1ffffff))
21207
             as_bad_where (fixP->fx_file, fixP->fx_line,
21208
                           _("Thumb2 branch out of range"));
21209
         }
21210 16 khays
 
21211
      if (fixP->fx_done || !seg->use_rela_p)
21212
        encode_thumb2_b_bl_offset (buf, value);
21213
 
21214
      break;
21215
 
21216
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
21217 160 khays
      if ((value & ~0x0ffffff) && ((value & ~0x0ffffff) != ~0x0ffffff))
21218
        as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
21219 16 khays
 
21220
      if (fixP->fx_done || !seg->use_rela_p)
21221
          encode_thumb2_b_bl_offset (buf, value);
21222
 
21223
      break;
21224
 
21225
    case BFD_RELOC_8:
21226
      if (fixP->fx_done || !seg->use_rela_p)
21227
        md_number_to_chars (buf, value, 1);
21228
      break;
21229
 
21230
    case BFD_RELOC_16:
21231
      if (fixP->fx_done || !seg->use_rela_p)
21232
        md_number_to_chars (buf, value, 2);
21233
      break;
21234
 
21235
#ifdef OBJ_ELF
21236
    case BFD_RELOC_ARM_TLS_CALL:
21237
    case BFD_RELOC_ARM_THM_TLS_CALL:
21238
    case BFD_RELOC_ARM_TLS_DESCSEQ:
21239
    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
21240
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
21241
      break;
21242
 
21243
    case BFD_RELOC_ARM_TLS_GOTDESC:
21244
    case BFD_RELOC_ARM_TLS_GD32:
21245
    case BFD_RELOC_ARM_TLS_LE32:
21246
    case BFD_RELOC_ARM_TLS_IE32:
21247
    case BFD_RELOC_ARM_TLS_LDM32:
21248
    case BFD_RELOC_ARM_TLS_LDO32:
21249
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
21250
      /* fall through */
21251
 
21252
    case BFD_RELOC_ARM_GOT32:
21253
    case BFD_RELOC_ARM_GOTOFF:
21254
      if (fixP->fx_done || !seg->use_rela_p)
21255
        md_number_to_chars (buf, 0, 4);
21256
      break;
21257
 
21258
    case BFD_RELOC_ARM_GOT_PREL:
21259
      if (fixP->fx_done || !seg->use_rela_p)
21260
        md_number_to_chars (buf, value, 4);
21261
      break;
21262
 
21263
    case BFD_RELOC_ARM_TARGET2:
21264
      /* TARGET2 is not partial-inplace, so we need to write the
21265
         addend here for REL targets, because it won't be written out
21266
         during reloc processing later.  */
21267
      if (fixP->fx_done || !seg->use_rela_p)
21268
        md_number_to_chars (buf, fixP->fx_offset, 4);
21269
      break;
21270
#endif
21271
 
21272
    case BFD_RELOC_RVA:
21273
    case BFD_RELOC_32:
21274
    case BFD_RELOC_ARM_TARGET1:
21275
    case BFD_RELOC_ARM_ROSEGREL32:
21276
    case BFD_RELOC_ARM_SBREL32:
21277
    case BFD_RELOC_32_PCREL:
21278
#ifdef TE_PE
21279
    case BFD_RELOC_32_SECREL:
21280
#endif
21281
      if (fixP->fx_done || !seg->use_rela_p)
21282
#ifdef TE_WINCE
21283
        /* For WinCE we only do this for pcrel fixups.  */
21284
        if (fixP->fx_done || fixP->fx_pcrel)
21285
#endif
21286
          md_number_to_chars (buf, value, 4);
21287
      break;
21288
 
21289
#ifdef OBJ_ELF
21290
    case BFD_RELOC_ARM_PREL31:
21291
      if (fixP->fx_done || !seg->use_rela_p)
21292
        {
21293
          newval = md_chars_to_number (buf, 4) & 0x80000000;
21294
          if ((value ^ (value >> 1)) & 0x40000000)
21295
            {
21296
              as_bad_where (fixP->fx_file, fixP->fx_line,
21297
                            _("rel31 relocation overflow"));
21298
            }
21299
          newval |= value & 0x7fffffff;
21300
          md_number_to_chars (buf, newval, 4);
21301
        }
21302
      break;
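      /* Overflow-check sketch: (value ^ (value >> 1)) & 0x40000000 is
         nonzero exactly when bits 30 and 31 differ, i.e. when the value
         cannot be represented as a sign-extended 31-bit field; 0x40000000
         trips the check, while -0x20000000 (0xe0000000) passes.  */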
21303
#endif
21304
 
21305
    case BFD_RELOC_ARM_CP_OFF_IMM:
21306
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
21307
      if (value < -1023 || value > 1023 || (value & 3))
21308
        as_bad_where (fixP->fx_file, fixP->fx_line,
21309
                      _("co-processor offset out of range"));
21310
    cp_off_common:
21311
      sign = value > 0;
21312
      if (value < 0)
21313
        value = -value;
21314
      if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
21315
          || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
21316
        newval = md_chars_to_number (buf, INSN_SIZE);
21317
      else
21318
        newval = get_thumb32_insn (buf);
21319
      if (value == 0)
21320
        newval &= 0xffffff00;
21321
      else
21322
        {
21323
          newval &= 0xff7fff00;
21324
          newval |= (value >> 2) | (sign ? INDEX_UP : 0);
21325
        }
21326
      if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
21327
          || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
21328
        md_number_to_chars (buf, newval, INSN_SIZE);
21329
      else
21330
        put_thumb32_insn (buf, newval);
21331
      break;
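      /* Example (sketch): a co-processor offset of -8 takes the path above
         with sign == 0 and value == 8, so bits 7..0 receive 8 >> 2 == 2
         and INDEX_UP stays clear, encoding "subtract 8"; offsets must be
         word-aligned and within +/-1020 (or +/-255 words for the _S2
         variants, which are scaled by 4 before reaching cp_off_common).  */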
21332
 
21333
    case BFD_RELOC_ARM_CP_OFF_IMM_S2:
21334
    case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
21335
      if (value < -255 || value > 255)
21336
        as_bad_where (fixP->fx_file, fixP->fx_line,
21337
                      _("co-processor offset out of range"));
21338
      value *= 4;
21339
      goto cp_off_common;
21340
 
21341
    case BFD_RELOC_ARM_THUMB_OFFSET:
21342
      newval = md_chars_to_number (buf, THUMB_SIZE);
21343
      /* Exactly what ranges apply, and where the offset is inserted, depends
21344
         on the type of instruction; we can establish this from the
21345
         top 4 bits.  */
21346
      switch (newval >> 12)
21347
        {
21348
        case 4: /* PC load.  */
21349
          /* Thumb PC loads are somewhat odd: bit 1 of the PC is
21350
             forced to zero for these loads; md_pcrel_from has already
21351
             compensated for this.  */
21352
          if (value & 3)
21353
            as_bad_where (fixP->fx_file, fixP->fx_line,
21354
                          _("invalid offset, target not word aligned (0x%08lX)"),
21355
                          (((unsigned long) fixP->fx_frag->fr_address
21356
                            + (unsigned long) fixP->fx_where) & ~3)
21357
                          + (unsigned long) value);
21358
 
21359
          if (value & ~0x3fc)
21360
            as_bad_where (fixP->fx_file, fixP->fx_line,
21361
                          _("invalid offset, value too big (0x%08lX)"),
21362
                          (long) value);
21363
 
21364
          newval |= value >> 2;
21365
          break;
21366
 
21367
        case 9: /* SP load/store.  */
21368
          if (value & ~0x3fc)
21369
            as_bad_where (fixP->fx_file, fixP->fx_line,
21370
                          _("invalid offset, value too big (0x%08lX)"),
21371
                          (long) value);
21372
          newval |= value >> 2;
21373
          break;
21374
 
21375
        case 6: /* Word load/store.  */
21376
          if (value & ~0x7c)
21377
            as_bad_where (fixP->fx_file, fixP->fx_line,
21378
                          _("invalid offset, value too big (0x%08lX)"),
21379
                          (long) value);
21380
          newval |= value << 4; /* 6 - 2.  */
21381
          break;
21382
 
21383
        case 7: /* Byte load/store.  */
21384
          if (value & ~0x1f)
21385
            as_bad_where (fixP->fx_file, fixP->fx_line,
21386
                          _("invalid offset, value too big (0x%08lX)"),
21387
                          (long) value);
21388
          newval |= value << 6;
21389
          break;
21390
 
21391
        case 8: /* Halfword load/store.  */
21392
          if (value & ~0x3e)
21393
            as_bad_where (fixP->fx_file, fixP->fx_line,
21394
                          _("invalid offset, value too big (0x%08lX)"),
21395
                          (long) value);
21396
          newval |= value << 5; /* 6 - 1.  */
21397
          break;
21398
 
21399
        default:
21400
          as_bad_where (fixP->fx_file, fixP->fx_line,
21401
                        "Unable to process relocation for thumb opcode: %lx",
21402
                        (unsigned long) newval);
21403
          break;
21404
        }
21405
      md_number_to_chars (buf, newval, THUMB_SIZE);
21406
      break;
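#if 0
      /* A minimal stand-alone sketch of the Thumb PC-load encoding
         handled in case 4 above; the helper name is hypothetical and
         not part of this file.  The byte offset must be word aligned
         and no larger than 0x3fc, and is stored as a word count in the
         low eight bits of the instruction.  */
      static unsigned int
      encode_thumb_pc_load (unsigned int insn, unsigned int byte_offset)
      {
        gas_assert ((byte_offset & 3) == 0 && byte_offset <= 0x3fc);
        return insn | (byte_offset >> 2);   /* imm8 = offset / 4.  */
      }
#endif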
21407
 
21408
    case BFD_RELOC_ARM_THUMB_ADD:
21409
      /* This is a complicated relocation, since we use it for all of
21410
         the following immediate relocations:
21411
 
21412
            3bit ADD/SUB
21413
            8bit ADD/SUB
21414
            9bit ADD/SUB SP word-aligned
21415
           10bit ADD PC/SP word-aligned
21416
 
21417
         The type of instruction being processed is encoded in the
21418
         instruction field:
21419
 
21420
           0x8000  SUB
21421
           0x00F0  Rd
21422
           0x000F  Rs
21423
      */
21424
      newval = md_chars_to_number (buf, THUMB_SIZE);
21425
      {
21426
        int rd = (newval >> 4) & 0xf;
21427
        int rs = newval & 0xf;
21428
        int subtract = !!(newval & 0x8000);
21429
 
21430
        /* Check for HI regs, only very restricted cases allowed:
21431
           Adjusting SP, and using PC or SP to get an address.  */
21432
        if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
21433
            || (rs > 7 && rs != REG_SP && rs != REG_PC))
21434
          as_bad_where (fixP->fx_file, fixP->fx_line,
21435
                        _("invalid Hi register with immediate"));
21436
 
21437
        /* If value is negative, choose the opposite instruction.  */
21438
        if (value < 0)
21439
          {
21440
            value = -value;
21441
            subtract = !subtract;
21442
            if (value < 0)
21443
              as_bad_where (fixP->fx_file, fixP->fx_line,
21444
                            _("immediate value out of range"));
21445
          }
21446
 
21447
        if (rd == REG_SP)
21448
          {
21449
            if (value & ~0x1fc)
21450
              as_bad_where (fixP->fx_file, fixP->fx_line,
21451
                            _("invalid immediate for stack address calculation"));
21452
            newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
21453
            newval |= value >> 2;
21454
          }
21455
        else if (rs == REG_PC || rs == REG_SP)
21456
          {
21457
            if (subtract || value & ~0x3fc)
21458
              as_bad_where (fixP->fx_file, fixP->fx_line,
21459
                            _("invalid immediate for address calculation (value = 0x%08lX)"),
21460
                            (unsigned long) value);
21461
            newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
21462
            newval |= rd << 8;
21463
            newval |= value >> 2;
21464
          }
21465
        else if (rs == rd)
21466
          {
21467
            if (value & ~0xff)
21468
              as_bad_where (fixP->fx_file, fixP->fx_line,
21469
                            _("immediate value out of range"));
21470
            newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
21471
            newval |= (rd << 8) | value;
21472
          }
21473
        else
21474
          {
21475
            if (value & ~0x7)
21476
              as_bad_where (fixP->fx_file, fixP->fx_line,
21477
                            _("immediate value out of range"));
21478
            newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
21479
            newval |= rd | (rs << 3) | (value << 6);
21480
          }
21481
      }
21482
      md_number_to_chars (buf, newval, THUMB_SIZE);
21483
      break;
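#if 0
      /* A minimal sketch of the "ADD Rd, SP, #imm" path above; the
         helper name is hypothetical.  RD is a low register and IMM a
         word-aligned value no larger than 0x3fc, stored as a word
         count in the low eight bits.  */
      static unsigned int
      encode_thumb_add_rd_sp (unsigned int rd, unsigned int imm)
      {
        return T_OPCODE_ADD_SP | (rd << 8) | (imm >> 2);
      }
#endif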
21484
 
21485
    case BFD_RELOC_ARM_THUMB_IMM:
21486
      newval = md_chars_to_number (buf, THUMB_SIZE);
21487
      if (value < 0 || value > 255)
21488
        as_bad_where (fixP->fx_file, fixP->fx_line,
21489
                      _("invalid immediate: %ld is out of range"),
21490
                      (long) value);
21491
      newval |= value;
21492
      md_number_to_chars (buf, newval, THUMB_SIZE);
21493
      break;
21494
 
21495
    case BFD_RELOC_ARM_THUMB_SHIFT:
21496
      /* 5bit shift value (0..32).  LSL cannot take 32.  */
21497
      newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
21498
      temp = newval & 0xf800;
21499
      if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
21500
        as_bad_where (fixP->fx_file, fixP->fx_line,
21501
                      _("invalid shift value: %ld"), (long) value);
21502
      /* Shifts of zero must be encoded as LSL.  */
21503
      if (value == 0)
21504
        newval = (newval & 0x003f) | T_OPCODE_LSL_I;
21505
      /* Shifts of 32 are encoded as zero.  */
21506
      else if (value == 32)
21507
        value = 0;
21508
      newval |= value << 6;
21509
      md_number_to_chars (buf, newval, THUMB_SIZE);
21510
      break;
21511
 
21512
    case BFD_RELOC_VTABLE_INHERIT:
21513
    case BFD_RELOC_VTABLE_ENTRY:
21514
      fixP->fx_done = 0;
21515
      return;
21516
 
21517
    case BFD_RELOC_ARM_MOVW:
21518
    case BFD_RELOC_ARM_MOVT:
21519
    case BFD_RELOC_ARM_THUMB_MOVW:
21520
    case BFD_RELOC_ARM_THUMB_MOVT:
21521
      if (fixP->fx_done || !seg->use_rela_p)
21522
        {
21523
          /* REL format relocations are limited to a 16-bit addend.  */
21524
          if (!fixP->fx_done)
21525
            {
21526
              if (value < -0x8000 || value > 0x7fff)
21527
                  as_bad_where (fixP->fx_file, fixP->fx_line,
21528
                                _("offset out of range"));
21529
            }
21530
          else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
21531
                   || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
21532
            {
21533
              value >>= 16;
21534
            }
21535
 
21536
          if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
21537
              || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
21538
            {
21539
              newval = get_thumb32_insn (buf);
21540
              newval &= 0xfbf08f00;
21541
              newval |= (value & 0xf000) << 4;
21542
              newval |= (value & 0x0800) << 15;
21543
              newval |= (value & 0x0700) << 4;
21544
              newval |= (value & 0x00ff);
21545
              put_thumb32_insn (buf, newval);
21546
            }
21547
          else
21548
            {
21549
              newval = md_chars_to_number (buf, 4);
21550
              newval &= 0xfff0f000;
21551
              newval |= value & 0x0fff;
21552
              newval |= (value & 0xf000) << 4;
21553
              md_number_to_chars (buf, newval, 4);
21554
            }
21555
        }
21556
      return;
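#if 0
      /* A minimal sketch of the Thumb-2 MOVW/MOVT immediate placement
         used above; the helper name is hypothetical.  The 16-bit value
         is scattered into the imm4:i:imm3:imm8 fields.  */
      static unsigned int
      scatter_t32_mov_imm16 (unsigned int insn, unsigned int value)
      {
        insn &= 0xfbf08f00;              /* Clear imm4, i, imm3, imm8.  */
        insn |= (value & 0xf000) << 4;   /* imm4 -> bits 19-16.  */
        insn |= (value & 0x0800) << 15;  /* i    -> bit  26.     */
        insn |= (value & 0x0700) << 4;   /* imm3 -> bits 14-12.  */
        insn |= (value & 0x00ff);        /* imm8 -> bits  7-0.   */
        return insn;
      }
#endif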
21557
 
21558
   case BFD_RELOC_ARM_ALU_PC_G0_NC:
21559
   case BFD_RELOC_ARM_ALU_PC_G0:
21560
   case BFD_RELOC_ARM_ALU_PC_G1_NC:
21561
   case BFD_RELOC_ARM_ALU_PC_G1:
21562
   case BFD_RELOC_ARM_ALU_PC_G2:
21563
   case BFD_RELOC_ARM_ALU_SB_G0_NC:
21564
   case BFD_RELOC_ARM_ALU_SB_G0:
21565
   case BFD_RELOC_ARM_ALU_SB_G1_NC:
21566
   case BFD_RELOC_ARM_ALU_SB_G1:
21567
   case BFD_RELOC_ARM_ALU_SB_G2:
21568
     gas_assert (!fixP->fx_done);
21569
     if (!seg->use_rela_p)
21570
       {
21571
         bfd_vma insn;
21572
         bfd_vma encoded_addend;
21573
         bfd_vma addend_abs = abs (value);
21574
 
21575
         /* Check that the absolute value of the addend can be
21576
            expressed as an 8-bit constant plus a rotation.  */
21577
         encoded_addend = encode_arm_immediate (addend_abs);
21578
         if (encoded_addend == (unsigned int) FAIL)
21579
           as_bad_where (fixP->fx_file, fixP->fx_line,
21580
                         _("the offset 0x%08lX is not representable"),
21581
                         (unsigned long) addend_abs);
21582
 
21583
         /* Extract the instruction.  */
21584
         insn = md_chars_to_number (buf, INSN_SIZE);
21585
 
21586
         /* If the addend is positive, use an ADD instruction.
21587
            Otherwise use a SUB.  Take care not to destroy the S bit.  */
21588
         insn &= 0xff1fffff;
21589
         if (value < 0)
21590
           insn |= 1 << 22;
21591
         else
21592
           insn |= 1 << 23;
21593
 
21594
         /* Place the encoded addend into the first 12 bits of the
21595
            instruction.  */
21596
         insn &= 0xfffff000;
21597
         insn |= encoded_addend;
21598
 
21599
         /* Update the instruction.  */
21600
         md_number_to_chars (buf, insn, INSN_SIZE);
21601
       }
21602
     break;
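#if 0
     /* A minimal sketch, under the usual ARM encoding rules, of the
        "8-bit constant plus a rotation" test that encode_arm_immediate
        performs above; the helper below is hypothetical.  A
        data-processing immediate is an 8-bit value rotated right by an
        even amount, so a constant is representable exactly when some
        even left-rotation of it fits in eight bits.  */
     static int
     arm_dp_immediate_ok (unsigned int v)
     {
       unsigned int rot;

       for (rot = 0; rot < 32; rot += 2)
         {
           unsigned int rotated
             = rot == 0 ? v : ((v << rot) | (v >> (32 - rot)));

           if (rotated <= 0xff)
             return 1;
         }
       return 0;
     }
#endif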
21603
 
21604
    case BFD_RELOC_ARM_LDR_PC_G0:
21605
    case BFD_RELOC_ARM_LDR_PC_G1:
21606
    case BFD_RELOC_ARM_LDR_PC_G2:
21607
    case BFD_RELOC_ARM_LDR_SB_G0:
21608
    case BFD_RELOC_ARM_LDR_SB_G1:
21609
    case BFD_RELOC_ARM_LDR_SB_G2:
21610
      gas_assert (!fixP->fx_done);
21611
      if (!seg->use_rela_p)
21612
        {
21613
          bfd_vma insn;
21614
          bfd_vma addend_abs = abs (value);
21615
 
21616
          /* Check that the absolute value of the addend can be
21617
             encoded in 12 bits.  */
21618
          if (addend_abs >= 0x1000)
21619
            as_bad_where (fixP->fx_file, fixP->fx_line,
21620
                          _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
21621
                          (unsigned long) addend_abs);
21622
 
21623
          /* Extract the instruction.  */
21624
          insn = md_chars_to_number (buf, INSN_SIZE);
21625
 
21626
          /* If the addend is negative, clear bit 23 of the instruction.
21627
             Otherwise set it.  */
21628
          if (value < 0)
21629
            insn &= ~(1 << 23);
21630
          else
21631
            insn |= 1 << 23;
21632
 
21633
          /* Place the absolute value of the addend into the first 12 bits
21634
             of the instruction.  */
21635
          insn &= 0xfffff000;
21636
          insn |= addend_abs;
21637
 
21638
          /* Update the instruction.  */
21639
          md_number_to_chars (buf, insn, INSN_SIZE);
21640
        }
21641
      break;
21642
 
21643
    case BFD_RELOC_ARM_LDRS_PC_G0:
21644
    case BFD_RELOC_ARM_LDRS_PC_G1:
21645
    case BFD_RELOC_ARM_LDRS_PC_G2:
21646
    case BFD_RELOC_ARM_LDRS_SB_G0:
21647
    case BFD_RELOC_ARM_LDRS_SB_G1:
21648
    case BFD_RELOC_ARM_LDRS_SB_G2:
21649
      gas_assert (!fixP->fx_done);
21650
      if (!seg->use_rela_p)
21651
        {
21652
          bfd_vma insn;
21653
          bfd_vma addend_abs = abs (value);
21654
 
21655
          /* Check that the absolute value of the addend can be
21656
             encoded in 8 bits.  */
21657
          if (addend_abs >= 0x100)
21658
            as_bad_where (fixP->fx_file, fixP->fx_line,
21659
                          _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
21660
                          (unsigned long) addend_abs);
21661
 
21662
          /* Extract the instruction.  */
21663
          insn = md_chars_to_number (buf, INSN_SIZE);
21664
 
21665
          /* If the addend is negative, clear bit 23 of the instruction.
21666
             Otherwise set it.  */
21667
          if (value < 0)
21668
            insn &= ~(1 << 23);
21669
          else
21670
            insn |= 1 << 23;
21671
 
21672
          /* Place the first four bits of the absolute value of the addend
21673
             into the first 4 bits of the instruction, and the remaining
21674
             four into bits 8 .. 11.  */
21675
          insn &= 0xfffff0f0;
21676
          insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
21677
 
21678
          /* Update the instruction.  */
21679
          md_number_to_chars (buf, insn, INSN_SIZE);
21680
        }
21681
      break;
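#if 0
      /* A minimal sketch of the LDRS-group addend placement above; the
         helper name is hypothetical.  The 8-bit magnitude is split into
         the imm4L field (bits 3-0) and the imm4H field (bits 11-8).  */
      static unsigned int
      place_ldrs_addend (unsigned int insn, unsigned int addend_abs)
      {
        insn &= 0xfffff0f0;
        return insn | (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
      }
#endif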
21682
 
21683
    case BFD_RELOC_ARM_LDC_PC_G0:
21684
    case BFD_RELOC_ARM_LDC_PC_G1:
21685
    case BFD_RELOC_ARM_LDC_PC_G2:
21686
    case BFD_RELOC_ARM_LDC_SB_G0:
21687
    case BFD_RELOC_ARM_LDC_SB_G1:
21688
    case BFD_RELOC_ARM_LDC_SB_G2:
21689
      gas_assert (!fixP->fx_done);
21690
      if (!seg->use_rela_p)
21691
        {
21692
          bfd_vma insn;
21693
          bfd_vma addend_abs = abs (value);
21694
 
21695
          /* Check that the absolute value of the addend is a multiple of
21696
             four and, when divided by four, fits in 8 bits.  */
21697
          if (addend_abs & 0x3)
21698
            as_bad_where (fixP->fx_file, fixP->fx_line,
21699
                          _("bad offset 0x%08lX (must be word-aligned)"),
21700
                          (unsigned long) addend_abs);
21701
 
21702
          if ((addend_abs >> 2) > 0xff)
21703
            as_bad_where (fixP->fx_file, fixP->fx_line,
21704
                          _("bad offset 0x%08lX (must be an 8-bit number of words)"),
21705
                          (unsigned long) addend_abs);
21706
 
21707
          /* Extract the instruction.  */
21708
          insn = md_chars_to_number (buf, INSN_SIZE);
21709
 
21710
          /* If the addend is negative, clear bit 23 of the instruction.
21711
             Otherwise set it.  */
21712
          if (value < 0)
21713
            insn &= ~(1 << 23);
21714
          else
21715
            insn |= 1 << 23;
21716
 
21717
          /* Place the addend (divided by four) into the first eight
21718
             bits of the instruction.  */
21719
          insn &= 0xfffffff0;
21720
          insn |= addend_abs >> 2;
21721
 
21722
          /* Update the instruction.  */
21723
          md_number_to_chars (buf, insn, INSN_SIZE);
21724
        }
21725
      break;
21726
 
21727
    case BFD_RELOC_ARM_V4BX:
21728
      /* This will need to go in the object file.  */
21729
      fixP->fx_done = 0;
21730
      break;
21731
 
21732
    case BFD_RELOC_UNUSED:
21733
    default:
21734
      as_bad_where (fixP->fx_file, fixP->fx_line,
21735
                    _("bad relocation fixup type (%d)"), fixP->fx_r_type);
21736
    }
21737
}
21738
 
21739
/* Translate internal representation of relocation info to BFD target
21740
   format.  */
21741
 
21742
arelent *
21743
tc_gen_reloc (asection *section, fixS *fixp)
21744
{
21745
  arelent * reloc;
21746
  bfd_reloc_code_real_type code;
21747
 
21748
  reloc = (arelent *) xmalloc (sizeof (arelent));
21749
 
21750
  reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
21751
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
21752
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
21753
 
21754
  if (fixp->fx_pcrel)
21755
    {
21756
      if (section->use_rela_p)
21757
        fixp->fx_offset -= md_pcrel_from_section (fixp, section);
21758
      else
21759
        fixp->fx_offset = reloc->address;
21760
    }
21761
  reloc->addend = fixp->fx_offset;
21762
 
21763
  switch (fixp->fx_r_type)
21764
    {
21765
    case BFD_RELOC_8:
21766
      if (fixp->fx_pcrel)
21767
        {
21768
          code = BFD_RELOC_8_PCREL;
21769
          break;
21770
        }
21771
 
21772
    case BFD_RELOC_16:
21773
      if (fixp->fx_pcrel)
21774
        {
21775
          code = BFD_RELOC_16_PCREL;
21776
          break;
21777
        }
21778
 
21779
    case BFD_RELOC_32:
21780
      if (fixp->fx_pcrel)
21781
        {
21782
          code = BFD_RELOC_32_PCREL;
21783
          break;
21784
        }
21785
 
21786
    case BFD_RELOC_ARM_MOVW:
21787
      if (fixp->fx_pcrel)
21788
        {
21789
          code = BFD_RELOC_ARM_MOVW_PCREL;
21790
          break;
21791
        }
21792
 
21793
    case BFD_RELOC_ARM_MOVT:
21794
      if (fixp->fx_pcrel)
21795
        {
21796
          code = BFD_RELOC_ARM_MOVT_PCREL;
21797
          break;
21798
        }
21799
 
21800
    case BFD_RELOC_ARM_THUMB_MOVW:
21801
      if (fixp->fx_pcrel)
21802
        {
21803
          code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
21804
          break;
21805
        }
21806
 
21807
    case BFD_RELOC_ARM_THUMB_MOVT:
21808
      if (fixp->fx_pcrel)
21809
        {
21810
          code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
21811
          break;
21812
        }
21813
 
21814
    case BFD_RELOC_NONE:
21815
    case BFD_RELOC_ARM_PCREL_BRANCH:
21816
    case BFD_RELOC_ARM_PCREL_BLX:
21817
    case BFD_RELOC_RVA:
21818
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
21819
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
21820
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
21821
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
21822
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
21823
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
21824
    case BFD_RELOC_VTABLE_ENTRY:
21825
    case BFD_RELOC_VTABLE_INHERIT:
21826
#ifdef TE_PE
21827
    case BFD_RELOC_32_SECREL:
21828
#endif
21829
      code = fixp->fx_r_type;
21830
      break;
21831
 
21832
    case BFD_RELOC_THUMB_PCREL_BLX:
21833
#ifdef OBJ_ELF
21834
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
21835
        code = BFD_RELOC_THUMB_PCREL_BRANCH23;
21836
      else
21837
#endif
21838
        code = BFD_RELOC_THUMB_PCREL_BLX;
21839
      break;
21840
 
21841
    case BFD_RELOC_ARM_LITERAL:
21842
    case BFD_RELOC_ARM_HWLITERAL:
21843
      /* If this is called then a literal has
21844
         been referenced across a section boundary.  */
21845
      as_bad_where (fixp->fx_file, fixp->fx_line,
21846
                    _("literal referenced across section boundary"));
21847
      return NULL;
21848
 
21849
#ifdef OBJ_ELF
21850
    case BFD_RELOC_ARM_TLS_CALL:
21851
    case BFD_RELOC_ARM_THM_TLS_CALL:
21852
    case BFD_RELOC_ARM_TLS_DESCSEQ:
21853
    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
21854
    case BFD_RELOC_ARM_GOT32:
21855
    case BFD_RELOC_ARM_GOTOFF:
21856
    case BFD_RELOC_ARM_GOT_PREL:
21857
    case BFD_RELOC_ARM_PLT32:
21858
    case BFD_RELOC_ARM_TARGET1:
21859
    case BFD_RELOC_ARM_ROSEGREL32:
21860
    case BFD_RELOC_ARM_SBREL32:
21861
    case BFD_RELOC_ARM_PREL31:
21862
    case BFD_RELOC_ARM_TARGET2:
21863
    case BFD_RELOC_ARM_TLS_LE32:
21864
    case BFD_RELOC_ARM_TLS_LDO32:
21865
    case BFD_RELOC_ARM_PCREL_CALL:
21866
    case BFD_RELOC_ARM_PCREL_JUMP:
21867
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
21868
    case BFD_RELOC_ARM_ALU_PC_G0:
21869
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
21870
    case BFD_RELOC_ARM_ALU_PC_G1:
21871
    case BFD_RELOC_ARM_ALU_PC_G2:
21872
    case BFD_RELOC_ARM_LDR_PC_G0:
21873
    case BFD_RELOC_ARM_LDR_PC_G1:
21874
    case BFD_RELOC_ARM_LDR_PC_G2:
21875
    case BFD_RELOC_ARM_LDRS_PC_G0:
21876
    case BFD_RELOC_ARM_LDRS_PC_G1:
21877
    case BFD_RELOC_ARM_LDRS_PC_G2:
21878
    case BFD_RELOC_ARM_LDC_PC_G0:
21879
    case BFD_RELOC_ARM_LDC_PC_G1:
21880
    case BFD_RELOC_ARM_LDC_PC_G2:
21881
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
21882
    case BFD_RELOC_ARM_ALU_SB_G0:
21883
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
21884
    case BFD_RELOC_ARM_ALU_SB_G1:
21885
    case BFD_RELOC_ARM_ALU_SB_G2:
21886
    case BFD_RELOC_ARM_LDR_SB_G0:
21887
    case BFD_RELOC_ARM_LDR_SB_G1:
21888
    case BFD_RELOC_ARM_LDR_SB_G2:
21889
    case BFD_RELOC_ARM_LDRS_SB_G0:
21890
    case BFD_RELOC_ARM_LDRS_SB_G1:
21891
    case BFD_RELOC_ARM_LDRS_SB_G2:
21892
    case BFD_RELOC_ARM_LDC_SB_G0:
21893
    case BFD_RELOC_ARM_LDC_SB_G1:
21894
    case BFD_RELOC_ARM_LDC_SB_G2:
21895
    case BFD_RELOC_ARM_V4BX:
21896
      code = fixp->fx_r_type;
21897
      break;
21898
 
21899
    case BFD_RELOC_ARM_TLS_GOTDESC:
21900
    case BFD_RELOC_ARM_TLS_GD32:
21901
    case BFD_RELOC_ARM_TLS_IE32:
21902
    case BFD_RELOC_ARM_TLS_LDM32:
21903
      /* BFD will include the symbol's address in the addend.
21904
         But we don't want that, so subtract it out again here.  */
21905
      if (!S_IS_COMMON (fixp->fx_addsy))
21906
        reloc->addend -= (*reloc->sym_ptr_ptr)->value;
21907
      code = fixp->fx_r_type;
21908
      break;
21909
#endif
21910
 
21911
    case BFD_RELOC_ARM_IMMEDIATE:
21912
      as_bad_where (fixp->fx_file, fixp->fx_line,
21913
                    _("internal relocation (type: IMMEDIATE) not fixed up"));
21914
      return NULL;
21915
 
21916
    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
21917
      as_bad_where (fixp->fx_file, fixp->fx_line,
21918
                    _("ADRL used for a symbol not defined in the same file"));
21919
      return NULL;
21920
 
21921
    case BFD_RELOC_ARM_OFFSET_IMM:
21922
      if (section->use_rela_p)
21923
        {
21924
          code = fixp->fx_r_type;
21925
          break;
21926
        }
21927
 
21928
      if (fixp->fx_addsy != NULL
21929
          && !S_IS_DEFINED (fixp->fx_addsy)
21930
          && S_IS_LOCAL (fixp->fx_addsy))
21931
        {
21932
          as_bad_where (fixp->fx_file, fixp->fx_line,
21933
                        _("undefined local label `%s'"),
21934
                        S_GET_NAME (fixp->fx_addsy));
21935
          return NULL;
21936
        }
21937
 
21938
      as_bad_where (fixp->fx_file, fixp->fx_line,
21939
                    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
21940
      return NULL;
21941
 
21942
    default:
21943
      {
21944
        char * type;
21945
 
21946
        switch (fixp->fx_r_type)
21947
          {
21948
          case BFD_RELOC_NONE:             type = "NONE";         break;
21949
          case BFD_RELOC_ARM_OFFSET_IMM8:  type = "OFFSET_IMM8";  break;
21950
          case BFD_RELOC_ARM_SHIFT_IMM:    type = "SHIFT_IMM";    break;
21951
          case BFD_RELOC_ARM_SMC:          type = "SMC";          break;
21952
          case BFD_RELOC_ARM_SWI:          type = "SWI";          break;
21953
          case BFD_RELOC_ARM_MULTI:        type = "MULTI";        break;
21954
          case BFD_RELOC_ARM_CP_OFF_IMM:   type = "CP_OFF_IMM";   break;
21955
          case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
21956
          case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
21957
          case BFD_RELOC_ARM_THUMB_ADD:    type = "THUMB_ADD";    break;
21958
          case BFD_RELOC_ARM_THUMB_SHIFT:  type = "THUMB_SHIFT";  break;
21959
          case BFD_RELOC_ARM_THUMB_IMM:    type = "THUMB_IMM";    break;
21960
          case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
21961
          default:                         type = _("<unknown>"); break;
21962
          }
21963
        as_bad_where (fixp->fx_file, fixp->fx_line,
21964
                      _("cannot represent %s relocation in this object file format"),
21965
                      type);
21966
        return NULL;
21967
      }
21968
    }
21969
 
21970
#ifdef OBJ_ELF
21971
  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
21972
      && GOT_symbol
21973
      && fixp->fx_addsy == GOT_symbol)
21974
    {
21975
      code = BFD_RELOC_ARM_GOTPC;
21976
      reloc->addend = fixp->fx_offset = reloc->address;
21977
    }
21978
#endif
21979
 
21980
  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
21981
 
21982
  if (reloc->howto == NULL)
21983
    {
21984
      as_bad_where (fixp->fx_file, fixp->fx_line,
21985
                    _("cannot represent %s relocation in this object file format"),
21986
                    bfd_get_reloc_code_name (code));
21987
      return NULL;
21988
    }
21989
 
21990
  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
21991
     vtable entry to be used in the relocation's section offset.  */
21992
  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
21993
    reloc->address = fixp->fx_offset;
21994
 
21995
  return reloc;
21996
}
21997
 
21998
/* This fix_new is called by cons via TC_CONS_FIX_NEW.  */
21999
 
22000
void
22001
cons_fix_new_arm (fragS *       frag,
22002
                  int           where,
22003
                  int           size,
22004
                  expressionS * exp)
22005
{
22006
  bfd_reloc_code_real_type type;
22007
  int pcrel = 0;
22008
 
22009
  /* Pick a reloc.
22010
     FIXME: @@ Should look at CPU word size.  */
22011
  switch (size)
22012
    {
22013
    case 1:
22014
      type = BFD_RELOC_8;
22015
      break;
22016
    case 2:
22017
      type = BFD_RELOC_16;
22018
      break;
22019
    case 4:
22020
    default:
22021
      type = BFD_RELOC_32;
22022
      break;
22023
    case 8:
22024
      type = BFD_RELOC_64;
22025
      break;
22026
    }
22027
 
22028
#ifdef TE_PE
22029
  if (exp->X_op == O_secrel)
22030
  {
22031
    exp->X_op = O_symbol;
22032
    type = BFD_RELOC_32_SECREL;
22033
  }
22034
#endif
22035
 
22036
  fix_new_exp (frag, where, (int) size, exp, pcrel, type);
22037
}
22038
 
22039
#if defined (OBJ_COFF)
22040
void
22041
arm_validate_fix (fixS * fixP)
22042
{
22043
  /* If the destination of the branch is a defined symbol which does not have
22044
     the THUMB_FUNC attribute, then we must be calling a function which has
22045
     the (interfacearm) attribute.  We look for the Thumb entry point to that
22046
     function and change the branch to refer to that function instead.  */
22047
  if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
22048
      && fixP->fx_addsy != NULL
22049
      && S_IS_DEFINED (fixP->fx_addsy)
22050
      && ! THUMB_IS_FUNC (fixP->fx_addsy))
22051
    {
22052
      fixP->fx_addsy = find_real_start (fixP->fx_addsy);
22053
    }
22054
}
22055
#endif
22056
 
22057
 
22058
int
22059
arm_force_relocation (struct fix * fixp)
22060
{
22061
#if defined (OBJ_COFF) && defined (TE_PE)
22062
  if (fixp->fx_r_type == BFD_RELOC_RVA)
22063
    return 1;
22064
#endif
22065
 
22066
  /* If we have a call or a branch to a function in ARM ISA mode from
22067
     a Thumb function, or vice versa, force the relocation.  These
22068
     relocations are cleared for cores that have BLX, where simple
22069
     transformations are possible.  */
22070
 
22071
#ifdef OBJ_ELF
22072
  switch (fixp->fx_r_type)
22073
    {
22074
    case BFD_RELOC_ARM_PCREL_JUMP:
22075
    case BFD_RELOC_ARM_PCREL_CALL:
22076
    case BFD_RELOC_THUMB_PCREL_BLX:
22077
      if (THUMB_IS_FUNC (fixp->fx_addsy))
22078
        return 1;
22079
      break;
22080
 
22081
    case BFD_RELOC_ARM_PCREL_BLX:
22082
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
22083
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
22084
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
22085
      if (ARM_IS_FUNC (fixp->fx_addsy))
22086
        return 1;
22087
      break;
22088
 
22089
    default:
22090
      break;
22091
    }
22092
#endif
22093
 
22094
  /* Resolve these relocations even if the symbol is extern or weak.
22095
     Technically this is probably wrong due to symbol preemption.
22096
     In practice these relocations do not have enough range to be useful
22097
     at dynamic link time, and some code (e.g. in the Linux kernel)
22098
     expects these references to be resolved.  */
22099
  if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
22100
      || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
22101
      || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
22102
      || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
22103
      || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
22104
      || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
22105
      || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
22106
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
22107
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
22108
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
22109
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
22110
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
22111
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
22112
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
22113
    return 0;
22114
 
22115
  /* Always leave these relocations for the linker.  */
22116
  if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
22117
       && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
22118
      || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
22119
    return 1;
22120
 
22121
  /* Always generate relocations against function symbols.  */
22122
  if (fixp->fx_r_type == BFD_RELOC_32
22123
      && fixp->fx_addsy
22124
      && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
22125
    return 1;
22126
 
22127
  return generic_force_reloc (fixp);
22128
}
22129
 
22130
#if defined (OBJ_ELF) || defined (OBJ_COFF)
22131
/* Relocations against function names must be left unadjusted,
22132
   so that the linker can use this information to generate interworking
22133
   stubs.  The MIPS version of this function
22134
   also prevents relocations that are mips-16 specific, but I do not
22135
   know why it does this.
22136
 
22137
   FIXME:
22138
   There is one other problem that ought to be addressed here, but
22139
   which currently is not:  Taking the address of a label (rather
22140
   than a function) and then later jumping to that address.  Such
22141
   addresses also ought to have their bottom bit set (assuming that
22142
   they reside in Thumb code), but at the moment they will not.  */
22143
 
22144
bfd_boolean
22145
arm_fix_adjustable (fixS * fixP)
22146
{
22147
  if (fixP->fx_addsy == NULL)
22148
    return 1;
22149
 
22150
  /* Preserve relocations against symbols with function type.  */
22151
  if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
22152
    return FALSE;
22153
 
22154
  if (THUMB_IS_FUNC (fixP->fx_addsy)
22155
      && fixP->fx_subsy == NULL)
22156
    return FALSE;
22157
 
22158
  /* We need the symbol name for the VTABLE entries.  */
22159
  if (   fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
22160
      || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
22161
    return FALSE;
22162
 
22163
  /* Don't allow symbols to be discarded on GOT related relocs.  */
22164
  if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
22165
      || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
22166
      || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
22167
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
22168
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
22169
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
22170
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
22171
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
22172
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
22173
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
22174
      || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
22175
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
22176
      || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
22177
      || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
22178
    return FALSE;
22179
 
22180
  /* Similarly for group relocations.  */
22181
  if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
22182
       && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
22183
      || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
22184
    return FALSE;
22185
 
22186
  /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols.  */
22187
  if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
22188
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
22189
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
22190
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
22191
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
22192
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
22193
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
22194
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
22195
    return FALSE;
22196
 
22197
  return TRUE;
22198
}
22199
#endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
22200
 
22201
#ifdef OBJ_ELF
22202
 
22203
const char *
22204
elf32_arm_target_format (void)
22205
{
22206
#ifdef TE_SYMBIAN
22207
  return (target_big_endian
22208
          ? "elf32-bigarm-symbian"
22209
          : "elf32-littlearm-symbian");
22210
#elif defined (TE_VXWORKS)
22211
  return (target_big_endian
22212
          ? "elf32-bigarm-vxworks"
22213
          : "elf32-littlearm-vxworks");
22214
#else
22215
  if (target_big_endian)
22216
    return "elf32-bigarm";
22217
  else
22218
    return "elf32-littlearm";
22219
#endif
22220
}
22221
 
22222
void
22223
armelf_frob_symbol (symbolS * symp,
22224
                    int *     puntp)
22225
{
22226
  elf_frob_symbol (symp, puntp);
22227
}
22228
#endif
22229
 
22230
/* MD interface: Finalization.  */
22231
 
22232
void
22233
arm_cleanup (void)
22234
{
22235
  literal_pool * pool;
22236
 
22237
  /* Ensure that all the IT blocks are properly closed.  */
22238
  check_it_blocks_finished ();
22239
 
22240
  for (pool = list_of_pools; pool; pool = pool->next)
22241
    {
22242
      /* Put it at the end of the relevant section.  */
22243
      subseg_set (pool->section, pool->sub_section);
22244
#ifdef OBJ_ELF
22245
      arm_elf_change_section ();
22246
#endif
22247
      s_ltorg (0);
22248
    }
22249
}
22250
 
22251
#ifdef OBJ_ELF
22252
/* Remove any excess mapping symbols generated for alignment frags in
22253
   SEC.  We may have created a mapping symbol before a zero byte
22254
   alignment; remove it if there's a mapping symbol after the
22255
   alignment.  */
22256
static void
22257
check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
22258
                       void *dummy ATTRIBUTE_UNUSED)
22259
{
22260
  segment_info_type *seginfo = seg_info (sec);
22261
  fragS *fragp;
22262
 
22263
  if (seginfo == NULL || seginfo->frchainP == NULL)
22264
    return;
22265
 
22266
  for (fragp = seginfo->frchainP->frch_root;
22267
       fragp != NULL;
22268
       fragp = fragp->fr_next)
22269
    {
22270
      symbolS *sym = fragp->tc_frag_data.last_map;
22271
      fragS *next = fragp->fr_next;
22272
 
22273
      /* Variable-sized frags have been converted to fixed size by
22274
         this point.  But if this was variable-sized to start with,
22275
         there will be a fixed-size frag after it.  So don't handle
22276
         next == NULL.  */
22277
      if (sym == NULL || next == NULL)
22278
        continue;
22279
 
22280
      if (S_GET_VALUE (sym) < next->fr_address)
22281
        /* Not at the end of this frag.  */
22282
        continue;
22283
      know (S_GET_VALUE (sym) == next->fr_address);
22284
 
22285
      do
22286
        {
22287
          if (next->tc_frag_data.first_map != NULL)
22288
            {
22289
              /* Next frag starts with a mapping symbol.  Discard this
22290
                 one.  */
22291
              symbol_remove (sym, &symbol_rootP, &symbol_lastP);
22292
              break;
22293
            }
22294
 
22295
          if (next->fr_next == NULL)
22296
            {
22297
              /* This mapping symbol is at the end of the section.  Discard
22298
                 it.  */
22299
              know (next->fr_fix == 0 && next->fr_var == 0);
22300
              symbol_remove (sym, &symbol_rootP, &symbol_lastP);
22301
              break;
22302
            }
22303
 
22304
          /* As long as we have empty frags without any mapping symbols,
22305
             keep looking.  */
22306
          /* If the next frag is non-empty and does not start with a
22307
             mapping symbol, then this mapping symbol is required.  */
22308
          if (next->fr_address != next->fr_next->fr_address)
22309
            break;
22310
 
22311
          next = next->fr_next;
22312
        }
22313
      while (next != NULL);
22314
    }
22315
}
22316
#endif
22317
 
22318
/* Adjust the symbol table.  This marks Thumb symbols as distinct from
22319
   ARM ones.  */
22320
 
22321
void
22322
arm_adjust_symtab (void)
22323
{
22324
#ifdef OBJ_COFF
22325
  symbolS * sym;
22326
 
22327
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
22328
    {
22329
      if (ARM_IS_THUMB (sym))
22330
        {
22331
          if (THUMB_IS_FUNC (sym))
22332
            {
22333
              /* Mark the symbol as a Thumb function.  */
22334
              if (   S_GET_STORAGE_CLASS (sym) == C_STAT
22335
                  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!  */
22336
                S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
22337
 
22338
              else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
22339
                S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
22340
              else
22341
                as_bad (_("%s: unexpected function type: %d"),
22342
                        S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
22343
            }
22344
          else switch (S_GET_STORAGE_CLASS (sym))
22345
            {
22346
            case C_EXT:
22347
              S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
22348
              break;
22349
            case C_STAT:
22350
              S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
22351
              break;
22352
            case C_LABEL:
22353
              S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
22354
              break;
22355
            default:
22356
              /* Do nothing.  */
22357
              break;
22358
            }
22359
        }
22360
 
22361
      if (ARM_IS_INTERWORK (sym))
22362
        coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
22363
    }
22364
#endif
22365
#ifdef OBJ_ELF
22366
  symbolS * sym;
22367
  char      bind;
22368
 
22369
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
22370
    {
22371
      if (ARM_IS_THUMB (sym))
22372
        {
22373
          elf_symbol_type * elf_sym;
22374
 
22375
          elf_sym = elf_symbol (symbol_get_bfdsym (sym));
22376
          bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);
22377
 
22378
          if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
22379
                BFD_ARM_SPECIAL_SYM_TYPE_ANY))
22380
            {
22381
              /* If it's a .thumb_func, declare it as such,
22382
                 otherwise tag label as .code 16.  */
22383
              if (THUMB_IS_FUNC (sym))
22384
                elf_sym->internal_elf_sym.st_target_internal
22385
                  = ST_BRANCH_TO_THUMB;
22386
              else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
22387
                elf_sym->internal_elf_sym.st_info =
22388
                  ELF_ST_INFO (bind, STT_ARM_16BIT);
22389
            }
22390
        }
22391
    }
22392
 
22393
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
22394
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
22395
  /* Now do generic ELF adjustments.  */
22396
  elf_adjust_symtab ();
22397
#endif
22398
}
22399
 
22400
/* MD interface: Initialization.  */
22401
 
22402
static void
22403
set_constant_flonums (void)
22404
{
22405
  int i;
22406
 
22407
  for (i = 0; i < NUM_FLOAT_VALS; i++)
22408
    if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
22409
      abort ();
22410
}
22411
 
22412
/* Auto-select Thumb mode if it's the only available instruction set for the
22413
   given architecture.  */
22414
 
22415
static void
22416
autoselect_thumb_from_cpu_variant (void)
22417
{
22418
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
22419
    opcode_select (16);
22420
}
22421
 
22422
void
22423
md_begin (void)
22424
{
22425
  unsigned mach;
22426
  unsigned int i;
22427
 
22428
  if (   (arm_ops_hsh = hash_new ()) == NULL
22429
      || (arm_cond_hsh = hash_new ()) == NULL
22430
      || (arm_shift_hsh = hash_new ()) == NULL
22431
      || (arm_psr_hsh = hash_new ()) == NULL
22432
      || (arm_v7m_psr_hsh = hash_new ()) == NULL
22433
      || (arm_reg_hsh = hash_new ()) == NULL
22434
      || (arm_reloc_hsh = hash_new ()) == NULL
22435
      || (arm_barrier_opt_hsh = hash_new ()) == NULL)
22436
    as_fatal (_("virtual memory exhausted"));
22437
 
22438
  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
22439
    hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
22440
  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
22441
    hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
22442
  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
22443
    hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
22444
  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
22445
    hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
22446
  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
22447
    hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
22448
                 (void *) (v7m_psrs + i));
22449
  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
22450
    hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
22451
  for (i = 0;
22452
       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
22453
       i++)
22454
    hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
22455
                 (void *) (barrier_opt_names + i));
22456
#ifdef OBJ_ELF
22457 163 khays
  for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
22458
    {
22459
      struct reloc_entry * entry = reloc_names + i;
22460
 
22461
      if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
22462
        /* This makes encode_branch() use the EABI version of this relocation.  */
22463
        entry->reloc = BFD_RELOC_UNUSED;
22464
 
22465
      hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
22466
    }
22467 16 khays
#endif
22468
 
22469
  set_constant_flonums ();
22470
 
22471
  /* Set the cpu variant based on the command-line options.  We prefer
22472
     -mcpu= over -march= if both are set (as for GCC); and we prefer
22473
     -mfpu= over any other way of setting the floating point unit.
22474
     Use of legacy options together with new options is faulted.  */
22475
  if (legacy_cpu)
22476
    {
22477
      if (mcpu_cpu_opt || march_cpu_opt)
22478
        as_bad (_("use of old and new-style options to set CPU type"));
22479
 
22480
      mcpu_cpu_opt = legacy_cpu;
22481
    }
22482
  else if (!mcpu_cpu_opt)
22483
    mcpu_cpu_opt = march_cpu_opt;
22484
 
22485
  if (legacy_fpu)
22486
    {
22487
      if (mfpu_opt)
22488
        as_bad (_("use of old and new-style options to set FPU type"));
22489
 
22490
      mfpu_opt = legacy_fpu;
22491
    }
22492
  else if (!mfpu_opt)
22493
    {
22494
#if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
22495
        || defined (TE_NetBSD) || defined (TE_VXWORKS))
22496
      /* Some environments specify a default FPU.  If they don't, infer it
22497
         from the processor.  */
22498
      if (mcpu_fpu_opt)
22499
        mfpu_opt = mcpu_fpu_opt;
22500
      else
22501
        mfpu_opt = march_fpu_opt;
22502
#else
22503
      mfpu_opt = &fpu_default;
22504
#endif
22505
    }
22506
 
22507
  if (!mfpu_opt)
22508
    {
22509
      if (mcpu_cpu_opt != NULL)
22510
        mfpu_opt = &fpu_default;
22511
      else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
22512
        mfpu_opt = &fpu_arch_vfp_v2;
22513
      else
22514
        mfpu_opt = &fpu_arch_fpa;
22515
    }
22516
 
22517
#ifdef CPU_DEFAULT
22518
  if (!mcpu_cpu_opt)
22519
    {
22520
      mcpu_cpu_opt = &cpu_default;
22521
      selected_cpu = cpu_default;
22522
    }
22523
#else
22524
  if (mcpu_cpu_opt)
22525
    selected_cpu = *mcpu_cpu_opt;
22526
  else
22527
    mcpu_cpu_opt = &arm_arch_any;
22528
#endif
22529
 
22530
  ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
22531
 
22532
  autoselect_thumb_from_cpu_variant ();
22533
 
22534
  arm_arch_used = thumb_arch_used = arm_arch_none;
22535
 
22536
#if defined OBJ_COFF || defined OBJ_ELF
22537
  {
22538
    unsigned int flags = 0;
22539
 
22540
#if defined OBJ_ELF
22541
    flags = meabi_flags;
22542
 
22543
    switch (meabi_flags)
22544
      {
22545
      case EF_ARM_EABI_UNKNOWN:
22546
#endif
22547
        /* Set the flags in the private structure.  */
22548
        if (uses_apcs_26)      flags |= F_APCS26;
22549
        if (support_interwork) flags |= F_INTERWORK;
22550
        if (uses_apcs_float)   flags |= F_APCS_FLOAT;
22551
        if (pic_code)          flags |= F_PIC;
22552
        if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
22553
          flags |= F_SOFT_FLOAT;
22554
 
22555
        switch (mfloat_abi_opt)
22556
          {
22557
          case ARM_FLOAT_ABI_SOFT:
22558
          case ARM_FLOAT_ABI_SOFTFP:
22559
            flags |= F_SOFT_FLOAT;
22560
            break;
22561
 
22562
          case ARM_FLOAT_ABI_HARD:
22563
            if (flags & F_SOFT_FLOAT)
22564
              as_bad (_("hard-float conflicts with specified fpu"));
22565
            break;
22566
          }
22567
 
22568
        /* Using pure-endian doubles (even if soft-float).  */
22569
        if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
22570
          flags |= F_VFP_FLOAT;
22571
 
22572
#if defined OBJ_ELF
22573
        if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
22574
            flags |= EF_ARM_MAVERICK_FLOAT;
22575
        break;
22576
 
22577
      case EF_ARM_EABI_VER4:
22578
      case EF_ARM_EABI_VER5:
22579
        /* No additional flags to set.  */
22580
        break;
22581
 
22582
      default:
22583
        abort ();
22584
      }
22585
#endif
22586
    bfd_set_private_flags (stdoutput, flags);
22587
 
22588
    /* We have run out of flags in the COFF header to encode the
22589
       status of ATPCS support, so instead we create a dummy,
22590
       empty, debug section called .arm.atpcs.  */
22591
    if (atpcs)
22592
      {
22593
        asection * sec;
22594
 
22595
        sec = bfd_make_section (stdoutput, ".arm.atpcs");
22596
 
22597
        if (sec != NULL)
22598
          {
22599
            bfd_set_section_flags
22600
              (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
22601
            bfd_set_section_size (stdoutput, sec, 0);
22602
            bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
22603
          }
22604
      }
22605
  }
22606
#endif
22607
 
22608
  /* Record the CPU type as well.  */
22609
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
22610
    mach = bfd_mach_arm_iWMMXt2;
22611
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
22612
    mach = bfd_mach_arm_iWMMXt;
22613
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
22614
    mach = bfd_mach_arm_XScale;
22615
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
22616
    mach = bfd_mach_arm_ep9312;
22617
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
22618
    mach = bfd_mach_arm_5TE;
22619
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
22620
    {
22621
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
22622
        mach = bfd_mach_arm_5T;
22623
      else
22624
        mach = bfd_mach_arm_5;
22625
    }
22626
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
22627
    {
22628
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
22629
        mach = bfd_mach_arm_4T;
22630
      else
22631
        mach = bfd_mach_arm_4;
22632
    }
22633
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
22634
    mach = bfd_mach_arm_3M;
22635
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
22636
    mach = bfd_mach_arm_3;
22637
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
22638
    mach = bfd_mach_arm_2a;
22639
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
22640
    mach = bfd_mach_arm_2;
22641
  else
22642
    mach = bfd_mach_arm_unknown;
22643
 
22644
  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
22645
}
22646
 
22647
/* Command line processing.  */
22648
 
22649
/* md_parse_option
22650
      Invocation line includes a switch not recognized by the base assembler.
22651
      See if it's a processor-specific option.
22652
 
22653
      This routine is somewhat complicated by the need for backwards
22654
      compatibility (since older releases of gcc can't be changed).
22655
      The new options try to make the interface as compatible as
22656
      possible with GCC.
22657
 
22658
      New options (supported) are:
22659
 
22660
              -mcpu=<cpu name>           Assemble for selected processor
22661
              -march=<architecture name> Assemble for selected architecture
22662
              -mfpu=<fpu architecture>   Assemble for selected FPU.
22663
              -EB/-mbig-endian           Big-endian
22664
              -EL/-mlittle-endian        Little-endian
22665
              -k                         Generate PIC code
22666
              -mthumb                    Start in Thumb mode
22667
              -mthumb-interwork          Code supports ARM/Thumb interworking
22668
 
22669
              -m[no-]warn-deprecated     Warn about deprecated features
22670
 
22671
      For now we will also provide support for:
22672
 
22673
              -mapcs-32                  32-bit Program counter
22674
              -mapcs-26                  26-bit Program counter
22675
              -mapcs-float               Floats passed in FP registers
22676
              -mapcs-reentrant           Reentrant code
22677
              -matpcs
22678
      (at some point these will probably be replaced with -mapcs=<list of options>
22679
      and -matpcs=<list of options>)
22680
 
22681
      The remaining options are only supported for backwards compatibility.
22682
      Cpu variants (the arm part is optional):
22683
              -m[arm]1                Currently not supported.
22684
              -m[arm]2, -m[arm]250    Arm 2 and Arm 250 processor
22685
              -m[arm]3                Arm 3 processor
22686
              -m[arm]6[xx],           Arm 6 processors
22687
              -m[arm]7[xx][t][[d]m]   Arm 7 processors
22688
              -m[arm]8[10]            Arm 8 processors
22689
              -m[arm]9[20][tdmi]      Arm 9 processors
22690
              -mstrongarm[110[0]]     StrongARM processors
22691
              -mxscale                XScale processors
22692
              -m[arm]v[2345[t[e]]]    Arm architectures
22693
              -mall                   All (except the ARM1)
22694
      FP variants:
22695
              -mfpa10, -mfpa11        FPA10 and 11 co-processor instructions
22696
              -mfpe-old               (No float load/store multiples)
22697
              -mvfpxd                 VFP Single precision
22698
              -mvfp                   All VFP
22699
              -mno-fpu                Disable all floating point instructions
22700
 
22701
      The following CPU names are recognized:
22702
              arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
22703
              arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
22704
              arm700i, arm710, arm710t, arm720, arm720t, arm740t, arm710c,
22705
              arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
22706
              arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
22707
              arm10t, arm10e, arm1020t, arm1020e, arm10200e,
22708
              strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
22709
 
22710
      */
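/* For example (illustrative only; the assembler's program name depends
   on the configured target), assembling Thumb interworking code for a
   little-endian ARM7TDMI might use:

       arm-elf-as -mcpu=arm7tdmi -mthumb -mthumb-interwork -EL -o foo.o foo.s  */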
22711
 
22712
const char * md_shortopts = "m:k";
22713
 
22714
#ifdef ARM_BI_ENDIAN
22715
#define OPTION_EB (OPTION_MD_BASE + 0)
22716
#define OPTION_EL (OPTION_MD_BASE + 1)
22717
#else
22718
#if TARGET_BYTES_BIG_ENDIAN
22719
#define OPTION_EB (OPTION_MD_BASE + 0)
22720
#else
22721
#define OPTION_EL (OPTION_MD_BASE + 1)
22722
#endif
22723
#endif
22724
#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
22725
 
22726
struct option md_longopts[] =
22727
{
22728
#ifdef OPTION_EB
22729
  {"EB", no_argument, NULL, OPTION_EB},
22730
#endif
22731
#ifdef OPTION_EL
22732
  {"EL", no_argument, NULL, OPTION_EL},
22733
#endif
22734
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
22735
  {NULL, no_argument, NULL, 0}
22736
};
22737
 
22738
size_t md_longopts_size = sizeof (md_longopts);
22739
 
22740
struct arm_option_table
22741
{
22742
  char *option;         /* Option name to match.  */
22743
  char *help;           /* Help information.  */
22744
  int  *var;            /* Variable to change.  */
22745
  int   value;          /* What to change it to.  */
22746
  char *deprecated;     /* If non-null, print this message.  */
22747
};
22748
 
22749
struct arm_option_table arm_opts[] =
22750
{
22751
  {"k",      N_("generate PIC code"),      &pic_code,    1, NULL},
22752
  {"mthumb", N_("assemble Thumb code"),    &thumb_mode,  1, NULL},
22753
  {"mthumb-interwork", N_("support ARM/Thumb interworking"),
22754
   &support_interwork, 1, NULL},
22755
  {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
22756
  {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
22757
  {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
22758
   1, NULL},
22759
  {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
22760
  {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
22761
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
22762
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
22763
   NULL},
22764
 
22765
  /* These are recognized by the assembler, but have no effect on code.  */
22766
  {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
22767
  {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
22768
 
22769
  {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
22770
  {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
22771
   &warn_on_deprecated, 0, NULL},
22772
  {NULL, NULL, NULL, 0, NULL}
22773
};
22774
 
22775
struct arm_legacy_option_table
22776
{
22777
  char *option;                         /* Option name to match.  */
22778
  const arm_feature_set **var;          /* Variable to change.  */
22779
  const arm_feature_set value;          /* What to change it to.  */
22780
  char *deprecated;                     /* If non-null, print this message.  */
22781
};
22782
 
22783
const struct arm_legacy_option_table arm_legacy_opts[] =
22784
{
22785
  /* DON'T add any new processors to this list -- we want the whole list
22786
     to go away...  Add them to the processors table instead.  */
22787
  {"marm1",      &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
22788
  {"m1",         &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
22789
  {"marm2",      &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
22790
  {"m2",         &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
22791
  {"marm250",    &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
22792
  {"m250",       &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
22793
  {"marm3",      &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
22794
  {"m3",         &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
22795
  {"marm6",      &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
22796
  {"m6",         &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
22797
  {"marm600",    &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
22798
  {"m600",       &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
22799
  {"marm610",    &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
22800
  {"m610",       &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
22801
  {"marm620",    &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
22802
  {"m620",       &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
22803
  {"marm7",      &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
22804
  {"m7",         &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
22805
  {"marm70",     &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
22806
  {"m70",        &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
22807
  {"marm700",    &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
22808
  {"m700",       &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
22809
  {"marm700i",   &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
22810
  {"m700i",      &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
22811
  {"marm710",    &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
22812
  {"m710",       &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
22813
  {"marm710c",   &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
22814
  {"m710c",      &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
22815
  {"marm720",    &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
22816
  {"m720",       &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
22817
  {"marm7d",     &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
22818
  {"m7d",        &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
22819
  {"marm7di",    &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
22820
  {"m7di",       &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
22821
  {"marm7m",     &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
22822
  {"m7m",        &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
22823
  {"marm7dm",    &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
22824
  {"m7dm",       &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
22825
  {"marm7dmi",   &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
22826
  {"m7dmi",      &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
22827
  {"marm7100",   &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
22828
  {"m7100",      &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
22829
  {"marm7500",   &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
22830
  {"m7500",      &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
22831
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
22832
  {"m7500fe",    &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
22833
  {"marm7t",     &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
22834
  {"m7t",        &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
22835
  {"marm7tdmi",  &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
22836
  {"m7tdmi",     &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
22837
  {"marm710t",   &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
22838
  {"m710t",      &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
22839
  {"marm720t",   &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
22840
  {"m720t",      &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
22841
  {"marm740t",   &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
22842
  {"m740t",      &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
22843
  {"marm8",      &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
22844
  {"m8",         &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
22845
  {"marm810",    &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
22846
  {"m810",       &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
22847
  {"marm9",      &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
22848
  {"m9",         &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
22849
  {"marm9tdmi",  &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
22850
  {"m9tdmi",     &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
22851
  {"marm920",    &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
22852
  {"m920",       &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
22853
  {"marm940",    &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
22854
  {"m940",       &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
22855
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=strongarm")},
22856
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
22857
   N_("use -mcpu=strongarm110")},
22858
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
22859
   N_("use -mcpu=strongarm1100")},
22860
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
22861
   N_("use -mcpu=strongarm1110")},
22862
  {"mxscale",    &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
22863
  {"miwmmxt",    &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
22864
  {"mall",       &legacy_cpu, ARM_ANY,         N_("use -mcpu=all")},
22865
 
22866
  /* Architecture variants -- don't add any more to this list either.  */
22867
  {"mv2",        &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
22868
  {"marmv2",     &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
22869
  {"mv2a",       &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
22870
  {"marmv2a",    &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
22871
  {"mv3",        &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
22872
  {"marmv3",     &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
22873
  {"mv3m",       &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
22874
  {"marmv3m",    &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
22875
  {"mv4",        &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
22876
  {"marmv4",     &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
22877
  {"mv4t",       &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
22878
  {"marmv4t",    &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
22879
  {"mv5",        &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
22880
  {"marmv5",     &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
22881
  {"mv5t",       &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
22882
  {"marmv5t",    &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
22883
  {"mv5e",       &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
22884
  {"marmv5e",    &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
22885
 
22886
  /* Floating point variants -- don't add any more to this list either.  */
22887
  {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
22888
  {"mfpa10",   &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
22889
  {"mfpa11",   &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
22890
  {"mno-fpu",  &legacy_fpu, ARM_ARCH_NONE,
22891
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},
22892
 
22893
  {NULL, NULL, ARM_ARCH_NONE, NULL}
22894
};
22895
 
22896
struct arm_cpu_option_table
22897
{
22898
  char *name;
22899 166 khays
  size_t name_len;
22900 16 khays
  const arm_feature_set value;
22901
  /* For some CPUs we assume an FPU unless the user explicitly sets
22902
     -mfpu=...  */
22903
  const arm_feature_set default_fpu;
22904
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
22905
     case.  */
22906
  const char *canonical_name;
22907
};
22908
 
22909
/* This list should, at a minimum, contain all the cpu names
22910
   recognized by GCC.  */
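/* Note: "sizeof (N) - 1" below computes the length of the string literal N
   at compile time, so each entry carries a ready-made name_len and the
   option parsers can compare candidate names with strncmp instead of
   calling strlen on every table entry.  */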
22911 166 khays
#define ARM_CPU_OPT(N, V, DF, CN) { N, sizeof (N) - 1, V, DF, CN }
22912 16 khays
static const struct arm_cpu_option_table arm_cpus[] =
22913
{
22914 166 khays
  ARM_CPU_OPT ("all",           ARM_ANY,         FPU_ARCH_FPA,    NULL),
22915
  ARM_CPU_OPT ("arm1",          ARM_ARCH_V1,     FPU_ARCH_FPA,    NULL),
22916
  ARM_CPU_OPT ("arm2",          ARM_ARCH_V2,     FPU_ARCH_FPA,    NULL),
22917
  ARM_CPU_OPT ("arm250",        ARM_ARCH_V2S,    FPU_ARCH_FPA,    NULL),
22918
  ARM_CPU_OPT ("arm3",          ARM_ARCH_V2S,    FPU_ARCH_FPA,    NULL),
22919
  ARM_CPU_OPT ("arm6",          ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL),
22920
  ARM_CPU_OPT ("arm60",         ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL),
22921
  ARM_CPU_OPT ("arm600",        ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL),
22922
  ARM_CPU_OPT ("arm610",        ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL),
22923
  ARM_CPU_OPT ("arm620",        ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL),
22924
  ARM_CPU_OPT ("arm7",          ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL),
22925
  ARM_CPU_OPT ("arm7m",         ARM_ARCH_V3M,    FPU_ARCH_FPA,    NULL),
22926
  ARM_CPU_OPT ("arm7d",         ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL),
22927
  ARM_CPU_OPT ("arm7dm",        ARM_ARCH_V3M,    FPU_ARCH_FPA,    NULL),
22928
  ARM_CPU_OPT ("arm7di",        ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL),
22929
  ARM_CPU_OPT ("arm7dmi",       ARM_ARCH_V3M,    FPU_ARCH_FPA,    NULL),
22930
  ARM_CPU_OPT ("arm70",         ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL),
22931
  ARM_CPU_OPT ("arm700",        ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL),
22932
  ARM_CPU_OPT ("arm700i",       ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL),
22933
  ARM_CPU_OPT ("arm710",        ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL),
22934
  ARM_CPU_OPT ("arm710t",       ARM_ARCH_V4T,    FPU_ARCH_FPA,    NULL),
22935
  ARM_CPU_OPT ("arm720",        ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL),
22936
  ARM_CPU_OPT ("arm720t",       ARM_ARCH_V4T,    FPU_ARCH_FPA,    NULL),
22937
  ARM_CPU_OPT ("arm740t",       ARM_ARCH_V4T,    FPU_ARCH_FPA,    NULL),
22938
  ARM_CPU_OPT ("arm710c",       ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL),
22939
  ARM_CPU_OPT ("arm7100",       ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL),
22940
  ARM_CPU_OPT ("arm7500",       ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL),
22941
  ARM_CPU_OPT ("arm7500fe",     ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL),
22942
  ARM_CPU_OPT ("arm7t",         ARM_ARCH_V4T,    FPU_ARCH_FPA,    NULL),
22943
  ARM_CPU_OPT ("arm7tdmi",      ARM_ARCH_V4T,    FPU_ARCH_FPA,    NULL),
22944
  ARM_CPU_OPT ("arm7tdmi-s",    ARM_ARCH_V4T,    FPU_ARCH_FPA,    NULL),
22945
  ARM_CPU_OPT ("arm8",          ARM_ARCH_V4,     FPU_ARCH_FPA,    NULL),
22946
  ARM_CPU_OPT ("arm810",        ARM_ARCH_V4,     FPU_ARCH_FPA,    NULL),
22947
  ARM_CPU_OPT ("strongarm",     ARM_ARCH_V4,     FPU_ARCH_FPA,    NULL),
22948
  ARM_CPU_OPT ("strongarm1",    ARM_ARCH_V4,     FPU_ARCH_FPA,    NULL),
22949
  ARM_CPU_OPT ("strongarm110",  ARM_ARCH_V4,     FPU_ARCH_FPA,    NULL),
22950
  ARM_CPU_OPT ("strongarm1100", ARM_ARCH_V4,     FPU_ARCH_FPA,    NULL),
22951
  ARM_CPU_OPT ("strongarm1110", ARM_ARCH_V4,     FPU_ARCH_FPA,    NULL),
22952
  ARM_CPU_OPT ("arm9",          ARM_ARCH_V4T,    FPU_ARCH_FPA,    NULL),
22953
  ARM_CPU_OPT ("arm920",        ARM_ARCH_V4T,    FPU_ARCH_FPA,    "ARM920T"),
22954
  ARM_CPU_OPT ("arm920t",       ARM_ARCH_V4T,    FPU_ARCH_FPA,    NULL),
22955
  ARM_CPU_OPT ("arm922t",       ARM_ARCH_V4T,    FPU_ARCH_FPA,    NULL),
22956
  ARM_CPU_OPT ("arm940t",       ARM_ARCH_V4T,    FPU_ARCH_FPA,    NULL),
22957
  ARM_CPU_OPT ("arm9tdmi",      ARM_ARCH_V4T,    FPU_ARCH_FPA,    NULL),
22958
  ARM_CPU_OPT ("fa526",         ARM_ARCH_V4,     FPU_ARCH_FPA,    NULL),
22959
  ARM_CPU_OPT ("fa626",         ARM_ARCH_V4,     FPU_ARCH_FPA,    NULL),
22960 16 khays
  /* For V5 or later processors we default to using VFP; but the user
22961
     should really set the FPU type explicitly.  */
22962 166 khays
  ARM_CPU_OPT ("arm9e-r0",      ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
22963
  ARM_CPU_OPT ("arm9e",         ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL),
22964
  ARM_CPU_OPT ("arm926ej",      ARM_ARCH_V5TEJ,  FPU_ARCH_VFP_V2, "ARM926EJ-S"),
22965
  ARM_CPU_OPT ("arm926ejs",     ARM_ARCH_V5TEJ,  FPU_ARCH_VFP_V2, "ARM926EJ-S"),
22966
  ARM_CPU_OPT ("arm926ej-s",    ARM_ARCH_V5TEJ,  FPU_ARCH_VFP_V2, NULL),
22967
  ARM_CPU_OPT ("arm946e-r0",    ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
22968
  ARM_CPU_OPT ("arm946e",       ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, "ARM946E-S"),
22969
  ARM_CPU_OPT ("arm946e-s",     ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL),
22970
  ARM_CPU_OPT ("arm966e-r0",    ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
22971
  ARM_CPU_OPT ("arm966e",       ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, "ARM966E-S"),
22972
  ARM_CPU_OPT ("arm966e-s",     ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL),
22973
  ARM_CPU_OPT ("arm968e-s",     ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL),
22974
  ARM_CPU_OPT ("arm10t",        ARM_ARCH_V5T,    FPU_ARCH_VFP_V1, NULL),
22975
  ARM_CPU_OPT ("arm10tdmi",     ARM_ARCH_V5T,    FPU_ARCH_VFP_V1, NULL),
22976
  ARM_CPU_OPT ("arm10e",        ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL),
22977
  ARM_CPU_OPT ("arm1020",       ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, "ARM1020E"),
22978
  ARM_CPU_OPT ("arm1020t",      ARM_ARCH_V5T,    FPU_ARCH_VFP_V1, NULL),
22979
  ARM_CPU_OPT ("arm1020e",      ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL),
22980
  ARM_CPU_OPT ("arm1022e",      ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL),
22981
  ARM_CPU_OPT ("arm1026ejs",    ARM_ARCH_V5TEJ,  FPU_ARCH_VFP_V2,
22982
                                                                 "ARM1026EJ-S"),
22983
  ARM_CPU_OPT ("arm1026ej-s",   ARM_ARCH_V5TEJ,  FPU_ARCH_VFP_V2, NULL),
22984
  ARM_CPU_OPT ("fa606te",       ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL),
22985
  ARM_CPU_OPT ("fa616te",       ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL),
22986
  ARM_CPU_OPT ("fa626te",       ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL),
22987
  ARM_CPU_OPT ("fmp626",        ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL),
22988
  ARM_CPU_OPT ("fa726te",       ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL),
22989
  ARM_CPU_OPT ("arm1136js",     ARM_ARCH_V6,     FPU_NONE,        "ARM1136J-S"),
22990
  ARM_CPU_OPT ("arm1136j-s",    ARM_ARCH_V6,     FPU_NONE,        NULL),
22991
  ARM_CPU_OPT ("arm1136jfs",    ARM_ARCH_V6,     FPU_ARCH_VFP_V2,
22992
                                                                 "ARM1136JF-S"),
22993
  ARM_CPU_OPT ("arm1136jf-s",   ARM_ARCH_V6,     FPU_ARCH_VFP_V2, NULL),
22994
  ARM_CPU_OPT ("mpcore",        ARM_ARCH_V6K,    FPU_ARCH_VFP_V2, "MPCore"),
22995
  ARM_CPU_OPT ("mpcorenovfp",   ARM_ARCH_V6K,    FPU_NONE,        "MPCore"),
22996
  ARM_CPU_OPT ("arm1156t2-s",   ARM_ARCH_V6T2,   FPU_NONE,        NULL),
22997
  ARM_CPU_OPT ("arm1156t2f-s",  ARM_ARCH_V6T2,   FPU_ARCH_VFP_V2, NULL),
22998
  ARM_CPU_OPT ("arm1176jz-s",   ARM_ARCH_V6ZK,   FPU_NONE,        NULL),
22999
  ARM_CPU_OPT ("arm1176jzf-s",  ARM_ARCH_V6ZK,   FPU_ARCH_VFP_V2, NULL),
23000
  ARM_CPU_OPT ("cortex-a5",     ARM_ARCH_V7A_MP_SEC,
23001
                                                 FPU_NONE,        "Cortex-A5"),
23002
  ARM_CPU_OPT ("cortex-a7",     ARM_ARCH_V7A_IDIV_MP_SEC_VIRT,
23003
                                                 FPU_ARCH_NEON_VFP_V4,
23004
                                                                  "Cortex-A7"),
23005
  ARM_CPU_OPT ("cortex-a8",     ARM_ARCH_V7A_SEC,
23006
                                                 ARM_FEATURE (0, FPU_VFP_V3
23007 16 khays
                                                        | FPU_NEON_EXT_V1),
23008 166 khays
                                                                  "Cortex-A8"),
23009
  ARM_CPU_OPT ("cortex-a9",     ARM_ARCH_V7A_MP_SEC,
23010
                                                 ARM_FEATURE (0, FPU_VFP_V3
23011 16 khays
                                                        | FPU_NEON_EXT_V1),
23012 166 khays
                                                                  "Cortex-A9"),
23013
  ARM_CPU_OPT ("cortex-a15",    ARM_ARCH_V7A_IDIV_MP_SEC_VIRT,
23014
                                                 FPU_ARCH_NEON_VFP_V4,
23015
                                                                  "Cortex-A15"),
23016
  ARM_CPU_OPT ("cortex-r4",     ARM_ARCH_V7R,    FPU_NONE,        "Cortex-R4"),
23017
  ARM_CPU_OPT ("cortex-r4f",    ARM_ARCH_V7R,    FPU_ARCH_VFP_V3D16,
23018
                                                                  "Cortex-R4F"),
23019
  ARM_CPU_OPT ("cortex-r5",     ARM_ARCH_V7R_IDIV,
23020
                                                 FPU_NONE,        "Cortex-R5"),
23021
  ARM_CPU_OPT ("cortex-m4",     ARM_ARCH_V7EM,   FPU_NONE,        "Cortex-M4"),
23022
  ARM_CPU_OPT ("cortex-m3",     ARM_ARCH_V7M,    FPU_NONE,        "Cortex-M3"),
23023
  ARM_CPU_OPT ("cortex-m1",     ARM_ARCH_V6SM,   FPU_NONE,        "Cortex-M1"),
23024
  ARM_CPU_OPT ("cortex-m0",     ARM_ARCH_V6SM,   FPU_NONE,        "Cortex-M0"),
23025 16 khays
  /* ??? XSCALE is really an architecture.  */
23026 166 khays
  ARM_CPU_OPT ("xscale",        ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
23027 16 khays
  /* ??? iwmmxt is not a processor.  */
23028 166 khays
  ARM_CPU_OPT ("iwmmxt",        ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL),
23029
  ARM_CPU_OPT ("iwmmxt2",       ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL),
23030
  ARM_CPU_OPT ("i80200",        ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
23031 16 khays
  /* Maverick */
23032 166 khays
  ARM_CPU_OPT ("ep9312",        ARM_FEATURE (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
23033
                                                 FPU_ARCH_MAVERICK,
23034
                                                                  "ARM920T"),
23035
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
23036 16 khays
};
23037 166 khays
#undef ARM_CPU_OPT
23038 16 khays
 
23039
struct arm_arch_option_table
23040
{
23041
  char *name;
23042 166 khays
  size_t name_len;
23043 16 khays
  const arm_feature_set value;
23044
  const arm_feature_set default_fpu;
23045
};
23046
 
23047
/* This list should, at a minimum, contain all the architecture names
23048
   recognized by GCC.  */
23049 166 khays
#define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }
23050 16 khays
static const struct arm_arch_option_table arm_archs[] =
23051
{
23052 166 khays
  ARM_ARCH_OPT ("all",          ARM_ANY,         FPU_ARCH_FPA),
23053
  ARM_ARCH_OPT ("armv1",        ARM_ARCH_V1,     FPU_ARCH_FPA),
23054
  ARM_ARCH_OPT ("armv2",        ARM_ARCH_V2,     FPU_ARCH_FPA),
23055
  ARM_ARCH_OPT ("armv2a",       ARM_ARCH_V2S,    FPU_ARCH_FPA),
23056
  ARM_ARCH_OPT ("armv2s",       ARM_ARCH_V2S,    FPU_ARCH_FPA),
23057
  ARM_ARCH_OPT ("armv3",        ARM_ARCH_V3,     FPU_ARCH_FPA),
23058
  ARM_ARCH_OPT ("armv3m",       ARM_ARCH_V3M,    FPU_ARCH_FPA),
23059
  ARM_ARCH_OPT ("armv4",        ARM_ARCH_V4,     FPU_ARCH_FPA),
23060
  ARM_ARCH_OPT ("armv4xm",      ARM_ARCH_V4xM,   FPU_ARCH_FPA),
23061
  ARM_ARCH_OPT ("armv4t",       ARM_ARCH_V4T,    FPU_ARCH_FPA),
23062
  ARM_ARCH_OPT ("armv4txm",     ARM_ARCH_V4TxM,  FPU_ARCH_FPA),
23063
  ARM_ARCH_OPT ("armv5",        ARM_ARCH_V5,     FPU_ARCH_VFP),
23064
  ARM_ARCH_OPT ("armv5t",       ARM_ARCH_V5T,    FPU_ARCH_VFP),
23065
  ARM_ARCH_OPT ("armv5txm",     ARM_ARCH_V5TxM,  FPU_ARCH_VFP),
23066
  ARM_ARCH_OPT ("armv5te",      ARM_ARCH_V5TE,   FPU_ARCH_VFP),
23067
  ARM_ARCH_OPT ("armv5texp",    ARM_ARCH_V5TExP, FPU_ARCH_VFP),
23068
  ARM_ARCH_OPT ("armv5tej",     ARM_ARCH_V5TEJ,  FPU_ARCH_VFP),
23069
  ARM_ARCH_OPT ("armv6",        ARM_ARCH_V6,     FPU_ARCH_VFP),
23070
  ARM_ARCH_OPT ("armv6j",       ARM_ARCH_V6,     FPU_ARCH_VFP),
23071
  ARM_ARCH_OPT ("armv6k",       ARM_ARCH_V6K,    FPU_ARCH_VFP),
23072
  ARM_ARCH_OPT ("armv6z",       ARM_ARCH_V6Z,    FPU_ARCH_VFP),
23073
  ARM_ARCH_OPT ("armv6zk",      ARM_ARCH_V6ZK,   FPU_ARCH_VFP),
23074
  ARM_ARCH_OPT ("armv6t2",      ARM_ARCH_V6T2,   FPU_ARCH_VFP),
23075
  ARM_ARCH_OPT ("armv6kt2",     ARM_ARCH_V6KT2,  FPU_ARCH_VFP),
23076
  ARM_ARCH_OPT ("armv6zt2",     ARM_ARCH_V6ZT2,  FPU_ARCH_VFP),
23077
  ARM_ARCH_OPT ("armv6zkt2",    ARM_ARCH_V6ZKT2, FPU_ARCH_VFP),
23078
  ARM_ARCH_OPT ("armv6-m",      ARM_ARCH_V6M,    FPU_ARCH_VFP),
23079
  ARM_ARCH_OPT ("armv6s-m",     ARM_ARCH_V6SM,   FPU_ARCH_VFP),
23080
  ARM_ARCH_OPT ("armv7",        ARM_ARCH_V7,     FPU_ARCH_VFP),
23081 16 khays
  /* The official spelling of the ARMv7 profile variants is the dashed form.
23082
     Accept the non-dashed form for compatibility with old toolchains.  */
23083 166 khays
  ARM_ARCH_OPT ("armv7a",       ARM_ARCH_V7A,    FPU_ARCH_VFP),
23084
  ARM_ARCH_OPT ("armv7r",       ARM_ARCH_V7R,    FPU_ARCH_VFP),
23085
  ARM_ARCH_OPT ("armv7m",       ARM_ARCH_V7M,    FPU_ARCH_VFP),
23086
  ARM_ARCH_OPT ("armv7-a",      ARM_ARCH_V7A,    FPU_ARCH_VFP),
23087
  ARM_ARCH_OPT ("armv7-r",      ARM_ARCH_V7R,    FPU_ARCH_VFP),
23088
  ARM_ARCH_OPT ("armv7-m",      ARM_ARCH_V7M,    FPU_ARCH_VFP),
23089
  ARM_ARCH_OPT ("armv7e-m",     ARM_ARCH_V7EM,   FPU_ARCH_VFP),
23090
  ARM_ARCH_OPT ("xscale",       ARM_ARCH_XSCALE, FPU_ARCH_VFP),
23091
  ARM_ARCH_OPT ("iwmmxt",       ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
23092
  ARM_ARCH_OPT ("iwmmxt2",      ARM_ARCH_IWMMXT2,FPU_ARCH_VFP),
23093
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
23094 16 khays
};
23095 166 khays
#undef ARM_ARCH_OPT
23096 16 khays
 
23097
/* ISA extensions in the co-processor and main instruction set space.  */
23098
struct arm_option_extension_value_table
23099
{
23100
  char *name;
23101 166 khays
  size_t name_len;
23102 16 khays
  const arm_feature_set value;
23103
  const arm_feature_set allowed_archs;
23104
};
23105
 
23106
/* The following table must be in alphabetical order with a NULL last
23107
   entry.  */
23108 166 khays
#define ARM_EXT_OPT(N, V, AA) { N, sizeof (N) - 1, V, AA }
23109 16 khays
static const struct arm_option_extension_value_table arm_extensions[] =
23110
{
23111 166 khays
  ARM_EXT_OPT ("idiv",  ARM_FEATURE (ARM_EXT_ADIV | ARM_EXT_DIV, 0),
23112
                                   ARM_FEATURE (ARM_EXT_V7A | ARM_EXT_V7R, 0)),
23113
  ARM_EXT_OPT ("iwmmxt",ARM_FEATURE (0, ARM_CEXT_IWMMXT),        ARM_ANY),
23114
  ARM_EXT_OPT ("iwmmxt2",
23115
                        ARM_FEATURE (0, ARM_CEXT_IWMMXT2),       ARM_ANY),
23116
  ARM_EXT_OPT ("maverick",
23117
                        ARM_FEATURE (0, ARM_CEXT_MAVERICK),      ARM_ANY),
23118
  ARM_EXT_OPT ("mp",    ARM_FEATURE (ARM_EXT_MP, 0),
23119
                                   ARM_FEATURE (ARM_EXT_V7A | ARM_EXT_V7R, 0)),
23120
  ARM_EXT_OPT ("os",    ARM_FEATURE (ARM_EXT_OS, 0),
23121
                                   ARM_FEATURE (ARM_EXT_V6M, 0)),
23122
  ARM_EXT_OPT ("sec",   ARM_FEATURE (ARM_EXT_SEC, 0),
23123
                                   ARM_FEATURE (ARM_EXT_V6K | ARM_EXT_V7A, 0)),
23124
  ARM_EXT_OPT ("virt",  ARM_FEATURE (ARM_EXT_VIRT | ARM_EXT_ADIV
23125
                                     | ARM_EXT_DIV, 0),
23126
                                   ARM_FEATURE (ARM_EXT_V7A, 0)),
23127
  ARM_EXT_OPT ("xscale",ARM_FEATURE (0, ARM_CEXT_XSCALE),        ARM_ANY),
23128
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
23129 16 khays
};
23130 166 khays
#undef ARM_EXT_OPT
23131 16 khays
 
23132
/* ISA floating-point and Advanced SIMD extensions.  */
23133
struct arm_option_fpu_value_table
23134
{
23135
  char *name;
23136
  const arm_feature_set value;
23137
};
23138
 
23139
/* This list should, at a minimum, contain all the fpu names
23140
   recognized by GCC.  */
23141
static const struct arm_option_fpu_value_table arm_fpus[] =
23142
{
23143
  {"softfpa",           FPU_NONE},
23144
  {"fpe",               FPU_ARCH_FPE},
23145
  {"fpe2",              FPU_ARCH_FPE},
23146
  {"fpe3",              FPU_ARCH_FPA},  /* Third release supports LFM/SFM.  */
23147
  {"fpa",               FPU_ARCH_FPA},
23148
  {"fpa10",             FPU_ARCH_FPA},
23149
  {"fpa11",             FPU_ARCH_FPA},
23150
  {"arm7500fe",         FPU_ARCH_FPA},
23151
  {"softvfp",           FPU_ARCH_VFP},
23152
  {"softvfp+vfp",       FPU_ARCH_VFP_V2},
23153
  {"vfp",               FPU_ARCH_VFP_V2},
23154
  {"vfp9",              FPU_ARCH_VFP_V2},
23155
  {"vfp3",              FPU_ARCH_VFP_V3}, /* For backwards compatbility.  */
23156
  {"vfp10",             FPU_ARCH_VFP_V2},
23157
  {"vfp10-r0",          FPU_ARCH_VFP_V1},
23158
  {"vfpxd",             FPU_ARCH_VFP_V1xD},
23159
  {"vfpv2",             FPU_ARCH_VFP_V2},
23160
  {"vfpv3",             FPU_ARCH_VFP_V3},
23161
  {"vfpv3-fp16",        FPU_ARCH_VFP_V3_FP16},
23162
  {"vfpv3-d16",         FPU_ARCH_VFP_V3D16},
23163
  {"vfpv3-d16-fp16",    FPU_ARCH_VFP_V3D16_FP16},
23164
  {"vfpv3xd",           FPU_ARCH_VFP_V3xD},
23165
  {"vfpv3xd-fp16",      FPU_ARCH_VFP_V3xD_FP16},
23166
  {"arm1020t",          FPU_ARCH_VFP_V1},
23167
  {"arm1020e",          FPU_ARCH_VFP_V2},
23168
  {"arm1136jfs",        FPU_ARCH_VFP_V2},
23169
  {"arm1136jf-s",       FPU_ARCH_VFP_V2},
23170
  {"maverick",          FPU_ARCH_MAVERICK},
23171
  {"neon",              FPU_ARCH_VFP_V3_PLUS_NEON_V1},
23172
  {"neon-fp16",         FPU_ARCH_NEON_FP16},
23173
  {"vfpv4",             FPU_ARCH_VFP_V4},
23174
  {"vfpv4-d16",         FPU_ARCH_VFP_V4D16},
23175
  {"fpv4-sp-d16",       FPU_ARCH_VFP_V4_SP_D16},
23176
  {"neon-vfpv4",        FPU_ARCH_NEON_VFP_V4},
23177
  {NULL,                ARM_ARCH_NONE}
23178
};
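/* For example, "-mfpu=neon-vfpv4" selects FPU_ARCH_NEON_VFP_V4 and
   "-mfpu=vfpv3-d16" selects FPU_ARCH_VFP_V3D16; names not present in the
   table above are rejected by arm_parse_fpu below.  */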
23179
 
23180
struct arm_option_value_table
23181
{
23182
  char *name;
23183
  long value;
23184
};
23185
 
23186
static const struct arm_option_value_table arm_float_abis[] =
23187
{
23188
  {"hard",      ARM_FLOAT_ABI_HARD},
23189
  {"softfp",    ARM_FLOAT_ABI_SOFTFP},
23190
  {"soft",      ARM_FLOAT_ABI_SOFT},
23191
  {NULL,        0}
23192
};
23193
 
23194
#ifdef OBJ_ELF
23195
/* We only know how to output GNU and ver 4/5 (AAELF) formats.  */
23196
static const struct arm_option_value_table arm_eabis[] =
23197
{
23198
  {"gnu",       EF_ARM_EABI_UNKNOWN},
23199
  {"4",         EF_ARM_EABI_VER4},
23200
  {"5",         EF_ARM_EABI_VER5},
23201
  {NULL,        0}
23202
};
23203
#endif
23204
 
23205
struct arm_long_option_table
23206
{
23207
  char * option;                /* Substring to match.  */
23208
  char * help;                  /* Help information.  */
23209
  int (* func) (char * subopt); /* Function to decode sub-option.  */
23210
  char * deprecated;            /* If non-null, print this message.  */
23211
};
23212
 
23213
static bfd_boolean
23214 166 khays
arm_parse_extension (char *str, const arm_feature_set **opt_p)
23215 16 khays
{
23216
  arm_feature_set *ext_set = (arm_feature_set *)
23217
      xmalloc (sizeof (arm_feature_set));
23218
 
23219
  /* We insist on extensions being specified in alphabetical order, and with
23220
     extensions being added before being removed.  We achieve this by having
23221
     the global ARM_EXTENSIONS table in alphabetical order, and using the
23222
     ADDING_VALUE variable to indicate whether we are adding an extension (1)
23223
     or removing it (0) and only allowing it to change in the order
23224
     -1 -> 1 -> 0.  */
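  /* Illustrative (hypothetical) option strings: with a v7-A base,
     "+mp+sec+noiwmmxt+noxscale" is accepted because the additions (mp, sec)
     come before the removals and each group is in alphabetical order,
     whereas "+sec+mp" and "+noiwmmxt+sec" are rejected by the checks
     below.  */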
23225
  const struct arm_option_extension_value_table * opt = NULL;
23226
  int adding_value = -1;
23227
 
23228
  /* Copy the feature set, so that we can modify it.  */
23229
  *ext_set = **opt_p;
23230
  *opt_p = ext_set;
23231
 
23232
  while (str != NULL && *str != 0)
23233
    {
23234 166 khays
      char *ext;
23235
      size_t len;
23236 16 khays
 
23237
      if (*str != '+')
23238
        {
23239
          as_bad (_("invalid architectural extension"));
23240
          return FALSE;
23241
        }
23242
 
23243
      str++;
23244
      ext = strchr (str, '+');
23245
 
23246
      if (ext != NULL)
23247 166 khays
        len = ext - str;
23248 16 khays
      else
23249 166 khays
        len = strlen (str);
23250 16 khays
 
23251 166 khays
      if (len >= 2 && strncmp (str, "no", 2) == 0)
23252 16 khays
        {
23253
          if (adding_value != 0)
23254
            {
23255
              adding_value = 0;
23256
              opt = arm_extensions;
23257
            }
23258
 
23259 166 khays
          len -= 2;
23260 16 khays
          str += 2;
23261
        }
23262 166 khays
      else if (len > 0)
23263 16 khays
        {
23264
          if (adding_value == -1)
23265
            {
23266
              adding_value = 1;
23267
              opt = arm_extensions;
23268
            }
23269
          else if (adding_value != 1)
23270
            {
23271
              as_bad (_("must specify extensions to add before specifying "
23272
                        "those to remove"));
23273
              return FALSE;
23274
            }
23275
        }
23276
 
23277 166 khays
      if (len == 0)
23278 16 khays
        {
23279
          as_bad (_("missing architectural extension"));
23280
          return FALSE;
23281
        }
23282
 
23283
      gas_assert (adding_value != -1);
23284
      gas_assert (opt != NULL);
23285
 
23286
      /* Scan over the options table trying to find an exact match. */
23287
      for (; opt->name != NULL; opt++)
23288 166 khays
        if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
23289 16 khays
          {
23290
            /* Check we can apply the extension to this architecture.  */
23291
            if (!ARM_CPU_HAS_FEATURE (*ext_set, opt->allowed_archs))
23292
              {
23293
                as_bad (_("extension does not apply to the base architecture"));
23294
                return FALSE;
23295
              }
23296
 
23297
            /* Add or remove the extension.  */
23298
            if (adding_value)
23299
              ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
23300
            else
23301
              ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->value);
23302
 
23303
            break;
23304
          }
23305
 
23306
      if (opt->name == NULL)
23307
        {
23308
          /* Did we fail to find an extension because it wasn't specified in
23309
             alphabetical order, or because it does not exist?  */
23310
 
23311
          for (opt = arm_extensions; opt->name != NULL; opt++)
23312 166 khays
            if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
23313 16 khays
              break;
23314
 
23315
          if (opt->name == NULL)
23316
            as_bad (_("unknown architectural extension `%s'"), str);
23317
          else
23318
            as_bad (_("architectural extensions must be specified in "
23319
                      "alphabetical order"));
23320
 
23321
          return FALSE;
23322
        }
23323
      else
23324
        {
23325
          /* We should skip the extension we've just matched the next time
23326
             round.  */
23327
          opt++;
23328
        }
23329
 
23330
      str = ext;
23331
    };
23332
 
23333
  return TRUE;
23334
}
23335
 
23336
static bfd_boolean
23337 166 khays
arm_parse_cpu (char *str)
23338 16 khays
{
23339 166 khays
  const struct arm_cpu_option_table *opt;
23340
  char *ext = strchr (str, '+');
23341
  size_t len;
23342 16 khays
 
23343
  if (ext != NULL)
23344 166 khays
    len = ext - str;
23345 16 khays
  else
23346 166 khays
    len = strlen (str);
23347 16 khays
 
23348 166 khays
  if (len == 0)
23349 16 khays
    {
23350
      as_bad (_("missing cpu name `%s'"), str);
23351
      return FALSE;
23352
    }
23353
 
23354
  for (opt = arm_cpus; opt->name != NULL; opt++)
23355 166 khays
    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
23356 16 khays
      {
23357
        mcpu_cpu_opt = &opt->value;
23358
        mcpu_fpu_opt = &opt->default_fpu;
23359
        if (opt->canonical_name)
23360
          strcpy (selected_cpu_name, opt->canonical_name);
23361
        else
23362
          {
23363 166 khays
            size_t i;
23364 16 khays
 
23365 166 khays
            for (i = 0; i < len; i++)
23366 16 khays
              selected_cpu_name[i] = TOUPPER (opt->name[i]);
23367
            selected_cpu_name[i] = 0;
23368
          }
23369
 
23370
        if (ext != NULL)
23371
          return arm_parse_extension (ext, &mcpu_cpu_opt);
23372
 
23373
        return TRUE;
23374
      }
23375
 
23376
  as_bad (_("unknown cpu `%s'"), str);
23377
  return FALSE;
23378
}
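/* Example inputs to arm_parse_cpu, using names from the arm_cpus table
   above: "-mcpu=cortex-r4f" selects the Cortex-R4F feature set with
   FPU_ARCH_VFP_V3D16 as the default FPU, and "-mcpu=cortex-a9+virt" also
   applies the Virtualization Extensions via arm_parse_extension.  */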
23379
 
23380
static bfd_boolean
23381 166 khays
arm_parse_arch (char *str)
23382 16 khays
{
23383
  const struct arm_arch_option_table *opt;
23384
  char *ext = strchr (str, '+');
23385 166 khays
  size_t len;
23386 16 khays
 
23387
  if (ext != NULL)
23388 166 khays
    len = ext - str;
23389 16 khays
  else
23390 166 khays
    len = strlen (str);
23391 16 khays
 
23392 166 khays
  if (len == 0)
23393 16 khays
    {
23394
      as_bad (_("missing architecture name `%s'"), str);
23395
      return FALSE;
23396
    }
23397
 
23398
  for (opt = arm_archs; opt->name != NULL; opt++)
23399 166 khays
    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
23400 16 khays
      {
23401
        march_cpu_opt = &opt->value;
23402
        march_fpu_opt = &opt->default_fpu;
23403
        strcpy (selected_cpu_name, opt->name);
23404
 
23405
        if (ext != NULL)
23406
          return arm_parse_extension (ext, &march_cpu_opt);
23407
 
23408
        return TRUE;
23409
      }
23410
 
23411
  as_bad (_("unknown architecture `%s'\n"), str);
23412
  return FALSE;
23413
}
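/* Example inputs to arm_parse_arch, using names from the arm_archs table
   above: "-march=armv5te" selects ARM_ARCH_V5TE with VFP as the default
   FPU, and "-march=armv7-a+idiv" also enables the integer divide
   extension.  */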
23414
 
23415
static bfd_boolean
23416
arm_parse_fpu (char * str)
23417
{
23418
  const struct arm_option_fpu_value_table * opt;
23419
 
23420
  for (opt = arm_fpus; opt->name != NULL; opt++)
23421
    if (streq (opt->name, str))
23422
      {
23423
        mfpu_opt = &opt->value;
23424
        return TRUE;
23425
      }
23426
 
23427
  as_bad (_("unknown floating point format `%s'\n"), str);
23428
  return FALSE;
23429
}
23430
 
23431
static bfd_boolean
23432
arm_parse_float_abi (char * str)
23433
{
23434
  const struct arm_option_value_table * opt;
23435
 
23436
  for (opt = arm_float_abis; opt->name != NULL; opt++)
23437
    if (streq (opt->name, str))
23438
      {
23439
        mfloat_abi_opt = opt->value;
23440
        return TRUE;
23441
      }
23442
 
23443
  as_bad (_("unknown floating point abi `%s'\n"), str);
23444
  return FALSE;
23445
}
23446
 
23447
#ifdef OBJ_ELF
23448
static bfd_boolean
23449
arm_parse_eabi (char * str)
23450
{
23451
  const struct arm_option_value_table *opt;
23452
 
23453
  for (opt = arm_eabis; opt->name != NULL; opt++)
23454
    if (streq (opt->name, str))
23455
      {
23456
        meabi_flags = opt->value;
23457
        return TRUE;
23458
      }
23459
  as_bad (_("unknown EABI `%s'\n"), str);
23460
  return FALSE;
23461
}
23462
#endif
23463
 
23464
static bfd_boolean
23465
arm_parse_it_mode (char * str)
23466
{
23467
  bfd_boolean ret = TRUE;
23468
 
23469
  if (streq ("arm", str))
23470
    implicit_it_mode = IMPLICIT_IT_MODE_ARM;
23471
  else if (streq ("thumb", str))
23472
    implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
23473
  else if (streq ("always", str))
23474
    implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
23475
  else if (streq ("never", str))
23476
    implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
23477
  else
23478
    {
23479
      as_bad (_("unknown implicit IT mode `%s', should be "\
23480
                "arm, thumb, always, or never."), str);
23481
      ret = FALSE;
23482
    }
23483
 
23484
  return ret;
23485
}
23486
 
23487
struct arm_long_option_table arm_long_opts[] =
23488
{
23489
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
23490
   arm_parse_cpu, NULL},
23491
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
23492
   arm_parse_arch, NULL},
23493
  {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
23494
   arm_parse_fpu, NULL},
23495
  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
23496
   arm_parse_float_abi, NULL},
23497
#ifdef OBJ_ELF
23498
  {"meabi=", N_("<ver>\t\t  assemble for eabi version <ver>"),
23499
   arm_parse_eabi, NULL},
23500
#endif
23501
  {"mimplicit-it=", N_("<mode>\t  controls implicit insertion of IT instructions"),
23502
   arm_parse_it_mode, NULL},
23503
  {NULL, NULL, 0, NULL}
23504
};
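/* Long options are matched by prefix in md_parse_option below; for example
   "-mfloat-abi=hard" matches the "mfloat-abi=" entry and the remaining
   text "hard" is handed to arm_parse_float_abi.  */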
23505
 
23506
int
23507
md_parse_option (int c, char * arg)
23508
{
23509
  struct arm_option_table *opt;
23510
  const struct arm_legacy_option_table *fopt;
23511
  struct arm_long_option_table *lopt;
23512
 
23513
  switch (c)
23514
    {
23515
#ifdef OPTION_EB
23516
    case OPTION_EB:
23517
      target_big_endian = 1;
23518
      break;
23519
#endif
23520
 
23521
#ifdef OPTION_EL
23522
    case OPTION_EL:
23523
      target_big_endian = 0;
23524
      break;
23525
#endif
23526
 
23527
    case OPTION_FIX_V4BX:
23528
      fix_v4bx = TRUE;
23529
      break;
23530
 
23531
    case 'a':
23532
      /* Listing option.  Just ignore these, we don't support additional
23533
         ones.  */
23534
      return 0;
23535
 
23536
    default:
23537
      for (opt = arm_opts; opt->option != NULL; opt++)
23538
        {
23539
          if (c == opt->option[0]
23540
              && ((arg == NULL && opt->option[1] == 0)
23541
                  || streq (arg, opt->option + 1)))
23542
            {
23543
              /* If the option is deprecated, tell the user.  */
23544
              if (warn_on_deprecated && opt->deprecated != NULL)
23545
                as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
23546
                           arg ? arg : "", _(opt->deprecated));
23547
 
23548
              if (opt->var != NULL)
23549
                *opt->var = opt->value;
23550
 
23551
              return 1;
23552
            }
23553
        }
23554
 
23555
      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
23556
        {
23557
          if (c == fopt->option[0]
23558
              && ((arg == NULL && fopt->option[1] == 0)
23559
                  || streq (arg, fopt->option + 1)))
23560
            {
23561
              /* If the option is deprecated, tell the user.  */
23562
              if (warn_on_deprecated && fopt->deprecated != NULL)
23563
                as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
23564
                           arg ? arg : "", _(fopt->deprecated));
23565
 
23566
              if (fopt->var != NULL)
23567
                *fopt->var = &fopt->value;
23568
 
23569
              return 1;
23570
            }
23571
        }
23572
 
23573
      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
23574
        {
23575
          /* These options are expected to have an argument.  */
23576
          if (c == lopt->option[0]
23577
              && arg != NULL
23578
              && strncmp (arg, lopt->option + 1,
23579
                          strlen (lopt->option + 1)) == 0)
23580
            {
23581
              /* If the option is deprecated, tell the user.  */
23582
              if (warn_on_deprecated && lopt->deprecated != NULL)
23583
                as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
23584
                           _(lopt->deprecated));
23585
 
23586
              /* Call the sub-option parser.  */
23587
              return lopt->func (arg + strlen (lopt->option) - 1);
23588
            }
23589
        }
23590
 
23591
      return 0;
23592
    }
23593
 
23594
  return 1;
23595
}
23596
 
23597
void
23598
md_show_usage (FILE * fp)
23599
{
23600
  struct arm_option_table *opt;
23601
  struct arm_long_option_table *lopt;
23602
 
23603
  fprintf (fp, _(" ARM-specific assembler options:\n"));
23604
 
23605
  for (opt = arm_opts; opt->option != NULL; opt++)
23606
    if (opt->help != NULL)
23607
      fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));
23608
 
23609
  for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
23610
    if (lopt->help != NULL)
23611
      fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));
23612
 
23613
#ifdef OPTION_EB
23614
  fprintf (fp, _("\
23615
  -EB                     assemble code for a big-endian cpu\n"));
23616
#endif
23617
 
23618
#ifdef OPTION_EL
23619
  fprintf (fp, _("\
23620
  -EL                     assemble code for a little-endian cpu\n"));
23621
#endif
23622
 
23623
  fprintf (fp, _("\
23624
  --fix-v4bx              Allow BX in ARMv4 code\n"));
23625
}
23626
 
23627
 
23628
#ifdef OBJ_ELF
23629
typedef struct
23630
{
23631
  int val;
23632
  arm_feature_set flags;
23633
} cpu_arch_ver_table;
23634
 
23635
/* Mapping from CPU features to EABI CPU arch values.  The table must be
23636
   sorted so that entries with the fewest features come first.  */
23637
static const cpu_arch_ver_table cpu_arch_ver[] =
23638
{
23639
    {1, ARM_ARCH_V4},
23640
    {2, ARM_ARCH_V4T},
23641
    {3, ARM_ARCH_V5},
23642
    {3, ARM_ARCH_V5T},
23643
    {4, ARM_ARCH_V5TE},
23644
    {5, ARM_ARCH_V5TEJ},
23645
    {6, ARM_ARCH_V6},
23646
    {9, ARM_ARCH_V6K},
23647
    {7, ARM_ARCH_V6Z},
23648
    {11, ARM_ARCH_V6M},
23649
    {12, ARM_ARCH_V6SM},
23650
    {8, ARM_ARCH_V6T2},
23651
    {10, ARM_ARCH_V7A},
23652
    {10, ARM_ARCH_V7R},
23653
    {10, ARM_ARCH_V7M},
23654
    {0, ARM_ARCH_NONE}
23655
};
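/* Worked example: for code using only plain ARMv6 features, every entry up
   to {6, ARM_ARCH_V6} contributes a new feature bit, so the scan in
   aeabi_set_public_attributes below settles on Tag_CPU_arch = 6.  */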
23656
 
23657
/* Set an attribute if it has not already been set by the user.  */
23658
static void
23659
aeabi_set_attribute_int (int tag, int value)
23660
{
23661
  if (tag < 1
23662
      || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
23663
      || !attributes_set_explicitly[tag])
23664
    bfd_elf_add_proc_attr_int (stdoutput, tag, value);
23665
}
23666
 
23667
static void
23668
aeabi_set_attribute_string (int tag, const char *value)
23669
{
23670
  if (tag < 1
23671
      || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
23672
      || !attributes_set_explicitly[tag])
23673
    bfd_elf_add_proc_attr_string (stdoutput, tag, value);
23674
}
23675
 
23676
/* Set the public EABI object attributes.  */
23677
static void
23678
aeabi_set_public_attributes (void)
23679
{
23680
  int arch;
23681
  int virt_sec = 0;
23682
  arm_feature_set flags;
23683
  arm_feature_set tmp;
23684
  const cpu_arch_ver_table *p;
23685
 
23686
  /* Choose the architecture based on the capabilities of the requested cpu
23687
     (if any) and/or the instructions actually used.  */
23688
  ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
23689
  ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
23690
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
23691 166 khays
 
23692
  if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
23693
    ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);
23694
 
23695
  if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
23696
    ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);
23697
 
23698
  /* Allow the user to override the reported architecture.  */
23699 16 khays
  if (object_arch)
23700
    {
23701
      ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
23702
      ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
23703
    }
23704
 
23705
  /* We need to make sure that the attributes do not identify us as v6S-M
23706
     when the only v6S-M feature in use is the Operating System Extensions.  */
23707
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_os))
23708
      if (!ARM_CPU_HAS_FEATURE (flags, arm_arch_v6m_only))
23709
        ARM_CLEAR_FEATURE (flags, flags, arm_ext_os);
23710
 
23711
  tmp = flags;
23712
  arch = 0;
23713
  for (p = cpu_arch_ver; p->val; p++)
23714
    {
23715
      if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
23716
        {
23717
          arch = p->val;
23718
          ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
23719
        }
23720
    }
23721
 
23722
  /* The table lookup above finds the last architecture to contribute
23723
     a new feature.  Unfortunately, Tag13 is a subset of the union of
23724
     v6T2 and v7-M, so it is never seen as contributing a new feature.
23725
     We can not search for the last entry which is entirely used,
23726
     because if no CPU is specified we build up only those flags
23727
     actually used.  Perhaps we should separate out the specified
23728
     and implicit cases.  Avoid taking this path for -march=all by
23729
     checking for contradictory v7-A / v7-M features.  */
23730
  if (arch == 10
23731
      && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
23732
      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m)
23733
      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v6_dsp))
23734
    arch = 13;
23735
 
23736
  /* Tag_CPU_name.  */
23737
  if (selected_cpu_name[0])
23738
    {
23739
      char *q;
23740
 
23741
      q = selected_cpu_name;
23742
      if (strncmp (q, "armv", 4) == 0)
23743
        {
23744
          int i;
23745
 
23746
          q += 4;
23747
          for (i = 0; q[i]; i++)
23748
            q[i] = TOUPPER (q[i]);
23749
        }
23750
      aeabi_set_attribute_string (Tag_CPU_name, q);
23751
    }
23752
 
23753
  /* Tag_CPU_arch.  */
23754
  aeabi_set_attribute_int (Tag_CPU_arch, arch);
23755
 
23756
  /* Tag_CPU_arch_profile.  */
23757
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
23758
    aeabi_set_attribute_int (Tag_CPU_arch_profile, 'A');
23759
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
23760
    aeabi_set_attribute_int (Tag_CPU_arch_profile, 'R');
23761
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_m))
23762
    aeabi_set_attribute_int (Tag_CPU_arch_profile, 'M');
23763
 
23764
  /* Tag_ARM_ISA_use.  */
23765
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
23766
      || arch == 0)
23767
    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);
23768
 
23769
  /* Tag_THUMB_ISA_use.  */
23770
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
23771
      || arch == 0)
23772
    aeabi_set_attribute_int (Tag_THUMB_ISA_use,
23773
        ARM_CPU_HAS_FEATURE (flags, arm_arch_t2) ? 2 : 1);
23774
 
23775
  /* Tag_VFP_arch.  */
23776
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
23777
    aeabi_set_attribute_int (Tag_VFP_arch,
23778
                             ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
23779
                             ? 5 : 6);
23780
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
23781
    aeabi_set_attribute_int (Tag_VFP_arch, 3);
23782
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
23783
    aeabi_set_attribute_int (Tag_VFP_arch, 4);
23784
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
23785
    aeabi_set_attribute_int (Tag_VFP_arch, 2);
23786
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
23787
           || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
23788
    aeabi_set_attribute_int (Tag_VFP_arch, 1);
23789
 
23790
  /* Tag_ABI_HardFP_use.  */
23791
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
23792
      && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
23793
    aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);
23794
 
23795
  /* Tag_WMMX_arch.  */
23796
  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
23797
    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
23798
  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
23799
    aeabi_set_attribute_int (Tag_WMMX_arch, 1);
23800
 
23801
  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
23802
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
23803
    aeabi_set_attribute_int
23804
      (Tag_Advanced_SIMD_arch, (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma)
23805
                                ? 2 : 1));
23806
 
23807
  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
23808
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16))
23809
    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);
23810
 
23811
  /* Tag_DIV_use.  */
23812
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv))
23813
    aeabi_set_attribute_int (Tag_DIV_use, 2);
23814
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_div))
23815
    aeabi_set_attribute_int (Tag_DIV_use, 0);
23816
  else
23817
    aeabi_set_attribute_int (Tag_DIV_use, 1);
23818
 
23819
  /* Tag_MPextension_use.  */
23820
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
23821
    aeabi_set_attribute_int (Tag_MPextension_use, 1);
23822
 
23823
  /* Tag_Virtualization_use.  */
23824
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
23825
    virt_sec |= 1;
23826
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
23827
    virt_sec |= 2;
23828
  if (virt_sec != 0)
23829
    aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);
23830
}
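/* For instance, assembling with "-mcpu=cortex-a8" and no overriding
   directives records Tag_CPU_name "Cortex-A8", Tag_CPU_arch 10 (v7) and
   Tag_CPU_arch_profile 'A' through the logic above.  */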
23831
 
23832
/* Add the default contents for the .ARM.attributes section.  */
23833
void
23834
arm_md_end (void)
23835
{
23836
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
23837
    return;
23838
 
23839
  aeabi_set_public_attributes ();
23840
}
23841
#endif /* OBJ_ELF */
23842
 
23843
 
23844
/* Parse a .cpu directive.  */
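/* For example, a source file may contain ".cpu cortex-a8"; the name is
   looked up in the arm_cpus table, skipping its leading "all" entry.  */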
23845
 
23846
static void
23847
s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
23848
{
23849
  const struct arm_cpu_option_table *opt;
23850
  char *name;
23851
  char saved_char;
23852
 
23853
  name = input_line_pointer;
23854
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
23855
    input_line_pointer++;
23856
  saved_char = *input_line_pointer;
23857
  *input_line_pointer = 0;
23858
 
23859
  /* Skip the first "all" entry.  */
23860
  for (opt = arm_cpus + 1; opt->name != NULL; opt++)
23861
    if (streq (opt->name, name))
23862
      {
23863
        mcpu_cpu_opt = &opt->value;
23864
        selected_cpu = opt->value;
23865
        if (opt->canonical_name)
23866
          strcpy (selected_cpu_name, opt->canonical_name);
23867
        else
23868
          {
23869
            int i;
23870
            for (i = 0; opt->name[i]; i++)
23871
              selected_cpu_name[i] = TOUPPER (opt->name[i]);
23872 166 khays
 
23873 16 khays
            selected_cpu_name[i] = 0;
23874
          }
23875
        ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
23876
        *input_line_pointer = saved_char;
23877
        demand_empty_rest_of_line ();
23878
        return;
23879
      }
23880
  as_bad (_("unknown cpu `%s'"), name);
23881
  *input_line_pointer = saved_char;
23882
  ignore_rest_of_line ();
23883
}
23884
 
23885
 
23886
/* Parse a .arch directive.  */
23887
 
23888
static void
23889
s_arm_arch (int ignored ATTRIBUTE_UNUSED)
23890
{
23891
  const struct arm_arch_option_table *opt;
23892
  char saved_char;
23893
  char *name;
23894
 
23895
  name = input_line_pointer;
23896
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
23897
    input_line_pointer++;
23898
  saved_char = *input_line_pointer;
23899
  *input_line_pointer = 0;
23900
 
23901
  /* Skip the first "all" entry.  */
23902
  for (opt = arm_archs + 1; opt->name != NULL; opt++)
23903
    if (streq (opt->name, name))
23904
      {
23905
        mcpu_cpu_opt = &opt->value;
23906
        selected_cpu = opt->value;
23907
        strcpy (selected_cpu_name, opt->name);
23908
        ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
23909
        *input_line_pointer = saved_char;
23910
        demand_empty_rest_of_line ();
23911
        return;
23912
      }
23913
 
23914
  as_bad (_("unknown architecture `%s'\n"), name);
23915
  *input_line_pointer = saved_char;
23916
  ignore_rest_of_line ();
23917
}
23918
 
23919
 
23920
/* Parse a .object_arch directive.  */
23921
 
23922
static void
23923
s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
23924
{
23925
  const struct arm_arch_option_table *opt;
23926
  char saved_char;
23927
  char *name;
23928
 
23929
  name = input_line_pointer;
23930
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
23931
    input_line_pointer++;
23932
  saved_char = *input_line_pointer;
23933
  *input_line_pointer = 0;
23934
 
23935
  /* Skip the first "all" entry.  */
23936
  for (opt = arm_archs + 1; opt->name != NULL; opt++)
23937
    if (streq (opt->name, name))
23938
      {
23939
        object_arch = &opt->value;
23940
        *input_line_pointer = saved_char;
23941
        demand_empty_rest_of_line ();
23942
        return;
23943
      }
23944
 
23945
  as_bad (_("unknown architecture `%s'\n"), name);
23946
  *input_line_pointer = saved_char;
23947
  ignore_rest_of_line ();
23948
}
23949
 
23950
/* Parse a .arch_extension directive.  */
23951
 
23952
static void
23953
s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
23954
{
23955
  const struct arm_option_extension_value_table *opt;
23956
  char saved_char;
23957
  char *name;
23958
  int adding_value = 1;
23959
 
23960
  name = input_line_pointer;
23961
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
23962
    input_line_pointer++;
23963
  saved_char = *input_line_pointer;
23964
  *input_line_pointer = 0;
23965
 
23966
  if (strlen (name) >= 2
23967
      && strncmp (name, "no", 2) == 0)
23968
    {
23969
      adding_value = 0;
23970
      name += 2;
23971
    }
23972
 
23973
  for (opt = arm_extensions; opt->name != NULL; opt++)
23974
    if (streq (opt->name, name))
23975
      {
23976
        if (!ARM_CPU_HAS_FEATURE (*mcpu_cpu_opt, opt->allowed_archs))
23977
          {
23978
            as_bad (_("architectural extension `%s' is not allowed for the "
23979
                      "current base architecture"), name);
23980
            break;
23981
          }
23982
 
23983
        if (adding_value)
23984
          ARM_MERGE_FEATURE_SETS (selected_cpu, selected_cpu, opt->value);
23985
        else
23986
          ARM_CLEAR_FEATURE (selected_cpu, selected_cpu, opt->value);
23987
 
23988
        mcpu_cpu_opt = &selected_cpu;
23989
        ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
23990
        *input_line_pointer = saved_char;
23991
        demand_empty_rest_of_line ();
23992
        return;
23993
      }
23994
 
23995
  if (opt->name == NULL)
23996
    as_bad (_("unknown architecture `%s'\n"), name);
23997
 
23998
  *input_line_pointer = saved_char;
23999
  ignore_rest_of_line ();
24000
}
24001
 
24002
/* Parse a .fpu directive.  */
24003
 
24004
static void
24005
s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
24006
{
24007
  const struct arm_option_fpu_value_table *opt;
24008
  char saved_char;
24009
  char *name;
24010
 
24011
  name = input_line_pointer;
24012
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
24013
    input_line_pointer++;
24014
  saved_char = *input_line_pointer;
24015
  *input_line_pointer = 0;
24016
 
24017
  for (opt = arm_fpus; opt->name != NULL; opt++)
24018
    if (streq (opt->name, name))
24019
      {
24020
        mfpu_opt = &opt->value;
24021
        ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
24022
        *input_line_pointer = saved_char;
24023
        demand_empty_rest_of_line ();
24024
        return;
24025
      }
24026
 
24027
  as_bad (_("unknown floating point format `%s'\n"), name);
24028
  *input_line_pointer = saved_char;
24029
  ignore_rest_of_line ();
24030
}
24031
 
24032
/* Copy symbol information.  */
24033
 
24034
void
24035
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
24036
{
24037
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
24038
}
24039
 
24040
#ifdef OBJ_ELF
24041
/* Given a symbolic attribute NAME, return the proper integer value.
24042
   Returns -1 if the attribute is not known.  */
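/* This is used when handling symbolic names in the ".eabi_attribute"
   directive; for example ".eabi_attribute Tag_ABI_align_needed, 1" resolves
   the tag name through the table below.  */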
24043
 
24044
int
24045
arm_convert_symbolic_attribute (const char *name)
24046
{
24047
  static const struct
24048
  {
24049
    const char * name;
24050
    const int    tag;
24051
  }
24052
  attribute_table[] =
24053
    {
24054
      /* When you modify this table you should
24055
         also modify the list in doc/c-arm.texi.  */
24056
#define T(tag) {#tag, tag}
24057
      T (Tag_CPU_raw_name),
24058
      T (Tag_CPU_name),
24059
      T (Tag_CPU_arch),
24060
      T (Tag_CPU_arch_profile),
24061
      T (Tag_ARM_ISA_use),
24062
      T (Tag_THUMB_ISA_use),
24063
      T (Tag_FP_arch),
24064
      T (Tag_VFP_arch),
24065
      T (Tag_WMMX_arch),
24066
      T (Tag_Advanced_SIMD_arch),
24067
      T (Tag_PCS_config),
24068
      T (Tag_ABI_PCS_R9_use),
24069
      T (Tag_ABI_PCS_RW_data),
24070
      T (Tag_ABI_PCS_RO_data),
24071
      T (Tag_ABI_PCS_GOT_use),
24072
      T (Tag_ABI_PCS_wchar_t),
24073
      T (Tag_ABI_FP_rounding),
24074
      T (Tag_ABI_FP_denormal),
24075
      T (Tag_ABI_FP_exceptions),
24076
      T (Tag_ABI_FP_user_exceptions),
24077
      T (Tag_ABI_FP_number_model),
24078
      T (Tag_ABI_align_needed),
24079
      T (Tag_ABI_align8_needed),
24080
      T (Tag_ABI_align_preserved),
24081
      T (Tag_ABI_align8_preserved),
24082
      T (Tag_ABI_enum_size),
24083
      T (Tag_ABI_HardFP_use),
24084
      T (Tag_ABI_VFP_args),
24085
      T (Tag_ABI_WMMX_args),
24086
      T (Tag_ABI_optimization_goals),
24087
      T (Tag_ABI_FP_optimization_goals),
24088
      T (Tag_compatibility),
24089
      T (Tag_CPU_unaligned_access),
24090
      T (Tag_FP_HP_extension),
24091
      T (Tag_VFP_HP_extension),
24092
      T (Tag_ABI_FP_16bit_format),
24093
      T (Tag_MPextension_use),
24094
      T (Tag_DIV_use),
24095
      T (Tag_nodefaults),
24096
      T (Tag_also_compatible_with),
24097
      T (Tag_conformance),
24098
      T (Tag_T2EE_use),
24099
      T (Tag_Virtualization_use),
24100
      /* We deliberately do not include Tag_MPextension_use_legacy.  */
24101
#undef T
24102
    };
24103
  unsigned int i;
24104
 
24105
  if (name == NULL)
24106
    return -1;
24107
 
24108
  for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
24109
    if (streq (name, attribute_table[i].name))
24110
      return attribute_table[i].tag;
24111
 
24112
  return -1;
24113
}
24114
 
24115
 
24116
/* Apply the symbol value to a relocation only when it is for a local
24117
   symbol and the selected architecture has the feature needed for BLX
24118
   and simple ARM/Thumb mode switches.  */
24119
int
24120
arm_apply_sym_value (struct fix * fixP)
24121
{
24122
  if (fixP->fx_addsy
24123
      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
24124
      && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
24125
    {
24126
      switch (fixP->fx_r_type)
24127
        {
24128
        case BFD_RELOC_ARM_PCREL_BLX:
24129
        case BFD_RELOC_THUMB_PCREL_BRANCH23:
24130
          if (ARM_IS_FUNC (fixP->fx_addsy))
24131
            return 1;
24132
          break;
24133
 
24134
        case BFD_RELOC_ARM_PCREL_CALL:
24135
        case BFD_RELOC_THUMB_PCREL_BLX:
24136
          if (THUMB_IS_FUNC (fixP->fx_addsy))
24137
              return 1;
24138
          break;
24139
 
24140
        default:
24141
          break;
24142
        }
24143
 
24144
    }
24145
  return 0;
24146
}
24147
#endif /* OBJ_ELF */
