/* tc-i386.c -- Assemble code for the Intel 80386
- Copyright (C) 1989-2020 Free Software Foundation, Inc.
+ Copyright (C) 1989-2021 Free Software Foundation, Inc.
This file is part of GAS, the GNU Assembler.
#include "dw2gencfi.h"
#include "elf/x86-64.h"
#include "opcodes/i386-init.h"
-
-#ifdef HAVE_LIMITS_H
#include <limits.h>
-#else
-#ifdef HAVE_SYS_PARAM_H
-#include <sys/param.h>
-#endif
-#ifndef INT_MAX
-#define INT_MAX (int) (((unsigned) (-1)) >> 1)
-#endif
-#endif
#ifndef INFER_ADDR_PREFIX
#define INFER_ADDR_PREFIX 1
static char *parse_insn (char *, char *);
static char *parse_operands (char *, const char *);
static void swap_operands (void);
-static void swap_2_operands (int, int);
+static void swap_2_operands (unsigned int, unsigned int);
static enum flag_code i386_addressing_mode (void);
static void optimize_imm (void);
static void optimize_disp (void);
static int check_word_reg (void);
static int finalize_imm (void);
static int process_operands (void);
-static const seg_entry *build_modrm_byte (void);
+static const reg_entry *build_modrm_byte (void);
static void output_insn (void);
static void output_imm (fragS *, offsetT);
static void output_disp (fragS *, offsetT);
static const reg_entry bad_reg = { "<bad>", OPERAND_TYPE_NONE, 0, 0,
{ Dw2Inval, Dw2Inval } };
-/* This struct describes rounding control and SAE in the instruction. */
-struct RC_Operation
-{
- enum rc_type
- {
- rne = 0,
- rd,
- ru,
- rz,
- saeonly
- } type;
- int operand;
-};
-
-static struct RC_Operation rc_op;
-
-/* The struct describes masking, applied to OPERAND in the instruction.
- MASK is a pointer to the corresponding mask register. ZEROING tells
- whether merging or zeroing mask is used. */
-struct Mask_Operation
-{
- const reg_entry *mask;
- unsigned int zeroing;
- /* The operand where this operation is associated. */
- int operand;
-};
-
-static struct Mask_Operation mask_op;
-
-/* The struct describes broadcasting, applied to OPERAND. FACTOR is
- broadcast factor. */
-struct Broadcast_Operation
-{
- /* Type of broadcast: {1to2}, {1to4}, {1to8}, or {1to16}. */
- int type;
-
- /* Index of broadcasted operand. */
- int operand;
-
- /* Number of bytes to broadcast. */
- int bytes;
-};
-
-static struct Broadcast_Operation broadcast_op;
+static const reg_entry *reg_eax;
+static const reg_entry *reg_ds;
+static const reg_entry *reg_es;
+static const reg_entry *reg_ss;
+static const reg_entry *reg_st0;
+static const reg_entry *reg_k0;
/* VEX prefix. */
typedef struct
unsupported_with_intel_mnemonic,
unsupported_syntax,
unsupported,
+ invalid_sib_address,
invalid_vsib_address,
invalid_vector_register_set,
+ invalid_tmm_register_set,
unsupported_vector_index_register,
unsupported_broadcast,
broadcast_needed,
or qword, if given. */
char suffix;
+ /* OPCODE_LENGTH holds the number of base opcode bytes. */
+ unsigned char opcode_length;
+
/* OPERANDS gives the number of given operands. */
unsigned int operands;
/* SEG gives the seg_entries of this insn. They are zero unless
explicit segment overrides are given. */
- const seg_entry *seg[2];
+ const reg_entry *seg[2];
/* Copied first memory operand string, for re-checking. */
char *memop1_string;
unsigned char prefix[MAX_PREFIXES];
/* Register is in low 3 bits of opcode. */
- bfd_boolean short_form;
+ bool short_form;
/* The operand to a branch insn indicates an absolute branch. */
- bfd_boolean jumpabsolute;
+ bool jumpabsolute;
- /* Has MMX register operands. */
- bfd_boolean has_regmmx;
-
- /* Has XMM register operands. */
- bfd_boolean has_regxmm;
-
- /* Has YMM register operands. */
- bfd_boolean has_regymm;
-
- /* Has ZMM register operands. */
- bfd_boolean has_regzmm;
+ /* Extended states. */
+ enum
+ {
+ /* Use MMX state. */
+ xstate_mmx = 1 << 0,
+ /* Use XMM state. */
+ xstate_xmm = 1 << 1,
+ /* Use YMM state. */
+ xstate_ymm = 1 << 2 | xstate_xmm,
+ /* Use ZMM state. */
+ xstate_zmm = 1 << 3 | xstate_ymm,
+ /* Use TMM state. */
+ xstate_tmm = 1 << 4,
+ /* Use MASK state. */
+ xstate_mask = 1 << 5
+ } xstate;
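/* Editor's note (illustrative, not part of the patch): the xstate bits
   nest by construction, so a wider vector state implies the narrower
   ones.  E.g. xstate_zmm = (1 << 3) | xstate_ymm = 0b1110, so a single
   test such as (i.xstate & xstate_xmm) fires for XMM, YMM and ZMM
   operands alike.  */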
/* Has GOTPC or TLS relocation. */
- bfd_boolean has_gotpc_tls_reloc;
+ bool has_gotpc_tls_reloc;
/* RM and SIB are the modrm byte and the sib byte where the
addressing modes of this insn are encoded. */
sib_byte sib;
vex_prefix vex;
- /* Masking attributes. */
- struct Mask_Operation *mask;
+ /* Masking attributes.
+
+ The struct describes masking, applied to OPERAND in the instruction.
+ REG is a pointer to the corresponding mask register. ZEROING tells
+ whether merging or zeroing mask is used. */
+ struct Mask_Operation
+ {
+ const reg_entry *reg;
+ unsigned int zeroing;
+ /* The operand where this operation is associated. */
+ unsigned int operand;
+ } mask;
/* Rounding control and SAE attributes. */
- struct RC_Operation *rounding;
+ struct RC_Operation
+ {
+ enum rc_type
+ {
+ rc_none = -1,
+ rne,
+ rd,
+ ru,
+ rz,
+ saeonly
+ } type;
+
+ unsigned int operand;
+ } rounding;
+
+ /* Broadcasting attributes.
+
+     The struct describes broadcasting, applied to OPERAND.  TYPE
+     expresses the broadcast factor.  */
+ struct Broadcast_Operation
+ {
+ /* Type of broadcast: {1to2}, {1to4}, {1to8}, or {1to16}. */
+ unsigned int type;
+
+ /* Index of broadcasted operand. */
+ unsigned int operand;
- /* Broadcasting attributes. */
- struct Broadcast_Operation *broadcast;
+ /* Number of bytes to broadcast. */
+ unsigned int bytes;
+ } broadcast;
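/* Editor's note (illustrative, not part of the patch): BYTES works out
   to element size times broadcast factor.  Assuming the opcode table's
   DWORD broadcast modifier value of 3, a dword {1to16} broadcast gives
   (1 << (3 - 1)) * 16 = 64 bytes, i.e. a full ZMM width.  */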
/* Compressed disp8*N attribute. */
unsigned int memshift;
dir_encoding_swap
} dir_encoding;
- /* Prefer 8bit or 32bit displacement in encoding. */
+ /* Prefer 8bit, 16bit, 32bit displacement in encoding. */
enum
{
disp_encoding_default = 0,
disp_encoding_8bit,
+ disp_encoding_16bit,
disp_encoding_32bit
} disp_encoding;
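/* Editor's note (illustrative, not part of the patch): these values are
   driven by the {disp8}/{disp16}/{disp32} pseudo prefixes, with {disp16}
   new in this change.  For example "{disp32} mov (%ebp), %eax" forces a
   four-byte zero displacement where the default encoding would use the
   one-byte form.  */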
/* Prefer the REX byte in encoding. */
- bfd_boolean rex_encoding;
+ bool rex_encoding;
/* Disable instruction size optimization. */
- bfd_boolean no_optimize;
+ bool no_optimize;
/* How to encode vector instructions. */
enum
#endif
;
-#if (defined (TE_I386AIX) \
- || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
- && !defined (TE_GNU) \
- && !defined (TE_LINUX) \
- && !defined (TE_NACL) \
- && !defined (TE_FreeBSD) \
- && !defined (TE_DragonFly) \
- && !defined (TE_NetBSD)))
+#if ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
+ && !defined (TE_GNU) \
+ && !defined (TE_LINUX) \
+ && !defined (TE_FreeBSD) \
+ && !defined (TE_DragonFly) \
+ && !defined (TE_NetBSD))
/* This array holds the chars that always start a comment. If the
pre-processor is disabled, these aren't very useful. The option
--divide will remove '/' from this list. */
CPU_ZNVER1_FLAGS, 0 },
{ STRING_COMMA_LEN ("znver2"), PROCESSOR_ZNVER,
CPU_ZNVER2_FLAGS, 0 },
+ { STRING_COMMA_LEN ("znver3"), PROCESSOR_ZNVER,
+ CPU_ZNVER3_FLAGS, 0 },
{ STRING_COMMA_LEN ("btver1"), PROCESSOR_BT,
CPU_BTVER1_FLAGS, 0 },
{ STRING_COMMA_LEN ("btver2"), PROCESSOR_BT,
CPU_AVX512_VNNI_FLAGS, 0 },
{ STRING_COMMA_LEN (".avx512_bitalg"), PROCESSOR_UNKNOWN,
CPU_AVX512_BITALG_FLAGS, 0 },
+ { STRING_COMMA_LEN (".avx_vnni"), PROCESSOR_UNKNOWN,
+ CPU_AVX_VNNI_FLAGS, 0 },
{ STRING_COMMA_LEN (".clzero"), PROCESSOR_UNKNOWN,
CPU_CLZERO_FLAGS, 0 },
{ STRING_COMMA_LEN (".mwaitx"), PROCESSOR_UNKNOWN,
CPU_WAITPKG_FLAGS, 0 },
{ STRING_COMMA_LEN (".cldemote"), PROCESSOR_UNKNOWN,
CPU_CLDEMOTE_FLAGS, 0 },
+ { STRING_COMMA_LEN (".amx_int8"), PROCESSOR_UNKNOWN,
+ CPU_AMX_INT8_FLAGS, 0 },
+ { STRING_COMMA_LEN (".amx_bf16"), PROCESSOR_UNKNOWN,
+ CPU_AMX_BF16_FLAGS, 0 },
+ { STRING_COMMA_LEN (".amx_tile"), PROCESSOR_UNKNOWN,
+ CPU_AMX_TILE_FLAGS, 0 },
{ STRING_COMMA_LEN (".movdiri"), PROCESSOR_UNKNOWN,
CPU_MOVDIRI_FLAGS, 0 },
{ STRING_COMMA_LEN (".movdir64b"), PROCESSOR_UNKNOWN,
CPU_AVX512_BF16_FLAGS, 0 },
{ STRING_COMMA_LEN (".avx512_vp2intersect"), PROCESSOR_UNKNOWN,
CPU_AVX512_VP2INTERSECT_FLAGS, 0 },
+ { STRING_COMMA_LEN (".tdx"), PROCESSOR_UNKNOWN,
+ CPU_TDX_FLAGS, 0 },
{ STRING_COMMA_LEN (".enqcmd"), PROCESSOR_UNKNOWN,
CPU_ENQCMD_FLAGS, 0 },
{ STRING_COMMA_LEN (".serialize"), PROCESSOR_UNKNOWN,
CPU_SEV_ES_FLAGS, 0 },
{ STRING_COMMA_LEN (".tsxldtrk"), PROCESSOR_UNKNOWN,
CPU_TSXLDTRK_FLAGS, 0 },
+ { STRING_COMMA_LEN (".kl"), PROCESSOR_UNKNOWN,
+ CPU_KL_FLAGS, 0 },
+ { STRING_COMMA_LEN (".widekl"), PROCESSOR_UNKNOWN,
+ CPU_WIDEKL_FLAGS, 0 },
+ { STRING_COMMA_LEN (".uintr"), PROCESSOR_UNKNOWN,
+ CPU_UINTR_FLAGS, 0 },
+ { STRING_COMMA_LEN (".hreset"), PROCESSOR_UNKNOWN,
+ CPU_HRESET_FLAGS, 0 },
};
static const noarch_entry cpu_noarch[] =
{ STRING_COMMA_LEN ("noavx512_vbmi2"), CPU_ANY_AVX512_VBMI2_FLAGS },
{ STRING_COMMA_LEN ("noavx512_vnni"), CPU_ANY_AVX512_VNNI_FLAGS },
{ STRING_COMMA_LEN ("noavx512_bitalg"), CPU_ANY_AVX512_BITALG_FLAGS },
+ { STRING_COMMA_LEN ("noavx_vnni"), CPU_ANY_AVX_VNNI_FLAGS },
{ STRING_COMMA_LEN ("noibt"), CPU_ANY_IBT_FLAGS },
{ STRING_COMMA_LEN ("noshstk"), CPU_ANY_SHSTK_FLAGS },
+ { STRING_COMMA_LEN ("noamx_int8"), CPU_ANY_AMX_INT8_FLAGS },
+ { STRING_COMMA_LEN ("noamx_bf16"), CPU_ANY_AMX_BF16_FLAGS },
+ { STRING_COMMA_LEN ("noamx_tile"), CPU_ANY_AMX_TILE_FLAGS },
{ STRING_COMMA_LEN ("nomovdiri"), CPU_ANY_MOVDIRI_FLAGS },
{ STRING_COMMA_LEN ("nomovdir64b"), CPU_ANY_MOVDIR64B_FLAGS },
{ STRING_COMMA_LEN ("noavx512_bf16"), CPU_ANY_AVX512_BF16_FLAGS },
{ STRING_COMMA_LEN ("noavx512_vp2intersect"),
CPU_ANY_AVX512_VP2INTERSECT_FLAGS },
+ { STRING_COMMA_LEN ("notdx"), CPU_ANY_TDX_FLAGS },
{ STRING_COMMA_LEN ("noenqcmd"), CPU_ANY_ENQCMD_FLAGS },
{ STRING_COMMA_LEN ("noserialize"), CPU_ANY_SERIALIZE_FLAGS },
{ STRING_COMMA_LEN ("notsxldtrk"), CPU_ANY_TSXLDTRK_FLAGS },
+ { STRING_COMMA_LEN ("nokl"), CPU_ANY_KL_FLAGS },
+ { STRING_COMMA_LEN ("nowidekl"), CPU_ANY_WIDEKL_FLAGS },
+ { STRING_COMMA_LEN ("nouintr"), CPU_ANY_UINTR_FLAGS },
+ { STRING_COMMA_LEN ("nohreset"), CPU_ANY_HRESET_FLAGS },
};
#ifdef I386COFF
extern char *input_line_pointer;
/* Hash table for instruction mnemonic lookup. */
-static struct hash_control *op_hash;
+static htab_t op_hash;
/* Hash table for register lookup. */
-static struct hash_control *reg_hash;
+static htab_t reg_hash;
\f
/* Various efficient no-op patterns for aligning code labels.
Note: Don't try to assemble the instructions in the comments.
|| (i.types[given].bitfield.ymmword
&& !t->operand_types[wanted].bitfield.ymmword)
|| (i.types[given].bitfield.zmmword
- && !t->operand_types[wanted].bitfield.zmmword));
+ && !t->operand_types[wanted].bitfield.zmmword)
+ || (i.types[given].bitfield.tmmword
+ && !t->operand_types[wanted].bitfield.tmmword));
}
/* Return 1 if there is no conflict in any size between operand GIVEN
{
return (match_operand_size (t, wanted, given)
&& !((i.types[given].bitfield.unspecified
- && !i.broadcast
+ && !i.broadcast.type
&& !t->operand_types[wanted].bitfield.unspecified)
|| (i.types[given].bitfield.fword
&& !t->operand_types[wanted].bitfield.fword)
temp.bitfield.xmmword = 0;
temp.bitfield.ymmword = 0;
temp.bitfield.zmmword = 0;
+ temp.bitfield.tmmword = 0;
if (operand_type_all_zero (&temp))
goto mismatch;
default: abort ();
}
-#ifdef BFD64
- /* If BFD64, sign extend val for 32bit address mode. */
- if (flag_code != CODE_64BIT
- || i.prefix[ADDR_PREFIX])
- if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
- val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
-#endif
-
if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
{
char buf1[40], buf2[40];
- sprint_value (buf1, val);
- sprint_value (buf2, val & mask);
+ bfd_sprintf_vma (stdoutput, buf1, val);
+ bfd_sprintf_vma (stdoutput, buf2, val & mask);
as_warn (_("%s shortened to %s"), buf1, buf2);
}
return val & mask;
unsigned long
i386_mach (void)
{
- if (!strncmp (default_arch, "x86_64", 6))
+ if (startswith (default_arch, "x86_64"))
{
if (cpu_arch_isa == PROCESSOR_L1OM)
{
void
md_begin (void)
{
- const char *hash_err;
-
/* Support pseudo prefixes like {disp32}. */
lex_type ['{'] = LEX_BEGIN_NAME;
/* Initialize op_hash hash table. */
- op_hash = hash_new ();
+ op_hash = str_htab_create ();
{
const insn_template *optab;
/* different name --> ship out current template list;
add to hash table; & begin anew. */
core_optab->end = optab;
- hash_err = hash_insert (op_hash,
- (optab - 1)->name,
- (void *) core_optab);
- if (hash_err)
- {
- as_fatal (_("can't hash %s: %s"),
- (optab - 1)->name,
- hash_err);
- }
+ if (str_hash_insert (op_hash, (optab - 1)->name, core_optab, 0))
+ as_fatal (_("duplicate %s"), (optab - 1)->name);
+
if (optab->name == NULL)
break;
core_optab = XNEW (templates);
}
/* Initialize reg_hash hash table. */
- reg_hash = hash_new ();
+ reg_hash = str_htab_create ();
{
const reg_entry *regtab;
unsigned int regtab_size = i386_regtab_size;
for (regtab = i386_regtab; regtab_size--; regtab++)
{
- hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
- if (hash_err)
- as_fatal (_("can't hash %s: %s"),
- regtab->reg_name,
- hash_err);
+ switch (regtab->reg_type.bitfield.class)
+ {
+ case Reg:
+ if (regtab->reg_type.bitfield.dword)
+ {
+ if (regtab->reg_type.bitfield.instance == Accum)
+ reg_eax = regtab;
+ }
+ else if (regtab->reg_type.bitfield.tbyte)
+ {
+ /* There's no point inserting st(<N>) in the hash table, as
+ parentheses aren't included in register_chars[] anyway. */
+ if (regtab->reg_type.bitfield.instance != Accum)
+ continue;
+ reg_st0 = regtab;
+ }
+ break;
+
+ case SReg:
+ switch (regtab->reg_num)
+ {
+ case 0: reg_es = regtab; break;
+ case 2: reg_ss = regtab; break;
+ case 3: reg_ds = regtab; break;
+ }
+ break;
+
+ case RegMask:
+ if (!regtab->reg_num)
+ reg_k0 = regtab;
+ break;
+ }
+
+ if (str_hash_insert (reg_hash, regtab->reg_name, regtab, 0) != NULL)
+ as_fatal (_("duplicate %s"), regtab->reg_name);
}
}
mnemonic_chars[c] = c;
operand_chars[c] = c;
}
+#ifdef SVR4_COMMENT_CHARS
+ else if (c == '\\' && strchr (i386_comment_chars, '/'))
+ operand_chars[c] = c;
+#endif
if (ISALPHA (c) || ISDIGIT (c))
identifier_chars[c] = c;
void
i386_print_statistics (FILE *file)
{
- hash_print_statistics (file, "i386 opcode", op_hash);
- hash_print_statistics (file, "i386 register", reg_hash);
+ htab_print_statistics (file, "i386 opcode", op_hash);
+ htab_print_statistics (file, "i386 register", reg_hash);
}
\f
#ifdef DEBUG386
static void
pte (insn_template *t)
{
+ static const unsigned char opc_pfx[] = { 0, 0x66, 0xf3, 0xf2 };
+ static const char *const opc_spc[] = {
+ NULL, "0f", "0f38", "0f3a", NULL, NULL, NULL, NULL,
+ "XOP08", "XOP09", "XOP0A",
+ };
unsigned int j;
+
fprintf (stdout, " %d operands ", t->operands);
+ if (opc_pfx[t->opcode_modifier.opcodeprefix])
+ fprintf (stdout, "pfx %x ", opc_pfx[t->opcode_modifier.opcodeprefix]);
+ if (opc_spc[t->opcode_modifier.opcodespace])
+ fprintf (stdout, "space %s ", opc_spc[t->opcode_modifier.opcodespace]);
fprintf (stdout, "opcode %x ", t->base_opcode);
if (t->extension_opcode != None)
fprintf (stdout, "ext %x ", t->extension_opcode);
pe (expressionS *e)
{
fprintf (stdout, " operation %d\n", e->X_op);
- fprintf (stdout, " add_number %ld (%lx)\n",
- (long) e->X_add_number, (long) e->X_add_number);
+ fprintf (stdout, " add_number %" BFD_VMA_FMT "d (%" BFD_VMA_FMT "x)\n",
+ e->X_add_number, e->X_add_number);
if (e->X_add_symbol)
{
fprintf (stdout, " add_symbol ");
{ OPERAND_TYPE_REGXMM, "rXMM" },
{ OPERAND_TYPE_REGYMM, "rYMM" },
{ OPERAND_TYPE_REGZMM, "rZMM" },
+ { OPERAND_TYPE_REGTMM, "rTMM" },
{ OPERAND_TYPE_REGMASK, "Mask reg" },
};
return 1;
}
+static INLINE bool
+want_disp32 (const insn_template *t)
+{
+ return flag_code != CODE_64BIT
+ || i.prefix[ADDR_PREFIX]
+ || (t->base_opcode == 0x8d
+ && t->opcode_modifier.opcodespace == SPACE_BASE
+ && (!i.types[1].bitfield.qword
+ || t->opcode_modifier.size == SIZE32));
+}
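/* Editor's note (illustrative, not part of the patch): in 64-bit mode
   displacements are normally the sign-extended disp32s flavor.  Plain
   zero-extended disp32 is wanted outside 64-bit mode, under an address
   size override, or for lea (0x8d in the base opcode space) with a
   destination of 32 bits or less, where the result is truncated
   anyway.  */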
+
static int
intel_float_operand (const char *mnemonic)
{
return 1;
}
+static INLINE void
+install_template (const insn_template *t)
+{
+ unsigned int l;
+
+ i.tm = *t;
+
+ /* Note that for pseudo prefixes this produces a length of 1. But for them
+ the length isn't interesting at all. */
+ for (l = 1; l < 4; ++l)
+ if (!(t->base_opcode >> (8 * l)))
+ break;
+
+ i.opcode_length = l;
+}
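/* Editor's note (illustrative, not part of the patch): with prefix and
   opcode-space bytes moved into opcode_modifier, base_opcode now holds
   only the actual opcode bytes.  E.g. "nop" (0x90) yields
   i.opcode_length 1, while an x87 insn like "fld1" (0xd9e8) yields 2.  */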
+
/* Build the VEX prefix. */
static void
build_vex_prefix (const insn_template *t)
{
unsigned int register_specifier;
- unsigned int implied_prefix;
unsigned int vector_length;
unsigned int w;
&& i.dir_encoding == dir_encoding_default
&& i.operands == i.reg_operands
&& operand_type_equal (&i.types[0], &i.types[i.operands - 1])
- && i.tm.opcode_modifier.vexopcode == VEX0F
+ && i.tm.opcode_modifier.opcodespace == SPACE_0F
&& (i.tm.opcode_modifier.load || i.tm.opcode_modifier.d)
&& i.rex == REX_B)
{
i.tm.base_opcode ^= (i.tm.base_opcode & 0xee) != 0x6e
? Opcode_SIMD_FloatD : Opcode_SIMD_IntD;
else /* Use the next insn. */
- i.tm = t[1];
+ install_template (&t[1]);
}
/* Use 2-byte VEX prefix by swapping commutative source operands if there
union i386_op temp_op;
i386_operand_type temp_type;
- gas_assert (i.tm.opcode_modifier.vexopcode == VEX0F);
+ gas_assert (i.tm.opcode_modifier.opcodespace == SPACE_0F);
gas_assert (!i.tm.opcode_modifier.sae);
gas_assert (operand_type_equal (&i.types[i.operands - 2],
&i.types[i.operands - 3]));
}
}
- switch ((i.tm.base_opcode >> 8) & 0xff)
- {
- case 0:
- implied_prefix = 0;
- break;
- case DATA_PREFIX_OPCODE:
- implied_prefix = 1;
- break;
- case REPE_PREFIX_OPCODE:
- implied_prefix = 2;
- break;
- case REPNE_PREFIX_OPCODE:
- implied_prefix = 3;
- break;
- default:
- abort ();
- }
-
/* Check the REX.W bit and VEXW. */
if (i.tm.opcode_modifier.vexw == VEXWIG)
w = (vexwig == vexw1 || (i.rex & REX_W)) ? 1 : 0;
/* Use 2-byte VEX prefix if possible. */
if (w == 0
&& i.vec_encoding != vex_encoding_vex3
- && i.tm.opcode_modifier.vexopcode == VEX0F
+ && i.tm.opcode_modifier.opcodespace == SPACE_0F
&& (i.rex & (REX_W | REX_X | REX_B)) == 0)
{
/* 2-byte VEX prefix. */
i.vex.bytes[1] = (r << 7
| register_specifier << 3
| vector_length << 2
- | implied_prefix);
+ | i.tm.opcode_modifier.opcodeprefix);
}
else
{
/* 3-byte VEX prefix. */
- unsigned int m;
-
i.vex.length = 3;
- switch (i.tm.opcode_modifier.vexopcode)
+ switch (i.tm.opcode_modifier.opcodespace)
{
- case VEX0F:
- m = 0x1;
+ case SPACE_0F:
+ case SPACE_0F38:
+ case SPACE_0F3A:
i.vex.bytes[0] = 0xc4;
break;
- case VEX0F38:
- m = 0x2;
- i.vex.bytes[0] = 0xc4;
- break;
- case VEX0F3A:
- m = 0x3;
- i.vex.bytes[0] = 0xc4;
- break;
- case XOP08:
- m = 0x8;
- i.vex.bytes[0] = 0x8f;
- break;
- case XOP09:
- m = 0x9;
- i.vex.bytes[0] = 0x8f;
- break;
- case XOP0A:
- m = 0xa;
+ case SPACE_XOP08:
+ case SPACE_XOP09:
+ case SPACE_XOP0A:
i.vex.bytes[0] = 0x8f;
break;
default:
	  /* The high 3 bits of the second VEX byte are 1's complement
	     of RXB bits from REX.  */
- i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
+ i.vex.bytes[1] = (~i.rex & 0x7) << 5 | i.tm.opcode_modifier.opcodespace;
i.vex.bytes[2] = (w << 7
| register_specifier << 3
| vector_length << 2
- | implied_prefix);
+ | i.tm.opcode_modifier.opcodeprefix);
}
}
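/* Editor's note (illustrative, not part of the patch): the resulting
   prefix layouts are, for 2-byte VEX, 0xc5 followed by ~R|~vvvv|L|pp,
   and for 3-byte VEX, 0xc4/0x8f followed by ~R~X~B|mmmmm and then
   W|~vvvv|L|pp.  The SPACE_* values are chosen to equal the mmmmm
   encoding, which is why opcodespace can be OR'ed in directly.  E.g.
   "vaddps %ymm2, %ymm1, %ymm0" encodes as c5 f4 58 c2, and
   "vpermq $0, %ymm1, %ymm0" (VEX.256.66.0F3A.W1) as c4 e3 fd 00 c1 00.  */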
-static INLINE bfd_boolean
+static INLINE bool
is_evex_encoding (const insn_template *t)
{
return t->opcode_modifier.evex || t->opcode_modifier.disp8memshift
|| t->opcode_modifier.sae;
}
-static INLINE bfd_boolean
+static INLINE bool
is_any_vex_encoding (const insn_template *t)
{
- return t->opcode_modifier.vex || t->opcode_modifier.vexopcode
- || is_evex_encoding (t);
+ return t->opcode_modifier.vex || is_evex_encoding (t);
}
/* Build the EVEX prefix. */
static void
build_evex_prefix (void)
{
- unsigned int register_specifier;
- unsigned int implied_prefix;
- unsigned int m, w;
+ unsigned int register_specifier, w;
rex_byte vrex_used = 0;
/* Check register specifier. */
vrex_used |= REX_X;
}
- switch ((i.tm.base_opcode >> 8) & 0xff)
- {
- case 0:
- implied_prefix = 0;
- break;
- case DATA_PREFIX_OPCODE:
- implied_prefix = 1;
- break;
- case REPE_PREFIX_OPCODE:
- implied_prefix = 2;
- break;
- case REPNE_PREFIX_OPCODE:
- implied_prefix = 3;
- break;
- default:
- abort ();
- }
-
/* 4 byte EVEX prefix. */
i.vex.length = 4;
i.vex.bytes[0] = 0x62;
- /* mmmm bits. */
- switch (i.tm.opcode_modifier.vexopcode)
- {
- case VEX0F:
- m = 1;
- break;
- case VEX0F38:
- m = 2;
- break;
- case VEX0F3A:
- m = 3;
- break;
- default:
- abort ();
- break;
- }
-
  /* The high 3 bits of the second EVEX byte are 1's complement of RXB
     bits from REX.  */
- i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
+ gas_assert (i.tm.opcode_modifier.opcodespace >= SPACE_0F);
+ gas_assert (i.tm.opcode_modifier.opcodespace <= SPACE_0F3A);
+ i.vex.bytes[1] = (~i.rex & 0x7) << 5 | i.tm.opcode_modifier.opcodespace;
  /* The fifth bit of the second EVEX byte is 1's complement of the
     REX_R bit in VREX.  */
else
w = (flag_code == CODE_64BIT ? i.rex & REX_W : evexwig == evexw1) ? 1 : 0;
- /* Encode the U bit. */
- implied_prefix |= 0x4;
-
/* The third byte of the EVEX prefix. */
- i.vex.bytes[2] = (w << 7 | register_specifier << 3 | implied_prefix);
+ i.vex.bytes[2] = ((w << 7)
+ | (register_specifier << 3)
+ | 4 /* Encode the U bit. */
+ | i.tm.opcode_modifier.opcodeprefix);
/* The fourth byte of the EVEX prefix. */
/* The zeroing-masking bit. */
- if (i.mask && i.mask->zeroing)
+ if (i.mask.reg && i.mask.zeroing)
i.vex.bytes[3] |= 0x80;
/* Don't always set the broadcast bit if there is no RC. */
- if (!i.rounding)
+ if (i.rounding.type == rc_none)
{
/* Encode the vector length. */
unsigned int vec_length;
i.tm.opcode_modifier.evex = EVEX128;
break;
}
- else if (i.broadcast && (int) op == i.broadcast->operand)
+ else if (i.broadcast.type && op == i.broadcast.operand)
{
- switch (i.broadcast->bytes)
+ switch (i.broadcast.bytes)
{
case 64:
i.tm.opcode_modifier.evex = EVEX512;
}
i.vex.bytes[3] |= vec_length;
/* Encode the broadcast bit. */
- if (i.broadcast)
+ if (i.broadcast.type)
i.vex.bytes[3] |= 0x10;
}
+ else if (i.rounding.type != saeonly)
+ i.vex.bytes[3] |= 0x10 | (i.rounding.type << 5);
else
- {
- if (i.rounding->type != saeonly)
- i.vex.bytes[3] |= 0x10 | (i.rounding->type << 5);
- else
- i.vex.bytes[3] |= 0x10 | (evexrcig << 5);
- }
+ i.vex.bytes[3] |= 0x10 | (evexrcig << 5);
- if (i.mask && i.mask->mask)
- i.vex.bytes[3] |= i.mask->mask->reg_num;
+ if (i.mask.reg)
+ i.vex.bytes[3] |= i.mask.reg->reg_num;
}
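/* Editor's note (illustrative, not part of the patch): the four EVEX
   bytes are 0x62, then ~R~X~B~R'|00|mm, then W|~vvvv|1|pp (the constant
   4 above is the always-set U bit), then z|L'L|b|~V'|aaa.  E.g.
   "vaddps %zmm2, %zmm1, %zmm0{%k1}" encodes as 62 f1 74 49 58 c2.  */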
static void
static int
check_hle (void)
{
- switch (i.tm.opcode_modifier.hleprefixok)
+ switch (i.tm.opcode_modifier.prefixok)
{
default:
abort ();
- case HLEPrefixNone:
+ case PrefixLock:
+ case PrefixNone:
+ case PrefixNoTrack:
+ case PrefixRep:
as_bad (_("invalid instruction `%s' after `%s'"),
i.tm.name, i.hle_prefix);
return 0;
- case HLEPrefixLock:
+ case PrefixHLELock:
if (i.prefix[LOCK_PREFIX])
return 1;
as_bad (_("missing `lock' with `%s'"), i.hle_prefix);
return 0;
- case HLEPrefixAny:
+ case PrefixHLEAny:
return 1;
- case HLEPrefixRelease:
+ case PrefixHLERelease:
if (i.prefix[HLE_PREFIX] != XRELEASE_PREFIX_OPCODE)
{
as_bad (_("instruction `%s' after `xacquire' not allowed"),
{
unsigned int j;
+ if (i.tm.opcode_modifier.opcodespace == SPACE_BASE
+ && i.tm.base_opcode == 0x8d)
+ {
+ /* Optimize: -O:
+ lea symbol, %rN -> mov $symbol, %rN
+ lea (%rM), %rN -> mov %rM, %rN
+ lea (,%rM,1), %rN -> mov %rM, %rN
+
+ and in 32-bit mode for 16-bit addressing
+
+ lea (%rM), %rN -> movzx %rM, %rN
+
+ and in 64-bit mode zap 32-bit addressing in favor of using a
+ 32-bit (or less) destination.
+ */
+ if (flag_code == CODE_64BIT && i.prefix[ADDR_PREFIX])
+ {
+ if (!i.op[1].regs->reg_type.bitfield.word)
+ i.tm.opcode_modifier.size = SIZE32;
+ i.prefix[ADDR_PREFIX] = 0;
+ }
+
+ if (!i.index_reg && !i.base_reg)
+ {
+ /* Handle:
+ lea symbol, %rN -> mov $symbol, %rN
+ */
+ if (flag_code == CODE_64BIT)
+ {
+ /* Don't transform a relocation to a 16-bit one. */
+ if (i.op[0].disps
+ && i.op[0].disps->X_op != O_constant
+ && i.op[1].regs->reg_type.bitfield.word)
+ return;
+
+ if (!i.op[1].regs->reg_type.bitfield.qword
+ || i.tm.opcode_modifier.size == SIZE32)
+ {
+ i.tm.base_opcode = 0xb8;
+ i.tm.opcode_modifier.modrm = 0;
+ if (!i.op[1].regs->reg_type.bitfield.word)
+ i.types[0].bitfield.imm32 = 1;
+ else
+ {
+ i.tm.opcode_modifier.size = SIZE16;
+ i.types[0].bitfield.imm16 = 1;
+ }
+ }
+ else
+ {
+ /* Subject to further optimization below. */
+ i.tm.base_opcode = 0xc7;
+ i.tm.extension_opcode = 0;
+ i.types[0].bitfield.imm32s = 1;
+ i.types[0].bitfield.baseindex = 0;
+ }
+ }
+	  /* Outside of 64-bit mode, address and operand sizes have to match
+	     if a relocation is involved, as otherwise we wouldn't (currently)
+	     or even couldn't express the relocation correctly.  */
+ else if (i.op[0].disps
+ && i.op[0].disps->X_op != O_constant
+ && ((!i.prefix[ADDR_PREFIX])
+ != (flag_code == CODE_32BIT
+ ? i.op[1].regs->reg_type.bitfield.dword
+ : i.op[1].regs->reg_type.bitfield.word)))
+ return;
+ else
+ {
+ i.tm.base_opcode = 0xb8;
+ i.tm.opcode_modifier.modrm = 0;
+ if (i.op[1].regs->reg_type.bitfield.dword)
+ i.types[0].bitfield.imm32 = 1;
+ else
+ i.types[0].bitfield.imm16 = 1;
+
+ if (i.op[0].disps
+ && i.op[0].disps->X_op == O_constant
+ && i.op[1].regs->reg_type.bitfield.dword
+ && !i.prefix[ADDR_PREFIX] != (flag_code == CODE_32BIT))
+ i.op[0].disps->X_add_number &= 0xffff;
+ }
+
+ i.tm.operand_types[0] = i.types[0];
+ i.imm_operands = 1;
+ if (!i.op[0].imms)
+ {
+ i.op[0].imms = &im_expressions[0];
+ i.op[0].imms->X_op = O_absent;
+ }
+ }
+ else if (i.op[0].disps
+ && (i.op[0].disps->X_op != O_constant
+ || i.op[0].disps->X_add_number))
+ return;
+ else
+ {
+ /* Handle:
+ lea (%rM), %rN -> mov %rM, %rN
+ lea (,%rM,1), %rN -> mov %rM, %rN
+ lea (%rM), %rN -> movzx %rM, %rN
+ */
+ const reg_entry *addr_reg;
+
+ if (!i.index_reg && i.base_reg->reg_num != RegIP)
+ addr_reg = i.base_reg;
+ else if (!i.base_reg
+ && i.index_reg->reg_num != RegIZ
+ && !i.log2_scale_factor)
+ addr_reg = i.index_reg;
+ else
+ return;
+
+ if (addr_reg->reg_type.bitfield.word
+ && i.op[1].regs->reg_type.bitfield.dword)
+ {
+ if (flag_code != CODE_32BIT)
+ return;
+ i.tm.opcode_modifier.opcodespace = SPACE_0F;
+ i.tm.base_opcode = 0xb7;
+ }
+ else
+ i.tm.base_opcode = 0x8b;
+
+ if (addr_reg->reg_type.bitfield.dword
+ && i.op[1].regs->reg_type.bitfield.qword)
+ i.tm.opcode_modifier.size = SIZE32;
+
+ i.op[0].regs = addr_reg;
+ i.reg_operands = 2;
+ }
+
+ i.mem_operands = 0;
+ i.disp_operands = 0;
+ i.prefix[ADDR_PREFIX] = 0;
+ i.prefix[SEG_PREFIX] = 0;
+ i.seg[0] = NULL;
+ }
+
if (optimize_for_space
- && !is_any_vex_encoding (&i.tm)
+ && i.tm.opcode_modifier.opcodespace == SPACE_BASE
&& i.reg_operands == 1
&& i.imm_operands == 1
&& !i.types[1].bitfield.byte
}
}
else if (flag_code == CODE_64BIT
- && !is_any_vex_encoding (&i.tm)
+ && i.tm.opcode_modifier.opcodespace == SPACE_BASE
&& ((i.types[1].bitfield.qword
&& i.reg_operands == 1
&& i.imm_operands == 1
}
else if (optimize > 1
&& !optimize_for_space
- && !is_any_vex_encoding (&i.tm)
+ && i.tm.opcode_modifier.opcodespace == SPACE_BASE
&& i.reg_operands == 2
&& i.op[0].regs == i.op[1].regs
&& ((i.tm.base_opcode & ~(Opcode_D | 1)) == 0x8
&& i.op[0].regs == i.op[1].regs
&& !i.types[2].bitfield.xmmword
&& (i.tm.opcode_modifier.vex
- || ((!i.mask || i.mask->zeroing)
- && !i.rounding
+ || ((!i.mask.reg || i.mask.zeroing)
+ && i.rounding.type == rc_none
&& is_evex_encoding (&i.tm)
&& (i.vec_encoding != vex_encoding_evex
|| cpu_arch_isa_flags.bitfield.cpuavx512vl
|| (i.tm.operand_types[2].bitfield.zmmword
&& i.types[2].bitfield.ymmword))))
&& ((i.tm.base_opcode == 0x55
- || i.tm.base_opcode == 0x6655
- || i.tm.base_opcode == 0x66df
|| i.tm.base_opcode == 0x57
- || i.tm.base_opcode == 0x6657
- || i.tm.base_opcode == 0x66ef
- || i.tm.base_opcode == 0x66f8
- || i.tm.base_opcode == 0x66f9
- || i.tm.base_opcode == 0x66fa
- || i.tm.base_opcode == 0x66fb
+ || i.tm.base_opcode == 0xdf
+ || i.tm.base_opcode == 0xef
+ || i.tm.base_opcode == 0xf8
+ || i.tm.base_opcode == 0xf9
+ || i.tm.base_opcode == 0xfa
+ || i.tm.base_opcode == 0xfb
|| i.tm.base_opcode == 0x42
- || i.tm.base_opcode == 0x6642
- || i.tm.base_opcode == 0x47
- || i.tm.base_opcode == 0x6647)
+ || i.tm.base_opcode == 0x47)
&& i.tm.extension_opcode == None))
{
/* Optimize: -O1:
}
else if (i.tm.operand_types[0].bitfield.class == RegMask)
{
- i.tm.base_opcode &= 0xff;
+ i.tm.opcode_modifier.opcodeprefix = PREFIX_NONE;
i.tm.opcode_modifier.vexw = VEXW0;
}
else
else if (i.vec_encoding != vex_encoding_evex
&& !i.types[0].bitfield.zmmword
&& !i.types[1].bitfield.zmmword
- && !i.mask
- && !i.broadcast
+ && !i.mask.reg
+ && !i.broadcast.type
&& is_evex_encoding (&i.tm)
- && ((i.tm.base_opcode & ~Opcode_SIMD_IntD) == 0x666f
- || (i.tm.base_opcode & ~Opcode_SIMD_IntD) == 0xf36f
- || (i.tm.base_opcode & ~Opcode_SIMD_IntD) == 0xf26f
- || (i.tm.base_opcode & ~4) == 0x66db
- || (i.tm.base_opcode & ~4) == 0x66eb)
+ && ((i.tm.base_opcode & ~Opcode_SIMD_IntD) == 0x6f
+ || (i.tm.base_opcode & ~4) == 0xdb
+ || (i.tm.base_opcode & ~4) == 0xeb)
&& i.tm.extension_opcode == None)
{
/* Optimize: -O1:
i.types[j].bitfield.disp8 = vex_disp8;
break;
}
- if ((i.tm.base_opcode & ~Opcode_SIMD_IntD) == 0xf26f)
- i.tm.base_opcode ^= 0xf36f ^ 0xf26f;
+ if ((i.tm.base_opcode & ~Opcode_SIMD_IntD) == 0x6f
+ && i.tm.opcode_modifier.opcodeprefix == PREFIX_0XF2)
+ i.tm.opcode_modifier.opcodeprefix = PREFIX_0XF3;
i.tm.opcode_modifier.vex
= i.types[0].bitfield.ymmword ? VEX256 : VEX128;
i.tm.opcode_modifier.vexw = VEXW0;
/* VPAND, VPOR, and VPXOR are commutative. */
- if (i.reg_operands == 3 && i.tm.base_opcode != 0x66df)
+ if (i.reg_operands == 3 && i.tm.base_opcode != 0xdf)
i.tm.opcode_modifier.commutative = 1;
i.tm.opcode_modifier.evex = 0;
i.tm.opcode_modifier.masking = 0;
if (i.tm.opcode_modifier.anysize)
return 0;
- /* pop, popf, popa. */
- if (strcmp (i.tm.name, "pop") == 0
- || i.tm.base_opcode == 0x9d
+ /* pop. */
+ if (strcmp (i.tm.name, "pop") == 0)
+ return 1;
+ }
+
+ if (i.tm.opcode_modifier.opcodespace == SPACE_BASE)
+ {
+ /* popf, popa. */
+ if (i.tm.base_opcode == 0x9d
|| i.tm.base_opcode == 0x61)
return 1;
/* vldmxcsr. */
if (i.tm.base_opcode == 0xae
&& i.tm.opcode_modifier.vex
- && i.tm.opcode_modifier.vexopcode == VEX0F
+ && i.tm.opcode_modifier.opcodespace == SPACE_0F
+ && i.tm.opcode_modifier.opcodeprefix == PREFIX_NONE
&& i.tm.extension_opcode == 2)
return 1;
}
- else
+ else if (i.tm.opcode_modifier.opcodespace == SPACE_BASE)
{
/* test, not, neg, mul, imul, div, idiv. */
if ((i.tm.base_opcode == 0xf6 || i.tm.base_opcode == 0xf7)
if (i.tm.base_opcode >= 0x80 && i.tm.base_opcode <= 0x83)
return 1;
- /* bt, bts, btr, btc. */
- if (i.tm.base_opcode == 0xfba
- && (i.tm.extension_opcode >= 4 && i.tm.extension_opcode <= 7))
- return 1;
-
/* rol, ror, rcl, rcr, shl/sal, shr, sar. */
if ((base_opcode == 0xc1
|| (i.tm.base_opcode >= 0xd0 && i.tm.base_opcode <= 0xd3))
&& i.tm.extension_opcode != 6)
return 1;
- /* cmpxchg8b, cmpxchg16b, xrstors. */
- if (i.tm.base_opcode == 0xfc7
- && (i.tm.extension_opcode == 1 || i.tm.extension_opcode == 3))
- return 1;
-
- /* fxrstor, ldmxcsr, xrstor. */
- if (i.tm.base_opcode == 0xfae
- && (i.tm.extension_opcode == 1
- || i.tm.extension_opcode == 2
- || i.tm.extension_opcode == 5))
- return 1;
-
- /* lgdt, lidt, lmsw. */
- if (i.tm.base_opcode == 0xf01
- && (i.tm.extension_opcode == 2
- || i.tm.extension_opcode == 3
- || i.tm.extension_opcode == 6))
- return 1;
-
- /* vmptrld */
- if (i.tm.base_opcode == 0xfc7
- && i.tm.extension_opcode == 6)
- return 1;
-
/* Check for x87 instructions. */
- if (i.tm.base_opcode >= 0xd8 && i.tm.base_opcode <= 0xdf)
+ if (base_opcode >= 0xd8 && base_opcode <= 0xdf)
{
/* Skip fst, fstp, fstenv, fstcw. */
if (i.tm.base_opcode == 0xd9
return 1;
}
}
+ else if (i.tm.opcode_modifier.opcodespace == SPACE_0F)
+ {
+ /* bt, bts, btr, btc. */
+ if (i.tm.base_opcode == 0xba
+ && (i.tm.extension_opcode >= 4 && i.tm.extension_opcode <= 7))
+ return 1;
+
+ /* cmpxchg8b, cmpxchg16b, xrstors, vmptrld. */
+ if (i.tm.base_opcode == 0xc7
+ && i.tm.opcode_modifier.opcodeprefix == PREFIX_NONE
+ && (i.tm.extension_opcode == 1 || i.tm.extension_opcode == 3
+ || i.tm.extension_opcode == 6))
+ return 1;
+
+ /* fxrstor, ldmxcsr, xrstor. */
+ if (i.tm.base_opcode == 0xae
+ && (i.tm.extension_opcode == 1
+ || i.tm.extension_opcode == 2
+ || i.tm.extension_opcode == 5))
+ return 1;
+
+ /* lgdt, lidt, lmsw. */
+ if (i.tm.base_opcode == 0x01
+ && (i.tm.extension_opcode == 2
+ || i.tm.extension_opcode == 3
+ || i.tm.extension_opcode == 6))
+ return 1;
+ }
dest = i.operands - 1;
&& i.types[dest].bitfield.imm8)
dest--;
- /* add, or, adc, sbb, and, sub, xor, cmp, test, xchg, xadd */
- if (!any_vex_p
+ /* add, or, adc, sbb, and, sub, xor, cmp, test, xchg. */
+ if (i.tm.opcode_modifier.opcodespace == SPACE_BASE
&& (base_opcode == 0x1
|| base_opcode == 0x9
|| base_opcode == 0x11
|| base_opcode == 0x29
|| base_opcode == 0x31
|| base_opcode == 0x39
- || (i.tm.base_opcode >= 0x84 && i.tm.base_opcode <= 0x87)
- || base_opcode == 0xfc1))
+ || (base_opcode | 2) == 0x87))
+ return 1;
+
+ /* xadd. */
+ if (i.tm.opcode_modifier.opcodespace == SPACE_0F
+ && base_opcode == 0xc1)
return 1;
/* Check for load instruction. */
{
char *p;
- if (is_any_vex_encoding (&i.tm))
+ if (i.tm.opcode_modifier.opcodespace != SPACE_BASE)
return;
if (i.tm.base_opcode == 0xff
/* Initialize globals. */
memset (&i, '\0', sizeof (i));
+ i.rounding.type = rc_none;
for (j = 0; j < MAX_OPERANDS; j++)
i.reloc[j] = NO_RELOC;
memset (disp_expressions, '\0', sizeof (disp_expressions));
operands at hand. */
/* All Intel opcodes have reversed operands except for "bound", "enter",
- "monitor*", "mwait*", "tpause", and "umwait". We also don't reverse
- intersegment "jmp" and "call" instructions with 2 immediate operands so
- that the immediate segment precedes the offset, as it does when in AT&T
- mode. */
+ "invlpg*", "monitor*", "mwait*", "tpause", "umwait", "pvalidate",
+ "rmpadjust", and "rmpupdate". We also don't reverse intersegment "jmp"
+ and "call" instructions with 2 immediate operands so that the immediate
+ segment precedes the offset consistently in Intel and AT&T modes. */
if (intel_syntax
&& i.operands > 1
&& (strcmp (mnemonic, "bound") != 0)
- && (strcmp (mnemonic, "invlpga") != 0)
- && (strncmp (mnemonic, "monitor", 7) != 0)
- && (strncmp (mnemonic, "mwait", 5) != 0)
+ && (strncmp (mnemonic, "invlpg", 6) != 0)
+ && !startswith (mnemonic, "monitor")
+ && !startswith (mnemonic, "mwait")
+ && (strcmp (mnemonic, "pvalidate") != 0)
+ && !startswith (mnemonic, "rmp")
&& (strcmp (mnemonic, "tpause") != 0)
&& (strcmp (mnemonic, "umwait") != 0)
&& !(operand_type_check (i.types[0], imm)
if (i.imm_operands)
optimize_imm ();
+ if (i.disp_operands && !want_disp32 (current_templates->start))
+ {
+ for (j = 0; j < i.operands; ++j)
+ {
+ const expressionS *exp = i.op[j].disps;
+
+ if (!operand_type_check (i.types[j], disp))
+ continue;
+
+ if (exp->X_op != O_constant)
+ continue;
+
+	  /* Since displacement is sign-extended to 64bit, don't allow
+	     disp32 and turn off disp32s if they are out of range.  */
+ i.types[j].bitfield.disp32 = 0;
+ if (fits_in_signed_long (exp->X_add_number))
+ continue;
+
+ i.types[j].bitfield.disp32s = 0;
+ if (i.types[j].bitfield.baseindex)
+ {
+ as_bad (_("0x%" BFD_VMA_FMT "x out of range of signed 32bit displacement"),
+ exp->X_add_number);
+ return;
+ }
+ }
+ }
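/* Editor's note (illustrative, not part of the patch): this rejects
   things like "mov %eax, 0x123456789(%rbx)" in 64-bit mode -- with a
   base or index register the constant displacement has to fit the
   sign-extended 32-bit field; only a bare absolute address can survive
   here, and then only mov to/from the accumulator (movabs) accepts a
   64-bit displacement.  */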
+
/* Don't optimize displacement for movabs since it only takes 64bit
displacement. */
if (i.disp_operands
return;
/* Check if REP prefix is OK. */
- if (i.rep_prefix && !i.tm.opcode_modifier.repprefixok)
+ if (i.rep_prefix && i.tm.opcode_modifier.prefixok != PrefixRep)
{
as_bad (_("invalid instruction `%s' after `%s'"),
i.tm.name, i.rep_prefix);
/* Check for lock without a lockable instruction. Destination operand
must be memory unless it is xchg (0x86). */
if (i.prefix[LOCK_PREFIX]
- && (!i.tm.opcode_modifier.islockable
+ && (i.tm.opcode_modifier.prefixok < PrefixLock
|| i.mem_operands == 0
|| (i.tm.base_opcode != 0x86
&& !(i.flags[i.operands - 1] & Operand_Mem))))
as_bad (_("expecting valid branch instruction after `bnd'"));
/* Check NOTRACK prefix. */
- if (i.notrack_prefix && !i.tm.opcode_modifier.notrackprefixok)
+ if (i.notrack_prefix && i.tm.opcode_modifier.prefixok != PrefixNoTrack)
as_bad (_("expecting indirect branch instruction after `notrack'"));
if (i.tm.cpu_flags.bitfield.cpumpx)
if (!process_suffix ())
return;
- /* Update operand types. */
+ /* Update operand types and check extended states. */
for (j = 0; j < i.operands; j++)
- i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);
+ {
+ i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);
+ switch (i.tm.operand_types[j].bitfield.class)
+ {
+ default:
+ break;
+ case RegMMX:
+ i.xstate |= xstate_mmx;
+ break;
+ case RegMask:
+ i.xstate |= xstate_mask;
+ break;
+ case RegSIMD:
+ if (i.tm.operand_types[j].bitfield.tmmword)
+ i.xstate |= xstate_tmm;
+ else if (i.tm.operand_types[j].bitfield.zmmword)
+ i.xstate |= xstate_zmm;
+ else if (i.tm.operand_types[j].bitfield.ymmword)
+ i.xstate |= xstate_ymm;
+ else if (i.tm.operand_types[j].bitfield.xmmword)
+ i.xstate |= xstate_xmm;
+ break;
+ }
+ }
/* Make still unresolved immediate matches conform to size of immediate
given in i.suffix. */
&& !i.types[j].bitfield.xmmword)
i.reg_operands--;
- /* ImmExt should be processed after SSE2AVX. */
- if (!i.tm.opcode_modifier.sse2avx
- && i.tm.opcode_modifier.immext)
- process_immext ();
-
/* For insns with operands there are more diddles to do to the opcode. */
if (i.operands)
{
/* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
instructions may define INT_OPCODE as well, so avoid this corner
case for those instructions that use MODRM. */
- if (i.tm.base_opcode == INT_OPCODE
+ if (i.tm.opcode_modifier.opcodespace == SPACE_BASE
+ && i.tm.base_opcode == INT_OPCODE
&& !i.tm.opcode_modifier.modrm
&& i.op[0].imms->X_add_number == 3)
{
&& i.op[x].regs->reg_num > 3)
{
gas_assert (!(i.op[x].regs->reg_flags & RegRex));
- i.rex_encoding = FALSE;
+ i.rex_encoding = false;
break;
}
}
/* Look up instruction (or prefix) via hash table. */
- current_templates = (const templates *) hash_find (op_hash, mnemonic);
+ current_templates = (const templates *) str_hash_find (op_hash, mnemonic);
if (*l != END_OF_INSN
&& (!is_space_char (*l) || l[1] != END_OF_INSN)
current_templates->start->name);
return NULL;
}
- if (current_templates->start->opcode_length == 0)
+
+ if (current_templates->start->base_opcode == PSEUDO_PREFIX)
{
/* Handle pseudo prefixes. */
- switch (current_templates->start->base_opcode)
+ switch (current_templates->start->extension_opcode)
{
- case 0x0:
+ case Prefix_Disp8:
/* {disp8} */
i.disp_encoding = disp_encoding_8bit;
break;
- case 0x1:
+ case Prefix_Disp16:
+ /* {disp16} */
+ i.disp_encoding = disp_encoding_16bit;
+ break;
+ case Prefix_Disp32:
/* {disp32} */
i.disp_encoding = disp_encoding_32bit;
break;
- case 0x2:
+ case Prefix_Load:
/* {load} */
i.dir_encoding = dir_encoding_load;
break;
- case 0x3:
+ case Prefix_Store:
/* {store} */
i.dir_encoding = dir_encoding_store;
break;
- case 0x4:
+ case Prefix_VEX:
/* {vex} */
i.vec_encoding = vex_encoding_vex;
break;
- case 0x5:
+ case Prefix_VEX3:
/* {vex3} */
i.vec_encoding = vex_encoding_vex3;
break;
- case 0x6:
+ case Prefix_EVEX:
/* {evex} */
i.vec_encoding = vex_encoding_evex;
break;
- case 0x7:
+ case Prefix_REX:
/* {rex} */
- i.rex_encoding = TRUE;
+ i.rex_encoding = true;
break;
- case 0x8:
+ case Prefix_NoOptimize:
/* {nooptimize} */
- i.no_optimize = TRUE;
+ i.no_optimize = true;
break;
default:
abort ();
goto check_suffix;
mnem_p = dot_p;
*dot_p = '\0';
- current_templates = (const templates *) hash_find (op_hash, mnemonic);
+ current_templates = (const templates *) str_hash_find (op_hash, mnemonic);
}
if (!current_templates)
case QWORD_MNEM_SUFFIX:
i.suffix = mnem_p[-1];
mnem_p[-1] = '\0';
- current_templates = (const templates *) hash_find (op_hash,
- mnemonic);
+ current_templates
+ = (const templates *) str_hash_find (op_hash, mnemonic);
break;
case SHORT_MNEM_SUFFIX:
case LONG_MNEM_SUFFIX:
{
i.suffix = mnem_p[-1];
mnem_p[-1] = '\0';
- current_templates = (const templates *) hash_find (op_hash,
- mnemonic);
+ current_templates
+ = (const templates *) str_hash_find (op_hash, mnemonic);
}
break;
else
i.suffix = LONG_MNEM_SUFFIX;
mnem_p[-1] = '\0';
- current_templates = (const templates *) hash_find (op_hash,
- mnemonic);
+ current_templates
+ = (const templates *) str_hash_find (op_hash, mnemonic);
}
break;
}
}
static void
-swap_2_operands (int xchg1, int xchg2)
+swap_2_operands (unsigned int xchg1, unsigned int xchg2)
{
union i386_op temp_op;
i386_operand_type temp_type;
i.reloc[xchg2] = i.reloc[xchg1];
i.reloc[xchg1] = temp_reloc;
- if (i.mask)
+ if (i.mask.reg)
{
- if (i.mask->operand == xchg1)
- i.mask->operand = xchg2;
- else if (i.mask->operand == xchg2)
- i.mask->operand = xchg1;
+ if (i.mask.operand == xchg1)
+ i.mask.operand = xchg2;
+ else if (i.mask.operand == xchg2)
+ i.mask.operand = xchg1;
}
- if (i.broadcast)
+ if (i.broadcast.type)
{
- if (i.broadcast->operand == xchg1)
- i.broadcast->operand = xchg2;
- else if (i.broadcast->operand == xchg2)
- i.broadcast->operand = xchg1;
+ if (i.broadcast.operand == xchg1)
+ i.broadcast.operand = xchg2;
+ else if (i.broadcast.operand == xchg2)
+ i.broadcast.operand = xchg1;
}
- if (i.rounding)
+ if (i.rounding.type != rc_none)
{
- if (i.rounding->operand == xchg1)
- i.rounding->operand = xchg2;
- else if (i.rounding->operand == xchg2)
- i.rounding->operand = xchg1;
+ if (i.rounding.operand == xchg1)
+ i.rounding.operand = xchg2;
+ else if (i.rounding.operand == xchg2)
+ i.rounding.operand = xchg1;
}
}
if (i.mem_operands == 2)
{
- const seg_entry *temp_seg;
+ const reg_entry *temp_seg;
temp_seg = i.seg[0];
i.seg[0] = i.seg[1];
i.seg[1] = temp_seg;
op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
i.types[op].bitfield.disp64 = 0;
}
-#ifdef BFD64
- /* Optimize 64-bit displacement to 32-bit for 64-bit BFD. */
- if (i.types[op].bitfield.disp32
- && (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
- {
- /* If this operand is at most 32 bits, convert
- to a signed 32 bit number and don't use 64bit
- displacement. */
- op_disp &= (((offsetT) 2 << 31) - 1);
- op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
- i.types[op].bitfield.disp64 = 0;
- }
-#endif
if (!op_disp && i.types[op].bitfield.baseindex)
{
i.types[op].bitfield.disp8 = 0;
i.op[op].disps = 0;
i.disp_operands--;
}
+#ifdef BFD64
else if (flag_code == CODE_64BIT)
{
+ if (want_disp32 (current_templates->start)
+ && fits_in_unsigned_long (op_disp))
+ i.types[op].bitfield.disp32 = 1;
+
+ /* Optimize 64-bit displacement to 32-bit for 64-bit BFD. */
+ if (i.types[op].bitfield.disp32
+ && (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
+ {
+ /* If this operand is at most 32 bits, convert
+ to a signed 32 bit number and don't use 64bit
+ displacement. */
+ op_disp &= (((offsetT) 2 << 31) - 1);
+ op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
+ i.types[op].bitfield.disp64 = 0;
+ }
+
if (fits_in_signed_long (op_disp))
{
i.types[op].bitfield.disp64 = 0;
i.types[op].bitfield.disp32s = 1;
}
- if (i.prefix[ADDR_PREFIX]
- && fits_in_unsigned_long (op_disp))
- i.types[op].bitfield.disp32 = 1;
}
+#endif
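/* Editor's note (illustrative, not part of the patch): e.g. a constant
   displacement of 0xfffffffffffffff0 fits in a signed long, so disp64
   is dropped in favor of disp32s (encoded as -16); zero-extended disp32
   is additionally allowed when want_disp32() holds and the value fits
   in an unsigned 32-bit field.  */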
if ((i.types[op].bitfield.disp32
|| i.types[op].bitfield.disp32s
|| i.types[op].bitfield.disp16)
}
/* Without VSIB byte, we can't have a vector register for index. */
- if (!t->opcode_modifier.vecsib
+ if (!t->opcode_modifier.sib
&& i.index_reg
&& (i.index_reg->reg_type.bitfield.xmmword
|| i.index_reg->reg_type.bitfield.ymmword
/* Check if default mask is allowed. */
if (t->opcode_modifier.nodefmask
- && (!i.mask || i.mask->mask->reg_num == 0))
+ && (!i.mask.reg || i.mask.reg->reg_num == 0))
{
i.error = no_default_mask;
return 1;
/* For VSIB byte, we need a vector register for index, and all vector
registers must be distinct. */
- if (t->opcode_modifier.vecsib)
+ if (t->opcode_modifier.sib && t->opcode_modifier.sib != SIBMEM)
{
if (!i.index_reg
- || !((t->opcode_modifier.vecsib == VecSIB128
+ || !((t->opcode_modifier.sib == VECSIB128
&& i.index_reg->reg_type.bitfield.xmmword)
- || (t->opcode_modifier.vecsib == VecSIB256
+ || (t->opcode_modifier.sib == VECSIB256
&& i.index_reg->reg_type.bitfield.ymmword)
- || (t->opcode_modifier.vecsib == VecSIB512
+ || (t->opcode_modifier.sib == VECSIB512
&& i.index_reg->reg_type.bitfield.zmmword)))
{
i.error = invalid_vsib_address;
return 1;
}
- gas_assert (i.reg_operands == 2 || i.mask);
- if (i.reg_operands == 2 && !i.mask)
+ gas_assert (i.reg_operands == 2 || i.mask.reg);
+ if (i.reg_operands == 2 && !i.mask.reg)
{
gas_assert (i.types[0].bitfield.class == RegSIMD);
gas_assert (i.types[0].bitfield.xmmword
}
as_warn (_("mask, index, and destination registers should be distinct"));
}
- else if (i.reg_operands == 1 && i.mask)
+ else if (i.reg_operands == 1 && i.mask.reg)
{
if (i.types[1].bitfield.class == RegSIMD
&& (i.types[1].bitfield.xmmword
}
}
+  /* For AMX instructions with three tmmword operands, all tmmword
+     operands must be distinct.  */
+ if (t->operand_types[0].bitfield.tmmword
+ && i.reg_operands == 3)
+ {
+ if (register_number (i.op[0].regs)
+ == register_number (i.op[1].regs)
+ || register_number (i.op[0].regs)
+ == register_number (i.op[2].regs)
+ || register_number (i.op[1].regs)
+ == register_number (i.op[2].regs))
+ {
+ i.error = invalid_tmm_register_set;
+ return 1;
+ }
+ }
+
/* Check if broadcast is supported by the instruction and is applied
to the memory operand. */
- if (i.broadcast)
+ if (i.broadcast.type)
{
i386_operand_type type, overlap;
/* Check if specified broadcast is supported in this instruction,
and its broadcast bytes match the memory operand. */
- op = i.broadcast->operand;
+ op = i.broadcast.operand;
if (!t->opcode_modifier.broadcast
|| !(i.flags[op] & Operand_Mem)
|| (!i.types[op].bitfield.unspecified
return 1;
}
- i.broadcast->bytes = ((1 << (t->opcode_modifier.broadcast - 1))
- * i.broadcast->type);
+ i.broadcast.bytes = ((1 << (t->opcode_modifier.broadcast - 1))
+ * i.broadcast.type);
operand_type_set (&type, 0);
- switch (i.broadcast->bytes)
+ switch (i.broadcast.bytes)
{
case 2:
type.bitfield.word = 1;
op = MAX_OPERANDS - 1; /* Avoid uninitialized variable warning. */
/* Check if requested masking is supported. */
- if (i.mask)
+ if (i.mask.reg)
{
switch (t->opcode_modifier.masking)
{
case BOTH_MASKING:
break;
case MERGING_MASKING:
- if (i.mask->zeroing)
+ if (i.mask.zeroing)
{
case 0:
i.error = unsupported_masking;
break;
case DYNAMIC_MASKING:
/* Memory destinations allow only merging masking. */
- if (i.mask->zeroing && i.mem_operands)
+ if (i.mask.zeroing && i.mem_operands)
{
/* Find memory operand. */
for (op = 0; op < i.operands; op++)
}
/* Check if masking is applied to dest operand. */
- if (i.mask && (i.mask->operand != (int) (i.operands - 1)))
+ if (i.mask.reg && (i.mask.operand != i.operands - 1))
{
i.error = mask_not_on_destination;
return 1;
}
/* Check RC/SAE. */
- if (i.rounding)
+ if (i.rounding.type != rc_none)
{
if (!t->opcode_modifier.sae
- || (i.rounding->type != saeonly && !t->opcode_modifier.staticrounding))
+ || (i.rounding.type != saeonly && !t->opcode_modifier.staticrounding))
{
i.error = unsupported_rc_sae;
return 1;
them is rounding, the rounding operand should be the last
immediate operand. */
if (i.imm_operands > 1
- && i.rounding->operand != (int) (i.imm_operands - 1))
+ && i.rounding.operand != i.imm_operands - 1)
{
i.error = rc_sae_operand_not_last_imm;
return 1;
if (t->opcode_modifier.disp8memshift
&& i.disp_encoding != disp_encoding_32bit)
{
- if (i.broadcast)
+ if (i.broadcast.type)
i.memshift = t->opcode_modifier.broadcast - 1;
else if (t->opcode_modifier.disp8memshift != DISP8_SHIFT_VL)
i.memshift = t->opcode_modifier.disp8memshift;
if (cpu_flags_match (t) != CPU_FLAGS_PERFECT_MATCH)
continue;
+ /* Check Pseudo Prefix. */
+ i.error = unsupported;
+ if (t->opcode_modifier.pseudovexprefix
+ && !(i.vec_encoding == vex_encoding_vex
+ || i.vec_encoding == vex_encoding_vex3))
+ continue;
+
/* Check AT&T mnemonic. */
i.error = unsupported_with_intel_mnemonic;
if (intel_mnemonic && t->opcode_modifier.attmnemonic)
j = i.imm_operands + (t->operands > i.imm_operands + 1);
if (((i.suffix == QWORD_MNEM_SUFFIX
&& flag_code != CODE_64BIT
- && (t->base_opcode != 0x0fc7
- || t->extension_opcode != 1 /* cmpxchg8b */))
+ && !(t->opcode_modifier.opcodespace == SPACE_0F
+ && t->base_opcode == 0xc7
+ && t->opcode_modifier.opcodeprefix == PREFIX_NONE
+ && t->extension_opcode == 1) /* cmpxchg8b */)
|| (i.suffix == LONG_MNEM_SUFFIX
&& !cpu_arch_flags.bitfield.cpui386))
&& (intel_syntax
|| (operand_types[j].bitfield.class != RegMMX
&& operand_types[j].bitfield.class != RegSIMD
&& operand_types[j].bitfield.class != RegMask))
- && !t->opcode_modifier.vecsib)
+ && !t->opcode_modifier.sib)
continue;
/* Do not verify operands when there are none. */
break;
if (j < MAX_OPERANDS)
{
- bfd_boolean override = (i.prefix[ADDR_PREFIX] != 0);
+ bool override = (i.prefix[ADDR_PREFIX] != 0);
addr_prefix_disp = j;
}
/* Force 0x8b encoding for "mov foo@GOT, %eax". */
- if (i.reloc[0] == BFD_RELOC_386_GOT32 && t->base_opcode == 0xa0)
+ if (i.reloc[0] == BFD_RELOC_386_GOT32
+ && t->base_opcode == 0xa0
+ && t->opcode_modifier.opcodespace == SPACE_BASE)
continue;
/* We check register size if needed. */
if (t->opcode_modifier.checkregsize)
{
check_register = (1 << t->operands) - 1;
- if (i.broadcast)
- check_register &= ~(1 << i.broadcast->operand);
+ if (i.broadcast.type)
+ check_register &= ~(1 << i.broadcast.operand);
}
else
check_register = 0;
zero-extend %eax to %rax. */
if (flag_code == CODE_64BIT
&& t->base_opcode == 0x90
+ && t->opcode_modifier.opcodespace == SPACE_BASE
&& i.types[0].bitfield.instance == Accum
&& i.types[0].bitfield.dword
&& i.types[1].bitfield.instance == Accum
if (flag_code != CODE_64BIT
&& i.hle_prefix
&& t->base_opcode == 0xa0
+ && t->opcode_modifier.opcodespace == SPACE_BASE
&& i.types[0].bitfield.instance == Accum
&& (i.flags[1] & Operand_Mem))
continue;
as_bad (_("unsupported instruction `%s'"),
current_templates->start->name);
return NULL;
+ case invalid_sib_address:
+ err_msg = _("invalid SIB address");
+ break;
case invalid_vsib_address:
err_msg = _("invalid VSIB address");
break;
case invalid_vector_register_set:
err_msg = _("mask, index, and destination registers must be distinct");
break;
+ case invalid_tmm_register_set:
+ err_msg = _("all tmm registers must be distinct");
+ break;
case unsupported_vector_index_register:
err_msg = _("unsupported vector index register");
break;
}
/* Copy the template we found. */
- i.tm = *t;
+ install_template (t);
if (addr_prefix_disp != -1)
i.tm.operand_types[addr_prefix_disp]
unsigned int es_op = i.tm.opcode_modifier.isstring - IS_STRING_ES_OP0;
unsigned int op = i.tm.operand_types[0].bitfield.baseindex ? es_op : 0;
- if (i.seg[op] != NULL && i.seg[op] != &es)
+ if (i.seg[op] != NULL && i.seg[op] != reg_es)
{
as_bad (_("`%s' operand %u must use `%ses' segment"),
i.tm.name,
static int
process_suffix (void)
{
+ bool is_crc32 = false, is_movx = false;
+
/* If matched instruction specifies an explicit instruction mnemonic
suffix, use it. */
if (i.tm.opcode_modifier.size == SIZE16)
{
unsigned int numop = i.operands;
+ /* MOVSX/MOVZX */
+ is_movx = (i.tm.opcode_modifier.opcodespace == SPACE_0F
+ && (i.tm.base_opcode | 8) == 0xbe)
+ || (i.tm.opcode_modifier.opcodespace == SPACE_BASE
+ && i.tm.base_opcode == 0x63
+ && i.tm.cpu_flags.bitfield.cpu64);
+
+ /* CRC32 */
+ is_crc32 = (i.tm.base_opcode == 0xf0
+ && i.tm.opcode_modifier.opcodespace == SPACE_0F38
+ && i.tm.opcode_modifier.opcodeprefix == PREFIX_0XF2);
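/* Editor's note (illustrative, not part of the patch): before this
   change crc32 was matched as the single constant 0xf20f38f0; with
   prefixes and opcode spaces split out, the same test becomes the
   (opcode 0xf0, SPACE_0F38, PREFIX_0XF2) triple captured here.  */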
+
/* movsx/movzx want only their source operand considered here, for the
ambiguity checking below. The suffix will be replaced afterwards
to represent the destination (register). */
- if (((i.tm.base_opcode | 8) == 0xfbe && i.tm.opcode_modifier.w)
- || (i.tm.base_opcode == 0x63 && i.tm.cpu_flags.bitfield.cpu64))
+ if (is_movx && (i.tm.opcode_modifier.w || i.tm.base_opcode == 0x63))
--i.operands;
/* crc32 needs REX.W set regardless of suffix / source operand size. */
- if (i.tm.base_opcode == 0xf20f38f0
- && i.tm.operand_types[1].bitfield.qword)
+ if (is_crc32 && i.tm.operand_types[1].bitfield.qword)
i.rex |= REX_W;
/* If there's no instruction mnemonic suffix we try to invent one
Destination register type is more significant than source
register type. crc32 in SSE4.2 prefers source register
type. */
- unsigned int op = i.tm.base_opcode != 0xf20f38f0 ? i.operands : 1;
+ unsigned int op = is_crc32 ? 1 : i.operands;
while (op--)
if (i.tm.operand_types[op].bitfield.instance == InstanceNone
/* As an exception, movsx/movzx silently default to a byte source
in AT&T mode. */
- if ((i.tm.base_opcode | 8) == 0xfbe && i.tm.opcode_modifier.w
- && !i.suffix && !intel_syntax)
+ if (is_movx && i.tm.opcode_modifier.w && !i.suffix && !intel_syntax)
i.suffix = BYTE_MNEM_SUFFIX;
}
else if (i.suffix == BYTE_MNEM_SUFFIX)
&& (i.tm.opcode_modifier.jump == JUMP_ABSOLUTE
|| i.tm.opcode_modifier.jump == JUMP_BYTE
|| i.tm.opcode_modifier.jump == JUMP_INTERSEGMENT
- || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
+ || (i.tm.opcode_modifier.opcodespace == SPACE_0F
+ && i.tm.base_opcode == 0x01 /* [ls][gi]dt */
&& i.tm.extension_opcode <= 3)))
{
switch (flag_code)
/* For [XYZ]MMWORD operands inspect operand sizes. While generally
also suitable for AT&T syntax mode, it was requested that this be
restricted to just Intel syntax. */
- if (intel_syntax && is_any_vex_encoding (&i.tm) && !i.broadcast)
+ if (intel_syntax && is_any_vex_encoding (&i.tm) && !i.broadcast.type)
{
unsigned int op;
if (i.tm.opcode_modifier.floatmf)
i.suffix = SHORT_MNEM_SUFFIX;
- else if ((i.tm.base_opcode | 8) == 0xfbe
- || (i.tm.base_opcode == 0x63
- && i.tm.cpu_flags.bitfield.cpu64))
+ else if (is_movx)
/* handled below */;
else if (evex)
i.tm.opcode_modifier.evex = evex;
}
}
- if ((i.tm.base_opcode | 8) == 0xfbe
- || (i.tm.base_opcode == 0x63 && i.tm.cpu_flags.bitfield.cpu64))
+ if (is_movx)
{
/* In Intel syntax, movsx/movzx must have a "suffix" (checked above).
In AT&T syntax, if there is no suffix (warned about above), the default
break;
case 0:
- /* Select word/dword/qword operation with explict data sizing prefix
+ /* Select word/dword/qword operation with explicit data sizing prefix
when there are no suitable register operands. */
if (i.tm.opcode_modifier.w
&& (i.prefix[DATA_PREFIX] || (i.prefix[REX_PREFIX] & REX_W))
|| i.tm.operand_types[0].bitfield.instance == RegD
|| i.tm.operand_types[1].bitfield.instance == RegD
/* CRC32 */
- || i.tm.base_opcode == 0xf20f38f0))))
+ || is_crc32))))
i.tm.base_opcode |= 1;
break;
}
unsigned int op;
enum { need_word, need_dword, need_qword } need;
+      /* Check the register operand for the address size prefix if the
+	 memory operand has no real registers, i.e. is a bare symbol or
+	 DISP, or is the bogus (x32-only) symbol(%rip) when symbol(%eip)
+	 is meant.  */
+ if (i.mem_operands == 1
+ && i.reg_operands == 1
+ && i.operands == 2
+ && i.types[1].bitfield.class == Reg
+ && (flag_code == CODE_32BIT
+ ? i.op[1].regs->reg_type.bitfield.word
+ : i.op[1].regs->reg_type.bitfield.dword)
+ && ((i.base_reg == NULL && i.index_reg == NULL)
+#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
+ || (x86_elf_abi == X86_64_X32_ABI
+ && i.base_reg
+ && i.base_reg->reg_num == RegIP
+ && i.base_reg->reg_type.bitfield.qword))
+#else
+ || 0)
+#endif
+ && !add_prefix (ADDR_PREFIX_OPCODE))
+ return 0;
+
if (flag_code == CODE_32BIT)
need = i.prefix[ADDR_PREFIX] ? need_word : need_dword;
else if (i.prefix[ADDR_PREFIX])
continue;
/* crc32 only wants its source operand checked here. */
- if (i.tm.base_opcode == 0xf20f38f0 && op)
+ if (i.tm.base_opcode == 0xf0
+ && i.tm.opcode_modifier.opcodespace == SPACE_0F38
+ && i.tm.opcode_modifier.opcodeprefix == PREFIX_0XF2
+ && op != 0)
continue;
/* Any other register is bad. */
/* Default segment register this instruction will use for memory
accesses. 0 means unknown. This is only for optimizing out
unnecessary segment overrides. */
- const seg_entry *default_seg = 0;
+ const reg_entry *default_seg = NULL;
if (i.tm.opcode_modifier.sse2avx)
{
i.prefix[REX_PREFIX] = 0;
i.rex_encoding = 0;
}
+ /* ImmExt should be processed after SSE2AVX. */
+ else if (i.tm.opcode_modifier.immext)
+ process_immext ();
if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
{
i.flags[j] = i.flags[j - 1];
}
i.op[0].regs
- = (const reg_entry *) hash_find (reg_hash, "xmm0");
+ = (const reg_entry *) str_hash_find (reg_hash, "xmm0");
i.types[0] = regxmm;
i.tm.operand_types[0] = regxmm;
if (flag_code != CODE_64BIT
? i.tm.base_opcode == POP_SEG_SHORT
&& i.op[0].regs->reg_num == 1
- : (i.tm.base_opcode | 1) == POP_SEG386_SHORT
+ : (i.tm.base_opcode | 1) == (POP_SEG386_SHORT & 0xff)
&& i.op[0].regs->reg_num < 4)
{
as_bad (_("you can't `%s %s%s'"),
i.tm.name, register_prefix, i.op[0].regs->reg_name);
return 0;
}
- if ( i.op[0].regs->reg_num > 3 && i.tm.opcode_length == 1 )
+ if (i.op[0].regs->reg_num > 3
+ && i.tm.opcode_modifier.opcodespace == SPACE_BASE )
{
- i.tm.base_opcode ^= POP_SEG_SHORT ^ POP_SEG386_SHORT;
- i.tm.opcode_length = 2;
+ i.tm.base_opcode ^= (POP_SEG_SHORT ^ POP_SEG386_SHORT) & 0xff;
+ i.tm.opcode_modifier.opcodespace = SPACE_0F;
}
i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
}
- else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
+ else if (i.tm.opcode_modifier.opcodespace == SPACE_BASE
+ && (i.tm.base_opcode & ~3) == MOV_AX_DISP32)
{
- default_seg = &ds;
+ default_seg = reg_ds;
}
else if (i.tm.opcode_modifier.isstring)
{
/* For the string instructions that allow a segment override
on one of their operands, the default segment is ds. */
- default_seg = &ds;
+ default_seg = reg_ds;
}
else if (i.short_form)
{
if ((i.seg[0] || i.prefix[SEG_PREFIX])
&& i.tm.base_opcode == 0x8d /* lea */
+ && i.tm.opcode_modifier.opcodespace == SPACE_BASE
&& !is_any_vex_encoding(&i.tm))
{
if (!quiet_warnings)
point, and the specified segment prefix will always be used. */
if (i.seg[0]
&& i.seg[0] != default_seg
- && i.seg[0]->seg_prefix != i.prefix[SEG_PREFIX])
+ && i386_seg_prefixes[i.seg[0]->reg_num] != i.prefix[SEG_PREFIX])
{
- if (!add_prefix (i.seg[0]->seg_prefix))
+ if (!add_prefix (i386_seg_prefixes[i.seg[0]->reg_num]))
return 0;
}
return 1;
}
static INLINE void set_rex_vrex (const reg_entry *r, unsigned int rex_bit,
- bfd_boolean do_sse2avx)
+ bool do_sse2avx)
{
if (r->reg_flags & RegRex)
{
i.vrex |= rex_bit;
}
-static const seg_entry *
+static const reg_entry *
build_modrm_byte (void)
{
- const seg_entry *default_seg = 0;
+ const reg_entry *default_seg = NULL;
unsigned int source, dest;
int vex_3_sources;
&& i.imm_operands == 1
&& (i.types[0].bitfield.imm8
|| i.types[i.operands - 1].bitfield.imm8
- || i.rounding)));
+ || i.rounding.type != rc_none)));
if (i.imm_operands == 2)
source = 2;
else
/* The RC/SAE operand could sit between DEST and SRC. That happens
when one operand is a GPR and the other is an XMM/YMM/ZMM
register. */
- if (i.rounding && i.rounding->operand == (int) dest)
+ if (i.rounding.type != rc_none && i.rounding.operand == dest)
dest++;
if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
i386_operand_type op;
unsigned int vvvv;
- /* Check register-only source operand when two source
- operands are swapped. */
- if (!i.tm.operand_types[source].bitfield.baseindex
- && i.tm.operand_types[dest].bitfield.baseindex)
+ /* Swap two source operands if needed. */
+ if (i.tm.opcode_modifier.swapsources)
{
vvvv = source;
source = dest;
{
i.rm.reg = i.op[dest].regs->reg_num;
i.rm.regmem = i.op[source].regs->reg_num;
- if (i.op[dest].regs->reg_type.bitfield.class == RegMMX
- || i.op[source].regs->reg_type.bitfield.class == RegMMX)
- i.has_regmmx = TRUE;
- else if (i.op[dest].regs->reg_type.bitfield.class == RegSIMD
- || i.op[source].regs->reg_type.bitfield.class == RegSIMD)
- {
- if (i.types[dest].bitfield.zmmword
- || i.types[source].bitfield.zmmword)
- i.has_regzmm = TRUE;
- else if (i.types[dest].bitfield.ymmword
- || i.types[source].bitfield.ymmword)
- i.has_regymm = TRUE;
- else
- i.has_regxmm = TRUE;
- }
set_rex_vrex (i.op[dest].regs, REX_R, i.tm.opcode_modifier.sse2avx);
- set_rex_vrex (i.op[source].regs, REX_B, FALSE);
+ set_rex_vrex (i.op[source].regs, REX_B, false);
}
else
{
i.rm.reg = i.op[source].regs->reg_num;
i.rm.regmem = i.op[dest].regs->reg_num;
set_rex_vrex (i.op[dest].regs, REX_B, i.tm.opcode_modifier.sse2avx);
- set_rex_vrex (i.op[source].regs, REX_R, FALSE);
+ set_rex_vrex (i.op[source].regs, REX_R, false);
}
if (flag_code != CODE_64BIT && (i.rex & REX_R))
{
break;
gas_assert (op < i.operands);
- if (i.tm.opcode_modifier.vecsib)
+ if (i.tm.opcode_modifier.sib)
{
- if (i.index_reg->reg_num == RegIZ)
+ /* The index register of VSIB shouldn't be RegIZ. */
+ if (i.tm.opcode_modifier.sib != SIBMEM
+ && i.index_reg->reg_num == RegIZ)
abort ();
i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
i.types[op].bitfield.disp8 = 0;
i.types[op].bitfield.disp16 = 0;
i.types[op].bitfield.disp64 = 0;
- if (flag_code != CODE_64BIT || i.prefix[ADDR_PREFIX])
+ if (want_disp32 (&i.tm))
{
/* Must be 32 bit */
i.types[op].bitfield.disp32 = 1;
i.types[op].bitfield.disp32s = 1;
}
}
- i.sib.index = i.index_reg->reg_num;
- set_rex_vrex (i.index_reg, REX_X, FALSE);
+
+ /* Since the mandatory SIB always has an index register, the
+ code logic remains unchanged. The non-mandatory SIB without
+ an index register is allowed and will be handled later. */
+ if (i.index_reg)
+ {
+ if (i.index_reg->reg_num == RegIZ)
+ i.sib.index = NO_INDEX_REGISTER;
+ else
+ i.sib.index = i.index_reg->reg_num;
+ set_rex_vrex (i.index_reg, REX_X, false);
+ }
}
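/* For illustration (assumed examples): a VSIB insn such as
"vgatherdps (%rax,%zmm2,4), %zmm1{%k1}" always supplies an index
register, while the AMX SIBMEM form, e.g. "tileloadd (%rax), %tmm0",
may omit it and takes the no-index path handled later. */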
- default_seg = &ds;
+ default_seg = reg_ds;
if (i.base_reg == 0)
{
{
i386_operand_type newdisp;
- gas_assert (!i.tm.opcode_modifier.vecsib);
+ /* Check for both VSIB and mandatory non-vector SIB. */
+ gas_assert (!i.tm.opcode_modifier.sib
+ || i.tm.opcode_modifier.sib == SIBMEM);
/* Operand is just <disp> */
if (flag_code == CODE_64BIT)
{
i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
i.sib.base = NO_BASE_REGISTER;
i.sib.index = NO_INDEX_REGISTER;
- newdisp = (!i.prefix[ADDR_PREFIX] ? disp32s : disp32);
+ newdisp = (want_disp32(&i.tm) ? disp32 : disp32s);
}
else if ((flag_code == CODE_16BIT)
^ (i.prefix[ADDR_PREFIX] != 0))
i.types[op] = operand_type_and_not (i.types[op], anydisp);
i.types[op] = operand_type_or (i.types[op], newdisp);
}
- else if (!i.tm.opcode_modifier.vecsib)
+ else if (!i.tm.opcode_modifier.sib)
{
/* !i.base_reg && i.index_reg */
if (i.index_reg->reg_num == RegIZ)
i.types[op].bitfield.disp8 = 0;
i.types[op].bitfield.disp16 = 0;
i.types[op].bitfield.disp64 = 0;
- if (flag_code != CODE_64BIT || i.prefix[ADDR_PREFIX])
+ if (want_disp32 (&i.tm))
{
/* Must be 32 bit */
i.types[op].bitfield.disp32 = 1;
/* RIP addressing for 64bit mode. */
else if (i.base_reg->reg_num == RegIP)
{
- gas_assert (!i.tm.opcode_modifier.vecsib);
+ gas_assert (!i.tm.opcode_modifier.sib);
i.rm.regmem = NO_BASE_REGISTER;
i.types[op].bitfield.disp8 = 0;
i.types[op].bitfield.disp16 = 0;
}
else if (i.base_reg->reg_type.bitfield.word)
{
- gas_assert (!i.tm.opcode_modifier.vecsib);
+ gas_assert (!i.tm.opcode_modifier.sib);
switch (i.base_reg->reg_num)
{
case 3: /* (%bx) */
i.rm.regmem = i.index_reg->reg_num - 6;
break;
case 5: /* (%bp) */
- default_seg = &ss;
+ default_seg = reg_ss;
if (i.index_reg == 0)
{
i.rm.regmem = 6;
if (operand_type_check (i.types[op], disp) == 0)
{
/* fake (%bp) into 0(%bp) */
- i.types[op].bitfield.disp8 = 1;
+ if (i.disp_encoding == disp_encoding_16bit)
+ i.types[op].bitfield.disp16 = 1;
+ else
+ i.types[op].bitfield.disp8 = 1;
fake_zero_displacement = 1;
}
}
default: /* (%si) -> 4 or (%di) -> 5 */
i.rm.regmem = i.base_reg->reg_num - 6 + 4;
}
+ if (!fake_zero_displacement
+ && !i.disp_operands
+ && i.disp_encoding)
+ {
+ fake_zero_displacement = 1;
+ if (i.disp_encoding == disp_encoding_8bit)
+ i.types[op].bitfield.disp8 = 1;
+ else
+ i.types[op].bitfield.disp16 = 1;
+ }
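+ /* Illustrative: with 16-bit addressing, "{disp8} mov (%si), %ax"
+ now forces a zero disp8 and "{disp16}" a zero disp16, where the
+ plain form would use no displacement at all. */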
i.rm.mode = mode_from_disp_size (i.types[op]);
}
else /* i.base_reg and 32/64 bit mode */
{
- if (flag_code == CODE_64BIT
- && operand_type_check (i.types[op], disp))
+ if (operand_type_check (i.types[op], disp))
{
i.types[op].bitfield.disp16 = 0;
i.types[op].bitfield.disp64 = 0;
- if (i.prefix[ADDR_PREFIX] == 0)
+ if (!want_disp32 (&i.tm))
{
i.types[op].bitfield.disp32 = 0;
i.types[op].bitfield.disp32s = 1;
}
}
- if (!i.tm.opcode_modifier.vecsib)
+ if (!i.tm.opcode_modifier.sib)
i.rm.regmem = i.base_reg->reg_num;
if ((i.base_reg->reg_flags & RegRex) != 0)
i.rex |= REX_B;
if (!(i.base_reg->reg_flags & RegRex)
&& (i.base_reg->reg_num == EBP_REG_NUM
|| i.base_reg->reg_num == ESP_REG_NUM))
- default_seg = &ss;
+ default_seg = reg_ss;
if (i.base_reg->reg_num == 5 && i.disp_operands == 0)
{
fake_zero_displacement = 1;
- i.types[op].bitfield.disp8 = 1;
+ if (i.disp_encoding == disp_encoding_32bit)
+ i.types[op].bitfield.disp32 = 1;
+ else
+ i.types[op].bitfield.disp8 = 1;
}
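/* Illustrative: "mov (%ebp), %eax" carries no displacement operand
yet must encode one, giving 8b 45 00 (fake disp8); with {disp32}
this widens to 8b 85 00 00 00 00. */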
i.sib.scale = i.log2_scale_factor;
if (i.index_reg == 0)
{
- gas_assert (!i.tm.opcode_modifier.vecsib);
+ /* Only check for VSIB. */
+ gas_assert (i.tm.opcode_modifier.sib != VECSIB128
+ && i.tm.opcode_modifier.sib != VECSIB256
+ && i.tm.opcode_modifier.sib != VECSIB512);
+
/* <disp>(%esp) becomes a two-byte modrm with no index
register. We've already stored the code for esp
in i.rm.regmem, i.e. ESCAPE_TO_TWO_BYTE_ADDRESSING.
extra modrm byte. */
i.sib.index = NO_INDEX_REGISTER;
}
- else if (!i.tm.opcode_modifier.vecsib)
+ else if (!i.tm.opcode_modifier.sib)
{
if (i.index_reg->reg_num == RegIZ)
i.sib.index = NO_INDEX_REGISTER;
unsigned int vex_reg = ~0;
for (op = 0; op < i.operands; op++)
- {
- if (i.types[op].bitfield.class == Reg
- || i.types[op].bitfield.class == RegBND
- || i.types[op].bitfield.class == RegMask
- || i.types[op].bitfield.class == SReg
- || i.types[op].bitfield.class == RegCR
- || i.types[op].bitfield.class == RegDR
- || i.types[op].bitfield.class == RegTR)
- break;
- if (i.types[op].bitfield.class == RegSIMD)
- {
- if (i.types[op].bitfield.zmmword)
- i.has_regzmm = TRUE;
- else if (i.types[op].bitfield.ymmword)
- i.has_regymm = TRUE;
- else
- i.has_regxmm = TRUE;
- break;
- }
- if (i.types[op].bitfield.class == RegMMX)
- {
- i.has_regmmx = TRUE;
- break;
- }
- }
+ if (i.types[op].bitfield.class == Reg
+ || i.types[op].bitfield.class == RegBND
+ || i.types[op].bitfield.class == RegMask
+ || i.types[op].bitfield.class == SReg
+ || i.types[op].bitfield.class == RegCR
+ || i.types[op].bitfield.class == RegDR
+ || i.types[op].bitfield.class == RegTR
+ || i.types[op].bitfield.class == RegSIMD
+ || i.types[op].bitfield.class == RegMMX)
+ break;
if (vex_3_sources)
op = dest;
return default_seg;
}
+static INLINE void
+frag_opcode_byte (unsigned char byte)
+{
+ if (now_seg != absolute_section)
+ FRAG_APPEND_1_CHAR (byte);
+ else
+ ++abs_section_offset;
+}
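+
+/* Usage sketch (illustrative): within the absolute section (e.g.
+ after ".struct") no frag bytes may be emitted, so output routines
+ use frag_opcode_byte above to advance abs_section_offset and keep
+ instruction sizes accounted for. */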
+
static unsigned int
flip_code16 (unsigned int code16)
{
symbolS *sym;
offsetT off;
+ if (now_seg == absolute_section)
+ {
+ as_bad (_("relaxable branches not supported in absolute section"));
+ return;
+ }
+
code16 = flag_code == CODE_16BIT ? CODE16 : 0;
size = i.disp_encoding == disp_encoding_32bit ? BIG : SMALL;
/* Return TRUE iff PLT32 relocation should be used for branching to
symbol S. */
-static bfd_boolean
+static bool
need_plt32_p (symbolS *s)
{
/* PLT32 relocation is ELF only. */
if (!IS_ELF)
- return FALSE;
+ return false;
#ifdef TE_SOLARIS
/* Don't emit PLT32 relocation on Solaris: neither native linker nor
krtld support it. */
- return FALSE;
+ return false;
#endif
/* Since there is no need to prepare for PLT branch on x86-64, we
can generate R_X86_64_PLT32, instead of R_X86_64_PC32, which can
be used as a marker for 32-bit PC-relative branches. */
if (!object_64bit)
- return FALSE;
+ return false;
+
+ if (s == NULL)
+ return false;
/* Weak or undefined symbol need PLT32 relocation. */
if (S_IS_WEAK (s) || !S_IS_DEFINED (s))
- return TRUE;
+ return true;
/* Non-global symbol doesn't need PLT32 relocation. */
if (! S_IS_EXTERNAL (s))
- return FALSE;
+ return false;
/* Other global symbols need PLT32 relocation. NB: Symbol with
non-default visibilities are treated as normal global symbol
so that PLT32 relocation can be used as a marker for 32-bit
PC-relative branches. It is useful for linker relaxation. */
- return TRUE;
+ return true;
}
#endif
size = 1;
if (i.prefix[ADDR_PREFIX] != 0)
{
- FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
+ frag_opcode_byte (ADDR_PREFIX_OPCODE);
i.prefixes -= 1;
}
/* Pentium4 branch hints. */
if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
|| i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
{
- FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
+ frag_opcode_byte (i.prefix[SEG_PREFIX]);
i.prefixes--;
}
}
if (i.prefix[DATA_PREFIX] != 0)
{
- FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
+ frag_opcode_byte (DATA_PREFIX_OPCODE);
i.prefixes -= 1;
code16 ^= flip_code16(code16);
}
/* BND prefixed jump. */
if (i.prefix[BND_PREFIX] != 0)
{
- FRAG_APPEND_1_CHAR (i.prefix[BND_PREFIX]);
+ frag_opcode_byte (i.prefix[BND_PREFIX]);
i.prefixes -= 1;
}
if (i.prefix[REX_PREFIX] != 0)
{
- FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
+ frag_opcode_byte (i.prefix[REX_PREFIX]);
i.prefixes -= 1;
}
if (i.prefixes != 0)
as_warn (_("skipping prefixes on `%s'"), i.tm.name);
- p = frag_more (i.tm.opcode_length + size);
- switch (i.tm.opcode_length)
+ if (now_seg == absolute_section)
+ {
+ abs_section_offset += i.opcode_length + size;
+ return;
+ }
+
+ p = frag_more (i.opcode_length + size);
+ switch (i.opcode_length)
{
case 2:
*p++ = i.tm.base_opcode >> 8;
if (i.prefixes != 0)
as_warn (_("skipping prefixes on `%s'"), i.tm.name);
+ if (now_seg == absolute_section)
+ {
+ abs_section_offset += prefix + 1 + 2 + size;
+ return;
+ }
+
/* 1 opcode; 2 segment; offset */
p = frag_more (prefix + 1 + 2 + size);
else
fix_new_exp (frag_now, p - frag_now->fr_literal, size,
i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
- if (i.op[0].imms->X_op != O_constant)
- as_bad (_("can't handle non absolute segment in `%s'"),
- i.tm.name);
- md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
+
+ p += size;
+ if (i.op[0].imms->X_op == O_constant)
+ md_number_to_chars (p, (valueT) i.op[0].imms->X_add_number, 2);
+ else
+ fix_new_exp (frag_now, p - frag_now->fr_literal, 2,
+ i.op[0].imms, 0, reloc (2, 0, 0, i.reloc[0]));
}
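/* Example (illustrative): for "lcall $sym, $off" the 2-byte segment
selector now gets a fix-up like the offset does, instead of being
rejected; with constants, e.g. "lcall $0x1234, $0x5678", the
selector bytes 34 12 simply follow the offset. */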
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
if (i.base_reg && i.base_reg->reg_num == RegIP)
return 0;
- /* No VEX/EVEX encoding. */
- if (is_any_vex_encoding (&i.tm))
+ /* No opcodes outside of base encoding space. */
+ if (i.tm.opcode_modifier.opcodespace != SPACE_BASE)
return 0;
/* add, sub without add/sub m, imm. */
/* NB: Don't work with COND_JUMP86 without i386. */
if (!align_branch_power
|| now_seg == absolute_section
- || !cpu_arch_flags.bitfield.cpui386)
+ || !cpu_arch_flags.bitfield.cpui386
+ || i.tm.opcode_modifier.opcodespace != SPACE_BASE)
return 0;
add_padding = 0;
add_padding = 1;
}
}
- else if (is_any_vex_encoding (&i.tm))
- return 0;
else if ((i.tm.base_opcode | 1) == 0xc3)
{
/* Near ret. */
enum mf_jcc_kind mf_jcc = mf_jcc_jo;
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
- if (IS_ELF && x86_used_note)
- {
- if (i.tm.cpu_flags.bitfield.cpucmov)
- x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_CMOV;
- if (i.tm.cpu_flags.bitfield.cpusse)
- x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_SSE;
- if (i.tm.cpu_flags.bitfield.cpusse2)
- x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_SSE2;
- if (i.tm.cpu_flags.bitfield.cpusse3)
- x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_SSE3;
- if (i.tm.cpu_flags.bitfield.cpussse3)
- x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_SSSE3;
- if (i.tm.cpu_flags.bitfield.cpusse4_1)
- x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_SSE4_1;
- if (i.tm.cpu_flags.bitfield.cpusse4_2)
- x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_SSE4_2;
- if (i.tm.cpu_flags.bitfield.cpuavx)
- x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX;
- if (i.tm.cpu_flags.bitfield.cpuavx2)
- x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX2;
- if (i.tm.cpu_flags.bitfield.cpufma)
- x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_FMA;
- if (i.tm.cpu_flags.bitfield.cpuavx512f)
- x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512F;
- if (i.tm.cpu_flags.bitfield.cpuavx512cd)
- x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512CD;
- if (i.tm.cpu_flags.bitfield.cpuavx512er)
- x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512ER;
- if (i.tm.cpu_flags.bitfield.cpuavx512pf)
- x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512PF;
- if (i.tm.cpu_flags.bitfield.cpuavx512vl)
- x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512VL;
- if (i.tm.cpu_flags.bitfield.cpuavx512dq)
- x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512DQ;
- if (i.tm.cpu_flags.bitfield.cpuavx512bw)
- x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512BW;
- if (i.tm.cpu_flags.bitfield.cpuavx512_4fmaps)
- x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512_4FMAPS;
- if (i.tm.cpu_flags.bitfield.cpuavx512_4vnniw)
- x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512_4VNNIW;
- if (i.tm.cpu_flags.bitfield.cpuavx512_bitalg)
- x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512_BITALG;
- if (i.tm.cpu_flags.bitfield.cpuavx512ifma)
- x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512_IFMA;
- if (i.tm.cpu_flags.bitfield.cpuavx512vbmi)
- x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512_VBMI;
- if (i.tm.cpu_flags.bitfield.cpuavx512_vbmi2)
- x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512_VBMI2;
- if (i.tm.cpu_flags.bitfield.cpuavx512_vnni)
- x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512_VNNI;
- if (i.tm.cpu_flags.bitfield.cpuavx512_bf16)
- x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512_BF16;
+ if (IS_ELF && x86_used_note && now_seg != absolute_section)
+ {
+ if ((i.xstate & xstate_tmm) == xstate_tmm
+ || i.tm.cpu_flags.bitfield.cpuamx_tile)
+ x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_TMM;
if (i.tm.cpu_flags.bitfield.cpu8087
|| i.tm.cpu_flags.bitfield.cpu287
|| i.tm.cpu_flags.bitfield.cpu687
|| i.tm.cpu_flags.bitfield.cpufisttp)
x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_X87;
- if (i.has_regmmx
- || i.tm.base_opcode == 0xf77 /* emms */
- || i.tm.base_opcode == 0xf0e /* femms */
- || i.tm.base_opcode == 0xf2a /* cvtpi2ps */
- || i.tm.base_opcode == 0x660f2a /* cvtpi2pd */)
+
+ if ((i.xstate & xstate_mmx)
+ || (i.tm.opcode_modifier.opcodespace == SPACE_0F
+ && !is_any_vex_encoding (&i.tm)
+ && (i.tm.base_opcode == 0x77 /* emms */
+ || i.tm.base_opcode == 0x0e /* femms */)))
x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_MMX;
- if (i.has_regxmm)
+
+ if (i.index_reg)
+ {
+ if (i.index_reg->reg_type.bitfield.zmmword)
+ i.xstate |= xstate_zmm;
+ else if (i.index_reg->reg_type.bitfield.ymmword)
+ i.xstate |= xstate_ymm;
+ else if (i.index_reg->reg_type.bitfield.xmmword)
+ i.xstate |= xstate_xmm;
+ }
+
+ /* vzeroall / vzeroupper */
+ if (i.tm.base_opcode == 0x77 && i.tm.cpu_flags.bitfield.cpuavx)
+ i.xstate |= xstate_ymm;
+
+ if ((i.xstate & xstate_xmm)
+ /* ldmxcsr / stmxcsr / vldmxcsr / vstmxcsr */
+ || (i.tm.base_opcode == 0xae
+ && (i.tm.cpu_flags.bitfield.cpusse
+ || i.tm.cpu_flags.bitfield.cpuavx))
+ || i.tm.cpu_flags.bitfield.cpuwidekl
+ || i.tm.cpu_flags.bitfield.cpukl)
x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_XMM;
- if (i.has_regymm)
+
+ if ((i.xstate & xstate_ymm) == xstate_ymm)
x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_YMM;
- if (i.has_regzmm)
+ if ((i.xstate & xstate_zmm) == xstate_zmm)
x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_ZMM;
+ if (i.mask.reg || (i.xstate & xstate_mask) == xstate_mask)
+ x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_MASK;
if (i.tm.cpu_flags.bitfield.cpufxsr)
x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_FXSR;
if (i.tm.cpu_flags.bitfield.cpuxsave)
x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_XSAVEOPT;
if (i.tm.cpu_flags.bitfield.cpuxsavec)
x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_XSAVEC;
+
+ if (x86_feature_2_used
+ || i.tm.cpu_flags.bitfield.cpucmov
+ || i.tm.cpu_flags.bitfield.cpusyscall
+ || (i.tm.opcode_modifier.opcodespace == SPACE_0F
+ && i.tm.base_opcode == 0xc7
+ && i.tm.opcode_modifier.opcodeprefix == PREFIX_NONE
+ && i.tm.extension_opcode == 1) /* cmpxchg8b */)
+ x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_BASELINE;
+ if (i.tm.cpu_flags.bitfield.cpusse3
+ || i.tm.cpu_flags.bitfield.cpussse3
+ || i.tm.cpu_flags.bitfield.cpusse4_1
+ || i.tm.cpu_flags.bitfield.cpusse4_2
+ || i.tm.cpu_flags.bitfield.cpucx16
+ || i.tm.cpu_flags.bitfield.cpupopcnt
+ /* LAHF-SAHF insns in 64-bit mode. */
+ || (flag_code == CODE_64BIT
+ && (i.tm.base_opcode | 1) == 0x9f
+ && i.tm.opcode_modifier.opcodespace == SPACE_BASE))
+ x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_V2;
+ if (i.tm.cpu_flags.bitfield.cpuavx
+ || i.tm.cpu_flags.bitfield.cpuavx2
+ /* Any VEX encoded insns except for CpuAVX512F, CpuAVX512BW,
+ CpuAVX512DQ, LWP, TBM and AMX. */
+ || (i.tm.opcode_modifier.vex
+ && !i.tm.cpu_flags.bitfield.cpuavx512f
+ && !i.tm.cpu_flags.bitfield.cpuavx512bw
+ && !i.tm.cpu_flags.bitfield.cpuavx512dq
+ && !i.tm.cpu_flags.bitfield.cpulwp
+ && !i.tm.cpu_flags.bitfield.cputbm
+ && !(x86_feature_2_used & GNU_PROPERTY_X86_FEATURE_2_TMM))
+ || i.tm.cpu_flags.bitfield.cpuf16c
+ || i.tm.cpu_flags.bitfield.cpufma
+ || i.tm.cpu_flags.bitfield.cpulzcnt
+ || i.tm.cpu_flags.bitfield.cpumovbe
+ || i.tm.cpu_flags.bitfield.cpuxsaves
+ || (x86_feature_2_used
+ & (GNU_PROPERTY_X86_FEATURE_2_XSAVE
+ | GNU_PROPERTY_X86_FEATURE_2_XSAVEOPT
+ | GNU_PROPERTY_X86_FEATURE_2_XSAVEC)) != 0)
+ x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_V3;
+ if (i.tm.cpu_flags.bitfield.cpuavx512f
+ || i.tm.cpu_flags.bitfield.cpuavx512bw
+ || i.tm.cpu_flags.bitfield.cpuavx512dq
+ || i.tm.cpu_flags.bitfield.cpuavx512vl
+ /* Any EVEX encoded insns except for AVX512ER, AVX512PF and
+ AVX512_4VNNIW. */
+ || (i.tm.opcode_modifier.evex
+ && !i.tm.cpu_flags.bitfield.cpuavx512er
+ && !i.tm.cpu_flags.bitfield.cpuavx512pf
+ && !i.tm.cpu_flags.bitfield.cpuavx512_4vnniw))
+ x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_V4;
}
#endif
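/* Rough effect (assumed): assembling, say, an AVX2 insn now records
GNU_PROPERTY_X86_ISA_1_V3 in .note.gnu.property (with
-mx86-used-note=yes), which "readelf -n" can display. */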
char *p;
unsigned char *q;
unsigned int j;
- unsigned int prefix;
enum mf_cmp_kind mf_cmp;
if (avoid_fence
- && (i.tm.base_opcode == 0xfaee8
- || i.tm.base_opcode == 0xfaef0
- || i.tm.base_opcode == 0xfaef8))
- {
- /* Encode lfence, mfence, and sfence as
- f0 83 04 24 00 lock addl $0x0, (%{re}sp). */
- offsetT val = 0x240483f0ULL;
- p = frag_more (5);
- md_number_to_chars (p, val, 5);
- return;
- }
+ && (i.tm.base_opcode == 0xaee8
+ || i.tm.base_opcode == 0xaef0
+ || i.tm.base_opcode == 0xaef8))
+ {
+ /* Encode lfence, mfence, and sfence as
+ f0 83 04 24 00 lock addl $0x0, (%{re}sp). */
+ if (now_seg != absolute_section)
+ {
+ offsetT val = 0x240483f0ULL;
+
+ p = frag_more (5);
+ md_number_to_chars (p, val, 5);
+ }
+ else
+ abs_section_offset += 5;
+ return;
+ }
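+ /* For example (illustrative): with -mfence-as-lock-add=yes,
+ "mfence" assembles to the 5 bytes above; in the absolute
+ section only the 5-byte size is recorded. */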
/* Some processors fail on LOCK prefix. This options makes
assembler ignore LOCK prefix and serves as a workaround. */
if (omit_lock_prefix)
{
- if (i.tm.base_opcode == LOCK_PREFIX_OPCODE)
+ if (i.tm.base_opcode == LOCK_PREFIX_OPCODE
+ && i.tm.opcode_modifier.isprefix)
return;
i.prefix[LOCK_PREFIX] = 0;
}
don't need the explicit prefix. */
if (!i.tm.opcode_modifier.vex && !i.tm.opcode_modifier.evex)
{
- switch (i.tm.opcode_length)
+ switch (i.tm.opcode_modifier.opcodeprefix)
{
- case 3:
- if (i.tm.base_opcode & 0xff000000)
- {
- prefix = (i.tm.base_opcode >> 24) & 0xff;
- if (!i.tm.cpu_flags.bitfield.cpupadlock
- || prefix != REPE_PREFIX_OPCODE
- || (i.prefix[REP_PREFIX] != REPE_PREFIX_OPCODE))
- add_prefix (prefix);
- }
+ case PREFIX_0X66:
+ add_prefix (0x66);
break;
- case 2:
- if ((i.tm.base_opcode & 0xff0000) != 0)
+ case PREFIX_0XF2:
+ add_prefix (0xf2);
+ break;
+ case PREFIX_0XF3:
+ if (!i.tm.cpu_flags.bitfield.cpupadlock
+ || (i.prefix[REP_PREFIX] != 0xf3))
+ add_prefix (0xf3);
+ break;
+ case PREFIX_NONE:
+ switch (i.opcode_length)
{
- prefix = (i.tm.base_opcode >> 16) & 0xff;
- add_prefix (prefix);
+ case 2:
+ break;
+ case 1:
+ /* Check for pseudo prefixes. */
+ if (!i.tm.opcode_modifier.isprefix || i.tm.base_opcode)
+ break;
+ as_bad_where (insn_start_frag->fr_file,
+ insn_start_frag->fr_line,
+ _("pseudo prefix without instruction"));
+ return;
+ default:
+ abort ();
}
break;
- case 1:
- break;
- case 0:
- /* Check for pseudo prefixes. */
- as_bad_where (insn_start_frag->fr_file,
- insn_start_frag->fr_line,
- _("pseudo prefix without instruction"));
- return;
default:
abort ();
}
/* The prefix bytes. */
for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
if (*q)
- FRAG_APPEND_1_CHAR (*q);
+ frag_opcode_byte (*q);
}
else
{
{
case SEG_PREFIX:
case ADDR_PREFIX:
- FRAG_APPEND_1_CHAR (*q);
+ frag_opcode_byte (*q);
break;
default:
/* There should be no other prefixes for instructions
if (i.vrex)
abort ();
/* Now the VEX prefix. */
- p = frag_more (i.vex.length);
- for (j = 0; j < i.vex.length; j++)
- p[j] = i.vex.bytes[j];
+ if (now_seg != absolute_section)
+ {
+ p = frag_more (i.vex.length);
+ for (j = 0; j < i.vex.length; j++)
+ p[j] = i.vex.bytes[j];
+ }
+ else
+ abs_section_offset += i.vex.length;
}
/* Now the opcode; be careful about word order here! */
- if (i.tm.opcode_length == 1)
+ j = i.opcode_length;
+ if (!i.vex.length)
+ switch (i.tm.opcode_modifier.opcodespace)
+ {
+ case SPACE_BASE:
+ break;
+ case SPACE_0F:
+ ++j;
+ break;
+ case SPACE_0F38:
+ case SPACE_0F3A:
+ j += 2;
+ break;
+ default:
+ abort ();
+ }
+
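+ /* Mapping sketch (illustrative): SPACE_0F implies one 0f escape
+ byte and SPACE_0F38/SPACE_0F3A imply two (0f 38 / 0f 3a), e.g.
+ crc32 is f2 0f 38 f0 /r; j thus counts base opcode bytes plus
+ escapes unless a VEX prefix already encodes the opcode map. */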
+ if (now_seg == absolute_section)
+ abs_section_offset += j;
+ else if (j == 1)
{
FRAG_APPEND_1_CHAR (i.tm.base_opcode);
}
else
{
- switch (i.tm.opcode_length)
+ p = frag_more (j);
+ if (!i.vex.length
+ && i.tm.opcode_modifier.opcodespace != SPACE_BASE)
+ {
+ *p++ = 0x0f;
+ if (i.tm.opcode_modifier.opcodespace != SPACE_0F)
+ *p++ = i.tm.opcode_modifier.opcodespace == SPACE_0F38
+ ? 0x38 : 0x3a;
+ }
+
+ switch (i.opcode_length)
{
- case 4:
- p = frag_more (4);
- *p++ = (i.tm.base_opcode >> 24) & 0xff;
- *p++ = (i.tm.base_opcode >> 16) & 0xff;
- break;
- case 3:
- p = frag_more (3);
- *p++ = (i.tm.base_opcode >> 16) & 0xff;
- break;
case 2:
- p = frag_more (2);
+ /* Put out high byte first: can't use md_number_to_chars! */
+ *p++ = (i.tm.base_opcode >> 8) & 0xff;
+ /* Fall through. */
+ case 1:
+ *p = i.tm.base_opcode & 0xff;
break;
default:
abort ();
break;
}
- /* Put out high byte first: can't use md_number_to_chars! */
- *p++ = (i.tm.base_opcode >> 8) & 0xff;
- *p = i.tm.base_opcode & 0xff;
}
/* Now the modrm byte and sib byte (if present). */
if (i.tm.opcode_modifier.modrm)
{
- FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
- | i.rm.reg << 3
- | i.rm.mode << 6));
+ frag_opcode_byte ((i.rm.regmem << 0)
+ | (i.rm.reg << 3)
+ | (i.rm.mode << 6));
/* If i.rm.regmem == ESP (4)
&& i.rm.mode != (Register mode)
&& not 16 bit
if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
&& i.rm.mode != 3
&& !(i.base_reg && i.base_reg->reg_type.bitfield.word))
- FRAG_APPEND_1_CHAR ((i.sib.base << 0
- | i.sib.index << 3
- | i.sib.scale << 6));
+ frag_opcode_byte ((i.sib.base << 0)
+ | (i.sib.index << 3)
+ | (i.sib.scale << 6));
}
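/* Byte-layout example (illustrative): "movl 4(%eax,%ebx,2), %ecx"
is 8b 4c 58 04 -- ModRM 0x4c = mode 01, reg 001 (%ecx), rm 100
(SIB); SIB 0x58 = scale 01, index 011 (%ebx), base 000 (%eax);
then disp8 04. */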
if (i.disp_operands)
/* Count prefixes for extended opcode maps. */
if (!i.vex.length)
- switch (i.tm.opcode_length)
+ switch (i.tm.opcode_modifier.opcodespace)
{
- case 3:
- if (((i.tm.base_opcode >> 16) & 0xff) == 0xf)
- {
- count++;
- switch ((i.tm.base_opcode >> 8) & 0xff)
- {
- case 0x38:
- case 0x3a:
- count++;
- break;
- default:
- break;
- }
- }
+ case SPACE_BASE:
break;
- case 2:
- if (((i.tm.base_opcode >> 8) & 0xff) == 0xf)
- count++;
+ case SPACE_0F:
+ count++;
break;
- case 1:
+ case SPACE_0F38:
+ case SPACE_0F3A:
+ count += 2;
break;
default:
abort ();
{
if (operand_type_check (i.types[n], disp))
{
- if (i.op[n].disps->X_op == O_constant)
+ int size = disp_size (n);
+
+ if (now_seg == absolute_section)
+ abs_section_offset += size;
+ else if (i.op[n].disps->X_op == O_constant)
{
- int size = disp_size (n);
offsetT val = i.op[n].disps->X_add_number;
val = offset_in_range (val >> (size == 1 ? i.memshift : 0),
else
{
enum bfd_reloc_code_real reloc_type;
- int size = disp_size (n);
int sign = i.types[n].bitfield.disp32s;
int pcrel = (i.flags[n] & Operand_PCrel) != 0;
fixS *fixP;
if (!object_64bit)
{
reloc_type = BFD_RELOC_386_GOTPC;
- i.has_gotpc_tls_reloc = TRUE;
+ i.has_gotpc_tls_reloc = true;
i.op[n].imms->X_add_number +=
encoding_length (insn_start_frag, insn_start_off, p);
}
case BFD_RELOC_X86_64_GOTTPOFF:
case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
case BFD_RELOC_X86_64_TLSDESC_CALL:
- i.has_gotpc_tls_reloc = TRUE;
+ i.has_gotpc_tls_reloc = true;
default:
break;
}
&& i.rm.regmem == 5))
&& (i.rm.mode == 2
|| (i.rm.mode == 0 && i.rm.regmem == 5))
- && !is_any_vex_encoding(&i.tm)
+ && i.tm.opcode_modifier.opcodespace == SPACE_BASE
&& ((i.operands == 1
&& i.tm.base_opcode == 0xff
&& (i.rm.reg == 2 || i.rm.reg == 4))
for (n = 0; n < i.operands; n++)
{
/* Skip SAE/RC Imm operand in EVEX. They are already handled. */
- if (i.rounding && (int) n == i.rounding->operand)
+ if (i.rounding.type != rc_none && n == i.rounding.operand)
continue;
if (operand_type_check (i.types[n], imm))
{
- if (i.op[n].imms->X_op == O_constant)
+ int size = imm_size (n);
+
+ if (now_seg == absolute_section)
+ abs_section_offset += size;
+ else if (i.op[n].imms->X_op == O_constant)
{
- int size = imm_size (n);
offsetT val;
val = offset_in_range (i.op[n].imms->X_add_number,
non-absolute imms). Try to support other
sizes ... */
enum bfd_reloc_code_real reloc_type;
- int size = imm_size (n);
int sign;
if (i.types[n].bitfield.imm32s
reloc_type = BFD_RELOC_X86_64_GOTPC32;
else if (size == 8)
reloc_type = BFD_RELOC_X86_64_GOTPC64;
- i.has_gotpc_tls_reloc = TRUE;
+ i.has_gotpc_tls_reloc = true;
i.op[n].imms->X_add_number +=
encoding_length (insn_start_frag, insn_start_off, p);
}
int len;
const enum bfd_reloc_code_real rel[2];
const i386_operand_type types64;
+ bool need_GOT_symbol;
} gotrel[] = {
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
{ STRING_COMMA_LEN ("SIZE"), { BFD_RELOC_SIZE32,
BFD_RELOC_SIZE32 },
- OPERAND_TYPE_IMM32_64 },
+ OPERAND_TYPE_IMM32_64, false },
#endif
{ STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real,
BFD_RELOC_X86_64_PLTOFF64 },
- OPERAND_TYPE_IMM64 },
+ OPERAND_TYPE_IMM64, true },
{ STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32,
BFD_RELOC_X86_64_PLT32 },
- OPERAND_TYPE_IMM32_32S_DISP32 },
+ OPERAND_TYPE_IMM32_32S_DISP32, false },
{ STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real,
BFD_RELOC_X86_64_GOTPLT64 },
- OPERAND_TYPE_IMM64_DISP64 },
+ OPERAND_TYPE_IMM64_DISP64, true },
{ STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF,
BFD_RELOC_X86_64_GOTOFF64 },
- OPERAND_TYPE_IMM64_DISP64 },
+ OPERAND_TYPE_IMM64_DISP64, true },
{ STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
BFD_RELOC_X86_64_GOTPCREL },
- OPERAND_TYPE_IMM32_32S_DISP32 },
+ OPERAND_TYPE_IMM32_32S_DISP32, true },
{ STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD,
BFD_RELOC_X86_64_TLSGD },
- OPERAND_TYPE_IMM32_32S_DISP32 },
+ OPERAND_TYPE_IMM32_32S_DISP32, true },
{ STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM,
_dummy_first_bfd_reloc_code_real },
- OPERAND_TYPE_NONE },
+ OPERAND_TYPE_NONE, true },
{ STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real,
BFD_RELOC_X86_64_TLSLD },
- OPERAND_TYPE_IMM32_32S_DISP32 },
+ OPERAND_TYPE_IMM32_32S_DISP32, true },
{ STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
BFD_RELOC_X86_64_GOTTPOFF },
- OPERAND_TYPE_IMM32_32S_DISP32 },
+ OPERAND_TYPE_IMM32_32S_DISP32, true },
{ STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32,
BFD_RELOC_X86_64_TPOFF32 },
- OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
+ OPERAND_TYPE_IMM32_32S_64_DISP32_64, true },
{ STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE,
_dummy_first_bfd_reloc_code_real },
- OPERAND_TYPE_NONE },
+ OPERAND_TYPE_NONE, true },
{ STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32,
BFD_RELOC_X86_64_DTPOFF32 },
- OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
+ OPERAND_TYPE_IMM32_32S_64_DISP32_64, true },
{ STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
_dummy_first_bfd_reloc_code_real },
- OPERAND_TYPE_NONE },
+ OPERAND_TYPE_NONE, true },
{ STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
_dummy_first_bfd_reloc_code_real },
- OPERAND_TYPE_NONE },
+ OPERAND_TYPE_NONE, true },
{ STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32,
BFD_RELOC_X86_64_GOT32 },
- OPERAND_TYPE_IMM32_32S_64_DISP32 },
+ OPERAND_TYPE_IMM32_32S_64_DISP32, true },
{ STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC,
BFD_RELOC_X86_64_GOTPC32_TLSDESC },
- OPERAND_TYPE_IMM32_32S_DISP32 },
+ OPERAND_TYPE_IMM32_32S_DISP32, true },
{ STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL,
BFD_RELOC_X86_64_TLSDESC_CALL },
- OPERAND_TYPE_IMM32_32S_DISP32 },
+ OPERAND_TYPE_IMM32_32S_DISP32, true },
};
char *cp;
unsigned int j;
*types = gotrel[j].types64;
}
- if (j != 0 && GOT_symbol == NULL)
+ if (gotrel[j].need_GOT_symbol && GOT_symbol == NULL)
GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);
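/* Illustrative consequence: operators flagged need_GOT_symbol, e.g.
"mov foo@GOTOFF(%ebx), %eax", still create _GLOBAL_OFFSET_TABLE_,
while "call func@PLT" no longer forces it into the symbol table. */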
/* The length of the first part of our input line. */
op_string++;
/* Check broadcasts. */
- if (strncmp (op_string, "1to", 3) == 0)
+ if (startswith (op_string, "1to"))
{
- int bcst_type;
+ unsigned int bcst_type;
- if (i.broadcast)
+ if (i.broadcast.type)
goto duplicated_vec_op;
op_string += 3;
}
op_string++;
- broadcast_op.type = bcst_type;
- broadcast_op.operand = this_operand;
- broadcast_op.bytes = 0;
- i.broadcast = &broadcast_op;
+ i.broadcast.type = bcst_type;
+ i.broadcast.operand = this_operand;
}
/* Check masking operation. */
else if ((mask = parse_register (op_string, &end_op)) != NULL)
return NULL;
}
- if (!i.mask)
+ if (!i.mask.reg)
{
- mask_op.mask = mask;
- mask_op.zeroing = 0;
- mask_op.operand = this_operand;
- i.mask = &mask_op;
+ i.mask.reg = mask;
+ i.mask.operand = this_operand;
}
+ else if (i.mask.reg->reg_num)
+ goto duplicated_vec_op;
else
{
- if (i.mask->mask)
- goto duplicated_vec_op;
-
- i.mask->mask = mask;
+ i.mask.reg = mask;
/* Only "{z}" is allowed here. No need to check
zeroing mask explicitly. */
- if (i.mask->operand != this_operand)
+ if (i.mask.operand != (unsigned int) this_operand)
{
as_bad (_("invalid write mask `%s'"), saved);
return NULL;
/* Check zeroing-flag for masking operation. */
else if (*op_string == 'z')
{
- if (!i.mask)
+ if (!i.mask.reg)
{
- mask_op.mask = NULL;
- mask_op.zeroing = 1;
- mask_op.operand = this_operand;
- i.mask = &mask_op;
+ i.mask.reg = reg_k0;
+ i.mask.zeroing = 1;
+ i.mask.operand = this_operand;
}
else
{
- if (i.mask->zeroing)
+ if (i.mask.zeroing)
{
duplicated_vec_op:
as_bad (_("duplicated `%s'"), saved);
return NULL;
}
- i.mask->zeroing = 1;
+ i.mask.zeroing = 1;
/* Only "{%k}" is allowed here. No need to check mask
register explicitly. */
- if (i.mask->operand != this_operand)
+ if (i.mask.operand != (unsigned int) this_operand)
{
as_bad (_("invalid zeroing-masking `%s'"),
saved);
return NULL;
}
- if (i.mask && i.mask->zeroing && !i.mask->mask)
+ if (i.mask.reg && i.mask.zeroing && !i.mask.reg->reg_num)
{
as_bad (_("zeroing-masking only allowed with write mask"));
return NULL;
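/* Examples (illustrative): "vaddps %zmm0, %zmm1, %zmm2{%k1}{z}" is
accepted, while bare zeroing without a write mask, as in
"vaddps %zmm0, %zmm1, %zmm2{z}", is rejected here. */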
branch template. */
static templates aux_templates;
const insn_template *t = current_templates->start;
- bfd_boolean has_intel64 = FALSE;
+ bool has_intel64 = false;
aux_templates.start = t;
while (++t < current_templates->end)
!= current_templates->start->opcode_modifier.jump)
break;
if ((t->opcode_modifier.isa64 >= INTEL64))
- has_intel64 = TRUE;
+ has_intel64 = true;
}
if (t < current_templates->end)
{
ret = 0;
}
- else if (flag_code == CODE_64BIT
- && !i.prefix[ADDR_PREFIX]
- && exp->X_op == O_constant)
- {
- /* Since displacement is signed extended to 64bit, don't allow
- disp32 and turn off disp32s if they are out of range. */
- i.types[this_operand].bitfield.disp32 = 0;
- if (!fits_in_signed_long (exp->X_add_number))
- {
- i.types[this_operand].bitfield.disp32s = 0;
- if (i.types[this_operand].bitfield.baseindex)
- {
- as_bad (_("0x%lx out range of signed 32bit displacement"),
- (long) exp->X_add_number);
- ret = 0;
- }
- }
- }
-
#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
else if (exp->X_op != O_constant
&& OUTPUT_FLAVOR == bfd_target_aout_flavour
{
const char *kind = "base/index";
enum flag_code addr_mode = i386_addressing_mode ();
+ const insn_template *t = current_templates->start;
- if (current_templates->start->opcode_modifier.isstring
- && !current_templates->start->cpu_flags.bitfield.cpupadlock
+ if (t->opcode_modifier.isstring
+ && !t->cpu_flags.bitfield.cpupadlock
&& (current_templates->end[-1].opcode_modifier.isstring
|| i.mem_operands))
{
kind = "string address";
- if (current_templates->start->opcode_modifier.repprefixok)
+ if (t->opcode_modifier.prefixok == PrefixRep)
{
int es_op = current_templates->end[-1].opcode_modifier.isstring
- IS_STRING_ES_OP0;
&& current_templates->end[-1].operand_types[1]
.bitfield.baseindex))
op = 1;
- expected_reg = hash_find (reg_hash, di_si[addr_mode][op == es_op]);
+ expected_reg
+ = (const reg_entry *) str_hash_find (reg_hash,
+ di_si[addr_mode][op == es_op]);
}
else
- expected_reg = hash_find (reg_hash, bx[addr_mode]);
+ expected_reg
+ = (const reg_entry *)str_hash_find (reg_hash, bx[addr_mode]);
if (i.base_reg != expected_reg
|| i.index_reg
if (addr_mode != CODE_16BIT)
{
/* 32-bit/64-bit checks. */
+ if (i.disp_encoding == disp_encoding_16bit)
+ {
+ bad_disp:
+ as_bad (_("invalid `%s' prefix"),
+ addr_mode == CODE_16BIT ? "{disp32}" : "{disp16}");
+ return 0;
+ }
+
if ((i.base_reg
&& ((addr_mode == CODE_64BIT
? !i.base_reg->reg_type.bitfield.qword
|| !i.index_reg->reg_type.bitfield.baseindex)))
goto bad_address;
- /* bndmk, bndldx, and bndstx have special restrictions. */
- if (current_templates->start->base_opcode == 0xf30f1b
- || (current_templates->start->base_opcode & ~1) == 0x0f1a)
+ /* bndmk, bndldx, bndstx, and mandatory non-vector SIB insns
+ have special restrictions. */
+ if ((t->opcode_modifier.opcodeprefix == PREFIX_0XF3
+ && t->opcode_modifier.opcodespace == SPACE_0F
+ && t->base_opcode == 0x1b)
+ || (t->opcode_modifier.opcodeprefix == PREFIX_NONE
+ && t->opcode_modifier.opcodespace == SPACE_0F
+ && (t->base_opcode & ~1) == 0x1a)
+ || t->opcode_modifier.sib == SIBMEM)
{
/* They cannot use RIP-relative addressing. */
if (i.base_reg && i.base_reg->reg_num == RegIP)
}
/* bndldx and bndstx ignore their scale factor. */
- if (current_templates->start->base_opcode != 0xf30f1b
+ if (t->opcode_modifier.opcodeprefix == PREFIX_NONE
+ && t->opcode_modifier.opcodespace == SPACE_0F
+ && (t->base_opcode & ~1) == 0x1a
&& i.log2_scale_factor)
as_warn (_("register scaling is being ignored here"));
}
else
{
/* 16-bit checks. */
+ if (i.disp_encoding == disp_encoding_32bit)
+ goto bad_disp;
+
if ((i.base_reg
&& (!i.base_reg->reg_type.bitfield.word
|| !i.base_reg->reg_type.bitfield.baseindex))
{
if (!strncmp (pstr, RC_NamesTable[j].name, RC_NamesTable[j].len))
{
- if (!i.rounding)
- {
- rc_op.type = RC_NamesTable[j].type;
- rc_op.operand = this_operand;
- i.rounding = &rc_op;
- }
- else
+ if (i.rounding.type != rc_none)
{
as_bad (_("duplicated `%s'"), imm_start);
return 0;
}
+
+ i.rounding.type = RC_NamesTable[j].type;
+ i.rounding.operand = this_operand;
+
pstr += RC_NamesTable[j].len;
match_found = 1;
break;
if (t < current_templates->end)
{
static templates aux_templates;
- bfd_boolean recheck;
+ bool recheck;
aux_templates.start = t;
for (; t < current_templates->end; ++t)
++op_string;
if (is_space_char (*op_string))
++op_string;
- i.jumpabsolute = TRUE;
+ i.jumpabsolute = true;
}
/* Check if operand is a register. */
++op_string;
if (*op_string == ':' && r->reg_type.bitfield.class == SReg)
{
- switch (r->reg_num)
- {
- case 0:
- i.seg[i.mem_operands] = &es;
- break;
- case 1:
- i.seg[i.mem_operands] = &cs;
- break;
- case 2:
- i.seg[i.mem_operands] = &ss;
- break;
- case 3:
- i.seg[i.mem_operands] = &ds;
- break;
- case 4:
- i.seg[i.mem_operands] = &fs;
- break;
- case 5:
- i.seg[i.mem_operands] = &gs;
- break;
- }
+ i.seg[i.mem_operands] = r;
/* Skip the ':' and whitespace. */
++op_string;
++op_string;
if (is_space_char (*op_string))
++op_string;
- i.jumpabsolute = TRUE;
+ i.jumpabsolute = true;
}
goto do_memory_reference;
}
/* Are we finished with this relocation now? */
if (fixP->fx_addsy == NULL)
- fixP->fx_done = 1;
+ {
+ fixP->fx_done = 1;
+ switch (fixP->fx_r_type)
+ {
+ case BFD_RELOC_X86_64_32S:
+ fixP->fx_signed = 1;
+ break;
+
+ default:
+ break;
+ }
+ }
#if defined (OBJ_COFF) && defined (TE_PE)
else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
{
{
/* This outputs the LITTLENUMs in REVERSE order;
in accord with the bigendian 386. */
- return ieee_md_atof (type, litP, sizeP, FALSE);
+ return ieee_md_atof (type, litP, sizeP, false);
}
\f
static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];
/* Verify that @r can be used in the current context. */
-static bfd_boolean check_register (const reg_entry *r)
+static bool check_register (const reg_entry *r)
{
if (allow_pseudo_reg)
- return TRUE;
+ return true;
if (operand_type_all_zero (&r->reg_type))
- return FALSE;
+ return false;
if ((r->reg_type.bitfield.dword
|| (r->reg_type.bitfield.class == SReg && r->reg_num > 3)
|| r->reg_type.bitfield.class == RegCR
|| r->reg_type.bitfield.class == RegDR)
&& !cpu_arch_flags.bitfield.cpui386)
- return FALSE;
+ return false;
if (r->reg_type.bitfield.class == RegTR
&& (flag_code == CODE_64BIT
|| !cpu_arch_flags.bitfield.cpui386
|| cpu_arch_isa_flags.bitfield.cpui586
|| cpu_arch_isa_flags.bitfield.cpui686))
- return FALSE;
+ return false;
if (r->reg_type.bitfield.class == RegMMX && !cpu_arch_flags.bitfield.cpummx)
- return FALSE;
+ return false;
if (!cpu_arch_flags.bitfield.cpuavx512f)
{
if (r->reg_type.bitfield.zmmword
|| r->reg_type.bitfield.class == RegMask)
- return FALSE;
+ return false;
if (!cpu_arch_flags.bitfield.cpuavx)
{
if (r->reg_type.bitfield.ymmword)
- return FALSE;
+ return false;
if (!cpu_arch_flags.bitfield.cpusse && r->reg_type.bitfield.xmmword)
- return FALSE;
+ return false;
}
}
+ if (r->reg_type.bitfield.tmmword
+ && (!cpu_arch_flags.bitfield.cpuamx_tile
+ || flag_code != CODE_64BIT))
+ return false;
+
if (r->reg_type.bitfield.class == RegBND && !cpu_arch_flags.bitfield.cpumpx)
- return FALSE;
+ return false;
/* Don't allow fake index register unless allow_index_reg isn't 0. */
if (!allow_index_reg && r->reg_num == RegIZ)
- return FALSE;
+ return false;
/* Upper 16 vector registers are only available with VREX in 64bit
mode, and require EVEX encoding. */
{
if (!cpu_arch_flags.bitfield.cpuavx512f
|| flag_code != CODE_64BIT)
- return FALSE;
+ return false;
if (i.vec_encoding == vex_encoding_default)
i.vec_encoding = vex_encoding_evex;
if (((r->reg_flags & (RegRex64 | RegRex)) || r->reg_type.bitfield.qword)
&& (!cpu_arch_flags.bitfield.cpulm || r->reg_type.bitfield.class != RegCR)
&& flag_code != CODE_64BIT)
- return FALSE;
+ return false;
if (r->reg_type.bitfield.class == SReg && r->reg_num == RegFlat
&& !intel_syntax)
- return FALSE;
+ return false;
- return TRUE;
+ return true;
}
/* REG_STRING starts *before* REGISTER_PREFIX. */
*end_op = s;
- r = (const reg_entry *) hash_find (reg_hash, reg_name_given);
+ r = (const reg_entry *) str_hash_find (reg_hash, reg_name_given);
/* Handle floating point regs, allowing spaces in the (i) part. */
- if (r == i386_regtab /* %st is first entry of table */)
+ if (r == reg_st0)
{
if (!cpu_arch_flags.bitfield.cpu8087
&& !cpu_arch_flags.bitfield.cpu287
if (*s == ')')
{
*end_op = s + 1;
- r = (const reg_entry *) hash_find (reg_hash, "st(0)");
- know (r);
+ know (r[fpr].reg_num == fpr);
return r + fpr;
}
}
list = bfd_target_list ();
for (l = list; *l != NULL; l++)
- if (CONST_STRNEQ (*l, "elf64-x86-64")
+ if (startswith (*l, "elf64-x86-64")
|| strcmp (*l, "coff-x86-64") == 0
|| strcmp (*l, "pe-x86-64") == 0
|| strcmp (*l, "pei-x86-64") == 0
list = bfd_target_list ();
for (l = list; *l != NULL; l++)
- if (CONST_STRNEQ (*l, "elf32-x86-64"))
+ if (startswith (*l, "elf32-x86-64"))
{
default_arch = "x86_64:32";
break;
const char *
i386_target_format (void)
{
- if (!strncmp (default_arch, "x86_64", 6))
+ if (startswith (default_arch, "x86_64"))
{
update_code_flag (CODE_64BIT, 1);
if (default_arch[6] == '\0')
if (symbol_find (name))
as_bad (_("GOT already in symbol table"));
GOT_symbol = symbol_new (name, undefined_section,
- (valueT) 0, &zero_address_frag);
+ &zero_address_frag, 0);
};
return GOT_symbol;
}
}
}
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
- else if (!object_64bit)
+ else
{
- if (fixp->fx_r_type == BFD_RELOC_386_GOT32
- && fixp->fx_tcbit2)
- fixp->fx_r_type = BFD_RELOC_386_GOT32X;
+ /* NB: Commit 292676c1 resolved PLT32 reloc against local symbol
+ to section. Since PLT32 relocations must be against symbols,
+ turn such a PLT32 relocation into a PC32 relocation. */
+ if (fixp->fx_addsy
+ && (fixp->fx_r_type == BFD_RELOC_386_PLT32
+ || fixp->fx_r_type == BFD_RELOC_X86_64_PLT32)
+ && symbol_section_p (fixp->fx_addsy))
+ fixp->fx_r_type = BFD_RELOC_32_PCREL;
+ if (!object_64bit)
+ {
+ if (fixp->fx_r_type == BFD_RELOC_386_GOT32
+ && fixp->fx_tcbit2)
+ fixp->fx_r_type = BFD_RELOC_386_GOT32X;
+ }
}
#endif
}
the symbol plus addend. */
valueT value = S_GET_SIZE (fixp->fx_addsy) + fixp->fx_offset;
if (fixp->fx_r_type == BFD_RELOC_SIZE32
+ && object_64bit
&& !fits_in_unsigned_long (value))
as_bad_where (fixp->fx_file, fixp->fx_line,
_("symbol size computation overflow"));
{
if (flag_code == CODE_64BIT
&& len == sizeof ("unwind") - 1
- && strncmp (str, "unwind", 6) == 0)
+ && startswith (str, "unwind"))
return SHT_X86_64_UNWIND;
return -1;
bfd_vma
x86_64_section_word (char *str, size_t len)
{
- if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
+ if (len == 5 && flag_code == CODE_64BIT && startswith (str, "large"))
return SHF_X86_64_LARGE;
return -1;