if (type->code () == TYPE_CODE_STRUCT
|| type->code () == TYPE_CODE_UNION)
{
- for (int i = 0; i < TYPE_NFIELDS (type); i++)
+ for (int i = 0; i < type->num_fields (); i++)
{
struct type *subtype = check_typedef (TYPE_FIELD_TYPE (type, i));
int bitpos = TYPE_FIELD_BITPOS (type, i);
/* Ignore static fields, empty fields (for example nested
empty structures), and bitfields (these are handled by
the caller). */
- if (field_is_static (&TYPE_FIELD (type, i))
+ if (field_is_static (&type->field (i))
|| (TYPE_FIELD_BITSIZE (type, i) == 0
&& TYPE_LENGTH (subtype) == 0)
|| TYPE_FIELD_PACKED (type, i))
	continue;
/* Ignore static fields, or empty fields, for example nested
empty structures.  */
- if (field_is_static (&TYPE_FIELD (type, i)) || bitsize == 0)
+ if (field_is_static (&type->field (i)) || bitsize == 0)
return;
if (subtype->code () == TYPE_CODE_STRUCT
    || subtype->code () == TYPE_CODE_UNION)
{
/* Each field of an object is classified recursively. */
int j;
- for (j = 0; j < TYPE_NFIELDS (subtype); j++)
+ for (j = 0; j < subtype->num_fields (); j++)
amd64_classify_aggregate_field (subtype, j, theclass, bitpos);
return;
}
gdb_assert (type->code () == TYPE_CODE_STRUCT
|| type->code () == TYPE_CODE_UNION);
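/* Classify each field in turn, starting at bit offset 0; nested
   aggregates are handled recursively by the field classifier.  */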
- for (i = 0; i < TYPE_NFIELDS (type); i++)
+ for (i = 0; i < type->num_fields (); i++)
amd64_classify_aggregate_field (type, i, theclass, 0);
}
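/* The raw instruction.  */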
gdb_byte *raw_insn;
};
-struct amd64_displaced_step_closure : public displaced_step_closure
+struct amd64_displaced_step_copy_insn_closure
+  : public displaced_step_copy_insn_closure
{
- amd64_displaced_step_closure (int insn_buf_len)
+ amd64_displaced_step_copy_insn_closure (int insn_buf_len)
: insn_buf (insn_buf_len, 0)
{}
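/* Buffer holding the (possibly modified) copy of the instruction.  */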
gdb::byte_vector insn_buf;
};
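+/* Owning pointer to an amd64_displaced_step_copy_insn_closure.  */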
+typedef std::unique_ptr<amd64_displaced_step_copy_insn_closure>
+ amd64_displaced_step_copy_insn_closure_up;
+
/* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
at which point delete these in favor of libopcodes' versions). */
We set base = pc + insn_length so we can leave disp unchanged. */
static void
-fixup_riprel (struct gdbarch *gdbarch, amd64_displaced_step_closure *dsc,
+fixup_riprel (struct gdbarch *gdbarch,
+ amd64_displaced_step_copy_insn_closure *dsc,
CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
{
const struct amd64_insn *insn_details = &dsc->insn_details;
static void
fixup_displaced_copy (struct gdbarch *gdbarch,
- amd64_displaced_step_closure *dsc,
+ amd64_displaced_step_copy_insn_closure *dsc,
CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
{
const struct amd64_insn *details = &dsc->insn_details;
}
}
-displaced_step_closure_up
+displaced_step_copy_insn_closure_up
amd64_displaced_step_copy_insn (struct gdbarch *gdbarch,
CORE_ADDR from, CORE_ADDR to,
struct regcache *regs)
/* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to
continually watch for running off the end of the buffer. */
int fixup_sentinel_space = len;
- std::unique_ptr<amd64_displaced_step_closure> dsc
- (new amd64_displaced_step_closure (len + fixup_sentinel_space));
+ std::unique_ptr<amd64_displaced_step_copy_insn_closure> dsc
+ (new amd64_displaced_step_copy_insn_closure (len + fixup_sentinel_space));
gdb_byte *buf = &dsc->insn_buf[0];
struct amd64_insn *details = &dsc->insn_details;
}
/* This is a workaround for a problem with g++ 4.8. */
- return displaced_step_closure_up (dsc.release ());
+ return displaced_step_copy_insn_closure_up (dsc.release ());
}
static int
void
amd64_displaced_step_fixup (struct gdbarch *gdbarch,
- struct displaced_step_closure *dsc_,
+ struct displaced_step_copy_insn_closure *dsc_,
CORE_ADDR from, CORE_ADDR to,
struct regcache *regs)
{
- amd64_displaced_step_closure *dsc = (amd64_displaced_step_closure *) dsc_;
+ amd64_displaced_step_copy_insn_closure *dsc
+   = (amd64_displaced_step_copy_insn_closure *) dsc_;
enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
/* The offset we applied to the instruction's address. */
ULONGEST insn_offset = to - from;