return emulate_exception(ctxt, NM_VECTOR, 0, false);
}
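+/*
+ * Fetch only the selector for @seg; the cached descriptor that
+ * ->get_segment() also returns is discarded here.
+ */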
+static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
+{
+ u16 selector;
+ struct desc_struct desc;
+
+ ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
+ return selector;
+}
+
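+/*
+ * Replace only the selector for @seg, writing back the currently
+ * cached descriptor and base unchanged.
+ */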
+static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
+ unsigned seg)
+{
+ u16 dummy;
+ u32 base3;
+ struct desc_struct desc;
+
+ ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
+ ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
+}
+
static int __linearize(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
unsigned size, bool write, bool fetch,
bool usable;
ulong la;
u32 lim;
+ u16 sel;
unsigned cpl, rpl;
la = seg_base(ctxt, ctxt->ops, addr.seg) + addr.ea;
return emulate_gp(ctxt, 0);
break;
default:
- usable = ctxt->ops->get_cached_descriptor(ctxt, &desc, NULL,
- addr.seg);
+ usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
+ addr.seg);
if (!usable)
goto bad;
/* code segment or read-only data segment */
goto bad;
}
cpl = ctxt->ops->cpl(ctxt);
- rpl = ctxt->ops->get_segment_selector(ctxt, addr.seg) & 3;
+ rpl = sel & 3;
cpl = max(cpl, rpl);
if (!(desc.type & 8)) {
/* data segment */
{
if (selector & 1 << 2) {
struct desc_struct desc;
+ u16 sel;
+
memset (dt, 0, sizeof *dt);
- if (!ops->get_cached_descriptor(ctxt, &desc, NULL,
- VCPU_SREG_LDTR))
+ if (!ops->get_segment(ctxt, &sel, &desc, NULL, VCPU_SREG_LDTR))
return;
dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
return ret;
}
load:
- ops->set_segment_selector(ctxt, selector, seg);
- ops->set_cached_descriptor(ctxt, &seg_desc, 0, seg);
+ ops->set_segment(ctxt, selector, &seg_desc, 0, seg);
return X86EMUL_CONTINUE;
exception:
emulate_exception(ctxt, err_vec, err_code, true);
{
struct decode_cache *c = &ctxt->decode;
- c->src.val = ops->get_segment_selector(ctxt, seg);
+ c->src.val = get_segment_selector(ctxt, seg);
return em_push(ctxt);
}
ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
- c->src.val = ops->get_segment_selector(ctxt, VCPU_SREG_CS);
+ c->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
rc = em_push(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
struct x86_emulate_ops *ops, struct desc_struct *cs,
struct desc_struct *ss)
{
+ u16 selector;
+
memset(cs, 0, sizeof(struct desc_struct));
- ops->get_cached_descriptor(ctxt, cs, NULL, VCPU_SREG_CS);
+ ops->get_segment(ctxt, &selector, cs, NULL, VCPU_SREG_CS);
memset(ss, 0, sizeof(struct desc_struct));
cs->l = 0; /* will be adjusted later */
cs.d = 0;
cs.l = 1;
}
- ops->set_cached_descriptor(ctxt, &cs, 0, VCPU_SREG_CS);
- ops->set_segment_selector(ctxt, cs_sel, VCPU_SREG_CS);
- ops->set_cached_descriptor(ctxt, &ss, 0, VCPU_SREG_SS);
- ops->set_segment_selector(ctxt, ss_sel, VCPU_SREG_SS);
+ ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
+ ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
c->regs[VCPU_REGS_RCX] = c->eip;
if (efer & EFER_LMA) {
cs.l = 1;
}
- ops->set_cached_descriptor(ctxt, &cs, 0, VCPU_SREG_CS);
- ops->set_segment_selector(ctxt, cs_sel, VCPU_SREG_CS);
- ops->set_cached_descriptor(ctxt, &ss, 0, VCPU_SREG_SS);
- ops->set_segment_selector(ctxt, ss_sel, VCPU_SREG_SS);
+ ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
+ ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
c->eip = msr_data;
cs_sel |= SELECTOR_RPL_MASK;
ss_sel |= SELECTOR_RPL_MASK;
- ops->set_cached_descriptor(ctxt, &cs, 0, VCPU_SREG_CS);
- ops->set_segment_selector(ctxt, cs_sel, VCPU_SREG_CS);
- ops->set_cached_descriptor(ctxt, &ss, 0, VCPU_SREG_SS);
- ops->set_segment_selector(ctxt, ss_sel, VCPU_SREG_SS);
+ ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
+ ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
c->eip = c->regs[VCPU_REGS_RDX];
c->regs[VCPU_REGS_RSP] = c->regs[VCPU_REGS_RCX];
struct desc_struct tr_seg;
u32 base3;
int r;
- u16 io_bitmap_ptr, perm, bit_idx = port & 0x7;
+ u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
unsigned mask = (1 << len) - 1;
unsigned long base;
- ops->get_cached_descriptor(ctxt, &tr_seg, &base3, VCPU_SREG_TR);
+ ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
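+ /* @tr is fetched only because ->get_segment() needs a selector pointer */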
if (!tr_seg.p)
return false;
if (desc_limit_scaled(&tr_seg) < 103)
tss->si = c->regs[VCPU_REGS_RSI];
tss->di = c->regs[VCPU_REGS_RDI];
- tss->es = ops->get_segment_selector(ctxt, VCPU_SREG_ES);
- tss->cs = ops->get_segment_selector(ctxt, VCPU_SREG_CS);
- tss->ss = ops->get_segment_selector(ctxt, VCPU_SREG_SS);
- tss->ds = ops->get_segment_selector(ctxt, VCPU_SREG_DS);
- tss->ldt = ops->get_segment_selector(ctxt, VCPU_SREG_LDTR);
+ tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
+ tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
+ tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
+ tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
+ tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
}
static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
* SDM says that segment selectors are loaded before segment
* descriptors
*/
- ops->set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
- ops->set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
- ops->set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
- ops->set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
- ops->set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
+ set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
+ set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
+ set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
+ set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
+ set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
/*
 * Now load segment descriptors. If a fault happens at this stage
tss->esi = c->regs[VCPU_REGS_RSI];
tss->edi = c->regs[VCPU_REGS_RDI];
- tss->es = ops->get_segment_selector(ctxt, VCPU_SREG_ES);
- tss->cs = ops->get_segment_selector(ctxt, VCPU_SREG_CS);
- tss->ss = ops->get_segment_selector(ctxt, VCPU_SREG_SS);
- tss->ds = ops->get_segment_selector(ctxt, VCPU_SREG_DS);
- tss->fs = ops->get_segment_selector(ctxt, VCPU_SREG_FS);
- tss->gs = ops->get_segment_selector(ctxt, VCPU_SREG_GS);
- tss->ldt_selector = ops->get_segment_selector(ctxt, VCPU_SREG_LDTR);
+ tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
+ tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
+ tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
+ tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
+ tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
+ tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
+ tss->ldt_selector = get_segment_selector(ctxt, VCPU_SREG_LDTR);
}
static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
* SDM says that segment selectors are loaded before segment
* descriptors
*/
- ops->set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
- ops->set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
- ops->set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
- ops->set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
- ops->set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
- ops->set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
- ops->set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
+ set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
+ set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
+ set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
+ set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
+ set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
+ set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
+ set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
/*
 * Now load segment descriptors. If a fault happens at this stage
{
struct desc_struct curr_tss_desc, next_tss_desc;
int ret;
- u16 old_tss_sel = ops->get_segment_selector(ctxt, VCPU_SREG_TR);
+ u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
ulong old_tss_base =
ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
u32 desc_limit;
}
ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
- ops->set_cached_descriptor(ctxt, &next_tss_desc, 0, VCPU_SREG_TR);
- ops->set_segment_selector(ctxt, tss_selector, VCPU_SREG_TR);
+ ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
if (has_error_code) {
struct decode_cache *c = &ctxt->decode;
ulong old_eip;
int rc;
- old_cs = ctxt->ops->get_segment_selector(ctxt, VCPU_SREG_CS);
+ old_cs = get_segment_selector(ctxt, VCPU_SREG_CS);
old_eip = c->eip;
memcpy(&sel, c->src.valptr + c->op_bytes, 2);
rc = emulate_ud(ctxt);
goto done;
}
- c->dst.val = ops->get_segment_selector(ctxt, c->modrm_reg);
+ c->dst.val = get_segment_selector(ctxt, c->modrm_reg);
break;
case 0x8d: /* lea r16/r32, m */
c->dst.val = c->src.addr.mem.ea;
return get_segment_base(emul_to_vcpu(ctxt), seg);
}
-static bool emulator_get_cached_descriptor(struct x86_emulate_ctxt *ctxt,
- struct desc_struct *desc, u32 *base3,
- int seg)
+static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
+ struct desc_struct *desc, u32 *base3,
+ int seg)
{
struct kvm_segment var;
kvm_get_segment(emul_to_vcpu(ctxt), &var, seg);
+ *selector = var.selector;
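+ /* hand the selector back to the caller even if the segment is unusable */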
if (var.unusable)
return false;
return true;
}
-static void emulator_set_cached_descriptor(struct x86_emulate_ctxt *ctxt,
- struct desc_struct *desc, u32 base3,
- int seg)
+static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector,
+ struct desc_struct *desc, u32 base3,
+ int seg)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
struct kvm_segment var;
- /* needed to preserve selector */
- kvm_get_segment(vcpu, &var, seg);
-
+ var.selector = selector;
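+ /* build the segment from scratch; the caller supplies the selector */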
var.base = get_desc_base(desc);
#ifdef CONFIG_X86_64
var.base |= ((u64)base3) << 32;
return;
}
-static u16 emulator_get_segment_selector(struct x86_emulate_ctxt *ctxt, int seg)
-{
- struct kvm_segment kvm_seg;
-
- kvm_get_segment(emul_to_vcpu(ctxt), &kvm_seg, seg);
- return kvm_seg.selector;
-}
-
-static void emulator_set_segment_selector(struct x86_emulate_ctxt *ctxt,
- u16 sel, int seg)
-{
- struct kvm_segment kvm_seg;
-
- kvm_get_segment(emul_to_vcpu(ctxt), &kvm_seg, seg);
- kvm_seg.selector = sel;
- kvm_set_segment(emul_to_vcpu(ctxt), &kvm_seg, seg);
-}
-
static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
u32 msr_index, u64 *pdata)
{
.invlpg = emulator_invlpg,
.pio_in_emulated = emulator_pio_in_emulated,
.pio_out_emulated = emulator_pio_out_emulated,
- .get_cached_descriptor = emulator_get_cached_descriptor,
- .set_cached_descriptor = emulator_set_cached_descriptor,
- .get_segment_selector = emulator_get_segment_selector,
- .set_segment_selector = emulator_set_segment_selector,
+ .get_segment = emulator_get_segment,
+ .set_segment = emulator_set_segment,
.get_cached_segment_base = emulator_get_cached_segment_base,
.get_gdt = emulator_get_gdt,
.get_idt = emulator_get_idt,