return kvm_skip_emulated_instruction(vcpu);
}
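+/*
+ * Return the VPID that hardware uses while running L2: the dedicated
+ * vpid02 if one was allocated, otherwise L1's own vpid, which L2
+ * shares when VPID is not exposed to L1.
+ */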
+static u16 nested_get_vpid02(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	return vmx->nested.vpid02 ? vmx->nested.vpid02 : vmx->vpid;
+}
+
static int handle_invvpid(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned long type, types;
	struct {
		u64 vpid;
		u64 gla;
	} operand;
+	u16 vpid02;

	if (!(vmx->nested.msrs.secondary_ctls_high &
	      SECONDARY_EXEC_ENABLE_VPID) ||
	    !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}
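+	/*
+	 * All flushes below operate on the VPID L2 actually runs with;
+	 * operand.vpid from the INVVPID descriptor is validated but never
+	 * passed to hardware directly.
+	 */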
+	vpid02 = nested_get_vpid02(vcpu);
	switch (type) {
	case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
		if (!operand.vpid ||
		    is_noncanonical_address(operand.gla, vcpu)) {
			nested_vmx_failValid(vcpu,
				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
			return kvm_skip_emulated_instruction(vcpu);
		}
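+		/*
+		 * Prefer a single-address invalidation when the CPU
+		 * supports it; otherwise fall back to flushing the
+		 * entire vpid02 context.
+		 */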
-		if (cpu_has_vmx_invvpid_individual_addr() &&
-		    vmx->nested.vpid02) {
+		if (cpu_has_vmx_invvpid_individual_addr()) {
			__invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR,
-				vmx->nested.vpid02, operand.gla);
+				vpid02, operand.gla);
		} else
-			__vmx_flush_tlb(vcpu, vmx->nested.vpid02, true);
+			__vmx_flush_tlb(vcpu, vpid02, true);
break;
	case VMX_VPID_EXTENT_SINGLE_CONTEXT:
	case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
		if (!operand.vpid) {
			nested_vmx_failValid(vcpu,
				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
			return kvm_skip_emulated_instruction(vcpu);
		}
-		__vmx_flush_tlb(vcpu, vmx->nested.vpid02, true);
+		__vmx_flush_tlb(vcpu, vpid02, true);
		break;
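+	/*
+	 * ALL_CONTEXT: every vmcs12 VPID maps onto the single vpid02,
+	 * so flushing vpid02 covers all contexts L1 can name.
+	 */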
	case VMX_VPID_EXTENT_ALL_CONTEXT:
-		__vmx_flush_tlb(vcpu, vmx->nested.vpid02, true);
+		__vmx_flush_tlb(vcpu, vpid02, true);
		break;
default:
WARN_ON_ONCE(1);