static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu);
static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
-static void vmx_set_segment(struct kvm_vcpu *vcpu,
- struct kvm_segment *var, int seg);
-static void vmx_get_segment(struct kvm_vcpu *vcpu,
- struct kvm_segment *var, int seg);
static bool guest_state_valid(struct kvm_vcpu *vcpu);
static u32 vmx_segment_access_rights(struct kvm_segment *var);
static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
-static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
-static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
u16 error_code);
-static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
u32 msr, int type);
u64 host_efer;
-static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
-
/*
* Keep MSR_STAR at the end, as setup_msrs() will try to optimize it
* away by decrementing the array size.
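 * (Below: a sketch of the MSR index array this comment heads; entries
 * assumed from the surrounding code of this era.  MSR_STAR must stay
 * last so setup_msrs() can drop it simply by shrinking the count.)
 */
static const u32 vmx_msr_index[] = {
#ifdef CONFIG_X86_64
	MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
#endif
	MSR_EFER, MSR_TSC_AUX, MSR_STAR,
};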
u32 exit_intr_info,
unsigned long exit_qualification);
-static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
+static inline int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
{
	int i;

	/* Scan the shared-MSR slots for the requested MSR. */
	for (i = 0; i < vmx->nmsrs; ++i)
		if (vmx_msr_index[vmx->guest_msrs[i].index] == msr)
			return i;
	return -1;
}
-static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
+struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
{
	int i;

	i = __find_msr_index(vmx, msr);
	if (i >= 0)
		return &vmx->guest_msrs[i];
	return NULL;
}
-void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
-{
- vmcs_clear(loaded_vmcs->vmcs);
- if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
- vmcs_clear(loaded_vmcs->shadow_vmcs);
- loaded_vmcs->cpu = -1;
- loaded_vmcs->launched = 0;
-}
-
#ifdef CONFIG_KEXEC_CORE
/*
 * This bitmap is used to indicate whether the vmclear
 * operation is enabled on all cpus. All disabled by
 * default.
 */
return *p;
}
-static void update_exception_bitmap(struct kvm_vcpu *vcpu)
+void update_exception_bitmap(struct kvm_vcpu *vcpu)
{
u32 eb;
}
#endif
-static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
+void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct vmcs_host_state *host_state;
* Switches to specified vcpu, until a matching vcpu_put(), but assumes
* vcpu mutex is already taken.
*/
-static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
pi_set_sn(pi_desc);
}
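/*
 * Illustrative sketch, not part of this patch: now that load/put are
 * non-static, an outside caller is expected to pair them under the
 * vcpu mutex, along the lines of:
 *
 *	mutex_lock(&vcpu->mutex);
 *	vmx_vcpu_load(vcpu, get_cpu());
 *	... touch VMCS/vCPU state ...
 *	vmx_vcpu_put(vcpu);
 *	put_cpu();
 *	mutex_unlock(&vcpu->mutex);
 */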
-static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
+void vmx_vcpu_put(struct kvm_vcpu *vcpu)
{
vmx_vcpu_pi_put(vcpu);
(fields->cr4_read_shadow & fields->cr4_guest_host_mask);
}
-static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
+unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
{
unsigned long rflags, save_rflags;
return to_vmx(vcpu)->rflags;
}
-static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
+void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
unsigned long old_rflags = vmx_get_rflags(vcpu);
to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
}
-static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
+u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
int ret = 0;
return ret;
}
-static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
+void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
u32 interruptibility = interruptibility_old;
kvm_mmu_reset_context(vcpu);
}
-static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
+void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
}
}
-static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
+void ept_save_pdptrs(struct kvm_vcpu *vcpu)
{
struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
#define nested_guest_cr4_valid nested_cr4_valid
#define nested_host_cr4_valid nested_cr4_valid
-static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
-
static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
unsigned long cr0,
struct kvm_vcpu *vcpu)
*hw_cr0 &= ~X86_CR0_WP;
}
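/*
 * Assumed rationale for the helper above: with EPT the guest walks its
 * own page tables, so a guest that clears CR0.WP must see WP clear in
 * the CR0 the hardware actually runs with; shadow paging, by contrast,
 * keeps WP forced on in hw_cr0.
 */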
-static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long hw_cr0;
return eptp;
}
-static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
+void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
struct kvm *kvm = vcpu->kvm;
unsigned long guest_cr3;
vmcs_writel(GUEST_CR3, guest_cr3);
}
-static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	/*
	 * Pass through host's Machine Check Enable value to hw_cr4, which
	 * is in force while we are in guest mode.  Do not let guests control
	 * this bit.
	 */
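	/* The assignment the comment above documents (assumed from context): */
	unsigned long hw_cr4 = (cr4_read_shadow() & X86_CR4_MCE) |
			       (cr4 & ~X86_CR4_MCE);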
return 0;
}
-static void vmx_get_segment(struct kvm_vcpu *vcpu,
- struct kvm_segment *var, int seg)
+void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 ar;
return vmx_read_guest_seg_base(to_vmx(vcpu), seg);
}
-static int vmx_get_cpl(struct kvm_vcpu *vcpu)
+int vmx_get_cpl(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
return ar;
}
-static void vmx_set_segment(struct kvm_vcpu *vcpu,
- struct kvm_segment *var, int seg)
+void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
return r;
}
-static int allocate_vpid(void)
+int allocate_vpid(void)
{
int vpid;
return vpid;
}
-static void free_vpid(int vpid)
+void free_vpid(int vpid)
{
	if (!enable_vpid || vpid == 0)
		return;
	/* Release the VPID back to the allocation bitmap. */
	spin_lock(&vmx_vpid_lock);
	__clear_bit(vpid, vmx_vpid_bitmap);
	spin_unlock(&vmx_vpid_lock);
}
}
-static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu)
+void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
* Note that host-state that does change is set elsewhere. E.g., host-state
* that is set differently for each CPU is set in vmx_vcpu_load(), not here.
*/
-static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
+void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
{
u32 low32, high32;
unsigned long tmpl;
vmcs_write64(HOST_IA32_EFER, host_efer);
}
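/*
 * For contrast with the comment above (sketch; field names assumed from
 * vmx_vcpu_load() in this era): host state that differs per CPU is
 * refreshed on migration rather than set here, e.g.
 *
 *	vmcs_writel(HOST_TR_BASE,
 *		    (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss);
 *	vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt);
 */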
-static void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
+void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
{
vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
if (enable_ept)
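		/* assumed from context: CR4.PGE is guest-owned under EPT */
		vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;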
vmx_clear_hlt(vcpu);
}
-static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
+bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
bool masked;
return masked;
}
-static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
+void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
vmcs_write32(TPR_THRESHOLD, irr);
}
-static void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
+void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
{
u32 sec_exec_control;
spinlock_t ept_pointer_lock;
};
+void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
+void vmx_vcpu_put(struct kvm_vcpu *vcpu);
+int allocate_vpid(void);
+void free_vpid(int vpid);
+void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
+void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
+int vmx_get_cpl(struct kvm_vcpu *vcpu);
+unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
+void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
+u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
+void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
+void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
+void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
+void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
+int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
+void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
+void ept_save_pdptrs(struct kvm_vcpu *vcpu);
+void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
+void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
+u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
+void update_exception_bitmap(struct kvm_vcpu *vcpu);
+void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
+bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
+void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
+void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
+struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr);
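/*
 * Illustrative use (hedged): nested setup code can now drive the vCPU
 * through the same setters vmx.c uses internally, e.g. from a
 * prepare_vmcs02()-style path:
 *
 *	vmx_set_cr0(vcpu, vmcs12->guest_cr0);
 *	vmx_set_cr4(vcpu, vmcs12->guest_cr4);
 *	vmx_set_efer(vcpu, vcpu->arch.efer);
 */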
+
#define POSTED_INTR_ON 0
#define POSTED_INTR_SN 1