#include "x86.h"
#define KVM_MAX_VCPUS 4
-#define KVM_ALIAS_SLOTS 4
#define KVM_MEMORY_SLOTS 8
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 4
struct kvm_vcpu_arch arch;
};
-struct kvm_mem_alias {
- gfn_t base_gfn;
- unsigned long npages;
- gfn_t target_gfn;
-};
-
struct kvm_memory_slot {
gfn_t base_gfn;
unsigned long npages;
struct kvm {
struct mutex lock; /* protects everything except vcpus */
struct mm_struct *mm; /* userspace tied to this vm */
- int naliases;
- struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
int nmemslots;
struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
KVM_PRIVATE_MEM_SLOTS];
unsigned int tss_addr;
struct page *apic_access_page;
struct kvm_vm_stat stat;
+ struct kvm_arch arch;
};
/* The guest did something we don't support. */
int i;
struct kvm_mem_alias *alias;
- for (i = 0; i < kvm->naliases; ++i) {
- alias = &kvm->aliases[i];
+ for (i = 0; i < kvm->arch.naliases; ++i) {
+ alias = &kvm->arch.aliases[i];
if (gfn >= alias->base_gfn
&& gfn < alias->base_gfn + alias->npages)
return alias->target_gfn + gfn - alias->base_gfn;
mutex_lock(&kvm->lock);
- p = &kvm->aliases[alias->slot];
+ p = &kvm->arch.aliases[alias->slot];
p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
p->npages = alias->memory_size >> PAGE_SHIFT;
p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;
for (n = KVM_ALIAS_SLOTS; n > 0; --n)
- if (kvm->aliases[n - 1].npages)
+ if (kvm->arch.aliases[n - 1].npages)
break;
- kvm->naliases = n;
+ kvm->arch.naliases = n;
kvm_mmu_zap_all(kvm);
#define IOPL_SHIFT 12
+#define KVM_ALIAS_SLOTS 4
+
#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64
#define KVM_NUM_MMU_PAGES 1024
struct x86_emulate_ctxt emulate_ctxt;
};
+/*
+ * A guest memory alias: remaps [base_gfn, base_gfn + npages) onto the
+ * range starting at target_gfn (guest frame numbers, not host pages).
+ */
+struct kvm_mem_alias {
+ gfn_t base_gfn;
+ unsigned long npages;
+ gfn_t target_gfn;
+};
+
+/*
+ * Per-VM x86 architecture state. The alias table lives here rather than
+ * in the generic struct kvm, since aliasing is x86-specific.
+ * naliases is the index one past the last slot with npages != 0.
+ */
+struct kvm_arch{
+ int naliases;
+ struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
+};
+
struct kvm_vcpu_stat {
u32 pf_fixed;
u32 pf_guest;