#endif
}
-/*
- * find out the number of processor cores on the die
- */
-static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
-{
-	unsigned int eax, t;
-
-	if (c->cpuid_level < 4)
-		return 1;
-
-	cpuid_count(4, 0, &eax, &t, &t, &t);
-
-	if (eax & 0x1f)
-		return ((eax >> 26) + 1);
-	else
-		return 1;
-}
-
-static void __cpuinit srat_detect_node(void)
-{
-#ifdef CONFIG_NUMA
-	unsigned node;
-	int cpu = smp_processor_id();
-	int apicid = hard_smp_processor_id();
-
-	/* Don't do the funky fallback heuristics the AMD version employs
-	   for now. */
-	node = apicid_to_node[apicid];
-	if (node == NUMA_NO_NODE || !node_online(node))
-		node = first_node(node_online_map);
-	numa_set_node(cpu, node);
-
-	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
-#endif
-}
-
-static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
-{
-	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
-	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
-		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
-}
-
-static void __cpuinit init_intel(struct cpuinfo_x86 *c)
+static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
-	/* Cache sizes */
-	unsigned n;
-
-	init_intel_cacheinfo(c);
-	if (c->cpuid_level > 9) {
-		unsigned eax = cpuid_eax(10);
-		/* Check for version and the number of counters */
-		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
-			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
-	}
-
-	if (cpu_has_ds) {
-		unsigned int l1, l2;
-		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
-		if (!(l1 & (1<<11)))
-			set_cpu_cap(c, X86_FEATURE_BTS);
-		if (!(l1 & (1<<12)))
-			set_cpu_cap(c, X86_FEATURE_PEBS);
+	char *v = c->x86_vendor_id;
+	int i;
+	static int printed;
+
+	for (i = 0; i < X86_VENDOR_NUM; i++) {
+		if (cpu_devs[i]) {
+			if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
+			    (cpu_devs[i]->c_ident[1] &&
+			     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
+				c->x86_vendor = i;
+				this_cpu = cpu_devs[i];
+				return;
+			}
+		}
}
-
-	if (cpu_has_bts)
-		ds_init_intel(c);
-
-	n = c->extended_cpuid_level;
-	if (n >= 0x80000008) {
-		unsigned eax = cpuid_eax(0x80000008);
-		c->x86_virt_bits = (eax >> 8) & 0xff;
-		c->x86_phys_bits = eax & 0xff;
-		/* CPUID workaround for Intel 0F34 CPU */
-		if (c->x86_vendor == X86_VENDOR_INTEL &&
-		    c->x86 == 0xF && c->x86_model == 0x3 &&
-		    c->x86_mask == 0x4)
-			c->x86_phys_bits = 36;
+	if (!printed) {
+		printed++;
+		printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
+		printk(KERN_ERR "CPU: Your system may be unstable.\n");
}
-
-	if (c->x86 == 15)
-		c->x86_cache_alignment = c->x86_clflush_size * 2;
-	if (c->x86 == 6)
-		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
-	set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
-	c->x86_max_cores = intel_num_cpu_cores(c);
-
-	srat_detect_node();
-}
-
-static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
-{
-	if (c->x86 == 0x6 && c->x86_model >= 0xf)
-		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+	c->x86_vendor = X86_VENDOR_UNKNOWN;
}
-static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
+static void __init early_cpu_support_print(void)
{
-	/* Cache sizes */
-	unsigned n;
-
-	n = c->extended_cpuid_level;
-	if (n >= 0x80000008) {
-		unsigned eax = cpuid_eax(0x80000008);
-		c->x86_virt_bits = (eax >> 8) & 0xff;
-		c->x86_phys_bits = eax & 0xff;
-	}
-
-	if (c->x86 == 0x6 && c->x86_model >= 0xf) {
-		c->x86_cache_alignment = c->x86_clflush_size * 2;
-		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
-		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
+	int i, j;
+	struct cpu_dev *cpu_devx;
+
+	printk("KERNEL supported cpus:\n");
+	for (i = 0; i < X86_VENDOR_NUM; i++) {
+		cpu_devx = cpu_devs[i];
+		if (!cpu_devx)
+			continue;
+		for (j = 0; j < 2; j++) {
+			if (!cpu_devx->c_ident[j])
+				continue;
+			printk(" %s %s\n", cpu_devx->c_vendor,
+			       cpu_devx->c_ident[j]);
+		}
}
-	set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
}
-static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
+static void __init early_cpu_init(void)
{
-	char *v = c->x86_vendor_id;
+	struct cpu_vendor_dev *cvdev;
-	if (!strcmp(v, "AuthenticAMD"))
-		c->x86_vendor = X86_VENDOR_AMD;
-	else if (!strcmp(v, "GenuineIntel"))
-		c->x86_vendor = X86_VENDOR_INTEL;
-	else if (!strcmp(v, "CentaurHauls"))
-		c->x86_vendor = X86_VENDOR_CENTAUR;
-	else
-		c->x86_vendor = X86_VENDOR_UNKNOWN;
+	for (cvdev = __x86cpuvendor_start;
+	     cvdev < __x86cpuvendor_end;
+	     cvdev++)
+		cpu_devs[cvdev->vendor] = cvdev->cpu_dev;
+	early_cpu_support_print();
}
/* Do some early cpuid on the boot CPU to get some parameter that are
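For readers outside the patch context: the point of the hunk above is to replace a hard-coded strcmp chain with a table lookup over cpu_devs[], populated from the __x86cpuvendor section. A minimal userspace sketch of that lookup, with a mocked-up table (struct trimmed to the fields the loop uses; the vendor entries and the second AMD ID string are illustrative, not the kernel's tables):

	#include <stdio.h>
	#include <string.h>

	enum { X86_VENDOR_INTEL, X86_VENDOR_AMD, X86_VENDOR_CENTAUR, X86_VENDOR_NUM };

	struct cpu_dev {
		const char *c_vendor;
		const char *c_ident[2];	/* some vendors report two ID strings */
	};

	static const struct cpu_dev intel   = { "Intel",   { "GenuineIntel", NULL } };
	static const struct cpu_dev amd     = { "AMD",     { "AuthenticAMD", "AMDisbetter!" } };
	static const struct cpu_dev centaur = { "Centaur", { "CentaurHauls", NULL } };

	static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {
		[X86_VENDOR_INTEL]   = &intel,
		[X86_VENDOR_AMD]     = &amd,
		[X86_VENDOR_CENTAUR] = &centaur,
	};

	/* Same shape as the patched get_cpu_vendor(): scan the table instead
	   of hard-coding one branch per vendor string. */
	static int vendor_index(const char *v)
	{
		int i;

		for (i = 0; i < X86_VENDOR_NUM; i++) {
			if (!cpu_devs[i])
				continue;
			if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
			    (cpu_devs[i]->c_ident[1] &&
			     !strcmp(v, cpu_devs[i]->c_ident[1])))
				return i;
		}
		return -1;	/* unknown vendor */
	}

	int main(void)
	{
		printf("%d\n", vendor_index("GenuineIntel"));	/* 0 */
		printf("%d\n", vendor_index("SomethingElse"));	/* -1 */
		return 0;
	}

Adding a vendor then means registering one more table entry rather than editing the lookup, which is the design point of the patch.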
preempt_enable();
}
-pteval_t xen_pte_val(pte_t pte)
+pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
-	pteval_t ret = pte.pte;
+	/* Just return the pte as-is.  We preserve the bits on commit */
+	return *ptep;
+}
+
+void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
+				 pte_t *ptep, pte_t pte)
+{
+	struct mmu_update u;
+
+	xen_mc_batch();
 
-	if (ret & _PAGE_PRESENT)
-		ret = machine_to_phys(XMADDR(ret)).paddr | _PAGE_PRESENT;
+	u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
+	u.val = pte_val_ma(pte);
+	extend_mmu_update(&u);
 
-	return ret;
+	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
 
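A reading aid for the pair above: xen_ptep_modify_prot_start() can return the pte unchanged because the commit side queues the new value as an mmu_update inside a multicall batch (with MMU_PT_UPDATE_PRESERVE_AD so the hypervisor keeps the accessed/dirty bits), and nothing hits the hypervisor until the batch is issued. A toy mock of that batch/queue/issue flow; none of these stand-ins are the real Xen interfaces:

	#include <stdint.h>
	#include <stdio.h>

	struct mmu_update { uint64_t ptr, val; };

	static struct mmu_update queue[16];
	static int queued;

	static void xen_mc_batch(void) { queued = 0; }	/* open a batch */

	static void extend_mmu_update(const struct mmu_update *u)
	{
		if (queued < 16)
			queue[queued++] = *u;	/* queued, not yet applied */
	}

	static void xen_mc_issue(void)	/* flush: one "hypercall" for the lot */
	{
		printf("flushing %d update(s) in one hypercall\n", queued);
		queued = 0;
	}

	int main(void)
	{
		struct mmu_update u = { .ptr = 0x1000 | 1, .val = 0xabc };

		xen_mc_batch();
		extend_mmu_update(&u);	/* commit queues the pte write... */
		xen_mc_issue();		/* ...a single flush applies it */
		return 0;
	}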
-pgdval_t xen_pgd_val(pgd_t pgd)
+/* Assume pteval_t is equivalent to all the other *val_t types. */
+static pteval_t pte_mfn_to_pfn(pteval_t val)
{
-	pgdval_t ret = pgd.pgd;
-	if (ret & _PAGE_PRESENT)
-		ret = machine_to_phys(XMADDR(ret)).paddr | _PAGE_PRESENT;
-	return ret;
+	if (val & _PAGE_PRESENT) {
+		unsigned long mfn = (val & PTE_MASK) >> PAGE_SHIFT;
+		pteval_t flags = val & ~PTE_MASK;
-		val = (mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
+		val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
+	}
+
+	return val;
}
-pte_t xen_make_pte(pteval_t pte)
+static pteval_t pte_pfn_to_mfn(pteval_t val)
{
-	if (pte & _PAGE_PRESENT) {
-		pte = phys_to_machine(XPADDR(pte)).maddr;
-		pte &= ~(_PAGE_PCD | _PAGE_PWT);
+	if (val & _PAGE_PRESENT) {
+		unsigned long pfn = (val & PTE_MASK) >> PAGE_SHIFT;
+		pteval_t flags = val & ~PTE_MASK;
-		val = (pfn_to_mfn(pfn) << PAGE_SHIFT) | flags;
+		val = ((pteval_t)pfn_to_mfn(pfn) << PAGE_SHIFT) | flags;
}
-	return (pte_t){ .pte = pte };
+	return val;
}
-pgd_t xen_make_pgd(pgdval_t pgd)
+pteval_t xen_pte_val(pte_t pte)
{
-	if (pgd & _PAGE_PRESENT)
-		pgd = phys_to_machine(XPADDR(pgd)).maddr;
+	return pte_mfn_to_pfn(pte.pte);
+}
-	return (pgd_t){ pgd };
+
+pgdval_t xen_pgd_val(pgd_t pgd)
+{
+	return pte_mfn_to_pfn(pgd.pgd);
+}
+
+pte_t xen_make_pte(pteval_t pte)
+{
+	pte = pte_pfn_to_mfn(pte);
+	return native_make_pte(pte);
+}
+
+pgd_t xen_make_pgd(pgdval_t pgd)
+{
+	pgd = pte_pfn_to_mfn(pgd);
+	return native_make_pgd(pgd);
}
pmdval_t xen_pmd_val(pmd_t pmd)
{
-	pmdval_t ret = native_pmd_val(pmd);
-	if (ret & _PAGE_PRESENT)
-		ret = machine_to_phys(XMADDR(ret)).paddr | _PAGE_PRESENT;
-	return ret;
+	return pte_mfn_to_pfn(pmd.pmd);
}
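One detail in the pte_mfn_to_pfn()/pte_pfn_to_mfn() hunks above is easy to miss: the one-line change that adds a (pteval_t) cast before the shift. On 32-bit PAE, unsigned long is 32 bits while pteval_t is 64, so without the cast the shift happens in 32-bit arithmetic and silently drops the high bits of large frame numbers. A standalone sketch, with uint32_t standing in for the 32-bit unsigned long and an arbitrary frame number chosen to show the truncation:

	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t pteval_t;	/* as on 32-bit PAE */
	#define PAGE_SHIFT 12

	int main(void)
	{
		uint32_t pfn = 0x12345678;	/* illustrative large frame number */

		/* Shift done in 32 bits, then widened: high bits already lost. */
		pteval_t wrong = (pteval_t)(pfn << PAGE_SHIFT);
		/* Widen first, then shift in 64 bits: full value preserved. */
		pteval_t right = (pteval_t)pfn << PAGE_SHIFT;

		printf("wrong: %#llx\n", (unsigned long long)wrong); /* 0x45678000 */
		printf("right: %#llx\n", (unsigned long long)right); /* 0x12345678000 */
		return 0;
	}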
-#ifdef CONFIG_X86_PAE
-void xen_set_pud(pud_t *ptr, pud_t val)
+
+void xen_set_pud_hyper(pud_t *ptr, pud_t val)
{
-	struct multicall_space mcs;
-	struct mmu_update *u;
+	struct mmu_update u;
preempt_disable();
xen_mc_batch();
}
-#ifdef CONFIG_X86_PAE
-	level = MMUEXT_PIN_L3_TABLE;
-#else
-	level = MMUEXT_PIN_L2_TABLE;
-#endif
+	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
+	xen_mc_issue(0);
+}
+
+/*
+ * On save, we need to pin all pagetables to make sure they get their
+ * mfns turned into pfns.  Search the list for any unpinned pgds and pin
+ * them (unpinned pgds are not currently in use, probably because the
+ * process is under construction or destruction).
+ */
+void xen_mm_pin_all(void)
+{
+	unsigned long flags;
+	struct page *page;
 
-	xen_do_pin(level, PFN_DOWN(__pa(pgd)));
+	spin_lock_irqsave(&pgd_lock, flags);
 
-	xen_mc_issue(0);
+	list_for_each_entry(page, &pgd_list, lru) {
+		if (!PagePinned(page)) {
+			xen_pgd_pin((pgd_t *)page_address(page));
+			SetPageSavePinned(page);
+		}
+	}
+
+	spin_unlock_irqrestore(&pgd_lock, flags);
}
 
/* The init_mm pagetable is really pinned as soon as its created, but
that's before we have page structures to store the bits. So do all
the book-keeping now. */
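As a reading aid for xen_mm_pin_all() above: the walk pins only pagetables that are not already pinned, and tags exactly those with SavePinned so the matching restore path can unpin only the set it pinned. A toy model of that bookkeeping under mocked types (no real struct page flags involved; the entries are illustrative):

	#include <stdbool.h>
	#include <stdio.h>

	struct pgd_page {
		const char *name;
		bool pinned, save_pinned;
	};

	static void pin(struct pgd_page *p) { p->pinned = true; }

	int main(void)
	{
		struct pgd_page pgds[] = {
			{ "init_mm",          true,  false }, /* already pinned: skip */
			{ "fork-in-progress", false, false }, /* unpinned: pin + mark */
		};
		unsigned i, n = sizeof(pgds) / sizeof(pgds[0]);

		for (i = 0; i < n; i++) {
			if (!pgds[i].pinned) {
				pin(&pgds[i]);
				pgds[i].save_pinned = true; /* remember for restore */
			}
		}
		for (i = 0; i < n; i++)
			printf("%s: pinned=%d save_pinned=%d\n",
			       pgds[i].name, pgds[i].pinned, pgds[i].save_pinned);
		return 0;
	}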