ba,pt %xcc, sun4v_dtlb_load
mov %g5, %g3
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+kvmap_vmemmap:
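+ /* On entry %g4 holds the faulting vaddr and %g5 holds
+ * VMEMMAP_BASE. Turn the offset into a vmemmap_table index:
+ * each entry covers a 4MB chunk (hence the >> 22) and is 8
+ * bytes wide (hence the << 3). The entry is a ready-made TTE,
+ * loaded into %g5 in the delay slot for kvmap_dtlb_load.
+ */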
+ sub %g4, %g5, %g5
+ srlx %g5, 22, %g5
+ sethi %hi(vmemmap_table), %g1
+ sllx %g5, 3, %g5
+ or %g1, %lo(vmemmap_table), %g1
+ ba,pt %xcc, kvmap_dtlb_load
+ ldx [%g1 + %g5], %g5
+#endif
+
kvmap_dtlb_nonlinear:
/* Catch kernel NULL pointer derefs. */
sethi %hi(PAGE_SIZE), %g5
cmp %g4, %g5
bleu,pn %xcc, kvmap_dtlb_longpath
nop
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ /* Do not use the TSB for vmemmap. */
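+ /* VMEMMAP_BASE has bit 33 set, out of reach of sethi's 22-bit
+ * immediate, so the constant is built with a mov and a shift.
+ */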
+ mov (VMEMMAP_BASE >> 24), %g5
+ sllx %g5, 24, %g5
+ cmp %g4, %g5
+ bgeu,pn %xcc, kvmap_vmemmap
+ nop
+#endif
+
KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
kvmap_dtlb_tsbmiss:
unsigned long _PAGE_CACHE __read_mostly;
EXPORT_SYMBOL(_PAGE_CACHE);
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+
+#define VMEMMAP_CHUNK_SHIFT 22
+#define VMEMMAP_CHUNK (1UL << VMEMMAP_CHUNK_SHIFT)
+#define VMEMMAP_CHUNK_MASK ~(VMEMMAP_CHUNK - 1UL)
+#define VMEMMAP_ALIGN(x) (((x)+VMEMMAP_CHUNK-1UL)&VMEMMAP_CHUNK_MASK)
+
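+/* One vmemmap_table entry per 4MB chunk of the virtual struct page
+ * array: VMEMMAP_SIZE is the number of chunks needed to cover the
+ * page structs for the maximum physical address space.
+ */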
+#define VMEMMAP_SIZE ((((1UL << MAX_PHYSADDR_BITS) >> PAGE_SHIFT) * \
+ sizeof(struct page)) >> VMEMMAP_CHUNK_SHIFT)
+unsigned long vmemmap_table[VMEMMAP_SIZE];
+
+int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
+{
+ unsigned long vstart = (unsigned long) start;
+ unsigned long vend = (unsigned long) (start + nr);
+ unsigned long phys_start = (vstart - VMEMMAP_BASE);
+ unsigned long phys_end = (vend - VMEMMAP_BASE);
+ unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK;
+ unsigned long end = VMEMMAP_ALIGN(phys_end);
+ unsigned long pte_base;
+
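+ /* Template TTE for each chunk: valid, 4MB page size, cacheable,
+ * privileged, writable. Default to the sun4u bit layout; sun4v
+ * (hypervisor) machines get the same attributes in the sun4v
+ * layout.
+ */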
+ pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
+ _PAGE_CP_4U | _PAGE_CV_4U |
+ _PAGE_P_4U | _PAGE_W_4U);
+ if (tlb_type == hypervisor)
+ pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
+ _PAGE_CP_4V | _PAGE_CV_4V |
+ _PAGE_P_4V | _PAGE_W_4V);
+
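+ /* Walk the range one 4MB chunk at a time, allocating backing
+ * memory only for chunks that do not yet have a valid TTE.
+ */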
+ for (; addr < end; addr += VMEMMAP_CHUNK) {
+ unsigned long *vmem_pp =
+ vmemmap_table + (addr >> VMEMMAP_CHUNK_SHIFT);
+ void *block;
+
+ if (!(*vmem_pp & _PAGE_VALID)) {
+ block = vmemmap_alloc_block(VMEMMAP_CHUNK, node);
+ if (!block)
+ return -ENOMEM;
+
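+ /* Install the TTE: the template bits plus the physical
+ * address of the new block.
+ */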
+ *vmem_pp = pte_base | __pa(block);
+
+ printk(KERN_INFO "[%p-%p] page_structs=%lu "
+ "node=%d entry=%lu/%lu\n", start, block, nr,
+ node,
+ addr >> VMEMMAP_CHUNK_SHIFT,
+ VMEMMAP_SIZE >> VMEMMAP_CHUNK_SHIFT);
+ }
+ }
+ return 0;
+}
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
+
static void prot_init_common(unsigned long page_none,
unsigned long page_shared,
unsigned long page_copy,
#define HI_OBP_ADDRESS _AC(0x0000000100000000,UL)
#define VMALLOC_START _AC(0x0000000100000000,UL)
#define VMALLOC_END _AC(0x0000000200000000,UL)
+#define VMEMMAP_BASE _AC(0x0000000200000000,UL)
+
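+/* With CONFIG_SPARSEMEM_VMEMMAP, asm-generic/memory_model.h defines
+ * pfn_to_page()/page_to_pfn() directly in terms of this virtual
+ * struct page array.
+ */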
+#define vmemmap ((struct page *)VMEMMAP_BASE)
/* XXX All of this needs to be rethought so we can take advantage
* XXX cheetah's full 64-bit virtual address space, ie. no more hole