struct page ***pages);
extern void unmap_kernel_range(unsigned long addr, unsigned long size);
+/* Allocate/destroy a 'vmalloc' VM area. */
+extern struct vm_struct *alloc_vm_area(size_t size);
+extern void free_vm_area(struct vm_struct *area);
+
/*
* Internals. Don't use..
*/
void __attribute__((weak)) vmalloc_sync_all(void)
{
}
+
+
+static int f(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
+{
+ /* apply_to_page_range() does all the hard work. */
+ return 0;
+}
+
+/**
+ * alloc_vm_area - allocate a range of kernel address space
+ * @size: size of the area
+ * @returns: NULL on failure, vm_struct on success
+ *
+ * This function reserves a range of kernel address space, and
+ * allocates pagetables to map that range. No actual mappings
+ * are created. If the kernel address space is not shared
+ * between processes, the new pagetables are synced into the
+ * kernel portion of every process's pagetables.
+ */
+struct vm_struct *alloc_vm_area(size_t size)
+{
+ struct vm_struct *area;
+
+ area = get_vm_area(size, VM_IOREMAP);
+ if (area == NULL)
+ return NULL;
+
+ /*
+ * This ensures that page tables are constructed for this region
+ * of kernel virtual address space and mapped into init_mm.
+ */
+ if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
+ area->size, f, NULL)) {
+ free_vm_area(area);
+ return NULL;
+ }
+
+ /*
+ * Make sure the pagetables are constructed in process kernel
+ * mappings.
+ */
+ vmalloc_sync_all();
+
+ return area;
+}
+EXPORT_SYMBOL_GPL(alloc_vm_area);
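/*
 * A minimal usage sketch of the new interface (hypothetical caller; the
 * names below are illustrative, not part of this patch): a driver
 * reserves one page of kernel address space up front and later installs
 * its own mappings into the returned range, e.g. from arch- or
 * hypervisor-specific code.
 */
#include <linux/errno.h>
#include <linux/vmalloc.h>

static struct vm_struct *example_area;	/* hypothetical */

static int example_reserve(void)
{
	example_area = alloc_vm_area(PAGE_SIZE);
	if (example_area == NULL)
		return -ENOMEM;

	/*
	 * Pagetables now back [example_area->addr, example_area->addr +
	 * example_area->size), but no pages are mapped; the caller must
	 * install its own mappings before touching the range.
	 */
	return 0;
}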
+
+void free_vm_area(struct vm_struct *area)
+{
+ struct vm_struct *ret;
+
+ ret = remove_vm_area(area->addr);
+ BUG_ON(ret != area);
+ kfree(area);
+}
+EXPORT_SYMBOL_GPL(free_vm_area);
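/*
 * Matching teardown for the sketch above (same hypothetical caller): the
 * caller tears down whatever mappings it installed first, then
 * free_vm_area() removes the reservation and frees the vm_struct.
 */
static void example_release(void)
{
	free_vm_area(example_area);
	example_area = NULL;
}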