The first of these two routines is invoked after map_vm_area()
has installed the page table entries. The second is invoked
- before unmap_vm_area() deletes the page table entries.
+ before unmap_kernel_range() deletes the page table entries.
There exists another whole class of cpu cache issues which currently
require a whole different set of interfaces to handle properly.
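For illustration only, a minimal sketch (not part of this patch) of the ordering described above, using a hypothetical teardown helper: the cache flush for the kernel virtual range is issued before unmap_kernel_range() removes the page table entries, and unmap_kernel_range() itself takes care of the kernel TLB flush.

        #include <linux/vmalloc.h>
        #include <asm/cacheflush.h>

        /* Hypothetical helper, shown only to illustrate the ordering. */
        static void example_teardown(struct vm_struct *area)
        {
                unsigned long start = (unsigned long)area->addr;
                unsigned long end = start + area->size;

                flush_cache_vunmap(start, end);        /* before the PTEs go away */
                unmap_kernel_range(start, area->size); /* clears PTEs, flushes TLB */
        }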
for (p = &imlist ; (tmp = *p) ; p = &tmp->next) {
if (tmp->addr == addr) {
*p = tmp->next;
- unmap_vm_area(tmp);
+ unmap_kernel_range((unsigned long)tmp->addr,
+ tmp->size);
kfree(tmp);
mutex_unlock(&imlist_mutex);
return;
/*
* Unmap an IO region and remove it from imalloc'd list.
* Access to IO memory should be serialized by driver.
- * This code is modeled after vmalloc code - unmap_vm_area()
*
* XXX what about calls before mem_init_done (ie python_countermeasures())
*/
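The comment above leaves serialization of the IO region to the driver; a minimal sketch of one way a driver might honour that, with a hypothetical lock and register mapping (names are illustrative, not part of this patch):

        #include <linux/mutex.h>
        #include <linux/io.h>

        static DEFINE_MUTEX(mydev_io_lock);  /* hypothetical per-driver lock */
        static void __iomem *mydev_regs;     /* hypothetical mapped IO region */

        static void mydev_release_regs(void)
        {
                /*
                 * The unmap path does not serialize against users of the
                 * mapping, so nothing may still touch mydev_regs once we
                 * start tearing it down.
                 */
                mutex_lock(&mydev_io_lock);
                if (mydev_regs) {
                        iounmap(mydev_regs);
                        mydev_regs = NULL;
                }
                mutex_unlock(&mydev_io_lock);
        }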
unsigned long flags, int node,
gfp_t gfp_mask);
extern struct vm_struct *remove_vm_area(void *addr);
+
extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
struct page ***pages);
-extern void unmap_vm_area(struct vm_struct *area);
+extern void unmap_kernel_range(unsigned long addr, unsigned long size);
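With the header change above, a subsystem that keeps its own bookkeeping (as the powerpc imalloc list does) can tear a mapping down by raw address and size, without holding a struct vm_struct. A minimal sketch with a hypothetical descriptor type, not part of this patch:

        #include <linux/vmalloc.h>
        #include <linux/slab.h>

        /* Hypothetical private bookkeeping, similar in spirit to imalloc. */
        struct my_region {
                void *addr;
                unsigned long size;
        };

        static void my_region_free(struct my_region *r)
        {
                /* The new interface only needs the range, not a vm_struct. */
                unmap_kernel_range((unsigned long)r->addr, r->size);
                kfree(r);
        }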
/*
* Internals. Dont't use..
} while (pud++, addr = next, addr != end);
}
-void unmap_vm_area(struct vm_struct *area)
+void unmap_kernel_range(unsigned long addr, unsigned long size)
{
pgd_t *pgd;
unsigned long next;
- unsigned long addr = (unsigned long) area->addr;
- unsigned long end = addr + area->size;
+ unsigned long start = addr;
+ unsigned long end = addr + size;
BUG_ON(addr >= end);
pgd = pgd_offset_k(addr);
continue;
vunmap_pud_range(pgd, addr, next);
} while (pgd++, addr = next, addr != end);
- flush_tlb_kernel_range((unsigned long) area->addr, end);
+ flush_tlb_kernel_range(start, end);
+}
+
+static void unmap_vm_area(struct vm_struct *area)
+{
+ unmap_kernel_range((unsigned long)area->addr, area->size);
}
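Note that external callers now go through unmap_kernel_range() and inherit its BUG_ON(): the range must be non-empty. A small, hedged sketch of a defensive caller (the helper name is made up):

        #include <linux/vmalloc.h>

        static void my_unmap_range(unsigned long addr, unsigned long size)
        {
                if (!size)      /* unmap_kernel_range() BUGs on an empty range */
                        return;
                unmap_kernel_range(addr, size);
                /*
                 * The page table entries and the kernel TLB entries for the
                 * range are now gone; the backing pages are not freed here.
                 */
        }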
static int vmap_pte_range(pmd_t *pmd, unsigned long addr,