struct page **pages, unsigned int count);
extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
- struct gnttab_map_grant_ref *kmap_ops,
+ struct gnttab_unmap_grant_ref *kunmap_ops,
struct page **pages, unsigned int count);
bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping);
int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
- struct gnttab_map_grant_ref *kmap_ops,
+ struct gnttab_unmap_grant_ref *kunmap_ops,
struct page **pages, unsigned int count)
{
int i;
struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count);
extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
- struct gnttab_map_grant_ref *kmap_ops,
+ struct gnttab_unmap_grant_ref *kunmap_ops,
struct page **pages, unsigned int count);
extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
}
static int m2p_remove_override(struct page *page,
- struct gnttab_map_grant_ref *kmap_op,
+ struct gnttab_unmap_grant_ref *kunmap_op,
unsigned long mfn)
{
unsigned long flags;
list_del(&page->lru);
spin_unlock_irqrestore(&m2p_override_lock, flags);
- if (kmap_op != NULL) {
+ if (kunmap_op != NULL) {
if (!PageHighMem(page)) {
struct multicall_space mcs;
struct gnttab_unmap_and_replace *unmap_op;
* issued. In this case handle is going to -1 because
* it hasn't been modified yet.
*/
- if (kmap_op->handle == -1)
+ if (kunmap_op->handle == -1)
xen_mc_flush();
/*
- * Now if kmap_op->handle is negative it means that the
+ * Now if kunmap_op->handle is negative it means that the
* hypercall actually returned an error.
*/
- if (kmap_op->handle == GNTST_general_error) {
+ if (kunmap_op->handle == GNTST_general_error) {
pr_warn("m2p_remove_override: pfn %lx mfn %lx, failed to modify kernel mappings",
pfn, mfn);
put_balloon_scratch_page();
mcs = __xen_mc_entry(
sizeof(struct gnttab_unmap_and_replace));
unmap_op = mcs.args;
- unmap_op->host_addr = kmap_op->host_addr;
+ unmap_op->host_addr = kunmap_op->host_addr;
unmap_op->new_addr = scratch_page_address;
- unmap_op->handle = kmap_op->handle;
+ unmap_op->handle = kunmap_op->handle;
MULTI_grant_table_op(mcs.mc,
GNTTABOP_unmap_and_replace, unmap_op, 1);
xen_mc_issue(PARAVIRT_LAZY_MMU);
- kmap_op->host_addr = 0;
put_balloon_scratch_page();
}
}
}
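For reference, the two handle checks above are made against the same numeric value at two points in time. A condensed sketch that only restates the logic in the hunk above (illustrative, not part of the patch):

/*
 * Sketch, not part of this patch. GNTST_general_error is numerically -1,
 * so the first test also matches a handle that simply has not been
 * written yet; that is why any pending multicall is flushed before the
 * value is treated as an error.
 */
if (kunmap_op->handle == -1)
	xen_mc_flush();			/* map op may still be queued: issue it now */
if (kunmap_op->handle == GNTST_general_error)
	return -1;			/* the map hypercall really did fail */
/* otherwise the handle is valid and the unmap-and-replace can proceed */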
int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
- struct gnttab_map_grant_ref *kmap_ops,
+ struct gnttab_unmap_grant_ref *kunmap_ops,
struct page **pages, unsigned int count)
{
int i, ret = 0;
if (xen_feature(XENFEAT_auto_translated_physmap))
return 0;
- if (kmap_ops &&
+ if (kunmap_ops &&
!in_interrupt() &&
paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
arch_enter_lazy_mmu_mode();
ClearPagePrivate(pages[i]);
set_phys_to_machine(pfn, pages[i]->index);
- if (kmap_ops)
- ret = m2p_remove_override(pages[i], &kmap_ops[i], mfn);
+ if (kunmap_ops)
+ ret = m2p_remove_override(pages[i], &kunmap_ops[i], mfn);
if (ret)
goto out;
}
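The lazy-MMU test at the top of clear_foreign_p2m_mapping() brackets the per-page loop so the PTE and p2m updates can be batched. A sketch of the pattern, assuming the matching arch_leave_lazy_mmu_mode() call sits after the loop outside the hunk shown (not part of the patch):

bool lazy = false;

if (kunmap_ops &&
    !in_interrupt() &&
    paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
	arch_enter_lazy_mmu_mode();	/* start batching MMU updates */
	lazy = true;
}

/* ... per-page set_phys_to_machine() and m2p_remove_override() calls ... */

if (lazy)
	arch_leave_lazy_mmu_mode();	/* flush the batched updates */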
struct gnttab_map_grant_ref *map_ops;
struct gnttab_unmap_grant_ref *unmap_ops;
struct gnttab_map_grant_ref *kmap_ops;
+ struct gnttab_unmap_grant_ref *kunmap_ops;
struct page **pages;
};
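struct grant_map now carries four parallel op arrays per granted page: map_ops/unmap_ops describe the userspace mapping, kmap_ops/kunmap_ops the kernel one. A sketch of the intended lifecycle for one index i, using only names that appear in the hunks of this patch (illustrative, not part of the patch):

/* map time: describe the kernel mapping and pre-populate its unmap op */
gnttab_set_map_op(&map->kmap_ops[i], address,
		  map->flags | GNTMAP_host_map,
		  map->grants[i].ref, map->grants[i].domid);
gnttab_set_unmap_op(&map->kunmap_ops[i], address,
		    map->flags | GNTMAP_host_map, -1 /* handle not known yet */);

/* after gnttab_map_refs() succeeds: record the handles the hypervisor returned */
map->unmap_ops[i].handle = map->map_ops[i].handle;
if (use_ptemod)
	map->kunmap_ops[i].handle = map->kmap_ops[i].handle;

/* unmap time: the pre-populated ops can be handed straight to the hypercall */
gnttab_unmap_refs(map->unmap_ops + i,
		  use_ptemod ? map->kunmap_ops + i : NULL,
		  map->pages + i, 1);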
kfree(map->map_ops);
kfree(map->unmap_ops);
kfree(map->kmap_ops);
+ kfree(map->kunmap_ops);
kfree(map);
}
add->map_ops = kcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL);
add->unmap_ops = kcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL);
add->kmap_ops = kcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL);
+ add->kunmap_ops = kcalloc(count, sizeof(add->kunmap_ops[0]), GFP_KERNEL);
add->pages = kcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
if (NULL == add->grants ||
NULL == add->map_ops ||
NULL == add->unmap_ops ||
NULL == add->kmap_ops ||
+ NULL == add->kunmap_ops ||
NULL == add->pages)
goto err;
add->map_ops[i].handle = -1;
add->unmap_ops[i].handle = -1;
add->kmap_ops[i].handle = -1;
+ add->kunmap_ops[i].handle = -1;
}
add->index = 0;
map->flags | GNTMAP_host_map,
map->grants[i].ref,
map->grants[i].domid);
+ gnttab_set_unmap_op(&map->kunmap_ops[i], address,
+ map->flags | GNTMAP_host_map, -1);
}
}
return err;
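gnttab_set_unmap_op() needs only the kernel virtual address now and the grant handle later. A sketch of what the pre-populated kernel unmap op holds at this point (illustrative, not part of the patch):

struct gnttab_unmap_grant_ref kunmap = {
	.host_addr	= address,	/* kernel linear address of the page */
	.dev_bus_addr	= 0,
	.handle		= -1,		/* patched in once gnttab_map_refs() has returned */
};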
for (i = 0; i < map->count; i++) {
- if (map->map_ops[i].status)
+ if (map->map_ops[i].status) {
err = -EINVAL;
- else {
- BUG_ON(map->map_ops[i].handle == -1);
- map->unmap_ops[i].handle = map->map_ops[i].handle;
- pr_debug("map handle=%d\n", map->map_ops[i].handle);
+ continue;
}
+
+ map->unmap_ops[i].handle = map->map_ops[i].handle;
+ if (use_ptemod)
+ map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
}
return err;
}
}
err = gnttab_unmap_refs(map->unmap_ops + offset,
- use_ptemod ? map->kmap_ops + offset : NULL, map->pages + offset,
+ use_ptemod ? map->kunmap_ops + offset : NULL, map->pages + offset,
pages);
if (err)
return err;
EXPORT_SYMBOL_GPL(gnttab_map_refs);
int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
- struct gnttab_map_grant_ref *kmap_ops,
+ struct gnttab_unmap_grant_ref *kunmap_ops,
struct page **pages, unsigned int count)
{
int ret;
if (ret)
return ret;
- return clear_foreign_p2m_mapping(unmap_ops, kmap_ops, pages, count);
+ return clear_foreign_p2m_mapping(unmap_ops, kunmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count);
int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
- struct gnttab_map_grant_ref *kunmap_ops,
+ struct gnttab_unmap_grant_ref *kunmap_ops,
struct page **pages, unsigned int count);
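With the new prototype, callers hand gnttab_unmap_refs() an array of gnttab_unmap_grant_ref for the kernel mappings instead of reusing the map ops. A minimal, hypothetical single-page caller; kaddr, ref and domid stand in for whatever the surrounding driver already has (a sketch, not part of the patch):

struct gnttab_map_grant_ref map_op, kmap_op;
struct gnttab_unmap_grant_ref unmap_op, kunmap_op;
struct page *page;	/* assumed to be allocated by the caller */
int err;

/* describe the kernel mapping and pre-populate its unmap op */
gnttab_set_map_op(&kmap_op, kaddr, GNTMAP_host_map, ref, domid);
gnttab_set_unmap_op(&kunmap_op, kaddr, GNTMAP_host_map, -1);
/* ... map_op/unmap_op for the other mapping are set up the same way ... */

err = gnttab_map_refs(&map_op, &kmap_op, &page, 1);
if (err || map_op.status != GNTST_okay)
	return err ? err : -EINVAL;

/* copy the handles returned by the hypervisor into the unmap ops */
unmap_op.handle = map_op.handle;
kunmap_op.handle = kmap_op.handle;

/* new signature: the kernel unmap ops are passed directly */
err = gnttab_unmap_refs(&unmap_op, &kunmap_op, &page, 1);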
/* Perform a batch of grant map/copy operations. Retry every batch slot