int remove_existing_mapping = 0;
int ret = 0;
- mutex_lock(&umem->odp_data->umem_mutex);
/*
* Note: we avoid writing if seq is different from the initial seq, to
* handle case of a racing notifier. This check also allows us to bail
* early if we have a notifier running in parallel with us.
*/
}
out:
- mutex_unlock(&umem->odp_data->umem_mutex);
-
/* On Demand Paging - avoid pinning the page */
if (umem->context->invalidate_range || !stored_page)
put_page(page);
bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
user_virt += npages << PAGE_SHIFT;
+ mutex_lock(&umem->odp_data->umem_mutex);
for (j = 0; j < npages; ++j) {
ret = ib_umem_odp_map_dma_single_page(
umem, k, base_virt_addr, local_page_list[j],
access_mask, current_seq);
if (ret < 0)
break;
k++;
}
+ mutex_unlock(&umem->odp_data->umem_mutex);
if (ret < 0) {
/* Release left over pages when handling errors. */
/* Note that during the run of this function, the
* notifiers_count of the MR is > 0, preventing any racing
* faults from completion. We might be racing with other
* invalidations, so we must make sure we free each page only
* once. */
+ mutex_lock(&umem->odp_data->umem_mutex);
for (addr = virt; addr < bound; addr += (u64)umem->page_size) {
idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
- mutex_lock(&umem->odp_data->umem_mutex);
if (umem->odp_data->page_list[idx]) {
struct page *page = umem->odp_data->page_list[idx];
struct page *head_page = compound_head(page);
umem->odp_data->page_list[idx] = NULL;
umem->odp_data->dma_list[idx] = 0;
}
- mutex_unlock(&umem->odp_data->umem_mutex);
}
+ mutex_unlock(&umem->odp_data->umem_mutex);
}
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
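
Taken together, the hunks above change where umem_mutex is held: ib_umem_odp_map_dma_single_page no longer locks and unlocks internally but runs with the mutex already held by its caller, and both the map loop and the unmap loop now take the lock once around the whole batch of pages rather than once per page. The stand-alone sketch below is a userspace analogue of that pattern, not kernel code; every name in it (map_single_page, map_pages, unmap_pages, mutex_held) is invented for illustration.

/*
 * Userspace analogue only (assumed names, not kernel code): the helper
 * asserts that its caller holds the mutex, and the callers take the lock
 * once around the whole per-page loop instead of once per page.
 */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

#define NPAGES 8

static pthread_mutex_t umem_mutex = PTHREAD_MUTEX_INITIALIZER;
static int page_list[NPAGES];
static int mutex_held;  /* tracked by hand, only for the assertion below */

/* Analogue of ib_umem_odp_map_dma_single_page: no locking inside,
 * the caller is expected to hold umem_mutex for the whole batch. */
static int map_single_page(int idx)
{
        assert(mutex_held);
        page_list[idx] = 1;
        return 0;
}

/* Analogue of the map loop: one lock/unlock pair per batch of pages. */
static int map_pages(int start, int npages)
{
        int ret = 0;

        pthread_mutex_lock(&umem_mutex);
        mutex_held = 1;
        for (int j = start; j < start + npages; j++) {
                ret = map_single_page(j);
                if (ret < 0)
                        break;
        }
        mutex_held = 0;
        pthread_mutex_unlock(&umem_mutex);
        return ret;
}

/* Analogue of the unmap loop: the lock is likewise held across the range. */
static void unmap_pages(int start, int npages)
{
        pthread_mutex_lock(&umem_mutex);
        for (int i = start; i < start + npages; i++)
                page_list[i] = 0;
        pthread_mutex_unlock(&umem_mutex);
}

int main(void)
{
        map_pages(0, NPAGES);
        unmap_pages(0, NPAGES / 2);
        for (int i = 0; i < NPAGES; i++)
                printf("page %d: %s\n", i, page_list[i] ? "mapped" : "unmapped");
        return 0;
}

Coarsening the lock scope this way trims a lock/unlock round-trip per page and lets the per-page helper assume the mutex is already held, which appears to be the point of the patch.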