* enforcing the limit based on the max that the guest can map.
*/
down_write(&current->mm->mmap_sem);
- npages = (tbl->it_size << IOMMU_PAGE_SHIFT_4K) >> PAGE_SHIFT;
+ npages = (tbl->it_size << tbl->it_page_shift) >> PAGE_SHIFT;
locked = current->mm->locked_vm + npages;
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
down_write(&current->mm->mmap_sem);
current->mm->locked_vm -= (container->tbl->it_size <<
- IOMMU_PAGE_SHIFT_4K) >> PAGE_SHIFT;
+ container->tbl->it_page_shift) >> PAGE_SHIFT;
up_write(&current->mm->mmap_sem);
}
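
For reference, the accounting above is just unit conversion: the table covers it_size entries of (1 << it_page_shift) bytes each, and that byte count is converted to system pages before being charged against RLIMIT_MEMLOCK. A minimal user-space sketch of the same arithmetic, assuming 4K system pages (helper names here are illustrative, not the kernel API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12UL   /* assumed 4K system pages for this example */

/* Sketch: would a table of 'it_size' entries, each covering
 * (1 << it_page_shift) bytes, fit under the RLIMIT_MEMLOCK budget?
 * 'locked_vm' and 'lock_limit' are counted in system pages, mirroring
 * the locked_vm accounting in the hunk above.
 */
static bool fits_memlock(uint64_t it_size, uint32_t it_page_shift,
			 uint64_t locked_vm, uint64_t lock_limit)
{
	uint64_t npages = (it_size << it_page_shift) >> PAGE_SHIFT;

	return locked_vm + npages <= lock_limit;
}

int main(void)
{
	/* 2GB window of 64K IOMMU pages: 32768 entries, 524288 system pages */
	printf("%d\n", fits_memlock(32768, 16, 0, 1 << 20));
	return 0;
}
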
tce, ret);
break;
}
- tce += IOMMU_PAGE_SIZE_4K;
+ tce += IOMMU_PAGE_SIZE(tbl);
}
if (ret)
if (info.argsz < minsz)
return -EINVAL;
- info.dma32_window_start = tbl->it_offset << IOMMU_PAGE_SHIFT_4K;
- info.dma32_window_size = tbl->it_size << IOMMU_PAGE_SHIFT_4K;
+ info.dma32_window_start = tbl->it_offset << tbl->it_page_shift;
+ info.dma32_window_size = tbl->it_size << tbl->it_page_shift;
info.flags = 0;
if (copy_to_user((void __user *)arg, &info, minsz))
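
The window geometry reported to user space falls straight out of the table fields: it_offset is the index of the first TCE entry, it_size the number of entries, both in units of (1 << it_page_shift) bytes. A rough illustration of that conversion (the struct and field names below are a toy stand-in, not the kernel's iommu_table):

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for the relevant iommu_table fields (illustrative only). */
struct toy_tbl {
	uint64_t it_offset;     /* first TCE index covered by the table */
	uint64_t it_size;       /* number of TCE entries */
	uint32_t it_page_shift; /* log2 of the IOMMU page size */
};

int main(void)
{
	struct toy_tbl tbl = { .it_offset = 0, .it_size = 0x8000,
			       .it_page_shift = 16 }; /* 64K IOMMU pages */

	/* Same shifts as in the GET_INFO path above: entries -> bytes. */
	uint64_t window_start = tbl.it_offset << tbl.it_page_shift;
	uint64_t window_size  = tbl.it_size << tbl.it_page_shift;

	printf("start=0x%llx size=0x%llx\n",
	       (unsigned long long)window_start,
	       (unsigned long long)window_size);
	return 0;
}
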
VFIO_DMA_MAP_FLAG_WRITE))
return -EINVAL;
- if ((param.size & ~IOMMU_PAGE_MASK_4K) ||
- (param.vaddr & ~IOMMU_PAGE_MASK_4K))
+ if ((param.size & ~IOMMU_PAGE_MASK(tbl)) ||
+ (param.vaddr & ~IOMMU_PAGE_MASK(tbl)))
return -EINVAL;
/* iova is checked by the IOMMU API */
return ret;
ret = tce_iommu_build(container, tbl,
- param.iova >> IOMMU_PAGE_SHIFT_4K,
- tce, param.size >> IOMMU_PAGE_SHIFT_4K);
+ param.iova >> tbl->it_page_shift,
+ tce, param.size >> tbl->it_page_shift);
iommu_flush_tce(tbl);
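
The map path requires size and vaddr to be aligned to the table's IOMMU page size (no longer a hard-coded 4K), then converts iova and size from bytes to IOMMU-page units before building the TCEs. A hedged sketch of those checks with a locally defined mask macro (not the kernel's IOMMU_PAGE_MASK):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Local approximations of the per-table helpers used in the patch:
 * page size and mask derived from it_page_shift instead of a fixed 4K.
 */
#define TOY_IOMMU_PAGE_SIZE(shift)  (1ULL << (shift))
#define TOY_IOMMU_PAGE_MASK(shift)  (~(TOY_IOMMU_PAGE_SIZE(shift) - 1))

/* Validate a map request and report which entries it would populate. */
static int check_map(uint64_t vaddr, uint64_t iova, uint64_t size,
		     uint32_t it_page_shift, uint64_t *entry, uint64_t *npages)
{
	if ((size & ~TOY_IOMMU_PAGE_MASK(it_page_shift)) ||
	    (vaddr & ~TOY_IOMMU_PAGE_MASK(it_page_shift)))
		return -EINVAL;   /* must be IOMMU-page aligned */

	*entry = iova >> it_page_shift;   /* first TCE index */
	*npages = size >> it_page_shift;  /* number of TCE entries */
	return 0;
}

int main(void)
{
	uint64_t entry, npages;
	int ret = check_map(0x10000, 0x80000000, 0x100000, 16, &entry, &npages);

	printf("ret=%d entry=%llu npages=%llu\n", ret,
	       (unsigned long long)entry, (unsigned long long)npages);
	return 0;
}
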
if (param.flags)
return -EINVAL;
- if (param.size & ~IOMMU_PAGE_MASK_4K)
+ if (param.size & ~IOMMU_PAGE_MASK(tbl))
return -EINVAL;
ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
- param.size >> IOMMU_PAGE_SHIFT_4K);
+ param.size >> tbl->it_page_shift);
if (ret)
return ret;
ret = tce_iommu_clear(container, tbl,
- param.iova >> IOMMU_PAGE_SHIFT_4K,
- param.size >> IOMMU_PAGE_SHIFT_4K);
+ param.iova >> tbl->it_page_shift,
+ param.size >> tbl->it_page_shift);
iommu_flush_tce(tbl);
return ret;
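
The unmap path does the inverse conversion: size is checked against the table's page mask, then iova and size are shifted down to IOMMU-page units and that many entries are cleared. A toy loop mirroring just the unit conversion and iteration (not the actual page-release logic in tce_iommu_clear):

#include <stdint.h>
#include <stdio.h>

/* Toy TCE table: one 64-bit entry per IOMMU page (illustrative layout). */
#define TOY_ENTRIES 64
static uint64_t toy_tce[TOY_ENTRIES];

/* Bytes -> IOMMU-page units, then clear that many entries starting at
 * the entry index derived from the iova, as in the hunk above.
 */
static void toy_clear(uint64_t iova, uint64_t size, uint32_t it_page_shift)
{
	uint64_t entry = iova >> it_page_shift;
	uint64_t npages = size >> it_page_shift;

	for (uint64_t i = 0; i < npages && entry + i < TOY_ENTRIES; i++)
		toy_tce[entry + i] = 0;
}

int main(void)
{
	for (int i = 0; i < TOY_ENTRIES; i++)
		toy_tce[i] = 0xdeadbeefULL;  /* pretend the entries are mapped */

	toy_clear(0x30000, 0x20000, 16);     /* 64K pages: clears entries 3 and 4 */

	printf("entry 3 = 0x%llx, entry 5 = 0x%llx\n",
	       (unsigned long long)toy_tce[3],
	       (unsigned long long)toy_tce[5]);
	return 0;
}
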