if (dev)
boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
- 1 << IOMMU_PAGE_SHIFT_4K);
+ 1 << tbl->it_page_shift);
else
- boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT_4K);
+ boundary_size = ALIGN(1UL << 32, 1 << tbl->it_page_shift);
/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */
- n = iommu_area_alloc(tbl->it_map, limit, start, npages,
- tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT_4K,
- align_mask);
+ n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
+ boundary_size >> tbl->it_page_shift, align_mask);
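/*
 * Illustrative arithmetic (editor's sketch, assuming a 64K TCE page size,
 * i.e. it_page_shift == 16): with the default 4GB segment boundary,
 * boundary_size = ALIGN(1UL << 32, 1 << 16) = 1UL << 32, so the allocator
 * is told to avoid crossing every (1UL << 32) >> 16 = 0x10000 table entries.
 * The old IOMMU_PAGE_SHIFT_4K constant would have produced 0x100000 entries
 * here, which is only correct for 4K TCE pages.
 */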
if (n == -1) {
if (likely(pass == 0)) {
/* First try the pool from the start */
return DMA_ERROR_CODE;
entry += tbl->it_offset; /* Offset into real TCE table */
- ret = entry << IOMMU_PAGE_SHIFT_4K; /* Set the return dma address */
+ ret = entry << tbl->it_page_shift; /* Set the return dma address */
/* Put the TCEs in the HW table */
build_fail = ppc_md.tce_build(tbl, entry, npages,
- (unsigned long)page & IOMMU_PAGE_MASK_4K,
- direction, attrs);
+ (unsigned long)page &
+ IOMMU_PAGE_MASK(tbl), direction, attrs);
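/*
 * For reference, a minimal sketch of the per-table helpers this patch
 * assumes, modelled on the existing *_4K constants in asm/iommu.h:
 *
 *   #define IOMMU_PAGE_SIZE(tblptr)  (ASM_CONST(1) << (tblptr)->it_page_shift)
 *   #define IOMMU_PAGE_MASK(tblptr)  (~((1 << (tblptr)->it_page_shift) - 1))
 *   #define IOMMU_PAGE_ALIGN(addr, tblptr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE(tblptr))
 */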
/* ppc_md.tce_build() only returns non-zero for transient errors.
* Clean up the table bitmap in this case and return
{
unsigned long entry, free_entry;
- entry = dma_addr >> IOMMU_PAGE_SHIFT_4K;
+ entry = dma_addr >> tbl->it_page_shift;
free_entry = entry - tbl->it_offset;
if (((free_entry + npages) > tbl->it_size) ||
unsigned long flags;
struct iommu_pool *pool;
- entry = dma_addr >> IOMMU_PAGE_SHIFT_4K;
+ entry = dma_addr >> tbl->it_page_shift;
free_entry = entry - tbl->it_offset;
pool = get_pool(tbl, free_entry);
}
/* Allocate iommu entries for that segment */
vaddr = (unsigned long) sg_virt(s);
- npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE_4K);
+ npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
align = 0;
- if (IOMMU_PAGE_SHIFT_4K < PAGE_SHIFT && slen >= PAGE_SIZE &&
+ if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE &&
(vaddr & ~PAGE_MASK) == 0)
- align = PAGE_SHIFT - IOMMU_PAGE_SHIFT_4K;
+ align = PAGE_SHIFT - tbl->it_page_shift;
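/*
 * Worked example (editor's sketch): with 64K kernel pages (PAGE_SHIFT = 16)
 * and 4K TCE pages (it_page_shift = 12), a page-aligned buffer of at least
 * PAGE_SIZE gets align = 16 - 12 = 4, i.e. the allocated entry is aligned
 * to 1 << 4 = 16 TCEs, so the resulting DMA address keeps the same 64K
 * alignment as the virtual address.
 */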
entry = iommu_range_alloc(dev, tbl, npages, &handle,
- mask >> IOMMU_PAGE_SHIFT_4K, align);
+ mask >> tbl->it_page_shift, align);
DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);
/* Convert entry to a dma_addr_t */
entry += tbl->it_offset;
- dma_addr = entry << IOMMU_PAGE_SHIFT_4K;
- dma_addr |= (s->offset & ~IOMMU_PAGE_MASK_4K);
+ dma_addr = entry << tbl->it_page_shift;
+ dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl));
DBG(" - %lu pages, entry: %lx, dma_addr: %lx\n",
npages, entry, dma_addr);
/* Insert into HW table */
build_fail = ppc_md.tce_build(tbl, entry, npages,
- vaddr & IOMMU_PAGE_MASK_4K,
- direction, attrs);
+ vaddr & IOMMU_PAGE_MASK(tbl),
+ direction, attrs);
if (unlikely(build_fail))
goto failure;
if (s->dma_length != 0) {
unsigned long vaddr, npages;
- vaddr = s->dma_address & IOMMU_PAGE_MASK_4K;
+ vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl);
npages = iommu_num_pages(s->dma_address, s->dma_length,
- IOMMU_PAGE_SIZE_4K);
+ IOMMU_PAGE_SIZE(tbl));
__iommu_free(tbl, vaddr, npages);
s->dma_address = DMA_ERROR_CODE;
s->dma_length = 0;
if (sg->dma_length == 0)
break;
npages = iommu_num_pages(dma_handle, sg->dma_length,
- IOMMU_PAGE_SIZE_4K);
+ IOMMU_PAGE_SIZE(tbl));
__iommu_free(tbl, dma_handle, npages);
sg = sg_next(sg);
}
set_bit(0, tbl->it_map);
/* We only split the IOMMU table if we have 1GB or more of space */
- if ((tbl->it_size << IOMMU_PAGE_SHIFT_4K) >= (1UL * 1024 * 1024 * 1024))
+ if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024))
tbl->nr_pools = IOMMU_NR_POOLS;
else
tbl->nr_pools = 1;
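/*
 * Editor's sketch of the arithmetic: 1GB of DMA space corresponds to
 * it_size = 1GB >> it_page_shift entries, i.e. 262144 entries with 4K TCE
 * pages but only 16384 with 64K TCE pages, hence shifting by
 * tbl->it_page_shift rather than the fixed 4K constant.
 */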
vaddr = page_address(page) + offset;
uaddr = (unsigned long)vaddr;
- npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE_4K);
+ npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));
if (tbl) {
align = 0;
- if (IOMMU_PAGE_SHIFT_4K < PAGE_SHIFT && size >= PAGE_SIZE &&
+ if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE &&
((unsigned long)vaddr & ~PAGE_MASK) == 0)
- align = PAGE_SHIFT - IOMMU_PAGE_SHIFT_4K;
+ align = PAGE_SHIFT - tbl->it_page_shift;
dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
- mask >> IOMMU_PAGE_SHIFT_4K, align,
+ mask >> tbl->it_page_shift, align,
attrs);
if (dma_handle == DMA_ERROR_CODE) {
if (printk_ratelimit()) {
npages);
}
} else
- dma_handle |= (uaddr & ~IOMMU_PAGE_MASK_4K);
+ dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl));
}
return dma_handle;
BUG_ON(direction == DMA_NONE);
if (tbl) {
- npages = iommu_num_pages(dma_handle, size, IOMMU_PAGE_SIZE_4K);
+ npages = iommu_num_pages(dma_handle, size,
+ IOMMU_PAGE_SIZE(tbl));
iommu_free(tbl, dma_handle, npages);
}
}
memset(ret, 0, size);
/* Set up tces to cover the allocated range */
- nio_pages = size >> IOMMU_PAGE_SHIFT_4K;
- io_order = get_iommu_order(size);
+ nio_pages = size >> tbl->it_page_shift;
+ io_order = get_iommu_order(size, tbl);
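/*
 * get_iommu_order() is assumed to take the table now as well; a minimal
 * sketch of the per-table version in asm/iommu.h:
 *
 *   static inline unsigned int get_iommu_order(unsigned long size,
 *                                              struct iommu_table *tbl)
 *   {
 *       return __ilog2((size - 1) >> tbl->it_page_shift) + 1;
 *   }
 */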
mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
- mask >> IOMMU_PAGE_SHIFT_4K, io_order, NULL);
+ mask >> tbl->it_page_shift, io_order, NULL);
if (mapping == DMA_ERROR_CODE) {
free_pages((unsigned long)ret, order);
return NULL;
unsigned int nio_pages;
size = PAGE_ALIGN(size);
- nio_pages = size >> IOMMU_PAGE_SHIFT_4K;
+ nio_pages = size >> tbl->it_page_shift;
iommu_free(tbl, dma_handle, nio_pages);
size = PAGE_ALIGN(size);
free_pages((unsigned long)vaddr, get_order(size));
if (tce_value)
return -EINVAL;
- if (ioba & ~IOMMU_PAGE_MASK_4K)
+ if (ioba & ~IOMMU_PAGE_MASK(tbl))
return -EINVAL;
- ioba >>= IOMMU_PAGE_SHIFT_4K;
+ ioba >>= tbl->it_page_shift;
if (ioba < tbl->it_offset)
return -EINVAL;
if (!(tce & (TCE_PCI_WRITE | TCE_PCI_READ)))
return -EINVAL;
- if (tce & ~(IOMMU_PAGE_MASK_4K | TCE_PCI_WRITE | TCE_PCI_READ))
+ if (tce & ~(IOMMU_PAGE_MASK(tbl) | TCE_PCI_WRITE | TCE_PCI_READ))
return -EINVAL;
- if (ioba & ~IOMMU_PAGE_MASK_4K)
+ if (ioba & ~IOMMU_PAGE_MASK(tbl))
return -EINVAL;
- ioba >>= IOMMU_PAGE_SHIFT_4K;
+ ioba >>= tbl->it_page_shift;
if (ioba < tbl->it_offset)
return -EINVAL;
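/*
 * Editor's note on the mask checks above: with it_page_shift = 12 the mask
 * is ~0xfff, so an ioba or tce with any of the low 12 bits set (beyond the
 * TCE_PCI_READ/WRITE permission bits in tce) is rejected; a 64K table
 * requires the low 16 bits to be clear instead.
 */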
/* if (unlikely(ret))
pr_err("iommu_tce: %s failed on hwaddr=%lx ioba=%lx kva=%lx ret=%d\n",
- __func__, hwaddr, entry << IOMMU_PAGE_SHIFT_4K,
+ __func__, hwaddr, entry << tbl->it_page_shift,
hwaddr, ret); */
return ret;
{
int ret;
struct page *page = NULL;
- unsigned long hwaddr, offset = tce & IOMMU_PAGE_MASK_4K & ~PAGE_MASK;
+ unsigned long hwaddr, offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
enum dma_data_direction direction = iommu_tce_direction(tce);
ret = get_user_pages_fast(tce & PAGE_MASK, 1,
direction != DMA_TO_DEVICE, &page);
if (unlikely(ret != 1)) {
/* pr_err("iommu_tce: get_user_pages_fast failed tce=%lx ioba=%lx ret=%d\n",
- tce, entry << IOMMU_PAGE_SHIFT_4K, ret); */
+ tce, entry << tbl->it_page_shift, ret); */
return -EFAULT;
}
hwaddr = (unsigned long) page_address(page) + offset;
if (ret < 0)
pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%d\n",
- __func__, entry << IOMMU_PAGE_SHIFT_4K, tce, ret);
+ __func__, entry << tbl->it_page_shift, tce, ret);
return ret;
}
pr_debug("iommu_tce: adding %s to iommu group %d\n",
dev_name(dev), iommu_group_id(tbl->it_group));
+ if (PAGE_SIZE < IOMMU_PAGE_SIZE(tbl)) {
+ pr_err("iommu_tce: unsupported iommu page size.");
+ pr_err("%s has not been added\n", dev_name(dev));
+ return -EINVAL;
+ }
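/*
 * Editor's note: the check above guards against a TCE page size larger than
 * the kernel page size; iommu_put_tce_user_mode() pins a single kernel page
 * per TCE via get_user_pages_fast(), so a larger IOMMU page would let the
 * device reach memory beyond the pinned page.
 */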
+
ret = iommu_group_add_device(tbl->it_group, dev);
if (ret < 0)
pr_err("iommu_tce: %s has not been added, ret=%d\n",
struct dma_attrs *attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
+ struct iommu_table *tbl;
dma_addr_t ret = DMA_ERROR_CODE;
- if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE_4K))) {
+ tbl = get_iommu_table_base(dev);
+ if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)))) {
atomic_inc(&viodev->cmo.allocs_failed);
return ret;
}
ret = dma_iommu_ops.map_page(dev, page, offset, size, direction, attrs);
if (unlikely(dma_mapping_error(dev, ret))) {
- vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE_4K));
+ vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
atomic_inc(&viodev->cmo.allocs_failed);
}
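/*
 * Editor's sketch of the accounting change: mapping a 2000-byte buffer
 * against a table with 64K TCE pages now charges roundup(2000, 65536) =
 * 65536 bytes of CMO entitlement rather than 4096, matching the amount of
 * memory a single TCE actually maps.
 */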
struct dma_attrs *attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
+ struct iommu_table *tbl;
+ tbl = get_iommu_table_base(dev);
dma_iommu_ops.unmap_page(dev, dma_handle, size, direction, attrs);
- vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE_4K));
+ vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
}
static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
struct dma_attrs *attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
+ struct iommu_table *tbl;
struct scatterlist *sgl;
int ret, count = 0;
size_t alloc_size = 0;
+ tbl = get_iommu_table_base(dev);
for (sgl = sglist; count < nelems; count++, sgl++)
- alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE_4K);
+ alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl));
if (vio_cmo_alloc(viodev, alloc_size)) {
atomic_inc(&viodev->cmo.allocs_failed);
}
for (sgl = sglist, count = 0; count < ret; count++, sgl++)
- alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE_4K);
+ alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
if (alloc_size)
vio_cmo_dealloc(viodev, alloc_size);
struct dma_attrs *attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
+ struct iommu_table *tbl;
struct scatterlist *sgl;
size_t alloc_size = 0;
int count = 0;
+ tbl = get_iommu_table_base(dev);
for (sgl = sglist; count < nelems; count++, sgl++)
- alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE_4K);
+ alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
dma_iommu_ops.unmap_sg(dev, sglist, nelems, direction, attrs);
{
struct vio_cmo_dev_entry *dev_ent;
struct device *dev = &viodev->dev;
+ struct iommu_table *tbl;
struct vio_driver *viodrv = to_vio_driver(dev->driver);
unsigned long flags;
size_t size;
bool dma_capable = false;
+ tbl = get_iommu_table_base(dev);
+
/* A device requires entitlement if it has a DMA window property */
switch (viodev->family) {
case VDEVICE:
}
viodev->cmo.desired =
- IOMMU_PAGE_ALIGN_4K(viodrv->get_desired_dma(viodev));
+ IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev), tbl);
if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
viodev->cmo.desired = VIO_CMO_MIN_ENT;
size = VIO_CMO_MIN_ENT;