@@ ... @@ static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
 	struct vio_dev *viodev = to_vio_dev(dev);
 	void *ret;

-	if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE))) {
+	if (vio_cmo_alloc(viodev, roundup(size, PAGE_SIZE))) {
 		atomic_inc(&viodev->cmo.allocs_failed);
 		return NULL;
 	}

 	ret = dma_iommu_ops.alloc_coherent(dev, size, dma_handle, flag);
 	if (unlikely(ret == NULL)) {
-		vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
+		vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
 		atomic_inc(&viodev->cmo.allocs_failed);
 	}
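The two changes above make the entitlement charge match what dma_iommu_ops.alloc_coherent() actually consumes: coherent memory is handed out in whole system pages, so rounding the charge to IOMMU_PAGE_SIZE under-reserves whenever PAGE_SIZE is larger. A standalone userspace sketch of the arithmetic, assuming the common pseries split of 4 KiB IOMMU pages under a 64 KiB kernel page (both sizes are assumptions for illustration, not taken from the patch):

#include <stdio.h>

/* Assumed page sizes for a typical pseries configuration; not from the patch. */
#define ASSUMED_IOMMU_PAGE_SIZE		(4UL * 1024)	/* 4 KiB TCE page */
#define ASSUMED_SYSTEM_PAGE_SIZE	(64UL * 1024)	/* 64 KiB kernel PAGE_SIZE */

/* Round up to a power-of-two unit; same result as the kernel's roundup()
 * for these values. */
static unsigned long round_up_to(unsigned long size, unsigned long unit)
{
	return (size + unit - 1) & ~(unit - 1);
}

int main(void)
{
	unsigned long size = 2UL * 1024;	/* a 2 KiB coherent allocation */

	/* Old charge: one IOMMU page of entitlement... */
	printf("IOMMU_PAGE_SIZE charge: %lu bytes\n",
	       round_up_to(size, ASSUMED_IOMMU_PAGE_SIZE));
	/* ...new charge: the full system page the allocator really consumes. */
	printf("PAGE_SIZE charge:       %lu bytes\n",
	       round_up_to(size, ASSUMED_SYSTEM_PAGE_SIZE));
	return 0;
}

For the 2 KiB request the old rounding reserved only 4 KiB of entitlement while the allocator consumed a 64 KiB page; the new rounding charges the full page up front.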
@@ ... @@ static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
 	dma_iommu_ops.free_coherent(dev, size, vaddr, dma_handle);

-	vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
+	vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
 }
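The free path is switched to the same rounding in the same patch; if the charge and the release used different units, viodev->cmo.allocated would drift upward on every alloc/free cycle. A minimal sketch of that drift, reusing the assumed 4 KiB / 64 KiB sizes (illustrative only):

#include <stdio.h>

static unsigned long round_up_to(unsigned long size, unsigned long unit)
{
	return (size + unit - 1) & ~(unit - 1);
}

int main(void)
{
	unsigned long allocated = 0;	/* stand-in for viodev->cmo.allocated */
	int i;

	for (i = 0; i < 3; i++) {
		allocated += round_up_to(2048, 65536);	/* alloc: PAGE_SIZE rounding */
		allocated -= round_up_to(2048, 4096);	/* free: stale IOMMU rounding */
	}
	/* leaks 60 KiB per cycle instead of returning to zero */
	printf("leaked entitlement: %lu bytes\n", allocated);
	return 0;
}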
@@ ... @@ static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
 	if (unlikely(!ret)) {
 		vio_cmo_dealloc(viodev, alloc_size);
 		atomic_inc(&viodev->cmo.allocs_failed);
+		return ret;
 	}

 	for (sgl = sglist, count = 0; count < ret; count++, sgl++)
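Here ret is the entry count returned by dma_iommu_ops.map_sg(), so !ret means the mapping failed outright and the failure branch releases the whole reservation. Without the added return, control falls through: the per-entry loop runs zero times (count < ret is false for ret == 0), leaving alloc_size untouched, and the cleanup that follows the loop (not shown above) releases the same bytes a second time, driving the allocation counter negative. A standalone sketch of that double release (counter and sizes are made up):

#include <stdio.h>

static long allocated = 65536;	/* bytes charged earlier by vio_cmo_alloc() */

static void cmo_dealloc(long size)
{
	allocated -= size;
}

int main(void)
{
	long alloc_size = 65536;
	int ret = 0;	/* dma_iommu_ops.map_sg() mapped nothing */

	if (!ret) {
		cmo_dealloc(alloc_size);	/* failure path releases everything */
		/* without "return ret;" execution keeps going... */
	}

	/* the per-entry loop never decrements alloc_size for ret == 0, so the
	 * normal-path cleanup releases the same reservation again */
	if (alloc_size)
		cmo_dealloc(alloc_size);

	printf("allocated = %ld (negative: double dealloc)\n", allocated);
	return 0;
}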