case ION_IOC_SHARE:
case ION_IOC_MAP:
case ION_IOC_IMPORT:
- case ION_IOC_SYNC:
return filp->f_op->unlocked_ioctl(filp, cmd,
(unsigned long)compat_ptr(arg));
default:
static unsigned int ion_ioctl_dir(unsigned int cmd)
{
switch (cmd) {
- case ION_IOC_SYNC:
case ION_IOC_FREE:
case ION_IOC_CUSTOM:
return _IOC_WRITE;
data.handle.handle = handle->id;
break;
}
- case ION_IOC_SYNC:
- {
- ret = ion_sync_for_device(client, data.fd.fd);
- break;
- }
case ION_IOC_CUSTOM:
{
if (!dev->custom_ioctl)
dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
}
-void ion_pages_sync_for_device(struct device *dev, struct page *page,
- size_t size, enum dma_data_direction dir)
-{
- struct scatterlist sg;
-
- sg_init_table(&sg, 1);
- sg_set_page(&sg, page, size, 0);
- /*
- * This is not correct - sg_dma_address needs a dma_addr_t that is valid
- * for the targeted device, but this works on the currently targeted
- * hardware.
- */
- sg_dma_address(&sg) = page_to_phys(page);
- dma_sync_sg_for_device(dev, &sg, 1, dir);
-}
-
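With this helper gone, cache maintenance is left to the DMA API at the point where a buffer is mapped for a real device. A minimal sketch of that pattern, not part of this patch: example_map_dma_buf is a made-up name, struct ion_buffer comes from ion_priv.h, and ion's real ion_map_dma_buf additionally duplicates the sg_table before mapping.

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>

static struct sg_table *example_map_dma_buf(struct dma_buf_attachment *attachment,
					    enum dma_data_direction direction)
{
	struct ion_buffer *buffer = attachment->dmabuf->priv;
	struct sg_table *table = buffer->sg_table;

	/* dma_map_sg() performs whatever cache maintenance attachment->dev
	 * actually needs, instead of faking an address with a NULL device. */
	if (!dma_map_sg(attachment->dev, table->sgl, table->nents, direction))
		return ERR_PTR(-ENOMEM);

	return table;
}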
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
struct ion_buffer *buffer = dmabuf->priv;
}
EXPORT_SYMBOL(ion_import_dma_buf_fd);
-int ion_sync_for_device(struct ion_client *client, int fd)
-{
- struct dma_buf *dmabuf;
- struct ion_buffer *buffer;
-
- dmabuf = dma_buf_get(fd);
- if (IS_ERR(dmabuf))
- return PTR_ERR(dmabuf);
-
- /* if this memory came from ion */
- if (dmabuf->ops != &dma_buf_ops) {
- pr_err("%s: can not sync dmabuf from another exporter\n",
- __func__);
- dma_buf_put(dmabuf);
- return -EINVAL;
- }
- buffer = dmabuf->priv;
-
- dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_BIDIRECTIONAL);
- dma_buf_put(dmabuf);
- return 0;
-}
-
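In-kernel users lose nothing here: the dma-buf core already provides the CPU-access sync that ion_sync_for_device hand-rolled against a NULL device. A sketch of the bracketed form, assuming a struct dma_buf pointer is in hand (example_cpu_touch is a made-up name):

#include <linux/dma-buf.h>

static int example_cpu_touch(struct dma_buf *dmabuf)
{
	int ret;

	/* invalidate/flush as needed before the CPU touches the buffer */
	ret = dma_buf_begin_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
	if (ret)
		return ret;

	/* ... read or write the buffer here, e.g. via dma_buf_kmap() ... */

	/* write back dirty lines so the next device user sees them */
	return dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
}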
int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query)
{
struct ion_device *dev = client->dev;
ion_heap_buffer_zero(buffer);
- if (ion_buffer_cached(buffer))
- dma_sync_sg_for_device(NULL, table->sgl, table->nents,
- DMA_BIDIRECTIONAL);
-
ion_carveout_free(heap, paddr, buffer->size);
sg_free_table(table);
kfree(table);
page = pfn_to_page(PFN_DOWN(heap_data->base));
size = heap_data->size;
- ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
-
ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
if (ret)
return ERR_PTR(ret);
ion_heap_buffer_zero(buffer);
- if (ion_buffer_cached(buffer))
- dma_sync_sg_for_device(NULL, table->sgl, table->nents,
- DMA_BIDIRECTIONAL);
-
for_each_sg(table->sgl, sg, table->nents, i) {
gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
sg->length);
page = pfn_to_page(PFN_DOWN(heap_data->base));
size = heap_data->size;
- ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
-
ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
if (ret)
return ERR_PTR(ret);
if (!page)
return NULL;
- if (!pool->cached)
- ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order,
- DMA_BIDIRECTIONAL);
return page;
}
int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
int nr_to_scan);
-/**
- * ion_pages_sync_for_device - cache flush pages for use with the specified
- * device
- * @dev: the device the pages will be used with
- * @page: the first page to be flushed
- * @size: size in bytes of region to be flushed
- * @dir: direction of dma transfer
- */
-void ion_pages_sync_for_device(struct device *dev, struct page *page,
- size_t size, enum dma_data_direction dir);
-
long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
-int ion_sync_for_device(struct ion_client *client, int fd);
-
struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
int id);
page = ion_page_pool_alloc(pool);
- if (cached)
- ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
- DMA_BIDIRECTIONAL);
return page;
}
buffer->sg_table = table;
- ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL);
-
return 0;
free_table:
*/
#define ION_IOC_IMPORT _IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data)
-/**
- * DOC: ION_IOC_SYNC - syncs a shared file descriptors to memory
- *
- * Deprecated in favor of using the dma_buf api's correctly (syncing
- * will happen automatically when the buffer is mapped to a device).
- * If necessary should be used after touching a cached buffer from the cpu,
- * this will make the buffer in memory coherent.
- */
-#define ION_IOC_SYNC _IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data)
-
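Userspace that relied on ION_IOC_SYNC can bracket its CPU access with the dma-buf sync ioctl on the shared fd instead. A sketch only: the helper names are made up, and the fd is assumed to be the dma-buf fd handed out by ION_IOC_SHARE/ION_IOC_MAP.

#include <sys/ioctl.h>
#include <linux/dma-buf.h>

static int cpu_access_begin(int dmabuf_fd)
{
	struct dma_buf_sync sync = { .flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW };

	/* invalidates/flushes caches before the CPU reads or writes */
	return ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
}

static int cpu_access_end(int dmabuf_fd)
{
	struct dma_buf_sync sync = { .flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW };

	/* writes back dirty lines so the device sees the CPU's writes */
	return ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
}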
/**
* DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl
*