{
	struct ion_buffer *buffer;
	struct sg_table *table;
-	struct scatterlist *sg;
-	int i, ret;
+	int ret;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&buffer->vmas);
	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
-	/*
-	 * this will set up dma addresses for the sglist -- it is not
-	 * technically correct as per the dma api -- a specific
-	 * device isn't really taking ownership here. However, in practice on
-	 * our systems the only dma_address space is physical addresses.
-	 * Additionally, we can't afford the overhead of invalidating every
-	 * allocation via dma_map_sg. The implicit contract here is that
-	 * memory coming from the heaps is ready for dma, ie if it has a
-	 * cached mapping that mapping has been invalidated
-	 */
-	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
-		sg_dma_address(sg) = sg_phys(sg);
-		sg_dma_len(sg) = sg->length;
-	}

	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
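
With the hand-rolled sg_dma_address() assignment removed, DMA addresses have to
come from the DMA API itself, set up against the device that actually attaches
to the buffer. A minimal sketch of what the dma-buf mapping side can look like
after this change, assuming each attachment carries its own duplicated sg_table
(the ion_dma_buf_attachment layout below is illustrative, not necessarily the
driver's exact structure):

	#include <linux/dma-buf.h>
	#include <linux/dma-mapping.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>

	/* Illustrative attachment-private data: one table copy per attach */
	struct ion_dma_buf_attachment {
		struct sg_table *table;
	};

	static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
						enum dma_data_direction direction)
	{
		struct ion_dma_buf_attachment *a = attachment->priv;
		struct sg_table *table = a->table;

		/*
		 * Let the DMA API fill in sg_dma_address()/sg_dma_len() for
		 * the attaching device; dma_map_sg() returns the number of
		 * mapped entries, or 0 on failure.
		 */
		if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
				direction))
			return ERR_PTR(-ENOMEM);

		return table;
	}

Unlike the removed loop, this gives a specific device real ownership of the
mapping, so IOMMU translation and cache maintenance are handled by the
architecture's DMA ops rather than assumed away by the heap contract.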