staging: android: ion: Stop butchering the DMA address
author		Laura Abbott <labbott@redhat.com>
		Tue, 18 Apr 2017 18:27:06 +0000 (11:27 -0700)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
		Tue, 18 Apr 2017 18:43:14 +0000 (20:43 +0200)
Now that we have proper caching, stop setting the DMA address manually.
It should only be set once dma_map_sg() has actually been called for a device.
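
For reference, the addresses are now expected to be filled in where a real
device takes ownership of the buffer, e.g. in the dma-buf map callback. The
sketch below is illustrative only; the struct and callback names are
assumptions about the attach-time table handling in this tree, not a quote
of ion.c:

	#include <linux/dma-buf.h>
	#include <linux/dma-mapping.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>

	/* Illustrative per-attachment state: one sg_table per device. */
	struct ion_dma_buf_attachment {
		struct device *dev;
		struct sg_table *table;
	};

	static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
						enum dma_data_direction direction)
	{
		struct ion_dma_buf_attachment *a = attachment->priv;
		struct sg_table *table = a->table;

		/* dma_map_sg() is what now fills in sg_dma_address()/sg_dma_len(). */
		if (!dma_map_sg(attachment->dev, table->sgl, table->nents, direction))
			return ERR_PTR(-ENOMEM);

		return table;
	}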

Signed-off-by: Laura Abbott <labbott@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/staging/android/ion/ion.c

index 3d979ef543f6d1d3d4dc68ac7c2bf735bceb17fe..65638f509f6cbe3aa93eb5a7b298bc0f9000ef9b 100644
@@ -81,8 +81,7 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 {
        struct ion_buffer *buffer;
        struct sg_table *table;
-       struct scatterlist *sg;
-       int i, ret;
+       int ret;
 
        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer)
@@ -119,20 +118,6 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
        INIT_LIST_HEAD(&buffer->vmas);
        INIT_LIST_HEAD(&buffer->attachments);
        mutex_init(&buffer->lock);
-       /*
-        * this will set up dma addresses for the sglist -- it is not
-        * technically correct as per the dma api -- a specific
-        * device isn't really taking ownership here.  However, in practice on
-        * our systems the only dma_address space is physical addresses.
-        * Additionally, we can't afford the overhead of invalidating every
-        * allocation via dma_map_sg. The implicit contract here is that
-        * memory coming from the heaps is ready for dma, ie if it has a
-        * cached mapping that mapping has been invalidated
-        */
-       for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
-               sg_dma_address(sg) = sg_phys(sg);
-               sg_dma_len(sg) = sg->length;
-       }
        mutex_lock(&dev->buffer_lock);
        ion_buffer_add(dev, buffer);
        mutex_unlock(&dev->buffer_lock);