iommu/exynos: Fix warnings from DMA-debug
authorMarek Szyprowski <m.szyprowski@samsung.com>
Mon, 9 Jan 2017 12:03:54 +0000 (13:03 +0100)
committerJoerg Roedel <jroedel@suse.de>
Tue, 10 Jan 2017 14:01:21 +0000 (15:01 +0100)
Add simple checks for the dma_map_single() return value to make the
DMA-debug checker happy. Exynos IOMMU on Samsung Exynos SoCs always uses
a device which has linear DMA mapping ops (the DMA address is equal to the
physical memory address), so no failures are returned from dma_map_single().

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
drivers/iommu/exynos-iommu.c

index ac726e1760de2729c23b38778c6224789902fb83..dda4e5907979ca46b015ada65588d67c2064e9a7 100644 (file)
@@ -744,6 +744,8 @@ static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
                                DMA_TO_DEVICE);
        /* For mapping page table entries we rely on dma == phys */
        BUG_ON(handle != virt_to_phys(domain->pgtable));
+       if (dma_mapping_error(dma_dev, handle))
+               goto err_lv2ent;
 
        spin_lock_init(&domain->lock);
        spin_lock_init(&domain->pgtablelock);
@@ -755,6 +757,8 @@ static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
 
        return &domain->domain;
 
+err_lv2ent:
+       free_pages((unsigned long)domain->lv2entcnt, 1);
 err_counter:
        free_pages((unsigned long)domain->pgtable, 2);
 err_dma_cookie:
@@ -898,6 +902,7 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
        }
 
        if (lv1ent_fault(sent)) {
+               dma_addr_t handle;
                sysmmu_pte_t *pent;
                bool need_flush_flpd_cache = lv1ent_zero(sent);
 
@@ -909,7 +914,12 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
                update_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
                kmemleak_ignore(pent);
                *pgcounter = NUM_LV2ENTRIES;
-               dma_map_single(dma_dev, pent, LV2TABLE_SIZE, DMA_TO_DEVICE);
+               handle = dma_map_single(dma_dev, pent, LV2TABLE_SIZE,
+                                       DMA_TO_DEVICE);
+               if (dma_mapping_error(dma_dev, handle)) {
+                       kmem_cache_free(lv2table_kmem_cache, pent);
+                       return ERR_PTR(-EADDRINUSE);
+               }
 
                /*
                 * If pre-fetched SLPD is a faulty SLPD in zero_l2_table,