dmaengine: virt-dma: Add missing locking
Author:     Sascha Hauer <s.hauer@pengutronix.de>
AuthorDate: Mon, 16 Dec 2019 10:53:21 +0000 (11:53 +0100)
Commit:     Vinod Koul <vkoul@kernel.org>
CommitDate: Thu, 26 Dec 2019 04:34:18 +0000 (10:04 +0530)

Originally, freeing descriptors was split into a locked and an unlocked
part. The locked part, vchan_get_all_descriptors(), collected all
descriptors on a separate list_head; this was done so that
vchan_dma_desc_free_list() could then iterate over that new list
without the lock held.

This was broken by commit 13bb26ae8850 ("dmaengine: virt-dma: don't
always free descriptor upon completion"). Since that commit,
vchan_dma_desc_free_list() no longer operates exclusively on the
separate list, but moves descriptors which can be reused back onto
&vc->desc_allocated. This list operation should have been done with
the lock held, but wasn't.
In the meantime, drivers started to call vchan_dma_desc_free_list()
with their lock held, so we now have the situation that
vchan_dma_desc_free_list() is called with the lock held by some
drivers and without it by others.
To clean this up, we have to do two things:

1. Add missing locking in vchan_dma_desc_free_list()
2. Make sure drivers call vchan_dma_desc_free_list() unlocked

Both changes have to happen atomically: adding the locking alone would
deadlock the drivers that still call vchan_dma_desc_free_list() with
the lock held, while fixing only the drivers would leave the unlocked
list manipulation racy. So in this patch the locking is added and all
drivers are fixed in one go.
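
For illustration only (this sketch is not part of the patch), the
driver-side pattern after this change looks roughly as follows. The
virt-dma helpers are the real ones from virt-dma.h; "foo" is a made-up
driver name:

	static int foo_dma_terminate_all(struct dma_chan *chan)
	{
		struct virt_dma_chan *vc = to_virt_chan(chan);
		unsigned long flags;
		LIST_HEAD(head);

		spin_lock_irqsave(&vc->lock, flags);
		/* stop the hardware, reset driver state, ... */
		vchan_get_all_descriptors(vc, &head);
		spin_unlock_irqrestore(&vc->lock, flags);

		/*
		 * Called without vc->lock held: the function now takes
		 * the lock itself when it moves reusable descriptors
		 * back onto vc->desc_allocated.
		 */
		vchan_dma_desc_free_list(vc, &head);

		return 0;
	}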

Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
Reviewed-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
Tested-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
Reviewed-by: Green Wan <green.wan@sifive.com>
Tested-by: Green Wan <green.wan@sifive.com>
Link: https://lore.kernel.org/r/20191216105328.15198-3-s.hauer@pengutronix.de
Signed-off-by: Vinod Koul <vkoul@kernel.org>
drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
drivers/dma/mediatek/mtk-uart-apdma.c
drivers/dma/owl-dma.c
drivers/dma/s3c24xx-dma.c
drivers/dma/sf-pdma/sf-pdma.c
drivers/dma/sun4i-dma.c
drivers/dma/virt-dma.c

diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index a1ce307c502fa02f4d5106d21993176811360cee..14c1ac26f8664a5002005af7298488d725979107 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -636,14 +636,10 @@ static int dma_chan_terminate_all(struct dma_chan *dchan)
 
        vchan_get_all_descriptors(&chan->vc, &head);
 
-       /*
-        * As vchan_dma_desc_free_list can access to desc_allocated list
-        * we need to call it in vc.lock context.
-        */
-       vchan_dma_desc_free_list(&chan->vc, &head);
-
        spin_unlock_irqrestore(&chan->vc.lock, flags);
 
+       vchan_dma_desc_free_list(&chan->vc, &head);
+
        dev_vdbg(dchan2dev(dchan), "terminated: %s\n", axi_chan_name(chan));
 
        return 0;
diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c
index c20e6bd4e29898eefe0cba3026291f05a1e0f7e3..29f1223b285a4d38bb78dc3bd24b65e41d57eeef 100644
--- a/drivers/dma/mediatek/mtk-uart-apdma.c
+++ b/drivers/dma/mediatek/mtk-uart-apdma.c
@@ -430,9 +430,10 @@ static int mtk_uart_apdma_terminate_all(struct dma_chan *chan)
 
        spin_lock_irqsave(&c->vc.lock, flags);
        vchan_get_all_descriptors(&c->vc, &head);
-       vchan_dma_desc_free_list(&c->vc, &head);
        spin_unlock_irqrestore(&c->vc.lock, flags);
 
+       vchan_dma_desc_free_list(&c->vc, &head);
+
        return 0;
 }
 
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index 023f951189a727af61771991c85be0ac650449e3..c683051257fd66a19b2861107c80feffa4891930 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -674,10 +674,11 @@ static int owl_dma_terminate_all(struct dma_chan *chan)
        }
 
        vchan_get_all_descriptors(&vchan->vc, &head);
-       vchan_dma_desc_free_list(&vchan->vc, &head);
 
        spin_unlock_irqrestore(&vchan->vc.lock, flags);
 
+       vchan_dma_desc_free_list(&vchan->vc, &head);
+
        return 0;
 }
 
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index 43da8eeb18ef0710b21c43854d2f3a951991d525..1ed5dc1f597c8b64ceebd289696916cfac2060fc 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -519,15 +519,6 @@ static void s3c24xx_dma_start_next_txd(struct s3c24xx_dma_chan *s3cchan)
        s3c24xx_dma_start_next_sg(s3cchan, txd);
 }
 
-static void s3c24xx_dma_free_txd_list(struct s3c24xx_dma_engine *s3cdma,
-                               struct s3c24xx_dma_chan *s3cchan)
-{
-       LIST_HEAD(head);
-
-       vchan_get_all_descriptors(&s3cchan->vc, &head);
-       vchan_dma_desc_free_list(&s3cchan->vc, &head);
-}
-
 /*
  * Try to allocate a physical channel.  When successful, assign it to
  * this virtual channel, and initiate the next descriptor.  The
@@ -709,8 +700,9 @@ static int s3c24xx_dma_terminate_all(struct dma_chan *chan)
 {
        struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
        struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
+       LIST_HEAD(head);
        unsigned long flags;
-       int ret = 0;
+       int ret;
 
        spin_lock_irqsave(&s3cchan->vc.lock, flags);
 
@@ -734,7 +726,15 @@ static int s3c24xx_dma_terminate_all(struct dma_chan *chan)
        }
 
        /* Dequeue jobs not yet fired as well */
-       s3c24xx_dma_free_txd_list(s3cdma, s3cchan);
+
+       vchan_get_all_descriptors(&s3cchan->vc, &head);
+
+       spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
+
+       vchan_dma_desc_free_list(&s3cchan->vc, &head);
+
+       return 0;
+
 unlock:
        spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
 
diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c
index 465256fe8b1fc30209b1c4b29dd1c8261754f7e9..6d0bec9476365515c0ca46ecf08c514833d99f65 100644
--- a/drivers/dma/sf-pdma/sf-pdma.c
+++ b/drivers/dma/sf-pdma/sf-pdma.c
@@ -155,9 +155,9 @@ static void sf_pdma_free_chan_resources(struct dma_chan *dchan)
        kfree(chan->desc);
        chan->desc = NULL;
        vchan_get_all_descriptors(&chan->vchan, &head);
-       vchan_dma_desc_free_list(&chan->vchan, &head);
        sf_pdma_disclaim_chan(chan);
        spin_unlock_irqrestore(&chan->vchan.lock, flags);
+       vchan_dma_desc_free_list(&chan->vchan, &head);
 }
 
 static size_t sf_pdma_desc_residue(struct sf_pdma_chan *chan,
@@ -220,8 +220,8 @@ static int sf_pdma_terminate_all(struct dma_chan *dchan)
        chan->desc = NULL;
        chan->xfer_err = false;
        vchan_get_all_descriptors(&chan->vchan, &head);
-       vchan_dma_desc_free_list(&chan->vchan, &head);
        spin_unlock_irqrestore(&chan->vchan.lock, flags);
+       vchan_dma_desc_free_list(&chan->vchan, &head);
 
        return 0;
 }
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index e397a50058c8825f2ea3ac188f8f64d160bf08dc..4e1575e731d800d0f01bd5063be29569b511f85c 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -885,12 +885,13 @@ static int sun4i_dma_terminate_all(struct dma_chan *chan)
        }
 
        spin_lock_irqsave(&vchan->vc.lock, flags);
-       vchan_dma_desc_free_list(&vchan->vc, &head);
        /* Clear these so the vchan is usable again */
        vchan->processing = NULL;
        vchan->pchan = NULL;
        spin_unlock_irqrestore(&vchan->vc.lock, flags);
 
+       vchan_dma_desc_free_list(&vchan->vc, &head);
+
        return 0;
 }
 
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
index ec4adf4260a098488fff4471e82c2f1a89b90c54..660267ca5e42b0fb1d8be24f1588b09e421d0003 100644
--- a/drivers/dma/virt-dma.c
+++ b/drivers/dma/virt-dma.c
@@ -116,7 +116,11 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
 
        list_for_each_entry_safe(vd, _vd, head, node) {
                if (dmaengine_desc_test_reuse(&vd->tx)) {
+                       unsigned long flags;
+
+                       spin_lock_irqsave(&vc->lock, flags);
                        list_move_tail(&vd->node, &vc->desc_allocated);
+                       spin_unlock_irqrestore(&vc->lock, flags);
                } else {
                        dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
                        list_del(&vd->node);