--- /dev/null
+From 74e0deb89a8ba27c132b1f0e08643e215b5c1f92 Mon Sep 17 00:00:00 2001
+From: Christoph Hellwig <hch@lst.de>
+Date: Mon, 11 Feb 2019 14:20:00 +0100
+Subject: [PATCH] lantiq_etop: pass struct device to DMA API functions
+
+The DMA API generally relies on a struct device to work properly, and
+only barely works without one for legacy reasons. Pass the easily
+available struct device from the platform_device to remedy this.
+
+Note this driver seems to lack dma_unmap_* calls entirely, but fixing
+that is left for another time.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/lantiq_etop.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/lantiq_etop.c
++++ b/drivers/net/ethernet/lantiq_etop.c
+@@ -112,10 +112,12 @@ struct ltq_etop_priv {
+ static int
+ ltq_etop_alloc_skb(struct ltq_etop_chan *ch)
+ {
++ struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
++
+ ch->skb[ch->dma.desc] = netdev_alloc_skb(ch->netdev, MAX_DMA_DATA_LEN);
+ if (!ch->skb[ch->dma.desc])
+ return -ENOMEM;
+- ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(NULL,
++ ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(&priv->pdev->dev,
+ ch->skb[ch->dma.desc]->data, MAX_DMA_DATA_LEN,
+ DMA_FROM_DEVICE);
+ ch->dma.desc_base[ch->dma.desc].addr =
+@@ -493,7 +495,7 @@ ltq_etop_tx(struct sk_buff *skb, struct
+ netif_trans_update(dev);
+
+ spin_lock_irqsave(&priv->lock, flags);
+- desc->addr = ((unsigned int) dma_map_single(NULL, skb->data, len,
++ desc->addr = ((unsigned int) dma_map_single(&priv->pdev->dev, skb->data, len,
+ DMA_TO_DEVICE)) - byte_offset;
+ wmb();
+ desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
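As the commit message notes, the driver never calls dma_unmap_*() at all. A
minimal sketch of what the missing RX-side unmap could look like, assuming a
hypothetical saved copy of the dma_addr_t returned by dma_map_single() in
ltq_etop_alloc_skb() (the descriptor address itself is post-processed, so the
real fix would probably need a shadow array of handles):

	/* sketch only: unmap the RX buffer before handing the skb to the
	 * stack; rx_dma_addr is a hypothetical saved copy of the handle
	 * returned by dma_map_single() above
	 */
	dma_unmap_single(&priv->pdev->dev, rx_dma_addr,
			 MAX_DMA_DATA_LEN, DMA_FROM_DEVICE);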
--- /dev/null
+From 74f03104ed465ff71b11076ef620e4eaa53dbf74 Mon Sep 17 00:00:00 2001
+From: Christoph Hellwig <hch@lst.de>
+Date: Fri, 1 Feb 2019 09:47:44 +0100
+Subject: [PATCH] MIPS: lantiq: pass struct device to DMA API functions
+
+The DMA API generally relies on a struct device to work properly, and
+only barely works without one for legacy reasons. Pass the easily
+available struct device from the platform_device to remedy this.
+
+Also use GFP_KERNEL instead of GFP_ATOMIC as the gfp_t for the memory
+allocation, as we aren't in interrupt context or under a lock.
+
+Note that this whole function looks somewhat bogus given that we never
+even look at the returned dma address, and the CPHYSADDR magic on
+a returned noncached mapping looks "interesting". But I'll leave
+that to people more familiar with the code to sort out.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Paul Burton <paul.burton@mips.com>
+Cc: John Crispin <john@phrozen.org>
+Cc: Vinod Koul <vkoul@kernel.org>
+Cc: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+Cc: Nicolas Ferre <nicolas.ferre@microchip.com>
+Cc: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
+Cc: Felipe Balbi <balbi@kernel.org>
+Cc: linux-mips@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Cc: dmaengine@vger.kernel.org
+Cc: netdev@vger.kernel.org
+Cc: linux-usb@vger.kernel.org
+Cc: linux-fbdev@vger.kernel.org
+Cc: alsa-devel@alsa-project.org
+Cc: iommu@lists.linux-foundation.org
+---
+ arch/mips/lantiq/xway/vmmc.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/mips/lantiq/xway/vmmc.c
++++ b/arch/mips/lantiq/xway/vmmc.c
+@@ -31,8 +31,8 @@ static int vmmc_probe(struct platform_de
+ dma_addr_t dma;
+
+ cp1_base =
+- (void *) CPHYSADDR(dma_alloc_coherent(NULL, CP1_SIZE,
+- &dma, GFP_ATOMIC));
++ (void *) CPHYSADDR(dma_alloc_coherent(&pdev->dev, CP1_SIZE,
++ &dma, GFP_KERNEL));
+
+ gpio_count = of_gpio_count(pdev->dev.of_node);
+ while (gpio_count > 0) {
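For contrast with the CPHYSADDR conversion the commit message calls
"interesting", the conventional dma_alloc_coherent() pattern keeps both the
returned CPU pointer and the DMA handle; a minimal sketch, illustrative only
and not a drop-in replacement for vmmc_probe():

	void *cpu_addr;
	dma_addr_t dma;

	cpu_addr = dma_alloc_coherent(&pdev->dev, CP1_SIZE, &dma, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;
	/* access the buffer through cpu_addr, program the device with dma */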
struct net_device *netdev;
struct napi_struct napi;
struct ltq_dma_channel dma;
-@@ -98,21 +150,34 @@ struct ltq_etop_chan {
+@@ -98,23 +150,36 @@ struct ltq_etop_chan {
struct ltq_etop_priv {
struct net_device *netdev;
struct platform_device *pdev;
+ struct clk *clk_ppe;
+ struct clk *clk_switch;
+ struct clk *clk_ephy;
-+ struct clk *clk_ephycgu;
++ struct clk *clk_ephycgu;
};
+static int ltq_etop_mdio_wr(struct mii_bus *bus, int phy_addr,
+			    int phy_reg, u16 phy_data);
static int
ltq_etop_alloc_skb(struct ltq_etop_chan *ch)
{
+ struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
+
- ch->skb[ch->dma.desc] = netdev_alloc_skb(ch->netdev, MAX_DMA_DATA_LEN);
+ ch->skb[ch->dma.desc] = dev_alloc_skb(MAX_DMA_DATA_LEN);
if (!ch->skb[ch->dma.desc])
return -ENOMEM;
- ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(NULL,
-@@ -147,8 +212,11 @@ ltq_etop_hw_receive(struct ltq_etop_chan
+ ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(&priv->pdev->dev,
+@@ -149,8 +214,11 @@ ltq_etop_hw_receive(struct ltq_etop_chan
spin_unlock_irqrestore(&priv->lock, flags);
skb_put(skb, len);
}
static int
-@@ -156,7 +224,9 @@ ltq_etop_poll_rx(struct napi_struct *nap
+@@ -158,7 +226,9 @@ ltq_etop_poll_rx(struct napi_struct *nap
{
struct ltq_etop_chan *ch = container_of(napi,
struct ltq_etop_chan, napi);
while (work_done < budget) {
struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
-@@ -168,7 +238,9 @@ ltq_etop_poll_rx(struct napi_struct *nap
+@@ -170,7 +240,9 @@ ltq_etop_poll_rx(struct napi_struct *nap
}
if (work_done < budget) {
napi_complete_done(&ch->napi, work_done);
}
return work_done;
}
-@@ -180,12 +252,14 @@ ltq_etop_poll_tx(struct napi_struct *nap
+@@ -182,12 +254,14 @@ ltq_etop_poll_tx(struct napi_struct *nap
container_of(napi, struct ltq_etop_chan, napi);
struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
struct netdev_queue *txq =
dev_kfree_skb_any(ch->skb[ch->tx_free]);
ch->skb[ch->tx_free] = NULL;
memset(&ch->dma.desc_base[ch->tx_free], 0,
-@@ -198,7 +272,9 @@ ltq_etop_poll_tx(struct napi_struct *nap
+@@ -200,7 +274,9 @@ ltq_etop_poll_tx(struct napi_struct *nap
if (netif_tx_queue_stopped(txq))
netif_tx_start_queue(txq);
napi_complete(&ch->napi);
return 1;
}
-@@ -206,9 +282,10 @@ static irqreturn_t
+@@ -208,9 +284,10 @@ static irqreturn_t
ltq_etop_dma_irq(int irq, void *_priv)
{
struct ltq_etop_priv *priv = _priv;
return IRQ_HANDLED;
}
-@@ -220,7 +297,7 @@ ltq_etop_free_channel(struct net_device
+@@ -222,7 +299,7 @@ ltq_etop_free_channel(struct net_device
ltq_dma_free(&ch->dma);
if (ch->dma.irq)
free_irq(ch->dma.irq, priv);
int desc;
for (desc = 0; desc < LTQ_DESC_NUM; desc++)
dev_kfree_skb_any(ch->skb[ch->dma.desc]);
-@@ -231,66 +308,135 @@ static void
+@@ -233,66 +310,135 @@ static void
ltq_etop_hw_exit(struct net_device *dev)
{
struct ltq_etop_priv *priv = netdev_priv(dev);
}
static void
-@@ -309,6 +455,39 @@ static const struct ethtool_ops ltq_etop
+@@ -311,6 +457,39 @@ static const struct ethtool_ops ltq_etop
};
static int
ltq_etop_mdio_wr(struct mii_bus *bus, int phy_addr, int phy_reg, u16 phy_data)
{
u32 val = MDIO_REQUEST |
-@@ -316,9 +495,9 @@ ltq_etop_mdio_wr(struct mii_bus *bus, in
+@@ -318,9 +497,9 @@ ltq_etop_mdio_wr(struct mii_bus *bus, in
((phy_reg & MDIO_REG_MASK) << MDIO_REG_OFFSET) |
phy_data;
return 0;
}
-@@ -329,12 +508,12 @@ ltq_etop_mdio_rd(struct mii_bus *bus, in
+@@ -331,12 +510,12 @@ ltq_etop_mdio_rd(struct mii_bus *bus, in
((phy_addr & MDIO_ADDR_MASK) << MDIO_ADDR_OFFSET) |
((phy_reg & MDIO_REG_MASK) << MDIO_REG_OFFSET);
return val;
}
-@@ -349,8 +528,18 @@ ltq_etop_mdio_probe(struct net_device *d
+@@ -351,8 +530,18 @@ ltq_etop_mdio_probe(struct net_device *d
{
struct ltq_etop_priv *priv = netdev_priv(dev);
struct phy_device *phydev;
if (!phydev) {
netdev_err(dev, "no PHY found\n");
-@@ -358,21 +547,18 @@ ltq_etop_mdio_probe(struct net_device *d
+@@ -360,21 +549,18 @@ ltq_etop_mdio_probe(struct net_device *d
}
phydev = phy_connect(dev, phydev_name(phydev),
phydev->advertising = phydev->supported;
phy_attached_info(phydev);
-@@ -393,8 +579,13 @@ ltq_etop_mdio_init(struct net_device *de
+@@ -395,8 +581,13 @@ ltq_etop_mdio_init(struct net_device *de
}
priv->mii_bus->priv = dev;
priv->mii_bus->name = "ltq_mii";
snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
priv->pdev->name, priv->pdev->id);
-@@ -431,17 +622,19 @@ static int
+@@ -433,17 +624,19 @@ static int
ltq_etop_open(struct net_device *dev)
{
struct ltq_etop_priv *priv = netdev_priv(dev);
netif_tx_start_all_queues(dev);
return 0;
}
-@@ -450,18 +643,19 @@ static int
+@@ -452,18 +645,19 @@ static int
ltq_etop_stop(struct net_device *dev)
{
struct ltq_etop_priv *priv = netdev_priv(dev);
return 0;
}
-@@ -471,16 +665,16 @@ ltq_etop_tx(struct sk_buff *skb, struct
+@@ -473,16 +667,16 @@ ltq_etop_tx(struct sk_buff *skb, struct
int queue = skb_get_queue_mapping(skb);
struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);
struct ltq_etop_priv *priv = netdev_priv(dev);
netdev_err(dev, "tx ring full\n");
netif_tx_stop_queue(txq);
return NETDEV_TX_BUSY;
-@@ -488,7 +682,7 @@ ltq_etop_tx(struct sk_buff *skb, struct
+@@ -490,7 +684,7 @@ ltq_etop_tx(struct sk_buff *skb, struct
/* dma needs to start on a 16 byte aligned address */
byte_offset = CPHYSADDR(skb->data) % 16;
netif_trans_update(dev);
-@@ -498,11 +692,11 @@ ltq_etop_tx(struct sk_buff *skb, struct
+@@ -500,11 +694,11 @@ ltq_etop_tx(struct sk_buff *skb, struct
wmb();
desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
netif_tx_stop_queue(txq);
return NETDEV_TX_OK;
-@@ -516,8 +710,10 @@ ltq_etop_change_mtu(struct net_device *d
+@@ -518,8 +712,10 @@ ltq_etop_change_mtu(struct net_device *d
dev->mtu = new_mtu;
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
-@@ -577,6 +773,9 @@ ltq_etop_init(struct net_device *dev)
+@@ -579,6 +775,9 @@ ltq_etop_init(struct net_device *dev)
if (err)
goto err_hw;
ltq_etop_change_mtu(dev, 1500);
memcpy(&mac, &priv->pldata->mac, sizeof(struct sockaddr));
if (!is_valid_ether_addr(mac.sa_data)) {
-@@ -594,9 +793,10 @@ ltq_etop_init(struct net_device *dev)
+@@ -596,9 +795,10 @@ ltq_etop_init(struct net_device *dev)
dev->addr_assign_type = NET_ADDR_RANDOM;
ltq_etop_set_multicast_list(dev);
return 0;
err_netdev:
-@@ -616,6 +816,9 @@ ltq_etop_tx_timeout(struct net_device *d
+@@ -618,6 +818,9 @@ ltq_etop_tx_timeout(struct net_device *d
err = ltq_etop_hw_init(dev);
if (err)
goto err_hw;
netif_trans_update(dev);
netif_wake_queue(dev);
return;
-@@ -639,14 +842,19 @@ static const struct net_device_ops ltq_e
+@@ -641,14 +844,19 @@ static const struct net_device_ops ltq_e
.ndo_tx_timeout = ltq_etop_tx_timeout,
};
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
-@@ -672,31 +880,62 @@ ltq_etop_probe(struct platform_device *p
+@@ -674,31 +882,62 @@ ltq_etop_probe(struct platform_device *p
goto err_out;
}
err = register_netdev(dev);
if (err)
-@@ -725,31 +964,22 @@ ltq_etop_remove(struct platform_device *
+@@ -727,31 +966,22 @@ ltq_etop_remove(struct platform_device *
return 0;
}