430bed1378370fac19d363f9e4b2c697c8524236
[openwrt/staging/ynezz.git] /
1 From 39e4cf29431dec8aaad599d9734f7a0468a9c20b Mon Sep 17 00:00:00 2001
2 From: Laurentiu Tudor <laurentiu.tudor@nxp.com>
3 Date: Wed, 4 Apr 2018 12:31:05 +0300
4 Subject: [PATCH] dpaa_eth: fix iova handling for contiguous frames
5
6 The driver relies on the no longer valid assumption that dma addresses
7 (iovas) are identical to physical addresses and uses phys_to_virt() to
8 make iova -> vaddr conversions. Fix this by adding a function that does
9 proper iova -> phys conversions using the iommu api and update the code
10 to use it.
11 Also, a dma_unmap_single() call had to be moved further down the code
12 because iova -> vaddr conversions were required before the unmap.
13 For now only the contiguous frame case is handled and the SG case is
14 split in a following patch.
15 While at it, clean up a redundant dpaa_bpid2pool() call and pass the bp
16 as a parameter.
17
18 Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
19 Acked-by: Madalin Bucur <madalin.bucur@nxp.com>
20 ---
21 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 44 ++++++++++++++------------
22 1 file changed, 24 insertions(+), 20 deletions(-)
23
24 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
25 +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
26 @@ -50,6 +50,7 @@
27 #include <linux/highmem.h>
28 #include <linux/percpu.h>
29 #include <linux/dma-mapping.h>
30 +#include <linux/iommu.h>
31 #include <linux/sort.h>
32 #include <linux/phy_fixed.h>
33 #include <soc/fsl/bman.h>
34 @@ -1615,6 +1616,17 @@ static int dpaa_eth_refill_bpools(struct
35 return 0;
36 }
37
38 +static phys_addr_t dpaa_iova_to_phys(struct device *dev, dma_addr_t addr)
39 +{
40 + struct iommu_domain *domain;
41 +
42 + domain = iommu_get_domain_for_dev(dev);
43 + if (domain)
44 + return iommu_iova_to_phys(domain, addr);
45 + else
46 + return addr;
47 +}
48 +
49 /* Cleanup function for outgoing frame descriptors that were built on Tx path,
50 * either contiguous frames or scatter/gather ones.
51 * Skb freeing is not handled here.
52 @@ -1639,7 +1651,7 @@ static struct sk_buff *dpaa_cleanup_tx_f
53 int nr_frags, i;
54 u64 ns;
55
56 - skbh = (struct sk_buff **)phys_to_virt(addr);
57 + skbh = (struct sk_buff **)phys_to_virt(dpaa_iova_to_phys(dev, addr));
58 skb = *skbh;
59
60 if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
61 @@ -1718,25 +1730,21 @@ static u8 rx_csum_offload(const struct d
62 * accommodate the shared info area of the skb.
63 */
64 static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
65 - const struct qm_fd *fd)
66 + const struct qm_fd *fd,
67 + struct dpaa_bp *dpaa_bp,
68 + void *vaddr)
69 {
70 ssize_t fd_off = qm_fd_get_offset(fd);
71 - dma_addr_t addr = qm_fd_addr(fd);
72 - struct dpaa_bp *dpaa_bp;
73 struct sk_buff *skb;
74 - void *vaddr;
75
76 - vaddr = phys_to_virt(addr);
77 WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
78
79 - dpaa_bp = dpaa_bpid2pool(fd->bpid);
80 - if (!dpaa_bp)
81 - goto free_buffer;
82 -
83 skb = build_skb(vaddr, dpaa_bp->size +
84 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
85 - if (WARN_ONCE(!skb, "Build skb failure on Rx\n"))
86 - goto free_buffer;
87 + if (WARN_ONCE(!skb, "Build skb failure on Rx\n")) {
88 + skb_free_frag(vaddr);
89 + return NULL;
90 + }
91 WARN_ON(fd_off != priv->rx_headroom);
92 skb_reserve(skb, fd_off);
93 skb_put(skb, qm_fd_get_length(fd));
94 @@ -1744,10 +1752,6 @@ static struct sk_buff *contig_fd_to_skb(
95 skb->ip_summed = rx_csum_offload(priv, fd);
96
97 return skb;
98 -
99 -free_buffer:
100 - skb_free_frag(vaddr);
101 - return NULL;
102 }
103
104 /* Build an skb with the data of the first S/G entry in the linear portion and
105 @@ -2476,12 +2480,12 @@ static enum qman_cb_dqrr_result rx_defau
106 if (!dpaa_bp)
107 return qman_cb_dqrr_consume;
108
109 - dma_unmap_single(dpaa_bp->dev, addr, dpaa_bp->size, DMA_FROM_DEVICE);
110 -
111 /* prefetch the first 64 bytes of the frame or the SGT start */
112 - vaddr = phys_to_virt(addr);
113 + vaddr = phys_to_virt(dpaa_iova_to_phys(dpaa_bp->dev, addr));
114 prefetch(vaddr + qm_fd_get_offset(fd));
115
116 + dma_unmap_single(dpaa_bp->dev, addr, dpaa_bp->size, DMA_FROM_DEVICE);
117 +
118 /* The only FD types that we may receive are contig and S/G */
119 WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg));
120
121 @@ -2492,7 +2496,7 @@ static enum qman_cb_dqrr_result rx_defau
122 (*count_ptr)--;
123
124 if (likely(fd_format == qm_fd_contig))
125 - skb = contig_fd_to_skb(priv, fd);
126 + skb = contig_fd_to_skb(priv, fd, dpaa_bp, vaddr);
127 else
128 skb = sg_fd_to_skb(priv, fd);
129 if (!skb)