From 9821d27a36704d19c57d4b6c52585b9868703633 Mon Sep 17 00:00:00 2001
From: Camelia Groza <camelia.groza@nxp.com>
Date: Mon, 4 Sep 2017 13:41:17 +0300
Subject: [PATCH] sdk_dpaa: ls1043a errata: realign and linearize egress skbs

Allocate a new page and copy the skb's contents to it in order to
guarantee that 4k boundary crossings do not occur.
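
The 4K-crossing test relies on the driver's existing HAS_DMA_ISSUE() helper,
which is defined elsewhere in the sdk_dpaa code. As a rough sketch only (an
assumed form, not the in-tree definition), such a check reports whether a
range of 'size' bytes starting at 'addr' straddles a 4 KiB boundary:

  /* Illustrative sketch, not the driver's actual macro: evaluates to true
   * when [addr, addr + size) does not fit inside one 4 KiB region. Works
   * for plain offsets (e.g. frag->page_offset) as well as addresses.
   */
  #define HAS_DMA_ISSUE(addr, size)                    \
          ((((u64)(addr)) & ~0xFFFULL) !=              \
           (((u64)(addr) + (size) - 1) & ~0xFFFULL))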

Signed-off-by: Camelia Groza <camelia.groza@nxp.com>
---
 .../net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c | 159 +++++++++++----------
 1 file changed, 84 insertions(+), 75 deletions(-)

--- a/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c
@@ -742,86 +742,94 @@ int __hot skb_to_contig_fd(struct dpa_pr
 EXPORT_SYMBOL(skb_to_contig_fd);
 
 #ifndef CONFIG_PPC
-struct sk_buff *split_skb_at_4k_boundaries(struct sk_buff *skb)
+/* Verify the conditions that trigger the A010022 errata: 4K memory address
+ * crossings.
+ */
+bool a010022_check_skb(struct sk_buff *skb)
 {
-	unsigned int length, nr_frags, moved_len = 0;
-	u64 page_start;
-	struct page *page;
+	int nr_frags, i = 0;
 	skb_frag_t *frag;
-	int i = 0, j = 0;
 
-	/* make sure skb is not shared */
-	skb = skb_share_check(skb, GFP_ATOMIC);
-	if (!skb)
-		return NULL;
+	/* Check if the headroom crosses a boundary */
+	if (HAS_DMA_ISSUE(skb->head, skb_headroom(skb)))
+		return true;
+
+	/* Check if the non-paged data crosses a boundary */
+	if (HAS_DMA_ISSUE(skb->data, skb_headlen(skb)))
+		return true;
+
+	/* Check if the entire linear skb crosses a boundary */
+	if (HAS_DMA_ISSUE(skb->head, skb_end_offset(skb)))
+		return true;
 
 	nr_frags = skb_shinfo(skb)->nr_frags;
-	page_start = (u64)skb->data;
 
-	/* split the linear part at the first 4k boundary and create one (big)
-	 * fragment with the rest
-	 */
-	if (HAS_DMA_ISSUE(skb->data, skb_headlen(skb))) {
-		/* we'll add one more frag, make sure there's room */
-		if (nr_frags + 1 > DPA_SGT_MAX_ENTRIES)
-			return NULL;
-
-		/* next page boundary */
-		page_start = (page_start + 0x1000) & ~0xFFF;
-		page = virt_to_page(page_start);
-
-		/* move the rest of fragments to make room for a new one at j */
-		for (i = nr_frags - 1; i >= j; i--)
-			skb_shinfo(skb)->frags[i + 1] = skb_shinfo(skb)->frags[i];
-
-		/* move length bytes to a paged fragment at j */
-		length = min((u64)0x1000,
-			     (u64)skb->data + skb_headlen(skb) - page_start);
-		skb->data_len += length;
-		moved_len += length;
-		skb_fill_page_desc(skb, j++, page, 0, length);
-		get_page(page);
-		skb_shinfo(skb)->nr_frags = ++nr_frags;
+	while (i < nr_frags) {
+		frag = &skb_shinfo(skb)->frags[i];
+
+		/* Check if the paged fragment crosses a boundary from its
+		 * offset to its end.
+		 */
+		if (HAS_DMA_ISSUE(frag->page_offset, frag->size))
+			return true;
+
+		i++;
 	}
-	/* adjust the tail pointer */
-	skb->tail -= moved_len;
-	j = 0;
-
-	/* split any paged fragment that crosses a 4K boundary */
-	while (j < nr_frags) {
-		frag = &skb_shinfo(skb)->frags[j];
-
-		/* if there is a 4K boundary between the fragment's offset and end */
-		if (HAS_DMA_ISSUE(frag->page_offset, frag->size)) {
-			/* we'll add one more frag, make sure there's room */
-			if (nr_frags + 1 > DPA_SGT_MAX_ENTRIES)
-				return NULL;
-
-			/* new page boundary */
-			page_start = (u64)page_address(skb_frag_page(frag)) +
-				     frag->page_offset + 0x1000;
-			page_start = (u64)page_start & ~0xFFF;
-			page = virt_to_page(page_start);
-
-			/* move the rest of fragments to make room for a new one at j+1 */
-			for (i = nr_frags - 1; i > j; i--)
-				skb_shinfo(skb)->frags[i + 1] =
-						skb_shinfo(skb)->frags[i];
-
-			/* move length bytes to a new paged fragment at j+1 */
-			length = (u64)page_address(skb_frag_page(frag)) +
-				 frag->page_offset + frag->size - page_start;
-			frag->size -= length;
-			skb_fill_page_desc(skb, j + 1, page, 0, length);
-			get_page(page);
-			skb_shinfo(skb)->nr_frags = ++nr_frags;
-		}
 
-		/* move to next frag */
-		j++;
+	return false;
+}
+
+/* Realign the skb by copying its contents at the start of a newly allocated
+ * page. Build a new skb around the new buffer and release the old one.
+ * A performance drop should be expected.
+ */
+struct sk_buff *a010022_realign_skb(struct sk_buff *skb)
+{
+	int headroom = skb_headroom(skb);
+	struct sk_buff *nskb = NULL;
+	struct page *npage;
+	void *npage_addr;
+	int nsize;
+
+	npage = alloc_page(GFP_ATOMIC);
+	if (unlikely(!npage)) {
+		WARN_ONCE(1, "Memory allocation failure\n");
+		return NULL;
+	}
+	npage_addr = page_address(npage);
+
+	/* For the new skb we only need the old one's data (both non-paged and
+	 * paged) and a headroom large enough to fit our private info. We can
+	 * skip the old tailroom.
+	 *
+	 * Make sure the new linearized buffer will not exceed a page's size.
+	 */
+	nsize = headroom + skb->len +
+		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	if (unlikely(nsize > 4096))
+		goto err;
+
+	nskb = build_skb(npage_addr, nsize);
+	if (unlikely(!nskb))
+		goto err;
+
+	/* Code borrowed and adapted from skb_copy() */
+	skb_reserve(nskb, headroom);
+	skb_put(nskb, skb->len);
+	if (skb_copy_bits(skb, 0, nskb->data, skb->len)) {
+		WARN_ONCE(1, "skb parsing failure\n");
+		goto err;
 	}
+	copy_skb_header(nskb, skb);
+
+	dev_kfree_skb(skb);
+	return nskb;
 
-	return skb;
+err:
+	if (nskb)
+		dev_kfree_skb(nskb);
+	put_page(npage);
+	return NULL;
 }
 #endif
 
@@ -1016,9 +1024,9 @@ int __hot dpa_tx_extended(struct sk_buff
 #endif /* CONFIG_FSL_DPAA_TS */
 
 #ifndef CONFIG_PPC
-resplit_4k:
-	if (unlikely(dpaa_errata_a010022)) {
-		skb = split_skb_at_4k_boundaries(skb);
+realign_4k:
+	if (unlikely(dpaa_errata_a010022) && a010022_check_skb(skb)) {
+		skb = a010022_realign_skb(skb);
 		if (!skb)
 			goto skb_to_fd_failed;
 	}
@@ -1064,8 +1072,9 @@ resplit_4k:
 		kfree_skb(skb);
 		skb = nskb;
 #ifndef CONFIG_PPC
-		if (unlikely(dpaa_errata_a010022))
-			goto resplit_4k;
+		if (unlikely(dpaa_errata_a010022) &&
+		    a010022_check_skb(skb))
+			goto realign_4k;
 #endif
 		/* skb_copy() has now linearized the skbuff. */
 	} else if (unlikely(nonlinear)) {