From 9821d27a36704d19c57d4b6c52585b9868703633 Mon Sep 17 00:00:00 2001
From: Camelia Groza <camelia.groza@nxp.com>
Date: Mon, 4 Sep 2017 13:41:17 +0300
Subject: [PATCH] sdk_dpaa: ls1043a errata: realign and linearize egress skbs

Allocate a new page and copy the skb's contents to it in order to
guarantee that 4K boundary crossings do not occur.

Signed-off-by: Camelia Groza <camelia.groza@nxp.com>
---
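Note: HAS_DMA_ISSUE() is defined in the dpaa_eth headers, outside this
diff. As a rough sketch of the property it tests, assuming the macro
simply flags a buffer that spans a 4K page boundary (the exact in-tree
definition may differ):

    /* Nonzero when [start, start + size) reaches into another 4K page:
     * the first and one-past-last bytes then differ in their address
     * bits above the 4K page offset.
     */
    #define HAS_DMA_ISSUE(start, size) \
            (((u64)(start) ^ ((u64)(start) + (u64)(size))) & ~0xFFFULL)

Under this reading, a010022_check_skb() returns true as soon as any part
of the skb (headroom, linear data, the full linear buffer, or a paged
fragment) crosses a 4K boundary, and the transmit path then copies the
whole skb into a single freshly allocated page.
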
 .../net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c | 159 +++++++++++----------
 1 file changed, 84 insertions(+), 75 deletions(-)

--- a/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c
@@ -742,86 +742,94 @@ int __hot skb_to_contig_fd(struct dpa_pr
 EXPORT_SYMBOL(skb_to_contig_fd);
 
-struct sk_buff *split_skb_at_4k_boundaries(struct sk_buff *skb)
+/* Verify the conditions that trigger the A010022 errata: 4K memory address
+ * crossings.
+ */
+bool a010022_check_skb(struct sk_buff *skb)
 {
-	unsigned int length, nr_frags, moved_len = 0;
-	u64 page_start;
-	struct page *page;
-	int i, j = 0;
+	int nr_frags, i = 0;
+	skb_frag_t *frag;
+
-	/* make sure skb is not shared */
-	skb = skb_share_check(skb, GFP_ATOMIC);
-	if (!skb)
-		return NULL;
-
+	/* Check if the headroom crosses a boundary */
+	if (HAS_DMA_ISSUE(skb->head, skb_headroom(skb)))
+		return true;
+
+	/* Check if the non-paged data crosses a boundary */
+	if (HAS_DMA_ISSUE(skb->data, skb_headlen(skb)))
+		return true;
+
+	/* Check if the entire linear skb crosses a boundary */
+	if (HAS_DMA_ISSUE(skb->head, skb_end_offset(skb)))
+		return true;
+
 	nr_frags = skb_shinfo(skb)->nr_frags;
-	page_start = (u64)skb->data;
-
-	/* split the linear part at the first 4k boundary and create one (big)
-	 * fragment with the rest
-	 */
-	if (HAS_DMA_ISSUE(skb->data, skb_headlen(skb))) {
-		/* we'll add one more frag, make sure there's room */
-		if (nr_frags + 1 > DPA_SGT_MAX_ENTRIES)
-			return NULL;
-
-		/* next page boundary */
-		page_start = (page_start + 0x1000) & ~0xFFF;
-		page = virt_to_page(page_start);
-
-		/* move the rest of fragments to make room for a new one at j */
-		for (i = nr_frags - 1; i >= j; i--)
-			skb_shinfo(skb)->frags[i + 1] = skb_shinfo(skb)->frags[i];
-
-		/* move length bytes to a paged fragment at j */
-		length = min((u64)0x1000,
-			     (u64)skb->data + skb_headlen(skb) - page_start);
-		skb->data_len += length;
-		moved_len += length;
-		skb_fill_page_desc(skb, j++, page, 0, length);
-
-		skb_shinfo(skb)->nr_frags = ++nr_frags;
-
+	while (i < nr_frags) {
+		frag = &skb_shinfo(skb)->frags[i];
+
+		/* Check if the paged fragment crosses a boundary from its
+		 * offset to its end.
+		 */
+		if (HAS_DMA_ISSUE(frag->page_offset, frag->size))
+			return true;
+
+		i++;
+	}
+
+	return false;
+}
-		/* adjust the tail pointer */
-		skb->tail -= moved_len;
-	}
-
-	/* split any paged fragment that crosses a 4K boundary */
-	while (j < nr_frags) {
-		frag = &skb_shinfo(skb)->frags[j];
-
-		/* if there is a 4K boundary between the fragment's offset and end */
-		if (HAS_DMA_ISSUE(frag->page_offset, frag->size)) {
-			/* we'll add one more frag, make sure there's room */
-			if (nr_frags + 1 > DPA_SGT_MAX_ENTRIES)
-				return NULL;
-
-			/* new page boundary */
-			page_start = (u64)page_address(skb_frag_page(frag)) +
-				     frag->page_offset + 0x1000;
-			page_start = (u64)page_start & ~0xFFF;
-			page = virt_to_page(page_start);
-
-			/* move the rest of fragments to make room for a new one at j+1 */
-			for (i = nr_frags - 1; i > j; i--)
-				skb_shinfo(skb)->frags[i + 1] =
-					skb_shinfo(skb)->frags[i];
-
-			/* move length bytes to a new paged fragment at j+1 */
-			length = (u64)page_address(skb_frag_page(frag)) +
-				 frag->page_offset + frag->size - page_start;
-			frag->size -= length;
-			skb_fill_page_desc(skb, j + 1, page, 0, length);
-
-			skb_shinfo(skb)->nr_frags = ++nr_frags;
-		}
-
-		/* move to next frag */
-		j++;
-	}
-
-	return skb;
-}
+/* Realign the skb by copying its contents at the start of a newly allocated
+ * page. Build a new skb around the new buffer and release the old one.
+ * A performance drop should be expected.
+ */
+struct sk_buff *a010022_realign_skb(struct sk_buff *skb)
+{
+	int headroom = skb_headroom(skb);
+	struct sk_buff *nskb = NULL;
+	struct page *npage;
+	void *npage_addr;
+	int nsize;
+
+	npage = alloc_page(GFP_ATOMIC);
+	if (unlikely(!npage)) {
+		WARN_ONCE(1, "Memory allocation failure\n");
+		return NULL;
+	}
+	npage_addr = page_address(npage);
+
+	/* For the new skb we only need the old one's data (both non-paged and
+	 * paged) and a headroom large enough to fit our private info. We can
+	 * skip the old tailroom.
+	 *
+	 * Make sure the new linearized buffer will not exceed a page's size.
+	 */
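+	/* Hypothetical example: with 256 bytes of headroom, a 1500 byte
+	 * frame needs 256 + 1500 + SKB_DATA_ALIGN(sizeof(struct
+	 * skb_shared_info)) bytes and fits comfortably in one 4K page; a
+	 * jumbo frame would fail the size check below and the skb would
+	 * be dropped by the caller.
+	 */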
+	nsize = headroom + skb->len +
+		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	if (unlikely(nsize > 4096))
+		goto err;
+
+	nskb = build_skb(npage_addr, nsize);
+	if (unlikely(!nskb))
+		goto err;
+
+	/* Code borrowed and adapted from skb_copy() */
+	skb_reserve(nskb, headroom);
+	skb_put(nskb, skb->len);
+	if (skb_copy_bits(skb, 0, nskb->data, skb->len)) {
+		WARN_ONCE(1, "skb parsing failure\n");
+		goto err;
+	}
+	copy_skb_header(nskb, skb);
+
+	dev_kfree_skb(skb);
+	return nskb;
+
+err:
+	if (nskb)
+		/* the skb built around the new page owns it */
+		dev_kfree_skb(nskb);
+	else
+		put_page(npage);
+	return NULL;
+}
@@ -1016,9 +1024,9 @@ int __hot dpa_tx_extended(struct sk_buff
 #endif /* CONFIG_FSL_DPAA_TS */
 
-	if (unlikely(dpaa_errata_a010022)) {
-		skb = split_skb_at_4k_boundaries(skb);
+	if (unlikely(dpaa_errata_a010022) && a010022_check_skb(skb)) {
+		skb = a010022_realign_skb(skb);
 		if (!skb)
 			goto skb_to_fd_failed;
 	}
@@ -1064,8 +1072,9 @@ resplit_4k:
-		if (unlikely(dpaa_errata_a010022))
+		if (unlikely(dpaa_errata_a010022) &&
+		    a010022_check_skb(skb))
 			goto resplit_4k;
 		/* skb_copy() has now linearized the skbuff. */
 	} else if (unlikely(nonlinear)) {