* headers if needed
*/
__u8 encapsulation:1;
- /* 6/8 bit hole (depending on ndisc_nodetype presence) */
+ __u8 encap_hdr_csum:1;
+ /* 5/7 bit hole (depending on ndisc_nodetype presence) */
kmemcheck_bitfield_end(flags2);
#if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL
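The new encap_hdr_csum bit tells the GSO code that the encapsulation header
itself carries a checksum that must be filled in for every segment. As a
hedged sketch (the helper name and the SKB_GSO_UDP_TUNNEL usage are
illustrative, not part of this patch), an encapsulating transmit path could
mark the skb like this, assuming <linux/skbuff.h> is already included:

	/* Illustrative only: request per-segment resolution of the outer
	 * header checksum before handing the skb to the stack for GSO.
	 */
	static void tunnel_prepare_gso(struct sk_buff *skb, bool outer_csum)
	{
		skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
		if (outer_csum)
			skb->encap_hdr_csum = 1;
	}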
struct skb_gso_cb {
int mac_offset;
int encap_level;
+ __u16 csum_start; /* offset of checksummed data from skb->head */
};
#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)(skb)->cb)
return 0;
}
+/* Compute the checksum for a gso segment. First compute the checksum value
+ * from the start of the transport header to SKB_GSO_CB(skb)->csum_start, and
+ * then add in skb->csum (the checksum from csum_start to the end of the
+ * packet). skb->csum and csum_start are then updated to reflect the checksum
+ * of the resultant packet starting from the transport header; the new
+ * skb->csum is taken from the res argument (normally zero, or the complement
+ * of the checksum of a pseudo header).
+ */
+static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
+{
+ int plen = SKB_GSO_CB(skb)->csum_start - skb_headroom(skb) -
+ skb_transport_offset(skb);
+ __sum16 csum;
+
+ csum = csum_fold(csum_partial(skb_transport_header(skb),
+ plen, skb->csum));
+ skb->csum = res;
+ SKB_GSO_CB(skb)->csum_start -= plen;
+
+ return csum;
+}
+
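As a hedged sketch of how a segmentation callback might use this helper (the
function and parameter names below are illustrative and not taken from this
patch), an outer header whose checksummed region starts at the segment's
transport header can be resolved per segment like this:

	/* Illustrative only: fill an outer checksum field covering the whole
	 * segment from the transport header. res is 0 when the checksum has
	 * no pseudo header (GRE-style); for a UDP-style checksum, res would
	 * instead be the complement of the pseudo header checksum.
	 */
	static void gso_fill_outer_csum(struct sk_buff *seg, __sum16 *csum_field)
	{
		*csum_field = 0;	/* field counts as zero while summing */
		*csum_field = gso_make_checksum(seg, 0);
	}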
static inline bool skb_is_gso(const struct sk_buff *skb)
{
return skb_shinfo(skb)->gso_size;
if (unlikely(!proto))
return ERR_PTR(-EINVAL);
- csum = !!can_checksum_protocol(features, proto);
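+ /* If the encapsulation header carries its own checksum, compute the
+ * per-segment payload checksums in software (csum stays false) so the
+ * outer checksum can later be derived with gso_make_checksum().
+ */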
+ csum = !head_skb->encap_hdr_csum &&
+ !!can_checksum_protocol(features, proto);
+
__skb_push(head_skb, doffset);
headroom = skb_headroom(head_skb);
pos = skb_headlen(head_skb);
nskb->csum = skb_copy_and_csum_bits(head_skb, offset,
skb_put(nskb, len),
len, 0);
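+ /* Record, as an offset from skb->head, where the data covered
+ * by nskb->csum begins.
+ */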
+ SKB_GSO_CB(nskb)->csum_start =
+ skb_headroom(nskb) + doffset;
continue;
}
nskb->csum = skb_checksum(nskb, doffset,
nskb->len - doffset, 0);
nskb->ip_summed = CHECKSUM_NONE;
+ SKB_GSO_CB(nskb)->csum_start =
+ skb_headroom(nskb) + doffset;
}
} while ((offset += len) < head_skb->len);
return skb;
}
+ /* If the packet is not GSO and we are resolving any partial checksum
+ * in software, clear the encapsulation flag. This allows setting
+ * CHECKSUM_PARTIAL on the outer header without confusing devices that
+ * implement NETIF_F_IP_CSUM but do not understand encapsulation.
+ */
+ if (csum_help)
+ skb->encapsulation = 0;
+
if (skb->ip_summed == CHECKSUM_PARTIAL && csum_help) {
err = skb_checksum_help(skb);
if (unlikely(err))