--- /dev/null
+From: Felix Fietkau <nbd@nbd.name>
+Date: Sat, 27 Apr 2024 18:54:25 +0200
+Subject: [PATCH] net: bridge: fix multicast-to-unicast with fraglist GSO
+
+Calling skb_copy on a SKB_GSO_FRAGLIST skb is not valid, since it returns
+an invalid linearized skb. This code only needs to change the ethernet
+header, so pskb_copy is the right function to call here.
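+
+A minimal sketch of the distinction (illustrative only; "skb" and "addr"
+here stand for the fraglist GSO skb and the target unicast address):
+
+	/* skb_copy() would linearize the fraglist into a single buffer;
+	 * pskb_copy() copies only the skb head and shares the frag/fraglist
+	 * data, so the GSO geometry stays valid.
+	 */
+	skb = pskb_copy(skb, GFP_ATOMIC);
+	if (skb)
+		memcpy(eth_hdr(skb)->h_dest, addr, ETH_ALEN);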
+
+Fixes: 6db6f0eae605 ("bridge: multicast to unicast")
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/net/bridge/br_forward.c
++++ b/net/bridge/br_forward.c
+@@ -261,7 +261,7 @@ static void maybe_deliver_addr(struct ne
+ 	if (skb->dev == p->dev && ether_addr_equal(src, addr))
+ 		return;
+
+-	skb = skb_copy(skb, GFP_ATOMIC);
++	skb = pskb_copy(skb, GFP_ATOMIC);
+ 	if (!skb) {
+ 		DEV_STATS_INC(dev, tx_dropped);
+ 		return;
--- /dev/null
+From: Felix Fietkau <nbd@nbd.name>
+Date: Sat, 27 Apr 2024 19:29:45 +0200
+Subject: [PATCH] net: core: reject skb_copy(_expand) for fraglist GSO skbs
+
+SKB_GSO_FRAGLIST skbs must not be linearized, otherwise they become
+invalid. Return NULL if such an skb is passed to skb_copy or
+skb_copy_expand, in order to prevent a crash on a potential later
+call to skb_gso_segment.
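+
+As a hedged caller-side sketch (placeholder names, not part of this
+patch), code that may hold fraglist GSO skbs can pick a copy helper that
+preserves the frag geometry instead of hitting the new rejection path:
+
+	if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)
+		nskb = pskb_copy(skb, GFP_ATOMIC);	/* keeps fraglist intact */
+	else
+		nskb = skb_copy(skb, GFP_ATOMIC);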
+
+Fixes: 3a1296a38d0c ("net: Support GRO/GSO fraglist chaining.")
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -1720,11 +1720,17 @@ static inline int skb_alloc_rx_flag(cons
+
+ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
+ {
+-	int headerlen = skb_headroom(skb);
+-	unsigned int size = skb_end_offset(skb) + skb->data_len;
+-	struct sk_buff *n = __alloc_skb(size, gfp_mask,
+-					skb_alloc_rx_flag(skb), NUMA_NO_NODE);
++	struct sk_buff *n;
++	unsigned int size;
++	int headerlen;
+
++	if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST))
++		return NULL;
++
++	headerlen = skb_headroom(skb);
++	size = skb_end_offset(skb) + skb->data_len;
++	n = __alloc_skb(size, gfp_mask,
++			skb_alloc_rx_flag(skb), NUMA_NO_NODE);
+ 	if (!n)
+ 		return NULL;
+
+@@ -2037,12 +2043,17 @@ struct sk_buff *skb_copy_expand(const st
+ 	/*
+ 	 *	Allocate the copy buffer
+ 	 */
+-	struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
+-					gfp_mask, skb_alloc_rx_flag(skb),
+-					NUMA_NO_NODE);
+-	int oldheadroom = skb_headroom(skb);
+ 	int head_copy_len, head_copy_off;
++	struct sk_buff *n;
++	int oldheadroom;
++
++	if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST))
++		return NULL;
+
++	oldheadroom = skb_headroom(skb);
++	n = __alloc_skb(newheadroom + skb->len + newtailroom,
++			gfp_mask, skb_alloc_rx_flag(skb),
++			NUMA_NO_NODE);
+ 	if (!n)
+ 		return NULL;
+
--- /dev/null
+From: Felix Fietkau <nbd@nbd.name>
+Date: Sat, 27 Apr 2024 18:54:25 +0200
+Subject: [PATCH] net: bridge: fix multicast-to-unicast with fraglist GSO
+
+Calling skb_copy on a SKB_GSO_FRAGLIST skb is not valid, since it returns
+an invalid linearized skb. This code only needs to change the ethernet
+header, so pskb_copy is the right function to call here.
+
+Fixes: 6db6f0eae605 ("bridge: multicast to unicast")
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/net/bridge/br_forward.c
++++ b/net/bridge/br_forward.c
+@@ -266,7 +266,7 @@ static void maybe_deliver_addr(struct ne
+ 	if (skb->dev == p->dev && ether_addr_equal(src, addr))
+ 		return;
+
+-	skb = skb_copy(skb, GFP_ATOMIC);
++	skb = pskb_copy(skb, GFP_ATOMIC);
+ 	if (!skb) {
+ 		DEV_STATS_INC(dev, tx_dropped);
+ 		return;
--- /dev/null
+From: Felix Fietkau <nbd@nbd.name>
+Date: Sat, 27 Apr 2024 19:29:45 +0200
+Subject: [PATCH] net: core: reject skb_copy(_expand) for fraglist GSO skbs
+
+SKB_GSO_FRAGLIST skbs must not be linearized, otherwise they become
+invalid. Return NULL if such an skb is passed to skb_copy or
+skb_copy_expand, in order to prevent a crash on a potential later
+call to skb_gso_segment.
+
+Fixes: 3a1296a38d0c ("net: Support GRO/GSO fraglist chaining.")
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -1971,11 +1971,17 @@ static inline int skb_alloc_rx_flag(cons
+
+ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
+ {
+-	int headerlen = skb_headroom(skb);
+-	unsigned int size = skb_end_offset(skb) + skb->data_len;
+-	struct sk_buff *n = __alloc_skb(size, gfp_mask,
+-					skb_alloc_rx_flag(skb), NUMA_NO_NODE);
++	struct sk_buff *n;
++	unsigned int size;
++	int headerlen;
+
++	if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST))
++		return NULL;
++
++	headerlen = skb_headroom(skb);
++	size = skb_end_offset(skb) + skb->data_len;
++	n = __alloc_skb(size, gfp_mask,
++			skb_alloc_rx_flag(skb), NUMA_NO_NODE);
+ 	if (!n)
+ 		return NULL;
+
+@@ -2303,12 +2309,17 @@ struct sk_buff *skb_copy_expand(const st
+ 	/*
+ 	 *	Allocate the copy buffer
+ 	 */
+-	struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
+-					gfp_mask, skb_alloc_rx_flag(skb),
+-					NUMA_NO_NODE);
+-	int oldheadroom = skb_headroom(skb);
+ 	int head_copy_len, head_copy_off;
++	struct sk_buff *n;
++	int oldheadroom;
++
++	if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST))
++		return NULL;
+
++	oldheadroom = skb_headroom(skb);
++	n = __alloc_skb(newheadroom + skb->len + newtailroom,
++			gfp_mask, skb_alloc_rx_flag(skb),
++			NUMA_NO_NODE);
+ 	if (!n)
+ 		return NULL;
+