net: core: another layer of lists, around PF_MEMALLOC skb handling
author Edward Cree <ecree@solarflare.com>
Mon, 2 Jul 2018 15:13:40 +0000 (16:13 +0100)
committer David S. Miller <davem@davemloft.net>
Wed, 4 Jul 2018 05:06:19 +0000 (14:06 +0900)
This is the first example of a layer splitting the list (rather than
 merely taking individual packets off it).
It involves a new list.h function, list_cut_before(), which is like
 list_cut_position() but cuts on the other side of the given entry.
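
To illustrate the difference (example only, not part of the patch):
 suppose @queue holds skb1, skb2, skb3, linked through skb->list, and
 we cut at the second entry:

	LIST_HEAD(front);

	/* @queue: skb1, skb2, skb3 */
	list_cut_before(&front, &queue, &skb2->list);
	/* now  front: skb1          queue: skb2, skb3 */

	/* list_cut_position(&front, &queue, &skb2->list) would
	 * instead have left  front: skb1, skb2   queue: skb3
	 */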

Signed-off-by: Edward Cree <ecree@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/linux/list.h
net/core/dev.c

index 4b129df4d46b5a4c26d970c6a0c385b6c208d9d1..de04cc5ed53673ebea7362bea2a43f8355018125 100644 (file)
@@ -285,6 +285,36 @@ static inline void list_cut_position(struct list_head *list,
                __list_cut_position(list, head, entry);
 }
 
+/**
+ * list_cut_before - cut a list into two, before given entry
+ * @list: a new list to receive all removed entries
+ * @head: a list with entries
+ * @entry: an entry within @head; may be @head itself
+ *
+ * This helper moves the initial part of @head, up to but
+ * excluding @entry, from @head to @list.  @entry should be
+ * an element you know is on @head.  @list should be an
+ * empty list, or a list whose existing data you do not
+ * care about losing.
+ * If @entry == @head, all entries on @head are moved to
+ * @list.
+ */
+static inline void list_cut_before(struct list_head *list,
+                                  struct list_head *head,
+                                  struct list_head *entry)
+{
+       if (head->next == entry) {
+               INIT_LIST_HEAD(list);
+               return;
+       }
+       list->next = head->next;
+       list->next->prev = list;
+       list->prev = entry->prev;
+       list->prev->next = list;
+       head->next = entry;
+       entry->prev = head;
+}
+
 static inline void __list_splice(const struct list_head *list,
                                 struct list_head *prev,
                                 struct list_head *next)
index 1e87361df2abf311b6b66484ac252aeafb146561..9aadef976e8c99953b58d5f15851ed32f3182289 100644 (file)
@@ -4784,6 +4784,14 @@ int netif_receive_skb_core(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(netif_receive_skb_core);
 
+static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
+{
+       struct sk_buff *skb, *next;
+
+       list_for_each_entry_safe(skb, next, head, list)
+               __netif_receive_skb_core(skb, pfmemalloc);
+}
+
 static int __netif_receive_skb(struct sk_buff *skb)
 {
        int ret;
@@ -4809,6 +4817,34 @@ static int __netif_receive_skb(struct sk_buff *skb)
        return ret;
 }
 
+static void __netif_receive_skb_list(struct list_head *head)
+{
+       unsigned long noreclaim_flag = 0;
+       struct sk_buff *skb, *next;
+       bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? */
+
+       list_for_each_entry_safe(skb, next, head, list) {
+               if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) {
+                       struct list_head sublist;
+
+                       /* Handle the previous sublist */
+                       list_cut_before(&sublist, head, &skb->list);
+                       __netif_receive_skb_list_core(&sublist, pfmemalloc);
+                       pfmemalloc = !pfmemalloc;
+                       /* See comments in __netif_receive_skb */
+                       if (pfmemalloc)
+                               noreclaim_flag = memalloc_noreclaim_save();
+                       else
+                               memalloc_noreclaim_restore(noreclaim_flag);
+               }
+       }
+       /* Handle the remaining sublist */
+       __netif_receive_skb_list_core(head, pfmemalloc);
+       /* Restore pflags */
+       if (pfmemalloc)
+               memalloc_noreclaim_restore(noreclaim_flag);
+}
+
 static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
 {
        struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
@@ -4843,14 +4879,6 @@ static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
        return ret;
 }
 
-static void __netif_receive_skb_list(struct list_head *head)
-{
-       struct sk_buff *skb, *next;
-
-       list_for_each_entry_safe(skb, next, head, list)
-               __netif_receive_skb(skb);
-}
-
 static int netif_receive_skb_internal(struct sk_buff *skb)
 {
        int ret;
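
Illustration only (not part of the patch): with sk_memalloc_socks()
true, a batch whose skbs have pfmemalloc flags [no, no, yes, yes, no]
flows through the new __netif_receive_skb_list() as three sublists:

	head:  skb1  skb2 | skb3  skb4 | skb5
	flag:    no    no |  yes   yes |   no

	- At skb3 the state flips: list_cut_before() splits off
	  {skb1, skb2}, which is processed with pfmemalloc=false,
	  then memalloc_noreclaim_save() is called for the coming
	  pfmemalloc run.
	- At skb5 the state flips back: {skb3, skb4} is split off and
	  processed with pfmemalloc=true, then
	  memalloc_noreclaim_restore() clears the noreclaim flag.
	- After the loop, the remainder {skb5} is processed with
	  pfmemalloc=false; no final restore is needed.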