xdp: allow page_pool as an allocator type in xdp_return_frame
author		Jesper Dangaard Brouer <brouer@redhat.com>
		Tue, 17 Apr 2018 14:46:22 +0000 (16:46 +0200)
committer	David S. Miller <davem@davemloft.net>
		Tue, 17 Apr 2018 14:50:29 +0000 (10:50 -0400)
New allocator type MEM_TYPE_PAGE_POOL for page_pool usage.

The registered page_pool allocator pointer is not directly available
from xdp_rxq_info, but it could be made so (if needed).  For now, the
driver must keep track of its page_pool pointer itself and use it for
RX-ring page allocation.
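
As an illustration (not part of this patch), a driver-side RX-ring
struct can simply carry its own page_pool pointer next to the
xdp_rxq_info and allocate RX pages from it.  The allocation helper
page_pool_dev_alloc_pages() comes from the companion page_pool patch;
"my_rx_ring" and the surrounding names are made up:

	struct my_rx_ring {
		struct page_pool *page_pool;	/* tracked by the driver */
		struct xdp_rxq_info xdp_rxq;
		/* HW descriptor ring, etc. */
	};

	static int my_rx_ring_refill_one(struct my_rx_ring *ring)
	{
		struct page *page;

		/* Allocate the RX page from this ring's own pool */
		page = page_pool_dev_alloc_pages(ring->page_pool);
		if (!page)
			return -ENOMEM;
		/* ... DMA-map (if needed) and post to the HW ring ... */
		return 0;
	}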

As suggested by Saeed, to maintain a symmetric API it is the driver's
responsibility to allocate/create and free/destroy the page_pool.
Thus, after the driver has called xdp_rxq_info_unreg(), it is the
driver's responsibility to free the page_pool, but via an RCU free
call.  This is done easily with the page_pool helper
page_pool_destroy(), which avoids touching any driver code during the
RCU callback, which could otherwise run after the driver has been
unloaded.
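
For illustration only, a minimal setup/teardown sketch that follows
this ordering.  The "my_rx_ring" names and the error handling details
are assumptions; page_pool_create()/page_pool_destroy() and the
xdp_rxq_info_*() calls are the APIs referred to above:

	static int my_rx_ring_setup(struct my_rx_ring *ring,
				    struct net_device *dev, u32 queue_index)
	{
		struct page_pool_params pp_params = { 0 };
		int err;

		/* Fill in pp_params (pool size, numa node, DMA dir, ...) */
		ring->page_pool = page_pool_create(&pp_params);
		if (IS_ERR(ring->page_pool))
			return PTR_ERR(ring->page_pool);

		err = xdp_rxq_info_reg(&ring->xdp_rxq, dev, queue_index);
		if (err)
			goto err_destroy_pool;

		/* Hand the page_pool to the xdp core as this rxq's allocator */
		err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
						 MEM_TYPE_PAGE_POOL,
						 ring->page_pool);
		if (err)
			goto err_unreg_rxq;
		return 0;

	err_unreg_rxq:
		xdp_rxq_info_unreg(&ring->xdp_rxq);
	err_destroy_pool:
		page_pool_destroy(ring->page_pool);
		return err;
	}

	static void my_rx_ring_teardown(struct my_rx_ring *ring)
	{
		/* Unreg first; frames still in flight can be returned and
		 * will fall back to put_page() once the ID lookup fails.
		 */
		xdp_rxq_info_unreg(&ring->xdp_rxq);
		/* The driver created the pool, so it also frees it (the
		 * actual free is RCU deferred by page_pool_destroy()).
		 */
		page_pool_destroy(ring->page_pool);
	}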

V8: Address issues found by the kbuild test robot
 - Address sparse "should be static" warnings
 - Allow xdp.o to be compiled without page_pool.o

V9: Remove inline from .c file, compiler knows best

Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/net/page_pool.h
include/net/xdp.h
net/core/xdp.c

diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index 1fe77db595181fec983cdbf3895fcf84071f4db5..c79087153148e127ea0baa6b8baf8a79bc59e5d0 100644
@@ -117,7 +117,12 @@ void __page_pool_put_page(struct page_pool *pool,
 
 static inline void page_pool_put_page(struct page_pool *pool, struct page *page)
 {
+       /* When page_pool isn't compiled-in, net/core/xdp.c doesn't
+        * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
+        */
+#ifdef CONFIG_PAGE_POOL
        __page_pool_put_page(pool, page, false);
+#endif
 }
 /* Very limited use-cases allow recycle direct */
 static inline void page_pool_recycle_direct(struct page_pool *pool,
@@ -126,4 +131,13 @@ static inline void page_pool_recycle_direct(struct page_pool *pool,
        __page_pool_put_page(pool, page, true);
 }
 
+static inline bool is_page_pool_compiled_in(void)
+{
+#ifdef CONFIG_PAGE_POOL
+       return true;
+#else
+       return false;
+#endif
+}
+
 #endif /* _NET_PAGE_POOL_H */
diff --git a/include/net/xdp.h b/include/net/xdp.h
index 5f67c62540aa471d2f80940581eed4116f3e78ef..d0ee437753dc2dcb41f2fb40cf678e16190e5126 100644
@@ -36,6 +36,7 @@
 enum xdp_mem_type {
        MEM_TYPE_PAGE_SHARED = 0, /* Split-page refcnt based model */
        MEM_TYPE_PAGE_ORDER0,     /* Orig XDP full page model */
+       MEM_TYPE_PAGE_POOL,
        MEM_TYPE_MAX,
 };
 
@@ -44,6 +45,8 @@ struct xdp_mem_info {
        u32 id;
 };
 
+struct page_pool;
+
 struct xdp_rxq_info {
        struct net_device *dev;
        u32 queue_index;
diff --git a/net/core/xdp.c b/net/core/xdp.c
index 8b2cb79b5de0e0ebf32a5c23916a79b00a1b48c9..33e382afbd95de0d13be3ac59643cf8b5bbd451c 100644
@@ -8,6 +8,7 @@
 #include <linux/slab.h>
 #include <linux/idr.h>
 #include <linux/rhashtable.h>
+#include <net/page_pool.h>
 
 #include <net/xdp.h>
 
@@ -27,7 +28,10 @@ static struct rhashtable *mem_id_ht;
 
 struct xdp_mem_allocator {
        struct xdp_mem_info mem;
-       void *allocator;
+       union {
+               void *allocator;
+               struct page_pool *page_pool;
+       };
        struct rhash_head node;
        struct rcu_head rcu;
 };
@@ -74,7 +78,9 @@ static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
        /* Allow this ID to be reused */
        ida_simple_remove(&mem_id_pool, xa->mem.id);
 
-       /* TODO: Depending on allocator type/pointer free resources */
+       /* Notice, driver is expected to free the *allocator,
+        * e.g. page_pool, and MUST also use RCU free.
+        */
 
        /* Poison memory */
        xa->mem.id = 0xFFFF;
@@ -225,6 +231,17 @@ again:
        return id;
 }
 
+static bool __is_supported_mem_type(enum xdp_mem_type type)
+{
+       if (type == MEM_TYPE_PAGE_POOL)
+               return is_page_pool_compiled_in();
+
+       if (type >= MEM_TYPE_MAX)
+               return false;
+
+       return true;
+}
+
 int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
                               enum xdp_mem_type type, void *allocator)
 {
@@ -238,13 +255,16 @@ int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
                return -EFAULT;
        }
 
-       if (type >= MEM_TYPE_MAX)
-               return -EINVAL;
+       if (!__is_supported_mem_type(type))
+               return -EOPNOTSUPP;
 
        xdp_rxq->mem.type = type;
 
-       if (!allocator)
+       if (!allocator) {
+               if (type == MEM_TYPE_PAGE_POOL)
+                       return -EINVAL; /* Setup time check page_pool req */
                return 0;
+       }
 
        /* Delay init of rhashtable to save memory if feature isn't used */
        if (!mem_id_init) {
@@ -290,15 +310,31 @@ EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
 
 void xdp_return_frame(void *data, struct xdp_mem_info *mem)
 {
-       if (mem->type == MEM_TYPE_PAGE_SHARED) {
+       struct xdp_mem_allocator *xa;
+       struct page *page;
+
+       switch (mem->type) {
+       case MEM_TYPE_PAGE_POOL:
+               rcu_read_lock();
+               /* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
+               xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
+               page = virt_to_head_page(data);
+               if (xa)
+                       page_pool_put_page(xa->page_pool, page);
+               else
+                       put_page(page);
+               rcu_read_unlock();
+               break;
+       case MEM_TYPE_PAGE_SHARED:
                page_frag_free(data);
-               return;
-       }
-
-       if (mem->type == MEM_TYPE_PAGE_ORDER0) {
-               struct page *page = virt_to_page(data); /* Assumes order0 page*/
-
+               break;
+       case MEM_TYPE_PAGE_ORDER0:
+               page = virt_to_page(data); /* Assumes order0 page*/
                put_page(page);
+               break;
+       default:
+               /* Not possible, checked in xdp_rxq_info_reg_mem_model() */
+               break;
        }
 }
 EXPORT_SYMBOL_GPL(xdp_return_frame);
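
For completeness, an illustrative (non-patch) sketch of how the
xdp_mem_info travels with a frame, so that xdp_return_frame() can key
the mem.id rhashtable lookup above; "my_frame" and its helpers are
hypothetical:

	struct my_frame {
		void *data;
		struct xdp_mem_info mem;	/* copy of xdp_rxq_info->mem */
	};

	static void my_frame_capture(struct my_frame *frm, struct xdp_buff *xdp)
	{
		frm->data = xdp->data;
		frm->mem = xdp->rxq->mem;	/* mem.id keys the ID->pool lookup */
	}

	static void my_frame_free(struct my_frame *frm)
	{
		/* Recycles into the page_pool for MEM_TYPE_PAGE_POOL,
		 * otherwise page_frag_free()/put_page() per the switch above.
		 */
		xdp_return_frame(frm->data, &frm->mem);
	}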