net: hns3: Add support for using order 1 pages with a 4K buffer
authorYunsheng Lin <linyunsheng@huawei.com>
Mon, 29 Jul 2019 02:53:30 +0000 (10:53 +0800)
committerDavid S. Miller <davem@davemloft.net>
Mon, 29 Jul 2019 15:23:41 +0000 (08:23 -0700)
The hardware supports 0.5K, 1K, 2K and 4K RX buffer sizes. The
RX buffer cannot be reused when the page size and RX buffer size
are both 4096, because hns3_page_order returns 0 in that case.

So this patch changes hns3_page_order to return 1 when the
RX buffer size is greater than half of the page size and the page
size is less than 8192; dev_alloc_pages is already used to
allocate the compound page for the RX buffer.

This patch also renames the hnae3_* macros to hns3_* for the page
order and RX buffer size calculations, because they are used in
the hns3 module.

Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Reviewed-by: Peng Li <lipeng321@huawei.com>
Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.h

index 0cf9301beb2b132ec2c08d8e949b80a07e73fa52..d2df42d30d88f8f77adc40e3528bb7fe821ab512 100644 (file)
@@ -2081,7 +2081,7 @@ static void hns3_set_default_feature(struct net_device *netdev)
 static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
                             struct hns3_desc_cb *cb)
 {
-       unsigned int order = hnae3_page_order(ring);
+       unsigned int order = hns3_page_order(ring);
        struct page *p;
 
        p = dev_alloc_pages(order);
@@ -2092,7 +2092,7 @@ static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
        cb->page_offset = 0;
        cb->reuse_flag = 0;
        cb->buf  = page_address(p);
-       cb->length = hnae3_page_size(ring);
+       cb->length = hns3_page_size(ring);
        cb->type = DESC_TYPE_PAGE;
 
        return 0;
@@ -2395,7 +2395,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
 {
        struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
        int size = le16_to_cpu(desc->rx.size);
-       u32 truesize = hnae3_buf_size(ring);
+       u32 truesize = hns3_buf_size(ring);
 
        skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
                        size - pull_len, truesize);
@@ -2410,7 +2410,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
        /* Move offset up to the next cache line */
        desc_cb->page_offset += truesize;
 
-       if (desc_cb->page_offset + truesize <= hnae3_page_size(ring)) {
+       if (desc_cb->page_offset + truesize <= hns3_page_size(ring)) {
                desc_cb->reuse_flag = 1;
                /* Bump ref count on page before it is given */
                get_page(desc_cb->priv);
@@ -2692,7 +2692,7 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
                }
 
                if (ring->tail_skb) {
-                       head_skb->truesize += hnae3_buf_size(ring);
+                       head_skb->truesize += hns3_buf_size(ring);
                        head_skb->data_len += le16_to_cpu(desc->rx.size);
                        head_skb->len += le16_to_cpu(desc->rx.size);
                        skb = ring->tail_skb;
index 848b866761dfe71e2aaa2ac2a05c2a096d78e1f6..1a17856f9a3bc020f137bac9ba40791b15d7f3b0 100644 (file)
@@ -608,9 +608,18 @@ static inline bool hns3_nic_resetting(struct net_device *netdev)
 
 #define tx_ring_data(priv, idx) ((priv)->ring_data[idx])
 
-#define hnae3_buf_size(_ring) ((_ring)->buf_size)
-#define hnae3_page_order(_ring) (get_order(hnae3_buf_size(_ring)))
-#define hnae3_page_size(_ring) (PAGE_SIZE << (u32)hnae3_page_order(_ring))
+#define hns3_buf_size(_ring) ((_ring)->buf_size)
+
+static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+       if (ring->buf_size > (PAGE_SIZE / 2))
+               return 1;
+#endif
+       return 0;
+}
+
+#define hns3_page_size(_ring) (PAGE_SIZE << hns3_page_order(_ring))
 
 /* iterator for handling rings in ring group */
 #define hns3_for_each_ring(pos, head) \