static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
struct hns3_desc_cb *cb)
{
- unsigned int order = hnae3_page_order(ring);
+ unsigned int order = hns3_page_order(ring);
struct page *p;
p = dev_alloc_pages(order);
if (!p)
	return -ENOMEM;

cb->priv = p;
cb->page_offset = 0;
cb->reuse_flag = 0;
cb->buf = page_address(p);
- cb->length = hnae3_page_size(ring);
+ cb->length = hns3_page_size(ring);
cb->type = DESC_TYPE_PAGE;
return 0;
}

static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
				struct hns3_enet_ring *ring, int pull_len,
				struct hns3_desc_cb *desc_cb)
{
struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
int size = le16_to_cpu(desc->rx.size);
- u32 truesize = hnae3_buf_size(ring);
+ u32 truesize = hns3_buf_size(ring);
skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
size - pull_len, truesize);
/* Move offset to the next buffer */
desc_cb->page_offset += truesize;
- if (desc_cb->page_offset + truesize <= hnae3_page_size(ring)) {
+ if (desc_cb->page_offset + truesize <= hns3_page_size(ring)) {
desc_cb->reuse_flag = 1;
/* Bump ref count on page before it is given */
get_page(desc_cb->priv);
}
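/*
 * Reuse sketch (an illustration, assuming 4 KiB pages and buf_size = 2048,
 * so truesize = 2048 and hns3_page_size() = 4096): the page holds two
 * buffers. After the first buffer is attached to the skb, page_offset
 * becomes 2048 and 2048 + 2048 <= 4096 still holds, so reuse_flag is set
 * and get_page() takes an extra reference for the second buffer. Once the
 * second buffer is consumed, page_offset is 4096, the check fails and no
 * extra reference is taken, so the page is not reused again.
 */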
if (ring->tail_skb) {
- head_skb->truesize += hnae3_buf_size(ring);
+ head_skb->truesize += hns3_buf_size(ring);
head_skb->data_len += le16_to_cpu(desc->rx.size);
head_skb->len += le16_to_cpu(desc->rx.size);
skb = ring->tail_skb;
}
#define tx_ring_data(priv, idx) ((priv)->ring_data[idx])
-#define hnae3_buf_size(_ring) ((_ring)->buf_size)
-#define hnae3_page_order(_ring) (get_order(hnae3_buf_size(_ring)))
-#define hnae3_page_size(_ring) (PAGE_SIZE << (u32)hnae3_page_order(_ring))
+#define hns3_buf_size(_ring) ((_ring)->buf_size)
+
+static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+ if (ring->buf_size > (PAGE_SIZE / 2))
+ return 1;
+#endif
+ return 0;
+}
+
+#define hns3_page_size(_ring) (PAGE_SIZE << hns3_page_order(_ring))
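/*
 * Worked example (an illustration, assuming 4 KiB system pages):
 *   buf_size = 2048: not above PAGE_SIZE / 2, hns3_page_order() returns 0,
 *                    hns3_page_size() = 4096, two buffers per page.
 *   buf_size = 4096: above PAGE_SIZE / 2, the order is 1, and each
 *                    allocation is an 8 KiB compound page that again holds
 *                    two buffers.
 * On 64 KiB-page kernels the #if block is compiled out and the order is
 * always 0, since even a 4 KiB buffer fits many times into one page.
 */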
/* iterator for handling rings in ring group */
#define hns3_for_each_ring(pos, head) \