return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
+/**
+ * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
+ * @rx_buf: Rx buffer to adjust
+ * @size: Size of adjustment
+ *
+ * Update the offset within the page so that the Rx buffer will be ready to be
+ * reused. For systems with PAGE_SIZE < 8192 this function will flip the page
+ * offset so that the second half of the page assigned to the Rx buffer is
+ * used; otherwise the offset is simply moved forward by @size bytes.
+ */
+static void
+ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
+{
+#if (PAGE_SIZE < 8192)
+ /* flip page offset to other buffer */
+ rx_buf->page_offset ^= size;
+#else
+ /* move offset up to the next cache line */
+ rx_buf->page_offset += size;
+#endif
+}
+
/**
* ice_can_reuse_rx_page - Determine if page can be reused for another Rx
* @rx_buf: buffer containing the page
- * @truesize: the offset that needs to be applied to page
*
* If the page is reusable, we have a green light for calling ice_reuse_rx_page,
* which will assign the current buffer to the buffer that next_to_alloc is
* pointing to; otherwise, the DMA mapping needs to be destroyed and the
* page freed.
*/
-static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf,
- unsigned int truesize)
+static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
{
+#if (PAGE_SIZE >= 8192)
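+ /* past this offset a full 2K Rx buffer no longer fits within the page */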
+ unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048;
+#endif
unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
struct page *page = rx_buf->page;
/* if we are only owner of page we can reuse it */
if (unlikely((page_count(page) - pagecnt_bias) > 1))
return false;
-
- /* flip page offset to other buffer */
- rx_buf->page_offset ^= truesize;
#else
- /* move offset up to the next cache line */
- rx_buf->page_offset += truesize;
-
- if (rx_buf->page_offset > PAGE_SIZE - ICE_RXBUF_2048)
+ if (rx_buf->page_offset > last_offset)
return false;
#endif /* PAGE_SIZE < 8192 */
* less than the skb header size, otherwise it will just attach the page as
* a frag to the skb.
*
- * The function will then update the page offset if necessary and return
- * true if the buffer can be reused by the adapter.
+ * The function will then update the page offset.
*/
-static bool
+static void
ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
unsigned int size)
{
if (size <= ICE_RX_HDR_SIZE) {
memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
- /* page is not reserved, we can reuse buffer as-is */
- if (likely(!ice_page_is_reserved(page))) {
- rx_buf->pagecnt_bias++;
- return true;
- }
-
- /* this page cannot be reused so discard it */
- return false;
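+ /* buffer was copied, keep our page reference so it can be reused as-is */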
+ rx_buf->pagecnt_bias++;
+ return;
}
/* we need the header to contain the greater of either ETH_HLEN or
add_tail_frag:
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
(unsigned long)va & ~PAGE_MASK, size, truesize);
-
- return ice_can_reuse_rx_page(rx_buf, truesize);
+ ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
}
/**
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb)) {
rx_ring->rx_stats.alloc_buf_failed++;
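+ /* skb alloc failed, keep our page reference so the buffer can be reused */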
+ rx_buf->pagecnt_bias++;
return NULL;
}
}
/* pull page into skb */
- if (ice_add_rx_frag(rx_buf, skb, size)) {
+ ice_add_rx_frag(rx_buf, skb, size);
+
+ return skb;
+}
+
+/**
+ * ice_put_rx_buf - Clean up used buffer and either recycle or free
+ * @rx_ring: Rx descriptor ring to transact packets on
+ * @rx_buf: Rx buffer to pull data from
+ *
+ * This function will clean up the contents of the rx_buf. It will
+ * either recycle the buffer or unmap it and free the associated resources.
+ */
+static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
+{
/* hand second half of page back to the ring */
+ if (ice_can_reuse_rx_page(rx_buf)) {
ice_reuse_rx_page(rx_ring, rx_buf);
rx_ring->rx_stats.page_reuse_count++;
} else {
/* clear contents of buffer_info */
rx_buf->page = NULL;
-
- return skb;
}
/**
if (!skb)
break;
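+ /* recycle or free the Rx buffer now that its data has been consumed */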
+ ice_put_rx_buf(rx_ring, rx_buf);
cleaned_count++;
/* skip if it is NOP desc */