                s->rx_cache_full += rq_stats->cache_full;
                s->rx_cache_empty += rq_stats->cache_empty;
                s->rx_cache_busy += rq_stats->cache_busy;
+               s->rx_cache_waive += rq_stats->cache_waive;
                for (j = 0; j < priv->channels.params.num_tc; j++) {
                        sq_stats = &c->sq[j].stats;
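For context: this hunk folds each RQ's page-cache counters into the device-wide
software stats, so the new waive counter has to be summed here like its siblings.
A minimal user-space sketch of the same accumulation pattern, with simplified
stand-ins for the driver's mlx5e_rq_stats and mlx5e_sw_stats:

#include <inttypes.h>
#include <stdio.h>

/* Simplified stand-ins for the driver's per-ring and global stats structs. */
struct rq_stats { uint64_t cache_full, cache_empty, cache_busy, cache_waive; };
struct sw_stats { uint64_t rx_cache_full, rx_cache_empty, rx_cache_busy, rx_cache_waive; };

int main(void)
{
        struct rq_stats rq[2] = { { 1, 2, 3, 4 }, { 5, 6, 7, 8 } };
        struct sw_stats s = { 0 };
        int i;

        /* Same shape as the driver loop: sum per-ring counters into totals. */
        for (i = 0; i < 2; i++) {
                s.rx_cache_full  += rq[i].cache_full;
                s.rx_cache_empty += rq[i].cache_empty;
                s.rx_cache_busy  += rq[i].cache_busy;
                s.rx_cache_waive += rq[i].cache_waive;
        }
        printf("rx_cache_waive = %" PRIu64 "\n", s.rx_cache_waive);
        return 0;
}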
 static inline bool mlx5e_page_is_reserved(struct page *page)
 {
-       return page_is_pfmemalloc(page) || page_to_nid(page) != numa_node_id();
+       return page_is_pfmemalloc(page) || page_to_nid(page) != numa_mem_id();
 }
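The move from numa_node_id() to numa_mem_id() matters on systems with memoryless
NUMA nodes: numa_mem_id() returns the nearest node that actually has memory, so
the locality check still passes for pages backing a CPU that sits on a memoryless
node, whereas numa_node_id() would make the comparison fail for every page and
defeat the cache entirely. The page_is_pfmemalloc() half of the test keeps pages
drawn from the emergency reserves from being recycled.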
 static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
                return false;
        }
 
-       if (unlikely(page_is_pfmemalloc(dma_info->page)))
+       if (unlikely(mlx5e_page_is_reserved(dma_info->page))) {
+               rq->stats.cache_waive++;
                return false;
+       }
 
        cache->page_cache[cache->tail] = *dma_info;
        cache->tail = tail_next;
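Pulled together, the put path now waives (and counts) any reserved page instead
of caching it. A sketch of how the full function plausibly reads after the patch;
the head/tail ring and the power-of-two MLX5E_CACHE_SIZE mask are assumptions
inferred from the visible index arithmetic, not lines shown in this excerpt:

static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
                                      struct mlx5e_dma_info *dma_info)
{
        struct mlx5e_page_cache *cache = &rq->page_cache;
        u32 tail_next = (cache->tail + 1) & (MLX5E_CACHE_SIZE - 1);

        if (tail_next == cache->head) {
                rq->stats.cache_full++;         /* ring full: caller frees the page */
                return false;
        }

        if (unlikely(mlx5e_page_is_reserved(dma_info->page))) {
                rq->stats.cache_waive++;        /* pfmemalloc or remote page: skip it */
                return false;
        }

        cache->page_cache[cache->tail] = *dma_info;
        cache->tail = tail_next;
        return true;
}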
        u64 rx_cache_full;
        u64 rx_cache_empty;
        u64 rx_cache_busy;
+       u64 rx_cache_waive;
 
        /* Special handling counters */
        u64 link_down_events_phy;
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) },
 };
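These descriptor tables are what let the ethtool stats code stay generic: each
entry pairs a printable name with the field's offset inside the stats struct, so
one loop can dump every counter without per-field code. An illustrative
user-space reimplementation of the idiom (the real MLX5E_DECLARE_STAT macro and
struct counter_desc may differ in detail):

#include <inttypes.h>
#include <stddef.h>
#include <stdio.h>

struct sw_stats { uint64_t rx_cache_busy, rx_cache_waive; };

struct counter_desc { const char *name; size_t offset; };

/* Stringify the field name and record where it lives in the struct. */
#define DECLARE_STAT(type, fld) { #fld, offsetof(type, fld) }

static const struct counter_desc sw_desc[] = {
        DECLARE_STAT(struct sw_stats, rx_cache_busy),
        DECLARE_STAT(struct sw_stats, rx_cache_waive),
};

int main(void)
{
        struct sw_stats s = { .rx_cache_busy = 3, .rx_cache_waive = 7 };
        size_t i;

        /* ethtool -S style dump: walk the descriptors, read by offset. */
        for (i = 0; i < sizeof(sw_desc) / sizeof(sw_desc[0]); i++) {
                uint64_t v = *(uint64_t *)((char *)&s + sw_desc[i].offset);
                printf("%s = %" PRIu64 "\n", sw_desc[i].name, v);
        }
        return 0;
}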
        u64 cache_full;
        u64 cache_empty;
        u64 cache_busy;
+       u64 cache_waive;
 };
 static const struct counter_desc rq_stats_desc[] = {
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
 };
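With both tables extended, the new counter becomes visible through ethtool -S:
once globally as rx_cache_waive via sw_stats_desc, and once per receive ring via
rq_stats_desc (per-ring names presumably carry a ring-index prefix such as rx0_,
assuming the usual MLX5E_DECLARE_RX_STAT naming). A rising waive count alongside
a healthy cache_full/cache_empty balance points at pfmemalloc pressure or
cross-NUMA allocations rather than an undersized cache.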
 struct mlx5e_sq_stats {