--- /dev/null
+commit 8610037e8106b48c79cfe0afb92b2b2466e51c3d
+Author: Joe Damato <jdamato@fastly.com>
+Date: Tue Mar 1 23:55:47 2022 -0800
+
+ page_pool: Add allocation stats
+
+ Add per-pool statistics counters for the allocation path of a page pool.
+ These stats are incremented in softirq context, so no locking or per-cpu
+ variables are needed.
+
+ These statistics are disabled by default, and a kernel config option is
+ provided for users who wish to enable them.
+
+ The statistics added are:
+ - fast: successful fast path allocations
+ - slow: slow path order-0 allocations
+ - slow_high_order: slow path high order allocations
+ - empty: ptr ring is empty, forcing a slow path allocation
+ - refill: an allocation which triggered a refill of the cache
+ - waive: pages obtained from the ptr ring that cannot be added to
+ the cache due to a NUMA mismatch.
+
+ Signed-off-by: Joe Damato <jdamato@fastly.com>
+ Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
+ Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
+ Signed-off-by: David S. Miller <davem@davemloft.net>
+
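+ As a rough illustration, with CONFIG_PAGE_POOL_STATS=y the counters can be
+ read directly off the pool, since all writers run in softirq context. A
+ minimal sketch (assuming "pool" is a live page pool; not part of this patch):
+
+     #ifdef CONFIG_PAGE_POOL_STATS
+     static void dump_pp_alloc_stats(const struct page_pool *pool)
+     {
+             /* plain u64 reads; all updates happen in softirq context */
+             pr_info("fast=%llu slow=%llu slow_ho=%llu empty=%llu refill=%llu waive=%llu\n",
+                     pool->alloc_stats.fast, pool->alloc_stats.slow,
+                     pool->alloc_stats.slow_high_order, pool->alloc_stats.empty,
+                     pool->alloc_stats.refill, pool->alloc_stats.waive);
+     }
+     #endif
+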
+--- a/include/net/page_pool.h
++++ b/include/net/page_pool.h
+@@ -82,6 +82,19 @@ struct page_pool_params {
+ unsigned int offset; /* DMA addr offset */
+ };
+
++#ifdef CONFIG_PAGE_POOL_STATS
++struct page_pool_alloc_stats {
++ u64 fast; /* fast path allocations */
++ u64 slow; /* slow-path order 0 allocations */
++ u64 slow_high_order; /* slow-path high order allocations */
++ u64 empty; /* failed refills due to empty ptr ring, forcing
++ * slow path allocation
++ */
++ u64 refill; /* allocations via successful refill */
++ u64 waive; /* failed refills due to numa zone mismatch */
++};
++#endif
++
+ struct page_pool {
+ struct page_pool_params p;
+
+@@ -132,6 +145,11 @@ struct page_pool {
+ refcount_t user_cnt;
+
+ u64 destroy_cnt;
++
++#ifdef CONFIG_PAGE_POOL_STATS
++ /* these stats are incremented while in softirq context */
++ struct page_pool_alloc_stats alloc_stats;
++#endif
+ };
+
+ struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
+--- a/net/Kconfig
++++ b/net/Kconfig
+@@ -434,6 +434,19 @@ config NET_DEVLINK
+ config PAGE_POOL
+ bool
+
++config PAGE_POOL_STATS
++ default n
++ bool "Page pool stats"
++ depends on PAGE_POOL
++ help
++ Enable page pool statistics to track page allocation and recycling
++ in page pools. This option incurs additional CPU cost in the allocation
++ and recycle paths and additional memory cost to store the statistics.
++ These statistics are only available if this option is enabled and if
++ the driver using the page pool supports exporting this data.
++
++ If unsure, say N.
++
+ config FAILOVER
+ tristate "Generic failover module"
+ help
+--- a/net/core/page_pool.c
++++ b/net/core/page_pool.c
+@@ -26,6 +26,13 @@
+
+ #define BIAS_MAX LONG_MAX
+
++#ifdef CONFIG_PAGE_POOL_STATS
++/* alloc_stat_inc is intended to be used in softirq context */
++#define alloc_stat_inc(pool, __stat) (pool->alloc_stats.__stat++)
++#else
++#define alloc_stat_inc(pool, __stat)
++#endif
++
+ static int page_pool_init(struct page_pool *pool,
+ const struct page_pool_params *params)
+ {
+@@ -117,8 +124,10 @@ static struct page *page_pool_refill_all
+ int pref_nid; /* preferred NUMA node */
+
+ /* Quicker fallback, avoid locks when ring is empty */
+- if (__ptr_ring_empty(r))
++ if (__ptr_ring_empty(r)) {
++ alloc_stat_inc(pool, empty);
+ return NULL;
++ }
+
+ /* Softirq guarantee CPU and thus NUMA node is stable. This,
+ * assumes CPU refilling driver RX-ring will also run RX-NAPI.
+@@ -148,14 +157,17 @@ static struct page *page_pool_refill_all
+ * This limit stress on page buddy alloactor.
+ */
+ page_pool_return_page(pool, page);
++ alloc_stat_inc(pool, waive);
+ page = NULL;
+ break;
+ }
+ } while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);
+
+ /* Return last page */
+- if (likely(pool->alloc.count > 0))
++ if (likely(pool->alloc.count > 0)) {
+ page = pool->alloc.cache[--pool->alloc.count];
++ alloc_stat_inc(pool, refill);
++ }
+
+ spin_unlock(&r->consumer_lock);
+ return page;
+@@ -170,6 +182,7 @@ static struct page *__page_pool_get_cach
+ if (likely(pool->alloc.count)) {
+ /* Fast-path */
+ page = pool->alloc.cache[--pool->alloc.count];
++ alloc_stat_inc(pool, fast);
+ } else {
+ page = page_pool_refill_alloc_cache(pool);
+ }
+@@ -241,6 +254,7 @@ static struct page *__page_pool_alloc_pa
+ return NULL;
+ }
+
++ alloc_stat_inc(pool, slow_high_order);
+ page_pool_set_pp_info(pool, page);
+
+ /* Track how many pages are held 'in-flight' */
+@@ -295,10 +309,12 @@ static struct page *__page_pool_alloc_pa
+ }
+
+ /* Return last page */
+- if (likely(pool->alloc.count > 0))
++ if (likely(pool->alloc.count > 0)) {
+ page = pool->alloc.cache[--pool->alloc.count];
+- else
++ alloc_stat_inc(pool, slow);
++ } else {
+ page = NULL;
++ }
+
+ /* When page just alloc'ed is should/must have refcnt 1. */
+ return page;
--- /dev/null
+commit ad6fa1e1ab1b8164f1ba296b1b4dc556a483bcad
+Author: Joe Damato <jdamato@fastly.com>
+Date: Tue Mar 1 23:55:48 2022 -0800
+
+ page_pool: Add recycle stats
+
+ Add per-cpu stats tracking page pool recycling events:
+ - cached: recycling placed page in the page pool cache
+ - cache_full: page pool cache was full
+ - ring: page placed into the ptr ring
+ - ring_full: page released from page pool because the ptr ring was full
+ - released_refcnt: page released (and not recycled) because refcnt > 1
+
+ Signed-off-by: Joe Damato <jdamato@fastly.com>
+ Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
+ Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
+ Signed-off-by: David S. Miller <davem@davemloft.net>
+
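+ Unlike the allocation counters, recycling can run where a plain u64
+ increment could race, so per-cpu storage is used to avoid both locks and
+ atomics. A simplified sketch of the pattern (illustrative names, not the
+ exact patch code):
+
+     struct page_pool_recycle_stats __percpu *stats;
+
+     stats = alloc_percpu(struct page_pool_recycle_stats); /* zeroed */
+     this_cpu_inc(stats->cached); /* lock-free bump of this CPU's slot */
+     free_percpu(stats);          /* on pool teardown */
+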
+--- a/include/net/page_pool.h
++++ b/include/net/page_pool.h
+@@ -93,6 +93,18 @@ struct page_pool_alloc_stats {
+ u64 refill; /* allocations via successful refill */
+ u64 waive; /* failed refills due to numa zone mismatch */
+ };
++
++struct page_pool_recycle_stats {
++ u64 cached; /* recycling placed page in the cache. */
++ u64 cache_full; /* cache was full */
++ u64 ring; /* recycling placed page back into ptr ring */
++ u64 ring_full; /* page was released from page-pool because
++ * PTR ring was full.
++ */
++ u64 released_refcnt; /* page released because of elevated
++ * refcnt
++ */
++};
+ #endif
+
+ struct page_pool {
+@@ -136,6 +148,10 @@ struct page_pool {
+ */
+ struct ptr_ring ring;
+
++#ifdef CONFIG_PAGE_POOL_STATS
++ /* recycle stats are per-cpu to avoid locking */
++ struct page_pool_recycle_stats __percpu *recycle_stats;
++#endif
+ atomic_t pages_state_release_cnt;
+
+ /* A page_pool is strictly tied to a single RX-queue being
+--- a/net/core/page_pool.c
++++ b/net/core/page_pool.c
+@@ -29,8 +29,15 @@
+ #ifdef CONFIG_PAGE_POOL_STATS
+ /* alloc_stat_inc is intended to be used in softirq context */
+ #define alloc_stat_inc(pool, __stat) (pool->alloc_stats.__stat++)
++/* recycle_stat_inc is safe to use when preemption is possible. */
++#define recycle_stat_inc(pool, __stat) \
++ do { \
++ struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
++ this_cpu_inc(s->__stat); \
++ } while (0)
+ #else
+ #define alloc_stat_inc(pool, __stat)
++#define recycle_stat_inc(pool, __stat)
+ #endif
+
+ static int page_pool_init(struct page_pool *pool,
+@@ -80,6 +87,12 @@ static int page_pool_init(struct page_po
+ pool->p.flags & PP_FLAG_PAGE_FRAG)
+ return -EINVAL;
+
++#ifdef CONFIG_PAGE_POOL_STATS
++ pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
++ if (!pool->recycle_stats)
++ return -ENOMEM;
++#endif
++
+ if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
+ return -ENOMEM;
+
+@@ -412,7 +425,12 @@ static bool page_pool_recycle_in_ring(st
+ else
+ ret = ptr_ring_produce_bh(&pool->ring, page);
+
+- return (ret == 0) ? true : false;
++ if (!ret) {
++ recycle_stat_inc(pool, ring);
++ return true;
++ }
++
++ return false;
+ }
+
+ /* Only allow direct recycling in special circumstances, into the
+@@ -423,11 +441,14 @@ static bool page_pool_recycle_in_ring(st
+ static bool page_pool_recycle_in_cache(struct page *page,
+ struct page_pool *pool)
+ {
+- if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE))
++ if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE)) {
++ recycle_stat_inc(pool, cache_full);
+ return false;
++ }
+
+ /* Caller MUST have verified/know (page_ref_count(page) == 1) */
+ pool->alloc.cache[pool->alloc.count++] = page;
++ recycle_stat_inc(pool, cached);
+ return true;
+ }
+
+@@ -482,6 +503,7 @@ __page_pool_put_page(struct page_pool *p
+ * doing refcnt based recycle tricks, meaning another process
+ * will be invoking put_page.
+ */
++ recycle_stat_inc(pool, released_refcnt);
+ /* Do not replace this with page_pool_return_page() */
+ page_pool_release_page(pool, page);
+ put_page(page);
+@@ -495,6 +517,7 @@ void page_pool_put_page(struct page_pool
+ page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct);
+ if (page && !page_pool_recycle_in_ring(pool, page)) {
+ /* Cache full, fallback to free pages */
++ recycle_stat_inc(pool, ring_full);
+ page_pool_return_page(pool, page);
+ }
+ }
+@@ -641,6 +664,9 @@ static void page_pool_free(struct page_p
+ if (pool->p.flags & PP_FLAG_DMA_MAP)
+ put_device(pool->p.dev);
+
++#ifdef CONFIG_PAGE_POOL_STATS
++ free_percpu(pool->recycle_stats);
++#endif
+ kfree(pool);
+ }
+
--- /dev/null
+commit 6b95e3388b1ea0ca63500c5a6e39162dbf828433
+Author: Joe Damato <jdamato@fastly.com>
+Date: Tue Mar 1 23:55:49 2022 -0800
+
+ page_pool: Add function to batch and return stats
+
+ Add a function, page_pool_get_stats, which can be used by drivers to obtain
+ stats for a specified page_pool.
+
+ Signed-off-by: Joe Damato <jdamato@fastly.com>
+ Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
+ Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
+ Signed-off-by: David S. Miller <davem@davemloft.net>
+
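+ A driver could harvest the counters roughly as follows (a sketch; "pool"
+ and "dev" are assumed driver state, and the stats struct should be zeroed
+ since the per-cpu recycle counters are summed into it):
+
+     struct page_pool_stats stats = {};
+
+     if (page_pool_get_stats(pool, &stats))
+             netdev_info(dev, "pages recycled via ptr ring: %llu\n",
+                         stats.recycle_stats.ring);
+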
+--- a/include/net/page_pool.h
++++ b/include/net/page_pool.h
+@@ -105,6 +105,23 @@ struct page_pool_recycle_stats {
+ * refcnt
+ */
+ };
++
++/* This struct wraps the above stats structs so users of the
++ * page_pool_get_stats API can pass a single argument when requesting the
++ * stats for the page pool.
++ */
++struct page_pool_stats {
++ struct page_pool_alloc_stats alloc_stats;
++ struct page_pool_recycle_stats recycle_stats;
++};
++
++/*
++ * Drivers that wish to harvest page pool stats and report them to users
++ * (perhaps via ethtool, debugfs, or another mechanism) can allocate a
++ * struct page_pool_stats and call page_pool_get_stats to get stats for the specified pool.
++ */
++bool page_pool_get_stats(struct page_pool *pool,
++ struct page_pool_stats *stats);
+ #endif
+
+ struct page_pool {
+--- a/net/core/page_pool.c
++++ b/net/core/page_pool.c
+@@ -35,6 +35,31 @@
+ struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
+ this_cpu_inc(s->__stat); \
+ } while (0)
++
++bool page_pool_get_stats(struct page_pool *pool,
++ struct page_pool_stats *stats)
++{
++ int cpu = 0;
++
++ if (!stats)
++ return false;
++
++ memcpy(&stats->alloc_stats, &pool->alloc_stats, sizeof(pool->alloc_stats));
++
++ for_each_possible_cpu(cpu) {
++ const struct page_pool_recycle_stats *pcpu =
++ per_cpu_ptr(pool->recycle_stats, cpu);
++
++ stats->recycle_stats.cached += pcpu->cached;
++ stats->recycle_stats.cache_full += pcpu->cache_full;
++ stats->recycle_stats.ring += pcpu->ring;
++ stats->recycle_stats.ring_full += pcpu->ring_full;
++ stats->recycle_stats.released_refcnt += pcpu->released_refcnt;
++ }
++
++ return true;
++}
++EXPORT_SYMBOL(page_pool_get_stats);
+ #else
+ #define alloc_stat_inc(pool, __stat)
+ #define recycle_stat_inc(pool, __stat)
--- /dev/null
+commit 590032a4d2133ecc10d3078a8db1d85a4842f12c
+Author: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Mon Apr 11 16:05:26 2022 +0200
+
+ page_pool: Add recycle stats to page_pool_put_page_bulk
+
+ Add missing recycle stats to page_pool_put_page_bulk routine.
+
+ Reviewed-by: Joe Damato <jdamato@fastly.com>
+ Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+ Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
+ Link: https://lore.kernel.org/r/3712178b51c007cfaed910ea80e68f00c916b1fa.1649685634.git.lorenzo@kernel.org
+ Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+
+--- a/net/core/page_pool.c
++++ b/net/core/page_pool.c
+@@ -36,6 +36,12 @@
+ this_cpu_inc(s->__stat); \
+ } while (0)
+
++#define recycle_stat_add(pool, __stat, val) \
++ do { \
++ struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
++ this_cpu_add(s->__stat, val); \
++ } while (0)
++
+ bool page_pool_get_stats(struct page_pool *pool,
+ struct page_pool_stats *stats)
+ {
+@@ -63,6 +69,7 @@ EXPORT_SYMBOL(page_pool_get_stats);
+ #else
+ #define alloc_stat_inc(pool, __stat)
+ #define recycle_stat_inc(pool, __stat)
++#define recycle_stat_add(pool, __stat, val)
+ #endif
+
+ static int page_pool_init(struct page_pool *pool,
+@@ -569,9 +576,13 @@ void page_pool_put_page_bulk(struct page
+ /* Bulk producer into ptr_ring page_pool cache */
+ page_pool_ring_lock(pool);
+ for (i = 0; i < bulk_len; i++) {
+- if (__ptr_ring_produce(&pool->ring, data[i]))
+- break; /* ring full */
++ if (__ptr_ring_produce(&pool->ring, data[i])) {
++ /* ring full */
++ recycle_stat_inc(pool, ring_full);
++ break;
++ }
+ }
++ recycle_stat_add(pool, ring, i);
+ page_pool_ring_unlock(pool);
+
+ /* Hopefully all pages was return into ptr_ring */
--- /dev/null
+commit f3c5264f452a5b0ac1de1f2f657efbabdea3c76a
+Author: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Tue Apr 12 18:31:58 2022 +0200
+
+ net: page_pool: introduce ethtool stats
+
+ Introduce page_pool APIs to report stats through ethtool and reduce
+ duplicated code in each driver.
+
+ Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+ Reviewed-by: Jakub Kicinski <kuba@kernel.org>
+ Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
+ Signed-off-by: David S. Miller <davem@davemloft.net>
+
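+ The intended driver-side glue looks roughly like this (a hedged sketch with
+ hypothetical names; "priv->pool" stands in for the driver's page pool):
+
+     static int foo_get_sset_count(struct net_device *dev, int sset)
+     {
+             return sset == ETH_SS_STATS ?
+                    page_pool_ethtool_stats_get_count() : -EOPNOTSUPP;
+     }
+
+     /* in .get_strings: data = page_pool_ethtool_stats_get_strings(data); */
+
+     static void foo_get_ethtool_stats(struct net_device *dev,
+                                       struct ethtool_stats *stats, u64 *data)
+     {
+             struct foo_priv *priv = netdev_priv(dev);
+             struct page_pool_stats pp_stats = {};
+
+             page_pool_get_stats(priv->pool, &pp_stats);
+             data = page_pool_ethtool_stats_get(data, &pp_stats);
+     }
+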
+--- a/include/net/page_pool.h
++++ b/include/net/page_pool.h
+@@ -115,6 +115,10 @@ struct page_pool_stats {
+ struct page_pool_recycle_stats recycle_stats;
+ };
+
++int page_pool_ethtool_stats_get_count(void);
++u8 *page_pool_ethtool_stats_get_strings(u8 *data);
++u64 *page_pool_ethtool_stats_get(u64 *data, void *stats);
++
+ /*
+ * Drivers that wish to harvest page pool stats and report them to users
+ * (perhaps via ethtool, debugfs, or another mechanism) can allocate a
+@@ -122,6 +126,23 @@ struct page_pool_stats {
+ */
+ bool page_pool_get_stats(struct page_pool *pool,
+ struct page_pool_stats *stats);
++#else
++
++static inline int page_pool_ethtool_stats_get_count(void)
++{
++ return 0;
++}
++
++static inline u8 *page_pool_ethtool_stats_get_strings(u8 *data)
++{
++ return data;
++}
++
++static inline u64 *page_pool_ethtool_stats_get(u64 *data, void *stats)
++{
++ return data;
++}
++
+ #endif
+
+ struct page_pool {
+--- a/net/core/page_pool.c
++++ b/net/core/page_pool.c
+@@ -18,6 +18,7 @@
+ #include <linux/page-flags.h>
+ #include <linux/mm.h> /* for __put_page() */
+ #include <linux/poison.h>
++#include <linux/ethtool.h>
+
+ #include <trace/events/page_pool.h>
+
+@@ -42,6 +43,20 @@
+ this_cpu_add(s->__stat, val); \
+ } while (0)
+
++static const char pp_stats[][ETH_GSTRING_LEN] = {
++ "rx_pp_alloc_fast",
++ "rx_pp_alloc_slow",
++ "rx_pp_alloc_slow_ho",
++ "rx_pp_alloc_empty",
++ "rx_pp_alloc_refill",
++ "rx_pp_alloc_waive",
++ "rx_pp_recycle_cached",
++ "rx_pp_recycle_cache_full",
++ "rx_pp_recycle_ring",
++ "rx_pp_recycle_ring_full",
++ "rx_pp_recycle_released_ref",
++};
++
+ bool page_pool_get_stats(struct page_pool *pool,
+ struct page_pool_stats *stats)
+ {
+@@ -50,7 +65,13 @@ bool page_pool_get_stats(struct page_poo
+ if (!stats)
+ return false;
+
+- memcpy(&stats->alloc_stats, &pool->alloc_stats, sizeof(pool->alloc_stats));
++ /* The caller is responsible to initialize stats. */
++ stats->alloc_stats.fast += pool->alloc_stats.fast;
++ stats->alloc_stats.slow += pool->alloc_stats.slow;
++ stats->alloc_stats.slow_high_order += pool->alloc_stats.slow_high_order;
++ stats->alloc_stats.empty += pool->alloc_stats.empty;
++ stats->alloc_stats.refill += pool->alloc_stats.refill;
++ stats->alloc_stats.waive += pool->alloc_stats.waive;
+
+ for_each_possible_cpu(cpu) {
+ const struct page_pool_recycle_stats *pcpu =
+@@ -66,6 +87,46 @@ bool page_pool_get_stats(struct page_poo
+ return true;
+ }
+ EXPORT_SYMBOL(page_pool_get_stats);
++
++u8 *page_pool_ethtool_stats_get_strings(u8 *data)
++{
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(pp_stats); i++) {
++ memcpy(data, pp_stats[i], ETH_GSTRING_LEN);
++ data += ETH_GSTRING_LEN;
++ }
++
++ return data;
++}
++EXPORT_SYMBOL(page_pool_ethtool_stats_get_strings);
++
++int page_pool_ethtool_stats_get_count(void)
++{
++ return ARRAY_SIZE(pp_stats);
++}
++EXPORT_SYMBOL(page_pool_ethtool_stats_get_count);
++
++u64 *page_pool_ethtool_stats_get(u64 *data, void *stats)
++{
++ struct page_pool_stats *pool_stats = stats;
++
++ *data++ = pool_stats->alloc_stats.fast;
++ *data++ = pool_stats->alloc_stats.slow;
++ *data++ = pool_stats->alloc_stats.slow_high_order;
++ *data++ = pool_stats->alloc_stats.empty;
++ *data++ = pool_stats->alloc_stats.refill;
++ *data++ = pool_stats->alloc_stats.waive;
++ *data++ = pool_stats->recycle_stats.cached;
++ *data++ = pool_stats->recycle_stats.cache_full;
++ *data++ = pool_stats->recycle_stats.ring;
++ *data++ = pool_stats->recycle_stats.ring_full;
++ *data++ = pool_stats->recycle_stats.released_refcnt;
++
++ return data;
++}
++EXPORT_SYMBOL(page_pool_ethtool_stats_get);
++
+ #else
+ #define alloc_stat_inc(pool, __stat)
+ #define recycle_stat_inc(pool, __stat)
--- /dev/null
+commit 2e88d4ff03013937028f5397268b21e10cf68713
+Author: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Fri Jan 21 11:09:45 2022 +0100
+
+ xdp: introduce flags field in xdp_buff/xdp_frame
+
+ Introduce a flags field in the xdp_frame and xdp_buff data structures
+ to define additional buffer features. At the moment the only
+ supported buffer feature is the frags bit (XDP_FLAGS_HAS_FRAGS).
+ The frags bit specifies whether this is a linear buffer
+ (XDP_FLAGS_HAS_FRAGS not set) or a frags frame (XDP_FLAGS_HAS_FRAGS
+ set). In the latter case the driver is expected to initialize the
+ skb_shared_info structure at the end of the first buffer to link together
+ subsequent buffers belonging to the same frame.
+
+ Acked-by: Toke Hoiland-Jorgensen <toke@redhat.com>
+ Acked-by: John Fastabend <john.fastabend@gmail.com>
+ Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
+ Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+ Link: https://lore.kernel.org/r/e389f14f3a162c0a5bc6a2e1aa8dd01a90be117d.1642758637.git.lorenzo@kernel.org
+ Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+
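+ In driver terms the contract is roughly the following (a hedged sketch of a
+ hypothetical multi-buffer RX path; "rx_desc_has_more_frags" is an assumed
+ helper):
+
+     xdp_init_buff(&xdp, frame_sz, &rxq->xdp_rxq); /* flags start cleared */
+
+     if (rx_desc_has_more_frags) {
+             struct skb_shared_info *sinfo;
+
+             sinfo = xdp_get_shared_info_from_buff(&xdp);
+             sinfo->nr_frags = 0; /* then append the frags */
+             xdp_buff_set_frags_flag(&xdp);
+     }
+
+     /* consumers test xdp_buff_has_frags(&xdp) before touching the frags */
+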
+--- a/include/net/xdp.h
++++ b/include/net/xdp.h
+@@ -66,6 +66,10 @@ struct xdp_txq_info {
+ struct net_device *dev;
+ };
+
++enum xdp_buff_flags {
++ XDP_FLAGS_HAS_FRAGS = BIT(0), /* non-linear xdp buff */
++};
++
+ struct xdp_buff {
+ void *data;
+ void *data_end;
+@@ -74,13 +78,30 @@ struct xdp_buff {
+ struct xdp_rxq_info *rxq;
+ struct xdp_txq_info *txq;
+ u32 frame_sz; /* frame size to deduce data_hard_end/reserved tailroom*/
++ u32 flags; /* supported values defined in xdp_buff_flags */
+ };
+
++static __always_inline bool xdp_buff_has_frags(struct xdp_buff *xdp)
++{
++ return !!(xdp->flags & XDP_FLAGS_HAS_FRAGS);
++}
++
++static __always_inline void xdp_buff_set_frags_flag(struct xdp_buff *xdp)
++{
++ xdp->flags |= XDP_FLAGS_HAS_FRAGS;
++}
++
++static __always_inline void xdp_buff_clear_frags_flag(struct xdp_buff *xdp)
++{
++ xdp->flags &= ~XDP_FLAGS_HAS_FRAGS;
++}
++
+ static __always_inline void
+ xdp_init_buff(struct xdp_buff *xdp, u32 frame_sz, struct xdp_rxq_info *rxq)
+ {
+ xdp->frame_sz = frame_sz;
+ xdp->rxq = rxq;
++ xdp->flags = 0;
+ }
+
+ static __always_inline void
+@@ -122,8 +143,14 @@ struct xdp_frame {
+ */
+ struct xdp_mem_info mem;
+ struct net_device *dev_rx; /* used by cpumap */
++ u32 flags; /* supported values defined in xdp_buff_flags */
+ };
+
++static __always_inline bool xdp_frame_has_frags(struct xdp_frame *frame)
++{
++ return !!(frame->flags & XDP_FLAGS_HAS_FRAGS);
++}
++
+ #define XDP_BULK_QUEUE_SIZE 16
+ struct xdp_frame_bulk {
+ int count;
+@@ -180,6 +207,7 @@ void xdp_convert_frame_to_buff(struct xd
+ xdp->data_end = frame->data + frame->len;
+ xdp->data_meta = frame->data - frame->metasize;
+ xdp->frame_sz = frame->frame_sz;
++ xdp->flags = frame->flags;
+ }
+
+ static inline
+@@ -206,6 +234,7 @@ int xdp_update_frame_from_buff(struct xd
+ xdp_frame->headroom = headroom - sizeof(*xdp_frame);
+ xdp_frame->metasize = metasize;
+ xdp_frame->frame_sz = xdp->frame_sz;
++ xdp_frame->flags = xdp->flags;
+
+ return 0;
+ }
--- /dev/null
+commit 7c48cb0176c6d6d3b55029f7ff4ffa05faee6446
+Author: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Fri Jan 21 11:09:50 2022 +0100
+
+ xdp: add frags support to xdp_return_{buff/frame}
+
+ Take into account whether the received xdp_buff/xdp_frame is non-linear
+ when recycling/returning the frame memory to the allocator or into
+ xdp_frame_bulk.
+
+ Acked-by: Toke Hoiland-Jorgensen <toke@redhat.com>
+ Acked-by: John Fastabend <john.fastabend@gmail.com>
+ Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+ Link: https://lore.kernel.org/r/a961069febc868508ce1bdf5e53a343eb4e57cb2.1642758637.git.lorenzo@kernel.org
+ Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+
+--- a/include/net/xdp.h
++++ b/include/net/xdp.h
+@@ -275,10 +275,24 @@ void __xdp_release_frame(void *data, str
+ static inline void xdp_release_frame(struct xdp_frame *xdpf)
+ {
+ struct xdp_mem_info *mem = &xdpf->mem;
++ struct skb_shared_info *sinfo;
++ int i;
+
+ /* Curr only page_pool needs this */
+- if (mem->type == MEM_TYPE_PAGE_POOL)
+- __xdp_release_frame(xdpf->data, mem);
++ if (mem->type != MEM_TYPE_PAGE_POOL)
++ return;
++
++ if (likely(!xdp_frame_has_frags(xdpf)))
++ goto out;
++
++ sinfo = xdp_get_shared_info_from_frame(xdpf);
++ for (i = 0; i < sinfo->nr_frags; i++) {
++ struct page *page = skb_frag_page(&sinfo->frags[i]);
++
++ __xdp_release_frame(page_address(page), mem);
++ }
++out:
++ __xdp_release_frame(xdpf->data, mem);
+ }
+
+ int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
+--- a/net/core/xdp.c
++++ b/net/core/xdp.c
+@@ -376,12 +376,38 @@ static void __xdp_return(void *data, str
+
+ void xdp_return_frame(struct xdp_frame *xdpf)
+ {
++ struct skb_shared_info *sinfo;
++ int i;
++
++ if (likely(!xdp_frame_has_frags(xdpf)))
++ goto out;
++
++ sinfo = xdp_get_shared_info_from_frame(xdpf);
++ for (i = 0; i < sinfo->nr_frags; i++) {
++ struct page *page = skb_frag_page(&sinfo->frags[i]);
++
++ __xdp_return(page_address(page), &xdpf->mem, false, NULL);
++ }
++out:
+ __xdp_return(xdpf->data, &xdpf->mem, false, NULL);
+ }
+ EXPORT_SYMBOL_GPL(xdp_return_frame);
+
+ void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
+ {
++ struct skb_shared_info *sinfo;
++ int i;
++
++ if (likely(!xdp_frame_has_frags(xdpf)))
++ goto out;
++
++ sinfo = xdp_get_shared_info_from_frame(xdpf);
++ for (i = 0; i < sinfo->nr_frags; i++) {
++ struct page *page = skb_frag_page(&sinfo->frags[i]);
++
++ __xdp_return(page_address(page), &xdpf->mem, true, NULL);
++ }
++out:
+ __xdp_return(xdpf->data, &xdpf->mem, true, NULL);
+ }
+ EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);
+@@ -417,7 +443,7 @@ void xdp_return_frame_bulk(struct xdp_fr
+ struct xdp_mem_allocator *xa;
+
+ if (mem->type != MEM_TYPE_PAGE_POOL) {
+- __xdp_return(xdpf->data, &xdpf->mem, false, NULL);
++ xdp_return_frame(xdpf);
+ return;
+ }
+
+@@ -436,12 +462,38 @@ void xdp_return_frame_bulk(struct xdp_fr
+ bq->xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
+ }
+
++ if (unlikely(xdp_frame_has_frags(xdpf))) {
++ struct skb_shared_info *sinfo;
++ int i;
++
++ sinfo = xdp_get_shared_info_from_frame(xdpf);
++ for (i = 0; i < sinfo->nr_frags; i++) {
++ skb_frag_t *frag = &sinfo->frags[i];
++
++ bq->q[bq->count++] = skb_frag_address(frag);
++ if (bq->count == XDP_BULK_QUEUE_SIZE)
++ xdp_flush_frame_bulk(bq);
++ }
++ }
+ bq->q[bq->count++] = xdpf->data;
+ }
+ EXPORT_SYMBOL_GPL(xdp_return_frame_bulk);
+
+ void xdp_return_buff(struct xdp_buff *xdp)
+ {
++ struct skb_shared_info *sinfo;
++ int i;
++
++ if (likely(!xdp_buff_has_frags(xdp)))
++ goto out;
++
++ sinfo = xdp_get_shared_info_from_buff(xdp);
++ for (i = 0; i < sinfo->nr_frags; i++) {
++ struct page *page = skb_frag_page(&sinfo->frags[i]);
++
++ __xdp_return(page_address(page), &xdp->rxq->mem, true, xdp);
++ }
++out:
+ __xdp_return(xdp->data, &xdp->rxq->mem, true, xdp);
+ }
+
--- /dev/null
+commit d16697cb6261d4cc23422e6b1cb2759df8aa76d0
+Author: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Fri Jan 21 11:09:44 2022 +0100
+
+ net: skbuff: add size metadata to skb_shared_info for xdp
+
+ Introduce the xdp_frags_size field in the skb_shared_info data structure
+ to store the paged size of an xdp_buff/xdp_frame (xdp_frags_size will
+ be used by the xdp frags support). In order not to increase the
+ skb_shared_info size, the field is placed in a hole created by
+ skb_shared_info alignment.
+
+ Acked-by: Toke Hoiland-Jorgensen <toke@redhat.com>
+ Acked-by: John Fastabend <john.fastabend@gmail.com>
+ Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
+ Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+ Link: https://lore.kernel.org/r/8a849819a3e0a143d540f78a3a5add76e17e980d.1642758637.git.lorenzo@kernel.org
+ Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+
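+ The hole exists because atomic_t dataref (4 bytes) is followed by the
+ pointer-aligned destructor_arg, leaving 4 bytes of padding on 64-bit
+ builds; the new unsigned int slots into it, so sizeof(struct
+ skb_shared_info) should be unchanged. The layout can be checked with
+ pahole, e.g.:
+
+     pahole -C skb_shared_info vmlinux
+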
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -567,6 +567,7 @@ struct skb_shared_info {
+ * Warning : all fields before dataref are cleared in __alloc_skb()
+ */
+ atomic_t dataref;
++ unsigned int xdp_frags_size;
+
+ /* Intermediate layers must ensure that destructor_arg
+ * remains valid until skb destructor */
--- /dev/null
+commit 5142239a22219921a7863cf00c9ab853c00689d8
+Author: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Fri Mar 11 10:14:18 2022 +0100
+
+ net: veth: Account total xdp_frame len running ndo_xdp_xmit
+
+ Even though this is a theoretical issue, since it is not possible to
+ perform XDP_REDIRECT on a non-linear xdp_frame, the veth driver does not
+ account for the paged area in its ndo_xdp_xmit implementation.
+ Introduce the xdp_get_frame_len utility routine to get the full
+ xdp_frame length, and account for the total frame size when running
+ XDP_REDIRECT of a non-linear xdp frame into a veth device.
+
+ Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+ Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+ Acked-by: Toke Hoiland-Jorgensen <toke@redhat.com>
+ Acked-by: John Fastabend <john.fastabend@gmail.com>
+ Link: https://lore.kernel.org/bpf/54f9fd3bb65d190daf2c0bbae2f852ff16cfbaa0.1646989407.git.lorenzo@kernel.org
+
+--- a/drivers/net/veth.c
++++ b/drivers/net/veth.c
+@@ -501,7 +501,7 @@ static int veth_xdp_xmit(struct net_devi
+ struct xdp_frame *frame = frames[i];
+ void *ptr = veth_xdp_to_ptr(frame);
+
+- if (unlikely(frame->len > max_len ||
++ if (unlikely(xdp_get_frame_len(frame) > max_len ||
+ __ptr_ring_produce(&rq->xdp_ring, ptr)))
+ break;
+ nxmit++;
+@@ -862,7 +862,7 @@ static int veth_xdp_rcv(struct veth_rq *
+ /* ndo_xdp_xmit */
+ struct xdp_frame *frame = veth_ptr_to_xdp(ptr);
+
+- stats->xdp_bytes += frame->len;
++ stats->xdp_bytes += xdp_get_frame_len(frame);
+ frame = veth_xdp_rcv_one(rq, frame, bq, stats);
+ if (frame) {
+ /* XDP_PASS */
+--- a/include/net/xdp.h
++++ b/include/net/xdp.h
+@@ -295,6 +295,20 @@ out:
+ __xdp_release_frame(xdpf->data, mem);
+ }
+
++static __always_inline unsigned int xdp_get_frame_len(struct xdp_frame *xdpf)
++{
++ struct skb_shared_info *sinfo;
++ unsigned int len = xdpf->len;
++
++ if (likely(!xdp_frame_has_frags(xdpf)))
++ goto out;
++
++ sinfo = xdp_get_shared_info_from_frame(xdpf);
++ len += sinfo->xdp_frags_size;
++out:
++ return len;
++}
++
+ int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
+ struct net_device *dev, u32 queue_index, unsigned int napi_id);
+ void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq);
--- /dev/null
+commit 7cda76d858a4e71ac4a04066c093679a12e1312c
+Author: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Fri Mar 11 10:14:20 2022 +0100
+
+ veth: Allow jumbo frames in xdp mode
+
+ Allow increasing the MTU over page boundaries on veth devices
+ if the attached XDP program declares support for XDP fragments.
+
+ Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+ Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+ Acked-by: Toke Høiland-Jørgensen <toke@redhat.com>
+ Acked-by: John Fastabend <john.fastabend@gmail.com>
+ Link: https://lore.kernel.org/bpf/d5dc039c3d4123426e7023a488c449181a7bc57f.1646989407.git.lorenzo@kernel.org
+
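+ As a worked example with 4 KiB pages (approximate numbers: VETH_XDP_HEADROOM
+ is XDP_PACKET_HEADROOM + NET_IP_ALIGN, i.e. roughly 256 bytes, and an
+ Ethernet header is 14 bytes), the linear budget becomes
+
+     SKB_WITH_OVERHEAD(4096 - 256) - 14
+         = 4096 - 256 - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) - 14
+
+ while a frags-capable program additionally gets PAGE_SIZE * MAX_SKB_FRAGS
+ (17 * 4096 bytes with the default MAX_SKB_FRAGS), comfortably covering
+ jumbo MTUs.
+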
+--- a/drivers/net/veth.c
++++ b/drivers/net/veth.c
+@@ -1470,9 +1470,14 @@ static int veth_xdp_set(struct net_devic
+ goto err;
+ }
+
+- max_mtu = PAGE_SIZE - VETH_XDP_HEADROOM -
+- peer->hard_header_len -
+- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
++ max_mtu = SKB_WITH_OVERHEAD(PAGE_SIZE - VETH_XDP_HEADROOM) -
++ peer->hard_header_len;
++ /* Allow increasing the max_mtu if the program supports
++ * XDP fragments.
++ */
++ if (prog->aux->xdp_has_frags)
++ max_mtu += PAGE_SIZE * MAX_SKB_FRAGS;
++
+ if (peer->mtu > max_mtu) {
+ NL_SET_ERR_MSG_MOD(extack, "Peer MTU is too large to set XDP");
+ err = -ERANGE;
--- /dev/null
+From 23233e577ef973c2c5d0dd757a0a4605e34ecb57 Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Fri, 22 Jul 2022 09:19:36 +0200
+Subject: [PATCH] net: ethernet: mtk_eth_soc: rely on page_pool for single page
+ buffers
+
+Rely on the page_pool allocator for single-page buffers in order to keep
+them DMA-mapped and to add skb recycling support.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/mediatek/Kconfig | 1 +
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 185 +++++++++++++++-----
+ drivers/net/ethernet/mediatek/mtk_eth_soc.h | 10 ++
+ 3 files changed, 156 insertions(+), 40 deletions(-)
+
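+The driver adopts the standard page_pool flow, sketched below with assumed
+sizes (not the exact driver code):
+
+    struct page_pool_params pp_params = {
+        .order     = 0,
+        .flags     = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+        .pool_size = 1024,
+        .nid       = NUMA_NO_NODE,
+        .dev       = dma_dev,                 /* device doing the DMA */
+        .dma_dir   = DMA_FROM_DEVICE,
+        .offset    = XDP_PACKET_HEADROOM,
+        .max_len   = PAGE_SIZE - XDP_PACKET_HEADROOM,
+    };
+    struct page_pool *pool = page_pool_create(&pp_params);
+    struct page *page = page_pool_alloc_pages(pool, GFP_ATOMIC | __GFP_NOWARN);
+    dma_addr_t dma = page_pool_get_dma_addr(page); /* mapping kept by pool */
+
+    /* when building the skb, opt in to recycling back into the pool */
+    skb_mark_for_recycle(skb);
+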
+--- a/drivers/net/ethernet/mediatek/Kconfig
++++ b/drivers/net/ethernet/mediatek/Kconfig
+@@ -16,6 +16,7 @@ config NET_MEDIATEK_SOC
+ depends on NET_DSA || !NET_DSA
+ select PHYLINK
+ select DIMLIB
++ select PAGE_POOL
+ help
+ This driver supports the gigabit ethernet MACs in the
+ MediaTek SoC family.
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -1388,6 +1388,68 @@ static void mtk_update_rx_cpu_idx(struct
+ }
+ }
+
++static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
++ struct xdp_rxq_info *xdp_q,
++ int id, int size)
++{
++ struct page_pool_params pp_params = {
++ .order = 0,
++ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
++ .pool_size = size,
++ .nid = NUMA_NO_NODE,
++ .dev = eth->dma_dev,
++ .dma_dir = DMA_FROM_DEVICE,
++ .offset = MTK_PP_HEADROOM,
++ .max_len = MTK_PP_MAX_BUF_SIZE,
++ };
++ struct page_pool *pp;
++ int err;
++
++ pp = page_pool_create(&pp_params);
++ if (IS_ERR(pp))
++ return pp;
++
++ err = xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, id,
++ eth->rx_napi.napi_id);
++ if (err < 0)
++ goto err_free_pp;
++
++ err = xdp_rxq_info_reg_mem_model(xdp_q, MEM_TYPE_PAGE_POOL, pp);
++ if (err)
++ goto err_unregister_rxq;
++
++ return pp;
++
++err_unregister_rxq:
++ xdp_rxq_info_unreg(xdp_q);
++err_free_pp:
++ page_pool_destroy(pp);
++
++ return ERR_PTR(err);
++}
++
++static void *mtk_page_pool_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
++ gfp_t gfp_mask)
++{
++ struct page *page;
++
++ page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
++ if (!page)
++ return NULL;
++
++ *dma_addr = page_pool_get_dma_addr(page) + MTK_PP_HEADROOM;
++ return page_address(page);
++}
++
++static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
++{
++ if (ring->page_pool)
++ page_pool_put_full_page(ring->page_pool,
++ virt_to_head_page(data), napi);
++ else
++ skb_free_frag(data);
++}
++
+ static int mtk_poll_rx(struct napi_struct *napi, int budget,
+ struct mtk_eth *eth)
+ {
+@@ -1401,9 +1463,9 @@ static int mtk_poll_rx(struct napi_struc
+
+ while (done < budget) {
+ unsigned int pktlen, *rxdcsum;
++ u32 hash, reason, reserve_len;
+ struct net_device *netdev;
+ dma_addr_t dma_addr;
+- u32 hash, reason;
+ int mac = 0;
+
+ ring = mtk_get_rx_ring(eth);
+@@ -1434,36 +1496,54 @@ static int mtk_poll_rx(struct napi_struc
+ goto release_desc;
+
+ /* alloc new buffer */
+- if (ring->frag_size <= PAGE_SIZE)
+- new_data = napi_alloc_frag(ring->frag_size);
+- else
+- new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
+- if (unlikely(!new_data)) {
+- netdev->stats.rx_dropped++;
+- goto release_desc;
+- }
+- dma_addr = dma_map_single(eth->dma_dev,
+- new_data + NET_SKB_PAD +
+- eth->ip_align,
+- ring->buf_size,
+- DMA_FROM_DEVICE);
+- if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) {
+- skb_free_frag(new_data);
+- netdev->stats.rx_dropped++;
+- goto release_desc;
+- }
++ if (ring->page_pool) {
++ new_data = mtk_page_pool_get_buff(ring->page_pool,
++ &dma_addr,
++ GFP_ATOMIC);
++ if (unlikely(!new_data)) {
++ netdev->stats.rx_dropped++;
++ goto release_desc;
++ }
++ } else {
++ if (ring->frag_size <= PAGE_SIZE)
++ new_data = napi_alloc_frag(ring->frag_size);
++ else
++ new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
++
++ if (unlikely(!new_data)) {
++ netdev->stats.rx_dropped++;
++ goto release_desc;
++ }
+
+- dma_unmap_single(eth->dma_dev, trxd.rxd1,
+- ring->buf_size, DMA_FROM_DEVICE);
++ dma_addr = dma_map_single(eth->dma_dev,
++ new_data + NET_SKB_PAD + eth->ip_align,
++ ring->buf_size, DMA_FROM_DEVICE);
++ if (unlikely(dma_mapping_error(eth->dma_dev,
++ dma_addr))) {
++ skb_free_frag(new_data);
++ netdev->stats.rx_dropped++;
++ goto release_desc;
++ }
++
++ dma_unmap_single(eth->dma_dev, trxd.rxd1,
++ ring->buf_size, DMA_FROM_DEVICE);
++ }
+
+ /* receive data */
+ skb = build_skb(data, ring->frag_size);
+ if (unlikely(!skb)) {
+- skb_free_frag(data);
++ mtk_rx_put_buff(ring, data, true);
+ netdev->stats.rx_dropped++;
+ goto skip_rx;
+ }
+- skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
++
++ if (ring->page_pool) {
++ reserve_len = MTK_PP_HEADROOM;
++ skb_mark_for_recycle(skb);
++ } else {
++ reserve_len = NET_SKB_PAD + NET_IP_ALIGN;
++ }
++ skb_reserve(skb, reserve_len);
+
+ pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
+ skb->dev = netdev;
+@@ -1517,7 +1597,6 @@ static int mtk_poll_rx(struct napi_struc
+ skip_rx:
+ ring->data[idx] = new_data;
+ rxd->rxd1 = (unsigned int)dma_addr;
+-
+ release_desc:
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
+ rxd->rxd2 = RX_DMA_LSO;
+@@ -1525,7 +1604,6 @@ release_desc:
+ rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
+
+ ring->calc_idx = idx;
+-
+ done++;
+ }
+
+@@ -1889,13 +1967,15 @@ static int mtk_rx_alloc(struct mtk_eth *
+ if (!ring->data)
+ return -ENOMEM;
+
+- for (i = 0; i < rx_dma_size; i++) {
+- if (ring->frag_size <= PAGE_SIZE)
+- ring->data[i] = netdev_alloc_frag(ring->frag_size);
+- else
+- ring->data[i] = mtk_max_lro_buf_alloc(GFP_KERNEL);
+- if (!ring->data[i])
+- return -ENOMEM;
++ if (!eth->hwlro) {
++ struct page_pool *pp;
++
++ pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no,
++ rx_dma_size);
++ if (IS_ERR(pp))
++ return PTR_ERR(pp);
++
++ ring->page_pool = pp;
+ }
+
+ ring->dma = dma_alloc_coherent(eth->dma_dev,
+@@ -1906,16 +1986,33 @@ static int mtk_rx_alloc(struct mtk_eth *
+
+ for (i = 0; i < rx_dma_size; i++) {
+ struct mtk_rx_dma_v2 *rxd;
+-
+- dma_addr_t dma_addr = dma_map_single(eth->dma_dev,
+- ring->data[i] + NET_SKB_PAD + eth->ip_align,
+- ring->buf_size,
+- DMA_FROM_DEVICE);
+- if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
+- return -ENOMEM;
++ dma_addr_t dma_addr;
++ void *data;
+
+ rxd = ring->dma + i * eth->soc->txrx.rxd_size;
++ if (ring->page_pool) {
++ data = mtk_page_pool_get_buff(ring->page_pool,
++ &dma_addr, GFP_KERNEL);
++ if (!data)
++ return -ENOMEM;
++ } else {
++ if (ring->frag_size <= PAGE_SIZE)
++ data = netdev_alloc_frag(ring->frag_size);
++ else
++ data = mtk_max_lro_buf_alloc(GFP_KERNEL);
++
++ if (!data)
++ return -ENOMEM;
++
++ dma_addr = dma_map_single(eth->dma_dev,
++ data + NET_SKB_PAD + eth->ip_align,
++ ring->buf_size, DMA_FROM_DEVICE);
++ if (unlikely(dma_mapping_error(eth->dma_dev,
++ dma_addr)))
++ return -ENOMEM;
++ }
+ rxd->rxd1 = (unsigned int)dma_addr;
++ ring->data[i] = data;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
+ rxd->rxd2 = RX_DMA_LSO;
+@@ -1931,6 +2028,7 @@ static int mtk_rx_alloc(struct mtk_eth *
+ rxd->rxd8 = 0;
+ }
+ }
++
+ ring->dma_size = rx_dma_size;
+ ring->calc_idx_update = false;
+ ring->calc_idx = rx_dma_size - 1;
+@@ -1982,7 +2080,7 @@ static void mtk_rx_clean(struct mtk_eth
+
+ dma_unmap_single(eth->dma_dev, rxd->rxd1,
+ ring->buf_size, DMA_FROM_DEVICE);
+- skb_free_frag(ring->data[i]);
++ mtk_rx_put_buff(ring, ring->data[i], false);
+ }
+ kfree(ring->data);
+ ring->data = NULL;
+@@ -1994,6 +2092,13 @@ static void mtk_rx_clean(struct mtk_eth
+ ring->dma, ring->phys);
+ ring->dma = NULL;
+ }
++
++ if (ring->page_pool) {
++ if (xdp_rxq_info_is_reg(&ring->xdp_q))
++ xdp_rxq_info_unreg(&ring->xdp_q);
++ page_pool_destroy(ring->page_pool);
++ ring->page_pool = NULL;
++ }
+ }
+
+ static int mtk_hwlro_rx_init(struct mtk_eth *eth)
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -18,6 +18,8 @@
+ #include <linux/rhashtable.h>
+ #include <linux/dim.h>
+ #include <linux/bitfield.h>
++#include <net/page_pool.h>
++#include <linux/bpf_trace.h>
+ #include "mtk_ppe.h"
+
+ #define MTK_QDMA_PAGE_SIZE 2048
+@@ -49,6 +51,11 @@
+ #define MTK_HW_FEATURES_MT7628 (NETIF_F_SG | NETIF_F_RXCSUM)
+ #define NEXT_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1))
+
++#define MTK_PP_HEADROOM XDP_PACKET_HEADROOM
++#define MTK_PP_PAD (MTK_PP_HEADROOM + \
++ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
++#define MTK_PP_MAX_BUF_SIZE (PAGE_SIZE - MTK_PP_PAD)
++
+ #define MTK_QRX_OFFSET 0x10
+
+ #define MTK_MAX_RX_RING_NUM 4
+@@ -742,6 +749,9 @@ struct mtk_rx_ring {
+ bool calc_idx_update;
+ u16 calc_idx;
+ u32 crx_idx_reg;
++ /* page_pool */
++ struct page_pool *page_pool;
++ struct xdp_rxq_info xdp_q;
+ };
+
+ enum mkt_eth_capabilities {
--- /dev/null
+From 7c26c20da5d420cde55618263be4aa2f6de53056 Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Fri, 22 Jul 2022 09:19:37 +0200
+Subject: [PATCH] net: ethernet: mtk_eth_soc: add basic XDP support
+
+Introduce basic XDP support to the mtk_eth_soc driver.
+Supported XDP verdicts:
+- XDP_PASS
+- XDP_DROP
+- XDP_REDIRECT
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 162 +++++++++++++++++---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.h | 2 +
+ 2 files changed, 145 insertions(+), 19 deletions(-)
+
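+A minimal XDP program exercising the supported verdicts could look like this
+(hypothetical BPF code, assuming the usual libbpf build setup):
+
+    #include <linux/bpf.h>
+    #include <bpf/bpf_helpers.h>
+
+    SEC("xdp")
+    int mtk_xdp_demo(struct xdp_md *ctx)
+    {
+        void *data = (void *)(long)ctx->data;
+        void *data_end = (void *)(long)ctx->data_end;
+
+        if (data + 14 > data_end) /* too short for an Ethernet header */
+            return XDP_DROP;
+
+        return XDP_PASS; /* or bpf_redirect(ifindex, 0) for XDP_REDIRECT */
+    }
+
+    char _license[] SEC("license") = "GPL";
+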
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -1388,6 +1388,11 @@ static void mtk_update_rx_cpu_idx(struct
+ }
+ }
+
++static bool mtk_page_pool_enabled(struct mtk_eth *eth)
++{
++ return !eth->hwlro;
++}
++
+ static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
+ struct xdp_rxq_info *xdp_q,
+ int id, int size)
+@@ -1450,11 +1455,52 @@ static void mtk_rx_put_buff(struct mtk_r
+ skb_free_frag(data);
+ }
+
++static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
++ struct xdp_buff *xdp, struct net_device *dev)
++{
++ struct bpf_prog *prog;
++ u32 act = XDP_PASS;
++
++ rcu_read_lock();
++
++ prog = rcu_dereference(eth->prog);
++ if (!prog)
++ goto out;
++
++ act = bpf_prog_run_xdp(prog, xdp);
++ switch (act) {
++ case XDP_PASS:
++ goto out;
++ case XDP_REDIRECT:
++ if (unlikely(xdp_do_redirect(dev, xdp, prog))) {
++ act = XDP_DROP;
++ break;
++ }
++ goto out;
++ default:
++ bpf_warn_invalid_xdp_action(act);
++ fallthrough;
++ case XDP_ABORTED:
++ trace_xdp_exception(dev, prog, act);
++ fallthrough;
++ case XDP_DROP:
++ break;
++ }
++
++ page_pool_put_full_page(ring->page_pool,
++ virt_to_head_page(xdp->data), true);
++out:
++ rcu_read_unlock();
++
++ return act;
++}
++
+ static int mtk_poll_rx(struct napi_struct *napi, int budget,
+ struct mtk_eth *eth)
+ {
+ struct dim_sample dim_sample = {};
+ struct mtk_rx_ring *ring;
++ bool xdp_flush = false;
+ int idx;
+ struct sk_buff *skb;
+ u8 *data, *new_data;
+@@ -1463,9 +1509,9 @@ static int mtk_poll_rx(struct napi_struc
+
+ while (done < budget) {
+ unsigned int pktlen, *rxdcsum;
+- u32 hash, reason, reserve_len;
+ struct net_device *netdev;
+ dma_addr_t dma_addr;
++ u32 hash, reason;
+ int mac = 0;
+
+ ring = mtk_get_rx_ring(eth);
+@@ -1495,8 +1541,14 @@ static int mtk_poll_rx(struct napi_struc
+ if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
+ goto release_desc;
+
++ pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
++
+ /* alloc new buffer */
+ if (ring->page_pool) {
++ struct page *page = virt_to_head_page(data);
++ struct xdp_buff xdp;
++ u32 ret;
++
+ new_data = mtk_page_pool_get_buff(ring->page_pool,
+ &dma_addr,
+ GFP_ATOMIC);
+@@ -1504,6 +1556,34 @@ static int mtk_poll_rx(struct napi_struc
+ netdev->stats.rx_dropped++;
+ goto release_desc;
+ }
++
++ dma_sync_single_for_cpu(eth->dma_dev,
++ page_pool_get_dma_addr(page) + MTK_PP_HEADROOM,
++ pktlen, page_pool_get_dma_dir(ring->page_pool));
++
++ xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
++ xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
++ false);
++ xdp_buff_clear_frags_flag(&xdp);
++
++ ret = mtk_xdp_run(eth, ring, &xdp, netdev);
++ if (ret == XDP_REDIRECT)
++ xdp_flush = true;
++
++ if (ret != XDP_PASS)
++ goto skip_rx;
++
++ skb = build_skb(data, PAGE_SIZE);
++ if (unlikely(!skb)) {
++ page_pool_put_full_page(ring->page_pool,
++ page, true);
++ netdev->stats.rx_dropped++;
++ goto skip_rx;
++ }
++
++ skb_reserve(skb, xdp.data - xdp.data_hard_start);
++ skb_put(skb, xdp.data_end - xdp.data);
++ skb_mark_for_recycle(skb);
+ } else {
+ if (ring->frag_size <= PAGE_SIZE)
+ new_data = napi_alloc_frag(ring->frag_size);
+@@ -1527,27 +1607,20 @@ static int mtk_poll_rx(struct napi_struc
+
+ dma_unmap_single(eth->dma_dev, trxd.rxd1,
+ ring->buf_size, DMA_FROM_DEVICE);
+- }
+
+- /* receive data */
+- skb = build_skb(data, ring->frag_size);
+- if (unlikely(!skb)) {
+- mtk_rx_put_buff(ring, data, true);
+- netdev->stats.rx_dropped++;
+- goto skip_rx;
+- }
++ skb = build_skb(data, ring->frag_size);
++ if (unlikely(!skb)) {
++ netdev->stats.rx_dropped++;
++ skb_free_frag(data);
++ goto skip_rx;
++ }
+
+- if (ring->page_pool) {
+- reserve_len = MTK_PP_HEADROOM;
+- skb_mark_for_recycle(skb);
+- } else {
+- reserve_len = NET_SKB_PAD + NET_IP_ALIGN;
++ skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
++ skb_put(skb, pktlen);
+ }
+- skb_reserve(skb, reserve_len);
+
+- pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
+ skb->dev = netdev;
+- skb_put(skb, pktlen);
++ bytes += skb->len;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ rxdcsum = &trxd.rxd3;
+@@ -1559,7 +1632,6 @@ static int mtk_poll_rx(struct napi_struc
+ else
+ skb_checksum_none_assert(skb);
+ skb->protocol = eth_type_trans(skb, netdev);
+- bytes += pktlen;
+
+ hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
+ if (hash != MTK_RXD4_FOE_ENTRY) {
+@@ -1622,6 +1694,9 @@ rx_done:
+ &dim_sample);
+ net_dim(&eth->rx_dim, dim_sample);
+
++ if (xdp_flush)
++ xdp_do_flush_map();
++
+ return done;
+ }
+
+@@ -1967,7 +2042,7 @@ static int mtk_rx_alloc(struct mtk_eth *
+ if (!ring->data)
+ return -ENOMEM;
+
+- if (!eth->hwlro) {
++ if (mtk_page_pool_enabled(eth)) {
+ struct page_pool *pp;
+
+ pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no,
+@@ -2707,6 +2782,48 @@ static int mtk_stop(struct net_device *d
+ return 0;
+ }
+
++static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
++ struct netlink_ext_ack *extack)
++{
++ struct mtk_mac *mac = netdev_priv(dev);
++ struct mtk_eth *eth = mac->hw;
++ struct bpf_prog *old_prog;
++ bool need_update;
++
++ if (eth->hwlro) {
++ NL_SET_ERR_MSG_MOD(extack, "XDP not supported with HWLRO");
++ return -EOPNOTSUPP;
++ }
++
++ if (dev->mtu > MTK_PP_MAX_BUF_SIZE) {
++ NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
++ return -EOPNOTSUPP;
++ }
++
++ need_update = !!eth->prog != !!prog;
++ if (netif_running(dev) && need_update)
++ mtk_stop(dev);
++
++ old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held());
++ if (old_prog)
++ bpf_prog_put(old_prog);
++
++ if (netif_running(dev) && need_update)
++ return mtk_open(dev);
++
++ return 0;
++}
++
++static int mtk_xdp(struct net_device *dev, struct netdev_bpf *xdp)
++{
++ switch (xdp->command) {
++ case XDP_SETUP_PROG:
++ return mtk_xdp_setup(dev, xdp->prog, xdp->extack);
++ default:
++ return -EINVAL;
++ }
++}
++
+ static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
+ {
+ regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
+@@ -3002,6 +3119,12 @@ static int mtk_change_mtu(struct net_dev
+ struct mtk_eth *eth = mac->hw;
+ u32 mcr_cur, mcr_new;
+
++ if (rcu_access_pointer(eth->prog) &&
++ length > MTK_PP_MAX_BUF_SIZE) {
++ netdev_err(dev, "Invalid MTU for XDP mode\n");
++ return -EINVAL;
++ }
++
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+ mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
+ mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
+@@ -3329,6 +3452,7 @@ static const struct net_device_ops mtk_n
+ .ndo_poll_controller = mtk_poll_controller,
+ #endif
+ .ndo_setup_tc = mtk_eth_setup_tc,
++ .ndo_bpf = mtk_xdp,
+ };
+
+ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -1085,6 +1085,8 @@ struct mtk_eth {
+
+ struct mtk_ppe *ppe;
+ struct rhashtable flow_table;
++
++ struct bpf_prog __rcu *prog;
+ };
+
+ /* struct mtk_mac - the structure that holds the info about the MACs of the
--- /dev/null
+From 916a6ee836d6b7b8ef1ed5f0515e256ca60e9968 Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Fri, 22 Jul 2022 09:19:38 +0200
+Subject: [PATCH] net: ethernet: mtk_eth_soc: introduce xdp ethtool counters
+
+Report XDP stats through ethtool.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 26 +++++++++++++++++++--
+ drivers/net/ethernet/mediatek/mtk_eth_soc.h | 12 ++++++++++
+ 2 files changed, 36 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -34,6 +34,10 @@ MODULE_PARM_DESC(msg_level, "Message lev
+ #define MTK_ETHTOOL_STAT(x) { #x, \
+ offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
+
++#define MTK_ETHTOOL_XDP_STAT(x) { #x, \
++ offsetof(struct mtk_hw_stats, xdp_stats.x) / \
++ sizeof(u64) }
++
+ static const struct mtk_reg_map mtk_reg_map = {
+ .tx_irq_mask = 0x1a1c,
+ .tx_irq_status = 0x1a18,
+@@ -141,6 +145,13 @@ static const struct mtk_ethtool_stats {
+ MTK_ETHTOOL_STAT(rx_long_errors),
+ MTK_ETHTOOL_STAT(rx_checksum_errors),
+ MTK_ETHTOOL_STAT(rx_flow_control_packets),
++ MTK_ETHTOOL_XDP_STAT(rx_xdp_redirect),
++ MTK_ETHTOOL_XDP_STAT(rx_xdp_pass),
++ MTK_ETHTOOL_XDP_STAT(rx_xdp_drop),
++ MTK_ETHTOOL_XDP_STAT(rx_xdp_tx),
++ MTK_ETHTOOL_XDP_STAT(rx_xdp_tx_errors),
++ MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit),
++ MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit_errors),
+ };
+
+ static const char * const mtk_clks_source_name[] = {
+@@ -1458,6 +1469,9 @@ static void mtk_rx_put_buff(struct mtk_r
+ static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
+ struct xdp_buff *xdp, struct net_device *dev)
+ {
++ struct mtk_mac *mac = netdev_priv(dev);
++ struct mtk_hw_stats *hw_stats = mac->hw_stats;
++ u64 *count = &hw_stats->xdp_stats.rx_xdp_drop;
+ struct bpf_prog *prog;
+ u32 act = XDP_PASS;
+
+@@ -1470,13 +1484,16 @@ static u32 mtk_xdp_run(struct mtk_eth *e
+ act = bpf_prog_run_xdp(prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+- goto out;
++ count = &hw_stats->xdp_stats.rx_xdp_pass;
++ goto update_stats;
+ case XDP_REDIRECT:
+ if (unlikely(xdp_do_redirect(dev, xdp, prog))) {
+ act = XDP_DROP;
+ break;
+ }
+- goto out;
++
++ count = &hw_stats->xdp_stats.rx_xdp_redirect;
++ goto update_stats;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ fallthrough;
+@@ -1489,6 +1506,11 @@ static u32 mtk_xdp_run(struct mtk_eth *e
+
+ page_pool_put_full_page(ring->page_pool,
+ virt_to_head_page(xdp->data), true);
++
++update_stats:
++ u64_stats_update_begin(&hw_stats->syncp);
++ *count = *count + 1;
++ u64_stats_update_end(&hw_stats->syncp);
+ out:
+ rcu_read_unlock();
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -567,6 +567,16 @@ struct mtk_tx_dma_v2 {
+ struct mtk_eth;
+ struct mtk_mac;
+
++struct mtk_xdp_stats {
++ u64 rx_xdp_redirect;
++ u64 rx_xdp_pass;
++ u64 rx_xdp_drop;
++ u64 rx_xdp_tx;
++ u64 rx_xdp_tx_errors;
++ u64 tx_xdp_xmit;
++ u64 tx_xdp_xmit_errors;
++};
++
+ /* struct mtk_hw_stats - the structure that holds the traffic statistics.
+ * @stats_lock: make sure that stats operations are atomic
+ * @reg_offset: the status register offset of the SoC
+@@ -590,6 +600,8 @@ struct mtk_hw_stats {
+ u64 rx_checksum_errors;
+ u64 rx_flow_control_packets;
+
++ struct mtk_xdp_stats xdp_stats;
++
+ spinlock_t stats_lock;
+ u32 reg_offset;
+ struct u64_stats_sync syncp;
--- /dev/null
+From 5886d26fd25bbe26130e3e5f7474b9b3e98a3469 Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Fri, 22 Jul 2022 09:19:39 +0200
+Subject: [PATCH] net: ethernet: mtk_eth_soc: add xmit XDP support
+
+Introduce XDP support for the XDP_TX verdict and the ndo_xdp_xmit function
+pointer.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 192 +++++++++++++++++---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.h | 10 +-
+ 2 files changed, 180 insertions(+), 22 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -987,15 +987,26 @@ static void mtk_tx_unmap(struct mtk_eth
+ }
+ }
+
+- tx_buf->flags = 0;
+- if (tx_buf->skb &&
+- (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) {
+- if (napi)
+- napi_consume_skb(tx_buf->skb, napi);
++ if (tx_buf->type == MTK_TYPE_SKB) {
++ if (tx_buf->data &&
++ tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
++ struct sk_buff *skb = tx_buf->data;
++
++ if (napi)
++ napi_consume_skb(skb, napi);
++ else
++ dev_kfree_skb_any(skb);
++ }
++ } else if (tx_buf->data) {
++ struct xdp_frame *xdpf = tx_buf->data;
++
++ if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
++ xdp_return_frame_rx_napi(xdpf);
+ else
+- dev_kfree_skb_any(tx_buf->skb);
++ xdp_return_frame(xdpf);
+ }
+- tx_buf->skb = NULL;
++ tx_buf->flags = 0;
++ tx_buf->data = NULL;
+ }
+
+ static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
+@@ -1012,7 +1023,7 @@ static void setup_tx_buf(struct mtk_eth
+ dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
+ dma_unmap_len_set(tx_buf, dma_len1, size);
+ } else {
+- tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
++ tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
+ txd->txd1 = mapped_addr;
+ txd->txd2 = TX_DMA_PLEN0(size);
+ dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
+@@ -1188,7 +1199,7 @@ static int mtk_tx_map(struct sk_buff *sk
+ soc->txrx.txd_size);
+ if (new_desc)
+ memset(tx_buf, 0, sizeof(*tx_buf));
+- tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
++ tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
+ tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
+ tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
+ MTK_TX_FLAGS_FPORT1;
+@@ -1202,7 +1213,8 @@ static int mtk_tx_map(struct sk_buff *sk
+ }
+
+ /* store skb to cleanup */
+- itx_buf->skb = skb;
++ itx_buf->type = MTK_TYPE_SKB;
++ itx_buf->data = skb;
+
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+ if (k & 0x1)
+@@ -1414,13 +1426,14 @@ static struct page_pool *mtk_create_page
+ .pool_size = size,
+ .nid = NUMA_NO_NODE,
+ .dev = eth->dma_dev,
+- .dma_dir = DMA_FROM_DEVICE,
+ .offset = MTK_PP_HEADROOM,
+ .max_len = MTK_PP_MAX_BUF_SIZE,
+ };
+ struct page_pool *pp;
+ int err;
+
++ pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL
++ : DMA_FROM_DEVICE;
+ pp = page_pool_create(&pp_params);
+ if (IS_ERR(pp))
+ return pp;
+@@ -1466,6 +1479,122 @@ static void mtk_rx_put_buff(struct mtk_r
+ skb_free_frag(data);
+ }
+
++static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
++ struct net_device *dev, bool dma_map)
++{
++ const struct mtk_soc_data *soc = eth->soc;
++ struct mtk_tx_ring *ring = &eth->tx_ring;
++ struct mtk_tx_dma_desc_info txd_info = {
++ .size = xdpf->len,
++ .first = true,
++ .last = true,
++ };
++ struct mtk_mac *mac = netdev_priv(dev);
++ struct mtk_tx_dma *txd, *txd_pdma;
++ int err = 0, index = 0, n_desc = 1;
++ struct mtk_tx_buf *tx_buf;
++
++ if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
++ return -EBUSY;
++
++ if (unlikely(atomic_read(&ring->free_count) <= 1))
++ return -EBUSY;
++
++ spin_lock(&eth->page_lock);
++
++ txd = ring->next_free;
++ if (txd == ring->last_free) {
++ err = -ENOMEM;
++ goto out;
++ }
++
++ tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
++ memset(tx_buf, 0, sizeof(*tx_buf));
++
++ if (dma_map) { /* ndo_xdp_xmit */
++ txd_info.addr = dma_map_single(eth->dma_dev, xdpf->data,
++ txd_info.size, DMA_TO_DEVICE);
++ if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr))) {
++ err = -ENOMEM;
++ goto out;
++ }
++ tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
++ } else {
++ struct page *page = virt_to_head_page(xdpf->data);
++
++ txd_info.addr = page_pool_get_dma_addr(page) +
++ sizeof(*xdpf) + xdpf->headroom;
++ dma_sync_single_for_device(eth->dma_dev, txd_info.addr,
++ txd_info.size,
++ DMA_BIDIRECTIONAL);
++ }
++ mtk_tx_set_dma_desc(dev, txd, &txd_info);
++
++ tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1;
++
++ txd_pdma = qdma_to_pdma(ring, txd);
++ setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr, txd_info.size,
++ index++);
++
++ /* store xdpf for cleanup */
++ tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
++ tx_buf->data = xdpf;
++
++ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
++ if (index & 1)
++ txd_pdma->txd2 |= TX_DMA_LS0;
++ else
++ txd_pdma->txd2 |= TX_DMA_LS1;
++ }
++
++ ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
++ atomic_sub(n_desc, &ring->free_count);
++
++ /* make sure that all changes to the dma ring are flushed before we
++ * continue
++ */
++ wmb();
++
++ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
++ mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
++ } else {
++ int idx;
++
++ idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
++ mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
++ MT7628_TX_CTX_IDX0);
++ }
++out:
++ spin_unlock(&eth->page_lock);
++
++ return err;
++}
++
++static int mtk_xdp_xmit(struct net_device *dev, int num_frame,
++ struct xdp_frame **frames, u32 flags)
++{
++ struct mtk_mac *mac = netdev_priv(dev);
++ struct mtk_hw_stats *hw_stats = mac->hw_stats;
++ struct mtk_eth *eth = mac->hw;
++ int i, nxmit = 0;
++
++ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
++ return -EINVAL;
++
++ for (i = 0; i < num_frame; i++) {
++ if (mtk_xdp_submit_frame(eth, frames[i], dev, true))
++ break;
++ nxmit++;
++ }
++
++ u64_stats_update_begin(&hw_stats->syncp);
++ hw_stats->xdp_stats.tx_xdp_xmit += nxmit;
++ hw_stats->xdp_stats.tx_xdp_xmit_errors += num_frame - nxmit;
++ u64_stats_update_end(&hw_stats->syncp);
++
++ return nxmit;
++}
++
+ static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
+ struct xdp_buff *xdp, struct net_device *dev)
+ {
+@@ -1494,6 +1623,18 @@ static u32 mtk_xdp_run(struct mtk_eth *e
+
+ count = &hw_stats->xdp_stats.rx_xdp_redirect;
+ goto update_stats;
++ case XDP_TX: {
++ struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
++
++ if (mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
++ count = &hw_stats->xdp_stats.rx_xdp_tx_errors;
++ act = XDP_DROP;
++ break;
++ }
++
++ count = &hw_stats->xdp_stats.rx_xdp_tx;
++ goto update_stats;
++ }
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ fallthrough;
+@@ -1727,9 +1868,8 @@ static int mtk_poll_tx_qdma(struct mtk_e
+ {
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
+ struct mtk_tx_ring *ring = &eth->tx_ring;
+- struct mtk_tx_dma *desc;
+- struct sk_buff *skb;
+ struct mtk_tx_buf *tx_buf;
++ struct mtk_tx_dma *desc;
+ u32 cpu, dma;
+
+ cpu = ring->last_free_ptr;
+@@ -1750,15 +1890,21 @@ static int mtk_poll_tx_qdma(struct mtk_e
+ if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
+ mac = 1;
+
+- skb = tx_buf->skb;
+- if (!skb)
++ if (!tx_buf->data)
+ break;
+
+- if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
++ if (tx_buf->type == MTK_TYPE_SKB &&
++ tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
++ struct sk_buff *skb = tx_buf->data;
++
+ bytes[mac] += skb->len;
+ done[mac]++;
+ budget--;
++ } else if (tx_buf->type == MTK_TYPE_XDP_TX ||
++ tx_buf->type == MTK_TYPE_XDP_NDO) {
++ budget--;
+ }
++
+ mtk_tx_unmap(eth, tx_buf, true);
+
+ ring->last_free = desc;
+@@ -1777,9 +1923,8 @@ static int mtk_poll_tx_pdma(struct mtk_e
+ unsigned int *done, unsigned int *bytes)
+ {
+ struct mtk_tx_ring *ring = ð->tx_ring;
+- struct mtk_tx_dma *desc;
+- struct sk_buff *skb;
+ struct mtk_tx_buf *tx_buf;
++ struct mtk_tx_dma *desc;
+ u32 cpu, dma;
+
+ cpu = ring->cpu_idx;
+@@ -1787,14 +1932,18 @@ static int mtk_poll_tx_pdma(struct mtk_e
+
+ while ((cpu != dma) && budget) {
+ tx_buf = &ring->buf[cpu];
+- skb = tx_buf->skb;
+- if (!skb)
++ if (!tx_buf->data)
+ break;
+
+- if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
++ if (tx_buf->type == MTK_TYPE_SKB &&
++ tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
++ struct sk_buff *skb = tx_buf->data;
+ bytes[0] += skb->len;
+ done[0]++;
+ budget--;
++ } else if (tx_buf->type == MTK_TYPE_XDP_TX ||
++ tx_buf->type == MTK_TYPE_XDP_NDO) {
++ budget--;
+ }
+
+ mtk_tx_unmap(eth, tx_buf, true);
+@@ -3475,6 +3624,7 @@ static const struct net_device_ops mtk_n
+ #endif
+ .ndo_setup_tc = mtk_eth_setup_tc,
+ .ndo_bpf = mtk_xdp,
++ .ndo_xdp_xmit = mtk_xdp_xmit,
+ };
+
+ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -693,6 +693,12 @@ enum mtk_dev_state {
+ MTK_RESETTING
+ };
+
++enum mtk_tx_buf_type {
++ MTK_TYPE_SKB,
++ MTK_TYPE_XDP_TX,
++ MTK_TYPE_XDP_NDO,
++};
++
+ /* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at
+ * by the TX descriptor s
+ * @skb: The SKB pointer of the packet being sent
+@@ -702,7 +708,9 @@ enum mtk_dev_state {
+ * @dma_len1: The length of the second segment
+ */
+ struct mtk_tx_buf {
+- struct sk_buff *skb;
++ enum mtk_tx_buf_type type;
++ void *data;
++
+ u32 flags;
+ DEFINE_DMA_UNMAP_ADDR(dma_addr0);
+ DEFINE_DMA_UNMAP_LEN(dma_len0);
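The XDP_TX path wired up above is easiest to exercise with a minimal BPF
program that reflects every frame back out its ingress port. A sketch under
libbpf conventions (file name and attach command are illustrative, not part
of this series):

	/* xdp_bounce.c - return every frame to its ingress port via XDP_TX;
	 * in this driver the frame is then queued by mtk_xdp_submit_frame().
	 */
	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	SEC("xdp")
	int xdp_bounce(struct xdp_md *ctx)
	{
		return XDP_TX;
	}

	char _license[] SEC("license") = "GPL";

Build with "clang -O2 -target bpf -c xdp_bounce.c -o xdp_bounce.o" and attach
with "ip link set dev <iface> xdp obj xdp_bounce.o sec xdp".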
--- /dev/null
+From 84b9cd389036d4a262d8cee794d56c04095358a7 Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Fri, 22 Jul 2022 09:19:40 +0200
+Subject: [PATCH] net: ethernet: mtk_eth_soc: add support for
+ page_pool_get_stats
+
+Introduce support for the page_pool stats API into mtk_eth_soc driver.
+Report page_pool stats through ethtool.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/mediatek/Kconfig | 1 +
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 37 +++++++++++++++++++--
+ 2 files changed, 35 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/Kconfig
++++ b/drivers/net/ethernet/mediatek/Kconfig
+@@ -17,6 +17,7 @@ config NET_MEDIATEK_SOC
+ select PHYLINK
+ select DIMLIB
+ select PAGE_POOL
++ select PAGE_POOL_STATS
+ help
+ This driver supports the gigabit ethernet MACs in the
+ MediaTek SoC family.
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -3485,11 +3485,18 @@ static void mtk_get_strings(struct net_d
+ int i;
+
+ switch (stringset) {
+- case ETH_SS_STATS:
++ case ETH_SS_STATS: {
++ struct mtk_mac *mac = netdev_priv(dev);
++
+ for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
+ memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
++ if (mtk_page_pool_enabled(mac->hw))
++ page_pool_ethtool_stats_get_strings(data);
++ break;
++ }
++ default:
+ break;
+ }
+ }
+@@ -3497,13 +3504,35 @@ static void mtk_get_strings(struct net_d
+ static int mtk_get_sset_count(struct net_device *dev, int sset)
+ {
+ switch (sset) {
+- case ETH_SS_STATS:
+- return ARRAY_SIZE(mtk_ethtool_stats);
++ case ETH_SS_STATS: {
++ int count = ARRAY_SIZE(mtk_ethtool_stats);
++ struct mtk_mac *mac = netdev_priv(dev);
++
++ if (mtk_page_pool_enabled(mac->hw))
++ count += page_pool_ethtool_stats_get_count();
++ return count;
++ }
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
++static void mtk_ethtool_pp_stats(struct mtk_eth *eth, u64 *data)
++{
++ struct page_pool_stats stats = {};
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(eth->rx_ring); i++) {
++ struct mtk_rx_ring *ring = ð->rx_ring[i];
++
++ if (!ring->page_pool)
++ continue;
++
++ page_pool_get_stats(ring->page_pool, &stats);
++ }
++ page_pool_ethtool_stats_get(data, &stats);
++}
++
+ static void mtk_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+ {
+@@ -3531,6 +3560,8 @@ static void mtk_get_ethtool_stats(struct
+
+ for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
+ *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
++ if (mtk_page_pool_enabled(mac->hw))
++ mtk_ethtool_pp_stats(mac->hw, data_dst);
+ } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
+ }
+
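A note on the aggregation pattern used by mtk_ethtool_pp_stats() above:
page_pool_get_stats() adds the given pool's counters into the caller-supplied
struct, so a single struct page_pool_stats can accumulate totals over every
RX ring before page_pool_ethtool_stats_get() writes one u64 per counter into
the ethtool data array, in the same order as the string table. A condensed
sketch of the same idea:

	struct page_pool_stats stats = {};	/* zeroed accumulator */
	int i;

	for (i = 0; i < ARRAY_SIZE(eth->rx_ring); i++)
		if (eth->rx_ring[i].page_pool)	/* skip rings without a pool */
			page_pool_get_stats(eth->rx_ring[i].page_pool, &stats);

	page_pool_ethtool_stats_get(data, &stats);

Once CONFIG_PAGE_POOL_STATS is enabled, the counters show up in ordinary
"ethtool -S <iface>" output under rx_pp_-prefixed names (e.g.
rx_pp_alloc_fast), as generated by page_pool_ethtool_stats_get_strings().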
--- /dev/null
+From b16fe6d82b71fa0dd5c957bc22d66a694976d6eb Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Wed, 27 Jul 2022 23:20:50 +0200
+Subject: [PATCH] net: ethernet: mtk_eth_soc: introduce mtk_xdp_frame_map
+ utility routine
+
+This is a preliminary patch to add xdp multi-frag support to the mtk_eth_soc
+driver.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 68 +++++++++++++--------
+ 1 file changed, 42 insertions(+), 26 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -1479,6 +1479,41 @@ static void mtk_rx_put_buff(struct mtk_r
+ skb_free_frag(data);
+ }
+
++static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev,
++ struct mtk_tx_dma_desc_info *txd_info,
++ struct mtk_tx_dma *txd, struct mtk_tx_buf *tx_buf,
++ void *data, u16 headroom, int index, bool dma_map)
++{
++ struct mtk_tx_ring *ring = ð->tx_ring;
++ struct mtk_mac *mac = netdev_priv(dev);
++ struct mtk_tx_dma *txd_pdma;
++
++ if (dma_map) { /* ndo_xdp_xmit */
++ txd_info->addr = dma_map_single(eth->dma_dev, data,
++ txd_info->size, DMA_TO_DEVICE);
++ if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr)))
++ return -ENOMEM;
++
++ tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
++ } else {
++ struct page *page = virt_to_head_page(data);
++
++ txd_info->addr = page_pool_get_dma_addr(page) +
++ sizeof(struct xdp_frame) + headroom;
++ dma_sync_single_for_device(eth->dma_dev, txd_info->addr,
++ txd_info->size, DMA_BIDIRECTIONAL);
++ }
++ mtk_tx_set_dma_desc(dev, txd, txd_info);
++
++ tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1;
++
++ txd_pdma = qdma_to_pdma(ring, txd);
++ setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size,
++ index);
++
++ return 0;
++}
++
+ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
+ struct net_device *dev, bool dma_map)
+ {
+@@ -1489,9 +1524,8 @@ static int mtk_xdp_submit_frame(struct m
+ .first = true,
+ .last = true,
+ };
+- struct mtk_mac *mac = netdev_priv(dev);
+- struct mtk_tx_dma *txd, *txd_pdma;
+ int err = 0, index = 0, n_desc = 1;
++ struct mtk_tx_dma *txd, *txd_pdma;
+ struct mtk_tx_buf *tx_buf;
+
+ if (unlikely(test_bit(MTK_RESETTING, ð->state)))
+@@ -1511,36 +1545,18 @@ static int mtk_xdp_submit_frame(struct m
+ tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
+ memset(tx_buf, 0, sizeof(*tx_buf));
+
+- if (dma_map) { /* ndo_xdp_xmit */
+- txd_info.addr = dma_map_single(eth->dma_dev, xdpf->data,
+- txd_info.size, DMA_TO_DEVICE);
+- if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr))) {
+- err = -ENOMEM;
+- goto out;
+- }
+- tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
+- } else {
+- struct page *page = virt_to_head_page(xdpf->data);
+-
+- txd_info.addr = page_pool_get_dma_addr(page) +
+- sizeof(*xdpf) + xdpf->headroom;
+- dma_sync_single_for_device(eth->dma_dev, txd_info.addr,
+- txd_info.size,
+- DMA_BIDIRECTIONAL);
+- }
+- mtk_tx_set_dma_desc(dev, txd, &txd_info);
+-
+- tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1;
+-
+- txd_pdma = qdma_to_pdma(ring, txd);
+- setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr, txd_info.size,
+- index++);
++ err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
++ xdpf->data, xdpf->headroom, index,
++ dma_map);
++ if (err < 0)
++ goto out;
+
+ /* store xdpf for cleanup */
+ tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
+ tx_buf->data = xdpf;
+
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
++ txd_pdma = qdma_to_pdma(ring, txd);
+ if (index & 1)
+ txd_pdma->txd2 |= TX_DMA_LS0;
+ else
--- /dev/null
+From 155738a4f319538a09f734ce1f5a2eac3ada1de2 Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Wed, 27 Jul 2022 23:20:51 +0200
+Subject: [PATCH] net: ethernet: mtk_eth_soc: introduce xdp multi-frag support
+
+Add the capability to map non-linear xdp frames in the XDP_TX path and the
+ndo_xdp_xmit callback.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 125 +++++++++++++-------
+ 1 file changed, 82 insertions(+), 43 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -987,23 +987,22 @@ static void mtk_tx_unmap(struct mtk_eth
+ }
+ }
+
+- if (tx_buf->type == MTK_TYPE_SKB) {
+- if (tx_buf->data &&
+- tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
++ if (tx_buf->data && tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
++ if (tx_buf->type == MTK_TYPE_SKB) {
+ struct sk_buff *skb = tx_buf->data;
+
+ if (napi)
+ napi_consume_skb(skb, napi);
+ else
+ dev_kfree_skb_any(skb);
+- }
+- } else if (tx_buf->data) {
+- struct xdp_frame *xdpf = tx_buf->data;
++ } else {
++ struct xdp_frame *xdpf = tx_buf->data;
+
+- if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
+- xdp_return_frame_rx_napi(xdpf);
+- else
+- xdp_return_frame(xdpf);
++ if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
++ xdp_return_frame_rx_napi(xdpf);
++ else
++ xdp_return_frame(xdpf);
++ }
+ }
+ tx_buf->flags = 0;
+ tx_buf->data = NULL;
+@@ -1506,6 +1505,8 @@ static int mtk_xdp_frame_map(struct mtk_
+ mtk_tx_set_dma_desc(dev, txd, txd_info);
+
+ tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1;
++ tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
++ tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
+
+ txd_pdma = qdma_to_pdma(ring, txd);
+ setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size,
+@@ -1517,43 +1518,69 @@ static int mtk_xdp_frame_map(struct mtk_
+ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
+ struct net_device *dev, bool dma_map)
+ {
++ struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
+ const struct mtk_soc_data *soc = eth->soc;
+ struct mtk_tx_ring *ring = ð->tx_ring;
+ struct mtk_tx_dma_desc_info txd_info = {
+ .size = xdpf->len,
+ .first = true,
+- .last = true,
++ .last = !xdp_frame_has_frags(xdpf),
+ };
+- int err = 0, index = 0, n_desc = 1;
+- struct mtk_tx_dma *txd, *txd_pdma;
+- struct mtk_tx_buf *tx_buf;
++ int err, index = 0, n_desc = 1, nr_frags;
++ struct mtk_tx_dma *htxd, *txd, *txd_pdma;
++ struct mtk_tx_buf *htx_buf, *tx_buf;
++ void *data = xdpf->data;
+
+ if (unlikely(test_bit(MTK_RESETTING, ð->state)))
+ return -EBUSY;
+
+- if (unlikely(atomic_read(&ring->free_count) <= 1))
++ nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
++ if (unlikely(atomic_read(&ring->free_count) <= 1 + nr_frags))
+ return -EBUSY;
+
+ spin_lock(ð->page_lock);
+
+ txd = ring->next_free;
+ if (txd == ring->last_free) {
+- err = -ENOMEM;
+- goto out;
++ spin_unlock(ð->page_lock);
++ return -ENOMEM;
+ }
++ htxd = txd;
+
+ tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
+ memset(tx_buf, 0, sizeof(*tx_buf));
++ htx_buf = tx_buf;
+
+- err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
+- xdpf->data, xdpf->headroom, index,
+- dma_map);
+- if (err < 0)
+- goto out;
++ for (;;) {
++ err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
++ data, xdpf->headroom, index, dma_map);
++ if (err < 0)
++ goto unmap;
++
++ if (txd_info.last)
++ break;
+
++ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) {
++ txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
++ txd_pdma = qdma_to_pdma(ring, txd);
++ if (txd == ring->last_free)
++ goto unmap;
++
++ tx_buf = mtk_desc_to_tx_buf(ring, txd,
++ soc->txrx.txd_size);
++ memset(tx_buf, 0, sizeof(*tx_buf));
++ n_desc++;
++ }
++
++ memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
++ txd_info.size = skb_frag_size(&sinfo->frags[index]);
++ txd_info.last = index + 1 == nr_frags;
++ data = skb_frag_address(&sinfo->frags[index]);
++
++ index++;
++ }
+ /* store xdpf for cleanup */
+- tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
+- tx_buf->data = xdpf;
++ htx_buf->data = xdpf;
+
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+ txd_pdma = qdma_to_pdma(ring, txd);
+@@ -1580,7 +1607,24 @@ static int mtk_xdp_submit_frame(struct m
+ mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
+ MT7628_TX_CTX_IDX0);
+ }
+-out:
++
++ spin_unlock(ð->page_lock);
++
++ return 0;
++
++unmap:
++ while (htxd != txd) {
++ txd_pdma = qdma_to_pdma(ring, htxd);
++ tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
++ mtk_tx_unmap(eth, tx_buf, false);
++
++ htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
++ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
++ txd_pdma->txd2 = TX_DMA_DESP2_DEF;
++
++ htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2);
++ }
++
+ spin_unlock(ð->page_lock);
+
+ return err;
+@@ -1909,18 +1953,15 @@ static int mtk_poll_tx_qdma(struct mtk_e
+ if (!tx_buf->data)
+ break;
+
+- if (tx_buf->type == MTK_TYPE_SKB &&
+- tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+- struct sk_buff *skb = tx_buf->data;
++ if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
++ if (tx_buf->type == MTK_TYPE_SKB) {
++ struct sk_buff *skb = tx_buf->data;
+
+- bytes[mac] += skb->len;
+- done[mac]++;
+- budget--;
+- } else if (tx_buf->type == MTK_TYPE_XDP_TX ||
+- tx_buf->type == MTK_TYPE_XDP_NDO) {
++ bytes[mac] += skb->len;
++ done[mac]++;
++ }
+ budget--;
+ }
+-
+ mtk_tx_unmap(eth, tx_buf, true);
+
+ ring->last_free = desc;
+@@ -1951,17 +1992,15 @@ static int mtk_poll_tx_pdma(struct mtk_e
+ if (!tx_buf->data)
+ break;
+
+- if (tx_buf->type == MTK_TYPE_SKB &&
+- tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+- struct sk_buff *skb = tx_buf->data;
+- bytes[0] += skb->len;
+- done[0]++;
+- budget--;
+- } else if (tx_buf->type == MTK_TYPE_XDP_TX ||
+- tx_buf->type == MTK_TYPE_XDP_NDO) {
++ if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
++ if (tx_buf->type == MTK_TYPE_SKB) {
++ struct sk_buff *skb = tx_buf->data;
++
++ bytes[0] += skb->len;
++ done[0]++;
++ }
+ budget--;
+ }
+-
+ mtk_tx_unmap(eth, tx_buf, true);
+
+ desc = ring->dma + cpu * eth->soc->txrx.txd_size;
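The heart of the multi-frag change is walking the fragment array hanging off
the frame's skb_shared_info; a minimal sketch of that walk (descriptor ring
bookkeeping elided):

	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
	u32 i, nr_frags = xdp_frame_has_frags(xdpf) ? sinfo->nr_frags : 0;

	/* segment 0 is the linear part: xdpf->data / xdpf->len */
	for (i = 0; i < nr_frags; i++) {
		void *data = skb_frag_address(&sinfo->frags[i]);
		u32 len = skb_frag_size(&sinfo->frags[i]);

		/* queue one TX descriptor per fragment; only the final one
		 * has txd_info.last set, so the DMA engine knows where the
		 * frame ends
		 */
	}

Note that the xdp_frame pointer itself is stored only in the head tx_buf
(htx_buf->data = xdpf), so completion frees the frame exactly once.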
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -1469,10 +1469,19 @@ static int mtk_poll_rx(struct napi_struc
+@@ -1845,10 +1845,19 @@ static int mtk_poll_rx(struct napi_struc
skb->dev = netdev;
- skb_put(skb, pktlen);
+ bytes += skb->len;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
skb->ip_summed = CHECKSUM_UNNECESSARY;
-@@ -1481,16 +1490,9 @@ static int mtk_poll_rx(struct napi_struc
+@@ -1856,16 +1865,9 @@ static int mtk_poll_rx(struct napi_struc
+ skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, netdev);
- bytes += pktlen;
- hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
- if (hash != MTK_RXD4_FOE_ENTRY) {
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
-@@ -307,6 +307,11 @@
+@@ -314,6 +314,11 @@
#define RX_DMA_L4_VALID_PDMA BIT(30) /* when PDMA is used */
#define RX_DMA_SPECIAL_TAG BIT(22)
--- /dev/null
+From c9daab322313087afde8c46f41df3c628410ae20 Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Mon, 5 Sep 2022 14:46:01 +0200
+Subject: [PATCH] net: ethernet: mtk_eth_soc: remove mtk_foe_entry_timestamp
+
+Get rid of the mtk_foe_entry_timestamp routine since it is no longer used.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/mediatek/mtk_ppe.h | 11 -----------
+ 1 file changed, 11 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
++++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
+@@ -302,17 +302,6 @@ mtk_ppe_check_skb(struct mtk_ppe *ppe, s
+ __mtk_ppe_check_skb(ppe, skb, hash);
+ }
+
+-static inline int
+-mtk_foe_entry_timestamp(struct mtk_ppe *ppe, u16 hash)
+-{
+- u32 ib1 = READ_ONCE(ppe->foe_table[hash].ib1);
+-
+- if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND)
+- return -1;
+-
+- return FIELD_GET(MTK_FOE_IB1_BIND_TIMESTAMP, ib1);
+-}
+-
+ int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
+ u8 pse_port, u8 *src_mac, u8 *dest_mac);
+ int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port);
--- /dev/null
+From 5e69163d3b9931098922b3fc2f8e786af8c1f37e Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Tue, 13 Sep 2022 15:03:05 +0200
+Subject: [PATCH] net: ethernet: mtk_eth_soc: enable XDP support just for
+ MT7986 SoC
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Disable page_pool/XDP support for the MT7621 SoC in order to fix a regression
+introduced when adding XDP support for the MT7986 SoC. There is no real use
+case for XDP on MT7621 since it is a low-end CPU. Moreover, this patch reduces
+the memory footprint.
+
+Tested-by: Sergio Paracuellos <sergio.paracuellos@gmail.com>
+Tested-by: Arınç ĂœNAL <arinc.unal@arinc9.com>
+Fixes: 23233e577ef9 ("net: ethernet: mtk_eth_soc: rely on page_pool for single page buffers")
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Link: https://lore.kernel.org/r/2bf31e27b888c43228b0d84dd2ef5033338269e2.1663074002.git.lorenzo@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -1412,7 +1412,7 @@ static void mtk_update_rx_cpu_idx(struct
+
+ static bool mtk_page_pool_enabled(struct mtk_eth *eth)
+ {
+- return !eth->hwlro;
++ return MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2);
+ }
+
+ static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
-@@ -2851,6 +2851,10 @@ static inline int pskb_trim(struct sk_bu
+@@ -2852,6 +2852,10 @@ static inline int pskb_trim(struct sk_bu
return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}
/**
* pskb_trim_unique - remove end from a paged unique (not cloned) buffer
* @skb: buffer to alter
-@@ -3001,16 +3005,6 @@ static inline struct sk_buff *dev_alloc_
+@@ -3002,16 +3006,6 @@ static inline struct sk_buff *dev_alloc_
}
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
-@@ -2817,7 +2817,7 @@ static inline int pskb_network_may_pull(
+@@ -2818,7 +2818,7 @@ static inline int pskb_network_may_pull(
* NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
*/
#ifndef NET_SKB_PAD
#endif
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
-@@ -890,6 +890,7 @@ struct sk_buff {
+@@ -891,6 +891,7 @@ struct sk_buff {
#ifdef CONFIG_IPV6_NDISC_NODETYPE
__u8 ndisc_nodetype:2;
#endif
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -2383,8 +2383,8 @@ static irqreturn_t mtk_handle_irq_rx(int
+@@ -2789,8 +2789,8 @@ static irqreturn_t mtk_handle_irq_rx(int
eth->rx_events++;
if (likely(napi_schedule_prep(ð->rx_napi))) {
}
return IRQ_HANDLED;
-@@ -2396,8 +2396,8 @@ static irqreturn_t mtk_handle_irq_tx(int
+@@ -2802,8 +2802,8 @@ static irqreturn_t mtk_handle_irq_tx(int
eth->tx_events++;
if (likely(napi_schedule_prep(ð->tx_napi))) {
}
return IRQ_HANDLED;
-@@ -3587,6 +3587,8 @@ static int mtk_probe(struct platform_dev
+@@ -4074,6 +4074,8 @@ static int mtk_probe(struct platform_dev
* for NAPI to work
*/
init_dummy_netdev(ð->dummy_dev);
--- /dev/null
+From patchwork Thu Sep 8 19:33:38 2022
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+X-Patchwork-Submitter: Lorenzo Bianconi <lorenzo@kernel.org>
+X-Patchwork-Id: 12970556
+X-Patchwork-Delegate: kuba@kernel.org
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+To: netdev@vger.kernel.org
+Cc: nbd@nbd.name, john@phrozen.org, sean.wang@mediatek.com,
+ Mark-MC.Lee@mediatek.com, davem@davemloft.net, edumazet@google.com,
+ kuba@kernel.org, pabeni@redhat.com, matthias.bgg@gmail.com,
+ linux-mediatek@lists.infradead.org, lorenzo.bianconi@redhat.com,
+ Bo.Jiao@mediatek.com, sujuan.chen@mediatek.com,
+ ryder.Lee@mediatek.com, evelyn.tsai@mediatek.com,
+ devicetree@vger.kernel.org, robh@kernel.org
+Subject: [PATCH net-next 03/12] net: ethernet: mtk_eth_soc: move gdma_to_ppe
+ and ppe_base definitions in mtk register map
+Date: Thu, 8 Sep 2022 21:33:37 +0200
+Message-Id:
+ <95938fc9cbe0223714be2658a49ca58e9baace00.1662661555.git.lorenzo@kernel.org>
+X-Mailer: git-send-email 2.37.3
+In-Reply-To: <cover.1662661555.git.lorenzo@kernel.org>
+References: <cover.1662661555.git.lorenzo@kernel.org>
+MIME-Version: 1.0
+Precedence: bulk
+List-ID: <netdev.vger.kernel.org>
+X-Mailing-List: netdev@vger.kernel.org
+X-Patchwork-Delegate: kuba@kernel.org
+
+This is a preliminary patch to introduce the mt7986 hw packet engine.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 15 +++++++++++----
+ drivers/net/ethernet/mediatek/mtk_eth_soc.h | 3 ++-
+ drivers/net/ethernet/mediatek/mtk_ppe.h | 2 --
+ 3 files changed, 13 insertions(+), 7 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -73,6 +73,8 @@ static const struct mtk_reg_map mtk_reg_
+ .fq_blen = 0x1b2c,
+ },
+ .gdm1_cnt = 0x2400,
++ .gdma_to_ppe0 = 0x4444,
++ .ppe_base = 0x0c00,
+ };
+
+ static const struct mtk_reg_map mt7628_reg_map = {
+@@ -126,6 +128,8 @@ static const struct mtk_reg_map mt7986_r
+ .fq_blen = 0x472c,
+ },
+ .gdm1_cnt = 0x1c00,
++ .gdma_to_ppe0 = 0x3333,
++ .ppe_base = 0x2000,
+ };
+
+ /* strings used by ethtool */
+@@ -2924,6 +2928,7 @@ static int mtk_open(struct net_device *d
+
+ /* we run 2 netdevs on the same dma ring so we only bring it up once */
+ if (!refcount_read(ð->dma_refcnt)) {
++ const struct mtk_soc_data *soc = eth->soc;
+ u32 gdm_config = MTK_GDMA_TO_PDMA;
+ int err;
+
+@@ -2931,15 +2936,15 @@ static int mtk_open(struct net_device *d
+ if (err)
+ return err;
+
+- if (eth->soc->offload_version && mtk_ppe_start(eth->ppe) == 0)
+- gdm_config = MTK_GDMA_TO_PPE;
++ if (soc->offload_version && mtk_ppe_start(eth->ppe) == 0)
++ gdm_config = soc->reg_map->gdma_to_ppe0;
+
+ mtk_gdm_config(eth, gdm_config);
+
+ napi_enable(ð->tx_napi);
+ napi_enable(ð->rx_napi);
+ mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
+- mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
++ mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask);
+ refcount_set(ð->dma_refcnt, 1);
+ }
+ else
+@@ -4045,7 +4050,9 @@ static int mtk_probe(struct platform_dev
+ }
+
+ if (eth->soc->offload_version) {
+- eth->ppe = mtk_ppe_init(eth, eth->base + MTK_ETH_PPE_BASE, 2);
++ u32 ppe_addr = eth->soc->reg_map->ppe_base;
++
++ eth->ppe = mtk_ppe_init(eth, eth->base + ppe_addr, 2);
+ if (!eth->ppe) {
+ err = -ENOMEM;
+ goto err_free_dev;
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -105,7 +105,6 @@
+ #define MTK_GDMA_TCS_EN BIT(21)
+ #define MTK_GDMA_UCS_EN BIT(20)
+ #define MTK_GDMA_TO_PDMA 0x0
+-#define MTK_GDMA_TO_PPE 0x4444
+ #define MTK_GDMA_DROP_ALL 0x7777
+
+ /* Unicast Filter MAC Address Register - Low */
+@@ -952,6 +951,8 @@ struct mtk_reg_map {
+ u32 fq_blen; /* fq free page buffer length */
+ } qdma;
+ u32 gdm1_cnt;
++ u32 gdma_to_ppe0;
++ u32 ppe_base;
+ };
+
+ /* struct mtk_eth_data - This is the structure holding all differences
+--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
++++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
+@@ -8,8 +8,6 @@
+ #include <linux/bitfield.h>
+ #include <linux/rhashtable.h>
+
+-#define MTK_ETH_PPE_BASE 0xc00
+-
+ #define MTK_PPE_ENTRIES_SHIFT 3
+ #define MTK_PPE_ENTRIES (1024 << MTK_PPE_ENTRIES_SHIFT)
+ #define MTK_PPE_HASH_MASK (MTK_PPE_ENTRIES - 1)
--- /dev/null
+From patchwork Thu Sep 8 19:33:38 2022
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+X-Patchwork-Submitter: Lorenzo Bianconi <lorenzo@kernel.org>
+X-Patchwork-Id: 12970557
+X-Patchwork-Delegate: kuba@kernel.org
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+To: netdev@vger.kernel.org
+Cc: nbd@nbd.name, john@phrozen.org, sean.wang@mediatek.com,
+ Mark-MC.Lee@mediatek.com, davem@davemloft.net, edumazet@google.com,
+ kuba@kernel.org, pabeni@redhat.com, matthias.bgg@gmail.com,
+ linux-mediatek@lists.infradead.org, lorenzo.bianconi@redhat.com,
+ Bo.Jiao@mediatek.com, sujuan.chen@mediatek.com,
+ ryder.Lee@mediatek.com, evelyn.tsai@mediatek.com,
+ devicetree@vger.kernel.org, robh@kernel.org
+Subject: [PATCH net-next 04/12] net: ethernet: mtk_eth_soc: move ppe table
+ hash offset to mtk_soc_data structure
+Date: Thu, 8 Sep 2022 21:33:38 +0200
+Message-Id:
+ <cc263ffeaa3e1d7314e36a4f941e96d38e41a6bf.1662661555.git.lorenzo@kernel.org>
+X-Mailer: git-send-email 2.37.3
+In-Reply-To: <cover.1662661555.git.lorenzo@kernel.org>
+References: <cover.1662661555.git.lorenzo@kernel.org>
+MIME-Version: 1.0
+Precedence: bulk
+List-ID: <netdev.vger.kernel.org>
+X-Mailing-List: netdev@vger.kernel.org
+X-Patchwork-Delegate: kuba@kernel.org
+
+This is a preliminary patch to introduce the mt7986 hw packet engine.
+
+Co-developed-by: Bo Jiao <Bo.Jiao@mediatek.com>
+Signed-off-by: Bo Jiao <Bo.Jiao@mediatek.com>
+Co-developed-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 4 ++++
+ drivers/net/ethernet/mediatek/mtk_eth_soc.h | 2 ++
+ drivers/net/ethernet/mediatek/mtk_ppe.c | 24 +++++++++++++++------
+ drivers/net/ethernet/mediatek/mtk_ppe.h | 2 +-
+ 4 files changed, 25 insertions(+), 7 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -4150,6 +4150,7 @@ static const struct mtk_soc_data mt7621_
+ .required_clks = MT7621_CLKS_BITMAP,
+ .required_pctl = false,
+ .offload_version = 2,
++ .hash_offset = 2,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma),
+ .rxd_size = sizeof(struct mtk_rx_dma),
+@@ -4168,6 +4169,7 @@ static const struct mtk_soc_data mt7622_
+ .required_clks = MT7622_CLKS_BITMAP,
+ .required_pctl = false,
+ .offload_version = 2,
++ .hash_offset = 2,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma),
+ .rxd_size = sizeof(struct mtk_rx_dma),
+@@ -4185,6 +4187,7 @@ static const struct mtk_soc_data mt7623_
+ .required_clks = MT7623_CLKS_BITMAP,
+ .required_pctl = true,
+ .offload_version = 2,
++ .hash_offset = 2,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma),
+ .rxd_size = sizeof(struct mtk_rx_dma),
+@@ -4218,6 +4221,7 @@ static const struct mtk_soc_data mt7986_
+ .caps = MT7986_CAPS,
+ .required_clks = MT7986_CLKS_BITMAP,
+ .required_pctl = false,
++ .hash_offset = 4,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma_v2),
+ .rxd_size = sizeof(struct mtk_rx_dma_v2),
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -966,6 +966,7 @@ struct mtk_reg_map {
+ * the target SoC
+ * @required_pctl A bool value to show whether the SoC requires
+ * the extra setup for those pins used by GMAC.
++ * @hash_offset Flow table hash offset.
+ * @txd_size Tx DMA descriptor size.
+ * @rxd_size Rx DMA descriptor size.
+ * @rx_irq_done_mask Rx irq done register mask.
+@@ -980,6 +981,7 @@ struct mtk_soc_data {
+ u32 required_clks;
+ bool required_pctl;
+ u8 offload_version;
++ u8 hash_offset;
+ netdev_features_t hw_features;
+ struct {
+ u32 txd_size;
+--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
+@@ -88,7 +88,7 @@ static void mtk_ppe_cache_enable(struct
+ enable * MTK_PPE_CACHE_CTL_EN);
+ }
+
+-static u32 mtk_ppe_hash_entry(struct mtk_foe_entry *e)
++static u32 mtk_ppe_hash_entry(struct mtk_eth *eth, struct mtk_foe_entry *e)
+ {
+ u32 hv1, hv2, hv3;
+ u32 hash;
+@@ -122,7 +122,7 @@ static u32 mtk_ppe_hash_entry(struct mtk
+ hash = (hash >> 24) | ((hash & 0xffffff) << 8);
+ hash ^= hv1 ^ hv2 ^ hv3;
+ hash ^= hash >> 16;
+- hash <<= 1;
++ hash <<= (ffs(eth->soc->hash_offset) - 1);
+ hash &= MTK_PPE_ENTRIES - 1;
+
+ return hash;
+@@ -540,15 +540,16 @@ mtk_foe_entry_commit_l2(struct mtk_ppe *
+ int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+ {
+ int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
++ const struct mtk_soc_data *soc = ppe->eth->soc;
+ u32 hash;
+
+ if (type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return mtk_foe_entry_commit_l2(ppe, entry);
+
+- hash = mtk_ppe_hash_entry(&entry->data);
++ hash = mtk_ppe_hash_entry(ppe->eth, &entry->data);
+ entry->hash = 0xffff;
+ spin_lock_bh(&ppe_lock);
+- hlist_add_head(&entry->list, &ppe->foe_flow[hash / 2]);
++ hlist_add_head(&entry->list, &ppe->foe_flow[hash / soc->hash_offset]);
+ spin_unlock_bh(&ppe_lock);
+
+ return 0;
+@@ -558,6 +559,7 @@ static void
+ mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
+ u16 hash)
+ {
++ const struct mtk_soc_data *soc = ppe->eth->soc;
+ struct mtk_flow_entry *flow_info;
+ struct mtk_foe_entry foe, *hwe;
+ struct mtk_foe_mac_info *l2;
+@@ -572,7 +574,8 @@ mtk_foe_entry_commit_subflow(struct mtk_
+ flow_info->l2_data.base_flow = entry;
+ flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
+ flow_info->hash = hash;
+- hlist_add_head(&flow_info->list, &ppe->foe_flow[hash / 2]);
++ hlist_add_head(&flow_info->list,
++ &ppe->foe_flow[hash / soc->hash_offset]);
+ hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);
+
+ hwe = &ppe->foe_table[hash];
+@@ -596,7 +599,8 @@ mtk_foe_entry_commit_subflow(struct mtk_
+
+ void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
+ {
+- struct hlist_head *head = &ppe->foe_flow[hash / 2];
++ const struct mtk_soc_data *soc = ppe->eth->soc;
++ struct hlist_head *head = &ppe->foe_flow[hash / soc->hash_offset];
+ struct mtk_foe_entry *hwe = &ppe->foe_table[hash];
+ struct mtk_flow_entry *entry;
+ struct mtk_foe_bridge key = {};
+@@ -680,9 +684,11 @@ int mtk_foe_entry_idle_time(struct mtk_p
+ struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
+ int version)
+ {
++ const struct mtk_soc_data *soc = eth->soc;
+ struct device *dev = eth->dev;
+ struct mtk_foe_entry *foe;
+ struct mtk_ppe *ppe;
++ u32 foe_flow_size;
+
+ ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL);
+ if (!ppe)
+@@ -705,6 +711,12 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_
+
+ ppe->foe_table = foe;
+
++ foe_flow_size = (MTK_PPE_ENTRIES / soc->hash_offset) *
++ sizeof(*ppe->foe_flow);
++ ppe->foe_flow = devm_kzalloc(dev, foe_flow_size, GFP_KERNEL);
++ if (!ppe->foe_flow)
++ return NULL;
++
+ mtk_ppe_debugfs_init(ppe);
+
+ return ppe;
+--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
++++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
+@@ -270,7 +270,7 @@ struct mtk_ppe {
+ dma_addr_t foe_phys;
+
+ u16 foe_check_time[MTK_PPE_ENTRIES];
+- struct hlist_head foe_flow[MTK_PPE_ENTRIES / 2];
++ struct hlist_head *foe_flow;
+
+ struct rhashtable l2_flows;
+
--- /dev/null
+From patchwork Thu Sep 8 19:33:39 2022
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+X-Patchwork-Submitter: Lorenzo Bianconi <lorenzo@kernel.org>
+X-Patchwork-Id: 12970559
+X-Patchwork-Delegate: kuba@kernel.org
+Return-Path: <netdev-owner@kernel.org>
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+To: netdev@vger.kernel.org
+Cc: nbd@nbd.name, john@phrozen.org, sean.wang@mediatek.com,
+ Mark-MC.Lee@mediatek.com, davem@davemloft.net, edumazet@google.com,
+ kuba@kernel.org, pabeni@redhat.com, matthias.bgg@gmail.com,
+ linux-mediatek@lists.infradead.org, lorenzo.bianconi@redhat.com,
+ Bo.Jiao@mediatek.com, sujuan.chen@mediatek.com,
+ ryder.Lee@mediatek.com, evelyn.tsai@mediatek.com,
+ devicetree@vger.kernel.org, robh@kernel.org
+Subject: [PATCH net-next 05/12] net: ethernet: mtk_eth_soc: add the capability
+ to run multiple ppe
+Date: Thu, 8 Sep 2022 21:33:39 +0200
+Message-Id:
+ <dd0254775390eb031c67c448df8b19e87df58558.1662661555.git.lorenzo@kernel.org>
+X-Mailer: git-send-email 2.37.3
+In-Reply-To: <cover.1662661555.git.lorenzo@kernel.org>
+References: <cover.1662661555.git.lorenzo@kernel.org>
+MIME-Version: 1.0
+Precedence: bulk
+List-ID: <netdev.vger.kernel.org>
+X-Mailing-List: netdev@vger.kernel.org
+X-Patchwork-Delegate: kuba@kernel.org
+
+The mt7986 chipset supports multiple packet engines for wlan <-> eth
+packet forwarding.
+
+Co-developed-by: Bo Jiao <Bo.Jiao@mediatek.com>
+Signed-off-by: Bo Jiao <Bo.Jiao@mediatek.com>
+Co-developed-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 35 ++++++++++++-------
+ drivers/net/ethernet/mediatek/mtk_eth_soc.h | 2 +-
+ drivers/net/ethernet/mediatek/mtk_ppe.c | 14 +++++---
+ drivers/net/ethernet/mediatek/mtk_ppe.h | 9 +++--
+ .../net/ethernet/mediatek/mtk_ppe_debugfs.c | 8 ++---
+ .../net/ethernet/mediatek/mtk_ppe_offload.c | 13 +++----
+ 6 files changed, 48 insertions(+), 33 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -1871,7 +1871,7 @@ static int mtk_poll_rx(struct napi_struc
+
+ reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
+ if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
+- mtk_ppe_check_skb(eth->ppe, skb, hash);
++ mtk_ppe_check_skb(eth->ppe[0], skb, hash);
+
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+@@ -2929,15 +2929,19 @@ static int mtk_open(struct net_device *d
+ /* we run 2 netdevs on the same dma ring so we only bring it up once */
+ if (!refcount_read(ð->dma_refcnt)) {
+ const struct mtk_soc_data *soc = eth->soc;
+- u32 gdm_config = MTK_GDMA_TO_PDMA;
++ u32 gdm_config;
++ int i;
+ int err;
+
+ err = mtk_start_dma(eth);
+ if (err)
+ return err;
+
+- if (soc->offload_version && mtk_ppe_start(eth->ppe) == 0)
+- gdm_config = soc->reg_map->gdma_to_ppe0;
++ for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
++ mtk_ppe_start(eth->ppe[i]);
++
++ gdm_config = soc->offload_version ? soc->reg_map->gdma_to_ppe0
++ : MTK_GDMA_TO_PDMA;
+
+ mtk_gdm_config(eth, gdm_config);
+
+@@ -2982,6 +2986,7 @@ static int mtk_stop(struct net_device *d
+ {
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
++ int i;
+
+ phylink_stop(mac->phylink);
+
+@@ -3009,8 +3014,8 @@ static int mtk_stop(struct net_device *d
+
+ mtk_dma_free(eth);
+
+- if (eth->soc->offload_version)
+- mtk_ppe_stop(eth->ppe);
++ for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
++ mtk_ppe_stop(eth->ppe[i]);
+
+ return 0;
+ }
+@@ -4050,12 +4055,19 @@ static int mtk_probe(struct platform_dev
+ }
+
+ if (eth->soc->offload_version) {
+- u32 ppe_addr = eth->soc->reg_map->ppe_base;
++ u32 num_ppe;
+
+- eth->ppe = mtk_ppe_init(eth, eth->base + ppe_addr, 2);
+- if (!eth->ppe) {
+- err = -ENOMEM;
+- goto err_free_dev;
++ num_ppe = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
++ num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe);
++ for (i = 0; i < num_ppe; i++) {
++ u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400;
++
++ eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr,
++ eth->soc->offload_version, i);
++ if (!eth->ppe[i]) {
++ err = -ENOMEM;
++ goto err_free_dev;
++ }
+ }
+
+ err = mtk_eth_offload_init(eth);
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -1111,7 +1111,7 @@ struct mtk_eth {
+
+ int ip_align;
+
+- struct mtk_ppe *ppe;
++ struct mtk_ppe *ppe[2];
+ struct rhashtable flow_table;
+
+ struct bpf_prog __rcu *prog;
+--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
+@@ -682,7 +682,7 @@ int mtk_foe_entry_idle_time(struct mtk_p
+ }
+
+ struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
+- int version)
++ int version, int index)
+ {
+ const struct mtk_soc_data *soc = eth->soc;
+ struct device *dev = eth->dev;
+@@ -717,7 +717,7 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_
+ if (!ppe->foe_flow)
+ return NULL;
+
+- mtk_ppe_debugfs_init(ppe);
++ mtk_ppe_debugfs_init(ppe, index);
+
+ return ppe;
+ }
+@@ -738,10 +738,13 @@ static void mtk_ppe_init_foe_table(struc
+ ppe->foe_table[i + skip[k]].ib1 |= MTK_FOE_IB1_STATIC;
+ }
+
+-int mtk_ppe_start(struct mtk_ppe *ppe)
++void mtk_ppe_start(struct mtk_ppe *ppe)
+ {
+ u32 val;
+
++ if (!ppe)
++ return;
++
+ mtk_ppe_init_foe_table(ppe);
+ ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys);
+
+@@ -809,8 +812,6 @@ int mtk_ppe_start(struct mtk_ppe *ppe)
+ ppe_w32(ppe, MTK_PPE_GLO_CFG, val);
+
+ ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);
+-
+- return 0;
+ }
+
+ int mtk_ppe_stop(struct mtk_ppe *ppe)
+@@ -818,6 +819,9 @@ int mtk_ppe_stop(struct mtk_ppe *ppe)
+ u32 val;
+ int i;
+
++ if (!ppe)
++ return 0;
++
+ for (i = 0; i < MTK_PPE_ENTRIES; i++)
+ ppe->foe_table[i].ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
+ MTK_FOE_STATE_INVALID);
+--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
++++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
+@@ -247,6 +247,7 @@ struct mtk_flow_entry {
+ };
+ u8 type;
+ s8 wed_index;
++ u8 ppe_index;
+ u16 hash;
+ union {
+ struct mtk_foe_entry data;
+@@ -265,6 +266,7 @@ struct mtk_ppe {
+ struct device *dev;
+ void __iomem *base;
+ int version;
++ char dirname[5];
+
+ struct mtk_foe_entry *foe_table;
+ dma_addr_t foe_phys;
+@@ -277,8 +279,9 @@ struct mtk_ppe {
+ void *acct_table;
+ };
+
+-struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int version);
+-int mtk_ppe_start(struct mtk_ppe *ppe);
++struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
++ int version, int index);
++void mtk_ppe_start(struct mtk_ppe *ppe);
+ int mtk_ppe_stop(struct mtk_ppe *ppe);
+
+ void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash);
+@@ -317,6 +320,6 @@ int mtk_foe_entry_set_wdma(struct mtk_fo
+ int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+ void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+ int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+-int mtk_ppe_debugfs_init(struct mtk_ppe *ppe);
++int mtk_ppe_debugfs_init(struct mtk_ppe *ppe, int index);
+
+ #endif
+--- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
+@@ -187,7 +187,7 @@ mtk_ppe_debugfs_foe_open_bind(struct ino
+ inode->i_private);
+ }
+
+-int mtk_ppe_debugfs_init(struct mtk_ppe *ppe)
++int mtk_ppe_debugfs_init(struct mtk_ppe *ppe, int index)
+ {
+ static const struct file_operations fops_all = {
+ .open = mtk_ppe_debugfs_foe_open_all,
+@@ -195,17 +195,17 @@ int mtk_ppe_debugfs_init(struct mtk_ppe
+ .llseek = seq_lseek,
+ .release = single_release,
+ };
+-
+ static const struct file_operations fops_bind = {
+ .open = mtk_ppe_debugfs_foe_open_bind,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ };
+-
+ struct dentry *root;
+
+- root = debugfs_create_dir("mtk_ppe", NULL);
++ snprintf(ppe->dirname, sizeof(ppe->dirname), "ppe%d", index);
++
++ root = debugfs_create_dir(ppe->dirname, NULL);
+ if (!root)
+ return -ENOMEM;
+
+--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+@@ -434,7 +434,7 @@ mtk_flow_offload_replace(struct mtk_eth
+ memcpy(&entry->data, &foe, sizeof(entry->data));
+ entry->wed_index = wed_index;
+
+- err = mtk_foe_entry_commit(eth->ppe, entry);
++ err = mtk_foe_entry_commit(eth->ppe[entry->ppe_index], entry);
+ if (err < 0)
+ goto free;
+
+@@ -446,7 +446,7 @@ mtk_flow_offload_replace(struct mtk_eth
+ return 0;
+
+ clear:
+- mtk_foe_entry_clear(eth->ppe, entry);
++ mtk_foe_entry_clear(eth->ppe[entry->ppe_index], entry);
+ free:
+ kfree(entry);
+ if (wed_index >= 0)
+@@ -464,7 +464,7 @@ mtk_flow_offload_destroy(struct mtk_eth
+ if (!entry)
+ return -ENOENT;
+
+- mtk_foe_entry_clear(eth->ppe, entry);
++ mtk_foe_entry_clear(eth->ppe[entry->ppe_index], entry);
+ rhashtable_remove_fast(ð->flow_table, &entry->node,
+ mtk_flow_ht_params);
+ if (entry->wed_index >= 0)
+@@ -485,7 +485,7 @@ mtk_flow_offload_stats(struct mtk_eth *e
+ if (!entry)
+ return -ENOENT;
+
+- idle = mtk_foe_entry_idle_time(eth->ppe, entry);
++ idle = mtk_foe_entry_idle_time(eth->ppe[entry->ppe_index], entry);
+ f->stats.lastused = jiffies - idle * HZ;
+
+ return 0;
+@@ -537,7 +537,7 @@ mtk_eth_setup_tc_block(struct net_device
+ struct flow_block_cb *block_cb;
+ flow_setup_cb_t *cb;
+
+- if (!eth->ppe || !eth->ppe->foe_table)
++ if (!eth->soc->offload_version)
+ return -EOPNOTSUPP;
+
+ if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+@@ -589,8 +589,5 @@ int mtk_eth_setup_tc(struct net_device *
+
+ int mtk_eth_offload_init(struct mtk_eth *eth)
+ {
+- if (!eth->ppe || !eth->ppe->foe_table)
+- return 0;
+-
+ return rhashtable_init(ð->flow_table, &mtk_flow_ht_params);
+ }
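A design choice worth noting: mtk_ppe_start() and mtk_ppe_stop() now return
early on a NULL instance, so callers simply sweep the fixed-size array without
tracking how many PPEs this SoC actually instantiated:

	for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
		mtk_ppe_start(eth->ppe[i]);	/* unpopulated slots are a no-op */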
--- /dev/null
+From 0dcbe607cec32ccae23b02a641b8bd6191a328ae Mon Sep 17 00:00:00 2001
+Message-Id: <0dcbe607cec32ccae23b02a641b8bd6191a328ae.1662243796.git.lorenzo@kernel.org>
+In-Reply-To: <43a21841ce0175d29f23c34a65ceaaf9dd7eb8b7.1662243796.git.lorenzo@kernel.org>
+References: <43a21841ce0175d29f23c34a65ceaaf9dd7eb8b7.1662243796.git.lorenzo@kernel.org>
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Tue, 23 Aug 2022 23:09:05 +0200
+Subject: [PATCH net-next 2/4] net: ethernet: mtk_eth_soc: move wdma_base
+ definitions in mtk register map
+
+This is a preliminary patch to introduce mt7986 wed support.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 16 ++++++++++------
+ drivers/net/ethernet/mediatek/mtk_eth_soc.h | 4 +---
+ 2 files changed, 11 insertions(+), 9 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -75,6 +75,10 @@ static const struct mtk_reg_map mtk_reg_
+ .gdm1_cnt = 0x2400,
+ .gdma_to_ppe0 = 0x4444,
+ .ppe_base = 0x0c00,
++ .wdma_base = {
++ [0] = 0x2800,
++ [1] = 0x2c00,
++ },
+ };
+
+ static const struct mtk_reg_map mt7628_reg_map = {
+@@ -130,6 +134,10 @@ static const struct mtk_reg_map mt7986_r
+ .gdm1_cnt = 0x1c00,
+ .gdma_to_ppe0 = 0x3333,
+ .ppe_base = 0x2000,
++ .wdma_base = {
++ [0] = 0x4800,
++ [1] = 0x4c00,
++ },
+ };
+
+ /* strings used by ethtool */
+@@ -3967,16 +3975,12 @@ static int mtk_probe(struct platform_dev
+ for (i = 0;; i++) {
+ struct device_node *np = of_parse_phandle(pdev->dev.of_node,
+ "mediatek,wed", i);
+- static const u32 wdma_regs[] = {
+- MTK_WDMA0_BASE,
+- MTK_WDMA1_BASE
+- };
+ void __iomem *wdma;
+
+- if (!np || i >= ARRAY_SIZE(wdma_regs))
++ if (!np || i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base))
+ break;
+
+- wdma = eth->base + wdma_regs[i];
++ wdma = eth->base + eth->soc->reg_map->wdma_base[i];
+ mtk_wed_add_hw(np, eth, wdma, i);
+ }
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -268,9 +268,6 @@
+ #define TX_DMA_FPORT_MASK_V2 0xf
+ #define TX_DMA_SWC_V2 BIT(30)
+
+-#define MTK_WDMA0_BASE 0x2800
+-#define MTK_WDMA1_BASE 0x2c00
+-
+ /* QDMA descriptor txd4 */
+ #define TX_DMA_CHKSUM (0x7 << 29)
+ #define TX_DMA_TSO BIT(28)
+@@ -953,6 +950,7 @@ struct mtk_reg_map {
+ u32 gdm1_cnt;
+ u32 gdma_to_ppe0;
+ u32 ppe_base;
++ u32 wdma_base[2];
+ };
+
+ /* struct mtk_eth_data - This is the structure holding all differences
--- /dev/null
+From e3c27d869fccc1f2b8d0b4cde4763ab223874e8c Mon Sep 17 00:00:00 2001
+Message-Id: <e3c27d869fccc1f2b8d0b4cde4763ab223874e8c.1662243796.git.lorenzo@kernel.org>
+In-Reply-To: <43a21841ce0175d29f23c34a65ceaaf9dd7eb8b7.1662243796.git.lorenzo@kernel.org>
+References: <43a21841ce0175d29f23c34a65ceaaf9dd7eb8b7.1662243796.git.lorenzo@kernel.org>
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Sun, 21 Aug 2022 17:51:17 +0200
+Subject: [PATCH net-next 3/4] net: ethernet: mtk_eth_soc: add foe_entry_size
+ to mtk_eth_soc
+
+Introduce foe_entry_size in the mtk_soc_data structure since mt7986
+relies on a bigger mtk_foe_entry data structure.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 3 +
+ drivers/net/ethernet/mediatek/mtk_eth_soc.h | 10 ++++
+ drivers/net/ethernet/mediatek/mtk_ppe.c | 55 +++++++++++--------
+ drivers/net/ethernet/mediatek/mtk_ppe.h | 2 +-
+ .../net/ethernet/mediatek/mtk_ppe_debugfs.c | 2 +-
+ 5 files changed, 48 insertions(+), 24 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -4167,6 +4167,7 @@ static const struct mtk_soc_data mt7621_
+ .required_pctl = false,
+ .offload_version = 2,
+ .hash_offset = 2,
++ .foe_entry_size = sizeof(struct mtk_foe_entry),
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma),
+ .rxd_size = sizeof(struct mtk_rx_dma),
+@@ -4186,6 +4187,7 @@ static const struct mtk_soc_data mt7622_
+ .required_pctl = false,
+ .offload_version = 2,
+ .hash_offset = 2,
++ .foe_entry_size = sizeof(struct mtk_foe_entry),
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma),
+ .rxd_size = sizeof(struct mtk_rx_dma),
+@@ -4204,6 +4206,7 @@ static const struct mtk_soc_data mt7623_
+ .required_pctl = true,
+ .offload_version = 2,
+ .hash_offset = 2,
++ .foe_entry_size = sizeof(struct mtk_foe_entry),
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma),
+ .rxd_size = sizeof(struct mtk_rx_dma),
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -965,6 +965,7 @@ struct mtk_reg_map {
+ * @required_pctl A bool value to show whether the SoC requires
+ * the extra setup for those pins used by GMAC.
+ * @hash_offset Flow table hash offset.
++ * @foe_entry_size Foe table entry size.
+ * @txd_size Tx DMA descriptor size.
+ * @rxd_size Rx DMA descriptor size.
+ * @rx_irq_done_mask Rx irq done register mask.
+@@ -980,6 +981,7 @@ struct mtk_soc_data {
+ bool required_pctl;
+ u8 offload_version;
+ u8 hash_offset;
++ u16 foe_entry_size;
+ netdev_features_t hw_features;
+ struct {
+ u32 txd_size;
+@@ -1140,6 +1142,14 @@ struct mtk_mac {
+ /* the struct describing the SoC. these are declared in the soc_xyz.c files */
+ extern const struct of_device_id of_mtk_match[];
+
++static inline struct mtk_foe_entry *
++mtk_foe_get_entry(struct mtk_ppe *ppe, u16 hash)
++{
++ const struct mtk_soc_data *soc = ppe->eth->soc;
++
++ return ppe->foe_table + hash * soc->foe_entry_size;
++}
++
+ /* read the hardware status register */
+ void mtk_stats_update_mac(struct mtk_mac *mac);
+
+--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
+@@ -410,9 +410,10 @@ __mtk_foe_entry_clear(struct mtk_ppe *pp
+
+ hlist_del_init(&entry->list);
+ if (entry->hash != 0xffff) {
+- ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE;
+- ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE,
+- MTK_FOE_STATE_BIND);
++ struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, entry->hash);
++
++ hwe->ib1 &= ~MTK_FOE_IB1_STATE;
++ hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND);
+ dma_wmb();
+ }
+ entry->hash = 0xffff;
+@@ -451,7 +452,7 @@ mtk_flow_entry_update_l2(struct mtk_ppe
+ int cur_idle;
+ u32 ib1;
+
+- hwe = &ppe->foe_table[cur->hash];
++ hwe = mtk_foe_get_entry(ppe, cur->hash);
+ ib1 = READ_ONCE(hwe->ib1);
+
+ if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
+@@ -473,8 +474,8 @@ mtk_flow_entry_update_l2(struct mtk_ppe
+ static void
+ mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+ {
++ struct mtk_foe_entry foe = {};
+ struct mtk_foe_entry *hwe;
+- struct mtk_foe_entry foe;
+
+ spin_lock_bh(&ppe_lock);
+
+@@ -486,8 +487,8 @@ mtk_flow_entry_update(struct mtk_ppe *pp
+ if (entry->hash == 0xffff)
+ goto out;
+
+- hwe = &ppe->foe_table[entry->hash];
+- memcpy(&foe, hwe, sizeof(foe));
++ hwe = mtk_foe_get_entry(ppe, entry->hash);
++ memcpy(&foe, hwe, ppe->eth->soc->foe_entry_size);
+ if (!mtk_flow_entry_match(entry, &foe)) {
+ entry->hash = 0xffff;
+ goto out;
+@@ -511,8 +512,8 @@ __mtk_foe_entry_commit(struct mtk_ppe *p
+ entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
+ entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp);
+
+- hwe = &ppe->foe_table[hash];
+- memcpy(&hwe->data, &entry->data, sizeof(hwe->data));
++ hwe = mtk_foe_get_entry(ppe, hash);
++ memcpy(&hwe->data, &entry->data, ppe->eth->soc->foe_entry_size);
+ wmb();
+ hwe->ib1 = entry->ib1;
+
+@@ -561,7 +562,7 @@ mtk_foe_entry_commit_subflow(struct mtk_
+ {
+ const struct mtk_soc_data *soc = ppe->eth->soc;
+ struct mtk_flow_entry *flow_info;
+- struct mtk_foe_entry foe, *hwe;
++ struct mtk_foe_entry foe = {}, *hwe;
+ struct mtk_foe_mac_info *l2;
+ u32 ib1_mask = MTK_FOE_IB1_PACKET_TYPE | MTK_FOE_IB1_UDP;
+ int type;
+@@ -578,8 +579,8 @@ mtk_foe_entry_commit_subflow(struct mtk_
+ &ppe->foe_flow[hash / soc->hash_offset]);
+ hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);
+
+- hwe = &ppe->foe_table[hash];
+- memcpy(&foe, hwe, sizeof(foe));
++ hwe = mtk_foe_get_entry(ppe, hash);
++ memcpy(&foe, hwe, soc->foe_entry_size);
+ foe.ib1 &= ib1_mask;
+ foe.ib1 |= entry->data.ib1 & ~ib1_mask;
+
+@@ -601,7 +602,7 @@ void __mtk_ppe_check_skb(struct mtk_ppe
+ {
+ const struct mtk_soc_data *soc = ppe->eth->soc;
+ struct hlist_head *head = &ppe->foe_flow[hash / soc->hash_offset];
+- struct mtk_foe_entry *hwe = &ppe->foe_table[hash];
++ struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, hash);
+ struct mtk_flow_entry *entry;
+ struct mtk_foe_bridge key = {};
+ struct hlist_node *n;
+@@ -686,9 +687,9 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_
+ {
+ const struct mtk_soc_data *soc = eth->soc;
+ struct device *dev = eth->dev;
+- struct mtk_foe_entry *foe;
+ struct mtk_ppe *ppe;
+ u32 foe_flow_size;
++ void *foe;
+
+ ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL);
+ if (!ppe)
+@@ -704,7 +705,8 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_
+ ppe->dev = dev;
+ ppe->version = version;
+
+- foe = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*foe),
++ foe = dmam_alloc_coherent(ppe->dev,
++ MTK_PPE_ENTRIES * soc->foe_entry_size,
+ &ppe->foe_phys, GFP_KERNEL);
+ if (!foe)
+ return NULL;
+@@ -727,15 +729,21 @@ static void mtk_ppe_init_foe_table(struc
+ static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
+ int i, k;
+
+- memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(*ppe->foe_table));
++ memset(ppe->foe_table, 0,
++ MTK_PPE_ENTRIES * ppe->eth->soc->foe_entry_size);
+
+ if (!IS_ENABLED(CONFIG_SOC_MT7621))
+ return;
+
+ /* skip all entries that cross the 1024 byte boundary */
+- for (i = 0; i < MTK_PPE_ENTRIES; i += 128)
+- for (k = 0; k < ARRAY_SIZE(skip); k++)
+- ppe->foe_table[i + skip[k]].ib1 |= MTK_FOE_IB1_STATIC;
++ for (i = 0; i < MTK_PPE_ENTRIES; i += 128) {
++ for (k = 0; k < ARRAY_SIZE(skip); k++) {
++ struct mtk_foe_entry *hwe;
++
++ hwe = mtk_foe_get_entry(ppe, i + skip[k]);
++ hwe->ib1 |= MTK_FOE_IB1_STATIC;
++ }
++ }
+ }
+
+ void mtk_ppe_start(struct mtk_ppe *ppe)
+@@ -822,9 +830,12 @@ int mtk_ppe_stop(struct mtk_ppe *ppe)
+ if (!ppe)
+ return 0;
+
+- for (i = 0; i < MTK_PPE_ENTRIES; i++)
+- ppe->foe_table[i].ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
+- MTK_FOE_STATE_INVALID);
++ for (i = 0; i < MTK_PPE_ENTRIES; i++) {
++ struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, i);
++
++ hwe->ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
++ MTK_FOE_STATE_INVALID);
++ }
+
+ mtk_ppe_cache_enable(ppe, false);
+
+--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
++++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
+@@ -268,7 +268,7 @@ struct mtk_ppe {
+ int version;
+ char dirname[5];
+
+- struct mtk_foe_entry *foe_table;
++ void *foe_table;
+ dma_addr_t foe_phys;
+
+ u16 foe_check_time[MTK_PPE_ENTRIES];
+--- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
+@@ -79,7 +79,7 @@ mtk_ppe_debugfs_foe_show(struct seq_file
+ int i;
+
+ for (i = 0; i < MTK_PPE_ENTRIES; i++) {
+- struct mtk_foe_entry *entry = &ppe->foe_table[i];
++ struct mtk_foe_entry *entry = mtk_foe_get_entry(ppe, i);
+ struct mtk_foe_mac_info *l2;
+ struct mtk_flow_addr_info ai = {};
+ unsigned char h_source[ETH_ALEN];
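A small but load-bearing detail in the mtk_foe_get_entry() helper above:
foe_table is now a void *, and the indexing relies on the GNU C extension
(which the kernel builds with) that gives void * arithmetic a one-byte
stride, so the multiplication really is a byte offset:

	/* hash * foe_entry_size bytes into the DMA-coherent FOE table */
	struct mtk_foe_entry *hwe = ppe->foe_table + hash * soc->foe_entry_size;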
--- /dev/null
+From 12ff69304c83c679ca01ef3db963ab0db9de19fb Mon Sep 17 00:00:00 2001
+Message-Id: <12ff69304c83c679ca01ef3db963ab0db9de19fb.1662332102.git.lorenzo@kernel.org>
+In-Reply-To: <2a60545635c2705312299384f4e9fec2f2a3acd6.1662332102.git.lorenzo@kernel.org>
+References: <2a60545635c2705312299384f4e9fec2f2a3acd6.1662332102.git.lorenzo@kernel.org>
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Mon, 5 Sep 2022 00:43:43 +0200
+Subject: [PATCH net-next 2/6] net: ethernet: mtk_eth_soc: fix typo in
+ __mtk_foe_entry_clear
+
+Set the ib1 state to MTK_FOE_STATE_UNBIND in the __mtk_foe_entry_clear routine.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_ppe.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
+@@ -413,7 +413,7 @@ __mtk_foe_entry_clear(struct mtk_ppe *pp
+ struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, entry->hash);
+
+ hwe->ib1 &= ~MTK_FOE_IB1_STATE;
+- hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND);
++ hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_UNBIND);
+ dma_wmb();
+ }
+ entry->hash = 0xffff;
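For reference, the FOE entry states this one-liner is juggling, as declared
in mtk_ppe.h (reproduced from the kernel tree; double-check against the exact
revision this series targets):

	enum {
		MTK_FOE_STATE_INVALID,
		MTK_FOE_STATE_UNBIND,
		MTK_FOE_STATE_BIND,
		MTK_FOE_STATE_FIN
	};

A cleared entry must fall back to UNBIND so the PPE treats it as eligible for
re-binding; leaving it in BIND would make the hardware keep it as an active
flow.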
--- /dev/null
+From 4253e6e2b795a18ab534adcd5c313d3fc4150975 Mon Sep 17 00:00:00 2001
+Message-Id: <4253e6e2b795a18ab534adcd5c313d3fc4150975.1662332102.git.lorenzo@kernel.org>
+In-Reply-To: <2a60545635c2705312299384f4e9fec2f2a3acd6.1662332102.git.lorenzo@kernel.org>
+References: <2a60545635c2705312299384f4e9fec2f2a3acd6.1662332102.git.lorenzo@kernel.org>
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Mon, 5 Sep 2022 00:48:52 +0200
+Subject: [PATCH net-next 3/6] net: ethernet: mtk_eth_soc: check max allowed
+ value in mtk_ppe_check_skb
+
+Check for theoretical OOB accesses in the mtk_ppe_check_skb routine.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_ppe.h | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
++++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
+@@ -294,6 +294,9 @@ mtk_ppe_check_skb(struct mtk_ppe *ppe, s
+ if (!ppe)
+ return;
+
++ if (hash > MTK_PPE_HASH_MASK)
++ return;
++
+ now = (u16)jiffies;
+ diff = now - ppe->foe_check_time[hash];
+ if (diff < HZ / 10)
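Worked values behind the guard, from the definitions visible in an earlier
hunk of this series: MTK_PPE_ENTRIES = 1024 << MTK_PPE_ENTRIES_SHIFT(3) =
8192 and MTK_PPE_HASH_MASK = MTK_PPE_ENTRIES - 1 = 8191. Since hash is parsed
out of the RX descriptor (trxd.rxd4), i.e. comes from hardware, an
out-of-range value could otherwise index past foe_check_time[MTK_PPE_ENTRIES]:

	if (hash > MTK_PPE_HASH_MASK)	/* valid hashes span 0..8191 */
		return;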
--- /dev/null
+From e5ecb4f619197b93fa682d722452dc8412864cdb Mon Sep 17 00:00:00 2001
+Message-Id: <e5ecb4f619197b93fa682d722452dc8412864cdb.1662886033.git.lorenzo@kernel.org>
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Fri, 26 Aug 2022 01:12:57 +0200
+Subject: [PATCH net-next 1/5] net: ethernet: mtk_eth_wed: add
+ mtk_wed_configure_irq and mtk_wed_dma_{enable/disable}
+
+Introduce mtk_wed_configure_irq, mtk_wed_dma_enable and mtk_wed_dma_disable
+utility routines.
+This is a preliminary patch to introduce mt7986 wed support.
+
+Co-developed-by: Bo Jiao <Bo.Jiao@mediatek.com>
+Signed-off-by: Bo Jiao <Bo.Jiao@mediatek.com>
+Co-developed-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_wed.c | 87 +++++++++++++-------
+ drivers/net/ethernet/mediatek/mtk_wed_regs.h | 6 +-
+ 2 files changed, 64 insertions(+), 29 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed.c
+@@ -237,9 +237,30 @@ mtk_wed_set_ext_int(struct mtk_wed_devic
+ }
+
+ static void
+-mtk_wed_stop(struct mtk_wed_device *dev)
++mtk_wed_dma_disable(struct mtk_wed_device *dev)
+ {
++ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
++ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
++ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
++
++ wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
++
++ wed_clr(dev, MTK_WED_GLO_CFG,
++ MTK_WED_GLO_CFG_TX_DMA_EN |
++ MTK_WED_GLO_CFG_RX_DMA_EN);
++
+ regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
++ wdma_m32(dev, MTK_WDMA_GLO_CFG,
++ MTK_WDMA_GLO_CFG_TX_DMA_EN |
++ MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
++ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES |
++ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES, 0);
++}
++
++static void
++mtk_wed_stop(struct mtk_wed_device *dev)
++{
++ mtk_wed_dma_disable(dev);
+ mtk_wed_set_ext_int(dev, false);
+
+ wed_clr(dev, MTK_WED_CTRL,
+@@ -252,15 +273,6 @@ mtk_wed_stop(struct mtk_wed_device *dev)
+ wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
+ wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
+ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
+-
+- wed_clr(dev, MTK_WED_GLO_CFG,
+- MTK_WED_GLO_CFG_TX_DMA_EN |
+- MTK_WED_GLO_CFG_RX_DMA_EN);
+- wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+- MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
+- MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
+- wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
+- MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
+ }
+
+ static void
+@@ -313,7 +325,10 @@ mtk_wed_hw_init_early(struct mtk_wed_dev
+ MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
+ wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);
+
+- wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_INFO_PRERES);
++ wdma_set(dev, MTK_WDMA_GLO_CFG,
++ MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
++ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES |
++ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
+
+ offset = dev->hw->index ? 0x04000400 : 0;
+ wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
+@@ -520,43 +535,38 @@ mtk_wed_wdma_ring_setup(struct mtk_wed_d
+ }
+
+ static void
+-mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
++mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
+ {
+- u32 wdma_mask;
+- u32 val;
+- int i;
+-
+- for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
+- if (!dev->tx_wdma[i].desc)
+- mtk_wed_wdma_ring_setup(dev, i, 16);
+-
+- wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));
+-
+- mtk_wed_hw_init(dev);
++ u32 wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));
+
++	/* wed control register setup */
+ wed_set(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_WDMA_INT_AGENT_EN |
+ MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
+ MTK_WED_CTRL_WED_TX_BM_EN |
+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+
+- wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, MTK_WED_PCIE_INT_TRIGGER_STATUS);
++ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER,
++ MTK_WED_PCIE_INT_TRIGGER_STATUS);
+
+ wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER,
+ MTK_WED_WPDMA_INT_TRIGGER_RX_DONE |
+ MTK_WED_WPDMA_INT_TRIGGER_TX_DONE);
+
+- wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
+- MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
+-
++	/* initial wdma interrupt agent */
+ wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);
+ wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
+
+ wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask);
+ wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask);
+-
+ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
+ wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
++}
++
++static void
++mtk_wed_dma_enable(struct mtk_wed_device *dev)
++{
++ wed_set(dev, MTK_WED_WPDMA_INT_CTRL, MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
+
+ wed_set(dev, MTK_WED_GLO_CFG,
+ MTK_WED_GLO_CFG_TX_DMA_EN |
+@@ -567,6 +577,26 @@ mtk_wed_start(struct mtk_wed_device *dev
+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
+ MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
+
++ wdma_set(dev, MTK_WDMA_GLO_CFG,
++ MTK_WDMA_GLO_CFG_TX_DMA_EN |
++ MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
++ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES |
++ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
++}
++
++static void
++mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
++{
++ u32 val;
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
++ if (!dev->tx_wdma[i].desc)
++ mtk_wed_wdma_ring_setup(dev, i, 16);
++
++ mtk_wed_hw_init(dev);
++ mtk_wed_configure_irq(dev, irq_mask);
++
+ mtk_wed_set_ext_int(dev, true);
+ val = dev->wlan.wpdma_phys |
+ MTK_PCIE_MIRROR_MAP_EN |
+@@ -577,6 +607,7 @@ mtk_wed_start(struct mtk_wed_device *dev
+ val |= BIT(0);
+ regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
+
++ mtk_wed_dma_enable(dev);
+ dev->running = true;
+ }
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+@@ -224,7 +224,11 @@ struct mtk_wdma_desc {
+ #define MTK_WDMA_RING_RX(_n) (0x100 + (_n) * 0x10)
+
+ #define MTK_WDMA_GLO_CFG 0x204
+-#define MTK_WDMA_GLO_CFG_RX_INFO_PRERES GENMASK(28, 26)
++#define MTK_WDMA_GLO_CFG_TX_DMA_EN BIT(0)
++#define MTK_WDMA_GLO_CFG_RX_DMA_EN BIT(2)
++#define MTK_WDMA_GLO_CFG_RX_INFO3_PRERES BIT(26)
++#define MTK_WDMA_GLO_CFG_RX_INFO2_PRERES BIT(27)
++#define MTK_WDMA_GLO_CFG_RX_INFO1_PRERES BIT(28)
+
+ #define MTK_WDMA_RESET_IDX 0x208
+ #define MTK_WDMA_RESET_IDX_TX GENMASK(3, 0)
--- /dev/null
+From 463a71af080fbc77339bee2037fb1e081e3824f7 Mon Sep 17 00:00:00 2001
+Message-Id: <463a71af080fbc77339bee2037fb1e081e3824f7.1662886034.git.lorenzo@kernel.org>
+In-Reply-To: <e5ecb4f619197b93fa682d722452dc8412864cdb.1662886033.git.lorenzo@kernel.org>
+References: <e5ecb4f619197b93fa682d722452dc8412864cdb.1662886033.git.lorenzo@kernel.org>
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Sat, 27 Aug 2022 16:15:14 +0200
+Subject: [PATCH net-next 2/5] net: ethernet: mtk_eth_wed: add wed support for
+ mt7986 chipset
+
+Introduce Wireless Ethernet Dispatcher (WED) support on the transmission
+side for the mt7986 chipset.
+
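+Most of the rework keys off the new hw->version field; e.g. v2 (mt7986)
+WDMA descriptors are twice the v1 size, so the ring helpers now take an
+explicit descriptor size. A sketch of the pattern, taken from
+mtk_wed_wdma_ring_setup() in this patch:
+
+	/* one mtk_wdma_desc per slot on v1, two on v2 (mt7986) */
+	u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
+
+	if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, desc_size))
+		return -ENOMEM;
+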
+Co-developed-by: Bo Jiao <Bo.Jiao@mediatek.com>
+Signed-off-by: Bo Jiao <Bo.Jiao@mediatek.com>
+Co-developed-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 34 +-
+ drivers/net/ethernet/mediatek/mtk_wed.c | 371 ++++++++++++++----
+ drivers/net/ethernet/mediatek/mtk_wed.h | 8 +-
+ .../net/ethernet/mediatek/mtk_wed_debugfs.c | 3 +
+ drivers/net/ethernet/mediatek/mtk_wed_regs.h | 81 +++-
+ include/linux/soc/mediatek/mtk_wed.h | 8 +
+ 6 files changed, 408 insertions(+), 97 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -3892,6 +3892,7 @@ void mtk_eth_set_dma_device(struct mtk_e
+
+ static int mtk_probe(struct platform_device *pdev)
+ {
++ struct resource *res = NULL;
+ struct device_node *mac_np;
+ struct mtk_eth *eth;
+ int err, i;
+@@ -3972,16 +3973,31 @@ static int mtk_probe(struct platform_dev
+ }
+ }
+
+- for (i = 0;; i++) {
+- struct device_node *np = of_parse_phandle(pdev->dev.of_node,
+- "mediatek,wed", i);
+- void __iomem *wdma;
+-
+- if (!np || i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base))
+- break;
+-
+- wdma = eth->base + eth->soc->reg_map->wdma_base[i];
+- mtk_wed_add_hw(np, eth, wdma, i);
++ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!res)
++ return -EINVAL;
++ }
++
++ if (eth->soc->offload_version) {
++ for (i = 0;; i++) {
++ struct device_node *np;
++ phys_addr_t wdma_phy;
++ u32 wdma_base;
++
++ if (i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base))
++ break;
++
++ np = of_parse_phandle(pdev->dev.of_node,
++ "mediatek,wed", i);
++ if (!np)
++ break;
++
++ wdma_base = eth->soc->reg_map->wdma_base[i];
++ wdma_phy = res ? res->start + wdma_base : 0;
++ mtk_wed_add_hw(np, eth, eth->base + wdma_base,
++ wdma_phy, i);
++ }
+ }
+
+ for (i = 0; i < 3; i++) {
+--- a/drivers/net/ethernet/mediatek/mtk_wed.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed.c
+@@ -25,6 +25,11 @@
+
+ #define MTK_WED_TX_RING_SIZE 2048
+ #define MTK_WED_WDMA_RING_SIZE 1024
++#define MTK_WED_MAX_GROUP_SIZE 0x100
++#define MTK_WED_VLD_GROUP_SIZE 0x40
++#define MTK_WED_PER_GROUP_PKT 128
++
++#define MTK_WED_FBUF_SIZE 128
+
+ static struct mtk_wed_hw *hw_list[2];
+ static DEFINE_MUTEX(hw_lock);
+@@ -150,10 +155,17 @@ mtk_wed_buffer_alloc(struct mtk_wed_devi
+
+ desc->buf0 = cpu_to_le32(buf_phys);
+ desc->buf1 = cpu_to_le32(buf_phys + txd_size);
+- ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
+- FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
+- MTK_WED_BUF_SIZE - txd_size) |
+- MTK_WDMA_DESC_CTRL_LAST_SEG1;
++
++ if (dev->hw->version == 1)
++ ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
++ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
++ MTK_WED_BUF_SIZE - txd_size) |
++ MTK_WDMA_DESC_CTRL_LAST_SEG1;
++ else
++ ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
++ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2,
++ MTK_WED_BUF_SIZE - txd_size) |
++ MTK_WDMA_DESC_CTRL_LAST_SEG0;
+ desc->ctrl = cpu_to_le32(ctrl);
+ desc->info = 0;
+ desc++;
+@@ -209,7 +221,7 @@ mtk_wed_free_ring(struct mtk_wed_device
+ if (!ring->desc)
+ return;
+
+- dma_free_coherent(dev->hw->dev, ring->size * sizeof(*ring->desc),
++ dma_free_coherent(dev->hw->dev, ring->size * ring->desc_size,
+ ring->desc, ring->desc_phys);
+ }
+
+@@ -229,6 +241,14 @@ mtk_wed_set_ext_int(struct mtk_wed_devic
+ {
+ u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
+
++ if (dev->hw->version == 1)
++ mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
++ else
++ mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
++ MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
++ MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
++ MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;
++
+ if (!dev->hw->num_flows)
+ mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
+
+@@ -237,6 +257,20 @@ mtk_wed_set_ext_int(struct mtk_wed_devic
+ }
+
+ static void
++mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable)
++{
++ if (enable) {
++ wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
++ wed_w32(dev, MTK_WED_TXP_DW1,
++ FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0103));
++ } else {
++ wed_w32(dev, MTK_WED_TXP_DW1,
++ FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0100));
++ wed_clr(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
++ }
++}
++
++static void
+ mtk_wed_dma_disable(struct mtk_wed_device *dev)
+ {
+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+@@ -249,12 +283,22 @@ mtk_wed_dma_disable(struct mtk_wed_devic
+ MTK_WED_GLO_CFG_TX_DMA_EN |
+ MTK_WED_GLO_CFG_RX_DMA_EN);
+
+- regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
+ wdma_m32(dev, MTK_WDMA_GLO_CFG,
+ MTK_WDMA_GLO_CFG_TX_DMA_EN |
+ MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
+- MTK_WDMA_GLO_CFG_RX_INFO2_PRERES |
+- MTK_WDMA_GLO_CFG_RX_INFO3_PRERES, 0);
++ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES, 0);
++
++ if (dev->hw->version == 1) {
++ regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
++ wdma_m32(dev, MTK_WDMA_GLO_CFG,
++ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES, 0);
++ } else {
++ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
++ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
++ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
++
++ mtk_wed_set_512_support(dev, false);
++ }
+ }
+
+ static void
+@@ -293,7 +337,7 @@ mtk_wed_detach(struct mtk_wed_device *de
+ mtk_wed_free_buffer(dev);
+ mtk_wed_free_tx_rings(dev);
+
+- if (of_dma_is_coherent(wlan_node))
++ if (of_dma_is_coherent(wlan_node) && hw->hifsys)
+ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
+ BIT(hw->index), BIT(hw->index));
+
+@@ -308,14 +352,69 @@ mtk_wed_detach(struct mtk_wed_device *de
+ mutex_unlock(&hw_lock);
+ }
+
++#define PCIE_BASE_ADDR0 0x11280000
++static void
++mtk_wed_bus_init(struct mtk_wed_device *dev)
++{
++ struct device_node *np = dev->hw->eth->dev->of_node;
++ struct regmap *regs;
++ u32 val;
++
++ regs = syscon_regmap_lookup_by_phandle(np, "mediatek,wed-pcie");
++ if (IS_ERR(regs))
++ return;
++
++ regmap_update_bits(regs, 0, BIT(0), BIT(0));
++
++ wed_w32(dev, MTK_WED_PCIE_INT_CTRL,
++ FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2));
++
++ /* pcie interrupt control: pola/source selection */
++ wed_set(dev, MTK_WED_PCIE_INT_CTRL,
++ MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
++ FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1));
++ wed_r32(dev, MTK_WED_PCIE_INT_CTRL);
++
++ val = wed_r32(dev, MTK_WED_PCIE_CFG_INTM);
++ val = wed_r32(dev, MTK_WED_PCIE_CFG_BASE);
++ wed_w32(dev, MTK_WED_PCIE_CFG_INTM, PCIE_BASE_ADDR0 | 0x180);
++ wed_w32(dev, MTK_WED_PCIE_CFG_BASE, PCIE_BASE_ADDR0 | 0x184);
++
++ val = wed_r32(dev, MTK_WED_PCIE_CFG_INTM);
++ val = wed_r32(dev, MTK_WED_PCIE_CFG_BASE);
++
++ /* pcie interrupt status trigger register */
++ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
++ wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER);
++
++ /* pola setting */
++ val = wed_r32(dev, MTK_WED_PCIE_INT_CTRL);
++ wed_set(dev, MTK_WED_PCIE_INT_CTRL, MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA);
++}
++
++static void
++mtk_wed_set_wpdma(struct mtk_wed_device *dev)
++{
++ if (dev->hw->version == 1) {
++ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
++ } else {
++ mtk_wed_bus_init(dev);
++
++ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int);
++ wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
++ wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
++ wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);
++ }
++}
++
+ static void
+ mtk_wed_hw_init_early(struct mtk_wed_device *dev)
+ {
+ u32 mask, set;
+- u32 offset;
+
+ mtk_wed_stop(dev);
+ mtk_wed_reset(dev, MTK_WED_RESET_WED);
++ mtk_wed_set_wpdma(dev);
+
+ mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
+ MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
+@@ -325,17 +424,33 @@ mtk_wed_hw_init_early(struct mtk_wed_dev
+ MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
+ wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);
+
+- wdma_set(dev, MTK_WDMA_GLO_CFG,
+- MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
+- MTK_WDMA_GLO_CFG_RX_INFO2_PRERES |
+- MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
+-
+- offset = dev->hw->index ? 0x04000400 : 0;
+- wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
+- wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset);
++ if (dev->hw->version == 1) {
++ u32 offset = dev->hw->index ? 0x04000400 : 0;
+
+- wed_w32(dev, MTK_WED_PCIE_CFG_BASE, MTK_PCIE_BASE(dev->hw->index));
+- wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
++ wdma_set(dev, MTK_WDMA_GLO_CFG,
++ MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
++ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES |
++ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
++
++ wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
++ wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset);
++ wed_w32(dev, MTK_WED_PCIE_CFG_BASE,
++ MTK_PCIE_BASE(dev->hw->index));
++ } else {
++ wed_w32(dev, MTK_WED_WDMA_CFG_BASE, dev->hw->wdma_phy);
++ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_ETH_DMAD_FMT);
++ wed_w32(dev, MTK_WED_WDMA_OFFSET0,
++ FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_INTS,
++ MTK_WDMA_INT_STATUS) |
++ FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_CFG,
++ MTK_WDMA_GLO_CFG));
++
++ wed_w32(dev, MTK_WED_WDMA_OFFSET1,
++ FIELD_PREP(MTK_WED_WDMA_OFST1_TX_CTRL,
++ MTK_WDMA_RING_TX(0)) |
++ FIELD_PREP(MTK_WED_WDMA_OFST1_RX_CTRL,
++ MTK_WDMA_RING_RX(0)));
++ }
+ }
+
+ static void
+@@ -355,37 +470,65 @@ mtk_wed_hw_init(struct mtk_wed_device *d
+
+ wed_w32(dev, MTK_WED_TX_BM_BASE, dev->buf_ring.desc_phys);
+
+- wed_w32(dev, MTK_WED_TX_BM_TKID,
+- FIELD_PREP(MTK_WED_TX_BM_TKID_START,
+- dev->wlan.token_start) |
+- FIELD_PREP(MTK_WED_TX_BM_TKID_END,
+- dev->wlan.token_start + dev->wlan.nbuf - 1));
+-
+ wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
+
+- wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
+- FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
+- MTK_WED_TX_BM_DYN_THR_HI);
++ if (dev->hw->version == 1) {
++ wed_w32(dev, MTK_WED_TX_BM_TKID,
++ FIELD_PREP(MTK_WED_TX_BM_TKID_START,
++ dev->wlan.token_start) |
++ FIELD_PREP(MTK_WED_TX_BM_TKID_END,
++ dev->wlan.token_start +
++ dev->wlan.nbuf - 1));
++ wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
++ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
++ MTK_WED_TX_BM_DYN_THR_HI);
++ } else {
++ wed_w32(dev, MTK_WED_TX_BM_TKID_V2,
++ FIELD_PREP(MTK_WED_TX_BM_TKID_START,
++ dev->wlan.token_start) |
++ FIELD_PREP(MTK_WED_TX_BM_TKID_END,
++ dev->wlan.token_start +
++ dev->wlan.nbuf - 1));
++ wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
++ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO_V2, 0) |
++ MTK_WED_TX_BM_DYN_THR_HI_V2);
++ wed_w32(dev, MTK_WED_TX_TKID_CTRL,
++ MTK_WED_TX_TKID_CTRL_PAUSE |
++ FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM,
++ dev->buf_ring.size / 128) |
++ FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM,
++ dev->buf_ring.size / 128));
++ wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
++ FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
++ MTK_WED_TX_TKID_DYN_THR_HI);
++ }
+
+ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
+
+- wed_set(dev, MTK_WED_CTRL,
+- MTK_WED_CTRL_WED_TX_BM_EN |
+- MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
++ if (dev->hw->version == 1)
++ wed_set(dev, MTK_WED_CTRL,
++ MTK_WED_CTRL_WED_TX_BM_EN |
++ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
++ else
++ wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE);
+
+ wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
+ }
+
+ static void
+-mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size)
++mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size)
+ {
++ void *head = (void *)ring->desc;
+ int i;
+
+ for (i = 0; i < size; i++) {
+- desc[i].buf0 = 0;
+- desc[i].ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
+- desc[i].buf1 = 0;
+- desc[i].info = 0;
++ struct mtk_wdma_desc *desc;
++
++ desc = (struct mtk_wdma_desc *)(head + i * ring->desc_size);
++ desc->buf0 = 0;
++ desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
++ desc->buf1 = 0;
++ desc->info = 0;
+ }
+ }
+
+@@ -436,12 +579,10 @@ mtk_wed_reset_dma(struct mtk_wed_device
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) {
+- struct mtk_wdma_desc *desc = dev->tx_ring[i].desc;
+-
+- if (!desc)
++ if (!dev->tx_ring[i].desc)
+ continue;
+
+- mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE);
++ mtk_wed_ring_reset(&dev->tx_ring[i], MTK_WED_TX_RING_SIZE);
+ }
+
+ if (mtk_wed_poll_busy(dev))
+@@ -498,16 +639,16 @@ mtk_wed_reset_dma(struct mtk_wed_device
+
+ static int
+ mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
+- int size)
++ int size, u32 desc_size)
+ {
+- ring->desc = dma_alloc_coherent(dev->hw->dev,
+- size * sizeof(*ring->desc),
++ ring->desc = dma_alloc_coherent(dev->hw->dev, size * desc_size,
+ &ring->desc_phys, GFP_KERNEL);
+ if (!ring->desc)
+ return -ENOMEM;
+
++ ring->desc_size = desc_size;
+ ring->size = size;
+- mtk_wed_ring_reset(ring->desc, size);
++ mtk_wed_ring_reset(ring, size);
+
+ return 0;
+ }
+@@ -515,9 +656,10 @@ mtk_wed_ring_alloc(struct mtk_wed_device
+ static int
+ mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
+ {
++ u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
+ struct mtk_wed_ring *wdma = &dev->tx_wdma[idx];
+
+- if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE))
++ if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, desc_size))
+ return -ENOMEM;
+
+ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
+@@ -546,16 +688,41 @@ mtk_wed_configure_irq(struct mtk_wed_dev
+ MTK_WED_CTRL_WED_TX_BM_EN |
+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+
+- wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER,
+- MTK_WED_PCIE_INT_TRIGGER_STATUS);
++ if (dev->hw->version == 1) {
++ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER,
++ MTK_WED_PCIE_INT_TRIGGER_STATUS);
++
++ wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER,
++ MTK_WED_WPDMA_INT_TRIGGER_RX_DONE |
++ MTK_WED_WPDMA_INT_TRIGGER_TX_DONE);
+
+- wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER,
+- MTK_WED_WPDMA_INT_TRIGGER_RX_DONE |
+- MTK_WED_WPDMA_INT_TRIGGER_TX_DONE);
++ wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
++ } else {
++		/* initial tx interrupt trigger */
++ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
++ MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
++ MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR |
++ MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN |
++ MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR |
++ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG,
++ dev->wlan.tx_tbit[0]) |
++ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG,
++ dev->wlan.tx_tbit[1]));
++
++		/* initial txfree interrupt trigger */
++ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX_FREE,
++ MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN |
++ MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR |
++ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG,
++ dev->wlan.txfree_tbit));
++
++ wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask);
++ wed_set(dev, MTK_WED_WDMA_INT_CTRL,
++ FIELD_PREP(MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL,
++ dev->wdma_idx));
++ }
+
+-	/* initial wdma interrupt agent */
+ wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);
+- wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
+
+ wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask);
+ wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask);
+@@ -580,14 +747,28 @@ mtk_wed_dma_enable(struct mtk_wed_device
+ wdma_set(dev, MTK_WDMA_GLO_CFG,
+ MTK_WDMA_GLO_CFG_TX_DMA_EN |
+ MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
+- MTK_WDMA_GLO_CFG_RX_INFO2_PRERES |
+- MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
++ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
++
++ if (dev->hw->version == 1) {
++ wdma_set(dev, MTK_WDMA_GLO_CFG,
++ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
++ } else {
++ wed_set(dev, MTK_WED_WPDMA_CTRL,
++ MTK_WED_WPDMA_CTRL_SDL1_FIXED);
++
++ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
++ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
++ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
++
++ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
++ MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
++ MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
++ }
+ }
+
+ static void
+ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
+ {
+- u32 val;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
+@@ -598,14 +779,17 @@ mtk_wed_start(struct mtk_wed_device *dev
+ mtk_wed_configure_irq(dev, irq_mask);
+
+ mtk_wed_set_ext_int(dev, true);
+- val = dev->wlan.wpdma_phys |
+- MTK_PCIE_MIRROR_MAP_EN |
+- FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID, dev->hw->index);
+-
+- if (dev->hw->index)
+- val |= BIT(1);
+- val |= BIT(0);
+- regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
++
++ if (dev->hw->version == 1) {
++ u32 val = dev->wlan.wpdma_phys | MTK_PCIE_MIRROR_MAP_EN |
++ FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID,
++ dev->hw->index);
++
++ val |= BIT(0) | (BIT(1) * !!dev->hw->index);
++ regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
++ } else {
++ mtk_wed_set_512_support(dev, true);
++ }
+
+ mtk_wed_dma_enable(dev);
+ dev->running = true;
+@@ -639,7 +823,9 @@ mtk_wed_attach(struct mtk_wed_device *de
+ goto out;
+ }
+
+- dev_info(&dev->wlan.pci_dev->dev, "attaching wed device %d\n", hw->index);
++ dev_info(&dev->wlan.pci_dev->dev,
++ "attaching wed device %d version %d\n",
++ hw->index, hw->version);
+
+ dev->hw = hw;
+ dev->dev = hw->dev;
+@@ -657,7 +843,9 @@ mtk_wed_attach(struct mtk_wed_device *de
+ }
+
+ mtk_wed_hw_init_early(dev);
+- regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, BIT(hw->index), 0);
++ if (hw->hifsys)
++ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
++ BIT(hw->index), 0);
+
+ out:
+ mutex_unlock(&hw_lock);
+@@ -684,7 +872,8 @@ mtk_wed_tx_ring_setup(struct mtk_wed_dev
+
+ BUG_ON(idx >= ARRAY_SIZE(dev->tx_ring));
+
+- if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE))
++ if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE,
++ sizeof(*ring->desc)))
+ return -ENOMEM;
+
+ if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
+@@ -711,21 +900,21 @@ static int
+ mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
+ {
+ struct mtk_wed_ring *ring = &dev->txfree_ring;
+- int i;
++ int i, index = dev->hw->version == 1;
+
+ /*
+ * For txfree event handling, the same DMA ring is shared between WED
+ * and WLAN. The WLAN driver accesses the ring index registers through
+ * WED
+ */
+- ring->reg_base = MTK_WED_RING_RX(1);
++ ring->reg_base = MTK_WED_RING_RX(index);
+ ring->wpdma = regs;
+
+ for (i = 0; i < 12; i += 4) {
+ u32 val = readl(regs + i);
+
+- wed_w32(dev, MTK_WED_RING_RX(1) + i, val);
+- wed_w32(dev, MTK_WED_WPDMA_RING_RX(1) + i, val);
++ wed_w32(dev, MTK_WED_RING_RX(index) + i, val);
++ wed_w32(dev, MTK_WED_WPDMA_RING_RX(index) + i, val);
+ }
+
+ return 0;
+@@ -734,11 +923,19 @@ mtk_wed_txfree_ring_setup(struct mtk_wed
+ static u32
+ mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
+ {
+- u32 val;
++ u32 val, ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
++
++ if (dev->hw->version == 1)
++ ext_mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
++ else
++ ext_mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
++ MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
++ MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
++ MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;
+
+ val = wed_r32(dev, MTK_WED_EXT_INT_STATUS);
+ wed_w32(dev, MTK_WED_EXT_INT_STATUS, val);
+- val &= MTK_WED_EXT_INT_STATUS_ERROR_MASK;
++ val &= ext_mask;
+ if (!dev->hw->num_flows)
+ val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
+ if (val && net_ratelimit())
+@@ -813,7 +1010,8 @@ out:
+ }
+
+ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
+- void __iomem *wdma, int index)
++ void __iomem *wdma, phys_addr_t wdma_phy,
++ int index)
+ {
+ static const struct mtk_wed_ops wed_ops = {
+ .attach = mtk_wed_attach,
+@@ -860,26 +1058,33 @@ void mtk_wed_add_hw(struct device_node *
+ hw = kzalloc(sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ goto unlock;
++
+ hw->node = np;
+ hw->regs = regs;
+ hw->eth = eth;
+ hw->dev = &pdev->dev;
++ hw->wdma_phy = wdma_phy;
+ hw->wdma = wdma;
+ hw->index = index;
+ hw->irq = irq;
+- hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
+- "mediatek,pcie-mirror");
+- hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
+- "mediatek,hifsys");
+- if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) {
+- kfree(hw);
+- goto unlock;
+- }
++ hw->version = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
+
+- if (!index) {
+- regmap_write(hw->mirror, 0, 0);
+- regmap_write(hw->mirror, 4, 0);
++ if (hw->version == 1) {
++ hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
++ "mediatek,pcie-mirror");
++ hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
++ "mediatek,hifsys");
++ if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) {
++ kfree(hw);
++ goto unlock;
++ }
++
++ if (!index) {
++ regmap_write(hw->mirror, 0, 0);
++ regmap_write(hw->mirror, 4, 0);
++ }
+ }
++
+ mtk_wed_hw_add_debugfs(hw);
+
+ hw_list[index] = hw;
+--- a/drivers/net/ethernet/mediatek/mtk_wed.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed.h
+@@ -18,11 +18,13 @@ struct mtk_wed_hw {
+ struct regmap *hifsys;
+ struct device *dev;
+ void __iomem *wdma;
++ phys_addr_t wdma_phy;
+ struct regmap *mirror;
+ struct dentry *debugfs_dir;
+ struct mtk_wed_device *wed_dev;
+ u32 debugfs_reg;
+ u32 num_flows;
++ u8 version;
+ char dirname[5];
+ int irq;
+ int index;
+@@ -101,14 +103,16 @@ wpdma_txfree_w32(struct mtk_wed_device *
+ }
+
+ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
+- void __iomem *wdma, int index);
++ void __iomem *wdma, phys_addr_t wdma_phy,
++ int index);
+ void mtk_wed_exit(void);
+ int mtk_wed_flow_add(int index);
+ void mtk_wed_flow_remove(int index);
+ #else
+ static inline void
+ mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
+- void __iomem *wdma, int index)
++ void __iomem *wdma, phys_addr_t wdma_phy,
++ int index)
+ {
+ }
+ static inline void
+--- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
+@@ -116,6 +116,9 @@ wed_txinfo_show(struct seq_file *s, void
+ DUMP_WDMA(WDMA_GLO_CFG),
+ DUMP_WDMA_RING(WDMA_RING_RX(0)),
+ DUMP_WDMA_RING(WDMA_RING_RX(1)),
++
++ DUMP_STR("TX FREE"),
++ DUMP_WED(WED_RX_MIB(0)),
+ };
+ struct mtk_wed_hw *hw = s->private;
+ struct mtk_wed_device *dev = hw->wed_dev;
+--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+@@ -5,6 +5,7 @@
+ #define __MTK_WED_REGS_H
+
+ #define MTK_WDMA_DESC_CTRL_LEN1 GENMASK(14, 0)
++#define MTK_WDMA_DESC_CTRL_LEN1_V2 GENMASK(13, 0)
+ #define MTK_WDMA_DESC_CTRL_LAST_SEG1 BIT(15)
+ #define MTK_WDMA_DESC_CTRL_BURST BIT(16)
+ #define MTK_WDMA_DESC_CTRL_LEN0 GENMASK(29, 16)
+@@ -41,6 +42,7 @@ struct mtk_wdma_desc {
+ #define MTK_WED_CTRL_RESERVE_EN BIT(12)
+ #define MTK_WED_CTRL_RESERVE_BUSY BIT(13)
+ #define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24)
++#define MTK_WED_CTRL_ETH_DMAD_FMT BIT(25)
+ #define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28)
+
+ #define MTK_WED_EXT_INT_STATUS 0x020
+@@ -57,7 +59,8 @@ struct mtk_wdma_desc {
+ #define MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN BIT(19)
+ #define MTK_WED_EXT_INT_STATUS_RX_DRV_BM_DMAD_COHERENT BIT(20)
+ #define MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR BIT(21)
+-#define MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR BIT(22)
++#define MTK_WED_EXT_INT_STATUS_TX_DMA_R_RESP_ERR BIT(22)
++#define MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR BIT(23)
+ #define MTK_WED_EXT_INT_STATUS_RX_DRV_DMA_RECYCLE BIT(24)
+ #define MTK_WED_EXT_INT_STATUS_ERROR_MASK (MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \
+ MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \
+@@ -65,8 +68,7 @@ struct mtk_wdma_desc {
+ MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR | \
+ MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR | \
+ MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN | \
+- MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR | \
+- MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR)
++ MTK_WED_EXT_INT_STATUS_TX_DMA_R_RESP_ERR)
+
+ #define MTK_WED_EXT_INT_MASK 0x028
+
+@@ -81,6 +83,7 @@ struct mtk_wdma_desc {
+ #define MTK_WED_TX_BM_BASE 0x084
+
+ #define MTK_WED_TX_BM_TKID 0x088
++#define MTK_WED_TX_BM_TKID_V2 0x0c8
+ #define MTK_WED_TX_BM_TKID_START GENMASK(15, 0)
+ #define MTK_WED_TX_BM_TKID_END GENMASK(31, 16)
+
+@@ -94,7 +97,25 @@ struct mtk_wdma_desc {
+
+ #define MTK_WED_TX_BM_DYN_THR 0x0a0
+ #define MTK_WED_TX_BM_DYN_THR_LO GENMASK(6, 0)
++#define MTK_WED_TX_BM_DYN_THR_LO_V2 GENMASK(8, 0)
+ #define MTK_WED_TX_BM_DYN_THR_HI GENMASK(22, 16)
++#define MTK_WED_TX_BM_DYN_THR_HI_V2 GENMASK(24, 16)
++
++#define MTK_WED_TX_TKID_CTRL 0x0c0
++#define MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM GENMASK(6, 0)
++#define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM GENMASK(22, 16)
++#define MTK_WED_TX_TKID_CTRL_PAUSE BIT(28)
++
++#define MTK_WED_TX_TKID_DYN_THR 0x0e0
++#define MTK_WED_TX_TKID_DYN_THR_LO GENMASK(6, 0)
++#define MTK_WED_TX_TKID_DYN_THR_HI GENMASK(22, 16)
++
++#define MTK_WED_TXP_DW0 0x120
++#define MTK_WED_TXP_DW1 0x124
++#define MTK_WED_WPDMA_WRITE_TXP GENMASK(31, 16)
++#define MTK_WED_TXDP_CTRL 0x130
++#define MTK_WED_TXDP_DW9_OVERWR BIT(9)
++#define MTK_WED_RX_BM_TKID_MIB 0x1cc
+
+ #define MTK_WED_INT_STATUS 0x200
+ #define MTK_WED_INT_MASK 0x204
+@@ -125,6 +146,7 @@ struct mtk_wdma_desc {
+ #define MTK_WED_RESET_IDX_RX GENMASK(17, 16)
+
+ #define MTK_WED_TX_MIB(_n) (0x2a0 + (_n) * 4)
++#define MTK_WED_RX_MIB(_n) (0x2e0 + (_n) * 4)
+
+ #define MTK_WED_RING_TX(_n) (0x300 + (_n) * 0x10)
+
+@@ -155,21 +177,62 @@ struct mtk_wdma_desc {
+ #define MTK_WED_WPDMA_GLO_CFG_BYTE_SWAP BIT(29)
+ #define MTK_WED_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31)
+
++/* CONFIG_MEDIATEK_NETSYS_V2 */
++#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC BIT(4)
++#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_PKT_PROC BIT(5)
++#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC BIT(6)
++#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_CRX_SYNC BIT(7)
++#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_VER GENMASK(18, 16)
++#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNSUPPORT_FMT BIT(19)
++#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UEVENT_PKT_FMT_CHK BIT(20)
++#define MTK_WED_WPDMA_GLO_CFG_RX_DDONE2_WR BIT(21)
++#define MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP BIT(24)
++#define MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV BIT(28)
++
+ #define MTK_WED_WPDMA_RESET_IDX 0x50c
+ #define MTK_WED_WPDMA_RESET_IDX_TX GENMASK(3, 0)
+ #define MTK_WED_WPDMA_RESET_IDX_RX GENMASK(17, 16)
+
++#define MTK_WED_WPDMA_CTRL 0x518
++#define MTK_WED_WPDMA_CTRL_SDL1_FIXED BIT(31)
++
+ #define MTK_WED_WPDMA_INT_CTRL 0x520
+ #define MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV BIT(21)
+
+ #define MTK_WED_WPDMA_INT_MASK 0x524
+
++#define MTK_WED_WPDMA_INT_CTRL_TX 0x530
++#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN BIT(0)
++#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR BIT(1)
++#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG GENMASK(6, 2)
++#define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN BIT(8)
++#define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR BIT(9)
++#define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG GENMASK(14, 10)
++
++#define MTK_WED_WPDMA_INT_CTRL_RX 0x534
++
++#define MTK_WED_WPDMA_INT_CTRL_TX_FREE 0x538
++#define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN BIT(0)
++#define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR BIT(1)
++#define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG GENMASK(6, 2)
++
+ #define MTK_WED_PCIE_CFG_BASE 0x560
+
++#define MTK_WED_PCIE_CFG_BASE 0x560
++#define MTK_WED_PCIE_CFG_INTM 0x564
++#define MTK_WED_PCIE_CFG_MSIS 0x568
+ #define MTK_WED_PCIE_INT_TRIGGER 0x570
+ #define MTK_WED_PCIE_INT_TRIGGER_STATUS BIT(16)
+
++#define MTK_WED_PCIE_INT_CTRL 0x57c
++#define MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA BIT(20)
++#define MTK_WED_PCIE_INT_CTRL_SRC_SEL GENMASK(17, 16)
++#define MTK_WED_PCIE_INT_CTRL_POLL_EN GENMASK(13, 12)
++
+ #define MTK_WED_WPDMA_CFG_BASE 0x580
++#define MTK_WED_WPDMA_CFG_INT_MASK 0x584
++#define MTK_WED_WPDMA_CFG_TX 0x588
++#define MTK_WED_WPDMA_CFG_TX_FREE 0x58c
+
+ #define MTK_WED_WPDMA_TX_MIB(_n) (0x5a0 + (_n) * 4)
+ #define MTK_WED_WPDMA_TX_COHERENT_MIB(_n) (0x5d0 + (_n) * 4)
+@@ -203,15 +266,24 @@ struct mtk_wdma_desc {
+ #define MTK_WED_WDMA_RESET_IDX_RX GENMASK(17, 16)
+ #define MTK_WED_WDMA_RESET_IDX_DRV GENMASK(25, 24)
+
++#define MTK_WED_WDMA_INT_CLR 0xa24
++#define MTK_WED_WDMA_INT_CLR_RX_DONE GENMASK(17, 16)
++
+ #define MTK_WED_WDMA_INT_TRIGGER 0xa28
+ #define MTK_WED_WDMA_INT_TRIGGER_RX_DONE GENMASK(17, 16)
+
+ #define MTK_WED_WDMA_INT_CTRL 0xa2c
+ #define MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL GENMASK(17, 16)
+
++#define MTK_WED_WDMA_CFG_BASE 0xaa0
+ #define MTK_WED_WDMA_OFFSET0 0xaa4
+ #define MTK_WED_WDMA_OFFSET1 0xaa8
+
++#define MTK_WED_WDMA_OFST0_GLO_INTS GENMASK(15, 0)
++#define MTK_WED_WDMA_OFST0_GLO_CFG GENMASK(31, 16)
++#define MTK_WED_WDMA_OFST1_TX_CTRL GENMASK(15, 0)
++#define MTK_WED_WDMA_OFST1_RX_CTRL GENMASK(31, 16)
++
+ #define MTK_WED_WDMA_RX_MIB(_n) (0xae0 + (_n) * 4)
+ #define MTK_WED_WDMA_RX_RECYCLE_MIB(_n) (0xae8 + (_n) * 4)
+ #define MTK_WED_WDMA_RX_PROCESSED_MIB(_n) (0xaf0 + (_n) * 4)
+@@ -221,6 +293,7 @@ struct mtk_wdma_desc {
+ #define MTK_WED_RING_OFS_CPU_IDX 0x08
+ #define MTK_WED_RING_OFS_DMA_IDX 0x0c
+
++#define MTK_WDMA_RING_TX(_n) (0x000 + (_n) * 0x10)
+ #define MTK_WDMA_RING_RX(_n) (0x100 + (_n) * 0x10)
+
+ #define MTK_WDMA_GLO_CFG 0x204
+@@ -234,6 +307,8 @@ struct mtk_wdma_desc {
+ #define MTK_WDMA_RESET_IDX_TX GENMASK(3, 0)
+ #define MTK_WDMA_RESET_IDX_RX GENMASK(17, 16)
+
++#define MTK_WDMA_INT_STATUS 0x220
++
+ #define MTK_WDMA_INT_MASK 0x228
+ #define MTK_WDMA_INT_MASK_TX_DONE GENMASK(3, 0)
+ #define MTK_WDMA_INT_MASK_RX_DONE GENMASK(17, 16)
+--- a/include/linux/soc/mediatek/mtk_wed.h
++++ b/include/linux/soc/mediatek/mtk_wed.h
+@@ -14,6 +14,7 @@ struct mtk_wdma_desc;
+ struct mtk_wed_ring {
+ struct mtk_wdma_desc *desc;
+ dma_addr_t desc_phys;
++ u32 desc_size;
+ int size;
+
+ u32 reg_base;
+@@ -45,10 +46,17 @@ struct mtk_wed_device {
+ struct pci_dev *pci_dev;
+
+ u32 wpdma_phys;
++ u32 wpdma_int;
++ u32 wpdma_mask;
++ u32 wpdma_tx;
++ u32 wpdma_txfree;
+
+ u16 token_start;
+ unsigned int nbuf;
+
++ u8 tx_tbit[MTK_WED_TX_QUEUES];
++ u8 txfree_tbit;
++
+ u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
+ int (*offload_enable)(struct mtk_wed_device *wed);
+ void (*offload_disable)(struct mtk_wed_device *wed);
--- /dev/null
+From 6e1df49f330dce7c58a39d6772f1385b6887bb03 Mon Sep 17 00:00:00 2001
+Message-Id: <6e1df49f330dce7c58a39d6772f1385b6887bb03.1662990860.git.lorenzo@kernel.org>
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Thu, 8 Sep 2022 11:26:10 +0200
+Subject: [PATCH net-next] net: ethernet: mtk_eth_wed: add axi bus support
+
+In addition to the PCIe bus, introduce AXI bus support in the mtk wed
+driver. The AXI bus is used to connect the mt7986-wmac SoC available on
+mt7986 devices.
+
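+A WLAN driver selects the bus through the new wlan.bus_type field and
+the pci_dev/platform_dev union. A hypothetical AXI consumer would look
+roughly like this (mtk_wed_device_attach() is the existing attach entry
+point; wed and pdev are placeholders):
+
+	wed->wlan.bus_type = MTK_WED_BUS_AXI;
+	wed->wlan.platform_dev = pdev;	/* union with wlan.pci_dev */
+
+	if (mtk_wed_device_attach(wed) < 0)
+		return -ENODEV;	/* no free WED hw instance */
+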
+Co-developed-by: Bo Jiao <Bo.Jiao@mediatek.com>
+Signed-off-by: Bo Jiao <Bo.Jiao@mediatek.com>
+Co-developed-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_wed.c | 116 +++++++++++++------
+ drivers/net/ethernet/mediatek/mtk_wed_regs.h | 2 +
+ include/linux/soc/mediatek/mtk_wed.h | 11 +-
+ 3 files changed, 91 insertions(+), 38 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed.c
+@@ -85,11 +85,31 @@ static struct mtk_wed_hw *
+ mtk_wed_assign(struct mtk_wed_device *dev)
+ {
+ struct mtk_wed_hw *hw;
++ int i;
++
++ if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
++ hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)];
++ if (!hw)
++ return NULL;
++
++ if (!hw->wed_dev)
++ goto out;
++
++ if (hw->version == 1)
++ return NULL;
++
++ /* MT7986 WED devices do not have any pcie slot restrictions */
++ }
++ /* MT7986 PCIE or AXI */
++ for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
++ hw = hw_list[i];
++ if (hw && !hw->wed_dev)
++ goto out;
++ }
+
+- hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)];
+- if (!hw || hw->wed_dev)
+- return NULL;
++ return NULL;
+
++out:
+ hw->wed_dev = dev;
+ return hw;
+ }
+@@ -322,7 +342,6 @@ mtk_wed_stop(struct mtk_wed_device *dev)
+ static void
+ mtk_wed_detach(struct mtk_wed_device *dev)
+ {
+- struct device_node *wlan_node = dev->wlan.pci_dev->dev.of_node;
+ struct mtk_wed_hw *hw = dev->hw;
+
+ mutex_lock(&hw_lock);
+@@ -337,9 +356,14 @@ mtk_wed_detach(struct mtk_wed_device *de
+ mtk_wed_free_buffer(dev);
+ mtk_wed_free_tx_rings(dev);
+
+- if (of_dma_is_coherent(wlan_node) && hw->hifsys)
+- regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
+- BIT(hw->index), BIT(hw->index));
++ if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
++ struct device_node *wlan_node;
++
++ wlan_node = dev->wlan.pci_dev->dev.of_node;
++ if (of_dma_is_coherent(wlan_node) && hw->hifsys)
++ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
++ BIT(hw->index), BIT(hw->index));
++ }
+
+ if (!hw_list[!hw->index]->wed_dev &&
+ hw->eth->dma_dev != hw->eth->dev)
+@@ -356,40 +380,54 @@ mtk_wed_detach(struct mtk_wed_device *de
+ static void
+ mtk_wed_bus_init(struct mtk_wed_device *dev)
+ {
+- struct device_node *np = dev->hw->eth->dev->of_node;
+- struct regmap *regs;
+- u32 val;
+-
+- regs = syscon_regmap_lookup_by_phandle(np, "mediatek,wed-pcie");
+- if (IS_ERR(regs))
+- return;
++ switch (dev->wlan.bus_type) {
++ case MTK_WED_BUS_PCIE: {
++ struct device_node *np = dev->hw->eth->dev->of_node;
++ struct regmap *regs;
++ u32 val;
++
++ regs = syscon_regmap_lookup_by_phandle(np,
++ "mediatek,wed-pcie");
++ if (IS_ERR(regs))
++ break;
+
+- regmap_update_bits(regs, 0, BIT(0), BIT(0));
++ regmap_update_bits(regs, 0, BIT(0), BIT(0));
+
+- wed_w32(dev, MTK_WED_PCIE_INT_CTRL,
+- FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2));
++ wed_w32(dev, MTK_WED_PCIE_INT_CTRL,
++ FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2));
+
+- /* pcie interrupt control: pola/source selection */
+- wed_set(dev, MTK_WED_PCIE_INT_CTRL,
+- MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
+- FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1));
+- wed_r32(dev, MTK_WED_PCIE_INT_CTRL);
+-
+- val = wed_r32(dev, MTK_WED_PCIE_CFG_INTM);
+- val = wed_r32(dev, MTK_WED_PCIE_CFG_BASE);
+- wed_w32(dev, MTK_WED_PCIE_CFG_INTM, PCIE_BASE_ADDR0 | 0x180);
+- wed_w32(dev, MTK_WED_PCIE_CFG_BASE, PCIE_BASE_ADDR0 | 0x184);
+-
+- val = wed_r32(dev, MTK_WED_PCIE_CFG_INTM);
+- val = wed_r32(dev, MTK_WED_PCIE_CFG_BASE);
+-
+- /* pcie interrupt status trigger register */
+- wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
+- wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER);
+-
+- /* pola setting */
+- val = wed_r32(dev, MTK_WED_PCIE_INT_CTRL);
+- wed_set(dev, MTK_WED_PCIE_INT_CTRL, MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA);
++ /* pcie interrupt control: pola/source selection */
++ wed_set(dev, MTK_WED_PCIE_INT_CTRL,
++ MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
++ FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1));
++ wed_r32(dev, MTK_WED_PCIE_INT_CTRL);
++
++ val = wed_r32(dev, MTK_WED_PCIE_CFG_INTM);
++ val = wed_r32(dev, MTK_WED_PCIE_CFG_BASE);
++ wed_w32(dev, MTK_WED_PCIE_CFG_INTM, PCIE_BASE_ADDR0 | 0x180);
++ wed_w32(dev, MTK_WED_PCIE_CFG_BASE, PCIE_BASE_ADDR0 | 0x184);
++
++ val = wed_r32(dev, MTK_WED_PCIE_CFG_INTM);
++ val = wed_r32(dev, MTK_WED_PCIE_CFG_BASE);
++
++ /* pcie interrupt status trigger register */
++ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
++ wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER);
++
++ /* pola setting */
++ val = wed_r32(dev, MTK_WED_PCIE_INT_CTRL);
++ wed_set(dev, MTK_WED_PCIE_INT_CTRL,
++ MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA);
++ break;
++ }
++ case MTK_WED_BUS_AXI:
++ wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
++ MTK_WED_WPDMA_INT_CTRL_SIG_SRC |
++ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_SRC_SEL, 0));
++ break;
++ default:
++ break;
++ }
+ }
+
+ static void
+@@ -800,12 +838,14 @@ mtk_wed_attach(struct mtk_wed_device *de
+ __releases(RCU)
+ {
+ struct mtk_wed_hw *hw;
++ struct device *device;
+ int ret = 0;
+
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
+ "mtk_wed_attach without holding the RCU read lock");
+
+- if (pci_domain_nr(dev->wlan.pci_dev->bus) > 1 ||
++ if ((dev->wlan.bus_type == MTK_WED_BUS_PCIE &&
++ pci_domain_nr(dev->wlan.pci_dev->bus) > 1) ||
+ !try_module_get(THIS_MODULE))
+ ret = -ENODEV;
+
+@@ -823,8 +863,10 @@ mtk_wed_attach(struct mtk_wed_device *de
+ goto out;
+ }
+
+- dev_info(&dev->wlan.pci_dev->dev,
+- "attaching wed device %d version %d\n",
++ device = dev->wlan.bus_type == MTK_WED_BUS_PCIE
++ ? &dev->wlan.pci_dev->dev
++ : &dev->wlan.platform_dev->dev;
++ dev_info(device, "attaching wed device %d version %d\n",
+ hw->index, hw->version);
+
+ dev->hw = hw;
+--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+@@ -198,6 +198,8 @@ struct mtk_wdma_desc {
+
+ #define MTK_WED_WPDMA_INT_CTRL 0x520
+ #define MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV BIT(21)
++#define MTK_WED_WPDMA_INT_CTRL_SIG_SRC BIT(22)
++#define MTK_WED_WPDMA_INT_CTRL_SRC_SEL GENMASK(17, 16)
+
+ #define MTK_WED_WPDMA_INT_MASK 0x524
+
+--- a/include/linux/soc/mediatek/mtk_wed.h
++++ b/include/linux/soc/mediatek/mtk_wed.h
+@@ -11,6 +11,11 @@
+ struct mtk_wed_hw;
+ struct mtk_wdma_desc;
+
++enum mtk_wed_bus_tye {
++ MTK_WED_BUS_PCIE,
++ MTK_WED_BUS_AXI,
++};
++
+ struct mtk_wed_ring {
+ struct mtk_wdma_desc *desc;
+ dma_addr_t desc_phys;
+@@ -43,7 +48,11 @@ struct mtk_wed_device {
+
+ /* filled by driver: */
+ struct {
+- struct pci_dev *pci_dev;
++ union {
++ struct platform_device *platform_dev;
++ struct pci_dev *pci_dev;
++ };
++ enum mtk_wed_bus_tye bus_type;
+
+ u32 wpdma_phys;
+ u32 wpdma_int;
--- /dev/null
+From 93408c858e5dc01d97c55efa721268f63fde2ae5 Mon Sep 17 00:00:00 2001
+Message-Id: <93408c858e5dc01d97c55efa721268f63fde2ae5.1662886034.git.lorenzo@kernel.org>
+In-Reply-To: <e5ecb4f619197b93fa682d722452dc8412864cdb.1662886033.git.lorenzo@kernel.org>
+References: <e5ecb4f619197b93fa682d722452dc8412864cdb.1662886033.git.lorenzo@kernel.org>
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Sat, 3 Sep 2022 18:34:09 +0200
+Subject: [PATCH net-next 4/5] net: ethernet: mtk_eth_soc: introduce flow
+ offloading support for mt7986
+
+Introduce hardware flow offload support for the mt7986 chipset. The PPE
+is not enabled yet on mt7986 since mt76 support is not available yet.
+
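+The v1/v2 differences in the ib1/ib2 field layout are hidden behind
+small inline helpers keyed on the NETSYS_V2 capability, for example
+(taken from the mtk_eth_soc.h hunk below):
+
+	static inline u32 mtk_get_ib1_ts_mask(struct mtk_eth *eth)
+	{
+		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+			return MTK_FOE_IB1_BIND_TIMESTAMP_V2;
+
+		return MTK_FOE_IB1_BIND_TIMESTAMP;
+	}
+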
+Co-developed-by: Bo Jiao <Bo.Jiao@mediatek.com>
+Signed-off-by: Bo Jiao <Bo.Jiao@mediatek.com>
+Co-developed-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 11 +-
+ drivers/net/ethernet/mediatek/mtk_eth_soc.h | 72 ++++++
+ drivers/net/ethernet/mediatek/mtk_ppe.c | 213 +++++++++++-------
+ drivers/net/ethernet/mediatek/mtk_ppe.h | 52 ++++-
+ .../net/ethernet/mediatek/mtk_ppe_offload.c | 49 ++--
+ drivers/net/ethernet/mediatek/mtk_ppe_regs.h | 8 +
+ 6 files changed, 289 insertions(+), 116 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -1858,12 +1858,14 @@ static int mtk_poll_rx(struct napi_struc
+ bytes += skb->len;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
++ reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
+ hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
+ if (hash != MTK_RXD5_FOE_ENTRY)
+ skb_set_hash(skb, jhash_1word(hash, 0),
+ PKT_HASH_TYPE_L4);
+ rxdcsum = &trxd.rxd3;
+ } else {
++ reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
+ hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
+ if (hash != MTK_RXD4_FOE_ENTRY)
+ skb_set_hash(skb, jhash_1word(hash, 0),
+@@ -1877,7 +1879,6 @@ static int mtk_poll_rx(struct napi_struc
+ skb_checksum_none_assert(skb);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+- reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
+ if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
+ mtk_ppe_check_skb(eth->ppe[0], skb, hash);
+
+@@ -4183,7 +4184,7 @@ static const struct mtk_soc_data mt7621_
+ .required_pctl = false,
+ .offload_version = 2,
+ .hash_offset = 2,
+- .foe_entry_size = sizeof(struct mtk_foe_entry),
++ .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma),
+ .rxd_size = sizeof(struct mtk_rx_dma),
+@@ -4203,7 +4204,7 @@ static const struct mtk_soc_data mt7622_
+ .required_pctl = false,
+ .offload_version = 2,
+ .hash_offset = 2,
+- .foe_entry_size = sizeof(struct mtk_foe_entry),
++ .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma),
+ .rxd_size = sizeof(struct mtk_rx_dma),
+@@ -4222,7 +4223,7 @@ static const struct mtk_soc_data mt7623_
+ .required_pctl = true,
+ .offload_version = 2,
+ .hash_offset = 2,
+- .foe_entry_size = sizeof(struct mtk_foe_entry),
++ .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma),
+ .rxd_size = sizeof(struct mtk_rx_dma),
+@@ -4254,9 +4255,11 @@ static const struct mtk_soc_data mt7986_
+ .reg_map = &mt7986_reg_map,
+ .ana_rgc3 = 0x128,
+ .caps = MT7986_CAPS,
++ .hw_features = MTK_HW_FEATURES,
+ .required_clks = MT7986_CLKS_BITMAP,
+ .required_pctl = false,
+ .hash_offset = 4,
++ .foe_entry_size = sizeof(struct mtk_foe_entry),
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma_v2),
+ .rxd_size = sizeof(struct mtk_rx_dma_v2),
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -1150,6 +1150,78 @@ mtk_foe_get_entry(struct mtk_ppe *ppe, u
+ return ppe->foe_table + hash * soc->foe_entry_size;
+ }
+
++static inline u32 mtk_get_ib1_ts_mask(struct mtk_eth *eth)
++{
++ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
++ return MTK_FOE_IB1_BIND_TIMESTAMP_V2;
++
++ return MTK_FOE_IB1_BIND_TIMESTAMP;
++}
++
++static inline u32 mtk_get_ib1_ppoe_mask(struct mtk_eth *eth)
++{
++ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
++ return MTK_FOE_IB1_BIND_PPPOE_V2;
++
++ return MTK_FOE_IB1_BIND_PPPOE;
++}
++
++static inline u32 mtk_get_ib1_vlan_tag_mask(struct mtk_eth *eth)
++{
++ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
++ return MTK_FOE_IB1_BIND_VLAN_TAG_V2;
++
++ return MTK_FOE_IB1_BIND_VLAN_TAG;
++}
++
++static inline u32 mtk_get_ib1_vlan_layer_mask(struct mtk_eth *eth)
++{
++ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
++ return MTK_FOE_IB1_BIND_VLAN_LAYER_V2;
++
++ return MTK_FOE_IB1_BIND_VLAN_LAYER;
++}
++
++static inline u32 mtk_prep_ib1_vlan_layer(struct mtk_eth *eth, u32 val)
++{
++ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
++ return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER_V2, val);
++
++ return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, val);
++}
++
++static inline u32 mtk_get_ib1_vlan_layer(struct mtk_eth *eth, u32 val)
++{
++ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
++ return FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER_V2, val);
++
++ return FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, val);
++}
++
++static inline u32 mtk_get_ib1_pkt_type_mask(struct mtk_eth *eth)
++{
++ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
++ return MTK_FOE_IB1_PACKET_TYPE_V2;
++
++ return MTK_FOE_IB1_PACKET_TYPE;
++}
++
++static inline u32 mtk_get_ib1_pkt_type(struct mtk_eth *eth, u32 val)
++{
++ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
++ return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE_V2, val);
++
++ return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, val);
++}
++
++static inline u32 mtk_get_ib2_multicast_mask(struct mtk_eth *eth)
++{
++ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
++ return MTK_FOE_IB2_MULTICAST_V2;
++
++ return MTK_FOE_IB2_MULTICAST;
++}
++
+ /* read the hardware status register */
+ void mtk_stats_update_mac(struct mtk_mac *mac);
+
+--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
+@@ -56,7 +56,7 @@ static u32 ppe_clear(struct mtk_ppe *ppe
+
+ static u32 mtk_eth_timestamp(struct mtk_eth *eth)
+ {
+- return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
++ return mtk_r32(eth, 0x0010) & mtk_get_ib1_ts_mask(eth);
+ }
+
+ static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
+@@ -93,7 +93,7 @@ static u32 mtk_ppe_hash_entry(struct mtk
+ u32 hv1, hv2, hv3;
+ u32 hash;
+
+- switch (FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, e->ib1)) {
++ switch (mtk_get_ib1_pkt_type(eth, e->ib1)) {
+ case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
+ case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
+ hv1 = e->ipv4.orig.ports;
+@@ -129,9 +129,9 @@ static u32 mtk_ppe_hash_entry(struct mtk
+ }
+
+ static inline struct mtk_foe_mac_info *
+-mtk_foe_entry_l2(struct mtk_foe_entry *entry)
++mtk_foe_entry_l2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
+ {
+- int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
++ int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
+
+ if (type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return &entry->bridge.l2;
+@@ -143,9 +143,9 @@ mtk_foe_entry_l2(struct mtk_foe_entry *e
+ }
+
+ static inline u32 *
+-mtk_foe_entry_ib2(struct mtk_foe_entry *entry)
++mtk_foe_entry_ib2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
+ {
+- int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
++ int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
+
+ if (type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return &entry->bridge.ib2;
+@@ -156,27 +156,38 @@ mtk_foe_entry_ib2(struct mtk_foe_entry *
+ return &entry->ipv4.ib2;
+ }
+
+-int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
+- u8 pse_port, u8 *src_mac, u8 *dest_mac)
++int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry,
++ int type, int l4proto, u8 pse_port, u8 *src_mac,
++ u8 *dest_mac)
+ {
+ struct mtk_foe_mac_info *l2;
+ u32 ports_pad, val;
+
+ memset(entry, 0, sizeof(*entry));
+
+- val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
+- FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
+- FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
+- MTK_FOE_IB1_BIND_TTL |
+- MTK_FOE_IB1_BIND_CACHE;
+- entry->ib1 = val;
+-
+- val = FIELD_PREP(MTK_FOE_IB2_PORT_MG, 0x3f) |
+- FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f) |
+- FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port);
++ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
++ val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
++ FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE_V2, type) |
++ FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
++ MTK_FOE_IB1_BIND_CACHE_V2 | MTK_FOE_IB1_BIND_TTL_V2;
++ entry->ib1 = val;
++
++ val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, pse_port) |
++ FIELD_PREP(MTK_FOE_IB2_PORT_AG_V2, 0xf);
++ } else {
++ val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
++ FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
++ FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
++ MTK_FOE_IB1_BIND_CACHE | MTK_FOE_IB1_BIND_TTL;
++ entry->ib1 = val;
++
++ val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port) |
++ FIELD_PREP(MTK_FOE_IB2_PORT_MG, 0x3f) |
++ FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f);
++ }
+
+ if (is_multicast_ether_addr(dest_mac))
+- val |= MTK_FOE_IB2_MULTICAST;
++ val |= mtk_get_ib2_multicast_mask(eth);
+
+ ports_pad = 0xa5a5a500 | (l4proto & 0xff);
+ if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
+@@ -210,24 +221,30 @@ int mtk_foe_entry_prepare(struct mtk_foe
+ return 0;
+ }
+
+-int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port)
++int mtk_foe_entry_set_pse_port(struct mtk_eth *eth,
++ struct mtk_foe_entry *entry, u8 port)
+ {
+- u32 *ib2 = mtk_foe_entry_ib2(entry);
+- u32 val;
++ u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
++ u32 val = *ib2;
+
+- val = *ib2;
+- val &= ~MTK_FOE_IB2_DEST_PORT;
+- val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);
++ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
++ val &= ~MTK_FOE_IB2_DEST_PORT_V2;
++ val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, port);
++ } else {
++ val &= ~MTK_FOE_IB2_DEST_PORT;
++ val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);
++ }
+ *ib2 = val;
+
+ return 0;
+ }
+
+-int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool egress,
++int mtk_foe_entry_set_ipv4_tuple(struct mtk_eth *eth,
++ struct mtk_foe_entry *entry, bool egress,
+ __be32 src_addr, __be16 src_port,
+ __be32 dest_addr, __be16 dest_port)
+ {
+- int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
++ int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
+ struct mtk_ipv4_tuple *t;
+
+ switch (type) {
+@@ -262,11 +279,12 @@ int mtk_foe_entry_set_ipv4_tuple(struct
+ return 0;
+ }
+
+-int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
++int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth,
++ struct mtk_foe_entry *entry,
+ __be32 *src_addr, __be16 src_port,
+ __be32 *dest_addr, __be16 dest_port)
+ {
+- int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
++ int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
+ u32 *src, *dest;
+ int i;
+
+@@ -297,39 +315,41 @@ int mtk_foe_entry_set_ipv6_tuple(struct
+ return 0;
+ }
+
+-int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port)
++int mtk_foe_entry_set_dsa(struct mtk_eth *eth, struct mtk_foe_entry *entry,
++ int port)
+ {
+- struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
++ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
+
+ l2->etype = BIT(port);
+
+- if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER))
+- entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
++ if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)))
++ entry->ib1 |= mtk_prep_ib1_vlan_layer(eth, 1);
+ else
+ l2->etype |= BIT(8);
+
+- entry->ib1 &= ~MTK_FOE_IB1_BIND_VLAN_TAG;
++ entry->ib1 &= ~mtk_get_ib1_vlan_tag_mask(eth);
+
+ return 0;
+ }
+
+-int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid)
++int mtk_foe_entry_set_vlan(struct mtk_eth *eth, struct mtk_foe_entry *entry,
++ int vid)
+ {
+- struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
++ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
+
+- switch (FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, entry->ib1)) {
++	switch (mtk_get_ib1_vlan_layer(eth, entry->ib1)) {
+ case 0:
+- entry->ib1 |= MTK_FOE_IB1_BIND_VLAN_TAG |
+- FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
++ entry->ib1 |= mtk_get_ib1_vlan_tag_mask(eth) |
++ mtk_prep_ib1_vlan_layer(eth, 1);
+ l2->vlan1 = vid;
+ return 0;
+ case 1:
+- if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG)) {
++ if (!(entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth))) {
+ l2->vlan1 = vid;
+ l2->etype |= BIT(8);
+ } else {
+ l2->vlan2 = vid;
+- entry->ib1 += FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
++ entry->ib1 += mtk_prep_ib1_vlan_layer(eth, 1);
+ }
+ return 0;
+ default:
+@@ -337,34 +357,42 @@ int mtk_foe_entry_set_vlan(struct mtk_fo
+ }
+ }
+
+-int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid)
++int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
++ int sid)
+ {
+- struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
++ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
+
+- if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER) ||
+- (entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG))
++ if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)) ||
++ (entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth)))
+ l2->etype = ETH_P_PPP_SES;
+
+- entry->ib1 |= MTK_FOE_IB1_BIND_PPPOE;
++ entry->ib1 |= mtk_get_ib1_ppoe_mask(eth);
+ l2->pppoe_id = sid;
+
+ return 0;
+ }
+
+-int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
+- int bss, int wcid)
++int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
++ int wdma_idx, int txq, int bss, int wcid)
+ {
+- struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
+- u32 *ib2 = mtk_foe_entry_ib2(entry);
++ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
++ u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
+
+- *ib2 &= ~MTK_FOE_IB2_PORT_MG;
+- *ib2 |= MTK_FOE_IB2_WDMA_WINFO;
+- if (wdma_idx)
+- *ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
+-
+- l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
+- FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
+- FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);
++ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
++ *ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
++ *ib2 |= FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) |
++ MTK_FOE_IB2_WDMA_WINFO_V2;
++ l2->winfo = FIELD_PREP(MTK_FOE_WINFO_WCID, wcid) |
++ FIELD_PREP(MTK_FOE_WINFO_BSS, bss);
++ } else {
++ *ib2 &= ~MTK_FOE_IB2_PORT_MG;
++ *ib2 |= MTK_FOE_IB2_WDMA_WINFO;
++ if (wdma_idx)
++ *ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
++ l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
++ FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
++ FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);
++ }
+
+ return 0;
+ }
+@@ -376,14 +404,15 @@ static inline bool mtk_foe_entry_usable(
+ }
+
+ static bool
+-mtk_flow_entry_match(struct mtk_flow_entry *entry, struct mtk_foe_entry *data)
++mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry,
++ struct mtk_foe_entry *data)
+ {
+ int type, len;
+
+ if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
+ return false;
+
+- type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
++ type = mtk_get_ib1_pkt_type(eth, entry->data.ib1);
+ if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
+ len = offsetof(struct mtk_foe_entry, ipv6._rsv);
+ else
+@@ -427,14 +456,12 @@ __mtk_foe_entry_clear(struct mtk_ppe *pp
+
+ static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1)
+ {
+- u16 timestamp;
+- u16 now;
+-
+- now = mtk_eth_timestamp(ppe->eth) & MTK_FOE_IB1_BIND_TIMESTAMP;
+- timestamp = ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;
++ u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
++ u16 now = mtk_eth_timestamp(ppe->eth);
++ u16 timestamp = ib1 & ib1_ts_mask;
+
+ if (timestamp > now)
+- return MTK_FOE_IB1_BIND_TIMESTAMP + 1 - timestamp + now;
++ return ib1_ts_mask + 1 - timestamp + now;
+ else
+ return now - timestamp;
+ }
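A quick worked example of the wraparound handling above (values illustrative): with the 8-bit NETSYS_V2 mask, ib1_ts_mask = 0xff, a bound timestamp of 250 and now = 5 means the hardware counter has wrapped since the entry was bound, so the idle time comes out as 0xff + 1 - 250 + 5 = 11 ticks, rather than the negative 5 - 250 that a naive subtraction would give.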
+@@ -442,6 +469,7 @@ static int __mtk_foe_entry_idle_time(str
+ static void
+ mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+ {
++ u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
+ struct mtk_flow_entry *cur;
+ struct mtk_foe_entry *hwe;
+ struct hlist_node *tmp;
+@@ -466,8 +494,8 @@ mtk_flow_entry_update_l2(struct mtk_ppe
+ continue;
+
+ idle = cur_idle;
+- entry->data.ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
+- entry->data.ib1 |= hwe->ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;
++ entry->data.ib1 &= ~ib1_ts_mask;
++ entry->data.ib1 |= hwe->ib1 & ib1_ts_mask;
+ }
+ }
+
+@@ -489,7 +517,7 @@ mtk_flow_entry_update(struct mtk_ppe *pp
+
+ hwe = mtk_foe_get_entry(ppe, entry->hash);
+ memcpy(&foe, hwe, ppe->eth->soc->foe_entry_size);
+- if (!mtk_flow_entry_match(entry, &foe)) {
++ if (!mtk_flow_entry_match(ppe->eth, entry, &foe)) {
+ entry->hash = 0xffff;
+ goto out;
+ }
+@@ -504,16 +532,22 @@ static void
+ __mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
+ u16 hash)
+ {
++ struct mtk_eth *eth = ppe->eth;
++ u16 timestamp = mtk_eth_timestamp(eth);
+ struct mtk_foe_entry *hwe;
+- u16 timestamp;
+
+- timestamp = mtk_eth_timestamp(ppe->eth);
+- timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP;
+- entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
+- entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp);
++ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
++ entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP_V2;
++ entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP_V2,
++ timestamp);
++ } else {
++ entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
++ entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP,
++ timestamp);
++ }
+
+ hwe = mtk_foe_get_entry(ppe, hash);
+- memcpy(&hwe->data, &entry->data, ppe->eth->soc->foe_entry_size);
++ memcpy(&hwe->data, &entry->data, eth->soc->foe_entry_size);
+ wmb();
+ hwe->ib1 = entry->ib1;
+
+@@ -540,8 +574,8 @@ mtk_foe_entry_commit_l2(struct mtk_ppe *
+
+ int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+ {
+- int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
+ const struct mtk_soc_data *soc = ppe->eth->soc;
++ int type = mtk_get_ib1_pkt_type(ppe->eth, entry->data.ib1);
+ u32 hash;
+
+ if (type == MTK_PPE_PKT_TYPE_BRIDGE)
+@@ -564,7 +598,7 @@ mtk_foe_entry_commit_subflow(struct mtk_
+ struct mtk_flow_entry *flow_info;
+ struct mtk_foe_entry foe = {}, *hwe;
+ struct mtk_foe_mac_info *l2;
+- u32 ib1_mask = MTK_FOE_IB1_PACKET_TYPE | MTK_FOE_IB1_UDP;
++ u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP;
+ int type;
+
+ flow_info = kzalloc(offsetof(struct mtk_flow_entry, l2_data.end),
+@@ -584,16 +618,16 @@ mtk_foe_entry_commit_subflow(struct mtk_
+ foe.ib1 &= ib1_mask;
+ foe.ib1 |= entry->data.ib1 & ~ib1_mask;
+
+- l2 = mtk_foe_entry_l2(&foe);
++ l2 = mtk_foe_entry_l2(ppe->eth, &foe);
+ memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));
+
+- type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, foe.ib1);
++ type = mtk_get_ib1_pkt_type(ppe->eth, foe.ib1);
+ if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
+ memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
+ else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
+ l2->etype = ETH_P_IPV6;
+
+- *mtk_foe_entry_ib2(&foe) = entry->data.bridge.ib2;
++ *mtk_foe_entry_ib2(ppe->eth, &foe) = entry->data.bridge.ib2;
+
+ __mtk_foe_entry_commit(ppe, &foe, hash);
+ }
+@@ -626,7 +660,7 @@ void __mtk_ppe_check_skb(struct mtk_ppe
+ continue;
+ }
+
+- if (found || !mtk_flow_entry_match(entry, hwe)) {
++ if (found || !mtk_flow_entry_match(ppe->eth, entry, hwe)) {
+ if (entry->hash != 0xffff)
+ entry->hash = 0xffff;
+ continue;
+@@ -771,6 +805,8 @@ void mtk_ppe_start(struct mtk_ppe *ppe)
+ MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
+ FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
+ MTK_PPE_ENTRIES_SHIFT);
++ if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
++ val |= MTK_PPE_TB_CFG_INFO_SEL;
+ ppe_w32(ppe, MTK_PPE_TB_CFG, val);
+
+ ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK,
+@@ -778,15 +814,21 @@ void mtk_ppe_start(struct mtk_ppe *ppe)
+
+ mtk_ppe_cache_enable(ppe, true);
+
+- val = MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
+- MTK_PPE_FLOW_CFG_IP4_UDP_FRAG |
+- MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
++ val = MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
+ MTK_PPE_FLOW_CFG_IP6_5T_ROUTE |
+ MTK_PPE_FLOW_CFG_IP6_6RD |
+ MTK_PPE_FLOW_CFG_IP4_NAT |
+ MTK_PPE_FLOW_CFG_IP4_NAPT |
+ MTK_PPE_FLOW_CFG_IP4_DSLITE |
+ MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
++ if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
++ val |= MTK_PPE_MD_TOAP_BYP_CRSN0 |
++ MTK_PPE_MD_TOAP_BYP_CRSN1 |
++ MTK_PPE_MD_TOAP_BYP_CRSN2 |
++ MTK_PPE_FLOW_CFG_IP4_HASH_GRE_KEY;
++ else
++ val |= MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
++ MTK_PPE_FLOW_CFG_IP4_UDP_FRAG;
+ ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);
+
+ val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) |
+@@ -820,6 +862,11 @@ void mtk_ppe_start(struct mtk_ppe *ppe)
+ ppe_w32(ppe, MTK_PPE_GLO_CFG, val);
+
+ ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);
++
++ if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2)) {
++ ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT1, 0xcb777);
++ ppe_w32(ppe, MTK_PPE_SBW_CTRL, 0x7f);
++ }
+ }
+
+ int mtk_ppe_stop(struct mtk_ppe *ppe)
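The mtk_get_ib1_*() / mtk_prep_ib1_*() helpers used throughout the hunks above are not part of this excerpt; they are introduced elsewhere in the series and simply select between the original and *_V2 field layouts. A minimal sketch of the presumed pattern, reusing the MTK_HAS_CAPS(..., MTK_NETSYS_V2) check and the masks defined in mtk_ppe.h below:

	static inline u32 mtk_get_ib1_ts_mask(struct mtk_eth *eth)
	{
		/* NETSYS_V2 narrows the bind timestamp to ib1 bits 7..0 */
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
			return MTK_FOE_IB1_BIND_TIMESTAMP_V2;

		return MTK_FOE_IB1_BIND_TIMESTAMP;
	}

	static inline u32 mtk_prep_ib1_vlan_layer(struct mtk_eth *eth, u32 val)
	{
		/* Encode the VLAN layer count into the layout-specific field */
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
			return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER_V2, val);

		return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, val);
	}

	static inline u32 mtk_get_ib1_pkt_type(struct mtk_eth *eth, u32 val)
	{
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
			return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE_V2, val);

		return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, val);
	}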
+--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
++++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
+@@ -32,6 +32,15 @@
+ #define MTK_FOE_IB1_UDP BIT(30)
+ #define MTK_FOE_IB1_STATIC BIT(31)
+
++/* CONFIG_MEDIATEK_NETSYS_V2 */
++#define MTK_FOE_IB1_BIND_TIMESTAMP_V2 GENMASK(7, 0)
++#define MTK_FOE_IB1_BIND_VLAN_LAYER_V2 GENMASK(16, 14)
++#define MTK_FOE_IB1_BIND_PPPOE_V2 BIT(17)
++#define MTK_FOE_IB1_BIND_VLAN_TAG_V2 BIT(18)
++#define MTK_FOE_IB1_BIND_CACHE_V2 BIT(20)
++#define MTK_FOE_IB1_BIND_TTL_V2 BIT(22)
++#define MTK_FOE_IB1_PACKET_TYPE_V2 GENMASK(27, 23)
++
+ enum {
+ MTK_PPE_PKT_TYPE_IPV4_HNAPT = 0,
+ MTK_PPE_PKT_TYPE_IPV4_ROUTE = 1,
+@@ -53,14 +62,25 @@ enum {
+
+ #define MTK_FOE_IB2_PORT_MG GENMASK(17, 12)
+
++#define MTK_FOE_IB2_RX_IDX GENMASK(18, 17)
+ #define MTK_FOE_IB2_PORT_AG GENMASK(23, 18)
+
+ #define MTK_FOE_IB2_DSCP GENMASK(31, 24)
+
++/* CONFIG_MEDIATEK_NETSYS_V2 */
++#define MTK_FOE_IB2_PORT_MG_V2 BIT(7)
++#define MTK_FOE_IB2_DEST_PORT_V2 GENMASK(12, 9)
++#define MTK_FOE_IB2_MULTICAST_V2 BIT(13)
++#define MTK_FOE_IB2_WDMA_WINFO_V2 BIT(19)
++#define MTK_FOE_IB2_PORT_AG_V2 GENMASK(23, 20)
++
+ #define MTK_FOE_VLAN2_WINFO_BSS GENMASK(5, 0)
+ #define MTK_FOE_VLAN2_WINFO_WCID GENMASK(13, 6)
+ #define MTK_FOE_VLAN2_WINFO_RING GENMASK(15, 14)
+
++#define MTK_FOE_WINFO_BSS GENMASK(5, 0)
++#define MTK_FOE_WINFO_WCID GENMASK(15, 6)
++
+ enum {
+ MTK_FOE_STATE_INVALID,
+ MTK_FOE_STATE_UNBIND,
+@@ -81,6 +101,9 @@ struct mtk_foe_mac_info {
+
+ u16 pppoe_id;
+ u16 src_mac_lo;
++
++ u16 minfo;
++ u16 winfo;
+ };
+
+ /* software-only entry type */
+@@ -198,7 +221,7 @@ struct mtk_foe_entry {
+ struct mtk_foe_ipv4_dslite dslite;
+ struct mtk_foe_ipv6 ipv6;
+ struct mtk_foe_ipv6_6rd ipv6_6rd;
+- u32 data[19];
++ u32 data[23];
+ };
+ };
+
+@@ -306,20 +329,27 @@ mtk_ppe_check_skb(struct mtk_ppe *ppe, s
+ __mtk_ppe_check_skb(ppe, skb, hash);
+ }
+
+-int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
+- u8 pse_port, u8 *src_mac, u8 *dest_mac);
+-int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port);
+-int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool orig,
++int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry,
++ int type, int l4proto, u8 pse_port, u8 *src_mac,
++ u8 *dest_mac);
++int mtk_foe_entry_set_pse_port(struct mtk_eth *eth,
++ struct mtk_foe_entry *entry, u8 port);
++int mtk_foe_entry_set_ipv4_tuple(struct mtk_eth *eth,
++ struct mtk_foe_entry *entry, bool orig,
+ __be32 src_addr, __be16 src_port,
+ __be32 dest_addr, __be16 dest_port);
+-int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
++int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth,
++ struct mtk_foe_entry *entry,
+ __be32 *src_addr, __be16 src_port,
+ __be32 *dest_addr, __be16 dest_port);
+-int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port);
+-int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid);
+-int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid);
+-int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
+- int bss, int wcid);
++int mtk_foe_entry_set_dsa(struct mtk_eth *eth, struct mtk_foe_entry *entry,
++ int port);
++int mtk_foe_entry_set_vlan(struct mtk_eth *eth, struct mtk_foe_entry *entry,
++ int vid);
++int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
++ int sid);
++int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
++ int wdma_idx, int txq, int bss, int wcid);
+ int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+ void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+ int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+@@ -52,18 +52,19 @@ static const struct rhashtable_params mt
+ };
+
+ static int
+-mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data,
+- bool egress)
++mtk_flow_set_ipv4_addr(struct mtk_eth *eth, struct mtk_foe_entry *foe,
++ struct mtk_flow_data *data, bool egress)
+ {
+- return mtk_foe_entry_set_ipv4_tuple(foe, egress,
++ return mtk_foe_entry_set_ipv4_tuple(eth, foe, egress,
+ data->v4.src_addr, data->src_port,
+ data->v4.dst_addr, data->dst_port);
+ }
+
+ static int
+-mtk_flow_set_ipv6_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data)
++mtk_flow_set_ipv6_addr(struct mtk_eth *eth, struct mtk_foe_entry *foe,
++ struct mtk_flow_data *data)
+ {
+- return mtk_foe_entry_set_ipv6_tuple(foe,
++ return mtk_foe_entry_set_ipv6_tuple(eth, foe,
+ data->v6.src_addr.s6_addr32, data->src_port,
+ data->v6.dst_addr.s6_addr32, data->dst_port);
+ }
+@@ -190,16 +191,29 @@ mtk_flow_set_output_device(struct mtk_et
+ int pse_port, dsa_port;
+
+ if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
+- mtk_foe_entry_set_wdma(foe, info.wdma_idx, info.queue, info.bss,
+- info.wcid);
+- pse_port = 3;
++ mtk_foe_entry_set_wdma(eth, foe, info.wdma_idx, info.queue,
++ info.bss, info.wcid);
++ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
++ switch (info.wdma_idx) {
++ case 0:
++ pse_port = 8;
++ break;
++ case 1:
++ pse_port = 9;
++ break;
++ default:
++ return -EINVAL;
++ }
++ } else {
++ pse_port = 3;
++ }
+ *wed_index = info.wdma_idx;
+ goto out;
+ }
+
+ dsa_port = mtk_flow_get_dsa_port(&dev);
+ if (dsa_port >= 0)
+- mtk_foe_entry_set_dsa(foe, dsa_port);
++ mtk_foe_entry_set_dsa(eth, foe, dsa_port);
+
+ if (dev == eth->netdev[0])
+ pse_port = 1;
+@@ -209,7 +223,7 @@ mtk_flow_set_output_device(struct mtk_et
+ return -EOPNOTSUPP;
+
+ out:
+- mtk_foe_entry_set_pse_port(foe, pse_port);
++ mtk_foe_entry_set_pse_port(eth, foe, pse_port);
+
+ return 0;
+ }
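An aside on the magic numbers in the switch above: on NETSYS_V2 the two WDMA instances sit behind PSE ports 8 and 9, while the single V1 WDMA path uses PSE port 3. The series uses the raw numbers directly; named constants along these lines (purely illustrative, not defined by this patch) would read:

	/* Hypothetical names for the PSE ports targeted above */
	#define MTK_PSE_PORT_WDMA_V1	3	/* NETSYS_V1 WDMA */
	#define MTK_PSE_PORT_WDMA0_V2	8	/* NETSYS_V2 WDMA0 */
	#define MTK_PSE_PORT_WDMA1_V2	9	/* NETSYS_V2 WDMA1 */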
+@@ -333,9 +347,8 @@ mtk_flow_offload_replace(struct mtk_eth
+ !is_valid_ether_addr(data.eth.h_dest))
+ return -EINVAL;
+
+- err = mtk_foe_entry_prepare(&foe, offload_type, l4proto, 0,
+- data.eth.h_source,
+- data.eth.h_dest);
++ err = mtk_foe_entry_prepare(eth, &foe, offload_type, l4proto, 0,
++ data.eth.h_source, data.eth.h_dest);
+ if (err)
+ return err;
+
+@@ -360,7 +373,7 @@ mtk_flow_offload_replace(struct mtk_eth
+ data.v4.src_addr = addrs.key->src;
+ data.v4.dst_addr = addrs.key->dst;
+
+- mtk_flow_set_ipv4_addr(&foe, &data, false);
++ mtk_flow_set_ipv4_addr(eth, &foe, &data, false);
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+@@ -371,7 +384,7 @@ mtk_flow_offload_replace(struct mtk_eth
+ data.v6.src_addr = addrs.key->src;
+ data.v6.dst_addr = addrs.key->dst;
+
+- mtk_flow_set_ipv6_addr(&foe, &data);
++ mtk_flow_set_ipv6_addr(eth, &foe, &data);
+ }
+
+ flow_action_for_each(i, act, &rule->action) {
+@@ -401,7 +414,7 @@ mtk_flow_offload_replace(struct mtk_eth
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+- err = mtk_flow_set_ipv4_addr(&foe, &data, true);
++ err = mtk_flow_set_ipv4_addr(eth, &foe, &data, true);
+ if (err)
+ return err;
+ }
+@@ -413,10 +426,10 @@ mtk_flow_offload_replace(struct mtk_eth
+ if (data.vlan.proto != htons(ETH_P_8021Q))
+ return -EOPNOTSUPP;
+
+- mtk_foe_entry_set_vlan(&foe, data.vlan.id);
++ mtk_foe_entry_set_vlan(eth, &foe, data.vlan.id);
+ }
+ if (data.pppoe.num == 1)
+- mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid);
++ mtk_foe_entry_set_pppoe(eth, &foe, data.pppoe.sid);
+
+ err = mtk_flow_set_output_device(eth, &foe, odev, data.eth.h_dest,
+ &wed_index);
+--- a/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
++++ b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
+@@ -21,6 +21,9 @@
+ #define MTK_PPE_GLO_CFG_BUSY BIT(31)
+
+ #define MTK_PPE_FLOW_CFG 0x204
++#define MTK_PPE_MD_TOAP_BYP_CRSN0 BIT(1)
++#define MTK_PPE_MD_TOAP_BYP_CRSN1 BIT(2)
++#define MTK_PPE_MD_TOAP_BYP_CRSN2 BIT(3)
+ #define MTK_PPE_FLOW_CFG_IP4_TCP_FRAG BIT(6)
+ #define MTK_PPE_FLOW_CFG_IP4_UDP_FRAG BIT(7)
+ #define MTK_PPE_FLOW_CFG_IP6_3T_ROUTE BIT(8)
+@@ -54,6 +57,7 @@
+ #define MTK_PPE_TB_CFG_HASH_MODE GENMASK(15, 14)
+ #define MTK_PPE_TB_CFG_SCAN_MODE GENMASK(17, 16)
+ #define MTK_PPE_TB_CFG_HASH_DEBUG GENMASK(19, 18)
++#define MTK_PPE_TB_CFG_INFO_SEL BIT(20)
+
+ enum {
+ MTK_PPE_SCAN_MODE_DISABLED,
+@@ -112,6 +116,8 @@ enum {
+ #define MTK_PPE_DEFAULT_CPU_PORT 0x248
+ #define MTK_PPE_DEFAULT_CPU_PORT_MASK(_n) (GENMASK(2, 0) << ((_n) * 4))
+
++#define MTK_PPE_DEFAULT_CPU_PORT1 0x24c
++
+ #define MTK_PPE_MTU_DROP 0x308
+
+ #define MTK_PPE_VLAN_MTU0 0x30c
+@@ -141,4 +147,6 @@ enum {
+ #define MTK_PPE_MIB_CACHE_CTL_EN BIT(0)
+ #define MTK_PPE_MIB_CACHE_CTL_FLUSH BIT(2)
+
++#define MTK_PPE_SBW_CTRL 0x374
++
+ #endif
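For orientation, the net effect of the prototype changes in mtk_ppe.h above is that every FOE-entry helper now takes the struct mtk_eth instance so it can pick the correct ib1/ib2 field layout at runtime. A condensed, hypothetical usage sketch (addresses, ports and IDs are placeholders; error handling omitted):

	struct mtk_foe_entry foe = {};

	mtk_foe_entry_prepare(eth, &foe, MTK_PPE_PKT_TYPE_IPV4_HNAPT,
			      IPPROTO_TCP, 1, src_mac, dest_mac);
	mtk_foe_entry_set_ipv4_tuple(eth, &foe, false,
				     src_addr, src_port,
				     dest_addr, dest_port);
	mtk_foe_entry_set_vlan(eth, &foe, 100);
	mtk_foe_entry_set_pse_port(eth, &foe, 1);

This mirrors the call sites updated in mtk_ppe_offload.c above.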
--- /dev/null
+From b94b02a270471337bef73c44fa3493a521e31a61 Mon Sep 17 00:00:00 2001
+Message-Id: <b94b02a270471337bef73c44fa3493a521e31a61.1662886034.git.lorenzo@kernel.org>
+In-Reply-To: <e5ecb4f619197b93fa682d722452dc8412864cdb.1662886033.git.lorenzo@kernel.org>
+References: <e5ecb4f619197b93fa682d722452dc8412864cdb.1662886033.git.lorenzo@kernel.org>
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Mon, 5 Sep 2022 13:56:13 +0200
+Subject: [PATCH net-next 5/5] net: ethernet: mtk_eth_soc: enable flow
+ offloading support for mt7986
+
+Enable the hardware packet engine and the wireless packet dispatcher
+(WED) for mt7986.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -4258,6 +4258,7 @@ static const struct mtk_soc_data mt7986_
+ .hw_features = MTK_HW_FEATURES,
+ .required_clks = MT7986_CLKS_BITMAP,
+ .required_pctl = false,
++ .offload_version = 2,
+ .hash_offset = 4,
+ .foe_entry_size = sizeof(struct mtk_foe_entry),
+ .txrx = {
#reset-cells = <1>;
};
+ wed_pcie: wed-pcie@10003000 {
+ compatible = "mediatek,mt7986-wed-pcie",
+ "syscon";
+ reg = <0 0x10003000 0 0x10>;
+ };
+
+ wed0: wed@15010000 {
+ compatible = "mediatek,mt7986-wed",
+ "syscon";
+ reg = <0 0x15010000 0 0x1000>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ wed1: wed@15011000 {
+ compatible = "mediatek,mt7986-wed",
+ "syscon";
+ reg = <0 0x15011000 0 0x1000>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
eth: ethernet@15100000 {
compatible = "mediatek,mt7986-eth";
reg = <0 0x15100000 0 0x80000>;
<&apmixedsys CLK_APMIXED_SGMPLL>;
mediatek,ethsys = <ðsys>;
mediatek,sgmiisys = <&sgmiisys0>, <&sgmiisys1>;
+ mediatek,wed-pcie = <&wed_pcie>;
+ mediatek,wed = <&wed0>, <&wed1>;
#reset-cells = <1>;
#address-cells = <1>;
#size-cells = <0>;
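The mediatek,wed and mediatek,wed-pcie properties added above are syscon phandles that the ethernet and WED drivers resolve at probe time. A minimal sketch of how such a phandle is typically looked up, assuming the standard syscon helper (this is not code from the series):

	#include <linux/mfd/syscon.h>
	#include <linux/regmap.h>

	static struct regmap *mtk_get_wed_pcie_regmap(struct device_node *eth_np)
	{
		struct regmap *map;

		/* Resolve the "mediatek,wed-pcie" phandle added in the dts above */
		map = syscon_regmap_lookup_by_phandle(eth_np, "mediatek,wed-pcie");
		if (IS_ERR(map))
			return NULL;

		return map;
	}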
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -3639,6 +3639,7 @@ static const struct mtk_soc_data mt2701_
+@@ -4166,6 +4166,7 @@ static const struct mtk_soc_data mt2701_
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -196,13 +196,35 @@ static int _mtk_mdio_write(struct mtk_et
+@@ -219,13 +219,35 @@ static int _mtk_mdio_write(struct mtk_et
if (ret < 0)
return ret;
ret = mtk_mdio_busy_wait(eth);
if (ret < 0)
-@@ -219,12 +241,33 @@ static int _mtk_mdio_read(struct mtk_eth
+@@ -242,12 +264,33 @@ static int _mtk_mdio_read(struct mtk_eth
if (ret < 0)
return ret;
ret = mtk_mdio_busy_wait(eth);
if (ret < 0)
-@@ -621,6 +664,7 @@ static int mtk_mdio_init(struct mtk_eth
+@@ -644,6 +687,7 @@ static int mtk_mdio_init(struct mtk_eth
eth->mii_bus->name = "mdio";
eth->mii_bus->read = mtk_mdio_read;
eth->mii_bus->write = mtk_mdio_write;
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
-@@ -327,9 +327,12 @@
+@@ -330,9 +330,12 @@
#define PHY_IAC_ADDR_MASK GENMASK(24, 20)
#define PHY_IAC_ADDR(x) FIELD_PREP(PHY_IAC_ADDR_MASK, (x))
#define PHY_IAC_CMD_MASK GENMASK(19, 18)
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -3230,6 +3230,7 @@ static const struct net_device_ops mtk_n
+@@ -3736,6 +3736,7 @@ static const struct net_device_ops mtk_n
static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
{
const __be32 *_id = of_get_property(np, "reg", NULL);
phy_interface_t phy_mode;
struct phylink *phylink;
-@@ -3349,6 +3350,9 @@ static int mtk_add_mac(struct mtk_eth *e
+@@ -3855,6 +3856,9 @@ static int mtk_add_mac(struct mtk_eth *e
else
eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;