-LINUX_VERSION-6.1 = .53
-LINUX_KERNEL_HASH-6.1.53 = 5f57e0a04810d24f2b1a8fc95451241f80530e678717eda0f45104c6dc78ed7e
+LINUX_VERSION-6.1 = .54
+LINUX_KERNEL_HASH-6.1.54 = a3181e46d407cd6ab15f412402e8220684ff9659b0262b7a3de7384405ce4e27
return false;
return true;
-@@ -800,13 +804,13 @@ static inline void ipv6_addr_set_v4mappe
+@@ -805,13 +809,13 @@ static inline void ipv6_addr_set_v4mappe
*/
static inline int __ipv6_addr_diff32(const void *token1, const void *token2, int addrlen)
{
if (xb)
return i * 32 + 31 - __fls(ntohl(xb));
}
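For orientation, the __ipv6_addr_diff32() context above returns the length of the common prefix, in bits, between two addresses: the first 32-bit word where the operands differ is located, and 31 - __fls() converts the most significant differing bit into a prefix length. Below is a minimal userspace sketch of the same computation, with __builtin_clz() standing in for the kernel's 31 - __fls() and addr_diff32() as a hypothetical illustrative helper, not the kernel API:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* hypothetical helper mirroring __ipv6_addr_diff32(); nwords counts
 * 32-bit words, and __builtin_clz() plays the role of 31 - __fls() */
static int addr_diff32(const uint32_t *a, const uint32_t *b, int nwords)
{
	for (int i = 0; i < nwords; i++) {
		uint32_t xb = a[i] ^ b[i];      /* operands kept big-endian */
		if (xb)
			/* index of the first differing bit from the MSB,
			 * i.e. the common prefix length in bits */
			return i * 32 + __builtin_clz(ntohl(xb));
	}
	return nwords * 32;                     /* addresses are identical */
}

int main(void)
{
	uint32_t a[1] = { htonl(0x20010db8) };  /* 2001:0db8 */
	uint32_t b[1] = { htonl(0x20020db8) };  /* 2002:0db8 */

	printf("%d\n", addr_diff32(a, b, 1));   /* prints 14 */
	return 0;
}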
-@@ -1000,17 +1004,18 @@ static inline u32 ip6_multipath_hash_fie
+@@ -1005,17 +1009,18 @@ static inline u32 ip6_multipath_hash_fie
static inline void ip6_flow_hdr(struct ipv6hdr *hdr, unsigned int tclass,
__be32 flowlabel)
{
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/clocksource_ids.h>
-@@ -1021,6 +1022,16 @@ static void __init arch_timer_of_configu
+@@ -1028,6 +1029,16 @@ static void __init arch_timer_of_configu
if (of_property_read_u32(np, "clock-frequency", &arch_timer_rate))
arch_timer_rate = rate;
- struct lru_gen_struct *lrugen = &lruvec->lrugen;
+ struct lru_gen_folio *lrugen = &lruvec->lrugen;
+ restart:
spin_lock_irq(&lruvec->lru_lock);
-
-@@ -4387,7 +4387,7 @@ static bool try_to_inc_max_seq(struct lr
+@@ -4389,7 +4389,7 @@ static bool try_to_inc_max_seq(struct lr
bool success;
struct lru_gen_mm_walk *walk;
struct mm_struct *mm = NULL;
VM_WARN_ON_ONCE(max_seq > READ_ONCE(lrugen->max_seq));
-@@ -4452,7 +4452,7 @@ static bool should_run_aging(struct lruv
+@@ -4454,7 +4454,7 @@ static bool should_run_aging(struct lruv
unsigned long old = 0;
unsigned long young = 0;
unsigned long total = 0;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
for (type = !can_swap; type < ANON_AND_FILE; type++) {
-@@ -4737,7 +4737,7 @@ static bool sort_folio(struct lruvec *lr
+@@ -4740,7 +4740,7 @@ static bool sort_folio(struct lruvec *lr
int delta = folio_nr_pages(folio);
int refs = folio_lru_refs(folio);
int tier = lru_tier_from_refs(refs);
VM_WARN_ON_ONCE_FOLIO(gen >= MAX_NR_GENS, folio);
-@@ -4837,7 +4837,7 @@ static int scan_folios(struct lruvec *lr
+@@ -4848,7 +4848,7 @@ static int scan_folios(struct lruvec *lr
int scanned = 0;
int isolated = 0;
int remaining = MAX_LRU_BATCH;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
VM_WARN_ON_ONCE(!list_empty(list));
-@@ -5237,7 +5237,7 @@ done:
+@@ -5249,7 +5249,7 @@ done:
static bool __maybe_unused state_is_valid(struct lruvec *lruvec)
{
if (lrugen->enabled) {
enum lru_list lru;
-@@ -5519,7 +5519,7 @@ static void lru_gen_seq_show_full(struct
+@@ -5531,7 +5531,7 @@ static void lru_gen_seq_show_full(struct
int i;
int type, tier;
int hist = lru_hist_from_seq(seq);
for (tier = 0; tier < MAX_NR_TIERS; tier++) {
seq_printf(m, " %10d", tier);
-@@ -5569,7 +5569,7 @@ static int lru_gen_seq_show(struct seq_f
+@@ -5581,7 +5581,7 @@ static int lru_gen_seq_show(struct seq_f
unsigned long seq;
bool full = !debugfs_real_fops(m->file)->write;
struct lruvec *lruvec = v;
int nid = lruvec_pgdat(lruvec)->node_id;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
DEFINE_MAX_SEQ(lruvec);
-@@ -5823,7 +5823,7 @@ void lru_gen_init_lruvec(struct lruvec *
+@@ -5835,7 +5835,7 @@ void lru_gen_init_lruvec(struct lruvec *
{
int i;
int gen, type, zone;
+++ /dev/null
-From 656287d55d9cfc72a4bcd4d9bd098570f12ce409 Mon Sep 17 00:00:00 2001
-From: Yu Zhao <yuzhao@google.com>
-Date: Wed, 21 Dec 2022 21:19:00 -0700
-Subject: [PATCH 02/19] UPSTREAM: mm: multi-gen LRU: rename lrugen->lists[] to
- lrugen->folios[]
-
-lru_gen_folio will be chained into per-node lists by the coming
-lrugen->list.
-
-Link: https://lkml.kernel.org/r/20221222041905.2431096-3-yuzhao@google.com
-Signed-off-by: Yu Zhao <yuzhao@google.com>
-Cc: Johannes Weiner <hannes@cmpxchg.org>
-Cc: Jonathan Corbet <corbet@lwn.net>
-Cc: Michael Larabel <Michael@MichaelLarabel.com>
-Cc: Michal Hocko <mhocko@kernel.org>
-Cc: Mike Rapoport <rppt@kernel.org>
-Cc: Roman Gushchin <roman.gushchin@linux.dev>
-Cc: Suren Baghdasaryan <surenb@google.com>
-Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-Bug: 274865848
-(cherry picked from commit 6df1b2212950aae2b2188c6645ea18e2a9e3fdd5)
-Change-Id: I09f53e0fb2cd6b8b3adbb8a80b15dc5efbeae857
-Signed-off-by: T.J. Mercier <tjmercier@google.com>
----
- Documentation/mm/multigen_lru.rst | 8 ++++----
- include/linux/mm_inline.h | 4 ++--
- include/linux/mmzone.h | 8 ++++----
- mm/vmscan.c | 20 ++++++++++----------
- 4 files changed, 20 insertions(+), 20 deletions(-)
-
---- a/Documentation/mm/multigen_lru.rst
-+++ b/Documentation/mm/multigen_lru.rst
-@@ -89,15 +89,15 @@ variables are monotonically increasing.
-
- Generation numbers are truncated into ``order_base_2(MAX_NR_GENS+1)``
- bits in order to fit into the gen counter in ``folio->flags``. Each
--truncated generation number is an index to ``lrugen->lists[]``. The
-+truncated generation number is an index to ``lrugen->folios[]``. The
- sliding window technique is used to track at least ``MIN_NR_GENS`` and
- at most ``MAX_NR_GENS`` generations. The gen counter stores a value
- within ``[1, MAX_NR_GENS]`` while a page is on one of
--``lrugen->lists[]``; otherwise it stores zero.
-+``lrugen->folios[]``; otherwise it stores zero.
-
- Each generation is divided into multiple tiers. A page accessed ``N``
- times through file descriptors is in tier ``order_base_2(N)``. Unlike
--generations, tiers do not have dedicated ``lrugen->lists[]``. In
-+generations, tiers do not have dedicated ``lrugen->folios[]``. In
- contrast to moving across generations, which requires the LRU lock,
- moving across tiers only involves atomic operations on
- ``folio->flags`` and therefore has a negligible cost. A feedback loop
-@@ -127,7 +127,7 @@ page mapped by this PTE to ``(max_seq%MA
- Eviction
- --------
- The eviction consumes old generations. Given an ``lruvec``, it
--increments ``min_seq`` when ``lrugen->lists[]`` indexed by
-+increments ``min_seq`` when ``lrugen->folios[]`` indexed by
- ``min_seq%MAX_NR_GENS`` becomes empty. To select a type and a tier to
- evict from, it first compares ``min_seq[]`` to select the older type.
- If both types are equally old, it selects the one whose first tier has
---- a/include/linux/mm_inline.h
-+++ b/include/linux/mm_inline.h
-@@ -256,9 +256,9 @@ static inline bool lru_gen_add_folio(str
- lru_gen_update_size(lruvec, folio, -1, gen);
- /* for folio_rotate_reclaimable() */
- if (reclaiming)
-- list_add_tail(&folio->lru, &lrugen->lists[gen][type][zone]);
-+ list_add_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
- else
-- list_add(&folio->lru, &lrugen->lists[gen][type][zone]);
-+ list_add(&folio->lru, &lrugen->folios[gen][type][zone]);
-
- return true;
- }
---- a/include/linux/mmzone.h
-+++ b/include/linux/mmzone.h
-@@ -312,7 +312,7 @@ enum lruvec_flags {
- * They form a sliding window of a variable size [MIN_NR_GENS, MAX_NR_GENS]. An
- * offset within MAX_NR_GENS, i.e., gen, indexes the LRU list of the
- * corresponding generation. The gen counter in folio->flags stores gen+1 while
-- * a page is on one of lrugen->lists[]. Otherwise it stores 0.
-+ * a page is on one of lrugen->folios[]. Otherwise it stores 0.
- *
- * A page is added to the youngest generation on faulting. The aging needs to
- * check the accessed bit at least twice before handing this page over to the
-@@ -324,8 +324,8 @@ enum lruvec_flags {
- * rest of generations, if they exist, are considered inactive. See
- * lru_gen_is_active().
- *
-- * PG_active is always cleared while a page is on one of lrugen->lists[] so that
-- * the aging needs not to worry about it. And it's set again when a page
-+ * PG_active is always cleared while a page is on one of lrugen->folios[] so
-+ * that the aging needs not to worry about it. And it's set again when a page
- * considered active is isolated for non-reclaiming purposes, e.g., migration.
- * See lru_gen_add_folio() and lru_gen_del_folio().
- *
-@@ -412,7 +412,7 @@ struct lru_gen_folio {
- /* the birth time of each generation in jiffies */
- unsigned long timestamps[MAX_NR_GENS];
- /* the multi-gen LRU lists, lazily sorted on eviction */
-- struct list_head lists[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
-+ struct list_head folios[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
- /* the multi-gen LRU sizes, eventually consistent */
- long nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
- /* the exponential moving average of refaulted */
---- a/mm/vmscan.c
-+++ b/mm/vmscan.c
-@@ -4258,7 +4258,7 @@ static bool inc_min_seq(struct lruvec *l
-
- /* prevent cold/hot inversion if force_scan is true */
- for (zone = 0; zone < MAX_NR_ZONES; zone++) {
-- struct list_head *head = &lrugen->lists[old_gen][type][zone];
-+ struct list_head *head = &lrugen->folios[old_gen][type][zone];
-
- while (!list_empty(head)) {
- struct folio *folio = lru_to_folio(head);
-@@ -4269,7 +4269,7 @@ static bool inc_min_seq(struct lruvec *l
- VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio);
-
- new_gen = folio_inc_gen(lruvec, folio, false);
-- list_move_tail(&folio->lru, &lrugen->lists[new_gen][type][zone]);
-+ list_move_tail(&folio->lru, &lrugen->folios[new_gen][type][zone]);
-
- if (!--remaining)
- return false;
-@@ -4297,7 +4297,7 @@ static bool try_to_inc_min_seq(struct lr
- gen = lru_gen_from_seq(min_seq[type]);
-
- for (zone = 0; zone < MAX_NR_ZONES; zone++) {
-- if (!list_empty(&lrugen->lists[gen][type][zone]))
-+ if (!list_empty(&lrugen->folios[gen][type][zone]))
- goto next;
- }
-
-@@ -4762,7 +4762,7 @@ static bool sort_folio(struct lruvec *lr
-
- /* promoted */
- if (gen != lru_gen_from_seq(lrugen->min_seq[type])) {
-- list_move(&folio->lru, &lrugen->lists[gen][type][zone]);
-+ list_move(&folio->lru, &lrugen->folios[gen][type][zone]);
- return true;
- }
-
-@@ -4771,7 +4771,7 @@ static bool sort_folio(struct lruvec *lr
- int hist = lru_hist_from_seq(lrugen->min_seq[type]);
-
- gen = folio_inc_gen(lruvec, folio, false);
-- list_move_tail(&folio->lru, &lrugen->lists[gen][type][zone]);
-+ list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
-
- WRITE_ONCE(lrugen->protected[hist][type][tier - 1],
- lrugen->protected[hist][type][tier - 1] + delta);
-@@ -4783,7 +4783,7 @@ static bool sort_folio(struct lruvec *lr
- if (folio_test_locked(folio) || folio_test_writeback(folio) ||
- (type == LRU_GEN_FILE && folio_test_dirty(folio))) {
- gen = folio_inc_gen(lruvec, folio, true);
-- list_move(&folio->lru, &lrugen->lists[gen][type][zone]);
-+ list_move(&folio->lru, &lrugen->folios[gen][type][zone]);
- return true;
- }
-
-@@ -4850,7 +4850,7 @@ static int scan_folios(struct lruvec *lr
- for (zone = sc->reclaim_idx; zone >= 0; zone--) {
- LIST_HEAD(moved);
- int skipped = 0;
-- struct list_head *head = &lrugen->lists[gen][type][zone];
-+ struct list_head *head = &lrugen->folios[gen][type][zone];
-
- while (!list_empty(head)) {
- struct folio *folio = lru_to_folio(head);
-@@ -5250,7 +5250,7 @@ static bool __maybe_unused state_is_vali
- int gen, type, zone;
-
- for_each_gen_type_zone(gen, type, zone) {
-- if (!list_empty(&lrugen->lists[gen][type][zone]))
-+ if (!list_empty(&lrugen->folios[gen][type][zone]))
- return false;
- }
- }
-@@ -5295,7 +5295,7 @@ static bool drain_evictable(struct lruve
- int remaining = MAX_LRU_BATCH;
-
- for_each_gen_type_zone(gen, type, zone) {
-- struct list_head *head = &lruvec->lrugen.lists[gen][type][zone];
-+ struct list_head *head = &lruvec->lrugen.folios[gen][type][zone];
-
- while (!list_empty(head)) {
- bool success;
-@@ -5832,7 +5832,7 @@ void lru_gen_init_lruvec(struct lruvec *
- lrugen->timestamps[i] = jiffies;
-
- for_each_gen_type_zone(gen, type, zone)
-- INIT_LIST_HEAD(&lrugen->lists[gen][type][zone]);
-+ INIT_LIST_HEAD(&lrugen->folios[gen][type][zone]);
-
- lruvec->mm_state.seq = MIN_NR_GENS;
- init_waitqueue_head(&lruvec->mm_state.wait);
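The multigen_lru.rst hunks in the deleted patch above describe how a monotonically increasing sequence number (max_seq/min_seq) is truncated into an index into lrugen->folios[] (formerly lrugen->lists[]). A minimal userspace sketch of that indexing, mirroring the kernel's lru_gen_from_seq(); MAX_NR_GENS here is the kernel default of 4 and everything else is reduced for illustration:

#include <stdio.h>

#define MAX_NR_GENS 4   /* kernel default; MIN_NR_GENS is 2 */

/* same truncation as the kernel's lru_gen_from_seq() */
static unsigned int lru_gen_from_seq(unsigned long seq)
{
	return seq % MAX_NR_GENS;
}

int main(void)
{
	unsigned long max_seq = 7, min_seq = 4;

	/* a faulting page lands on folios[lru_gen_from_seq(max_seq)][type][zone] */
	printf("youngest gen index: %u\n", lru_gen_from_seq(max_seq)); /* 3 */
	/* eviction raises min_seq once folios[lru_gen_from_seq(min_seq)]
	 * is empty for all types and zones */
	printf("oldest gen index:   %u\n", lru_gen_from_seq(min_seq)); /* 0 */
	return 0;
}

The sliding window between those two indexes is what keeps at least MIN_NR_GENS and at most MAX_NR_GENS generations live, as the deleted documentation hunk explains.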
static bool writeback_throttling_sane(struct scan_control *sc)
{
return true;
-@@ -4993,8 +5003,7 @@ static int isolate_folios(struct lruvec
+@@ -5005,8 +5015,7 @@ static int isolate_folios(struct lruvec
return scanned;
}
{
int type;
int scanned;
-@@ -5083,9 +5092,6 @@ retry:
+@@ -5095,9 +5104,6 @@ retry:
goto retry;
}
return scanned;
}
-@@ -5124,67 +5130,26 @@ done:
+@@ -5136,67 +5142,26 @@ done:
return min_seq[!can_swap] + MIN_NR_GENS <= max_seq ? nr_to_scan : 0;
}
lru_add_drain();
-@@ -5208,7 +5173,7 @@ static void lru_gen_shrink_lruvec(struct
+@@ -5220,7 +5185,7 @@ static void lru_gen_shrink_lruvec(struct
if (!nr_to_scan)
goto done;
if (!delta)
goto done;
-@@ -5216,7 +5181,7 @@ static void lru_gen_shrink_lruvec(struct
+@@ -5228,7 +5193,7 @@ static void lru_gen_shrink_lruvec(struct
if (scanned >= nr_to_scan)
break;
break;
cond_resched();
-@@ -5666,7 +5631,7 @@ static int run_eviction(struct lruvec *l
+@@ -5678,7 +5643,7 @@ static int run_eviction(struct lruvec *l
if (sc->nr_reclaimed >= nr_to_reclaim)
return 0;
unsigned long last_reclaimed;
#endif
-@@ -4455,7 +4454,7 @@ done:
+@@ -4457,7 +4456,7 @@ done:
return true;
}
struct scan_control *sc, bool can_swap, unsigned long *nr_to_scan)
{
int gen, type, zone;
-@@ -4464,6 +4463,13 @@ static bool should_run_aging(struct lruv
+@@ -4466,6 +4465,13 @@ static bool should_run_aging(struct lruv
unsigned long total = 0;
struct lru_gen_folio *lrugen = &lruvec->lrugen;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
for (type = !can_swap; type < ANON_AND_FILE; type++) {
unsigned long seq;
-@@ -4492,8 +4498,6 @@ static bool should_run_aging(struct lruv
+@@ -4494,8 +4500,6 @@ static bool should_run_aging(struct lruv
* stalls when the number of generations reaches MIN_NR_GENS. Hence, the
* ideal number of generations is MIN_NR_GENS+1.
*/
if (min_seq[!can_swap] + MIN_NR_GENS < max_seq)
return false;
-@@ -4512,40 +4516,54 @@ static bool should_run_aging(struct lruv
+@@ -4514,40 +4518,54 @@ static bool should_run_aging(struct lruv
return false;
}
}
/* to protect the working set of the last N jiffies */
-@@ -4554,46 +4572,32 @@ static unsigned long lru_gen_min_ttl __r
+@@ -4556,46 +4574,32 @@ static unsigned long lru_gen_min_ttl __r
static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
{
struct mem_cgroup *memcg;
*/
if (mutex_trylock(&oom_lock)) {
struct oom_control oc = {
-@@ -5101,33 +5105,27 @@ retry:
+@@ -5113,33 +5117,27 @@ retry:
* reclaim.
*/
static unsigned long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc,
}
static unsigned long get_nr_to_reclaim(struct scan_control *sc)
-@@ -5146,9 +5144,7 @@ static unsigned long get_nr_to_reclaim(s
+@@ -5158,9 +5156,7 @@ static unsigned long get_nr_to_reclaim(s
static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
{
struct blk_plug plug;
unsigned long nr_to_reclaim = get_nr_to_reclaim(sc);
lru_add_drain();
-@@ -5169,13 +5165,13 @@ static void lru_gen_shrink_lruvec(struct
+@@ -5181,13 +5177,13 @@ static void lru_gen_shrink_lruvec(struct
else
swappiness = 0;
scanned += delta;
if (scanned >= nr_to_scan)
-@@ -5187,10 +5183,6 @@ static void lru_gen_shrink_lruvec(struct
+@@ -5199,10 +5195,6 @@ static void lru_gen_shrink_lruvec(struct
cond_resched();
}
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
-@@ -4454,68 +4454,6 @@ done:
+@@ -4456,68 +4456,6 @@ done:
return true;
}
static bool lruvec_is_sizable(struct lruvec *lruvec, struct scan_control *sc)
{
int gen, type, zone;
-@@ -5099,6 +5037,68 @@ retry:
+@@ -5111,6 +5049,68 @@ retry:
return scanned;
}
if (order > 0)
return 0;
-@@ -5383,6 +5396,7 @@ static int mem_cgroup_css_online(struct
+@@ -5373,6 +5386,7 @@ static int mem_cgroup_css_online(struct
if (unlikely(mem_cgroup_is_root(memcg)))
queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
2UL*HZ);
return 0;
offline_kmem:
memcg_offline_kmem(memcg);
-@@ -5414,6 +5428,7 @@ static void mem_cgroup_css_offline(struc
+@@ -5404,6 +5418,7 @@ static void mem_cgroup_css_offline(struc
memcg_offline_kmem(memcg);
reparent_shrinker_deferred(memcg);
wb_memcg_offline(memcg);
drain_all_stock(memcg);
-@@ -5425,6 +5440,7 @@ static void mem_cgroup_css_released(stru
+@@ -5415,6 +5430,7 @@ static void mem_cgroup_css_released(stru
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
invalidate_reclaim_iterators(memcg);
static struct lruvec *get_lruvec(struct mem_cgroup *memcg, int nid)
{
struct pglist_data *pgdat = NODE_DATA(nid);
-@@ -4440,8 +4440,7 @@ done:
+@@ -4442,8 +4442,7 @@ done:
if (sc->priority <= DEF_PRIORITY - 2)
wait_event_killable(lruvec->mm_state.wait,
max_seq < READ_ONCE(lrugen->max_seq));
}
VM_WARN_ON_ONCE(max_seq != READ_ONCE(lrugen->max_seq));
-@@ -4514,8 +4513,6 @@ static void lru_gen_age_node(struct pgli
+@@ -4516,8 +4515,6 @@ static void lru_gen_age_node(struct pgli
VM_WARN_ON_ONCE(!current_is_kswapd());
/* check the order to exclude compaction-induced reclaim */
if (!min_ttl || sc->order || sc->priority == DEF_PRIORITY)
return;
-@@ -5104,8 +5101,7 @@ static bool should_run_aging(struct lruv
+@@ -5116,8 +5113,7 @@ static bool should_run_aging(struct lruv
* 1. Defer try_to_inc_max_seq() to workqueues to reduce latency for memcg
* reclaim.
*/
{
unsigned long nr_to_scan;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
-@@ -5122,10 +5118,8 @@ static unsigned long get_nr_to_scan(stru
+@@ -5134,10 +5130,8 @@ static unsigned long get_nr_to_scan(stru
if (sc->priority == DEF_PRIORITY)
return nr_to_scan;
}
static unsigned long get_nr_to_reclaim(struct scan_control *sc)
-@@ -5134,29 +5128,18 @@ static unsigned long get_nr_to_reclaim(s
+@@ -5146,29 +5140,18 @@ static unsigned long get_nr_to_reclaim(s
if (!global_reclaim(sc))
return -1;
if (sc->may_swap)
swappiness = get_swappiness(lruvec, sc);
-@@ -5166,7 +5149,7 @@ static void lru_gen_shrink_lruvec(struct
+@@ -5178,7 +5161,7 @@ static void lru_gen_shrink_lruvec(struct
swappiness = 0;
nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness);
break;
delta = evict_folios(lruvec, sc, swappiness);
-@@ -5183,10 +5166,251 @@ static void lru_gen_shrink_lruvec(struct
+@@ -5195,10 +5178,251 @@ static void lru_gen_shrink_lruvec(struct
cond_resched();
}
/******************************************************************************
* state change
-@@ -5644,11 +5868,11 @@ static int run_cmd(char cmd, int memcg_i
+@@ -5656,11 +5880,11 @@ static int run_cmd(char cmd, int memcg_i
if (!mem_cgroup_disabled()) {
rcu_read_lock();
rcu_read_unlock();
if (!memcg)
-@@ -5796,6 +6020,19 @@ void lru_gen_init_lruvec(struct lruvec *
+@@ -5808,6 +6032,19 @@ void lru_gen_init_lruvec(struct lruvec *
}
#ifdef CONFIG_MEMCG
void lru_gen_init_memcg(struct mem_cgroup *memcg)
{
INIT_LIST_HEAD(&memcg->mm_list.fifo);
-@@ -5819,7 +6056,69 @@ void lru_gen_exit_memcg(struct mem_cgrou
+@@ -5831,7 +6068,69 @@ void lru_gen_exit_memcg(struct mem_cgrou
}
}
}
static int __init init_lru_gen(void)
{
-@@ -5846,6 +6145,10 @@ static void lru_gen_shrink_lruvec(struct
+@@ -5858,6 +6157,10 @@ static void lru_gen_shrink_lruvec(struct
{
}
#endif /* CONFIG_LRU_GEN */
static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
-@@ -5859,7 +6162,7 @@ static void shrink_lruvec(struct lruvec
+@@ -5871,7 +6174,7 @@ static void shrink_lruvec(struct lruvec
bool proportional_reclaim;
struct blk_plug plug;
lru_gen_shrink_lruvec(lruvec, sc);
return;
}
-@@ -6102,6 +6405,11 @@ static void shrink_node(pg_data_t *pgdat
+@@ -6114,6 +6417,11 @@ static void shrink_node(pg_data_t *pgdat
struct lruvec *target_lruvec;
bool reclaimable = false;
VM_WARN_ON_ONCE(current_is_kswapd());
walk = kzalloc(sizeof(*walk), __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
-@@ -4417,7 +4420,7 @@ static bool try_to_inc_max_seq(struct lr
+@@ -4419,7 +4422,7 @@ static bool try_to_inc_max_seq(struct lr
goto done;
}
if (!walk) {
success = iterate_mm_list_nowalk(lruvec, max_seq);
goto done;
-@@ -4486,8 +4489,6 @@ static bool lruvec_is_reclaimable(struct
+@@ -4488,8 +4491,6 @@ static bool lruvec_is_reclaimable(struct
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
DEFINE_MIN_SEQ(lruvec);
/* see the comment on lru_gen_folio */
gen = lru_gen_from_seq(min_seq[LRU_GEN_FILE]);
birth = READ_ONCE(lruvec->lrugen.timestamps[gen]);
-@@ -4743,12 +4744,8 @@ static bool isolate_folio(struct lruvec
+@@ -4753,12 +4754,8 @@ static bool isolate_folio(struct lruvec
{
bool success;
(folio_test_dirty(folio) ||
(folio_test_anon(folio) && !folio_test_swapcache(folio))))
return false;
-@@ -4845,9 +4842,8 @@ static int scan_folios(struct lruvec *lr
+@@ -4857,9 +4854,8 @@ static int scan_folios(struct lruvec *lr
__count_vm_events(PGSCAN_ANON + type, isolated);
/*
*/
return isolated || !remaining ? scanned : 0;
}
-@@ -5107,8 +5103,7 @@ static long get_nr_to_scan(struct lruvec
+@@ -5119,8 +5115,7 @@ static long get_nr_to_scan(struct lruvec
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
DEFINE_MAX_SEQ(lruvec);
return 0;
if (!should_run_aging(lruvec, max_seq, sc, can_swap, &nr_to_scan))
-@@ -5136,17 +5131,14 @@ static bool try_to_shrink_lruvec(struct
+@@ -5148,17 +5143,14 @@ static bool try_to_shrink_lruvec(struct
long nr_to_scan;
unsigned long scanned = 0;
unsigned long nr_to_reclaim = get_nr_to_reclaim(sc);
nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness);
if (nr_to_scan <= 0)
-@@ -5277,12 +5269,13 @@ static void lru_gen_shrink_lruvec(struct
+@@ -5289,12 +5281,13 @@ static void lru_gen_shrink_lruvec(struct
struct blk_plug plug;
VM_WARN_ON_ONCE(global_reclaim(sc));
if (try_to_shrink_lruvec(lruvec, sc))
lru_gen_rotate_memcg(lruvec, MEMCG_LRU_YOUNG);
-@@ -5338,11 +5331,19 @@ static void lru_gen_shrink_node(struct p
+@@ -5350,11 +5343,19 @@ static void lru_gen_shrink_node(struct p
VM_WARN_ON_ONCE(!global_reclaim(sc));
set_initial_priority(pgdat, sc);
-@@ -5360,7 +5361,7 @@ static void lru_gen_shrink_node(struct p
+@@ -5372,7 +5373,7 @@ static void lru_gen_shrink_node(struct p
clear_mm_walk();
blk_finish_plug(&plug);
/* kswapd should never fail */
pgdat->kswapd_failures = 0;
}
-@@ -5932,7 +5933,7 @@ static ssize_t lru_gen_seq_write(struct
+@@ -5944,7 +5945,7 @@ static ssize_t lru_gen_seq_write(struct
set_task_reclaim_state(current, &sc.reclaim_state);
flags = memalloc_noreclaim_save();
blk_start_plug(&plug);
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
-@@ -4415,7 +4415,7 @@ static bool try_to_inc_max_seq(struct lr
+@@ -4417,7 +4417,7 @@ static bool try_to_inc_max_seq(struct lr
* handful of PTEs. Spreading the work out over a period of time usually
* is less efficient, but it avoids bursty page faults.
*/
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
-@@ -5206,18 +5206,20 @@ static int shrink_one(struct lruvec *lru
+@@ -5218,18 +5218,20 @@ static int shrink_one(struct lruvec *lru
static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc)
{
gen = get_memcg_gen(READ_ONCE(pgdat->memcg_lru.seq));
rcu_read_lock();
-@@ -5241,14 +5243,22 @@ restart:
+@@ -5253,14 +5255,22 @@ restart:
op = shrink_one(lruvec, sc);
/* restart if raced with lru_gen_rotate_memcg() */
if (gen != get_nulls_value(pos))
goto restart;
-@@ -5257,11 +5267,6 @@ restart:
+@@ -5269,11 +5279,6 @@ restart:
bin = get_memcg_bin(bin + 1);
if (bin != first_bin)
goto restart;
The multi-gen LRU can be disassembled into the following parts:
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
-@@ -4459,6 +4459,10 @@ done:
+@@ -4461,6 +4461,10 @@ done:
return true;
}
The multi-gen LRU can be disassembled into the following parts:
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
-@@ -4553,6 +4553,10 @@ static void lru_gen_age_node(struct pgli
+@@ -4555,6 +4555,10 @@ static void lru_gen_age_node(struct pgli
}
}
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
-@@ -4690,6 +4690,148 @@ void lru_gen_look_around(struct page_vma
+@@ -4692,6 +4692,148 @@ void lru_gen_look_around(struct page_vma
}
/******************************************************************************
* the eviction
******************************************************************************/
-@@ -5386,53 +5528,6 @@ done:
+@@ -5398,53 +5540,6 @@ done:
pgdat->kswapd_failures = 0;
}
/******************************************************************************
* state change
******************************************************************************/
-@@ -6078,67 +6173,6 @@ void lru_gen_exit_memcg(struct mem_cgrou
+@@ -6090,67 +6185,6 @@ void lru_gen_exit_memcg(struct mem_cgrou
}
}
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
-@@ -6160,12 +6160,17 @@ void lru_gen_exit_memcg(struct mem_cgrou
+@@ -6172,12 +6172,17 @@ void lru_gen_exit_memcg(struct mem_cgrou
int i;
int nid;
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
-@@ -4571,13 +4571,12 @@ static void lru_gen_age_node(struct pgli
+@@ -4573,13 +4573,12 @@ static void lru_gen_age_node(struct pgli
void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
{
int i;
struct folio *folio = pfn_folio(pvmw->pfn);
struct mem_cgroup *memcg = folio_memcg(folio);
struct pglist_data *pgdat = folio_pgdat(folio);
-@@ -4594,25 +4593,28 @@ void lru_gen_look_around(struct page_vma
+@@ -4596,25 +4595,28 @@ void lru_gen_look_around(struct page_vma
/* avoid taking the LRU lock under the PTL when possible */
walk = current->reclaim_state ? current->reclaim_state->mm_walk : NULL;
for (i = 0, addr = start; addr != end; i++, addr += PAGE_SIZE) {
unsigned long pfn;
-@@ -4637,56 +4639,27 @@ void lru_gen_look_around(struct page_vma
+@@ -4639,56 +4641,27 @@ void lru_gen_look_around(struct page_vma
!folio_test_swapcache(folio)))
folio_mark_dirty(folio);
/* folio_update_gen() requires stable folio_memcg() */
if (!mem_cgroup_trylock_pages(memcg))
break;
-@@ -4442,25 +4425,12 @@ static bool try_to_inc_max_seq(struct lr
+@@ -4444,25 +4427,12 @@ static bool try_to_inc_max_seq(struct lr
success = iterate_mm_list(lruvec, walk, &mm);
if (mm)
walk_mm(lruvec, mm, walk);
}
/******************************************************************************
-@@ -6105,7 +6075,6 @@ void lru_gen_init_lruvec(struct lruvec *
+@@ -6117,7 +6087,6 @@ void lru_gen_init_lruvec(struct lruvec *
INIT_LIST_HEAD(&lrugen->folios[gen][type][zone]);
lruvec->mm_state.seq = MIN_NR_GENS;
}
#ifdef CONFIG_MEMCG
-@@ -6138,7 +6107,6 @@ void lru_gen_exit_memcg(struct mem_cgrou
+@@ -6150,7 +6119,6 @@ void lru_gen_exit_memcg(struct mem_cgrou
for_each_node(nid) {
struct lruvec *lruvec = get_lruvec(memcg, nid);
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -4331,6 +4331,7 @@ static const struct mtk_soc_data mt7986_
+@@ -4334,6 +4334,7 @@ static const struct mtk_soc_data mt7986_
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7986_CLKS_BITMAP,
.required_pctl = false,
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -3478,11 +3478,8 @@ static void mtk_pending_work(struct work
+@@ -3481,11 +3481,8 @@ static void mtk_pending_work(struct work
rtnl_lock();
dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);
/* stop all devices to make sure that dma is properly shut down */
for (i = 0; i < MTK_MAC_COUNT; i++) {
if (!eth->netdev[i])
-@@ -3516,7 +3513,7 @@ static void mtk_pending_work(struct work
+@@ -3519,7 +3516,7 @@ static void mtk_pending_work(struct work
dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -3254,6 +3254,27 @@ static void mtk_set_mcr_max_rx(struct mt
+@@ -3257,6 +3257,27 @@ static void mtk_set_mcr_max_rx(struct mt
mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
}
static int mtk_hw_init(struct mtk_eth *eth)
{
u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
-@@ -3293,22 +3314,9 @@ static int mtk_hw_init(struct mtk_eth *e
+@@ -3296,22 +3317,9 @@ static int mtk_hw_init(struct mtk_eth *e
return 0;
}
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -3275,7 +3275,54 @@ static void mtk_hw_reset(struct mtk_eth
+@@ -3278,7 +3278,54 @@ static void mtk_hw_reset(struct mtk_eth
0x3ffffff);
}
{
u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
ETHSYS_DMA_AG_MAP_PPE;
-@@ -3314,7 +3361,12 @@ static int mtk_hw_init(struct mtk_eth *e
+@@ -3317,7 +3364,12 @@ static int mtk_hw_init(struct mtk_eth *e
return 0;
}
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
/* Set FE to PDMAv2 if necessary */
-@@ -3505,7 +3557,7 @@ static void mtk_pending_work(struct work
+@@ -3508,7 +3560,7 @@ static void mtk_pending_work(struct work
if (eth->dev->pins)
pinctrl_select_state(eth->dev->pins->p,
eth->dev->pins->default_state);
/* restart DMA and enable IRQs */
for (i = 0; i < MTK_MAC_COUNT; i++) {
-@@ -4107,7 +4159,7 @@ static int mtk_probe(struct platform_dev
+@@ -4110,7 +4162,7 @@ static int mtk_probe(struct platform_dev
eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
INIT_WORK(ð->pending_work, mtk_pending_work);
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -2842,14 +2842,29 @@ static void mtk_dma_free(struct mtk_eth
+@@ -2845,14 +2845,29 @@ static void mtk_dma_free(struct mtk_eth
kfree(eth->scratch_head);
}
schedule_work(ð->pending_work);
}
-@@ -3329,15 +3344,17 @@ static int mtk_hw_init(struct mtk_eth *e
+@@ -3332,15 +3347,17 @@ static int mtk_hw_init(struct mtk_eth *e
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
int i, val, ret;
if (eth->ethsys)
regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
-@@ -3466,8 +3483,10 @@ static int mtk_hw_init(struct mtk_eth *e
+@@ -3469,8 +3486,10 @@ static int mtk_hw_init(struct mtk_eth *e
return 0;
err_disable_pm:
return ret;
}
-@@ -3529,30 +3548,53 @@ static int mtk_do_ioctl(struct net_devic
+@@ -3532,30 +3551,53 @@ static int mtk_do_ioctl(struct net_devic
return -EOPNOTSUPP;
}
if (eth->dev->pins)
pinctrl_select_state(eth->dev->pins->p,
-@@ -3563,15 +3605,19 @@ static void mtk_pending_work(struct work
+@@ -3566,15 +3608,19 @@ static void mtk_pending_work(struct work
for (i = 0; i < MTK_MAC_COUNT; i++) {
if (!test_bit(i, &restart))
continue;
};
/* strings used by ethtool */
-@@ -3337,6 +3343,102 @@ static void mtk_hw_warm_reset(struct mtk
+@@ -3340,6 +3346,102 @@ static void mtk_hw_warm_reset(struct mtk
val, rst_mask);
}
static int mtk_hw_init(struct mtk_eth *eth, bool reset)
{
u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
-@@ -3655,6 +3757,7 @@ static int mtk_cleanup(struct mtk_eth *e
+@@ -3658,6 +3760,7 @@ static int mtk_cleanup(struct mtk_eth *e
mtk_unreg_dev(eth);
mtk_free_dev(eth);
cancel_work_sync(ð->pending_work);
return 0;
}
-@@ -4092,6 +4195,7 @@ static int mtk_probe(struct platform_dev
+@@ -4095,6 +4198,7 @@ static int mtk_probe(struct platform_dev
eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
INIT_WORK(ð->rx_dim.work, mtk_dim_rx);
eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
INIT_WORK(ð->tx_dim.work, mtk_dim_tx);
-@@ -4294,6 +4398,8 @@ static int mtk_probe(struct platform_dev
+@@ -4297,6 +4401,8 @@ static int mtk_probe(struct platform_dev
netif_napi_add(ð->dummy_dev, ð->rx_napi, mtk_napi_rx);
platform_set_drvdata(pdev, eth);
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -3686,6 +3686,11 @@ static void mtk_pending_work(struct work
+@@ -3689,6 +3689,11 @@ static void mtk_pending_work(struct work
set_bit(MTK_RESETTING, ð->state);
mtk_prepare_for_reset(eth);
/* stop all devices to make sure that dma is properly shut down */
for (i = 0; i < MTK_MAC_COUNT; i++) {
-@@ -3723,6 +3728,8 @@ static void mtk_pending_work(struct work
+@@ -3726,6 +3731,8 @@ static void mtk_pending_work(struct work
clear_bit(MTK_RESETTING, ð->state);
ring->dma_pdma, ring->phys_pdma);
ring->dma_pdma = NULL;
}
-@@ -2830,7 +2836,7 @@ static void mtk_dma_free(struct mtk_eth
+@@ -2833,7 +2839,7 @@ static void mtk_dma_free(struct mtk_eth
netdev_reset_queue(eth->netdev[i]);
if (eth->scratch_ring) {
dma_free_coherent(eth->dma_dev,
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -4477,7 +4477,7 @@ static const struct mtk_soc_data mt7621_
+@@ -4480,7 +4480,7 @@ static const struct mtk_soc_data mt7621_
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7621_CLKS_BITMAP,
.required_pctl = false,
.hash_offset = 2,
.foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
.txrx = {
-@@ -4516,7 +4516,7 @@ static const struct mtk_soc_data mt7623_
+@@ -4519,7 +4519,7 @@ static const struct mtk_soc_data mt7623_
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
} else {
mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
mtk_w32(eth, ring_size, MT7628_TX_MAX_CNT0);
-@@ -2960,7 +3076,7 @@ static int mtk_start_dma(struct mtk_eth
+@@ -2963,7 +3079,7 @@ static int mtk_start_dma(struct mtk_eth
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
else
val |= MTK_RX_BT_32DWORDS;
mtk_w32(eth, val, reg_map->qdma.glo_cfg);
-@@ -3006,6 +3122,45 @@ static void mtk_gdm_config(struct mtk_et
+@@ -3009,6 +3125,45 @@ static void mtk_gdm_config(struct mtk_et
mtk_w32(eth, 0, MTK_RST_GL);
}
static int mtk_open(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
-@@ -3048,7 +3203,8 @@ static int mtk_open(struct net_device *d
+@@ -3051,7 +3206,8 @@ static int mtk_open(struct net_device *d
refcount_inc(ð->dma_refcnt);
phylink_start(mac->phylink);
return 0;
}
-@@ -3757,8 +3913,12 @@ static int mtk_unreg_dev(struct mtk_eth
+@@ -3760,8 +3916,12 @@ static int mtk_unreg_dev(struct mtk_eth
int i;
for (i = 0; i < MTK_MAC_COUNT; i++) {
unregister_netdev(eth->netdev[i]);
}
-@@ -3975,6 +4135,23 @@ static int mtk_set_rxnfc(struct net_devi
+@@ -3978,6 +4138,23 @@ static int mtk_set_rxnfc(struct net_devi
return ret;
}
static const struct ethtool_ops mtk_ethtool_ops = {
.get_link_ksettings = mtk_get_link_ksettings,
.set_link_ksettings = mtk_set_link_ksettings,
-@@ -4009,6 +4186,7 @@ static const struct net_device_ops mtk_n
+@@ -4012,6 +4189,7 @@ static const struct net_device_ops mtk_n
.ndo_setup_tc = mtk_eth_setup_tc,
.ndo_bpf = mtk_xdp,
.ndo_xdp_xmit = mtk_xdp_xmit,
};
static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
-@@ -4018,6 +4196,7 @@ static int mtk_add_mac(struct mtk_eth *e
+@@ -4021,6 +4199,7 @@ static int mtk_add_mac(struct mtk_eth *e
struct phylink *phylink;
struct mtk_mac *mac;
int id, err;
if (!_id) {
dev_err(eth->dev, "missing mac id\n");
-@@ -4035,7 +4214,10 @@ static int mtk_add_mac(struct mtk_eth *e
+@@ -4038,7 +4217,10 @@ static int mtk_add_mac(struct mtk_eth *e
return -EINVAL;
}
if (!eth->netdev[id]) {
dev_err(eth->dev, "alloc_etherdev failed\n");
return -ENOMEM;
-@@ -4143,6 +4325,11 @@ static int mtk_add_mac(struct mtk_eth *e
+@@ -4146,6 +4328,11 @@ static int mtk_add_mac(struct mtk_eth *e
else
eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
}
skb_record_rx_queue(skb, 0);
-@@ -2856,15 +2863,30 @@ static netdev_features_t mtk_fix_feature
+@@ -2859,15 +2866,30 @@ static netdev_features_t mtk_fix_feature
static int mtk_set_features(struct net_device *dev, netdev_features_t features)
{
}
/* wait for DMA to finish whatever it is doing before we start using it again */
-@@ -3161,11 +3183,45 @@ found:
+@@ -3164,11 +3186,45 @@ found:
return NOTIFY_DONE;
}
err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
if (err) {
-@@ -3686,6 +3742,10 @@ static int mtk_hw_init(struct mtk_eth *e
+@@ -3689,6 +3745,10 @@ static int mtk_hw_init(struct mtk_eth *e
*/
val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
/* Enable RX VLan Offloading */
mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
-@@ -3905,6 +3965,12 @@ static int mtk_free_dev(struct mtk_eth *
+@@ -3908,6 +3968,12 @@ static int mtk_free_dev(struct mtk_eth *
free_netdev(eth->netdev[i]);
}
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -3199,7 +3199,8 @@ static int mtk_open(struct net_device *d
+@@ -3202,7 +3202,8 @@ static int mtk_open(struct net_device *d
struct mtk_eth *eth = mac->hw;
int i, err;
for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
struct metadata_dst *md_dst = eth->dsa_meta[i];
-@@ -3216,7 +3217,8 @@ static int mtk_open(struct net_device *d
+@@ -3219,7 +3220,8 @@ static int mtk_open(struct net_device *d
}
} else {
/* Hardware special tag parsing needs to be disabled if at least
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -3134,7 +3134,7 @@ static void mtk_gdm_config(struct mtk_et
+@@ -3137,7 +3137,7 @@ static void mtk_gdm_config(struct mtk_et
val |= config;
val |= MTK_GDMA_SPECIAL_TAG;
mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
-@@ -3199,8 +3199,7 @@ static int mtk_open(struct net_device *d
+@@ -3202,8 +3202,7 @@ static int mtk_open(struct net_device *d
struct mtk_eth *eth = mac->hw;
int i, err;
for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
struct metadata_dst *md_dst = eth->dsa_meta[i];
-@@ -3217,8 +3216,7 @@ static int mtk_open(struct net_device *d
+@@ -3220,8 +3219,7 @@ static int mtk_open(struct net_device *d
}
} else {
/* Hardware special tag parsing needs to be disabled if at least
mtk_eth_path_name(path), __func__, updated);
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -4801,6 +4801,26 @@ static const struct mtk_soc_data mt7629_
+@@ -4804,6 +4804,26 @@ static const struct mtk_soc_data mt7629_
},
};
static const struct mtk_soc_data mt7986_data = {
.reg_map = &mt7986_reg_map,
.ana_rgc3 = 0x128,
-@@ -4843,6 +4863,7 @@ const struct of_device_id of_mtk_match[]
+@@ -4846,6 +4866,7 @@ const struct of_device_id of_mtk_match[]
{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
}
return NULL;
-@@ -4014,8 +4015,17 @@ static int mtk_unreg_dev(struct mtk_eth
+@@ -4017,8 +4018,17 @@ static int mtk_unreg_dev(struct mtk_eth
return 0;
}
mtk_unreg_dev(eth);
mtk_free_dev(eth);
cancel_work_sync(ð->pending_work);
-@@ -4455,6 +4465,36 @@ void mtk_eth_set_dma_device(struct mtk_e
+@@ -4458,6 +4468,36 @@ void mtk_eth_set_dma_device(struct mtk_e
rtnl_unlock();
}
static int mtk_probe(struct platform_device *pdev)
{
struct resource *res = NULL;
-@@ -4518,13 +4558,7 @@ static int mtk_probe(struct platform_dev
+@@ -4521,13 +4561,7 @@ static int mtk_probe(struct platform_dev
}
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
if (err)
return err;
-@@ -4535,14 +4569,17 @@ static int mtk_probe(struct platform_dev
+@@ -4538,14 +4572,17 @@ static int mtk_probe(struct platform_dev
"mediatek,pctl");
if (IS_ERR(eth->pctl)) {
dev_err(&pdev->dev, "no pctl regmap found\n");
}
if (eth->soc->offload_version) {
-@@ -4701,6 +4738,8 @@ err_deinit_hw:
+@@ -4704,6 +4741,8 @@ err_deinit_hw:
mtk_hw_deinit(eth);
err_wed_exit:
mtk_wed_exit();
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -4689,8 +4689,8 @@ static int mtk_probe(struct platform_dev
+@@ -4692,8 +4692,8 @@ static int mtk_probe(struct platform_dev
for (i = 0; i < num_ppe; i++) {
u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400;
if (!eth->ppe[i]) {
err = -ENOMEM;
goto err_deinit_ppe;
-@@ -4814,6 +4814,7 @@ static const struct mtk_soc_data mt7622_
+@@ -4817,6 +4817,7 @@ static const struct mtk_soc_data mt7622_
.required_pctl = false,
.offload_version = 2,
.hash_offset = 2,
.foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
-@@ -4851,6 +4852,7 @@ static const struct mtk_soc_data mt7629_
+@@ -4854,6 +4855,7 @@ static const struct mtk_soc_data mt7629_
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7629_CLKS_BITMAP,
.required_pctl = false,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
-@@ -4871,6 +4873,7 @@ static const struct mtk_soc_data mt7981_
+@@ -4874,6 +4876,7 @@ static const struct mtk_soc_data mt7981_
.offload_version = 2,
.hash_offset = 4,
.foe_entry_size = sizeof(struct mtk_foe_entry),
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma_v2),
.rxd_size = sizeof(struct mtk_rx_dma_v2),
-@@ -4891,6 +4894,7 @@ static const struct mtk_soc_data mt7986_
+@@ -4894,6 +4897,7 @@ static const struct mtk_soc_data mt7986_
.offload_version = 2,
.hash_offset = 4,
.foe_entry_size = sizeof(struct mtk_foe_entry),
skb_record_rx_queue(skb, 0);
napi_gro_receive(napi, skb);
-@@ -2887,29 +2870,11 @@ static netdev_features_t mtk_fix_feature
+@@ -2890,29 +2873,11 @@ static netdev_features_t mtk_fix_feature
static int mtk_set_features(struct net_device *dev, netdev_features_t features)
{
return 0;
}
-@@ -3223,30 +3188,6 @@ static int mtk_open(struct net_device *d
+@@ -3226,30 +3191,6 @@ static int mtk_open(struct net_device *d
struct mtk_eth *eth = mac->hw;
int i, err;
err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
if (err) {
netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
-@@ -3285,6 +3226,35 @@ static int mtk_open(struct net_device *d
+@@ -3288,6 +3229,35 @@ static int mtk_open(struct net_device *d
phylink_start(mac->phylink);
netif_tx_start_all_queues(dev);
return 0;
}
-@@ -3769,10 +3739,9 @@ static int mtk_hw_init(struct mtk_eth *e
+@@ -3772,10 +3742,9 @@ static int mtk_hw_init(struct mtk_eth *e
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
/* set interrupt delays based on current Net DIM sample */
mtk_dim_rx(ð->rx_dim.work);
-@@ -4412,7 +4381,7 @@ static int mtk_add_mac(struct mtk_eth *e
+@@ -4415,7 +4384,7 @@ static int mtk_add_mac(struct mtk_eth *e
eth->netdev[id]->hw_features |= NETIF_F_LRO;
eth->netdev[id]->vlan_features = eth->soc->hw_features &
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -4763,7 +4763,7 @@ static const struct mtk_soc_data mt7621_
+@@ -4766,7 +4766,7 @@ static const struct mtk_soc_data mt7621_
.required_pctl = false,
.offload_version = 1,
.hash_offset = 2,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
-@@ -4784,7 +4784,7 @@ static const struct mtk_soc_data mt7622_
+@@ -4787,7 +4787,7 @@ static const struct mtk_soc_data mt7622_
.offload_version = 2,
.hash_offset = 2,
.has_accounting = true,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
-@@ -4803,7 +4803,7 @@ static const struct mtk_soc_data mt7623_
+@@ -4806,7 +4806,7 @@ static const struct mtk_soc_data mt7623_
.required_pctl = true,
.offload_version = 1,
.hash_offset = 2,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
-@@ -4841,8 +4841,8 @@ static const struct mtk_soc_data mt7981_
+@@ -4844,8 +4844,8 @@ static const struct mtk_soc_data mt7981_
.required_pctl = false,
.offload_version = 2,
.hash_offset = 4,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma_v2),
.rxd_size = sizeof(struct mtk_rx_dma_v2),
-@@ -4862,8 +4862,8 @@ static const struct mtk_soc_data mt7986_
+@@ -4865,8 +4865,8 @@ static const struct mtk_soc_data mt7986_
.required_pctl = false,
.offload_version = 2,
.hash_offset = 4,
/* mt7623_pad_clk_setup */
for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
-@@ -4340,13 +4312,19 @@ static int mtk_add_mac(struct mtk_eth *e
+@@ -4343,13 +4315,19 @@ static int mtk_add_mac(struct mtk_eth *e
mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && !mac->id)
__set_bit(PHY_INTERFACE_MODE_TRGMII,
-@@ -4804,6 +4782,7 @@ static const struct mtk_soc_data mt7623_
+@@ -4807,6 +4785,7 @@ static const struct mtk_soc_data mt7623_
.offload_version = 1,
.hash_offset = 2,
.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
.mac_config = mtk_mac_config,
.mac_finish = mtk_mac_finish,
.mac_link_down = mtk_mac_link_down,
-@@ -4307,8 +4274,6 @@ static int mtk_add_mac(struct mtk_eth *e
+@@ -4310,8 +4277,6 @@ static int mtk_add_mac(struct mtk_eth *e
mac->phylink_config.dev = ð->netdev[id]->dev;
mac->phylink_config.type = PHYLINK_NETDEV;
rxd->rxd5 = 0;
rxd->rxd6 = 0;
rxd->rxd7 = 0;
-@@ -3023,7 +3023,7 @@ static int mtk_start_dma(struct mtk_eth
+@@ -3026,7 +3026,7 @@ static int mtk_start_dma(struct mtk_eth
MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE;
val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
MTK_CHK_DDONE_EN | MTK_LEAKY_BUCKET_EN;
-@@ -3165,7 +3165,7 @@ static int mtk_open(struct net_device *d
+@@ -3168,7 +3168,7 @@ static int mtk_open(struct net_device *d
phylink_start(mac->phylink);
netif_tx_start_all_queues(dev);
return 0;
if (mtk_uses_dsa(dev) && !eth->prog) {
-@@ -3430,7 +3430,7 @@ static void mtk_hw_reset(struct mtk_eth
+@@ -3433,7 +3433,7 @@ static void mtk_hw_reset(struct mtk_eth
{
u32 val;
regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
val = RSTCTRL_PPE0_V2;
} else {
-@@ -3442,7 +3442,7 @@ static void mtk_hw_reset(struct mtk_eth
+@@ -3445,7 +3445,7 @@ static void mtk_hw_reset(struct mtk_eth
ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val);
regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
0x3ffffff);
}
-@@ -3468,7 +3468,7 @@ static void mtk_hw_warm_reset(struct mtk
+@@ -3471,7 +3471,7 @@ static void mtk_hw_warm_reset(struct mtk
return;
}
rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V2;
else
rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0;
-@@ -3638,7 +3638,7 @@ static int mtk_hw_init(struct mtk_eth *e
+@@ -3641,7 +3641,7 @@ static int mtk_hw_init(struct mtk_eth *e
else
mtk_hw_reset(eth);
/* Set FE to PDMAv2 if necessary */
val = mtk_r32(eth, MTK_FE_GLO_MISC);
mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC);
-@@ -3675,7 +3675,7 @@ static int mtk_hw_init(struct mtk_eth *e
+@@ -3678,7 +3678,7 @@ static int mtk_hw_init(struct mtk_eth *e
*/
val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
-@@ -3697,7 +3697,7 @@ static int mtk_hw_init(struct mtk_eth *e
+@@ -3700,7 +3700,7 @@ static int mtk_hw_init(struct mtk_eth *e
mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
/* PSE should not drop port8 and port9 packets from WDMA Tx */
mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
-@@ -4486,7 +4486,7 @@ static int mtk_probe(struct platform_dev
+@@ -4489,7 +4489,7 @@ static int mtk_probe(struct platform_dev
}
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
err = -EINVAL;
-@@ -4594,9 +4594,8 @@ static int mtk_probe(struct platform_dev
+@@ -4597,9 +4597,8 @@ static int mtk_probe(struct platform_dev
}
if (eth->soc->offload_version) {
num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe);
for (i = 0; i < num_ppe; i++) {
u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400;
-@@ -4688,6 +4687,7 @@ static const struct mtk_soc_data mt2701_
+@@ -4691,6 +4690,7 @@ static const struct mtk_soc_data mt2701_
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
-@@ -4704,6 +4704,7 @@ static const struct mtk_soc_data mt7621_
+@@ -4707,6 +4707,7 @@ static const struct mtk_soc_data mt7621_
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7621_CLKS_BITMAP,
.required_pctl = false,
.offload_version = 1,
.hash_offset = 2,
.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
-@@ -4724,6 +4725,7 @@ static const struct mtk_soc_data mt7622_
+@@ -4727,6 +4728,7 @@ static const struct mtk_soc_data mt7622_
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7622_CLKS_BITMAP,
.required_pctl = false,
.offload_version = 2,
.hash_offset = 2,
.has_accounting = true,
-@@ -4744,6 +4746,7 @@ static const struct mtk_soc_data mt7623_
+@@ -4747,6 +4749,7 @@ static const struct mtk_soc_data mt7623_
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
.offload_version = 1,
.hash_offset = 2,
.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
-@@ -4766,6 +4769,7 @@ static const struct mtk_soc_data mt7629_
+@@ -4769,6 +4772,7 @@ static const struct mtk_soc_data mt7629_
.required_clks = MT7629_CLKS_BITMAP,
.required_pctl = false,
.has_accounting = true,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
-@@ -4783,6 +4787,7 @@ static const struct mtk_soc_data mt7981_
+@@ -4786,6 +4790,7 @@ static const struct mtk_soc_data mt7981_
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7981_CLKS_BITMAP,
.required_pctl = false,
.offload_version = 2,
.hash_offset = 4,
.has_accounting = true,
-@@ -4804,6 +4809,7 @@ static const struct mtk_soc_data mt7986_
+@@ -4807,6 +4812,7 @@ static const struct mtk_soc_data mt7986_
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7986_CLKS_BITMAP,
.required_pctl = false,
.offload_version = 2,
.hash_offset = 4,
.has_accounting = true,
-@@ -4824,6 +4830,7 @@ static const struct mtk_soc_data rt5350_
+@@ -4827,6 +4833,7 @@ static const struct mtk_soc_data rt5350_
.hw_features = MTK_HW_FEATURES_MT7628,
.required_clks = MT7628_CLKS_BITMAP,
.required_pctl = false,
!eth->netdev[mac]))
goto release_desc;
-@@ -2897,7 +2897,7 @@ static void mtk_dma_free(struct mtk_eth
+@@ -2900,7 +2900,7 @@ static void mtk_dma_free(struct mtk_eth
const struct mtk_soc_data *soc = eth->soc;
int i;
if (eth->netdev[i])
netdev_reset_queue(eth->netdev[i]);
if (eth->scratch_ring) {
-@@ -3051,8 +3051,13 @@ static void mtk_gdm_config(struct mtk_et
+@@ -3054,8 +3054,13 @@ static void mtk_gdm_config(struct mtk_et
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
return;
/* default setup the forward port to send frame to PDMA */
val &= ~0xffff;
-@@ -3062,7 +3067,7 @@ static void mtk_gdm_config(struct mtk_et
+@@ -3065,7 +3070,7 @@ static void mtk_gdm_config(struct mtk_et
val |= config;
val |= MTK_GDMA_SPECIAL_TAG;
mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
-@@ -3659,15 +3664,15 @@ static int mtk_hw_init(struct mtk_eth *e
+@@ -3662,15 +3667,15 @@ static int mtk_hw_init(struct mtk_eth *e
* up with the more appropriate value when mtk_mac_config call is being
* invoked.
*/
}
/* Indicates CDM to parse the MTK special tag from CPU
-@@ -3847,7 +3852,7 @@ static void mtk_pending_work(struct work
+@@ -3850,7 +3855,7 @@ static void mtk_pending_work(struct work
mtk_prepare_for_reset(eth);
/* stop all devices to make sure that dma is properly shut down */
if (!eth->netdev[i] || !netif_running(eth->netdev[i]))
continue;
-@@ -3863,8 +3868,8 @@ static void mtk_pending_work(struct work
+@@ -3866,8 +3871,8 @@ static void mtk_pending_work(struct work
mtk_hw_init(eth, true);
/* restart DMA and enable IRQs */
continue;
if (mtk_open(eth->netdev[i])) {
-@@ -3891,7 +3896,7 @@ static int mtk_free_dev(struct mtk_eth *
+@@ -3894,7 +3899,7 @@ static int mtk_free_dev(struct mtk_eth *
{
int i;
if (!eth->netdev[i])
continue;
free_netdev(eth->netdev[i]);
-@@ -3910,7 +3915,7 @@ static int mtk_unreg_dev(struct mtk_eth
+@@ -3913,7 +3918,7 @@ static int mtk_unreg_dev(struct mtk_eth
{
int i;
struct mtk_mac *mac;
if (!eth->netdev[i])
continue;
-@@ -4211,7 +4216,7 @@ static int mtk_add_mac(struct mtk_eth *e
+@@ -4214,7 +4219,7 @@ static int mtk_add_mac(struct mtk_eth *e
}
id = be32_to_cpup(_id);
dev_err(eth->dev, "%d is not a valid mac id\n", id);
return -EINVAL;
}
-@@ -4356,7 +4361,7 @@ void mtk_eth_set_dma_device(struct mtk_e
+@@ -4359,7 +4364,7 @@ void mtk_eth_set_dma_device(struct mtk_e
rtnl_lock();
dev = eth->netdev[i];
if (!dev || !(dev->flags & IFF_UP))
-@@ -4662,7 +4667,7 @@ static int mtk_remove(struct platform_de
+@@ -4665,7 +4670,7 @@ static int mtk_remove(struct platform_de
int i;
/* stop all devices to make sure that dma is properly shut down */
budget--;
}
-@@ -3702,7 +3730,24 @@ static int mtk_hw_init(struct mtk_eth *e
+@@ -3705,7 +3733,24 @@ static int mtk_hw_init(struct mtk_eth *e
mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
/* PSE should not drop port8 and port9 packets from WDMA Tx */
mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
-@@ -4264,7 +4309,11 @@ static int mtk_add_mac(struct mtk_eth *e
+@@ -4267,7 +4312,11 @@ static int mtk_add_mac(struct mtk_eth *e
}
spin_lock_init(&mac->hw_stats->stats_lock);
u64_stats_init(&mac->hw_stats->syncp);
data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
WRITE_ONCE(desc->txd4, data);
-@@ -4358,6 +4487,17 @@ static int mtk_add_mac(struct mtk_eth *e
+@@ -4361,6 +4490,17 @@ static int mtk_add_mac(struct mtk_eth *e
mac->phylink_config.supported_interfaces);
}
phylink = phylink_create(&mac->phylink_config,
of_fwnode_handle(mac->of_node),
phy_mode, &mtk_phylink_ops);
-@@ -4878,6 +5018,24 @@ static const struct mtk_soc_data mt7986_
+@@ -4881,6 +5021,24 @@ static const struct mtk_soc_data mt7986_
},
};
static const struct mtk_soc_data rt5350_data = {
.reg_map = &mt7628_reg_map,
.caps = MT7628_CAPS,
-@@ -4896,14 +5054,15 @@ static const struct mtk_soc_data rt5350_
+@@ -4899,14 +5057,15 @@ static const struct mtk_soc_data rt5350_
};
const struct of_device_id of_mtk_match[] = {
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -5026,6 +5026,9 @@ static const struct mtk_soc_data mt7988_
+@@ -5029,6 +5029,9 @@ static const struct mtk_soc_data mt7988_
.required_clks = MT7988_CLKS_BITMAP,
.required_pctl = false,
.version = 3,
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -5028,6 +5028,7 @@ static const struct mtk_soc_data mt7988_
+@@ -5031,6 +5031,7 @@ static const struct mtk_soc_data mt7988_
.version = 3,
.offload_version = 2,
.hash_offset = 4,
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -3592,19 +3592,34 @@ static void mtk_hw_reset(struct mtk_eth
+@@ -3595,19 +3595,34 @@ static void mtk_hw_reset(struct mtk_eth
{
u32 val;
regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
0x3ffffff);
}
-@@ -3630,13 +3645,21 @@ static void mtk_hw_warm_reset(struct mtk
+@@ -3633,13 +3648,21 @@ static void mtk_hw_warm_reset(struct mtk
return;
}
regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, rst_mask);
-@@ -3988,11 +4011,17 @@ static void mtk_prepare_for_reset(struct
+@@ -3991,11 +4014,17 @@ static void mtk_prepare_for_reset(struct
u32 val;
int i;
/* adjust PPE configurations to prepare for reset */
for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
-@@ -4053,11 +4082,18 @@ static void mtk_pending_work(struct work
+@@ -4056,11 +4085,18 @@ static void mtk_pending_work(struct work
}
}
dma_free_coherent(eth->dma_dev,
ring->dma_size * eth->soc->txrx.rxd_size,
ring->dma, ring->phys);
-@@ -3057,7 +3081,7 @@ static void mtk_dma_free(struct mtk_eth
+@@ -3060,7 +3084,7 @@ static void mtk_dma_free(struct mtk_eth
for (i = 0; i < MTK_MAX_DEVS; i++)
if (eth->netdev[i])
netdev_reset_queue(eth->netdev[i]);
dma_free_coherent(eth->dma_dev,
MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
eth->scratch_ring, eth->phy_scratch_ring);
-@@ -3065,13 +3089,13 @@ static void mtk_dma_free(struct mtk_eth
+@@ -3068,13 +3092,13 @@ static void mtk_dma_free(struct mtk_eth
eth->phy_scratch_ring = 0;
}
mtk_tx_clean(eth);
}
kfree(eth->scratch_head);
-@@ -4639,7 +4663,7 @@ static int mtk_sgmii_init(struct mtk_eth
+@@ -4642,7 +4666,7 @@ static int mtk_sgmii_init(struct mtk_eth
static int mtk_probe(struct platform_device *pdev)
{
struct device_node *mac_np;
struct mtk_eth *eth;
int err, i;
-@@ -4659,6 +4683,20 @@ static int mtk_probe(struct platform_dev
+@@ -4662,6 +4686,20 @@ static int mtk_probe(struct platform_dev
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
eth->ip_align = NET_IP_ALIGN;
spin_lock_init(ð->page_lock);
spin_lock_init(ð->tx_irq_lock);
spin_lock_init(ð->rx_irq_lock);
-@@ -4722,6 +4760,18 @@ static int mtk_probe(struct platform_dev
+@@ -4725,6 +4763,18 @@ static int mtk_probe(struct platform_dev
err = -EINVAL;
goto err_destroy_sgmii;
}
ring->buf_size, DMA_FROM_DEVICE);
mtk_rx_put_buff(ring, ring->data[i], false);
}
-@@ -4697,6 +4715,14 @@ static int mtk_probe(struct platform_dev
+@@ -4700,6 +4718,14 @@ static int mtk_probe(struct platform_dev
}
}
*/
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
-@@ -3032,6 +3032,10 @@ static inline int pskb_trim(struct sk_bu
+@@ -3040,6 +3040,10 @@ static inline int pskb_trim(struct sk_bu
return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}
/**
* pskb_trim_unique - remove end from a paged unique (not cloned) buffer
* @skb: buffer to alter
-@@ -3181,16 +3185,6 @@ static inline struct sk_buff *dev_alloc_
+@@ -3189,16 +3193,6 @@ static inline struct sk_buff *dev_alloc_
}
#include <net/protocol.h>
#include <net/dst.h>
-@@ -707,6 +708,22 @@ skb_fail:
+@@ -709,6 +710,22 @@ skb_fail:
}
EXPORT_SYMBOL(__napi_alloc_skb);
#include <linux/crc32.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>
-@@ -6893,6 +6894,22 @@ static void rtl_tally_reset(struct r8152
+@@ -6896,6 +6897,22 @@ static void rtl_tally_reset(struct r8152
ocp_write_word(tp, MCU_TYPE_PLA, PLA_RSTTALLY, ocp_data);
}
static void r8152b_init(struct r8152 *tp)
{
u32 ocp_data;
-@@ -6934,6 +6951,8 @@ static void r8152b_init(struct r8152 *tp
+@@ -6937,6 +6954,8 @@ static void r8152b_init(struct r8152 *tp
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
}
static void r8153_init(struct r8152 *tp)
-@@ -7074,6 +7093,8 @@ static void r8153_init(struct r8152 *tp)
+@@ -7077,6 +7096,8 @@ static void r8153_init(struct r8152 *tp)
tp->coalesce = COALESCE_SLOW;
break;
}
}
static void r8153b_init(struct r8152 *tp)
-@@ -7156,6 +7177,8 @@ static void r8153b_init(struct r8152 *tp
+@@ -7159,6 +7180,8 @@ static void r8153b_init(struct r8152 *tp
rtl_tally_reset(tp);
tp->coalesce = 15000; /* 15 us */
INDIRECT_CALLABLE_DECLARE(struct dst_entry *ip6_dst_check(struct dst_entry *,
u32));
INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
-@@ -2179,9 +2193,11 @@ static void __sk_free(struct sock *sk)
+@@ -2180,9 +2194,11 @@ static void __sk_free(struct sock *sk)
if (likely(sk->sk_net_refcnt))
sock_inuse_add(sock_net(sk), -1);
--- a/net/core/sock.c
+++ b/net/core/sock.c
-@@ -4104,6 +4104,8 @@ static __net_initdata struct pernet_oper
+@@ -4105,6 +4105,8 @@ static __net_initdata struct pernet_oper
static int __init proto_init(void)
{
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
-@@ -3031,11 +3031,13 @@ static const struct seq_operations fib_r
+@@ -3032,11 +3032,13 @@ static const struct seq_operations fib_r
int __net_init fib_proc_init(struct net *net)
{
fib_triestat_seq_show, NULL))
goto out2;
-@@ -3046,17 +3048,21 @@ int __net_init fib_proc_init(struct net
+@@ -3047,17 +3049,21 @@ int __net_init fib_proc_init(struct net
return 0;
out3:
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
-@@ -2998,7 +2998,7 @@ static inline int pskb_network_may_pull(
+@@ -3006,7 +3006,7 @@ static inline int pskb_network_may_pull(
* NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
*/
#ifndef NET_SKB_PAD
static void rt_fibinfo_free(struct rtable __rcu **rtp)
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
-@@ -2778,6 +2778,7 @@ static const char *const rtn_type_names[
+@@ -2779,6 +2779,7 @@ static const char *const rtn_type_names[
[RTN_THROW] = "THROW",
[RTN_NAT] = "NAT",
[RTN_XRESOLVE] = "XRESOLVE",
static const struct rt6_info ip6_blk_hole_entry_template = {
.dst = {
.__refcnt = ATOMIC_INIT(1),
-@@ -1036,6 +1050,7 @@ static const int fib6_prop[RTN_MAX + 1]
+@@ -1039,6 +1053,7 @@ static const int fib6_prop[RTN_MAX + 1]
[RTN_BLACKHOLE] = -EINVAL,
[RTN_UNREACHABLE] = -EHOSTUNREACH,
[RTN_PROHIBIT] = -EACCES,
[RTN_THROW] = -EAGAIN,
[RTN_NAT] = -EINVAL,
[RTN_XRESOLVE] = -EINVAL,
-@@ -1071,6 +1086,10 @@ static void ip6_rt_init_dst_reject(struc
+@@ -1074,6 +1089,10 @@ static void ip6_rt_init_dst_reject(struc
rt->dst.output = ip6_pkt_prohibit_out;
rt->dst.input = ip6_pkt_prohibit;
break;
case RTN_THROW:
case RTN_UNREACHABLE:
default:
-@@ -4540,6 +4559,17 @@ static int ip6_pkt_prohibit_out(struct n
+@@ -4543,6 +4562,17 @@ static int ip6_pkt_prohibit_out(struct n
return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
}
/*
* Allocate a dst for local (unicast / anycast) address.
*/
-@@ -5033,7 +5063,8 @@ static int rtm_to_fib6_config(struct sk_
+@@ -5036,7 +5066,8 @@ static int rtm_to_fib6_config(struct sk_
if (rtm->rtm_type == RTN_UNREACHABLE ||
rtm->rtm_type == RTN_BLACKHOLE ||
rtm->rtm_type == RTN_PROHIBIT ||
cfg->fc_flags |= RTF_REJECT;
if (rtm->rtm_type == RTN_LOCAL)
-@@ -6287,6 +6318,8 @@ static int ip6_route_dev_notify(struct n
+@@ -6290,6 +6321,8 @@ static int ip6_route_dev_notify(struct n
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
net->ipv6.ip6_prohibit_entry->dst.dev = dev;
net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
#endif
-@@ -6298,6 +6331,7 @@ static int ip6_route_dev_notify(struct n
+@@ -6301,6 +6334,7 @@ static int ip6_route_dev_notify(struct n
in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev);
in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev);
#endif
}
-@@ -6489,6 +6523,8 @@ static int __net_init ip6_route_net_init
+@@ -6492,6 +6526,8 @@ static int __net_init ip6_route_net_init
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
net->ipv6.fib6_has_custom_rules = false;
net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
sizeof(*net->ipv6.ip6_prohibit_entry),
GFP_KERNEL);
-@@ -6499,11 +6535,21 @@ static int __net_init ip6_route_net_init
+@@ -6502,11 +6538,21 @@ static int __net_init ip6_route_net_init
ip6_template_metrics, true);
INIT_LIST_HEAD(&net->ipv6.ip6_prohibit_entry->rt6i_uncached);
net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
ip6_template_metrics, true);
-@@ -6530,6 +6576,8 @@ out:
+@@ -6533,6 +6579,8 @@ out:
return ret;
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
out_ip6_prohibit_entry:
kfree(net->ipv6.ip6_prohibit_entry);
out_ip6_null_entry:
-@@ -6549,6 +6597,7 @@ static void __net_exit ip6_route_net_exi
+@@ -6552,6 +6600,7 @@ static void __net_exit ip6_route_net_exi
kfree(net->ipv6.ip6_null_entry);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
kfree(net->ipv6.ip6_prohibit_entry);
kfree(net->ipv6.ip6_blk_hole_entry);
#endif
dst_entries_destroy(&net->ipv6.ip6_dst_ops);
-@@ -6632,6 +6681,9 @@ void __init ip6_route_init_special_entri
+@@ -6635,6 +6684,9 @@ void __init ip6_route_init_special_entri
init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
#endif
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
-@@ -964,6 +964,7 @@ struct sk_buff {
+@@ -972,6 +972,7 @@ struct sk_buff {
#ifdef CONFIG_IPV6_NDISC_NODETYPE
__u8 ndisc_nodetype:2;
#endif
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -3152,8 +3152,8 @@ static irqreturn_t mtk_handle_irq_rx(int
+@@ -3155,8 +3155,8 @@ static irqreturn_t mtk_handle_irq_rx(int
eth->rx_events++;
if (likely(napi_schedule_prep(&eth->rx_napi))) {
}
return IRQ_HANDLED;
-@@ -3165,8 +3165,8 @@ static irqreturn_t mtk_handle_irq_tx(int
+@@ -3168,8 +3168,8 @@ static irqreturn_t mtk_handle_irq_tx(int
eth->tx_events++;
if (likely(napi_schedule_prep(&eth->tx_napi))) {
}
return IRQ_HANDLED;
-@@ -4938,6 +4938,8 @@ static int mtk_probe(struct platform_dev
+@@ -4941,6 +4941,8 @@ static int mtk_probe(struct platform_dev
* for NAPI to work
*/
init_dummy_netdev(&eth->dummy_dev);
switch (speed) {
case SPEED_2500:
case SPEED_1000:
-@@ -3345,6 +3346,9 @@ found:
+@@ -3348,6 +3349,9 @@ found:
if (dp->index >= MTK_QDMA_NUM_QUEUES)
return NOTIFY_DONE;
static const struct phylink_mac_ops mtk_phylink_ops = {
.validate = phylink_generic_validate,
.mac_select_pcs = mtk_mac_select_pcs,
-@@ -4613,8 +4727,21 @@ static int mtk_add_mac(struct mtk_eth *e
+@@ -4616,8 +4730,21 @@ static int mtk_add_mac(struct mtk_eth *e
phy_interface_zero(mac->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
mac->phylink_config.supported_interfaces);
phylink = phylink_create(&mac->phylink_config,
of_fwnode_handle(mac->of_node),
phy_mode, &mtk_phylink_ops);
-@@ -4807,6 +4934,13 @@ static int mtk_probe(struct platform_dev
+@@ -4810,6 +4937,13 @@ static int mtk_probe(struct platform_dev
if (err)
return err;
return 0;
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
-@@ -187,6 +187,7 @@ static int phylink_interface_max_speed(p
+@@ -192,6 +192,7 @@ static int phylink_interface_max_speed(p
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_QSGMII:
case PHY_INTERFACE_MODE_QUSGMII:
case PHY_INTERFACE_MODE_SGMII:
-@@ -448,6 +449,7 @@ unsigned long phylink_get_capabilities(p
+@@ -453,6 +454,7 @@ unsigned long phylink_get_capabilities(p
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_QSGMII:
case PHY_INTERFACE_MODE_QUSGMII:
case PHY_INTERFACE_MODE_SGMII:
-@@ -814,6 +816,7 @@ static int phylink_parse_mode(struct phy
+@@ -819,6 +821,7 @@ static int phylink_parse_mode(struct phy
switch (pl->link_config.interface) {
case PHY_INTERFACE_MODE_SGMII:
+#endif
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
-@@ -4580,6 +4580,9 @@ enum skb_ext_id {
+@@ -4588,6 +4588,9 @@ enum skb_ext_id {
#if IS_ENABLED(CONFIG_MCTP_FLOWS)
SKB_EXT_MCTP,
#endif
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
-@@ -4510,6 +4514,9 @@ static const u8 skb_ext_type_len[] = {
+@@ -4515,6 +4519,9 @@ static const u8 skb_ext_type_len[] = {
#if IS_ENABLED(CONFIG_MCTP_FLOWS)
[SKB_EXT_MCTP] = SKB_EXT_CHUNKSIZEOF(struct mctp_flow),
#endif
};
static __always_inline unsigned int skb_ext_total_length(void)
-@@ -4530,6 +4537,9 @@ static __always_inline unsigned int skb_
+@@ -4535,6 +4542,9 @@ static __always_inline unsigned int skb_
#if IS_ENABLED(CONFIG_MCTP_FLOWS)
skb_ext_type_len[SKB_EXT_MCTP] +
#endif
--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
-@@ -591,6 +591,54 @@
+@@ -594,6 +594,54 @@
status = "disabled";
};
--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
-@@ -591,6 +591,82 @@
+@@ -594,6 +594,82 @@
status = "disabled";
};
/ {
#address-cells = <1>;
-@@ -724,22 +725,38 @@
+@@ -727,22 +728,38 @@
ethphy0: ethernet-phy@0 {
reg = <0>;
+++ /dev/null
-From 23316be8a9d450f33a21f1efe7d89570becbec58 Mon Sep 17 00:00:00 2001
-From: Christian Marangi <ansuelsmth@gmail.com>
-Date: Sun, 16 Jul 2023 04:28:04 +0200
-Subject: [PATCH] hwspinlock: qcom: add missing regmap config for SFPB MMIO
- implementation
-
-Commit 5d4753f741d8 ("hwspinlock: qcom: add support for MMIO on older
-SoCs") introduced and made regmap_config mandatory in the of_data struct
-but didn't add the regmap_config for sfpb based devices.
-
-SFPB based devices can both use the legacy syscon way to probe or the
-new MMIO way and currently device that use the MMIO way are broken as
-they lack the definition of the now required regmap_config and always
-return -EINVAL (and indirectly makes fail probing everything that
-depends on it, smem, nandc with smem-parser...)
-
-Fix this by correctly adding the missing regmap_config and restore
-function of hwspinlock on SFPB based devices with MMIO implementation.
-
-Cc: stable@vger.kernel.org
-Fixes: 5d4753f741d8 ("hwspinlock: qcom: add support for MMIO on older SoCs")
-Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
-Link: https://lore.kernel.org/r/20230716022804.21239-1-ansuelsmth@gmail.com
-Signed-off-by: Bjorn Andersson <andersson@kernel.org>
----
- drivers/hwspinlock/qcom_hwspinlock.c | 9 +++++++++
- 1 file changed, 9 insertions(+)
-
---- a/drivers/hwspinlock/qcom_hwspinlock.c
-+++ b/drivers/hwspinlock/qcom_hwspinlock.c
-@@ -69,9 +69,18 @@ static const struct hwspinlock_ops qcom_
- .unlock = qcom_hwspinlock_unlock,
- };
-
-+static const struct regmap_config sfpb_mutex_config = {
-+ .reg_bits = 32,
-+ .reg_stride = 4,
-+ .val_bits = 32,
-+ .max_register = 0x100,
-+ .fast_io = true,
-+};
-+
- static const struct qcom_hwspinlock_of_data of_sfpb_mutex = {
- .offset = 0x4,
- .stride = 0x4,
-+ .regmap_config = &sfpb_mutex_config,
- };
-
- static const struct regmap_config tcsr_msm8226_mutex_config = {
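For context on the deleted patch above, a minimal sketch of why a NULL
.regmap_config in the of_data made probing fail with -EINVAL. This is not
the upstream driver; all demo_* names are hypothetical, only the regmap
and platform-device calls are real kernel APIs:

#include <linux/err.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

struct demo_of_data {
	u32 offset;
	u32 stride;
	const struct regmap_config *regmap_config;
};

static int demo_probe(struct platform_device *pdev)
{
	const struct demo_of_data *data = of_device_get_match_data(&pdev->dev);
	void __iomem *base;
	struct regmap *regmap;

	if (!data)
		return -EINVAL;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* The regmap core rejects a NULL config with -EINVAL, so an
	 * of_data entry lacking .regmap_config (as of_sfpb_mutex did
	 * before the fix) can never probe via the MMIO path. */
	regmap = devm_regmap_init_mmio(&pdev->dev, base, data->regmap_config);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	return 0;
}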
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
-@@ -1348,6 +1348,7 @@ dtb-$(CONFIG_MACH_SUN8I) += \
+@@ -1352,6 +1352,7 @@ dtb-$(CONFIG_MACH_SUN8I) += \
sun8i-a83t-cubietruck-plus.dtb \
sun8i-a83t-tbs-a711.dtb \
sun8i-h2-plus-bananapi-m2-zero.dtb \