Refresh backport patches with make target/linux/refresh.
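
The patches were regenerated from the OpenWrt buildroot; a minimal
sketch of the workflow, assuming the usual quilt-based flow:

    make target/linux/clean
    make target/linux/refresh

Only hunk offsets and context lines are expected to change; no
functional change is intended.
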
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
-@@ -606,5 +606,13 @@ static __always_inline void del_page_fro
- make_pte_marker(PTE_MARKER_UFFD_WP));
+@@ -578,4 +578,12 @@ pte_install_uffd_wp_if_needed(struct vm_
#endif
}
-+
+
+static inline bool vma_has_recency(struct vm_area_struct *vma)
+{
+ if (vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))
+ return false;
+
+ return true;
+}
-
++
#endif
--- a/mm/memory.c
+++ b/mm/memory.c
-@@ -1353,8 +1354,7 @@ again:
+@@ -1435,8 +1435,7 @@ again:
force_flush = 1;
set_page_dirty(page);
}
mark_page_accessed(page);
}
rss[mm_counter(page)]--;
-@@ -4795,8 +4795,8 @@ static inline void mm_account_fault(stru
+@@ -5170,8 +5169,8 @@ static inline void mm_account_fault(stru
#ifdef CONFIG_LRU_GEN
static void lru_gen_enter_fault(struct vm_area_struct *vma)
{
static void lru_gen_exit_fault(void)
--- a/mm/rmap.c
+++ b/mm/rmap.c
-@@ -794,25 +794,14 @@ static bool page_referenced_one(struct p
+@@ -823,25 +823,14 @@ static bool folio_referenced_one(struct
}
if (pvmw.pte) {
} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
if (pmdp_clear_flush_young_notify(vma, address,
pvmw.pmd))
-@@ -846,7 +835,20 @@ static bool invalid_page_referenced_vma(
- struct page_referenced_arg *pra = arg;
+@@ -875,7 +864,20 @@ static bool invalid_folio_referenced_vma
+ struct folio_referenced_arg *pra = arg;
struct mem_cgroup *memcg = pra->memcg;
- if (!mm_match_cgroup(vma->vm_mm, memcg))
return true;
return false;
-@@ -876,6 +878,7 @@ int page_referenced(struct page *page,
+@@ -906,6 +908,7 @@ int folio_referenced(struct folio *folio
.arg = (void *)&pra,
.anon_lock = folio_lock_anon_vma_read,
.try_lock = true,
};
*vm_flags = 0;
-@@ -891,15 +894,6 @@ int page_referenced(struct page *page,
+@@ -921,15 +924,6 @@ int folio_referenced(struct folio *folio
return 1;
}
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
-@@ -3486,7 +3486,10 @@ static int should_skip_vma(unsigned long
+@@ -3766,7 +3766,10 @@ static int should_skip_vma(unsigned long
if (is_vm_hugetlb_page(vma))
return true;
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
-@@ -167,6 +167,8 @@ typedef int (dio_iodone_t)(struct kiocb
- /* File is stream-like */
- #define FMODE_STREAM ((__force fmode_t)0x200000)
+@@ -166,6 +166,8 @@ typedef int (dio_iodone_t)(struct kiocb
+ /* File supports DIRECT IO */
+ #define FMODE_CAN_ODIRECT ((__force fmode_t)0x400000)
+#define FMODE_NOREUSE ((__force fmode_t)0x800000)
+
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
-@@ -339,6 +339,9 @@ static inline bool vma_has_recency(struc
+@@ -583,6 +583,9 @@ static inline bool vma_has_recency(struc
if (vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))
return false;
+ if (vma->vm_file && (vma->vm_file->f_mode & FMODE_NOREUSE))
+ return false;
+
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
+ spin_unlock(&file->f_lock);
break;
case POSIX_FADV_DONTNEED:
- if (!inode_write_congested(mapping->host))
+ __filemap_fdatawrite_range(mapping, offset, endbyte,
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
-@@ -168,7 +168,7 @@ static inline void lru_gen_update_size(s
+@@ -178,7 +178,7 @@ static inline void lru_gen_update_size(s
int zone = folio_zonenum(folio);
- int delta = thp_nr_pages(page);
+ int delta = folio_nr_pages(folio);
enum lru_list lru = type * LRU_INACTIVE_FILE;
- struct lru_gen_struct *lrugen = &lruvec->lrugen;
+ struct lru_gen_folio *lrugen = &lruvec->lrugen;
VM_WARN_ON_ONCE(old_gen != -1 && old_gen >= MAX_NR_GENS);
VM_WARN_ON_ONCE(new_gen != -1 && new_gen >= MAX_NR_GENS);
-@@ -214,7 +214,7 @@ static inline bool lru_gen_add_folio(stru
+@@ -224,7 +224,7 @@ static inline bool lru_gen_add_folio(str
int gen = folio_lru_gen(folio);
int type = folio_is_file_lru(folio);
int zone = folio_zonenum(folio);
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
-@@ -394,7 +394,7 @@ enum {
- * The number of pages in each generation is eventually consistent and therefore
+@@ -404,7 +404,7 @@ enum {
+ * The number of pages in each generation is eventually consistent and therefore
* can be transiently negative when reset_batch_size() is pending.
*/
-struct lru_gen_struct {
/* the aging increments the youngest generation number */
unsigned long max_seq;
/* the eviction increments the oldest generation numbers */
-@@ -451,7 +451,7 @@ struct lru_gen_mm_state {
+@@ -461,7 +461,7 @@ struct lru_gen_mm_state {
struct lru_gen_mm_walk {
/* the lruvec under reclaim */
struct lruvec *lruvec;
unsigned long max_seq;
/* the next address within an mm to scan */
unsigned long next_addr;
-@@ -514,7 +514,7 @@ struct lruvec {
+@@ -524,7 +524,7 @@ struct lruvec {
unsigned long flags;
#ifdef CONFIG_LRU_GEN
/* evictable pages divided into generations */
#endif
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
-@@ -2910,7 +2910,7 @@ static int get_nr_gens(struct lruvec *lr
+@@ -3190,7 +3190,7 @@ static int get_nr_gens(struct lruvec *lr
static bool __maybe_unused seq_is_valid(struct lruvec *lruvec)
{
return get_nr_gens(lruvec, LRU_GEN_FILE) >= MIN_NR_GENS &&
get_nr_gens(lruvec, LRU_GEN_FILE) <= get_nr_gens(lruvec, LRU_GEN_ANON) &&
get_nr_gens(lruvec, LRU_GEN_ANON) <= MAX_NR_GENS;
-@@ -3316,7 +3316,7 @@ struct ctrl_pos {
+@@ -3596,7 +3596,7 @@ struct ctrl_pos {
static void read_ctrl_pos(struct lruvec *lruvec, int type, int tier, int gain,
struct ctrl_pos *pos)
{
int hist = lru_hist_from_seq(lrugen->min_seq[type]);
pos->refaulted = lrugen->avg_refaulted[type][tier] +
-@@ -3331,7 +3331,7 @@ static void read_ctrl_pos(struct lruvec
+@@ -3611,7 +3611,7 @@ static void read_ctrl_pos(struct lruvec
static void reset_ctrl_pos(struct lruvec *lruvec, int type, bool carryover)
{
int hist, tier;
bool clear = carryover ? NR_HIST_GENS == 1 : NR_HIST_GENS > 1;
unsigned long seq = carryover ? lrugen->min_seq[type] : lrugen->max_seq + 1;
-@@ -3408,7 +3408,7 @@ static int folio_update_gen(struct folio *
+@@ -3688,7 +3688,7 @@ static int folio_update_gen(struct folio
static int folio_inc_gen(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
int type = folio_is_file_lru(folio);
int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]);
unsigned long new_flags, old_flags = READ_ONCE(folio->flags);
-@@ -3453,7 +3453,7 @@ static void update_batch_size(struct lru
+@@ -3733,7 +3733,7 @@ static void update_batch_size(struct lru
static void reset_batch_size(struct lruvec *lruvec, struct lru_gen_mm_walk *walk)
{
int gen, type, zone;
walk->batched = 0;
-@@ -3979,7 +3979,7 @@ static bool inc_min_seq(struct lruvec *l
+@@ -4253,7 +4253,7 @@ static bool inc_min_seq(struct lruvec *l
{
int zone;
int remaining = MAX_LRU_BATCH;
int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]);
if (type == LRU_GEN_ANON && !can_swap)
-@@ -4015,7 +4015,7 @@ static bool try_to_inc_min_seq(struct lr
+@@ -4289,7 +4289,7 @@ static bool try_to_inc_min_seq(struct lr
{
int gen, type, zone;
bool success = false;
DEFINE_MIN_SEQ(lruvec);
VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
-@@ -4036,7 +4036,7 @@ next:
+@@ -4310,7 +4310,7 @@ next:
;
}
if (can_swap) {
min_seq[LRU_GEN_ANON] = min(min_seq[LRU_GEN_ANON], min_seq[LRU_GEN_FILE]);
min_seq[LRU_GEN_FILE] = max(min_seq[LRU_GEN_ANON], lrugen->min_seq[LRU_GEN_FILE]);
-@@ -4058,7 +4058,7 @@ static void inc_max_seq(struct lruvec *l
+@@ -4332,7 +4332,7 @@ static void inc_max_seq(struct lruvec *l
{
int prev, next;
int type, zone;
spin_lock_irq(&lruvec->lru_lock);
-@@ -4116,7 +4116,7 @@ static bool try_to_inc_max_seq(struct lr
+@@ -4390,7 +4390,7 @@ static bool try_to_inc_max_seq(struct lr
bool success;
struct lru_gen_mm_walk *walk;
struct mm_struct *mm = NULL;
VM_WARN_ON_ONCE(max_seq > READ_ONCE(lrugen->max_seq));
-@@ -4181,7 +4181,7 @@ static bool should_run_aging(struct lruv
+@@ -4455,7 +4455,7 @@ static bool should_run_aging(struct lruv
unsigned long old = 0;
unsigned long young = 0;
unsigned long total = 0;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
for (type = !can_swap; type < ANON_AND_FILE; type++) {
-@@ -4466,7 +4466,7 @@ static bool sort_folio(struct lruvec *lru
- int delta = thp_nr_pages(page);
+@@ -4740,7 +4740,7 @@ static bool sort_folio(struct lruvec *lr
+ int delta = folio_nr_pages(folio);
int refs = folio_lru_refs(folio);
int tier = lru_tier_from_refs(refs);
- struct lru_gen_struct *lrugen = &lruvec->lrugen;
+ struct lru_gen_folio *lrugen = &lruvec->lrugen;
- VM_WARN_ON_ONCE_PAGE(gen >= MAX_NR_GENS, page);
+ VM_WARN_ON_ONCE_FOLIO(gen >= MAX_NR_GENS, folio);
-@@ -4566,7 +4566,7 @@ static int scan_folios(struct lruvec *lru
+@@ -4840,7 +4840,7 @@ static int scan_folios(struct lruvec *lr
int scanned = 0;
int isolated = 0;
int remaining = MAX_LRU_BATCH;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
VM_WARN_ON_ONCE(!list_empty(list));
-@@ -4967,7 +4967,7 @@ done:
+@@ -5240,7 +5240,7 @@ done:
static bool __maybe_unused state_is_valid(struct lruvec *lruvec)
{
if (lrugen->enabled) {
enum lru_list lru;
-@@ -5247,7 +5247,7 @@ static void lru_gen_seq_show_full(struct
+@@ -5522,7 +5522,7 @@ static void lru_gen_seq_show_full(struct
int i;
int type, tier;
int hist = lru_hist_from_seq(seq);
for (tier = 0; tier < MAX_NR_TIERS; tier++) {
seq_printf(m, " %10d", tier);
-@@ -5296,7 +5296,7 @@ static int lru_gen_seq_show(struct seq_f
+@@ -5572,7 +5572,7 @@ static int lru_gen_seq_show(struct seq_f
unsigned long seq;
bool full = !debugfs_real_fops(m->file)->write;
struct lruvec *lruvec = v;
int nid = lruvec_pgdat(lruvec)->node_id;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
DEFINE_MAX_SEQ(lruvec);
-@@ -5549,7 +5549,7 @@ void lru_gen_init_lruvec(struct lruvec *
+@@ -5826,7 +5826,7 @@ void lru_gen_init_lruvec(struct lruvec *
{
int i;
int gen, type, zone;
lrugen->enabled = lru_gen_enabled();
--- a/mm/workingset.c
+++ b/mm/workingset.c
-@@ -223,7 +223,7 @@ static void *lru_gen_eviction(struct pag
+@@ -223,7 +223,7 @@ static void *lru_gen_eviction(struct fol
unsigned long token;
unsigned long min_seq;
struct lruvec *lruvec;
- struct lru_gen_struct *lrugen;
+ struct lru_gen_folio *lrugen;
int type = folio_is_file_lru(folio);
- int delta = thp_nr_pages(page);
+ int delta = folio_nr_pages(folio);
int refs = folio_lru_refs(folio);
@@ -252,7 +252,7 @@ static void lru_gen_refault(struct folio
unsigned long token;
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
-@@ -246,9 +246,9 @@ static inline bool lru_gen_add_folio(stru
+@@ -256,9 +256,9 @@ static inline bool lru_gen_add_folio(str
lru_gen_update_size(lruvec, folio, -1, gen);
- /* for rotate_reclaimable_page() */
+ /* for folio_rotate_reclaimable() */
if (reclaiming)
- list_add_tail(&folio->lru, &lrugen->lists[gen][type][zone]);
+ list_add_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
}
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
-@@ -302,7 +302,7 @@ enum lruvec_flags {
+@@ -312,7 +312,7 @@ enum lruvec_flags {
* They form a sliding window of a variable size [MIN_NR_GENS, MAX_NR_GENS]. An
* offset within MAX_NR_GENS, i.e., gen, indexes the LRU list of the
* corresponding generation. The gen counter in folio->flags stores gen+1 while
- * a page is on one of lrugen->lists[]. Otherwise it stores 0.
+ * a page is on one of lrugen->folios[]. Otherwise it stores 0.
*
- * A page is added to the youngest generation on faulting. The aging needs to
- * check the accessed bit at least twice before handing this page over to the
-@@ -314,8 +314,8 @@ enum lruvec_flags {
+ * A page is added to the youngest generation on faulting. The aging needs to
+ * check the accessed bit at least twice before handing this page over to the
+@@ -324,8 +324,8 @@ enum lruvec_flags {
* rest of generations, if they exist, are considered inactive. See
* lru_gen_is_active().
*
* considered active is isolated for non-reclaiming purposes, e.g., migration.
* See lru_gen_add_folio() and lru_gen_del_folio().
*
-@@ -402,7 +402,7 @@ struct lru_gen_folio {
+@@ -412,7 +412,7 @@ struct lru_gen_folio {
/* the birth time of each generation in jiffies */
unsigned long timestamps[MAX_NR_GENS];
/* the multi-gen LRU lists, lazily sorted on eviction */
- struct list_head lists[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
+ struct list_head folios[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
/* the multi-gen LRU sizes, eventually consistent */
- long nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
+ long nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
/* the exponential moving average of refaulted */
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
-@@ -3987,7 +3987,7 @@ static bool inc_min_seq(struct lruvec *l
+@@ -4261,7 +4261,7 @@ static bool inc_min_seq(struct lruvec *l
/* prevent cold/hot inversion if force_scan is true */
for (zone = 0; zone < MAX_NR_ZONES; zone++) {
while (!list_empty(head)) {
struct folio *folio = lru_to_folio(head);
-@@ -3998,7 +3998,7 @@ static bool inc_min_seq(struct lruvec *l
- VM_WARN_ON_ONCE_PAGE(page_zonenum(page) != zone, page);
+@@ -4272,7 +4272,7 @@ static bool inc_min_seq(struct lruvec *l
+ VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio);
new_gen = folio_inc_gen(lruvec, folio, false);
- list_move_tail(&folio->lru, &lrugen->lists[new_gen][type][zone]);
if (!--remaining)
return false;
-@@ -4026,7 +4026,7 @@ static bool try_to_inc_min_seq(struct lr
+@@ -4300,7 +4300,7 @@ static bool try_to_inc_min_seq(struct lr
gen = lru_gen_from_seq(min_seq[type]);
for (zone = 0; zone < MAX_NR_ZONES; zone++) {
goto next;
}
-@@ -4491,7 +4491,7 @@ static bool sort_folio(struct lruvec *lru
+@@ -4765,7 +4765,7 @@ static bool sort_folio(struct lruvec *lr
/* promoted */
if (gen != lru_gen_from_seq(lrugen->min_seq[type])) {
return true;
}
-@@ -4500,7 +4500,7 @@ static bool sort_folio(struct lruvec *lru
+@@ -4774,7 +4774,7 @@ static bool sort_folio(struct lruvec *lr
int hist = lru_hist_from_seq(lrugen->min_seq[type]);
gen = folio_inc_gen(lruvec, folio, false);
WRITE_ONCE(lrugen->protected[hist][type][tier - 1],
lrugen->protected[hist][type][tier - 1] + delta);
-@@ -4512,7 +4512,7 @@ static bool sort_folio(struct lruvec *lru
- if (PageLocked(page) || PageWriteback(page) ||
- (type == LRU_GEN_FILE && PageDirty(page))) {
+@@ -4786,7 +4786,7 @@ static bool sort_folio(struct lruvec *lr
+ if (folio_test_locked(folio) || folio_test_writeback(folio) ||
+ (type == LRU_GEN_FILE && folio_test_dirty(folio))) {
gen = folio_inc_gen(lruvec, folio, true);
- list_move(&folio->lru, &lrugen->lists[gen][type][zone]);
+ list_move(&folio->lru, &lrugen->folios[gen][type][zone]);
return true;
}
-@@ -4579,7 +4579,7 @@ static int scan_folios(struct lruvec *lru
+@@ -4853,7 +4853,7 @@ static int scan_folios(struct lruvec *lr
for (zone = sc->reclaim_idx; zone >= 0; zone--) {
LIST_HEAD(moved);
int skipped = 0;
while (!list_empty(head)) {
struct folio *folio = lru_to_folio(head);
-@@ -4980,7 +4980,7 @@ static bool __maybe_unused state_is_vali
+@@ -5253,7 +5253,7 @@ static bool __maybe_unused state_is_vali
int gen, type, zone;
for_each_gen_type_zone(gen, type, zone) {
return false;
}
}
-@@ -5025,7 +5025,7 @@ static bool drain_evictable(struct lruve
+@@ -5298,7 +5298,7 @@ static bool drain_evictable(struct lruve
int remaining = MAX_LRU_BATCH;
for_each_gen_type_zone(gen, type, zone) {
while (!list_empty(head)) {
bool success;
-@@ -5558,7 +5558,7 @@ void lru_gen_init_lruvec(struct lruvec *
+@@ -5835,7 +5835,7 @@ void lru_gen_init_lruvec(struct lruvec *
lrugen->timestamps[i] = jiffies;
for_each_gen_type_zone(gen, type, zone)
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
-@@ -443,6 +443,11 @@ static bool cgroup_reclaim(struct scan_c
+@@ -448,6 +448,11 @@ static bool cgroup_reclaim(struct scan_c
return sc->target_mem_cgroup;
}
/**
* writeback_throttling_sane - is the usual dirty throttling mechanism available?
* @sc: scan_control in question
-@@ -493,6 +498,11 @@ static bool cgroup_reclaim(struct scan_c
+@@ -498,6 +503,11 @@ static bool cgroup_reclaim(struct scan_c
return false;
}
static bool writeback_throttling_sane(struct scan_control *sc)
{
return true;
-@@ -4722,8 +4732,7 @@ static int isolate_folios(struct lruvec *
+@@ -4996,8 +5006,7 @@ static int isolate_folios(struct lruvec
return scanned;
}
{
int type;
int scanned;
-@@ -4812,9 +4821,6 @@ retry:
+@@ -5086,9 +5095,6 @@ retry:
goto retry;
}
return scanned;
}
-@@ -4853,67 +4859,26 @@ done:
+@@ -5127,67 +5133,26 @@ done:
return min_seq[!can_swap] + MIN_NR_GENS <= max_seq ? nr_to_scan : 0;
}
lru_add_drain();
-@@ -4938,7 +4902,7 @@ static void lru_gen_shrink_lruvec(struct
+@@ -5211,7 +5176,7 @@ static void lru_gen_shrink_lruvec(struct
if (!nr_to_scan)
goto done;
if (!delta)
goto done;
-@@ -4946,7 +4910,7 @@ static void lru_gen_shrink_lruvec(struct
+@@ -5219,7 +5184,7 @@ static void lru_gen_shrink_lruvec(struct
if (scanned >= nr_to_scan)
break;
break;
cond_resched();
-@@ -5393,7 +5357,7 @@ static int run_eviction(struct lruvec *l
+@@ -5669,7 +5634,7 @@ static int run_eviction(struct lruvec *l
if (sc->nr_reclaimed >= nr_to_reclaim)
return 0;
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
-@@ -131,7 +131,6 @@ struct scan_control {
+@@ -136,7 +136,6 @@ struct scan_control {
#ifdef CONFIG_LRU_GEN
/* help kswapd make better choices among multiple memcgs */
unsigned long last_reclaimed;
#endif
-@@ -4184,7 +4183,7 @@ done:
+@@ -4458,7 +4457,7 @@ done:
return true;
}
struct scan_control *sc, bool can_swap, unsigned long *nr_to_scan)
{
int gen, type, zone;
-@@ -4193,6 +4192,13 @@ static bool should_run_aging(struct lruv
+@@ -4467,6 +4466,13 @@ static bool should_run_aging(struct lruv
unsigned long total = 0;
struct lru_gen_folio *lrugen = &lruvec->lrugen;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
for (type = !can_swap; type < ANON_AND_FILE; type++) {
unsigned long seq;
-@@ -4221,8 +4227,6 @@ static bool should_run_aging(struct lruv
+@@ -4495,8 +4501,6 @@ static bool should_run_aging(struct lruv
* stalls when the number of generations reaches MIN_NR_GENS. Hence, the
* ideal number of generations is MIN_NR_GENS+1.
*/
if (min_seq[!can_swap] + MIN_NR_GENS < max_seq)
return false;
-@@ -4241,40 +4245,54 @@ static bool should_run_aging(struct lruv
+@@ -4515,40 +4519,54 @@ static bool should_run_aging(struct lruv
return false;
}
}
/* to protect the working set of the last N jiffies */
-@@ -4283,46 +4301,32 @@ static unsigned long lru_gen_min_ttl __r
+@@ -4557,46 +4575,32 @@ static unsigned long lru_gen_min_ttl __r
static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
{
struct mem_cgroup *memcg;
*/
if (mutex_trylock(&oom_lock)) {
struct oom_control oc = {
-@@ -4830,33 +4834,27 @@ retry:
+@@ -5104,33 +5108,27 @@ retry:
* reclaim.
*/
static unsigned long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc,
}
static unsigned long get_nr_to_reclaim(struct scan_control *sc)
-@@ -4875,9 +4873,7 @@ static unsigned long get_nr_to_reclaim(s
+@@ -5149,9 +5147,7 @@ static unsigned long get_nr_to_reclaim(s
static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
{
struct blk_plug plug;
unsigned long nr_to_reclaim = get_nr_to_reclaim(sc);
lru_add_drain();
-@@ -4898,13 +4894,13 @@ static void lru_gen_shrink_lruvec(struct
+@@ -5172,13 +5168,13 @@ static void lru_gen_shrink_lruvec(struct
else
swappiness = 0;
scanned += delta;
if (scanned >= nr_to_scan)
-@@ -4916,10 +4912,6 @@ static void lru_gen_shrink_lruvec(struct
+@@ -5190,10 +5186,6 @@ static void lru_gen_shrink_lruvec(struct
cond_resched();
}
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
-@@ -4183,68 +4183,6 @@ done:
+@@ -4457,68 +4457,6 @@ done:
return true;
}
static bool lruvec_is_sizable(struct lruvec *lruvec, struct scan_control *sc)
{
int gen, type, zone;
-@@ -4828,6 +4766,68 @@ retry:
+@@ -5102,6 +5040,68 @@ retry:
return scanned;
}
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
-@@ -818,6 +818,11 @@ static inline void obj_cgroup_put(struct
+@@ -790,6 +790,11 @@ static inline void obj_cgroup_put(struct
percpu_ref_put(&objcg->refcnt);
}
static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
if (memcg)
-@@ -1283,6 +1288,11 @@ struct mem_cgroup *mem_cgroup_from_css(s
- return NULL;
+@@ -1290,6 +1295,11 @@ static inline void obj_cgroup_put(struct
+ {
}
+static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
+{
+ return true;
+}
}
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
-@@ -112,6 +112,18 @@ static inline bool lru_gen_in_fault(void
+@@ -122,6 +122,18 @@ static inline bool lru_gen_in_fault(void
return current->in_lru_fault;
}
static inline int lru_gen_from_seq(unsigned long seq)
{
return seq % MAX_NR_GENS;
-@@ -287,6 +299,11 @@ static inline bool lru_gen_in_fault(void
+@@ -297,6 +309,11 @@ static inline bool lru_gen_in_fault(void
return false;
}
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
-@@ -357,6 +358,15 @@ struct page_vma_mapped_walk;
+@@ -367,6 +368,15 @@ struct page_vma_mapped_walk;
#define LRU_GEN_MASK ((BIT(LRU_GEN_WIDTH) - 1) << LRU_GEN_PGOFF)
#define LRU_REFS_MASK ((BIT(LRU_REFS_WIDTH) - 1) << LRU_REFS_PGOFF)
#ifdef CONFIG_LRU_GEN
enum {
-@@ -416,6 +426,14 @@ struct lru_gen_folio {
+@@ -426,6 +436,14 @@ struct lru_gen_folio {
atomic_long_t refaulted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
/* whether the multi-gen LRU is enabled */
bool enabled;
};
enum {
-@@ -469,12 +487,87 @@ void lru_gen_init_lruvec(struct lruvec *
+@@ -479,12 +497,87 @@ void lru_gen_init_lruvec(struct lruvec *
void lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
#ifdef CONFIG_MEMCG
static inline void lru_gen_init_lruvec(struct lruvec *lruvec)
{
}
-@@ -484,6 +577,7 @@ static inline void lru_gen_look_around(s
+@@ -494,6 +587,7 @@ static inline void lru_gen_look_around(s
}
#ifdef CONFIG_MEMCG
static inline void lru_gen_init_memcg(struct mem_cgroup *memcg)
{
}
-@@ -491,7 +585,24 @@ static inline void lru_gen_init_memcg(st
+@@ -501,7 +595,24 @@ static inline void lru_gen_init_memcg(st
static inline void lru_gen_exit_memcg(struct mem_cgroup *memcg)
{
}
#endif /* CONFIG_LRU_GEN */
-@@ -1105,6 +1216,8 @@ typedef struct pglist_data {
+@@ -1219,6 +1330,8 @@ typedef struct pglist_data {
#ifdef CONFIG_LRU_GEN
/* kswap mm walk data */
struct lru_gen_mm_walk mm_walk;
+ struct lru_gen_memcg memcg_lru;
#endif
- ZONE_PADDING(_pad2_)
+ CACHELINE_PADDING(_pad2_);
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
-@@ -549,6 +549,16 @@ static void mem_cgroup_update_tree(struc
+@@ -477,6 +477,16 @@ static void mem_cgroup_update_tree(struc
struct mem_cgroup_per_node *mz;
struct mem_cgroup_tree_per_node *mctz;
mctz = soft_limit_tree.rb_tree_per_node[nid];
if (!mctz)
return;
-@@ -3433,6 +3443,9 @@ unsigned long mem_cgroup_soft_limit_recl
+@@ -3522,6 +3532,9 @@ unsigned long mem_cgroup_soft_limit_recl
+ struct mem_cgroup_tree_per_node *mctz;
unsigned long excess;
- unsigned long nr_scanned;
+ if (lru_gen_enabled())
+ return 0;
if (order > 0)
return 0;
-@@ -5321,6 +5334,7 @@ static int mem_cgroup_css_online(struct
+@@ -5382,6 +5395,7 @@ static int mem_cgroup_css_online(struct
if (unlikely(mem_cgroup_is_root(memcg)))
queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
2UL*HZ);
+ lru_gen_online_memcg(memcg);
return 0;
- }
-
-@@ -5347,6 +5361,7 @@ static void mem_cgroup_css_offline(struc
+ offline_kmem:
+ memcg_offline_kmem(memcg);
+@@ -5413,6 +5427,7 @@ static void mem_cgroup_css_offline(struc
memcg_offline_kmem(memcg);
reparent_shrinker_deferred(memcg);
wb_memcg_offline(memcg);
drain_all_stock(memcg);
-@@ -5358,6 +5373,7 @@ static void mem_cgroup_css_released(stru
+@@ -5424,6 +5439,7 @@ static void mem_cgroup_css_released(stru
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
invalidate_reclaim_iterators(memcg);
static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -7661,6 +7661,7 @@ static void __init free_area_init_node(i
+@@ -7957,6 +7957,7 @@ static void __init free_area_init_node(i
pgdat_set_deferred_range(pgdat);
free_area_init_core(pgdat);
+ lru_gen_init_pgdat(pgdat);
}
- void __init free_area_init_memoryless_node(int nid)
+ static void __init free_area_init_memoryless_node(int nid)
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -54,6 +54,8 @@
#include <asm/tlbflush.h>
#include <asm/div64.h>
-@@ -129,11 +131,6 @@ struct scan_control {
+@@ -134,11 +136,6 @@ struct scan_control {
/* Always discard instead of demoting to lower tier memory */
unsigned int no_demotion:1;
/* Allocation order */
s8 order;
-@@ -2880,6 +2877,9 @@ DEFINE_STATIC_KEY_ARRAY_FALSE(lru_gen_ca
+@@ -3160,6 +3157,9 @@ DEFINE_STATIC_KEY_ARRAY_FALSE(lru_gen_ca
for ((type) = 0; (type) < ANON_AND_FILE; (type)++) \
for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++)
static struct lruvec *get_lruvec(struct mem_cgroup *memcg, int nid)
{
struct pglist_data *pgdat = NODE_DATA(nid);
-@@ -4169,8 +4169,7 @@ done:
+@@ -4443,8 +4443,7 @@ done:
if (sc->priority <= DEF_PRIORITY - 2)
wait_event_killable(lruvec->mm_state.wait,
max_seq < READ_ONCE(lrugen->max_seq));
}
VM_WARN_ON_ONCE(max_seq != READ_ONCE(lrugen->max_seq));
-@@ -4243,8 +4242,6 @@ static void lru_gen_age_node(struct pgli
+@@ -4517,8 +4516,6 @@ static void lru_gen_age_node(struct pgli
VM_WARN_ON_ONCE(!current_is_kswapd());
/* check the order to exclude compaction-induced reclaim */
if (!min_ttl || sc->order || sc->priority == DEF_PRIORITY)
return;
-@@ -4833,8 +4830,7 @@ static bool should_run_aging(struct lruv
+@@ -5107,8 +5104,7 @@ static bool should_run_aging(struct lruv
* 1. Defer try_to_inc_max_seq() to workqueues to reduce latency for memcg
* reclaim.
*/
{
unsigned long nr_to_scan;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
-@@ -4851,10 +4847,8 @@ static unsigned long get_nr_to_scan(stru
+@@ -5125,10 +5121,8 @@ static unsigned long get_nr_to_scan(stru
if (sc->priority == DEF_PRIORITY)
return nr_to_scan;
}
static unsigned long get_nr_to_reclaim(struct scan_control *sc)
-@@ -4863,29 +4857,18 @@ static unsigned long get_nr_to_reclaim(s
+@@ -5137,29 +5131,18 @@ static unsigned long get_nr_to_reclaim(s
if (!global_reclaim(sc))
return -1;
if (sc->may_swap)
swappiness = get_swappiness(lruvec, sc);
-@@ -4895,7 +4878,7 @@ static void lru_gen_shrink_lruvec(struct
+@@ -5169,7 +5152,7 @@ static void lru_gen_shrink_lruvec(struct
swappiness = 0;
nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness);
break;
delta = evict_folios(lruvec, sc, swappiness);
-@@ -4912,10 +4895,250 @@ static void lru_gen_shrink_lruvec(struct
+@@ -5186,10 +5169,250 @@ static void lru_gen_shrink_lruvec(struct
cond_resched();
}
/******************************************************************************
* state change
-@@ -5370,11 +5593,11 @@ static int run_cmd(char cmd, int memcg_i
+@@ -5647,11 +5870,11 @@ static int run_cmd(char cmd, int memcg_i
if (!mem_cgroup_disabled()) {
rcu_read_lock();
rcu_read_unlock();
if (!memcg)
-@@ -5521,6 +5744,19 @@ void lru_gen_init_lruvec(struct lruvec *
+@@ -5799,6 +6022,19 @@ void lru_gen_init_lruvec(struct lruvec *
}
#ifdef CONFIG_MEMCG
void lru_gen_init_memcg(struct mem_cgroup *memcg)
{
INIT_LIST_HEAD(&memcg->mm_list.fifo);
-@@ -5544,7 +5780,69 @@ void lru_gen_exit_memcg(struct mem_cgrou
+@@ -5822,7 +6058,69 @@ void lru_gen_exit_memcg(struct mem_cgrou
}
}
}
static int __init init_lru_gen(void)
{
-@@ -5571,6 +5869,10 @@ static void lru_gen_shrink_lruvec(struct
+@@ -5849,6 +6147,10 @@ static void lru_gen_shrink_lruvec(struct
{
}
#endif /* CONFIG_LRU_GEN */
static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
-@@ -5584,7 +5886,7 @@ static void shrink_lruvec(struct lruvec
+@@ -5862,7 +6164,7 @@ static void shrink_lruvec(struct lruvec
bool proportional_reclaim;
struct blk_plug plug;
lru_gen_shrink_lruvec(lruvec, sc);
return;
}
-@@ -5826,6 +6128,11 @@ static void shrink_node(pg_data_t *pgdat
+@@ -6105,6 +6407,11 @@ static void shrink_node(pg_data_t *pgdat
struct lruvec *target_lruvec;
bool reclaimable = false;
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
-@@ -2905,6 +2905,9 @@ static int get_swappiness(struct lruvec
+@@ -3185,6 +3185,9 @@ static int get_swappiness(struct lruvec
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
+ return 0;
+
if (!can_demote(pgdat->node_id, sc) &&
- mem_cgroup_get_nr_swap_pages(memcg) < MIN_LRU_BATCH)
+ mem_cgroup_get_nr_swap_pages(memcg) < MIN_LRU_BATCH)
return 0;
-@@ -3952,7 +3955,7 @@ static void walk_mm(struct lruvec *lruve
+@@ -4226,7 +4229,7 @@ static void walk_mm(struct lruvec *lruve
} while (err == -EAGAIN);
}
{
struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk;
-@@ -3960,7 +3963,7 @@ static struct lru_gen_mm_walk *set_mm_wa
+@@ -4234,7 +4237,7 @@ static struct lru_gen_mm_walk *set_mm_wa
VM_WARN_ON_ONCE(walk);
walk = &pgdat->mm_walk;
VM_WARN_ON_ONCE(current_is_kswapd());
walk = kzalloc(sizeof(*walk), __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
-@@ -4146,7 +4149,7 @@ static bool try_to_inc_max_seq(struct lr
+@@ -4420,7 +4423,7 @@ static bool try_to_inc_max_seq(struct lr
goto done;
}
if (!walk) {
success = iterate_mm_list_nowalk(lruvec, max_seq);
goto done;
-@@ -4215,8 +4218,6 @@ static bool lruvec_is_reclaimable(struct
+@@ -4489,8 +4492,6 @@ static bool lruvec_is_reclaimable(struct
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
DEFINE_MIN_SEQ(lruvec);
/* see the comment on lru_gen_folio */
gen = lru_gen_from_seq(min_seq[LRU_GEN_FILE]);
birth = READ_ONCE(lruvec->lrugen.timestamps[gen]);
-@@ -4472,12 +4473,8 @@ static bool isolate_folio(struct lruvec *
+@@ -4746,12 +4747,8 @@ static bool isolate_folio(struct lruvec
{
bool success;
(folio_test_dirty(folio) ||
(folio_test_anon(folio) && !folio_test_swapcache(folio))))
return false;
-@@ -4574,9 +4571,8 @@ static int scan_folios(struct lruvec *lru
+@@ -4848,9 +4845,8 @@ static int scan_folios(struct lruvec *lr
__count_vm_events(PGSCAN_ANON + type, isolated);
/*
*/
return isolated || !remaining ? scanned : 0;
}
-@@ -4836,8 +4832,7 @@ static long get_nr_to_scan(struct lruvec
+@@ -5110,8 +5106,7 @@ static long get_nr_to_scan(struct lruvec
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
DEFINE_MAX_SEQ(lruvec);
return 0;
if (!should_run_aging(lruvec, max_seq, sc, can_swap, &nr_to_scan))
-@@ -4865,17 +4860,14 @@ static bool try_to_shrink_lruvec(struct
+@@ -5139,17 +5134,14 @@ static bool try_to_shrink_lruvec(struct
long nr_to_scan;
unsigned long scanned = 0;
unsigned long nr_to_reclaim = get_nr_to_reclaim(sc);
nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness);
if (nr_to_scan <= 0)
-@@ -5005,12 +4997,13 @@ static void lru_gen_shrink_lruvec(struct
+@@ -5279,12 +5271,13 @@ static void lru_gen_shrink_lruvec(struct
struct blk_plug plug;
VM_WARN_ON_ONCE(global_reclaim(sc));
if (try_to_shrink_lruvec(lruvec, sc))
lru_gen_rotate_memcg(lruvec, MEMCG_LRU_YOUNG);
-@@ -5066,11 +5059,19 @@ static void lru_gen_shrink_node(struct p
+@@ -5340,11 +5333,19 @@ static void lru_gen_shrink_node(struct p
VM_WARN_ON_ONCE(!global_reclaim(sc));
set_initial_priority(pgdat, sc);
-@@ -5088,7 +5089,7 @@ static void lru_gen_shrink_node(struct p
+@@ -5362,7 +5363,7 @@ static void lru_gen_shrink_node(struct p
clear_mm_walk();
blk_finish_plug(&plug);
/* kswapd should never fail */
pgdat->kswapd_failures = 0;
}
-@@ -5656,7 +5657,7 @@ static ssize_t lru_gen_seq_write(struct
+@@ -5934,7 +5935,7 @@ static ssize_t lru_gen_seq_write(struct
set_task_reclaim_state(current, &sc.reclaim_state);
flags = memalloc_noreclaim_save();
blk_start_plug(&plug);
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
-@@ -4144,7 +4144,7 @@ static bool try_to_inc_max_seq(struct lr
+@@ -4418,7 +4418,7 @@ static bool try_to_inc_max_seq(struct lr
* handful of PTEs. Spreading the work out over a period of time usually
* is less efficient, but it avoids bursty page faults.
*/
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
-@@ -4934,18 +4934,20 @@ static int shrink_one(struct lruvec *lru
+@@ -5208,18 +5208,20 @@ static int shrink_one(struct lruvec *lru
static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc)
{
gen = get_memcg_gen(READ_ONCE(pgdat->memcg_lru.seq));
rcu_read_lock();
-@@ -4969,14 +4971,22 @@ restart:
+@@ -5243,14 +5245,22 @@ restart:
op = shrink_one(lruvec, sc);
/* restart if raced with lru_gen_rotate_memcg() */
if (gen != get_nulls_value(pos))
goto restart;
-@@ -4985,11 +4995,6 @@ restart:
+@@ -5259,11 +5269,6 @@ restart:
bin = get_memcg_bin(bin + 1);
if (bin != first_bin)
goto restart;
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
-@@ -569,18 +569,16 @@ static void mtd_check_of_node(struct mtd
+@@ -551,18 +551,16 @@ static void mtd_check_of_node(struct mtd
struct device_node *partitions, *parent_dn, *mtd_dn = NULL;
const char *pname, *prefix = "partition-";
int plen, mtd_name_len, offset, prefix_len;
if (!parent_dn)
return;
-@@ -593,15 +591,15 @@ static void mtd_check_of_node(struct mtd
+@@ -575,15 +573,15 @@ static void mtd_check_of_node(struct mtd
/* Search if a partition is defined with the same name */
for_each_child_of_node(partitions, mtd_dn) {
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
-@@ -569,20 +569,22 @@ static void mtd_check_of_node(struct mtd
+@@ -551,20 +551,22 @@ static void mtd_check_of_node(struct mtd
struct device_node *partitions, *parent_dn, *mtd_dn = NULL;
const char *pname, *prefix = "partition-";
int plen, mtd_name_len, offset, prefix_len;
if (!partitions)
goto exit_parent;
-@@ -606,19 +608,11 @@ static void mtd_check_of_node(struct mtd
+@@ -588,19 +590,11 @@ static void mtd_check_of_node(struct mtd
plen = strlen(pname) - offset;
if (plen == mtd_name_len &&
!strncmp(mtd->name, pname + offset, plen)) {
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
-@@ -748,6 +749,17 @@ int add_mtd_device(struct mtd_info *mtd)
+@@ -737,6 +738,17 @@ int add_mtd_device(struct mtd_info *mtd)
not->add(mtd);
mutex_unlock(&mtd_table_mutex);
--- a/drivers/mtd/parsers/Kconfig
+++ b/drivers/mtd/parsers/Kconfig
-@@ -113,6 +113,21 @@ config MTD_AFS_PARTS
+@@ -123,6 +123,21 @@ config MTD_AFS_PARTS
for your particular device. It won't happen automatically. The
'physmap' map driver (CONFIG_MTD_PHYSMAP) does this, for example.
depends on MTD && (BCM47XX || ARCH_BCM_5301X || ARCH_MEDIATEK || RALINK || COMPILE_TEST)
--- a/drivers/mtd/parsers/Makefile
+++ b/drivers/mtd/parsers/Makefile
-@@ -9,6 +9,7 @@ ofpart-$(CONFIG_MTD_OF_PARTS_BCM4908) +=
+@@ -10,6 +10,7 @@ ofpart-$(CONFIG_MTD_OF_PARTS_BCM4908) +=
ofpart-$(CONFIG_MTD_OF_PARTS_LINKSYS_NS)+= ofpart_linksys_ns.o
obj-$(CONFIG_MTD_PARSER_IMAGETAG) += parser_imagetag.o
obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
-@@ -960,8 +960,8 @@ static int mtd_otp_nvmem_add(struct mtd_
+@@ -953,8 +953,8 @@ static int mtd_otp_nvmem_add(struct mtd_
nvmem = mtd_otp_nvmem_register(mtd, "user-otp", size,
mtd_nvmem_user_otp_reg_read);
if (IS_ERR(nvmem)) {
}
mtd->otp_user_nvmem = nvmem;
}
-@@ -978,7 +978,6 @@ static int mtd_otp_nvmem_add(struct mtd_
+@@ -971,7 +971,6 @@ static int mtd_otp_nvmem_add(struct mtd_
nvmem = mtd_otp_nvmem_register(mtd, "factory-otp", size,
mtd_nvmem_fact_otp_reg_read);
if (IS_ERR(nvmem)) {
err = PTR_ERR(nvmem);
goto err;
}
-@@ -991,7 +990,7 @@ static int mtd_otp_nvmem_add(struct mtd_
+@@ -983,7 +982,7 @@ static int mtd_otp_nvmem_add(struct mtd_
err:
nvmem_unregister(mtd->otp_user_nvmem);
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
-@@ -357,7 +357,7 @@ static inline void page_pool_nid_changed
+@@ -386,7 +386,7 @@ static inline void page_pool_nid_changed
static inline void page_pool_ring_lock(struct page_pool *pool)
__acquires(&pool->ring.producer_lock)
{
- if (in_serving_softirq())
+ if (in_softirq())
spin_lock(&pool->ring.producer_lock);
else
spin_lock_bh(&pool->ring.producer_lock);
-@@ -366,7 +366,7 @@ static inline void page_pool_ring_lock(s
+@@ -395,7 +395,7 @@ static inline void page_pool_ring_lock(s
static inline void page_pool_ring_unlock(struct page_pool *pool)
__releases(&pool->ring.producer_lock)
{
- if (in_serving_softirq())
+ if (in_softirq())
spin_unlock(&pool->ring.producer_lock);
else
spin_unlock_bh(&pool->ring.producer_lock);
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
-@@ -512,8 +512,8 @@ static void page_pool_return_page(struct
+@@ -511,8 +511,8 @@ static void page_pool_return_page(struct
static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page)
{
int ret;
- if (in_serving_softirq())
+ if (in_softirq())
ret = ptr_ring_produce(&pool->ring, page);
else
ret = ptr_ring_produce_bh(&pool->ring, page);
-@@ -576,7 +576,7 @@ __page_pool_put_page(struct page_pool *p
+@@ -570,7 +570,7 @@ __page_pool_put_page(struct page_pool *p
page_pool_dma_sync_for_device(pool, page,
dma_sync_size);

- if (allow_direct && in_serving_softirq() &&
+ if (allow_direct && in_softirq() &&
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
-@@ -478,6 +478,20 @@ static inline void eth_addr_inc(u8 *addr
+@@ -508,6 +508,20 @@ static inline void eth_addr_inc(u8 *addr
}
/**
--- a/MAINTAINERS
+++ b/MAINTAINERS
-@@ -11789,6 +11789,14 @@ L: netdev@vger.kernel.org
+@@ -12926,6 +12926,14 @@ L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/mediatek/
L: linux-i2c@vger.kernel.org
--- a/drivers/net/pcs/Kconfig
+++ b/drivers/net/pcs/Kconfig
-@@ -18,4 +18,11 @@ config PCS_LYNX
- This module provides helpers to phylink for managing the Lynx PCS
- which is part of the Layerscape and QorIQ Ethernet SERDES.
+@@ -32,4 +32,11 @@ config PCS_ALTERA_TSE
+ This module provides helper functions for the Altera Triple Speed
+ Ethernet SGMII PCS, that can be found on the Intel Socfpga family.
+config PCS_MTK_LYNXI
+ tristate
endmenu
--- a/drivers/net/pcs/Makefile
+++ b/drivers/net/pcs/Makefile
-@@ -5,3 +5,4 @@ pcs_xpcs-$(CONFIG_PCS_XPCS) := pcs-xpcs.
+@@ -7,3 +7,4 @@ obj-$(CONFIG_PCS_XPCS) += pcs_xpcs.o
obj-$(CONFIG_PCS_LYNX) += pcs-lynx.o
obj-$(CONFIG_PCS_RZN1_MIIC) += pcs-rzn1-miic.o
obj-$(CONFIG_PCS_ALTERA_TSE) += pcs-altera-tse.o
if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
struct device_node *wlan_node;
-@@ -885,9 +888,11 @@ mtk_wed_attach(struct mtk_wed_device *de
+@@ -878,9 +881,11 @@ mtk_wed_attach(struct mtk_wed_device *de
}
mtk_wed_hw_init_early(dev);
}
static void
-@@ -695,10 +695,10 @@ mtk_wed_ring_alloc(struct mtk_wed_device
+@@ -688,10 +688,10 @@ mtk_wed_ring_alloc(struct mtk_wed_device
}
static int
if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, desc_size))
return -ENOMEM;
-@@ -812,9 +812,9 @@ mtk_wed_start(struct mtk_wed_device *dev
+@@ -805,9 +805,9 @@ mtk_wed_start(struct mtk_wed_device *dev
{
int i;
mtk_wed_hw_init(dev);
mtk_wed_configure_irq(dev, irq_mask);
-@@ -923,7 +923,7 @@ mtk_wed_tx_ring_setup(struct mtk_wed_dev
+@@ -916,7 +916,7 @@ mtk_wed_tx_ring_setup(struct mtk_wed_dev
sizeof(*ring->desc)))
return -ENOMEM;
if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
struct device_node *wlan_node;
-@@ -441,10 +667,12 @@ mtk_wed_set_wpdma(struct mtk_wed_device
+@@ -434,10 +660,12 @@ mtk_wed_set_wpdma(struct mtk_wed_device
} else {
mtk_wed_bus_init(dev);
}
}
-@@ -494,6 +722,132 @@ mtk_wed_hw_init_early(struct mtk_wed_dev
+@@ -487,6 +715,132 @@ mtk_wed_hw_init_early(struct mtk_wed_dev
}
}
static void
mtk_wed_hw_init(struct mtk_wed_device *dev)
{
-@@ -505,11 +859,11 @@ mtk_wed_hw_init(struct mtk_wed_device *d
+@@ -498,11 +852,11 @@ mtk_wed_hw_init(struct mtk_wed_device *d
wed_w32(dev, MTK_WED_TX_BM_CTRL,
MTK_WED_TX_BM_CTRL_PAUSE |
FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
-@@ -536,9 +890,9 @@ mtk_wed_hw_init(struct mtk_wed_device *d
+@@ -529,9 +883,9 @@ mtk_wed_hw_init(struct mtk_wed_device *d
wed_w32(dev, MTK_WED_TX_TKID_CTRL,
MTK_WED_TX_TKID_CTRL_PAUSE |
FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM,
wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
MTK_WED_TX_TKID_DYN_THR_HI);
-@@ -546,18 +900,28 @@ mtk_wed_hw_init(struct mtk_wed_device *d
+@@ -539,18 +893,28 @@ mtk_wed_hw_init(struct mtk_wed_device *d
mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
{
void *head = (void *)ring->desc;
int i;
-@@ -567,7 +931,10 @@ mtk_wed_ring_reset(struct mtk_wed_ring *
+@@ -560,7 +924,10 @@ mtk_wed_ring_reset(struct mtk_wed_ring *
desc = (struct mtk_wdma_desc *)(head + i * ring->desc_size);
desc->buf0 = 0;
desc->buf1 = 0;
desc->info = 0;
}
-@@ -623,7 +990,8 @@ mtk_wed_reset_dma(struct mtk_wed_device
+@@ -616,7 +983,8 @@ mtk_wed_reset_dma(struct mtk_wed_device
if (!dev->tx_ring[i].desc)
continue;
}
if (mtk_wed_poll_busy(dev))
-@@ -641,6 +1009,9 @@ mtk_wed_reset_dma(struct mtk_wed_device
+@@ -634,6 +1002,9 @@ mtk_wed_reset_dma(struct mtk_wed_device
wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
if (busy) {
mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV);
-@@ -675,12 +1046,11 @@ mtk_wed_reset_dma(struct mtk_wed_device
+@@ -668,12 +1039,11 @@ mtk_wed_reset_dma(struct mtk_wed_device
MTK_WED_WPDMA_RESET_IDX_RX);
wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0);
}
{
ring->desc = dma_alloc_coherent(dev->hw->dev, size * desc_size,
&ring->desc_phys, GFP_KERNEL);
-@@ -689,7 +1059,7 @@ mtk_wed_ring_alloc(struct mtk_wed_device
+@@ -682,7 +1052,7 @@ mtk_wed_ring_alloc(struct mtk_wed_device
ring->desc_size = desc_size;
ring->size = size;
return 0;
}
-@@ -698,9 +1068,14 @@ static int
+@@ -691,9 +1061,14 @@ static int
mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size)
{
u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
return -ENOMEM;
wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
-@@ -717,6 +1092,60 @@ mtk_wed_wdma_rx_ring_setup(struct mtk_we
+@@ -710,6 +1085,60 @@ mtk_wed_wdma_rx_ring_setup(struct mtk_we
return 0;
}
static void
mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
{
-@@ -739,6 +1168,8 @@ mtk_wed_configure_irq(struct mtk_wed_dev
+@@ -732,6 +1161,8 @@ mtk_wed_configure_irq(struct mtk_wed_dev
wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
} else {
/* initail tx interrupt trigger */
wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
-@@ -757,6 +1188,16 @@ mtk_wed_configure_irq(struct mtk_wed_dev
+@@ -750,6 +1181,16 @@ mtk_wed_configure_irq(struct mtk_wed_dev
FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG,
dev->wlan.txfree_tbit));
wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask);
wed_set(dev, MTK_WED_WDMA_INT_CTRL,
FIELD_PREP(MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL,
-@@ -794,9 +1235,15 @@ mtk_wed_dma_enable(struct mtk_wed_device
+@@ -787,9 +1228,15 @@ mtk_wed_dma_enable(struct mtk_wed_device
wdma_set(dev, MTK_WDMA_GLO_CFG,
MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
} else {
wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
-@@ -804,6 +1251,15 @@ mtk_wed_dma_enable(struct mtk_wed_device
+@@ -797,6 +1244,15 @@ mtk_wed_dma_enable(struct mtk_wed_device
wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
}
}
-@@ -829,7 +1285,19 @@ mtk_wed_start(struct mtk_wed_device *dev
+@@ -822,7 +1278,19 @@ mtk_wed_start(struct mtk_wed_device *dev
val |= BIT(0) | (BIT(1) * !!dev->hw->index);
regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
} else {
}
mtk_wed_dma_enable(dev);
-@@ -863,7 +1331,7 @@ mtk_wed_attach(struct mtk_wed_device *de
+@@ -856,7 +1324,7 @@ mtk_wed_attach(struct mtk_wed_device *de
if (!hw) {
module_put(THIS_MODULE);
ret = -ENODEV;
}
device = dev->wlan.bus_type == MTK_WED_BUS_PCIE
-@@ -876,15 +1344,24 @@ mtk_wed_attach(struct mtk_wed_device *de
+@@ -869,15 +1337,24 @@ mtk_wed_attach(struct mtk_wed_device *de
dev->dev = hw->dev;
dev->irq = hw->irq;
dev->wdma_idx = hw->index;
}
mtk_wed_hw_init_early(dev);
-@@ -893,8 +1370,10 @@ mtk_wed_attach(struct mtk_wed_device *de
+@@ -886,8 +1363,10 @@ mtk_wed_attach(struct mtk_wed_device *de
BIT(hw->index), 0);
else
ret = mtk_wed_wo_init(hw);
mutex_unlock(&hw_lock);
return ret;
-@@ -917,10 +1396,11 @@ mtk_wed_tx_ring_setup(struct mtk_wed_dev
+@@ -910,10 +1389,11 @@ mtk_wed_tx_ring_setup(struct mtk_wed_dev
* WDMA RX.
*/
return -ENOMEM;
if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
-@@ -967,6 +1447,37 @@ mtk_wed_txfree_ring_setup(struct mtk_wed
+@@ -960,6 +1440,37 @@ mtk_wed_txfree_ring_setup(struct mtk_wed
return 0;
}
static u32
mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
{
-@@ -1063,7 +1574,9 @@ void mtk_wed_add_hw(struct device_node *
+@@ -1056,7 +1567,9 @@ void mtk_wed_add_hw(struct device_node *
static const struct mtk_wed_ops wed_ops = {
.attach = mtk_wed_attach,
.tx_ring_setup = mtk_wed_tx_ring_setup,
.start = mtk_wed_start,
.stop = mtk_wed_stop,
.reset_dma = mtk_wed_reset_dma,
-@@ -1072,6 +1585,7 @@ void mtk_wed_add_hw(struct device_node *
+@@ -1065,6 +1578,7 @@ void mtk_wed_add_hw(struct device_node *
.irq_get = mtk_wed_irq_get,
.irq_set_mask = mtk_wed_irq_set_mask,
.detach = mtk_wed_detach,
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -3437,11 +3437,8 @@ static void mtk_pending_work(struct work
+@@ -3495,11 +3495,8 @@ static void mtk_pending_work(struct work
rtnl_lock();
dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);
/* stop all devices to make sure that dma is properly shut down */
for (i = 0; i < MTK_MAC_COUNT; i++) {
if (!eth->netdev[i])
-@@ -3475,7 +3472,7 @@ static void mtk_pending_work(struct work
+@@ -3533,7 +3530,7 @@ static void mtk_pending_work(struct work
dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);
}
static void
-@@ -1297,9 +1297,10 @@ mtk_wed_start(struct mtk_wed_device *dev
+@@ -1290,9 +1290,10 @@ mtk_wed_start(struct mtk_wed_device *dev
if (mtk_wed_rro_cfg(dev))
return;
mtk_wed_dma_enable(dev);
dev->running = true;
}
-@@ -1365,11 +1366,13 @@ mtk_wed_attach(struct mtk_wed_device *de
+@@ -1358,11 +1359,13 @@ mtk_wed_attach(struct mtk_wed_device *de
}
mtk_wed_hw_init_early(dev);
}
if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
-@@ -1006,11 +1009,7 @@ mtk_wed_reset_dma(struct mtk_wed_device
+@@ -999,11 +1002,7 @@ mtk_wed_reset_dma(struct mtk_wed_device
wed_w32(dev, MTK_WED_RESET_IDX, 0);
}
mtk_wdma_rx_reset(dev);
mtk_wed_reset(dev, MTK_WED_RESET_WED);
-@@ -677,7 +691,7 @@ mtk_wed_hw_init_early(struct mtk_wed_dev
+@@ -670,7 +684,7 @@ mtk_wed_hw_init_early(struct mtk_wed_dev
{
u32 mask, set;
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
-@@ -951,42 +951,130 @@ mtk_wed_ring_reset(struct mtk_wed_ring *
+@@ -944,42 +944,130 @@ mtk_wed_ring_reset(struct mtk_wed_ring *
}
static u32
}
static void
-@@ -1004,19 +1092,23 @@ mtk_wed_reset_dma(struct mtk_wed_device
+@@ -997,19 +1085,23 @@ mtk_wed_reset_dma(struct mtk_wed_device
true);
}
if (busy) {
mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
-@@ -1033,6 +1125,9 @@ mtk_wed_reset_dma(struct mtk_wed_device
+@@ -1026,6 +1118,9 @@ mtk_wed_reset_dma(struct mtk_wed_device
MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
}
for (i = 0; i < 100; i++) {
val = wed_r32(dev, MTK_WED_TX_BM_INTF);
if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
-@@ -1040,8 +1135,19 @@ mtk_wed_reset_dma(struct mtk_wed_device
+@@ -1033,8 +1128,19 @@ mtk_wed_reset_dma(struct mtk_wed_device
}
mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT);
if (busy) {
mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
-@@ -1052,6 +1158,17 @@ mtk_wed_reset_dma(struct mtk_wed_device
+@@ -1045,6 +1151,17 @@ mtk_wed_reset_dma(struct mtk_wed_device
MTK_WED_WPDMA_RESET_IDX_RX);
wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0);
}
}
static int
-@@ -1274,6 +1391,9 @@ mtk_wed_start(struct mtk_wed_device *dev
+@@ -1267,6 +1384,9 @@ mtk_wed_start(struct mtk_wed_device *dev
{
int i;
for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
if (!dev->rx_wdma[i].desc)
mtk_wed_wdma_rx_ring_setup(dev, i, 16);
-@@ -1362,10 +1482,6 @@ mtk_wed_attach(struct mtk_wed_device *de
+@@ -1355,10 +1475,6 @@ mtk_wed_attach(struct mtk_wed_device *de
goto out;
if (mtk_wed_get_rx_capa(dev)) {
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
-@@ -1188,7 +1188,8 @@ mtk_wed_ring_alloc(struct mtk_wed_device
+@@ -1181,7 +1181,8 @@ mtk_wed_ring_alloc(struct mtk_wed_device
}
static int
{
u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
struct mtk_wed_ring *wdma;
-@@ -1197,8 +1198,8 @@ mtk_wed_wdma_rx_ring_setup(struct mtk_we
+@@ -1190,8 +1191,8 @@ mtk_wed_wdma_rx_ring_setup(struct mtk_we
return -EINVAL;
wdma = &dev->rx_wdma[idx];
return -ENOMEM;
wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
-@@ -1396,7 +1397,7 @@ mtk_wed_start(struct mtk_wed_device *dev
+@@ -1389,7 +1390,7 @@ mtk_wed_start(struct mtk_wed_device *dev
for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
if (!dev->rx_wdma[i].desc)
mtk_wed_hw_init(dev);
mtk_wed_configure_irq(dev, irq_mask);
-@@ -1505,7 +1506,8 @@ unlock:
+@@ -1498,7 +1499,8 @@ unlock:
}
static int
{
struct mtk_wed_ring *ring = &dev->tx_ring[idx];
-@@ -1524,11 +1526,12 @@ mtk_wed_tx_ring_setup(struct mtk_wed_dev
+@@ -1517,11 +1519,12 @@ mtk_wed_tx_ring_setup(struct mtk_wed_dev
if (WARN_ON(idx >= ARRAY_SIZE(dev->tx_ring)))
return -EINVAL;
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -3200,6 +3200,27 @@ static void mtk_set_mcr_max_rx(struct mt
+@@ -3254,6 +3254,27 @@ static void mtk_set_mcr_max_rx(struct mt
mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
}
static int mtk_hw_init(struct mtk_eth *eth)
{
u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
-@@ -3239,22 +3260,9 @@ static int mtk_hw_init(struct mtk_eth *e
+@@ -3293,22 +3314,9 @@ static int mtk_hw_init(struct mtk_eth *e
return 0;
}
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -3221,7 +3221,54 @@ static void mtk_hw_reset(struct mtk_eth
+@@ -3275,7 +3275,54 @@ static void mtk_hw_reset(struct mtk_eth
0x3ffffff);
}
{
u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
ETHSYS_DMA_AG_MAP_PPE;
-@@ -3260,7 +3307,12 @@ static int mtk_hw_init(struct mtk_eth *e
+@@ -3314,7 +3361,12 @@ static int mtk_hw_init(struct mtk_eth *e
return 0;
}
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
/* Set FE to PDMAv2 if necessary */
-@@ -3465,7 +3517,7 @@ static void mtk_pending_work(struct work
+@@ -3522,7 +3574,7 @@ static void mtk_pending_work(struct work
if (eth->dev->pins)
pinctrl_select_state(eth->dev->pins->p,
eth->dev->pins->default_state);
/* restart DMA and enable IRQs */
for (i = 0; i < MTK_MAC_COUNT; i++) {
-@@ -4057,7 +4109,7 @@ static int mtk_probe(struct platform_dev
+@@ -4114,7 +4166,7 @@ static int mtk_probe(struct platform_dev
eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
INIT_WORK(ð->pending_work, mtk_pending_work);
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -2786,14 +2786,29 @@ static void mtk_dma_free(struct mtk_eth
+@@ -2842,14 +2842,29 @@ static void mtk_dma_free(struct mtk_eth
kfree(eth->scratch_head);
}
schedule_work(ð->pending_work);
}
-@@ -3275,15 +3290,17 @@ static int mtk_hw_init(struct mtk_eth *e
+@@ -3329,15 +3344,17 @@ static int mtk_hw_init(struct mtk_eth *e
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
int i, val, ret;
if (eth->ethsys)
regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
-@@ -3409,8 +3426,10 @@ static int mtk_hw_init(struct mtk_eth *e
+@@ -3466,8 +3483,10 @@ static int mtk_hw_init(struct mtk_eth *e
return 0;
err_disable_pm:
return ret;
}
-@@ -3489,30 +3508,53 @@ static int mtk_do_ioctl(struct net_devic
+@@ -3546,30 +3565,53 @@ static int mtk_do_ioctl(struct net_devic
return -EOPNOTSUPP;
}
if (eth->dev->pins)
pinctrl_select_state(eth->dev->pins->p,
-@@ -3523,15 +3565,19 @@ static void mtk_pending_work(struct work
+@@ -3580,15 +3622,19 @@ static void mtk_pending_work(struct work
for (i = 0; i < MTK_MAC_COUNT; i++) {
if (!test_bit(i, &restart))
continue;
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
-@@ -716,6 +716,33 @@ int mtk_foe_entry_idle_time(struct mtk_p
+@@ -710,6 +710,33 @@ int mtk_foe_entry_idle_time(struct mtk_p
return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
}
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -306,6 +306,7 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_
- int version, int index);
+ void mtk_ppe_deinit(struct mtk_eth *eth);
void mtk_ppe_start(struct mtk_ppe *ppe);
int mtk_ppe_stop(struct mtk_ppe *ppe);
+int mtk_ppe_prepare_reset(struct mtk_ppe *ppe);
};
/* strings used by ethtool */
-@@ -3283,6 +3289,102 @@ static void mtk_hw_warm_reset(struct mtk
+@@ -3337,6 +3343,102 @@ static void mtk_hw_warm_reset(struct mtk
val, rst_mask);
}
static int mtk_hw_init(struct mtk_eth *eth, bool reset)
{
u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
-@@ -3615,6 +3717,7 @@ static int mtk_cleanup(struct mtk_eth *e
+@@ -3672,6 +3774,7 @@ static int mtk_cleanup(struct mtk_eth *e
mtk_unreg_dev(eth);
mtk_free_dev(eth);
cancel_work_sync(ð->pending_work);
return 0;
}
-@@ -4042,6 +4145,7 @@ static int mtk_probe(struct platform_dev
+@@ -4099,6 +4202,7 @@ static int mtk_probe(struct platform_dev
eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
INIT_WORK(ð->rx_dim.work, mtk_dim_rx);
eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
INIT_WORK(ð->tx_dim.work, mtk_dim_tx);
-@@ -4246,6 +4350,8 @@ static int mtk_probe(struct platform_dev
- NAPI_POLL_WEIGHT);
+@@ -4301,6 +4405,8 @@ static int mtk_probe(struct platform_dev
+ netif_napi_add(ð->dummy_dev, ð->rx_napi, mtk_napi_rx);
platform_set_drvdata(pdev, eth);
+ schedule_delayed_work(ð->reset.monitor_work,
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
-@@ -256,6 +256,8 @@
+@@ -257,6 +257,8 @@
#define MTK_RX_DONE_INT_V2 BIT(14)
/* QDMA Interrupt grouping registers */
#define MTK_RLS_DONE_INT BIT(0)
-@@ -538,6 +540,17 @@
+@@ -542,6 +544,17 @@
#define MT7628_SDM_RBCNT (MT7628_SDM_OFFSET + 0x10c)
#define MT7628_SDM_CS_ERR (MT7628_SDM_OFFSET + 0x110)
struct mtk_rx_dma {
unsigned int rxd1;
unsigned int rxd2;
-@@ -934,6 +947,7 @@ struct mtk_reg_map {
+@@ -938,6 +951,7 @@ struct mtk_reg_map {
u32 delay_irq; /* delay interrupt */
u32 irq_status; /* interrupt status */
u32 irq_mask; /* interrupt mask */
u32 int_grp;
} pdma;
struct {
-@@ -960,6 +974,8 @@ struct mtk_reg_map {
- u32 gdma_to_ppe0;
+@@ -964,6 +978,8 @@ struct mtk_reg_map {
+ u32 gdma_to_ppe;
u32 ppe_base;
u32 wdma_base[2];
+ u32 pse_iq_sta;
};
/* struct mtk_eth_data - This is the structure holding all differences
-@@ -1002,6 +1018,8 @@ struct mtk_soc_data {
+@@ -1006,6 +1022,8 @@ struct mtk_soc_data {
} txrx;
};
/* currently no SoC has more than 2 macs */
#define MTK_MAX_DEVS 2
-@@ -1124,6 +1142,14 @@ struct mtk_eth {
+@@ -1128,6 +1146,14 @@ struct mtk_eth {
struct rhashtable flow_table;
struct bpf_prog __rcu *prog;
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -3646,6 +3646,11 @@ static void mtk_pending_work(struct work
+@@ -3703,6 +3703,11 @@ static void mtk_pending_work(struct work
set_bit(MTK_RESETTING, ð->state);
mtk_prepare_for_reset(eth);
/* stop all devices to make sure that dma is properly shut down */
for (i = 0; i < MTK_MAC_COUNT; i++) {
-@@ -3683,6 +3688,8 @@ static void mtk_pending_work(struct work
+@@ -3740,6 +3745,8 @@ static void mtk_pending_work(struct work
clear_bit(MTK_RESETTING, ð->state);
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
-@@ -1259,7 +1259,8 @@ mtk_wed_wdma_rx_ring_setup(struct mtk_we
+@@ -1252,7 +1252,8 @@ mtk_wed_wdma_rx_ring_setup(struct mtk_we
}
static int
{
u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
struct mtk_wed_ring *wdma;
-@@ -1268,8 +1269,8 @@ mtk_wed_wdma_tx_ring_setup(struct mtk_we
+@@ -1261,8 +1262,8 @@ mtk_wed_wdma_tx_ring_setup(struct mtk_we
return -EINVAL;
wdma = &dev->tx_wdma[idx];
return -ENOMEM;
wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
-@@ -1279,6 +1280,9 @@ mtk_wed_wdma_tx_ring_setup(struct mtk_we
+@@ -1272,6 +1273,9 @@ mtk_wed_wdma_tx_ring_setup(struct mtk_we
wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_DMA_IDX, 0);
if (!idx) {
wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_BASE,
wdma->desc_phys);
-@@ -1618,18 +1622,20 @@ mtk_wed_txfree_ring_setup(struct mtk_wed
+@@ -1611,18 +1615,20 @@ mtk_wed_txfree_ring_setup(struct mtk_wed
}
static int
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -901,7 +901,7 @@ static int mtk_init_fq_dma(struct mtk_et
+@@ -945,7 +945,7 @@ static int mtk_init_fq_dma(struct mtk_et
{
const struct mtk_soc_data *soc = eth->soc;
dma_addr_t phy_ring_tail;
dma_addr_t dma_addr;
int i;
-@@ -2155,19 +2155,25 @@ static int mtk_tx_alloc(struct mtk_eth *
+@@ -2209,19 +2209,25 @@ static int mtk_tx_alloc(struct mtk_eth *
struct mtk_tx_ring *ring = ð->tx_ring;
int i, sz = soc->txrx.txd_size;
struct mtk_tx_dma_v2 *txd;
u32 next_ptr = ring->phys + next * sz;
txd = ring->dma + i * sz;
-@@ -2187,22 +2193,22 @@ static int mtk_tx_alloc(struct mtk_eth *
+@@ -2241,22 +2247,22 @@ static int mtk_tx_alloc(struct mtk_eth *
* descriptors in ring->dma_pdma.
*/
if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
ring->thresh = MAX_SKB_FRAGS;
/* make sure that all changes to the dma ring are flushed before we
-@@ -2214,14 +2220,14 @@ static int mtk_tx_alloc(struct mtk_eth *
+@@ -2268,14 +2274,14 @@ static int mtk_tx_alloc(struct mtk_eth *
mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
mtk_w32(eth,
mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
}
-@@ -2239,7 +2245,7 @@ static void mtk_tx_clean(struct mtk_eth
+@@ -2293,7 +2299,7 @@ static void mtk_tx_clean(struct mtk_eth
int i;
if (ring->buf) {
mtk_tx_unmap(eth, &ring->buf[i], NULL, false);
kfree(ring->buf);
ring->buf = NULL;
-@@ -2247,14 +2253,14 @@ static void mtk_tx_clean(struct mtk_eth
+@@ -2301,14 +2307,14 @@ static void mtk_tx_clean(struct mtk_eth
if (ring->dma) {
dma_free_coherent(eth->dma_dev,
ring->dma_pdma, ring->phys_pdma);
ring->dma_pdma = NULL;
}
-@@ -2774,7 +2780,7 @@ static void mtk_dma_free(struct mtk_eth
+@@ -2830,7 +2836,7 @@ static void mtk_dma_free(struct mtk_eth
netdev_reset_queue(eth->netdev[i]);
if (eth->scratch_ring) {
dma_free_coherent(eth->dma_dev,
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -4428,7 +4428,7 @@ static const struct mtk_soc_data mt7621_
+@@ -4484,7 +4484,7 @@ static const struct mtk_soc_data mt7621_
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7621_CLKS_BITMAP,
.required_pctl = false,
.hash_offset = 2,
.foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
.txrx = {
-@@ -4467,7 +4467,7 @@ static const struct mtk_soc_data mt7623_
+@@ -4523,7 +4523,7 @@ static const struct mtk_soc_data mt7623_
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
+ .tx_sch_rate = 0x4798,
},
.gdm1_cnt = 0x1c00,
- .gdma_to_ppe0 = 0x3333,
-@@ -577,6 +581,75 @@ static void mtk_mac_link_down(struct phy
+ .gdma_to_ppe = 0x3333,
+@@ -620,6 +624,75 @@ static void mtk_mac_link_down(struct phy
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}
static void mtk_mac_link_up(struct phylink_config *config,
struct phy_device *phy,
unsigned int mode, phy_interface_t interface,
-@@ -602,6 +675,8 @@ static void mtk_mac_link_up(struct phyli
+@@ -645,6 +718,8 @@ static void mtk_mac_link_up(struct phyli
break;
}
/* Configure duplex */
if (duplex == DUPLEX_FULL)
mcr |= MAC_MCR_FORCE_DPX;
-@@ -1060,7 +1135,8 @@ static void mtk_tx_set_dma_desc_v1(struc
+@@ -1106,7 +1181,8 @@ static void mtk_tx_set_dma_desc_v1(struc
WRITE_ONCE(desc->txd1, info->addr);
if (info->last)
data |= TX_DMA_LS0;
WRITE_ONCE(desc->txd3, data);
-@@ -1094,9 +1170,6 @@ static void mtk_tx_set_dma_desc_v2(struc
+@@ -1140,9 +1216,6 @@ static void mtk_tx_set_dma_desc_v2(struc
data |= TX_DMA_LS0;
WRITE_ONCE(desc->txd3, data);
data = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
WRITE_ONCE(desc->txd4, data);
-@@ -1140,11 +1213,12 @@ static int mtk_tx_map(struct sk_buff *sk
+@@ -1186,11 +1259,12 @@ static int mtk_tx_map(struct sk_buff *sk
.gso = gso,
.csum = skb->ip_summed == CHECKSUM_PARTIAL,
.vlan = skb_vlan_tag_present(skb),
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
const struct mtk_soc_data *soc = eth->soc;
-@@ -1152,8 +1226,10 @@ static int mtk_tx_map(struct sk_buff *sk
+@@ -1198,8 +1272,10 @@ static int mtk_tx_map(struct sk_buff *sk
struct mtk_tx_dma *itxd_pdma, *txd_pdma;
struct mtk_tx_buf *itx_buf, *tx_buf;
int i, n_desc = 1;
itxd = ring->next_free;
itxd_pdma = qdma_to_pdma(ring, itxd);
if (itxd == ring->last_free)
-@@ -1202,7 +1278,7 @@ static int mtk_tx_map(struct sk_buff *sk
+@@ -1248,7 +1324,7 @@ static int mtk_tx_map(struct sk_buff *sk
memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
txd_info.size = min_t(unsigned int, frag_size,
soc->txrx.dma_max_len);
txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
!(frag_size - txd_info.size);
txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
-@@ -1241,7 +1317,7 @@ static int mtk_tx_map(struct sk_buff *sk
+@@ -1287,7 +1363,7 @@ static int mtk_tx_map(struct sk_buff *sk
txd_pdma->txd2 |= TX_DMA_LS1;
}
skb_tx_timestamp(skb);
ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
-@@ -1253,8 +1329,7 @@ static int mtk_tx_map(struct sk_buff *sk
+@@ -1299,8 +1375,7 @@ static int mtk_tx_map(struct sk_buff *sk
wmb();
if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
} else {
int next_idx;
-@@ -1323,7 +1398,7 @@ static void mtk_wake_queue(struct mtk_et
+@@ -1369,7 +1444,7 @@ static void mtk_wake_queue(struct mtk_et
for (i = 0; i < MTK_MAC_COUNT; i++) {
if (!eth->netdev[i])
continue;
}
}
-@@ -1347,7 +1422,7 @@ static netdev_tx_t mtk_start_xmit(struct
+@@ -1393,7 +1468,7 @@ static netdev_tx_t mtk_start_xmit(struct
tx_num = mtk_cal_txd_req(eth, skb);
if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
netif_err(eth, tx_queued, dev,
"Tx Ring full when queue awake!\n");
spin_unlock(&eth->page_lock);
-@@ -1373,7 +1448,7 @@ static netdev_tx_t mtk_start_xmit(struct
+@@ -1419,7 +1494,7 @@ static netdev_tx_t mtk_start_xmit(struct
goto drop;
if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
spin_unlock(&eth->page_lock);
-@@ -1540,10 +1615,12 @@ static int mtk_xdp_submit_frame(struct m
+@@ -1586,10 +1661,12 @@ static int mtk_xdp_submit_frame(struct m
struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
const struct mtk_soc_data *soc = eth->soc;
struct mtk_tx_ring *ring = &eth->tx_ring;
+ .qid = mac->id,
};
int err, index = 0, n_desc = 1, nr_frags;
- struct mtk_tx_dma *htxd, *txd, *txd_pdma;
-@@ -1594,6 +1671,7 @@ static int mtk_xdp_submit_frame(struct m
+ struct mtk_tx_buf *htx_buf, *tx_buf;
+@@ -1639,6 +1716,7 @@ static int mtk_xdp_submit_frame(struct m
memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
txd_info.size = skb_frag_size(&sinfo->frags[index]);
txd_info.last = index + 1 == nr_frags;
data = skb_frag_address(&sinfo->frags[index]);
index++;
-@@ -1945,8 +2023,46 @@ rx_done:
+@@ -1993,8 +2071,46 @@ rx_done:
return done;
}
{
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
struct mtk_tx_ring *ring = &eth->tx_ring;
-@@ -1976,12 +2092,9 @@ static int mtk_poll_tx_qdma(struct mtk_e
+@@ -2026,12 +2142,9 @@ static int mtk_poll_tx_qdma(struct mtk_e
break;
if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
- }
budget--;
}
- mtk_tx_unmap(eth, tx_buf, true);
-@@ -1999,7 +2112,7 @@ static int mtk_poll_tx_qdma(struct mtk_e
+ mtk_tx_unmap(eth, tx_buf, &bq, true);
+@@ -2050,7 +2163,7 @@ static int mtk_poll_tx_qdma(struct mtk_e
}
static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
{
struct mtk_tx_ring *ring = &eth->tx_ring;
struct mtk_tx_buf *tx_buf;
-@@ -2015,12 +2128,8 @@ static int mtk_poll_tx_pdma(struct mtk_e
+@@ -2068,12 +2181,8 @@ static int mtk_poll_tx_pdma(struct mtk_e
break;
if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+ mtk_poll_tx_done(eth, state, 0, tx_buf->data);
budget--;
}
- mtk_tx_unmap(eth, tx_buf, true);
-@@ -2041,26 +2150,15 @@ static int mtk_poll_tx(struct mtk_eth *e
+ mtk_tx_unmap(eth, tx_buf, &bq, true);
+@@ -2095,26 +2204,15 @@ static int mtk_poll_tx(struct mtk_eth *e
{
struct mtk_tx_ring *ring = &eth->tx_ring;
struct dim_sample dim_sample = {};
dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes,
&dim_sample);
-@@ -2070,7 +2168,7 @@ static int mtk_poll_tx(struct mtk_eth *e
+@@ -2124,7 +2222,7 @@ static int mtk_poll_tx(struct mtk_eth *e
(atomic_read(&ring->free_count) > ring->thresh))
mtk_wake_queue(eth);
}
static void mtk_handle_status_irq(struct mtk_eth *eth)
-@@ -2156,6 +2254,7 @@ static int mtk_tx_alloc(struct mtk_eth *
+@@ -2210,6 +2308,7 @@ static int mtk_tx_alloc(struct mtk_eth *
int i, sz = soc->txrx.txd_size;
struct mtk_tx_dma_v2 *txd;
int ring_size;
if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
ring_size = MTK_QDMA_RING_SIZE;
-@@ -2223,8 +2322,25 @@ static int mtk_tx_alloc(struct mtk_eth *
+@@ -2277,8 +2376,25 @@ static int mtk_tx_alloc(struct mtk_eth *
ring->phys + ((ring_size - 1) * sz),
soc->reg_map->qdma.crx_ptr);
mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
} else {
mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
mtk_w32(eth, ring_size, MT7628_TX_MAX_CNT0);
-@@ -2904,7 +3020,7 @@ static int mtk_start_dma(struct mtk_eth
+@@ -2960,7 +3076,7 @@ static int mtk_start_dma(struct mtk_eth
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
else
val |= MTK_RX_BT_32DWORDS;
mtk_w32(eth, val, reg_map->qdma.glo_cfg);
-@@ -2950,6 +3066,45 @@ static void mtk_gdm_config(struct mtk_et
+@@ -3006,6 +3122,45 @@ static void mtk_gdm_config(struct mtk_et
mtk_w32(eth, 0, MTK_RST_GL);
}
static int mtk_open(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
-@@ -2994,7 +3149,8 @@ static int mtk_open(struct net_device *d
+@@ -3048,7 +3203,8 @@ static int mtk_open(struct net_device *d
refcount_inc(&eth->dma_refcnt);
phylink_start(mac->phylink);
return 0;
}
-@@ -3717,8 +3873,12 @@ static int mtk_unreg_dev(struct mtk_eth
+@@ -3774,8 +3930,12 @@ static int mtk_unreg_dev(struct mtk_eth
int i;
for (i = 0; i < MTK_MAC_COUNT; i++) {
unregister_netdev(eth->netdev[i]);
}
-@@ -3935,6 +4095,23 @@ static int mtk_set_rxnfc(struct net_devi
+@@ -3992,6 +4152,23 @@ static int mtk_set_rxnfc(struct net_devi
return ret;
}
static const struct ethtool_ops mtk_ethtool_ops = {
.get_link_ksettings = mtk_get_link_ksettings,
.set_link_ksettings = mtk_set_link_ksettings,
-@@ -3970,6 +4147,7 @@ static const struct net_device_ops mtk_n
+@@ -4027,6 +4204,7 @@ static const struct net_device_ops mtk_n
.ndo_setup_tc = mtk_eth_setup_tc,
.ndo_bpf = mtk_xdp,
.ndo_xdp_xmit = mtk_xdp_xmit,
};
static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
-@@ -3979,6 +4157,7 @@ static int mtk_add_mac(struct mtk_eth *e
+@@ -4036,6 +4214,7 @@ static int mtk_add_mac(struct mtk_eth *e
struct phylink *phylink;
struct mtk_mac *mac;
int id, err;
if (!_id) {
dev_err(eth->dev, "missing mac id\n");
-@@ -3996,7 +4175,10 @@ static int mtk_add_mac(struct mtk_eth *e
+@@ -4053,7 +4232,10 @@ static int mtk_add_mac(struct mtk_eth *e
return -EINVAL;
}
if (!eth->netdev[id]) {
dev_err(eth->dev, "alloc_etherdev failed\n");
return -ENOMEM;
-@@ -4093,6 +4275,11 @@ static int mtk_add_mac(struct mtk_eth *e
+@@ -4150,6 +4332,11 @@ static int mtk_add_mac(struct mtk_eth *e
else
eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
#define MTK_QDMA_PAGE_SIZE 2048
#define MTK_MAX_RX_LENGTH 1536
#define MTK_MAX_RX_LENGTH_2K 2048
-@@ -215,8 +216,26 @@
+@@ -216,8 +217,26 @@
#define MTK_RING_MAX_AGG_CNT_H ((MTK_HW_LRO_MAX_AGG_CNT >> 6) & 0x3)
/* QDMA TX Queue Configuration Registers */
/* QDMA Global Configuration Register */
#define MTK_RX_2B_OFFSET BIT(31)
#define MTK_RX_BT_32DWORDS (3 << 11)
-@@ -235,6 +254,7 @@
+@@ -236,6 +255,7 @@
#define MTK_WCOMP_EN BIT(24)
#define MTK_RESV_BUF (0x40 << 16)
#define MTK_MUTLI_CNT (0x4 << 12)
/* QDMA Flow Control Register */
#define FC_THRES_DROP_MODE BIT(20)
-@@ -265,8 +285,6 @@
+@@ -266,8 +286,6 @@
#define MTK_STAT_OFFSET 0x40
/* QDMA TX NUM */
#define QID_BITS_V2(x) (((x) & 0x3f) << 16)
#define MTK_QDMA_GMAC2_QID 8
-@@ -296,6 +314,7 @@
+@@ -297,6 +315,7 @@
#define TX_DMA_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
#define TX_DMA_PLEN1(x) ((x) & eth->soc->txrx.dma_max_len)
#define TX_DMA_SWC BIT(14)
/* PDMA on MT7628 */
#define TX_DMA_DONE BIT(31)
-@@ -953,6 +972,7 @@ struct mtk_reg_map {
+@@ -957,6 +976,7 @@ struct mtk_reg_map {
} pdma;
struct {
u32 qtx_cfg; /* tx queue configuration */
u32 rx_ptr; /* rx base pointer */
u32 rx_cnt_cfg; /* rx max count configuration */
u32 qcrx_ptr; /* rx cpu pointer */
-@@ -970,6 +990,7 @@ struct mtk_reg_map {
+@@ -974,6 +994,7 @@ struct mtk_reg_map {
u32 fq_tail; /* fq tail pointer */
u32 fq_count; /* fq free page count */
u32 fq_blen; /* fq free page buffer length */
+ u32 tx_sch_rate; /* tx scheduler rate control registers */
} qdma;
u32 gdm1_cnt;
- u32 gdma_to_ppe0;
-@@ -1173,6 +1194,7 @@ struct mtk_mac {
+ u32 gdma_to_ppe;
+@@ -1177,6 +1198,7 @@ struct mtk_mac {
__be32 hwlro_ip[MTK_MAX_LRO_IP_CNT];
int hwlro_ip_cnt;
unsigned int syscfg0;
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
-@@ -405,6 +405,24 @@ static inline bool mtk_foe_entry_usable(
- FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND;
+@@ -399,6 +399,24 @@ int mtk_foe_entry_set_wdma(struct mtk_et
+ return 0;
}
+int mtk_foe_entry_set_queue(struct mtk_eth *eth, struct mtk_foe_entry *entry,
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
-@@ -917,7 +917,13 @@ enum mkt_eth_capabilities {
+@@ -921,7 +921,13 @@ enum mkt_eth_capabilities {
#define MTK_MUX_GMAC12_TO_GEPHY_SGMII \
(MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII | MTK_MUX)
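As a side note, the capability checks sprinkled through these hunks follow an all-bits-set pattern; a hedged sketch, with the macro body inferred from its call sites rather than quoted from the tree:

/* Assumed semantics: true only if every capability bit in _x is set. */
#define EXAMPLE_HAS_CAPS(caps, _x)      (((caps) & (_x)) == (_x))

static bool example_soc_uses_qdma(const struct mtk_soc_data *soc)
{
        return EXAMPLE_HAS_CAPS(soc->caps, MTK_QDMA);
}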
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
-@@ -940,12 +940,14 @@ bool __skb_flow_dissect(const struct net
+@@ -971,12 +971,14 @@ bool __skb_flow_dissect(const struct net
#if IS_ENABLED(CONFIG_NET_DSA)
if (unlikely(skb->dev && netdev_uses_dsa(skb->dev) &&
proto == htons(ETH_P_XDSA))) {
else
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
-@@ -20,6 +20,7 @@
+@@ -11,6 +11,7 @@
#include <linux/netdevice.h>
#include <linux/sysfs.h>
#include <linux/ptp_classify.h>
#include "dsa_priv.h"
-@@ -225,6 +226,7 @@ static bool dsa_skb_defer_rx_timestamp(s
+@@ -216,6 +217,7 @@ static bool dsa_skb_defer_rx_timestamp(s
static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *unused)
{
struct dsa_port *cpu_dp = dev->dsa_ptr;
struct sk_buff *nskb = NULL;
struct dsa_slave_priv *p;
-@@ -238,7 +240,22 @@ static int dsa_switch_rcv(struct sk_buff
+@@ -229,7 +231,22 @@ static int dsa_switch_rcv(struct sk_buff
if (!skb)
return 0;
#include "mtk_eth_soc.h"
#include "mtk_wed.h"
-@@ -1974,16 +1975,22 @@ static int mtk_poll_rx(struct napi_struc
+@@ -2022,16 +2023,22 @@ static int mtk_poll_rx(struct napi_struc
htons(RX_DMA_VPID(trxd.rxd4)),
RX_DMA_VID(trxd.rxd4));
} else if (trxd.rxd2 & RX_DMA_VTAG) {
}
skb_record_rx_queue(skb, 0);
-@@ -2800,15 +2807,30 @@ static netdev_features_t mtk_fix_feature
+@@ -2856,15 +2863,30 @@ static netdev_features_t mtk_fix_feature
static int mtk_set_features(struct net_device *dev, netdev_features_t features)
{
}
/* wait for DMA to finish whatever it is doing before we start using it again */
-@@ -3105,11 +3127,45 @@ found:
+@@ -3161,11 +3183,45 @@ found:
return NOTIFY_DONE;
}
err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
if (err) {
-@@ -3632,6 +3688,10 @@ static int mtk_hw_init(struct mtk_eth *e
+@@ -3686,6 +3742,10 @@ static int mtk_hw_init(struct mtk_eth *e
*/
val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
/* Enable RX VLan Offloading */
mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
-@@ -3865,6 +3925,12 @@ static int mtk_free_dev(struct mtk_eth *
+@@ -3922,6 +3982,12 @@ static int mtk_free_dev(struct mtk_eth *
free_netdev(eth->netdev[i]);
}
/* CDMP Ingress Control Register */
#define MTK_CDMP_IG_CTRL 0x400
#define MTK_CDMP_STAG_EN BIT(0)
-@@ -1166,6 +1172,8 @@ struct mtk_eth {
+@@ -1170,6 +1176,8 @@ struct mtk_eth {
int ip_align;
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -3143,7 +3143,8 @@ static int mtk_open(struct net_device *d
+@@ -3199,7 +3199,8 @@ static int mtk_open(struct net_device *d
struct mtk_eth *eth = mac->hw;
int i, err;
for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
struct metadata_dst *md_dst = eth->dsa_meta[i];
-@@ -3160,7 +3161,8 @@ static int mtk_open(struct net_device *d
+@@ -3216,7 +3217,8 @@ static int mtk_open(struct net_device *d
}
} else {
/* Hardware special tag parsing needs to be disabled if at least
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -3078,7 +3078,7 @@ static void mtk_gdm_config(struct mtk_et
+@@ -3134,7 +3134,7 @@ static void mtk_gdm_config(struct mtk_et
val |= config;
val |= MTK_GDMA_SPECIAL_TAG;
mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
-@@ -3143,8 +3143,7 @@ static int mtk_open(struct net_device *d
+@@ -3199,8 +3199,7 @@ static int mtk_open(struct net_device *d
struct mtk_eth *eth = mac->hw;
int i, err;
for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
struct metadata_dst *md_dst = eth->dsa_meta[i];
-@@ -3161,8 +3160,7 @@ static int mtk_open(struct net_device *d
+@@ -3217,8 +3216,7 @@ static int mtk_open(struct net_device *d
}
} else {
/* Hardware special tag parsing needs to be disabled if at least
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -1830,7 +1830,9 @@ static int mtk_poll_rx(struct napi_struc
+@@ -1878,7 +1878,9 @@ static int mtk_poll_rx(struct napi_struc
while (done < budget) {
unsigned int pktlen, *rxdcsum;
dma_addr_t dma_addr;
u32 hash, reason;
int mac = 0;
-@@ -1970,27 +1972,29 @@ static int mtk_poll_rx(struct napi_struc
+@@ -2018,27 +2020,29 @@ static int mtk_poll_rx(struct napi_struc
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
-@@ -786,7 +786,6 @@ mtk_wed_rro_ring_alloc(struct mtk_wed_de
+@@ -779,7 +779,6 @@ mtk_wed_rro_ring_alloc(struct mtk_wed_de
ring->desc_size = sizeof(*ring->desc);
ring->size = size;
mutex_unlock(&hw_lock);
}
-@@ -1545,8 +1550,10 @@ mtk_wed_attach(struct mtk_wed_device *de
+@@ -1538,8 +1543,10 @@ mtk_wed_attach(struct mtk_wed_device *de
ret = mtk_wed_wo_init(hw);
}
out:
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -676,8 +676,6 @@ static void mtk_mac_link_up(struct phyli
+@@ -719,8 +719,6 @@ static void mtk_mac_link_up(struct phyli
break;
}
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
-@@ -501,8 +501,10 @@
+@@ -504,8 +504,10 @@
#define ETHSYS_DMA_AG_MAP_PPE BIT(2)
/* SGMII subsystem config registers */
#define SGMII_AN_RESTART BIT(9)
#define SGMII_ISOLATE BIT(10)
#define SGMII_AN_ENABLE BIT(12)
-@@ -512,13 +514,18 @@
+@@ -515,13 +517,18 @@
#define SGMII_PCS_FAULT BIT(23)
#define SGMII_AN_EXPANSION_CLR BIT(30)
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
-@@ -1070,11 +1070,13 @@ struct mtk_soc_data {
+@@ -1073,11 +1073,13 @@ struct mtk_soc_data {
* @regmap: The register map pointing at the range used to setup
* SGMII modes
* @ana_rgc3: The offset refers to register ANA_RGC3 related to regmap
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
-@@ -531,7 +531,7 @@
+@@ -534,7 +534,7 @@
#define SGMII_SPEED_10 FIELD_PREP(SGMII_SPEED_MASK, 0)
#define SGMII_SPEED_100 FIELD_PREP(SGMII_SPEED_MASK, 1)
#define SGMII_SPEED_1000 FIELD_PREP(SGMII_SPEED_MASK, 2)
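To illustrate the SGMII_SPEED_* encoding above, a hedged sketch of programming the speed field; the register macro and its offset are stand-ins, since only the mask and the FIELD_PREP'd values appear in the hunks:

#define EXAMPLE_SGMII_MODE_REG  0x20    /* hypothetical register offset */

/* SGMII_SPEED_MASK and the SGMII_SPEED_* values come from the defines
 * above; SPEED_10/SPEED_100 are the usual ethtool constants. */
static int example_sgmii_set_speed(struct regmap *regmap, int speed)
{
        unsigned int val = SGMII_SPEED_1000;

        if (speed == SPEED_10)
                val = SGMII_SPEED_10;
        else if (speed == SPEED_100)
                val = SGMII_SPEED_100;

        return regmap_update_bits(regmap, EXAMPLE_SGMII_MODE_REG,
                                  SGMII_SPEED_MASK, val);
}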
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
-@@ -539,6 +539,10 @@
+@@ -542,6 +542,10 @@
#define SGMII_SEND_AN_ERROR_EN BIT(11)
#define SGMII_IF_MODE_MASK GENMASK(5, 1)
mtk_eth_path_name(path), __func__, updated);
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -4755,6 +4755,26 @@ static const struct mtk_soc_data mt7629_
+@@ -4808,6 +4808,26 @@ static const struct mtk_soc_data mt7629_
},
};
static const struct mtk_soc_data mt7986_data = {
.reg_map = &mt7986_reg_map,
.ana_rgc3 = 0x128,
-@@ -4797,6 +4817,7 @@ const struct of_device_id of_mtk_match[]
+@@ -4849,6 +4869,7 @@ const struct of_device_id of_mtk_match[]
{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
{},
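For context, a minimal sketch of how such an of_device_id table is consumed at probe time; the probe function is hypothetical, while of_device_get_match_data() is the helper refreshed further below:

/* Returns the .data pointer of the matched compatible, e.g.
 * "mediatek,mt7629-eth" -> &mt7629_data. */
static int example_probe(struct platform_device *pdev)
{
        const struct mtk_soc_data *soc;

        soc = of_device_get_match_data(&pdev->dev);
        if (!soc)
                return -EINVAL;

        return 0;
}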
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
-@@ -553,11 +553,22 @@
+@@ -556,11 +556,22 @@
#define SGMSYS_QPHY_PWR_STATE_CTRL 0xe8
#define SGMII_PHYA_PWD BIT(4)
/* MT7628/88 specific stuff */
#define MT7628_PDMA_OFFSET 0x0800
#define MT7628_SDM_OFFSET 0x0c00
-@@ -738,6 +749,17 @@ enum mtk_clks_map {
+@@ -741,6 +752,17 @@ enum mtk_clks_map {
BIT(MTK_CLK_SGMII2_CDR_FB) | \
BIT(MTK_CLK_SGMII_CK) | \
BIT(MTK_CLK_ETH2PLL) | BIT(MTK_CLK_SGMIITOP))
#define MT7986_CLKS_BITMAP (BIT(MTK_CLK_FE) | BIT(MTK_CLK_GP2) | BIT(MTK_CLK_GP1) | \
BIT(MTK_CLK_WOCPU1) | BIT(MTK_CLK_WOCPU0) | \
BIT(MTK_CLK_SGMII_TX_250M) | \
-@@ -851,6 +873,7 @@ enum mkt_eth_capabilities {
+@@ -854,6 +876,7 @@ enum mkt_eth_capabilities {
MTK_NETSYS_V2_BIT,
MTK_SOC_MT7628_BIT,
MTK_RSTCTRL_PPE1_BIT,
/* MUX BITS*/
MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT,
-@@ -885,6 +908,7 @@ enum mkt_eth_capabilities {
+@@ -888,6 +911,7 @@ enum mkt_eth_capabilities {
#define MTK_NETSYS_V2 BIT(MTK_NETSYS_V2_BIT)
#define MTK_SOC_MT7628 BIT(MTK_SOC_MT7628_BIT)
#define MTK_RSTCTRL_PPE1 BIT(MTK_RSTCTRL_PPE1_BIT)
#define MTK_ETH_MUX_GDM1_TO_GMAC1_ESW \
BIT(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT)
-@@ -963,6 +987,11 @@ enum mkt_eth_capabilities {
+@@ -966,6 +990,11 @@ enum mkt_eth_capabilities {
MTK_MUX_U3_GMAC2_TO_QPHY | \
MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA)
#define MT7986_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | \
MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
MTK_NETSYS_V2 | MTK_RSTCTRL_PPE1)
-@@ -1076,12 +1105,14 @@ struct mtk_soc_data {
+@@ -1079,12 +1108,14 @@ struct mtk_soc_data {
* @ana_rgc3: The offset refers to register ANA_RGC3 related to regmap
* @interface: Currently configured interface mode
* @pcs: Phylink PCS structure
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -702,8 +702,10 @@ static const struct phylink_mac_ops mtk_
+@@ -745,8 +745,10 @@ static const struct phylink_mac_ops mtk_
static int mtk_mdio_init(struct mtk_eth *eth)
{
mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
if (!mii_np) {
-@@ -729,6 +731,25 @@ static int mtk_mdio_init(struct mtk_eth
+@@ -773,6 +775,25 @@ static int mtk_mdio_init(struct mtk_eth
eth->mii_bus->parent = eth->dev;
snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
-@@ -18,6 +18,8 @@ config NET_MEDIATEK_SOC
+@@ -19,6 +19,8 @@ config NET_MEDIATEK_SOC
select DIMLIB
select PAGE_POOL
select PAGE_POOL_STATS
#include <linux/jhash.h>
#include <linux/bitfield.h>
#include <net/dsa.h>
-@@ -357,7 +358,7 @@ static struct phylink_pcs *mtk_mac_selec
+@@ -400,7 +401,7 @@ static struct phylink_pcs *mtk_mac_selec
sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
0 : mac->id;
}
return NULL;
-@@ -3977,8 +3978,17 @@ static int mtk_unreg_dev(struct mtk_eth
+@@ -4031,8 +4032,17 @@ static int mtk_unreg_dev(struct mtk_eth
return 0;
}
mtk_unreg_dev(eth);
mtk_free_dev(eth);
cancel_work_sync(&eth->pending_work);
-@@ -4408,6 +4418,36 @@ void mtk_eth_set_dma_device(struct mtk_e
+@@ -4462,6 +4472,36 @@ void mtk_eth_set_dma_device(struct mtk_e
rtnl_unlock();
}
static int mtk_probe(struct platform_device *pdev)
{
struct resource *res = NULL;
-@@ -4471,13 +4511,7 @@ static int mtk_probe(struct platform_dev
+@@ -4525,13 +4565,7 @@ static int mtk_probe(struct platform_dev
}
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
if (err)
return err;
-@@ -4488,14 +4522,17 @@ static int mtk_probe(struct platform_dev
+@@ -4542,14 +4576,17 @@ static int mtk_probe(struct platform_dev
"mediatek,pctl");
if (IS_ERR(eth->pctl)) {
dev_err(&pdev->dev, "no pctl regmap found\n");
}
if (eth->soc->offload_version) {
-@@ -4655,6 +4692,8 @@ err_deinit_hw:
+@@ -4708,6 +4745,8 @@ err_deinit_hw:
mtk_hw_deinit(eth);
err_wed_exit:
mtk_wed_exit();
}
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
-@@ -507,65 +507,6 @@
+@@ -510,65 +510,6 @@
#define ETHSYS_DMA_AG_MAP_QDMA BIT(1)
#define ETHSYS_DMA_AG_MAP_PPE BIT(2)
/* Infrasys subsystem config registers */
#define INFRA_MISC2 0x70c
#define CO_QPHY_SEL BIT(0)
-@@ -1105,31 +1046,6 @@ struct mtk_soc_data {
+@@ -1108,31 +1049,6 @@ struct mtk_soc_data {
/* currently no SoC has more than 2 macs */
#define MTK_MAX_DEVS 2
/* struct mtk_eth - This is the main datasructure for holding the state
* of the driver
* @dev: The device pointer
-@@ -1149,6 +1065,7 @@ struct mtk_sgmii {
+@@ -1152,6 +1068,7 @@ struct mtk_sgmii {
* MII modes
* @infra: The register map pointing at the range used to setup
* SGMII and GePHY path
* @pctl: The register map pointing at the range used to setup
* GMAC port drive/slew values
* @dma_refcnt: track how many netdevs are using the DMA engine
-@@ -1189,8 +1106,8 @@ struct mtk_eth {
+@@ -1192,8 +1109,8 @@ struct mtk_eth {
u32 msg_enable;
unsigned long sysclk;
struct regmap *ethsys;
struct regmap *pctl;
bool hwlro;
refcount_t dma_refcnt;
-@@ -1352,10 +1269,6 @@ void mtk_stats_update_mac(struct mtk_mac
+@@ -1355,10 +1272,6 @@ void mtk_stats_update_mac(struct mtk_mac
void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg);
u32 mtk_r32(struct mtk_eth *eth, unsigned reg);
#include <linux/phylink.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
-@@ -2573,128 +2574,11 @@ static int mt7531_rgmii_setup(struct mt7
+@@ -2597,128 +2598,11 @@ static int mt7531_rgmii_setup(struct mt7
return 0;
}
static int
mt7531_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
phy_interface_t interface)
-@@ -2717,11 +2601,11 @@ mt7531_mac_config(struct dsa_switch *ds,
+@@ -2741,11 +2625,11 @@ mt7531_mac_config(struct dsa_switch *ds,
phydev = dp->slave->phydev;
return mt7531_rgmii_setup(priv, port, interface, phydev);
case PHY_INTERFACE_MODE_SGMII:
default:
return -EINVAL;
}
-@@ -2746,11 +2630,11 @@ mt753x_phylink_mac_select_pcs(struct dsa
+@@ -2770,11 +2654,11 @@ mt753x_phylink_mac_select_pcs(struct dsa
switch (interface) {
case PHY_INTERFACE_MODE_TRGMII:
default:
return NULL;
}
-@@ -2991,86 +2875,6 @@ static void mt7530_pcs_get_state(struct
+@@ -3015,86 +2899,6 @@ static void mt7530_pcs_get_state(struct
state->pause |= MLO_PAUSE_TX;
}
static int mt753x_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
phy_interface_t interface,
const unsigned long *advertising,
-@@ -3090,18 +2894,57 @@ static const struct phylink_pcs_ops mt75
+@@ -3114,18 +2918,57 @@ static const struct phylink_pcs_ops mt75
.pcs_an_restart = mt7530_pcs_an_restart,
};
int i, ret;
/* Initialise the PCS devices */
-@@ -3109,8 +2952,6 @@ mt753x_setup(struct dsa_switch *ds)
+@@ -3133,8 +2976,6 @@ mt753x_setup(struct dsa_switch *ds)
priv->pcs[i].pcs.ops = priv->info->pcs_ops;
priv->pcs[i].priv = priv;
priv->pcs[i].port = i;
}
ret = priv->info->sw_setup(ds);
-@@ -3125,6 +2966,16 @@ mt753x_setup(struct dsa_switch *ds)
+@@ -3149,6 +2990,16 @@ mt753x_setup(struct dsa_switch *ds)
if (ret && priv->irq)
mt7530_free_irq_common(priv);
return ret;
}
-@@ -3216,7 +3067,7 @@ static const struct mt753x_info mt753x_t
+@@ -3240,7 +3091,7 @@ static const struct mt753x_info mt753x_t
},
[ID_MT7531] = {
.id = ID_MT7531,
.sw_setup = mt7531_setup,
.phy_read = mt7531_ind_phy_read,
.phy_write = mt7531_ind_phy_write,
-@@ -3324,7 +3175,7 @@ static void
+@@ -3348,7 +3199,7 @@ static void
mt7530_remove(struct mdio_device *mdiodev)
{
struct mt7530_priv *priv = dev_get_drvdata(&mdiodev->dev);
if (!priv)
return;
-@@ -3343,6 +3194,10 @@ mt7530_remove(struct mdio_device *mdiode
+@@ -3367,6 +3218,10 @@ mt7530_remove(struct mdio_device *mdiode
mt7530_free_irq(priv);
dsa_unregister_switch(priv->ds);
+ mtk_pcs_lynxi_destroy(priv->ports[5 + i].sgmii_pcs);
+
mutex_destroy(&priv->reg_mutex);
+ }
- dev_set_drvdata(&mdiodev->dev, NULL);
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -364,47 +364,8 @@ enum mt7530_vlan_port_acc_frm {
--- a/MAINTAINERS
+++ b/MAINTAINERS
-@@ -12697,6 +12697,7 @@ F: include/uapi/linux/meye.h
+@@ -13959,6 +13959,7 @@ F: include/uapi/linux/meye.h
MOTORCOMM PHY DRIVER
M: Peter Geis <pgwipeout@gmail.com>
F: drivers/net/phy/motorcomm.c
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
-@@ -245,7 +245,7 @@ config MOTORCOMM_PHY
+@@ -260,7 +260,7 @@ config MOTORCOMM_PHY
tristate "Motorcomm PHYs"
help
Enables support for Motorcomm network PHYs.
--- a/Documentation/driver-api/nvmem.rst
+++ b/Documentation/driver-api/nvmem.rst
-@@ -189,3 +189,18 @@ ex::
+@@ -185,3 +185,18 @@ ex::
=====================
See Documentation/devicetree/bindings/nvmem/nvmem.txt
--- a/drivers/nvmem/bcm-ocotp.c
+++ b/drivers/nvmem/bcm-ocotp.c
-@@ -254,7 +254,6 @@ MODULE_DEVICE_TABLE(acpi, bcm_otpc_acpi_
+@@ -244,7 +244,6 @@ MODULE_DEVICE_TABLE(acpi, bcm_otpc_acpi_
static int bcm_otpc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct otpc_priv *priv;
struct nvmem_device *nvmem;
int err;
-@@ -269,8 +268,7 @@ static int bcm_otpc_probe(struct platfor
+@@ -259,8 +258,7 @@ static int bcm_otpc_probe(struct platfor
return -ENODEV;
/* Get OTP base address register. */
--- a/include/linux/of.h
+++ b/include/linux/of.h
-@@ -1169,6 +1169,31 @@ static inline int of_parse_phandle_with_
+@@ -1009,6 +1009,31 @@ static inline int of_parse_phandle_with_
}
/**
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
-@@ -1173,8 +1173,8 @@ static struct device_node *parse_prop_ce
+@@ -1202,8 +1202,8 @@ static struct device_node *parse_prop_ce
if (strcmp(prop_name, list_name))
return NULL;
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
-@@ -1276,7 +1276,7 @@ DEFINE_SIMPLE_PROP(dmas, "dmas", "#dma-c
+@@ -1307,7 +1307,7 @@ DEFINE_SIMPLE_PROP(dmas, "dmas", "#dma-c
DEFINE_SIMPLE_PROP(power_domains, "power-domains", "#power-domain-cells")
DEFINE_SIMPLE_PROP(hwlocks, "hwlocks", "#hwlock-cells")
DEFINE_SIMPLE_PROP(extcon, "extcon", NULL)
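For context, a paraphrased sketch of what one DEFINE_SIMPLE_PROP() line above generates; this shows the conceptual shape for a property with no cells, not the literal macro expansion:

/* Conceptual expansion for the "extcon" entry: return the phandle
 * target at @index so fw_devlink can record a device link. */
static struct device_node *example_parse_extcon(struct device_node *np,
                                                const char *prop_name,
                                                int index)
{
        if (strcmp(prop_name, "extcon"))
                return NULL;

        return of_parse_phandle(np, prop_name, index);
}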
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
-@@ -249,7 +249,7 @@ static ssize_t of_device_get_modalias(st
+@@ -256,7 +256,7 @@ static ssize_t of_device_get_modalias(st
ssize_t csize;
ssize_t tsize;
return -ENODEV;
/* Name & Type */
-@@ -372,7 +372,7 @@ int of_device_uevent_modalias(struct dev
+@@ -379,7 +379,7 @@ int of_device_uevent_modalias(struct dev
{
int sl;
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
-@@ -381,6 +381,8 @@ int of_device_uevent_modalias(struct dev
+@@ -388,6 +388,8 @@ int of_device_uevent_modalias(struct dev
sl = of_device_get_modalias(dev, &env->buf[env->buflen-1],
sizeof(env->buf) - env->buflen);
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
-@@ -241,7 +241,7 @@ const void *of_device_get_match_data(con
+@@ -248,7 +248,7 @@ const void *of_device_get_match_data(con
}
EXPORT_SYMBOL(of_device_get_match_data);
{
const char *compat;
char *c;
-@@ -249,19 +249,16 @@ static ssize_t of_device_get_modalias(st
+@@ -256,19 +256,16 @@ static ssize_t of_device_get_modalias(st
ssize_t csize;
ssize_t tsize;
csize = strlen(compat) + 1;
tsize += csize;
if (csize > len)
-@@ -286,7 +283,10 @@ int of_device_request_module(struct devi
+@@ -293,7 +290,10 @@ int of_device_request_module(struct devi
ssize_t size;
int ret;
if (size < 0)
return size;
-@@ -297,7 +297,7 @@ int of_device_request_module(struct devi
+@@ -304,7 +304,7 @@ int of_device_request_module(struct devi
if (!str)
return -ENOMEM;
str[size - 1] = '\0';
ret = request_module(str);
kfree(str);
-@@ -314,7 +314,12 @@ EXPORT_SYMBOL_GPL(of_device_request_modu
+@@ -321,7 +321,12 @@ EXPORT_SYMBOL_GPL(of_device_request_modu
*/
ssize_t of_device_modalias(struct device *dev, char *str, ssize_t len)
{
if (sl < 0)
return sl;
if (sl > len - 2)
-@@ -379,8 +384,8 @@ int of_device_uevent_modalias(struct dev
+@@ -386,8 +391,8 @@ int of_device_uevent_modalias(struct dev
if (add_uevent_var(env, "MODALIAS="))
return -ENOMEM;
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
-@@ -785,9 +785,10 @@ static bool acpi_of_modalias(struct acpi
+@@ -806,9 +806,10 @@ static bool acpi_of_modalias(struct acpi
* @modalias: Pointer to buffer that modalias value will be copied into
* @len: Length of modalias buffer
*
}
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
-@@ -1159,19 +1159,23 @@ struct device_node *of_find_matching_nod
+@@ -1208,19 +1208,23 @@ struct device_node *of_find_matching_nod
EXPORT_SYMBOL(of_find_matching_node_and_match);
/**
{
const char *compatible, *p;
int cplen;
-@@ -1180,10 +1184,10 @@ int of_modalias_node(struct device_node
+@@ -1229,10 +1233,10 @@ int of_modalias_node(struct device_node
if (!compatible || strlen(compatible) > cplen)
return -ENODEV;
p = strchr(compatible, ',');
* of_find_node_by_phandle - Find a node given a phandle
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
-@@ -2128,8 +2128,8 @@ of_register_spi_device(struct spi_contro
+@@ -2315,8 +2315,8 @@ of_register_spi_device(struct spi_contro
}
/* Select device driver */
goto err_out;
--- a/include/linux/of.h
+++ b/include/linux/of.h
-@@ -361,7 +361,8 @@ extern int of_n_addr_cells(struct device
+@@ -362,7 +362,8 @@ extern int of_n_addr_cells(struct device
extern int of_n_size_cells(struct device_node *np);
extern const struct of_device_id *of_match_node(
const struct of_device_id *matches, const struct device_node *node);
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
-@@ -241,42 +240,6 @@ const void *of_device_get_match_data(con
+@@ -248,42 +247,6 @@ const void *of_device_get_match_data(con
}
EXPORT_SYMBOL(of_device_get_match_data);
+}
--- a/include/linux/of.h
+++ b/include/linux/of.h
-@@ -373,6 +373,9 @@ extern int of_parse_phandle_with_args_ma
+@@ -374,6 +374,9 @@ extern int of_parse_phandle_with_args_ma
extern int of_count_phandle_with_args(const struct device_node *np,
const char *list_name, const char *cells_name);
/* phandle iterator functions */
extern int of_phandle_iterator_init(struct of_phandle_iterator *it,
const struct device_node *np,
-@@ -885,6 +888,12 @@ static inline int of_count_phandle_with_
+@@ -731,6 +734,12 @@ static inline int of_count_phandle_with_
return -ENOSYS;
}
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
-@@ -242,30 +241,10 @@ EXPORT_SYMBOL(of_device_get_match_data);
+@@ -249,30 +248,10 @@ EXPORT_SYMBOL(of_device_get_match_data);
int of_device_request_module(struct device *dev)
{
+EXPORT_SYMBOL_GPL(of_request_module);
--- a/include/linux/of.h
+++ b/include/linux/of.h
-@@ -375,6 +375,7 @@ extern int of_count_phandle_with_args(co
+@@ -376,6 +376,7 @@ extern int of_count_phandle_with_args(co
/* module functions */
extern ssize_t of_modalias(const struct device_node *np, char *str, ssize_t len);
/* phandle iterator functions */
extern int of_phandle_iterator_init(struct of_phandle_iterator *it,
-@@ -893,6 +894,11 @@ static inline ssize_t of_modalias(const
+@@ -739,6 +740,11 @@ static inline ssize_t of_modalias(const
{
return -ENODEV;
}
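Finally, a minimal usage sketch of the two OF helpers these last hunks introduce; the caller and buffer size are hypothetical, the prototypes are taken from the hunks:

/* Build the modalias string for a DT node, then ask userspace to load
 * the matching module. */
static int example_load_module_for(struct device_node *np)
{
        char buf[128];

        if (of_modalias(np, buf, sizeof(buf)) < 0)
                return -ENODEV;

        return of_request_module(np);
}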