From: Christian Marangi Date: Tue, 18 Oct 2022 20:49:57 +0000 (+0200) Subject: generic: 6.1: drop backport patches X-Git-Url: http://git.lede-project.org./?a=commitdiff_plain;h=3be6f3592efa3a81366fc643445786ef789b85de;p=openwrt%2Fstaging%2Fblogic.git generic: 6.1: drop backport patches Drop all backport patches that are now included in kernel 6.1. Signed-off-by: Christian Marangi --- diff --git a/target/linux/generic/backport-6.1/005-v5.17-01-Kbuild-use-Wdeclaration-after-statement.patch b/target/linux/generic/backport-6.1/005-v5.17-01-Kbuild-use-Wdeclaration-after-statement.patch deleted file mode 100644 index b481dd306179..000000000000 --- a/target/linux/generic/backport-6.1/005-v5.17-01-Kbuild-use-Wdeclaration-after-statement.patch +++ /dev/null @@ -1,73 +0,0 @@ -From 2fd7e7f9317d3048a14026816d081b08ba98ea8e Mon Sep 17 00:00:00 2001 -From: Mark Rutland -Date: Tue, 8 Mar 2022 22:56:13 +0100 -Subject: [PATCH 1/3] Kbuild: use -Wdeclaration-after-statement - -The kernel is moving from using `-std=gnu89` to `-std=gnu11`, permitting -the use of additional C11 features such as for-loop initial declarations. - -One contentious aspect of C99 is that it permits mixed declarations and -code, and for now at least, it seems preferable to enforce that -declarations must come first. - -These warnings were already enabled in the kernel itself, but not -for KBUILD_USERCFLAGS or the compat VDSO on arch/arm64, which uses -a separate set of CFLAGS. - -This patch fixes an existing violation in modpost.c, which is not -reported because of the missing flag in KBUILD_USERCFLAGS: - -| scripts/mod/modpost.c: In function ‘match’: -| scripts/mod/modpost.c:837:3: warning: ISO C90 forbids mixed declarations and code [-Wdeclaration-after-statement] -| 837 | const char *endp = p + strlen(p) - 1; -| | ^~~~~ - -Signed-off-by: Mark Rutland -[arnd: don't add a duplicate flag to the default set, update changelog] -Signed-off-by: Arnd Bergmann -Reviewed-by: Nathan Chancellor -Reviewed-by: Nick Desaulniers -Tested-by: Sedat Dilek # LLVM/Clang v13.0.0 (x86-64) -Signed-off-by: Masahiro Yamada ---- - Makefile | 3 ++- - arch/arm64/kernel/vdso32/Makefile | 1 + - scripts/mod/modpost.c | 4 +++- - 3 files changed, 6 insertions(+), 2 deletions(-) - ---- a/Makefile -+++ b/Makefile -@@ -440,7 +440,8 @@ endif - HOSTPKG_CONFIG = pkg-config - - export KBUILD_USERCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes \ -- -O2 -fomit-frame-pointer -std=gnu89 -+ -O2 -fomit-frame-pointer -std=gnu89 \ -+ -Wdeclaration-after-statement - export KBUILD_USERLDFLAGS := - - KBUILD_HOSTCFLAGS := $(KBUILD_USERCFLAGS) $(HOST_LFS_CFLAGS) $(HOSTCFLAGS) ---- a/arch/arm64/kernel/vdso32/Makefile -+++ b/arch/arm64/kernel/vdso32/Makefile -@@ -76,6 +76,7 @@ VDSO_CFLAGS += -Wall -Wundef -Wstrict-pr - -fno-strict-aliasing -fno-common \ - -Werror-implicit-function-declaration \ - -Wno-format-security \ -+ -Wdeclaration-after-statement \ - -std=gnu89 - VDSO_CFLAGS += -O2 - # Some useful compiler-dependent flags from top-level Makefile ---- a/scripts/mod/modpost.c -+++ b/scripts/mod/modpost.c -@@ -833,8 +833,10 @@ static int match(const char *sym, const - { - const char *p; - while (*pat) { -+ const char *endp; -+ - p = *pat++; -- const char *endp = p + strlen(p) - 1; -+ endp = p + strlen(p) - 1; - - /* "*foo*" */ - if (*p == '*' && *endp == '*') { diff --git a/target/linux/generic/backport-6.1/005-v5.17-02-Kbuild-move-to-std-gnu11.patch b/target/linux/generic/backport-6.1/005-v5.17-02-Kbuild-move-to-std-gnu11.patch deleted file mode 100644 index 
94fc52fd8e03..000000000000 --- a/target/linux/generic/backport-6.1/005-v5.17-02-Kbuild-move-to-std-gnu11.patch +++ /dev/null @@ -1,60 +0,0 @@ -From b810c8e719ea082e47c7a8f7cf878bc84fa2455d Mon Sep 17 00:00:00 2001 -From: Arnd Bergmann -Date: Tue, 8 Mar 2022 22:56:14 +0100 -Subject: [PATCH 2/3] Kbuild: move to -std=gnu11 - -During a patch discussion, Linus brought up the option of changing -the C standard version from gnu89 to gnu99, which allows using variable -declaration inside of a for() loop. While the C99, C11 and later standards -introduce many other features, most of these are already available in -gnu89 as GNU extensions as well. - -An earlier attempt to do this when gcc-5 started defaulting to --std=gnu11 failed because at the time that caused warnings about -designated initializers with older compilers. Now that gcc-5.1 is -the minimum compiler version used for building kernels, that is no -longer a concern. Similarly, the behavior of 'inline' functions changes -between gnu89 using gnu_inline behavior and gnu11 using standard c99+ -behavior, but this was taken care of by defining 'inline' to include -__attribute__((gnu_inline)) in order to allow building with clang a -while ago. - -Nathan Chancellor reported a new -Wdeclaration-after-statement -warning that appears in a system header on arm, this still needs a -workaround. - -The differences between gnu99, gnu11, gnu1x and gnu17 are fairly -minimal and mainly impact warnings at the -Wpedantic level that the -kernel never enables. Between these, gnu11 is the newest version -that is supported by all supported compiler versions, though it is -only the default on gcc-5, while all other supported versions of -gcc or clang default to gnu1x/gnu17. - -Link: https://lore.kernel.org/lkml/CAHk-=wiyCH7xeHcmiFJ-YgXUy2Jaj7pnkdKpcovt8fYbVFW3TA@mail.gmail.com/ -Link: https://github.com/ClangBuiltLinux/linux/issues/1603 -Suggested-by: Linus Torvalds -Acked-by: Marco Elver -Acked-by: Jani Nikula -Acked-by: David Sterba -Tested-by: Sedat Dilek -Reviewed-by: Alex Shi -Reviewed-by: Nick Desaulniers -Reviewed-by: Miguel Ojeda -Signed-off-by: Arnd Bergmann -Reviewed-by: Nathan Chancellor -Signed-off-by: Masahiro Yamada ---- - Makefile | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/Makefile -+++ b/Makefile -@@ -524,7 +524,7 @@ KBUILD_CFLAGS := -Wall -Wundef -Werror - -fno-strict-aliasing -fno-common -fshort-wchar -fno-PIE \ - -Werror=implicit-function-declaration -Werror=implicit-int \ - -Werror=return-type -Wno-format-security \ -- -std=gnu89 -+ -std=gnu11 - KBUILD_CPPFLAGS := -D__KERNEL__ - KBUILD_AFLAGS_KERNEL := - KBUILD_CFLAGS_KERNEL := diff --git a/target/linux/generic/backport-6.1/005-v5.17-03-Kbuild-use-std-gnu11-for-KBUILD_USERCFLAGS.patch b/target/linux/generic/backport-6.1/005-v5.17-03-Kbuild-use-std-gnu11-for-KBUILD_USERCFLAGS.patch deleted file mode 100644 index e34acbba1716..000000000000 --- a/target/linux/generic/backport-6.1/005-v5.17-03-Kbuild-use-std-gnu11-for-KBUILD_USERCFLAGS.patch +++ /dev/null @@ -1,43 +0,0 @@ -From 40337d6f3d677aee7ad3052ae662d3f53dd4d5cb Mon Sep 17 00:00:00 2001 -From: Arnd Bergmann -Date: Tue, 8 Mar 2022 22:56:15 +0100 -Subject: [PATCH 3/3] Kbuild: use -std=gnu11 for KBUILD_USERCFLAGS - -As we change the C language standard for the kernel from gnu89 to -gnu11, it makes sense to also update the version for user space -compilation. 
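The user-visible effect of the switch, reduced to one illustrative fragment (a sketch, not taken from any of the patches; the file and function names are invented):

        /* test.c: what the move to -std=gnu11 newly permits */
        void fill(int *buf, int n)
        {
                buf[0] = 0;
                /* declaration after a statement: still flagged by -Wdeclaration-after-statement */
                int last = 0;
                /* for-loop initial declaration: an error with -std=gnu89, valid with -std=gnu11 */
                for (int i = 1; i < n; i++)
                        buf[i] = last = last + i;
        }

Compiling this with gcc -std=gnu89 -c test.c rejects the for-loop declaration as a C99-or-later feature, while gcc -std=gnu11 -Wdeclaration-after-statement -c test.c accepts it and only warns about the declaration placed after a statement, which is the combination the three 005-v5.17 Kbuild patches being dropped here arrange for the kernel, the arm64 compat VDSO and the userspace tools.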
- -Some users have older native compilers than what they use for -kernel builds, so I considered using gnu99 as the default version -for wider compatibility with gcc-4.6 and earlier. - -However, testing with older compilers showed that we already require -HOSTCC version 5.1 as well because a lot of host tools include -linux/compiler.h that uses __has_attribute(): - - CC tools/objtool/exec-cmd.o -In file included from tools/include/linux/compiler_types.h:36:0, - from tools/include/linux/compiler.h:5, - from exec-cmd.c:2: -tools/include/linux/compiler-gcc.h:19:5: error: "__has_attribute" is not defined [-Werror=undef] - -Signed-off-by: Arnd Bergmann -Reviewed-by: Nathan Chancellor -Reviewed-by: Nick Desaulniers -Tested-by: Sedat Dilek -Signed-off-by: Masahiro Yamada ---- - Makefile | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/Makefile -+++ b/Makefile -@@ -440,7 +440,7 @@ endif - HOSTPKG_CONFIG = pkg-config - - export KBUILD_USERCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes \ -- -O2 -fomit-frame-pointer -std=gnu89 \ -+ -O2 -fomit-frame-pointer -std=gnu11 \ - -Wdeclaration-after-statement - export KBUILD_USERLDFLAGS := - diff --git a/target/linux/generic/backport-6.1/020-v6.1-01-mm-x86-arm64-add-arch_has_hw_pte_young.patch b/target/linux/generic/backport-6.1/020-v6.1-01-mm-x86-arm64-add-arch_has_hw_pte_young.patch deleted file mode 100644 index df854ffd3d81..000000000000 --- a/target/linux/generic/backport-6.1/020-v6.1-01-mm-x86-arm64-add-arch_has_hw_pte_young.patch +++ /dev/null @@ -1,425 +0,0 @@ -From a4103262b01a1b8704b37c01c7c813df91b7b119 Mon Sep 17 00:00:00 2001 -From: Yu Zhao -Date: Sun, 18 Sep 2022 01:59:58 -0600 -Subject: [PATCH 01/29] mm: x86, arm64: add arch_has_hw_pte_young() -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Patch series "Multi-Gen LRU Framework", v14. - -What's new -========== -1. OpenWrt, in addition to Android, Arch Linux Zen, Armbian, ChromeOS, - Liquorix, post-factum and XanMod, is now shipping MGLRU on 5.15. -2. Fixed long-tailed direct reclaim latency seen on high-memory (TBs) - machines. The old direct reclaim backoff, which tries to enforce a - minimum fairness among all eligible memcgs, over-swapped by about - (total_mem>>DEF_PRIORITY)-nr_to_reclaim. The new backoff, which - pulls the plug on swapping once the target is met, trades some - fairness for curtailed latency: - https://lore.kernel.org/r/20220918080010.2920238-10-yuzhao@google.com/ -3. Fixed minior build warnings and conflicts. More comments and nits. - -TLDR -==== -The current page reclaim is too expensive in terms of CPU usage and it -often makes poor choices about what to evict. This patchset offers an -alternative solution that is performant, versatile and -straightforward. - -Patchset overview -================= -The design and implementation overview is in patch 14: -https://lore.kernel.org/r/20220918080010.2920238-15-yuzhao@google.com/ - -01. mm: x86, arm64: add arch_has_hw_pte_young() -02. mm: x86: add CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG -Take advantage of hardware features when trying to clear the accessed -bit in many PTEs. - -03. mm/vmscan.c: refactor shrink_node() -04. Revert "include/linux/mm_inline.h: fold __update_lru_size() into - its sole caller" -Minor refactors to improve readability for the following patches. - -05. mm: multi-gen LRU: groundwork -Adds the basic data structure and the functions that insert pages to -and remove pages from the multi-gen LRU (MGLRU) lists. - -06. 
mm: multi-gen LRU: minimal implementation -A minimal implementation without optimizations. - -07. mm: multi-gen LRU: exploit locality in rmap -Exploits spatial locality to improve efficiency when using the rmap. - -08. mm: multi-gen LRU: support page table walks -Further exploits spatial locality by optionally scanning page tables. - -09. mm: multi-gen LRU: optimize multiple memcgs -Optimizes the overall performance for multiple memcgs running mixed -types of workloads. - -10. mm: multi-gen LRU: kill switch -Adds a kill switch to enable or disable MGLRU at runtime. - -11. mm: multi-gen LRU: thrashing prevention -12. mm: multi-gen LRU: debugfs interface -Provide userspace with features like thrashing prevention, working set -estimation and proactive reclaim. - -13. mm: multi-gen LRU: admin guide -14. mm: multi-gen LRU: design doc -Add an admin guide and a design doc. - -Benchmark results -================= -Independent lab results ------------------------ -Based on the popularity of searches [01] and the memory usage in -Google's public cloud, the most popular open-source memory-hungry -applications, in alphabetical order, are: - Apache Cassandra Memcached - Apache Hadoop MongoDB - Apache Spark PostgreSQL - MariaDB (MySQL) Redis - -An independent lab evaluated MGLRU with the most widely used benchmark -suites for the above applications. They posted 960 data points along -with kernel metrics and perf profiles collected over more than 500 -hours of total benchmark time. Their final reports show that, with 95% -confidence intervals (CIs), the above applications all performed -significantly better for at least part of their benchmark matrices. - -On 5.14: -1. Apache Spark [02] took 95% CIs [9.28, 11.19]% and [12.20, 14.93]% - less wall time to sort three billion random integers, respectively, - under the medium- and the high-concurrency conditions, when - overcommitting memory. There were no statistically significant - changes in wall time for the rest of the benchmark matrix. -2. MariaDB [03] achieved 95% CIs [5.24, 10.71]% and [20.22, 25.97]% - more transactions per minute (TPM), respectively, under the medium- - and the high-concurrency conditions, when overcommitting memory. - There were no statistically significant changes in TPM for the rest - of the benchmark matrix. -3. Memcached [04] achieved 95% CIs [23.54, 32.25]%, [20.76, 41.61]% - and [21.59, 30.02]% more operations per second (OPS), respectively, - for sequential access, random access and Gaussian (distribution) - access, when THP=always; 95% CIs [13.85, 15.97]% and - [23.94, 29.92]% more OPS, respectively, for random access and - Gaussian access, when THP=never. There were no statistically - significant changes in OPS for the rest of the benchmark matrix. -4. MongoDB [05] achieved 95% CIs [2.23, 3.44]%, [6.97, 9.73]% and - [2.16, 3.55]% more operations per second (OPS), respectively, for - exponential (distribution) access, random access and Zipfian - (distribution) access, when underutilizing memory; 95% CIs - [8.83, 10.03]%, [21.12, 23.14]% and [5.53, 6.46]% more OPS, - respectively, for exponential access, random access and Zipfian - access, when overcommitting memory. - -On 5.15: -5. 
Apache Cassandra [06] achieved 95% CIs [1.06, 4.10]%, [1.94, 5.43]% - and [4.11, 7.50]% more operations per second (OPS), respectively, - for exponential (distribution) access, random access and Zipfian - (distribution) access, when swap was off; 95% CIs [0.50, 2.60]%, - [6.51, 8.77]% and [3.29, 6.75]% more OPS, respectively, for - exponential access, random access and Zipfian access, when swap was - on. -6. Apache Hadoop [07] took 95% CIs [5.31, 9.69]% and [2.02, 7.86]% - less average wall time to finish twelve parallel TeraSort jobs, - respectively, under the medium- and the high-concurrency - conditions, when swap was on. There were no statistically - significant changes in average wall time for the rest of the - benchmark matrix. -7. PostgreSQL [08] achieved 95% CI [1.75, 6.42]% more transactions per - minute (TPM) under the high-concurrency condition, when swap was - off; 95% CIs [12.82, 18.69]% and [22.70, 46.86]% more TPM, - respectively, under the medium- and the high-concurrency - conditions, when swap was on. There were no statistically - significant changes in TPM for the rest of the benchmark matrix. -8. Redis [09] achieved 95% CIs [0.58, 5.94]%, [6.55, 14.58]% and - [11.47, 19.36]% more total operations per second (OPS), - respectively, for sequential access, random access and Gaussian - (distribution) access, when THP=always; 95% CIs [1.27, 3.54]%, - [10.11, 14.81]% and [8.75, 13.64]% more total OPS, respectively, - for sequential access, random access and Gaussian access, when - THP=never. - -Our lab results ---------------- -To supplement the above results, we ran the following benchmark suites -on 5.16-rc7 and found no regressions [10]. - fs_fio_bench_hdd_mq pft - fs_lmbench pgsql-hammerdb - fs_parallelio redis - fs_postmark stream - hackbench sysbenchthread - kernbench tpcc_spark - memcached unixbench - multichase vm-scalability - mutilate will-it-scale - nginx - -[01] https://trends.google.com -[02] https://lore.kernel.org/r/20211102002002.92051-1-bot@edi.works/ -[03] https://lore.kernel.org/r/20211009054315.47073-1-bot@edi.works/ -[04] https://lore.kernel.org/r/20211021194103.65648-1-bot@edi.works/ -[05] https://lore.kernel.org/r/20211109021346.50266-1-bot@edi.works/ -[06] https://lore.kernel.org/r/20211202062806.80365-1-bot@edi.works/ -[07] https://lore.kernel.org/r/20211209072416.33606-1-bot@edi.works/ -[08] https://lore.kernel.org/r/20211218071041.24077-1-bot@edi.works/ -[09] https://lore.kernel.org/r/20211122053248.57311-1-bot@edi.works/ -[10] https://lore.kernel.org/r/20220104202247.2903702-1-yuzhao@google.com/ - -Read-world applications -======================= -Third-party testimonials ------------------------- -Konstantin reported [11]: - I have Archlinux with 8G RAM + zswap + swap. While developing, I - have lots of apps opened such as multiple LSP-servers for different - langs, chats, two browsers, etc... Usually, my system gets quickly - to a point of SWAP-storms, where I have to kill LSP-servers, - restart browsers to free memory, etc, otherwise the system lags - heavily and is barely usable. - - 1.5 day ago I migrated from 5.11.15 kernel to 5.12 + the LRU - patchset, and I started up by opening lots of apps to create memory - pressure, and worked for a day like this. Till now I had not a - single SWAP-storm, and mind you I got 3.4G in SWAP. I was never - getting to the point of 3G in SWAP before without a single - SWAP-storm. 
- -Vaibhav from IBM reported [12]: - In a synthetic MongoDB Benchmark, seeing an average of ~19% - throughput improvement on POWER10(Radix MMU + 64K Page Size) with - MGLRU patches on top of 5.16 kernel for MongoDB + YCSB across - three different request distributions, namely, Exponential, Uniform - and Zipfan. - -Shuang from U of Rochester reported [13]: - With the MGLRU, fio achieved 95% CIs [38.95, 40.26]%, [4.12, 6.64]% - and [9.26, 10.36]% higher throughput, respectively, for random - access, Zipfian (distribution) access and Gaussian (distribution) - access, when the average number of jobs per CPU is 1; 95% CIs - [42.32, 49.15]%, [9.44, 9.89]% and [20.99, 22.86]% higher - throughput, respectively, for random access, Zipfian access and - Gaussian access, when the average number of jobs per CPU is 2. - -Daniel from Michigan Tech reported [14]: - With Memcached allocating ~100GB of byte-addressable Optante, - performance improvement in terms of throughput (measured as queries - per second) was about 10% for a series of workloads. - -Large-scale deployments ------------------------ -We've rolled out MGLRU to tens of millions of ChromeOS users and -about a million Android users. Google's fleetwide profiling [15] shows -an overall 40% decrease in kswapd CPU usage, in addition to -improvements in other UX metrics, e.g., an 85% decrease in the number -of low-memory kills at the 75th percentile and an 18% decrease in -app launch time at the 50th percentile. - -The downstream kernels that have been using MGLRU include: -1. Android [16] -2. Arch Linux Zen [17] -3. Armbian [18] -4. ChromeOS [19] -5. Liquorix [20] -6. OpenWrt [21] -7. post-factum [22] -8. XanMod [23] - -[11] https://lore.kernel.org/r/140226722f2032c86301fbd326d91baefe3d7d23.camel@yandex.ru/ -[12] https://lore.kernel.org/r/87czj3mux0.fsf@vajain21.in.ibm.com/ -[13] https://lore.kernel.org/r/20220105024423.26409-1-szhai2@cs.rochester.edu/ -[14] https://lore.kernel.org/r/CA+4-3vksGvKd18FgRinxhqHetBS1hQekJE2gwco8Ja-bJWKtFw@mail.gmail.com/ -[15] https://dl.acm.org/doi/10.1145/2749469.2750392 -[16] https://android.com -[17] https://archlinux.org -[18] https://armbian.com -[19] https://chromium.org -[20] https://liquorix.net -[21] https://openwrt.org -[22] https://codeberg.org/pf-kernel -[23] https://xanmod.org - -Summary -======= -The facts are: -1. The independent lab results and the real-world applications - indicate substantial improvements; there are no known regressions. -2. Thrashing prevention, working set estimation and proactive reclaim - work out of the box; there are no equivalent solutions. -3. There is a lot of new code; no smaller changes have been - demonstrated similar effects. - -Our options, accordingly, are: -1. Given the amount of evidence, the reported improvements will likely - materialize for a wide range of workloads. -2. Gauging the interest from the past discussions, the new features - will likely be put to use for both personal computers and data - centers. -3. Based on Google's track record, the new code will likely be well - maintained in the long term. It'd be more difficult if not - impossible to achieve similar effects with other approaches. - -This patch (of 14): - -Some architectures automatically set the accessed bit in PTEs, e.g., x86 -and arm64 v8.2. On architectures that do not have this capability, -clearing the accessed bit in a PTE usually triggers a page fault following -the TLB miss of this PTE (to emulate the accessed bit). 
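How the new helper is meant to be consumed can be seen in the cow_user_page() hunk at the end of this patch; reduced to a sketch (locking and the actual PTE write-back elided, comments added here for illustration):

        /*
         * Without a hardware-managed accessed bit, touching memory through an
         * old PTE takes a fault so the kernel can emulate the bit; set it by
         * hand first and avoid a double page fault during the CoW copy.
         */
        if (!arch_has_hw_pte_young() && !pte_young(vmf->orig_pte)) {
                pte_t entry = pte_mkyoung(vmf->orig_pte);
                /* ...write 'entry' back with ptep_set_access_flags() under the PTE lock... */
        }

On x86 the helper is hard-wired to return true, on arm64 it maps to cpu_has_hw_af(), and the generic stub keeps returning false, as the arch hunks later in this patch show.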
- -Being aware of this capability can help make better decisions, e.g., -whether to spread the work out over a period of time to reduce bursty page -faults when trying to clear the accessed bit in many PTEs. - -Note that theoretically this capability can be unreliable, e.g., -hotplugged CPUs might be different from builtin ones. Therefore it should -not be used in architecture-independent code that involves correctness, -e.g., to determine whether TLB flushes are required (in combination with -the accessed bit). - -Link: https://lkml.kernel.org/r/20220918080010.2920238-1-yuzhao@google.com -Link: https://lkml.kernel.org/r/20220918080010.2920238-2-yuzhao@google.com -Signed-off-by: Yu Zhao -Reviewed-by: Barry Song -Acked-by: Brian Geffon -Acked-by: Jan Alexander Steffens (heftig) -Acked-by: Oleksandr Natalenko -Acked-by: Steven Barrett -Acked-by: Suleiman Souhlal -Acked-by: Will Deacon -Tested-by: Daniel Byrne -Tested-by: Donald Carr -Tested-by: Holger Hoffstätte -Tested-by: Konstantin Kharlamov -Tested-by: Shuang Zhai -Tested-by: Sofia Trinh -Tested-by: Vaibhav Jain -Cc: Andi Kleen -Cc: Aneesh Kumar K.V -Cc: Catalin Marinas -Cc: Dave Hansen -Cc: Hillf Danton -Cc: Jens Axboe -Cc: Johannes Weiner -Cc: Jonathan Corbet -Cc: Linus Torvalds -Cc: linux-arm-kernel@lists.infradead.org -Cc: Matthew Wilcox -Cc: Mel Gorman -Cc: Michael Larabel -Cc: Michal Hocko -Cc: Mike Rapoport -Cc: Peter Zijlstra -Cc: Tejun Heo -Cc: Vlastimil Babka -Cc: Miaohe Lin -Cc: Mike Rapoport -Cc: Qi Zheng -Signed-off-by: Andrew Morton ---- - arch/arm64/include/asm/pgtable.h | 14 ++------------ - arch/x86/include/asm/pgtable.h | 6 +++--- - include/linux/pgtable.h | 13 +++++++++++++ - mm/memory.c | 14 +------------- - 4 files changed, 19 insertions(+), 28 deletions(-) - ---- a/arch/arm64/include/asm/pgtable.h -+++ b/arch/arm64/include/asm/pgtable.h -@@ -999,23 +999,13 @@ static inline void update_mmu_cache(stru - * page after fork() + CoW for pfn mappings. We don't always have a - * hardware-managed access flag on arm64. - */ --static inline bool arch_faults_on_old_pte(void) --{ -- WARN_ON(preemptible()); -- -- return !cpu_has_hw_af(); --} --#define arch_faults_on_old_pte arch_faults_on_old_pte -+#define arch_has_hw_pte_young cpu_has_hw_af - - /* - * Experimentally, it's cheap to set the access flag in hardware and we - * benefit from prefaulting mappings as 'old' to start with. - */ --static inline bool arch_wants_old_prefaulted_pte(void) --{ -- return !arch_faults_on_old_pte(); --} --#define arch_wants_old_prefaulted_pte arch_wants_old_prefaulted_pte -+#define arch_wants_old_prefaulted_pte cpu_has_hw_af - - #endif /* !__ASSEMBLY__ */ - ---- a/arch/x86/include/asm/pgtable.h -+++ b/arch/x86/include/asm/pgtable.h -@@ -1397,10 +1397,10 @@ static inline bool arch_has_pfn_modify_c - return boot_cpu_has_bug(X86_BUG_L1TF); - } - --#define arch_faults_on_old_pte arch_faults_on_old_pte --static inline bool arch_faults_on_old_pte(void) -+#define arch_has_hw_pte_young arch_has_hw_pte_young -+static inline bool arch_has_hw_pte_young(void) - { -- return false; -+ return true; - } - - #endif /* __ASSEMBLY__ */ ---- a/include/linux/pgtable.h -+++ b/include/linux/pgtable.h -@@ -259,6 +259,19 @@ static inline int pmdp_clear_flush_young - #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ - #endif - -+#ifndef arch_has_hw_pte_young -+/* -+ * Return whether the accessed bit is supported on the local CPU. -+ * -+ * This stub assumes accessing through an old PTE triggers a page fault. 
-+ * Architectures that automatically set the access bit should overwrite it. -+ */ -+static inline bool arch_has_hw_pte_young(void) -+{ -+ return false; -+} -+#endif -+ - #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR - static inline pte_t ptep_get_and_clear(struct mm_struct *mm, - unsigned long address, ---- a/mm/memory.c -+++ b/mm/memory.c -@@ -121,18 +121,6 @@ int randomize_va_space __read_mostly = - 2; - #endif - --#ifndef arch_faults_on_old_pte --static inline bool arch_faults_on_old_pte(void) --{ -- /* -- * Those arches which don't have hw access flag feature need to -- * implement their own helper. By default, "true" means pagefault -- * will be hit on old pte. -- */ -- return true; --} --#endif -- - #ifndef arch_wants_old_prefaulted_pte - static inline bool arch_wants_old_prefaulted_pte(void) - { -@@ -2782,7 +2770,7 @@ static inline bool cow_user_page(struct - * On architectures with software "accessed" bits, we would - * take a double page fault, so mark it accessed here. - */ -- if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) { -+ if (!arch_has_hw_pte_young() && !pte_young(vmf->orig_pte)) { - pte_t entry; - - vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); diff --git a/target/linux/generic/backport-6.1/020-v6.1-02-mm-x86-add-CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG.patch b/target/linux/generic/backport-6.1/020-v6.1-02-mm-x86-add-CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG.patch deleted file mode 100644 index 9e0430ea2aac..000000000000 --- a/target/linux/generic/backport-6.1/020-v6.1-02-mm-x86-add-CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG.patch +++ /dev/null @@ -1,153 +0,0 @@ -From 493de1c4b0f2cd909169401da8c445f6c8a7e29d Mon Sep 17 00:00:00 2001 -From: Yu Zhao -Date: Sun, 18 Sep 2022 01:59:59 -0600 -Subject: [PATCH 02/29] mm: x86: add CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Some architectures support the accessed bit in non-leaf PMD entries, e.g., -x86 sets the accessed bit in a non-leaf PMD entry when using it as part of -linear address translation [1]. Page table walkers that clear the -accessed bit may use this capability to reduce their search space. - -Note that: -1. Although an inline function is preferable, this capability is added - as a configuration option for consistency with the existing macros. -2. Due to the little interest in other varieties, this capability was - only tested on Intel and AMD CPUs. - -Thanks to the following developers for their efforts [2][3]. 
- Randy Dunlap - Stephen Rothwell - -[1]: Intel 64 and IA-32 Architectures Software Developer's Manual - Volume 3 (June 2021), section 4.8 -[2] https://lore.kernel.org/r/bfdcc7c8-922f-61a9-aa15-7e7250f04af7@infradead.org/ -[3] https://lore.kernel.org/r/20220413151513.5a0d7a7e@canb.auug.org.au/ - -Link: https://lkml.kernel.org/r/20220918080010.2920238-3-yuzhao@google.com -Signed-off-by: Yu Zhao -Reviewed-by: Barry Song -Acked-by: Brian Geffon -Acked-by: Jan Alexander Steffens (heftig) -Acked-by: Oleksandr Natalenko -Acked-by: Steven Barrett -Acked-by: Suleiman Souhlal -Tested-by: Daniel Byrne -Tested-by: Donald Carr -Tested-by: Holger Hoffstätte -Tested-by: Konstantin Kharlamov -Tested-by: Shuang Zhai -Tested-by: Sofia Trinh -Tested-by: Vaibhav Jain -Cc: Andi Kleen -Cc: Aneesh Kumar K.V -Cc: Catalin Marinas -Cc: Dave Hansen -Cc: Hillf Danton -Cc: Jens Axboe -Cc: Johannes Weiner -Cc: Jonathan Corbet -Cc: Linus Torvalds -Cc: Matthew Wilcox -Cc: Mel Gorman -Cc: Miaohe Lin -Cc: Michael Larabel -Cc: Michal Hocko -Cc: Mike Rapoport -Cc: Mike Rapoport -Cc: Peter Zijlstra -Cc: Qi Zheng -Cc: Tejun Heo -Cc: Vlastimil Babka -Cc: Will Deacon -Signed-off-by: Andrew Morton ---- - arch/Kconfig | 8 ++++++++ - arch/x86/Kconfig | 1 + - arch/x86/include/asm/pgtable.h | 3 ++- - arch/x86/mm/pgtable.c | 5 ++++- - include/linux/pgtable.h | 4 ++-- - 5 files changed, 17 insertions(+), 4 deletions(-) - ---- a/arch/Kconfig -+++ b/arch/Kconfig -@@ -1295,6 +1295,14 @@ config ARCH_HAS_ELFCORE_COMPAT - config ARCH_HAS_PARANOID_L1D_FLUSH - bool - -+config ARCH_HAS_NONLEAF_PMD_YOUNG -+ bool -+ help -+ Architectures that select this option are capable of setting the -+ accessed bit in non-leaf PMD entries when using them as part of linear -+ address translations. Page table walkers that clear the accessed bit -+ may use this capability to reduce their search space. 
-+ - source "kernel/gcov/Kconfig" - - source "scripts/gcc-plugins/Kconfig" ---- a/arch/x86/Kconfig -+++ b/arch/x86/Kconfig -@@ -84,6 +84,7 @@ config X86 - select ARCH_HAS_PMEM_API if X86_64 - select ARCH_HAS_PTE_DEVMAP if X86_64 - select ARCH_HAS_PTE_SPECIAL -+ select ARCH_HAS_NONLEAF_PMD_YOUNG if PGTABLE_LEVELS > 2 - select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64 - select ARCH_HAS_COPY_MC if X86_64 - select ARCH_HAS_SET_MEMORY ---- a/arch/x86/include/asm/pgtable.h -+++ b/arch/x86/include/asm/pgtable.h -@@ -817,7 +817,8 @@ static inline unsigned long pmd_page_vad - - static inline int pmd_bad(pmd_t pmd) - { -- return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE; -+ return (pmd_flags(pmd) & ~(_PAGE_USER | _PAGE_ACCESSED)) != -+ (_KERNPG_TABLE & ~_PAGE_ACCESSED); - } - - static inline unsigned long pages_to_mb(unsigned long npg) ---- a/arch/x86/mm/pgtable.c -+++ b/arch/x86/mm/pgtable.c -@@ -550,7 +550,7 @@ int ptep_test_and_clear_young(struct vm_ - return ret; - } - --#ifdef CONFIG_TRANSPARENT_HUGEPAGE -+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG) - int pmdp_test_and_clear_young(struct vm_area_struct *vma, - unsigned long addr, pmd_t *pmdp) - { -@@ -562,6 +562,9 @@ int pmdp_test_and_clear_young(struct vm_ - - return ret; - } -+#endif -+ -+#ifdef CONFIG_TRANSPARENT_HUGEPAGE - int pudp_test_and_clear_young(struct vm_area_struct *vma, - unsigned long addr, pud_t *pudp) - { ---- a/include/linux/pgtable.h -+++ b/include/linux/pgtable.h -@@ -212,7 +212,7 @@ static inline int ptep_test_and_clear_yo - #endif - - #ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG --#ifdef CONFIG_TRANSPARENT_HUGEPAGE -+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG) - static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, - unsigned long address, - pmd_t *pmdp) -@@ -233,7 +233,7 @@ static inline int pmdp_test_and_clear_yo - BUILD_BUG(); - return 0; - } --#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ -+#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG */ - #endif - - #ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH diff --git a/target/linux/generic/backport-6.1/020-v6.1-03-mm-vmscan.c-refactor-shrink_node.patch b/target/linux/generic/backport-6.1/020-v6.1-03-mm-vmscan.c-refactor-shrink_node.patch deleted file mode 100644 index b8d2917d26ce..000000000000 --- a/target/linux/generic/backport-6.1/020-v6.1-03-mm-vmscan.c-refactor-shrink_node.patch +++ /dev/null @@ -1,275 +0,0 @@ -From 9e17efd11450d3d2069adaa3c58db9ac8ebd1c66 Mon Sep 17 00:00:00 2001 -From: Yu Zhao -Date: Sun, 18 Sep 2022 02:00:00 -0600 -Subject: [PATCH 03/29] mm/vmscan.c: refactor shrink_node() -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -This patch refactors shrink_node() to improve readability for the upcoming -changes to mm/vmscan.c. 
- -Link: https://lkml.kernel.org/r/20220918080010.2920238-4-yuzhao@google.com -Signed-off-by: Yu Zhao -Reviewed-by: Barry Song -Reviewed-by: Miaohe Lin -Acked-by: Brian Geffon -Acked-by: Jan Alexander Steffens (heftig) -Acked-by: Oleksandr Natalenko -Acked-by: Steven Barrett -Acked-by: Suleiman Souhlal -Tested-by: Daniel Byrne -Tested-by: Donald Carr -Tested-by: Holger Hoffstätte -Tested-by: Konstantin Kharlamov -Tested-by: Shuang Zhai -Tested-by: Sofia Trinh -Tested-by: Vaibhav Jain -Cc: Andi Kleen -Cc: Aneesh Kumar K.V -Cc: Catalin Marinas -Cc: Dave Hansen -Cc: Hillf Danton -Cc: Jens Axboe -Cc: Johannes Weiner -Cc: Jonathan Corbet -Cc: Linus Torvalds -Cc: Matthew Wilcox -Cc: Mel Gorman -Cc: Michael Larabel -Cc: Michal Hocko -Cc: Mike Rapoport -Cc: Mike Rapoport -Cc: Peter Zijlstra -Cc: Qi Zheng -Cc: Tejun Heo -Cc: Vlastimil Babka -Cc: Will Deacon -Signed-off-by: Andrew Morton ---- - mm/vmscan.c | 198 +++++++++++++++++++++++++++------------------------- - 1 file changed, 104 insertions(+), 94 deletions(-) - ---- a/mm/vmscan.c -+++ b/mm/vmscan.c -@@ -2497,6 +2497,109 @@ enum scan_balance { - SCAN_FILE, - }; - -+static void prepare_scan_count(pg_data_t *pgdat, struct scan_control *sc) -+{ -+ unsigned long file; -+ struct lruvec *target_lruvec; -+ -+ target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat); -+ -+ /* -+ * Flush the memory cgroup stats, so that we read accurate per-memcg -+ * lruvec stats for heuristics. -+ */ -+ mem_cgroup_flush_stats(); -+ -+ /* -+ * Determine the scan balance between anon and file LRUs. -+ */ -+ spin_lock_irq(&target_lruvec->lru_lock); -+ sc->anon_cost = target_lruvec->anon_cost; -+ sc->file_cost = target_lruvec->file_cost; -+ spin_unlock_irq(&target_lruvec->lru_lock); -+ -+ /* -+ * Target desirable inactive:active list ratios for the anon -+ * and file LRU lists. -+ */ -+ if (!sc->force_deactivate) { -+ unsigned long refaults; -+ -+ refaults = lruvec_page_state(target_lruvec, -+ WORKINGSET_ACTIVATE_ANON); -+ if (refaults != target_lruvec->refaults[0] || -+ inactive_is_low(target_lruvec, LRU_INACTIVE_ANON)) -+ sc->may_deactivate |= DEACTIVATE_ANON; -+ else -+ sc->may_deactivate &= ~DEACTIVATE_ANON; -+ -+ /* -+ * When refaults are being observed, it means a new -+ * workingset is being established. Deactivate to get -+ * rid of any stale active pages quickly. -+ */ -+ refaults = lruvec_page_state(target_lruvec, -+ WORKINGSET_ACTIVATE_FILE); -+ if (refaults != target_lruvec->refaults[1] || -+ inactive_is_low(target_lruvec, LRU_INACTIVE_FILE)) -+ sc->may_deactivate |= DEACTIVATE_FILE; -+ else -+ sc->may_deactivate &= ~DEACTIVATE_FILE; -+ } else -+ sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE; -+ -+ /* -+ * If we have plenty of inactive file pages that aren't -+ * thrashing, try to reclaim those first before touching -+ * anonymous pages. -+ */ -+ file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE); -+ if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE)) -+ sc->cache_trim_mode = 1; -+ else -+ sc->cache_trim_mode = 0; -+ -+ /* -+ * Prevent the reclaimer from falling into the cache trap: as -+ * cache pages start out inactive, every cache fault will tip -+ * the scan balance towards the file LRU. And as the file LRU -+ * shrinks, so does the window for rotation from references. -+ * This means we have a runaway feedback loop where a tiny -+ * thrashing file LRU becomes infinitely more attractive than -+ * anon pages. Try to detect this based on file LRU size. 
-+ */ -+ if (!cgroup_reclaim(sc)) { -+ unsigned long total_high_wmark = 0; -+ unsigned long free, anon; -+ int z; -+ -+ free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES); -+ file = node_page_state(pgdat, NR_ACTIVE_FILE) + -+ node_page_state(pgdat, NR_INACTIVE_FILE); -+ -+ for (z = 0; z < MAX_NR_ZONES; z++) { -+ struct zone *zone = &pgdat->node_zones[z]; -+ -+ if (!managed_zone(zone)) -+ continue; -+ -+ total_high_wmark += high_wmark_pages(zone); -+ } -+ -+ /* -+ * Consider anon: if that's low too, this isn't a -+ * runaway file reclaim problem, but rather just -+ * extreme pressure. Reclaim as per usual then. -+ */ -+ anon = node_page_state(pgdat, NR_INACTIVE_ANON); -+ -+ sc->file_is_tiny = -+ file + free <= total_high_wmark && -+ !(sc->may_deactivate & DEACTIVATE_ANON) && -+ anon >> sc->priority; -+ } -+} -+ - /* - * Determine how aggressively the anon and file LRU lists should be - * scanned. The relative value of each set of LRU lists is determined -@@ -2965,109 +3068,16 @@ static void shrink_node(pg_data_t *pgdat - unsigned long nr_reclaimed, nr_scanned; - struct lruvec *target_lruvec; - bool reclaimable = false; -- unsigned long file; - - target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat); - - again: -- /* -- * Flush the memory cgroup stats, so that we read accurate per-memcg -- * lruvec stats for heuristics. -- */ -- mem_cgroup_flush_stats(); -- - memset(&sc->nr, 0, sizeof(sc->nr)); - - nr_reclaimed = sc->nr_reclaimed; - nr_scanned = sc->nr_scanned; - -- /* -- * Determine the scan balance between anon and file LRUs. -- */ -- spin_lock_irq(&target_lruvec->lru_lock); -- sc->anon_cost = target_lruvec->anon_cost; -- sc->file_cost = target_lruvec->file_cost; -- spin_unlock_irq(&target_lruvec->lru_lock); -- -- /* -- * Target desirable inactive:active list ratios for the anon -- * and file LRU lists. -- */ -- if (!sc->force_deactivate) { -- unsigned long refaults; -- -- refaults = lruvec_page_state(target_lruvec, -- WORKINGSET_ACTIVATE_ANON); -- if (refaults != target_lruvec->refaults[0] || -- inactive_is_low(target_lruvec, LRU_INACTIVE_ANON)) -- sc->may_deactivate |= DEACTIVATE_ANON; -- else -- sc->may_deactivate &= ~DEACTIVATE_ANON; -- -- /* -- * When refaults are being observed, it means a new -- * workingset is being established. Deactivate to get -- * rid of any stale active pages quickly. -- */ -- refaults = lruvec_page_state(target_lruvec, -- WORKINGSET_ACTIVATE_FILE); -- if (refaults != target_lruvec->refaults[1] || -- inactive_is_low(target_lruvec, LRU_INACTIVE_FILE)) -- sc->may_deactivate |= DEACTIVATE_FILE; -- else -- sc->may_deactivate &= ~DEACTIVATE_FILE; -- } else -- sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE; -- -- /* -- * If we have plenty of inactive file pages that aren't -- * thrashing, try to reclaim those first before touching -- * anonymous pages. -- */ -- file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE); -- if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE)) -- sc->cache_trim_mode = 1; -- else -- sc->cache_trim_mode = 0; -- -- /* -- * Prevent the reclaimer from falling into the cache trap: as -- * cache pages start out inactive, every cache fault will tip -- * the scan balance towards the file LRU. And as the file LRU -- * shrinks, so does the window for rotation from references. -- * This means we have a runaway feedback loop where a tiny -- * thrashing file LRU becomes infinitely more attractive than -- * anon pages. Try to detect this based on file LRU size. 
-- */ -- if (!cgroup_reclaim(sc)) { -- unsigned long total_high_wmark = 0; -- unsigned long free, anon; -- int z; -- -- free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES); -- file = node_page_state(pgdat, NR_ACTIVE_FILE) + -- node_page_state(pgdat, NR_INACTIVE_FILE); -- -- for (z = 0; z < MAX_NR_ZONES; z++) { -- struct zone *zone = &pgdat->node_zones[z]; -- if (!managed_zone(zone)) -- continue; -- -- total_high_wmark += high_wmark_pages(zone); -- } -- -- /* -- * Consider anon: if that's low too, this isn't a -- * runaway file reclaim problem, but rather just -- * extreme pressure. Reclaim as per usual then. -- */ -- anon = node_page_state(pgdat, NR_INACTIVE_ANON); -- -- sc->file_is_tiny = -- file + free <= total_high_wmark && -- !(sc->may_deactivate & DEACTIVATE_ANON) && -- anon >> sc->priority; -- } -+ prepare_scan_count(pgdat, sc); - - shrink_node_memcgs(pgdat, sc); - diff --git a/target/linux/generic/backport-6.1/020-v6.1-04-Revert-include-linux-mm_inline.h-fold-__update_lru_s.patch b/target/linux/generic/backport-6.1/020-v6.1-04-Revert-include-linux-mm_inline.h-fold-__update_lru_s.patch deleted file mode 100644 index 2f277a56e1c8..000000000000 --- a/target/linux/generic/backport-6.1/020-v6.1-04-Revert-include-linux-mm_inline.h-fold-__update_lru_s.patch +++ /dev/null @@ -1,82 +0,0 @@ -From 03705be42114db7cc5bd6eb7bf7e8703c94d4880 Mon Sep 17 00:00:00 2001 -From: Yu Zhao -Date: Sun, 18 Sep 2022 02:00:01 -0600 -Subject: [PATCH 04/29] Revert "include/linux/mm_inline.h: fold - __update_lru_size() into its sole caller" -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -This patch undoes the following refactor: commit 289ccba18af4 -("include/linux/mm_inline.h: fold __update_lru_size() into its sole -caller") - -The upcoming changes to include/linux/mm_inline.h will reuse -__update_lru_size(). 
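The reuse in question appears in the multi-gen LRU groundwork patch further down: lru_gen_update_size() adjusts the node/zone LRU counters for a whole generation through the inner helper, without the memcg lru_size bookkeeping that the public update_lru_size() adds on top. In compressed form (lifted from that later hunk, with a comment added):

        /* a page (possibly a THP worth delta pages) enters a generation list */
        if (old_gen < 0) {
                if (lru_gen_is_active(lruvec, new_gen))
                        lru += LRU_ACTIVE;
                __update_lru_size(lruvec, lru, zone, delta);
                return;
        }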
- -Link: https://lkml.kernel.org/r/20220918080010.2920238-5-yuzhao@google.com -Signed-off-by: Yu Zhao -Reviewed-by: Miaohe Lin -Acked-by: Brian Geffon -Acked-by: Jan Alexander Steffens (heftig) -Acked-by: Oleksandr Natalenko -Acked-by: Steven Barrett -Acked-by: Suleiman Souhlal -Tested-by: Daniel Byrne -Tested-by: Donald Carr -Tested-by: Holger Hoffstätte -Tested-by: Konstantin Kharlamov -Tested-by: Shuang Zhai -Tested-by: Sofia Trinh -Tested-by: Vaibhav Jain -Cc: Andi Kleen -Cc: Aneesh Kumar K.V -Cc: Barry Song -Cc: Catalin Marinas -Cc: Dave Hansen -Cc: Hillf Danton -Cc: Jens Axboe -Cc: Johannes Weiner -Cc: Jonathan Corbet -Cc: Linus Torvalds -Cc: Matthew Wilcox -Cc: Mel Gorman -Cc: Michael Larabel -Cc: Michal Hocko -Cc: Mike Rapoport -Cc: Mike Rapoport -Cc: Peter Zijlstra -Cc: Qi Zheng -Cc: Tejun Heo -Cc: Vlastimil Babka -Cc: Will Deacon -Signed-off-by: Andrew Morton ---- - include/linux/mm_inline.h | 9 ++++++++- - 1 file changed, 8 insertions(+), 1 deletion(-) - ---- a/include/linux/mm_inline.h -+++ b/include/linux/mm_inline.h -@@ -24,7 +24,7 @@ static inline int page_is_file_lru(struc - return !PageSwapBacked(page); - } - --static __always_inline void update_lru_size(struct lruvec *lruvec, -+static __always_inline void __update_lru_size(struct lruvec *lruvec, - enum lru_list lru, enum zone_type zid, - int nr_pages) - { -@@ -33,6 +33,13 @@ static __always_inline void update_lru_s - __mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages); - __mod_zone_page_state(&pgdat->node_zones[zid], - NR_ZONE_LRU_BASE + lru, nr_pages); -+} -+ -+static __always_inline void update_lru_size(struct lruvec *lruvec, -+ enum lru_list lru, enum zone_type zid, -+ long nr_pages) -+{ -+ __update_lru_size(lruvec, lru, zid, nr_pages); - #ifdef CONFIG_MEMCG - mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages); - #endif diff --git a/target/linux/generic/backport-6.1/020-v6.1-05-mm-multi-gen-LRU-groundwork.patch b/target/linux/generic/backport-6.1/020-v6.1-05-mm-multi-gen-LRU-groundwork.patch deleted file mode 100644 index 237bd84875db..000000000000 --- a/target/linux/generic/backport-6.1/020-v6.1-05-mm-multi-gen-LRU-groundwork.patch +++ /dev/null @@ -1,807 +0,0 @@ -From a9b328add8422921a0dbbef162730800e16e8cfd Mon Sep 17 00:00:00 2001 -From: Yu Zhao -Date: Sun, 18 Sep 2022 02:00:02 -0600 -Subject: [PATCH 05/29] mm: multi-gen LRU: groundwork -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Evictable pages are divided into multiple generations for each lruvec. -The youngest generation number is stored in lrugen->max_seq for both -anon and file types as they are aged on an equal footing. The oldest -generation numbers are stored in lrugen->min_seq[] separately for anon -and file types as clean file pages can be evicted regardless of swap -constraints. These three variables are monotonically increasing. - -Generation numbers are truncated into order_base_2(MAX_NR_GENS+1) bits -in order to fit into the gen counter in page->flags. Each truncated -generation number is an index to lrugen->lists[]. The sliding window -technique is used to track at least MIN_NR_GENS and at most -MAX_NR_GENS generations. The gen counter stores a value within [1, -MAX_NR_GENS] while a page is on one of lrugen->lists[]. Otherwise it -stores 0. - -There are two conceptually independent procedures: "the aging", which -produces young generations, and "the eviction", which consumes old -generations. They form a closed-loop system, i.e., "the page reclaim". 
-Both procedures can be invoked from userspace for the purposes of working -set estimation and proactive reclaim. These techniques are commonly used -to optimize job scheduling (bin packing) in data centers [1][2]. - -To avoid confusion, the terms "hot" and "cold" will be applied to the -multi-gen LRU, as a new convention; the terms "active" and "inactive" will -be applied to the active/inactive LRU, as usual. - -The protection of hot pages and the selection of cold pages are based -on page access channels and patterns. There are two access channels: -one through page tables and the other through file descriptors. The -protection of the former channel is by design stronger because: -1. The uncertainty in determining the access patterns of the former - channel is higher due to the approximation of the accessed bit. -2. The cost of evicting the former channel is higher due to the TLB - flushes required and the likelihood of encountering the dirty bit. -3. The penalty of underprotecting the former channel is higher because - applications usually do not prepare themselves for major page - faults like they do for blocked I/O. E.g., GUI applications - commonly use dedicated I/O threads to avoid blocking rendering - threads. - -There are also two access patterns: one with temporal locality and the -other without. For the reasons listed above, the former channel is -assumed to follow the former pattern unless VM_SEQ_READ or VM_RAND_READ is -present; the latter channel is assumed to follow the latter pattern unless -outlying refaults have been observed [3][4]. - -The next patch will address the "outlying refaults". Three macros, i.e., -LRU_REFS_WIDTH, LRU_REFS_PGOFF and LRU_REFS_MASK, used later are added in -this patch to make the entire patchset less diffy. - -A page is added to the youngest generation on faulting. The aging needs -to check the accessed bit at least twice before handing this page over to -the eviction. The first check takes care of the accessed bit set on the -initial fault; the second check makes sure this page has not been used -since then. This protocol, AKA second chance, requires a minimum of two -generations, hence MIN_NR_GENS. 
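To make the bookkeeping concrete (a worked example, not text from the original patch): with MAX_NR_GENS = 4, a lruvec whose max_seq = 7 and min_seq[] = { 5, 5 } is tracking three live generations, and their list indexes come from the helper added below:

        gen = lru_gen_from_seq(seq);            /* seq % MAX_NR_GENS: 5 -> 1, 6 -> 2, 7 -> 3 */
        flags = (gen + 1UL) << LRU_GEN_PGOFF;   /* the gen counter stores gen + 1; 0 means "not on MGLRU" */

Per lru_gen_is_active(), only the generations of max_seq and max_seq - 1 (seqs 7 and 6 here) are reported as active for compatibility with the active/inactive view in places like /proc/vmstat; the generation of min_seq (seq 5 here) is the oldest and is what the eviction consumes first.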
- -[1] https://dl.acm.org/doi/10.1145/3297858.3304053 -[2] https://dl.acm.org/doi/10.1145/3503222.3507731 -[3] https://lwn.net/Articles/495543/ -[4] https://lwn.net/Articles/815342/ - -Link: https://lkml.kernel.org/r/20220918080010.2920238-6-yuzhao@google.com -Signed-off-by: Yu Zhao -Acked-by: Brian Geffon -Acked-by: Jan Alexander Steffens (heftig) -Acked-by: Oleksandr Natalenko -Acked-by: Steven Barrett -Acked-by: Suleiman Souhlal -Tested-by: Daniel Byrne -Tested-by: Donald Carr -Tested-by: Holger Hoffstätte -Tested-by: Konstantin Kharlamov -Tested-by: Shuang Zhai -Tested-by: Sofia Trinh -Tested-by: Vaibhav Jain -Cc: Andi Kleen -Cc: Aneesh Kumar K.V -Cc: Barry Song -Cc: Catalin Marinas -Cc: Dave Hansen -Cc: Hillf Danton -Cc: Jens Axboe -Cc: Johannes Weiner -Cc: Jonathan Corbet -Cc: Linus Torvalds -Cc: Matthew Wilcox -Cc: Mel Gorman -Cc: Miaohe Lin -Cc: Michael Larabel -Cc: Michal Hocko -Cc: Mike Rapoport -Cc: Mike Rapoport -Cc: Peter Zijlstra -Cc: Qi Zheng -Cc: Tejun Heo -Cc: Vlastimil Babka -Cc: Will Deacon -Signed-off-by: Andrew Morton ---- - fs/fuse/dev.c | 3 +- - include/linux/mm.h | 2 + - include/linux/mm_inline.h | 177 +++++++++++++++++++++++++++++- - include/linux/mmzone.h | 100 +++++++++++++++++ - include/linux/page-flags-layout.h | 13 ++- - include/linux/page-flags.h | 4 +- - include/linux/sched.h | 4 + - kernel/bounds.c | 5 + - mm/Kconfig | 8 ++ - mm/huge_memory.c | 3 +- - mm/memcontrol.c | 2 + - mm/memory.c | 25 +++++ - mm/mm_init.c | 6 +- - mm/mmzone.c | 2 + - mm/swap.c | 10 +- - mm/vmscan.c | 75 +++++++++++++ - 16 files changed, 425 insertions(+), 14 deletions(-) - ---- a/fs/fuse/dev.c -+++ b/fs/fuse/dev.c -@@ -785,7 +785,8 @@ static int fuse_check_page(struct page * - 1 << PG_active | - 1 << PG_workingset | - 1 << PG_reclaim | -- 1 << PG_waiters))) { -+ 1 << PG_waiters | -+ LRU_GEN_MASK | LRU_REFS_MASK))) { - dump_page(page, "fuse: trying to steal weird page"); - return 1; - } ---- a/include/linux/mm.h -+++ b/include/linux/mm.h -@@ -1093,6 +1093,8 @@ vm_fault_t finish_mkwrite_fault(struct v - #define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH) - #define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH) - #define KASAN_TAG_PGOFF (LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH) -+#define LRU_GEN_PGOFF (KASAN_TAG_PGOFF - LRU_GEN_WIDTH) -+#define LRU_REFS_PGOFF (LRU_GEN_PGOFF - LRU_REFS_WIDTH) - - /* - * Define the bit shifts to access each section. 
For non-existent ---- a/include/linux/mm_inline.h -+++ b/include/linux/mm_inline.h -@@ -26,10 +26,13 @@ static inline int page_is_file_lru(struc - - static __always_inline void __update_lru_size(struct lruvec *lruvec, - enum lru_list lru, enum zone_type zid, -- int nr_pages) -+ long nr_pages) - { - struct pglist_data *pgdat = lruvec_pgdat(lruvec); - -+ lockdep_assert_held(&lruvec->lru_lock); -+ WARN_ON_ONCE(nr_pages != (int)nr_pages); -+ - __mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages); - __mod_zone_page_state(&pgdat->node_zones[zid], - NR_ZONE_LRU_BASE + lru, nr_pages); -@@ -86,11 +89,177 @@ static __always_inline enum lru_list pag - return lru; - } - -+#ifdef CONFIG_LRU_GEN -+ -+static inline bool lru_gen_enabled(void) -+{ -+ return true; -+} -+ -+static inline bool lru_gen_in_fault(void) -+{ -+ return current->in_lru_fault; -+} -+ -+static inline int lru_gen_from_seq(unsigned long seq) -+{ -+ return seq % MAX_NR_GENS; -+} -+ -+static inline int page_lru_gen(struct page *page) -+{ -+ unsigned long flags = READ_ONCE(page->flags); -+ -+ return ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1; -+} -+ -+static inline bool lru_gen_is_active(struct lruvec *lruvec, int gen) -+{ -+ unsigned long max_seq = lruvec->lrugen.max_seq; -+ -+ VM_WARN_ON_ONCE(gen >= MAX_NR_GENS); -+ -+ /* see the comment on MIN_NR_GENS */ -+ return gen == lru_gen_from_seq(max_seq) || gen == lru_gen_from_seq(max_seq - 1); -+} -+ -+static inline void lru_gen_update_size(struct lruvec *lruvec, struct page *page, -+ int old_gen, int new_gen) -+{ -+ int type = page_is_file_lru(page); -+ int zone = page_zonenum(page); -+ int delta = thp_nr_pages(page); -+ enum lru_list lru = type * LRU_INACTIVE_FILE; -+ struct lru_gen_struct *lrugen = &lruvec->lrugen; -+ -+ VM_WARN_ON_ONCE(old_gen != -1 && old_gen >= MAX_NR_GENS); -+ VM_WARN_ON_ONCE(new_gen != -1 && new_gen >= MAX_NR_GENS); -+ VM_WARN_ON_ONCE(old_gen == -1 && new_gen == -1); -+ -+ if (old_gen >= 0) -+ WRITE_ONCE(lrugen->nr_pages[old_gen][type][zone], -+ lrugen->nr_pages[old_gen][type][zone] - delta); -+ if (new_gen >= 0) -+ WRITE_ONCE(lrugen->nr_pages[new_gen][type][zone], -+ lrugen->nr_pages[new_gen][type][zone] + delta); -+ -+ /* addition */ -+ if (old_gen < 0) { -+ if (lru_gen_is_active(lruvec, new_gen)) -+ lru += LRU_ACTIVE; -+ __update_lru_size(lruvec, lru, zone, delta); -+ return; -+ } -+ -+ /* deletion */ -+ if (new_gen < 0) { -+ if (lru_gen_is_active(lruvec, old_gen)) -+ lru += LRU_ACTIVE; -+ __update_lru_size(lruvec, lru, zone, -delta); -+ return; -+ } -+} -+ -+static inline bool lru_gen_add_page(struct lruvec *lruvec, struct page *page, bool reclaiming) -+{ -+ unsigned long seq; -+ unsigned long flags; -+ int gen = page_lru_gen(page); -+ int type = page_is_file_lru(page); -+ int zone = page_zonenum(page); -+ struct lru_gen_struct *lrugen = &lruvec->lrugen; -+ -+ VM_WARN_ON_ONCE_PAGE(gen != -1, page); -+ -+ if (PageUnevictable(page)) -+ return false; -+ /* -+ * There are three common cases for this page: -+ * 1. If it's hot, e.g., freshly faulted in or previously hot and -+ * migrated, add it to the youngest generation. -+ * 2. If it's cold but can't be evicted immediately, i.e., an anon page -+ * not in swapcache or a dirty page pending writeback, add it to the -+ * second oldest generation. -+ * 3. Everything else (clean, cold) is added to the oldest generation. 
-+ */ -+ if (PageActive(page)) -+ seq = lrugen->max_seq; -+ else if ((type == LRU_GEN_ANON && !PageSwapCache(page)) || -+ (PageReclaim(page) && -+ (PageDirty(page) || PageWriteback(page)))) -+ seq = lrugen->min_seq[type] + 1; -+ else -+ seq = lrugen->min_seq[type]; -+ -+ gen = lru_gen_from_seq(seq); -+ flags = (gen + 1UL) << LRU_GEN_PGOFF; -+ /* see the comment on MIN_NR_GENS about PG_active */ -+ set_mask_bits(&page->flags, LRU_GEN_MASK | BIT(PG_active), flags); -+ -+ lru_gen_update_size(lruvec, page, -1, gen); -+ /* for rotate_reclaimable_page() */ -+ if (reclaiming) -+ list_add_tail(&page->lru, &lrugen->lists[gen][type][zone]); -+ else -+ list_add(&page->lru, &lrugen->lists[gen][type][zone]); -+ -+ return true; -+} -+ -+static inline bool lru_gen_del_page(struct lruvec *lruvec, struct page *page, bool reclaiming) -+{ -+ unsigned long flags; -+ int gen = page_lru_gen(page); -+ -+ if (gen < 0) -+ return false; -+ -+ VM_WARN_ON_ONCE_PAGE(PageActive(page), page); -+ VM_WARN_ON_ONCE_PAGE(PageUnevictable(page), page); -+ -+ /* for migrate_page_states() */ -+ flags = !reclaiming && lru_gen_is_active(lruvec, gen) ? BIT(PG_active) : 0; -+ flags = set_mask_bits(&page->flags, LRU_GEN_MASK, flags); -+ gen = ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1; -+ -+ lru_gen_update_size(lruvec, page, gen, -1); -+ list_del(&page->lru); -+ -+ return true; -+} -+ -+#else /* !CONFIG_LRU_GEN */ -+ -+static inline bool lru_gen_enabled(void) -+{ -+ return false; -+} -+ -+static inline bool lru_gen_in_fault(void) -+{ -+ return false; -+} -+ -+static inline bool lru_gen_add_page(struct lruvec *lruvec, struct page *page, bool reclaiming) -+{ -+ return false; -+} -+ -+static inline bool lru_gen_del_page(struct lruvec *lruvec, struct page *page, bool reclaiming) -+{ -+ return false; -+} -+ -+#endif /* CONFIG_LRU_GEN */ -+ - static __always_inline void add_page_to_lru_list(struct page *page, - struct lruvec *lruvec) - { - enum lru_list lru = page_lru(page); - -+ if (lru_gen_add_page(lruvec, page, false)) -+ return; -+ - update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page)); - list_add(&page->lru, &lruvec->lists[lru]); - } -@@ -100,6 +269,9 @@ static __always_inline void add_page_to_ - { - enum lru_list lru = page_lru(page); - -+ if (lru_gen_add_page(lruvec, page, true)) -+ return; -+ - update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page)); - list_add_tail(&page->lru, &lruvec->lists[lru]); - } -@@ -107,6 +279,9 @@ static __always_inline void add_page_to_ - static __always_inline void del_page_from_lru_list(struct page *page, - struct lruvec *lruvec) - { -+ if (lru_gen_del_page(lruvec, page, false)) -+ return; -+ - list_del(&page->lru); - update_lru_size(lruvec, page_lru(page), page_zonenum(page), - -thp_nr_pages(page)); ---- a/include/linux/mmzone.h -+++ b/include/linux/mmzone.h -@@ -294,6 +294,102 @@ enum lruvec_flags { - */ - }; - -+#endif /* !__GENERATING_BOUNDS_H */ -+ -+/* -+ * Evictable pages are divided into multiple generations. The youngest and the -+ * oldest generation numbers, max_seq and min_seq, are monotonically increasing. -+ * They form a sliding window of a variable size [MIN_NR_GENS, MAX_NR_GENS]. An -+ * offset within MAX_NR_GENS, i.e., gen, indexes the LRU list of the -+ * corresponding generation. The gen counter in page->flags stores gen+1 while -+ * a page is on one of lrugen->lists[]. Otherwise it stores 0. -+ * -+ * A page is added to the youngest generation on faulting. 
The aging needs to -+ * check the accessed bit at least twice before handing this page over to the -+ * eviction. The first check takes care of the accessed bit set on the initial -+ * fault; the second check makes sure this page hasn't been used since then. -+ * This process, AKA second chance, requires a minimum of two generations, -+ * hence MIN_NR_GENS. And to maintain ABI compatibility with the active/inactive -+ * LRU, e.g., /proc/vmstat, these two generations are considered active; the -+ * rest of generations, if they exist, are considered inactive. See -+ * lru_gen_is_active(). -+ * -+ * PG_active is always cleared while a page is on one of lrugen->lists[] so that -+ * the aging needs not to worry about it. And it's set again when a page -+ * considered active is isolated for non-reclaiming purposes, e.g., migration. -+ * See lru_gen_add_page() and lru_gen_del_page(). -+ * -+ * MAX_NR_GENS is set to 4 so that the multi-gen LRU can support twice the -+ * number of categories of the active/inactive LRU when keeping track of -+ * accesses through page tables. This requires order_base_2(MAX_NR_GENS+1) bits -+ * in page->flags. -+ */ -+#define MIN_NR_GENS 2U -+#define MAX_NR_GENS 4U -+ -+#ifndef __GENERATING_BOUNDS_H -+ -+struct lruvec; -+ -+#define LRU_GEN_MASK ((BIT(LRU_GEN_WIDTH) - 1) << LRU_GEN_PGOFF) -+#define LRU_REFS_MASK ((BIT(LRU_REFS_WIDTH) - 1) << LRU_REFS_PGOFF) -+ -+#ifdef CONFIG_LRU_GEN -+ -+enum { -+ LRU_GEN_ANON, -+ LRU_GEN_FILE, -+}; -+ -+/* -+ * The youngest generation number is stored in max_seq for both anon and file -+ * types as they are aged on an equal footing. The oldest generation numbers are -+ * stored in min_seq[] separately for anon and file types as clean file pages -+ * can be evicted regardless of swap constraints. -+ * -+ * Normally anon and file min_seq are in sync. But if swapping is constrained, -+ * e.g., out of swap space, file min_seq is allowed to advance and leave anon -+ * min_seq behind. -+ * -+ * The number of pages in each generation is eventually consistent and therefore -+ * can be transiently negative. 
-+ */ -+struct lru_gen_struct { -+ /* the aging increments the youngest generation number */ -+ unsigned long max_seq; -+ /* the eviction increments the oldest generation numbers */ -+ unsigned long min_seq[ANON_AND_FILE]; -+ /* the multi-gen LRU lists, lazily sorted on eviction */ -+ struct list_head lists[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES]; -+ /* the multi-gen LRU sizes, eventually consistent */ -+ long nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES]; -+}; -+ -+void lru_gen_init_lruvec(struct lruvec *lruvec); -+ -+#ifdef CONFIG_MEMCG -+void lru_gen_init_memcg(struct mem_cgroup *memcg); -+void lru_gen_exit_memcg(struct mem_cgroup *memcg); -+#endif -+ -+#else /* !CONFIG_LRU_GEN */ -+ -+static inline void lru_gen_init_lruvec(struct lruvec *lruvec) -+{ -+} -+ -+#ifdef CONFIG_MEMCG -+static inline void lru_gen_init_memcg(struct mem_cgroup *memcg) -+{ -+} -+ -+static inline void lru_gen_exit_memcg(struct mem_cgroup *memcg) -+{ -+} -+#endif -+ -+#endif /* CONFIG_LRU_GEN */ -+ - struct lruvec { - struct list_head lists[NR_LRU_LISTS]; - /* per lruvec lru_lock for memcg */ -@@ -311,6 +407,10 @@ struct lruvec { - unsigned long refaults[ANON_AND_FILE]; - /* Various lruvec state flags (enum lruvec_flags) */ - unsigned long flags; -+#ifdef CONFIG_LRU_GEN -+ /* evictable pages divided into generations */ -+ struct lru_gen_struct lrugen; -+#endif - #ifdef CONFIG_MEMCG - struct pglist_data *pgdat; - #endif ---- a/include/linux/page-flags-layout.h -+++ b/include/linux/page-flags-layout.h -@@ -55,7 +55,8 @@ - #define SECTIONS_WIDTH 0 - #endif - --#if ZONES_WIDTH + SECTIONS_WIDTH + NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS -+#if ZONES_WIDTH + LRU_GEN_WIDTH + SECTIONS_WIDTH + NODES_SHIFT \ -+ <= BITS_PER_LONG - NR_PAGEFLAGS - #define NODES_WIDTH NODES_SHIFT - #elif defined(CONFIG_SPARSEMEM_VMEMMAP) - #error "Vmemmap: No space for nodes field in page flags" -@@ -89,8 +90,8 @@ - #define LAST_CPUPID_SHIFT 0 - #endif - --#if ZONES_WIDTH + SECTIONS_WIDTH + NODES_WIDTH + KASAN_TAG_WIDTH + LAST_CPUPID_SHIFT \ -- <= BITS_PER_LONG - NR_PAGEFLAGS -+#if ZONES_WIDTH + LRU_GEN_WIDTH + SECTIONS_WIDTH + NODES_WIDTH + \ -+ KASAN_TAG_WIDTH + LAST_CPUPID_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS - #define LAST_CPUPID_WIDTH LAST_CPUPID_SHIFT - #else - #define LAST_CPUPID_WIDTH 0 -@@ -100,10 +101,12 @@ - #define LAST_CPUPID_NOT_IN_PAGE_FLAGS - #endif - --#if ZONES_WIDTH + SECTIONS_WIDTH + NODES_WIDTH + KASAN_TAG_WIDTH + LAST_CPUPID_WIDTH \ -- > BITS_PER_LONG - NR_PAGEFLAGS -+#if ZONES_WIDTH + LRU_GEN_WIDTH + SECTIONS_WIDTH + NODES_WIDTH + \ -+ KASAN_TAG_WIDTH + LAST_CPUPID_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS - #error "Not enough bits in page flags" - #endif - -+#define LRU_REFS_WIDTH 0 -+ - #endif - #endif /* _LINUX_PAGE_FLAGS_LAYOUT */ ---- a/include/linux/page-flags.h -+++ b/include/linux/page-flags.h -@@ -845,7 +845,7 @@ static inline void ClearPageSlabPfmemall - 1UL << PG_private | 1UL << PG_private_2 | \ - 1UL << PG_writeback | 1UL << PG_reserved | \ - 1UL << PG_slab | 1UL << PG_active | \ -- 1UL << PG_unevictable | __PG_MLOCKED) -+ 1UL << PG_unevictable | __PG_MLOCKED | LRU_GEN_MASK) - - /* - * Flags checked when a page is prepped for return by the page allocator. -@@ -856,7 +856,7 @@ static inline void ClearPageSlabPfmemall - * alloc-free cycle to prevent from reusing the page. 
- */ - #define PAGE_FLAGS_CHECK_AT_PREP \ -- (PAGEFLAGS_MASK & ~__PG_HWPOISON) -+ ((PAGEFLAGS_MASK & ~__PG_HWPOISON) | LRU_GEN_MASK | LRU_REFS_MASK) - - #define PAGE_FLAGS_PRIVATE \ - (1UL << PG_private | 1UL << PG_private_2) ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -911,6 +911,10 @@ struct task_struct { - #ifdef CONFIG_MEMCG - unsigned in_user_fault:1; - #endif -+#ifdef CONFIG_LRU_GEN -+ /* whether the LRU algorithm may apply to this access */ -+ unsigned in_lru_fault:1; -+#endif - #ifdef CONFIG_COMPAT_BRK - unsigned brk_randomized:1; - #endif ---- a/kernel/bounds.c -+++ b/kernel/bounds.c -@@ -22,6 +22,11 @@ int main(void) - DEFINE(NR_CPUS_BITS, ilog2(CONFIG_NR_CPUS)); - #endif - DEFINE(SPINLOCK_SIZE, sizeof(spinlock_t)); -+#ifdef CONFIG_LRU_GEN -+ DEFINE(LRU_GEN_WIDTH, order_base_2(MAX_NR_GENS + 1)); -+#else -+ DEFINE(LRU_GEN_WIDTH, 0); -+#endif - /* End of constants */ - - return 0; ---- a/mm/Kconfig -+++ b/mm/Kconfig -@@ -897,6 +897,14 @@ config IO_MAPPING - config SECRETMEM - def_bool ARCH_HAS_SET_DIRECT_MAP && !EMBEDDED - -+config LRU_GEN -+ bool "Multi-Gen LRU" -+ depends on MMU -+ # make sure page->flags has enough spare bits -+ depends on 64BIT || !SPARSEMEM || SPARSEMEM_VMEMMAP -+ help -+ A high performance LRU implementation to overcommit memory. -+ - source "mm/damon/Kconfig" - - endmenu ---- a/mm/huge_memory.c -+++ b/mm/huge_memory.c -@@ -2366,7 +2366,8 @@ static void __split_huge_page_tail(struc - #ifdef CONFIG_64BIT - (1L << PG_arch_2) | - #endif -- (1L << PG_dirty))); -+ (1L << PG_dirty) | -+ LRU_GEN_MASK | LRU_REFS_MASK)); - - /* ->mapping in first tail page is compound_mapcount */ - VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING, ---- a/mm/memcontrol.c -+++ b/mm/memcontrol.c -@@ -5178,6 +5178,7 @@ static void __mem_cgroup_free(struct mem - - static void mem_cgroup_free(struct mem_cgroup *memcg) - { -+ lru_gen_exit_memcg(memcg); - memcg_wb_domain_exit(memcg); - __mem_cgroup_free(memcg); - } -@@ -5241,6 +5242,7 @@ static struct mem_cgroup *mem_cgroup_all - memcg->deferred_split_queue.split_queue_len = 0; - #endif - idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); -+ lru_gen_init_memcg(memcg); - return memcg; - fail: - mem_cgroup_id_remove(memcg); ---- a/mm/memory.c -+++ b/mm/memory.c -@@ -4792,6 +4792,27 @@ static inline void mm_account_fault(stru - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); - } - -+#ifdef CONFIG_LRU_GEN -+static void lru_gen_enter_fault(struct vm_area_struct *vma) -+{ -+ /* the LRU algorithm doesn't apply to sequential or random reads */ -+ current->in_lru_fault = !(vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ)); -+} -+ -+static void lru_gen_exit_fault(void) -+{ -+ current->in_lru_fault = false; -+} -+#else -+static void lru_gen_enter_fault(struct vm_area_struct *vma) -+{ -+} -+ -+static void lru_gen_exit_fault(void) -+{ -+} -+#endif /* CONFIG_LRU_GEN */ -+ - /* - * By the time we get here, we already hold the mm semaphore - * -@@ -4823,11 +4844,15 @@ vm_fault_t handle_mm_fault(struct vm_are - if (flags & FAULT_FLAG_USER) - mem_cgroup_enter_user_fault(); - -+ lru_gen_enter_fault(vma); -+ - if (unlikely(is_vm_hugetlb_page(vma))) - ret = hugetlb_fault(vma->vm_mm, vma, address, flags); - else - ret = __handle_mm_fault(vma, address, flags); - -+ lru_gen_exit_fault(); -+ - if (flags & FAULT_FLAG_USER) { - mem_cgroup_exit_user_fault(); - /* ---- a/mm/mm_init.c -+++ b/mm/mm_init.c -@@ -65,14 +65,16 @@ void __init mminit_verify_pageflags_layo - - shift = 8 * sizeof(unsigned long); - width = shift - 
SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH -- - LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH; -+ - LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH - LRU_GEN_WIDTH - LRU_REFS_WIDTH; - mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths", -- "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Flags %d\n", -+ "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Gen %d Tier %d Flags %d\n", - SECTIONS_WIDTH, - NODES_WIDTH, - ZONES_WIDTH, - LAST_CPUPID_WIDTH, - KASAN_TAG_WIDTH, -+ LRU_GEN_WIDTH, -+ LRU_REFS_WIDTH, - NR_PAGEFLAGS); - mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts", - "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d\n", ---- a/mm/mmzone.c -+++ b/mm/mmzone.c -@@ -81,6 +81,8 @@ void lruvec_init(struct lruvec *lruvec) - - for_each_lru(lru) - INIT_LIST_HEAD(&lruvec->lists[lru]); -+ -+ lru_gen_init_lruvec(lruvec); - } - - #if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) ---- a/mm/swap.c -+++ b/mm/swap.c -@@ -446,6 +446,11 @@ void lru_cache_add(struct page *page) - VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page); - VM_BUG_ON_PAGE(PageLRU(page), page); - -+ /* see the comment in lru_gen_add_page() */ -+ if (lru_gen_enabled() && !PageUnevictable(page) && -+ lru_gen_in_fault() && !(current->flags & PF_MEMALLOC)) -+ SetPageActive(page); -+ - get_page(page); - local_lock(&lru_pvecs.lock); - pvec = this_cpu_ptr(&lru_pvecs.lru_add); -@@ -547,7 +552,7 @@ static void lru_deactivate_file_fn(struc - - static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec) - { -- if (PageActive(page) && !PageUnevictable(page)) { -+ if (!PageUnevictable(page) && (PageActive(page) || lru_gen_enabled())) { - int nr_pages = thp_nr_pages(page); - - del_page_from_lru_list(page, lruvec); -@@ -661,7 +666,8 @@ void deactivate_file_page(struct page *p - */ - void deactivate_page(struct page *page) - { -- if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) { -+ if (PageLRU(page) && !PageUnevictable(page) && -+ (PageActive(page) || lru_gen_enabled())) { - struct pagevec *pvec; - - local_lock(&lru_pvecs.lock); ---- a/mm/vmscan.c -+++ b/mm/vmscan.c -@@ -2821,6 +2821,81 @@ static bool can_age_anon_pages(struct pg - return can_demote(pgdat->node_id, sc); - } - -+#ifdef CONFIG_LRU_GEN -+ -+/****************************************************************************** -+ * shorthand helpers -+ ******************************************************************************/ -+ -+#define for_each_gen_type_zone(gen, type, zone) \ -+ for ((gen) = 0; (gen) < MAX_NR_GENS; (gen)++) \ -+ for ((type) = 0; (type) < ANON_AND_FILE; (type)++) \ -+ for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++) -+ -+static struct lruvec __maybe_unused *get_lruvec(struct mem_cgroup *memcg, int nid) -+{ -+ struct pglist_data *pgdat = NODE_DATA(nid); -+ -+#ifdef CONFIG_MEMCG -+ if (memcg) { -+ struct lruvec *lruvec = &memcg->nodeinfo[nid]->lruvec; -+ -+ /* for hotadd_new_pgdat() */ -+ if (!lruvec->pgdat) -+ lruvec->pgdat = pgdat; -+ -+ return lruvec; -+ } -+#endif -+ VM_WARN_ON_ONCE(!mem_cgroup_disabled()); -+ -+ return pgdat ? 
&pgdat->__lruvec : NULL; -+} -+ -+/****************************************************************************** -+ * initialization -+ ******************************************************************************/ -+ -+void lru_gen_init_lruvec(struct lruvec *lruvec) -+{ -+ int gen, type, zone; -+ struct lru_gen_struct *lrugen = &lruvec->lrugen; -+ -+ lrugen->max_seq = MIN_NR_GENS + 1; -+ -+ for_each_gen_type_zone(gen, type, zone) -+ INIT_LIST_HEAD(&lrugen->lists[gen][type][zone]); -+} -+ -+#ifdef CONFIG_MEMCG -+void lru_gen_init_memcg(struct mem_cgroup *memcg) -+{ -+} -+ -+void lru_gen_exit_memcg(struct mem_cgroup *memcg) -+{ -+ int nid; -+ -+ for_each_node(nid) { -+ struct lruvec *lruvec = get_lruvec(memcg, nid); -+ -+ VM_WARN_ON_ONCE(memchr_inv(lruvec->lrugen.nr_pages, 0, -+ sizeof(lruvec->lrugen.nr_pages))); -+ } -+} -+#endif -+ -+static int __init init_lru_gen(void) -+{ -+ BUILD_BUG_ON(MIN_NR_GENS + 1 >= MAX_NR_GENS); -+ BUILD_BUG_ON(BIT(LRU_GEN_WIDTH) <= MAX_NR_GENS); -+ -+ return 0; -+}; -+late_initcall(init_lru_gen); -+ -+#endif /* CONFIG_LRU_GEN */ -+ - static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) - { - unsigned long nr[NR_LRU_LISTS]; diff --git a/target/linux/generic/backport-6.1/020-v6.1-06-mm-multi-gen-LRU-minimal-implementation.patch b/target/linux/generic/backport-6.1/020-v6.1-06-mm-multi-gen-LRU-minimal-implementation.patch deleted file mode 100644 index f8a7d9bd7f67..000000000000 --- a/target/linux/generic/backport-6.1/020-v6.1-06-mm-multi-gen-LRU-minimal-implementation.patch +++ /dev/null @@ -1,1447 +0,0 @@ -From b564b9471cd60ef1ee3961a224898ce4a9620d84 Mon Sep 17 00:00:00 2001 -From: Yu Zhao -Date: Sun, 18 Sep 2022 02:00:03 -0600 -Subject: [PATCH 06/29] mm: multi-gen LRU: minimal implementation -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -To avoid confusion, the terms "promotion" and "demotion" will be applied -to the multi-gen LRU, as a new convention; the terms "activation" and -"deactivation" will be applied to the active/inactive LRU, as usual. - -The aging produces young generations. Given an lruvec, it increments -max_seq when max_seq-min_seq+1 approaches MIN_NR_GENS. The aging promotes -hot pages to the youngest generation when it finds them accessed through -page tables; the demotion of cold pages happens consequently when it -increments max_seq. Promotion in the aging path does not involve any LRU -list operations, only the updates of the gen counter and -lrugen->nr_pages[]; demotion, unless as the result of the increment of -max_seq, requires LRU list operations, e.g., lru_deactivate_fn(). The -aging has the complexity O(nr_hot_pages), since it is only interested in -hot pages. - -The eviction consumes old generations. Given an lruvec, it increments -min_seq when lrugen->lists[] indexed by min_seq%MAX_NR_GENS becomes empty. -A feedback loop modeled after the PID controller monitors refaults over -anon and file types and decides which type to evict when both types are -available from the same generation. - -The protection of pages accessed multiple times through file descriptors -takes place in the eviction path. Each generation is divided into -multiple tiers. A page accessed N times through file descriptors is in -tier order_base_2(N). Tiers do not have dedicated lrugen->lists[], only -bits in page->flags. The aforementioned feedback loop also monitors -refaults over all tiers and decides when to protect pages in which tiers -(N>1), using the first tier (N=0,1) as a baseline. 
The first tier -contains single-use unmapped clean pages, which are most likely the best -choices. In contrast to promotion in the aging path, the protection of a -page in the eviction path is achieved by moving this page to the next -generation, i.e., min_seq+1, if the feedback loop decides so. This -approach has the following advantages: - -1. It removes the cost of activation in the buffered access path by - inferring whether pages accessed multiple times through file - descriptors are statistically hot and thus worth protecting in the - eviction path. -2. It takes pages accessed through page tables into account and avoids - overprotecting pages accessed multiple times through file - descriptors. (Pages accessed through page tables are in the first - tier, since N=0.) -3. More tiers provide better protection for pages accessed more than - twice through file descriptors, when under heavy buffered I/O - workloads. - -Server benchmark results: - Single workload: - fio (buffered I/O): +[30, 32]% - IOPS BW - 5.19-rc1: 2673k 10.2GiB/s - patch1-6: 3491k 13.3GiB/s - - Single workload: - memcached (anon): -[4, 6]% - Ops/sec KB/sec - 5.19-rc1: 1161501.04 45177.25 - patch1-6: 1106168.46 43025.04 - - Configurations: - CPU: two Xeon 6154 - Mem: total 256G - - Node 1 was only used as a ram disk to reduce the variance in the - results. - - patch drivers/block/brd.c < gfp_flags = GFP_NOIO | __GFP_ZERO | __GFP_HIGHMEM | __GFP_THISNODE; - > page = alloc_pages_node(1, gfp_flags, 0); - EOF - - cat >>/etc/systemd/system.conf <>/etc/memcached.conf </sys/fs/cgroup/user.slice/test/memory.max - echo $$ >/sys/fs/cgroup/user.slice/test/cgroup.procs - fio -name=mglru --numjobs=72 --directory=/mnt --size=1408m \ - --buffered=1 --ioengine=io_uring --iodepth=128 \ - --iodepth_batch_submit=32 --iodepth_batch_complete=32 \ - --rw=randread --random_distribution=random --norandommap \ - --time_based --ramp_time=10m --runtime=5m --group_reporting - - cat memcached.sh - modprobe brd rd_nr=1 rd_size=113246208 - swapoff -a - mkswap /dev/ram0 - swapon /dev/ram0 - - memtier_benchmark -S /var/run/memcached/memcached.sock \ - -P memcache_binary -n allkeys --key-minimum=1 \ - --key-maximum=65000000 --key-pattern=P:P -c 1 -t 36 \ - --ratio 1:0 --pipeline 8 -d 2000 - - memtier_benchmark -S /var/run/memcached/memcached.sock \ - -P memcache_binary -n allkeys --key-minimum=1 \ - --key-maximum=65000000 --key-pattern=R:R -c 1 -t 36 \ - --ratio 0:1 --pipeline 8 --randomize --distinct-client-seed - -Client benchmark results: - kswapd profiles: - 5.19-rc1 - 40.33% page_vma_mapped_walk (overhead) - 21.80% lzo1x_1_do_compress (real work) - 7.53% do_raw_spin_lock - 3.95% _raw_spin_unlock_irq - 2.52% vma_interval_tree_iter_next - 2.37% page_referenced_one - 2.28% vma_interval_tree_subtree_search - 1.97% anon_vma_interval_tree_iter_first - 1.60% ptep_clear_flush - 1.06% __zram_bvec_write - - patch1-6 - 39.03% lzo1x_1_do_compress (real work) - 18.47% page_vma_mapped_walk (overhead) - 6.74% _raw_spin_unlock_irq - 3.97% do_raw_spin_lock - 2.49% ptep_clear_flush - 2.48% anon_vma_interval_tree_iter_first - 1.92% page_referenced_one - 1.88% __zram_bvec_write - 1.48% memmove - 1.31% vma_interval_tree_iter_next - - Configurations: - CPU: single Snapdragon 7c - Mem: total 4G - - ChromeOS MemoryPressure [1] - -[1] https://chromium.googlesource.com/chromiumos/platform/tast-tests/ - -Link: https://lkml.kernel.org/r/20220918080010.2920238-7-yuzhao@google.com -Signed-off-by: Yu Zhao -Acked-by: Brian Geffon -Acked-by: Jan Alexander Steffens (heftig) 
-Acked-by: Oleksandr Natalenko -Acked-by: Steven Barrett -Acked-by: Suleiman Souhlal -Tested-by: Daniel Byrne -Tested-by: Donald Carr -Tested-by: Holger Hoffstätte -Tested-by: Konstantin Kharlamov -Tested-by: Shuang Zhai -Tested-by: Sofia Trinh -Tested-by: Vaibhav Jain -Cc: Andi Kleen -Cc: Aneesh Kumar K.V -Cc: Barry Song -Cc: Catalin Marinas -Cc: Dave Hansen -Cc: Hillf Danton -Cc: Jens Axboe -Cc: Johannes Weiner -Cc: Jonathan Corbet -Cc: Linus Torvalds -Cc: Matthew Wilcox -Cc: Mel Gorman -Cc: Miaohe Lin -Cc: Michael Larabel -Cc: Michal Hocko -Cc: Mike Rapoport -Cc: Mike Rapoport -Cc: Peter Zijlstra -Cc: Qi Zheng -Cc: Tejun Heo -Cc: Vlastimil Babka -Cc: Will Deacon -Signed-off-by: Andrew Morton ---- - include/linux/mm_inline.h | 36 ++ - include/linux/mmzone.h | 41 ++ - include/linux/page-flags-layout.h | 5 +- - kernel/bounds.c | 2 + - mm/Kconfig | 11 + - mm/swap.c | 39 ++ - mm/vmscan.c | 792 +++++++++++++++++++++++++++++- - mm/workingset.c | 110 ++++- - 8 files changed, 1025 insertions(+), 11 deletions(-) - ---- a/include/linux/mm_inline.h -+++ b/include/linux/mm_inline.h -@@ -106,6 +106,33 @@ static inline int lru_gen_from_seq(unsig - return seq % MAX_NR_GENS; - } - -+static inline int lru_hist_from_seq(unsigned long seq) -+{ -+ return seq % NR_HIST_GENS; -+} -+ -+static inline int lru_tier_from_refs(int refs) -+{ -+ VM_WARN_ON_ONCE(refs > BIT(LRU_REFS_WIDTH)); -+ -+ /* see the comment in page_lru_refs() */ -+ return order_base_2(refs + 1); -+} -+ -+static inline int page_lru_refs(struct page *page) -+{ -+ unsigned long flags = READ_ONCE(page->flags); -+ bool workingset = flags & BIT(PG_workingset); -+ -+ /* -+ * Return the number of accesses beyond PG_referenced, i.e., N-1 if the -+ * total number of accesses is N>1, since N=0,1 both map to the first -+ * tier. lru_tier_from_refs() will account for this off-by-one. Also see -+ * the comment on MAX_NR_TIERS. -+ */ -+ return ((flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF) + workingset; -+} -+ - static inline int page_lru_gen(struct page *page) - { - unsigned long flags = READ_ONCE(page->flags); -@@ -158,6 +185,15 @@ static inline void lru_gen_update_size(s - __update_lru_size(lruvec, lru, zone, -delta); - return; - } -+ -+ /* promotion */ -+ if (!lru_gen_is_active(lruvec, old_gen) && lru_gen_is_active(lruvec, new_gen)) { -+ __update_lru_size(lruvec, lru, zone, -delta); -+ __update_lru_size(lruvec, lru + LRU_ACTIVE, zone, delta); -+ } -+ -+ /* demotion requires isolation, e.g., lru_deactivate_fn() */ -+ VM_WARN_ON_ONCE(lru_gen_is_active(lruvec, old_gen) && !lru_gen_is_active(lruvec, new_gen)); - } - - static inline bool lru_gen_add_page(struct lruvec *lruvec, struct page *page, bool reclaiming) ---- a/include/linux/mmzone.h -+++ b/include/linux/mmzone.h -@@ -327,6 +327,28 @@ enum lruvec_flags { - #define MIN_NR_GENS 2U - #define MAX_NR_GENS 4U - -+/* -+ * Each generation is divided into multiple tiers. A page accessed N times -+ * through file descriptors is in tier order_base_2(N). A page in the first tier -+ * (N=0,1) is marked by PG_referenced unless it was faulted in through page -+ * tables or read ahead. A page in any other tier (N>1) is marked by -+ * PG_referenced and PG_workingset. This implies a minimum of two tiers is -+ * supported without using additional bits in page->flags. -+ * -+ * In contrast to moving across generations which requires the LRU lock, moving -+ * across tiers only involves atomic operations on page->flags and therefore -+ * has a negligible cost in the buffered access path. 
In the eviction path, -+ * comparisons of refaulted/(evicted+protected) from the first tier and the -+ * rest infer whether pages accessed multiple times through file descriptors -+ * are statistically hot and thus worth protecting. -+ * -+ * MAX_NR_TIERS is set to 4 so that the multi-gen LRU can support twice the -+ * number of categories of the active/inactive LRU when keeping track of -+ * accesses through file descriptors. This uses MAX_NR_TIERS-2 spare bits in -+ * page->flags. -+ */ -+#define MAX_NR_TIERS 4U -+ - #ifndef __GENERATING_BOUNDS_H - - struct lruvec; -@@ -341,6 +363,16 @@ enum { - LRU_GEN_FILE, - }; - -+#define MIN_LRU_BATCH BITS_PER_LONG -+#define MAX_LRU_BATCH (MIN_LRU_BATCH * 64) -+ -+/* whether to keep historical stats from evicted generations */ -+#ifdef CONFIG_LRU_GEN_STATS -+#define NR_HIST_GENS MAX_NR_GENS -+#else -+#define NR_HIST_GENS 1U -+#endif -+ - /* - * The youngest generation number is stored in max_seq for both anon and file - * types as they are aged on an equal footing. The oldest generation numbers are -@@ -363,6 +395,15 @@ struct lru_gen_struct { - struct list_head lists[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES]; - /* the multi-gen LRU sizes, eventually consistent */ - long nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES]; -+ /* the exponential moving average of refaulted */ -+ unsigned long avg_refaulted[ANON_AND_FILE][MAX_NR_TIERS]; -+ /* the exponential moving average of evicted+protected */ -+ unsigned long avg_total[ANON_AND_FILE][MAX_NR_TIERS]; -+ /* the first tier doesn't need protection, hence the minus one */ -+ unsigned long protected[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS - 1]; -+ /* can be modified without holding the LRU lock */ -+ atomic_long_t evicted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS]; -+ atomic_long_t refaulted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS]; - }; - - void lru_gen_init_lruvec(struct lruvec *lruvec); ---- a/include/linux/page-flags-layout.h -+++ b/include/linux/page-flags-layout.h -@@ -106,7 +106,10 @@ - #error "Not enough bits in page flags" - #endif - --#define LRU_REFS_WIDTH 0 -+/* see the comment on MAX_NR_TIERS */ -+#define LRU_REFS_WIDTH min(__LRU_REFS_WIDTH, BITS_PER_LONG - NR_PAGEFLAGS - \ -+ ZONES_WIDTH - LRU_GEN_WIDTH - SECTIONS_WIDTH - \ -+ NODES_WIDTH - KASAN_TAG_WIDTH - LAST_CPUPID_WIDTH) - - #endif - #endif /* _LINUX_PAGE_FLAGS_LAYOUT */ ---- a/kernel/bounds.c -+++ b/kernel/bounds.c -@@ -24,8 +24,10 @@ int main(void) - DEFINE(SPINLOCK_SIZE, sizeof(spinlock_t)); - #ifdef CONFIG_LRU_GEN - DEFINE(LRU_GEN_WIDTH, order_base_2(MAX_NR_GENS + 1)); -+ DEFINE(__LRU_REFS_WIDTH, MAX_NR_TIERS - 2); - #else - DEFINE(LRU_GEN_WIDTH, 0); -+ DEFINE(__LRU_REFS_WIDTH, 0); - #endif - /* End of constants */ - ---- a/mm/Kconfig -+++ b/mm/Kconfig -@@ -897,6 +897,7 @@ config IO_MAPPING - config SECRETMEM - def_bool ARCH_HAS_SET_DIRECT_MAP && !EMBEDDED - -+# multi-gen LRU { - config LRU_GEN - bool "Multi-Gen LRU" - depends on MMU -@@ -905,6 +906,16 @@ config LRU_GEN - help - A high performance LRU implementation to overcommit memory. - -+config LRU_GEN_STATS -+ bool "Full stats for debugging" -+ depends on LRU_GEN -+ help -+ Do not enable this option unless you plan to look at historical stats -+ from evicted generations for debugging purpose. -+ -+ This option has a per-memcg and per-node memory overhead. 
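(Editor's aside, not part of the dropped patch.) The Kconfig guards in this series, like the page-flags-layout.h hunks earlier, exist because LRU_GEN_WIDTH and LRU_REFS_WIDTH must fit into the spare bits of page->flags. The following user-space sketch mirrors that build-time budget check; every width below is a hypothetical example value, not output from any real kernel configuration.

/*
 * Editor's illustrative sketch, not taken from the dropped patches: mirrors
 * the "Not enough bits in page flags" check in page-flags-layout.h. All of
 * the widths are hypothetical example values.
 */
#include <stdio.h>

int main(void)
{
	const int bits_per_long = 64, nr_pageflags = 26;     /* illustrative */
	const int zones = 2, sections = 0, nodes = 6;        /* illustrative */
	const int kasan_tag = 0, last_cpupid = 22;           /* illustrative */
	const int lru_gen = 3;  /* order_base_2(MAX_NR_GENS + 1), MAX_NR_GENS = 4 */
	const int lru_refs = 2; /* MAX_NR_TIERS - 2, MAX_NR_TIERS = 4 */

	int used = zones + lru_gen + lru_refs + sections + nodes +
		   kasan_tag + last_cpupid;
	int spare = bits_per_long - nr_pageflags;

	/* the real header turns this into "#error Not enough bits in page flags" */
	printf("%d bits used of %d spare: %s\n", used, spare,
	       used > spare ? "would hit #error" : "fits");
	return 0;
}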
-+# } -+ - source "mm/damon/Kconfig" - - endmenu ---- a/mm/swap.c -+++ b/mm/swap.c -@@ -389,6 +389,40 @@ static void __lru_cache_activate_page(st - local_unlock(&lru_pvecs.lock); - } - -+#ifdef CONFIG_LRU_GEN -+static void page_inc_refs(struct page *page) -+{ -+ unsigned long new_flags, old_flags = READ_ONCE(page->flags); -+ -+ if (PageUnevictable(page)) -+ return; -+ -+ if (!PageReferenced(page)) { -+ SetPageReferenced(page); -+ return; -+ } -+ -+ if (!PageWorkingset(page)) { -+ SetPageWorkingset(page); -+ return; -+ } -+ -+ /* see the comment on MAX_NR_TIERS */ -+ do { -+ new_flags = old_flags & LRU_REFS_MASK; -+ if (new_flags == LRU_REFS_MASK) -+ break; -+ -+ new_flags += BIT(LRU_REFS_PGOFF); -+ new_flags |= old_flags & ~LRU_REFS_MASK; -+ } while (!try_cmpxchg(&page->flags, &old_flags, new_flags)); -+} -+#else -+static void page_inc_refs(struct page *page) -+{ -+} -+#endif /* CONFIG_LRU_GEN */ -+ - /* - * Mark a page as having seen activity. - * -@@ -403,6 +437,11 @@ void mark_page_accessed(struct page *pag - { - page = compound_head(page); - -+ if (lru_gen_enabled()) { -+ page_inc_refs(page); -+ return; -+ } -+ - if (!PageReferenced(page)) { - SetPageReferenced(page); - } else if (PageUnevictable(page)) { ---- a/mm/vmscan.c -+++ b/mm/vmscan.c -@@ -1142,9 +1142,11 @@ static int __remove_mapping(struct addre - - if (PageSwapCache(page)) { - swp_entry_t swap = { .val = page_private(page) }; -- mem_cgroup_swapout(page, swap); -+ -+ /* get a shadow entry before mem_cgroup_swapout() clears page_memcg() */ - if (reclaimed && !mapping_exiting(mapping)) - shadow = workingset_eviction(page, target_memcg); -+ mem_cgroup_swapout(page, swap); - __delete_from_swap_cache(page, swap, shadow); - xa_unlock_irq(&mapping->i_pages); - put_swap_page(page, swap); -@@ -2502,6 +2504,9 @@ static void prepare_scan_count(pg_data_t - unsigned long file; - struct lruvec *target_lruvec; - -+ if (lru_gen_enabled()) -+ return; -+ - target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat); - - /* -@@ -2827,6 +2832,17 @@ static bool can_age_anon_pages(struct pg - * shorthand helpers - ******************************************************************************/ - -+#define LRU_REFS_FLAGS (BIT(PG_referenced) | BIT(PG_workingset)) -+ -+#define DEFINE_MAX_SEQ(lruvec) \ -+ unsigned long max_seq = READ_ONCE((lruvec)->lrugen.max_seq) -+ -+#define DEFINE_MIN_SEQ(lruvec) \ -+ unsigned long min_seq[ANON_AND_FILE] = { \ -+ READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_ANON]), \ -+ READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_FILE]), \ -+ } -+ - #define for_each_gen_type_zone(gen, type, zone) \ - for ((gen) = 0; (gen) < MAX_NR_GENS; (gen)++) \ - for ((type) = 0; (type) < ANON_AND_FILE; (type)++) \ -@@ -2852,6 +2868,745 @@ static struct lruvec __maybe_unused *get - return pgdat ? 
&pgdat->__lruvec : NULL; - } - -+static int get_swappiness(struct lruvec *lruvec, struct scan_control *sc) -+{ -+ struct mem_cgroup *memcg = lruvec_memcg(lruvec); -+ struct pglist_data *pgdat = lruvec_pgdat(lruvec); -+ -+ if (!can_demote(pgdat->node_id, sc) && -+ mem_cgroup_get_nr_swap_pages(memcg) < MIN_LRU_BATCH) -+ return 0; -+ -+ return mem_cgroup_swappiness(memcg); -+} -+ -+static int get_nr_gens(struct lruvec *lruvec, int type) -+{ -+ return lruvec->lrugen.max_seq - lruvec->lrugen.min_seq[type] + 1; -+} -+ -+static bool __maybe_unused seq_is_valid(struct lruvec *lruvec) -+{ -+ /* see the comment on lru_gen_struct */ -+ return get_nr_gens(lruvec, LRU_GEN_FILE) >= MIN_NR_GENS && -+ get_nr_gens(lruvec, LRU_GEN_FILE) <= get_nr_gens(lruvec, LRU_GEN_ANON) && -+ get_nr_gens(lruvec, LRU_GEN_ANON) <= MAX_NR_GENS; -+} -+ -+/****************************************************************************** -+ * refault feedback loop -+ ******************************************************************************/ -+ -+/* -+ * A feedback loop based on Proportional-Integral-Derivative (PID) controller. -+ * -+ * The P term is refaulted/(evicted+protected) from a tier in the generation -+ * currently being evicted; the I term is the exponential moving average of the -+ * P term over the generations previously evicted, using the smoothing factor -+ * 1/2; the D term isn't supported. -+ * -+ * The setpoint (SP) is always the first tier of one type; the process variable -+ * (PV) is either any tier of the other type or any other tier of the same -+ * type. -+ * -+ * The error is the difference between the SP and the PV; the correction is to -+ * turn off protection when SP>PV or turn on protection when SPlrugen; -+ int hist = lru_hist_from_seq(lrugen->min_seq[type]); -+ -+ pos->refaulted = lrugen->avg_refaulted[type][tier] + -+ atomic_long_read(&lrugen->refaulted[hist][type][tier]); -+ pos->total = lrugen->avg_total[type][tier] + -+ atomic_long_read(&lrugen->evicted[hist][type][tier]); -+ if (tier) -+ pos->total += lrugen->protected[hist][type][tier - 1]; -+ pos->gain = gain; -+} -+ -+static void reset_ctrl_pos(struct lruvec *lruvec, int type, bool carryover) -+{ -+ int hist, tier; -+ struct lru_gen_struct *lrugen = &lruvec->lrugen; -+ bool clear = carryover ? NR_HIST_GENS == 1 : NR_HIST_GENS > 1; -+ unsigned long seq = carryover ? lrugen->min_seq[type] : lrugen->max_seq + 1; -+ -+ lockdep_assert_held(&lruvec->lru_lock); -+ -+ if (!carryover && !clear) -+ return; -+ -+ hist = lru_hist_from_seq(seq); -+ -+ for (tier = 0; tier < MAX_NR_TIERS; tier++) { -+ if (carryover) { -+ unsigned long sum; -+ -+ sum = lrugen->avg_refaulted[type][tier] + -+ atomic_long_read(&lrugen->refaulted[hist][type][tier]); -+ WRITE_ONCE(lrugen->avg_refaulted[type][tier], sum / 2); -+ -+ sum = lrugen->avg_total[type][tier] + -+ atomic_long_read(&lrugen->evicted[hist][type][tier]); -+ if (tier) -+ sum += lrugen->protected[hist][type][tier - 1]; -+ WRITE_ONCE(lrugen->avg_total[type][tier], sum / 2); -+ } -+ -+ if (clear) { -+ atomic_long_set(&lrugen->refaulted[hist][type][tier], 0); -+ atomic_long_set(&lrugen->evicted[hist][type][tier], 0); -+ if (tier) -+ WRITE_ONCE(lrugen->protected[hist][type][tier - 1], 0); -+ } -+ } -+} -+ -+static bool positive_ctrl_err(struct ctrl_pos *sp, struct ctrl_pos *pv) -+{ -+ /* -+ * Return true if the PV has a limited number of refaults or a lower -+ * refaulted/total than the SP. 
-+ */ -+ return pv->refaulted < MIN_LRU_BATCH || -+ pv->refaulted * (sp->total + MIN_LRU_BATCH) * sp->gain <= -+ (sp->refaulted + 1) * pv->total * pv->gain; -+} -+ -+/****************************************************************************** -+ * the aging -+ ******************************************************************************/ -+ -+/* protect pages accessed multiple times through file descriptors */ -+static int page_inc_gen(struct lruvec *lruvec, struct page *page, bool reclaiming) -+{ -+ int type = page_is_file_lru(page); -+ struct lru_gen_struct *lrugen = &lruvec->lrugen; -+ int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]); -+ unsigned long new_flags, old_flags = READ_ONCE(page->flags); -+ -+ VM_WARN_ON_ONCE_PAGE(!(old_flags & LRU_GEN_MASK), page); -+ -+ do { -+ new_gen = (old_gen + 1) % MAX_NR_GENS; -+ -+ new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_MASK | LRU_REFS_FLAGS); -+ new_flags |= (new_gen + 1UL) << LRU_GEN_PGOFF; -+ /* for end_page_writeback() */ -+ if (reclaiming) -+ new_flags |= BIT(PG_reclaim); -+ } while (!try_cmpxchg(&page->flags, &old_flags, new_flags)); -+ -+ lru_gen_update_size(lruvec, page, old_gen, new_gen); -+ -+ return new_gen; -+} -+ -+static void inc_min_seq(struct lruvec *lruvec, int type) -+{ -+ struct lru_gen_struct *lrugen = &lruvec->lrugen; -+ -+ reset_ctrl_pos(lruvec, type, true); -+ WRITE_ONCE(lrugen->min_seq[type], lrugen->min_seq[type] + 1); -+} -+ -+static bool try_to_inc_min_seq(struct lruvec *lruvec, bool can_swap) -+{ -+ int gen, type, zone; -+ bool success = false; -+ struct lru_gen_struct *lrugen = &lruvec->lrugen; -+ DEFINE_MIN_SEQ(lruvec); -+ -+ VM_WARN_ON_ONCE(!seq_is_valid(lruvec)); -+ -+ /* find the oldest populated generation */ -+ for (type = !can_swap; type < ANON_AND_FILE; type++) { -+ while (min_seq[type] + MIN_NR_GENS <= lrugen->max_seq) { -+ gen = lru_gen_from_seq(min_seq[type]); -+ -+ for (zone = 0; zone < MAX_NR_ZONES; zone++) { -+ if (!list_empty(&lrugen->lists[gen][type][zone])) -+ goto next; -+ } -+ -+ min_seq[type]++; -+ } -+next: -+ ; -+ } -+ -+ /* see the comment on lru_gen_struct */ -+ if (can_swap) { -+ min_seq[LRU_GEN_ANON] = min(min_seq[LRU_GEN_ANON], min_seq[LRU_GEN_FILE]); -+ min_seq[LRU_GEN_FILE] = max(min_seq[LRU_GEN_ANON], lrugen->min_seq[LRU_GEN_FILE]); -+ } -+ -+ for (type = !can_swap; type < ANON_AND_FILE; type++) { -+ if (min_seq[type] == lrugen->min_seq[type]) -+ continue; -+ -+ reset_ctrl_pos(lruvec, type, true); -+ WRITE_ONCE(lrugen->min_seq[type], min_seq[type]); -+ success = true; -+ } -+ -+ return success; -+} -+ -+static void inc_max_seq(struct lruvec *lruvec, unsigned long max_seq, bool can_swap) -+{ -+ int prev, next; -+ int type, zone; -+ struct lru_gen_struct *lrugen = &lruvec->lrugen; -+ -+ spin_lock_irq(&lruvec->lru_lock); -+ -+ VM_WARN_ON_ONCE(!seq_is_valid(lruvec)); -+ -+ if (max_seq != lrugen->max_seq) -+ goto unlock; -+ -+ for (type = ANON_AND_FILE - 1; type >= 0; type--) { -+ if (get_nr_gens(lruvec, type) != MAX_NR_GENS) -+ continue; -+ -+ VM_WARN_ON_ONCE(type == LRU_GEN_FILE || can_swap); -+ -+ inc_min_seq(lruvec, type); -+ } -+ -+ /* -+ * Update the active/inactive LRU sizes for compatibility. Both sides of -+ * the current max_seq need to be covered, since max_seq+1 can overlap -+ * with min_seq[LRU_GEN_ANON] if swapping is constrained. And if they do -+ * overlap, cold/hot inversion happens. 
-+ */ -+ prev = lru_gen_from_seq(lrugen->max_seq - 1); -+ next = lru_gen_from_seq(lrugen->max_seq + 1); -+ -+ for (type = 0; type < ANON_AND_FILE; type++) { -+ for (zone = 0; zone < MAX_NR_ZONES; zone++) { -+ enum lru_list lru = type * LRU_INACTIVE_FILE; -+ long delta = lrugen->nr_pages[prev][type][zone] - -+ lrugen->nr_pages[next][type][zone]; -+ -+ if (!delta) -+ continue; -+ -+ __update_lru_size(lruvec, lru, zone, delta); -+ __update_lru_size(lruvec, lru + LRU_ACTIVE, zone, -delta); -+ } -+ } -+ -+ for (type = 0; type < ANON_AND_FILE; type++) -+ reset_ctrl_pos(lruvec, type, false); -+ -+ /* make sure preceding modifications appear */ -+ smp_store_release(&lrugen->max_seq, lrugen->max_seq + 1); -+unlock: -+ spin_unlock_irq(&lruvec->lru_lock); -+} -+ -+static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq, unsigned long *min_seq, -+ struct scan_control *sc, bool can_swap, unsigned long *nr_to_scan) -+{ -+ int gen, type, zone; -+ unsigned long old = 0; -+ unsigned long young = 0; -+ unsigned long total = 0; -+ struct lru_gen_struct *lrugen = &lruvec->lrugen; -+ struct mem_cgroup *memcg = lruvec_memcg(lruvec); -+ -+ for (type = !can_swap; type < ANON_AND_FILE; type++) { -+ unsigned long seq; -+ -+ for (seq = min_seq[type]; seq <= max_seq; seq++) { -+ unsigned long size = 0; -+ -+ gen = lru_gen_from_seq(seq); -+ -+ for (zone = 0; zone < MAX_NR_ZONES; zone++) -+ size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); -+ -+ total += size; -+ if (seq == max_seq) -+ young += size; -+ else if (seq + MIN_NR_GENS == max_seq) -+ old += size; -+ } -+ } -+ -+ /* try to scrape all its memory if this memcg was deleted */ -+ *nr_to_scan = mem_cgroup_online(memcg) ? (total >> sc->priority) : total; -+ -+ /* -+ * The aging tries to be lazy to reduce the overhead, while the eviction -+ * stalls when the number of generations reaches MIN_NR_GENS. Hence, the -+ * ideal number of generations is MIN_NR_GENS+1. -+ */ -+ if (min_seq[!can_swap] + MIN_NR_GENS > max_seq) -+ return true; -+ if (min_seq[!can_swap] + MIN_NR_GENS < max_seq) -+ return false; -+ -+ /* -+ * It's also ideal to spread pages out evenly, i.e., 1/(MIN_NR_GENS+1) -+ * of the total number of pages for each generation. A reasonable range -+ * for this average portion is [1/MIN_NR_GENS, 1/(MIN_NR_GENS+2)]. The -+ * aging cares about the upper bound of hot pages, while the eviction -+ * cares about the lower bound of cold pages. 
-+ */ -+ if (young * MIN_NR_GENS > total) -+ return true; -+ if (old * (MIN_NR_GENS + 2) < total) -+ return true; -+ -+ return false; -+} -+ -+static void age_lruvec(struct lruvec *lruvec, struct scan_control *sc) -+{ -+ bool need_aging; -+ unsigned long nr_to_scan; -+ int swappiness = get_swappiness(lruvec, sc); -+ struct mem_cgroup *memcg = lruvec_memcg(lruvec); -+ DEFINE_MAX_SEQ(lruvec); -+ DEFINE_MIN_SEQ(lruvec); -+ -+ VM_WARN_ON_ONCE(sc->memcg_low_reclaim); -+ -+ mem_cgroup_calculate_protection(NULL, memcg); -+ -+ if (mem_cgroup_below_min(memcg)) -+ return; -+ -+ need_aging = should_run_aging(lruvec, max_seq, min_seq, sc, swappiness, &nr_to_scan); -+ if (need_aging) -+ inc_max_seq(lruvec, max_seq, swappiness); -+} -+ -+static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc) -+{ -+ struct mem_cgroup *memcg; -+ -+ VM_WARN_ON_ONCE(!current_is_kswapd()); -+ -+ memcg = mem_cgroup_iter(NULL, NULL, NULL); -+ do { -+ struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); -+ -+ age_lruvec(lruvec, sc); -+ -+ cond_resched(); -+ } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL))); -+} -+ -+/****************************************************************************** -+ * the eviction -+ ******************************************************************************/ -+ -+static bool sort_page(struct lruvec *lruvec, struct page *page, int tier_idx) -+{ -+ bool success; -+ int gen = page_lru_gen(page); -+ int type = page_is_file_lru(page); -+ int zone = page_zonenum(page); -+ int delta = thp_nr_pages(page); -+ int refs = page_lru_refs(page); -+ int tier = lru_tier_from_refs(refs); -+ struct lru_gen_struct *lrugen = &lruvec->lrugen; -+ -+ VM_WARN_ON_ONCE_PAGE(gen >= MAX_NR_GENS, page); -+ -+ /* unevictable */ -+ if (!page_evictable(page)) { -+ success = lru_gen_del_page(lruvec, page, true); -+ VM_WARN_ON_ONCE_PAGE(!success, page); -+ SetPageUnevictable(page); -+ add_page_to_lru_list(page, lruvec); -+ __count_vm_events(UNEVICTABLE_PGCULLED, delta); -+ return true; -+ } -+ -+ /* dirty lazyfree */ -+ if (type == LRU_GEN_FILE && PageAnon(page) && PageDirty(page)) { -+ success = lru_gen_del_page(lruvec, page, true); -+ VM_WARN_ON_ONCE_PAGE(!success, page); -+ SetPageSwapBacked(page); -+ add_page_to_lru_list_tail(page, lruvec); -+ return true; -+ } -+ -+ /* protected */ -+ if (tier > tier_idx) { -+ int hist = lru_hist_from_seq(lrugen->min_seq[type]); -+ -+ gen = page_inc_gen(lruvec, page, false); -+ list_move_tail(&page->lru, &lrugen->lists[gen][type][zone]); -+ -+ WRITE_ONCE(lrugen->protected[hist][type][tier - 1], -+ lrugen->protected[hist][type][tier - 1] + delta); -+ __mod_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + type, delta); -+ return true; -+ } -+ -+ /* waiting for writeback */ -+ if (PageLocked(page) || PageWriteback(page) || -+ (type == LRU_GEN_FILE && PageDirty(page))) { -+ gen = page_inc_gen(lruvec, page, true); -+ list_move(&page->lru, &lrugen->lists[gen][type][zone]); -+ return true; -+ } -+ -+ return false; -+} -+ -+static bool isolate_page(struct lruvec *lruvec, struct page *page, struct scan_control *sc) -+{ -+ bool success; -+ -+ /* unmapping inhibited */ -+ if (!sc->may_unmap && page_mapped(page)) -+ return false; -+ -+ /* swapping inhibited */ -+ if (!(sc->may_writepage && (sc->gfp_mask & __GFP_IO)) && -+ (PageDirty(page) || -+ (PageAnon(page) && !PageSwapCache(page)))) -+ return false; -+ -+ /* raced with release_pages() */ -+ if (!get_page_unless_zero(page)) -+ return false; -+ -+ /* raced with another isolation */ -+ if (!TestClearPageLRU(page)) 
{ -+ put_page(page); -+ return false; -+ } -+ -+ /* see the comment on MAX_NR_TIERS */ -+ if (!PageReferenced(page)) -+ set_mask_bits(&page->flags, LRU_REFS_MASK | LRU_REFS_FLAGS, 0); -+ -+ /* for shrink_page_list() */ -+ ClearPageReclaim(page); -+ ClearPageReferenced(page); -+ -+ success = lru_gen_del_page(lruvec, page, true); -+ VM_WARN_ON_ONCE_PAGE(!success, page); -+ -+ return true; -+} -+ -+static int scan_pages(struct lruvec *lruvec, struct scan_control *sc, -+ int type, int tier, struct list_head *list) -+{ -+ int gen, zone; -+ enum vm_event_item item; -+ int sorted = 0; -+ int scanned = 0; -+ int isolated = 0; -+ int remaining = MAX_LRU_BATCH; -+ struct lru_gen_struct *lrugen = &lruvec->lrugen; -+ struct mem_cgroup *memcg = lruvec_memcg(lruvec); -+ -+ VM_WARN_ON_ONCE(!list_empty(list)); -+ -+ if (get_nr_gens(lruvec, type) == MIN_NR_GENS) -+ return 0; -+ -+ gen = lru_gen_from_seq(lrugen->min_seq[type]); -+ -+ for (zone = sc->reclaim_idx; zone >= 0; zone--) { -+ LIST_HEAD(moved); -+ int skipped = 0; -+ struct list_head *head = &lrugen->lists[gen][type][zone]; -+ -+ while (!list_empty(head)) { -+ struct page *page = lru_to_page(head); -+ int delta = thp_nr_pages(page); -+ -+ VM_WARN_ON_ONCE_PAGE(PageUnevictable(page), page); -+ VM_WARN_ON_ONCE_PAGE(PageActive(page), page); -+ VM_WARN_ON_ONCE_PAGE(page_is_file_lru(page) != type, page); -+ VM_WARN_ON_ONCE_PAGE(page_zonenum(page) != zone, page); -+ -+ scanned += delta; -+ -+ if (sort_page(lruvec, page, tier)) -+ sorted += delta; -+ else if (isolate_page(lruvec, page, sc)) { -+ list_add(&page->lru, list); -+ isolated += delta; -+ } else { -+ list_move(&page->lru, &moved); -+ skipped += delta; -+ } -+ -+ if (!--remaining || max(isolated, skipped) >= MIN_LRU_BATCH) -+ break; -+ } -+ -+ if (skipped) { -+ list_splice(&moved, head); -+ __count_zid_vm_events(PGSCAN_SKIP, zone, skipped); -+ } -+ -+ if (!remaining || isolated >= MIN_LRU_BATCH) -+ break; -+ } -+ -+ item = current_is_kswapd() ? PGSCAN_KSWAPD : PGSCAN_DIRECT; -+ if (!cgroup_reclaim(sc)) { -+ __count_vm_events(item, isolated); -+ __count_vm_events(PGREFILL, sorted); -+ } -+ __count_memcg_events(memcg, item, isolated); -+ __count_memcg_events(memcg, PGREFILL, sorted); -+ __count_vm_events(PGSCAN_ANON + type, isolated); -+ -+ /* -+ * There might not be eligible pages due to reclaim_idx, may_unmap and -+ * may_writepage. Check the remaining to prevent livelock if it's not -+ * making progress. -+ */ -+ return isolated || !remaining ? scanned : 0; -+} -+ -+static int get_tier_idx(struct lruvec *lruvec, int type) -+{ -+ int tier; -+ struct ctrl_pos sp, pv; -+ -+ /* -+ * To leave a margin for fluctuations, use a larger gain factor (1:2). -+ * This value is chosen because any other tier would have at least twice -+ * as many refaults as the first tier. -+ */ -+ read_ctrl_pos(lruvec, type, 0, 1, &sp); -+ for (tier = 1; tier < MAX_NR_TIERS; tier++) { -+ read_ctrl_pos(lruvec, type, tier, 2, &pv); -+ if (!positive_ctrl_err(&sp, &pv)) -+ break; -+ } -+ -+ return tier - 1; -+} -+ -+static int get_type_to_scan(struct lruvec *lruvec, int swappiness, int *tier_idx) -+{ -+ int type, tier; -+ struct ctrl_pos sp, pv; -+ int gain[ANON_AND_FILE] = { swappiness, 200 - swappiness }; -+ -+ /* -+ * Compare the first tier of anon with that of file to determine which -+ * type to scan. Also need to compare other tiers of the selected type -+ * with the first tier of the other type to determine the last tier (of -+ * the selected type) to evict. 
-+ */ -+ read_ctrl_pos(lruvec, LRU_GEN_ANON, 0, gain[LRU_GEN_ANON], &sp); -+ read_ctrl_pos(lruvec, LRU_GEN_FILE, 0, gain[LRU_GEN_FILE], &pv); -+ type = positive_ctrl_err(&sp, &pv); -+ -+ read_ctrl_pos(lruvec, !type, 0, gain[!type], &sp); -+ for (tier = 1; tier < MAX_NR_TIERS; tier++) { -+ read_ctrl_pos(lruvec, type, tier, gain[type], &pv); -+ if (!positive_ctrl_err(&sp, &pv)) -+ break; -+ } -+ -+ *tier_idx = tier - 1; -+ -+ return type; -+} -+ -+static int isolate_pages(struct lruvec *lruvec, struct scan_control *sc, int swappiness, -+ int *type_scanned, struct list_head *list) -+{ -+ int i; -+ int type; -+ int scanned; -+ int tier = -1; -+ DEFINE_MIN_SEQ(lruvec); -+ -+ /* -+ * Try to make the obvious choice first. When anon and file are both -+ * available from the same generation, interpret swappiness 1 as file -+ * first and 200 as anon first. -+ */ -+ if (!swappiness) -+ type = LRU_GEN_FILE; -+ else if (min_seq[LRU_GEN_ANON] < min_seq[LRU_GEN_FILE]) -+ type = LRU_GEN_ANON; -+ else if (swappiness == 1) -+ type = LRU_GEN_FILE; -+ else if (swappiness == 200) -+ type = LRU_GEN_ANON; -+ else -+ type = get_type_to_scan(lruvec, swappiness, &tier); -+ -+ for (i = !swappiness; i < ANON_AND_FILE; i++) { -+ if (tier < 0) -+ tier = get_tier_idx(lruvec, type); -+ -+ scanned = scan_pages(lruvec, sc, type, tier, list); -+ if (scanned) -+ break; -+ -+ type = !type; -+ tier = -1; -+ } -+ -+ *type_scanned = type; -+ -+ return scanned; -+} -+ -+static int evict_pages(struct lruvec *lruvec, struct scan_control *sc, int swappiness) -+{ -+ int type; -+ int scanned; -+ int reclaimed; -+ LIST_HEAD(list); -+ struct page *page; -+ enum vm_event_item item; -+ struct reclaim_stat stat; -+ struct mem_cgroup *memcg = lruvec_memcg(lruvec); -+ struct pglist_data *pgdat = lruvec_pgdat(lruvec); -+ -+ spin_lock_irq(&lruvec->lru_lock); -+ -+ scanned = isolate_pages(lruvec, sc, swappiness, &type, &list); -+ -+ scanned += try_to_inc_min_seq(lruvec, swappiness); -+ -+ if (get_nr_gens(lruvec, !swappiness) == MIN_NR_GENS) -+ scanned = 0; -+ -+ spin_unlock_irq(&lruvec->lru_lock); -+ -+ if (list_empty(&list)) -+ return scanned; -+ -+ reclaimed = shrink_page_list(&list, pgdat, sc, &stat, false); -+ -+ list_for_each_entry(page, &list, lru) { -+ /* restore LRU_REFS_FLAGS cleared by isolate_page() */ -+ if (PageWorkingset(page)) -+ SetPageReferenced(page); -+ -+ /* don't add rejected pages to the oldest generation */ -+ if (PageReclaim(page) && -+ (PageDirty(page) || PageWriteback(page))) -+ ClearPageActive(page); -+ else -+ SetPageActive(page); -+ } -+ -+ spin_lock_irq(&lruvec->lru_lock); -+ -+ move_pages_to_lru(lruvec, &list); -+ -+ item = current_is_kswapd() ? 
PGSTEAL_KSWAPD : PGSTEAL_DIRECT; -+ if (!cgroup_reclaim(sc)) -+ __count_vm_events(item, reclaimed); -+ __count_memcg_events(memcg, item, reclaimed); -+ __count_vm_events(PGSTEAL_ANON + type, reclaimed); -+ -+ spin_unlock_irq(&lruvec->lru_lock); -+ -+ mem_cgroup_uncharge_list(&list); -+ free_unref_page_list(&list); -+ -+ sc->nr_reclaimed += reclaimed; -+ -+ return scanned; -+} -+ -+static unsigned long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, -+ bool can_swap) -+{ -+ bool need_aging; -+ unsigned long nr_to_scan; -+ struct mem_cgroup *memcg = lruvec_memcg(lruvec); -+ DEFINE_MAX_SEQ(lruvec); -+ DEFINE_MIN_SEQ(lruvec); -+ -+ if (mem_cgroup_below_min(memcg) || -+ (mem_cgroup_below_low(memcg) && !sc->memcg_low_reclaim)) -+ return 0; -+ -+ need_aging = should_run_aging(lruvec, max_seq, min_seq, sc, can_swap, &nr_to_scan); -+ if (!need_aging) -+ return nr_to_scan; -+ -+ /* skip the aging path at the default priority */ -+ if (sc->priority == DEF_PRIORITY) -+ goto done; -+ -+ /* leave the work to lru_gen_age_node() */ -+ if (current_is_kswapd()) -+ return 0; -+ -+ inc_max_seq(lruvec, max_seq, can_swap); -+done: -+ return min_seq[!can_swap] + MIN_NR_GENS <= max_seq ? nr_to_scan : 0; -+} -+ -+static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) -+{ -+ struct blk_plug plug; -+ unsigned long scanned = 0; -+ -+ lru_add_drain(); -+ -+ blk_start_plug(&plug); -+ -+ while (true) { -+ int delta; -+ int swappiness; -+ unsigned long nr_to_scan; -+ -+ if (sc->may_swap) -+ swappiness = get_swappiness(lruvec, sc); -+ else if (!cgroup_reclaim(sc) && get_swappiness(lruvec, sc)) -+ swappiness = 1; -+ else -+ swappiness = 0; -+ -+ nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness); -+ if (!nr_to_scan) -+ break; -+ -+ delta = evict_pages(lruvec, sc, swappiness); -+ if (!delta) -+ break; -+ -+ scanned += delta; -+ if (scanned >= nr_to_scan) -+ break; -+ -+ cond_resched(); -+ } -+ -+ blk_finish_plug(&plug); -+} -+ - /****************************************************************************** - * initialization - ******************************************************************************/ -@@ -2894,6 +3649,16 @@ static int __init init_lru_gen(void) - }; - late_initcall(init_lru_gen); - -+#else /* !CONFIG_LRU_GEN */ -+ -+static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc) -+{ -+} -+ -+static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) -+{ -+} -+ - #endif /* CONFIG_LRU_GEN */ - - static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) -@@ -2907,6 +3672,11 @@ static void shrink_lruvec(struct lruvec - bool proportional_reclaim; - struct blk_plug plug; - -+ if (lru_gen_enabled()) { -+ lru_gen_shrink_lruvec(lruvec, sc); -+ return; -+ } -+ - get_scan_count(lruvec, sc, nr); - - /* Record the original scan target for proportional adjustments later */ -@@ -3372,6 +4142,9 @@ static void snapshot_refaults(struct mem - struct lruvec *target_lruvec; - unsigned long refaults; - -+ if (lru_gen_enabled()) -+ return; -+ - target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat); - refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_ANON); - target_lruvec->refaults[0] = refaults; -@@ -3736,12 +4509,16 @@ unsigned long try_to_free_mem_cgroup_pag - } - #endif - --static void age_active_anon(struct pglist_data *pgdat, -- struct scan_control *sc) -+static void kswapd_age_node(struct pglist_data *pgdat, struct scan_control *sc) - { - struct mem_cgroup *memcg; - struct lruvec *lruvec; - -+ if 
(lru_gen_enabled()) { -+ lru_gen_age_node(pgdat, sc); -+ return; -+ } -+ - if (!can_age_anon_pages(pgdat, sc)) - return; - -@@ -4058,12 +4835,11 @@ restart: - sc.may_swap = !nr_boost_reclaim; - - /* -- * Do some background aging of the anon list, to give -- * pages a chance to be referenced before reclaiming. All -- * pages are rotated regardless of classzone as this is -- * about consistent aging. -+ * Do some background aging, to give pages a chance to be -+ * referenced before reclaiming. All pages are rotated -+ * regardless of classzone as this is about consistent aging. - */ -- age_active_anon(pgdat, &sc); -+ kswapd_age_node(pgdat, &sc); - - /* - * If we're getting trouble reclaiming, start doing writepage ---- a/mm/workingset.c -+++ b/mm/workingset.c -@@ -187,7 +187,6 @@ static unsigned int bucket_order __read_ - static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction, - bool workingset) - { -- eviction >>= bucket_order; - eviction &= EVICTION_MASK; - eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid; - eviction = (eviction << NODES_SHIFT) | pgdat->node_id; -@@ -212,10 +211,107 @@ static void unpack_shadow(void *shadow, - - *memcgidp = memcgid; - *pgdat = NODE_DATA(nid); -- *evictionp = entry << bucket_order; -+ *evictionp = entry; - *workingsetp = workingset; - } - -+#ifdef CONFIG_LRU_GEN -+ -+static void *lru_gen_eviction(struct page *page) -+{ -+ int hist; -+ unsigned long token; -+ unsigned long min_seq; -+ struct lruvec *lruvec; -+ struct lru_gen_struct *lrugen; -+ int type = page_is_file_lru(page); -+ int delta = thp_nr_pages(page); -+ int refs = page_lru_refs(page); -+ int tier = lru_tier_from_refs(refs); -+ struct mem_cgroup *memcg = page_memcg(page); -+ struct pglist_data *pgdat = page_pgdat(page); -+ -+ BUILD_BUG_ON(LRU_GEN_WIDTH + LRU_REFS_WIDTH > BITS_PER_LONG - EVICTION_SHIFT); -+ -+ lruvec = mem_cgroup_lruvec(memcg, pgdat); -+ lrugen = &lruvec->lrugen; -+ min_seq = READ_ONCE(lrugen->min_seq[type]); -+ token = (min_seq << LRU_REFS_WIDTH) | max(refs - 1, 0); -+ -+ hist = lru_hist_from_seq(min_seq); -+ atomic_long_add(delta, &lrugen->evicted[hist][type][tier]); -+ -+ return pack_shadow(mem_cgroup_id(memcg), pgdat, token, refs); -+} -+ -+static void lru_gen_refault(struct page *page, void *shadow) -+{ -+ int hist, tier, refs; -+ int memcg_id; -+ bool workingset; -+ unsigned long token; -+ unsigned long min_seq; -+ struct lruvec *lruvec; -+ struct lru_gen_struct *lrugen; -+ struct mem_cgroup *memcg; -+ struct pglist_data *pgdat; -+ int type = page_is_file_lru(page); -+ int delta = thp_nr_pages(page); -+ -+ unpack_shadow(shadow, &memcg_id, &pgdat, &token, &workingset); -+ -+ if (pgdat != page_pgdat(page)) -+ return; -+ -+ rcu_read_lock(); -+ -+ memcg = page_memcg_rcu(page); -+ if (memcg_id != mem_cgroup_id(memcg)) -+ goto unlock; -+ -+ lruvec = mem_cgroup_lruvec(memcg, pgdat); -+ lrugen = &lruvec->lrugen; -+ -+ min_seq = READ_ONCE(lrugen->min_seq[type]); -+ if ((token >> LRU_REFS_WIDTH) != (min_seq & (EVICTION_MASK >> LRU_REFS_WIDTH))) -+ goto unlock; -+ -+ hist = lru_hist_from_seq(min_seq); -+ /* see the comment in page_lru_refs() */ -+ refs = (token & (BIT(LRU_REFS_WIDTH) - 1)) + workingset; -+ tier = lru_tier_from_refs(refs); -+ -+ atomic_long_add(delta, &lrugen->refaulted[hist][type][tier]); -+ mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + type, delta); -+ -+ /* -+ * Count the following two cases as stalls: -+ * 1. For pages accessed through page tables, hotter pages pushed out -+ * hot pages which refaulted immediately. -+ * 2. 
For pages accessed multiple times through file descriptors, -+ * numbers of accesses might have been out of the range. -+ */ -+ if (lru_gen_in_fault() || refs == BIT(LRU_REFS_WIDTH)) { -+ SetPageWorkingset(page); -+ mod_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + type, delta); -+ } -+unlock: -+ rcu_read_unlock(); -+} -+ -+#else /* !CONFIG_LRU_GEN */ -+ -+static void *lru_gen_eviction(struct page *page) -+{ -+ return NULL; -+} -+ -+static void lru_gen_refault(struct page *page, void *shadow) -+{ -+} -+ -+#endif /* CONFIG_LRU_GEN */ -+ - /** - * workingset_age_nonresident - age non-resident entries as LRU ages - * @lruvec: the lruvec that was aged -@@ -264,10 +360,14 @@ void *workingset_eviction(struct page *p - VM_BUG_ON_PAGE(page_count(page), page); - VM_BUG_ON_PAGE(!PageLocked(page), page); - -+ if (lru_gen_enabled()) -+ return lru_gen_eviction(page); -+ - lruvec = mem_cgroup_lruvec(target_memcg, pgdat); - /* XXX: target_memcg can be NULL, go through lruvec */ - memcgid = mem_cgroup_id(lruvec_memcg(lruvec)); - eviction = atomic_long_read(&lruvec->nonresident_age); -+ eviction >>= bucket_order; - workingset_age_nonresident(lruvec, thp_nr_pages(page)); - return pack_shadow(memcgid, pgdat, eviction, PageWorkingset(page)); - } -@@ -296,7 +396,13 @@ void workingset_refault(struct page *pag - bool workingset; - int memcgid; - -+ if (lru_gen_enabled()) { -+ lru_gen_refault(page, shadow); -+ return; -+ } -+ - unpack_shadow(shadow, &memcgid, &pgdat, &eviction, &workingset); -+ eviction <<= bucket_order; - - rcu_read_lock(); - /* diff --git a/target/linux/generic/backport-6.1/020-v6.1-07-mm-multi-gen-LRU-exploit-locality-in-rmap.patch b/target/linux/generic/backport-6.1/020-v6.1-07-mm-multi-gen-LRU-exploit-locality-in-rmap.patch deleted file mode 100644 index 5cd6e03dc607..000000000000 --- a/target/linux/generic/backport-6.1/020-v6.1-07-mm-multi-gen-LRU-exploit-locality-in-rmap.patch +++ /dev/null @@ -1,491 +0,0 @@ -From e4277535f6d6708bb19b88c4bad155832671d69b Mon Sep 17 00:00:00 2001 -From: Yu Zhao -Date: Sun, 18 Sep 2022 02:00:04 -0600 -Subject: [PATCH 07/29] mm: multi-gen LRU: exploit locality in rmap -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Searching the rmap for PTEs mapping each page on an LRU list (to test and -clear the accessed bit) can be expensive because pages from different VMAs -(PA space) are not cache friendly to the rmap (VA space). For workloads -mostly using mapped pages, searching the rmap can incur the highest CPU -cost in the reclaim path. - -This patch exploits spatial locality to reduce the trips into the rmap. -When shrink_page_list() walks the rmap and finds a young PTE, a new -function lru_gen_look_around() scans at most BITS_PER_LONG-1 adjacent -PTEs. On finding another young PTE, it clears the accessed bit and -updates the gen counter of the page mapped by this PTE to -(max_seq%MAX_NR_GENS)+1. 
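(Editor's aside, not part of the dropped patch.) The (max_seq%MAX_NR_GENS)+1 update mentioned above is the same seq-to-gen mapping used throughout the series: the generation index is the sequence number modulo MAX_NR_GENS, and the counter stored in page->flags is gen+1 so that 0 can mean "not on an lrugen list". A minimal stand-alone sketch of that arithmetic; the LRU_GEN_PGOFF value is made up for illustration.

/*
 * Editor's illustrative sketch, not part of the dropped patches: the
 * seq -> gen -> page->flags arithmetic, with a hypothetical bit offset.
 */
#include <stdio.h>

#define MAX_NR_GENS   4UL
#define LRU_GEN_WIDTH 3            /* order_base_2(MAX_NR_GENS + 1) */
#define LRU_GEN_PGOFF 56           /* hypothetical offset in page->flags */
#define LRU_GEN_MASK  (((1UL << LRU_GEN_WIDTH) - 1) << LRU_GEN_PGOFF)

static unsigned long lru_gen_from_seq(unsigned long seq)
{
	return seq % MAX_NR_GENS;      /* index into lrugen->lists[] */
}

int main(void)
{
	unsigned long max_seq = 7, flags = 0;
	unsigned long gen = lru_gen_from_seq(max_seq);

	/* the counter stores gen+1 while the page sits on an lrugen list */
	flags = (flags & ~LRU_GEN_MASK) | ((gen + 1UL) << LRU_GEN_PGOFF);

	printf("max_seq=%lu -> gen=%lu, stored counter=%lu\n",
	       max_seq, gen, (flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF);
	return 0;
}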
- -Server benchmark results: - Single workload: - fio (buffered I/O): no change - - Single workload: - memcached (anon): +[3, 5]% - Ops/sec KB/sec - patch1-6: 1106168.46 43025.04 - patch1-7: 1147696.57 44640.29 - - Configurations: - no change - -Client benchmark results: - kswapd profiles: - patch1-6 - 39.03% lzo1x_1_do_compress (real work) - 18.47% page_vma_mapped_walk (overhead) - 6.74% _raw_spin_unlock_irq - 3.97% do_raw_spin_lock - 2.49% ptep_clear_flush - 2.48% anon_vma_interval_tree_iter_first - 1.92% page_referenced_one - 1.88% __zram_bvec_write - 1.48% memmove - 1.31% vma_interval_tree_iter_next - - patch1-7 - 48.16% lzo1x_1_do_compress (real work) - 8.20% page_vma_mapped_walk (overhead) - 7.06% _raw_spin_unlock_irq - 2.92% ptep_clear_flush - 2.53% __zram_bvec_write - 2.11% do_raw_spin_lock - 2.02% memmove - 1.93% lru_gen_look_around - 1.56% free_unref_page_list - 1.40% memset - - Configurations: - no change - -Link: https://lkml.kernel.org/r/20220918080010.2920238-8-yuzhao@google.com -Signed-off-by: Yu Zhao -Acked-by: Barry Song -Acked-by: Brian Geffon -Acked-by: Jan Alexander Steffens (heftig) -Acked-by: Oleksandr Natalenko -Acked-by: Steven Barrett -Acked-by: Suleiman Souhlal -Tested-by: Daniel Byrne -Tested-by: Donald Carr -Tested-by: Holger Hoffstätte -Tested-by: Konstantin Kharlamov -Tested-by: Shuang Zhai -Tested-by: Sofia Trinh -Tested-by: Vaibhav Jain -Cc: Andi Kleen -Cc: Aneesh Kumar K.V -Cc: Catalin Marinas -Cc: Dave Hansen -Cc: Hillf Danton -Cc: Jens Axboe -Cc: Johannes Weiner -Cc: Jonathan Corbet -Cc: Linus Torvalds -Cc: Matthew Wilcox -Cc: Mel Gorman -Cc: Miaohe Lin -Cc: Michael Larabel -Cc: Michal Hocko -Cc: Mike Rapoport -Cc: Mike Rapoport -Cc: Peter Zijlstra -Cc: Qi Zheng -Cc: Tejun Heo -Cc: Vlastimil Babka -Cc: Will Deacon -Signed-off-by: Andrew Morton ---- - include/linux/memcontrol.h | 31 +++++++ - include/linux/mmzone.h | 6 ++ - mm/internal.h | 1 + - mm/memcontrol.c | 1 + - mm/rmap.c | 7 ++ - mm/swap.c | 4 +- - mm/vmscan.c | 184 +++++++++++++++++++++++++++++++++++++ - 7 files changed, 232 insertions(+), 2 deletions(-) - ---- a/include/linux/memcontrol.h -+++ b/include/linux/memcontrol.h -@@ -442,6 +442,7 @@ static inline struct obj_cgroup *__page_ - * - LRU isolation - * - lock_page_memcg() - * - exclusive reference -+ * - mem_cgroup_trylock_pages() - * - * For a kmem page a caller should hold an rcu read lock to protect memcg - * associated with a kmem page from being released. -@@ -497,6 +498,7 @@ static inline struct mem_cgroup *page_me - * - LRU isolation - * - lock_page_memcg() - * - exclusive reference -+ * - mem_cgroup_trylock_pages() - * - * For a kmem page a caller should hold an rcu read lock to protect memcg - * associated with a kmem page from being released. 
-@@ -953,6 +955,23 @@ void unlock_page_memcg(struct page *page - - void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val); - -+/* try to stablize page_memcg() for all the pages in a memcg */ -+static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg) -+{ -+ rcu_read_lock(); -+ -+ if (mem_cgroup_disabled() || !atomic_read(&memcg->moving_account)) -+ return true; -+ -+ rcu_read_unlock(); -+ return false; -+} -+ -+static inline void mem_cgroup_unlock_pages(void) -+{ -+ rcu_read_unlock(); -+} -+ - /* idx can be of type enum memcg_stat_item or node_stat_item */ - static inline void mod_memcg_state(struct mem_cgroup *memcg, - int idx, int val) -@@ -1369,6 +1388,18 @@ static inline void unlock_page_memcg(str - { - } - -+static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg) -+{ -+ /* to match page_memcg_rcu() */ -+ rcu_read_lock(); -+ return true; -+} -+ -+static inline void mem_cgroup_unlock_pages(void) -+{ -+ rcu_read_unlock(); -+} -+ - static inline void mem_cgroup_handle_over_high(void) - { - } ---- a/include/linux/mmzone.h -+++ b/include/linux/mmzone.h -@@ -352,6 +352,7 @@ enum lruvec_flags { - #ifndef __GENERATING_BOUNDS_H - - struct lruvec; -+struct page_vma_mapped_walk; - - #define LRU_GEN_MASK ((BIT(LRU_GEN_WIDTH) - 1) << LRU_GEN_PGOFF) - #define LRU_REFS_MASK ((BIT(LRU_REFS_WIDTH) - 1) << LRU_REFS_PGOFF) -@@ -407,6 +408,7 @@ struct lru_gen_struct { - }; - - void lru_gen_init_lruvec(struct lruvec *lruvec); -+void lru_gen_look_around(struct page_vma_mapped_walk *pvmw); - - #ifdef CONFIG_MEMCG - void lru_gen_init_memcg(struct mem_cgroup *memcg); -@@ -419,6 +421,10 @@ static inline void lru_gen_init_lruvec(s - { - } - -+static inline void lru_gen_look_around(struct page_vma_mapped_walk *pvmw) -+{ -+} -+ - #ifdef CONFIG_MEMCG - static inline void lru_gen_init_memcg(struct mem_cgroup *memcg) - { ---- a/mm/internal.h -+++ b/mm/internal.h -@@ -35,6 +35,7 @@ - void page_writeback_init(void); - - vm_fault_t do_swap_page(struct vm_fault *vmf); -+void activate_page(struct page *page); - - void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma, - unsigned long floor, unsigned long ceiling); ---- a/mm/memcontrol.c -+++ b/mm/memcontrol.c -@@ -2798,6 +2798,7 @@ static void commit_charge(struct page *p - * - LRU isolation - * - lock_page_memcg() - * - exclusive reference -+ * - mem_cgroup_trylock_pages() - */ - page->memcg_data = (unsigned long)memcg; - } ---- a/mm/rmap.c -+++ b/mm/rmap.c -@@ -73,6 +73,7 @@ - #include - #include - #include -+#include - - #include - -@@ -793,6 +794,12 @@ static bool page_referenced_one(struct p - } - - if (pvmw.pte) { -+ if (lru_gen_enabled() && pte_young(*pvmw.pte) && -+ !(vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))) { -+ lru_gen_look_around(&pvmw); -+ referenced++; -+ } -+ - if (ptep_clear_flush_young_notify(vma, address, - pvmw.pte)) { - /* ---- a/mm/swap.c -+++ b/mm/swap.c -@@ -325,7 +325,7 @@ static bool need_activate_page_drain(int - return pagevec_count(&per_cpu(lru_pvecs.activate_page, cpu)) != 0; - } - --static void activate_page(struct page *page) -+void activate_page(struct page *page) - { - page = compound_head(page); - if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { -@@ -345,7 +345,7 @@ static inline void activate_page_drain(i - { - } - --static void activate_page(struct page *page) -+void activate_page(struct page *page) - { - struct lruvec *lruvec; - ---- a/mm/vmscan.c -+++ b/mm/vmscan.c -@@ -1409,6 +1409,11 @@ retry: - if (!sc->may_unmap && page_mapped(page)) - goto 
keep_locked; - -+ /* page_update_gen() tried to promote this page? */ -+ if (lru_gen_enabled() && !ignore_references && -+ page_mapped(page) && PageReferenced(page)) -+ goto keep_locked; -+ - may_enter_fs = (sc->gfp_mask & __GFP_FS) || - (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO)); - -@@ -2990,6 +2995,29 @@ static bool positive_ctrl_err(struct ctr - * the aging - ******************************************************************************/ - -+/* promote pages accessed through page tables */ -+static int page_update_gen(struct page *page, int gen) -+{ -+ unsigned long new_flags, old_flags = READ_ONCE(page->flags); -+ -+ VM_WARN_ON_ONCE(gen >= MAX_NR_GENS); -+ VM_WARN_ON_ONCE(!rcu_read_lock_held()); -+ -+ do { -+ /* lru_gen_del_page() has isolated this page? */ -+ if (!(old_flags & LRU_GEN_MASK)) { -+ /* for shrink_page_list() */ -+ new_flags = old_flags | BIT(PG_referenced); -+ continue; -+ } -+ -+ new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_MASK | LRU_REFS_FLAGS); -+ new_flags |= (gen + 1UL) << LRU_GEN_PGOFF; -+ } while (!try_cmpxchg(&page->flags, &old_flags, new_flags)); -+ -+ return ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1; -+} -+ - /* protect pages accessed multiple times through file descriptors */ - static int page_inc_gen(struct lruvec *lruvec, struct page *page, bool reclaiming) - { -@@ -3001,6 +3029,11 @@ static int page_inc_gen(struct lruvec *l - VM_WARN_ON_ONCE_PAGE(!(old_flags & LRU_GEN_MASK), page); - - do { -+ new_gen = ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1; -+ /* page_update_gen() has promoted this page? */ -+ if (new_gen >= 0 && new_gen != old_gen) -+ return new_gen; -+ - new_gen = (old_gen + 1) % MAX_NR_GENS; - - new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_MASK | LRU_REFS_FLAGS); -@@ -3015,6 +3048,43 @@ static int page_inc_gen(struct lruvec *l - return new_gen; - } - -+static unsigned long get_pte_pfn(pte_t pte, struct vm_area_struct *vma, unsigned long addr) -+{ -+ unsigned long pfn = pte_pfn(pte); -+ -+ VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end); -+ -+ if (!pte_present(pte) || is_zero_pfn(pfn)) -+ return -1; -+ -+ if (WARN_ON_ONCE(pte_devmap(pte) || pte_special(pte))) -+ return -1; -+ -+ if (WARN_ON_ONCE(!pfn_valid(pfn))) -+ return -1; -+ -+ return pfn; -+} -+ -+static struct page *get_pfn_page(unsigned long pfn, struct mem_cgroup *memcg, -+ struct pglist_data *pgdat) -+{ -+ struct page *page; -+ -+ /* try to avoid unnecessary memory loads */ -+ if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat)) -+ return NULL; -+ -+ page = compound_head(pfn_to_page(pfn)); -+ if (page_to_nid(page) != pgdat->node_id) -+ return NULL; -+ -+ if (page_memcg_rcu(page) != memcg) -+ return NULL; -+ -+ return page; -+} -+ - static void inc_min_seq(struct lruvec *lruvec, int type) - { - struct lru_gen_struct *lrugen = &lruvec->lrugen; -@@ -3214,6 +3284,114 @@ static void lru_gen_age_node(struct pgli - } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL))); - } - -+/* -+ * This function exploits spatial locality when shrink_page_list() walks the -+ * rmap. It scans the adjacent PTEs of a young PTE and promotes hot pages. 
-+ */ -+void lru_gen_look_around(struct page_vma_mapped_walk *pvmw) -+{ -+ int i; -+ pte_t *pte; -+ unsigned long start; -+ unsigned long end; -+ unsigned long addr; -+ unsigned long bitmap[BITS_TO_LONGS(MIN_LRU_BATCH)] = {}; -+ struct page *page = pvmw->page; -+ struct mem_cgroup *memcg = page_memcg(page); -+ struct pglist_data *pgdat = page_pgdat(page); -+ struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); -+ DEFINE_MAX_SEQ(lruvec); -+ int old_gen, new_gen = lru_gen_from_seq(max_seq); -+ -+ lockdep_assert_held(pvmw->ptl); -+ VM_WARN_ON_ONCE_PAGE(PageLRU(page), page); -+ -+ if (spin_is_contended(pvmw->ptl)) -+ return; -+ -+ start = max(pvmw->address & PMD_MASK, pvmw->vma->vm_start); -+ end = min(pvmw->address | ~PMD_MASK, pvmw->vma->vm_end - 1) + 1; -+ -+ if (end - start > MIN_LRU_BATCH * PAGE_SIZE) { -+ if (pvmw->address - start < MIN_LRU_BATCH * PAGE_SIZE / 2) -+ end = start + MIN_LRU_BATCH * PAGE_SIZE; -+ else if (end - pvmw->address < MIN_LRU_BATCH * PAGE_SIZE / 2) -+ start = end - MIN_LRU_BATCH * PAGE_SIZE; -+ else { -+ start = pvmw->address - MIN_LRU_BATCH * PAGE_SIZE / 2; -+ end = pvmw->address + MIN_LRU_BATCH * PAGE_SIZE / 2; -+ } -+ } -+ -+ pte = pvmw->pte - (pvmw->address - start) / PAGE_SIZE; -+ -+ rcu_read_lock(); -+ arch_enter_lazy_mmu_mode(); -+ -+ for (i = 0, addr = start; addr != end; i++, addr += PAGE_SIZE) { -+ unsigned long pfn; -+ -+ pfn = get_pte_pfn(pte[i], pvmw->vma, addr); -+ if (pfn == -1) -+ continue; -+ -+ if (!pte_young(pte[i])) -+ continue; -+ -+ page = get_pfn_page(pfn, memcg, pgdat); -+ if (!page) -+ continue; -+ -+ if (!ptep_test_and_clear_young(pvmw->vma, addr, pte + i)) -+ VM_WARN_ON_ONCE(true); -+ -+ if (pte_dirty(pte[i]) && !PageDirty(page) && -+ !(PageAnon(page) && PageSwapBacked(page) && -+ !PageSwapCache(page))) -+ set_page_dirty(page); -+ -+ old_gen = page_lru_gen(page); -+ if (old_gen < 0) -+ SetPageReferenced(page); -+ else if (old_gen != new_gen) -+ __set_bit(i, bitmap); -+ } -+ -+ arch_leave_lazy_mmu_mode(); -+ rcu_read_unlock(); -+ -+ if (bitmap_weight(bitmap, MIN_LRU_BATCH) < PAGEVEC_SIZE) { -+ for_each_set_bit(i, bitmap, MIN_LRU_BATCH) { -+ page = pte_page(pte[i]); -+ activate_page(page); -+ } -+ return; -+ } -+ -+ /* page_update_gen() requires stable page_memcg() */ -+ if (!mem_cgroup_trylock_pages(memcg)) -+ return; -+ -+ spin_lock_irq(&lruvec->lru_lock); -+ new_gen = lru_gen_from_seq(lruvec->lrugen.max_seq); -+ -+ for_each_set_bit(i, bitmap, MIN_LRU_BATCH) { -+ page = compound_head(pte_page(pte[i])); -+ if (page_memcg_rcu(page) != memcg) -+ continue; -+ -+ old_gen = page_update_gen(page, new_gen); -+ if (old_gen < 0 || old_gen == new_gen) -+ continue; -+ -+ lru_gen_update_size(lruvec, page, old_gen, new_gen); -+ } -+ -+ spin_unlock_irq(&lruvec->lru_lock); -+ -+ mem_cgroup_unlock_pages(); -+} -+ - /****************************************************************************** - * the eviction - ******************************************************************************/ -@@ -3250,6 +3428,12 @@ static bool sort_page(struct lruvec *lru - return true; - } - -+ /* promoted */ -+ if (gen != lru_gen_from_seq(lrugen->min_seq[type])) { -+ list_move(&page->lru, &lrugen->lists[gen][type][zone]); -+ return true; -+ } -+ - /* protected */ - if (tier > tier_idx) { - int hist = lru_hist_from_seq(lrugen->min_seq[type]); diff --git a/target/linux/generic/backport-6.1/020-v6.1-08-mm-multi-gen-LRU-support-page-table-walks.patch b/target/linux/generic/backport-6.1/020-v6.1-08-mm-multi-gen-LRU-support-page-table-walks.patch deleted file mode 100644 
index dad21dccb58e..000000000000 --- a/target/linux/generic/backport-6.1/020-v6.1-08-mm-multi-gen-LRU-support-page-table-walks.patch +++ /dev/null @@ -1,1687 +0,0 @@ -From 05223c4e80b34e29f2255c04ffebc2c4475e7593 Mon Sep 17 00:00:00 2001 -From: Yu Zhao -Date: Sun, 18 Sep 2022 02:00:05 -0600 -Subject: [PATCH 08/29] mm: multi-gen LRU: support page table walks -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -To further exploit spatial locality, the aging prefers to walk page tables -to search for young PTEs and promote hot pages. A kill switch will be -added in the next patch to disable this behavior. When disabled, the -aging relies on the rmap only. - -NB: this behavior has nothing similar with the page table scanning in the -2.4 kernel [1], which searches page tables for old PTEs, adds cold pages -to swapcache and unmaps them. - -To avoid confusion, the term "iteration" specifically means the traversal -of an entire mm_struct list; the term "walk" will be applied to page -tables and the rmap, as usual. - -An mm_struct list is maintained for each memcg, and an mm_struct follows -its owner task to the new memcg when this task is migrated. Given an -lruvec, the aging iterates lruvec_memcg()->mm_list and calls -walk_page_range() with each mm_struct on this list to promote hot pages -before it increments max_seq. - -When multiple page table walkers iterate the same list, each of them gets -a unique mm_struct; therefore they can run concurrently. Page table -walkers ignore any misplaced pages, e.g., if an mm_struct was migrated, -pages it left in the previous memcg will not be promoted when its current -memcg is under reclaim. Similarly, page table walkers will not promote -pages from nodes other than the one under reclaim. - -This patch uses the following optimizations when walking page tables: -1. It tracks the usage of mm_struct's between context switches so that - page table walkers can skip processes that have been sleeping since - the last iteration. -2. It uses generational Bloom filters to record populated branches so - that page table walkers can reduce their search space based on the - query results, e.g., to skip page tables containing mostly holes or - misplaced pages. -3. It takes advantage of the accessed bit in non-leaf PMD entries when - CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG=y. -4. It does not zigzag between a PGD table and the same PMD table - spanning multiple VMAs. IOW, it finishes all the VMAs within the - range of the same PMD table before it returns to a PGD table. This - improves the cache performance for workloads that have large - numbers of tiny VMAs [2], especially when CONFIG_PGTABLE_LEVELS=5. 
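
Optimization 2 above is implemented further down this patch (get_item_key(), reset_bloom_filter(), update_bloom_filter() and test_bloom_filter() in mm/vmscan.c). The following is a rough standalone model of that double-buffering scheme in userspace C, with an illustrative hash in place of hash_ptr() and none of the kernel's allocation or locking concerns; names here are not the kernel's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FILTER_SHIFT    15                      /* m = 1 << 15 bits per filter */
#define FILTER_BITS     (1UL << FILTER_SHIFT)
#define FILTER_WORDS    (FILTER_BITS / 64)
#define NR_FILTERS      2                       /* double buffering */

static uint64_t filters[NR_FILTERS][FILTER_WORDS];

/* stand-in for the kernel's hash_ptr(); any decent mix works for the demo */
static uint32_t hash_item(const void *item)
{
        uint64_t x = (uintptr_t)item;

        x ^= x >> 33; x *= 0xff51afd7ed558ccdULL;
        x ^= x >> 33; x *= 0xc4ceb9fe1a85ec53ULL;
        x ^= x >> 33;
        return (uint32_t)x;
}

/* k = 2: derive two 15-bit keys from one hash value */
static void get_keys(const void *item, uint32_t key[2])
{
        uint32_t hash = hash_item(item);

        key[0] = hash & (FILTER_BITS - 1);
        key[1] = (hash >> FILTER_SHIFT) & (FILTER_BITS - 1);
}

/* the aging clears the filter of the generation it is about to produce */
static void filter_reset(unsigned long seq)
{
        memset(filters[seq % NR_FILTERS], 0, sizeof(filters[0]));
}

/* a walker records "this non-leaf entry still had enough young leaves" */
static void filter_add(unsigned long seq, const void *item)
{
        uint32_t key[2];
        uint64_t *f = filters[seq % NR_FILTERS];

        get_keys(item, key);
        f[key[0] / 64] |= 1ULL << (key[0] % 64);
        f[key[1] / 64] |= 1ULL << (key[1] % 64);
}

/* the next walk only descends into entries the filter remembers */
static bool filter_test(unsigned long seq, const void *item)
{
        uint32_t key[2];
        const uint64_t *f = filters[seq % NR_FILTERS];

        get_keys(item, key);
        return (f[key[0] / 64] >> (key[0] % 64)) &
               (f[key[1] / 64] >> (key[1] % 64)) & 1;
}

int main(void)
{
        int pmd_entry;                          /* stands in for a pmd_t * */
        unsigned long seq = 8;

        filter_add(seq, &pmd_entry);
        printf("seq %lu sees entry: %d\n", seq, filter_test(seq, &pmd_entry));

        filter_reset(seq + 1);                  /* flip to the spare filter */
        printf("seq %lu sees entry: %d\n", seq + 1,
               filter_test(seq + 1, &pmd_entry));
        return 0;
}

The point of keeping two filters is that an entry which is not recorded again while generation seq+1 is being produced simply disappears when the spare filter is cleared, so stale non-leaf entries age out without any explicit removal.
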
- -Server benchmark results: - Single workload: - fio (buffered I/O): no change - - Single workload: - memcached (anon): +[8, 10]% - Ops/sec KB/sec - patch1-7: 1147696.57 44640.29 - patch1-8: 1245274.91 48435.66 - - Configurations: - no change - -Client benchmark results: - kswapd profiles: - patch1-7 - 48.16% lzo1x_1_do_compress (real work) - 8.20% page_vma_mapped_walk (overhead) - 7.06% _raw_spin_unlock_irq - 2.92% ptep_clear_flush - 2.53% __zram_bvec_write - 2.11% do_raw_spin_lock - 2.02% memmove - 1.93% lru_gen_look_around - 1.56% free_unref_page_list - 1.40% memset - - patch1-8 - 49.44% lzo1x_1_do_compress (real work) - 6.19% page_vma_mapped_walk (overhead) - 5.97% _raw_spin_unlock_irq - 3.13% get_pfn_page - 2.85% ptep_clear_flush - 2.42% __zram_bvec_write - 2.08% do_raw_spin_lock - 1.92% memmove - 1.44% alloc_zspage - 1.36% memset - - Configurations: - no change - -Thanks to the following developers for their efforts [3]. - kernel test robot - -[1] https://lwn.net/Articles/23732/ -[2] https://llvm.org/docs/ScudoHardenedAllocator.html -[3] https://lore.kernel.org/r/202204160827.ekEARWQo-lkp@intel.com/ - -Link: https://lkml.kernel.org/r/20220918080010.2920238-9-yuzhao@google.com -Signed-off-by: Yu Zhao -Acked-by: Brian Geffon -Acked-by: Jan Alexander Steffens (heftig) -Acked-by: Oleksandr Natalenko -Acked-by: Steven Barrett -Acked-by: Suleiman Souhlal -Tested-by: Daniel Byrne -Tested-by: Donald Carr -Tested-by: Holger Hoffstätte -Tested-by: Konstantin Kharlamov -Tested-by: Shuang Zhai -Tested-by: Sofia Trinh -Tested-by: Vaibhav Jain -Cc: Andi Kleen -Cc: Aneesh Kumar K.V -Cc: Barry Song -Cc: Catalin Marinas -Cc: Dave Hansen -Cc: Hillf Danton -Cc: Jens Axboe -Cc: Johannes Weiner -Cc: Jonathan Corbet -Cc: Linus Torvalds -Cc: Matthew Wilcox -Cc: Mel Gorman -Cc: Miaohe Lin -Cc: Michael Larabel -Cc: Michal Hocko -Cc: Mike Rapoport -Cc: Mike Rapoport -Cc: Peter Zijlstra -Cc: Qi Zheng -Cc: Tejun Heo -Cc: Vlastimil Babka -Cc: Will Deacon -Signed-off-by: Andrew Morton ---- - fs/exec.c | 2 + - include/linux/memcontrol.h | 5 + - include/linux/mm_types.h | 76 +++ - include/linux/mmzone.h | 56 +- - include/linux/swap.h | 4 + - kernel/exit.c | 1 + - kernel/fork.c | 9 + - kernel/sched/core.c | 1 + - mm/memcontrol.c | 25 + - mm/vmscan.c | 1010 +++++++++++++++++++++++++++++++++++- - 10 files changed, 1172 insertions(+), 17 deletions(-) - ---- a/fs/exec.c -+++ b/fs/exec.c -@@ -1013,6 +1013,7 @@ static int exec_mmap(struct mm_struct *m - active_mm = tsk->active_mm; - tsk->active_mm = mm; - tsk->mm = mm; -+ lru_gen_add_mm(mm); - /* - * This prevents preemption while active_mm is being loaded and - * it and mm are being updated, which could cause problems for -@@ -1028,6 +1029,7 @@ static int exec_mmap(struct mm_struct *m - tsk->mm->vmacache_seqnum = 0; - vmacache_flush(tsk); - task_unlock(tsk); -+ lru_gen_use_mm(mm); - if (old_mm) { - mmap_read_unlock(old_mm); - BUG_ON(active_mm != old_mm); ---- a/include/linux/memcontrol.h -+++ b/include/linux/memcontrol.h -@@ -348,6 +348,11 @@ struct mem_cgroup { - struct deferred_split deferred_split_queue; - #endif - -+#ifdef CONFIG_LRU_GEN -+ /* per-memcg mm_struct list */ -+ struct lru_gen_mm_list mm_list; -+#endif -+ - struct mem_cgroup_per_node *nodeinfo[]; - }; - ---- a/include/linux/mm_types.h -+++ b/include/linux/mm_types.h -@@ -580,6 +580,22 @@ struct mm_struct { - #ifdef CONFIG_IOMMU_SUPPORT - u32 pasid; - #endif -+#ifdef CONFIG_LRU_GEN -+ struct { -+ /* this mm_struct is on lru_gen_mm_list */ -+ struct list_head list; -+ /* -+ * Set when switching to this 
mm_struct, as a hint of -+ * whether it has been used since the last time per-node -+ * page table walkers cleared the corresponding bits. -+ */ -+ unsigned long bitmap; -+#ifdef CONFIG_MEMCG -+ /* points to the memcg of "owner" above */ -+ struct mem_cgroup *memcg; -+#endif -+ } lru_gen; -+#endif /* CONFIG_LRU_GEN */ - } __randomize_layout; - - /* -@@ -606,6 +622,66 @@ static inline cpumask_t *mm_cpumask(stru - return (struct cpumask *)&mm->cpu_bitmap; - } - -+#ifdef CONFIG_LRU_GEN -+ -+struct lru_gen_mm_list { -+ /* mm_struct list for page table walkers */ -+ struct list_head fifo; -+ /* protects the list above */ -+ spinlock_t lock; -+}; -+ -+void lru_gen_add_mm(struct mm_struct *mm); -+void lru_gen_del_mm(struct mm_struct *mm); -+#ifdef CONFIG_MEMCG -+void lru_gen_migrate_mm(struct mm_struct *mm); -+#endif -+ -+static inline void lru_gen_init_mm(struct mm_struct *mm) -+{ -+ INIT_LIST_HEAD(&mm->lru_gen.list); -+ mm->lru_gen.bitmap = 0; -+#ifdef CONFIG_MEMCG -+ mm->lru_gen.memcg = NULL; -+#endif -+} -+ -+static inline void lru_gen_use_mm(struct mm_struct *mm) -+{ -+ /* -+ * When the bitmap is set, page reclaim knows this mm_struct has been -+ * used since the last time it cleared the bitmap. So it might be worth -+ * walking the page tables of this mm_struct to clear the accessed bit. -+ */ -+ WRITE_ONCE(mm->lru_gen.bitmap, -1); -+} -+ -+#else /* !CONFIG_LRU_GEN */ -+ -+static inline void lru_gen_add_mm(struct mm_struct *mm) -+{ -+} -+ -+static inline void lru_gen_del_mm(struct mm_struct *mm) -+{ -+} -+ -+#ifdef CONFIG_MEMCG -+static inline void lru_gen_migrate_mm(struct mm_struct *mm) -+{ -+} -+#endif -+ -+static inline void lru_gen_init_mm(struct mm_struct *mm) -+{ -+} -+ -+static inline void lru_gen_use_mm(struct mm_struct *mm) -+{ -+} -+ -+#endif /* CONFIG_LRU_GEN */ -+ - struct mmu_gather; - extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm); - extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm); ---- a/include/linux/mmzone.h -+++ b/include/linux/mmzone.h -@@ -385,7 +385,7 @@ enum { - * min_seq behind. - * - * The number of pages in each generation is eventually consistent and therefore -- * can be transiently negative. -+ * can be transiently negative when reset_batch_size() is pending. 
- */ - struct lru_gen_struct { - /* the aging increments the youngest generation number */ -@@ -407,6 +407,53 @@ struct lru_gen_struct { - atomic_long_t refaulted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS]; - }; - -+enum { -+ MM_LEAF_TOTAL, /* total leaf entries */ -+ MM_LEAF_OLD, /* old leaf entries */ -+ MM_LEAF_YOUNG, /* young leaf entries */ -+ MM_NONLEAF_TOTAL, /* total non-leaf entries */ -+ MM_NONLEAF_FOUND, /* non-leaf entries found in Bloom filters */ -+ MM_NONLEAF_ADDED, /* non-leaf entries added to Bloom filters */ -+ NR_MM_STATS -+}; -+ -+/* double-buffering Bloom filters */ -+#define NR_BLOOM_FILTERS 2 -+ -+struct lru_gen_mm_state { -+ /* set to max_seq after each iteration */ -+ unsigned long seq; -+ /* where the current iteration continues (inclusive) */ -+ struct list_head *head; -+ /* where the last iteration ended (exclusive) */ -+ struct list_head *tail; -+ /* to wait for the last page table walker to finish */ -+ struct wait_queue_head wait; -+ /* Bloom filters flip after each iteration */ -+ unsigned long *filters[NR_BLOOM_FILTERS]; -+ /* the mm stats for debugging */ -+ unsigned long stats[NR_HIST_GENS][NR_MM_STATS]; -+ /* the number of concurrent page table walkers */ -+ int nr_walkers; -+}; -+ -+struct lru_gen_mm_walk { -+ /* the lruvec under reclaim */ -+ struct lruvec *lruvec; -+ /* unstable max_seq from lru_gen_struct */ -+ unsigned long max_seq; -+ /* the next address within an mm to scan */ -+ unsigned long next_addr; -+ /* to batch promoted pages */ -+ int nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES]; -+ /* to batch the mm stats */ -+ int mm_stats[NR_MM_STATS]; -+ /* total batched items */ -+ int batched; -+ bool can_swap; -+ bool force_scan; -+}; -+ - void lru_gen_init_lruvec(struct lruvec *lruvec); - void lru_gen_look_around(struct page_vma_mapped_walk *pvmw); - -@@ -457,6 +504,8 @@ struct lruvec { - #ifdef CONFIG_LRU_GEN - /* evictable pages divided into generations */ - struct lru_gen_struct lrugen; -+ /* to concurrently iterate lru_gen_mm_list */ -+ struct lru_gen_mm_state mm_state; - #endif - #ifdef CONFIG_MEMCG - struct pglist_data *pgdat; -@@ -1042,6 +1091,11 @@ typedef struct pglist_data { - - unsigned long flags; - -+#ifdef CONFIG_LRU_GEN -+ /* kswap mm walk data */ -+ struct lru_gen_mm_walk mm_walk; -+#endif -+ - ZONE_PADDING(_pad2_) - - /* Per-node vmstats */ ---- a/include/linux/swap.h -+++ b/include/linux/swap.h -@@ -137,6 +137,10 @@ union swap_header { - */ - struct reclaim_state { - unsigned long reclaimed_slab; -+#ifdef CONFIG_LRU_GEN -+ /* per-thread mm walk data */ -+ struct lru_gen_mm_walk *mm_walk; -+#endif - }; - - #ifdef __KERNEL__ ---- a/kernel/exit.c -+++ b/kernel/exit.c -@@ -469,6 +469,7 @@ assign_new_owner: - goto retry; - } - WRITE_ONCE(mm->owner, c); -+ lru_gen_migrate_mm(mm); - task_unlock(c); - put_task_struct(c); - } ---- a/kernel/fork.c -+++ b/kernel/fork.c -@@ -1083,6 +1083,7 @@ static struct mm_struct *mm_init(struct - goto fail_nocontext; - - mm->user_ns = get_user_ns(user_ns); -+ lru_gen_init_mm(mm); - return mm; - - fail_nocontext: -@@ -1125,6 +1126,7 @@ static inline void __mmput(struct mm_str - } - if (mm->binfmt) - module_put(mm->binfmt->module); -+ lru_gen_del_mm(mm); - mmdrop(mm); - } - -@@ -2622,6 +2624,13 @@ pid_t kernel_clone(struct kernel_clone_a - get_task_struct(p); - } - -+ if (IS_ENABLED(CONFIG_LRU_GEN) && !(clone_flags & CLONE_VM)) { -+ /* lock the task to synchronize with memcg migration */ -+ task_lock(p); -+ lru_gen_add_mm(p->mm); -+ task_unlock(p); -+ } -+ - wake_up_new_task(p); - - /* forking 
complete and child started to run, tell ptracer */ ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -5010,6 +5010,7 @@ context_switch(struct rq *rq, struct tas - * finish_task_switch()'s mmdrop(). - */ - switch_mm_irqs_off(prev->active_mm, next->mm, next); -+ lru_gen_use_mm(next->mm); - - if (!prev->mm) { // from kernel - /* will mmdrop() in finish_task_switch(). */ ---- a/mm/memcontrol.c -+++ b/mm/memcontrol.c -@@ -6212,6 +6212,30 @@ static void mem_cgroup_move_task(void) - } - #endif - -+#ifdef CONFIG_LRU_GEN -+static void mem_cgroup_attach(struct cgroup_taskset *tset) -+{ -+ struct task_struct *task; -+ struct cgroup_subsys_state *css; -+ -+ /* find the first leader if there is any */ -+ cgroup_taskset_for_each_leader(task, css, tset) -+ break; -+ -+ if (!task) -+ return; -+ -+ task_lock(task); -+ if (task->mm && READ_ONCE(task->mm->owner) == task) -+ lru_gen_migrate_mm(task->mm); -+ task_unlock(task); -+} -+#else -+static void mem_cgroup_attach(struct cgroup_taskset *tset) -+{ -+} -+#endif /* CONFIG_LRU_GEN */ -+ - static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value) - { - if (value == PAGE_COUNTER_MAX) -@@ -6555,6 +6579,7 @@ struct cgroup_subsys memory_cgrp_subsys - .css_reset = mem_cgroup_css_reset, - .css_rstat_flush = mem_cgroup_css_rstat_flush, - .can_attach = mem_cgroup_can_attach, -+ .attach = mem_cgroup_attach, - .cancel_attach = mem_cgroup_cancel_attach, - .post_attach = mem_cgroup_move_task, - .dfl_cftypes = memory_files, ---- a/mm/vmscan.c -+++ b/mm/vmscan.c -@@ -50,6 +50,8 @@ - #include - #include - #include -+#include -+#include - - #include - #include -@@ -2853,7 +2855,7 @@ static bool can_age_anon_pages(struct pg - for ((type) = 0; (type) < ANON_AND_FILE; (type)++) \ - for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++) - --static struct lruvec __maybe_unused *get_lruvec(struct mem_cgroup *memcg, int nid) -+static struct lruvec *get_lruvec(struct mem_cgroup *memcg, int nid) - { - struct pglist_data *pgdat = NODE_DATA(nid); - -@@ -2899,6 +2901,371 @@ static bool __maybe_unused seq_is_valid( - } - - /****************************************************************************** -+ * mm_struct list -+ ******************************************************************************/ -+ -+static struct lru_gen_mm_list *get_mm_list(struct mem_cgroup *memcg) -+{ -+ static struct lru_gen_mm_list mm_list = { -+ .fifo = LIST_HEAD_INIT(mm_list.fifo), -+ .lock = __SPIN_LOCK_UNLOCKED(mm_list.lock), -+ }; -+ -+#ifdef CONFIG_MEMCG -+ if (memcg) -+ return &memcg->mm_list; -+#endif -+ VM_WARN_ON_ONCE(!mem_cgroup_disabled()); -+ -+ return &mm_list; -+} -+ -+void lru_gen_add_mm(struct mm_struct *mm) -+{ -+ int nid; -+ struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm); -+ struct lru_gen_mm_list *mm_list = get_mm_list(memcg); -+ -+ VM_WARN_ON_ONCE(!list_empty(&mm->lru_gen.list)); -+#ifdef CONFIG_MEMCG -+ VM_WARN_ON_ONCE(mm->lru_gen.memcg); -+ mm->lru_gen.memcg = memcg; -+#endif -+ spin_lock(&mm_list->lock); -+ -+ for_each_node_state(nid, N_MEMORY) { -+ struct lruvec *lruvec = get_lruvec(memcg, nid); -+ -+ if (!lruvec) -+ continue; -+ -+ /* the first addition since the last iteration */ -+ if (lruvec->mm_state.tail == &mm_list->fifo) -+ lruvec->mm_state.tail = &mm->lru_gen.list; -+ } -+ -+ list_add_tail(&mm->lru_gen.list, &mm_list->fifo); -+ -+ spin_unlock(&mm_list->lock); -+} -+ -+void lru_gen_del_mm(struct mm_struct *mm) -+{ -+ int nid; -+ struct lru_gen_mm_list *mm_list; -+ struct mem_cgroup *memcg = NULL; -+ -+ if (list_empty(&mm->lru_gen.list)) -+ return; 
-+ -+#ifdef CONFIG_MEMCG -+ memcg = mm->lru_gen.memcg; -+#endif -+ mm_list = get_mm_list(memcg); -+ -+ spin_lock(&mm_list->lock); -+ -+ for_each_node(nid) { -+ struct lruvec *lruvec = get_lruvec(memcg, nid); -+ -+ if (!lruvec) -+ continue; -+ -+ /* where the last iteration ended (exclusive) */ -+ if (lruvec->mm_state.tail == &mm->lru_gen.list) -+ lruvec->mm_state.tail = lruvec->mm_state.tail->next; -+ -+ /* where the current iteration continues (inclusive) */ -+ if (lruvec->mm_state.head != &mm->lru_gen.list) -+ continue; -+ -+ lruvec->mm_state.head = lruvec->mm_state.head->next; -+ /* the deletion ends the current iteration */ -+ if (lruvec->mm_state.head == &mm_list->fifo) -+ WRITE_ONCE(lruvec->mm_state.seq, lruvec->mm_state.seq + 1); -+ } -+ -+ list_del_init(&mm->lru_gen.list); -+ -+ spin_unlock(&mm_list->lock); -+ -+#ifdef CONFIG_MEMCG -+ mem_cgroup_put(mm->lru_gen.memcg); -+ mm->lru_gen.memcg = NULL; -+#endif -+} -+ -+#ifdef CONFIG_MEMCG -+void lru_gen_migrate_mm(struct mm_struct *mm) -+{ -+ struct mem_cgroup *memcg; -+ struct task_struct *task = rcu_dereference_protected(mm->owner, true); -+ -+ VM_WARN_ON_ONCE(task->mm != mm); -+ lockdep_assert_held(&task->alloc_lock); -+ -+ /* for mm_update_next_owner() */ -+ if (mem_cgroup_disabled()) -+ return; -+ -+ rcu_read_lock(); -+ memcg = mem_cgroup_from_task(task); -+ rcu_read_unlock(); -+ if (memcg == mm->lru_gen.memcg) -+ return; -+ -+ VM_WARN_ON_ONCE(!mm->lru_gen.memcg); -+ VM_WARN_ON_ONCE(list_empty(&mm->lru_gen.list)); -+ -+ lru_gen_del_mm(mm); -+ lru_gen_add_mm(mm); -+} -+#endif -+ -+/* -+ * Bloom filters with m=1<<15, k=2 and the false positive rates of ~1/5 when -+ * n=10,000 and ~1/2 when n=20,000, where, conventionally, m is the number of -+ * bits in a bitmap, k is the number of hash functions and n is the number of -+ * inserted items. -+ * -+ * Page table walkers use one of the two filters to reduce their search space. -+ * To get rid of non-leaf entries that no longer have enough leaf entries, the -+ * aging uses the double-buffering technique to flip to the other filter each -+ * time it produces a new generation. For non-leaf entries that have enough -+ * leaf entries, the aging carries them over to the next generation in -+ * walk_pmd_range(); the eviction also report them when walking the rmap -+ * in lru_gen_look_around(). -+ * -+ * For future optimizations: -+ * 1. It's not necessary to keep both filters all the time. The spare one can be -+ * freed after the RCU grace period and reallocated if needed again. -+ * 2. And when reallocating, it's worth scaling its size according to the number -+ * of inserted entries in the other filter, to reduce the memory overhead on -+ * small systems and false positives on large systems. -+ * 3. Jenkins' hash function is an alternative to Knuth's. 
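
The false positive rates quoted in the comment above follow from the standard Bloom filter approximation p ~= (1 - e^(-kn/m))^k. A quick standalone check of the two quoted figures (editorial illustration, not part of the patch; build with -lm):

#include <math.h>
#include <stdio.h>

int main(void)
{
        const double m = 1 << 15;       /* bits per filter */
        const double k = 2;             /* hash keys per item */
        double n;

        for (n = 10000; n <= 20000; n += 10000) {
                double p = pow(1.0 - exp(-k * n / m), k);

                printf("n = %5.0f -> p ~= %.2f\n", n, p);
        }
        return 0;
}

This prints roughly 0.21 for n=10,000 and 0.50 for n=20,000, matching the ~1/5 and ~1/2 figures above.
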
-+ */ -+#define BLOOM_FILTER_SHIFT 15 -+ -+static inline int filter_gen_from_seq(unsigned long seq) -+{ -+ return seq % NR_BLOOM_FILTERS; -+} -+ -+static void get_item_key(void *item, int *key) -+{ -+ u32 hash = hash_ptr(item, BLOOM_FILTER_SHIFT * 2); -+ -+ BUILD_BUG_ON(BLOOM_FILTER_SHIFT * 2 > BITS_PER_TYPE(u32)); -+ -+ key[0] = hash & (BIT(BLOOM_FILTER_SHIFT) - 1); -+ key[1] = hash >> BLOOM_FILTER_SHIFT; -+} -+ -+static void reset_bloom_filter(struct lruvec *lruvec, unsigned long seq) -+{ -+ unsigned long *filter; -+ int gen = filter_gen_from_seq(seq); -+ -+ filter = lruvec->mm_state.filters[gen]; -+ if (filter) { -+ bitmap_clear(filter, 0, BIT(BLOOM_FILTER_SHIFT)); -+ return; -+ } -+ -+ filter = bitmap_zalloc(BIT(BLOOM_FILTER_SHIFT), -+ __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN); -+ WRITE_ONCE(lruvec->mm_state.filters[gen], filter); -+} -+ -+static void update_bloom_filter(struct lruvec *lruvec, unsigned long seq, void *item) -+{ -+ int key[2]; -+ unsigned long *filter; -+ int gen = filter_gen_from_seq(seq); -+ -+ filter = READ_ONCE(lruvec->mm_state.filters[gen]); -+ if (!filter) -+ return; -+ -+ get_item_key(item, key); -+ -+ if (!test_bit(key[0], filter)) -+ set_bit(key[0], filter); -+ if (!test_bit(key[1], filter)) -+ set_bit(key[1], filter); -+} -+ -+static bool test_bloom_filter(struct lruvec *lruvec, unsigned long seq, void *item) -+{ -+ int key[2]; -+ unsigned long *filter; -+ int gen = filter_gen_from_seq(seq); -+ -+ filter = READ_ONCE(lruvec->mm_state.filters[gen]); -+ if (!filter) -+ return true; -+ -+ get_item_key(item, key); -+ -+ return test_bit(key[0], filter) && test_bit(key[1], filter); -+} -+ -+static void reset_mm_stats(struct lruvec *lruvec, struct lru_gen_mm_walk *walk, bool last) -+{ -+ int i; -+ int hist; -+ -+ lockdep_assert_held(&get_mm_list(lruvec_memcg(lruvec))->lock); -+ -+ if (walk) { -+ hist = lru_hist_from_seq(walk->max_seq); -+ -+ for (i = 0; i < NR_MM_STATS; i++) { -+ WRITE_ONCE(lruvec->mm_state.stats[hist][i], -+ lruvec->mm_state.stats[hist][i] + walk->mm_stats[i]); -+ walk->mm_stats[i] = 0; -+ } -+ } -+ -+ if (NR_HIST_GENS > 1 && last) { -+ hist = lru_hist_from_seq(lruvec->mm_state.seq + 1); -+ -+ for (i = 0; i < NR_MM_STATS; i++) -+ WRITE_ONCE(lruvec->mm_state.stats[hist][i], 0); -+ } -+} -+ -+static bool should_skip_mm(struct mm_struct *mm, struct lru_gen_mm_walk *walk) -+{ -+ int type; -+ unsigned long size = 0; -+ struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); -+ int key = pgdat->node_id % BITS_PER_TYPE(mm->lru_gen.bitmap); -+ -+ if (!walk->force_scan && !test_bit(key, &mm->lru_gen.bitmap)) -+ return true; -+ -+ clear_bit(key, &mm->lru_gen.bitmap); -+ -+ for (type = !walk->can_swap; type < ANON_AND_FILE; type++) { -+ size += type ? get_mm_counter(mm, MM_FILEPAGES) : -+ get_mm_counter(mm, MM_ANONPAGES) + -+ get_mm_counter(mm, MM_SHMEMPAGES); -+ } -+ -+ if (size < MIN_LRU_BATCH) -+ return true; -+ -+ return !mmget_not_zero(mm); -+} -+ -+static bool iterate_mm_list(struct lruvec *lruvec, struct lru_gen_mm_walk *walk, -+ struct mm_struct **iter) -+{ -+ bool first = false; -+ bool last = true; -+ struct mm_struct *mm = NULL; -+ struct mem_cgroup *memcg = lruvec_memcg(lruvec); -+ struct lru_gen_mm_list *mm_list = get_mm_list(memcg); -+ struct lru_gen_mm_state *mm_state = &lruvec->mm_state; -+ -+ /* -+ * There are four interesting cases for this page table walker: -+ * 1. It tries to start a new iteration of mm_list with a stale max_seq; -+ * there is nothing left to do. -+ * 2. 
It's the first of the current generation, and it needs to reset -+ * the Bloom filter for the next generation. -+ * 3. It reaches the end of mm_list, and it needs to increment -+ * mm_state->seq; the iteration is done. -+ * 4. It's the last of the current generation, and it needs to reset the -+ * mm stats counters for the next generation. -+ */ -+ spin_lock(&mm_list->lock); -+ -+ VM_WARN_ON_ONCE(mm_state->seq + 1 < walk->max_seq); -+ VM_WARN_ON_ONCE(*iter && mm_state->seq > walk->max_seq); -+ VM_WARN_ON_ONCE(*iter && !mm_state->nr_walkers); -+ -+ if (walk->max_seq <= mm_state->seq) { -+ if (!*iter) -+ last = false; -+ goto done; -+ } -+ -+ if (!mm_state->nr_walkers) { -+ VM_WARN_ON_ONCE(mm_state->head && mm_state->head != &mm_list->fifo); -+ -+ mm_state->head = mm_list->fifo.next; -+ first = true; -+ } -+ -+ while (!mm && mm_state->head != &mm_list->fifo) { -+ mm = list_entry(mm_state->head, struct mm_struct, lru_gen.list); -+ -+ mm_state->head = mm_state->head->next; -+ -+ /* force scan for those added after the last iteration */ -+ if (!mm_state->tail || mm_state->tail == &mm->lru_gen.list) { -+ mm_state->tail = mm_state->head; -+ walk->force_scan = true; -+ } -+ -+ if (should_skip_mm(mm, walk)) -+ mm = NULL; -+ } -+ -+ if (mm_state->head == &mm_list->fifo) -+ WRITE_ONCE(mm_state->seq, mm_state->seq + 1); -+done: -+ if (*iter && !mm) -+ mm_state->nr_walkers--; -+ if (!*iter && mm) -+ mm_state->nr_walkers++; -+ -+ if (mm_state->nr_walkers) -+ last = false; -+ -+ if (*iter || last) -+ reset_mm_stats(lruvec, walk, last); -+ -+ spin_unlock(&mm_list->lock); -+ -+ if (mm && first) -+ reset_bloom_filter(lruvec, walk->max_seq + 1); -+ -+ if (*iter) -+ mmput_async(*iter); -+ -+ *iter = mm; -+ -+ return last; -+} -+ -+static bool iterate_mm_list_nowalk(struct lruvec *lruvec, unsigned long max_seq) -+{ -+ bool success = false; -+ struct mem_cgroup *memcg = lruvec_memcg(lruvec); -+ struct lru_gen_mm_list *mm_list = get_mm_list(memcg); -+ struct lru_gen_mm_state *mm_state = &lruvec->mm_state; -+ -+ spin_lock(&mm_list->lock); -+ -+ VM_WARN_ON_ONCE(mm_state->seq + 1 < max_seq); -+ -+ if (max_seq > mm_state->seq && !mm_state->nr_walkers) { -+ VM_WARN_ON_ONCE(mm_state->head && mm_state->head != &mm_list->fifo); -+ -+ WRITE_ONCE(mm_state->seq, mm_state->seq + 1); -+ reset_mm_stats(lruvec, NULL, true); -+ success = true; -+ } -+ -+ spin_unlock(&mm_list->lock); -+ -+ return success; -+} -+ -+/****************************************************************************** - * refault feedback loop - ******************************************************************************/ - -@@ -3048,6 +3415,118 @@ static int page_inc_gen(struct lruvec *l - return new_gen; - } - -+static void update_batch_size(struct lru_gen_mm_walk *walk, struct page *page, -+ int old_gen, int new_gen) -+{ -+ int type = page_is_file_lru(page); -+ int zone = page_zonenum(page); -+ int delta = thp_nr_pages(page); -+ -+ VM_WARN_ON_ONCE(old_gen >= MAX_NR_GENS); -+ VM_WARN_ON_ONCE(new_gen >= MAX_NR_GENS); -+ -+ walk->batched++; -+ -+ walk->nr_pages[old_gen][type][zone] -= delta; -+ walk->nr_pages[new_gen][type][zone] += delta; -+} -+ -+static void reset_batch_size(struct lruvec *lruvec, struct lru_gen_mm_walk *walk) -+{ -+ int gen, type, zone; -+ struct lru_gen_struct *lrugen = &lruvec->lrugen; -+ -+ walk->batched = 0; -+ -+ for_each_gen_type_zone(gen, type, zone) { -+ enum lru_list lru = type * LRU_INACTIVE_FILE; -+ int delta = walk->nr_pages[gen][type][zone]; -+ -+ if (!delta) -+ continue; -+ -+ walk->nr_pages[gen][type][zone] = 0; -+ 
WRITE_ONCE(lrugen->nr_pages[gen][type][zone], -+ lrugen->nr_pages[gen][type][zone] + delta); -+ -+ if (lru_gen_is_active(lruvec, gen)) -+ lru += LRU_ACTIVE; -+ __update_lru_size(lruvec, lru, zone, delta); -+ } -+} -+ -+static int should_skip_vma(unsigned long start, unsigned long end, struct mm_walk *args) -+{ -+ struct address_space *mapping; -+ struct vm_area_struct *vma = args->vma; -+ struct lru_gen_mm_walk *walk = args->private; -+ -+ if (!vma_is_accessible(vma)) -+ return true; -+ -+ if (is_vm_hugetlb_page(vma)) -+ return true; -+ -+ if (vma->vm_flags & (VM_LOCKED | VM_SPECIAL | VM_SEQ_READ | VM_RAND_READ)) -+ return true; -+ -+ if (vma == get_gate_vma(vma->vm_mm)) -+ return true; -+ -+ if (vma_is_anonymous(vma)) -+ return !walk->can_swap; -+ -+ if (WARN_ON_ONCE(!vma->vm_file || !vma->vm_file->f_mapping)) -+ return true; -+ -+ mapping = vma->vm_file->f_mapping; -+ if (mapping_unevictable(mapping)) -+ return true; -+ -+ if (shmem_mapping(mapping)) -+ return !walk->can_swap; -+ -+ /* to exclude special mappings like dax, etc. */ -+ return !mapping->a_ops->readpage; -+} -+ -+/* -+ * Some userspace memory allocators map many single-page VMAs. Instead of -+ * returning back to the PGD table for each of such VMAs, finish an entire PMD -+ * table to reduce zigzags and improve cache performance. -+ */ -+static bool get_next_vma(unsigned long mask, unsigned long size, struct mm_walk *args, -+ unsigned long *vm_start, unsigned long *vm_end) -+{ -+ unsigned long start = round_up(*vm_end, size); -+ unsigned long end = (start | ~mask) + 1; -+ -+ VM_WARN_ON_ONCE(mask & size); -+ VM_WARN_ON_ONCE((start & mask) != (*vm_start & mask)); -+ -+ while (args->vma) { -+ if (start >= args->vma->vm_end) { -+ args->vma = args->vma->vm_next; -+ continue; -+ } -+ -+ if (end && end <= args->vma->vm_start) -+ return false; -+ -+ if (should_skip_vma(args->vma->vm_start, args->vma->vm_end, args)) { -+ args->vma = args->vma->vm_next; -+ continue; -+ } -+ -+ *vm_start = max(start, args->vma->vm_start); -+ *vm_end = min(end - 1, args->vma->vm_end - 1) + 1; -+ -+ return true; -+ } -+ -+ return false; -+} -+ - static unsigned long get_pte_pfn(pte_t pte, struct vm_area_struct *vma, unsigned long addr) - { - unsigned long pfn = pte_pfn(pte); -@@ -3066,8 +3545,28 @@ static unsigned long get_pte_pfn(pte_t p - return pfn; - } - -+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG) -+static unsigned long get_pmd_pfn(pmd_t pmd, struct vm_area_struct *vma, unsigned long addr) -+{ -+ unsigned long pfn = pmd_pfn(pmd); -+ -+ VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end); -+ -+ if (!pmd_present(pmd) || is_huge_zero_pmd(pmd)) -+ return -1; -+ -+ if (WARN_ON_ONCE(pmd_devmap(pmd))) -+ return -1; -+ -+ if (WARN_ON_ONCE(!pfn_valid(pfn))) -+ return -1; -+ -+ return pfn; -+} -+#endif -+ - static struct page *get_pfn_page(unsigned long pfn, struct mem_cgroup *memcg, -- struct pglist_data *pgdat) -+ struct pglist_data *pgdat, bool can_swap) - { - struct page *page; - -@@ -3082,9 +3581,375 @@ static struct page *get_pfn_page(unsigne - if (page_memcg_rcu(page) != memcg) - return NULL; - -+ /* file VMAs can contain anon pages from COW */ -+ if (!page_is_file_lru(page) && !can_swap) -+ return NULL; -+ - return page; - } - -+static bool suitable_to_scan(int total, int young) -+{ -+ int n = clamp_t(int, cache_line_size() / sizeof(pte_t), 2, 8); -+ -+ /* suitable if the average number of young PTEs per cacheline is >=1 */ -+ return young * n >= total; -+} -+ -+static bool walk_pte_range(pmd_t *pmd, 
unsigned long start, unsigned long end, -+ struct mm_walk *args) -+{ -+ int i; -+ pte_t *pte; -+ spinlock_t *ptl; -+ unsigned long addr; -+ int total = 0; -+ int young = 0; -+ struct lru_gen_mm_walk *walk = args->private; -+ struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec); -+ struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); -+ int old_gen, new_gen = lru_gen_from_seq(walk->max_seq); -+ -+ VM_WARN_ON_ONCE(pmd_leaf(*pmd)); -+ -+ ptl = pte_lockptr(args->mm, pmd); -+ if (!spin_trylock(ptl)) -+ return false; -+ -+ arch_enter_lazy_mmu_mode(); -+ -+ pte = pte_offset_map(pmd, start & PMD_MASK); -+restart: -+ for (i = pte_index(start), addr = start; addr != end; i++, addr += PAGE_SIZE) { -+ unsigned long pfn; -+ struct page *page; -+ -+ total++; -+ walk->mm_stats[MM_LEAF_TOTAL]++; -+ -+ pfn = get_pte_pfn(pte[i], args->vma, addr); -+ if (pfn == -1) -+ continue; -+ -+ if (!pte_young(pte[i])) { -+ walk->mm_stats[MM_LEAF_OLD]++; -+ continue; -+ } -+ -+ page = get_pfn_page(pfn, memcg, pgdat, walk->can_swap); -+ if (!page) -+ continue; -+ -+ if (!ptep_test_and_clear_young(args->vma, addr, pte + i)) -+ VM_WARN_ON_ONCE(true); -+ -+ young++; -+ walk->mm_stats[MM_LEAF_YOUNG]++; -+ -+ if (pte_dirty(pte[i]) && !PageDirty(page) && -+ !(PageAnon(page) && PageSwapBacked(page) && -+ !PageSwapCache(page))) -+ set_page_dirty(page); -+ -+ old_gen = page_update_gen(page, new_gen); -+ if (old_gen >= 0 && old_gen != new_gen) -+ update_batch_size(walk, page, old_gen, new_gen); -+ } -+ -+ if (i < PTRS_PER_PTE && get_next_vma(PMD_MASK, PAGE_SIZE, args, &start, &end)) -+ goto restart; -+ -+ pte_unmap(pte); -+ -+ arch_leave_lazy_mmu_mode(); -+ spin_unlock(ptl); -+ -+ return suitable_to_scan(total, young); -+} -+ -+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG) -+static void walk_pmd_range_locked(pud_t *pud, unsigned long next, struct vm_area_struct *vma, -+ struct mm_walk *args, unsigned long *bitmap, unsigned long *start) -+{ -+ int i; -+ pmd_t *pmd; -+ spinlock_t *ptl; -+ struct lru_gen_mm_walk *walk = args->private; -+ struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec); -+ struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); -+ int old_gen, new_gen = lru_gen_from_seq(walk->max_seq); -+ -+ VM_WARN_ON_ONCE(pud_leaf(*pud)); -+ -+ /* try to batch at most 1+MIN_LRU_BATCH+1 entries */ -+ if (*start == -1) { -+ *start = next; -+ return; -+ } -+ -+ i = next == -1 ? 0 : pmd_index(next) - pmd_index(*start); -+ if (i && i <= MIN_LRU_BATCH) { -+ __set_bit(i - 1, bitmap); -+ return; -+ } -+ -+ pmd = pmd_offset(pud, *start); -+ -+ ptl = pmd_lockptr(args->mm, pmd); -+ if (!spin_trylock(ptl)) -+ goto done; -+ -+ arch_enter_lazy_mmu_mode(); -+ -+ do { -+ unsigned long pfn; -+ struct page *page; -+ unsigned long addr = i ? 
(*start & PMD_MASK) + i * PMD_SIZE : *start; -+ -+ pfn = get_pmd_pfn(pmd[i], vma, addr); -+ if (pfn == -1) -+ goto next; -+ -+ if (!pmd_trans_huge(pmd[i])) { -+ if (IS_ENABLED(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)) -+ pmdp_test_and_clear_young(vma, addr, pmd + i); -+ goto next; -+ } -+ -+ page = get_pfn_page(pfn, memcg, pgdat, walk->can_swap); -+ if (!page) -+ goto next; -+ -+ if (!pmdp_test_and_clear_young(vma, addr, pmd + i)) -+ goto next; -+ -+ walk->mm_stats[MM_LEAF_YOUNG]++; -+ -+ if (pmd_dirty(pmd[i]) && !PageDirty(page) && -+ !(PageAnon(page) && PageSwapBacked(page) && -+ !PageSwapCache(page))) -+ set_page_dirty(page); -+ -+ old_gen = page_update_gen(page, new_gen); -+ if (old_gen >= 0 && old_gen != new_gen) -+ update_batch_size(walk, page, old_gen, new_gen); -+next: -+ i = i > MIN_LRU_BATCH ? 0 : find_next_bit(bitmap, MIN_LRU_BATCH, i) + 1; -+ } while (i <= MIN_LRU_BATCH); -+ -+ arch_leave_lazy_mmu_mode(); -+ spin_unlock(ptl); -+done: -+ *start = -1; -+ bitmap_zero(bitmap, MIN_LRU_BATCH); -+} -+#else -+static void walk_pmd_range_locked(pud_t *pud, unsigned long next, struct vm_area_struct *vma, -+ struct mm_walk *args, unsigned long *bitmap, unsigned long *start) -+{ -+} -+#endif -+ -+static void walk_pmd_range(pud_t *pud, unsigned long start, unsigned long end, -+ struct mm_walk *args) -+{ -+ int i; -+ pmd_t *pmd; -+ unsigned long next; -+ unsigned long addr; -+ struct vm_area_struct *vma; -+ unsigned long pos = -1; -+ struct lru_gen_mm_walk *walk = args->private; -+ unsigned long bitmap[BITS_TO_LONGS(MIN_LRU_BATCH)] = {}; -+ -+ VM_WARN_ON_ONCE(pud_leaf(*pud)); -+ -+ /* -+ * Finish an entire PMD in two passes: the first only reaches to PTE -+ * tables to avoid taking the PMD lock; the second, if necessary, takes -+ * the PMD lock to clear the accessed bit in PMD entries. 
-+ */ -+ pmd = pmd_offset(pud, start & PUD_MASK); -+restart: -+ /* walk_pte_range() may call get_next_vma() */ -+ vma = args->vma; -+ for (i = pmd_index(start), addr = start; addr != end; i++, addr = next) { -+ pmd_t val = pmd_read_atomic(pmd + i); -+ -+ /* for pmd_read_atomic() */ -+ barrier(); -+ -+ next = pmd_addr_end(addr, end); -+ -+ if (!pmd_present(val) || is_huge_zero_pmd(val)) { -+ walk->mm_stats[MM_LEAF_TOTAL]++; -+ continue; -+ } -+ -+#ifdef CONFIG_TRANSPARENT_HUGEPAGE -+ if (pmd_trans_huge(val)) { -+ unsigned long pfn = pmd_pfn(val); -+ struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); -+ -+ walk->mm_stats[MM_LEAF_TOTAL]++; -+ -+ if (!pmd_young(val)) { -+ walk->mm_stats[MM_LEAF_OLD]++; -+ continue; -+ } -+ -+ /* try to avoid unnecessary memory loads */ -+ if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat)) -+ continue; -+ -+ walk_pmd_range_locked(pud, addr, vma, args, bitmap, &pos); -+ continue; -+ } -+#endif -+ walk->mm_stats[MM_NONLEAF_TOTAL]++; -+ -+#ifdef CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG -+ if (!pmd_young(val)) -+ continue; -+ -+ walk_pmd_range_locked(pud, addr, vma, args, bitmap, &pos); -+#endif -+ if (!walk->force_scan && !test_bloom_filter(walk->lruvec, walk->max_seq, pmd + i)) -+ continue; -+ -+ walk->mm_stats[MM_NONLEAF_FOUND]++; -+ -+ if (!walk_pte_range(&val, addr, next, args)) -+ continue; -+ -+ walk->mm_stats[MM_NONLEAF_ADDED]++; -+ -+ /* carry over to the next generation */ -+ update_bloom_filter(walk->lruvec, walk->max_seq + 1, pmd + i); -+ } -+ -+ walk_pmd_range_locked(pud, -1, vma, args, bitmap, &pos); -+ -+ if (i < PTRS_PER_PMD && get_next_vma(PUD_MASK, PMD_SIZE, args, &start, &end)) -+ goto restart; -+} -+ -+static int walk_pud_range(p4d_t *p4d, unsigned long start, unsigned long end, -+ struct mm_walk *args) -+{ -+ int i; -+ pud_t *pud; -+ unsigned long addr; -+ unsigned long next; -+ struct lru_gen_mm_walk *walk = args->private; -+ -+ VM_WARN_ON_ONCE(p4d_leaf(*p4d)); -+ -+ pud = pud_offset(p4d, start & P4D_MASK); -+restart: -+ for (i = pud_index(start), addr = start; addr != end; i++, addr = next) { -+ pud_t val = READ_ONCE(pud[i]); -+ -+ next = pud_addr_end(addr, end); -+ -+ if (!pud_present(val) || WARN_ON_ONCE(pud_leaf(val))) -+ continue; -+ -+ walk_pmd_range(&val, addr, next, args); -+ -+ /* a racy check to curtail the waiting time */ -+ if (wq_has_sleeper(&walk->lruvec->mm_state.wait)) -+ return 1; -+ -+ if (need_resched() || walk->batched >= MAX_LRU_BATCH) { -+ end = (addr | ~PUD_MASK) + 1; -+ goto done; -+ } -+ } -+ -+ if (i < PTRS_PER_PUD && get_next_vma(P4D_MASK, PUD_SIZE, args, &start, &end)) -+ goto restart; -+ -+ end = round_up(end, P4D_SIZE); -+done: -+ if (!end || !args->vma) -+ return 1; -+ -+ walk->next_addr = max(end, args->vma->vm_start); -+ -+ return -EAGAIN; -+} -+ -+static void walk_mm(struct lruvec *lruvec, struct mm_struct *mm, struct lru_gen_mm_walk *walk) -+{ -+ static const struct mm_walk_ops mm_walk_ops = { -+ .test_walk = should_skip_vma, -+ .p4d_entry = walk_pud_range, -+ }; -+ -+ int err; -+ struct mem_cgroup *memcg = lruvec_memcg(lruvec); -+ -+ walk->next_addr = FIRST_USER_ADDRESS; -+ -+ do { -+ err = -EBUSY; -+ -+ /* page_update_gen() requires stable page_memcg() */ -+ if (!mem_cgroup_trylock_pages(memcg)) -+ break; -+ -+ /* the caller might be holding the lock for write */ -+ if (mmap_read_trylock(mm)) { -+ err = walk_page_range(mm, walk->next_addr, ULONG_MAX, &mm_walk_ops, walk); -+ -+ mmap_read_unlock(mm); -+ } -+ -+ mem_cgroup_unlock_pages(); -+ -+ if (walk->batched) { -+ 
spin_lock_irq(&lruvec->lru_lock); -+ reset_batch_size(lruvec, walk); -+ spin_unlock_irq(&lruvec->lru_lock); -+ } -+ -+ cond_resched(); -+ } while (err == -EAGAIN); -+} -+ -+static struct lru_gen_mm_walk *set_mm_walk(struct pglist_data *pgdat) -+{ -+ struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk; -+ -+ if (pgdat && current_is_kswapd()) { -+ VM_WARN_ON_ONCE(walk); -+ -+ walk = &pgdat->mm_walk; -+ } else if (!pgdat && !walk) { -+ VM_WARN_ON_ONCE(current_is_kswapd()); -+ -+ walk = kzalloc(sizeof(*walk), __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN); -+ } -+ -+ current->reclaim_state->mm_walk = walk; -+ -+ return walk; -+} -+ -+static void clear_mm_walk(void) -+{ -+ struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk; -+ -+ VM_WARN_ON_ONCE(walk && memchr_inv(walk->nr_pages, 0, sizeof(walk->nr_pages))); -+ VM_WARN_ON_ONCE(walk && memchr_inv(walk->mm_stats, 0, sizeof(walk->mm_stats))); -+ -+ current->reclaim_state->mm_walk = NULL; -+ -+ if (!current_is_kswapd()) -+ kfree(walk); -+} -+ - static void inc_min_seq(struct lruvec *lruvec, int type) - { - struct lru_gen_struct *lrugen = &lruvec->lrugen; -@@ -3136,7 +4001,7 @@ next: - return success; - } - --static void inc_max_seq(struct lruvec *lruvec, unsigned long max_seq, bool can_swap) -+static void inc_max_seq(struct lruvec *lruvec, bool can_swap) - { - int prev, next; - int type, zone; -@@ -3146,9 +4011,6 @@ static void inc_max_seq(struct lruvec *l - - VM_WARN_ON_ONCE(!seq_is_valid(lruvec)); - -- if (max_seq != lrugen->max_seq) -- goto unlock; -- - for (type = ANON_AND_FILE - 1; type >= 0; type--) { - if (get_nr_gens(lruvec, type) != MAX_NR_GENS) - continue; -@@ -3186,10 +4048,76 @@ static void inc_max_seq(struct lruvec *l - - /* make sure preceding modifications appear */ - smp_store_release(&lrugen->max_seq, lrugen->max_seq + 1); --unlock: -+ - spin_unlock_irq(&lruvec->lru_lock); - } - -+static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq, -+ struct scan_control *sc, bool can_swap) -+{ -+ bool success; -+ struct lru_gen_mm_walk *walk; -+ struct mm_struct *mm = NULL; -+ struct lru_gen_struct *lrugen = &lruvec->lrugen; -+ -+ VM_WARN_ON_ONCE(max_seq > READ_ONCE(lrugen->max_seq)); -+ -+ /* see the comment in iterate_mm_list() */ -+ if (max_seq <= READ_ONCE(lruvec->mm_state.seq)) { -+ success = false; -+ goto done; -+ } -+ -+ /* -+ * If the hardware doesn't automatically set the accessed bit, fallback -+ * to lru_gen_look_around(), which only clears the accessed bit in a -+ * handful of PTEs. Spreading the work out over a period of time usually -+ * is less efficient, but it avoids bursty page faults. 
-+ */ -+ if (!arch_has_hw_pte_young()) { -+ success = iterate_mm_list_nowalk(lruvec, max_seq); -+ goto done; -+ } -+ -+ walk = set_mm_walk(NULL); -+ if (!walk) { -+ success = iterate_mm_list_nowalk(lruvec, max_seq); -+ goto done; -+ } -+ -+ walk->lruvec = lruvec; -+ walk->max_seq = max_seq; -+ walk->can_swap = can_swap; -+ walk->force_scan = false; -+ -+ do { -+ success = iterate_mm_list(lruvec, walk, &mm); -+ if (mm) -+ walk_mm(lruvec, mm, walk); -+ -+ cond_resched(); -+ } while (mm); -+done: -+ if (!success) { -+ if (sc->priority <= DEF_PRIORITY - 2) -+ wait_event_killable(lruvec->mm_state.wait, -+ max_seq < READ_ONCE(lrugen->max_seq)); -+ -+ return max_seq < READ_ONCE(lrugen->max_seq); -+ } -+ -+ VM_WARN_ON_ONCE(max_seq != READ_ONCE(lrugen->max_seq)); -+ -+ inc_max_seq(lruvec, can_swap); -+ /* either this sees any waiters or they will see updated max_seq */ -+ if (wq_has_sleeper(&lruvec->mm_state.wait)) -+ wake_up_all(&lruvec->mm_state.wait); -+ -+ wakeup_flusher_threads(WB_REASON_VMSCAN); -+ -+ return true; -+} -+ - static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq, unsigned long *min_seq, - struct scan_control *sc, bool can_swap, unsigned long *nr_to_scan) - { -@@ -3265,7 +4193,7 @@ static void age_lruvec(struct lruvec *lr - - need_aging = should_run_aging(lruvec, max_seq, min_seq, sc, swappiness, &nr_to_scan); - if (need_aging) -- inc_max_seq(lruvec, max_seq, swappiness); -+ try_to_inc_max_seq(lruvec, max_seq, sc, swappiness); - } - - static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc) -@@ -3274,6 +4202,8 @@ static void lru_gen_age_node(struct pgli - - VM_WARN_ON_ONCE(!current_is_kswapd()); - -+ set_mm_walk(pgdat); -+ - memcg = mem_cgroup_iter(NULL, NULL, NULL); - do { - struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); -@@ -3282,11 +4212,16 @@ static void lru_gen_age_node(struct pgli - - cond_resched(); - } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL))); -+ -+ clear_mm_walk(); - } - - /* - * This function exploits spatial locality when shrink_page_list() walks the -- * rmap. It scans the adjacent PTEs of a young PTE and promotes hot pages. -+ * rmap. It scans the adjacent PTEs of a young PTE and promotes hot pages. If -+ * the scan was done cacheline efficiently, it adds the PMD entry pointing to -+ * the PTE table to the Bloom filter. This forms a feedback loop between the -+ * eviction and the aging. - */ - void lru_gen_look_around(struct page_vma_mapped_walk *pvmw) - { -@@ -3295,6 +4230,8 @@ void lru_gen_look_around(struct page_vma - unsigned long start; - unsigned long end; - unsigned long addr; -+ struct lru_gen_mm_walk *walk; -+ int young = 0; - unsigned long bitmap[BITS_TO_LONGS(MIN_LRU_BATCH)] = {}; - struct page *page = pvmw->page; - struct mem_cgroup *memcg = page_memcg(page); -@@ -3309,6 +4246,9 @@ void lru_gen_look_around(struct page_vma - if (spin_is_contended(pvmw->ptl)) - return; - -+ /* avoid taking the LRU lock under the PTL when possible */ -+ walk = current->reclaim_state ? 
current->reclaim_state->mm_walk : NULL; -+ - start = max(pvmw->address & PMD_MASK, pvmw->vma->vm_start); - end = min(pvmw->address | ~PMD_MASK, pvmw->vma->vm_end - 1) + 1; - -@@ -3338,13 +4278,15 @@ void lru_gen_look_around(struct page_vma - if (!pte_young(pte[i])) - continue; - -- page = get_pfn_page(pfn, memcg, pgdat); -+ page = get_pfn_page(pfn, memcg, pgdat, !walk || walk->can_swap); - if (!page) - continue; - - if (!ptep_test_and_clear_young(pvmw->vma, addr, pte + i)) - VM_WARN_ON_ONCE(true); - -+ young++; -+ - if (pte_dirty(pte[i]) && !PageDirty(page) && - !(PageAnon(page) && PageSwapBacked(page) && - !PageSwapCache(page))) -@@ -3360,7 +4302,11 @@ void lru_gen_look_around(struct page_vma - arch_leave_lazy_mmu_mode(); - rcu_read_unlock(); - -- if (bitmap_weight(bitmap, MIN_LRU_BATCH) < PAGEVEC_SIZE) { -+ /* feedback from rmap walkers to page table walkers */ -+ if (suitable_to_scan(i, young)) -+ update_bloom_filter(lruvec, max_seq, pvmw->pmd); -+ -+ if (!walk && bitmap_weight(bitmap, MIN_LRU_BATCH) < PAGEVEC_SIZE) { - for_each_set_bit(i, bitmap, MIN_LRU_BATCH) { - page = pte_page(pte[i]); - activate_page(page); -@@ -3372,8 +4318,10 @@ void lru_gen_look_around(struct page_vma - if (!mem_cgroup_trylock_pages(memcg)) - return; - -- spin_lock_irq(&lruvec->lru_lock); -- new_gen = lru_gen_from_seq(lruvec->lrugen.max_seq); -+ if (!walk) { -+ spin_lock_irq(&lruvec->lru_lock); -+ new_gen = lru_gen_from_seq(lruvec->lrugen.max_seq); -+ } - - for_each_set_bit(i, bitmap, MIN_LRU_BATCH) { - page = compound_head(pte_page(pte[i])); -@@ -3384,10 +4332,14 @@ void lru_gen_look_around(struct page_vma - if (old_gen < 0 || old_gen == new_gen) - continue; - -- lru_gen_update_size(lruvec, page, old_gen, new_gen); -+ if (walk) -+ update_batch_size(walk, page, old_gen, new_gen); -+ else -+ lru_gen_update_size(lruvec, page, old_gen, new_gen); - } - -- spin_unlock_irq(&lruvec->lru_lock); -+ if (!walk) -+ spin_unlock_irq(&lruvec->lru_lock); - - mem_cgroup_unlock_pages(); - } -@@ -3670,6 +4622,7 @@ static int evict_pages(struct lruvec *lr - struct page *page; - enum vm_event_item item; - struct reclaim_stat stat; -+ struct lru_gen_mm_walk *walk; - struct mem_cgroup *memcg = lruvec_memcg(lruvec); - struct pglist_data *pgdat = lruvec_pgdat(lruvec); - -@@ -3706,6 +4659,10 @@ static int evict_pages(struct lruvec *lr - - move_pages_to_lru(lruvec, &list); - -+ walk = current->reclaim_state->mm_walk; -+ if (walk && walk->batched) -+ reset_batch_size(lruvec, walk); -+ - item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT; - if (!cgroup_reclaim(sc)) - __count_vm_events(item, reclaimed); -@@ -3722,6 +4679,11 @@ static int evict_pages(struct lruvec *lr - return scanned; - } - -+/* -+ * For future optimizations: -+ * 1. Defer try_to_inc_max_seq() to workqueues to reduce latency for memcg -+ * reclaim. -+ */ - static unsigned long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, - bool can_swap) - { -@@ -3747,7 +4709,8 @@ static unsigned long get_nr_to_scan(stru - if (current_is_kswapd()) - return 0; - -- inc_max_seq(lruvec, max_seq, can_swap); -+ if (try_to_inc_max_seq(lruvec, max_seq, sc, can_swap)) -+ return nr_to_scan; - done: - return min_seq[!can_swap] + MIN_NR_GENS <= max_seq ? 
nr_to_scan : 0; - } -@@ -3761,6 +4724,8 @@ static void lru_gen_shrink_lruvec(struct - - blk_start_plug(&plug); - -+ set_mm_walk(lruvec_pgdat(lruvec)); -+ - while (true) { - int delta; - int swappiness; -@@ -3788,6 +4753,8 @@ static void lru_gen_shrink_lruvec(struct - cond_resched(); - } - -+ clear_mm_walk(); -+ - blk_finish_plug(&plug); - } - -@@ -3804,15 +4771,21 @@ void lru_gen_init_lruvec(struct lruvec * - - for_each_gen_type_zone(gen, type, zone) - INIT_LIST_HEAD(&lrugen->lists[gen][type][zone]); -+ -+ lruvec->mm_state.seq = MIN_NR_GENS; -+ init_waitqueue_head(&lruvec->mm_state.wait); - } - - #ifdef CONFIG_MEMCG - void lru_gen_init_memcg(struct mem_cgroup *memcg) - { -+ INIT_LIST_HEAD(&memcg->mm_list.fifo); -+ spin_lock_init(&memcg->mm_list.lock); - } - - void lru_gen_exit_memcg(struct mem_cgroup *memcg) - { -+ int i; - int nid; - - for_each_node(nid) { -@@ -3820,6 +4793,11 @@ void lru_gen_exit_memcg(struct mem_cgrou - - VM_WARN_ON_ONCE(memchr_inv(lruvec->lrugen.nr_pages, 0, - sizeof(lruvec->lrugen.nr_pages))); -+ -+ for (i = 0; i < NR_BLOOM_FILTERS; i++) { -+ bitmap_free(lruvec->mm_state.filters[i]); -+ lruvec->mm_state.filters[i] = NULL; -+ } - } - } - #endif diff --git a/target/linux/generic/backport-6.1/020-v6.1-09-mm-multi-gen-LRU-optimize-multiple-memcgs.patch b/target/linux/generic/backport-6.1/020-v6.1-09-mm-multi-gen-LRU-optimize-multiple-memcgs.patch deleted file mode 100644 index b5fb1951514d..000000000000 --- a/target/linux/generic/backport-6.1/020-v6.1-09-mm-multi-gen-LRU-optimize-multiple-memcgs.patch +++ /dev/null @@ -1,315 +0,0 @@ -From 36a18a68ea458e8f4db2ca86b00091daf32c6c74 Mon Sep 17 00:00:00 2001 -From: Yu Zhao -Date: Sun, 18 Sep 2022 02:00:06 -0600 -Subject: [PATCH 09/29] mm: multi-gen LRU: optimize multiple memcgs -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -When multiple memcgs are available, it is possible to use generations as a -frame of reference to make better choices and improve overall performance -under global memory pressure. This patch adds a basic optimization to -select memcgs that can drop single-use unmapped clean pages first. Doing -so reduces the chance of going into the aging path or swapping, which can -be costly. - -A typical example that benefits from this optimization is a server running -mixed types of workloads, e.g., heavy anon workload in one memcg and heavy -buffered I/O workload in the other. - -Though this optimization can be applied to both kswapd and direct reclaim, -it is only added to kswapd to keep the patchset manageable. Later -improvements may cover the direct reclaim path. - -While ensuring certain fairness to all eligible memcgs, proportional scans -of individual memcgs also require proper backoff to avoid overshooting -their aggregate reclaim target by too much. Otherwise it can cause high -direct reclaim latency. The conditions for backoff are: - -1. At low priorities, for direct reclaim, if aging fairness or direct - reclaim latency is at risk, i.e., aging one memcg multiple times or - swapping after the target is met. -2. At high priorities, for global reclaim, if per-zone free pages are - above respective watermarks. 
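
The two conditions above correspond to should_abort_scan() in the mm/vmscan.c hunk below (the hunk is cut off at the end of this excerpt, inside its watermark loop). Reduced to its decision shape, as an illustrative userspace sketch over boolean inputs rather than the kernel's actual signature or state:

#include <stdbool.h>

/* Sketch of the two backoff rules listed in the commit message; every
 * parameter is an illustrative boolean, not a kernel field. */
bool should_back_off(bool direct_reclaim, bool low_priority,
                     bool aged_a_memcg_twice, bool swapping_past_target,
                     bool global_reclaim, bool high_priority,
                     bool free_above_watermarks)
{
        /* 1. direct reclaim: aging fairness or reclaim latency at risk */
        if (direct_reclaim && low_priority &&
            (aged_a_memcg_twice || swapping_past_target))
                return true;

        /* 2. global reclaim: per-zone free pages already above watermarks */
        if (global_reclaim && high_priority && free_above_watermarks)
                return true;

        return false;
}
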
- -Server benchmark results: - Mixed workloads: - fio (buffered I/O): +[19, 21]% - IOPS BW - patch1-8: 1880k 7343MiB/s - patch1-9: 2252k 8796MiB/s - - memcached (anon): +[119, 123]% - Ops/sec KB/sec - patch1-8: 862768.65 33514.68 - patch1-9: 1911022.12 74234.54 - - Mixed workloads: - fio (buffered I/O): +[75, 77]% - IOPS BW - 5.19-rc1: 1279k 4996MiB/s - patch1-9: 2252k 8796MiB/s - - memcached (anon): +[13, 15]% - Ops/sec KB/sec - 5.19-rc1: 1673524.04 65008.87 - patch1-9: 1911022.12 74234.54 - - Configurations: - (changes since patch 6) - - cat mixed.sh - modprobe brd rd_nr=2 rd_size=56623104 - - swapoff -a - mkswap /dev/ram0 - swapon /dev/ram0 - - mkfs.ext4 /dev/ram1 - mount -t ext4 /dev/ram1 /mnt - - memtier_benchmark -S /var/run/memcached/memcached.sock \ - -P memcache_binary -n allkeys --key-minimum=1 \ - --key-maximum=50000000 --key-pattern=P:P -c 1 -t 36 \ - --ratio 1:0 --pipeline 8 -d 2000 - - fio -name=mglru --numjobs=36 --directory=/mnt --size=1408m \ - --buffered=1 --ioengine=io_uring --iodepth=128 \ - --iodepth_batch_submit=32 --iodepth_batch_complete=32 \ - --rw=randread --random_distribution=random --norandommap \ - --time_based --ramp_time=10m --runtime=90m --group_reporting & - pid=$! - - sleep 200 - - memtier_benchmark -S /var/run/memcached/memcached.sock \ - -P memcache_binary -n allkeys --key-minimum=1 \ - --key-maximum=50000000 --key-pattern=R:R -c 1 -t 36 \ - --ratio 0:1 --pipeline 8 --randomize --distinct-client-seed - - kill -INT $pid - wait - -Client benchmark results: - no change (CONFIG_MEMCG=n) - -Link: https://lkml.kernel.org/r/20220918080010.2920238-10-yuzhao@google.com -Signed-off-by: Yu Zhao -Acked-by: Brian Geffon -Acked-by: Jan Alexander Steffens (heftig) -Acked-by: Oleksandr Natalenko -Acked-by: Steven Barrett -Acked-by: Suleiman Souhlal -Tested-by: Daniel Byrne -Tested-by: Donald Carr -Tested-by: Holger Hoffstätte -Tested-by: Konstantin Kharlamov -Tested-by: Shuang Zhai -Tested-by: Sofia Trinh -Tested-by: Vaibhav Jain -Cc: Andi Kleen -Cc: Aneesh Kumar K.V -Cc: Barry Song -Cc: Catalin Marinas -Cc: Dave Hansen -Cc: Hillf Danton -Cc: Jens Axboe -Cc: Johannes Weiner -Cc: Jonathan Corbet -Cc: Linus Torvalds -Cc: Matthew Wilcox -Cc: Mel Gorman -Cc: Miaohe Lin -Cc: Michael Larabel -Cc: Michal Hocko -Cc: Mike Rapoport -Cc: Mike Rapoport -Cc: Peter Zijlstra -Cc: Qi Zheng -Cc: Tejun Heo -Cc: Vlastimil Babka -Cc: Will Deacon -Signed-off-by: Andrew Morton ---- - mm/vmscan.c | 105 +++++++++++++++++++++++++++++++++++++++++++++++----- - 1 file changed, 96 insertions(+), 9 deletions(-) - ---- a/mm/vmscan.c -+++ b/mm/vmscan.c -@@ -127,6 +127,12 @@ struct scan_control { - /* Always discard instead of demoting to lower tier memory */ - unsigned int no_demotion:1; - -+#ifdef CONFIG_LRU_GEN -+ /* help kswapd make better choices among multiple memcgs */ -+ unsigned int memcgs_need_aging:1; -+ unsigned long last_reclaimed; -+#endif -+ - /* Allocation order */ - s8 order; - -@@ -4202,6 +4208,19 @@ static void lru_gen_age_node(struct pgli - - VM_WARN_ON_ONCE(!current_is_kswapd()); - -+ sc->last_reclaimed = sc->nr_reclaimed; -+ -+ /* -+ * To reduce the chance of going into the aging path, which can be -+ * costly, optimistically skip it if the flag below was cleared in the -+ * eviction path. This improves the overall performance when multiple -+ * memcgs are available. 
-+ */ -+ if (!sc->memcgs_need_aging) { -+ sc->memcgs_need_aging = true; -+ return; -+ } -+ - set_mm_walk(pgdat); - - memcg = mem_cgroup_iter(NULL, NULL, NULL); -@@ -4613,7 +4632,8 @@ static int isolate_pages(struct lruvec * - return scanned; - } - --static int evict_pages(struct lruvec *lruvec, struct scan_control *sc, int swappiness) -+static int evict_pages(struct lruvec *lruvec, struct scan_control *sc, int swappiness, -+ bool *need_swapping) - { - int type; - int scanned; -@@ -4676,6 +4696,9 @@ static int evict_pages(struct lruvec *lr - - sc->nr_reclaimed += reclaimed; - -+ if (need_swapping && type == LRU_GEN_ANON) -+ *need_swapping = true; -+ - return scanned; - } - -@@ -4685,9 +4708,8 @@ static int evict_pages(struct lruvec *lr - * reclaim. - */ - static unsigned long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, -- bool can_swap) -+ bool can_swap, bool *need_aging) - { -- bool need_aging; - unsigned long nr_to_scan; - struct mem_cgroup *memcg = lruvec_memcg(lruvec); - DEFINE_MAX_SEQ(lruvec); -@@ -4697,8 +4719,8 @@ static unsigned long get_nr_to_scan(stru - (mem_cgroup_below_low(memcg) && !sc->memcg_low_reclaim)) - return 0; - -- need_aging = should_run_aging(lruvec, max_seq, min_seq, sc, can_swap, &nr_to_scan); -- if (!need_aging) -+ *need_aging = should_run_aging(lruvec, max_seq, min_seq, sc, can_swap, &nr_to_scan); -+ if (!*need_aging) - return nr_to_scan; - - /* skip the aging path at the default priority */ -@@ -4715,10 +4737,68 @@ done: - return min_seq[!can_swap] + MIN_NR_GENS <= max_seq ? nr_to_scan : 0; - } - -+static bool should_abort_scan(struct lruvec *lruvec, unsigned long seq, -+ struct scan_control *sc, bool need_swapping) -+{ -+ int i; -+ DEFINE_MAX_SEQ(lruvec); -+ -+ if (!current_is_kswapd()) { -+ /* age each memcg once to ensure fairness */ -+ if (max_seq - seq > 1) -+ return true; -+ -+ /* over-swapping can increase allocation latency */ -+ if (sc->nr_reclaimed >= sc->nr_to_reclaim && need_swapping) -+ return true; -+ -+ /* give this thread a chance to exit and free its memory */ -+ if (fatal_signal_pending(current)) { -+ sc->nr_reclaimed += MIN_LRU_BATCH; -+ return true; -+ } -+ -+ if (cgroup_reclaim(sc)) -+ return false; -+ } else if (sc->nr_reclaimed - sc->last_reclaimed < sc->nr_to_reclaim) -+ return false; -+ -+ /* keep scanning at low priorities to ensure fairness */ -+ if (sc->priority > DEF_PRIORITY - 2) -+ return false; -+ -+ /* -+ * A minimum amount of work was done under global memory pressure. For -+ * kswapd, it may be overshooting. For direct reclaim, the target isn't -+ * met, and yet the allocation may still succeed, since kswapd may have -+ * caught up. In either case, it's better to stop now, and restart if -+ * necessary. -+ */ -+ for (i = 0; i <= sc->reclaim_idx; i++) { -+ unsigned long wmark; -+ struct zone *zone = lruvec_pgdat(lruvec)->node_zones + i; -+ -+ if (!managed_zone(zone)) -+ continue; -+ -+ wmark = current_is_kswapd() ? 
high_wmark_pages(zone) : low_wmark_pages(zone); -+ if (wmark > zone_page_state(zone, NR_FREE_PAGES)) -+ return false; -+ } -+ -+ sc->nr_reclaimed += MIN_LRU_BATCH; -+ -+ return true; -+} -+ - static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) - { - struct blk_plug plug; -+ bool need_aging = false; -+ bool need_swapping = false; - unsigned long scanned = 0; -+ unsigned long reclaimed = sc->nr_reclaimed; -+ DEFINE_MAX_SEQ(lruvec); - - lru_add_drain(); - -@@ -4738,21 +4818,28 @@ static void lru_gen_shrink_lruvec(struct - else - swappiness = 0; - -- nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness); -+ nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness, &need_aging); - if (!nr_to_scan) -- break; -+ goto done; - -- delta = evict_pages(lruvec, sc, swappiness); -+ delta = evict_pages(lruvec, sc, swappiness, &need_swapping); - if (!delta) -- break; -+ goto done; - - scanned += delta; - if (scanned >= nr_to_scan) - break; - -+ if (should_abort_scan(lruvec, max_seq, sc, need_swapping)) -+ break; -+ - cond_resched(); - } - -+ /* see the comment in lru_gen_age_node() */ -+ if (sc->nr_reclaimed - reclaimed >= MIN_LRU_BATCH && !need_aging) -+ sc->memcgs_need_aging = false; -+done: - clear_mm_walk(); - - blk_finish_plug(&plug); diff --git a/target/linux/generic/backport-6.1/020-v6.1-10-mm-multi-gen-LRU-kill-switch.patch b/target/linux/generic/backport-6.1/020-v6.1-10-mm-multi-gen-LRU-kill-switch.patch deleted file mode 100644 index cf5b8f0e9f78..000000000000 --- a/target/linux/generic/backport-6.1/020-v6.1-10-mm-multi-gen-LRU-kill-switch.patch +++ /dev/null @@ -1,498 +0,0 @@ -From 640db3a029dca909af47157ca18f52b29d34a1b9 Mon Sep 17 00:00:00 2001 -From: Yu Zhao -Date: Sun, 18 Sep 2022 02:00:07 -0600 -Subject: [PATCH 10/29] mm: multi-gen LRU: kill switch -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Add /sys/kernel/mm/lru_gen/enabled as a kill switch. Components that -can be disabled include: - 0x0001: the multi-gen LRU core - 0x0002: walking page table, when arch_has_hw_pte_young() returns - true - 0x0004: clearing the accessed bit in non-leaf PMD entries, when - CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG=y - [yYnN]: apply to all the components above -E.g., - echo y >/sys/kernel/mm/lru_gen/enabled - cat /sys/kernel/mm/lru_gen/enabled - 0x0007 - echo 5 >/sys/kernel/mm/lru_gen/enabled - cat /sys/kernel/mm/lru_gen/enabled - 0x0005 - -NB: the page table walks happen on the scale of seconds under heavy memory -pressure, in which case the mmap_lock contention is a lesser concern, -compared with the LRU lock contention and the I/O congestion. So far the -only well-known case of the mmap_lock contention happens on Android, due -to Scudo [1] which allocates several thousand VMAs for merely a few -hundred MBs. The SPF and the Maple Tree also have provided their own -assessments [2][3]. However, if walking page tables does worsen the -mmap_lock contention, the kill switch can be used to disable it. In this -case the multi-gen LRU will suffer a minor performance degradation, as -shown previously. - -Clearing the accessed bit in non-leaf PMD entries can also be disabled, -since this behavior was not tested on x86 varieties other than Intel and -AMD. 
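
A minimal userspace sketch of driving this kill switch from C instead of the shell, assuming root privileges and a kernel that carries this patch (the sysfs path and the bit values are the ones listed above; error handling is kept to a minimum):

  #include <fcntl.h>
  #include <stdio.h>
  #include <unistd.h>

  #define LRU_GEN_ENABLED "/sys/kernel/mm/lru_gen/enabled"

  int main(void)
  {
      char buf[32] = "";
      int fd = open(LRU_GEN_ENABLED, O_RDWR);

      if (fd < 0) {
          perror(LRU_GEN_ENABLED);
          return 1;
      }

      /* Show the currently enabled components, e.g. "0x0007". */
      if (read(fd, buf, sizeof(buf) - 1) > 0)
          printf("current: %s", buf);

      /* Keep the core (0x0001) and non-leaf PMD clearing (0x0004) but
       * disable the page table walks (0x0002), i.e. write 5. */
      if (lseek(fd, 0, SEEK_SET) < 0 || write(fd, "5", 1) != 1)
          perror("write");

      close(fd);
      return 0;
  }

Writing "y" or "n" instead applies to all of the components at once, as noted above.
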
- -[1] https://source.android.com/devices/tech/debug/scudo -[2] https://lore.kernel.org/r/20220128131006.67712-1-michel@lespinasse.org/ -[3] https://lore.kernel.org/r/20220426150616.3937571-1-Liam.Howlett@oracle.com/ - -Link: https://lkml.kernel.org/r/20220918080010.2920238-11-yuzhao@google.com -Signed-off-by: Yu Zhao -Acked-by: Brian Geffon -Acked-by: Jan Alexander Steffens (heftig) -Acked-by: Oleksandr Natalenko -Acked-by: Steven Barrett -Acked-by: Suleiman Souhlal -Tested-by: Daniel Byrne -Tested-by: Donald Carr -Tested-by: Holger Hoffstätte -Tested-by: Konstantin Kharlamov -Tested-by: Shuang Zhai -Tested-by: Sofia Trinh -Tested-by: Vaibhav Jain -Cc: Andi Kleen -Cc: Aneesh Kumar K.V -Cc: Barry Song -Cc: Catalin Marinas -Cc: Dave Hansen -Cc: Hillf Danton -Cc: Jens Axboe -Cc: Johannes Weiner -Cc: Jonathan Corbet -Cc: Linus Torvalds -Cc: Matthew Wilcox -Cc: Mel Gorman -Cc: Miaohe Lin -Cc: Michael Larabel -Cc: Michal Hocko -Cc: Mike Rapoport -Cc: Mike Rapoport -Cc: Peter Zijlstra -Cc: Qi Zheng -Cc: Tejun Heo -Cc: Vlastimil Babka -Cc: Will Deacon -Signed-off-by: Andrew Morton ---- - include/linux/cgroup.h | 15 ++- - include/linux/mm_inline.h | 15 ++- - include/linux/mmzone.h | 9 ++ - kernel/cgroup/cgroup-internal.h | 1 - - mm/Kconfig | 6 + - mm/vmscan.c | 228 +++++++++++++++++++++++++++++++- - 6 files changed, 265 insertions(+), 9 deletions(-) - ---- a/include/linux/cgroup.h -+++ b/include/linux/cgroup.h -@@ -433,6 +433,18 @@ static inline void cgroup_put(struct cgr - css_put(&cgrp->self); - } - -+extern struct mutex cgroup_mutex; -+ -+static inline void cgroup_lock(void) -+{ -+ mutex_lock(&cgroup_mutex); -+} -+ -+static inline void cgroup_unlock(void) -+{ -+ mutex_unlock(&cgroup_mutex); -+} -+ - /** - * task_css_set_check - obtain a task's css_set with extra access conditions - * @task: the task to obtain css_set for -@@ -447,7 +459,6 @@ static inline void cgroup_put(struct cgr - * as locks used during the cgroup_subsys::attach() methods. 
- */ - #ifdef CONFIG_PROVE_RCU --extern struct mutex cgroup_mutex; - extern spinlock_t css_set_lock; - #define task_css_set_check(task, __c) \ - rcu_dereference_check((task)->cgroups, \ -@@ -708,6 +719,8 @@ struct cgroup; - static inline u64 cgroup_id(const struct cgroup *cgrp) { return 1; } - static inline void css_get(struct cgroup_subsys_state *css) {} - static inline void css_put(struct cgroup_subsys_state *css) {} -+static inline void cgroup_lock(void) {} -+static inline void cgroup_unlock(void) {} - static inline int cgroup_attach_task_all(struct task_struct *from, - struct task_struct *t) { return 0; } - static inline int cgroupstats_build(struct cgroupstats *stats, ---- a/include/linux/mm_inline.h -+++ b/include/linux/mm_inline.h -@@ -91,10 +91,21 @@ static __always_inline enum lru_list pag - - #ifdef CONFIG_LRU_GEN - -+#ifdef CONFIG_LRU_GEN_ENABLED - static inline bool lru_gen_enabled(void) - { -- return true; -+ DECLARE_STATIC_KEY_TRUE(lru_gen_caps[NR_LRU_GEN_CAPS]); -+ -+ return static_branch_likely(&lru_gen_caps[LRU_GEN_CORE]); -+} -+#else -+static inline bool lru_gen_enabled(void) -+{ -+ DECLARE_STATIC_KEY_FALSE(lru_gen_caps[NR_LRU_GEN_CAPS]); -+ -+ return static_branch_unlikely(&lru_gen_caps[LRU_GEN_CORE]); - } -+#endif - - static inline bool lru_gen_in_fault(void) - { -@@ -207,7 +218,7 @@ static inline bool lru_gen_add_page(stru - - VM_WARN_ON_ONCE_PAGE(gen != -1, page); - -- if (PageUnevictable(page)) -+ if (PageUnevictable(page) || !lrugen->enabled) - return false; - /* - * There are three common cases for this page: ---- a/include/linux/mmzone.h -+++ b/include/linux/mmzone.h -@@ -364,6 +364,13 @@ enum { - LRU_GEN_FILE, - }; - -+enum { -+ LRU_GEN_CORE, -+ LRU_GEN_MM_WALK, -+ LRU_GEN_NONLEAF_YOUNG, -+ NR_LRU_GEN_CAPS -+}; -+ - #define MIN_LRU_BATCH BITS_PER_LONG - #define MAX_LRU_BATCH (MIN_LRU_BATCH * 64) - -@@ -405,6 +412,8 @@ struct lru_gen_struct { - /* can be modified without holding the LRU lock */ - atomic_long_t evicted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS]; - atomic_long_t refaulted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS]; -+ /* whether the multi-gen LRU is enabled */ -+ bool enabled; - }; - - enum { ---- a/kernel/cgroup/cgroup-internal.h -+++ b/kernel/cgroup/cgroup-internal.h -@@ -165,7 +165,6 @@ struct cgroup_mgctx { - #define DEFINE_CGROUP_MGCTX(name) \ - struct cgroup_mgctx name = CGROUP_MGCTX_INIT(name) - --extern struct mutex cgroup_mutex; - extern spinlock_t css_set_lock; - extern struct cgroup_subsys *cgroup_subsys[]; - extern struct list_head cgroup_roots; ---- a/mm/Kconfig -+++ b/mm/Kconfig -@@ -906,6 +906,12 @@ config LRU_GEN - help - A high performance LRU implementation to overcommit memory. - -+config LRU_GEN_ENABLED -+ bool "Enable by default" -+ depends on LRU_GEN -+ help -+ This option enables the multi-gen LRU by default. 
-+ - config LRU_GEN_STATS - bool "Full stats for debugging" - depends on LRU_GEN ---- a/mm/vmscan.c -+++ b/mm/vmscan.c -@@ -52,6 +52,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -2841,6 +2842,14 @@ static bool can_age_anon_pages(struct pg - - #ifdef CONFIG_LRU_GEN - -+#ifdef CONFIG_LRU_GEN_ENABLED -+DEFINE_STATIC_KEY_ARRAY_TRUE(lru_gen_caps, NR_LRU_GEN_CAPS); -+#define get_cap(cap) static_branch_likely(&lru_gen_caps[cap]) -+#else -+DEFINE_STATIC_KEY_ARRAY_FALSE(lru_gen_caps, NR_LRU_GEN_CAPS); -+#define get_cap(cap) static_branch_unlikely(&lru_gen_caps[cap]) -+#endif -+ - /****************************************************************************** - * shorthand helpers - ******************************************************************************/ -@@ -3717,7 +3726,8 @@ static void walk_pmd_range_locked(pud_t - goto next; - - if (!pmd_trans_huge(pmd[i])) { -- if (IS_ENABLED(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)) -+ if (IS_ENABLED(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG) && -+ get_cap(LRU_GEN_NONLEAF_YOUNG)) - pmdp_test_and_clear_young(vma, addr, pmd + i); - goto next; - } -@@ -3815,10 +3825,12 @@ restart: - walk->mm_stats[MM_NONLEAF_TOTAL]++; - - #ifdef CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG -- if (!pmd_young(val)) -- continue; -+ if (get_cap(LRU_GEN_NONLEAF_YOUNG)) { -+ if (!pmd_young(val)) -+ continue; - -- walk_pmd_range_locked(pud, addr, vma, args, bitmap, &pos); -+ walk_pmd_range_locked(pud, addr, vma, args, bitmap, &pos); -+ } - #endif - if (!walk->force_scan && !test_bloom_filter(walk->lruvec, walk->max_seq, pmd + i)) - continue; -@@ -4080,7 +4092,7 @@ static bool try_to_inc_max_seq(struct lr - * handful of PTEs. Spreading the work out over a period of time usually - * is less efficient, but it avoids bursty page faults. 
- */ -- if (!arch_has_hw_pte_young()) { -+ if (!(arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK))) { - success = iterate_mm_list_nowalk(lruvec, max_seq); - goto done; - } -@@ -4846,6 +4858,208 @@ done: - } - - /****************************************************************************** -+ * state change -+ ******************************************************************************/ -+ -+static bool __maybe_unused state_is_valid(struct lruvec *lruvec) -+{ -+ struct lru_gen_struct *lrugen = &lruvec->lrugen; -+ -+ if (lrugen->enabled) { -+ enum lru_list lru; -+ -+ for_each_evictable_lru(lru) { -+ if (!list_empty(&lruvec->lists[lru])) -+ return false; -+ } -+ } else { -+ int gen, type, zone; -+ -+ for_each_gen_type_zone(gen, type, zone) { -+ if (!list_empty(&lrugen->lists[gen][type][zone])) -+ return false; -+ } -+ } -+ -+ return true; -+} -+ -+static bool fill_evictable(struct lruvec *lruvec) -+{ -+ enum lru_list lru; -+ int remaining = MAX_LRU_BATCH; -+ -+ for_each_evictable_lru(lru) { -+ int type = is_file_lru(lru); -+ bool active = is_active_lru(lru); -+ struct list_head *head = &lruvec->lists[lru]; -+ -+ while (!list_empty(head)) { -+ bool success; -+ struct page *page = lru_to_page(head); -+ -+ VM_WARN_ON_ONCE_PAGE(PageUnevictable(page), page); -+ VM_WARN_ON_ONCE_PAGE(PageActive(page) != active, page); -+ VM_WARN_ON_ONCE_PAGE(page_is_file_lru(page) != type, page); -+ VM_WARN_ON_ONCE_PAGE(page_lru_gen(page) != -1, page); -+ -+ del_page_from_lru_list(page, lruvec); -+ success = lru_gen_add_page(lruvec, page, false); -+ VM_WARN_ON_ONCE(!success); -+ -+ if (!--remaining) -+ return false; -+ } -+ } -+ -+ return true; -+} -+ -+static bool drain_evictable(struct lruvec *lruvec) -+{ -+ int gen, type, zone; -+ int remaining = MAX_LRU_BATCH; -+ -+ for_each_gen_type_zone(gen, type, zone) { -+ struct list_head *head = &lruvec->lrugen.lists[gen][type][zone]; -+ -+ while (!list_empty(head)) { -+ bool success; -+ struct page *page = lru_to_page(head); -+ -+ VM_WARN_ON_ONCE_PAGE(PageUnevictable(page), page); -+ VM_WARN_ON_ONCE_PAGE(PageActive(page), page); -+ VM_WARN_ON_ONCE_PAGE(page_is_file_lru(page) != type, page); -+ VM_WARN_ON_ONCE_PAGE(page_zonenum(page) != zone, page); -+ -+ success = lru_gen_del_page(lruvec, page, false); -+ VM_WARN_ON_ONCE(!success); -+ add_page_to_lru_list(page, lruvec); -+ -+ if (!--remaining) -+ return false; -+ } -+ } -+ -+ return true; -+} -+ -+static void lru_gen_change_state(bool enabled) -+{ -+ static DEFINE_MUTEX(state_mutex); -+ -+ struct mem_cgroup *memcg; -+ -+ cgroup_lock(); -+ cpus_read_lock(); -+ get_online_mems(); -+ mutex_lock(&state_mutex); -+ -+ if (enabled == lru_gen_enabled()) -+ goto unlock; -+ -+ if (enabled) -+ static_branch_enable_cpuslocked(&lru_gen_caps[LRU_GEN_CORE]); -+ else -+ static_branch_disable_cpuslocked(&lru_gen_caps[LRU_GEN_CORE]); -+ -+ memcg = mem_cgroup_iter(NULL, NULL, NULL); -+ do { -+ int nid; -+ -+ for_each_node(nid) { -+ struct lruvec *lruvec = get_lruvec(memcg, nid); -+ -+ if (!lruvec) -+ continue; -+ -+ spin_lock_irq(&lruvec->lru_lock); -+ -+ VM_WARN_ON_ONCE(!seq_is_valid(lruvec)); -+ VM_WARN_ON_ONCE(!state_is_valid(lruvec)); -+ -+ lruvec->lrugen.enabled = enabled; -+ -+ while (!(enabled ? 
fill_evictable(lruvec) : drain_evictable(lruvec))) { -+ spin_unlock_irq(&lruvec->lru_lock); -+ cond_resched(); -+ spin_lock_irq(&lruvec->lru_lock); -+ } -+ -+ spin_unlock_irq(&lruvec->lru_lock); -+ } -+ -+ cond_resched(); -+ } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL))); -+unlock: -+ mutex_unlock(&state_mutex); -+ put_online_mems(); -+ cpus_read_unlock(); -+ cgroup_unlock(); -+} -+ -+/****************************************************************************** -+ * sysfs interface -+ ******************************************************************************/ -+ -+static ssize_t show_enabled(struct kobject *kobj, struct kobj_attribute *attr, char *buf) -+{ -+ unsigned int caps = 0; -+ -+ if (get_cap(LRU_GEN_CORE)) -+ caps |= BIT(LRU_GEN_CORE); -+ -+ if (arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK)) -+ caps |= BIT(LRU_GEN_MM_WALK); -+ -+ if (IS_ENABLED(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG) && get_cap(LRU_GEN_NONLEAF_YOUNG)) -+ caps |= BIT(LRU_GEN_NONLEAF_YOUNG); -+ -+ return snprintf(buf, PAGE_SIZE, "0x%04x\n", caps); -+} -+ -+static ssize_t store_enabled(struct kobject *kobj, struct kobj_attribute *attr, -+ const char *buf, size_t len) -+{ -+ int i; -+ unsigned int caps; -+ -+ if (tolower(*buf) == 'n') -+ caps = 0; -+ else if (tolower(*buf) == 'y') -+ caps = -1; -+ else if (kstrtouint(buf, 0, &caps)) -+ return -EINVAL; -+ -+ for (i = 0; i < NR_LRU_GEN_CAPS; i++) { -+ bool enabled = caps & BIT(i); -+ -+ if (i == LRU_GEN_CORE) -+ lru_gen_change_state(enabled); -+ else if (enabled) -+ static_branch_enable(&lru_gen_caps[i]); -+ else -+ static_branch_disable(&lru_gen_caps[i]); -+ } -+ -+ return len; -+} -+ -+static struct kobj_attribute lru_gen_enabled_attr = __ATTR( -+ enabled, 0644, show_enabled, store_enabled -+); -+ -+static struct attribute *lru_gen_attrs[] = { -+ &lru_gen_enabled_attr.attr, -+ NULL -+}; -+ -+static struct attribute_group lru_gen_attr_group = { -+ .name = "lru_gen", -+ .attrs = lru_gen_attrs, -+}; -+ -+/****************************************************************************** - * initialization - ******************************************************************************/ - -@@ -4855,6 +5069,7 @@ void lru_gen_init_lruvec(struct lruvec * - struct lru_gen_struct *lrugen = &lruvec->lrugen; - - lrugen->max_seq = MIN_NR_GENS + 1; -+ lrugen->enabled = lru_gen_enabled(); - - for_each_gen_type_zone(gen, type, zone) - INIT_LIST_HEAD(&lrugen->lists[gen][type][zone]); -@@ -4894,6 +5109,9 @@ static int __init init_lru_gen(void) - BUILD_BUG_ON(MIN_NR_GENS + 1 >= MAX_NR_GENS); - BUILD_BUG_ON(BIT(LRU_GEN_WIDTH) <= MAX_NR_GENS); - -+ if (sysfs_create_group(mm_kobj, &lru_gen_attr_group)) -+ pr_err("lru_gen: failed to create sysfs group\n"); -+ - return 0; - }; - late_initcall(init_lru_gen); diff --git a/target/linux/generic/backport-6.1/020-v6.1-11-mm-multi-gen-LRU-thrashing-prevention.patch b/target/linux/generic/backport-6.1/020-v6.1-11-mm-multi-gen-LRU-thrashing-prevention.patch deleted file mode 100644 index 30e20aff6edc..000000000000 --- a/target/linux/generic/backport-6.1/020-v6.1-11-mm-multi-gen-LRU-thrashing-prevention.patch +++ /dev/null @@ -1,226 +0,0 @@ -From 73d1ff551760f0c79c47ab70faa4c2ca91413f5c Mon Sep 17 00:00:00 2001 -From: Yu Zhao -Date: Sun, 18 Sep 2022 02:00:08 -0600 -Subject: [PATCH 11/29] mm: multi-gen LRU: thrashing prevention -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Add /sys/kernel/mm/lru_gen/min_ttl_ms for thrashing prevention, as -requested by many desktop users [1]. 
- -When set to value N, it prevents the working set of N milliseconds from -getting evicted. The OOM killer is triggered if this working set cannot -be kept in memory. Based on the average human detectable lag (~100ms), -N=1000 usually eliminates intolerable lags due to thrashing. Larger -values like N=3000 make lags less noticeable at the risk of premature OOM -kills. - -Compared with the size-based approach [2], this time-based approach -has the following advantages: - -1. It is easier to configure because it is agnostic to applications - and memory sizes. -2. It is more reliable because it is directly wired to the OOM killer. - -[1] https://lore.kernel.org/r/Ydza%2FzXKY9ATRoh6@google.com/ -[2] https://lore.kernel.org/r/20101028191523.GA14972@google.com/ - -Link: https://lkml.kernel.org/r/20220918080010.2920238-12-yuzhao@google.com -Signed-off-by: Yu Zhao -Acked-by: Brian Geffon -Acked-by: Jan Alexander Steffens (heftig) -Acked-by: Oleksandr Natalenko -Acked-by: Steven Barrett -Acked-by: Suleiman Souhlal -Tested-by: Daniel Byrne -Tested-by: Donald Carr -Tested-by: Holger Hoffstätte -Tested-by: Konstantin Kharlamov -Tested-by: Shuang Zhai -Tested-by: Sofia Trinh -Tested-by: Vaibhav Jain -Cc: Andi Kleen -Cc: Aneesh Kumar K.V -Cc: Barry Song -Cc: Catalin Marinas -Cc: Dave Hansen -Cc: Hillf Danton -Cc: Jens Axboe -Cc: Johannes Weiner -Cc: Jonathan Corbet -Cc: Linus Torvalds -Cc: Matthew Wilcox -Cc: Mel Gorman -Cc: Miaohe Lin -Cc: Michael Larabel -Cc: Michal Hocko -Cc: Mike Rapoport -Cc: Mike Rapoport -Cc: Peter Zijlstra -Cc: Qi Zheng -Cc: Tejun Heo -Cc: Vlastimil Babka -Cc: Will Deacon -Signed-off-by: Andrew Morton ---- - include/linux/mmzone.h | 2 ++ - mm/vmscan.c | 74 ++++++++++++++++++++++++++++++++++++++++-- - 2 files changed, 73 insertions(+), 3 deletions(-) - ---- a/include/linux/mmzone.h -+++ b/include/linux/mmzone.h -@@ -399,6 +399,8 @@ struct lru_gen_struct { - unsigned long max_seq; - /* the eviction increments the oldest generation numbers */ - unsigned long min_seq[ANON_AND_FILE]; -+ /* the birth time of each generation in jiffies */ -+ unsigned long timestamps[MAX_NR_GENS]; - /* the multi-gen LRU lists, lazily sorted on eviction */ - struct list_head lists[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES]; - /* the multi-gen LRU sizes, eventually consistent */ ---- a/mm/vmscan.c -+++ b/mm/vmscan.c -@@ -4064,6 +4064,7 @@ static void inc_max_seq(struct lruvec *l - for (type = 0; type < ANON_AND_FILE; type++) - reset_ctrl_pos(lruvec, type, false); - -+ WRITE_ONCE(lrugen->timestamps[next], jiffies); - /* make sure preceding modifications appear */ - smp_store_release(&lrugen->max_seq, lrugen->max_seq + 1); - -@@ -4193,7 +4194,7 @@ static bool should_run_aging(struct lruv - return false; - } - --static void age_lruvec(struct lruvec *lruvec, struct scan_control *sc) -+static bool age_lruvec(struct lruvec *lruvec, struct scan_control *sc, unsigned long min_ttl) - { - bool need_aging; - unsigned long nr_to_scan; -@@ -4207,16 +4208,36 @@ static void age_lruvec(struct lruvec *lr - mem_cgroup_calculate_protection(NULL, memcg); - - if (mem_cgroup_below_min(memcg)) -- return; -+ return false; - - need_aging = should_run_aging(lruvec, max_seq, min_seq, sc, swappiness, &nr_to_scan); -+ -+ if (min_ttl) { -+ int gen = lru_gen_from_seq(min_seq[LRU_GEN_FILE]); -+ unsigned long birth = READ_ONCE(lruvec->lrugen.timestamps[gen]); -+ -+ if (time_is_after_jiffies(birth + min_ttl)) -+ return false; -+ -+ /* the size is likely too small to be helpful */ -+ if (!nr_to_scan && sc->priority != DEF_PRIORITY) -+ 
return false; -+ } -+ - if (need_aging) - try_to_inc_max_seq(lruvec, max_seq, sc, swappiness); -+ -+ return true; - } - -+/* to protect the working set of the last N jiffies */ -+static unsigned long lru_gen_min_ttl __read_mostly; -+ - static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc) - { - struct mem_cgroup *memcg; -+ bool success = false; -+ unsigned long min_ttl = READ_ONCE(lru_gen_min_ttl); - - VM_WARN_ON_ONCE(!current_is_kswapd()); - -@@ -4239,12 +4260,32 @@ static void lru_gen_age_node(struct pgli - do { - struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); - -- age_lruvec(lruvec, sc); -+ if (age_lruvec(lruvec, sc, min_ttl)) -+ success = true; - - cond_resched(); - } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL))); - - clear_mm_walk(); -+ -+ /* check the order to exclude compaction-induced reclaim */ -+ if (success || !min_ttl || sc->order) -+ return; -+ -+ /* -+ * The main goal is to OOM kill if every generation from all memcgs is -+ * younger than min_ttl. However, another possibility is all memcgs are -+ * either below min or empty. -+ */ -+ if (mutex_trylock(&oom_lock)) { -+ struct oom_control oc = { -+ .gfp_mask = sc->gfp_mask, -+ }; -+ -+ out_of_memory(&oc); -+ -+ mutex_unlock(&oom_lock); -+ } - } - - /* -@@ -5002,6 +5043,28 @@ unlock: - * sysfs interface - ******************************************************************************/ - -+static ssize_t show_min_ttl(struct kobject *kobj, struct kobj_attribute *attr, char *buf) -+{ -+ return sprintf(buf, "%u\n", jiffies_to_msecs(READ_ONCE(lru_gen_min_ttl))); -+} -+ -+static ssize_t store_min_ttl(struct kobject *kobj, struct kobj_attribute *attr, -+ const char *buf, size_t len) -+{ -+ unsigned int msecs; -+ -+ if (kstrtouint(buf, 0, &msecs)) -+ return -EINVAL; -+ -+ WRITE_ONCE(lru_gen_min_ttl, msecs_to_jiffies(msecs)); -+ -+ return len; -+} -+ -+static struct kobj_attribute lru_gen_min_ttl_attr = __ATTR( -+ min_ttl_ms, 0644, show_min_ttl, store_min_ttl -+); -+ - static ssize_t show_enabled(struct kobject *kobj, struct kobj_attribute *attr, char *buf) - { - unsigned int caps = 0; -@@ -5050,6 +5113,7 @@ static struct kobj_attribute lru_gen_ena - ); - - static struct attribute *lru_gen_attrs[] = { -+ &lru_gen_min_ttl_attr.attr, - &lru_gen_enabled_attr.attr, - NULL - }; -@@ -5065,12 +5129,16 @@ static struct attribute_group lru_gen_at - - void lru_gen_init_lruvec(struct lruvec *lruvec) - { -+ int i; - int gen, type, zone; - struct lru_gen_struct *lrugen = &lruvec->lrugen; - - lrugen->max_seq = MIN_NR_GENS + 1; - lrugen->enabled = lru_gen_enabled(); - -+ for (i = 0; i <= MIN_NR_GENS + 1; i++) -+ lrugen->timestamps[i] = jiffies; -+ - for_each_gen_type_zone(gen, type, zone) - INIT_LIST_HEAD(&lrugen->lists[gen][type][zone]); - diff --git a/target/linux/generic/backport-6.1/020-v6.1-12-mm-multi-gen-LRU-debugfs-interface.patch b/target/linux/generic/backport-6.1/020-v6.1-12-mm-multi-gen-LRU-debugfs-interface.patch deleted file mode 100644 index 482e714bb6aa..000000000000 --- a/target/linux/generic/backport-6.1/020-v6.1-12-mm-multi-gen-LRU-debugfs-interface.patch +++ /dev/null @@ -1,579 +0,0 @@ -From 530716d008ca26315f246cd70dc1cefc636beaa4 Mon Sep 17 00:00:00 2001 -From: Yu Zhao -Date: Sun, 18 Sep 2022 02:00:09 -0600 -Subject: [PATCH 12/29] mm: multi-gen LRU: debugfs interface -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Add /sys/kernel/debug/lru_gen for working set estimation and proactive -reclaim. 
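
Before the detailed listing below, a hedged sketch of how proactive reclaim might be driven through this file from C. The command syntax ('+' to trigger aging, '-' to trigger eviction, followed by a memcg ID, a node ID and a sequence number, with optional swappiness and a trailing argument) is inferred from the parser added below (lru_gen_seq_write() and run_cmd()); the concrete numbers are placeholders that have to be taken from reading the file first, and debugfs is assumed to be mounted at /sys/kernel/debug.

  #include <fcntl.h>
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>

  #define LRU_GEN_DEBUGFS "/sys/kernel/debug/lru_gen"

  int main(void)
  {
      /* Placeholder values: memcg 1, node 0, sequence number 4. Only a
       * value equal to that memcg's current max_seq actually triggers
       * aging (run_aging() ignores smaller values and rejects larger ones). */
      const char *cmd = "+ 1 0 4\n";
      int fd = open(LRU_GEN_DEBUGFS, O_WRONLY);

      if (fd < 0) {
          perror(LRU_GEN_DEBUGFS);
          return 1;
      }

      if (write(fd, cmd, strlen(cmd)) < 0)
          perror("write");

      close(fd);
      return 0;
  }

Reading the same file (or lru_gen_full) shows the per-memcg, per-node generations used to estimate the working set.
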
These techniques are commonly used to optimize job scheduling -(bin packing) in data centers [1][2]. - -Compared with the page table-based approach and the PFN-based -approach, this lruvec-based approach has the following advantages: -1. It offers better choices because it is aware of memcgs, NUMA nodes, - shared mappings and unmapped page cache. -2. It is more scalable because it is O(nr_hot_pages), whereas the - PFN-based approach is O(nr_total_pages). - -Add /sys/kernel/debug/lru_gen_full for debugging. - -[1] https://dl.acm.org/doi/10.1145/3297858.3304053 -[2] https://dl.acm.org/doi/10.1145/3503222.3507731 - -Link: https://lkml.kernel.org/r/20220918080010.2920238-13-yuzhao@google.com -Signed-off-by: Yu Zhao -Reviewed-by: Qi Zheng -Acked-by: Brian Geffon -Acked-by: Jan Alexander Steffens (heftig) -Acked-by: Oleksandr Natalenko -Acked-by: Steven Barrett -Acked-by: Suleiman Souhlal -Tested-by: Daniel Byrne -Tested-by: Donald Carr -Tested-by: Holger Hoffstätte -Tested-by: Konstantin Kharlamov -Tested-by: Shuang Zhai -Tested-by: Sofia Trinh -Tested-by: Vaibhav Jain -Cc: Andi Kleen -Cc: Aneesh Kumar K.V -Cc: Barry Song -Cc: Catalin Marinas -Cc: Dave Hansen -Cc: Hillf Danton -Cc: Jens Axboe -Cc: Johannes Weiner -Cc: Jonathan Corbet -Cc: Linus Torvalds -Cc: Matthew Wilcox -Cc: Mel Gorman -Cc: Miaohe Lin -Cc: Michael Larabel -Cc: Michal Hocko -Cc: Mike Rapoport -Cc: Mike Rapoport -Cc: Peter Zijlstra -Cc: Tejun Heo -Cc: Vlastimil Babka -Cc: Will Deacon -Signed-off-by: Andrew Morton ---- - include/linux/nodemask.h | 1 + - mm/vmscan.c | 411 ++++++++++++++++++++++++++++++++++++++- - 2 files changed, 402 insertions(+), 10 deletions(-) - ---- a/include/linux/nodemask.h -+++ b/include/linux/nodemask.h -@@ -485,6 +485,7 @@ static inline int num_node_state(enum no - #define first_online_node 0 - #define first_memory_node 0 - #define next_online_node(nid) (MAX_NUMNODES) -+#define next_memory_node(nid) (MAX_NUMNODES) - #define nr_node_ids 1U - #define nr_online_nodes 1U - ---- a/mm/vmscan.c -+++ b/mm/vmscan.c -@@ -53,6 +53,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -3968,12 +3969,40 @@ static void clear_mm_walk(void) - kfree(walk); - } - --static void inc_min_seq(struct lruvec *lruvec, int type) -+static bool inc_min_seq(struct lruvec *lruvec, int type, bool can_swap) - { -+ int zone; -+ int remaining = MAX_LRU_BATCH; - struct lru_gen_struct *lrugen = &lruvec->lrugen; -+ int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]); -+ -+ if (type == LRU_GEN_ANON && !can_swap) -+ goto done; -+ -+ /* prevent cold/hot inversion if force_scan is true */ -+ for (zone = 0; zone < MAX_NR_ZONES; zone++) { -+ struct list_head *head = &lrugen->lists[old_gen][type][zone]; -+ -+ while (!list_empty(head)) { -+ struct page *page = lru_to_page(head); -+ -+ VM_WARN_ON_ONCE_PAGE(PageUnevictable(page), page); -+ VM_WARN_ON_ONCE_PAGE(PageActive(page), page); -+ VM_WARN_ON_ONCE_PAGE(page_is_file_lru(page) != type, page); -+ VM_WARN_ON_ONCE_PAGE(page_zonenum(page) != zone, page); - -+ new_gen = page_inc_gen(lruvec, page, false); -+ list_move_tail(&page->lru, &lrugen->lists[new_gen][type][zone]); -+ -+ if (!--remaining) -+ return false; -+ } -+ } -+done: - reset_ctrl_pos(lruvec, type, true); - WRITE_ONCE(lrugen->min_seq[type], lrugen->min_seq[type] + 1); -+ -+ return true; - } - - static bool try_to_inc_min_seq(struct lruvec *lruvec, bool can_swap) -@@ -4019,7 +4048,7 @@ next: - return success; - } - --static void inc_max_seq(struct lruvec *lruvec, bool can_swap) -+static void 
inc_max_seq(struct lruvec *lruvec, bool can_swap, bool force_scan) - { - int prev, next; - int type, zone; -@@ -4033,9 +4062,13 @@ static void inc_max_seq(struct lruvec *l - if (get_nr_gens(lruvec, type) != MAX_NR_GENS) - continue; - -- VM_WARN_ON_ONCE(type == LRU_GEN_FILE || can_swap); -+ VM_WARN_ON_ONCE(!force_scan && (type == LRU_GEN_FILE || can_swap)); - -- inc_min_seq(lruvec, type); -+ while (!inc_min_seq(lruvec, type, can_swap)) { -+ spin_unlock_irq(&lruvec->lru_lock); -+ cond_resched(); -+ spin_lock_irq(&lruvec->lru_lock); -+ } - } - - /* -@@ -4072,7 +4105,7 @@ static void inc_max_seq(struct lruvec *l - } - - static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq, -- struct scan_control *sc, bool can_swap) -+ struct scan_control *sc, bool can_swap, bool force_scan) - { - bool success; - struct lru_gen_mm_walk *walk; -@@ -4093,7 +4126,7 @@ static bool try_to_inc_max_seq(struct lr - * handful of PTEs. Spreading the work out over a period of time usually - * is less efficient, but it avoids bursty page faults. - */ -- if (!(arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK))) { -+ if (!force_scan && !(arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK))) { - success = iterate_mm_list_nowalk(lruvec, max_seq); - goto done; - } -@@ -4107,7 +4140,7 @@ static bool try_to_inc_max_seq(struct lr - walk->lruvec = lruvec; - walk->max_seq = max_seq; - walk->can_swap = can_swap; -- walk->force_scan = false; -+ walk->force_scan = force_scan; - - do { - success = iterate_mm_list(lruvec, walk, &mm); -@@ -4127,7 +4160,7 @@ done: - - VM_WARN_ON_ONCE(max_seq != READ_ONCE(lrugen->max_seq)); - -- inc_max_seq(lruvec, can_swap); -+ inc_max_seq(lruvec, can_swap, force_scan); - /* either this sees any waiters or they will see updated max_seq */ - if (wq_has_sleeper(&lruvec->mm_state.wait)) - wake_up_all(&lruvec->mm_state.wait); -@@ -4225,7 +4258,7 @@ static bool age_lruvec(struct lruvec *lr - } - - if (need_aging) -- try_to_inc_max_seq(lruvec, max_seq, sc, swappiness); -+ try_to_inc_max_seq(lruvec, max_seq, sc, swappiness, false); - - return true; - } -@@ -4784,7 +4817,7 @@ static unsigned long get_nr_to_scan(stru - if (current_is_kswapd()) - return 0; - -- if (try_to_inc_max_seq(lruvec, max_seq, sc, can_swap)) -+ if (try_to_inc_max_seq(lruvec, max_seq, sc, can_swap, false)) - return nr_to_scan; - done: - return min_seq[!can_swap] + MIN_NR_GENS <= max_seq ? 
nr_to_scan : 0; -@@ -5124,6 +5157,361 @@ static struct attribute_group lru_gen_at - }; - - /****************************************************************************** -+ * debugfs interface -+ ******************************************************************************/ -+ -+static void *lru_gen_seq_start(struct seq_file *m, loff_t *pos) -+{ -+ struct mem_cgroup *memcg; -+ loff_t nr_to_skip = *pos; -+ -+ m->private = kvmalloc(PATH_MAX, GFP_KERNEL); -+ if (!m->private) -+ return ERR_PTR(-ENOMEM); -+ -+ memcg = mem_cgroup_iter(NULL, NULL, NULL); -+ do { -+ int nid; -+ -+ for_each_node_state(nid, N_MEMORY) { -+ if (!nr_to_skip--) -+ return get_lruvec(memcg, nid); -+ } -+ } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL))); -+ -+ return NULL; -+} -+ -+static void lru_gen_seq_stop(struct seq_file *m, void *v) -+{ -+ if (!IS_ERR_OR_NULL(v)) -+ mem_cgroup_iter_break(NULL, lruvec_memcg(v)); -+ -+ kvfree(m->private); -+ m->private = NULL; -+} -+ -+static void *lru_gen_seq_next(struct seq_file *m, void *v, loff_t *pos) -+{ -+ int nid = lruvec_pgdat(v)->node_id; -+ struct mem_cgroup *memcg = lruvec_memcg(v); -+ -+ ++*pos; -+ -+ nid = next_memory_node(nid); -+ if (nid == MAX_NUMNODES) { -+ memcg = mem_cgroup_iter(NULL, memcg, NULL); -+ if (!memcg) -+ return NULL; -+ -+ nid = first_memory_node; -+ } -+ -+ return get_lruvec(memcg, nid); -+} -+ -+static void lru_gen_seq_show_full(struct seq_file *m, struct lruvec *lruvec, -+ unsigned long max_seq, unsigned long *min_seq, -+ unsigned long seq) -+{ -+ int i; -+ int type, tier; -+ int hist = lru_hist_from_seq(seq); -+ struct lru_gen_struct *lrugen = &lruvec->lrugen; -+ -+ for (tier = 0; tier < MAX_NR_TIERS; tier++) { -+ seq_printf(m, " %10d", tier); -+ for (type = 0; type < ANON_AND_FILE; type++) { -+ const char *s = " "; -+ unsigned long n[3] = {}; -+ -+ if (seq == max_seq) { -+ s = "RT "; -+ n[0] = READ_ONCE(lrugen->avg_refaulted[type][tier]); -+ n[1] = READ_ONCE(lrugen->avg_total[type][tier]); -+ } else if (seq == min_seq[type] || NR_HIST_GENS > 1) { -+ s = "rep"; -+ n[0] = atomic_long_read(&lrugen->refaulted[hist][type][tier]); -+ n[1] = atomic_long_read(&lrugen->evicted[hist][type][tier]); -+ if (tier) -+ n[2] = READ_ONCE(lrugen->protected[hist][type][tier - 1]); -+ } -+ -+ for (i = 0; i < 3; i++) -+ seq_printf(m, " %10lu%c", n[i], s[i]); -+ } -+ seq_putc(m, '\n'); -+ } -+ -+ seq_puts(m, " "); -+ for (i = 0; i < NR_MM_STATS; i++) { -+ const char *s = " "; -+ unsigned long n = 0; -+ -+ if (seq == max_seq && NR_HIST_GENS == 1) { -+ s = "LOYNFA"; -+ n = READ_ONCE(lruvec->mm_state.stats[hist][i]); -+ } else if (seq != max_seq && NR_HIST_GENS > 1) { -+ s = "loynfa"; -+ n = READ_ONCE(lruvec->mm_state.stats[hist][i]); -+ } -+ -+ seq_printf(m, " %10lu%c", n, s[i]); -+ } -+ seq_putc(m, '\n'); -+} -+ -+static int lru_gen_seq_show(struct seq_file *m, void *v) -+{ -+ unsigned long seq; -+ bool full = !debugfs_real_fops(m->file)->write; -+ struct lruvec *lruvec = v; -+ struct lru_gen_struct *lrugen = &lruvec->lrugen; -+ int nid = lruvec_pgdat(lruvec)->node_id; -+ struct mem_cgroup *memcg = lruvec_memcg(lruvec); -+ DEFINE_MAX_SEQ(lruvec); -+ DEFINE_MIN_SEQ(lruvec); -+ -+ if (nid == first_memory_node) { -+ const char *path = memcg ? 
m->private : ""; -+ -+#ifdef CONFIG_MEMCG -+ if (memcg) -+ cgroup_path(memcg->css.cgroup, m->private, PATH_MAX); -+#endif -+ seq_printf(m, "memcg %5hu %s\n", mem_cgroup_id(memcg), path); -+ } -+ -+ seq_printf(m, " node %5d\n", nid); -+ -+ if (!full) -+ seq = min_seq[LRU_GEN_ANON]; -+ else if (max_seq >= MAX_NR_GENS) -+ seq = max_seq - MAX_NR_GENS + 1; -+ else -+ seq = 0; -+ -+ for (; seq <= max_seq; seq++) { -+ int type, zone; -+ int gen = lru_gen_from_seq(seq); -+ unsigned long birth = READ_ONCE(lruvec->lrugen.timestamps[gen]); -+ -+ seq_printf(m, " %10lu %10u", seq, jiffies_to_msecs(jiffies - birth)); -+ -+ for (type = 0; type < ANON_AND_FILE; type++) { -+ unsigned long size = 0; -+ char mark = full && seq < min_seq[type] ? 'x' : ' '; -+ -+ for (zone = 0; zone < MAX_NR_ZONES; zone++) -+ size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); -+ -+ seq_printf(m, " %10lu%c", size, mark); -+ } -+ -+ seq_putc(m, '\n'); -+ -+ if (full) -+ lru_gen_seq_show_full(m, lruvec, max_seq, min_seq, seq); -+ } -+ -+ return 0; -+} -+ -+static const struct seq_operations lru_gen_seq_ops = { -+ .start = lru_gen_seq_start, -+ .stop = lru_gen_seq_stop, -+ .next = lru_gen_seq_next, -+ .show = lru_gen_seq_show, -+}; -+ -+static int run_aging(struct lruvec *lruvec, unsigned long seq, struct scan_control *sc, -+ bool can_swap, bool force_scan) -+{ -+ DEFINE_MAX_SEQ(lruvec); -+ DEFINE_MIN_SEQ(lruvec); -+ -+ if (seq < max_seq) -+ return 0; -+ -+ if (seq > max_seq) -+ return -EINVAL; -+ -+ if (!force_scan && min_seq[!can_swap] + MAX_NR_GENS - 1 <= max_seq) -+ return -ERANGE; -+ -+ try_to_inc_max_seq(lruvec, max_seq, sc, can_swap, force_scan); -+ -+ return 0; -+} -+ -+static int run_eviction(struct lruvec *lruvec, unsigned long seq, struct scan_control *sc, -+ int swappiness, unsigned long nr_to_reclaim) -+{ -+ DEFINE_MAX_SEQ(lruvec); -+ -+ if (seq + MIN_NR_GENS > max_seq) -+ return -EINVAL; -+ -+ sc->nr_reclaimed = 0; -+ -+ while (!signal_pending(current)) { -+ DEFINE_MIN_SEQ(lruvec); -+ -+ if (seq < min_seq[!swappiness]) -+ return 0; -+ -+ if (sc->nr_reclaimed >= nr_to_reclaim) -+ return 0; -+ -+ if (!evict_pages(lruvec, sc, swappiness, NULL)) -+ return 0; -+ -+ cond_resched(); -+ } -+ -+ return -EINTR; -+} -+ -+static int run_cmd(char cmd, int memcg_id, int nid, unsigned long seq, -+ struct scan_control *sc, int swappiness, unsigned long opt) -+{ -+ struct lruvec *lruvec; -+ int err = -EINVAL; -+ struct mem_cgroup *memcg = NULL; -+ -+ if (nid < 0 || nid >= MAX_NUMNODES || !node_state(nid, N_MEMORY)) -+ return -EINVAL; -+ -+ if (!mem_cgroup_disabled()) { -+ rcu_read_lock(); -+ memcg = mem_cgroup_from_id(memcg_id); -+#ifdef CONFIG_MEMCG -+ if (memcg && !css_tryget(&memcg->css)) -+ memcg = NULL; -+#endif -+ rcu_read_unlock(); -+ -+ if (!memcg) -+ return -EINVAL; -+ } -+ -+ if (memcg_id != mem_cgroup_id(memcg)) -+ goto done; -+ -+ lruvec = get_lruvec(memcg, nid); -+ -+ if (swappiness < 0) -+ swappiness = get_swappiness(lruvec, sc); -+ else if (swappiness > 200) -+ goto done; -+ -+ switch (cmd) { -+ case '+': -+ err = run_aging(lruvec, seq, sc, swappiness, opt); -+ break; -+ case '-': -+ err = run_eviction(lruvec, seq, sc, swappiness, opt); -+ break; -+ } -+done: -+ mem_cgroup_put(memcg); -+ -+ return err; -+} -+ -+static ssize_t lru_gen_seq_write(struct file *file, const char __user *src, -+ size_t len, loff_t *pos) -+{ -+ void *buf; -+ char *cur, *next; -+ unsigned int flags; -+ struct blk_plug plug; -+ int err = -EINVAL; -+ struct scan_control sc = { -+ .may_writepage = true, -+ .may_unmap = true, -+ 
.may_swap = true, -+ .reclaim_idx = MAX_NR_ZONES - 1, -+ .gfp_mask = GFP_KERNEL, -+ }; -+ -+ buf = kvmalloc(len + 1, GFP_KERNEL); -+ if (!buf) -+ return -ENOMEM; -+ -+ if (copy_from_user(buf, src, len)) { -+ kvfree(buf); -+ return -EFAULT; -+ } -+ -+ set_task_reclaim_state(current, &sc.reclaim_state); -+ flags = memalloc_noreclaim_save(); -+ blk_start_plug(&plug); -+ if (!set_mm_walk(NULL)) { -+ err = -ENOMEM; -+ goto done; -+ } -+ -+ next = buf; -+ next[len] = '\0'; -+ -+ while ((cur = strsep(&next, ",;\n"))) { -+ int n; -+ int end; -+ char cmd; -+ unsigned int memcg_id; -+ unsigned int nid; -+ unsigned long seq; -+ unsigned int swappiness = -1; -+ unsigned long opt = -1; -+ -+ cur = skip_spaces(cur); -+ if (!*cur) -+ continue; -+ -+ n = sscanf(cur, "%c %u %u %lu %n %u %n %lu %n", &cmd, &memcg_id, &nid, -+ &seq, &end, &swappiness, &end, &opt, &end); -+ if (n < 4 || cur[end]) { -+ err = -EINVAL; -+ break; -+ } -+ -+ err = run_cmd(cmd, memcg_id, nid, seq, &sc, swappiness, opt); -+ if (err) -+ break; -+ } -+done: -+ clear_mm_walk(); -+ blk_finish_plug(&plug); -+ memalloc_noreclaim_restore(flags); -+ set_task_reclaim_state(current, NULL); -+ -+ kvfree(buf); -+ -+ return err ? : len; -+} -+ -+static int lru_gen_seq_open(struct inode *inode, struct file *file) -+{ -+ return seq_open(file, &lru_gen_seq_ops); -+} -+ -+static const struct file_operations lru_gen_rw_fops = { -+ .open = lru_gen_seq_open, -+ .read = seq_read, -+ .write = lru_gen_seq_write, -+ .llseek = seq_lseek, -+ .release = seq_release, -+}; -+ -+static const struct file_operations lru_gen_ro_fops = { -+ .open = lru_gen_seq_open, -+ .read = seq_read, -+ .llseek = seq_lseek, -+ .release = seq_release, -+}; -+ -+/****************************************************************************** - * initialization - ******************************************************************************/ - -@@ -5180,6 +5568,9 @@ static int __init init_lru_gen(void) - if (sysfs_create_group(mm_kobj, &lru_gen_attr_group)) - pr_err("lru_gen: failed to create sysfs group\n"); - -+ debugfs_create_file("lru_gen", 0644, NULL, NULL, &lru_gen_rw_fops); -+ debugfs_create_file("lru_gen_full", 0444, NULL, NULL, &lru_gen_ro_fops); -+ - return 0; - }; - late_initcall(init_lru_gen); diff --git a/target/linux/generic/backport-6.1/020-v6.1-13-mm-mglru-don-t-sync-disk-for-each-aging-cycle.patch b/target/linux/generic/backport-6.1/020-v6.1-13-mm-mglru-don-t-sync-disk-for-each-aging-cycle.patch deleted file mode 100644 index fd4aa7274732..000000000000 --- a/target/linux/generic/backport-6.1/020-v6.1-13-mm-mglru-don-t-sync-disk-for-each-aging-cycle.patch +++ /dev/null @@ -1,32 +0,0 @@ -From 92d430e8955c976eacb7cc91d7ff849c0dd009af Mon Sep 17 00:00:00 2001 -From: Yu Zhao -Date: Wed, 28 Sep 2022 13:36:58 -0600 -Subject: [PATCH 13/29] mm/mglru: don't sync disk for each aging cycle - -wakeup_flusher_threads() was added under the assumption that if a system -runs out of clean cold pages, it might want to write back dirty pages more -aggressively so that they can become clean and be dropped. - -However, doing so can breach the rate limit a system wants to impose on -writeback, resulting in early SSD wearout. 
- -Link: https://lkml.kernel.org/r/YzSiWq9UEER5LKup@google.com -Fixes: bd74fdaea146 ("mm: multi-gen LRU: support page table walks") -Signed-off-by: Yu Zhao -Reported-by: Axel Rasmussen -Signed-off-by: Andrew Morton ---- - mm/vmscan.c | 2 -- - 1 file changed, 2 deletions(-) - ---- a/mm/vmscan.c -+++ b/mm/vmscan.c -@@ -4165,8 +4165,6 @@ done: - if (wq_has_sleeper(&lruvec->mm_state.wait)) - wake_up_all(&lruvec->mm_state.wait); - -- wakeup_flusher_threads(WB_REASON_VMSCAN); -- - return true; - } - diff --git a/target/linux/generic/backport-6.1/020-v6.1-14-mm-multi-gen-LRU-retry-pages-written-back-while-isol.patch b/target/linux/generic/backport-6.1/020-v6.1-14-mm-multi-gen-LRU-retry-pages-written-back-while-isol.patch deleted file mode 100644 index 31b35cbc4b69..000000000000 --- a/target/linux/generic/backport-6.1/020-v6.1-14-mm-multi-gen-LRU-retry-pages-written-back-while-isol.patch +++ /dev/null @@ -1,124 +0,0 @@ -From 6f315879ad750391a0b1fab8c9170bc054a5f5d7 Mon Sep 17 00:00:00 2001 -From: Yu Zhao -Date: Tue, 15 Nov 2022 18:38:07 -0700 -Subject: [PATCH 14/29] mm: multi-gen LRU: retry pages written back while - isolated - -The page reclaim isolates a batch of pages from the tail of one of the -LRU lists and works on those pages one by one. For a suitable -swap-backed page, if the swap device is async, it queues that page for -writeback. After the page reclaim finishes an entire batch, it puts back -the pages it queued for writeback to the head of the original LRU list. - -In the meantime, the page writeback flushes the queued pages also by -batches. Its batching logic is independent from that of the page reclaim. -For each of the pages it writes back, the page writeback calls -rotate_reclaimable_page() which tries to rotate a page to the tail. - -rotate_reclaimable_page() only works for a page after the page reclaim -has put it back. If an async swap device is fast enough, the page -writeback can finish with that page while the page reclaim is still -working on the rest of the batch containing it. In this case, that page -will remain at the head and the page reclaim will not retry it before -reaching there. - -This patch adds a retry to evict_pages(). After evict_pages() has -finished an entire batch and before it puts back pages it cannot free -immediately, it retries those that may have missed the rotation. - -Before this patch, ~60% of pages swapped to an Intel Optane missed -rotate_reclaimable_page(). After this patch, ~99% of missed pages were -reclaimed upon retry. - -This problem affects relatively slow async swap devices like Samsung 980 -Pro much less and does not affect sync swap devices like zram or zswap at -all. 
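
The race described above boils down to a batching pattern: items whose asynchronous completion (here, writeback) finishes while the batch is still being processed would otherwise be missed until the next pass. The toy C model below illustrates the single-retry idea only; none of these types or names exist in the kernel, and the real code (evict_pages() in this patch) keys the retry on page flags rather than a boolean.

  #include <stdbool.h>
  #include <stdio.h>

  struct item {
      int id;
      bool done_async;   /* e.g. writeback finished while still isolated */
      bool reclaimed;
  };

  static void process_batch(struct item *batch, int n)
  {
      struct item *retry[16];
      int nr_retry = 0;
      int i;

      for (i = 0; i < n; i++) {
          if (batch[i].done_async) {
              /* would otherwise go back to the head and be missed */
              if (nr_retry < 16)
                  retry[nr_retry++] = &batch[i];
              continue;
          }
          batch[i].reclaimed = true;
      }

      /* single retry pass, mirroring the skip_retry flag in this patch */
      for (i = 0; i < nr_retry; i++)
          retry[i]->reclaimed = true;
  }

  int main(void)
  {
      struct item batch[4] = {
          { .id = 0 }, { .id = 1, .done_async = true },
          { .id = 2 }, { .id = 3, .done_async = true },
      };
      int i;

      process_batch(batch, 4);
      for (i = 0; i < 4; i++)
          printf("item %d reclaimed: %d\n", batch[i].id, batch[i].reclaimed);
      return 0;
  }
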
- -Link: https://lkml.kernel.org/r/20221116013808.3995280-1-yuzhao@google.com -Fixes: ac35a4902374 ("mm: multi-gen LRU: minimal implementation") -Signed-off-by: Yu Zhao -Cc: "Yin, Fengwei" -Signed-off-by: Andrew Morton ---- - mm/vmscan.c | 48 +++++++++++++++++++++++++++++++++++++----------- - 1 file changed, 37 insertions(+), 11 deletions(-) - ---- a/mm/vmscan.c -+++ b/mm/vmscan.c -@@ -4723,10 +4723,13 @@ static int evict_pages(struct lruvec *lr - int scanned; - int reclaimed; - LIST_HEAD(list); -+ LIST_HEAD(clean); - struct page *page; -+ struct page *next; - enum vm_event_item item; - struct reclaim_stat stat; - struct lru_gen_mm_walk *walk; -+ bool skip_retry = false; - struct mem_cgroup *memcg = lruvec_memcg(lruvec); - struct pglist_data *pgdat = lruvec_pgdat(lruvec); - -@@ -4743,20 +4746,37 @@ static int evict_pages(struct lruvec *lr - - if (list_empty(&list)) - return scanned; -- -+retry: - reclaimed = shrink_page_list(&list, pgdat, sc, &stat, false); -+ sc->nr_reclaimed += reclaimed; - -- list_for_each_entry(page, &list, lru) { -- /* restore LRU_REFS_FLAGS cleared by isolate_page() */ -- if (PageWorkingset(page)) -- SetPageReferenced(page); -+ list_for_each_entry_safe_reverse(page, next, &list, lru) { -+ if (!page_evictable(page)) { -+ list_del(&page->lru); -+ putback_lru_page(page); -+ continue; -+ } - -- /* don't add rejected pages to the oldest generation */ - if (PageReclaim(page) && -- (PageDirty(page) || PageWriteback(page))) -- ClearPageActive(page); -- else -- SetPageActive(page); -+ (PageDirty(page) || PageWriteback(page))) { -+ /* restore LRU_REFS_FLAGS cleared by isolate_page() */ -+ if (PageWorkingset(page)) -+ SetPageReferenced(page); -+ continue; -+ } -+ -+ if (skip_retry || PageActive(page) || PageReferenced(page) || -+ page_mapped(page) || PageLocked(page) || -+ PageDirty(page) || PageWriteback(page)) { -+ /* don't add rejected pages to the oldest generation */ -+ set_mask_bits(&page->flags, LRU_REFS_MASK | LRU_REFS_FLAGS, -+ BIT(PG_active)); -+ continue; -+ } -+ -+ /* retry pages that may have missed rotate_reclaimable_page() */ -+ list_move(&page->lru, &clean); -+ sc->nr_scanned -= thp_nr_pages(page); - } - - spin_lock_irq(&lruvec->lru_lock); -@@ -4778,7 +4798,13 @@ static int evict_pages(struct lruvec *lr - mem_cgroup_uncharge_list(&list); - free_unref_page_list(&list); - -- sc->nr_reclaimed += reclaimed; -+ INIT_LIST_HEAD(&list); -+ list_splice_init(&clean, &list); -+ -+ if (!list_empty(&list)) { -+ skip_retry = true; -+ goto retry; -+ } - - if (need_swapping && type == LRU_GEN_ANON) - *need_swapping = true; diff --git a/target/linux/generic/backport-6.1/020-v6.1-15-mm-multi-gen-LRU-move-lru_gen_add_mm-out-of-IRQ-off-.patch b/target/linux/generic/backport-6.1/020-v6.1-15-mm-multi-gen-LRU-move-lru_gen_add_mm-out-of-IRQ-off-.patch deleted file mode 100644 index 5b1d378504a8..000000000000 --- a/target/linux/generic/backport-6.1/020-v6.1-15-mm-multi-gen-LRU-move-lru_gen_add_mm-out-of-IRQ-off-.patch +++ /dev/null @@ -1,49 +0,0 @@ -From 255bb0ac393f1c2818cd75af45a9226300ab3daf Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Wed, 26 Oct 2022 15:48:30 +0200 -Subject: [PATCH 15/29] mm: multi-gen LRU: move lru_gen_add_mm() out of IRQ-off - region - -lru_gen_add_mm() has been added within an IRQ-off region in the commit -mentioned below. The other invocations of lru_gen_add_mm() are not within -an IRQ-off region. 
- -The invocation within IRQ-off region is problematic on PREEMPT_RT because -the function is using a spin_lock_t which must not be used within -IRQ-disabled regions. - -The other invocations of lru_gen_add_mm() occur while -task_struct::alloc_lock is acquired. Move lru_gen_add_mm() after -interrupts are enabled and before task_unlock(). - -Link: https://lkml.kernel.org/r/20221026134830.711887-1-bigeasy@linutronix.de -Fixes: bd74fdaea1460 ("mm: multi-gen LRU: support page table walks") -Signed-off-by: Sebastian Andrzej Siewior -Acked-by: Yu Zhao -Cc: Al Viro -Cc: "Eric W . Biederman" -Cc: Kees Cook -Cc: Thomas Gleixner -Signed-off-by: Andrew Morton ---- - fs/exec.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/fs/exec.c -+++ b/fs/exec.c -@@ -1013,7 +1013,6 @@ static int exec_mmap(struct mm_struct *m - active_mm = tsk->active_mm; - tsk->active_mm = mm; - tsk->mm = mm; -- lru_gen_add_mm(mm); - /* - * This prevents preemption while active_mm is being loaded and - * it and mm are being updated, which could cause problems for -@@ -1028,6 +1027,7 @@ static int exec_mmap(struct mm_struct *m - local_irq_enable(); - tsk->mm->vmacache_seqnum = 0; - vmacache_flush(tsk); -+ lru_gen_add_mm(mm); - task_unlock(tsk); - lru_gen_use_mm(mm); - if (old_mm) { diff --git a/target/linux/generic/backport-6.1/020-v6.1-17-mm-add-dummy-pmd_young-for-architectures-not-having-.patch b/target/linux/generic/backport-6.1/020-v6.1-17-mm-add-dummy-pmd_young-for-architectures-not-having-.patch deleted file mode 100644 index 1c10c168a50c..000000000000 --- a/target/linux/generic/backport-6.1/020-v6.1-17-mm-add-dummy-pmd_young-for-architectures-not-having-.patch +++ /dev/null @@ -1,96 +0,0 @@ -From c5ec455ebd2b488d91de9d8915a0c8036a2a04dd Mon Sep 17 00:00:00 2001 -From: Juergen Gross -Date: Wed, 30 Nov 2022 14:49:41 -0800 -Subject: [PATCH 17/29] mm: add dummy pmd_young() for architectures not having - it - -In order to avoid #ifdeffery add a dummy pmd_young() implementation as a -fallback. This is required for the later patch "mm: introduce -arch_has_hw_nonleaf_pmd_young()". - -Link: https://lkml.kernel.org/r/fd3ac3cd-7349-6bbd-890a-71a9454ca0b3@suse.com -Signed-off-by: Juergen Gross -Acked-by: Yu Zhao -Cc: Borislav Petkov -Cc: Dave Hansen -Cc: Geert Uytterhoeven -Cc: "H. 
Peter Anvin" -Cc: Ingo Molnar -Cc: Sander Eikelenboom -Cc: Thomas Gleixner -Signed-off-by: Andrew Morton ---- - arch/mips/include/asm/pgtable.h | 1 + - arch/riscv/include/asm/pgtable.h | 1 + - arch/s390/include/asm/pgtable.h | 1 + - arch/sparc/include/asm/pgtable_64.h | 1 + - arch/x86/include/asm/pgtable.h | 1 + - include/linux/pgtable.h | 7 +++++++ - 6 files changed, 12 insertions(+) - ---- a/arch/mips/include/asm/pgtable.h -+++ b/arch/mips/include/asm/pgtable.h -@@ -632,6 +632,7 @@ static inline pmd_t pmd_mkdirty(pmd_t pm - return pmd; - } - -+#define pmd_young pmd_young - static inline int pmd_young(pmd_t pmd) - { - return !!(pmd_val(pmd) & _PAGE_ACCESSED); ---- a/arch/riscv/include/asm/pgtable.h -+++ b/arch/riscv/include/asm/pgtable.h -@@ -535,6 +535,7 @@ static inline int pmd_dirty(pmd_t pmd) - return pte_dirty(pmd_pte(pmd)); - } - -+#define pmd_young pmd_young - static inline int pmd_young(pmd_t pmd) - { - return pte_young(pmd_pte(pmd)); ---- a/arch/s390/include/asm/pgtable.h -+++ b/arch/s390/include/asm/pgtable.h -@@ -748,6 +748,7 @@ static inline int pmd_dirty(pmd_t pmd) - return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0; - } - -+#define pmd_young pmd_young - static inline int pmd_young(pmd_t pmd) - { - return (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0; ---- a/arch/sparc/include/asm/pgtable_64.h -+++ b/arch/sparc/include/asm/pgtable_64.h -@@ -712,6 +712,7 @@ static inline unsigned long pmd_dirty(pm - return pte_dirty(pte); - } - -+#define pmd_young pmd_young - static inline unsigned long pmd_young(pmd_t pmd) - { - pte_t pte = __pte(pmd_val(pmd)); ---- a/arch/x86/include/asm/pgtable.h -+++ b/arch/x86/include/asm/pgtable.h -@@ -136,6 +136,7 @@ static inline int pmd_dirty(pmd_t pmd) - return pmd_flags(pmd) & _PAGE_DIRTY; - } - -+#define pmd_young pmd_young - static inline int pmd_young(pmd_t pmd) - { - return pmd_flags(pmd) & _PAGE_ACCESSED; ---- a/include/linux/pgtable.h -+++ b/include/linux/pgtable.h -@@ -164,6 +164,13 @@ static inline pte_t *virt_to_kpte(unsign - return pmd_none(*pmd) ? 
NULL : pte_offset_kernel(pmd, vaddr); - } - -+#ifndef pmd_young -+static inline int pmd_young(pmd_t pmd) -+{ -+ return 0; -+} -+#endif -+ - #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS - extern int ptep_set_access_flags(struct vm_area_struct *vma, - unsigned long address, pte_t *ptep, diff --git a/target/linux/generic/backport-6.1/020-v6.1-18-mm-introduce-arch_has_hw_nonleaf_pmd_young.patch b/target/linux/generic/backport-6.1/020-v6.1-18-mm-introduce-arch_has_hw_nonleaf_pmd_young.patch deleted file mode 100644 index 9a1f9bead6cb..000000000000 --- a/target/linux/generic/backport-6.1/020-v6.1-18-mm-introduce-arch_has_hw_nonleaf_pmd_young.patch +++ /dev/null @@ -1,113 +0,0 @@ -From 46cbda7b65998a5af4493f745d94417af697bd68 Mon Sep 17 00:00:00 2001 -From: Juergen Gross -Date: Wed, 23 Nov 2022 07:45:10 +0100 -Subject: [PATCH 18/29] mm: introduce arch_has_hw_nonleaf_pmd_young() - -When running as a Xen PV guests commit eed9a328aa1a ("mm: x86: add -CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG") can cause a protection violation in -pmdp_test_and_clear_young(): - - BUG: unable to handle page fault for address: ffff8880083374d0 - #PF: supervisor write access in kernel mode - #PF: error_code(0x0003) - permissions violation - PGD 3026067 P4D 3026067 PUD 3027067 PMD 7fee5067 PTE 8010000008337065 - Oops: 0003 [#1] PREEMPT SMP NOPTI - CPU: 7 PID: 158 Comm: kswapd0 Not tainted 6.1.0-rc5-20221118-doflr+ #1 - RIP: e030:pmdp_test_and_clear_young+0x25/0x40 - -This happens because the Xen hypervisor can't emulate direct writes to -page table entries other than PTEs. - -This can easily be fixed by introducing arch_has_hw_nonleaf_pmd_young() -similar to arch_has_hw_pte_young() and test that instead of -CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG. - -Link: https://lkml.kernel.org/r/20221123064510.16225-1-jgross@suse.com -Fixes: eed9a328aa1a ("mm: x86: add CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG") -Signed-off-by: Juergen Gross -Reported-by: Sander Eikelenboom -Acked-by: Yu Zhao -Tested-by: Sander Eikelenboom -Acked-by: David Hildenbrand [core changes] -Signed-off-by: Andrew Morton ---- - arch/x86/include/asm/pgtable.h | 8 ++++++++ - include/linux/pgtable.h | 11 +++++++++++ - mm/vmscan.c | 10 +++++----- - 3 files changed, 24 insertions(+), 5 deletions(-) - ---- a/arch/x86/include/asm/pgtable.h -+++ b/arch/x86/include/asm/pgtable.h -@@ -1405,6 +1405,14 @@ static inline bool arch_has_hw_pte_young - return true; - } - -+#ifdef CONFIG_XEN_PV -+#define arch_has_hw_nonleaf_pmd_young arch_has_hw_nonleaf_pmd_young -+static inline bool arch_has_hw_nonleaf_pmd_young(void) -+{ -+ return !cpu_feature_enabled(X86_FEATURE_XENPV); -+} -+#endif -+ - #endif /* __ASSEMBLY__ */ - - #endif /* _ASM_X86_PGTABLE_H */ ---- a/include/linux/pgtable.h -+++ b/include/linux/pgtable.h -@@ -266,6 +266,17 @@ static inline int pmdp_clear_flush_young - #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ - #endif - -+#ifndef arch_has_hw_nonleaf_pmd_young -+/* -+ * Return whether the accessed bit in non-leaf PMD entries is supported on the -+ * local CPU. -+ */ -+static inline bool arch_has_hw_nonleaf_pmd_young(void) -+{ -+ return IS_ENABLED(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG); -+} -+#endif -+ - #ifndef arch_has_hw_pte_young - /* - * Return whether the accessed bit is supported on the local CPU. 
---- a/mm/vmscan.c -+++ b/mm/vmscan.c -@@ -3727,7 +3727,7 @@ static void walk_pmd_range_locked(pud_t - goto next; - - if (!pmd_trans_huge(pmd[i])) { -- if (IS_ENABLED(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG) && -+ if (arch_has_hw_nonleaf_pmd_young() && - get_cap(LRU_GEN_NONLEAF_YOUNG)) - pmdp_test_and_clear_young(vma, addr, pmd + i); - goto next; -@@ -3825,14 +3825,14 @@ restart: - #endif - walk->mm_stats[MM_NONLEAF_TOTAL]++; - --#ifdef CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG -- if (get_cap(LRU_GEN_NONLEAF_YOUNG)) { -+ if (arch_has_hw_nonleaf_pmd_young() && -+ get_cap(LRU_GEN_NONLEAF_YOUNG)) { - if (!pmd_young(val)) - continue; - - walk_pmd_range_locked(pud, addr, vma, args, bitmap, &pos); - } --#endif -+ - if (!walk->force_scan && !test_bloom_filter(walk->lruvec, walk->max_seq, pmd + i)) - continue; - -@@ -5132,7 +5132,7 @@ static ssize_t show_enabled(struct kobje - if (arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK)) - caps |= BIT(LRU_GEN_MM_WALK); - -- if (IS_ENABLED(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG) && get_cap(LRU_GEN_NONLEAF_YOUNG)) -+ if (arch_has_hw_nonleaf_pmd_young() && get_cap(LRU_GEN_NONLEAF_YOUNG)) - caps |= BIT(LRU_GEN_NONLEAF_YOUNG); - - return snprintf(buf, PAGE_SIZE, "0x%04x\n", caps); diff --git a/target/linux/generic/backport-6.1/020-v6.2-16-mm-multi-gen-LRU-fix-crash-during-cgroup-migration.patch b/target/linux/generic/backport-6.1/020-v6.2-16-mm-multi-gen-LRU-fix-crash-during-cgroup-migration.patch deleted file mode 100644 index e37386abdf19..000000000000 --- a/target/linux/generic/backport-6.1/020-v6.2-16-mm-multi-gen-LRU-fix-crash-during-cgroup-migration.patch +++ /dev/null @@ -1,56 +0,0 @@ -From c7dfefd4bdfba3d5171038d1cc2d4160288e6ee4 Mon Sep 17 00:00:00 2001 -From: Yu Zhao -Date: Sun, 15 Jan 2023 20:44:05 -0700 -Subject: [PATCH 16/29] mm: multi-gen LRU: fix crash during cgroup migration - -lru_gen_migrate_mm() assumes lru_gen_add_mm() runs prior to itself. This -isn't true for the following scenario: - - CPU 1 CPU 2 - - clone() - cgroup_can_fork() - cgroup_procs_write() - cgroup_post_fork() - task_lock() - lru_gen_migrate_mm() - task_unlock() - task_lock() - lru_gen_add_mm() - task_unlock() - -And when the above happens, kernel crashes because of linked list -corruption (mm_struct->lru_gen.list). 
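
A minimal sketch of the guard this fix relies on (editorial, with made-up type and function names; the real change to lru_gen_migrate_mm() is in the hunk below): lru_gen_add_mm() is what first assigns lru_gen.memcg, so a NULL memcg means the mm has not been added to any list yet and migration can simply bail out.

  struct sketch_memcg;

  struct sketch_mm {
          struct sketch_memcg *memcg;     /* stays NULL until "add" has run */
  };

  void sketch_migrate(struct sketch_mm *mm, struct sketch_memcg *to)
  {
          if (!mm->memcg)         /* migration raced ahead of addition */
                  return;         /* nothing is linked on any list yet */
          if (mm->memcg == to)
                  return;         /* already accounted to the right memcg */
          /* ... otherwise delete from the old list and add to the new one ... */
  }
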
- -Link: https://lore.kernel.org/r/20230115134651.30028-1-msizanoen@qtmlabs.xyz/ -Link: https://lkml.kernel.org/r/20230116034405.2960276-1-yuzhao@google.com -Fixes: bd74fdaea146 ("mm: multi-gen LRU: support page table walks") -Signed-off-by: Yu Zhao -Reported-by: msizanoen -Tested-by: msizanoen -Cc: [6.1+] -Signed-off-by: Andrew Morton ---- - mm/vmscan.c | 5 ++++- - 1 file changed, 4 insertions(+), 1 deletion(-) - ---- a/mm/vmscan.c -+++ b/mm/vmscan.c -@@ -3024,13 +3024,16 @@ void lru_gen_migrate_mm(struct mm_struct - if (mem_cgroup_disabled()) - return; - -+ /* migration can happen before addition */ -+ if (!mm->lru_gen.memcg) -+ return; -+ - rcu_read_lock(); - memcg = mem_cgroup_from_task(task); - rcu_read_unlock(); - if (memcg == mm->lru_gen.memcg) - return; - -- VM_WARN_ON_ONCE(!mm->lru_gen.memcg); - VM_WARN_ON_ONCE(list_empty(&mm->lru_gen.list)); - - lru_gen_del_mm(mm); diff --git a/target/linux/generic/backport-6.1/050-v5.16-00-MIPS-uasm-Enable-muhu-opcode-for-MIPS-R6.patch b/target/linux/generic/backport-6.1/050-v5.16-00-MIPS-uasm-Enable-muhu-opcode-for-MIPS-R6.patch deleted file mode 100644 index 82feb7421d5f..000000000000 --- a/target/linux/generic/backport-6.1/050-v5.16-00-MIPS-uasm-Enable-muhu-opcode-for-MIPS-R6.patch +++ /dev/null @@ -1,65 +0,0 @@ -From: Johan Almbladh -Date: Tue, 5 Oct 2021 18:54:02 +0200 -Subject: [PATCH] MIPS: uasm: Enable muhu opcode for MIPS R6 - -Enable the 'muhu' instruction, complementing the existing 'mulu', needed -to implement a MIPS32 BPF JIT. - -Also fix a typo in the existing definition of 'dmulu'. - -Signed-off-by: Tony Ambardar - -This patch is a dependency for my 32-bit MIPS eBPF JIT. - -Signed-off-by: Johan Almbladh ---- - ---- a/arch/mips/include/asm/uasm.h -+++ b/arch/mips/include/asm/uasm.h -@@ -145,6 +145,7 @@ Ip_u1(_mtlo); - Ip_u3u1u2(_mul); - Ip_u1u2(_multu); - Ip_u3u1u2(_mulu); -+Ip_u3u1u2(_muhu); - Ip_u3u1u2(_nor); - Ip_u3u1u2(_or); - Ip_u2u1u3(_ori); ---- a/arch/mips/mm/uasm-mips.c -+++ b/arch/mips/mm/uasm-mips.c -@@ -90,7 +90,7 @@ static const struct insn insn_table[insn - RS | RT | RD}, - [insn_dmtc0] = {M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET}, - [insn_dmultu] = {M(spec_op, 0, 0, 0, 0, dmultu_op), RS | RT}, -- [insn_dmulu] = {M(spec_op, 0, 0, 0, dmult_dmul_op, dmultu_op), -+ [insn_dmulu] = {M(spec_op, 0, 0, 0, dmultu_dmulu_op, dmultu_op), - RS | RT | RD}, - [insn_drotr] = {M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE}, - [insn_drotr32] = {M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE}, -@@ -150,6 +150,8 @@ static const struct insn insn_table[insn - [insn_mtlo] = {M(spec_op, 0, 0, 0, 0, mtlo_op), RS}, - [insn_mulu] = {M(spec_op, 0, 0, 0, multu_mulu_op, multu_op), - RS | RT | RD}, -+ [insn_muhu] = {M(spec_op, 0, 0, 0, multu_muhu_op, multu_op), -+ RS | RT | RD}, - #ifndef CONFIG_CPU_MIPSR6 - [insn_mul] = {M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD}, - #else ---- a/arch/mips/mm/uasm.c -+++ b/arch/mips/mm/uasm.c -@@ -59,7 +59,7 @@ enum opcode { - insn_lddir, insn_ldpte, insn_ldx, insn_lh, insn_lhu, insn_ll, insn_lld, - insn_lui, insn_lw, insn_lwu, insn_lwx, insn_mfc0, insn_mfhc0, insn_mfhi, - insn_mflo, insn_modu, insn_movn, insn_movz, insn_mtc0, insn_mthc0, -- insn_mthi, insn_mtlo, insn_mul, insn_multu, insn_mulu, insn_nor, -+ insn_mthi, insn_mtlo, insn_mul, insn_multu, insn_mulu, insn_muhu, insn_nor, - insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sb, insn_sc, - insn_scd, insn_seleqz, insn_selnez, insn_sd, insn_sh, insn_sll, - insn_sllv, insn_slt, insn_slti, insn_sltiu, insn_sltu, insn_sra, -@@ -344,6 +344,7 @@ 
I_u1(_mtlo) - I_u3u1u2(_mul) - I_u1u2(_multu) - I_u3u1u2(_mulu) -+I_u3u1u2(_muhu) - I_u3u1u2(_nor) - I_u3u1u2(_or) - I_u2u1u3(_ori) diff --git a/target/linux/generic/backport-6.1/050-v5.16-01-mips-uasm-Add-workaround-for-Loongson-2F-nop-CPU-err.patch b/target/linux/generic/backport-6.1/050-v5.16-01-mips-uasm-Add-workaround-for-Loongson-2F-nop-CPU-err.patch deleted file mode 100644 index 3a4d573f80af..000000000000 --- a/target/linux/generic/backport-6.1/050-v5.16-01-mips-uasm-Add-workaround-for-Loongson-2F-nop-CPU-err.patch +++ /dev/null @@ -1,31 +0,0 @@ -From: Johan Almbladh -Date: Tue, 5 Oct 2021 18:54:03 +0200 -Subject: [PATCH] mips: uasm: Add workaround for Loongson-2F nop CPU errata - -This patch implements a workaround for the Loongson-2F nop in generated, -code, if the existing option CONFIG_CPU_NOP_WORKAROUND is set. Before, -the binutils option -mfix-loongson2f-nop was enabled, but no workaround -was done when emitting MIPS code. Now, the nop pseudo instruction is -emitted as "or ax,ax,zero" instead of the default "sll zero,zero,0". This -is consistent with the workaround implemented by binutils. - -Link: https://sourceware.org/legacy-ml/binutils/2009-11/msg00387.html - -Signed-off-by: Johan Almbladh -Reviewed-by: Jiaxun Yang ---- - ---- a/arch/mips/include/asm/uasm.h -+++ b/arch/mips/include/asm/uasm.h -@@ -249,7 +249,11 @@ static inline void uasm_l##lb(struct uas - #define uasm_i_bnezl(buf, rs, off) uasm_i_bnel(buf, rs, 0, off) - #define uasm_i_ehb(buf) uasm_i_sll(buf, 0, 0, 3) - #define uasm_i_move(buf, a, b) UASM_i_ADDU(buf, a, 0, b) -+#ifdef CONFIG_CPU_NOP_WORKAROUNDS -+#define uasm_i_nop(buf) uasm_i_or(buf, 1, 1, 0) -+#else - #define uasm_i_nop(buf) uasm_i_sll(buf, 0, 0, 0) -+#endif - #define uasm_i_ssnop(buf) uasm_i_sll(buf, 0, 0, 1) - - static inline void uasm_i_drotr_safe(u32 **p, unsigned int a1, diff --git a/target/linux/generic/backport-6.1/050-v5.16-02-mips-bpf-Add-eBPF-JIT-for-32-bit-MIPS.patch b/target/linux/generic/backport-6.1/050-v5.16-02-mips-bpf-Add-eBPF-JIT-for-32-bit-MIPS.patch deleted file mode 100644 index 798065996124..000000000000 --- a/target/linux/generic/backport-6.1/050-v5.16-02-mips-bpf-Add-eBPF-JIT-for-32-bit-MIPS.patch +++ /dev/null @@ -1,3078 +0,0 @@ -From: Johan Almbladh -Date: Tue, 5 Oct 2021 18:54:04 +0200 -Subject: [PATCH] mips: bpf: Add eBPF JIT for 32-bit MIPS - -This is an implementation of an eBPF JIT for 32-bit MIPS I-V and MIPS32. -The implementation supports all 32-bit and 64-bit ALU and JMP operations, -including the recently-added atomics. 64-bit div/mod and 64-bit atomics -are implemented using function calls to math64 and atomic64 functions, -respectively. All 32-bit operations are implemented natively by the JIT, -except if the CPU lacks ll/sc instructions. - -Register mapping -================ -All 64-bit eBPF registers are mapped to native 32-bit MIPS register pairs, -and does not use any stack scratch space for register swapping. This means -that all eBPF register data is kept in CPU registers all the time, and -this simplifies the register management a lot. It also reduces the JIT's -pressure on temporary registers since we do not have to move data around. - -Native register pairs are ordered according to CPU endiannes, following -the O32 calling convention for passing 64-bit arguments and return values. -The eBPF return value, arguments and callee-saved registers are mapped to -their native MIPS equivalents. 
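
As a stand-alone illustration of the pairing described above (editorial, not part of the patch), each 64-bit eBPF value is simply handled as a high and a low 32-bit word, and the O32 pair ordering decides which native register holds which half:

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          uint64_t val = 0x1122334455667788ULL;
          uint32_t lo = (uint32_t)val;            /* low word of the pair */
          uint32_t hi = (uint32_t)(val >> 32);    /* high word of the pair */

          /* e.g. for eBPF R0 under O32: hi lives in $v0 on big-endian
           * targets and in $v1 on little-endian ones, matching the
           * bpf2mips32[] table further down in this patch. */
          printf("hi=%08x lo=%08x\n", hi, lo);
          return 0;
  }
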
- -Since the 32 highest bits in the eBPF FP (frame pointer) register are -always zero, only one general-purpose register is actually needed for the -mapping. The MIPS fp register is used for this purpose. The high bits are -mapped to MIPS register r0. This saves us one CPU register, which is much -needed for temporaries, while still allowing us to treat the R10 (FP) -register just like any other eBPF register in the JIT. - -The MIPS gp (global pointer) and at (assembler temporary) registers are -used as internal temporary registers for constant blinding. CPU registers -t6-t9 are used internally by the JIT when constructing more complex 64-bit -operations. This is precisely what is needed - two registers to store an -operand value, and two more as scratch registers when performing the -operation. - -The register mapping is shown below. - - R0 - $v1, $v0 return value - R1 - $a1, $a0 argument 1, passed in registers - R2 - $a3, $a2 argument 2, passed in registers - R3 - $t1, $t0 argument 3, passed on stack - R4 - $t3, $t2 argument 4, passed on stack - R5 - $t4, $t3 argument 5, passed on stack - R6 - $s1, $s0 callee-saved - R7 - $s3, $s2 callee-saved - R8 - $s5, $s4 callee-saved - R9 - $s7, $s6 callee-saved - FP - $r0, $fp 32-bit frame pointer - AX - $gp, $at constant-blinding - $t6 - $t9 unallocated, JIT temporaries - -Jump offsets -============ -The JIT tries to map all conditional JMP operations to MIPS conditional -PC-relative branches. The MIPS branch offset field is 18 bits, in bytes, -which is equivalent to the eBPF 16-bit instruction offset. However, since -the JIT may emit more than one CPU instruction per eBPF instruction, the -field width may overflow. If that happens, the JIT converts the long -conditional jump to a short PC-relative branch with the condition -inverted, jumping over a long unconditional absolute jmp (j). - -This conversion will change the instruction offset mapping used for jumps, -and may in turn result in more branch offset overflows. The JIT therefore -dry-runs the translation until no more branches are converted and the -offsets do not change anymore. There is an upper bound on this of course, -and if the JIT hits that limit, the last two iterations are run with all -branches being converted. - -Tail call count -=============== -The current tail call count is stored in the 16-byte area of the caller's -stack frame that is reserved for the callee in the o32 ABI. The value is -initialized in the prologue, and propagated to the tail-callee by skipping -the initialization instructions when emitting the tail call. - -Signed-off-by: Johan Almbladh ---- - create mode 100644 arch/mips/net/bpf_jit_comp.c - create mode 100644 arch/mips/net/bpf_jit_comp.h - create mode 100644 arch/mips/net/bpf_jit_comp32.c - ---- a/arch/mips/net/Makefile -+++ b/arch/mips/net/Makefile -@@ -2,4 +2,9 @@ - # MIPS networking code - - obj-$(CONFIG_MIPS_CBPF_JIT) += bpf_jit.o bpf_jit_asm.o --obj-$(CONFIG_MIPS_EBPF_JIT) += ebpf_jit.o -+ -+ifeq ($(CONFIG_32BIT),y) -+ obj-$(CONFIG_MIPS_EBPF_JIT) += bpf_jit_comp.o bpf_jit_comp32.o -+else -+ obj-$(CONFIG_MIPS_EBPF_JIT) += ebpf_jit.o -+endif ---- /dev/null -+++ b/arch/mips/net/bpf_jit_comp.c -@@ -0,0 +1,1020 @@ -+// SPDX-License-Identifier: GPL-2.0-only -+/* -+ * Just-In-Time compiler for eBPF bytecode on MIPS. -+ * Implementation of JIT functions common to 32-bit and 64-bit CPUs. -+ * -+ * Copyright (c) 2021 Anyfi Networks AB. -+ * Author: Johan Almbladh -+ * -+ * Based on code and ideas from -+ * Copyright (c) 2017 Cavium, Inc. 
-+ * Copyright (c) 2017 Shubham Bansal -+ * Copyright (c) 2011 Mircea Gherzan -+ */ -+ -+/* -+ * Code overview -+ * ============= -+ * -+ * - bpf_jit_comp.h -+ * Common definitions and utilities. -+ * -+ * - bpf_jit_comp.c -+ * Implementation of JIT top-level logic and exported JIT API functions. -+ * Implementation of internal operations shared by 32-bit and 64-bit code. -+ * JMP and ALU JIT control code, register control code, shared ALU and -+ * JMP/JMP32 JIT operations. -+ * -+ * - bpf_jit_comp32.c -+ * Implementation of functions to JIT prologue, epilogue and a single eBPF -+ * instruction for 32-bit MIPS CPUs. The functions use shared operations -+ * where possible, and implement the rest for 32-bit MIPS such as ALU64 -+ * operations. -+ * -+ * - bpf_jit_comp64.c -+ * Ditto, for 64-bit MIPS CPUs. -+ * -+ * Zero and sign extension -+ * ======================== -+ * 32-bit MIPS instructions on 64-bit MIPS registers use sign extension, -+ * but the eBPF instruction set mandates zero extension. We let the verifier -+ * insert explicit zero-extensions after 32-bit ALU operations, both for -+ * 32-bit and 64-bit MIPS JITs. Conditional JMP32 operations on 64-bit MIPs -+ * are JITed with sign extensions inserted when so expected. -+ * -+ * ALU operations -+ * ============== -+ * ALU operations on 32/64-bit MIPS and ALU64 operations on 64-bit MIPS are -+ * JITed in the following steps. ALU64 operations on 32-bit MIPS are more -+ * complicated and therefore only processed by special implementations in -+ * step (3). -+ * -+ * 1) valid_alu_i: -+ * Determine if an immediate operation can be emitted as such, or if -+ * we must fall back to the register version. -+ * -+ * 2) rewrite_alu_i: -+ * Convert BPF operation and immediate value to a canonical form for -+ * JITing. In some degenerate cases this form may be a no-op. -+ * -+ * 3) emit_alu_{i,i64,r,64}: -+ * Emit instructions for an ALU or ALU64 immediate or register operation. -+ * -+ * JMP operations -+ * ============== -+ * JMP and JMP32 operations require an JIT instruction offset table for -+ * translating the jump offset. This table is computed by dry-running the -+ * JIT without actually emitting anything. However, the computed PC-relative -+ * offset may overflow the 18-bit offset field width of the native MIPS -+ * branch instruction. In such cases, the long jump is converted into the -+ * following sequence. -+ * -+ * ! +2 Inverted PC-relative branch -+ * nop Delay slot -+ * j Unconditional absolute long jump -+ * nop Delay slot -+ * -+ * Since this converted sequence alters the offset table, all offsets must -+ * be re-calculated. This may in turn trigger new branch conversions, so -+ * the process is repeated until no further changes are made. Normally it -+ * completes in 1-2 iterations. If JIT_MAX_ITERATIONS should reached, we -+ * fall back to converting every remaining jump operation. The branch -+ * conversion is independent of how the JMP or JMP32 condition is JITed. -+ * -+ * JMP32 and JMP operations are JITed as follows. -+ * -+ * 1) setup_jmp_{i,r}: -+ * Convert jump conditional and offset into a form that can be JITed. -+ * This form may be a no-op, a canonical form, or an inverted PC-relative -+ * jump if branch conversion is necessary. -+ * -+ * 2) valid_jmp_i: -+ * Determine if an immediate operations can be emitted as such, or if -+ * we must fall back to the register version. Applies to JMP32 for 32-bit -+ * MIPS, and both JMP and JMP32 for 64-bit MIPS. 
-+ * -+ * 3) emit_jmp_{i,i64,r,r64}: -+ * Emit instructions for an JMP or JMP32 immediate or register operation. -+ * -+ * 4) finish_jmp_{i,r}: -+ * Emit any instructions needed to finish the jump. This includes a nop -+ * for the delay slot if a branch was emitted, and a long absolute jump -+ * if the branch was converted. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "bpf_jit_comp.h" -+ -+/* Convenience macros for descriptor access */ -+#define CONVERTED(desc) ((desc) & JIT_DESC_CONVERT) -+#define INDEX(desc) ((desc) & ~JIT_DESC_CONVERT) -+ -+/* -+ * Push registers on the stack, starting at a given depth from the stack -+ * pointer and increasing. The next depth to be written is returned. -+ */ -+int push_regs(struct jit_context *ctx, u32 mask, u32 excl, int depth) -+{ -+ int reg; -+ -+ for (reg = 0; reg < BITS_PER_BYTE * sizeof(mask); reg++) -+ if (mask & BIT(reg)) { -+ if ((excl & BIT(reg)) == 0) { -+ if (sizeof(long) == 4) -+ emit(ctx, sw, reg, depth, MIPS_R_SP); -+ else /* sizeof(long) == 8 */ -+ emit(ctx, sd, reg, depth, MIPS_R_SP); -+ } -+ depth += sizeof(long); -+ } -+ -+ ctx->stack_used = max((int)ctx->stack_used, depth); -+ return depth; -+} -+ -+/* -+ * Pop registers from the stack, starting at a given depth from the stack -+ * pointer and increasing. The next depth to be read is returned. -+ */ -+int pop_regs(struct jit_context *ctx, u32 mask, u32 excl, int depth) -+{ -+ int reg; -+ -+ for (reg = 0; reg < BITS_PER_BYTE * sizeof(mask); reg++) -+ if (mask & BIT(reg)) { -+ if ((excl & BIT(reg)) == 0) { -+ if (sizeof(long) == 4) -+ emit(ctx, lw, reg, depth, MIPS_R_SP); -+ else /* sizeof(long) == 8 */ -+ emit(ctx, ld, reg, depth, MIPS_R_SP); -+ } -+ depth += sizeof(long); -+ } -+ -+ return depth; -+} -+ -+/* Compute the 28-bit jump target address from a BPF program location */ -+int get_target(struct jit_context *ctx, u32 loc) -+{ -+ u32 index = INDEX(ctx->descriptors[loc]); -+ unsigned long pc = (unsigned long)&ctx->target[ctx->jit_index]; -+ unsigned long addr = (unsigned long)&ctx->target[index]; -+ -+ if (!ctx->target) -+ return 0; -+ -+ if ((addr ^ pc) & ~MIPS_JMP_MASK) -+ return -1; -+ -+ return addr & MIPS_JMP_MASK; -+} -+ -+/* Compute the PC-relative offset to relative BPF program offset */ -+int get_offset(const struct jit_context *ctx, int off) -+{ -+ return (INDEX(ctx->descriptors[ctx->bpf_index + off]) - -+ ctx->jit_index - 1) * sizeof(u32); -+} -+ -+/* dst = imm (register width) */ -+void emit_mov_i(struct jit_context *ctx, u8 dst, s32 imm) -+{ -+ if (imm >= -0x8000 && imm <= 0x7fff) { -+ emit(ctx, addiu, dst, MIPS_R_ZERO, imm); -+ } else { -+ emit(ctx, lui, dst, (s16)((u32)imm >> 16)); -+ emit(ctx, ori, dst, dst, (u16)(imm & 0xffff)); -+ } -+ clobber_reg(ctx, dst); -+} -+ -+/* dst = src (register width) */ -+void emit_mov_r(struct jit_context *ctx, u8 dst, u8 src) -+{ -+ emit(ctx, ori, dst, src, 0); -+ clobber_reg(ctx, dst); -+} -+ -+/* Validate ALU immediate range */ -+bool valid_alu_i(u8 op, s32 imm) -+{ -+ switch (BPF_OP(op)) { -+ case BPF_NEG: -+ case BPF_LSH: -+ case BPF_RSH: -+ case BPF_ARSH: -+ /* All legal eBPF values are valid */ -+ return true; -+ case BPF_ADD: -+ /* imm must be 16 bits */ -+ return imm >= -0x8000 && imm <= 0x7fff; -+ case BPF_SUB: -+ /* -imm must be 16 bits */ -+ return imm >= -0x7fff && imm <= 0x8000; -+ case BPF_AND: -+ case BPF_OR: -+ case BPF_XOR: -+ /* imm must be 16 bits unsigned */ -+ return imm >= 0 && imm <= 0xffff; -+ case BPF_MUL: -+ 
/* imm must be zero or a positive power of two */ -+ return imm == 0 || (imm > 0 && is_power_of_2(imm)); -+ case BPF_DIV: -+ case BPF_MOD: -+ /* imm must be an 17-bit power of two */ -+ return (u32)imm <= 0x10000 && is_power_of_2((u32)imm); -+ } -+ return false; -+} -+ -+/* Rewrite ALU immediate operation */ -+bool rewrite_alu_i(u8 op, s32 imm, u8 *alu, s32 *val) -+{ -+ bool act = true; -+ -+ switch (BPF_OP(op)) { -+ case BPF_LSH: -+ case BPF_RSH: -+ case BPF_ARSH: -+ case BPF_ADD: -+ case BPF_SUB: -+ case BPF_OR: -+ case BPF_XOR: -+ /* imm == 0 is a no-op */ -+ act = imm != 0; -+ break; -+ case BPF_MUL: -+ if (imm == 1) { -+ /* dst * 1 is a no-op */ -+ act = false; -+ } else if (imm == 0) { -+ /* dst * 0 is dst & 0 */ -+ op = BPF_AND; -+ } else { -+ /* dst * (1 << n) is dst << n */ -+ op = BPF_LSH; -+ imm = ilog2(abs(imm)); -+ } -+ break; -+ case BPF_DIV: -+ if (imm == 1) { -+ /* dst / 1 is a no-op */ -+ act = false; -+ } else { -+ /* dst / (1 << n) is dst >> n */ -+ op = BPF_RSH; -+ imm = ilog2(imm); -+ } -+ break; -+ case BPF_MOD: -+ /* dst % (1 << n) is dst & ((1 << n) - 1) */ -+ op = BPF_AND; -+ imm--; -+ break; -+ } -+ -+ *alu = op; -+ *val = imm; -+ return act; -+} -+ -+/* ALU immediate operation (32-bit) */ -+void emit_alu_i(struct jit_context *ctx, u8 dst, s32 imm, u8 op) -+{ -+ switch (BPF_OP(op)) { -+ /* dst = -dst */ -+ case BPF_NEG: -+ emit(ctx, subu, dst, MIPS_R_ZERO, dst); -+ break; -+ /* dst = dst & imm */ -+ case BPF_AND: -+ emit(ctx, andi, dst, dst, (u16)imm); -+ break; -+ /* dst = dst | imm */ -+ case BPF_OR: -+ emit(ctx, ori, dst, dst, (u16)imm); -+ break; -+ /* dst = dst ^ imm */ -+ case BPF_XOR: -+ emit(ctx, xori, dst, dst, (u16)imm); -+ break; -+ /* dst = dst << imm */ -+ case BPF_LSH: -+ emit(ctx, sll, dst, dst, imm); -+ break; -+ /* dst = dst >> imm */ -+ case BPF_RSH: -+ emit(ctx, srl, dst, dst, imm); -+ break; -+ /* dst = dst >> imm (arithmetic) */ -+ case BPF_ARSH: -+ emit(ctx, sra, dst, dst, imm); -+ break; -+ /* dst = dst + imm */ -+ case BPF_ADD: -+ emit(ctx, addiu, dst, dst, imm); -+ break; -+ /* dst = dst - imm */ -+ case BPF_SUB: -+ emit(ctx, addiu, dst, dst, -imm); -+ break; -+ } -+ clobber_reg(ctx, dst); -+} -+ -+/* ALU register operation (32-bit) */ -+void emit_alu_r(struct jit_context *ctx, u8 dst, u8 src, u8 op) -+{ -+ switch (BPF_OP(op)) { -+ /* dst = dst & src */ -+ case BPF_AND: -+ emit(ctx, and, dst, dst, src); -+ break; -+ /* dst = dst | src */ -+ case BPF_OR: -+ emit(ctx, or, dst, dst, src); -+ break; -+ /* dst = dst ^ src */ -+ case BPF_XOR: -+ emit(ctx, xor, dst, dst, src); -+ break; -+ /* dst = dst << src */ -+ case BPF_LSH: -+ emit(ctx, sllv, dst, dst, src); -+ break; -+ /* dst = dst >> src */ -+ case BPF_RSH: -+ emit(ctx, srlv, dst, dst, src); -+ break; -+ /* dst = dst >> src (arithmetic) */ -+ case BPF_ARSH: -+ emit(ctx, srav, dst, dst, src); -+ break; -+ /* dst = dst + src */ -+ case BPF_ADD: -+ emit(ctx, addu, dst, dst, src); -+ break; -+ /* dst = dst - src */ -+ case BPF_SUB: -+ emit(ctx, subu, dst, dst, src); -+ break; -+ /* dst = dst * src */ -+ case BPF_MUL: -+ if (cpu_has_mips32r1 || cpu_has_mips32r6) { -+ emit(ctx, mul, dst, dst, src); -+ } else { -+ emit(ctx, multu, dst, src); -+ emit(ctx, mflo, dst); -+ } -+ break; -+ /* dst = dst / src */ -+ case BPF_DIV: -+ if (cpu_has_mips32r6) { -+ emit(ctx, divu_r6, dst, dst, src); -+ } else { -+ emit(ctx, divu, dst, src); -+ emit(ctx, mflo, dst); -+ } -+ break; -+ /* dst = dst % src */ -+ case BPF_MOD: -+ if (cpu_has_mips32r6) { -+ emit(ctx, modu, dst, dst, src); -+ } else { -+ emit(ctx, 
divu, dst, src); -+ emit(ctx, mfhi, dst); -+ } -+ break; -+ } -+ clobber_reg(ctx, dst); -+} -+ -+/* Atomic read-modify-write (32-bit) */ -+void emit_atomic_r(struct jit_context *ctx, u8 dst, u8 src, s16 off, u8 code) -+{ -+ emit(ctx, ll, MIPS_R_T9, off, dst); -+ switch (code) { -+ case BPF_ADD: -+ emit(ctx, addu, MIPS_R_T8, MIPS_R_T9, src); -+ break; -+ case BPF_AND: -+ emit(ctx, and, MIPS_R_T8, MIPS_R_T9, src); -+ break; -+ case BPF_OR: -+ emit(ctx, or, MIPS_R_T8, MIPS_R_T9, src); -+ break; -+ case BPF_XOR: -+ emit(ctx, xor, MIPS_R_T8, MIPS_R_T9, src); -+ break; -+ } -+ emit(ctx, sc, MIPS_R_T8, off, dst); -+ emit(ctx, beqz, MIPS_R_T8, -16); -+ emit(ctx, nop); /* Delay slot */ -+} -+ -+/* Atomic compare-and-exchange (32-bit) */ -+void emit_cmpxchg_r(struct jit_context *ctx, u8 dst, u8 src, u8 res, s16 off) -+{ -+ emit(ctx, ll, MIPS_R_T9, off, dst); -+ emit(ctx, bne, MIPS_R_T9, res, 12); -+ emit(ctx, move, MIPS_R_T8, src); /* Delay slot */ -+ emit(ctx, sc, MIPS_R_T8, off, dst); -+ emit(ctx, beqz, MIPS_R_T8, -20); -+ emit(ctx, move, res, MIPS_R_T9); /* Delay slot */ -+ clobber_reg(ctx, res); -+} -+ -+/* Swap bytes and truncate a register word or half word */ -+void emit_bswap_r(struct jit_context *ctx, u8 dst, u32 width) -+{ -+ u8 tmp = MIPS_R_T8; -+ u8 msk = MIPS_R_T9; -+ -+ switch (width) { -+ /* Swap bytes in a word */ -+ case 32: -+ if (cpu_has_mips32r2 || cpu_has_mips32r6) { -+ emit(ctx, wsbh, dst, dst); -+ emit(ctx, rotr, dst, dst, 16); -+ } else { -+ emit(ctx, sll, tmp, dst, 16); /* tmp = dst << 16 */ -+ emit(ctx, srl, dst, dst, 16); /* dst = dst >> 16 */ -+ emit(ctx, or, dst, dst, tmp); /* dst = dst | tmp */ -+ -+ emit(ctx, lui, msk, 0xff); /* msk = 0x00ff0000 */ -+ emit(ctx, ori, msk, msk, 0xff); /* msk = msk | 0xff */ -+ -+ emit(ctx, and, tmp, dst, msk); /* tmp = dst & msk */ -+ emit(ctx, sll, tmp, tmp, 8); /* tmp = tmp << 8 */ -+ emit(ctx, srl, dst, dst, 8); /* dst = dst >> 8 */ -+ emit(ctx, and, dst, dst, msk); /* dst = dst & msk */ -+ emit(ctx, or, dst, dst, tmp); /* reg = dst | tmp */ -+ } -+ break; -+ /* Swap bytes in a half word */ -+ case 16: -+ if (cpu_has_mips32r2 || cpu_has_mips32r6) { -+ emit(ctx, wsbh, dst, dst); -+ emit(ctx, andi, dst, dst, 0xffff); -+ } else { -+ emit(ctx, andi, tmp, dst, 0xff00); /* t = d & 0xff00 */ -+ emit(ctx, srl, tmp, tmp, 8); /* t = t >> 8 */ -+ emit(ctx, andi, dst, dst, 0x00ff); /* d = d & 0x00ff */ -+ emit(ctx, sll, dst, dst, 8); /* d = d << 8 */ -+ emit(ctx, or, dst, dst, tmp); /* d = d | t */ -+ } -+ break; -+ } -+ clobber_reg(ctx, dst); -+} -+ -+/* Validate jump immediate range */ -+bool valid_jmp_i(u8 op, s32 imm) -+{ -+ switch (op) { -+ case JIT_JNOP: -+ /* Immediate value not used */ -+ return true; -+ case BPF_JEQ: -+ case BPF_JNE: -+ /* No immediate operation */ -+ return false; -+ case BPF_JSET: -+ case JIT_JNSET: -+ /* imm must be 16 bits unsigned */ -+ return imm >= 0 && imm <= 0xffff; -+ case BPF_JGE: -+ case BPF_JLT: -+ case BPF_JSGE: -+ case BPF_JSLT: -+ /* imm must be 16 bits */ -+ return imm >= -0x8000 && imm <= 0x7fff; -+ case BPF_JGT: -+ case BPF_JLE: -+ case BPF_JSGT: -+ case BPF_JSLE: -+ /* imm + 1 must be 16 bits */ -+ return imm >= -0x8001 && imm <= 0x7ffe; -+ } -+ return false; -+} -+ -+/* Invert a conditional jump operation */ -+static u8 invert_jmp(u8 op) -+{ -+ switch (op) { -+ case BPF_JA: return JIT_JNOP; -+ case BPF_JEQ: return BPF_JNE; -+ case BPF_JNE: return BPF_JEQ; -+ case BPF_JSET: return JIT_JNSET; -+ case BPF_JGT: return BPF_JLE; -+ case BPF_JGE: return BPF_JLT; -+ case BPF_JLT: return BPF_JGE; -+ case 
BPF_JLE: return BPF_JGT; -+ case BPF_JSGT: return BPF_JSLE; -+ case BPF_JSGE: return BPF_JSLT; -+ case BPF_JSLT: return BPF_JSGE; -+ case BPF_JSLE: return BPF_JSGT; -+ } -+ return 0; -+} -+ -+/* Prepare a PC-relative jump operation */ -+static void setup_jmp(struct jit_context *ctx, u8 bpf_op, -+ s16 bpf_off, u8 *jit_op, s32 *jit_off) -+{ -+ u32 *descp = &ctx->descriptors[ctx->bpf_index]; -+ int op = bpf_op; -+ int offset = 0; -+ -+ /* Do not compute offsets on the first pass */ -+ if (INDEX(*descp) == 0) -+ goto done; -+ -+ /* Skip jumps never taken */ -+ if (bpf_op == JIT_JNOP) -+ goto done; -+ -+ /* Convert jumps always taken */ -+ if (bpf_op == BPF_JA) -+ *descp |= JIT_DESC_CONVERT; -+ -+ /* -+ * Current ctx->jit_index points to the start of the branch preamble. -+ * Since the preamble differs among different branch conditionals, -+ * the current index cannot be used to compute the branch offset. -+ * Instead, we use the offset table value for the next instruction, -+ * which gives the index immediately after the branch delay slot. -+ */ -+ if (!CONVERTED(*descp)) { -+ int target = ctx->bpf_index + bpf_off + 1; -+ int origin = ctx->bpf_index + 1; -+ -+ offset = (INDEX(ctx->descriptors[target]) - -+ INDEX(ctx->descriptors[origin]) + 1) * sizeof(u32); -+ } -+ -+ /* -+ * The PC-relative branch offset field on MIPS is 18 bits signed, -+ * so if the computed offset is larger than this we generate a an -+ * absolute jump that we skip with an inverted conditional branch. -+ */ -+ if (CONVERTED(*descp) || offset < -0x20000 || offset > 0x1ffff) { -+ offset = 3 * sizeof(u32); -+ op = invert_jmp(bpf_op); -+ ctx->changes += !CONVERTED(*descp); -+ *descp |= JIT_DESC_CONVERT; -+ } -+ -+done: -+ *jit_off = offset; -+ *jit_op = op; -+} -+ -+/* Prepare a PC-relative jump operation with immediate conditional */ -+void setup_jmp_i(struct jit_context *ctx, s32 imm, u8 width, -+ u8 bpf_op, s16 bpf_off, u8 *jit_op, s32 *jit_off) -+{ -+ bool always = false; -+ bool never = false; -+ -+ switch (bpf_op) { -+ case BPF_JEQ: -+ case BPF_JNE: -+ break; -+ case BPF_JSET: -+ case BPF_JLT: -+ never = imm == 0; -+ break; -+ case BPF_JGE: -+ always = imm == 0; -+ break; -+ case BPF_JGT: -+ never = (u32)imm == U32_MAX; -+ break; -+ case BPF_JLE: -+ always = (u32)imm == U32_MAX; -+ break; -+ case BPF_JSGT: -+ never = imm == S32_MAX && width == 32; -+ break; -+ case BPF_JSGE: -+ always = imm == S32_MIN && width == 32; -+ break; -+ case BPF_JSLT: -+ never = imm == S32_MIN && width == 32; -+ break; -+ case BPF_JSLE: -+ always = imm == S32_MAX && width == 32; -+ break; -+ } -+ -+ if (never) -+ bpf_op = JIT_JNOP; -+ if (always) -+ bpf_op = BPF_JA; -+ setup_jmp(ctx, bpf_op, bpf_off, jit_op, jit_off); -+} -+ -+/* Prepare a PC-relative jump operation with register conditional */ -+void setup_jmp_r(struct jit_context *ctx, bool same_reg, -+ u8 bpf_op, s16 bpf_off, u8 *jit_op, s32 *jit_off) -+{ -+ switch (bpf_op) { -+ case BPF_JSET: -+ break; -+ case BPF_JEQ: -+ case BPF_JGE: -+ case BPF_JLE: -+ case BPF_JSGE: -+ case BPF_JSLE: -+ if (same_reg) -+ bpf_op = BPF_JA; -+ break; -+ case BPF_JNE: -+ case BPF_JLT: -+ case BPF_JGT: -+ case BPF_JSGT: -+ case BPF_JSLT: -+ if (same_reg) -+ bpf_op = JIT_JNOP; -+ break; -+ } -+ setup_jmp(ctx, bpf_op, bpf_off, jit_op, jit_off); -+} -+ -+/* Finish a PC-relative jump operation */ -+int finish_jmp(struct jit_context *ctx, u8 jit_op, s16 bpf_off) -+{ -+ /* Emit conditional branch delay slot */ -+ if (jit_op != JIT_JNOP) -+ emit(ctx, nop); -+ /* -+ * Emit an absolute long jump with delay slot, -+ * 
if the PC-relative branch was converted. -+ */ -+ if (CONVERTED(ctx->descriptors[ctx->bpf_index])) { -+ int target = get_target(ctx, ctx->bpf_index + bpf_off + 1); -+ -+ if (target < 0) -+ return -1; -+ emit(ctx, j, target); -+ emit(ctx, nop); -+ } -+ return 0; -+} -+ -+/* Jump immediate (32-bit) */ -+void emit_jmp_i(struct jit_context *ctx, u8 dst, s32 imm, s32 off, u8 op) -+{ -+ switch (op) { -+ /* No-op, used internally for branch optimization */ -+ case JIT_JNOP: -+ break; -+ /* PC += off if dst & imm */ -+ case BPF_JSET: -+ emit(ctx, andi, MIPS_R_T9, dst, (u16)imm); -+ emit(ctx, bnez, MIPS_R_T9, off); -+ break; -+ /* PC += off if (dst & imm) == 0 (not in BPF, used for long jumps) */ -+ case JIT_JNSET: -+ emit(ctx, andi, MIPS_R_T9, dst, (u16)imm); -+ emit(ctx, beqz, MIPS_R_T9, off); -+ break; -+ /* PC += off if dst > imm */ -+ case BPF_JGT: -+ emit(ctx, sltiu, MIPS_R_T9, dst, imm + 1); -+ emit(ctx, beqz, MIPS_R_T9, off); -+ break; -+ /* PC += off if dst >= imm */ -+ case BPF_JGE: -+ emit(ctx, sltiu, MIPS_R_T9, dst, imm); -+ emit(ctx, beqz, MIPS_R_T9, off); -+ break; -+ /* PC += off if dst < imm */ -+ case BPF_JLT: -+ emit(ctx, sltiu, MIPS_R_T9, dst, imm); -+ emit(ctx, bnez, MIPS_R_T9, off); -+ break; -+ /* PC += off if dst <= imm */ -+ case BPF_JLE: -+ emit(ctx, sltiu, MIPS_R_T9, dst, imm + 1); -+ emit(ctx, bnez, MIPS_R_T9, off); -+ break; -+ /* PC += off if dst > imm (signed) */ -+ case BPF_JSGT: -+ emit(ctx, slti, MIPS_R_T9, dst, imm + 1); -+ emit(ctx, beqz, MIPS_R_T9, off); -+ break; -+ /* PC += off if dst >= imm (signed) */ -+ case BPF_JSGE: -+ emit(ctx, slti, MIPS_R_T9, dst, imm); -+ emit(ctx, beqz, MIPS_R_T9, off); -+ break; -+ /* PC += off if dst < imm (signed) */ -+ case BPF_JSLT: -+ emit(ctx, slti, MIPS_R_T9, dst, imm); -+ emit(ctx, bnez, MIPS_R_T9, off); -+ break; -+ /* PC += off if dst <= imm (signed) */ -+ case BPF_JSLE: -+ emit(ctx, slti, MIPS_R_T9, dst, imm + 1); -+ emit(ctx, bnez, MIPS_R_T9, off); -+ break; -+ } -+} -+ -+/* Jump register (32-bit) */ -+void emit_jmp_r(struct jit_context *ctx, u8 dst, u8 src, s32 off, u8 op) -+{ -+ switch (op) { -+ /* No-op, used internally for branch optimization */ -+ case JIT_JNOP: -+ break; -+ /* PC += off if dst == src */ -+ case BPF_JEQ: -+ emit(ctx, beq, dst, src, off); -+ break; -+ /* PC += off if dst != src */ -+ case BPF_JNE: -+ emit(ctx, bne, dst, src, off); -+ break; -+ /* PC += off if dst & src */ -+ case BPF_JSET: -+ emit(ctx, and, MIPS_R_T9, dst, src); -+ emit(ctx, bnez, MIPS_R_T9, off); -+ break; -+ /* PC += off if (dst & imm) == 0 (not in BPF, used for long jumps) */ -+ case JIT_JNSET: -+ emit(ctx, and, MIPS_R_T9, dst, src); -+ emit(ctx, beqz, MIPS_R_T9, off); -+ break; -+ /* PC += off if dst > src */ -+ case BPF_JGT: -+ emit(ctx, sltu, MIPS_R_T9, src, dst); -+ emit(ctx, bnez, MIPS_R_T9, off); -+ break; -+ /* PC += off if dst >= src */ -+ case BPF_JGE: -+ emit(ctx, sltu, MIPS_R_T9, dst, src); -+ emit(ctx, beqz, MIPS_R_T9, off); -+ break; -+ /* PC += off if dst < src */ -+ case BPF_JLT: -+ emit(ctx, sltu, MIPS_R_T9, dst, src); -+ emit(ctx, bnez, MIPS_R_T9, off); -+ break; -+ /* PC += off if dst <= src */ -+ case BPF_JLE: -+ emit(ctx, sltu, MIPS_R_T9, src, dst); -+ emit(ctx, beqz, MIPS_R_T9, off); -+ break; -+ /* PC += off if dst > src (signed) */ -+ case BPF_JSGT: -+ emit(ctx, slt, MIPS_R_T9, src, dst); -+ emit(ctx, bnez, MIPS_R_T9, off); -+ break; -+ /* PC += off if dst >= src (signed) */ -+ case BPF_JSGE: -+ emit(ctx, slt, MIPS_R_T9, dst, src); -+ emit(ctx, beqz, MIPS_R_T9, off); -+ break; -+ /* PC += off if dst < src 
(signed) */ -+ case BPF_JSLT: -+ emit(ctx, slt, MIPS_R_T9, dst, src); -+ emit(ctx, bnez, MIPS_R_T9, off); -+ break; -+ /* PC += off if dst <= src (signed) */ -+ case BPF_JSLE: -+ emit(ctx, slt, MIPS_R_T9, src, dst); -+ emit(ctx, beqz, MIPS_R_T9, off); -+ break; -+ } -+} -+ -+/* Jump always */ -+int emit_ja(struct jit_context *ctx, s16 off) -+{ -+ int target = get_target(ctx, ctx->bpf_index + off + 1); -+ -+ if (target < 0) -+ return -1; -+ emit(ctx, j, target); -+ emit(ctx, nop); -+ return 0; -+} -+ -+/* Jump to epilogue */ -+int emit_exit(struct jit_context *ctx) -+{ -+ int target = get_target(ctx, ctx->program->len); -+ -+ if (target < 0) -+ return -1; -+ emit(ctx, j, target); -+ emit(ctx, nop); -+ return 0; -+} -+ -+/* Build the program body from eBPF bytecode */ -+static int build_body(struct jit_context *ctx) -+{ -+ const struct bpf_prog *prog = ctx->program; -+ unsigned int i; -+ -+ ctx->stack_used = 0; -+ for (i = 0; i < prog->len; i++) { -+ const struct bpf_insn *insn = &prog->insnsi[i]; -+ u32 *descp = &ctx->descriptors[i]; -+ int ret; -+ -+ access_reg(ctx, insn->src_reg); -+ access_reg(ctx, insn->dst_reg); -+ -+ ctx->bpf_index = i; -+ if (ctx->target == NULL) { -+ ctx->changes += INDEX(*descp) != ctx->jit_index; -+ *descp &= JIT_DESC_CONVERT; -+ *descp |= ctx->jit_index; -+ } -+ -+ ret = build_insn(insn, ctx); -+ if (ret < 0) -+ return ret; -+ -+ if (ret > 0) { -+ i++; -+ if (ctx->target == NULL) -+ descp[1] = ctx->jit_index; -+ } -+ } -+ -+ /* Store the end offset, where the epilogue begins */ -+ ctx->descriptors[prog->len] = ctx->jit_index; -+ return 0; -+} -+ -+/* Set the branch conversion flag on all instructions */ -+static void set_convert_flag(struct jit_context *ctx, bool enable) -+{ -+ const struct bpf_prog *prog = ctx->program; -+ u32 flag = enable ? JIT_DESC_CONVERT : 0; -+ unsigned int i; -+ -+ for (i = 0; i <= prog->len; i++) -+ ctx->descriptors[i] = INDEX(ctx->descriptors[i]) | flag; -+} -+ -+static void jit_fill_hole(void *area, unsigned int size) -+{ -+ u32 *p; -+ -+ /* We are guaranteed to have aligned memory. */ -+ for (p = area; size >= sizeof(u32); size -= sizeof(u32)) -+ uasm_i_break(&p, BRK_BUG); /* Increments p */ -+} -+ -+bool bpf_jit_needs_zext(void) -+{ -+ return true; -+} -+ -+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) -+{ -+ struct bpf_prog *tmp, *orig_prog = prog; -+ struct bpf_binary_header *header = NULL; -+ struct jit_context ctx; -+ bool tmp_blinded = false; -+ unsigned int tmp_idx; -+ unsigned int image_size; -+ u8 *image_ptr; -+ int tries; -+ -+ /* -+ * If BPF JIT was not enabled then we must fall back to -+ * the interpreter. -+ */ -+ if (!prog->jit_requested) -+ return orig_prog; -+ /* -+ * If constant blinding was enabled and we failed during blinding -+ * then we must fall back to the interpreter. Otherwise, we save -+ * the new JITed code. -+ */ -+ tmp = bpf_jit_blind_constants(prog); -+ if (IS_ERR(tmp)) -+ return orig_prog; -+ if (tmp != prog) { -+ tmp_blinded = true; -+ prog = tmp; -+ } -+ -+ memset(&ctx, 0, sizeof(ctx)); -+ ctx.program = prog; -+ -+ /* -+ * Not able to allocate memory for descriptors[], then -+ * we must fall back to the interpreter -+ */ -+ ctx.descriptors = kcalloc(prog->len + 1, sizeof(*ctx.descriptors), -+ GFP_KERNEL); -+ if (ctx.descriptors == NULL) -+ goto out_err; -+ -+ /* First pass discovers used resources */ -+ if (build_body(&ctx) < 0) -+ goto out_err; -+ /* -+ * Second pass computes instruction offsets. 
-+ * If any PC-relative branches are out of range, a sequence of -+ * a PC-relative branch + a jump is generated, and we have to -+ * try again from the beginning to generate the new offsets. -+ * This is done until no additional conversions are necessary. -+ * The last two iterations are done with all branches being -+ * converted, to guarantee offset table convergence within a -+ * fixed number of iterations. -+ */ -+ ctx.jit_index = 0; -+ build_prologue(&ctx); -+ tmp_idx = ctx.jit_index; -+ -+ tries = JIT_MAX_ITERATIONS; -+ do { -+ ctx.jit_index = tmp_idx; -+ ctx.changes = 0; -+ if (tries == 2) -+ set_convert_flag(&ctx, true); -+ if (build_body(&ctx) < 0) -+ goto out_err; -+ } while (ctx.changes > 0 && --tries > 0); -+ -+ if (WARN_ONCE(ctx.changes > 0, "JIT offsets failed to converge")) -+ goto out_err; -+ -+ build_epilogue(&ctx, MIPS_R_RA); -+ -+ /* Now we know the size of the structure to make */ -+ image_size = sizeof(u32) * ctx.jit_index; -+ header = bpf_jit_binary_alloc(image_size, &image_ptr, -+ sizeof(u32), jit_fill_hole); -+ /* -+ * Not able to allocate memory for the structure then -+ * we must fall back to the interpretation -+ */ -+ if (header == NULL) -+ goto out_err; -+ -+ /* Actual pass to generate final JIT code */ -+ ctx.target = (u32 *)image_ptr; -+ ctx.jit_index = 0; -+ -+ /* -+ * If building the JITed code fails somehow, -+ * we fall back to the interpretation. -+ */ -+ build_prologue(&ctx); -+ if (build_body(&ctx) < 0) -+ goto out_err; -+ build_epilogue(&ctx, MIPS_R_RA); -+ -+ /* Populate line info meta data */ -+ set_convert_flag(&ctx, false); -+ bpf_prog_fill_jited_linfo(prog, &ctx.descriptors[1]); -+ -+ /* Set as read-only exec and flush instruction cache */ -+ bpf_jit_binary_lock_ro(header); -+ flush_icache_range((unsigned long)header, -+ (unsigned long)&ctx.target[ctx.jit_index]); -+ -+ if (bpf_jit_enable > 1) -+ bpf_jit_dump(prog->len, image_size, 2, ctx.target); -+ -+ prog->bpf_func = (void *)ctx.target; -+ prog->jited = 1; -+ prog->jited_len = image_size; -+ -+out: -+ if (tmp_blinded) -+ bpf_jit_prog_release_other(prog, prog == orig_prog ? -+ tmp : orig_prog); -+ kfree(ctx.descriptors); -+ return prog; -+ -+out_err: -+ prog = orig_prog; -+ if (header) -+ bpf_jit_binary_free(header); -+ goto out; -+} ---- /dev/null -+++ b/arch/mips/net/bpf_jit_comp.h -@@ -0,0 +1,211 @@ -+/* SPDX-License-Identifier: GPL-2.0-only */ -+/* -+ * Just-In-Time compiler for eBPF bytecode on 32-bit and 64-bit MIPS. -+ * -+ * Copyright (c) 2021 Anyfi Networks AB. -+ * Author: Johan Almbladh -+ * -+ * Based on code and ideas from -+ * Copyright (c) 2017 Cavium, Inc. 
-+ * Copyright (c) 2017 Shubham Bansal -+ * Copyright (c) 2011 Mircea Gherzan -+ */ -+ -+#ifndef _BPF_JIT_COMP_H -+#define _BPF_JIT_COMP_H -+ -+/* MIPS registers */ -+#define MIPS_R_ZERO 0 /* Const zero */ -+#define MIPS_R_AT 1 /* Asm temp */ -+#define MIPS_R_V0 2 /* Result */ -+#define MIPS_R_V1 3 /* Result */ -+#define MIPS_R_A0 4 /* Argument */ -+#define MIPS_R_A1 5 /* Argument */ -+#define MIPS_R_A2 6 /* Argument */ -+#define MIPS_R_A3 7 /* Argument */ -+#define MIPS_R_A4 8 /* Arg (n64) */ -+#define MIPS_R_A5 9 /* Arg (n64) */ -+#define MIPS_R_A6 10 /* Arg (n64) */ -+#define MIPS_R_A7 11 /* Arg (n64) */ -+#define MIPS_R_T0 8 /* Temp (o32) */ -+#define MIPS_R_T1 9 /* Temp (o32) */ -+#define MIPS_R_T2 10 /* Temp (o32) */ -+#define MIPS_R_T3 11 /* Temp (o32) */ -+#define MIPS_R_T4 12 /* Temporary */ -+#define MIPS_R_T5 13 /* Temporary */ -+#define MIPS_R_T6 14 /* Temporary */ -+#define MIPS_R_T7 15 /* Temporary */ -+#define MIPS_R_S0 16 /* Saved */ -+#define MIPS_R_S1 17 /* Saved */ -+#define MIPS_R_S2 18 /* Saved */ -+#define MIPS_R_S3 19 /* Saved */ -+#define MIPS_R_S4 20 /* Saved */ -+#define MIPS_R_S5 21 /* Saved */ -+#define MIPS_R_S6 22 /* Saved */ -+#define MIPS_R_S7 23 /* Saved */ -+#define MIPS_R_T8 24 /* Temporary */ -+#define MIPS_R_T9 25 /* Temporary */ -+/* MIPS_R_K0 26 Reserved */ -+/* MIPS_R_K1 27 Reserved */ -+#define MIPS_R_GP 28 /* Global ptr */ -+#define MIPS_R_SP 29 /* Stack ptr */ -+#define MIPS_R_FP 30 /* Frame ptr */ -+#define MIPS_R_RA 31 /* Return */ -+ -+/* -+ * Jump address mask for immediate jumps. The four most significant bits -+ * must be equal to PC. -+ */ -+#define MIPS_JMP_MASK 0x0fffffffUL -+ -+/* Maximum number of iterations in offset table computation */ -+#define JIT_MAX_ITERATIONS 8 -+ -+/* -+ * Jump pseudo-instructions used internally -+ * for branch conversion and branch optimization. -+ */ -+#define JIT_JNSET 0xe0 -+#define JIT_JNOP 0xf0 -+ -+/* Descriptor flag for PC-relative branch conversion */ -+#define JIT_DESC_CONVERT BIT(31) -+ -+/* JIT context for an eBPF program */ -+struct jit_context { -+ struct bpf_prog *program; /* The eBPF program being JITed */ -+ u32 *descriptors; /* eBPF to JITed CPU insn descriptors */ -+ u32 *target; /* JITed code buffer */ -+ u32 bpf_index; /* Index of current BPF program insn */ -+ u32 jit_index; /* Index of current JIT target insn */ -+ u32 changes; /* Number of PC-relative branch conv */ -+ u32 accessed; /* Bit mask of read eBPF registers */ -+ u32 clobbered; /* Bit mask of modified CPU registers */ -+ u32 stack_size; /* Total allocated stack size in bytes */ -+ u32 saved_size; /* Size of callee-saved registers */ -+ u32 stack_used; /* Stack size used for function calls */ -+}; -+ -+/* Emit the instruction if the JIT memory space has been allocated */ -+#define emit(ctx, func, ...) \ -+do { \ -+ if ((ctx)->target != NULL) { \ -+ u32 *p = &(ctx)->target[ctx->jit_index]; \ -+ uasm_i_##func(&p, ##__VA_ARGS__); \ -+ } \ -+ (ctx)->jit_index++; \ -+} while (0) -+ -+/* -+ * Mark a BPF register as accessed, it needs to be -+ * initialized by the program if expected, e.g. FP. -+ */ -+static inline void access_reg(struct jit_context *ctx, u8 reg) -+{ -+ ctx->accessed |= BIT(reg); -+} -+ -+/* -+ * Mark a CPU register as clobbered, it needs to be -+ * saved/restored by the program if callee-saved. -+ */ -+static inline void clobber_reg(struct jit_context *ctx, u8 reg) -+{ -+ ctx->clobbered |= BIT(reg); -+} -+ -+/* -+ * Push registers on the stack, starting at a given depth from the stack -+ * pointer and increasing. 
The next depth to be written is returned. -+ */ -+int push_regs(struct jit_context *ctx, u32 mask, u32 excl, int depth); -+ -+/* -+ * Pop registers from the stack, starting at a given depth from the stack -+ * pointer and increasing. The next depth to be read is returned. -+ */ -+int pop_regs(struct jit_context *ctx, u32 mask, u32 excl, int depth); -+ -+/* Compute the 28-bit jump target address from a BPF program location */ -+int get_target(struct jit_context *ctx, u32 loc); -+ -+/* Compute the PC-relative offset to relative BPF program offset */ -+int get_offset(const struct jit_context *ctx, int off); -+ -+/* dst = imm (32-bit) */ -+void emit_mov_i(struct jit_context *ctx, u8 dst, s32 imm); -+ -+/* dst = src (32-bit) */ -+void emit_mov_r(struct jit_context *ctx, u8 dst, u8 src); -+ -+/* Validate ALU/ALU64 immediate range */ -+bool valid_alu_i(u8 op, s32 imm); -+ -+/* Rewrite ALU/ALU64 immediate operation */ -+bool rewrite_alu_i(u8 op, s32 imm, u8 *alu, s32 *val); -+ -+/* ALU immediate operation (32-bit) */ -+void emit_alu_i(struct jit_context *ctx, u8 dst, s32 imm, u8 op); -+ -+/* ALU register operation (32-bit) */ -+void emit_alu_r(struct jit_context *ctx, u8 dst, u8 src, u8 op); -+ -+/* Atomic read-modify-write (32-bit) */ -+void emit_atomic_r(struct jit_context *ctx, u8 dst, u8 src, s16 off, u8 code); -+ -+/* Atomic compare-and-exchange (32-bit) */ -+void emit_cmpxchg_r(struct jit_context *ctx, u8 dst, u8 src, u8 res, s16 off); -+ -+/* Swap bytes and truncate a register word or half word */ -+void emit_bswap_r(struct jit_context *ctx, u8 dst, u32 width); -+ -+/* Validate JMP/JMP32 immediate range */ -+bool valid_jmp_i(u8 op, s32 imm); -+ -+/* Prepare a PC-relative jump operation with immediate conditional */ -+void setup_jmp_i(struct jit_context *ctx, s32 imm, u8 width, -+ u8 bpf_op, s16 bpf_off, u8 *jit_op, s32 *jit_off); -+ -+/* Prepare a PC-relative jump operation with register conditional */ -+void setup_jmp_r(struct jit_context *ctx, bool same_reg, -+ u8 bpf_op, s16 bpf_off, u8 *jit_op, s32 *jit_off); -+ -+/* Finish a PC-relative jump operation */ -+int finish_jmp(struct jit_context *ctx, u8 jit_op, s16 bpf_off); -+ -+/* Conditional JMP/JMP32 immediate */ -+void emit_jmp_i(struct jit_context *ctx, u8 dst, s32 imm, s32 off, u8 op); -+ -+/* Conditional JMP/JMP32 register */ -+void emit_jmp_r(struct jit_context *ctx, u8 dst, u8 src, s32 off, u8 op); -+ -+/* Jump always */ -+int emit_ja(struct jit_context *ctx, s16 off); -+ -+/* Jump to epilogue */ -+int emit_exit(struct jit_context *ctx); -+ -+/* -+ * Build program prologue to set up the stack and registers. -+ * This function is implemented separately for 32-bit and 64-bit JITs. -+ */ -+void build_prologue(struct jit_context *ctx); -+ -+/* -+ * Build the program epilogue to restore the stack and registers. -+ * This function is implemented separately for 32-bit and 64-bit JITs. -+ */ -+void build_epilogue(struct jit_context *ctx, int dest_reg); -+ -+/* -+ * Convert an eBPF instruction to native instruction, i.e -+ * JITs an eBPF instruction. -+ * Returns : -+ * 0 - Successfully JITed an 8-byte eBPF instruction -+ * >0 - Successfully JITed a 16-byte eBPF instruction -+ * <0 - Failed to JIT. -+ * This function is implemented separately for 32-bit and 64-bit JITs. 
-+ */ -+int build_insn(const struct bpf_insn *insn, struct jit_context *ctx); -+ -+#endif /* _BPF_JIT_COMP_H */ ---- /dev/null -+++ b/arch/mips/net/bpf_jit_comp32.c -@@ -0,0 +1,1741 @@ -+// SPDX-License-Identifier: GPL-2.0-only -+/* -+ * Just-In-Time compiler for eBPF bytecode on MIPS. -+ * Implementation of JIT functions for 32-bit CPUs. -+ * -+ * Copyright (c) 2021 Anyfi Networks AB. -+ * Author: Johan Almbladh -+ * -+ * Based on code and ideas from -+ * Copyright (c) 2017 Cavium, Inc. -+ * Copyright (c) 2017 Shubham Bansal -+ * Copyright (c) 2011 Mircea Gherzan -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "bpf_jit_comp.h" -+ -+/* MIPS a4-a7 are not available in the o32 ABI */ -+#undef MIPS_R_A4 -+#undef MIPS_R_A5 -+#undef MIPS_R_A6 -+#undef MIPS_R_A7 -+ -+/* Stack is 8-byte aligned in o32 ABI */ -+#define MIPS_STACK_ALIGNMENT 8 -+ -+/* -+ * The top 16 bytes of a stack frame is reserved for the callee in O32 ABI. -+ * This corresponds to stack space for register arguments a0-a3. -+ */ -+#define JIT_RESERVED_STACK 16 -+ -+/* Temporary 64-bit register used by JIT */ -+#define JIT_REG_TMP MAX_BPF_JIT_REG -+ -+/* -+ * Number of prologue bytes to skip when doing a tail call. -+ * Tail call count (TCC) initialization (8 bytes) always, plus -+ * R0-to-v0 assignment (4 bytes) if big endian. -+ */ -+#ifdef __BIG_ENDIAN -+#define JIT_TCALL_SKIP 12 -+#else -+#define JIT_TCALL_SKIP 8 -+#endif -+ -+/* CPU registers holding the callee return value */ -+#define JIT_RETURN_REGS \ -+ (BIT(MIPS_R_V0) | \ -+ BIT(MIPS_R_V1)) -+ -+/* CPU registers arguments passed to callee directly */ -+#define JIT_ARG_REGS \ -+ (BIT(MIPS_R_A0) | \ -+ BIT(MIPS_R_A1) | \ -+ BIT(MIPS_R_A2) | \ -+ BIT(MIPS_R_A3)) -+ -+/* CPU register arguments passed to callee on stack */ -+#define JIT_STACK_REGS \ -+ (BIT(MIPS_R_T0) | \ -+ BIT(MIPS_R_T1) | \ -+ BIT(MIPS_R_T2) | \ -+ BIT(MIPS_R_T3) | \ -+ BIT(MIPS_R_T4) | \ -+ BIT(MIPS_R_T5)) -+ -+/* Caller-saved CPU registers */ -+#define JIT_CALLER_REGS \ -+ (JIT_RETURN_REGS | \ -+ JIT_ARG_REGS | \ -+ JIT_STACK_REGS) -+ -+/* Callee-saved CPU registers */ -+#define JIT_CALLEE_REGS \ -+ (BIT(MIPS_R_S0) | \ -+ BIT(MIPS_R_S1) | \ -+ BIT(MIPS_R_S2) | \ -+ BIT(MIPS_R_S3) | \ -+ BIT(MIPS_R_S4) | \ -+ BIT(MIPS_R_S5) | \ -+ BIT(MIPS_R_S6) | \ -+ BIT(MIPS_R_S7) | \ -+ BIT(MIPS_R_GP) | \ -+ BIT(MIPS_R_FP) | \ -+ BIT(MIPS_R_RA)) -+ -+/* -+ * Mapping of 64-bit eBPF registers to 32-bit native MIPS registers. -+ * -+ * 1) Native register pairs are ordered according to CPU endiannes, following -+ * the MIPS convention for passing 64-bit arguments and return values. -+ * 2) The eBPF return value, arguments and callee-saved registers are mapped -+ * to their native MIPS equivalents. -+ * 3) Since the 32 highest bits in the eBPF FP register are always zero, -+ * only one general-purpose register is actually needed for the mapping. -+ * We use the fp register for this purpose, and map the highest bits to -+ * the MIPS register r0 (zero). -+ * 4) We use the MIPS gp and at registers as internal temporary registers -+ * for constant blinding. The gp register is callee-saved. -+ * 5) One 64-bit temporary register is mapped for use when sign-extending -+ * immediate operands. MIPS registers t6-t9 are available to the JIT -+ * for as temporaries when implementing complex 64-bit operations. -+ * -+ * With this scheme all eBPF registers are being mapped to native MIPS -+ * registers without having to use any stack scratch space. 
The direct -+ * register mapping (2) simplifies the handling of function calls. -+ */ -+static const u8 bpf2mips32[][2] = { -+ /* Return value from in-kernel function, and exit value from eBPF */ -+ [BPF_REG_0] = {MIPS_R_V1, MIPS_R_V0}, -+ /* Arguments from eBPF program to in-kernel function */ -+ [BPF_REG_1] = {MIPS_R_A1, MIPS_R_A0}, -+ [BPF_REG_2] = {MIPS_R_A3, MIPS_R_A2}, -+ /* Remaining arguments, to be passed on the stack per O32 ABI */ -+ [BPF_REG_3] = {MIPS_R_T1, MIPS_R_T0}, -+ [BPF_REG_4] = {MIPS_R_T3, MIPS_R_T2}, -+ [BPF_REG_5] = {MIPS_R_T5, MIPS_R_T4}, -+ /* Callee-saved registers that in-kernel function will preserve */ -+ [BPF_REG_6] = {MIPS_R_S1, MIPS_R_S0}, -+ [BPF_REG_7] = {MIPS_R_S3, MIPS_R_S2}, -+ [BPF_REG_8] = {MIPS_R_S5, MIPS_R_S4}, -+ [BPF_REG_9] = {MIPS_R_S7, MIPS_R_S6}, -+ /* Read-only frame pointer to access the eBPF stack */ -+#ifdef __BIG_ENDIAN -+ [BPF_REG_FP] = {MIPS_R_FP, MIPS_R_ZERO}, -+#else -+ [BPF_REG_FP] = {MIPS_R_ZERO, MIPS_R_FP}, -+#endif -+ /* Temporary register for blinding constants */ -+ [BPF_REG_AX] = {MIPS_R_GP, MIPS_R_AT}, -+ /* Temporary register for internal JIT use */ -+ [JIT_REG_TMP] = {MIPS_R_T7, MIPS_R_T6}, -+}; -+ -+/* Get low CPU register for a 64-bit eBPF register mapping */ -+static inline u8 lo(const u8 reg[]) -+{ -+#ifdef __BIG_ENDIAN -+ return reg[0]; -+#else -+ return reg[1]; -+#endif -+} -+ -+/* Get high CPU register for a 64-bit eBPF register mapping */ -+static inline u8 hi(const u8 reg[]) -+{ -+#ifdef __BIG_ENDIAN -+ return reg[1]; -+#else -+ return reg[0]; -+#endif -+} -+ -+/* -+ * Mark a 64-bit CPU register pair as clobbered, it needs to be -+ * saved/restored by the program if callee-saved. -+ */ -+static void clobber_reg64(struct jit_context *ctx, const u8 reg[]) -+{ -+ clobber_reg(ctx, reg[0]); -+ clobber_reg(ctx, reg[1]); -+} -+ -+/* dst = imm (sign-extended) */ -+static void emit_mov_se_i64(struct jit_context *ctx, const u8 dst[], s32 imm) -+{ -+ emit_mov_i(ctx, lo(dst), imm); -+ if (imm < 0) -+ emit(ctx, addiu, hi(dst), MIPS_R_ZERO, -1); -+ else -+ emit(ctx, move, hi(dst), MIPS_R_ZERO); -+ clobber_reg64(ctx, dst); -+} -+ -+/* Zero extension, if verifier does not do it for us */ -+static void emit_zext_ver(struct jit_context *ctx, const u8 dst[]) -+{ -+ if (!ctx->program->aux->verifier_zext) { -+ emit(ctx, move, hi(dst), MIPS_R_ZERO); -+ clobber_reg(ctx, hi(dst)); -+ } -+} -+ -+/* Load delay slot, if ISA mandates it */ -+static void emit_load_delay(struct jit_context *ctx) -+{ -+ if (!cpu_has_mips_2_3_4_5_r) -+ emit(ctx, nop); -+} -+ -+/* ALU immediate operation (64-bit) */ -+static void emit_alu_i64(struct jit_context *ctx, -+ const u8 dst[], s32 imm, u8 op) -+{ -+ u8 src = MIPS_R_T6; -+ -+ /* -+ * ADD/SUB with all but the max negative imm can be handled by -+ * inverting the operation and the imm value, saving one insn. 
-+ */ -+ if (imm > S32_MIN && imm < 0) -+ switch (op) { -+ case BPF_ADD: -+ op = BPF_SUB; -+ imm = -imm; -+ break; -+ case BPF_SUB: -+ op = BPF_ADD; -+ imm = -imm; -+ break; -+ } -+ -+ /* Move immediate to temporary register */ -+ emit_mov_i(ctx, src, imm); -+ -+ switch (op) { -+ /* dst = dst + imm */ -+ case BPF_ADD: -+ emit(ctx, addu, lo(dst), lo(dst), src); -+ emit(ctx, sltu, MIPS_R_T9, lo(dst), src); -+ emit(ctx, addu, hi(dst), hi(dst), MIPS_R_T9); -+ if (imm < 0) -+ emit(ctx, addiu, hi(dst), hi(dst), -1); -+ break; -+ /* dst = dst - imm */ -+ case BPF_SUB: -+ emit(ctx, sltu, MIPS_R_T9, lo(dst), src); -+ emit(ctx, subu, lo(dst), lo(dst), src); -+ emit(ctx, subu, hi(dst), hi(dst), MIPS_R_T9); -+ if (imm < 0) -+ emit(ctx, addiu, hi(dst), hi(dst), 1); -+ break; -+ /* dst = dst | imm */ -+ case BPF_OR: -+ emit(ctx, or, lo(dst), lo(dst), src); -+ if (imm < 0) -+ emit(ctx, addiu, hi(dst), MIPS_R_ZERO, -1); -+ break; -+ /* dst = dst & imm */ -+ case BPF_AND: -+ emit(ctx, and, lo(dst), lo(dst), src); -+ if (imm >= 0) -+ emit(ctx, move, hi(dst), MIPS_R_ZERO); -+ break; -+ /* dst = dst ^ imm */ -+ case BPF_XOR: -+ emit(ctx, xor, lo(dst), lo(dst), src); -+ if (imm < 0) { -+ emit(ctx, subu, hi(dst), MIPS_R_ZERO, hi(dst)); -+ emit(ctx, addiu, hi(dst), hi(dst), -1); -+ } -+ break; -+ } -+ clobber_reg64(ctx, dst); -+} -+ -+/* ALU register operation (64-bit) */ -+static void emit_alu_r64(struct jit_context *ctx, -+ const u8 dst[], const u8 src[], u8 op) -+{ -+ switch (BPF_OP(op)) { -+ /* dst = dst + src */ -+ case BPF_ADD: -+ if (src == dst) { -+ emit(ctx, srl, MIPS_R_T9, lo(dst), 31); -+ emit(ctx, addu, lo(dst), lo(dst), lo(dst)); -+ } else { -+ emit(ctx, addu, lo(dst), lo(dst), lo(src)); -+ emit(ctx, sltu, MIPS_R_T9, lo(dst), lo(src)); -+ } -+ emit(ctx, addu, hi(dst), hi(dst), hi(src)); -+ emit(ctx, addu, hi(dst), hi(dst), MIPS_R_T9); -+ break; -+ /* dst = dst - src */ -+ case BPF_SUB: -+ emit(ctx, sltu, MIPS_R_T9, lo(dst), lo(src)); -+ emit(ctx, subu, lo(dst), lo(dst), lo(src)); -+ emit(ctx, subu, hi(dst), hi(dst), hi(src)); -+ emit(ctx, subu, hi(dst), hi(dst), MIPS_R_T9); -+ break; -+ /* dst = dst | src */ -+ case BPF_OR: -+ emit(ctx, or, lo(dst), lo(dst), lo(src)); -+ emit(ctx, or, hi(dst), hi(dst), hi(src)); -+ break; -+ /* dst = dst & src */ -+ case BPF_AND: -+ emit(ctx, and, lo(dst), lo(dst), lo(src)); -+ emit(ctx, and, hi(dst), hi(dst), hi(src)); -+ break; -+ /* dst = dst ^ src */ -+ case BPF_XOR: -+ emit(ctx, xor, lo(dst), lo(dst), lo(src)); -+ emit(ctx, xor, hi(dst), hi(dst), hi(src)); -+ break; -+ } -+ clobber_reg64(ctx, dst); -+} -+ -+/* ALU invert (64-bit) */ -+static void emit_neg_i64(struct jit_context *ctx, const u8 dst[]) -+{ -+ emit(ctx, sltu, MIPS_R_T9, MIPS_R_ZERO, lo(dst)); -+ emit(ctx, subu, lo(dst), MIPS_R_ZERO, lo(dst)); -+ emit(ctx, subu, hi(dst), MIPS_R_ZERO, hi(dst)); -+ emit(ctx, subu, hi(dst), hi(dst), MIPS_R_T9); -+ -+ clobber_reg64(ctx, dst); -+} -+ -+/* ALU shift immediate (64-bit) */ -+static void emit_shift_i64(struct jit_context *ctx, -+ const u8 dst[], u32 imm, u8 op) -+{ -+ switch (BPF_OP(op)) { -+ /* dst = dst << imm */ -+ case BPF_LSH: -+ if (imm < 32) { -+ emit(ctx, srl, MIPS_R_T9, lo(dst), 32 - imm); -+ emit(ctx, sll, lo(dst), lo(dst), imm); -+ emit(ctx, sll, hi(dst), hi(dst), imm); -+ emit(ctx, or, hi(dst), hi(dst), MIPS_R_T9); -+ } else { -+ emit(ctx, sll, hi(dst), lo(dst), imm - 32); -+ emit(ctx, move, lo(dst), MIPS_R_ZERO); -+ } -+ break; -+ /* dst = dst >> imm */ -+ case BPF_RSH: -+ if (imm < 32) { -+ emit(ctx, sll, MIPS_R_T9, hi(dst), 32 - imm); -+ 
emit(ctx, srl, lo(dst), lo(dst), imm); -+ emit(ctx, srl, hi(dst), hi(dst), imm); -+ emit(ctx, or, lo(dst), lo(dst), MIPS_R_T9); -+ } else { -+ emit(ctx, srl, lo(dst), hi(dst), imm - 32); -+ emit(ctx, move, hi(dst), MIPS_R_ZERO); -+ } -+ break; -+ /* dst = dst >> imm (arithmetic) */ -+ case BPF_ARSH: -+ if (imm < 32) { -+ emit(ctx, sll, MIPS_R_T9, hi(dst), 32 - imm); -+ emit(ctx, srl, lo(dst), lo(dst), imm); -+ emit(ctx, sra, hi(dst), hi(dst), imm); -+ emit(ctx, or, lo(dst), lo(dst), MIPS_R_T9); -+ } else { -+ emit(ctx, sra, lo(dst), hi(dst), imm - 32); -+ emit(ctx, sra, hi(dst), hi(dst), 31); -+ } -+ break; -+ } -+ clobber_reg64(ctx, dst); -+} -+ -+/* ALU shift register (64-bit) */ -+static void emit_shift_r64(struct jit_context *ctx, -+ const u8 dst[], u8 src, u8 op) -+{ -+ u8 t1 = MIPS_R_T8; -+ u8 t2 = MIPS_R_T9; -+ -+ emit(ctx, andi, t1, src, 32); /* t1 = src & 32 */ -+ emit(ctx, beqz, t1, 16); /* PC += 16 if t1 == 0 */ -+ emit(ctx, nor, t2, src, MIPS_R_ZERO); /* t2 = ~src (delay slot) */ -+ -+ switch (BPF_OP(op)) { -+ /* dst = dst << src */ -+ case BPF_LSH: -+ /* Next: shift >= 32 */ -+ emit(ctx, sllv, hi(dst), lo(dst), src); /* dh = dl << src */ -+ emit(ctx, move, lo(dst), MIPS_R_ZERO); /* dl = 0 */ -+ emit(ctx, b, 20); /* PC += 20 */ -+ /* +16: shift < 32 */ -+ emit(ctx, srl, t1, lo(dst), 1); /* t1 = dl >> 1 */ -+ emit(ctx, srlv, t1, t1, t2); /* t1 = t1 >> t2 */ -+ emit(ctx, sllv, lo(dst), lo(dst), src); /* dl = dl << src */ -+ emit(ctx, sllv, hi(dst), hi(dst), src); /* dh = dh << src */ -+ emit(ctx, or, hi(dst), hi(dst), t1); /* dh = dh | t1 */ -+ break; -+ /* dst = dst >> src */ -+ case BPF_RSH: -+ /* Next: shift >= 32 */ -+ emit(ctx, srlv, lo(dst), hi(dst), src); /* dl = dh >> src */ -+ emit(ctx, move, hi(dst), MIPS_R_ZERO); /* dh = 0 */ -+ emit(ctx, b, 20); /* PC += 20 */ -+ /* +16: shift < 32 */ -+ emit(ctx, sll, t1, hi(dst), 1); /* t1 = dl << 1 */ -+ emit(ctx, sllv, t1, t1, t2); /* t1 = t1 << t2 */ -+ emit(ctx, srlv, lo(dst), lo(dst), src); /* dl = dl >> src */ -+ emit(ctx, srlv, hi(dst), hi(dst), src); /* dh = dh >> src */ -+ emit(ctx, or, lo(dst), lo(dst), t1); /* dl = dl | t1 */ -+ break; -+ /* dst = dst >> src (arithmetic) */ -+ case BPF_ARSH: -+ /* Next: shift >= 32 */ -+ emit(ctx, srav, lo(dst), hi(dst), src); /* dl = dh >>a src */ -+ emit(ctx, sra, hi(dst), hi(dst), 31); /* dh = dh >>a 31 */ -+ emit(ctx, b, 20); /* PC += 20 */ -+ /* +16: shift < 32 */ -+ emit(ctx, sll, t1, hi(dst), 1); /* t1 = dl << 1 */ -+ emit(ctx, sllv, t1, t1, t2); /* t1 = t1 << t2 */ -+ emit(ctx, srlv, lo(dst), lo(dst), src); /* dl = dl >>a src */ -+ emit(ctx, srav, hi(dst), hi(dst), src); /* dh = dh >> src */ -+ emit(ctx, or, lo(dst), lo(dst), t1); /* dl = dl | t1 */ -+ break; -+ } -+ -+ /* +20: Done */ -+ clobber_reg64(ctx, dst); -+} -+ -+/* ALU mul immediate (64x32-bit) */ -+static void emit_mul_i64(struct jit_context *ctx, const u8 dst[], s32 imm) -+{ -+ u8 src = MIPS_R_T6; -+ u8 tmp = MIPS_R_T9; -+ -+ switch (imm) { -+ /* dst = dst * 1 is a no-op */ -+ case 1: -+ break; -+ /* dst = dst * -1 */ -+ case -1: -+ emit_neg_i64(ctx, dst); -+ break; -+ case 0: -+ emit_mov_r(ctx, lo(dst), MIPS_R_ZERO); -+ emit_mov_r(ctx, hi(dst), MIPS_R_ZERO); -+ break; -+ /* Full 64x32 multiply */ -+ default: -+ /* hi(dst) = hi(dst) * src(imm) */ -+ emit_mov_i(ctx, src, imm); -+ if (cpu_has_mips32r1 || cpu_has_mips32r6) { -+ emit(ctx, mul, hi(dst), hi(dst), src); -+ } else { -+ emit(ctx, multu, hi(dst), src); -+ emit(ctx, mflo, hi(dst)); -+ } -+ -+ /* hi(dst) = hi(dst) - lo(dst) */ -+ if (imm < 0) -+ emit(ctx, subu, 
hi(dst), hi(dst), lo(dst)); -+ -+ /* tmp = lo(dst) * src(imm) >> 32 */ -+ /* lo(dst) = lo(dst) * src(imm) */ -+ if (cpu_has_mips32r6) { -+ emit(ctx, muhu, tmp, lo(dst), src); -+ emit(ctx, mulu, lo(dst), lo(dst), src); -+ } else { -+ emit(ctx, multu, lo(dst), src); -+ emit(ctx, mflo, lo(dst)); -+ emit(ctx, mfhi, tmp); -+ } -+ -+ /* hi(dst) += tmp */ -+ emit(ctx, addu, hi(dst), hi(dst), tmp); -+ clobber_reg64(ctx, dst); -+ break; -+ } -+} -+ -+/* ALU mul register (64x64-bit) */ -+static void emit_mul_r64(struct jit_context *ctx, -+ const u8 dst[], const u8 src[]) -+{ -+ u8 acc = MIPS_R_T8; -+ u8 tmp = MIPS_R_T9; -+ -+ /* acc = hi(dst) * lo(src) */ -+ if (cpu_has_mips32r1 || cpu_has_mips32r6) { -+ emit(ctx, mul, acc, hi(dst), lo(src)); -+ } else { -+ emit(ctx, multu, hi(dst), lo(src)); -+ emit(ctx, mflo, acc); -+ } -+ -+ /* tmp = lo(dst) * hi(src) */ -+ if (cpu_has_mips32r1 || cpu_has_mips32r6) { -+ emit(ctx, mul, tmp, lo(dst), hi(src)); -+ } else { -+ emit(ctx, multu, lo(dst), hi(src)); -+ emit(ctx, mflo, tmp); -+ } -+ -+ /* acc += tmp */ -+ emit(ctx, addu, acc, acc, tmp); -+ -+ /* tmp = lo(dst) * lo(src) >> 32 */ -+ /* lo(dst) = lo(dst) * lo(src) */ -+ if (cpu_has_mips32r6) { -+ emit(ctx, muhu, tmp, lo(dst), lo(src)); -+ emit(ctx, mulu, lo(dst), lo(dst), lo(src)); -+ } else { -+ emit(ctx, multu, lo(dst), lo(src)); -+ emit(ctx, mflo, lo(dst)); -+ emit(ctx, mfhi, tmp); -+ } -+ -+ /* hi(dst) = acc + tmp */ -+ emit(ctx, addu, hi(dst), acc, tmp); -+ clobber_reg64(ctx, dst); -+} -+ -+/* Helper function for 64-bit modulo */ -+static u64 jit_mod64(u64 a, u64 b) -+{ -+ u64 rem; -+ -+ div64_u64_rem(a, b, &rem); -+ return rem; -+} -+ -+/* ALU div/mod register (64-bit) */ -+static void emit_divmod_r64(struct jit_context *ctx, -+ const u8 dst[], const u8 src[], u8 op) -+{ -+ const u8 *r0 = bpf2mips32[BPF_REG_0]; /* Mapped to v0-v1 */ -+ const u8 *r1 = bpf2mips32[BPF_REG_1]; /* Mapped to a0-a1 */ -+ const u8 *r2 = bpf2mips32[BPF_REG_2]; /* Mapped to a2-a3 */ -+ int exclude, k; -+ u32 addr = 0; -+ -+ /* Push caller-saved registers on stack */ -+ push_regs(ctx, ctx->clobbered & JIT_CALLER_REGS, -+ 0, JIT_RESERVED_STACK); -+ -+ /* Put 64-bit arguments 1 and 2 in registers a0-a3 */ -+ for (k = 0; k < 2; k++) { -+ emit(ctx, move, MIPS_R_T9, src[k]); -+ emit(ctx, move, r1[k], dst[k]); -+ emit(ctx, move, r2[k], MIPS_R_T9); -+ } -+ -+ /* Emit function call */ -+ switch (BPF_OP(op)) { -+ /* dst = dst / src */ -+ case BPF_DIV: -+ addr = (u32)&div64_u64; -+ break; -+ /* dst = dst % src */ -+ case BPF_MOD: -+ addr = (u32)&jit_mod64; -+ break; -+ } -+ emit_mov_i(ctx, MIPS_R_T9, addr); -+ emit(ctx, jalr, MIPS_R_RA, MIPS_R_T9); -+ emit(ctx, nop); /* Delay slot */ -+ -+ /* Store the 64-bit result in dst */ -+ emit(ctx, move, dst[0], r0[0]); -+ emit(ctx, move, dst[1], r0[1]); -+ -+ /* Restore caller-saved registers, excluding the computed result */ -+ exclude = BIT(lo(dst)) | BIT(hi(dst)); -+ pop_regs(ctx, ctx->clobbered & JIT_CALLER_REGS, -+ exclude, JIT_RESERVED_STACK); -+ emit_load_delay(ctx); -+ -+ clobber_reg64(ctx, dst); -+ clobber_reg(ctx, MIPS_R_V0); -+ clobber_reg(ctx, MIPS_R_V1); -+ clobber_reg(ctx, MIPS_R_RA); -+} -+ -+/* Swap bytes in a register word */ -+static void emit_swap8_r(struct jit_context *ctx, u8 dst, u8 src, u8 mask) -+{ -+ u8 tmp = MIPS_R_T9; -+ -+ emit(ctx, and, tmp, src, mask); /* tmp = src & 0x00ff00ff */ -+ emit(ctx, sll, tmp, tmp, 8); /* tmp = tmp << 8 */ -+ emit(ctx, srl, dst, src, 8); /* dst = src >> 8 */ -+ emit(ctx, and, dst, dst, mask); /* dst = dst & 0x00ff00ff */ -+ emit(ctx, or, dst, 
dst, tmp); /* dst = dst | tmp */ -+} -+ -+/* Swap half words in a register word */ -+static void emit_swap16_r(struct jit_context *ctx, u8 dst, u8 src) -+{ -+ u8 tmp = MIPS_R_T9; -+ -+ emit(ctx, sll, tmp, src, 16); /* tmp = src << 16 */ -+ emit(ctx, srl, dst, src, 16); /* dst = src >> 16 */ -+ emit(ctx, or, dst, dst, tmp); /* dst = dst | tmp */ -+} -+ -+/* Swap bytes and truncate a register double word, word or half word */ -+static void emit_bswap_r64(struct jit_context *ctx, const u8 dst[], u32 width) -+{ -+ u8 tmp = MIPS_R_T8; -+ -+ switch (width) { -+ /* Swap bytes in a double word */ -+ case 64: -+ if (cpu_has_mips32r2 || cpu_has_mips32r6) { -+ emit(ctx, rotr, tmp, hi(dst), 16); -+ emit(ctx, rotr, hi(dst), lo(dst), 16); -+ emit(ctx, wsbh, lo(dst), tmp); -+ emit(ctx, wsbh, hi(dst), hi(dst)); -+ } else { -+ emit_swap16_r(ctx, tmp, lo(dst)); -+ emit_swap16_r(ctx, lo(dst), hi(dst)); -+ emit(ctx, move, hi(dst), tmp); -+ -+ emit(ctx, lui, tmp, 0xff); /* tmp = 0x00ff0000 */ -+ emit(ctx, ori, tmp, tmp, 0xff); /* tmp = 0x00ff00ff */ -+ emit_swap8_r(ctx, lo(dst), lo(dst), tmp); -+ emit_swap8_r(ctx, hi(dst), hi(dst), tmp); -+ } -+ break; -+ /* Swap bytes in a word */ -+ /* Swap bytes in a half word */ -+ case 32: -+ case 16: -+ emit_bswap_r(ctx, lo(dst), width); -+ emit(ctx, move, hi(dst), MIPS_R_ZERO); -+ break; -+ } -+ clobber_reg64(ctx, dst); -+} -+ -+/* Truncate a register double word, word or half word */ -+static void emit_trunc_r64(struct jit_context *ctx, const u8 dst[], u32 width) -+{ -+ switch (width) { -+ case 64: -+ break; -+ /* Zero-extend a word */ -+ case 32: -+ emit(ctx, move, hi(dst), MIPS_R_ZERO); -+ clobber_reg(ctx, hi(dst)); -+ break; -+ /* Zero-extend a half word */ -+ case 16: -+ emit(ctx, move, hi(dst), MIPS_R_ZERO); -+ emit(ctx, andi, lo(dst), lo(dst), 0xffff); -+ clobber_reg64(ctx, dst); -+ break; -+ } -+} -+ -+/* Load operation: dst = *(size*)(src + off) */ -+static void emit_ldx(struct jit_context *ctx, -+ const u8 dst[], u8 src, s16 off, u8 size) -+{ -+ switch (size) { -+ /* Load a byte */ -+ case BPF_B: -+ emit(ctx, lbu, lo(dst), off, src); -+ emit(ctx, move, hi(dst), MIPS_R_ZERO); -+ break; -+ /* Load a half word */ -+ case BPF_H: -+ emit(ctx, lhu, lo(dst), off, src); -+ emit(ctx, move, hi(dst), MIPS_R_ZERO); -+ break; -+ /* Load a word */ -+ case BPF_W: -+ emit(ctx, lw, lo(dst), off, src); -+ emit(ctx, move, hi(dst), MIPS_R_ZERO); -+ break; -+ /* Load a double word */ -+ case BPF_DW: -+ if (dst[1] == src) { -+ emit(ctx, lw, dst[0], off + 4, src); -+ emit(ctx, lw, dst[1], off, src); -+ } else { -+ emit(ctx, lw, dst[1], off, src); -+ emit(ctx, lw, dst[0], off + 4, src); -+ } -+ emit_load_delay(ctx); -+ break; -+ } -+ clobber_reg64(ctx, dst); -+} -+ -+/* Store operation: *(size *)(dst + off) = src */ -+static void emit_stx(struct jit_context *ctx, -+ const u8 dst, const u8 src[], s16 off, u8 size) -+{ -+ switch (size) { -+ /* Store a byte */ -+ case BPF_B: -+ emit(ctx, sb, lo(src), off, dst); -+ break; -+ /* Store a half word */ -+ case BPF_H: -+ emit(ctx, sh, lo(src), off, dst); -+ break; -+ /* Store a word */ -+ case BPF_W: -+ emit(ctx, sw, lo(src), off, dst); -+ break; -+ /* Store a double word */ -+ case BPF_DW: -+ emit(ctx, sw, src[1], off, dst); -+ emit(ctx, sw, src[0], off + 4, dst); -+ break; -+ } -+} -+ -+/* Atomic read-modify-write (32-bit, non-ll/sc fallback) */ -+static void emit_atomic_r32(struct jit_context *ctx, -+ u8 dst, u8 src, s16 off, u8 code) -+{ -+ u32 exclude = 0; -+ u32 addr = 0; -+ -+ /* Push caller-saved registers on stack */ -+ 
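/*
 * [Editor's aside; illustrative only, not part of the deleted patch.]
 * emit_swap8_r/emit_swap16_r above perform byte swapping with only
 * and/shift/or, for cores that lack wsbh/rotr. A C model of the same
 * mask-and-shift trick; names are the editor's:
 */
#include <stdint.h>

/* Swap adjacent groups of 'bits' bits selected by 'mask' */
static uint32_t swap_groups(uint32_t val, uint32_t mask, unsigned int bits)
{
	return ((val & mask) << bits) | ((val >> bits) & mask);
}

static uint32_t bswap32_model(uint32_t val)
{
	val = swap_groups(val, 0x0000ffff, 16); /* swap half words */
	val = swap_groups(val, 0x00ff00ff, 8);  /* swap bytes within each half */
	return val;                             /* 0x12345678 -> 0x78563412 */
}

/*
 * For the 64-bit (BPF_DW) case, the fallback path above additionally
 * exchanges the two 32-bit words before swapping bytes within each word.
 */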
push_regs(ctx, ctx->clobbered & JIT_CALLER_REGS, -+ 0, JIT_RESERVED_STACK); -+ /* -+ * Argument 1: dst+off if xchg, otherwise src, passed in register a0 -+ * Argument 2: src if xchg, othersize dst+off, passed in register a1 -+ */ -+ emit(ctx, move, MIPS_R_T9, dst); -+ emit(ctx, move, MIPS_R_A0, src); -+ emit(ctx, addiu, MIPS_R_A1, MIPS_R_T9, off); -+ -+ /* Emit function call */ -+ switch (code) { -+ case BPF_ADD: -+ addr = (u32)&atomic_add; -+ break; -+ case BPF_SUB: -+ addr = (u32)&atomic_sub; -+ break; -+ case BPF_OR: -+ addr = (u32)&atomic_or; -+ break; -+ case BPF_AND: -+ addr = (u32)&atomic_and; -+ break; -+ case BPF_XOR: -+ addr = (u32)&atomic_xor; -+ break; -+ } -+ emit_mov_i(ctx, MIPS_R_T9, addr); -+ emit(ctx, jalr, MIPS_R_RA, MIPS_R_T9); -+ emit(ctx, nop); /* Delay slot */ -+ -+ /* Restore caller-saved registers, except any fetched value */ -+ pop_regs(ctx, ctx->clobbered & JIT_CALLER_REGS, -+ exclude, JIT_RESERVED_STACK); -+ emit_load_delay(ctx); -+ clobber_reg(ctx, MIPS_R_RA); -+} -+ -+/* Atomic read-modify-write (64-bit) */ -+static void emit_atomic_r64(struct jit_context *ctx, -+ u8 dst, const u8 src[], s16 off, u8 code) -+{ -+ const u8 *r1 = bpf2mips32[BPF_REG_1]; /* Mapped to a0-a1 */ -+ u32 exclude = 0; -+ u32 addr = 0; -+ -+ /* Push caller-saved registers on stack */ -+ push_regs(ctx, ctx->clobbered & JIT_CALLER_REGS, -+ 0, JIT_RESERVED_STACK); -+ /* -+ * Argument 1: 64-bit src, passed in registers a0-a1 -+ * Argument 2: 32-bit dst+off, passed in register a2 -+ */ -+ emit(ctx, move, MIPS_R_T9, dst); -+ emit(ctx, move, r1[0], src[0]); -+ emit(ctx, move, r1[1], src[1]); -+ emit(ctx, addiu, MIPS_R_A2, MIPS_R_T9, off); -+ -+ /* Emit function call */ -+ switch (code) { -+ case BPF_ADD: -+ addr = (u32)&atomic64_add; -+ break; -+ case BPF_SUB: -+ addr = (u32)&atomic64_sub; -+ break; -+ case BPF_OR: -+ addr = (u32)&atomic64_or; -+ break; -+ case BPF_AND: -+ addr = (u32)&atomic64_and; -+ break; -+ case BPF_XOR: -+ addr = (u32)&atomic64_xor; -+ break; -+ } -+ emit_mov_i(ctx, MIPS_R_T9, addr); -+ emit(ctx, jalr, MIPS_R_RA, MIPS_R_T9); -+ emit(ctx, nop); /* Delay slot */ -+ -+ /* Restore caller-saved registers, except any fetched value */ -+ pop_regs(ctx, ctx->clobbered & JIT_CALLER_REGS, -+ exclude, JIT_RESERVED_STACK); -+ emit_load_delay(ctx); -+ clobber_reg(ctx, MIPS_R_RA); -+} -+ -+/* -+ * Conditional movz or an emulated equivalent. -+ * Note that the rs register may be modified. -+ */ -+static void emit_movz_r(struct jit_context *ctx, u8 rd, u8 rs, u8 rt) -+{ -+ if (cpu_has_mips_2) { -+ emit(ctx, movz, rd, rs, rt); /* rd = rt ? rd : rs */ -+ } else if (cpu_has_mips32r6) { -+ if (rs != MIPS_R_ZERO) -+ emit(ctx, seleqz, rs, rs, rt); /* rs = 0 if rt == 0 */ -+ emit(ctx, selnez, rd, rd, rt); /* rd = 0 if rt != 0 */ -+ if (rs != MIPS_R_ZERO) -+ emit(ctx, or, rd, rd, rs); /* rd = rd | rs */ -+ } else { -+ emit(ctx, bnez, rt, 8); /* PC += 8 if rd != 0 */ -+ emit(ctx, nop); /* +0: delay slot */ -+ emit(ctx, or, rd, rs, MIPS_R_ZERO); /* +4: rd = rs */ -+ } -+ clobber_reg(ctx, rd); -+ clobber_reg(ctx, rs); -+} -+ -+/* -+ * Conditional movn or an emulated equivalent. -+ * Note that the rs register may be modified. -+ */ -+static void emit_movn_r(struct jit_context *ctx, u8 rd, u8 rs, u8 rt) -+{ -+ if (cpu_has_mips_2) { -+ emit(ctx, movn, rd, rs, rt); /* rd = rt ? 
rs : rd */ -+ } else if (cpu_has_mips32r6) { -+ if (rs != MIPS_R_ZERO) -+ emit(ctx, selnez, rs, rs, rt); /* rs = 0 if rt == 0 */ -+ emit(ctx, seleqz, rd, rd, rt); /* rd = 0 if rt != 0 */ -+ if (rs != MIPS_R_ZERO) -+ emit(ctx, or, rd, rd, rs); /* rd = rd | rs */ -+ } else { -+ emit(ctx, beqz, rt, 8); /* PC += 8 if rd == 0 */ -+ emit(ctx, nop); /* +0: delay slot */ -+ emit(ctx, or, rd, rs, MIPS_R_ZERO); /* +4: rd = rs */ -+ } -+ clobber_reg(ctx, rd); -+ clobber_reg(ctx, rs); -+} -+ -+/* Emulation of 64-bit sltiu rd, rs, imm, where imm may be S32_MAX + 1 */ -+static void emit_sltiu_r64(struct jit_context *ctx, u8 rd, -+ const u8 rs[], s64 imm) -+{ -+ u8 tmp = MIPS_R_T9; -+ -+ if (imm < 0) { -+ emit_mov_i(ctx, rd, imm); /* rd = imm */ -+ emit(ctx, sltu, rd, lo(rs), rd); /* rd = rsl < rd */ -+ emit(ctx, sltiu, tmp, hi(rs), -1); /* tmp = rsh < ~0U */ -+ emit(ctx, or, rd, rd, tmp); /* rd = rd | tmp */ -+ } else { /* imm >= 0 */ -+ if (imm > 0x7fff) { -+ emit_mov_i(ctx, rd, (s32)imm); /* rd = imm */ -+ emit(ctx, sltu, rd, lo(rs), rd); /* rd = rsl < rd */ -+ } else { -+ emit(ctx, sltiu, rd, lo(rs), imm); /* rd = rsl < imm */ -+ } -+ emit_movn_r(ctx, rd, MIPS_R_ZERO, hi(rs)); /* rd = 0 if rsh */ -+ } -+} -+ -+/* Emulation of 64-bit sltu rd, rs, rt */ -+static void emit_sltu_r64(struct jit_context *ctx, u8 rd, -+ const u8 rs[], const u8 rt[]) -+{ -+ u8 tmp = MIPS_R_T9; -+ -+ emit(ctx, sltu, rd, lo(rs), lo(rt)); /* rd = rsl < rtl */ -+ emit(ctx, subu, tmp, hi(rs), hi(rt)); /* tmp = rsh - rth */ -+ emit_movn_r(ctx, rd, MIPS_R_ZERO, tmp); /* rd = 0 if tmp != 0 */ -+ emit(ctx, sltu, tmp, hi(rs), hi(rt)); /* tmp = rsh < rth */ -+ emit(ctx, or, rd, rd, tmp); /* rd = rd | tmp */ -+} -+ -+/* Emulation of 64-bit slti rd, rs, imm, where imm may be S32_MAX + 1 */ -+static void emit_slti_r64(struct jit_context *ctx, u8 rd, -+ const u8 rs[], s64 imm) -+{ -+ u8 t1 = MIPS_R_T8; -+ u8 t2 = MIPS_R_T9; -+ u8 cmp; -+ -+ /* -+ * if ((rs < 0) ^ (imm < 0)) t1 = imm >u rsl -+ * else t1 = rsl > 31 */ -+ if (imm < 0) -+ emit_movz_r(ctx, t1, t2, rd); /* t1 = rd ? t1 : t2 */ -+ else -+ emit_movn_r(ctx, t1, t2, rd); /* t1 = rd ? t2 : t1 */ -+ /* -+ * if ((imm < 0 && rsh != 0xffffffff) || -+ * (imm >= 0 && rsh != 0)) -+ * t1 = 0 -+ */ -+ if (imm < 0) { -+ emit(ctx, addiu, rd, hi(rs), 1); /* rd = rsh + 1 */ -+ cmp = rd; -+ } else { /* imm >= 0 */ -+ cmp = hi(rs); -+ } -+ emit_movn_r(ctx, t1, MIPS_R_ZERO, cmp); /* t1 = 0 if cmp != 0 */ -+ -+ /* -+ * if (imm < 0) rd = rsh < -1 -+ * else rd = rsh != 0 -+ * rd = rd | t1 -+ */ -+ emit(ctx, slti, rd, hi(rs), imm < 0 ? -1 : 0); /* rd = rsh < hi(imm) */ -+ emit(ctx, or, rd, rd, t1); /* rd = rd | t1 */ -+} -+ -+/* Emulation of 64-bit(slt rd, rs, rt) */ -+static void emit_slt_r64(struct jit_context *ctx, u8 rd, -+ const u8 rs[], const u8 rt[]) -+{ -+ u8 t1 = MIPS_R_T7; -+ u8 t2 = MIPS_R_T8; -+ u8 t3 = MIPS_R_T9; -+ -+ /* -+ * if ((rs < 0) ^ (rt < 0)) t1 = rtl > 31 */ -+ emit_movn_r(ctx, t1, t2, rd); /* t1 = rd ? 
t2 : t1 */ -+ emit_movn_r(ctx, t1, MIPS_R_ZERO, t3); /* t1 = 0 if t3 != 0 */ -+ -+ /* rd = (rsh < rth) | t1 */ -+ emit(ctx, slt, rd, hi(rs), hi(rt)); /* rd = rsh = -0x7fff && imm <= 0x8000) { -+ emit(ctx, addiu, tmp, lo(dst), -imm); -+ } else if ((u32)imm <= 0xffff) { -+ emit(ctx, xori, tmp, lo(dst), imm); -+ } else { /* Register fallback */ -+ emit_mov_i(ctx, tmp, imm); -+ emit(ctx, xor, tmp, lo(dst), tmp); -+ } -+ if (imm < 0) { /* Compare sign extension */ -+ emit(ctx, addu, MIPS_R_T9, hi(dst), 1); -+ emit(ctx, or, tmp, tmp, MIPS_R_T9); -+ } else { /* Compare zero extension */ -+ emit(ctx, or, tmp, tmp, hi(dst)); -+ } -+ if (op == BPF_JEQ) -+ emit(ctx, beqz, tmp, off); -+ else /* BPF_JNE */ -+ emit(ctx, bnez, tmp, off); -+ break; -+ /* PC += off if dst & imm */ -+ /* PC += off if (dst & imm) == 0 (not in BPF, used for long jumps) */ -+ case BPF_JSET: -+ case JIT_JNSET: -+ if ((u32)imm <= 0xffff) { -+ emit(ctx, andi, tmp, lo(dst), imm); -+ } else { /* Register fallback */ -+ emit_mov_i(ctx, tmp, imm); -+ emit(ctx, and, tmp, lo(dst), tmp); -+ } -+ if (imm < 0) /* Sign-extension pulls in high word */ -+ emit(ctx, or, tmp, tmp, hi(dst)); -+ if (op == BPF_JSET) -+ emit(ctx, bnez, tmp, off); -+ else /* JIT_JNSET */ -+ emit(ctx, beqz, tmp, off); -+ break; -+ /* PC += off if dst > imm */ -+ case BPF_JGT: -+ emit_sltiu_r64(ctx, tmp, dst, (s64)imm + 1); -+ emit(ctx, beqz, tmp, off); -+ break; -+ /* PC += off if dst >= imm */ -+ case BPF_JGE: -+ emit_sltiu_r64(ctx, tmp, dst, imm); -+ emit(ctx, beqz, tmp, off); -+ break; -+ /* PC += off if dst < imm */ -+ case BPF_JLT: -+ emit_sltiu_r64(ctx, tmp, dst, imm); -+ emit(ctx, bnez, tmp, off); -+ break; -+ /* PC += off if dst <= imm */ -+ case BPF_JLE: -+ emit_sltiu_r64(ctx, tmp, dst, (s64)imm + 1); -+ emit(ctx, bnez, tmp, off); -+ break; -+ /* PC += off if dst > imm (signed) */ -+ case BPF_JSGT: -+ emit_slti_r64(ctx, tmp, dst, (s64)imm + 1); -+ emit(ctx, beqz, tmp, off); -+ break; -+ /* PC += off if dst >= imm (signed) */ -+ case BPF_JSGE: -+ emit_slti_r64(ctx, tmp, dst, imm); -+ emit(ctx, beqz, tmp, off); -+ break; -+ /* PC += off if dst < imm (signed) */ -+ case BPF_JSLT: -+ emit_slti_r64(ctx, tmp, dst, imm); -+ emit(ctx, bnez, tmp, off); -+ break; -+ /* PC += off if dst <= imm (signed) */ -+ case BPF_JSLE: -+ emit_slti_r64(ctx, tmp, dst, (s64)imm + 1); -+ emit(ctx, bnez, tmp, off); -+ break; -+ } -+} -+ -+/* Jump register (64-bit) */ -+static void emit_jmp_r64(struct jit_context *ctx, -+ const u8 dst[], const u8 src[], s32 off, u8 op) -+{ -+ u8 t1 = MIPS_R_T6; -+ u8 t2 = MIPS_R_T7; -+ -+ switch (op) { -+ /* No-op, used internally for branch optimization */ -+ case JIT_JNOP: -+ break; -+ /* PC += off if dst == src */ -+ /* PC += off if dst != src */ -+ case BPF_JEQ: -+ case BPF_JNE: -+ emit(ctx, subu, t1, lo(dst), lo(src)); -+ emit(ctx, subu, t2, hi(dst), hi(src)); -+ emit(ctx, or, t1, t1, t2); -+ if (op == BPF_JEQ) -+ emit(ctx, beqz, t1, off); -+ else /* BPF_JNE */ -+ emit(ctx, bnez, t1, off); -+ break; -+ /* PC += off if dst & src */ -+ /* PC += off if (dst & imm) == 0 (not in BPF, used for long jumps) */ -+ case BPF_JSET: -+ case JIT_JNSET: -+ emit(ctx, and, t1, lo(dst), lo(src)); -+ emit(ctx, and, t2, hi(dst), hi(src)); -+ emit(ctx, or, t1, t1, t2); -+ if (op == BPF_JSET) -+ emit(ctx, bnez, t1, off); -+ else /* JIT_JNSET */ -+ emit(ctx, beqz, t1, off); -+ break; -+ /* PC += off if dst > src */ -+ case BPF_JGT: -+ emit_sltu_r64(ctx, t1, src, dst); -+ emit(ctx, bnez, t1, off); -+ break; -+ /* PC += off if dst >= src */ -+ case BPF_JGE: -+ 
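/*
 * [Editor's aside; illustrative only, not part of the deleted patch.]
 * The 64-bit conditional branches above reduce a 64-bit comparison to
 * 32-bit halves: the high words decide unless they are equal, in which
 * case the low words are compared unsigned. A C model of what
 * emit_sltu_r64/emit_slt_r64 compute; names are the editor's:
 */
#include <stdint.h>

/* Unsigned 64-bit "less than" from 32-bit halves */
static int sltu64_model(uint32_t a_lo, uint32_t a_hi,
			uint32_t b_lo, uint32_t b_hi)
{
	if (a_hi != b_hi)
		return a_hi < b_hi;
	return a_lo < b_lo;
}

/* Signed 64-bit "less than": only the high-word compare is signed */
static int slt64_model(uint32_t a_lo, int32_t a_hi,
		       uint32_t b_lo, int32_t b_hi)
{
	if (a_hi != b_hi)
		return a_hi < b_hi;
	return a_lo < b_lo;
}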
emit_sltu_r64(ctx, t1, dst, src); -+ emit(ctx, beqz, t1, off); -+ break; -+ /* PC += off if dst < src */ -+ case BPF_JLT: -+ emit_sltu_r64(ctx, t1, dst, src); -+ emit(ctx, bnez, t1, off); -+ break; -+ /* PC += off if dst <= src */ -+ case BPF_JLE: -+ emit_sltu_r64(ctx, t1, src, dst); -+ emit(ctx, beqz, t1, off); -+ break; -+ /* PC += off if dst > src (signed) */ -+ case BPF_JSGT: -+ emit_slt_r64(ctx, t1, src, dst); -+ emit(ctx, bnez, t1, off); -+ break; -+ /* PC += off if dst >= src (signed) */ -+ case BPF_JSGE: -+ emit_slt_r64(ctx, t1, dst, src); -+ emit(ctx, beqz, t1, off); -+ break; -+ /* PC += off if dst < src (signed) */ -+ case BPF_JSLT: -+ emit_slt_r64(ctx, t1, dst, src); -+ emit(ctx, bnez, t1, off); -+ break; -+ /* PC += off if dst <= src (signed) */ -+ case BPF_JSLE: -+ emit_slt_r64(ctx, t1, src, dst); -+ emit(ctx, beqz, t1, off); -+ break; -+ } -+} -+ -+/* Function call */ -+static int emit_call(struct jit_context *ctx, const struct bpf_insn *insn) -+{ -+ bool fixed; -+ u64 addr; -+ -+ /* Decode the call address */ -+ if (bpf_jit_get_func_addr(ctx->program, insn, false, -+ &addr, &fixed) < 0) -+ return -1; -+ if (!fixed) -+ return -1; -+ -+ /* Push stack arguments */ -+ push_regs(ctx, JIT_STACK_REGS, 0, JIT_RESERVED_STACK); -+ -+ /* Emit function call */ -+ emit_mov_i(ctx, MIPS_R_T9, addr); -+ emit(ctx, jalr, MIPS_R_RA, MIPS_R_T9); -+ emit(ctx, nop); /* Delay slot */ -+ -+ clobber_reg(ctx, MIPS_R_RA); -+ clobber_reg(ctx, MIPS_R_V0); -+ clobber_reg(ctx, MIPS_R_V1); -+ return 0; -+} -+ -+/* Function tail call */ -+static int emit_tail_call(struct jit_context *ctx) -+{ -+ u8 ary = lo(bpf2mips32[BPF_REG_2]); -+ u8 ind = lo(bpf2mips32[BPF_REG_3]); -+ u8 t1 = MIPS_R_T8; -+ u8 t2 = MIPS_R_T9; -+ int off; -+ -+ /* -+ * Tail call: -+ * eBPF R1 - function argument (context ptr), passed in a0-a1 -+ * eBPF R2 - ptr to object with array of function entry points -+ * eBPF R3 - array index of function to be called -+ * stack[sz] - remaining tail call count, initialized in prologue -+ */ -+ -+ /* if (ind >= ary->map.max_entries) goto out */ -+ off = offsetof(struct bpf_array, map.max_entries); -+ if (off > 0x7fff) -+ return -1; -+ emit(ctx, lw, t1, off, ary); /* t1 = ary->map.max_entries*/ -+ emit_load_delay(ctx); /* Load delay slot */ -+ emit(ctx, sltu, t1, ind, t1); /* t1 = ind < t1 */ -+ emit(ctx, beqz, t1, get_offset(ctx, 1)); /* PC += off(1) if t1 == 0 */ -+ /* (next insn delay slot) */ -+ /* if (TCC-- <= 0) goto out */ -+ emit(ctx, lw, t2, ctx->stack_size, MIPS_R_SP); /* t2 = *(SP + size) */ -+ emit_load_delay(ctx); /* Load delay slot */ -+ emit(ctx, blez, t2, get_offset(ctx, 1)); /* PC += off(1) if t2 < 0 */ -+ emit(ctx, addiu, t2, t2, -1); /* t2-- (delay slot) */ -+ emit(ctx, sw, t2, ctx->stack_size, MIPS_R_SP); /* *(SP + size) = t2 */ -+ -+ /* prog = ary->ptrs[ind] */ -+ off = offsetof(struct bpf_array, ptrs); -+ if (off > 0x7fff) -+ return -1; -+ emit(ctx, sll, t1, ind, 2); /* t1 = ind << 2 */ -+ emit(ctx, addu, t1, t1, ary); /* t1 += ary */ -+ emit(ctx, lw, t2, off, t1); /* t2 = *(t1 + off) */ -+ emit_load_delay(ctx); /* Load delay slot */ -+ -+ /* if (prog == 0) goto out */ -+ emit(ctx, beqz, t2, get_offset(ctx, 1)); /* PC += off(1) if t2 == 0 */ -+ emit(ctx, nop); /* Delay slot */ -+ -+ /* func = prog->bpf_func + 8 (prologue skip offset) */ -+ off = offsetof(struct bpf_prog, bpf_func); -+ if (off > 0x7fff) -+ return -1; -+ emit(ctx, lw, t1, off, t2); /* t1 = *(t2 + off) */ -+ emit_load_delay(ctx); /* Load delay slot */ -+ emit(ctx, addiu, t1, t1, JIT_TCALL_SKIP); /* t1 += skip (8 or 
12) */ -+ -+ /* goto func */ -+ build_epilogue(ctx, t1); -+ return 0; -+} -+ -+/* -+ * Stack frame layout for a JITed program (stack grows down). -+ * -+ * Higher address : Caller's stack frame : -+ * :----------------------------: -+ * : 64-bit eBPF args r3-r5 : -+ * :----------------------------: -+ * : Reserved / tail call count : -+ * +============================+ <--- MIPS sp before call -+ * | Callee-saved registers, | -+ * | including RA and FP | -+ * +----------------------------+ <--- eBPF FP (MIPS zero,fp) -+ * | Local eBPF variables | -+ * | allocated by program | -+ * +----------------------------+ -+ * | Reserved for caller-saved | -+ * | registers | -+ * +----------------------------+ -+ * | Reserved for 64-bit eBPF | -+ * | args r3-r5 & args passed | -+ * | on stack in kernel calls | -+ * Lower address +============================+ <--- MIPS sp -+ */ -+ -+/* Build program prologue to set up the stack and registers */ -+void build_prologue(struct jit_context *ctx) -+{ -+ const u8 *r1 = bpf2mips32[BPF_REG_1]; -+ const u8 *fp = bpf2mips32[BPF_REG_FP]; -+ int stack, saved, locals, reserved; -+ -+ /* -+ * The first two instructions initialize TCC in the reserved (for us) -+ * 16-byte area in the parent's stack frame. On a tail call, the -+ * calling function jumps into the prologue after these instructions. -+ */ -+ emit(ctx, ori, MIPS_R_T9, MIPS_R_ZERO, -+ min(MAX_TAIL_CALL_CNT + 1, 0xffff)); -+ emit(ctx, sw, MIPS_R_T9, 0, MIPS_R_SP); -+ -+ /* -+ * Register eBPF R1 contains the 32-bit context pointer argument. -+ * A 32-bit argument is always passed in MIPS register a0, regardless -+ * of CPU endianness. Initialize R1 accordingly and zero-extend. -+ */ -+#ifdef __BIG_ENDIAN -+ emit(ctx, move, lo(r1), MIPS_R_A0); -+#endif -+ -+ /* === Entry-point for tail calls === */ -+ -+ /* Zero-extend the 32-bit argument */ -+ emit(ctx, move, hi(r1), MIPS_R_ZERO); -+ -+ /* If the eBPF frame pointer was accessed it must be saved */ -+ if (ctx->accessed & BIT(BPF_REG_FP)) -+ clobber_reg64(ctx, fp); -+ -+ /* Compute the stack space needed for callee-saved registers */ -+ saved = hweight32(ctx->clobbered & JIT_CALLEE_REGS) * sizeof(u32); -+ saved = ALIGN(saved, MIPS_STACK_ALIGNMENT); -+ -+ /* Stack space used by eBPF program local data */ -+ locals = ALIGN(ctx->program->aux->stack_depth, MIPS_STACK_ALIGNMENT); -+ -+ /* -+ * If we are emitting function calls, reserve extra stack space for -+ * caller-saved registers and function arguments passed on the stack. -+ * The required space is computed automatically during resource -+ * usage discovery (pass 1). -+ */ -+ reserved = ctx->stack_used; -+ -+ /* Allocate the stack frame */ -+ stack = ALIGN(saved + locals + reserved, MIPS_STACK_ALIGNMENT); -+ emit(ctx, addiu, MIPS_R_SP, MIPS_R_SP, -stack); -+ -+ /* Store callee-saved registers on stack */ -+ push_regs(ctx, ctx->clobbered & JIT_CALLEE_REGS, 0, stack - saved); -+ -+ /* Initialize the eBPF frame pointer if accessed */ -+ if (ctx->accessed & BIT(BPF_REG_FP)) -+ emit(ctx, addiu, lo(fp), MIPS_R_SP, stack - saved); -+ -+ ctx->saved_size = saved; -+ ctx->stack_size = stack; -+} -+ -+/* Build the program epilogue to restore the stack and registers */ -+void build_epilogue(struct jit_context *ctx, int dest_reg) -+{ -+ /* Restore callee-saved registers from stack */ -+ pop_regs(ctx, ctx->clobbered & JIT_CALLEE_REGS, 0, -+ ctx->stack_size - ctx->saved_size); -+ /* -+ * A 32-bit return value is always passed in MIPS register v0, -+ * but on big-endian targets the low part of R0 is mapped to v1. 
-+ */ -+#ifdef __BIG_ENDIAN -+ emit(ctx, move, MIPS_R_V0, MIPS_R_V1); -+#endif -+ -+ /* Jump to the return address and adjust the stack pointer */ -+ emit(ctx, jr, dest_reg); -+ emit(ctx, addiu, MIPS_R_SP, MIPS_R_SP, ctx->stack_size); -+} -+ -+/* Build one eBPF instruction */ -+int build_insn(const struct bpf_insn *insn, struct jit_context *ctx) -+{ -+ const u8 *dst = bpf2mips32[insn->dst_reg]; -+ const u8 *src = bpf2mips32[insn->src_reg]; -+ const u8 *tmp = bpf2mips32[JIT_REG_TMP]; -+ u8 code = insn->code; -+ s16 off = insn->off; -+ s32 imm = insn->imm; -+ s32 val, rel; -+ u8 alu, jmp; -+ -+ switch (code) { -+ /* ALU operations */ -+ /* dst = imm */ -+ case BPF_ALU | BPF_MOV | BPF_K: -+ emit_mov_i(ctx, lo(dst), imm); -+ emit_zext_ver(ctx, dst); -+ break; -+ /* dst = src */ -+ case BPF_ALU | BPF_MOV | BPF_X: -+ if (imm == 1) { -+ /* Special mov32 for zext */ -+ emit_mov_i(ctx, hi(dst), 0); -+ } else { -+ emit_mov_r(ctx, lo(dst), lo(src)); -+ emit_zext_ver(ctx, dst); -+ } -+ break; -+ /* dst = -dst */ -+ case BPF_ALU | BPF_NEG: -+ emit_alu_i(ctx, lo(dst), 0, BPF_NEG); -+ emit_zext_ver(ctx, dst); -+ break; -+ /* dst = dst & imm */ -+ /* dst = dst | imm */ -+ /* dst = dst ^ imm */ -+ /* dst = dst << imm */ -+ /* dst = dst >> imm */ -+ /* dst = dst >> imm (arithmetic) */ -+ /* dst = dst + imm */ -+ /* dst = dst - imm */ -+ /* dst = dst * imm */ -+ /* dst = dst / imm */ -+ /* dst = dst % imm */ -+ case BPF_ALU | BPF_OR | BPF_K: -+ case BPF_ALU | BPF_AND | BPF_K: -+ case BPF_ALU | BPF_XOR | BPF_K: -+ case BPF_ALU | BPF_LSH | BPF_K: -+ case BPF_ALU | BPF_RSH | BPF_K: -+ case BPF_ALU | BPF_ARSH | BPF_K: -+ case BPF_ALU | BPF_ADD | BPF_K: -+ case BPF_ALU | BPF_SUB | BPF_K: -+ case BPF_ALU | BPF_MUL | BPF_K: -+ case BPF_ALU | BPF_DIV | BPF_K: -+ case BPF_ALU | BPF_MOD | BPF_K: -+ if (!valid_alu_i(BPF_OP(code), imm)) { -+ emit_mov_i(ctx, MIPS_R_T6, imm); -+ emit_alu_r(ctx, lo(dst), MIPS_R_T6, BPF_OP(code)); -+ } else if (rewrite_alu_i(BPF_OP(code), imm, &alu, &val)) { -+ emit_alu_i(ctx, lo(dst), val, alu); -+ } -+ emit_zext_ver(ctx, dst); -+ break; -+ /* dst = dst & src */ -+ /* dst = dst | src */ -+ /* dst = dst ^ src */ -+ /* dst = dst << src */ -+ /* dst = dst >> src */ -+ /* dst = dst >> src (arithmetic) */ -+ /* dst = dst + src */ -+ /* dst = dst - src */ -+ /* dst = dst * src */ -+ /* dst = dst / src */ -+ /* dst = dst % src */ -+ case BPF_ALU | BPF_AND | BPF_X: -+ case BPF_ALU | BPF_OR | BPF_X: -+ case BPF_ALU | BPF_XOR | BPF_X: -+ case BPF_ALU | BPF_LSH | BPF_X: -+ case BPF_ALU | BPF_RSH | BPF_X: -+ case BPF_ALU | BPF_ARSH | BPF_X: -+ case BPF_ALU | BPF_ADD | BPF_X: -+ case BPF_ALU | BPF_SUB | BPF_X: -+ case BPF_ALU | BPF_MUL | BPF_X: -+ case BPF_ALU | BPF_DIV | BPF_X: -+ case BPF_ALU | BPF_MOD | BPF_X: -+ emit_alu_r(ctx, lo(dst), lo(src), BPF_OP(code)); -+ emit_zext_ver(ctx, dst); -+ break; -+ /* dst = imm (64-bit) */ -+ case BPF_ALU64 | BPF_MOV | BPF_K: -+ emit_mov_se_i64(ctx, dst, imm); -+ break; -+ /* dst = src (64-bit) */ -+ case BPF_ALU64 | BPF_MOV | BPF_X: -+ emit_mov_r(ctx, lo(dst), lo(src)); -+ emit_mov_r(ctx, hi(dst), hi(src)); -+ break; -+ /* dst = -dst (64-bit) */ -+ case BPF_ALU64 | BPF_NEG: -+ emit_neg_i64(ctx, dst); -+ break; -+ /* dst = dst & imm (64-bit) */ -+ case BPF_ALU64 | BPF_AND | BPF_K: -+ emit_alu_i64(ctx, dst, imm, BPF_OP(code)); -+ break; -+ /* dst = dst | imm (64-bit) */ -+ /* dst = dst ^ imm (64-bit) */ -+ /* dst = dst + imm (64-bit) */ -+ /* dst = dst - imm (64-bit) */ -+ case BPF_ALU64 | BPF_OR | BPF_K: -+ case BPF_ALU64 | BPF_XOR | BPF_K: -+ case BPF_ALU64 | 
BPF_ADD | BPF_K: -+ case BPF_ALU64 | BPF_SUB | BPF_K: -+ if (imm) -+ emit_alu_i64(ctx, dst, imm, BPF_OP(code)); -+ break; -+ /* dst = dst << imm (64-bit) */ -+ /* dst = dst >> imm (64-bit) */ -+ /* dst = dst >> imm (64-bit, arithmetic) */ -+ case BPF_ALU64 | BPF_LSH | BPF_K: -+ case BPF_ALU64 | BPF_RSH | BPF_K: -+ case BPF_ALU64 | BPF_ARSH | BPF_K: -+ if (imm) -+ emit_shift_i64(ctx, dst, imm, BPF_OP(code)); -+ break; -+ /* dst = dst * imm (64-bit) */ -+ case BPF_ALU64 | BPF_MUL | BPF_K: -+ emit_mul_i64(ctx, dst, imm); -+ break; -+ /* dst = dst / imm (64-bit) */ -+ /* dst = dst % imm (64-bit) */ -+ case BPF_ALU64 | BPF_DIV | BPF_K: -+ case BPF_ALU64 | BPF_MOD | BPF_K: -+ /* -+ * Sign-extend the immediate value into a temporary register, -+ * and then do the operation on this register. -+ */ -+ emit_mov_se_i64(ctx, tmp, imm); -+ emit_divmod_r64(ctx, dst, tmp, BPF_OP(code)); -+ break; -+ /* dst = dst & src (64-bit) */ -+ /* dst = dst | src (64-bit) */ -+ /* dst = dst ^ src (64-bit) */ -+ /* dst = dst + src (64-bit) */ -+ /* dst = dst - src (64-bit) */ -+ case BPF_ALU64 | BPF_AND | BPF_X: -+ case BPF_ALU64 | BPF_OR | BPF_X: -+ case BPF_ALU64 | BPF_XOR | BPF_X: -+ case BPF_ALU64 | BPF_ADD | BPF_X: -+ case BPF_ALU64 | BPF_SUB | BPF_X: -+ emit_alu_r64(ctx, dst, src, BPF_OP(code)); -+ break; -+ /* dst = dst << src (64-bit) */ -+ /* dst = dst >> src (64-bit) */ -+ /* dst = dst >> src (64-bit, arithmetic) */ -+ case BPF_ALU64 | BPF_LSH | BPF_X: -+ case BPF_ALU64 | BPF_RSH | BPF_X: -+ case BPF_ALU64 | BPF_ARSH | BPF_X: -+ emit_shift_r64(ctx, dst, lo(src), BPF_OP(code)); -+ break; -+ /* dst = dst * src (64-bit) */ -+ case BPF_ALU64 | BPF_MUL | BPF_X: -+ emit_mul_r64(ctx, dst, src); -+ break; -+ /* dst = dst / src (64-bit) */ -+ /* dst = dst % src (64-bit) */ -+ case BPF_ALU64 | BPF_DIV | BPF_X: -+ case BPF_ALU64 | BPF_MOD | BPF_X: -+ emit_divmod_r64(ctx, dst, src, BPF_OP(code)); -+ break; -+ /* dst = htole(dst) */ -+ /* dst = htobe(dst) */ -+ case BPF_ALU | BPF_END | BPF_FROM_LE: -+ case BPF_ALU | BPF_END | BPF_FROM_BE: -+ if (BPF_SRC(code) == -+#ifdef __BIG_ENDIAN -+ BPF_FROM_LE -+#else -+ BPF_FROM_BE -+#endif -+ ) -+ emit_bswap_r64(ctx, dst, imm); -+ else -+ emit_trunc_r64(ctx, dst, imm); -+ break; -+ /* dst = imm64 */ -+ case BPF_LD | BPF_IMM | BPF_DW: -+ emit_mov_i(ctx, lo(dst), imm); -+ emit_mov_i(ctx, hi(dst), insn[1].imm); -+ return 1; -+ /* LDX: dst = *(size *)(src + off) */ -+ case BPF_LDX | BPF_MEM | BPF_W: -+ case BPF_LDX | BPF_MEM | BPF_H: -+ case BPF_LDX | BPF_MEM | BPF_B: -+ case BPF_LDX | BPF_MEM | BPF_DW: -+ emit_ldx(ctx, dst, lo(src), off, BPF_SIZE(code)); -+ break; -+ /* ST: *(size *)(dst + off) = imm */ -+ case BPF_ST | BPF_MEM | BPF_W: -+ case BPF_ST | BPF_MEM | BPF_H: -+ case BPF_ST | BPF_MEM | BPF_B: -+ case BPF_ST | BPF_MEM | BPF_DW: -+ switch (BPF_SIZE(code)) { -+ case BPF_DW: -+ /* Sign-extend immediate value into temporary reg */ -+ emit_mov_se_i64(ctx, tmp, imm); -+ break; -+ case BPF_W: -+ case BPF_H: -+ case BPF_B: -+ emit_mov_i(ctx, lo(tmp), imm); -+ break; -+ } -+ emit_stx(ctx, lo(dst), tmp, off, BPF_SIZE(code)); -+ break; -+ /* STX: *(size *)(dst + off) = src */ -+ case BPF_STX | BPF_MEM | BPF_W: -+ case BPF_STX | BPF_MEM | BPF_H: -+ case BPF_STX | BPF_MEM | BPF_B: -+ case BPF_STX | BPF_MEM | BPF_DW: -+ emit_stx(ctx, lo(dst), src, off, BPF_SIZE(code)); -+ break; -+ /* Speculation barrier */ -+ case BPF_ST | BPF_NOSPEC: -+ break; -+ /* Atomics */ -+ case BPF_STX | BPF_XADD | BPF_W: -+ switch (imm) { -+ case BPF_ADD: -+ case BPF_AND: -+ case BPF_OR: -+ case BPF_XOR: -+ if 
(cpu_has_llsc) -+ emit_atomic_r(ctx, lo(dst), lo(src), off, imm); -+ else /* Non-ll/sc fallback */ -+ emit_atomic_r32(ctx, lo(dst), lo(src), -+ off, imm); -+ break; -+ default: -+ goto notyet; -+ } -+ break; -+ /* Atomics (64-bit) */ -+ case BPF_STX | BPF_XADD | BPF_DW: -+ switch (imm) { -+ case BPF_ADD: -+ case BPF_AND: -+ case BPF_OR: -+ case BPF_XOR: -+ emit_atomic_r64(ctx, lo(dst), src, off, imm); -+ break; -+ default: -+ goto notyet; -+ } -+ break; -+ /* PC += off if dst == src */ -+ /* PC += off if dst != src */ -+ /* PC += off if dst & src */ -+ /* PC += off if dst > src */ -+ /* PC += off if dst >= src */ -+ /* PC += off if dst < src */ -+ /* PC += off if dst <= src */ -+ /* PC += off if dst > src (signed) */ -+ /* PC += off if dst >= src (signed) */ -+ /* PC += off if dst < src (signed) */ -+ /* PC += off if dst <= src (signed) */ -+ case BPF_JMP32 | BPF_JEQ | BPF_X: -+ case BPF_JMP32 | BPF_JNE | BPF_X: -+ case BPF_JMP32 | BPF_JSET | BPF_X: -+ case BPF_JMP32 | BPF_JGT | BPF_X: -+ case BPF_JMP32 | BPF_JGE | BPF_X: -+ case BPF_JMP32 | BPF_JLT | BPF_X: -+ case BPF_JMP32 | BPF_JLE | BPF_X: -+ case BPF_JMP32 | BPF_JSGT | BPF_X: -+ case BPF_JMP32 | BPF_JSGE | BPF_X: -+ case BPF_JMP32 | BPF_JSLT | BPF_X: -+ case BPF_JMP32 | BPF_JSLE | BPF_X: -+ if (off == 0) -+ break; -+ setup_jmp_r(ctx, dst == src, BPF_OP(code), off, &jmp, &rel); -+ emit_jmp_r(ctx, lo(dst), lo(src), rel, jmp); -+ if (finish_jmp(ctx, jmp, off) < 0) -+ goto toofar; -+ break; -+ /* PC += off if dst == imm */ -+ /* PC += off if dst != imm */ -+ /* PC += off if dst & imm */ -+ /* PC += off if dst > imm */ -+ /* PC += off if dst >= imm */ -+ /* PC += off if dst < imm */ -+ /* PC += off if dst <= imm */ -+ /* PC += off if dst > imm (signed) */ -+ /* PC += off if dst >= imm (signed) */ -+ /* PC += off if dst < imm (signed) */ -+ /* PC += off if dst <= imm (signed) */ -+ case BPF_JMP32 | BPF_JEQ | BPF_K: -+ case BPF_JMP32 | BPF_JNE | BPF_K: -+ case BPF_JMP32 | BPF_JSET | BPF_K: -+ case BPF_JMP32 | BPF_JGT | BPF_K: -+ case BPF_JMP32 | BPF_JGE | BPF_K: -+ case BPF_JMP32 | BPF_JLT | BPF_K: -+ case BPF_JMP32 | BPF_JLE | BPF_K: -+ case BPF_JMP32 | BPF_JSGT | BPF_K: -+ case BPF_JMP32 | BPF_JSGE | BPF_K: -+ case BPF_JMP32 | BPF_JSLT | BPF_K: -+ case BPF_JMP32 | BPF_JSLE | BPF_K: -+ if (off == 0) -+ break; -+ setup_jmp_i(ctx, imm, 32, BPF_OP(code), off, &jmp, &rel); -+ if (valid_jmp_i(jmp, imm)) { -+ emit_jmp_i(ctx, lo(dst), imm, rel, jmp); -+ } else { -+ /* Move large immediate to register */ -+ emit_mov_i(ctx, MIPS_R_T6, imm); -+ emit_jmp_r(ctx, lo(dst), MIPS_R_T6, rel, jmp); -+ } -+ if (finish_jmp(ctx, jmp, off) < 0) -+ goto toofar; -+ break; -+ /* PC += off if dst == src */ -+ /* PC += off if dst != src */ -+ /* PC += off if dst & src */ -+ /* PC += off if dst > src */ -+ /* PC += off if dst >= src */ -+ /* PC += off if dst < src */ -+ /* PC += off if dst <= src */ -+ /* PC += off if dst > src (signed) */ -+ /* PC += off if dst >= src (signed) */ -+ /* PC += off if dst < src (signed) */ -+ /* PC += off if dst <= src (signed) */ -+ case BPF_JMP | BPF_JEQ | BPF_X: -+ case BPF_JMP | BPF_JNE | BPF_X: -+ case BPF_JMP | BPF_JSET | BPF_X: -+ case BPF_JMP | BPF_JGT | BPF_X: -+ case BPF_JMP | BPF_JGE | BPF_X: -+ case BPF_JMP | BPF_JLT | BPF_X: -+ case BPF_JMP | BPF_JLE | BPF_X: -+ case BPF_JMP | BPF_JSGT | BPF_X: -+ case BPF_JMP | BPF_JSGE | BPF_X: -+ case BPF_JMP | BPF_JSLT | BPF_X: -+ case BPF_JMP | BPF_JSLE | BPF_X: -+ if (off == 0) -+ break; -+ setup_jmp_r(ctx, dst == src, BPF_OP(code), off, &jmp, &rel); -+ emit_jmp_r64(ctx, dst, src, 
rel, jmp); -+ if (finish_jmp(ctx, jmp, off) < 0) -+ goto toofar; -+ break; -+ /* PC += off if dst == imm */ -+ /* PC += off if dst != imm */ -+ /* PC += off if dst & imm */ -+ /* PC += off if dst > imm */ -+ /* PC += off if dst >= imm */ -+ /* PC += off if dst < imm */ -+ /* PC += off if dst <= imm */ -+ /* PC += off if dst > imm (signed) */ -+ /* PC += off if dst >= imm (signed) */ -+ /* PC += off if dst < imm (signed) */ -+ /* PC += off if dst <= imm (signed) */ -+ case BPF_JMP | BPF_JEQ | BPF_K: -+ case BPF_JMP | BPF_JNE | BPF_K: -+ case BPF_JMP | BPF_JSET | BPF_K: -+ case BPF_JMP | BPF_JGT | BPF_K: -+ case BPF_JMP | BPF_JGE | BPF_K: -+ case BPF_JMP | BPF_JLT | BPF_K: -+ case BPF_JMP | BPF_JLE | BPF_K: -+ case BPF_JMP | BPF_JSGT | BPF_K: -+ case BPF_JMP | BPF_JSGE | BPF_K: -+ case BPF_JMP | BPF_JSLT | BPF_K: -+ case BPF_JMP | BPF_JSLE | BPF_K: -+ if (off == 0) -+ break; -+ setup_jmp_i(ctx, imm, 64, BPF_OP(code), off, &jmp, &rel); -+ emit_jmp_i64(ctx, dst, imm, rel, jmp); -+ if (finish_jmp(ctx, jmp, off) < 0) -+ goto toofar; -+ break; -+ /* PC += off */ -+ case BPF_JMP | BPF_JA: -+ if (off == 0) -+ break; -+ if (emit_ja(ctx, off) < 0) -+ goto toofar; -+ break; -+ /* Tail call */ -+ case BPF_JMP | BPF_TAIL_CALL: -+ if (emit_tail_call(ctx) < 0) -+ goto invalid; -+ break; -+ /* Function call */ -+ case BPF_JMP | BPF_CALL: -+ if (emit_call(ctx, insn) < 0) -+ goto invalid; -+ break; -+ /* Function return */ -+ case BPF_JMP | BPF_EXIT: -+ /* -+ * Optimization: when last instruction is EXIT -+ * simply continue to epilogue. -+ */ -+ if (ctx->bpf_index == ctx->program->len - 1) -+ break; -+ if (emit_exit(ctx) < 0) -+ goto toofar; -+ break; -+ -+ default: -+invalid: -+ pr_err_once("unknown opcode %02x\n", code); -+ return -EINVAL; -+notyet: -+ pr_info_once("*** NOT YET: opcode %02x ***\n", code); -+ return -EFAULT; -+toofar: -+ pr_info_once("*** TOO FAR: jump at %u opcode %02x ***\n", -+ ctx->bpf_index, code); -+ return -E2BIG; -+ } -+ return 0; -+} diff --git a/target/linux/generic/backport-6.1/050-v5.16-03-mips-bpf-Add-new-eBPF-JIT-for-64-bit-MIPS.patch b/target/linux/generic/backport-6.1/050-v5.16-03-mips-bpf-Add-new-eBPF-JIT-for-64-bit-MIPS.patch deleted file mode 100644 index 38b46c0b765b..000000000000 --- a/target/linux/generic/backport-6.1/050-v5.16-03-mips-bpf-Add-new-eBPF-JIT-for-64-bit-MIPS.patch +++ /dev/null @@ -1,1005 +0,0 @@ -From: Johan Almbladh -Date: Tue, 5 Oct 2021 18:54:05 +0200 -Subject: [PATCH] mips: bpf: Add new eBPF JIT for 64-bit MIPS - -This is an implementation on of an eBPF JIT for 64-bit MIPS III-V and -MIPS64r1-r6. It uses the same framework introduced by the 32-bit JIT. - -Signed-off-by: Johan Almbladh ---- - create mode 100644 arch/mips/net/bpf_jit_comp64.c - ---- /dev/null -+++ b/arch/mips/net/bpf_jit_comp64.c -@@ -0,0 +1,991 @@ -+// SPDX-License-Identifier: GPL-2.0-only -+/* -+ * Just-In-Time compiler for eBPF bytecode on MIPS. -+ * Implementation of JIT functions for 64-bit CPUs. -+ * -+ * Copyright (c) 2021 Anyfi Networks AB. -+ * Author: Johan Almbladh -+ * -+ * Based on code and ideas from -+ * Copyright (c) 2017 Cavium, Inc. 
-+ * Copyright (c) 2017 Shubham Bansal -+ * Copyright (c) 2011 Mircea Gherzan -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "bpf_jit_comp.h" -+ -+/* MIPS t0-t3 are not available in the n64 ABI */ -+#undef MIPS_R_T0 -+#undef MIPS_R_T1 -+#undef MIPS_R_T2 -+#undef MIPS_R_T3 -+ -+/* Stack is 16-byte aligned in n64 ABI */ -+#define MIPS_STACK_ALIGNMENT 16 -+ -+/* Extra 64-bit eBPF registers used by JIT */ -+#define JIT_REG_TC (MAX_BPF_JIT_REG + 0) -+#define JIT_REG_ZX (MAX_BPF_JIT_REG + 1) -+ -+/* Number of prologue bytes to skip when doing a tail call */ -+#define JIT_TCALL_SKIP 4 -+ -+/* Callee-saved CPU registers that the JIT must preserve */ -+#define JIT_CALLEE_REGS \ -+ (BIT(MIPS_R_S0) | \ -+ BIT(MIPS_R_S1) | \ -+ BIT(MIPS_R_S2) | \ -+ BIT(MIPS_R_S3) | \ -+ BIT(MIPS_R_S4) | \ -+ BIT(MIPS_R_S5) | \ -+ BIT(MIPS_R_S6) | \ -+ BIT(MIPS_R_S7) | \ -+ BIT(MIPS_R_GP) | \ -+ BIT(MIPS_R_FP) | \ -+ BIT(MIPS_R_RA)) -+ -+/* Caller-saved CPU registers available for JIT use */ -+#define JIT_CALLER_REGS \ -+ (BIT(MIPS_R_A5) | \ -+ BIT(MIPS_R_A6) | \ -+ BIT(MIPS_R_A7)) -+/* -+ * Mapping of 64-bit eBPF registers to 64-bit native MIPS registers. -+ * MIPS registers t4 - t7 may be used by the JIT as temporary registers. -+ * MIPS registers t8 - t9 are reserved for single-register common functions. -+ */ -+static const u8 bpf2mips64[] = { -+ /* Return value from in-kernel function, and exit value from eBPF */ -+ [BPF_REG_0] = MIPS_R_V0, -+ /* Arguments from eBPF program to in-kernel function */ -+ [BPF_REG_1] = MIPS_R_A0, -+ [BPF_REG_2] = MIPS_R_A1, -+ [BPF_REG_3] = MIPS_R_A2, -+ [BPF_REG_4] = MIPS_R_A3, -+ [BPF_REG_5] = MIPS_R_A4, -+ /* Callee-saved registers that in-kernel function will preserve */ -+ [BPF_REG_6] = MIPS_R_S0, -+ [BPF_REG_7] = MIPS_R_S1, -+ [BPF_REG_8] = MIPS_R_S2, -+ [BPF_REG_9] = MIPS_R_S3, -+ /* Read-only frame pointer to access the eBPF stack */ -+ [BPF_REG_FP] = MIPS_R_FP, -+ /* Temporary register for blinding constants */ -+ [BPF_REG_AX] = MIPS_R_AT, -+ /* Tail call count register, caller-saved */ -+ [JIT_REG_TC] = MIPS_R_A5, -+ /* Constant for register zero-extension */ -+ [JIT_REG_ZX] = MIPS_R_V1, -+}; -+ -+/* -+ * MIPS 32-bit operations on 64-bit registers generate a sign-extended -+ * result. However, the eBPF ISA mandates zero-extension, so we rely on the -+ * verifier to add that for us (emit_zext_ver). In addition, ALU arithmetic -+ * operations, right shift and byte swap require properly sign-extended -+ * operands or the result is unpredictable. We emit explicit sign-extensions -+ * in those cases. 
-+ */ -+ -+/* Sign extension */ -+static void emit_sext(struct jit_context *ctx, u8 dst, u8 src) -+{ -+ emit(ctx, sll, dst, src, 0); -+ clobber_reg(ctx, dst); -+} -+ -+/* Zero extension */ -+static void emit_zext(struct jit_context *ctx, u8 dst) -+{ -+ if (cpu_has_mips64r2 || cpu_has_mips64r6) { -+ emit(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); -+ } else { -+ emit(ctx, and, dst, dst, bpf2mips64[JIT_REG_ZX]); -+ access_reg(ctx, JIT_REG_ZX); /* We need the ZX register */ -+ } -+ clobber_reg(ctx, dst); -+} -+ -+/* Zero extension, if verifier does not do it for us */ -+static void emit_zext_ver(struct jit_context *ctx, u8 dst) -+{ -+ if (!ctx->program->aux->verifier_zext) -+ emit_zext(ctx, dst); -+} -+ -+/* dst = imm (64-bit) */ -+static void emit_mov_i64(struct jit_context *ctx, u8 dst, u64 imm64) -+{ -+ if (imm64 >= 0xffffffffffff8000ULL || imm64 < 0x8000ULL) { -+ emit(ctx, daddiu, dst, MIPS_R_ZERO, (s16)imm64); -+ } else if (imm64 >= 0xffffffff80000000ULL || -+ (imm64 < 0x80000000 && imm64 > 0xffff)) { -+ emit(ctx, lui, dst, (s16)(imm64 >> 16)); -+ emit(ctx, ori, dst, dst, (u16)imm64 & 0xffff); -+ } else { -+ u8 acc = MIPS_R_ZERO; -+ int k; -+ -+ for (k = 0; k < 4; k++) { -+ u16 half = imm64 >> (48 - 16 * k); -+ -+ if (acc == dst) -+ emit(ctx, dsll, dst, dst, 16); -+ -+ if (half) { -+ emit(ctx, ori, dst, acc, half); -+ acc = dst; -+ } -+ } -+ } -+ clobber_reg(ctx, dst); -+} -+ -+/* ALU immediate operation (64-bit) */ -+static void emit_alu_i64(struct jit_context *ctx, u8 dst, s32 imm, u8 op) -+{ -+ switch (BPF_OP(op)) { -+ /* dst = dst | imm */ -+ case BPF_OR: -+ emit(ctx, ori, dst, dst, (u16)imm); -+ break; -+ /* dst = dst ^ imm */ -+ case BPF_XOR: -+ emit(ctx, xori, dst, dst, (u16)imm); -+ break; -+ /* dst = -dst */ -+ case BPF_NEG: -+ emit(ctx, dsubu, dst, MIPS_R_ZERO, dst); -+ break; -+ /* dst = dst << imm */ -+ case BPF_LSH: -+ emit(ctx, dsll_safe, dst, dst, imm); -+ break; -+ /* dst = dst >> imm */ -+ case BPF_RSH: -+ emit(ctx, dsrl_safe, dst, dst, imm); -+ break; -+ /* dst = dst >> imm (arithmetic) */ -+ case BPF_ARSH: -+ emit(ctx, dsra_safe, dst, dst, imm); -+ break; -+ /* dst = dst + imm */ -+ case BPF_ADD: -+ emit(ctx, daddiu, dst, dst, imm); -+ break; -+ /* dst = dst - imm */ -+ case BPF_SUB: -+ emit(ctx, daddiu, dst, dst, -imm); -+ break; -+ default: -+ /* Width-generic operations */ -+ emit_alu_i(ctx, dst, imm, op); -+ } -+ clobber_reg(ctx, dst); -+} -+ -+/* ALU register operation (64-bit) */ -+static void emit_alu_r64(struct jit_context *ctx, u8 dst, u8 src, u8 op) -+{ -+ switch (BPF_OP(op)) { -+ /* dst = dst << src */ -+ case BPF_LSH: -+ emit(ctx, dsllv, dst, dst, src); -+ break; -+ /* dst = dst >> src */ -+ case BPF_RSH: -+ emit(ctx, dsrlv, dst, dst, src); -+ break; -+ /* dst = dst >> src (arithmetic) */ -+ case BPF_ARSH: -+ emit(ctx, dsrav, dst, dst, src); -+ break; -+ /* dst = dst + src */ -+ case BPF_ADD: -+ emit(ctx, daddu, dst, dst, src); -+ break; -+ /* dst = dst - src */ -+ case BPF_SUB: -+ emit(ctx, dsubu, dst, dst, src); -+ break; -+ /* dst = dst * src */ -+ case BPF_MUL: -+ if (cpu_has_mips64r6) { -+ emit(ctx, dmulu, dst, dst, src); -+ } else { -+ emit(ctx, dmultu, dst, src); -+ emit(ctx, mflo, dst); -+ } -+ break; -+ /* dst = dst / src */ -+ case BPF_DIV: -+ if (cpu_has_mips64r6) { -+ emit(ctx, ddivu_r6, dst, dst, src); -+ } else { -+ emit(ctx, ddivu, dst, src); -+ emit(ctx, mflo, dst); -+ } -+ break; -+ /* dst = dst % src */ -+ case BPF_MOD: -+ if (cpu_has_mips64r6) { -+ emit(ctx, dmodu, dst, dst, src); -+ } else { -+ emit(ctx, ddivu, dst, src); -+ emit(ctx, mfhi, 
dst); -+ } -+ break; -+ default: -+ /* Width-generic operations */ -+ emit_alu_r(ctx, dst, src, op); -+ } -+ clobber_reg(ctx, dst); -+} -+ -+/* Swap sub words in a register double word */ -+static void emit_swap_r64(struct jit_context *ctx, u8 dst, u8 mask, u32 bits) -+{ -+ u8 tmp = MIPS_R_T9; -+ -+ emit(ctx, and, tmp, dst, mask); /* tmp = dst & mask */ -+ emit(ctx, dsll, tmp, tmp, bits); /* tmp = tmp << bits */ -+ emit(ctx, dsrl, dst, dst, bits); /* dst = dst >> bits */ -+ emit(ctx, and, dst, dst, mask); /* dst = dst & mask */ -+ emit(ctx, or, dst, dst, tmp); /* dst = dst | tmp */ -+} -+ -+/* Swap bytes and truncate a register double word, word or half word */ -+static void emit_bswap_r64(struct jit_context *ctx, u8 dst, u32 width) -+{ -+ switch (width) { -+ /* Swap bytes in a double word */ -+ case 64: -+ if (cpu_has_mips64r2 || cpu_has_mips64r6) { -+ emit(ctx, dsbh, dst, dst); -+ emit(ctx, dshd, dst, dst); -+ } else { -+ u8 t1 = MIPS_R_T6; -+ u8 t2 = MIPS_R_T7; -+ -+ emit(ctx, dsll32, t2, dst, 0); /* t2 = dst << 32 */ -+ emit(ctx, dsrl32, dst, dst, 0); /* dst = dst >> 32 */ -+ emit(ctx, or, dst, dst, t2); /* dst = dst | t2 */ -+ -+ emit(ctx, ori, t2, MIPS_R_ZERO, 0xffff); -+ emit(ctx, dsll32, t1, t2, 0); /* t1 = t2 << 32 */ -+ emit(ctx, or, t1, t1, t2); /* t1 = t1 | t2 */ -+ emit_swap_r64(ctx, dst, t1, 16);/* dst = swap16(dst) */ -+ -+ emit(ctx, lui, t2, 0xff); /* t2 = 0x00ff0000 */ -+ emit(ctx, ori, t2, t2, 0xff); /* t2 = t2 | 0x00ff */ -+ emit(ctx, dsll32, t1, t2, 0); /* t1 = t2 << 32 */ -+ emit(ctx, or, t1, t1, t2); /* t1 = t1 | t2 */ -+ emit_swap_r64(ctx, dst, t1, 8); /* dst = swap8(dst) */ -+ } -+ break; -+ /* Swap bytes in a half word */ -+ /* Swap bytes in a word */ -+ case 32: -+ case 16: -+ emit_sext(ctx, dst, dst); -+ emit_bswap_r(ctx, dst, width); -+ if (cpu_has_mips64r2 || cpu_has_mips64r6) -+ emit_zext(ctx, dst); -+ break; -+ } -+ clobber_reg(ctx, dst); -+} -+ -+/* Truncate a register double word, word or half word */ -+static void emit_trunc_r64(struct jit_context *ctx, u8 dst, u32 width) -+{ -+ switch (width) { -+ case 64: -+ break; -+ /* Zero-extend a word */ -+ case 32: -+ emit_zext(ctx, dst); -+ break; -+ /* Zero-extend a half word */ -+ case 16: -+ emit(ctx, andi, dst, dst, 0xffff); -+ break; -+ } -+ clobber_reg(ctx, dst); -+} -+ -+/* Load operation: dst = *(size*)(src + off) */ -+static void emit_ldx(struct jit_context *ctx, u8 dst, u8 src, s16 off, u8 size) -+{ -+ switch (size) { -+ /* Load a byte */ -+ case BPF_B: -+ emit(ctx, lbu, dst, off, src); -+ break; -+ /* Load a half word */ -+ case BPF_H: -+ emit(ctx, lhu, dst, off, src); -+ break; -+ /* Load a word */ -+ case BPF_W: -+ emit(ctx, lwu, dst, off, src); -+ break; -+ /* Load a double word */ -+ case BPF_DW: -+ emit(ctx, ld, dst, off, src); -+ break; -+ } -+ clobber_reg(ctx, dst); -+} -+ -+/* Store operation: *(size *)(dst + off) = src */ -+static void emit_stx(struct jit_context *ctx, u8 dst, u8 src, s16 off, u8 size) -+{ -+ switch (size) { -+ /* Store a byte */ -+ case BPF_B: -+ emit(ctx, sb, src, off, dst); -+ break; -+ /* Store a half word */ -+ case BPF_H: -+ emit(ctx, sh, src, off, dst); -+ break; -+ /* Store a word */ -+ case BPF_W: -+ emit(ctx, sw, src, off, dst); -+ break; -+ /* Store a double word */ -+ case BPF_DW: -+ emit(ctx, sd, src, off, dst); -+ break; -+ } -+} -+ -+/* Atomic read-modify-write */ -+static void emit_atomic_r64(struct jit_context *ctx, -+ u8 dst, u8 src, s16 off, u8 code) -+{ -+ u8 t1 = MIPS_R_T6; -+ u8 t2 = MIPS_R_T7; -+ -+ emit(ctx, lld, t1, off, dst); -+ switch (code) { -+ case 
BPF_ADD: -+ emit(ctx, daddu, t2, t1, src); -+ break; -+ case BPF_AND: -+ emit(ctx, and, t2, t1, src); -+ break; -+ case BPF_OR: -+ emit(ctx, or, t2, t1, src); -+ break; -+ case BPF_XOR: -+ emit(ctx, xor, t2, t1, src); -+ break; -+ } -+ emit(ctx, scd, t2, off, dst); -+ emit(ctx, beqz, t2, -16); -+ emit(ctx, nop); /* Delay slot */ -+} -+ -+/* Function call */ -+static int emit_call(struct jit_context *ctx, const struct bpf_insn *insn) -+{ -+ u8 zx = bpf2mips64[JIT_REG_ZX]; -+ u8 tmp = MIPS_R_T6; -+ bool fixed; -+ u64 addr; -+ -+ /* Decode the call address */ -+ if (bpf_jit_get_func_addr(ctx->program, insn, false, -+ &addr, &fixed) < 0) -+ return -1; -+ if (!fixed) -+ return -1; -+ -+ /* Push caller-saved registers on stack */ -+ push_regs(ctx, ctx->clobbered & JIT_CALLER_REGS, 0, 0); -+ -+ /* Emit function call */ -+ emit_mov_i64(ctx, tmp, addr); -+ emit(ctx, jalr, MIPS_R_RA, tmp); -+ emit(ctx, nop); /* Delay slot */ -+ -+ /* Restore caller-saved registers */ -+ pop_regs(ctx, ctx->clobbered & JIT_CALLER_REGS, 0, 0); -+ -+ /* Re-initialize the JIT zero-extension register if accessed */ -+ if (ctx->accessed & BIT(JIT_REG_ZX)) { -+ emit(ctx, daddiu, zx, MIPS_R_ZERO, -1); -+ emit(ctx, dsrl32, zx, zx, 0); -+ } -+ -+ clobber_reg(ctx, MIPS_R_RA); -+ clobber_reg(ctx, MIPS_R_V0); -+ clobber_reg(ctx, MIPS_R_V1); -+ return 0; -+} -+ -+/* Function tail call */ -+static int emit_tail_call(struct jit_context *ctx) -+{ -+ u8 ary = bpf2mips64[BPF_REG_2]; -+ u8 ind = bpf2mips64[BPF_REG_3]; -+ u8 tcc = bpf2mips64[JIT_REG_TC]; -+ u8 tmp = MIPS_R_T6; -+ int off; -+ -+ /* -+ * Tail call: -+ * eBPF R1 - function argument (context ptr), passed in a0-a1 -+ * eBPF R2 - ptr to object with array of function entry points -+ * eBPF R3 - array index of function to be called -+ */ -+ -+ /* if (ind >= ary->map.max_entries) goto out */ -+ off = offsetof(struct bpf_array, map.max_entries); -+ if (off > 0x7fff) -+ return -1; -+ emit(ctx, lwu, tmp, off, ary); /* tmp = ary->map.max_entrs*/ -+ emit(ctx, sltu, tmp, ind, tmp); /* tmp = ind < t1 */ -+ emit(ctx, beqz, tmp, get_offset(ctx, 1)); /* PC += off(1) if tmp == 0*/ -+ -+ /* if (--TCC < 0) goto out */ -+ emit(ctx, daddiu, tcc, tcc, -1); /* tcc-- (delay slot) */ -+ emit(ctx, bltz, tcc, get_offset(ctx, 1)); /* PC += off(1) if tcc < 0 */ -+ /* (next insn delay slot) */ -+ /* prog = ary->ptrs[ind] */ -+ off = offsetof(struct bpf_array, ptrs); -+ if (off > 0x7fff) -+ return -1; -+ emit(ctx, dsll, tmp, ind, 3); /* tmp = ind << 3 */ -+ emit(ctx, daddu, tmp, tmp, ary); /* tmp += ary */ -+ emit(ctx, ld, tmp, off, tmp); /* tmp = *(tmp + off) */ -+ -+ /* if (prog == 0) goto out */ -+ emit(ctx, beqz, tmp, get_offset(ctx, 1)); /* PC += off(1) if tmp == 0*/ -+ emit(ctx, nop); /* Delay slot */ -+ -+ /* func = prog->bpf_func + 8 (prologue skip offset) */ -+ off = offsetof(struct bpf_prog, bpf_func); -+ if (off > 0x7fff) -+ return -1; -+ emit(ctx, ld, tmp, off, tmp); /* tmp = *(tmp + off) */ -+ emit(ctx, daddiu, tmp, tmp, JIT_TCALL_SKIP); /* tmp += skip (4) */ -+ -+ /* goto func */ -+ build_epilogue(ctx, tmp); -+ access_reg(ctx, JIT_REG_TC); -+ return 0; -+} -+ -+/* -+ * Stack frame layout for a JITed program (stack grows down). 
-+ * -+ * Higher address : Previous stack frame : -+ * +===========================+ <--- MIPS sp before call -+ * | Callee-saved registers, | -+ * | including RA and FP | -+ * +---------------------------+ <--- eBPF FP (MIPS fp) -+ * | Local eBPF variables | -+ * | allocated by program | -+ * +---------------------------+ -+ * | Reserved for caller-saved | -+ * | registers | -+ * Lower address +===========================+ <--- MIPS sp -+ */ -+ -+/* Build program prologue to set up the stack and registers */ -+void build_prologue(struct jit_context *ctx) -+{ -+ u8 fp = bpf2mips64[BPF_REG_FP]; -+ u8 tc = bpf2mips64[JIT_REG_TC]; -+ u8 zx = bpf2mips64[JIT_REG_ZX]; -+ int stack, saved, locals, reserved; -+ -+ /* -+ * The first instruction initializes the tail call count register. -+ * On a tail call, the calling function jumps into the prologue -+ * after this instruction. -+ */ -+ emit(ctx, addiu, tc, MIPS_R_ZERO, min(MAX_TAIL_CALL_CNT + 1, 0xffff)); -+ -+ /* === Entry-point for tail calls === */ -+ -+ /* -+ * If the eBPF frame pointer and tail call count registers were -+ * accessed they must be preserved. Mark them as clobbered here -+ * to save and restore them on the stack as needed. -+ */ -+ if (ctx->accessed & BIT(BPF_REG_FP)) -+ clobber_reg(ctx, fp); -+ if (ctx->accessed & BIT(JIT_REG_TC)) -+ clobber_reg(ctx, tc); -+ if (ctx->accessed & BIT(JIT_REG_ZX)) -+ clobber_reg(ctx, zx); -+ -+ /* Compute the stack space needed for callee-saved registers */ -+ saved = hweight32(ctx->clobbered & JIT_CALLEE_REGS) * sizeof(u64); -+ saved = ALIGN(saved, MIPS_STACK_ALIGNMENT); -+ -+ /* Stack space used by eBPF program local data */ -+ locals = ALIGN(ctx->program->aux->stack_depth, MIPS_STACK_ALIGNMENT); -+ -+ /* -+ * If we are emitting function calls, reserve extra stack space for -+ * caller-saved registers needed by the JIT. The required space is -+ * computed automatically during resource usage discovery (pass 1). 
-+ */ -+ reserved = ctx->stack_used; -+ -+ /* Allocate the stack frame */ -+ stack = ALIGN(saved + locals + reserved, MIPS_STACK_ALIGNMENT); -+ if (stack) -+ emit(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, -stack); -+ -+ /* Store callee-saved registers on stack */ -+ push_regs(ctx, ctx->clobbered & JIT_CALLEE_REGS, 0, stack - saved); -+ -+ /* Initialize the eBPF frame pointer if accessed */ -+ if (ctx->accessed & BIT(BPF_REG_FP)) -+ emit(ctx, daddiu, fp, MIPS_R_SP, stack - saved); -+ -+ /* Initialize the ePF JIT zero-extension register if accessed */ -+ if (ctx->accessed & BIT(JIT_REG_ZX)) { -+ emit(ctx, daddiu, zx, MIPS_R_ZERO, -1); -+ emit(ctx, dsrl32, zx, zx, 0); -+ } -+ -+ ctx->saved_size = saved; -+ ctx->stack_size = stack; -+} -+ -+/* Build the program epilogue to restore the stack and registers */ -+void build_epilogue(struct jit_context *ctx, int dest_reg) -+{ -+ /* Restore callee-saved registers from stack */ -+ pop_regs(ctx, ctx->clobbered & JIT_CALLEE_REGS, 0, -+ ctx->stack_size - ctx->saved_size); -+ -+ /* Release the stack frame */ -+ if (ctx->stack_size) -+ emit(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, ctx->stack_size); -+ -+ /* Jump to return address and sign-extend the 32-bit return value */ -+ emit(ctx, jr, dest_reg); -+ emit(ctx, sll, MIPS_R_V0, MIPS_R_V0, 0); /* Delay slot */ -+} -+ -+/* Build one eBPF instruction */ -+int build_insn(const struct bpf_insn *insn, struct jit_context *ctx) -+{ -+ u8 dst = bpf2mips64[insn->dst_reg]; -+ u8 src = bpf2mips64[insn->src_reg]; -+ u8 code = insn->code; -+ s16 off = insn->off; -+ s32 imm = insn->imm; -+ s32 val, rel; -+ u8 alu, jmp; -+ -+ switch (code) { -+ /* ALU operations */ -+ /* dst = imm */ -+ case BPF_ALU | BPF_MOV | BPF_K: -+ emit_mov_i(ctx, dst, imm); -+ emit_zext_ver(ctx, dst); -+ break; -+ /* dst = src */ -+ case BPF_ALU | BPF_MOV | BPF_X: -+ if (imm == 1) { -+ /* Special mov32 for zext */ -+ emit_zext(ctx, dst); -+ } else { -+ emit_mov_r(ctx, dst, src); -+ emit_zext_ver(ctx, dst); -+ } -+ break; -+ /* dst = -dst */ -+ case BPF_ALU | BPF_NEG: -+ emit_sext(ctx, dst, dst); -+ emit_alu_i(ctx, dst, 0, BPF_NEG); -+ emit_zext_ver(ctx, dst); -+ break; -+ /* dst = dst & imm */ -+ /* dst = dst | imm */ -+ /* dst = dst ^ imm */ -+ /* dst = dst << imm */ -+ case BPF_ALU | BPF_OR | BPF_K: -+ case BPF_ALU | BPF_AND | BPF_K: -+ case BPF_ALU | BPF_XOR | BPF_K: -+ case BPF_ALU | BPF_LSH | BPF_K: -+ if (!valid_alu_i(BPF_OP(code), imm)) { -+ emit_mov_i(ctx, MIPS_R_T4, imm); -+ emit_alu_r(ctx, dst, MIPS_R_T4, BPF_OP(code)); -+ } else if (rewrite_alu_i(BPF_OP(code), imm, &alu, &val)) { -+ emit_alu_i(ctx, dst, val, alu); -+ } -+ emit_zext_ver(ctx, dst); -+ break; -+ /* dst = dst >> imm */ -+ /* dst = dst >> imm (arithmetic) */ -+ /* dst = dst + imm */ -+ /* dst = dst - imm */ -+ /* dst = dst * imm */ -+ /* dst = dst / imm */ -+ /* dst = dst % imm */ -+ case BPF_ALU | BPF_RSH | BPF_K: -+ case BPF_ALU | BPF_ARSH | BPF_K: -+ case BPF_ALU | BPF_ADD | BPF_K: -+ case BPF_ALU | BPF_SUB | BPF_K: -+ case BPF_ALU | BPF_MUL | BPF_K: -+ case BPF_ALU | BPF_DIV | BPF_K: -+ case BPF_ALU | BPF_MOD | BPF_K: -+ if (!valid_alu_i(BPF_OP(code), imm)) { -+ emit_sext(ctx, dst, dst); -+ emit_mov_i(ctx, MIPS_R_T4, imm); -+ emit_alu_r(ctx, dst, MIPS_R_T4, BPF_OP(code)); -+ } else if (rewrite_alu_i(BPF_OP(code), imm, &alu, &val)) { -+ emit_sext(ctx, dst, dst); -+ emit_alu_i(ctx, dst, val, alu); -+ } -+ emit_zext_ver(ctx, dst); -+ break; -+ /* dst = dst & src */ -+ /* dst = dst | src */ -+ /* dst = dst ^ src */ -+ /* dst = dst << src */ -+ case BPF_ALU | BPF_AND | BPF_X: -+ case 
BPF_ALU | BPF_OR | BPF_X: -+ case BPF_ALU | BPF_XOR | BPF_X: -+ case BPF_ALU | BPF_LSH | BPF_X: -+ emit_alu_r(ctx, dst, src, BPF_OP(code)); -+ emit_zext_ver(ctx, dst); -+ break; -+ /* dst = dst >> src */ -+ /* dst = dst >> src (arithmetic) */ -+ /* dst = dst + src */ -+ /* dst = dst - src */ -+ /* dst = dst * src */ -+ /* dst = dst / src */ -+ /* dst = dst % src */ -+ case BPF_ALU | BPF_RSH | BPF_X: -+ case BPF_ALU | BPF_ARSH | BPF_X: -+ case BPF_ALU | BPF_ADD | BPF_X: -+ case BPF_ALU | BPF_SUB | BPF_X: -+ case BPF_ALU | BPF_MUL | BPF_X: -+ case BPF_ALU | BPF_DIV | BPF_X: -+ case BPF_ALU | BPF_MOD | BPF_X: -+ emit_sext(ctx, dst, dst); -+ emit_sext(ctx, MIPS_R_T4, src); -+ emit_alu_r(ctx, dst, MIPS_R_T4, BPF_OP(code)); -+ emit_zext_ver(ctx, dst); -+ break; -+ /* dst = imm (64-bit) */ -+ case BPF_ALU64 | BPF_MOV | BPF_K: -+ emit_mov_i(ctx, dst, imm); -+ break; -+ /* dst = src (64-bit) */ -+ case BPF_ALU64 | BPF_MOV | BPF_X: -+ emit_mov_r(ctx, dst, src); -+ break; -+ /* dst = -dst (64-bit) */ -+ case BPF_ALU64 | BPF_NEG: -+ emit_alu_i64(ctx, dst, 0, BPF_NEG); -+ break; -+ /* dst = dst & imm (64-bit) */ -+ /* dst = dst | imm (64-bit) */ -+ /* dst = dst ^ imm (64-bit) */ -+ /* dst = dst << imm (64-bit) */ -+ /* dst = dst >> imm (64-bit) */ -+ /* dst = dst >> imm ((64-bit, arithmetic) */ -+ /* dst = dst + imm (64-bit) */ -+ /* dst = dst - imm (64-bit) */ -+ /* dst = dst * imm (64-bit) */ -+ /* dst = dst / imm (64-bit) */ -+ /* dst = dst % imm (64-bit) */ -+ case BPF_ALU64 | BPF_AND | BPF_K: -+ case BPF_ALU64 | BPF_OR | BPF_K: -+ case BPF_ALU64 | BPF_XOR | BPF_K: -+ case BPF_ALU64 | BPF_LSH | BPF_K: -+ case BPF_ALU64 | BPF_RSH | BPF_K: -+ case BPF_ALU64 | BPF_ARSH | BPF_K: -+ case BPF_ALU64 | BPF_ADD | BPF_K: -+ case BPF_ALU64 | BPF_SUB | BPF_K: -+ case BPF_ALU64 | BPF_MUL | BPF_K: -+ case BPF_ALU64 | BPF_DIV | BPF_K: -+ case BPF_ALU64 | BPF_MOD | BPF_K: -+ if (!valid_alu_i(BPF_OP(code), imm)) { -+ emit_mov_i(ctx, MIPS_R_T4, imm); -+ emit_alu_r64(ctx, dst, MIPS_R_T4, BPF_OP(code)); -+ } else if (rewrite_alu_i(BPF_OP(code), imm, &alu, &val)) { -+ emit_alu_i64(ctx, dst, val, alu); -+ } -+ break; -+ /* dst = dst & src (64-bit) */ -+ /* dst = dst | src (64-bit) */ -+ /* dst = dst ^ src (64-bit) */ -+ /* dst = dst << src (64-bit) */ -+ /* dst = dst >> src (64-bit) */ -+ /* dst = dst >> src (64-bit, arithmetic) */ -+ /* dst = dst + src (64-bit) */ -+ /* dst = dst - src (64-bit) */ -+ /* dst = dst * src (64-bit) */ -+ /* dst = dst / src (64-bit) */ -+ /* dst = dst % src (64-bit) */ -+ case BPF_ALU64 | BPF_AND | BPF_X: -+ case BPF_ALU64 | BPF_OR | BPF_X: -+ case BPF_ALU64 | BPF_XOR | BPF_X: -+ case BPF_ALU64 | BPF_LSH | BPF_X: -+ case BPF_ALU64 | BPF_RSH | BPF_X: -+ case BPF_ALU64 | BPF_ARSH | BPF_X: -+ case BPF_ALU64 | BPF_ADD | BPF_X: -+ case BPF_ALU64 | BPF_SUB | BPF_X: -+ case BPF_ALU64 | BPF_MUL | BPF_X: -+ case BPF_ALU64 | BPF_DIV | BPF_X: -+ case BPF_ALU64 | BPF_MOD | BPF_X: -+ emit_alu_r64(ctx, dst, src, BPF_OP(code)); -+ break; -+ /* dst = htole(dst) */ -+ /* dst = htobe(dst) */ -+ case BPF_ALU | BPF_END | BPF_FROM_LE: -+ case BPF_ALU | BPF_END | BPF_FROM_BE: -+ if (BPF_SRC(code) == -+#ifdef __BIG_ENDIAN -+ BPF_FROM_LE -+#else -+ BPF_FROM_BE -+#endif -+ ) -+ emit_bswap_r64(ctx, dst, imm); -+ else -+ emit_trunc_r64(ctx, dst, imm); -+ break; -+ /* dst = imm64 */ -+ case BPF_LD | BPF_IMM | BPF_DW: -+ emit_mov_i64(ctx, dst, (u32)imm | ((u64)insn[1].imm << 32)); -+ return 1; -+ /* LDX: dst = *(size *)(src + off) */ -+ case BPF_LDX | BPF_MEM | BPF_W: -+ case BPF_LDX | BPF_MEM | BPF_H: -+ case BPF_LDX 
| BPF_MEM | BPF_B: -+ case BPF_LDX | BPF_MEM | BPF_DW: -+ emit_ldx(ctx, dst, src, off, BPF_SIZE(code)); -+ break; -+ /* ST: *(size *)(dst + off) = imm */ -+ case BPF_ST | BPF_MEM | BPF_W: -+ case BPF_ST | BPF_MEM | BPF_H: -+ case BPF_ST | BPF_MEM | BPF_B: -+ case BPF_ST | BPF_MEM | BPF_DW: -+ emit_mov_i(ctx, MIPS_R_T4, imm); -+ emit_stx(ctx, dst, MIPS_R_T4, off, BPF_SIZE(code)); -+ break; -+ /* STX: *(size *)(dst + off) = src */ -+ case BPF_STX | BPF_MEM | BPF_W: -+ case BPF_STX | BPF_MEM | BPF_H: -+ case BPF_STX | BPF_MEM | BPF_B: -+ case BPF_STX | BPF_MEM | BPF_DW: -+ emit_stx(ctx, dst, src, off, BPF_SIZE(code)); -+ break; -+ /* Speculation barrier */ -+ case BPF_ST | BPF_NOSPEC: -+ break; -+ /* Atomics */ -+ case BPF_STX | BPF_XADD | BPF_W: -+ case BPF_STX | BPF_XADD | BPF_DW: -+ switch (imm) { -+ case BPF_ADD: -+ case BPF_AND: -+ case BPF_OR: -+ case BPF_XOR: -+ if (BPF_SIZE(code) == BPF_DW) { -+ emit_atomic_r64(ctx, dst, src, off, imm); -+ } else { /* 32-bit, no fetch */ -+ emit_sext(ctx, MIPS_R_T4, src); -+ emit_atomic_r(ctx, dst, MIPS_R_T4, off, imm); -+ } -+ break; -+ default: -+ goto notyet; -+ } -+ break; -+ /* PC += off if dst == src */ -+ /* PC += off if dst != src */ -+ /* PC += off if dst & src */ -+ /* PC += off if dst > src */ -+ /* PC += off if dst >= src */ -+ /* PC += off if dst < src */ -+ /* PC += off if dst <= src */ -+ /* PC += off if dst > src (signed) */ -+ /* PC += off if dst >= src (signed) */ -+ /* PC += off if dst < src (signed) */ -+ /* PC += off if dst <= src (signed) */ -+ case BPF_JMP32 | BPF_JEQ | BPF_X: -+ case BPF_JMP32 | BPF_JNE | BPF_X: -+ case BPF_JMP32 | BPF_JSET | BPF_X: -+ case BPF_JMP32 | BPF_JGT | BPF_X: -+ case BPF_JMP32 | BPF_JGE | BPF_X: -+ case BPF_JMP32 | BPF_JLT | BPF_X: -+ case BPF_JMP32 | BPF_JLE | BPF_X: -+ case BPF_JMP32 | BPF_JSGT | BPF_X: -+ case BPF_JMP32 | BPF_JSGE | BPF_X: -+ case BPF_JMP32 | BPF_JSLT | BPF_X: -+ case BPF_JMP32 | BPF_JSLE | BPF_X: -+ if (off == 0) -+ break; -+ setup_jmp_r(ctx, dst == src, BPF_OP(code), off, &jmp, &rel); -+ emit_sext(ctx, MIPS_R_T4, dst); /* Sign-extended dst */ -+ emit_sext(ctx, MIPS_R_T5, src); /* Sign-extended src */ -+ emit_jmp_r(ctx, MIPS_R_T4, MIPS_R_T5, rel, jmp); -+ if (finish_jmp(ctx, jmp, off) < 0) -+ goto toofar; -+ break; -+ /* PC += off if dst == imm */ -+ /* PC += off if dst != imm */ -+ /* PC += off if dst & imm */ -+ /* PC += off if dst > imm */ -+ /* PC += off if dst >= imm */ -+ /* PC += off if dst < imm */ -+ /* PC += off if dst <= imm */ -+ /* PC += off if dst > imm (signed) */ -+ /* PC += off if dst >= imm (signed) */ -+ /* PC += off if dst < imm (signed) */ -+ /* PC += off if dst <= imm (signed) */ -+ case BPF_JMP32 | BPF_JEQ | BPF_K: -+ case BPF_JMP32 | BPF_JNE | BPF_K: -+ case BPF_JMP32 | BPF_JSET | BPF_K: -+ case BPF_JMP32 | BPF_JGT | BPF_K: -+ case BPF_JMP32 | BPF_JGE | BPF_K: -+ case BPF_JMP32 | BPF_JLT | BPF_K: -+ case BPF_JMP32 | BPF_JLE | BPF_K: -+ case BPF_JMP32 | BPF_JSGT | BPF_K: -+ case BPF_JMP32 | BPF_JSGE | BPF_K: -+ case BPF_JMP32 | BPF_JSLT | BPF_K: -+ case BPF_JMP32 | BPF_JSLE | BPF_K: -+ if (off == 0) -+ break; -+ setup_jmp_i(ctx, imm, 32, BPF_OP(code), off, &jmp, &rel); -+ emit_sext(ctx, MIPS_R_T4, dst); /* Sign-extended dst */ -+ if (valid_jmp_i(jmp, imm)) { -+ emit_jmp_i(ctx, MIPS_R_T4, imm, rel, jmp); -+ } else { -+ /* Move large immediate to register, sign-extended */ -+ emit_mov_i(ctx, MIPS_R_T5, imm); -+ emit_jmp_r(ctx, MIPS_R_T4, MIPS_R_T5, rel, jmp); -+ } -+ if (finish_jmp(ctx, jmp, off) < 0) -+ goto toofar; -+ break; -+ /* PC += off if dst == src */ 
-+ /* PC += off if dst != src */ -+ /* PC += off if dst & src */ -+ /* PC += off if dst > src */ -+ /* PC += off if dst >= src */ -+ /* PC += off if dst < src */ -+ /* PC += off if dst <= src */ -+ /* PC += off if dst > src (signed) */ -+ /* PC += off if dst >= src (signed) */ -+ /* PC += off if dst < src (signed) */ -+ /* PC += off if dst <= src (signed) */ -+ case BPF_JMP | BPF_JEQ | BPF_X: -+ case BPF_JMP | BPF_JNE | BPF_X: -+ case BPF_JMP | BPF_JSET | BPF_X: -+ case BPF_JMP | BPF_JGT | BPF_X: -+ case BPF_JMP | BPF_JGE | BPF_X: -+ case BPF_JMP | BPF_JLT | BPF_X: -+ case BPF_JMP | BPF_JLE | BPF_X: -+ case BPF_JMP | BPF_JSGT | BPF_X: -+ case BPF_JMP | BPF_JSGE | BPF_X: -+ case BPF_JMP | BPF_JSLT | BPF_X: -+ case BPF_JMP | BPF_JSLE | BPF_X: -+ if (off == 0) -+ break; -+ setup_jmp_r(ctx, dst == src, BPF_OP(code), off, &jmp, &rel); -+ emit_jmp_r(ctx, dst, src, rel, jmp); -+ if (finish_jmp(ctx, jmp, off) < 0) -+ goto toofar; -+ break; -+ /* PC += off if dst == imm */ -+ /* PC += off if dst != imm */ -+ /* PC += off if dst & imm */ -+ /* PC += off if dst > imm */ -+ /* PC += off if dst >= imm */ -+ /* PC += off if dst < imm */ -+ /* PC += off if dst <= imm */ -+ /* PC += off if dst > imm (signed) */ -+ /* PC += off if dst >= imm (signed) */ -+ /* PC += off if dst < imm (signed) */ -+ /* PC += off if dst <= imm (signed) */ -+ case BPF_JMP | BPF_JEQ | BPF_K: -+ case BPF_JMP | BPF_JNE | BPF_K: -+ case BPF_JMP | BPF_JSET | BPF_K: -+ case BPF_JMP | BPF_JGT | BPF_K: -+ case BPF_JMP | BPF_JGE | BPF_K: -+ case BPF_JMP | BPF_JLT | BPF_K: -+ case BPF_JMP | BPF_JLE | BPF_K: -+ case BPF_JMP | BPF_JSGT | BPF_K: -+ case BPF_JMP | BPF_JSGE | BPF_K: -+ case BPF_JMP | BPF_JSLT | BPF_K: -+ case BPF_JMP | BPF_JSLE | BPF_K: -+ if (off == 0) -+ break; -+ setup_jmp_i(ctx, imm, 64, BPF_OP(code), off, &jmp, &rel); -+ if (valid_jmp_i(jmp, imm)) { -+ emit_jmp_i(ctx, dst, imm, rel, jmp); -+ } else { -+ /* Move large immediate to register */ -+ emit_mov_i(ctx, MIPS_R_T4, imm); -+ emit_jmp_r(ctx, dst, MIPS_R_T4, rel, jmp); -+ } -+ if (finish_jmp(ctx, jmp, off) < 0) -+ goto toofar; -+ break; -+ /* PC += off */ -+ case BPF_JMP | BPF_JA: -+ if (off == 0) -+ break; -+ if (emit_ja(ctx, off) < 0) -+ goto toofar; -+ break; -+ /* Tail call */ -+ case BPF_JMP | BPF_TAIL_CALL: -+ if (emit_tail_call(ctx) < 0) -+ goto invalid; -+ break; -+ /* Function call */ -+ case BPF_JMP | BPF_CALL: -+ if (emit_call(ctx, insn) < 0) -+ goto invalid; -+ break; -+ /* Function return */ -+ case BPF_JMP | BPF_EXIT: -+ /* -+ * Optimization: when last instruction is EXIT -+ * simply continue to epilogue. 
-+ */ -+ if (ctx->bpf_index == ctx->program->len - 1) -+ break; -+ if (emit_exit(ctx) < 0) -+ goto toofar; -+ break; -+ -+ default: -+invalid: -+ pr_err_once("unknown opcode %02x\n", code); -+ return -EINVAL; -+notyet: -+ pr_info_once("*** NOT YET: opcode %02x ***\n", code); -+ return -EFAULT; -+toofar: -+ pr_info_once("*** TOO FAR: jump at %u opcode %02x ***\n", -+ ctx->bpf_index, code); -+ return -E2BIG; -+ } -+ return 0; -+} diff --git a/target/linux/generic/backport-6.1/050-v5.16-04-mips-bpf-Add-JIT-workarounds-for-CPU-errata.patch b/target/linux/generic/backport-6.1/050-v5.16-04-mips-bpf-Add-JIT-workarounds-for-CPU-errata.patch deleted file mode 100644 index 63553ebe5842..000000000000 --- a/target/linux/generic/backport-6.1/050-v5.16-04-mips-bpf-Add-JIT-workarounds-for-CPU-errata.patch +++ /dev/null @@ -1,120 +0,0 @@ -From: Johan Almbladh -Date: Tue, 5 Oct 2021 18:54:06 +0200 -Subject: [PATCH] mips: bpf: Add JIT workarounds for CPU errata - -This patch adds workarounds for the following CPU errata to the MIPS -eBPF JIT, if enabled in the kernel configuration. - - - R10000 ll/sc weak ordering - - Loongson-3 ll/sc weak ordering - - Loongson-2F jump hang - -The Loongson-2F nop errata is implemented in uasm, which the JIT uses, -so no additional mitigations are needed for that. - -Signed-off-by: Johan Almbladh -Reviewed-by: Jiaxun Yang ---- - ---- a/arch/mips/net/bpf_jit_comp.c -+++ b/arch/mips/net/bpf_jit_comp.c -@@ -404,6 +404,7 @@ void emit_alu_r(struct jit_context *ctx, - /* Atomic read-modify-write (32-bit) */ - void emit_atomic_r(struct jit_context *ctx, u8 dst, u8 src, s16 off, u8 code) - { -+ LLSC_sync(ctx); - emit(ctx, ll, MIPS_R_T9, off, dst); - switch (code) { - case BPF_ADD: -@@ -420,18 +421,19 @@ void emit_atomic_r(struct jit_context *c - break; - } - emit(ctx, sc, MIPS_R_T8, off, dst); -- emit(ctx, beqz, MIPS_R_T8, -16); -+ emit(ctx, LLSC_beqz, MIPS_R_T8, -16 - LLSC_offset); - emit(ctx, nop); /* Delay slot */ - } - - /* Atomic compare-and-exchange (32-bit) */ - void emit_cmpxchg_r(struct jit_context *ctx, u8 dst, u8 src, u8 res, s16 off) - { -+ LLSC_sync(ctx); - emit(ctx, ll, MIPS_R_T9, off, dst); - emit(ctx, bne, MIPS_R_T9, res, 12); - emit(ctx, move, MIPS_R_T8, src); /* Delay slot */ - emit(ctx, sc, MIPS_R_T8, off, dst); -- emit(ctx, beqz, MIPS_R_T8, -20); -+ emit(ctx, LLSC_beqz, MIPS_R_T8, -20 - LLSC_offset); - emit(ctx, move, res, MIPS_R_T9); /* Delay slot */ - clobber_reg(ctx, res); - } ---- a/arch/mips/net/bpf_jit_comp.h -+++ b/arch/mips/net/bpf_jit_comp.h -@@ -87,7 +87,7 @@ struct jit_context { - }; - - /* Emit the instruction if the JIT memory space has been allocated */ --#define emit(ctx, func, ...) \ -+#define __emit(ctx, func, ...) \ - do { \ - if ((ctx)->target != NULL) { \ - u32 *p = &(ctx)->target[ctx->jit_index]; \ -@@ -95,6 +95,30 @@ do { \ - } \ - (ctx)->jit_index++; \ - } while (0) -+#define emit(...) 
__emit(__VA_ARGS__) -+ -+/* Workaround for R10000 ll/sc errata */ -+#ifdef CONFIG_WAR_R10000 -+#define LLSC_beqz beqzl -+#else -+#define LLSC_beqz beqz -+#endif -+ -+/* Workaround for Loongson-3 ll/sc errata */ -+#ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS -+#define LLSC_sync(ctx) emit(ctx, sync, 0) -+#define LLSC_offset 4 -+#else -+#define LLSC_sync(ctx) -+#define LLSC_offset 0 -+#endif -+ -+/* Workaround for Loongson-2F jump errata */ -+#ifdef CONFIG_CPU_JUMP_WORKAROUNDS -+#define JALR_MASK 0xffffffffcfffffffULL -+#else -+#define JALR_MASK (~0ULL) -+#endif - - /* - * Mark a BPF register as accessed, it needs to be ---- a/arch/mips/net/bpf_jit_comp64.c -+++ b/arch/mips/net/bpf_jit_comp64.c -@@ -375,6 +375,7 @@ static void emit_atomic_r64(struct jit_c - u8 t1 = MIPS_R_T6; - u8 t2 = MIPS_R_T7; - -+ LLSC_sync(ctx); - emit(ctx, lld, t1, off, dst); - switch (code) { - case BPF_ADD: -@@ -391,7 +392,7 @@ static void emit_atomic_r64(struct jit_c - break; - } - emit(ctx, scd, t2, off, dst); -- emit(ctx, beqz, t2, -16); -+ emit(ctx, LLSC_beqz, t2, -16 - LLSC_offset); - emit(ctx, nop); /* Delay slot */ - } - -@@ -414,7 +415,7 @@ static int emit_call(struct jit_context - push_regs(ctx, ctx->clobbered & JIT_CALLER_REGS, 0, 0); - - /* Emit function call */ -- emit_mov_i64(ctx, tmp, addr); -+ emit_mov_i64(ctx, tmp, addr & JALR_MASK); - emit(ctx, jalr, MIPS_R_RA, tmp); - emit(ctx, nop); /* Delay slot */ - diff --git a/target/linux/generic/backport-6.1/050-v5.16-05-mips-bpf-Enable-eBPF-JITs.patch b/target/linux/generic/backport-6.1/050-v5.16-05-mips-bpf-Enable-eBPF-JITs.patch deleted file mode 100644 index 14763c1211ea..000000000000 --- a/target/linux/generic/backport-6.1/050-v5.16-05-mips-bpf-Enable-eBPF-JITs.patch +++ /dev/null @@ -1,61 +0,0 @@ -From: Johan Almbladh -Date: Tue, 5 Oct 2021 18:54:07 +0200 -Subject: [PATCH] mips: bpf: Enable eBPF JITs - -This patch enables the new eBPF JITs for 32-bit and 64-bit MIPS. It also -disables the old cBPF JIT to so cBPF programs are converted to use the -new JIT. - -Workarounds for R4000 CPU errata are not implemented by the JIT, so the -JIT is disabled if any of those workarounds are configured. 
- -Signed-off-by: Johan Almbladh ---- - ---- a/MAINTAINERS -+++ b/MAINTAINERS -@@ -3431,6 +3431,7 @@ S: Supported - F: arch/arm64/net/ - - BPF JIT for MIPS (32-BIT AND 64-BIT) -+M: Johan Almbladh - M: Paul Burton - L: netdev@vger.kernel.org - L: bpf@vger.kernel.org ---- a/arch/mips/Kconfig -+++ b/arch/mips/Kconfig -@@ -57,7 +57,6 @@ config MIPS - select HAVE_ARCH_TRACEHOOK - select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES - select HAVE_ASM_MODVERSIONS -- select HAVE_CBPF_JIT if !64BIT && !CPU_MICROMIPS - select HAVE_CONTEXT_TRACKING - select HAVE_TIF_NOHZ - select HAVE_C_RECORDMCOUNT -@@ -65,7 +64,10 @@ config MIPS - select HAVE_DEBUG_STACKOVERFLOW - select HAVE_DMA_CONTIGUOUS - select HAVE_DYNAMIC_FTRACE -- select HAVE_EBPF_JIT if 64BIT && !CPU_MICROMIPS && TARGET_ISA_REV >= 2 -+ select HAVE_EBPF_JIT if !CPU_MICROMIPS && \ -+ !CPU_DADDI_WORKAROUNDS && \ -+ !CPU_R4000_WORKAROUNDS && \ -+ !CPU_R4400_WORKAROUNDS - select HAVE_EXIT_THREAD - select HAVE_FAST_GUP - select HAVE_FTRACE_MCOUNT_RECORD ---- a/arch/mips/net/Makefile -+++ b/arch/mips/net/Makefile -@@ -2,9 +2,10 @@ - # MIPS networking code - - obj-$(CONFIG_MIPS_CBPF_JIT) += bpf_jit.o bpf_jit_asm.o -+obj-$(CONFIG_MIPS_EBPF_JIT) += bpf_jit_comp.o - - ifeq ($(CONFIG_32BIT),y) -- obj-$(CONFIG_MIPS_EBPF_JIT) += bpf_jit_comp.o bpf_jit_comp32.o -+ obj-$(CONFIG_MIPS_EBPF_JIT) += bpf_jit_comp32.o - else -- obj-$(CONFIG_MIPS_EBPF_JIT) += ebpf_jit.o -+ obj-$(CONFIG_MIPS_EBPF_JIT) += bpf_jit_comp64.o - endif diff --git a/target/linux/generic/backport-6.1/050-v5.16-06-mips-bpf-Remove-old-BPF-JIT-implementations.patch b/target/linux/generic/backport-6.1/050-v5.16-06-mips-bpf-Remove-old-BPF-JIT-implementations.patch deleted file mode 100644 index e25c336831dc..000000000000 --- a/target/linux/generic/backport-6.1/050-v5.16-06-mips-bpf-Remove-old-BPF-JIT-implementations.patch +++ /dev/null @@ -1,387 +0,0 @@ -From: Johan Almbladh -Date: Tue, 5 Oct 2021 18:54:08 +0200 -Subject: [PATCH] mips: bpf: Remove old BPF JIT implementations - -This patch removes the old 32-bit cBPF and 64-bit eBPF JIT implementations. -They are replaced by a new eBPF implementation that supports both 32-bit -and 64-bit MIPS CPUs. - -Signed-off-by: Johan Almbladh ---- - delete mode 100644 arch/mips/net/bpf_jit.c - delete mode 100644 arch/mips/net/bpf_jit.h - delete mode 100644 arch/mips/net/bpf_jit_asm.S - delete mode 100644 arch/mips/net/ebpf_jit.c - ---- a/arch/mips/net/bpf_jit.h -+++ /dev/null -@@ -1,81 +0,0 @@ --/* SPDX-License-Identifier: GPL-2.0-only */ --/* -- * Just-In-Time compiler for BPF filters on MIPS -- * -- * Copyright (c) 2014 Imagination Technologies Ltd. 
-- * Author: Markos Chandras -- */ -- --#ifndef BPF_JIT_MIPS_OP_H --#define BPF_JIT_MIPS_OP_H -- --/* Registers used by JIT */ --#define MIPS_R_ZERO 0 --#define MIPS_R_V0 2 --#define MIPS_R_A0 4 --#define MIPS_R_A1 5 --#define MIPS_R_T4 12 --#define MIPS_R_T5 13 --#define MIPS_R_T6 14 --#define MIPS_R_T7 15 --#define MIPS_R_S0 16 --#define MIPS_R_S1 17 --#define MIPS_R_S2 18 --#define MIPS_R_S3 19 --#define MIPS_R_S4 20 --#define MIPS_R_S5 21 --#define MIPS_R_S6 22 --#define MIPS_R_S7 23 --#define MIPS_R_SP 29 --#define MIPS_R_RA 31 -- --/* Conditional codes */ --#define MIPS_COND_EQ 0x1 --#define MIPS_COND_GE (0x1 << 1) --#define MIPS_COND_GT (0x1 << 2) --#define MIPS_COND_NE (0x1 << 3) --#define MIPS_COND_ALL (0x1 << 4) --/* Conditionals on X register or K immediate */ --#define MIPS_COND_X (0x1 << 5) --#define MIPS_COND_K (0x1 << 6) -- --#define r_ret MIPS_R_V0 -- --/* -- * Use 2 scratch registers to avoid pipeline interlocks. -- * There is no overhead during epilogue and prologue since -- * any of the $s0-$s6 registers will only be preserved if -- * they are going to actually be used. -- */ --#define r_skb_hl MIPS_R_S0 /* skb header length */ --#define r_skb_data MIPS_R_S1 /* skb actual data */ --#define r_off MIPS_R_S2 --#define r_A MIPS_R_S3 --#define r_X MIPS_R_S4 --#define r_skb MIPS_R_S5 --#define r_M MIPS_R_S6 --#define r_skb_len MIPS_R_S7 --#define r_s0 MIPS_R_T4 /* scratch reg 1 */ --#define r_s1 MIPS_R_T5 /* scratch reg 2 */ --#define r_tmp_imm MIPS_R_T6 /* No need to preserve this */ --#define r_tmp MIPS_R_T7 /* No need to preserve this */ --#define r_zero MIPS_R_ZERO --#define r_sp MIPS_R_SP --#define r_ra MIPS_R_RA -- --#ifndef __ASSEMBLY__ -- --/* Declare ASM helpers */ -- --#define DECLARE_LOAD_FUNC(func) \ -- extern u8 func(unsigned long *skb, int offset); \ -- extern u8 func##_negative(unsigned long *skb, int offset); \ -- extern u8 func##_positive(unsigned long *skb, int offset) -- --DECLARE_LOAD_FUNC(sk_load_word); --DECLARE_LOAD_FUNC(sk_load_half); --DECLARE_LOAD_FUNC(sk_load_byte); -- --#endif -- --#endif /* BPF_JIT_MIPS_OP_H */ ---- a/arch/mips/net/bpf_jit_asm.S -+++ /dev/null -@@ -1,285 +0,0 @@ --/* -- * bpf_jib_asm.S: Packet/header access helper functions for MIPS/MIPS64 BPF -- * compiler. -- * -- * Copyright (C) 2015 Imagination Technologies Ltd. -- * Author: Markos Chandras -- * -- * This program is free software; you can redistribute it and/or modify it -- * under the terms of the GNU General Public License as published by the -- * Free Software Foundation; version 2 of the License. -- */ -- --#include --#include --#include --#include "bpf_jit.h" -- --/* ABI -- * -- * r_skb_hl skb header length -- * r_skb_data skb data -- * r_off(a1) offset register -- * r_A BPF register A -- * r_X PF register X -- * r_skb(a0) *skb -- * r_M *scratch memory -- * r_skb_le skb length -- * r_s0 Scratch register 0 -- * r_s1 Scratch register 1 -- * -- * On entry: -- * a0: *skb -- * a1: offset (imm or imm + X) -- * -- * All non-BPF-ABI registers are free for use. On return, we only -- * care about r_ret. The BPF-ABI registers are assumed to remain -- * unmodified during the entire filter operation. 
-- */ -- --#define skb a0 --#define offset a1 --#define SKF_LL_OFF (-0x200000) /* Can't include linux/filter.h in assembly */ -- -- /* We know better :) so prevent assembler reordering etc */ -- .set noreorder -- --#define is_offset_negative(TYPE) \ -- /* If offset is negative we have more work to do */ \ -- slti t0, offset, 0; \ -- bgtz t0, bpf_slow_path_##TYPE##_neg; \ -- /* Be careful what follows in DS. */ -- --#define is_offset_in_header(SIZE, TYPE) \ -- /* Reading from header? */ \ -- addiu $r_s0, $r_skb_hl, -SIZE; \ -- slt t0, $r_s0, offset; \ -- bgtz t0, bpf_slow_path_##TYPE; \ -- --LEAF(sk_load_word) -- is_offset_negative(word) --FEXPORT(sk_load_word_positive) -- is_offset_in_header(4, word) -- /* Offset within header boundaries */ -- PTR_ADDU t1, $r_skb_data, offset -- .set reorder -- lw $r_A, 0(t1) -- .set noreorder --#ifdef CONFIG_CPU_LITTLE_ENDIAN --# if MIPS_ISA_REV >= 2 -- wsbh t0, $r_A -- rotr $r_A, t0, 16 --# else -- sll t0, $r_A, 24 -- srl t1, $r_A, 24 -- srl t2, $r_A, 8 -- or t0, t0, t1 -- andi t2, t2, 0xff00 -- andi t1, $r_A, 0xff00 -- or t0, t0, t2 -- sll t1, t1, 8 -- or $r_A, t0, t1 --# endif --#endif -- jr $r_ra -- move $r_ret, zero -- END(sk_load_word) -- --LEAF(sk_load_half) -- is_offset_negative(half) --FEXPORT(sk_load_half_positive) -- is_offset_in_header(2, half) -- /* Offset within header boundaries */ -- PTR_ADDU t1, $r_skb_data, offset -- lhu $r_A, 0(t1) --#ifdef CONFIG_CPU_LITTLE_ENDIAN --# if MIPS_ISA_REV >= 2 -- wsbh $r_A, $r_A --# else -- sll t0, $r_A, 8 -- srl t1, $r_A, 8 -- andi t0, t0, 0xff00 -- or $r_A, t0, t1 --# endif --#endif -- jr $r_ra -- move $r_ret, zero -- END(sk_load_half) -- --LEAF(sk_load_byte) -- is_offset_negative(byte) --FEXPORT(sk_load_byte_positive) -- is_offset_in_header(1, byte) -- /* Offset within header boundaries */ -- PTR_ADDU t1, $r_skb_data, offset -- lbu $r_A, 0(t1) -- jr $r_ra -- move $r_ret, zero -- END(sk_load_byte) -- --/* -- * call skb_copy_bits: -- * (prototype in linux/skbuff.h) -- * -- * int skb_copy_bits(sk_buff *skb, int offset, void *to, int len) -- * -- * o32 mandates we leave 4 spaces for argument registers in case -- * the callee needs to use them. Even though we don't care about -- * the argument registers ourselves, we need to allocate that space -- * to remain ABI compliant since the callee may want to use that space. -- * We also allocate 2 more spaces for $r_ra and our return register (*to). -- * -- * n64 is a bit different. The *caller* will allocate the space to preserve -- * the arguments. So in 64-bit kernels, we allocate the 4-arg space for no -- * good reason but it does not matter that much really. -- * -- * (void *to) is returned in r_s0 -- * -- */ --#ifdef CONFIG_CPU_LITTLE_ENDIAN --#define DS_OFFSET(SIZE) (4 * SZREG) --#else --#define DS_OFFSET(SIZE) ((4 * SZREG) + (4 - SIZE)) --#endif --#define bpf_slow_path_common(SIZE) \ -- /* Quick check. Are we within reasonable boundaries? */ \ -- LONG_ADDIU $r_s1, $r_skb_len, -SIZE; \ -- sltu $r_s0, offset, $r_s1; \ -- beqz $r_s0, fault; \ -- /* Load 4th argument in DS */ \ -- LONG_ADDIU a3, zero, SIZE; \ -- PTR_ADDIU $r_sp, $r_sp, -(6 * SZREG); \ -- PTR_LA t0, skb_copy_bits; \ -- PTR_S $r_ra, (5 * SZREG)($r_sp); \ -- /* Assign low slot to a2 */ \ -- PTR_ADDIU a2, $r_sp, DS_OFFSET(SIZE); \ -- jalr t0; \ -- /* Reset our destination slot (DS but it's ok) */ \ -- INT_S zero, (4 * SZREG)($r_sp); \ -- /* \ -- * skb_copy_bits returns 0 on success and -EFAULT \ -- * on error. Our data live in a2. Do not bother with \ -- * our data if an error has been returned. 
\ -- */ \ -- /* Restore our frame */ \ -- PTR_L $r_ra, (5 * SZREG)($r_sp); \ -- INT_L $r_s0, (4 * SZREG)($r_sp); \ -- bltz v0, fault; \ -- PTR_ADDIU $r_sp, $r_sp, 6 * SZREG; \ -- move $r_ret, zero; \ -- --NESTED(bpf_slow_path_word, (6 * SZREG), $r_sp) -- bpf_slow_path_common(4) --#ifdef CONFIG_CPU_LITTLE_ENDIAN --# if MIPS_ISA_REV >= 2 -- wsbh t0, $r_s0 -- jr $r_ra -- rotr $r_A, t0, 16 --# else -- sll t0, $r_s0, 24 -- srl t1, $r_s0, 24 -- srl t2, $r_s0, 8 -- or t0, t0, t1 -- andi t2, t2, 0xff00 -- andi t1, $r_s0, 0xff00 -- or t0, t0, t2 -- sll t1, t1, 8 -- jr $r_ra -- or $r_A, t0, t1 --# endif --#else -- jr $r_ra -- move $r_A, $r_s0 --#endif -- -- END(bpf_slow_path_word) -- --NESTED(bpf_slow_path_half, (6 * SZREG), $r_sp) -- bpf_slow_path_common(2) --#ifdef CONFIG_CPU_LITTLE_ENDIAN --# if MIPS_ISA_REV >= 2 -- jr $r_ra -- wsbh $r_A, $r_s0 --# else -- sll t0, $r_s0, 8 -- andi t1, $r_s0, 0xff00 -- andi t0, t0, 0xff00 -- srl t1, t1, 8 -- jr $r_ra -- or $r_A, t0, t1 --# endif --#else -- jr $r_ra -- move $r_A, $r_s0 --#endif -- -- END(bpf_slow_path_half) -- --NESTED(bpf_slow_path_byte, (6 * SZREG), $r_sp) -- bpf_slow_path_common(1) -- jr $r_ra -- move $r_A, $r_s0 -- -- END(bpf_slow_path_byte) -- --/* -- * Negative entry points -- */ -- .macro bpf_is_end_of_data -- li t0, SKF_LL_OFF -- /* Reading link layer data? */ -- slt t1, offset, t0 -- bgtz t1, fault -- /* Be careful what follows in DS. */ -- .endm --/* -- * call skb_copy_bits: -- * (prototype in linux/filter.h) -- * -- * void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, -- * int k, unsigned int size) -- * -- * see above (bpf_slow_path_common) for ABI restrictions -- */ --#define bpf_negative_common(SIZE) \ -- PTR_ADDIU $r_sp, $r_sp, -(6 * SZREG); \ -- PTR_LA t0, bpf_internal_load_pointer_neg_helper; \ -- PTR_S $r_ra, (5 * SZREG)($r_sp); \ -- jalr t0; \ -- li a2, SIZE; \ -- PTR_L $r_ra, (5 * SZREG)($r_sp); \ -- /* Check return pointer */ \ -- beqz v0, fault; \ -- PTR_ADDIU $r_sp, $r_sp, 6 * SZREG; \ -- /* Preserve our pointer */ \ -- move $r_s0, v0; \ -- /* Set return value */ \ -- move $r_ret, zero; \ -- --bpf_slow_path_word_neg: -- bpf_is_end_of_data --NESTED(sk_load_word_negative, (6 * SZREG), $r_sp) -- bpf_negative_common(4) -- jr $r_ra -- lw $r_A, 0($r_s0) -- END(sk_load_word_negative) -- --bpf_slow_path_half_neg: -- bpf_is_end_of_data --NESTED(sk_load_half_negative, (6 * SZREG), $r_sp) -- bpf_negative_common(2) -- jr $r_ra -- lhu $r_A, 0($r_s0) -- END(sk_load_half_negative) -- --bpf_slow_path_byte_neg: -- bpf_is_end_of_data --NESTED(sk_load_byte_negative, (6 * SZREG), $r_sp) -- bpf_negative_common(1) -- jr $r_ra -- lbu $r_A, 0($r_s0) -- END(sk_load_byte_negative) -- --fault: -- jr $r_ra -- addiu $r_ret, zero, 1 diff --git a/target/linux/generic/backport-6.1/080-v5.17-clk-gate-Add-devm_clk_hw_register_gate.patch b/target/linux/generic/backport-6.1/080-v5.17-clk-gate-Add-devm_clk_hw_register_gate.patch deleted file mode 100644 index 51c23b6e3495..000000000000 --- a/target/linux/generic/backport-6.1/080-v5.17-clk-gate-Add-devm_clk_hw_register_gate.patch +++ /dev/null @@ -1,105 +0,0 @@ -From 815f0e738a8d5663a02350e2580706829144a722 Mon Sep 17 00:00:00 2001 -From: Horatiu Vultur -Date: Wed, 3 Nov 2021 09:50:59 +0100 -Subject: [PATCH] clk: gate: Add devm_clk_hw_register_gate() - -Add devm_clk_hw_register_gate() - devres-managed version of -clk_hw_register_gate() - -Suggested-by: Stephen Boyd -Signed-off-by: Horatiu Vultur -Acked-by: Nicolas Ferre -Signed-off-by: Nicolas Ferre -Link: 
https://lore.kernel.org/r/20211103085102.1656081-2-horatiu.vultur@microchip.com ---- - drivers/clk/clk-gate.c | 35 +++++++++++++++++++++++++++++++++++ - include/linux/clk-provider.h | 23 +++++++++++++++++++++++ - 2 files changed, 58 insertions(+) - ---- a/drivers/clk/clk-gate.c -+++ b/drivers/clk/clk-gate.c -@@ -7,6 +7,7 @@ - */ - - #include -+#include - #include - #include - #include -@@ -222,3 +223,37 @@ void clk_hw_unregister_gate(struct clk_h - kfree(gate); - } - EXPORT_SYMBOL_GPL(clk_hw_unregister_gate); -+ -+static void devm_clk_hw_release_gate(struct device *dev, void *res) -+{ -+ clk_hw_unregister_gate(*(struct clk_hw **)res); -+} -+ -+struct clk_hw *__devm_clk_hw_register_gate(struct device *dev, -+ struct device_node *np, const char *name, -+ const char *parent_name, const struct clk_hw *parent_hw, -+ const struct clk_parent_data *parent_data, -+ unsigned long flags, -+ void __iomem *reg, u8 bit_idx, -+ u8 clk_gate_flags, spinlock_t *lock) -+{ -+ struct clk_hw **ptr, *hw; -+ -+ ptr = devres_alloc(devm_clk_hw_release_gate, sizeof(*ptr), GFP_KERNEL); -+ if (!ptr) -+ return ERR_PTR(-ENOMEM); -+ -+ hw = __clk_hw_register_gate(dev, np, name, parent_name, parent_hw, -+ parent_data, flags, reg, bit_idx, -+ clk_gate_flags, lock); -+ -+ if (!IS_ERR(hw)) { -+ *ptr = hw; -+ devres_add(dev, ptr); -+ } else { -+ devres_free(ptr); -+ } -+ -+ return hw; -+} -+EXPORT_SYMBOL_GPL(__devm_clk_hw_register_gate); ---- a/include/linux/clk-provider.h -+++ b/include/linux/clk-provider.h -@@ -490,6 +490,13 @@ struct clk_hw *__clk_hw_register_gate(st - unsigned long flags, - void __iomem *reg, u8 bit_idx, - u8 clk_gate_flags, spinlock_t *lock); -+struct clk_hw *__devm_clk_hw_register_gate(struct device *dev, -+ struct device_node *np, const char *name, -+ const char *parent_name, const struct clk_hw *parent_hw, -+ const struct clk_parent_data *parent_data, -+ unsigned long flags, -+ void __iomem *reg, u8 bit_idx, -+ u8 clk_gate_flags, spinlock_t *lock); - struct clk *clk_register_gate(struct device *dev, const char *name, - const char *parent_name, unsigned long flags, - void __iomem *reg, u8 bit_idx, -@@ -544,6 +551,22 @@ struct clk *clk_register_gate(struct dev - __clk_hw_register_gate((dev), NULL, (name), NULL, NULL, (parent_data), \ - (flags), (reg), (bit_idx), \ - (clk_gate_flags), (lock)) -+/** -+ * devm_clk_hw_register_gate - register a gate clock with the clock framework -+ * @dev: device that is registering this clock -+ * @name: name of this clock -+ * @parent_name: name of this clock's parent -+ * @flags: framework-specific flags for this clock -+ * @reg: register address to control gating of this clock -+ * @bit_idx: which bit in the register controls gating of this clock -+ * @clk_gate_flags: gate-specific flags for this clock -+ * @lock: shared register lock for this clock -+ */ -+#define devm_clk_hw_register_gate(dev, name, parent_name, flags, reg, bit_idx,\ -+ clk_gate_flags, lock) \ -+ __devm_clk_hw_register_gate((dev), NULL, (name), (parent_name), NULL, \ -+ NULL, (flags), (reg), (bit_idx), \ -+ (clk_gate_flags), (lock)) - void clk_unregister_gate(struct clk *clk); - void clk_hw_unregister_gate(struct clk_hw *hw); - int clk_gate_is_enabled(struct clk_hw *hw); diff --git a/target/linux/generic/backport-6.1/081-v5.17-regmap-allow-to-define-reg_update_bits-for-no-bus.patch b/target/linux/generic/backport-6.1/081-v5.17-regmap-allow-to-define-reg_update_bits-for-no-bus.patch deleted file mode 100644 index e4c0833ae75f..000000000000 --- 
a/target/linux/generic/backport-6.1/081-v5.17-regmap-allow-to-define-reg_update_bits-for-no-bus.patch +++ /dev/null @@ -1,52 +0,0 @@ -From 02d6fdecb9c38de19065f6bed8d5214556fd061d Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Thu, 4 Nov 2021 16:00:40 +0100 -Subject: regmap: allow to define reg_update_bits for no bus configuration - -Some device requires a special handling for reg_update_bits and can't use -the normal regmap read write logic. An example is when locking is -handled by the device and rmw operations requires to do atomic operations. -Allow to declare a dedicated function in regmap_config for -reg_update_bits in no bus configuration. - -Signed-off-by: Ansuel Smith -Link: https://lore.kernel.org/r/20211104150040.1260-1-ansuelsmth@gmail.com -Signed-off-by: Mark Brown ---- - drivers/base/regmap/regmap.c | 1 + - include/linux/regmap.h | 7 +++++++ - 2 files changed, 8 insertions(+) - ---- a/drivers/base/regmap/regmap.c -+++ b/drivers/base/regmap/regmap.c -@@ -877,6 +877,7 @@ struct regmap *__regmap_init(struct devi - if (!bus) { - map->reg_read = config->reg_read; - map->reg_write = config->reg_write; -+ map->reg_update_bits = config->reg_update_bits; - - map->defer_caching = false; - goto skip_format_initialization; ---- a/include/linux/regmap.h -+++ b/include/linux/regmap.h -@@ -290,6 +290,11 @@ typedef void (*regmap_unlock)(void *); - * read operation on a bus such as SPI, I2C, etc. Most of the - * devices do not need this. - * @reg_write: Same as above for writing. -+ * @reg_update_bits: Optional callback that if filled will be used to perform -+ * all the update_bits(rmw) operation. Should only be provided -+ * if the function require special handling with lock and reg -+ * handling and the operation cannot be represented as a simple -+ * update_bits operation on a bus such as SPI, I2C, etc. - * @fast_io: Register IO is fast. Use a spinlock instead of a mutex - * to perform locking. This field is ignored if custom lock/unlock - * functions are used (see fields lock/unlock of struct regmap_config). -@@ -372,6 +377,8 @@ struct regmap_config { - - int (*reg_read)(void *context, unsigned int reg, unsigned int *val); - int (*reg_write)(void *context, unsigned int reg, unsigned int val); -+ int (*reg_update_bits)(void *context, unsigned int reg, -+ unsigned int mask, unsigned int val); - - bool fast_io; - diff --git a/target/linux/generic/backport-6.1/100-v5.18-tty-serial-bcm63xx-use-more-precise-Kconfig-symbol.patch b/target/linux/generic/backport-6.1/100-v5.18-tty-serial-bcm63xx-use-more-precise-Kconfig-symbol.patch deleted file mode 100644 index 7de3cbbda075..000000000000 --- a/target/linux/generic/backport-6.1/100-v5.18-tty-serial-bcm63xx-use-more-precise-Kconfig-symbol.patch +++ /dev/null @@ -1,37 +0,0 @@ -From 0dc0da881b4574d1e04a079ab2ea75da61f5ad2e Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= -Date: Fri, 11 Mar 2022 10:32:33 +0100 -Subject: [PATCH] tty: serial: bcm63xx: use more precise Kconfig symbol -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Patches lowering SERIAL_BCM63XX dependencies led to a discussion and -documentation change regarding "depends" usage. Adjust Kconfig entry to -match current guidelines. Make this symbol available for relevant -architectures only. 
- -Cc: Geert Uytterhoeven -Reviewed-by: Geert Uytterhoeven -Acked-by: Florian Fainelli -Signed-off-by: Rafał Miłecki -Ref: f35a07f92616 ("tty: serial: bcm63xx: lower driver dependencies") -Ref: 18084e435ff6 ("Documentation/kbuild: Document platform dependency practises") -Link: https://lore.kernel.org/r/20220311093233.10012-1-zajec5@gmail.com -Signed-off-by: Greg Kroah-Hartman ---- - drivers/tty/serial/Kconfig | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - ---- a/drivers/tty/serial/Kconfig -+++ b/drivers/tty/serial/Kconfig -@@ -1098,7 +1098,8 @@ config SERIAL_TIMBERDALE - config SERIAL_BCM63XX - tristate "Broadcom BCM63xx/BCM33xx UART support" - select SERIAL_CORE -- depends on COMMON_CLK -+ depends on ARCH_BCM4908 || ARCH_BCM_63XX || BCM63XX || BMIPS_GENERIC || COMPILE_TEST -+ default ARCH_BCM4908 || ARCH_BCM_63XX || BCM63XX || BMIPS_GENERIC - help - This enables the driver for the onchip UART core found on - the following chipsets: diff --git a/target/linux/generic/backport-6.1/200-v5.18-tools-resolve_btfids-Build-with-host-flags.patch b/target/linux/generic/backport-6.1/200-v5.18-tools-resolve_btfids-Build-with-host-flags.patch deleted file mode 100644 index caec8db5d692..000000000000 --- a/target/linux/generic/backport-6.1/200-v5.18-tools-resolve_btfids-Build-with-host-flags.patch +++ /dev/null @@ -1,49 +0,0 @@ -From cdbc4e3399ed8cdcf234a85f7a2482b622379e82 Mon Sep 17 00:00:00 2001 -From: Connor O'Brien -Date: Wed, 12 Jan 2022 00:25:03 +0000 -Subject: [PATCH] tools/resolve_btfids: Build with host flags - -resolve_btfids is built using $(HOSTCC) and $(HOSTLD) but does not -pick up the corresponding flags. As a result, host-specific settings -(such as a sysroot specified via HOSTCFLAGS=--sysroot=..., or a linker -specified via HOSTLDFLAGS=-fuse-ld=...) will not be respected. - -Fix this by setting CFLAGS to KBUILD_HOSTCFLAGS and LDFLAGS to -KBUILD_HOSTLDFLAGS. - -Also pass the cflags through to libbpf via EXTRA_CFLAGS to ensure that -the host libbpf is built with flags consistent with resolve_btfids. 
- -Signed-off-by: Connor O'Brien -Signed-off-by: Andrii Nakryiko -Acked-by: Song Liu -Link: https://lore.kernel.org/bpf/20220112002503.115968-1-connoro@google.com -(cherry picked from commit 0e3a1c902ffb56e9fe4416f0cd382c97b09ecbf6) -Signed-off-by: Stijn Tintel ---- - tools/bpf/resolve_btfids/Makefile | 6 ++++-- - 1 file changed, 4 insertions(+), 2 deletions(-) - ---- a/tools/bpf/resolve_btfids/Makefile -+++ b/tools/bpf/resolve_btfids/Makefile -@@ -23,6 +23,8 @@ CC = $(HOSTCC) - LD = $(HOSTLD) - ARCH = $(HOSTARCH) - RM ?= rm -+CFLAGS := $(KBUILD_HOSTCFLAGS) -+LDFLAGS := $(KBUILD_HOSTLDFLAGS) - - OUTPUT ?= $(srctree)/tools/bpf/resolve_btfids/ - -@@ -45,9 +47,9 @@ $(SUBCMDOBJ): fixdep FORCE | $(OUTPUT)/l - $(Q)$(MAKE) -C $(SUBCMD_SRC) OUTPUT=$(abspath $(dir $@))/ $(abspath $@) - - $(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(OUTPUT)/libbpf -- $(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) OUTPUT=$(abspath $(dir $@))/ $(abspath $@) -+ $(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) OUTPUT=$(abspath $(dir $@))/ EXTRA_CFLAGS="$(CFLAGS)" $(abspath $@) - --CFLAGS := -g \ -+CFLAGS += -g \ - -I$(srctree)/tools/include \ - -I$(srctree)/tools/include/uapi \ - -I$(LIBBPF_SRC) \ diff --git a/target/linux/generic/backport-6.1/201-v5.16-scripts-dtc-Update-to-upstream-version-v1.6.1-19-g0a.patch b/target/linux/generic/backport-6.1/201-v5.16-scripts-dtc-Update-to-upstream-version-v1.6.1-19-g0a.patch deleted file mode 100644 index d1bef74f327a..000000000000 --- a/target/linux/generic/backport-6.1/201-v5.16-scripts-dtc-Update-to-upstream-version-v1.6.1-19-g0a.patch +++ /dev/null @@ -1,997 +0,0 @@ -From a77725a9a3c5924e2fd4cd5b3557dd92a8e46f87 Mon Sep 17 00:00:00 2001 -From: Rob Herring -Date: Mon, 25 Oct 2021 11:05:45 -0500 -Subject: [PATCH 1/1] scripts/dtc: Update to upstream version - v1.6.1-19-g0a3a9d3449c8 - -This adds the following commits from upstream: - -0a3a9d3449c8 checks: Add an interrupt-map check -8fd24744e361 checks: Ensure '#interrupt-cells' only exists in interrupt providers -d8d1a9a77863 checks: Drop interrupt provider '#address-cells' check -52a16fd72824 checks: Make interrupt_provider check dependent on interrupts_extended_is_cell -37fd700685da treesource: Maintain phandle label/path on output -e33ce1d6a8c7 flattree: Use '\n', not ';' to separate asm pseudo-ops -d24cc189dca6 asm: Use assembler macros instead of cpp macros -ff3a30c115ad asm: Use .asciz and .ascii instead of .string -5eb5927d81ee fdtdump: fix -Werror=int-to-pointer-cast -0869f8269161 libfdt: Add ALIGNMENT error string -69595a167f06 checks: Fix bus-range check -72d09e2682a4 Makefile: add -Wsign-compare to warning options -b587787ef388 checks: Fix signedness comparisons warnings -69bed6c2418f dtc: Wrap phandle validity check -910221185560 fdtget: Fix signedness comparisons warnings -d966f08fcd21 tests: Fix signedness comparisons warnings -ecfb438c07fa dtc: Fix signedness comparisons warnings: pointer diff -5bec74a6d135 dtc: Fix signedness comparisons warnings: reservednum -24e7f511fd4a fdtdump: Fix signedness comparisons warnings -b6910bec1161 Bump version to v1.6.1 -21d61d18f968 Fix CID 1461557 -4c2ef8f4d14c checks: Introduce is_multiple_of() -e59ca36fb70e Make handling of cpp line information more tolerant -0c3fd9b6aceb checks: Drop interrupt_cells_is_cell check -6b3081abc4ac checks: Add check_is_cell() for all phandle+arg properties -2dffc192a77f yamltree: Remove marker ordering dependency -61e513439e40 pylibfdt: Rework "avoid unused variable warning" lines -c8bddd106095 tests: add a positive gpio 
test case -ad4abfadb687 checks: replace strstr and strrchr with strends -09c6a6e88718 dtc.h: add strends for suffix matching -9bb9b8d0b4a0 checks: tigthen up nr-gpios prop exception -b07b62ee3342 libfdt: Add FDT alignment check to fdt_check_header() -a2def5479950 libfdt: Check that the root-node name is empty -4ca61f84dc21 libfdt: Check that there is only one root node -34d708249a91 dtc: Remove -O dtbo support -8e7ff260f755 libfdt: Fix a possible "unchecked return value" warning -88875268c05c checks: Warn on node-name and property name being the same -9d2279e7e6ee checks: Change node-name check to match devicetree spec -f527c867a8c6 util: limit gnu_printf format attribute to gcc >= 4.4.0 - -Reviewed-by: Frank Rowand -Tested-by: Frank Rowand -Signed-off-by: Rob Herring ---- - scripts/dtc/checks.c | 222 ++++++++++++++++++++++-------- - scripts/dtc/dtc-lexer.l | 2 +- - scripts/dtc/dtc.c | 6 +- - scripts/dtc/dtc.h | 40 +++++- - scripts/dtc/flattree.c | 11 +- - scripts/dtc/libfdt/fdt.c | 4 + - scripts/dtc/libfdt/fdt_rw.c | 18 ++- - scripts/dtc/libfdt/fdt_strerror.c | 1 + - scripts/dtc/libfdt/libfdt.h | 7 + - scripts/dtc/livetree.c | 6 +- - scripts/dtc/treesource.c | 48 +++---- - scripts/dtc/util.h | 6 +- - scripts/dtc/version_gen.h | 2 +- - scripts/dtc/yamltree.c | 16 ++- - 14 files changed, 275 insertions(+), 114 deletions(-) - ---- a/scripts/dtc/checks.c -+++ b/scripts/dtc/checks.c -@@ -143,6 +143,14 @@ static void check_nodes_props(struct che - check_nodes_props(c, dti, child); - } - -+static bool is_multiple_of(int multiple, int divisor) -+{ -+ if (divisor == 0) -+ return multiple == 0; -+ else -+ return (multiple % divisor) == 0; -+} -+ - static bool run_check(struct check *c, struct dt_info *dti) - { - struct node *dt = dti->dt; -@@ -297,19 +305,20 @@ ERROR(duplicate_property_names, check_du - #define LOWERCASE "abcdefghijklmnopqrstuvwxyz" - #define UPPERCASE "ABCDEFGHIJKLMNOPQRSTUVWXYZ" - #define DIGITS "0123456789" --#define PROPNODECHARS LOWERCASE UPPERCASE DIGITS ",._+*#?-" -+#define NODECHARS LOWERCASE UPPERCASE DIGITS ",._+-@" -+#define PROPCHARS LOWERCASE UPPERCASE DIGITS ",._+*#?-" - #define PROPNODECHARSSTRICT LOWERCASE UPPERCASE DIGITS ",-" - - static void check_node_name_chars(struct check *c, struct dt_info *dti, - struct node *node) - { -- int n = strspn(node->name, c->data); -+ size_t n = strspn(node->name, c->data); - - if (n < strlen(node->name)) - FAIL(c, dti, node, "Bad character '%c' in node name", - node->name[n]); - } --ERROR(node_name_chars, check_node_name_chars, PROPNODECHARS "@"); -+ERROR(node_name_chars, check_node_name_chars, NODECHARS); - - static void check_node_name_chars_strict(struct check *c, struct dt_info *dti, - struct node *node) -@@ -330,6 +339,20 @@ static void check_node_name_format(struc - } - ERROR(node_name_format, check_node_name_format, NULL, &node_name_chars); - -+static void check_node_name_vs_property_name(struct check *c, -+ struct dt_info *dti, -+ struct node *node) -+{ -+ if (!node->parent) -+ return; -+ -+ if (get_property(node->parent, node->name)) { -+ FAIL(c, dti, node, "node name and property name conflict"); -+ } -+} -+WARNING(node_name_vs_property_name, check_node_name_vs_property_name, -+ NULL, &node_name_chars); -+ - static void check_unit_address_vs_reg(struct check *c, struct dt_info *dti, - struct node *node) - { -@@ -363,14 +386,14 @@ static void check_property_name_chars(st - struct property *prop; - - for_each_property(node, prop) { -- int n = strspn(prop->name, c->data); -+ size_t n = strspn(prop->name, c->data); - - if (n 
< strlen(prop->name)) - FAIL_PROP(c, dti, node, prop, "Bad character '%c' in property name", - prop->name[n]); - } - } --ERROR(property_name_chars, check_property_name_chars, PROPNODECHARS); -+ERROR(property_name_chars, check_property_name_chars, PROPCHARS); - - static void check_property_name_chars_strict(struct check *c, - struct dt_info *dti, -@@ -380,7 +403,7 @@ static void check_property_name_chars_st - - for_each_property(node, prop) { - const char *name = prop->name; -- int n = strspn(name, c->data); -+ size_t n = strspn(name, c->data); - - if (n == strlen(prop->name)) - continue; -@@ -497,7 +520,7 @@ static cell_t check_phandle_prop(struct - - phandle = propval_cell(prop); - -- if ((phandle == 0) || (phandle == -1)) { -+ if (!phandle_is_valid(phandle)) { - FAIL_PROP(c, dti, node, prop, "bad value (0x%x) in %s property", - phandle, prop->name); - return 0; -@@ -556,7 +579,7 @@ static void check_name_properties(struct - if (!prop) - return; /* No name property, that's fine */ - -- if ((prop->val.len != node->basenamelen+1) -+ if ((prop->val.len != node->basenamelen + 1U) - || (memcmp(prop->val.val, node->name, node->basenamelen) != 0)) { - FAIL(c, dti, node, "\"name\" property is incorrect (\"%s\" instead" - " of base node name)", prop->val.val); -@@ -657,7 +680,6 @@ ERROR(omit_unused_nodes, fixup_omit_unus - */ - WARNING_IF_NOT_CELL(address_cells_is_cell, "#address-cells"); - WARNING_IF_NOT_CELL(size_cells_is_cell, "#size-cells"); --WARNING_IF_NOT_CELL(interrupt_cells_is_cell, "#interrupt-cells"); - - WARNING_IF_NOT_STRING(device_type_is_string, "device_type"); - WARNING_IF_NOT_STRING(model_is_string, "model"); -@@ -672,8 +694,7 @@ static void check_names_is_string_list(s - struct property *prop; - - for_each_property(node, prop) { -- const char *s = strrchr(prop->name, '-'); -- if (!s || !streq(s, "-names")) -+ if (!strends(prop->name, "-names")) - continue; - - c->data = prop->name; -@@ -753,7 +774,7 @@ static void check_reg_format(struct chec - size_cells = node_size_cells(node->parent); - entrylen = (addr_cells + size_cells) * sizeof(cell_t); - -- if (!entrylen || (prop->val.len % entrylen) != 0) -+ if (!is_multiple_of(prop->val.len, entrylen)) - FAIL_PROP(c, dti, node, prop, "property has invalid length (%d bytes) " - "(#address-cells == %d, #size-cells == %d)", - prop->val.len, addr_cells, size_cells); -@@ -794,7 +815,7 @@ static void check_ranges_format(struct c - "#size-cells (%d) differs from %s (%d)", - ranges, c_size_cells, node->parent->fullpath, - p_size_cells); -- } else if ((prop->val.len % entrylen) != 0) { -+ } else if (!is_multiple_of(prop->val.len, entrylen)) { - FAIL_PROP(c, dti, node, prop, "\"%s\" property has invalid length (%d bytes) " - "(parent #address-cells == %d, child #address-cells == %d, " - "#size-cells == %d)", ranges, prop->val.len, -@@ -871,7 +892,7 @@ static void check_pci_device_bus_num(str - } else { - cells = (cell_t *)prop->val.val; - min_bus = fdt32_to_cpu(cells[0]); -- max_bus = fdt32_to_cpu(cells[0]); -+ max_bus = fdt32_to_cpu(cells[1]); - } - if ((bus_num < min_bus) || (bus_num > max_bus)) - FAIL_PROP(c, dti, node, prop, "PCI bus number %d out of range, expected (%d - %d)", -@@ -1367,9 +1388,9 @@ static void check_property_phandle_args( - const struct provider *provider) - { - struct node *root = dti->dt; -- int cell, cellsize = 0; -+ unsigned int cell, cellsize = 0; - -- if (prop->val.len % sizeof(cell_t)) { -+ if (!is_multiple_of(prop->val.len, sizeof(cell_t))) { - FAIL_PROP(c, dti, node, prop, - "property size (%d) is invalid, expected 
multiple of %zu", - prop->val.len, sizeof(cell_t)); -@@ -1379,14 +1400,14 @@ static void check_property_phandle_args( - for (cell = 0; cell < prop->val.len / sizeof(cell_t); cell += cellsize + 1) { - struct node *provider_node; - struct property *cellprop; -- int phandle; -+ cell_t phandle; - - phandle = propval_cell_n(prop, cell); - /* - * Some bindings use a cell value 0 or -1 to skip over optional - * entries when each index position has a specific definition. - */ -- if (phandle == 0 || phandle == -1) { -+ if (!phandle_is_valid(phandle)) { - /* Give up if this is an overlay with external references */ - if (dti->dtsflags & DTSF_PLUGIN) - break; -@@ -1452,7 +1473,8 @@ static void check_provider_cells_propert - } - #define WARNING_PROPERTY_PHANDLE_CELLS(nm, propname, cells_name, ...) \ - static struct provider nm##_provider = { (propname), (cells_name), __VA_ARGS__ }; \ -- WARNING(nm##_property, check_provider_cells_property, &nm##_provider, &phandle_references); -+ WARNING_IF_NOT_CELL(nm##_is_cell, cells_name); \ -+ WARNING(nm##_property, check_provider_cells_property, &nm##_provider, &nm##_is_cell, &phandle_references); - - WARNING_PROPERTY_PHANDLE_CELLS(clocks, "clocks", "#clock-cells"); - WARNING_PROPERTY_PHANDLE_CELLS(cooling_device, "cooling-device", "#cooling-cells"); -@@ -1473,24 +1495,17 @@ WARNING_PROPERTY_PHANDLE_CELLS(thermal_s - - static bool prop_is_gpio(struct property *prop) - { -- char *str; -- - /* - * *-gpios and *-gpio can appear in property names, - * so skip over any false matches (only one known ATM) - */ -- if (strstr(prop->name, "nr-gpio")) -+ if (strends(prop->name, ",nr-gpios")) - return false; - -- str = strrchr(prop->name, '-'); -- if (str) -- str++; -- else -- str = prop->name; -- if (!(streq(str, "gpios") || streq(str, "gpio"))) -- return false; -- -- return true; -+ return strends(prop->name, "-gpios") || -+ streq(prop->name, "gpios") || -+ strends(prop->name, "-gpio") || -+ streq(prop->name, "gpio"); - } - - static void check_gpios_property(struct check *c, -@@ -1525,13 +1540,10 @@ static void check_deprecated_gpio_proper - struct property *prop; - - for_each_property(node, prop) { -- char *str; -- - if (!prop_is_gpio(prop)) - continue; - -- str = strstr(prop->name, "gpio"); -- if (!streq(str, "gpio")) -+ if (!strends(prop->name, "gpio")) - continue; - - FAIL_PROP(c, dti, node, prop, -@@ -1561,21 +1573,106 @@ static void check_interrupt_provider(str - struct node *node) - { - struct property *prop; -+ bool irq_provider = node_is_interrupt_provider(node); - -- if (!node_is_interrupt_provider(node)) -+ prop = get_property(node, "#interrupt-cells"); -+ if (irq_provider && !prop) { -+ FAIL(c, dti, node, -+ "Missing '#interrupt-cells' in interrupt provider"); - return; -+ } - -- prop = get_property(node, "#interrupt-cells"); -- if (!prop) -+ if (!irq_provider && prop) { - FAIL(c, dti, node, -- "Missing #interrupt-cells in interrupt provider"); -+ "'#interrupt-cells' found, but node is not an interrupt provider"); -+ return; -+ } -+} -+WARNING(interrupt_provider, check_interrupt_provider, NULL, &interrupts_extended_is_cell); - -- prop = get_property(node, "#address-cells"); -- if (!prop) -+static void check_interrupt_map(struct check *c, -+ struct dt_info *dti, -+ struct node *node) -+{ -+ struct node *root = dti->dt; -+ struct property *prop, *irq_map_prop; -+ size_t cellsize, cell, map_cells; -+ -+ irq_map_prop = get_property(node, "interrupt-map"); -+ if (!irq_map_prop) -+ return; -+ -+ if (node->addr_cells < 0) { - FAIL(c, dti, node, -- "Missing 
#address-cells in interrupt provider"); -+ "Missing '#address-cells' in interrupt-map provider"); -+ return; -+ } -+ cellsize = node_addr_cells(node); -+ cellsize += propval_cell(get_property(node, "#interrupt-cells")); -+ -+ prop = get_property(node, "interrupt-map-mask"); -+ if (prop && (prop->val.len != (cellsize * sizeof(cell_t)))) -+ FAIL_PROP(c, dti, node, prop, -+ "property size (%d) is invalid, expected %zu", -+ prop->val.len, cellsize * sizeof(cell_t)); -+ -+ if (!is_multiple_of(irq_map_prop->val.len, sizeof(cell_t))) { -+ FAIL_PROP(c, dti, node, irq_map_prop, -+ "property size (%d) is invalid, expected multiple of %zu", -+ irq_map_prop->val.len, sizeof(cell_t)); -+ return; -+ } -+ -+ map_cells = irq_map_prop->val.len / sizeof(cell_t); -+ for (cell = 0; cell < map_cells; ) { -+ struct node *provider_node; -+ struct property *cellprop; -+ int phandle; -+ size_t parent_cellsize; -+ -+ if ((cell + cellsize) >= map_cells) { -+ FAIL_PROP(c, dti, node, irq_map_prop, -+ "property size (%d) too small, expected > %zu", -+ irq_map_prop->val.len, (cell + cellsize) * sizeof(cell_t)); -+ break; -+ } -+ cell += cellsize; -+ -+ phandle = propval_cell_n(irq_map_prop, cell); -+ if (!phandle_is_valid(phandle)) { -+ /* Give up if this is an overlay with external references */ -+ if (!(dti->dtsflags & DTSF_PLUGIN)) -+ FAIL_PROP(c, dti, node, irq_map_prop, -+ "Cell %zu is not a phandle(%d)", -+ cell, phandle); -+ break; -+ } -+ -+ provider_node = get_node_by_phandle(root, phandle); -+ if (!provider_node) { -+ FAIL_PROP(c, dti, node, irq_map_prop, -+ "Could not get phandle(%d) node for (cell %zu)", -+ phandle, cell); -+ break; -+ } -+ -+ cellprop = get_property(provider_node, "#interrupt-cells"); -+ if (cellprop) { -+ parent_cellsize = propval_cell(cellprop); -+ } else { -+ FAIL(c, dti, node, "Missing property '#interrupt-cells' in node %s or bad phandle (referred from interrupt-map[%zu])", -+ provider_node->fullpath, cell); -+ break; -+ } -+ -+ cellprop = get_property(provider_node, "#address-cells"); -+ if (cellprop) -+ parent_cellsize += propval_cell(cellprop); -+ -+ cell += 1 + parent_cellsize; -+ } - } --WARNING(interrupt_provider, check_interrupt_provider, NULL); -+WARNING(interrupt_map, check_interrupt_map, NULL, &phandle_references, &addr_size_cells, &interrupt_provider); - - static void check_interrupts_property(struct check *c, - struct dt_info *dti, -@@ -1584,13 +1681,13 @@ static void check_interrupts_property(st - struct node *root = dti->dt; - struct node *irq_node = NULL, *parent = node; - struct property *irq_prop, *prop = NULL; -- int irq_cells, phandle; -+ cell_t irq_cells, phandle; - - irq_prop = get_property(node, "interrupts"); - if (!irq_prop) - return; - -- if (irq_prop->val.len % sizeof(cell_t)) -+ if (!is_multiple_of(irq_prop->val.len, sizeof(cell_t))) - FAIL_PROP(c, dti, node, irq_prop, "size (%d) is invalid, expected multiple of %zu", - irq_prop->val.len, sizeof(cell_t)); - -@@ -1603,7 +1700,7 @@ static void check_interrupts_property(st - prop = get_property(parent, "interrupt-parent"); - if (prop) { - phandle = propval_cell(prop); -- if ((phandle == 0) || (phandle == -1)) { -+ if (!phandle_is_valid(phandle)) { - /* Give up if this is an overlay with - * external references */ - if (dti->dtsflags & DTSF_PLUGIN) -@@ -1639,7 +1736,7 @@ static void check_interrupts_property(st - } - - irq_cells = propval_cell(prop); -- if (irq_prop->val.len % (irq_cells * sizeof(cell_t))) { -+ if (!is_multiple_of(irq_prop->val.len, irq_cells * sizeof(cell_t))) { - FAIL_PROP(c, dti, node, prop, - 
"size is (%d), expected multiple of %d", - irq_prop->val.len, (int)(irq_cells * sizeof(cell_t))); -@@ -1750,7 +1847,7 @@ WARNING(graph_port, check_graph_port, NU - static struct node *get_remote_endpoint(struct check *c, struct dt_info *dti, - struct node *endpoint) - { -- int phandle; -+ cell_t phandle; - struct node *node; - struct property *prop; - -@@ -1760,7 +1857,7 @@ static struct node *get_remote_endpoint( - - phandle = propval_cell(prop); - /* Give up if this is an overlay with external references */ -- if (phandle == 0 || phandle == -1) -+ if (!phandle_is_valid(phandle)) - return NULL; - - node = get_node_by_phandle(dti->dt, phandle); -@@ -1796,7 +1893,7 @@ WARNING(graph_endpoint, check_graph_endp - static struct check *check_table[] = { - &duplicate_node_names, &duplicate_property_names, - &node_name_chars, &node_name_format, &property_name_chars, -- &name_is_string, &name_properties, -+ &name_is_string, &name_properties, &node_name_vs_property_name, - - &duplicate_label, - -@@ -1804,7 +1901,7 @@ static struct check *check_table[] = { - &phandle_references, &path_references, - &omit_unused_nodes, - -- &address_cells_is_cell, &size_cells_is_cell, &interrupt_cells_is_cell, -+ &address_cells_is_cell, &size_cells_is_cell, - &device_type_is_string, &model_is_string, &status_is_string, - &label_is_string, - -@@ -1839,26 +1936,43 @@ static struct check *check_table[] = { - &chosen_node_is_root, &chosen_node_bootargs, &chosen_node_stdout_path, - - &clocks_property, -+ &clocks_is_cell, - &cooling_device_property, -+ &cooling_device_is_cell, - &dmas_property, -+ &dmas_is_cell, - &hwlocks_property, -+ &hwlocks_is_cell, - &interrupts_extended_property, -+ &interrupts_extended_is_cell, - &io_channels_property, -+ &io_channels_is_cell, - &iommus_property, -+ &iommus_is_cell, - &mboxes_property, -+ &mboxes_is_cell, - &msi_parent_property, -+ &msi_parent_is_cell, - &mux_controls_property, -+ &mux_controls_is_cell, - &phys_property, -+ &phys_is_cell, - &power_domains_property, -+ &power_domains_is_cell, - &pwms_property, -+ &pwms_is_cell, - &resets_property, -+ &resets_is_cell, - &sound_dai_property, -+ &sound_dai_is_cell, - &thermal_sensors_property, -+ &thermal_sensors_is_cell, - - &deprecated_gpio_property, - &gpios_property, - &interrupts_property, - &interrupt_provider, -+ &interrupt_map, - - &alias_paths, - -@@ -1882,7 +1996,7 @@ static void enable_warning_error(struct - - static void disable_warning_error(struct check *c, bool warn, bool error) - { -- int i; -+ unsigned int i; - - /* Lowering level, also lower it for things this is the prereq - * for */ -@@ -1903,7 +2017,7 @@ static void disable_warning_error(struct - - void parse_checks_option(bool warn, bool error, const char *arg) - { -- int i; -+ unsigned int i; - const char *name = arg; - bool enable = true; - -@@ -1930,7 +2044,7 @@ void parse_checks_option(bool warn, bool - - void process_checks(bool force, struct dt_info *dti) - { -- int i; -+ unsigned int i; - int error = 0; - - for (i = 0; i < ARRAY_SIZE(check_table); i++) { ---- a/scripts/dtc/dtc-lexer.l -+++ b/scripts/dtc/dtc-lexer.l -@@ -57,7 +57,7 @@ static void PRINTF(1, 2) lexical_error(c - push_input_file(name); - } - --<*>^"#"(line)?[ \t]+[0-9]+[ \t]+{STRING}([ \t]+[0-9]+)? 
{ -+<*>^"#"(line)?[ \t]+[0-9]+[ \t]+{STRING}([ \t]+[0-9]+)* { - char *line, *fnstart, *fnend; - struct data fn; - /* skip text before line # */ ---- a/scripts/dtc/dtc.c -+++ b/scripts/dtc/dtc.c -@@ -12,7 +12,7 @@ - * Command line options - */ - int quiet; /* Level of quietness */ --int reservenum; /* Number of memory reservation slots */ -+unsigned int reservenum;/* Number of memory reservation slots */ - int minsize; /* Minimum blob size */ - int padsize; /* Additional padding to blob */ - int alignsize; /* Additional padding to blob accroding to the alignsize */ -@@ -197,7 +197,7 @@ int main(int argc, char *argv[]) - depname = optarg; - break; - case 'R': -- reservenum = strtol(optarg, NULL, 0); -+ reservenum = strtoul(optarg, NULL, 0); - break; - case 'S': - minsize = strtol(optarg, NULL, 0); -@@ -359,8 +359,6 @@ int main(int argc, char *argv[]) - #endif - } else if (streq(outform, "dtb")) { - dt_to_blob(outf, dti, outversion); -- } else if (streq(outform, "dtbo")) { -- dt_to_blob(outf, dti, outversion); - } else if (streq(outform, "asm")) { - dt_to_asm(outf, dti, outversion); - } else if (streq(outform, "null")) { ---- a/scripts/dtc/dtc.h -+++ b/scripts/dtc/dtc.h -@@ -35,7 +35,7 @@ - * Command line options - */ - extern int quiet; /* Level of quietness */ --extern int reservenum; /* Number of memory reservation slots */ -+extern unsigned int reservenum; /* Number of memory reservation slots */ - extern int minsize; /* Minimum blob size */ - extern int padsize; /* Additional padding to blob */ - extern int alignsize; /* Additional padding to blob accroding to the alignsize */ -@@ -51,6 +51,11 @@ extern int annotate; /* annotate .dts w - - typedef uint32_t cell_t; - -+static inline bool phandle_is_valid(cell_t phandle) -+{ -+ return phandle != 0 && phandle != ~0U; -+} -+ - static inline uint16_t dtb_ld16(const void *p) - { - const uint8_t *bp = (const uint8_t *)p; -@@ -86,6 +91,16 @@ static inline uint64_t dtb_ld64(const vo - #define streq(a, b) (strcmp((a), (b)) == 0) - #define strstarts(s, prefix) (strncmp((s), (prefix), strlen(prefix)) == 0) - #define strprefixeq(a, n, b) (strlen(b) == (n) && (memcmp(a, b, n) == 0)) -+static inline bool strends(const char *str, const char *suffix) -+{ -+ unsigned int len, suffix_len; -+ -+ len = strlen(str); -+ suffix_len = strlen(suffix); -+ if (len < suffix_len) -+ return false; -+ return streq(str + len - suffix_len, suffix); -+} - - #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1)) - -@@ -101,6 +116,12 @@ enum markertype { - TYPE_UINT64, - TYPE_STRING, - }; -+ -+static inline bool is_type_marker(enum markertype type) -+{ -+ return type >= TYPE_UINT8; -+} -+ - extern const char *markername(enum markertype markertype); - - struct marker { -@@ -125,7 +146,22 @@ struct data { - for_each_marker(m) \ - if ((m)->type == (t)) - --size_t type_marker_length(struct marker *m); -+static inline struct marker *next_type_marker(struct marker *m) -+{ -+ for_each_marker(m) -+ if (is_type_marker(m->type)) -+ break; -+ return m; -+} -+ -+static inline size_t type_marker_length(struct marker *m) -+{ -+ struct marker *next = next_type_marker(m->next); -+ -+ if (next) -+ return next->offset - m->offset; -+ return 0; -+} - - void data_free(struct data d); - ---- a/scripts/dtc/flattree.c -+++ b/scripts/dtc/flattree.c -@@ -124,7 +124,8 @@ static void asm_emit_cell(void *e, cell_ - { - FILE *f = e; - -- fprintf(f, "\t.byte 0x%02x; .byte 0x%02x; .byte 0x%02x; .byte 0x%02x\n", -+ fprintf(f, "\t.byte\t0x%02x\n" "\t.byte\t0x%02x\n" -+ "\t.byte\t0x%02x\n" "\t.byte\t0x%02x\n", 
- (val >> 24) & 0xff, (val >> 16) & 0xff, - (val >> 8) & 0xff, val & 0xff); - } -@@ -134,9 +135,9 @@ static void asm_emit_string(void *e, con - FILE *f = e; - - if (len != 0) -- fprintf(f, "\t.string\t\"%.*s\"\n", len, str); -+ fprintf(f, "\t.asciz\t\"%.*s\"\n", len, str); - else -- fprintf(f, "\t.string\t\"%s\"\n", str); -+ fprintf(f, "\t.asciz\t\"%s\"\n", str); - } - - static void asm_emit_align(void *e, int a) -@@ -295,7 +296,7 @@ static struct data flatten_reserve_list( - { - struct reserve_info *re; - struct data d = empty_data; -- int j; -+ unsigned int j; - - for (re = reservelist; re; re = re->next) { - d = data_append_re(d, re->address, re->size); -@@ -438,7 +439,7 @@ static void dump_stringtable_asm(FILE *f - - while (p < (strbuf.val + strbuf.len)) { - len = strlen(p); -- fprintf(f, "\t.string \"%s\"\n", p); -+ fprintf(f, "\t.asciz \"%s\"\n", p); - p += len+1; - } - } ---- a/scripts/dtc/libfdt/fdt.c -+++ b/scripts/dtc/libfdt/fdt.c -@@ -90,6 +90,10 @@ int fdt_check_header(const void *fdt) - { - size_t hdrsize; - -+ /* The device tree must be at an 8-byte aligned address */ -+ if ((uintptr_t)fdt & 7) -+ return -FDT_ERR_ALIGNMENT; -+ - if (fdt_magic(fdt) != FDT_MAGIC) - return -FDT_ERR_BADMAGIC; - if (!can_assume(LATEST)) { ---- a/scripts/dtc/libfdt/fdt_rw.c -+++ b/scripts/dtc/libfdt/fdt_rw.c -@@ -349,7 +349,10 @@ int fdt_add_subnode_namelen(void *fdt, i - return offset; - - /* Try to place the new node after the parent's properties */ -- fdt_next_tag(fdt, parentoffset, &nextoffset); /* skip the BEGIN_NODE */ -+ tag = fdt_next_tag(fdt, parentoffset, &nextoffset); -+ /* the fdt_subnode_offset_namelen() should ensure this never hits */ -+ if (!can_assume(LIBFDT_FLAWLESS) && (tag != FDT_BEGIN_NODE)) -+ return -FDT_ERR_INTERNAL; - do { - offset = nextoffset; - tag = fdt_next_tag(fdt, offset, &nextoffset); -@@ -391,7 +394,9 @@ int fdt_del_node(void *fdt, int nodeoffs - } - - static void fdt_packblocks_(const char *old, char *new, -- int mem_rsv_size, int struct_size) -+ int mem_rsv_size, -+ int struct_size, -+ int strings_size) - { - int mem_rsv_off, struct_off, strings_off; - -@@ -406,8 +411,7 @@ static void fdt_packblocks_(const char * - fdt_set_off_dt_struct(new, struct_off); - fdt_set_size_dt_struct(new, struct_size); - -- memmove(new + strings_off, old + fdt_off_dt_strings(old), -- fdt_size_dt_strings(old)); -+ memmove(new + strings_off, old + fdt_off_dt_strings(old), strings_size); - fdt_set_off_dt_strings(new, strings_off); - fdt_set_size_dt_strings(new, fdt_size_dt_strings(old)); - } -@@ -467,7 +471,8 @@ int fdt_open_into(const void *fdt, void - return -FDT_ERR_NOSPACE; - } - -- fdt_packblocks_(fdt, tmp, mem_rsv_size, struct_size); -+ fdt_packblocks_(fdt, tmp, mem_rsv_size, struct_size, -+ fdt_size_dt_strings(fdt)); - memmove(buf, tmp, newsize); - - fdt_set_magic(buf, FDT_MAGIC); -@@ -487,7 +492,8 @@ int fdt_pack(void *fdt) - - mem_rsv_size = (fdt_num_mem_rsv(fdt)+1) - * sizeof(struct fdt_reserve_entry); -- fdt_packblocks_(fdt, fdt, mem_rsv_size, fdt_size_dt_struct(fdt)); -+ fdt_packblocks_(fdt, fdt, mem_rsv_size, fdt_size_dt_struct(fdt), -+ fdt_size_dt_strings(fdt)); - fdt_set_totalsize(fdt, fdt_data_size_(fdt)); - - return 0; ---- a/scripts/dtc/libfdt/fdt_strerror.c -+++ b/scripts/dtc/libfdt/fdt_strerror.c -@@ -39,6 +39,7 @@ static struct fdt_errtabent fdt_errtable - FDT_ERRTABENT(FDT_ERR_BADOVERLAY), - FDT_ERRTABENT(FDT_ERR_NOPHANDLES), - FDT_ERRTABENT(FDT_ERR_BADFLAGS), -+ FDT_ERRTABENT(FDT_ERR_ALIGNMENT), - }; - #define FDT_ERRTABSIZE ((int)(sizeof(fdt_errtable) / 
sizeof(fdt_errtable[0]))) - ---- a/scripts/dtc/libfdt/libfdt.h -+++ b/scripts/dtc/libfdt/libfdt.h -@@ -131,6 +131,13 @@ uint32_t fdt_next_tag(const void *fdt, i - * to work even with unaligned pointers on platforms (such as ARMv5) that don't - * like unaligned loads and stores. - */ -+static inline uint16_t fdt16_ld(const fdt16_t *p) -+{ -+ const uint8_t *bp = (const uint8_t *)p; -+ -+ return ((uint16_t)bp[0] << 8) | bp[1]; -+} -+ - static inline uint32_t fdt32_ld(const fdt32_t *p) - { - const uint8_t *bp = (const uint8_t *)p; ---- a/scripts/dtc/livetree.c -+++ b/scripts/dtc/livetree.c -@@ -526,7 +526,7 @@ struct node *get_node_by_path(struct nod - p = strchr(path, '/'); - - for_each_child(tree, child) { -- if (p && strprefixeq(path, p - path, child->name)) -+ if (p && strprefixeq(path, (size_t)(p - path), child->name)) - return get_node_by_path(child, p+1); - else if (!p && streq(path, child->name)) - return child; -@@ -559,7 +559,7 @@ struct node *get_node_by_phandle(struct - { - struct node *child, *node; - -- if ((phandle == 0) || (phandle == -1)) { -+ if (!phandle_is_valid(phandle)) { - assert(generate_fixups); - return NULL; - } -@@ -594,7 +594,7 @@ cell_t get_node_phandle(struct node *roo - static cell_t phandle = 1; /* FIXME: ick, static local */ - struct data d = empty_data; - -- if ((node->phandle != 0) && (node->phandle != -1)) -+ if (phandle_is_valid(node->phandle)) - return node->phandle; - - while (get_node_by_phandle(root, phandle)) ---- a/scripts/dtc/treesource.c -+++ b/scripts/dtc/treesource.c -@@ -124,27 +124,6 @@ static void write_propval_int(FILE *f, c - } - } - --static bool has_data_type_information(struct marker *m) --{ -- return m->type >= TYPE_UINT8; --} -- --static struct marker *next_type_marker(struct marker *m) --{ -- while (m && !has_data_type_information(m)) -- m = m->next; -- return m; --} -- --size_t type_marker_length(struct marker *m) --{ -- struct marker *next = next_type_marker(m->next); -- -- if (next) -- return next->offset - m->offset; -- return 0; --} -- - static const char *delim_start[] = { - [TYPE_UINT8] = "[", - [TYPE_UINT16] = "/bits/ 16 <", -@@ -229,26 +208,39 @@ static void write_propval(FILE *f, struc - size_t chunk_len = (m->next ? m->next->offset : len) - m->offset; - size_t data_len = type_marker_length(m) ? 
: len - m->offset; - const char *p = &prop->val.val[m->offset]; -+ struct marker *m_phandle; - -- if (has_data_type_information(m)) { -+ if (is_type_marker(m->type)) { - emit_type = m->type; - fprintf(f, " %s", delim_start[emit_type]); - } else if (m->type == LABEL) - fprintf(f, " %s:", m->ref); -- else if (m->offset) -- fputc(' ', f); - -- if (emit_type == TYPE_NONE) { -- assert(chunk_len == 0); -+ if (emit_type == TYPE_NONE || chunk_len == 0) - continue; -- } - - switch(emit_type) { - case TYPE_UINT16: - write_propval_int(f, p, chunk_len, 2); - break; - case TYPE_UINT32: -- write_propval_int(f, p, chunk_len, 4); -+ m_phandle = prop->val.markers; -+ for_each_marker_of_type(m_phandle, REF_PHANDLE) -+ if (m->offset == m_phandle->offset) -+ break; -+ -+ if (m_phandle) { -+ if (m_phandle->ref[0] == '/') -+ fprintf(f, "&{%s}", m_phandle->ref); -+ else -+ fprintf(f, "&%s", m_phandle->ref); -+ if (chunk_len > 4) { -+ fputc(' ', f); -+ write_propval_int(f, p + 4, chunk_len - 4, 4); -+ } -+ } else { -+ write_propval_int(f, p, chunk_len, 4); -+ } - break; - case TYPE_UINT64: - write_propval_int(f, p, chunk_len, 8); ---- a/scripts/dtc/util.h -+++ b/scripts/dtc/util.h -@@ -13,10 +13,10 @@ - */ - - #ifdef __GNUC__ --#ifdef __clang__ --#define PRINTF(i, j) __attribute__((format (printf, i, j))) --#else -+#if __GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4) - #define PRINTF(i, j) __attribute__((format (gnu_printf, i, j))) -+#else -+#define PRINTF(i, j) __attribute__((format (printf, i, j))) - #endif - #define NORETURN __attribute__((noreturn)) - #else ---- a/scripts/dtc/version_gen.h -+++ b/scripts/dtc/version_gen.h -@@ -1 +1 @@ --#define DTC_VERSION "DTC 1.6.0-g183df9e9" -+#define DTC_VERSION "DTC 1.6.1-g0a3a9d34" ---- a/scripts/dtc/yamltree.c -+++ b/scripts/dtc/yamltree.c -@@ -29,11 +29,12 @@ char *yaml_error_name[] = { - (emitter)->problem, __func__, __LINE__); \ - }) - --static void yaml_propval_int(yaml_emitter_t *emitter, struct marker *markers, char *data, unsigned int len, int width) -+static void yaml_propval_int(yaml_emitter_t *emitter, struct marker *markers, -+ char *data, unsigned int seq_offset, unsigned int len, int width) - { - yaml_event_t event; - void *tag; -- unsigned int off, start_offset = markers->offset; -+ unsigned int off; - - switch(width) { - case 1: tag = "!u8"; break; -@@ -66,7 +67,7 @@ static void yaml_propval_int(yaml_emitte - m = markers; - is_phandle = false; - for_each_marker_of_type(m, REF_PHANDLE) { -- if (m->offset == (start_offset + off)) { -+ if (m->offset == (seq_offset + off)) { - is_phandle = true; - break; - } -@@ -114,6 +115,7 @@ static void yaml_propval(yaml_emitter_t - yaml_event_t event; - unsigned int len = prop->val.len; - struct marker *m = prop->val.markers; -+ struct marker *markers = prop->val.markers; - - /* Emit the property name */ - yaml_scalar_event_initialize(&event, NULL, -@@ -151,19 +153,19 @@ static void yaml_propval(yaml_emitter_t - - switch(m->type) { - case TYPE_UINT16: -- yaml_propval_int(emitter, m, data, chunk_len, 2); -+ yaml_propval_int(emitter, markers, data, m->offset, chunk_len, 2); - break; - case TYPE_UINT32: -- yaml_propval_int(emitter, m, data, chunk_len, 4); -+ yaml_propval_int(emitter, markers, data, m->offset, chunk_len, 4); - break; - case TYPE_UINT64: -- yaml_propval_int(emitter, m, data, chunk_len, 8); -+ yaml_propval_int(emitter, markers, data, m->offset, chunk_len, 8); - break; - case TYPE_STRING: - yaml_propval_string(emitter, data, chunk_len); - break; - default: -- yaml_propval_int(emitter, m, data, 
chunk_len, 1); -+ yaml_propval_int(emitter, markers, data, m->offset, chunk_len, 1); - break; - } - } diff --git a/target/linux/generic/backport-6.1/300-v5.18-pinctrl-qcom-Return--EINVAL-for-setting-affinity-if-no-IRQ-parent.patch b/target/linux/generic/backport-6.1/300-v5.18-pinctrl-qcom-Return--EINVAL-for-setting-affinity-if-no-IRQ-parent.patch deleted file mode 100644 index 18a8752a18f4..000000000000 --- a/target/linux/generic/backport-6.1/300-v5.18-pinctrl-qcom-Return--EINVAL-for-setting-affinity-if-no-IRQ-parent.patch +++ /dev/null @@ -1,48 +0,0 @@ -From: Manivannan Sadhasivam -To: linus.walleij@linaro.org -Cc: bjorn.andersson@linaro.org, dianders@chromium.org, - linux-arm-msm@vger.kernel.org, linux-gpio@vger.kernel.org, - linux-kernel@vger.kernel.org, - Manivannan Sadhasivam -Subject: [PATCH] pinctrl: qcom: Return -EINVAL for setting affinity if no IRQ - parent -Date: Thu, 13 Jan 2022 21:56:17 +0530 -Message-Id: <20220113162617.131697-1-manivannan.sadhasivam@linaro.org> - -The MSM GPIO IRQ controller relies on the parent IRQ controller to set the -CPU affinity for the IRQ. And this is only valid if there is any wakeup -parent available and defined in DT. - -For the case of no parent IRQ controller defined in DT, -msm_gpio_irq_set_affinity() and msm_gpio_irq_set_vcpu_affinity() should -return -EINVAL instead of 0 as the affinity can't be set. - -Otherwise, below warning will be printed by genirq: - -genirq: irq_chip msmgpio did not update eff. affinity mask of irq 70 - -Signed-off-by: Manivannan Sadhasivam ---- - drivers/pinctrl/qcom/pinctrl-msm.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/drivers/pinctrl/qcom/pinctrl-msm.c -+++ b/drivers/pinctrl/qcom/pinctrl-msm.c -@@ -1157,7 +1157,7 @@ static int msm_gpio_irq_set_affinity(str - if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs)) - return irq_chip_set_affinity_parent(d, dest, force); - -- return 0; -+ return -EINVAL; - } - - static int msm_gpio_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) -@@ -1168,7 +1168,7 @@ static int msm_gpio_irq_set_vcpu_affinit - if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs)) - return irq_chip_set_vcpu_affinity_parent(d, vcpu_info); - -- return 0; -+ return -EINVAL; - } - - static void msm_gpio_irq_handler(struct irq_desc *desc) diff --git a/target/linux/generic/backport-6.1/301-v5.16-soc-qcom-smem-Support-reserved-memory-description.patch b/target/linux/generic/backport-6.1/301-v5.16-soc-qcom-smem-Support-reserved-memory-description.patch deleted file mode 100644 index ee0bf9309ff8..000000000000 --- a/target/linux/generic/backport-6.1/301-v5.16-soc-qcom-smem-Support-reserved-memory-description.patch +++ /dev/null @@ -1,166 +0,0 @@ -From b5af64fceb04dc298c5e69c517b4d83893ff060b Mon Sep 17 00:00:00 2001 -From: Bjorn Andersson -Date: Thu, 30 Sep 2021 11:21:10 -0700 -Subject: [PATCH 1/1] soc: qcom: smem: Support reserved-memory description - -Practically all modern Qualcomm platforms has a single reserved-memory -region for SMEM. So rather than having to describe SMEM in the form of a -node with a reference to a reserved-memory node, allow the SMEM device -to be instantiated directly from the reserved-memory node. - -The current means of falling back to dereferencing the "memory-region" -is kept as a fallback, if it's determined that the SMEM node is a -reserved-memory node. - -The "qcom,smem" compatible is added to the reserved_mem_matches list, to -allow the reserved-memory device to be probed. 
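
The resolution order described above reduces to the following shape (a condensed sketch reusing the patch's own names, not the literal hunk, which additionally handles the optional "qcom,rpm-msg-ram" region and performs the ioremap in a separate loop):

    /* needs <linux/of_reserved_mem.h> for of_reserved_mem_lookup() */
    rmem = of_reserved_mem_lookup(pdev->dev.of_node);
    if (rmem) {
            /* the SMEM node is itself the reserved-memory node */
            smem->regions[0].aux_base = rmem->base;
            smem->regions[0].size = rmem->size;
    } else {
            /* legacy binding: follow the "memory-region" phandle */
            ret = qcom_smem_resolve_mem(smem, "memory-region", &smem->regions[0]);
            if (ret)
                    return ret;
    }
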
- -In order to retain the readability of the code, the resolution of -resources is split from the actual ioremapping. - -Signed-off-by: Bjorn Andersson -Acked-by: Rob Herring -Reviewed-by: Vladimir Zapolskiy -Link: https://lore.kernel.org/r/20210930182111.57353-4-bjorn.andersson@linaro.org ---- - drivers/of/platform.c | 1 + - drivers/soc/qcom/smem.c | 57 ++++++++++++++++++++++++++++------------- - 2 files changed, 40 insertions(+), 18 deletions(-) - ---- a/drivers/of/platform.c -+++ b/drivers/of/platform.c -@@ -509,6 +509,7 @@ EXPORT_SYMBOL_GPL(of_platform_default_po - static const struct of_device_id reserved_mem_matches[] = { - { .compatible = "qcom,rmtfs-mem" }, - { .compatible = "qcom,cmd-db" }, -+ { .compatible = "qcom,smem" }, - { .compatible = "ramoops" }, - { .compatible = "nvmem-rmem" }, - {} ---- a/drivers/soc/qcom/smem.c -+++ b/drivers/soc/qcom/smem.c -@@ -9,6 +9,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -240,7 +241,7 @@ static const u8 SMEM_INFO_MAGIC[] = { 0x - * @size: size of the memory region - */ - struct smem_region { -- u32 aux_base; -+ phys_addr_t aux_base; - void __iomem *virt_base; - size_t size; - }; -@@ -499,7 +500,7 @@ static void *qcom_smem_get_global(struct - for (i = 0; i < smem->num_regions; i++) { - region = &smem->regions[i]; - -- if (region->aux_base == aux_base || !aux_base) { -+ if ((u32)region->aux_base == aux_base || !aux_base) { - if (size != NULL) - *size = le32_to_cpu(entry->size); - return region->virt_base + le32_to_cpu(entry->offset); -@@ -664,7 +665,7 @@ phys_addr_t qcom_smem_virt_to_phys(void - if (p < region->virt_base + region->size) { - u64 offset = p - region->virt_base; - -- return (phys_addr_t)region->aux_base + offset; -+ return region->aux_base + offset; - } - } - -@@ -863,12 +864,12 @@ qcom_smem_enumerate_partitions(struct qc - return 0; - } - --static int qcom_smem_map_memory(struct qcom_smem *smem, struct device *dev, -- const char *name, int i) -+static int qcom_smem_resolve_mem(struct qcom_smem *smem, const char *name, -+ struct smem_region *region) - { -+ struct device *dev = smem->dev; - struct device_node *np; - struct resource r; -- resource_size_t size; - int ret; - - np = of_parse_phandle(dev->of_node, name, 0); -@@ -881,13 +882,9 @@ static int qcom_smem_map_memory(struct q - of_node_put(np); - if (ret) - return ret; -- size = resource_size(&r); - -- smem->regions[i].virt_base = devm_ioremap_wc(dev, r.start, size); -- if (!smem->regions[i].virt_base) -- return -ENOMEM; -- smem->regions[i].aux_base = (u32)r.start; -- smem->regions[i].size = size; -+ region->aux_base = r.start; -+ region->size = resource_size(&r); - - return 0; - } -@@ -895,12 +892,14 @@ static int qcom_smem_map_memory(struct q - static int qcom_smem_probe(struct platform_device *pdev) - { - struct smem_header *header; -+ struct reserved_mem *rmem; - struct qcom_smem *smem; - size_t array_size; - int num_regions; - int hwlock_id; - u32 version; - int ret; -+ int i; - - num_regions = 1; - if (of_find_property(pdev->dev.of_node, "qcom,rpm-msg-ram", NULL)) -@@ -914,13 +913,35 @@ static int qcom_smem_probe(struct platfo - smem->dev = &pdev->dev; - smem->num_regions = num_regions; - -- ret = qcom_smem_map_memory(smem, &pdev->dev, "memory-region", 0); -- if (ret) -- return ret; -- -- if (num_regions > 1 && (ret = qcom_smem_map_memory(smem, &pdev->dev, -- "qcom,rpm-msg-ram", 1))) -- return ret; -+ rmem = of_reserved_mem_lookup(pdev->dev.of_node); -+ if (rmem) { -+ smem->regions[0].aux_base = rmem->base; -+ 
smem->regions[0].size = rmem->size; -+ } else { -+ /* -+ * Fall back to the memory-region reference, if we're not a -+ * reserved-memory node. -+ */ -+ ret = qcom_smem_resolve_mem(smem, "memory-region", &smem->regions[0]); -+ if (ret) -+ return ret; -+ } -+ -+ if (num_regions > 1) { -+ ret = qcom_smem_resolve_mem(smem, "qcom,rpm-msg-ram", &smem->regions[1]); -+ if (ret) -+ return ret; -+ } -+ -+ for (i = 0; i < num_regions; i++) { -+ smem->regions[i].virt_base = devm_ioremap_wc(&pdev->dev, -+ smem->regions[i].aux_base, -+ smem->regions[i].size); -+ if (!smem->regions[i].virt_base) { -+ dev_err(&pdev->dev, "failed to remap %pa\n", &smem->regions[i].aux_base); -+ return -ENOMEM; -+ } -+ } - - header = smem->regions[0].virt_base; - if (le32_to_cpu(header->initialized) != 1 || diff --git a/target/linux/generic/backport-6.1/302-v5.16-watchdog-bcm63xx_wdt-fix-fallthrough-warning.patch b/target/linux/generic/backport-6.1/302-v5.16-watchdog-bcm63xx_wdt-fix-fallthrough-warning.patch deleted file mode 100644 index 84ae5a7fc73b..000000000000 --- a/target/linux/generic/backport-6.1/302-v5.16-watchdog-bcm63xx_wdt-fix-fallthrough-warning.patch +++ /dev/null @@ -1,33 +0,0 @@ -From ee1a0696934a8b77a6a2098f92832c46d34ec5da Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= -Date: Wed, 27 Oct 2021 14:31:35 +0200 -Subject: [PATCH] watchdog: bcm63xx_wdt: fix fallthrough warning -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -This fixes: -drivers/watchdog/bcm63xx_wdt.c: In function 'bcm63xx_wdt_ioctl': -drivers/watchdog/bcm63xx_wdt.c:208:17: warning: this statement may fall through [-Wimplicit-fallthrough=] - -Signed-off-by: Rafał Miłecki -Reviewed-by: Florian Fainelli -Reviewed-by: Guenter Roeck -Link: https://lore.kernel.org/r/20211027123135.27458-1-zajec5@gmail.com -Signed-off-by: Guenter Roeck -Signed-off-by: Wim Van Sebroeck ---- - drivers/watchdog/bcm63xx_wdt.c | 2 ++ - 1 file changed, 2 insertions(+) - ---- a/drivers/watchdog/bcm63xx_wdt.c -+++ b/drivers/watchdog/bcm63xx_wdt.c -@@ -207,6 +207,8 @@ static long bcm63xx_wdt_ioctl(struct fil - - bcm63xx_wdt_pet(); - -+ fallthrough; -+ - case WDIOC_GETTIMEOUT: - return put_user(wdt_time, p); - diff --git a/target/linux/generic/backport-6.1/330-v5.16-01-MIPS-kernel-proc-add-CPU-option-reporting.patch b/target/linux/generic/backport-6.1/330-v5.16-01-MIPS-kernel-proc-add-CPU-option-reporting.patch deleted file mode 100644 index c66a3f11b4b4..000000000000 --- a/target/linux/generic/backport-6.1/330-v5.16-01-MIPS-kernel-proc-add-CPU-option-reporting.patch +++ /dev/null @@ -1,162 +0,0 @@ -From 626bfa03729959ea9917181fb3d8ffaa1594d02a Mon Sep 17 00:00:00 2001 -From: Hauke Mehrtens -Date: Wed, 13 Oct 2021 22:40:18 -0700 -Subject: [PATCH 1/1] MIPS: kernel: proc: add CPU option reporting - -Many MIPS CPUs have optional CPU features which are not activated for -all CPU cores. Print the CPU options, which are implemented in the core, -in /proc/cpuinfo. This makes it possible to see which features are -supported and which are not supported. This should cover all standard -MIPS extensions. Before, it only printed information about the main MIPS -ASEs. 
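
The reporting itself is deliberately simple: one cpu_has_* feature-test macro per optional capability, each appending a token to the new "Options implemented" line. A condensed sketch of that shape (the full hunk below covers every option):

    #include <linux/seq_file.h>
    #include <asm/cpu-features.h>   /* cpu_has_* feature-test macros */

    seq_puts(m, "Options implemented\t:");
    if (cpu_has_tlb)
            seq_puts(m, " tlb");
    if (cpu_has_fpu)                /* later switched to raw_cpu_has_fpu, see the follow-up fix */
            seq_puts(m, " fpu");
    /* ... one test per optional feature ... */
    seq_puts(m, "\n");
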
- -Signed-off-by: Hauke Mehrtens - -Changes from original patch[0]: -- Remove cpu_has_6k_cache and cpu_has_8k_cache due to commit 6ce91ba8589a - ("MIPS: Remove cpu_has_6k_cache and cpu_has_8k_cache in cpu_cache_init()") -- Add new options: mac2008_only, ftlbparex, gsexcex, mmid, mm_sysad, - mm_full -- Use seq_puts instead of seq_printf as suggested by checkpatch -- Minor commit message reword - -[0]: https://lore.kernel.org/linux-mips/20181223225224.23042-1-hauke@hauke-m.de/ - -Signed-off-by: Ilya Lipnitskiy -Acked-by: Hauke Mehrtens -Signed-off-by: Thomas Bogendoerfer ---- - arch/mips/kernel/proc.c | 122 ++++++++++++++++++++++++++++++++++++++++ - 1 file changed, 122 insertions(+) - ---- a/arch/mips/kernel/proc.c -+++ b/arch/mips/kernel/proc.c -@@ -138,6 +138,128 @@ static int show_cpuinfo(struct seq_file - seq_printf(m, "micromips kernel\t: %s\n", - (read_c0_config3() & MIPS_CONF3_ISA_OE) ? "yes" : "no"); - } -+ -+ seq_puts(m, "Options implemented\t:"); -+ if (cpu_has_tlb) -+ seq_puts(m, " tlb"); -+ if (cpu_has_ftlb) -+ seq_puts(m, " ftlb"); -+ if (cpu_has_tlbinv) -+ seq_puts(m, " tlbinv"); -+ if (cpu_has_segments) -+ seq_puts(m, " segments"); -+ if (cpu_has_rixiex) -+ seq_puts(m, " rixiex"); -+ if (cpu_has_ldpte) -+ seq_puts(m, " ldpte"); -+ if (cpu_has_maar) -+ seq_puts(m, " maar"); -+ if (cpu_has_rw_llb) -+ seq_puts(m, " rw_llb"); -+ if (cpu_has_4kex) -+ seq_puts(m, " 4kex"); -+ if (cpu_has_3k_cache) -+ seq_puts(m, " 3k_cache"); -+ if (cpu_has_4k_cache) -+ seq_puts(m, " 4k_cache"); -+ if (cpu_has_tx39_cache) -+ seq_puts(m, " tx39_cache"); -+ if (cpu_has_octeon_cache) -+ seq_puts(m, " octeon_cache"); -+ if (cpu_has_fpu) -+ seq_puts(m, " fpu"); -+ if (cpu_has_32fpr) -+ seq_puts(m, " 32fpr"); -+ if (cpu_has_cache_cdex_p) -+ seq_puts(m, " cache_cdex_p"); -+ if (cpu_has_cache_cdex_s) -+ seq_puts(m, " cache_cdex_s"); -+ if (cpu_has_prefetch) -+ seq_puts(m, " prefetch"); -+ if (cpu_has_mcheck) -+ seq_puts(m, " mcheck"); -+ if (cpu_has_ejtag) -+ seq_puts(m, " ejtag"); -+ if (cpu_has_llsc) -+ seq_puts(m, " llsc"); -+ if (cpu_has_guestctl0ext) -+ seq_puts(m, " guestctl0ext"); -+ if (cpu_has_guestctl1) -+ seq_puts(m, " guestctl1"); -+ if (cpu_has_guestctl2) -+ seq_puts(m, " guestctl2"); -+ if (cpu_has_guestid) -+ seq_puts(m, " guestid"); -+ if (cpu_has_drg) -+ seq_puts(m, " drg"); -+ if (cpu_has_rixi) -+ seq_puts(m, " rixi"); -+ if (cpu_has_lpa) -+ seq_puts(m, " lpa"); -+ if (cpu_has_mvh) -+ seq_puts(m, " mvh"); -+ if (cpu_has_vtag_icache) -+ seq_puts(m, " vtag_icache"); -+ if (cpu_has_dc_aliases) -+ seq_puts(m, " dc_aliases"); -+ if (cpu_has_ic_fills_f_dc) -+ seq_puts(m, " ic_fills_f_dc"); -+ if (cpu_has_pindexed_dcache) -+ seq_puts(m, " pindexed_dcache"); -+ if (cpu_has_userlocal) -+ seq_puts(m, " userlocal"); -+ if (cpu_has_nofpuex) -+ seq_puts(m, " nofpuex"); -+ if (cpu_has_vint) -+ seq_puts(m, " vint"); -+ if (cpu_has_veic) -+ seq_puts(m, " veic"); -+ if (cpu_has_inclusive_pcaches) -+ seq_puts(m, " inclusive_pcaches"); -+ if (cpu_has_perf_cntr_intr_bit) -+ seq_puts(m, " perf_cntr_intr_bit"); -+ if (cpu_has_ufr) -+ seq_puts(m, " ufr"); -+ if (cpu_has_fre) -+ seq_puts(m, " fre"); -+ if (cpu_has_cdmm) -+ seq_puts(m, " cdmm"); -+ if (cpu_has_small_pages) -+ seq_puts(m, " small_pages"); -+ if (cpu_has_nan_legacy) -+ seq_puts(m, " nan_legacy"); -+ if (cpu_has_nan_2008) -+ seq_puts(m, " nan_2008"); -+ if (cpu_has_ebase_wg) -+ seq_puts(m, " ebase_wg"); -+ if (cpu_has_badinstr) -+ seq_puts(m, " badinstr"); -+ if (cpu_has_badinstrp) -+ seq_puts(m, " badinstrp"); -+ if (cpu_has_contextconfig) -+ 
seq_puts(m, " contextconfig"); -+ if (cpu_has_perf) -+ seq_puts(m, " perf"); -+ if (cpu_has_mac2008_only) -+ seq_puts(m, " mac2008_only"); -+ if (cpu_has_ftlbparex) -+ seq_puts(m, " ftlbparex"); -+ if (cpu_has_gsexcex) -+ seq_puts(m, " gsexcex"); -+ if (cpu_has_shared_ftlb_ram) -+ seq_puts(m, " shared_ftlb_ram"); -+ if (cpu_has_shared_ftlb_entries) -+ seq_puts(m, " shared_ftlb_entries"); -+ if (cpu_has_mipsmt_pertccounters) -+ seq_puts(m, " mipsmt_pertccounters"); -+ if (cpu_has_mmid) -+ seq_puts(m, " mmid"); -+ if (cpu_has_mm_sysad) -+ seq_puts(m, " mm_sysad"); -+ if (cpu_has_mm_full) -+ seq_puts(m, " mm_full"); -+ seq_puts(m, "\n"); -+ - seq_printf(m, "shadow register sets\t: %d\n", - cpu_data[n].srsets); - seq_printf(m, "kscratch registers\t: %d\n", diff --git a/target/linux/generic/backport-6.1/330-v5.16-02-MIPS-Fix-using-smp_processor_id-in-preemptible-in-sh.patch b/target/linux/generic/backport-6.1/330-v5.16-02-MIPS-Fix-using-smp_processor_id-in-preemptible-in-sh.patch deleted file mode 100644 index 6caf7d06d4ec..000000000000 --- a/target/linux/generic/backport-6.1/330-v5.16-02-MIPS-Fix-using-smp_processor_id-in-preemptible-in-sh.patch +++ /dev/null @@ -1,62 +0,0 @@ -From 1cab5bd69eb1f995ced2d7576cb15f8a8941fd85 Mon Sep 17 00:00:00 2001 -From: Tiezhu Yang -Date: Thu, 25 Nov 2021 19:39:32 +0800 -Subject: [PATCH 1/1] MIPS: Fix using smp_processor_id() in preemptible in - show_cpuinfo() - -There exists the following issue under DEBUG_PREEMPT: - - BUG: using smp_processor_id() in preemptible [00000000] code: systemd/1 - caller is show_cpuinfo+0x460/0xea0 - ... - Call Trace: - [] show_stack+0x94/0x128 - [] dump_stack_lvl+0x94/0xd8 - [] check_preemption_disabled+0x104/0x110 - [] show_cpuinfo+0x460/0xea0 - [] seq_read_iter+0xfc/0x4f8 - [] new_sync_read+0x110/0x1b8 - [] vfs_read+0x1b4/0x1d0 - [] ksys_read+0xd0/0x110 - [] syscall_common+0x34/0x58 - -We can see the following call trace: - show_cpuinfo() - cpu_has_fpu - current_cpu_data - smp_processor_id() - - $ addr2line -f -e vmlinux 0xffffffff802209c8 - show_cpuinfo - arch/mips/kernel/proc.c:188 - - $ head -188 arch/mips/kernel/proc.c | tail -1 - if (cpu_has_fpu) - - arch/mips/include/asm/cpu-features.h - # define cpu_has_fpu (current_cpu_data.options & MIPS_CPU_FPU) - - arch/mips/include/asm/cpu-info.h - #define current_cpu_data cpu_data[smp_processor_id()] - -Based on the above analysis, fix the issue by using raw_cpu_has_fpu -which calls raw_smp_processor_id() in show_cpuinfo(). 
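
The underlying difference, paraphrasing the MIPS headers rather than quoting them exactly: current_cpu_data indexes cpu_data[] through smp_processor_id(), which is instrumented under CONFIG_DEBUG_PREEMPT, while the raw_ accessors use raw_smp_processor_id() and skip the check. The value printed by show_cpuinfo() is only informational, so the raw accessor is acceptable here.

    /* arch/mips/include/asm/cpu-info.h (paraphrased) */
    #define current_cpu_data        cpu_data[smp_processor_id()]     /* may warn if preemptible */
    #define raw_current_cpu_data    cpu_data[raw_smp_processor_id()] /* no debug check */

    /* arch/mips/include/asm/cpu-features.h (paraphrased) */
    #define cpu_has_fpu     (current_cpu_data.options & MIPS_CPU_FPU)
    #define raw_cpu_has_fpu (raw_current_cpu_data.options & MIPS_CPU_FPU)
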
- -Fixes: 626bfa037299 ("MIPS: kernel: proc: add CPU option reporting") -Signed-off-by: Tiezhu Yang -Signed-off-by: Thomas Bogendoerfer ---- - arch/mips/kernel/proc.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/arch/mips/kernel/proc.c -+++ b/arch/mips/kernel/proc.c -@@ -166,7 +166,7 @@ static int show_cpuinfo(struct seq_file - seq_puts(m, " tx39_cache"); - if (cpu_has_octeon_cache) - seq_puts(m, " octeon_cache"); -- if (cpu_has_fpu) -+ if (raw_cpu_has_fpu) - seq_puts(m, " fpu"); - if (cpu_has_32fpr) - seq_puts(m, " 32fpr"); diff --git a/target/linux/generic/backport-6.1/331-v5.19-mtd-spinand-Add-support-for-XTX-XT26G0xA.patch b/target/linux/generic/backport-6.1/331-v5.19-mtd-spinand-Add-support-for-XTX-XT26G0xA.patch deleted file mode 100644 index 541d247542b6..000000000000 --- a/target/linux/generic/backport-6.1/331-v5.19-mtd-spinand-Add-support-for-XTX-XT26G0xA.patch +++ /dev/null @@ -1,186 +0,0 @@ -From f4c5c7f9d2e5ab005d57826b740b694b042a737c Mon Sep 17 00:00:00 2001 -From: Felix Matouschek -Date: Mon, 18 Apr 2022 15:28:03 +0200 -Subject: [PATCH 1/1] mtd: spinand: Add support for XTX XT26G0xA - -Add support for XTX Technology XT26G01AXXXXX, XTX26G02AXXXXX and -XTX26G04AXXXXX SPI NAND. - -These are 3V, 1G/2G/4Gbit serial SLC NAND flash devices with on-die ECC -(8bit strength per 512bytes). - -Tested on Teltonika RUTX10 flashed with OpenWrt. - -Links: - - http://www.xtxtech.com/download/?AId=225 - - https://datasheet.lcsc.com/szlcsc/2005251034_XTX-XT26G01AWSEGA_C558841.pdf -Signed-off-by: Felix Matouschek -Signed-off-by: Miquel Raynal -Link: https://lore.kernel.org/linux-mtd/20220418132803.664103-1-felix@matouschek.org ---- - drivers/mtd/nand/spi/Makefile | 2 +- - drivers/mtd/nand/spi/core.c | 1 + - drivers/mtd/nand/spi/xtx.c | 129 ++++++++++++++++++++++++++++++++++ - include/linux/mtd/spinand.h | 1 + - 4 files changed, 132 insertions(+), 1 deletion(-) - create mode 100644 drivers/mtd/nand/spi/xtx.c - ---- a/drivers/mtd/nand/spi/Makefile -+++ b/drivers/mtd/nand/spi/Makefile -@@ -1,3 +1,3 @@ - # SPDX-License-Identifier: GPL-2.0 --spinand-objs := core.o gigadevice.o macronix.o micron.o paragon.o toshiba.o winbond.o -+spinand-objs := core.o gigadevice.o macronix.o micron.o paragon.o toshiba.o winbond.o xtx.o - obj-$(CONFIG_MTD_SPI_NAND) += spinand.o ---- a/drivers/mtd/nand/spi/core.c -+++ b/drivers/mtd/nand/spi/core.c -@@ -902,6 +902,7 @@ static const struct spinand_manufacturer - ¶gon_spinand_manufacturer, - &toshiba_spinand_manufacturer, - &winbond_spinand_manufacturer, -+ &xtx_spinand_manufacturer, - }; - - static int spinand_manufacturer_match(struct spinand_device *spinand, ---- /dev/null -+++ b/drivers/mtd/nand/spi/xtx.c -@@ -0,0 +1,129 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Author: -+ * Felix Matouschek -+ */ -+ -+#include -+#include -+#include -+ -+#define SPINAND_MFR_XTX 0x0B -+ -+#define XT26G0XA_STATUS_ECC_MASK GENMASK(5, 2) -+#define XT26G0XA_STATUS_ECC_NO_DETECTED (0 << 2) -+#define XT26G0XA_STATUS_ECC_8_CORRECTED (3 << 4) -+#define XT26G0XA_STATUS_ECC_UNCOR_ERROR (2 << 4) -+ -+static SPINAND_OP_VARIANTS(read_cache_variants, -+ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0), -+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), -+ SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), -+ SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), -+ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), -+ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); -+ -+static SPINAND_OP_VARIANTS(write_cache_variants, -+ SPINAND_PROG_LOAD_X4(true, 0, 
NULL, 0), -+ SPINAND_PROG_LOAD(true, 0, NULL, 0)); -+ -+static SPINAND_OP_VARIANTS(update_cache_variants, -+ SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), -+ SPINAND_PROG_LOAD(false, 0, NULL, 0)); -+ -+static int xt26g0xa_ooblayout_ecc(struct mtd_info *mtd, int section, -+ struct mtd_oob_region *region) -+{ -+ if (section) -+ return -ERANGE; -+ -+ region->offset = 48; -+ region->length = 16; -+ -+ return 0; -+} -+ -+static int xt26g0xa_ooblayout_free(struct mtd_info *mtd, int section, -+ struct mtd_oob_region *region) -+{ -+ if (section) -+ return -ERANGE; -+ -+ region->offset = 1; -+ region->length = 47; -+ -+ return 0; -+} -+ -+static const struct mtd_ooblayout_ops xt26g0xa_ooblayout = { -+ .ecc = xt26g0xa_ooblayout_ecc, -+ .free = xt26g0xa_ooblayout_free, -+}; -+ -+static int xt26g0xa_ecc_get_status(struct spinand_device *spinand, -+ u8 status) -+{ -+ status = status & XT26G0XA_STATUS_ECC_MASK; -+ -+ switch (status) { -+ case XT26G0XA_STATUS_ECC_NO_DETECTED: -+ return 0; -+ case XT26G0XA_STATUS_ECC_8_CORRECTED: -+ return 8; -+ case XT26G0XA_STATUS_ECC_UNCOR_ERROR: -+ return -EBADMSG; -+ default: -+ break; -+ } -+ -+ /* At this point values greater than (2 << 4) are invalid */ -+ if (status > XT26G0XA_STATUS_ECC_UNCOR_ERROR) -+ return -EINVAL; -+ -+ /* (1 << 2) through (7 << 2) are 1-7 corrected errors */ -+ return status >> 2; -+} -+ -+static const struct spinand_info xtx_spinand_table[] = { -+ SPINAND_INFO("XT26G01A", -+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xE1), -+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), -+ NAND_ECCREQ(8, 512), -+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants, -+ &write_cache_variants, -+ &update_cache_variants), -+ SPINAND_HAS_QE_BIT, -+ SPINAND_ECCINFO(&xt26g0xa_ooblayout, -+ xt26g0xa_ecc_get_status)), -+ SPINAND_INFO("XT26G02A", -+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xE2), -+ NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1), -+ NAND_ECCREQ(8, 512), -+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants, -+ &write_cache_variants, -+ &update_cache_variants), -+ SPINAND_HAS_QE_BIT, -+ SPINAND_ECCINFO(&xt26g0xa_ooblayout, -+ xt26g0xa_ecc_get_status)), -+ SPINAND_INFO("XT26G04A", -+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xE3), -+ NAND_MEMORG(1, 2048, 64, 128, 2048, 40, 1, 1, 1), -+ NAND_ECCREQ(8, 512), -+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants, -+ &write_cache_variants, -+ &update_cache_variants), -+ SPINAND_HAS_QE_BIT, -+ SPINAND_ECCINFO(&xt26g0xa_ooblayout, -+ xt26g0xa_ecc_get_status)), -+}; -+ -+static const struct spinand_manufacturer_ops xtx_spinand_manuf_ops = { -+}; -+ -+const struct spinand_manufacturer xtx_spinand_manufacturer = { -+ .id = SPINAND_MFR_XTX, -+ .name = "XTX", -+ .chips = xtx_spinand_table, -+ .nchips = ARRAY_SIZE(xtx_spinand_table), -+ .ops = &xtx_spinand_manuf_ops, -+}; ---- a/include/linux/mtd/spinand.h -+++ b/include/linux/mtd/spinand.h -@@ -266,6 +266,7 @@ extern const struct spinand_manufacturer - extern const struct spinand_manufacturer paragon_spinand_manufacturer; - extern const struct spinand_manufacturer toshiba_spinand_manufacturer; - extern const struct spinand_manufacturer winbond_spinand_manufacturer; -+extern const struct spinand_manufacturer xtx_spinand_manufacturer; - - /** - * struct spinand_op_variants - SPI NAND operation variants diff --git a/target/linux/generic/backport-6.1/344-v5.18-01-phy-marvell-phy-mvebu-a3700-comphy-Remove-port-from-.patch b/target/linux/generic/backport-6.1/344-v5.18-01-phy-marvell-phy-mvebu-a3700-comphy-Remove-port-from-.patch deleted file mode 100644 index 
d6c4b4fd1ee2..000000000000 --- a/target/linux/generic/backport-6.1/344-v5.18-01-phy-marvell-phy-mvebu-a3700-comphy-Remove-port-from-.patch +++ /dev/null @@ -1,219 +0,0 @@ -From 4bf18d5a2dd02db8c5b16a2cfae513510506df5b Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Pali=20Roh=C3=A1r?= -Date: Thu, 3 Feb 2022 22:44:40 +0100 -Subject: [PATCH 1/2] phy: marvell: phy-mvebu-a3700-comphy: Remove port from - driver configuration -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Port number is encoded into argument for SMC call. It is zero for SATA, -PCIe and also both USB 3.0 PHYs. It is non-zero only for Ethernet PHY -(incorrectly called SGMII) on lane 0. Ethernet PHY on lane 1 also uses zero -port number. - -So construct "port" bits for SMC call argument can be constructed directly -from PHY type and lane number. - -Change driver code to always pass zero port number for non-ethernet PHYs -and for ethernet PHYs determinate port number from lane number. This -simplifies the driver. - -As port number from DT PHY configuration is not used anymore, remove whole -driver code which parses it. This also simplifies the driver. - -Signed-off-by: Pali Rohár -Signed-off-by: Marek Behún -Reviewed-by: Miquel Raynal -Link: https://lore.kernel.org/r/20220203214444.1508-2-kabel@kernel.org -Signed-off-by: Vinod Koul ---- - drivers/phy/marvell/phy-mvebu-a3700-comphy.c | 62 +++++++++----------- - 1 file changed, 29 insertions(+), 33 deletions(-) - ---- a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c -+++ b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c -@@ -20,7 +20,6 @@ - #include - - #define MVEBU_A3700_COMPHY_LANES 3 --#define MVEBU_A3700_COMPHY_PORTS 2 - - /* COMPHY Fast SMC function identifiers */ - #define COMPHY_SIP_POWER_ON 0x82000001 -@@ -45,51 +44,47 @@ - #define COMPHY_FW_NET(mode, idx, speed) (COMPHY_FW_MODE(mode) | \ - ((idx) << 8) | \ - ((speed) << 2)) --#define COMPHY_FW_PCIE(mode, idx, speed, width) (COMPHY_FW_NET(mode, idx, speed) | \ -+#define COMPHY_FW_PCIE(mode, speed, width) (COMPHY_FW_NET(mode, 0, speed) | \ - ((width) << 18)) - - struct mvebu_a3700_comphy_conf { - unsigned int lane; - enum phy_mode mode; - int submode; -- unsigned int port; - u32 fw_mode; - }; - --#define MVEBU_A3700_COMPHY_CONF(_lane, _mode, _smode, _port, _fw) \ -+#define MVEBU_A3700_COMPHY_CONF(_lane, _mode, _smode, _fw) \ - { \ - .lane = _lane, \ - .mode = _mode, \ - .submode = _smode, \ -- .port = _port, \ - .fw_mode = _fw, \ - } - --#define MVEBU_A3700_COMPHY_CONF_GEN(_lane, _mode, _port, _fw) \ -- MVEBU_A3700_COMPHY_CONF(_lane, _mode, PHY_INTERFACE_MODE_NA, _port, _fw) -+#define MVEBU_A3700_COMPHY_CONF_GEN(_lane, _mode, _fw) \ -+ MVEBU_A3700_COMPHY_CONF(_lane, _mode, PHY_INTERFACE_MODE_NA, _fw) - --#define MVEBU_A3700_COMPHY_CONF_ETH(_lane, _smode, _port, _fw) \ -- MVEBU_A3700_COMPHY_CONF(_lane, PHY_MODE_ETHERNET, _smode, _port, _fw) -+#define MVEBU_A3700_COMPHY_CONF_ETH(_lane, _smode, _fw) \ -+ MVEBU_A3700_COMPHY_CONF(_lane, PHY_MODE_ETHERNET, _smode, _fw) - - static const struct mvebu_a3700_comphy_conf mvebu_a3700_comphy_modes[] = { - /* lane 0 */ -- MVEBU_A3700_COMPHY_CONF_GEN(0, PHY_MODE_USB_HOST_SS, 0, -+ MVEBU_A3700_COMPHY_CONF_GEN(0, PHY_MODE_USB_HOST_SS, - COMPHY_FW_MODE_USB3H), -- MVEBU_A3700_COMPHY_CONF_ETH(0, PHY_INTERFACE_MODE_SGMII, 1, -+ MVEBU_A3700_COMPHY_CONF_ETH(0, PHY_INTERFACE_MODE_SGMII, - COMPHY_FW_MODE_SGMII), -- MVEBU_A3700_COMPHY_CONF_ETH(0, PHY_INTERFACE_MODE_2500BASEX, 1, -+ MVEBU_A3700_COMPHY_CONF_ETH(0, PHY_INTERFACE_MODE_2500BASEX, - 
COMPHY_FW_MODE_2500BASEX), - /* lane 1 */ -- MVEBU_A3700_COMPHY_CONF_GEN(1, PHY_MODE_PCIE, 0, -- COMPHY_FW_MODE_PCIE), -- MVEBU_A3700_COMPHY_CONF_ETH(1, PHY_INTERFACE_MODE_SGMII, 0, -+ MVEBU_A3700_COMPHY_CONF_GEN(1, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE), -+ MVEBU_A3700_COMPHY_CONF_ETH(1, PHY_INTERFACE_MODE_SGMII, - COMPHY_FW_MODE_SGMII), -- MVEBU_A3700_COMPHY_CONF_ETH(1, PHY_INTERFACE_MODE_2500BASEX, 0, -+ MVEBU_A3700_COMPHY_CONF_ETH(1, PHY_INTERFACE_MODE_2500BASEX, - COMPHY_FW_MODE_2500BASEX), - /* lane 2 */ -- MVEBU_A3700_COMPHY_CONF_GEN(2, PHY_MODE_SATA, 0, -- COMPHY_FW_MODE_SATA), -- MVEBU_A3700_COMPHY_CONF_GEN(2, PHY_MODE_USB_HOST_SS, 0, -+ MVEBU_A3700_COMPHY_CONF_GEN(2, PHY_MODE_SATA, COMPHY_FW_MODE_SATA), -+ MVEBU_A3700_COMPHY_CONF_GEN(2, PHY_MODE_USB_HOST_SS, - COMPHY_FW_MODE_USB3H), - }; - -@@ -98,7 +93,6 @@ struct mvebu_a3700_comphy_lane { - unsigned int id; - enum phy_mode mode; - int submode; -- int port; - }; - - static int mvebu_a3700_comphy_smc(unsigned long function, unsigned long lane, -@@ -120,7 +114,7 @@ static int mvebu_a3700_comphy_smc(unsign - } - } - --static int mvebu_a3700_comphy_get_fw_mode(int lane, int port, -+static int mvebu_a3700_comphy_get_fw_mode(int lane, - enum phy_mode mode, - int submode) - { -@@ -132,7 +126,6 @@ static int mvebu_a3700_comphy_get_fw_mod - - for (i = 0; i < n; i++) { - if (mvebu_a3700_comphy_modes[i].lane == lane && -- mvebu_a3700_comphy_modes[i].port == port && - mvebu_a3700_comphy_modes[i].mode == mode && - mvebu_a3700_comphy_modes[i].submode == submode) - break; -@@ -153,7 +146,7 @@ static int mvebu_a3700_comphy_set_mode(s - if (submode == PHY_INTERFACE_MODE_1000BASEX) - submode = PHY_INTERFACE_MODE_SGMII; - -- fw_mode = mvebu_a3700_comphy_get_fw_mode(lane->id, lane->port, mode, -+ fw_mode = mvebu_a3700_comphy_get_fw_mode(lane->id, mode, - submode); - if (fw_mode < 0) { - dev_err(lane->dev, "invalid COMPHY mode\n"); -@@ -172,9 +165,10 @@ static int mvebu_a3700_comphy_power_on(s - struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy); - u32 fw_param; - int fw_mode; -+ int fw_port; - int ret; - -- fw_mode = mvebu_a3700_comphy_get_fw_mode(lane->id, lane->port, -+ fw_mode = mvebu_a3700_comphy_get_fw_mode(lane->id, - lane->mode, lane->submode); - if (fw_mode < 0) { - dev_err(lane->dev, "invalid COMPHY mode\n"); -@@ -191,17 +185,18 @@ static int mvebu_a3700_comphy_power_on(s - fw_param = COMPHY_FW_MODE(fw_mode); - break; - case PHY_MODE_ETHERNET: -+ fw_port = (lane->id == 0) ? 
1 : 0; - switch (lane->submode) { - case PHY_INTERFACE_MODE_SGMII: - dev_dbg(lane->dev, "set lane %d to SGMII mode\n", - lane->id); -- fw_param = COMPHY_FW_NET(fw_mode, lane->port, -+ fw_param = COMPHY_FW_NET(fw_mode, fw_port, - COMPHY_FW_SPEED_1_25G); - break; - case PHY_INTERFACE_MODE_2500BASEX: - dev_dbg(lane->dev, "set lane %d to 2500BASEX mode\n", - lane->id); -- fw_param = COMPHY_FW_NET(fw_mode, lane->port, -+ fw_param = COMPHY_FW_NET(fw_mode, fw_port, - COMPHY_FW_SPEED_3_125G); - break; - default: -@@ -212,8 +207,7 @@ static int mvebu_a3700_comphy_power_on(s - break; - case PHY_MODE_PCIE: - dev_dbg(lane->dev, "set lane %d to PCIe mode\n", lane->id); -- fw_param = COMPHY_FW_PCIE(fw_mode, lane->port, -- COMPHY_FW_SPEED_5G, -+ fw_param = COMPHY_FW_PCIE(fw_mode, COMPHY_FW_SPEED_5G, - phy->attrs.bus_width); - break; - default: -@@ -247,17 +241,20 @@ static struct phy *mvebu_a3700_comphy_xl - struct of_phandle_args *args) - { - struct mvebu_a3700_comphy_lane *lane; -+ unsigned int port; - struct phy *phy; - -- if (WARN_ON(args->args[0] >= MVEBU_A3700_COMPHY_PORTS)) -- return ERR_PTR(-EINVAL); -- - phy = of_phy_simple_xlate(dev, args); - if (IS_ERR(phy)) - return phy; - - lane = phy_get_drvdata(phy); -- lane->port = args->args[0]; -+ -+ port = args->args[0]; -+ if (port != 0 && (port != 1 || lane->id != 0)) { -+ dev_err(lane->dev, "invalid port number %u\n", port); -+ return ERR_PTR(-EINVAL); -+ } - - return phy; - } -@@ -302,7 +299,6 @@ static int mvebu_a3700_comphy_probe(stru - lane->mode = PHY_MODE_INVALID; - lane->submode = PHY_INTERFACE_MODE_NA; - lane->id = lane_id; -- lane->port = -1; - phy_set_drvdata(phy, lane); - } - diff --git a/target/linux/generic/backport-6.1/344-v5.18-02-phy-marvell-phy-mvebu-a3700-comphy-Add-native-kernel.patch b/target/linux/generic/backport-6.1/344-v5.18-02-phy-marvell-phy-mvebu-a3700-comphy-Add-native-kernel.patch deleted file mode 100644 index 4593d14bfe6a..000000000000 --- a/target/linux/generic/backport-6.1/344-v5.18-02-phy-marvell-phy-mvebu-a3700-comphy-Add-native-kernel.patch +++ /dev/null @@ -1,1552 +0,0 @@ -From 934337080c6c59b75db76b180b509f218640ad48 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Pali=20Roh=C3=A1r?= -Date: Thu, 3 Feb 2022 22:44:41 +0100 -Subject: [PATCH 2/2] phy: marvell: phy-mvebu-a3700-comphy: Add native kernel - implementation -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Remove old RPC implementation and add a new native kernel implementation. - -The old implementation uses ARM SMC API to issue RPC calls to ARM Trusted -Firmware which provides real implementation of PHY configuration. - -But older versions of ARM Trusted Firmware do not provide this PHY -configuration functionality, simply returning: operation not supported; or -worse, some versions provide the configuration functionality incorrectly. - -For example the firmware shipped in ESPRESSObin board has this older -version of ARM Trusted Firmware and therefore SATA, USB 3.0 and PCIe -functionality do not work with newer versions of Linux kernel. 
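
For reference, the firmware-RPC path being replaced consists of a single SMC call per operation, forwarding the lane and mode arguments to the SiP service in ARM Trusted Firmware; this is also where the -EOPNOTSUPP mentioned below originates. A sketch mirroring the code removed further down:

    struct arm_smccc_res res;

    arm_smccc_smc(COMPHY_SIP_POWER_ON, lane, mode, 0, 0, 0, 0, 0, &res);
    if (res.a0 == SMCCC_RET_NOT_SUPPORTED)
            return -EOPNOTSUPP;     /* firmware without the COMPHY SiP service */
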
- -Due to the above reasons, the following commits were introduced into Linux, -to workaround these issues by ignoring -EOPNOTSUPP error code from -phy-mvebu-a3700-comphy driver function phy_power_on(): - -commit 45aefe3d2251 ("ata: ahci: mvebu: Make SATA PHY optional for Armada -3720") -commit 3241929b67d2 ("usb: host: xhci: mvebu: make USB 3.0 PHY optional for -Armada 3720") -commit b0c6ae0f8948 ("PCI: aardvark: Fix initialization with old Marvell's -Arm Trusted Firmware") - -Replace this RPC implementation with proper native kernel implementation, -which is independent on the firmware. Never return -EOPNOTSUPP for proper -arguments. - -This should solve multiple issues with real-world boards, where it is not -possible or really inconvenient to change the firmware. Let's eliminate -these issues. - -This implementation is ported directly from Armada 3720 comphy driver found -in newest version of ARM Trusted Firmware source code, but with various -fixes of register names, some added comments, some refactoring due to the -original code not conforming to kernel standards. Also PCIe mode poweroff -support was added here, and PHY reset support. These changes are also going -to be sent to ARM Trusted Firmware. - -[ Pali did the porting from ATF. - I (Marek) then fixed some register names, some various other things, - added some comments and refactored the code to kernel standards. Also - fixed PHY poweroff and added PHY reset. ] - -Signed-off-by: Pali Rohár -Acked-by: Miquel Raynal -Signed-off-by: Marek Behún -Link: https://lore.kernel.org/r/20220203214444.1508-3-kabel@kernel.org -Signed-off-by: Vinod Koul ---- - drivers/phy/marvell/phy-mvebu-a3700-comphy.c | 1332 ++++++++++++++++-- - 1 file changed, 1215 insertions(+), 117 deletions(-) - ---- a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c -+++ b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c -@@ -5,12 +5,16 @@ - * Authors: - * Evan Wang - * Miquèl Raynal -+ * Pali Rohár -+ * Marek Behún - * - * Structure inspired from phy-mvebu-cp110-comphy.c written by Antoine Tenart. -- * SMC call initial support done by Grzegorz Jaszczyk. -+ * Comphy code from ARM Trusted Firmware ported by Pali Rohár -+ * and Marek Behún . - */ - --#include -+#include -+#include - #include - #include - #include -@@ -18,103 +22,1118 @@ - #include - #include - #include -+#include - --#define MVEBU_A3700_COMPHY_LANES 3 -+#define PLL_SET_DELAY_US 600 -+#define COMPHY_PLL_SLEEP 1000 -+#define COMPHY_PLL_TIMEOUT 150000 -+ -+/* Comphy lane2 indirect access register offset */ -+#define COMPHY_LANE2_INDIR_ADDR 0x0 -+#define COMPHY_LANE2_INDIR_DATA 0x4 -+ -+/* SATA and USB3 PHY offset compared to SATA PHY */ -+#define COMPHY_LANE2_REGS_BASE 0x200 -+ -+/* -+ * When accessing common PHY lane registers directly, we need to shift by 1, -+ * since the registers are 16-bit. 
-+ */ -+#define COMPHY_LANE_REG_DIRECT(reg) (((reg) & 0x7FF) << 1) -+ -+/* COMPHY registers */ -+#define COMPHY_POWER_PLL_CTRL 0x01 -+#define PU_IVREF_BIT BIT(15) -+#define PU_PLL_BIT BIT(14) -+#define PU_RX_BIT BIT(13) -+#define PU_TX_BIT BIT(12) -+#define PU_TX_INTP_BIT BIT(11) -+#define PU_DFE_BIT BIT(10) -+#define RESET_DTL_RX_BIT BIT(9) -+#define PLL_LOCK_BIT BIT(8) -+#define REF_FREF_SEL_MASK GENMASK(4, 0) -+#define REF_FREF_SEL_SERDES_25MHZ FIELD_PREP(REF_FREF_SEL_MASK, 0x1) -+#define REF_FREF_SEL_SERDES_40MHZ FIELD_PREP(REF_FREF_SEL_MASK, 0x3) -+#define REF_FREF_SEL_SERDES_50MHZ FIELD_PREP(REF_FREF_SEL_MASK, 0x4) -+#define REF_FREF_SEL_PCIE_USB3_25MHZ FIELD_PREP(REF_FREF_SEL_MASK, 0x2) -+#define REF_FREF_SEL_PCIE_USB3_40MHZ FIELD_PREP(REF_FREF_SEL_MASK, 0x3) -+#define COMPHY_MODE_MASK GENMASK(7, 5) -+#define COMPHY_MODE_SATA FIELD_PREP(COMPHY_MODE_MASK, 0x0) -+#define COMPHY_MODE_PCIE FIELD_PREP(COMPHY_MODE_MASK, 0x3) -+#define COMPHY_MODE_SERDES FIELD_PREP(COMPHY_MODE_MASK, 0x4) -+#define COMPHY_MODE_USB3 FIELD_PREP(COMPHY_MODE_MASK, 0x5) -+ -+#define COMPHY_KVCO_CAL_CTRL 0x02 -+#define USE_MAX_PLL_RATE_BIT BIT(12) -+#define SPEED_PLL_MASK GENMASK(7, 2) -+#define SPEED_PLL_VALUE_16 FIELD_PREP(SPEED_PLL_MASK, 0x10) -+ -+#define COMPHY_DIG_LOOPBACK_EN 0x23 -+#define SEL_DATA_WIDTH_MASK GENMASK(11, 10) -+#define DATA_WIDTH_10BIT FIELD_PREP(SEL_DATA_WIDTH_MASK, 0x0) -+#define DATA_WIDTH_20BIT FIELD_PREP(SEL_DATA_WIDTH_MASK, 0x1) -+#define DATA_WIDTH_40BIT FIELD_PREP(SEL_DATA_WIDTH_MASK, 0x2) -+#define PLL_READY_TX_BIT BIT(4) -+ -+#define COMPHY_SYNC_PATTERN 0x24 -+#define TXD_INVERT_BIT BIT(10) -+#define RXD_INVERT_BIT BIT(11) -+ -+#define COMPHY_SYNC_MASK_GEN 0x25 -+#define PHY_GEN_MAX_MASK GENMASK(11, 10) -+#define PHY_GEN_MAX_USB3_5G FIELD_PREP(PHY_GEN_MAX_MASK, 0x1) -+ -+#define COMPHY_ISOLATION_CTRL 0x26 -+#define PHY_ISOLATE_MODE BIT(15) -+ -+#define COMPHY_GEN2_SET2 0x3e -+#define GS2_TX_SSC_AMP_MASK GENMASK(15, 9) -+#define GS2_TX_SSC_AMP_4128 FIELD_PREP(GS2_TX_SSC_AMP_MASK, 0x20) -+#define GS2_VREG_RXTX_MAS_ISET_MASK GENMASK(8, 7) -+#define GS2_VREG_RXTX_MAS_ISET_60U FIELD_PREP(GS2_VREG_RXTX_MAS_ISET_MASK,\ -+ 0x0) -+#define GS2_VREG_RXTX_MAS_ISET_80U FIELD_PREP(GS2_VREG_RXTX_MAS_ISET_MASK,\ -+ 0x1) -+#define GS2_VREG_RXTX_MAS_ISET_100U FIELD_PREP(GS2_VREG_RXTX_MAS_ISET_MASK,\ -+ 0x2) -+#define GS2_VREG_RXTX_MAS_ISET_120U FIELD_PREP(GS2_VREG_RXTX_MAS_ISET_MASK,\ -+ 0x3) -+#define GS2_RSVD_6_0_MASK GENMASK(6, 0) -+ -+#define COMPHY_GEN3_SET2 0x3f -+ -+#define COMPHY_IDLE_SYNC_EN 0x48 -+#define IDLE_SYNC_EN BIT(12) -+ -+#define COMPHY_MISC_CTRL0 0x4F -+#define CLK100M_125M_EN BIT(4) -+#define TXDCLK_2X_SEL BIT(6) -+#define CLK500M_EN BIT(7) -+#define PHY_REF_CLK_SEL BIT(10) -+ -+#define COMPHY_SFT_RESET 0x52 -+#define SFT_RST BIT(9) -+#define SFT_RST_NO_REG BIT(10) -+ -+#define COMPHY_MISC_CTRL1 0x73 -+#define SEL_BITS_PCIE_FORCE BIT(15) -+ -+#define COMPHY_GEN2_SET3 0x112 -+#define GS3_FFE_CAP_SEL_MASK GENMASK(3, 0) -+#define GS3_FFE_CAP_SEL_VALUE FIELD_PREP(GS3_FFE_CAP_SEL_MASK, 0xF) -+ -+/* PIPE registers */ -+#define COMPHY_PIPE_LANE_CFG0 0x180 -+#define PRD_TXDEEMPH0_MASK BIT(0) -+#define PRD_TXMARGIN_MASK GENMASK(3, 1) -+#define PRD_TXSWING_MASK BIT(4) -+#define CFG_TX_ALIGN_POS_MASK GENMASK(8, 5) -+ -+#define COMPHY_PIPE_LANE_CFG1 0x181 -+#define PRD_TXDEEMPH1_MASK BIT(15) -+#define USE_MAX_PLL_RATE_EN BIT(9) -+#define TX_DET_RX_MODE BIT(6) -+#define GEN2_TX_DATA_DLY_MASK GENMASK(4, 3) -+#define GEN2_TX_DATA_DLY_DEFT FIELD_PREP(GEN2_TX_DATA_DLY_MASK, 2) -+#define 
TX_ELEC_IDLE_MODE_EN BIT(0) -+ -+#define COMPHY_PIPE_LANE_STAT1 0x183 -+#define TXDCLK_PCLK_EN BIT(0) -+ -+#define COMPHY_PIPE_LANE_CFG4 0x188 -+#define SPREAD_SPECTRUM_CLK_EN BIT(7) -+ -+#define COMPHY_PIPE_RST_CLK_CTRL 0x1C1 -+#define PIPE_SOFT_RESET BIT(0) -+#define PIPE_REG_RESET BIT(1) -+#define MODE_CORE_CLK_FREQ_SEL BIT(9) -+#define MODE_PIPE_WIDTH_32 BIT(3) -+#define MODE_REFDIV_MASK GENMASK(5, 4) -+#define MODE_REFDIV_BY_4 FIELD_PREP(MODE_REFDIV_MASK, 0x2) -+ -+#define COMPHY_PIPE_TEST_MODE_CTRL 0x1C2 -+#define MODE_MARGIN_OVERRIDE BIT(2) -+ -+#define COMPHY_PIPE_CLK_SRC_LO 0x1C3 -+#define MODE_CLK_SRC BIT(0) -+#define BUNDLE_PERIOD_SEL BIT(1) -+#define BUNDLE_PERIOD_SCALE_MASK GENMASK(3, 2) -+#define BUNDLE_SAMPLE_CTRL BIT(4) -+#define PLL_READY_DLY_MASK GENMASK(7, 5) -+#define CFG_SEL_20B BIT(15) -+ -+#define COMPHY_PIPE_PWR_MGM_TIM1 0x1D0 -+#define CFG_PM_OSCCLK_WAIT_MASK GENMASK(15, 12) -+#define CFG_PM_RXDEN_WAIT_MASK GENMASK(11, 8) -+#define CFG_PM_RXDEN_WAIT_1_UNIT FIELD_PREP(CFG_PM_RXDEN_WAIT_MASK, 0x1) -+#define CFG_PM_RXDLOZ_WAIT_MASK GENMASK(7, 0) -+#define CFG_PM_RXDLOZ_WAIT_7_UNIT FIELD_PREP(CFG_PM_RXDLOZ_WAIT_MASK, 0x7) -+#define CFG_PM_RXDLOZ_WAIT_12_UNIT FIELD_PREP(CFG_PM_RXDLOZ_WAIT_MASK, 0xC) -+ -+/* -+ * This register is not from PHY lane register space. It only exists in the -+ * indirect register space, before the actual PHY lane 2 registers. So the -+ * offset is absolute, not relative to COMPHY_LANE2_REGS_BASE. -+ * It is used only for SATA PHY initialization. -+ */ -+#define COMPHY_RESERVED_REG 0x0E -+#define PHYCTRL_FRM_PIN_BIT BIT(13) - --/* COMPHY Fast SMC function identifiers */ --#define COMPHY_SIP_POWER_ON 0x82000001 --#define COMPHY_SIP_POWER_OFF 0x82000002 --#define COMPHY_SIP_PLL_LOCK 0x82000003 -- --#define COMPHY_FW_MODE_SATA 0x1 --#define COMPHY_FW_MODE_SGMII 0x2 --#define COMPHY_FW_MODE_2500BASEX 0x3 --#define COMPHY_FW_MODE_USB3H 0x4 --#define COMPHY_FW_MODE_USB3D 0x5 --#define COMPHY_FW_MODE_PCIE 0x6 --#define COMPHY_FW_MODE_USB3 0xa -- --#define COMPHY_FW_SPEED_1_25G 0 /* SGMII 1G */ --#define COMPHY_FW_SPEED_2_5G 1 --#define COMPHY_FW_SPEED_3_125G 2 /* 2500BASE-X */ --#define COMPHY_FW_SPEED_5G 3 --#define COMPHY_FW_SPEED_MAX 0x3F -- --#define COMPHY_FW_MODE(mode) ((mode) << 12) --#define COMPHY_FW_NET(mode, idx, speed) (COMPHY_FW_MODE(mode) | \ -- ((idx) << 8) | \ -- ((speed) << 2)) --#define COMPHY_FW_PCIE(mode, speed, width) (COMPHY_FW_NET(mode, 0, speed) | \ -- ((width) << 18)) -+/* South Bridge PHY Configuration Registers */ -+#define COMPHY_PHY_REG(lane, reg) (((1 - (lane)) * 0x28) + ((reg) & 0x3f)) -+ -+/* -+ * lane0: USB3/GbE1 PHY Configuration 1 -+ * lane1: PCIe/GbE0 PHY Configuration 1 -+ * (used only by SGMII code) -+ */ -+#define COMPHY_PHY_CFG1 0x0 -+#define PIN_PU_IVREF_BIT BIT(1) -+#define PIN_RESET_CORE_BIT BIT(11) -+#define PIN_RESET_COMPHY_BIT BIT(12) -+#define PIN_PU_PLL_BIT BIT(16) -+#define PIN_PU_RX_BIT BIT(17) -+#define PIN_PU_TX_BIT BIT(18) -+#define PIN_TX_IDLE_BIT BIT(19) -+#define GEN_RX_SEL_MASK GENMASK(25, 22) -+#define GEN_RX_SEL_VALUE(val) FIELD_PREP(GEN_RX_SEL_MASK, (val)) -+#define GEN_TX_SEL_MASK GENMASK(29, 26) -+#define GEN_TX_SEL_VALUE(val) FIELD_PREP(GEN_TX_SEL_MASK, (val)) -+#define SERDES_SPEED_1_25_G 0x6 -+#define SERDES_SPEED_3_125_G 0x8 -+#define PHY_RX_INIT_BIT BIT(30) -+ -+/* -+ * lane0: USB3/GbE1 PHY Status 1 -+ * lane1: PCIe/GbE0 PHY Status 1 -+ * (used only by SGMII code) -+ */ -+#define COMPHY_PHY_STAT1 0x18 -+#define PHY_RX_INIT_DONE_BIT BIT(0) -+#define PHY_PLL_READY_RX_BIT BIT(2) -+#define 
PHY_PLL_READY_TX_BIT BIT(3) -+ -+/* PHY Selector */ -+#define COMPHY_SELECTOR_PHY_REG 0xFC -+/* bit0: 0: Lane1 is GbE0; 1: Lane1 is PCIe */ -+#define COMPHY_SELECTOR_PCIE_GBE0_SEL_BIT BIT(0) -+/* bit4: 0: Lane0 is GbE1; 1: Lane0 is USB3 */ -+#define COMPHY_SELECTOR_USB3_GBE1_SEL_BIT BIT(4) -+/* bit8: 0: Lane0 is USB3 instead of GbE1, Lane2 is SATA; 1: Lane2 is USB3 */ -+#define COMPHY_SELECTOR_USB3_PHY_SEL_BIT BIT(8) - - struct mvebu_a3700_comphy_conf { - unsigned int lane; - enum phy_mode mode; - int submode; -- u32 fw_mode; - }; - --#define MVEBU_A3700_COMPHY_CONF(_lane, _mode, _smode, _fw) \ -+#define MVEBU_A3700_COMPHY_CONF(_lane, _mode, _smode) \ - { \ - .lane = _lane, \ - .mode = _mode, \ - .submode = _smode, \ -- .fw_mode = _fw, \ - } - --#define MVEBU_A3700_COMPHY_CONF_GEN(_lane, _mode, _fw) \ -- MVEBU_A3700_COMPHY_CONF(_lane, _mode, PHY_INTERFACE_MODE_NA, _fw) -+#define MVEBU_A3700_COMPHY_CONF_GEN(_lane, _mode) \ -+ MVEBU_A3700_COMPHY_CONF(_lane, _mode, PHY_INTERFACE_MODE_NA) - --#define MVEBU_A3700_COMPHY_CONF_ETH(_lane, _smode, _fw) \ -- MVEBU_A3700_COMPHY_CONF(_lane, PHY_MODE_ETHERNET, _smode, _fw) -+#define MVEBU_A3700_COMPHY_CONF_ETH(_lane, _smode) \ -+ MVEBU_A3700_COMPHY_CONF(_lane, PHY_MODE_ETHERNET, _smode) - - static const struct mvebu_a3700_comphy_conf mvebu_a3700_comphy_modes[] = { - /* lane 0 */ -- MVEBU_A3700_COMPHY_CONF_GEN(0, PHY_MODE_USB_HOST_SS, -- COMPHY_FW_MODE_USB3H), -- MVEBU_A3700_COMPHY_CONF_ETH(0, PHY_INTERFACE_MODE_SGMII, -- COMPHY_FW_MODE_SGMII), -- MVEBU_A3700_COMPHY_CONF_ETH(0, PHY_INTERFACE_MODE_2500BASEX, -- COMPHY_FW_MODE_2500BASEX), -+ MVEBU_A3700_COMPHY_CONF_GEN(0, PHY_MODE_USB_HOST_SS), -+ MVEBU_A3700_COMPHY_CONF_ETH(0, PHY_INTERFACE_MODE_SGMII), -+ MVEBU_A3700_COMPHY_CONF_ETH(0, PHY_INTERFACE_MODE_1000BASEX), -+ MVEBU_A3700_COMPHY_CONF_ETH(0, PHY_INTERFACE_MODE_2500BASEX), - /* lane 1 */ -- MVEBU_A3700_COMPHY_CONF_GEN(1, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE), -- MVEBU_A3700_COMPHY_CONF_ETH(1, PHY_INTERFACE_MODE_SGMII, -- COMPHY_FW_MODE_SGMII), -- MVEBU_A3700_COMPHY_CONF_ETH(1, PHY_INTERFACE_MODE_2500BASEX, -- COMPHY_FW_MODE_2500BASEX), -+ MVEBU_A3700_COMPHY_CONF_GEN(1, PHY_MODE_PCIE), -+ MVEBU_A3700_COMPHY_CONF_ETH(1, PHY_INTERFACE_MODE_SGMII), -+ MVEBU_A3700_COMPHY_CONF_ETH(1, PHY_INTERFACE_MODE_1000BASEX), -+ MVEBU_A3700_COMPHY_CONF_ETH(1, PHY_INTERFACE_MODE_2500BASEX), - /* lane 2 */ -- MVEBU_A3700_COMPHY_CONF_GEN(2, PHY_MODE_SATA, COMPHY_FW_MODE_SATA), -- MVEBU_A3700_COMPHY_CONF_GEN(2, PHY_MODE_USB_HOST_SS, -- COMPHY_FW_MODE_USB3H), -+ MVEBU_A3700_COMPHY_CONF_GEN(2, PHY_MODE_SATA), -+ MVEBU_A3700_COMPHY_CONF_GEN(2, PHY_MODE_USB_HOST_SS), -+}; -+ -+struct mvebu_a3700_comphy_priv { -+ void __iomem *comphy_regs; -+ void __iomem *lane0_phy_regs; /* USB3 and GbE1 */ -+ void __iomem *lane1_phy_regs; /* PCIe and GbE0 */ -+ void __iomem *lane2_phy_indirect; /* SATA and USB3 */ -+ spinlock_t lock; /* for PHY selector access */ -+ bool xtal_is_40m; - }; - - struct mvebu_a3700_comphy_lane { -+ struct mvebu_a3700_comphy_priv *priv; - struct device *dev; - unsigned int id; - enum phy_mode mode; - int submode; -+ bool invert_tx; -+ bool invert_rx; -+ bool needs_reset; -+}; -+ -+struct gbe_phy_init_data_fix { -+ u16 addr; -+ u16 value; -+}; -+ -+/* Changes to 40M1G25 mode data required for running 40M3G125 init mode */ -+static struct gbe_phy_init_data_fix gbe_phy_init_fix[] = { -+ { 0x005, 0x07CC }, { 0x015, 0x0000 }, { 0x01B, 0x0000 }, -+ { 0x01D, 0x0000 }, { 0x01E, 0x0000 }, { 0x01F, 0x0000 }, -+ { 0x020, 0x0000 }, { 0x021, 0x0030 }, { 0x026, 0x0888 }, -+ 
{ 0x04D, 0x0152 }, { 0x04F, 0xA020 }, { 0x050, 0x07CC }, -+ { 0x053, 0xE9CA }, { 0x055, 0xBD97 }, { 0x071, 0x3015 }, -+ { 0x076, 0x03AA }, { 0x07C, 0x0FDF }, { 0x0C2, 0x3030 }, -+ { 0x0C3, 0x8000 }, { 0x0E2, 0x5550 }, { 0x0E3, 0x12A4 }, -+ { 0x0E4, 0x7D00 }, { 0x0E6, 0x0C83 }, { 0x101, 0xFCC0 }, -+ { 0x104, 0x0C10 } - }; - --static int mvebu_a3700_comphy_smc(unsigned long function, unsigned long lane, -- unsigned long mode) -+/* 40M1G25 mode init data */ -+static u16 gbe_phy_init[512] = { -+ /* 0 1 2 3 4 5 6 7 */ -+ /*-----------------------------------------------------------*/ -+ /* 8 9 A B C D E F */ -+ 0x3110, 0xFD83, 0x6430, 0x412F, 0x82C0, 0x06FA, 0x4500, 0x6D26, /* 00 */ -+ 0xAFC0, 0x8000, 0xC000, 0x0000, 0x2000, 0x49CC, 0x0BC9, 0x2A52, /* 08 */ -+ 0x0BD2, 0x0CDE, 0x13D2, 0x0CE8, 0x1149, 0x10E0, 0x0000, 0x0000, /* 10 */ -+ 0x0000, 0x0000, 0x0000, 0x0001, 0x0000, 0x4134, 0x0D2D, 0xFFFF, /* 18 */ -+ 0xFFE0, 0x4030, 0x1016, 0x0030, 0x0000, 0x0800, 0x0866, 0x0000, /* 20 */ -+ 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, /* 28 */ -+ 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /* 30 */ -+ 0x0000, 0x0000, 0x000F, 0x6A62, 0x1988, 0x3100, 0x3100, 0x3100, /* 38 */ -+ 0x3100, 0xA708, 0x2430, 0x0830, 0x1030, 0x4610, 0xFF00, 0xFF00, /* 40 */ -+ 0x0060, 0x1000, 0x0400, 0x0040, 0x00F0, 0x0155, 0x1100, 0xA02A, /* 48 */ -+ 0x06FA, 0x0080, 0xB008, 0xE3ED, 0x5002, 0xB592, 0x7A80, 0x0001, /* 50 */ -+ 0x020A, 0x8820, 0x6014, 0x8054, 0xACAA, 0xFC88, 0x2A02, 0x45CF, /* 58 */ -+ 0x000F, 0x1817, 0x2860, 0x064F, 0x0000, 0x0204, 0x1800, 0x6000, /* 60 */ -+ 0x810F, 0x4F23, 0x4000, 0x4498, 0x0850, 0x0000, 0x000E, 0x1002, /* 68 */ -+ 0x9D3A, 0x3009, 0xD066, 0x0491, 0x0001, 0x6AB0, 0x0399, 0x3780, /* 70 */ -+ 0x0040, 0x5AC0, 0x4A80, 0x0000, 0x01DF, 0x0000, 0x0007, 0x0000, /* 78 */ -+ 0x2D54, 0x00A1, 0x4000, 0x0100, 0xA20A, 0x0000, 0x0000, 0x0000, /* 80 */ -+ 0x0000, 0x0000, 0x0000, 0x7400, 0x0E81, 0x1000, 0x1242, 0x0210, /* 88 */ -+ 0x80DF, 0x0F1F, 0x2F3F, 0x4F5F, 0x6F7F, 0x0F1F, 0x2F3F, 0x4F5F, /* 90 */ -+ 0x6F7F, 0x4BAD, 0x0000, 0x0000, 0x0800, 0x0000, 0x2400, 0xB651, /* 98 */ -+ 0xC9E0, 0x4247, 0x0A24, 0x0000, 0xAF19, 0x1004, 0x0000, 0x0000, /* A0 */ -+ 0x0000, 0x0013, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /* A8 */ -+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /* B0 */ -+ 0x0000, 0x0000, 0x0000, 0x0060, 0x0000, 0x0000, 0x0000, 0x0000, /* B8 */ -+ 0x0000, 0x0000, 0x3010, 0xFA00, 0x0000, 0x0000, 0x0000, 0x0003, /* C0 */ -+ 0x1618, 0x8200, 0x8000, 0x0400, 0x050F, 0x0000, 0x0000, 0x0000, /* C8 */ -+ 0x4C93, 0x0000, 0x1000, 0x1120, 0x0010, 0x1242, 0x1242, 0x1E00, /* D0 */ -+ 0x0000, 0x0000, 0x0000, 0x00F8, 0x0000, 0x0041, 0x0800, 0x0000, /* D8 */ -+ 0x82A0, 0x572E, 0x2490, 0x14A9, 0x4E00, 0x0000, 0x0803, 0x0541, /* E0 */ -+ 0x0C15, 0x0000, 0x0000, 0x0400, 0x2626, 0x0000, 0x0000, 0x4200, /* E8 */ -+ 0x0000, 0xAA55, 0x1020, 0x0000, 0x0000, 0x5010, 0x0000, 0x0000, /* F0 */ -+ 0x0000, 0x0000, 0x5000, 0x0000, 0x0000, 0x0000, 0x02F2, 0x0000, /* F8 */ -+ 0x101F, 0xFDC0, 0x4000, 0x8010, 0x0110, 0x0006, 0x0000, 0x0000, /*100 */ -+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*108 */ -+ 0x04CF, 0x0000, 0x04CF, 0x0000, 0x04CF, 0x0000, 0x04C6, 0x0000, /*110 */ -+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*118 */ -+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*120 */ -+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*128 */ -+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 
0x0000, 0x0000, /*130 */ -+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*138 */ -+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*140 */ -+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*148 */ -+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*150 */ -+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*158 */ -+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*160 */ -+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*168 */ -+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*170 */ -+ 0x0000, 0x0000, 0x0000, 0x00F0, 0x08A2, 0x3112, 0x0A14, 0x0000, /*178 */ -+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*180 */ -+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*188 */ -+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*190 */ -+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*198 */ -+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1A0 */ -+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1A8 */ -+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1B0 */ -+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1B8 */ -+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1C0 */ -+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1C8 */ -+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1D0 */ -+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1D8 */ -+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1E0 */ -+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1E8 */ -+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1F0 */ -+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 /*1F8 */ -+}; -+ -+static inline void comphy_reg_set(void __iomem *addr, u32 data, u32 mask) - { -- struct arm_smccc_res res; -- s32 ret; -+ u32 val; -+ -+ val = readl(addr); -+ val = (val & ~mask) | (data & mask); -+ writel(val, addr); -+} - -- arm_smccc_smc(function, lane, mode, 0, 0, 0, 0, 0, &res); -- ret = res.a0; -+static inline void comphy_reg_set16(void __iomem *addr, u16 data, u16 mask) -+{ -+ u16 val; - -- switch (ret) { -- case SMCCC_RET_SUCCESS: -- return 0; -- case SMCCC_RET_NOT_SUPPORTED: -- return -EOPNOTSUPP; -+ val = readw(addr); -+ val = (val & ~mask) | (data & mask); -+ writew(val, addr); -+} -+ -+/* Used for accessing lane 2 registers (SATA/USB3 PHY) */ -+static void comphy_set_indirect(struct mvebu_a3700_comphy_priv *priv, -+ u32 offset, u16 data, u16 mask) -+{ -+ writel(offset, -+ priv->lane2_phy_indirect + COMPHY_LANE2_INDIR_ADDR); -+ comphy_reg_set(priv->lane2_phy_indirect + COMPHY_LANE2_INDIR_DATA, -+ data, mask); -+} -+ -+static void comphy_lane_reg_set(struct mvebu_a3700_comphy_lane *lane, -+ u16 reg, u16 data, u16 mask) -+{ -+ if (lane->id == 2) { -+ /* lane 2 PHY registers are accessed indirectly */ -+ comphy_set_indirect(lane->priv, -+ reg + COMPHY_LANE2_REGS_BASE, -+ data, mask); -+ } else { -+ void __iomem *base = lane->id == 1 ? 
-+ lane->priv->lane1_phy_regs : -+ lane->priv->lane0_phy_regs; -+ -+ comphy_reg_set16(base + COMPHY_LANE_REG_DIRECT(reg), -+ data, mask); -+ } -+} -+ -+static int comphy_lane_reg_poll(struct mvebu_a3700_comphy_lane *lane, -+ u16 reg, u16 bits, -+ ulong sleep_us, ulong timeout_us) -+{ -+ int ret; -+ -+ if (lane->id == 2) { -+ u32 data; -+ -+ /* lane 2 PHY registers are accessed indirectly */ -+ writel(reg + COMPHY_LANE2_REGS_BASE, -+ lane->priv->lane2_phy_indirect + -+ COMPHY_LANE2_INDIR_ADDR); -+ -+ ret = readl_poll_timeout(lane->priv->lane2_phy_indirect + -+ COMPHY_LANE2_INDIR_DATA, -+ data, (data & bits) == bits, -+ sleep_us, timeout_us); -+ } else { -+ void __iomem *base = lane->id == 1 ? -+ lane->priv->lane1_phy_regs : -+ lane->priv->lane0_phy_regs; -+ u16 data; -+ -+ ret = readw_poll_timeout(base + COMPHY_LANE_REG_DIRECT(reg), -+ data, (data & bits) == bits, -+ sleep_us, timeout_us); -+ } -+ -+ return ret; -+} -+ -+static void comphy_periph_reg_set(struct mvebu_a3700_comphy_lane *lane, -+ u8 reg, u32 data, u32 mask) -+{ -+ comphy_reg_set(lane->priv->comphy_regs + COMPHY_PHY_REG(lane->id, reg), -+ data, mask); -+} -+ -+static int comphy_periph_reg_poll(struct mvebu_a3700_comphy_lane *lane, -+ u8 reg, u32 bits, -+ ulong sleep_us, ulong timeout_us) -+{ -+ u32 data; -+ -+ return readl_poll_timeout(lane->priv->comphy_regs + -+ COMPHY_PHY_REG(lane->id, reg), -+ data, (data & bits) == bits, -+ sleep_us, timeout_us); -+} -+ -+/* PHY selector configures with corresponding modes */ -+static int -+mvebu_a3700_comphy_set_phy_selector(struct mvebu_a3700_comphy_lane *lane) -+{ -+ u32 old, new, clr = 0, set = 0; -+ unsigned long flags; -+ -+ switch (lane->mode) { -+ case PHY_MODE_SATA: -+ /* SATA must be in Lane2 */ -+ if (lane->id == 2) -+ clr = COMPHY_SELECTOR_USB3_PHY_SEL_BIT; -+ else -+ goto error; -+ break; -+ -+ case PHY_MODE_ETHERNET: -+ if (lane->id == 0) -+ clr = COMPHY_SELECTOR_USB3_GBE1_SEL_BIT; -+ else if (lane->id == 1) -+ clr = COMPHY_SELECTOR_PCIE_GBE0_SEL_BIT; -+ else -+ goto error; -+ break; -+ -+ case PHY_MODE_USB_HOST_SS: -+ if (lane->id == 2) -+ set = COMPHY_SELECTOR_USB3_PHY_SEL_BIT; -+ else if (lane->id == 0) -+ set = COMPHY_SELECTOR_USB3_GBE1_SEL_BIT; -+ else -+ goto error; -+ break; -+ -+ case PHY_MODE_PCIE: -+ /* PCIE must be in Lane1 */ -+ if (lane->id == 1) -+ set = COMPHY_SELECTOR_PCIE_GBE0_SEL_BIT; -+ else -+ goto error; -+ break; -+ -+ default: -+ goto error; -+ } -+ -+ spin_lock_irqsave(&lane->priv->lock, flags); -+ -+ old = readl(lane->priv->comphy_regs + COMPHY_SELECTOR_PHY_REG); -+ new = (old & ~clr) | set; -+ writel(new, lane->priv->comphy_regs + COMPHY_SELECTOR_PHY_REG); -+ -+ spin_unlock_irqrestore(&lane->priv->lock, flags); -+ -+ dev_dbg(lane->dev, -+ "COMPHY[%d] mode[%d] changed PHY selector 0x%08x -> 0x%08x\n", -+ lane->id, lane->mode, old, new); -+ -+ return 0; -+error: -+ dev_err(lane->dev, "COMPHY[%d] mode[%d] is invalid\n", lane->id, -+ lane->mode); -+ return -EINVAL; -+} -+ -+static int -+mvebu_a3700_comphy_sata_power_on(struct mvebu_a3700_comphy_lane *lane) -+{ -+ u32 mask, data, ref_clk; -+ int ret; -+ -+ /* Configure phy selector for SATA */ -+ ret = mvebu_a3700_comphy_set_phy_selector(lane); -+ if (ret) -+ return ret; -+ -+ /* Clear phy isolation mode to make it work in normal mode */ -+ comphy_lane_reg_set(lane, COMPHY_ISOLATION_CTRL, -+ 0x0, PHY_ISOLATE_MODE); -+ -+ /* 0. 
Check the Polarity invert bits */ -+ data = 0x0; -+ if (lane->invert_tx) -+ data |= TXD_INVERT_BIT; -+ if (lane->invert_rx) -+ data |= RXD_INVERT_BIT; -+ mask = TXD_INVERT_BIT | RXD_INVERT_BIT; -+ comphy_lane_reg_set(lane, COMPHY_SYNC_PATTERN, data, mask); -+ -+ /* 1. Select 40-bit data width */ -+ comphy_lane_reg_set(lane, COMPHY_DIG_LOOPBACK_EN, -+ DATA_WIDTH_40BIT, SEL_DATA_WIDTH_MASK); -+ -+ /* 2. Select reference clock(25M) and PHY mode (SATA) */ -+ if (lane->priv->xtal_is_40m) -+ ref_clk = REF_FREF_SEL_SERDES_40MHZ; -+ else -+ ref_clk = REF_FREF_SEL_SERDES_25MHZ; -+ -+ data = ref_clk | COMPHY_MODE_SATA; -+ mask = REF_FREF_SEL_MASK | COMPHY_MODE_MASK; -+ comphy_lane_reg_set(lane, COMPHY_POWER_PLL_CTRL, data, mask); -+ -+ /* 3. Use maximum PLL rate (no power save) */ -+ comphy_lane_reg_set(lane, COMPHY_KVCO_CAL_CTRL, -+ USE_MAX_PLL_RATE_BIT, USE_MAX_PLL_RATE_BIT); -+ -+ /* 4. Reset reserved bit */ -+ comphy_set_indirect(lane->priv, COMPHY_RESERVED_REG, -+ 0x0, PHYCTRL_FRM_PIN_BIT); -+ -+ /* 5. Set vendor-specific configuration (It is done in sata driver) */ -+ /* XXX: in U-Boot below sequence was executed in this place, in Linux -+ * not. Now it is done only in U-Boot before this comphy -+ * initialization - tests shows that it works ok, but in case of any -+ * future problem it is left for reference. -+ * reg_set(MVEBU_REGS_BASE + 0xe00a0, 0, 0xffffffff); -+ * reg_set(MVEBU_REGS_BASE + 0xe00a4, BIT(6), BIT(6)); -+ */ -+ -+ /* Wait for > 55 us to allow PLL be enabled */ -+ udelay(PLL_SET_DELAY_US); -+ -+ /* Polling status */ -+ ret = comphy_lane_reg_poll(lane, COMPHY_DIG_LOOPBACK_EN, -+ PLL_READY_TX_BIT, COMPHY_PLL_SLEEP, -+ COMPHY_PLL_TIMEOUT); -+ if (ret) -+ dev_err(lane->dev, "Failed to lock SATA PLL\n"); -+ -+ return ret; -+} -+ -+static void comphy_gbe_phy_init(struct mvebu_a3700_comphy_lane *lane, -+ bool is_1gbps) -+{ -+ int addr, fix_idx; -+ u16 val; -+ -+ fix_idx = 0; -+ for (addr = 0; addr < 512; addr++) { -+ /* -+ * All PHY register values are defined in full for 3.125Gbps -+ * SERDES speed. The values required for 1.25 Gbps are almost -+ * the same and only few registers should be "fixed" in -+ * comparison to 3.125 Gbps values. These register values are -+ * stored in "gbe_phy_init_fix" array. -+ */ -+ if (!is_1gbps && gbe_phy_init_fix[fix_idx].addr == addr) { -+ /* Use new value */ -+ val = gbe_phy_init_fix[fix_idx].value; -+ if (fix_idx < ARRAY_SIZE(gbe_phy_init_fix)) -+ fix_idx++; -+ } else { -+ val = gbe_phy_init[addr]; -+ } -+ -+ comphy_lane_reg_set(lane, addr, val, 0xFFFF); -+ } -+} -+ -+static int -+mvebu_a3700_comphy_ethernet_power_on(struct mvebu_a3700_comphy_lane *lane) -+{ -+ u32 mask, data, speed_sel; -+ int ret; -+ -+ /* Set selector */ -+ ret = mvebu_a3700_comphy_set_phy_selector(lane); -+ if (ret) -+ return ret; -+ -+ /* -+ * 1. Reset PHY by setting PHY input port PIN_RESET=1. -+ * 2. Set PHY input port PIN_TX_IDLE=1, PIN_PU_IVREF=1 to keep -+ * PHY TXP/TXN output to idle state during PHY initialization -+ * 3. Set PHY input port PIN_PU_PLL=0, PIN_PU_RX=0, PIN_PU_TX=0. -+ */ -+ data = PIN_PU_IVREF_BIT | PIN_TX_IDLE_BIT | PIN_RESET_COMPHY_BIT; -+ mask = data | PIN_RESET_CORE_BIT | PIN_PU_PLL_BIT | PIN_PU_RX_BIT | -+ PIN_PU_TX_BIT | PHY_RX_INIT_BIT; -+ comphy_periph_reg_set(lane, COMPHY_PHY_CFG1, data, mask); -+ -+ /* 4. Release reset to the PHY by setting PIN_RESET=0. */ -+ data = 0x0; -+ mask = PIN_RESET_COMPHY_BIT; -+ comphy_periph_reg_set(lane, COMPHY_PHY_CFG1, data, mask); -+ -+ /* -+ * 5. 
Set PIN_PHY_GEN_TX[3:0] and PIN_PHY_GEN_RX[3:0] to decide COMPHY -+ * bit rate -+ */ -+ switch (lane->submode) { -+ case PHY_INTERFACE_MODE_SGMII: -+ case PHY_INTERFACE_MODE_1000BASEX: -+ /* SGMII 1G, SerDes speed 1.25G */ -+ speed_sel = SERDES_SPEED_1_25_G; -+ break; -+ case PHY_INTERFACE_MODE_2500BASEX: -+ /* 2500Base-X, SerDes speed 3.125G */ -+ speed_sel = SERDES_SPEED_3_125_G; -+ break; - default: -+ /* Other rates are not supported */ -+ dev_err(lane->dev, -+ "unsupported phy speed %d on comphy lane%d\n", -+ lane->submode, lane->id); - return -EINVAL; - } -+ data = GEN_RX_SEL_VALUE(speed_sel) | GEN_TX_SEL_VALUE(speed_sel); -+ mask = GEN_RX_SEL_MASK | GEN_TX_SEL_MASK; -+ comphy_periph_reg_set(lane, COMPHY_PHY_CFG1, data, mask); -+ -+ /* -+ * 6. Wait 10mS for bandgap and reference clocks to stabilize; then -+ * start SW programming. -+ */ -+ mdelay(10); -+ -+ /* 7. Program COMPHY register PHY_MODE */ -+ data = COMPHY_MODE_SERDES; -+ mask = COMPHY_MODE_MASK; -+ comphy_lane_reg_set(lane, COMPHY_POWER_PLL_CTRL, data, mask); -+ -+ /* -+ * 8. Set COMPHY register REFCLK_SEL to select the correct REFCLK -+ * source -+ */ -+ data = 0x0; -+ mask = PHY_REF_CLK_SEL; -+ comphy_lane_reg_set(lane, COMPHY_MISC_CTRL0, data, mask); -+ -+ /* -+ * 9. Set correct reference clock frequency in COMPHY register -+ * REF_FREF_SEL. -+ */ -+ if (lane->priv->xtal_is_40m) -+ data = REF_FREF_SEL_SERDES_50MHZ; -+ else -+ data = REF_FREF_SEL_SERDES_25MHZ; -+ -+ mask = REF_FREF_SEL_MASK; -+ comphy_lane_reg_set(lane, COMPHY_POWER_PLL_CTRL, data, mask); -+ -+ /* -+ * 10. Program COMPHY register PHY_GEN_MAX[1:0] -+ * This step is mentioned in the flow received from verification team. -+ * However the PHY_GEN_MAX value is only meaningful for other interfaces -+ * (not SERDES). For instance, it selects SATA speed 1.5/3/6 Gbps or -+ * PCIe speed 2.5/5 Gbps -+ */ -+ -+ /* -+ * 11. Program COMPHY register SEL_BITS to set correct parallel data -+ * bus width -+ */ -+ data = DATA_WIDTH_10BIT; -+ mask = SEL_DATA_WIDTH_MASK; -+ comphy_lane_reg_set(lane, COMPHY_DIG_LOOPBACK_EN, data, mask); -+ -+ /* -+ * 12. As long as DFE function needs to be enabled in any mode, -+ * COMPHY register DFE_UPDATE_EN[5:0] shall be programmed to 0x3F -+ * for real chip during COMPHY power on. -+ * The value of the DFE_UPDATE_EN already is 0x3F, because it is the -+ * default value after reset of the PHY. -+ */ -+ -+ /* -+ * 13. Program COMPHY GEN registers. -+ * These registers should be programmed based on the lab testing result -+ * to achieve optimal performance. Please contact the CEA group to get -+ * the related GEN table during real chip bring-up. We only required to -+ * run though the entire registers programming flow defined by -+ * "comphy_gbe_phy_init" when the REF clock is 40 MHz. For REF clock -+ * 25 MHz the default values stored in PHY registers are OK. -+ */ -+ dev_dbg(lane->dev, "Running C-DPI phy init %s mode\n", -+ lane->submode == PHY_INTERFACE_MODE_2500BASEX ? "2G5" : "1G"); -+ if (lane->priv->xtal_is_40m) -+ comphy_gbe_phy_init(lane, -+ lane->submode != PHY_INTERFACE_MODE_2500BASEX); -+ -+ /* -+ * 14. Check the PHY Polarity invert bit -+ */ -+ data = 0x0; -+ if (lane->invert_tx) -+ data |= TXD_INVERT_BIT; -+ if (lane->invert_rx) -+ data |= RXD_INVERT_BIT; -+ mask = TXD_INVERT_BIT | RXD_INVERT_BIT; -+ comphy_lane_reg_set(lane, COMPHY_SYNC_PATTERN, data, mask); -+ -+ /* -+ * 15. Set PHY input ports PIN_PU_PLL, PIN_PU_TX and PIN_PU_RX to 1 to -+ * start PHY power up sequence. 
All the PHY register programming should -+ * be done before PIN_PU_PLL=1. There should be no register programming -+ * for normal PHY operation from this point. -+ */ -+ data = PIN_PU_PLL_BIT | PIN_PU_RX_BIT | PIN_PU_TX_BIT; -+ mask = data; -+ comphy_periph_reg_set(lane, COMPHY_PHY_CFG1, data, mask); -+ -+ /* -+ * 16. Wait for PHY power up sequence to finish by checking output ports -+ * PIN_PLL_READY_TX=1 and PIN_PLL_READY_RX=1. -+ */ -+ ret = comphy_periph_reg_poll(lane, COMPHY_PHY_STAT1, -+ PHY_PLL_READY_TX_BIT | -+ PHY_PLL_READY_RX_BIT, -+ COMPHY_PLL_SLEEP, COMPHY_PLL_TIMEOUT); -+ if (ret) { -+ dev_err(lane->dev, "Failed to lock PLL for SERDES PHY %d\n", -+ lane->id); -+ return ret; -+ } -+ -+ /* -+ * 17. Set COMPHY input port PIN_TX_IDLE=0 -+ */ -+ comphy_periph_reg_set(lane, COMPHY_PHY_CFG1, 0x0, PIN_TX_IDLE_BIT); -+ -+ /* -+ * 18. After valid data appear on PIN_RXDATA bus, set PIN_RX_INIT=1. To -+ * start RX initialization. PIN_RX_INIT_DONE will be cleared to 0 by the -+ * PHY After RX initialization is done, PIN_RX_INIT_DONE will be set to -+ * 1 by COMPHY Set PIN_RX_INIT=0 after PIN_RX_INIT_DONE= 1. Please -+ * refer to RX initialization part for details. -+ */ -+ comphy_periph_reg_set(lane, COMPHY_PHY_CFG1, -+ PHY_RX_INIT_BIT, PHY_RX_INIT_BIT); -+ -+ ret = comphy_periph_reg_poll(lane, COMPHY_PHY_STAT1, -+ PHY_PLL_READY_TX_BIT | -+ PHY_PLL_READY_RX_BIT, -+ COMPHY_PLL_SLEEP, COMPHY_PLL_TIMEOUT); -+ if (ret) { -+ dev_err(lane->dev, "Failed to lock PLL for SERDES PHY %d\n", -+ lane->id); -+ return ret; -+ } -+ -+ ret = comphy_periph_reg_poll(lane, COMPHY_PHY_STAT1, -+ PHY_RX_INIT_DONE_BIT, -+ COMPHY_PLL_SLEEP, COMPHY_PLL_TIMEOUT); -+ if (ret) -+ dev_err(lane->dev, "Failed to init RX of SERDES PHY %d\n", -+ lane->id); -+ -+ return ret; - } - --static int mvebu_a3700_comphy_get_fw_mode(int lane, -+static int -+mvebu_a3700_comphy_usb3_power_on(struct mvebu_a3700_comphy_lane *lane) -+{ -+ u32 mask, data, cfg, ref_clk; -+ int ret; -+ -+ /* Set phy seclector */ -+ ret = mvebu_a3700_comphy_set_phy_selector(lane); -+ if (ret) -+ return ret; -+ -+ /* -+ * 0. Set PHY OTG Control(0x5d034), bit 4, Power up OTG module The -+ * register belong to UTMI module, so it is set in UTMI phy driver. -+ */ -+ -+ /* -+ * 1. Set PRD_TXDEEMPH (3.5db de-emph) -+ */ -+ data = PRD_TXDEEMPH0_MASK; -+ mask = PRD_TXDEEMPH0_MASK | PRD_TXMARGIN_MASK | PRD_TXSWING_MASK | -+ CFG_TX_ALIGN_POS_MASK; -+ comphy_lane_reg_set(lane, COMPHY_PIPE_LANE_CFG0, data, mask); -+ -+ /* -+ * 2. Set BIT0: enable transmitter in high impedance mode -+ * Set BIT[3:4]: delay 2 clock cycles for HiZ off latency -+ * Set BIT6: Tx detect Rx at HiZ mode -+ * Unset BIT15: set to 0 to set USB3 De-emphasize level to -3.5db -+ * together with bit 0 of COMPHY_PIPE_LANE_CFG0 register -+ */ -+ data = TX_DET_RX_MODE | GEN2_TX_DATA_DLY_DEFT | TX_ELEC_IDLE_MODE_EN; -+ mask = PRD_TXDEEMPH1_MASK | TX_DET_RX_MODE | GEN2_TX_DATA_DLY_MASK | -+ TX_ELEC_IDLE_MODE_EN; -+ comphy_lane_reg_set(lane, COMPHY_PIPE_LANE_CFG1, data, mask); -+ -+ /* -+ * 3. Set Spread Spectrum Clock Enabled -+ */ -+ comphy_lane_reg_set(lane, COMPHY_PIPE_LANE_CFG4, -+ SPREAD_SPECTRUM_CLK_EN, SPREAD_SPECTRUM_CLK_EN); -+ -+ /* -+ * 4. Set Override Margining Controls From the MAC: -+ * Use margining signals from lane configuration -+ */ -+ comphy_lane_reg_set(lane, COMPHY_PIPE_TEST_MODE_CTRL, -+ MODE_MARGIN_OVERRIDE, 0xFFFF); -+ -+ /* -+ * 5. 
Set Lane-to-Lane Bundle Clock Sampling Period = per PCLK cycles -+ * set Mode Clock Source = PCLK is generated from REFCLK -+ */ -+ data = 0x0; -+ mask = MODE_CLK_SRC | BUNDLE_PERIOD_SEL | BUNDLE_PERIOD_SCALE_MASK | -+ BUNDLE_SAMPLE_CTRL | PLL_READY_DLY_MASK; -+ comphy_lane_reg_set(lane, COMPHY_PIPE_CLK_SRC_LO, data, mask); -+ -+ /* -+ * 6. Set G2 Spread Spectrum Clock Amplitude at 4K -+ */ -+ comphy_lane_reg_set(lane, COMPHY_GEN2_SET2, -+ GS2_TX_SSC_AMP_4128, GS2_TX_SSC_AMP_MASK); -+ -+ /* -+ * 7. Unset G3 Spread Spectrum Clock Amplitude -+ * set G3 TX and RX Register Master Current Select -+ */ -+ data = GS2_VREG_RXTX_MAS_ISET_60U; -+ mask = GS2_TX_SSC_AMP_MASK | GS2_VREG_RXTX_MAS_ISET_MASK | -+ GS2_RSVD_6_0_MASK; -+ comphy_lane_reg_set(lane, COMPHY_GEN3_SET2, data, mask); -+ -+ /* -+ * 8. Check crystal jumper setting and program the Power and PLL Control -+ * accordingly Change RX wait -+ */ -+ if (lane->priv->xtal_is_40m) { -+ ref_clk = REF_FREF_SEL_PCIE_USB3_40MHZ; -+ cfg = CFG_PM_RXDLOZ_WAIT_12_UNIT; -+ } else { -+ ref_clk = REF_FREF_SEL_PCIE_USB3_25MHZ; -+ cfg = CFG_PM_RXDLOZ_WAIT_7_UNIT; -+ } -+ -+ data = PU_IVREF_BIT | PU_PLL_BIT | PU_RX_BIT | PU_TX_BIT | -+ PU_TX_INTP_BIT | PU_DFE_BIT | COMPHY_MODE_USB3 | ref_clk; -+ mask = PU_IVREF_BIT | PU_PLL_BIT | PU_RX_BIT | PU_TX_BIT | -+ PU_TX_INTP_BIT | PU_DFE_BIT | PLL_LOCK_BIT | COMPHY_MODE_MASK | -+ REF_FREF_SEL_MASK; -+ comphy_lane_reg_set(lane, COMPHY_POWER_PLL_CTRL, data, mask); -+ -+ data = CFG_PM_RXDEN_WAIT_1_UNIT | cfg; -+ mask = CFG_PM_OSCCLK_WAIT_MASK | CFG_PM_RXDEN_WAIT_MASK | -+ CFG_PM_RXDLOZ_WAIT_MASK; -+ comphy_lane_reg_set(lane, COMPHY_PIPE_PWR_MGM_TIM1, data, mask); -+ -+ /* -+ * 9. Enable idle sync -+ */ -+ comphy_lane_reg_set(lane, COMPHY_IDLE_SYNC_EN, -+ IDLE_SYNC_EN, IDLE_SYNC_EN); -+ -+ /* -+ * 10. Enable the output of 500M clock -+ */ -+ comphy_lane_reg_set(lane, COMPHY_MISC_CTRL0, CLK500M_EN, CLK500M_EN); -+ -+ /* -+ * 11. Set 20-bit data width -+ */ -+ comphy_lane_reg_set(lane, COMPHY_DIG_LOOPBACK_EN, -+ DATA_WIDTH_20BIT, 0xFFFF); -+ -+ /* -+ * 12. Override Speed_PLL value and use MAC PLL -+ */ -+ data = SPEED_PLL_VALUE_16 | USE_MAX_PLL_RATE_BIT; -+ mask = 0xFFFF; -+ comphy_lane_reg_set(lane, COMPHY_KVCO_CAL_CTRL, data, mask); -+ -+ /* -+ * 13. Check the Polarity invert bit -+ */ -+ data = 0x0; -+ if (lane->invert_tx) -+ data |= TXD_INVERT_BIT; -+ if (lane->invert_rx) -+ data |= RXD_INVERT_BIT; -+ mask = TXD_INVERT_BIT | RXD_INVERT_BIT; -+ comphy_lane_reg_set(lane, COMPHY_SYNC_PATTERN, data, mask); -+ -+ /* -+ * 14. Set max speed generation to USB3.0 5Gbps -+ */ -+ comphy_lane_reg_set(lane, COMPHY_SYNC_MASK_GEN, -+ PHY_GEN_MAX_USB3_5G, PHY_GEN_MAX_MASK); -+ -+ /* -+ * 15. Set capacitor value for FFE gain peaking to 0xF -+ */ -+ comphy_lane_reg_set(lane, COMPHY_GEN2_SET3, -+ GS3_FFE_CAP_SEL_VALUE, GS3_FFE_CAP_SEL_MASK); -+ -+ /* -+ * 16. 
Release SW reset -+ */ -+ data = MODE_CORE_CLK_FREQ_SEL | MODE_PIPE_WIDTH_32 | MODE_REFDIV_BY_4; -+ mask = 0xFFFF; -+ comphy_lane_reg_set(lane, COMPHY_PIPE_RST_CLK_CTRL, data, mask); -+ -+ /* Wait for > 55 us to allow PCLK be enabled */ -+ udelay(PLL_SET_DELAY_US); -+ -+ ret = comphy_lane_reg_poll(lane, COMPHY_PIPE_LANE_STAT1, TXDCLK_PCLK_EN, -+ COMPHY_PLL_SLEEP, COMPHY_PLL_TIMEOUT); -+ if (ret) -+ dev_err(lane->dev, "Failed to lock USB3 PLL\n"); -+ -+ return ret; -+} -+ -+static int -+mvebu_a3700_comphy_pcie_power_on(struct mvebu_a3700_comphy_lane *lane) -+{ -+ u32 mask, data, ref_clk; -+ int ret; -+ -+ /* Configure phy selector for PCIe */ -+ ret = mvebu_a3700_comphy_set_phy_selector(lane); -+ if (ret) -+ return ret; -+ -+ /* 1. Enable max PLL. */ -+ comphy_lane_reg_set(lane, COMPHY_PIPE_LANE_CFG1, -+ USE_MAX_PLL_RATE_EN, USE_MAX_PLL_RATE_EN); -+ -+ /* 2. Select 20 bit SERDES interface. */ -+ comphy_lane_reg_set(lane, COMPHY_PIPE_CLK_SRC_LO, -+ CFG_SEL_20B, CFG_SEL_20B); -+ -+ /* 3. Force to use reg setting for PCIe mode */ -+ comphy_lane_reg_set(lane, COMPHY_MISC_CTRL1, -+ SEL_BITS_PCIE_FORCE, SEL_BITS_PCIE_FORCE); -+ -+ /* 4. Change RX wait */ -+ data = CFG_PM_RXDEN_WAIT_1_UNIT | CFG_PM_RXDLOZ_WAIT_12_UNIT; -+ mask = CFG_PM_OSCCLK_WAIT_MASK | CFG_PM_RXDEN_WAIT_MASK | -+ CFG_PM_RXDLOZ_WAIT_MASK; -+ comphy_lane_reg_set(lane, COMPHY_PIPE_PWR_MGM_TIM1, data, mask); -+ -+ /* 5. Enable idle sync */ -+ comphy_lane_reg_set(lane, COMPHY_IDLE_SYNC_EN, -+ IDLE_SYNC_EN, IDLE_SYNC_EN); -+ -+ /* 6. Enable the output of 100M/125M/500M clock */ -+ data = CLK500M_EN | TXDCLK_2X_SEL | CLK100M_125M_EN; -+ mask = data; -+ comphy_lane_reg_set(lane, COMPHY_MISC_CTRL0, data, mask); -+ -+ /* -+ * 7. Enable TX, PCIE global register, 0xd0074814, it is done in -+ * PCI-E driver -+ */ -+ -+ /* -+ * 8. Check crystal jumper setting and program the Power and PLL -+ * Control accordingly -+ */ -+ -+ if (lane->priv->xtal_is_40m) -+ ref_clk = REF_FREF_SEL_PCIE_USB3_40MHZ; -+ else -+ ref_clk = REF_FREF_SEL_PCIE_USB3_25MHZ; -+ -+ data = PU_IVREF_BIT | PU_PLL_BIT | PU_RX_BIT | PU_TX_BIT | -+ PU_TX_INTP_BIT | PU_DFE_BIT | COMPHY_MODE_PCIE | ref_clk; -+ mask = 0xFFFF; -+ comphy_lane_reg_set(lane, COMPHY_POWER_PLL_CTRL, data, mask); -+ -+ /* 9. Override Speed_PLL value and use MAC PLL */ -+ comphy_lane_reg_set(lane, COMPHY_KVCO_CAL_CTRL, -+ SPEED_PLL_VALUE_16 | USE_MAX_PLL_RATE_BIT, -+ 0xFFFF); -+ -+ /* 10. Check the Polarity invert bit */ -+ data = 0x0; -+ if (lane->invert_tx) -+ data |= TXD_INVERT_BIT; -+ if (lane->invert_rx) -+ data |= RXD_INVERT_BIT; -+ mask = TXD_INVERT_BIT | RXD_INVERT_BIT; -+ comphy_lane_reg_set(lane, COMPHY_SYNC_PATTERN, data, mask); -+ -+ /* 11. 
Release SW reset */ -+ data = MODE_CORE_CLK_FREQ_SEL | MODE_PIPE_WIDTH_32; -+ mask = data | PIPE_SOFT_RESET | MODE_REFDIV_MASK; -+ comphy_lane_reg_set(lane, COMPHY_PIPE_RST_CLK_CTRL, data, mask); -+ -+ /* Wait for > 55 us to allow PCLK be enabled */ -+ udelay(PLL_SET_DELAY_US); -+ -+ ret = comphy_lane_reg_poll(lane, COMPHY_PIPE_LANE_STAT1, TXDCLK_PCLK_EN, -+ COMPHY_PLL_SLEEP, COMPHY_PLL_TIMEOUT); -+ if (ret) -+ dev_err(lane->dev, "Failed to lock PCIE PLL\n"); -+ -+ return ret; -+} -+ -+static void -+mvebu_a3700_comphy_sata_power_off(struct mvebu_a3700_comphy_lane *lane) -+{ -+ /* Set phy isolation mode */ -+ comphy_lane_reg_set(lane, COMPHY_ISOLATION_CTRL, -+ PHY_ISOLATE_MODE, PHY_ISOLATE_MODE); -+ -+ /* Power off PLL, Tx, Rx */ -+ comphy_lane_reg_set(lane, COMPHY_POWER_PLL_CTRL, -+ 0x0, PU_PLL_BIT | PU_RX_BIT | PU_TX_BIT); -+} -+ -+static void -+mvebu_a3700_comphy_ethernet_power_off(struct mvebu_a3700_comphy_lane *lane) -+{ -+ u32 mask, data; -+ -+ data = PIN_RESET_CORE_BIT | PIN_RESET_COMPHY_BIT | PIN_PU_IVREF_BIT | -+ PHY_RX_INIT_BIT; -+ mask = data; -+ comphy_periph_reg_set(lane, COMPHY_PHY_CFG1, data, mask); -+} -+ -+static void -+mvebu_a3700_comphy_pcie_power_off(struct mvebu_a3700_comphy_lane *lane) -+{ -+ /* Power off PLL, Tx, Rx */ -+ comphy_lane_reg_set(lane, COMPHY_POWER_PLL_CTRL, -+ 0x0, PU_PLL_BIT | PU_RX_BIT | PU_TX_BIT); -+} -+ -+static int mvebu_a3700_comphy_reset(struct phy *phy) -+{ -+ struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy); -+ u16 mask, data; -+ -+ dev_dbg(lane->dev, "resetting lane %d\n", lane->id); -+ -+ /* COMPHY reset for internal logic */ -+ comphy_lane_reg_set(lane, COMPHY_SFT_RESET, -+ SFT_RST_NO_REG, SFT_RST_NO_REG); -+ -+ /* COMPHY register reset (cleared automatically) */ -+ comphy_lane_reg_set(lane, COMPHY_SFT_RESET, SFT_RST, SFT_RST); -+ -+ /* PIPE soft and register reset */ -+ data = PIPE_SOFT_RESET | PIPE_REG_RESET; -+ mask = data; -+ comphy_lane_reg_set(lane, COMPHY_PIPE_RST_CLK_CTRL, data, mask); -+ -+ /* Release PIPE register reset */ -+ comphy_lane_reg_set(lane, COMPHY_PIPE_RST_CLK_CTRL, -+ 0x0, PIPE_REG_RESET); -+ -+ /* Reset SB configuration register (only for lanes 0 and 1) */ -+ if (lane->id == 0 || lane->id == 1) { -+ u32 mask, data; -+ -+ data = PIN_RESET_CORE_BIT | PIN_RESET_COMPHY_BIT | -+ PIN_PU_PLL_BIT | PIN_PU_RX_BIT | PIN_PU_TX_BIT; -+ mask = data | PIN_PU_IVREF_BIT | PIN_TX_IDLE_BIT; -+ comphy_periph_reg_set(lane, COMPHY_PHY_CFG1, data, mask); -+ } -+ -+ return 0; -+} -+ -+static bool mvebu_a3700_comphy_check_mode(int lane, - enum phy_mode mode, - int submode) - { -@@ -122,7 +1141,7 @@ static int mvebu_a3700_comphy_get_fw_mod - - /* Unused PHY mux value is 0x0 */ - if (mode == PHY_MODE_INVALID) -- return -EINVAL; -+ return false; - - for (i = 0; i < n; i++) { - if (mvebu_a3700_comphy_modes[i].lane == lane && -@@ -132,27 +1151,30 @@ static int mvebu_a3700_comphy_get_fw_mod - } - - if (i == n) -- return -EINVAL; -+ return false; - -- return mvebu_a3700_comphy_modes[i].fw_mode; -+ return true; - } - - static int mvebu_a3700_comphy_set_mode(struct phy *phy, enum phy_mode mode, - int submode) - { - struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy); -- int fw_mode; - -- if (submode == PHY_INTERFACE_MODE_1000BASEX) -- submode = PHY_INTERFACE_MODE_SGMII; -- -- fw_mode = mvebu_a3700_comphy_get_fw_mode(lane->id, mode, -- submode); -- if (fw_mode < 0) { -+ if (!mvebu_a3700_comphy_check_mode(lane->id, mode, submode)) { - dev_err(lane->dev, "invalid COMPHY mode\n"); -- return fw_mode; -+ return -EINVAL; - } - -+ /* Mode 
cannot be changed while the PHY is powered on */ -+ if (phy->power_count && -+ (lane->mode != mode || lane->submode != submode)) -+ return -EBUSY; -+ -+ /* If changing mode, ensure reset is called */ -+ if (lane->mode != PHY_MODE_INVALID && lane->mode != mode) -+ lane->needs_reset = true; -+ - /* Just remember the mode, ->power_on() will do the real setup */ - lane->mode = mode; - lane->submode = submode; -@@ -163,76 +1185,77 @@ static int mvebu_a3700_comphy_set_mode(s - static int mvebu_a3700_comphy_power_on(struct phy *phy) - { - struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy); -- u32 fw_param; -- int fw_mode; -- int fw_port; - int ret; - -- fw_mode = mvebu_a3700_comphy_get_fw_mode(lane->id, -- lane->mode, lane->submode); -- if (fw_mode < 0) { -+ if (!mvebu_a3700_comphy_check_mode(lane->id, lane->mode, -+ lane->submode)) { - dev_err(lane->dev, "invalid COMPHY mode\n"); -- return fw_mode; -+ return -EINVAL; -+ } -+ -+ if (lane->needs_reset) { -+ ret = mvebu_a3700_comphy_reset(phy); -+ if (ret) -+ return ret; -+ -+ lane->needs_reset = false; - } - - switch (lane->mode) { - case PHY_MODE_USB_HOST_SS: - dev_dbg(lane->dev, "set lane %d to USB3 host mode\n", lane->id); -- fw_param = COMPHY_FW_MODE(fw_mode); -- break; -+ return mvebu_a3700_comphy_usb3_power_on(lane); - case PHY_MODE_SATA: - dev_dbg(lane->dev, "set lane %d to SATA mode\n", lane->id); -- fw_param = COMPHY_FW_MODE(fw_mode); -- break; -+ return mvebu_a3700_comphy_sata_power_on(lane); - case PHY_MODE_ETHERNET: -- fw_port = (lane->id == 0) ? 1 : 0; -- switch (lane->submode) { -- case PHY_INTERFACE_MODE_SGMII: -- dev_dbg(lane->dev, "set lane %d to SGMII mode\n", -- lane->id); -- fw_param = COMPHY_FW_NET(fw_mode, fw_port, -- COMPHY_FW_SPEED_1_25G); -- break; -- case PHY_INTERFACE_MODE_2500BASEX: -- dev_dbg(lane->dev, "set lane %d to 2500BASEX mode\n", -- lane->id); -- fw_param = COMPHY_FW_NET(fw_mode, fw_port, -- COMPHY_FW_SPEED_3_125G); -- break; -- default: -- dev_err(lane->dev, "unsupported PHY submode (%d)\n", -- lane->submode); -- return -ENOTSUPP; -- } -- break; -+ dev_dbg(lane->dev, "set lane %d to Ethernet mode\n", lane->id); -+ return mvebu_a3700_comphy_ethernet_power_on(lane); - case PHY_MODE_PCIE: - dev_dbg(lane->dev, "set lane %d to PCIe mode\n", lane->id); -- fw_param = COMPHY_FW_PCIE(fw_mode, COMPHY_FW_SPEED_5G, -- phy->attrs.bus_width); -- break; -+ return mvebu_a3700_comphy_pcie_power_on(lane); - default: - dev_err(lane->dev, "unsupported PHY mode (%d)\n", lane->mode); -- return -ENOTSUPP; -+ return -EOPNOTSUPP; - } -- -- ret = mvebu_a3700_comphy_smc(COMPHY_SIP_POWER_ON, lane->id, fw_param); -- if (ret == -EOPNOTSUPP) -- dev_err(lane->dev, -- "unsupported SMC call, try updating your firmware\n"); -- -- return ret; - } - - static int mvebu_a3700_comphy_power_off(struct phy *phy) - { - struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy); - -- return mvebu_a3700_comphy_smc(COMPHY_SIP_POWER_OFF, lane->id, 0); -+ switch (lane->mode) { -+ case PHY_MODE_USB_HOST_SS: -+ /* -+ * The USB3 MAC sets the USB3 PHY to low state, so we do not -+ * need to power off USB3 PHY again. 
-+ */ -+ break; -+ -+ case PHY_MODE_SATA: -+ mvebu_a3700_comphy_sata_power_off(lane); -+ break; -+ -+ case PHY_MODE_ETHERNET: -+ mvebu_a3700_comphy_ethernet_power_off(lane); -+ break; -+ -+ case PHY_MODE_PCIE: -+ mvebu_a3700_comphy_pcie_power_off(lane); -+ break; -+ -+ default: -+ dev_err(lane->dev, "invalid COMPHY mode\n"); -+ return -EINVAL; -+ } -+ -+ return 0; - } - - static const struct phy_ops mvebu_a3700_comphy_ops = { - .power_on = mvebu_a3700_comphy_power_on, - .power_off = mvebu_a3700_comphy_power_off, -+ .reset = mvebu_a3700_comphy_reset, - .set_mode = mvebu_a3700_comphy_set_mode, - .owner = THIS_MODULE, - }; -@@ -256,13 +1279,75 @@ static struct phy *mvebu_a3700_comphy_xl - return ERR_PTR(-EINVAL); - } - -+ lane->invert_tx = args->args[1] & BIT(0); -+ lane->invert_rx = args->args[1] & BIT(1); -+ - return phy; - } - - static int mvebu_a3700_comphy_probe(struct platform_device *pdev) - { -+ struct mvebu_a3700_comphy_priv *priv; - struct phy_provider *provider; - struct device_node *child; -+ struct resource *res; -+ struct clk *clk; -+ int ret; -+ -+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); -+ if (!priv) -+ return -ENOMEM; -+ -+ spin_lock_init(&priv->lock); -+ -+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "comphy"); -+ priv->comphy_regs = devm_ioremap_resource(&pdev->dev, res); -+ if (IS_ERR(priv->comphy_regs)) -+ return PTR_ERR(priv->comphy_regs); -+ -+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, -+ "lane1_pcie_gbe"); -+ priv->lane1_phy_regs = devm_ioremap_resource(&pdev->dev, res); -+ if (IS_ERR(priv->lane1_phy_regs)) -+ return PTR_ERR(priv->lane1_phy_regs); -+ -+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, -+ "lane0_usb3_gbe"); -+ priv->lane0_phy_regs = devm_ioremap_resource(&pdev->dev, res); -+ if (IS_ERR(priv->lane0_phy_regs)) -+ return PTR_ERR(priv->lane0_phy_regs); -+ -+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, -+ "lane2_sata_usb3"); -+ priv->lane2_phy_indirect = devm_ioremap_resource(&pdev->dev, res); -+ if (IS_ERR(priv->lane2_phy_indirect)) -+ return PTR_ERR(priv->lane2_phy_indirect); -+ -+ /* -+ * Driver needs to know if reference xtal clock is 40MHz or 25MHz. -+ * Old DT bindings do not have xtal clk present. So do not fail here -+ * and expects that default 25MHz reference clock is used. 
-+ */ -+ clk = clk_get(&pdev->dev, "xtal"); -+ if (IS_ERR(clk)) { -+ if (PTR_ERR(clk) == -EPROBE_DEFER) -+ return -EPROBE_DEFER; -+ dev_warn(&pdev->dev, "missing 'xtal' clk (%ld)\n", -+ PTR_ERR(clk)); -+ } else { -+ ret = clk_prepare_enable(clk); -+ if (ret) { -+ dev_warn(&pdev->dev, "enabling xtal clk failed (%d)\n", -+ ret); -+ } else { -+ if (clk_get_rate(clk) == 40000000) -+ priv->xtal_is_40m = true; -+ clk_disable_unprepare(clk); -+ } -+ clk_put(clk); -+ } -+ -+ dev_set_drvdata(&pdev->dev, priv); - - for_each_available_child_of_node(pdev->dev.of_node, child) { - struct mvebu_a3700_comphy_lane *lane; -@@ -277,7 +1362,7 @@ static int mvebu_a3700_comphy_probe(stru - continue; - } - -- if (lane_id >= MVEBU_A3700_COMPHY_LANES) { -+ if (lane_id >= 3) { - dev_err(&pdev->dev, "invalid 'reg' property\n"); - continue; - } -@@ -295,15 +1380,26 @@ static int mvebu_a3700_comphy_probe(stru - return PTR_ERR(phy); - } - -+ lane->priv = priv; - lane->dev = &pdev->dev; - lane->mode = PHY_MODE_INVALID; - lane->submode = PHY_INTERFACE_MODE_NA; - lane->id = lane_id; -+ lane->invert_tx = false; -+ lane->invert_rx = false; - phy_set_drvdata(phy, lane); -+ -+ /* -+ * To avoid relying on the bootloader/firmware configuration, -+ * power off all comphys. -+ */ -+ mvebu_a3700_comphy_reset(phy); -+ lane->needs_reset = false; - } - - provider = devm_of_phy_provider_register(&pdev->dev, - mvebu_a3700_comphy_xlate); -+ - return PTR_ERR_OR_ZERO(provider); - } - -@@ -323,5 +1419,7 @@ static struct platform_driver mvebu_a370 - module_platform_driver(mvebu_a3700_comphy_driver); - - MODULE_AUTHOR("Miquèl Raynal "); -+MODULE_AUTHOR("Pali Rohár "); -+MODULE_AUTHOR("Marek Behún "); - MODULE_DESCRIPTION("Common PHY driver for A3700"); - MODULE_LICENSE("GPL v2"); diff --git a/target/linux/generic/backport-6.1/345-v5.17-arm64-dts-marvell-armada-37xx-Add-xtal-clock-to-comp.patch b/target/linux/generic/backport-6.1/345-v5.17-arm64-dts-marvell-armada-37xx-Add-xtal-clock-to-comp.patch deleted file mode 100644 index 03b6a5754d96..000000000000 --- a/target/linux/generic/backport-6.1/345-v5.17-arm64-dts-marvell-armada-37xx-Add-xtal-clock-to-comp.patch +++ /dev/null @@ -1,32 +0,0 @@ -From 73a78b6130d9e13daca22b86ad52f063b9403e03 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Pali=20Roh=C3=A1r?= -Date: Wed, 8 Dec 2021 03:40:35 +0100 -Subject: [PATCH 1/1] arm64: dts: marvell: armada-37xx: Add xtal clock to - comphy node -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Kernel driver phy-mvebu-a3700-comphy.c needs to know the rate of the -reference xtal clock. So add missing xtal clock source into comphy device -tree node. If the property is not present, the driver defaults to 25 MHz -xtal rate (which, as far as we know, is used by all the existing boards). 
- -Signed-off-by: Pali Rohár -Signed-off-by: Marek Behún -Signed-off-by: Gregory CLEMENT ---- - arch/arm64/boot/dts/marvell/armada-37xx.dtsi | 2 ++ - 1 file changed, 2 insertions(+) - ---- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi -+++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi -@@ -265,6 +265,8 @@ - "lane2_sata_usb3"; - #address-cells = <1>; - #size-cells = <0>; -+ clocks = <&xtalclk>; -+ clock-names = "xtal"; - - comphy0: phy@0 { - reg = <0>; diff --git a/target/linux/generic/backport-6.1/346-v5.18-01-Revert-ata-ahci-mvebu-Make-SATA-PHY-optional-for-Arm.patch b/target/linux/generic/backport-6.1/346-v5.18-01-Revert-ata-ahci-mvebu-Make-SATA-PHY-optional-for-Arm.patch deleted file mode 100644 index b254e7c0b3fb..000000000000 --- a/target/linux/generic/backport-6.1/346-v5.18-01-Revert-ata-ahci-mvebu-Make-SATA-PHY-optional-for-Arm.patch +++ /dev/null @@ -1,64 +0,0 @@ -From ee995101fde67f85a3cd4c74f4f92fc4592e726b Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Pali=20Roh=C3=A1r?= -Date: Thu, 3 Feb 2022 22:44:42 +0100 -Subject: [PATCH 1/3] Revert "ata: ahci: mvebu: Make SATA PHY optional for - Armada 3720" -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -This reverts commit 45aefe3d2251e4e229d7662052739f96ad1d08d9. - -Armada 3720 PHY driver (phy-mvebu-a3700-comphy.c) does not return --EOPNOTSUPP from phy_power_on() callback anymore. - -So remove AHCI_HFLAG_IGN_NOTSUPP_POWER_ON flag from Armada 3720 plat data. - -AHCI_HFLAG_IGN_NOTSUPP_POWER_ON is not used by any other ahci driver, so -remove this flag completely. - -Signed-off-by: Pali Rohár -Signed-off-by: Marek Behún -Acked-by: Miquel Raynal -Acked-by: Damien Le Moal -Link: https://lore.kernel.org/r/20220203214444.1508-4-kabel@kernel.org -Signed-off-by: Vinod Koul ---- - drivers/ata/ahci.h | 2 -- - drivers/ata/ahci_mvebu.c | 2 +- - drivers/ata/libahci_platform.c | 2 +- - 3 files changed, 2 insertions(+), 4 deletions(-) - ---- a/drivers/ata/ahci.h -+++ b/drivers/ata/ahci.h -@@ -240,8 +240,6 @@ enum { - as default lpm_policy */ - AHCI_HFLAG_SUSPEND_PHYS = (1 << 26), /* handle PHYs during - suspend/resume */ -- AHCI_HFLAG_IGN_NOTSUPP_POWER_ON = (1 << 27), /* ignore -EOPNOTSUPP -- from phy_power_on() */ - AHCI_HFLAG_NO_SXS = (1 << 28), /* SXS not supported */ - - /* ap->flags bits */ ---- a/drivers/ata/ahci_mvebu.c -+++ b/drivers/ata/ahci_mvebu.c -@@ -227,7 +227,7 @@ static const struct ahci_mvebu_plat_data - - static const struct ahci_mvebu_plat_data ahci_mvebu_armada_3700_plat_data = { - .plat_config = ahci_mvebu_armada_3700_config, -- .flags = AHCI_HFLAG_SUSPEND_PHYS | AHCI_HFLAG_IGN_NOTSUPP_POWER_ON, -+ .flags = AHCI_HFLAG_SUSPEND_PHYS, - }; - - static const struct of_device_id ahci_mvebu_of_match[] = { ---- a/drivers/ata/libahci_platform.c -+++ b/drivers/ata/libahci_platform.c -@@ -59,7 +59,7 @@ int ahci_platform_enable_phys(struct ahc - } - - rc = phy_power_on(hpriv->phys[i]); -- if (rc && !(rc == -EOPNOTSUPP && (hpriv->flags & AHCI_HFLAG_IGN_NOTSUPP_POWER_ON))) { -+ if (rc) { - phy_exit(hpriv->phys[i]); - goto disable_phys; - } diff --git a/target/linux/generic/backport-6.1/346-v5.18-02-Revert-usb-host-xhci-mvebu-make-USB-3.0-PHY-optional.patch b/target/linux/generic/backport-6.1/346-v5.18-02-Revert-usb-host-xhci-mvebu-make-USB-3.0-PHY-optional.patch deleted file mode 100644 index 1e8afb7bbf57..000000000000 --- a/target/linux/generic/backport-6.1/346-v5.18-02-Revert-usb-host-xhci-mvebu-make-USB-3.0-PHY-optional.patch +++ /dev/null @@ -1,166 +0,0 @@ -From 
8e10548f7f4814e530857d2049d6af6bc78add53 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Pali=20Roh=C3=A1r?= -Date: Thu, 3 Feb 2022 22:44:43 +0100 -Subject: [PATCH 2/3] Revert "usb: host: xhci: mvebu: make USB 3.0 PHY optional - for Armada 3720" -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -This reverts commit 3241929b67d28c83945d3191c6816a3271fd6b85. - -Armada 3720 phy driver (phy-mvebu-a3700-comphy.c) does not return --EOPNOTSUPP from phy_power_on() callback anymore. - -So remove XHCI_SKIP_PHY_INIT flag from xhci_mvebu_a3700_plat_setup() and -then also whole xhci_mvebu_a3700_plat_setup() function which is there just -to handle -EOPNOTSUPP for XHCI_SKIP_PHY_INIT. - -xhci plat_setup callback is not used by any other xhci plat driver, so -remove this callback completely. - -Signed-off-by: Pali Rohár -Signed-off-by: Marek Behún -Acked-by: Miquel Raynal -Acked-by: Greg Kroah-Hartman -Link: https://lore.kernel.org/r/20220203214444.1508-5-kabel@kernel.org -Signed-off-by: Vinod Koul ---- - drivers/usb/host/xhci-mvebu.c | 42 ----------------------------------- - drivers/usb/host/xhci-mvebu.h | 6 ----- - drivers/usb/host/xhci-plat.c | 20 +---------------- - drivers/usb/host/xhci-plat.h | 1 - - 4 files changed, 1 insertion(+), 68 deletions(-) - ---- a/drivers/usb/host/xhci-mvebu.c -+++ b/drivers/usb/host/xhci-mvebu.c -@@ -8,7 +8,6 @@ - #include - #include - #include --#include - - #include - #include -@@ -74,47 +73,6 @@ int xhci_mvebu_mbus_init_quirk(struct us - - return 0; - } -- --int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd) --{ -- struct xhci_hcd *xhci = hcd_to_xhci(hcd); -- struct device *dev = hcd->self.controller; -- struct phy *phy; -- int ret; -- -- /* Old bindings miss the PHY handle */ -- phy = of_phy_get(dev->of_node, "usb3-phy"); -- if (IS_ERR(phy) && PTR_ERR(phy) == -EPROBE_DEFER) -- return -EPROBE_DEFER; -- else if (IS_ERR(phy)) -- goto phy_out; -- -- ret = phy_init(phy); -- if (ret) -- goto phy_put; -- -- ret = phy_set_mode(phy, PHY_MODE_USB_HOST_SS); -- if (ret) -- goto phy_exit; -- -- ret = phy_power_on(phy); -- if (ret == -EOPNOTSUPP) { -- /* Skip initializatin of XHCI PHY when it is unsupported by firmware */ -- dev_warn(dev, "PHY unsupported by firmware\n"); -- xhci->quirks |= XHCI_SKIP_PHY_INIT; -- } -- if (ret) -- goto phy_exit; -- -- phy_power_off(phy); --phy_exit: -- phy_exit(phy); --phy_put: -- of_phy_put(phy); --phy_out: -- -- return 0; --} - - int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd) - { ---- a/drivers/usb/host/xhci-mvebu.h -+++ b/drivers/usb/host/xhci-mvebu.h -@@ -12,18 +12,12 @@ struct usb_hcd; - - #if IS_ENABLED(CONFIG_USB_XHCI_MVEBU) - int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd); --int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd); - int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd); - #else - static inline int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd) - { - return 0; - } -- --static inline int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd) --{ -- return 0; --} - - static inline int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd) - { ---- a/drivers/usb/host/xhci-plat.c -+++ b/drivers/usb/host/xhci-plat.c -@@ -44,16 +44,6 @@ static void xhci_priv_plat_start(struct - priv->plat_start(hcd); - } - --static int xhci_priv_plat_setup(struct usb_hcd *hcd) --{ -- struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); -- -- if (!priv->plat_setup) -- return 0; -- -- return priv->plat_setup(hcd); --} -- - static int xhci_priv_init_quirk(struct usb_hcd *hcd) - { - struct xhci_plat_priv *priv = 
hcd_to_xhci_priv(hcd); -@@ -121,7 +111,6 @@ static const struct xhci_plat_priv xhci_ - }; - - static const struct xhci_plat_priv xhci_plat_marvell_armada3700 = { -- .plat_setup = xhci_mvebu_a3700_plat_setup, - .init_quirk = xhci_mvebu_a3700_init_quirk, - }; - -@@ -341,14 +330,7 @@ static int xhci_plat_probe(struct platfo - - hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node); - xhci->shared_hcd->tpl_support = hcd->tpl_support; -- -- if (priv) { -- ret = xhci_priv_plat_setup(hcd); -- if (ret) -- goto disable_usb_phy; -- } -- -- if ((xhci->quirks & XHCI_SKIP_PHY_INIT) || (priv && (priv->quirks & XHCI_SKIP_PHY_INIT))) -+ if (priv && (priv->quirks & XHCI_SKIP_PHY_INIT)) - hcd->skip_phy_initialization = 1; - - if (priv && (priv->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK)) ---- a/drivers/usb/host/xhci-plat.h -+++ b/drivers/usb/host/xhci-plat.h -@@ -13,7 +13,6 @@ - struct xhci_plat_priv { - const char *firmware_name; - unsigned long long quirks; -- int (*plat_setup)(struct usb_hcd *); - void (*plat_start)(struct usb_hcd *); - int (*init_quirk)(struct usb_hcd *); - int (*suspend_quirk)(struct usb_hcd *); diff --git a/target/linux/generic/backport-6.1/346-v5.18-03-Revert-PCI-aardvark-Fix-initialization-with-old-Marv.patch b/target/linux/generic/backport-6.1/346-v5.18-03-Revert-PCI-aardvark-Fix-initialization-with-old-Marv.patch deleted file mode 100644 index fcfb02d35a1f..000000000000 --- a/target/linux/generic/backport-6.1/346-v5.18-03-Revert-PCI-aardvark-Fix-initialization-with-old-Marv.patch +++ /dev/null @@ -1,39 +0,0 @@ -From 9a4556dad7bd0a6b8339cb72e169f5c76f2af6f1 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Pali=20Roh=C3=A1r?= -Date: Thu, 3 Feb 2022 22:44:44 +0100 -Subject: [PATCH 3/3] Revert "PCI: aardvark: Fix initialization with old - Marvell's Arm Trusted Firmware" -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -This reverts commit b0c6ae0f8948a2be6bf4e8b4bbab9ca1343289b6. - -Armada 3720 phy driver (phy-mvebu-a3700-comphy.c) does not return --EOPNOTSUPP from phy_power_on() callback anymore. - -So remove dead code which handles -EOPNOTSUPP return value. 
- -Signed-off-by: Pali Rohár -Signed-off-by: Marek Behún -Acked-by: Miquel Raynal -Acked-by: Lorenzo Pieralisi -Link: https://lore.kernel.org/r/20220203214444.1508-6-kabel@kernel.org -Signed-off-by: Vinod Koul ---- - drivers/pci/controller/pci-aardvark.c | 4 +--- - 1 file changed, 1 insertion(+), 3 deletions(-) - ---- a/drivers/pci/controller/pci-aardvark.c -+++ b/drivers/pci/controller/pci-aardvark.c -@@ -1642,9 +1642,7 @@ static int advk_pcie_enable_phy(struct a - } - - ret = phy_power_on(pcie->phy); -- if (ret == -EOPNOTSUPP) { -- dev_warn(&pcie->pdev->dev, "PHY unsupported by firmware\n"); -- } else if (ret) { -+ if (ret) { - phy_exit(pcie->phy); - return ret; - } diff --git a/target/linux/generic/backport-6.1/347-v6.0-phy-marvell-phy-mvebu-a3700-comphy-Remove-broken-res.patch b/target/linux/generic/backport-6.1/347-v6.0-phy-marvell-phy-mvebu-a3700-comphy-Remove-broken-res.patch deleted file mode 100644 index a2c897b7a9aa..000000000000 --- a/target/linux/generic/backport-6.1/347-v6.0-phy-marvell-phy-mvebu-a3700-comphy-Remove-broken-res.patch +++ /dev/null @@ -1,194 +0,0 @@ -From 0a6fc70d76bddf98278af2ac000379c82aec8f11 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Pali=20Roh=C3=A1r?= -Date: Mon, 29 Aug 2022 10:30:46 +0200 -Subject: [PATCH] phy: marvell: phy-mvebu-a3700-comphy: Remove broken reset - support -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Reset support for SATA PHY is somehow broken and after calling it, kernel -is not able to detect and initialize SATA disk Samsung SSD 850 EMT0 [1]. - -Reset support was introduced in commit 934337080c6c ("phy: marvell: -phy-mvebu-a3700-comphy: Add native kernel implementation") as part of -complete rewrite of this driver. v1 patch series of that commit [2] did -not contain reset support and was tested that is working fine with -Ethernet, SATA and USB PHYs without issues too. - -So for now remove broken reset support and change implementation of -power_off callback to power off all functions on specified lane (and not -only selected function) because during startup kernel does not know which -function was selected and configured by bootloader. Same logic was used -also in v1 patch series of that commit. - -This change fixes issues with initialization of SATA disk Samsung SSD 850 -and disk is working again, like before mentioned commit. - -Once problem with PHY reset callback is solved its functionality could be -re-introduced. But for now it is unknown why it does not work. 
- -[1] - https://lore.kernel.org/r/20220531124159.3e4lgn2v462irbtz@shindev/ -[2] - https://lore.kernel.org/r/20211028184242.22105-1-kabel@kernel.org/ - -Reported-by: Shinichiro Kawasaki -Fixes: 934337080c6c ("phy: marvell: phy-mvebu-a3700-comphy: Add native kernel implementation") -Cc: stable@vger.kernel.org # v5.18+ -Signed-off-by: Pali Rohár -Tested-by: Shinichiro Kawasaki -Link: https://lore.kernel.org/r/20220829083046.15082-1-pali@kernel.org -Signed-off-by: Vinod Koul ---- - drivers/phy/marvell/phy-mvebu-a3700-comphy.c | 87 ++++---------------- - 1 file changed, 17 insertions(+), 70 deletions(-) - ---- a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c -+++ b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c -@@ -274,7 +274,6 @@ struct mvebu_a3700_comphy_lane { - int submode; - bool invert_tx; - bool invert_rx; -- bool needs_reset; - }; - - struct gbe_phy_init_data_fix { -@@ -1097,40 +1096,12 @@ mvebu_a3700_comphy_pcie_power_off(struct - 0x0, PU_PLL_BIT | PU_RX_BIT | PU_TX_BIT); - } - --static int mvebu_a3700_comphy_reset(struct phy *phy) -+static void mvebu_a3700_comphy_usb3_power_off(struct mvebu_a3700_comphy_lane *lane) - { -- struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy); -- u16 mask, data; -- -- dev_dbg(lane->dev, "resetting lane %d\n", lane->id); -- -- /* COMPHY reset for internal logic */ -- comphy_lane_reg_set(lane, COMPHY_SFT_RESET, -- SFT_RST_NO_REG, SFT_RST_NO_REG); -- -- /* COMPHY register reset (cleared automatically) */ -- comphy_lane_reg_set(lane, COMPHY_SFT_RESET, SFT_RST, SFT_RST); -- -- /* PIPE soft and register reset */ -- data = PIPE_SOFT_RESET | PIPE_REG_RESET; -- mask = data; -- comphy_lane_reg_set(lane, COMPHY_PIPE_RST_CLK_CTRL, data, mask); -- -- /* Release PIPE register reset */ -- comphy_lane_reg_set(lane, COMPHY_PIPE_RST_CLK_CTRL, -- 0x0, PIPE_REG_RESET); -- -- /* Reset SB configuration register (only for lanes 0 and 1) */ -- if (lane->id == 0 || lane->id == 1) { -- u32 mask, data; -- -- data = PIN_RESET_CORE_BIT | PIN_RESET_COMPHY_BIT | -- PIN_PU_PLL_BIT | PIN_PU_RX_BIT | PIN_PU_TX_BIT; -- mask = data | PIN_PU_IVREF_BIT | PIN_TX_IDLE_BIT; -- comphy_periph_reg_set(lane, COMPHY_PHY_CFG1, data, mask); -- } -- -- return 0; -+ /* -+ * The USB3 MAC sets the USB3 PHY to low state, so we do not -+ * need to power off USB3 PHY again. 
-+ */ - } - - static bool mvebu_a3700_comphy_check_mode(int lane, -@@ -1171,10 +1142,6 @@ static int mvebu_a3700_comphy_set_mode(s - (lane->mode != mode || lane->submode != submode)) - return -EBUSY; - -- /* If changing mode, ensure reset is called */ -- if (lane->mode != PHY_MODE_INVALID && lane->mode != mode) -- lane->needs_reset = true; -- - /* Just remember the mode, ->power_on() will do the real setup */ - lane->mode = mode; - lane->submode = submode; -@@ -1185,7 +1152,6 @@ static int mvebu_a3700_comphy_set_mode(s - static int mvebu_a3700_comphy_power_on(struct phy *phy) - { - struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy); -- int ret; - - if (!mvebu_a3700_comphy_check_mode(lane->id, lane->mode, - lane->submode)) { -@@ -1193,14 +1159,6 @@ static int mvebu_a3700_comphy_power_on(s - return -EINVAL; - } - -- if (lane->needs_reset) { -- ret = mvebu_a3700_comphy_reset(phy); -- if (ret) -- return ret; -- -- lane->needs_reset = false; -- } -- - switch (lane->mode) { - case PHY_MODE_USB_HOST_SS: - dev_dbg(lane->dev, "set lane %d to USB3 host mode\n", lane->id); -@@ -1224,38 +1182,28 @@ static int mvebu_a3700_comphy_power_off( - { - struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy); - -- switch (lane->mode) { -- case PHY_MODE_USB_HOST_SS: -- /* -- * The USB3 MAC sets the USB3 PHY to low state, so we do not -- * need to power off USB3 PHY again. -- */ -- break; -- -- case PHY_MODE_SATA: -- mvebu_a3700_comphy_sata_power_off(lane); -- break; -- -- case PHY_MODE_ETHERNET: -+ switch (lane->id) { -+ case 0: -+ mvebu_a3700_comphy_usb3_power_off(lane); - mvebu_a3700_comphy_ethernet_power_off(lane); -- break; -- -- case PHY_MODE_PCIE: -+ return 0; -+ case 1: - mvebu_a3700_comphy_pcie_power_off(lane); -- break; -- -+ mvebu_a3700_comphy_ethernet_power_off(lane); -+ return 0; -+ case 2: -+ mvebu_a3700_comphy_usb3_power_off(lane); -+ mvebu_a3700_comphy_sata_power_off(lane); -+ return 0; - default: - dev_err(lane->dev, "invalid COMPHY mode\n"); - return -EINVAL; - } -- -- return 0; - } - - static const struct phy_ops mvebu_a3700_comphy_ops = { - .power_on = mvebu_a3700_comphy_power_on, - .power_off = mvebu_a3700_comphy_power_off, -- .reset = mvebu_a3700_comphy_reset, - .set_mode = mvebu_a3700_comphy_set_mode, - .owner = THIS_MODULE, - }; -@@ -1393,8 +1341,7 @@ static int mvebu_a3700_comphy_probe(stru - * To avoid relying on the bootloader/firmware configuration, - * power off all comphys. - */ -- mvebu_a3700_comphy_reset(phy); -- lane->needs_reset = false; -+ mvebu_a3700_comphy_power_off(phy); - } - - provider = devm_of_phy_provider_register(&pdev->dev, diff --git a/target/linux/generic/backport-6.1/350-v5.18-regmap-add-configurable-downshift-for-addresses.patch b/target/linux/generic/backport-6.1/350-v5.18-regmap-add-configurable-downshift-for-addresses.patch deleted file mode 100644 index 99cd89ea002f..000000000000 --- a/target/linux/generic/backport-6.1/350-v5.18-regmap-add-configurable-downshift-for-addresses.patch +++ /dev/null @@ -1,90 +0,0 @@ -From 86fc59ef818beb0e1945d17f8e734898baba7e4e Mon Sep 17 00:00:00 2001 -From: Colin Foster -Date: Sun, 13 Mar 2022 15:45:23 -0700 -Subject: [PATCH 1/2] regmap: add configurable downshift for addresses - -Add an additional reg_downshift to be applied to register addresses before -any register accesses. An example of a device that uses this is a VSC7514 -chip, which require each register address to be downshifted by two if the -access is performed over a SPI bus. 
- -Signed-off-by: Colin Foster -Link: https://lore.kernel.org/r/20220313224524.399947-2-colin.foster@in-advantage.com -Signed-off-by: Mark Brown ---- - drivers/base/regmap/internal.h | 1 + - drivers/base/regmap/regmap.c | 5 +++++ - include/linux/regmap.h | 3 +++ - 3 files changed, 9 insertions(+) - ---- a/drivers/base/regmap/internal.h -+++ b/drivers/base/regmap/internal.h -@@ -31,6 +31,7 @@ struct regmap_format { - size_t buf_size; - size_t reg_bytes; - size_t pad_bytes; -+ size_t reg_downshift; - size_t val_bytes; - void (*format_write)(struct regmap *map, - unsigned int reg, unsigned int val); ---- a/drivers/base/regmap/regmap.c -+++ b/drivers/base/regmap/regmap.c -@@ -823,6 +823,7 @@ struct regmap *__regmap_init(struct devi - - map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8); - map->format.pad_bytes = config->pad_bits / 8; -+ map->format.reg_downshift = config->reg_downshift; - map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8); - map->format.buf_size = DIV_ROUND_UP(config->reg_bits + - config->val_bits + config->pad_bits, 8); -@@ -1735,6 +1736,7 @@ static int _regmap_raw_write_impl(struct - return ret; - } - -+ reg >>= map->format.reg_downshift; - map->format.format_reg(map->work_buf, reg, map->reg_shift); - regmap_set_work_buf_flag_mask(map, map->format.reg_bytes, - map->write_flag_mask); -@@ -1905,6 +1907,7 @@ static int _regmap_bus_formatted_write(v - return ret; - } - -+ reg >>= map->format.reg_downshift; - map->format.format_write(map, reg, val); - - trace_regmap_hw_write_start(map, reg, 1); -@@ -2346,6 +2349,7 @@ static int _regmap_raw_multi_reg_write(s - unsigned int reg = regs[i].reg; - unsigned int val = regs[i].def; - trace_regmap_hw_write_start(map, reg, 1); -+ reg >>= map->format.reg_downshift; - map->format.format_reg(u8, reg, map->reg_shift); - u8 += reg_bytes + pad_bytes; - map->format.format_val(u8, val, 0); -@@ -2673,6 +2677,7 @@ static int _regmap_raw_read(struct regma - return ret; - } - -+ reg >>= map->format.reg_downshift; - map->format.format_reg(map->work_buf, reg, map->reg_shift); - regmap_set_work_buf_flag_mask(map, map->format.reg_bytes, - map->read_flag_mask); ---- a/include/linux/regmap.h -+++ b/include/linux/regmap.h -@@ -237,6 +237,8 @@ typedef void (*regmap_unlock)(void *); - * @reg_stride: The register address stride. Valid register addresses are a - * multiple of this value. If set to 0, a value of 1 will be - * used. -+ * @reg_downshift: The number of bits to downshift the register before -+ * performing any operations. - * @pad_bits: Number of bits of padding between register and value. - * @val_bits: Number of bits in a register value, mandatory. - * -@@ -360,6 +362,7 @@ struct regmap_config { - - int reg_bits; - int reg_stride; -+ int reg_downshift; - int pad_bits; - int val_bits; - diff --git a/target/linux/generic/backport-6.1/351-v5.18-regmap-allow-a-defined-reg_base-to-be-added-to-every.patch b/target/linux/generic/backport-6.1/351-v5.18-regmap-allow-a-defined-reg_base-to-be-added-to-every.patch deleted file mode 100644 index 0f32288fcab5..000000000000 --- a/target/linux/generic/backport-6.1/351-v5.18-regmap-allow-a-defined-reg_base-to-be-added-to-every.patch +++ /dev/null @@ -1,95 +0,0 @@ -From 0074f3f2b1e43d3cedd97e47fb6980db6d2ba79e Mon Sep 17 00:00:00 2001 -From: Colin Foster -Date: Sun, 13 Mar 2022 15:45:24 -0700 -Subject: [PATCH 2/2] regmap: allow a defined reg_base to be added to every - address - -There's an inconsistency that arises when a register set can be accessed -internally via MMIO, or externally via SPI. 
The VSC7514 chip allows both -modes of operation. When internally accessed, the system utilizes __iomem, -devm_ioremap_resource, and devm_regmap_init_mmio. - -For SPI it isn't possible to utilize memory-mapped IO. To properly operate, -the resource base must be added to the register before every operation. - -Signed-off-by: Colin Foster -Link: https://lore.kernel.org/r/20220313224524.399947-3-colin.foster@in-advantage.com -Signed-off-by: Mark Brown ---- - drivers/base/regmap/internal.h | 1 + - drivers/base/regmap/regmap.c | 6 ++++++ - include/linux/regmap.h | 3 +++ - 3 files changed, 10 insertions(+) - ---- a/drivers/base/regmap/internal.h -+++ b/drivers/base/regmap/internal.h -@@ -63,6 +63,7 @@ struct regmap { - regmap_unlock unlock; - void *lock_arg; /* This is passed to lock/unlock functions */ - gfp_t alloc_flags; -+ unsigned int reg_base; - - struct device *dev; /* Device we do I/O on */ - void *work_buf; /* Scratch buffer used to format I/O */ ---- a/drivers/base/regmap/regmap.c -+++ b/drivers/base/regmap/regmap.c -@@ -821,6 +821,8 @@ struct regmap *__regmap_init(struct devi - else - map->alloc_flags = GFP_KERNEL; - -+ map->reg_base = config->reg_base; -+ - map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8); - map->format.pad_bytes = config->pad_bits / 8; - map->format.reg_downshift = config->reg_downshift; -@@ -1736,6 +1738,7 @@ static int _regmap_raw_write_impl(struct - return ret; - } - -+ reg += map->reg_base; - reg >>= map->format.reg_downshift; - map->format.format_reg(map->work_buf, reg, map->reg_shift); - regmap_set_work_buf_flag_mask(map, map->format.reg_bytes, -@@ -1907,6 +1910,7 @@ static int _regmap_bus_formatted_write(v - return ret; - } - -+ reg += map->reg_base; - reg >>= map->format.reg_downshift; - map->format.format_write(map, reg, val); - -@@ -2349,6 +2353,7 @@ static int _regmap_raw_multi_reg_write(s - unsigned int reg = regs[i].reg; - unsigned int val = regs[i].def; - trace_regmap_hw_write_start(map, reg, 1); -+ reg += map->reg_base; - reg >>= map->format.reg_downshift; - map->format.format_reg(u8, reg, map->reg_shift); - u8 += reg_bytes + pad_bytes; -@@ -2677,6 +2682,7 @@ static int _regmap_raw_read(struct regma - return ret; - } - -+ reg += map->reg_base; - reg >>= map->format.reg_downshift; - map->format.format_reg(map->work_buf, reg, map->reg_shift); - regmap_set_work_buf_flag_mask(map, map->format.reg_bytes, ---- a/include/linux/regmap.h -+++ b/include/linux/regmap.h -@@ -239,6 +239,8 @@ typedef void (*regmap_unlock)(void *); - * used. - * @reg_downshift: The number of bits to downshift the register before - * performing any operations. -+ * @reg_base: Value to be added to every register address before performing any -+ * operation. - * @pad_bits: Number of bits of padding between register and value. - * @val_bits: Number of bits in a register value, mandatory. 
- * -@@ -363,6 +365,7 @@ struct regmap_config { - int reg_bits; - int reg_stride; - int reg_downshift; -+ unsigned int reg_base; - int pad_bits; - int val_bits; - diff --git a/target/linux/generic/backport-6.1/352-v6.3-regmap-apply-reg_base-and-reg_downshift-for-single-r.patch b/target/linux/generic/backport-6.1/352-v6.3-regmap-apply-reg_base-and-reg_downshift-for-single-r.patch deleted file mode 100644 index 804f68d23c50..000000000000 --- a/target/linux/generic/backport-6.1/352-v6.3-regmap-apply-reg_base-and-reg_downshift-for-single-r.patch +++ /dev/null @@ -1,57 +0,0 @@ -From 697c3892d825fb78f42ec8e53bed065dd728db3e Mon Sep 17 00:00:00 2001 -From: Daniel Golle -Date: Mon, 30 Jan 2023 02:04:57 +0000 -Subject: [PATCH] regmap: apply reg_base and reg_downshift for single register - ops - -reg_base and reg_downshift currently don't have any effect if used with -a regmap_bus or regmap_config which only offers single register -operations (ie. reg_read, reg_write and optionally reg_update_bits). - -Fix that and take them into account also for regmap_bus with only -reg_read and read_write operations by applying reg_base and -reg_downshift in _regmap_bus_reg_write, _regmap_bus_reg_read. - -Also apply reg_base and reg_downshift in _regmap_update_bits, but only -in case the operation is carried out with a reg_update_bits call -defined in either regmap_bus or regmap_config. - -Fixes: 0074f3f2b1e43d ("regmap: allow a defined reg_base to be added to every address") -Fixes: 86fc59ef818beb ("regmap: add configurable downshift for addresses") -Signed-off-by: Daniel Golle -Tested-by: Colin Foster -Link: https://lore.kernel.org/r/Y9clyVS3tQEHlUhA@makrotopia.org -Signed-off-by: Mark Brown ---- - drivers/base/regmap/regmap.c | 6 ++++++ - 1 file changed, 6 insertions(+) - ---- a/drivers/base/regmap/regmap.c -+++ b/drivers/base/regmap/regmap.c -@@ -1929,6 +1929,8 @@ static int _regmap_bus_reg_write(void *c - { - struct regmap *map = context; - -+ reg += map->reg_base; -+ reg >>= map->format.reg_downshift; - return map->bus->reg_write(map->bus_context, reg, val); - } - -@@ -2703,6 +2705,8 @@ static int _regmap_bus_reg_read(void *co - { - struct regmap *map = context; - -+ reg += map->reg_base; -+ reg >>= map->format.reg_downshift; - return map->bus->reg_read(map->bus_context, reg, val); - } - -@@ -3078,6 +3082,8 @@ static int _regmap_update_bits(struct re - *change = false; - - if (regmap_volatile(map, reg) && map->reg_update_bits) { -+ reg += map->reg_base; -+ reg >>= map->format.reg_downshift; - ret = map->reg_update_bits(map->bus_context, reg, mask, val); - if (ret == 0 && change) - *change = true; diff --git a/target/linux/generic/backport-6.1/400-v5.19-mtd-call-of_platform_populate-for-MTD-partitions.patch b/target/linux/generic/backport-6.1/400-v5.19-mtd-call-of_platform_populate-for-MTD-partitions.patch deleted file mode 100644 index 1f3aae13b4d9..000000000000 --- a/target/linux/generic/backport-6.1/400-v5.19-mtd-call-of_platform_populate-for-MTD-partitions.patch +++ /dev/null @@ -1,72 +0,0 @@ -From bcdf0315a61a29eb753a607d3a85a4032de72d94 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= -Date: Tue, 10 May 2022 15:12:59 +0200 -Subject: [PATCH] mtd: call of_platform_populate() for MTD partitions -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Until this change MTD subsystem supported handling partitions only with -MTD partitions parsers. That's a specific / limited API designed around -partitions. 
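Before the MTD changes that follow, a note on the three regmap patches above: together they let a driver describe a fixed address window declaratively, with reg_base added to every register address first and reg_downshift applied afterwards, in that order. The sketch below shows how a driver might use the two fields; the SPI device, window offset and register numbers are invented for illustration and are not taken from any of the patches.

/*
 * Illustrative sketch only: a hypothetical SPI-attached block whose
 * datasheet gives byte addresses inside a window starting at 0x10000,
 * while the bus itself takes 32-bit word addresses. reg_base and
 * reg_downshift are the regmap_config fields added by the patches above;
 * everything else here is invented.
 */
#include <linux/err.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>

static const struct regmap_config example_regmap_cfg = {
	.reg_bits	= 32,
	.val_bits	= 32,
	.reg_stride	= 4,
	.reg_base	= 0x10000,	/* added to every register address */
	.reg_downshift	= 2,		/* then byte address -> word address */
};

static int example_probe(struct spi_device *spi)
{
	struct regmap *map;
	unsigned int val;

	map = devm_regmap_init_spi(spi, &example_regmap_cfg);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* The core transmits (0x24 + 0x10000) >> 2; the driver keeps using 0x24. */
	return regmap_read(map, 0x24, &val);
}

With such a configuration the regmap core, rather than the driver, performs the address translation on every read and write.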
- -Some MTD partitions may however require different handling. They may -contain specific data that needs to be parsed and somehow extracted. For -that purpose MTD subsystem should allow binding of standard platform -drivers. - -An example can be U-Boot (sub)partition with environment variables. -There exist a "u-boot,env" DT binding for MTD (sub)partition that -requires an NVMEM driver. - -Ref: 5db1c2dbc04c ("dt-bindings: nvmem: add U-Boot environment variables binding") -Signed-off-by: Rafał Miłecki -Signed-off-by: Miquel Raynal -Link: https://lore.kernel.org/linux-mtd/20220510131259.555-1-zajec5@gmail.com ---- - drivers/mtd/mtdpart.c | 9 +++++++++ - 1 file changed, 9 insertions(+) - ---- a/drivers/mtd/mtdpart.c -+++ b/drivers/mtd/mtdpart.c -@@ -17,6 +17,7 @@ - #include - #include - #include -+#include - - #include "mtdcore.h" - -@@ -577,10 +578,16 @@ static int mtd_part_of_parse(struct mtd_ - struct mtd_part_parser *parser; - struct device_node *np; - struct property *prop; -+ struct device *dev; - const char *compat; - const char *fixed = "fixed-partitions"; - int ret, err = 0; - -+ dev = &master->dev; -+ /* Use parent device (controller) if the top level MTD is not registered */ -+ if (!IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) && !mtd_is_partition(master)) -+ dev = master->dev.parent; -+ - np = mtd_get_of_node(master); - if (mtd_is_partition(master)) - of_node_get(np); -@@ -593,6 +600,7 @@ static int mtd_part_of_parse(struct mtd_ - continue; - ret = mtd_part_do_parse(parser, master, pparts, NULL); - if (ret > 0) { -+ of_platform_populate(np, NULL, NULL, dev); - of_node_put(np); - return ret; - } -@@ -600,6 +608,7 @@ static int mtd_part_of_parse(struct mtd_ - if (ret < 0 && !err) - err = ret; - } -+ of_platform_populate(np, NULL, NULL, dev); - of_node_put(np); - - /* diff --git a/target/linux/generic/backport-6.1/401-v6.0-mtd-parsers-add-support-for-Sercomm-partitions.patch b/target/linux/generic/backport-6.1/401-v6.0-mtd-parsers-add-support-for-Sercomm-partitions.patch deleted file mode 100644 index 113a96ad42ea..000000000000 --- a/target/linux/generic/backport-6.1/401-v6.0-mtd-parsers-add-support-for-Sercomm-partitions.patch +++ /dev/null @@ -1,302 +0,0 @@ -From 9b78ef0c7997052e9eaa0f7a4513d546fa17358c Mon Sep 17 00:00:00 2001 -From: Mikhail Zhilkin -Date: Sun, 29 May 2022 11:07:14 +0000 -Subject: [PATCH] mtd: parsers: add support for Sercomm partitions - -This adds an MTD partition parser for the Sercomm partition table that -is used in some Beeline, Netgear and Sercomm routers. - -The Sercomm partition map table contains real partition offsets, which -may differ from device to device depending on the number and location of -bad blocks on NAND. - -Original patch (proposed by NOGUCHI Hiroshi): -Link: https://github.com/openwrt/openwrt/pull/1318#issuecomment-420607394 - -Signed-off-by: NOGUCHI Hiroshi -Signed-off-by: Mikhail Zhilkin -Signed-off-by: Miquel Raynal -Link: https://lore.kernel.org/linux-mtd/20220529110714.189732-1-csharper2005@gmail.com ---- - drivers/mtd/parsers/Kconfig | 9 ++ - drivers/mtd/parsers/Makefile | 1 + - drivers/mtd/parsers/scpart.c | 248 +++++++++++++++++++++++++++++++++++ - 3 files changed, 258 insertions(+) - create mode 100644 drivers/mtd/parsers/scpart.c - ---- a/drivers/mtd/parsers/Kconfig -+++ b/drivers/mtd/parsers/Kconfig -@@ -186,3 +186,12 @@ config MTD_QCOMSMEM_PARTS - help - This provides support for parsing partitions from Shared Memory (SMEM) - for NAND and SPI flash on Qualcomm platforms. 
-+ -+config MTD_SERCOMM_PARTS -+ tristate "Sercomm partition table parser" -+ depends on MTD && RALINK -+ help -+ This provides partitions table parser for devices with Sercomm -+ partition map. This partition table contains real partition -+ offsets, which may differ from device to device depending on the -+ number and location of bad blocks on NAND. ---- a/drivers/mtd/parsers/Makefile -+++ b/drivers/mtd/parsers/Makefile -@@ -10,6 +10,7 @@ ofpart-$(CONFIG_MTD_OF_PARTS_LINKSYS_NS) - obj-$(CONFIG_MTD_PARSER_IMAGETAG) += parser_imagetag.o - obj-$(CONFIG_MTD_AFS_PARTS) += afs.o - obj-$(CONFIG_MTD_PARSER_TRX) += parser_trx.o -+obj-$(CONFIG_MTD_SERCOMM_PARTS) += scpart.o - obj-$(CONFIG_MTD_SHARPSL_PARTS) += sharpslpart.o - obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o - obj-$(CONFIG_MTD_QCOMSMEM_PARTS) += qcomsmempart.o ---- /dev/null -+++ b/drivers/mtd/parsers/scpart.c -@@ -0,0 +1,248 @@ -+// SPDX-License-Identifier: GPL-2.0-or-later -+/* -+ * drivers/mtd/scpart.c: Sercomm Partition Parser -+ * -+ * Copyright (C) 2018 NOGUCHI Hiroshi -+ * Copyright (C) 2022 Mikhail Zhilkin -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+ -+#define MOD_NAME "scpart" -+ -+#ifdef pr_fmt -+#undef pr_fmt -+#endif -+ -+#define pr_fmt(fmt) MOD_NAME ": " fmt -+ -+#define ID_ALREADY_FOUND 0xffffffffUL -+ -+#define MAP_OFFS_IN_BLK 0x800 -+#define MAP_MIRROR_NUM 2 -+ -+static const char sc_part_magic[] = { -+ 'S', 'C', 'F', 'L', 'M', 'A', 'P', 'O', 'K', '\0', -+}; -+#define PART_MAGIC_LEN sizeof(sc_part_magic) -+ -+/* assumes that all fields are set by CPU native endian */ -+struct sc_part_desc { -+ uint32_t part_id; -+ uint32_t part_offs; -+ uint32_t part_bytes; -+}; -+ -+static uint32_t scpart_desc_is_valid(struct sc_part_desc *pdesc) -+{ -+ return ((pdesc->part_id != 0xffffffffUL) && -+ (pdesc->part_offs != 0xffffffffUL) && -+ (pdesc->part_bytes != 0xffffffffUL)); -+} -+ -+static int scpart_scan_partmap(struct mtd_info *master, loff_t partmap_offs, -+ struct sc_part_desc **ppdesc) -+{ -+ int cnt = 0; -+ int res = 0; -+ int res2; -+ loff_t offs; -+ size_t retlen; -+ struct sc_part_desc *pdesc = NULL; -+ struct sc_part_desc *tmpdesc; -+ uint8_t *buf; -+ -+ buf = kzalloc(master->erasesize, GFP_KERNEL); -+ if (!buf) { -+ res = -ENOMEM; -+ goto out; -+ } -+ -+ res2 = mtd_read(master, partmap_offs, master->erasesize, &retlen, buf); -+ if (res2 || retlen != master->erasesize) { -+ res = -EIO; -+ goto free; -+ } -+ -+ for (offs = MAP_OFFS_IN_BLK; -+ offs < master->erasesize - sizeof(*tmpdesc); -+ offs += sizeof(*tmpdesc)) { -+ tmpdesc = (struct sc_part_desc *)&buf[offs]; -+ if (!scpart_desc_is_valid(tmpdesc)) -+ break; -+ cnt++; -+ } -+ -+ if (cnt > 0) { -+ int bytes = cnt * sizeof(*pdesc); -+ -+ pdesc = kcalloc(cnt, sizeof(*pdesc), GFP_KERNEL); -+ if (!pdesc) { -+ res = -ENOMEM; -+ goto free; -+ } -+ memcpy(pdesc, &(buf[MAP_OFFS_IN_BLK]), bytes); -+ -+ *ppdesc = pdesc; -+ res = cnt; -+ } -+ -+free: -+ kfree(buf); -+ -+out: -+ return res; -+} -+ -+static int scpart_find_partmap(struct mtd_info *master, -+ struct sc_part_desc **ppdesc) -+{ -+ int magic_found = 0; -+ int res = 0; -+ int res2; -+ loff_t offs = 0; -+ size_t retlen; -+ uint8_t rdbuf[PART_MAGIC_LEN]; -+ -+ while ((magic_found < MAP_MIRROR_NUM) && -+ (offs < master->size) && -+ !mtd_block_isbad(master, offs)) { -+ res2 = mtd_read(master, offs, PART_MAGIC_LEN, &retlen, rdbuf); -+ if (res2 || retlen != PART_MAGIC_LEN) { -+ res = -EIO; -+ goto out; -+ } -+ if (!memcmp(rdbuf, sc_part_magic, PART_MAGIC_LEN)) { -+ pr_debug("Signature found at 0x%llx\n", 
offs); -+ magic_found++; -+ res = scpart_scan_partmap(master, offs, ppdesc); -+ if (res > 0) -+ goto out; -+ } -+ offs += master->erasesize; -+ } -+ -+out: -+ if (res > 0) -+ pr_info("Valid 'SC PART MAP' (%d partitions) found at 0x%llx\n", res, offs); -+ else -+ pr_info("No valid 'SC PART MAP' was found\n"); -+ -+ return res; -+} -+ -+static int scpart_parse(struct mtd_info *master, -+ const struct mtd_partition **pparts, -+ struct mtd_part_parser_data *data) -+{ -+ const char *partname; -+ int n; -+ int nr_scparts; -+ int nr_parts = 0; -+ int res = 0; -+ struct sc_part_desc *scpart_map = NULL; -+ struct mtd_partition *parts = NULL; -+ struct device_node *mtd_node; -+ struct device_node *ofpart_node; -+ struct device_node *pp; -+ -+ mtd_node = mtd_get_of_node(master); -+ if (!mtd_node) { -+ res = -ENOENT; -+ goto out; -+ } -+ -+ ofpart_node = of_get_child_by_name(mtd_node, "partitions"); -+ if (!ofpart_node) { -+ pr_info("%s: 'partitions' subnode not found on %pOF.\n", -+ master->name, mtd_node); -+ res = -ENOENT; -+ goto out; -+ } -+ -+ nr_scparts = scpart_find_partmap(master, &scpart_map); -+ if (nr_scparts <= 0) { -+ pr_info("No any partitions was found in 'SC PART MAP'.\n"); -+ res = -ENOENT; -+ goto free; -+ } -+ -+ parts = kcalloc(of_get_child_count(ofpart_node), sizeof(*parts), -+ GFP_KERNEL); -+ if (!parts) { -+ res = -ENOMEM; -+ goto free; -+ } -+ -+ for_each_child_of_node(ofpart_node, pp) { -+ u32 scpart_id; -+ -+ if (of_property_read_u32(pp, "sercomm,scpart-id", &scpart_id)) -+ continue; -+ -+ for (n = 0 ; n < nr_scparts ; n++) -+ if ((scpart_map[n].part_id != ID_ALREADY_FOUND) && -+ (scpart_id == scpart_map[n].part_id)) -+ break; -+ if (n >= nr_scparts) -+ /* not match */ -+ continue; -+ -+ /* add the partition found in OF into MTD partition array */ -+ parts[nr_parts].offset = scpart_map[n].part_offs; -+ parts[nr_parts].size = scpart_map[n].part_bytes; -+ parts[nr_parts].of_node = pp; -+ -+ if (!of_property_read_string(pp, "label", &partname)) -+ parts[nr_parts].name = partname; -+ if (of_property_read_bool(pp, "read-only")) -+ parts[nr_parts].mask_flags |= MTD_WRITEABLE; -+ if (of_property_read_bool(pp, "lock")) -+ parts[nr_parts].mask_flags |= MTD_POWERUP_LOCK; -+ -+ /* mark as 'done' */ -+ scpart_map[n].part_id = ID_ALREADY_FOUND; -+ -+ nr_parts++; -+ } -+ -+ if (nr_parts > 0) { -+ *pparts = parts; -+ res = nr_parts; -+ } else -+ pr_info("No partition in OF matches partition ID with 'SC PART MAP'.\n"); -+ -+ of_node_put(pp); -+ -+free: -+ kfree(scpart_map); -+ if (res <= 0) -+ kfree(parts); -+ -+out: -+ return res; -+} -+ -+static const struct of_device_id scpart_parser_of_match_table[] = { -+ { .compatible = "sercomm,sc-partitions" }, -+ {}, -+}; -+MODULE_DEVICE_TABLE(of, scpart_parser_of_match_table); -+ -+static struct mtd_part_parser scpart_parser = { -+ .parse_fn = scpart_parse, -+ .name = "scpart", -+ .of_match_table = scpart_parser_of_match_table, -+}; -+module_mtd_part_parser(scpart_parser); -+ -+/* mtd parsers will request the module by parser name */ -+MODULE_ALIAS("scpart"); -+MODULE_LICENSE("GPL"); -+MODULE_AUTHOR("NOGUCHI Hiroshi "); -+MODULE_AUTHOR("Mikhail Zhilkin "); -+MODULE_DESCRIPTION("Sercomm partition parser"); diff --git a/target/linux/generic/backport-6.1/402-v6.0-mtd-next-mtd-core-introduce-of-support-for-dynamic-partitions.patch b/target/linux/generic/backport-6.1/402-v6.0-mtd-next-mtd-core-introduce-of-support-for-dynamic-partitions.patch deleted file mode 100644 index ee385416d1d0..000000000000 --- 
a/target/linux/generic/backport-6.1/402-v6.0-mtd-next-mtd-core-introduce-of-support-for-dynamic-partitions.patch +++ /dev/null @@ -1,106 +0,0 @@ -From ad9b10d1eaada169bd764abcab58f08538877e26 Mon Sep 17 00:00:00 2001 -From: Christian Marangi -Date: Wed, 22 Jun 2022 03:06:28 +0200 -Subject: mtd: core: introduce of support for dynamic partitions - -We have many parser that register mtd partitions at runtime. One example -is the cmdlinepart or the smem-part parser where the compatible is defined -in the dts and the partitions gets detected and registered by the -parser. This is problematic for the NVMEM subsystem that requires an OF -node to detect NVMEM cells. - -To fix this problem, introduce an additional logic that will try to -assign an OF node to the MTD if declared. - -On MTD addition, it will be checked if the MTD has an OF node and if -not declared will check if a partition with the same label / node name is -declared in DTS. If an exact match is found, the partition dynamically -allocated by the parser will have a connected OF node. - -The NVMEM subsystem will detect the OF node and register any NVMEM cells -declared statically in the DTS. - -Signed-off-by: Christian Marangi -Signed-off-by: Miquel Raynal -Link: https://lore.kernel.org/linux-mtd/20220622010628.30414-4-ansuelsmth@gmail.com ---- - drivers/mtd/mtdcore.c | 61 +++++++++++++++++++++++++++++++++++++++++++ - 1 file changed, 61 insertions(+) - ---- a/drivers/mtd/mtdcore.c -+++ b/drivers/mtd/mtdcore.c -@@ -564,6 +564,66 @@ static int mtd_nvmem_add(struct mtd_info - return 0; - } - -+static void mtd_check_of_node(struct mtd_info *mtd) -+{ -+ struct device_node *partitions, *parent_dn, *mtd_dn = NULL; -+ const char *pname, *prefix = "partition-"; -+ int plen, mtd_name_len, offset, prefix_len; -+ struct mtd_info *parent; -+ bool found = false; -+ -+ /* Check if MTD already has a device node */ -+ if (dev_of_node(&mtd->dev)) -+ return; -+ -+ /* Check if a partitions node exist */ -+ parent = mtd->parent; -+ parent_dn = dev_of_node(&parent->dev); -+ if (!parent_dn) -+ return; -+ -+ partitions = of_get_child_by_name(parent_dn, "partitions"); -+ if (!partitions) -+ goto exit_parent; -+ -+ prefix_len = strlen(prefix); -+ mtd_name_len = strlen(mtd->name); -+ -+ /* Search if a partition is defined with the same name */ -+ for_each_child_of_node(partitions, mtd_dn) { -+ offset = 0; -+ -+ /* Skip partition with no/wrong prefix */ -+ if (!of_node_name_prefix(mtd_dn, "partition-")) -+ continue; -+ -+ /* Label have priority. 
Check that first */ -+ if (of_property_read_string(mtd_dn, "label", &pname)) { -+ of_property_read_string(mtd_dn, "name", &pname); -+ offset = prefix_len; -+ } -+ -+ plen = strlen(pname) - offset; -+ if (plen == mtd_name_len && -+ !strncmp(mtd->name, pname + offset, plen)) { -+ found = true; -+ break; -+ } -+ } -+ -+ if (!found) -+ goto exit_partitions; -+ -+ /* Set of_node only for nvmem */ -+ if (of_device_is_compatible(mtd_dn, "nvmem-cells")) -+ mtd_set_of_node(mtd, mtd_dn); -+ -+exit_partitions: -+ of_node_put(partitions); -+exit_parent: -+ of_node_put(parent_dn); -+} -+ - /** - * add_mtd_device - register an MTD device - * @mtd: pointer to new MTD device info structure -@@ -669,6 +729,7 @@ int add_mtd_device(struct mtd_info *mtd) - mtd->dev.devt = MTD_DEVT(i); - dev_set_name(&mtd->dev, "mtd%d", i); - dev_set_drvdata(&mtd->dev, mtd); -+ mtd_check_of_node(mtd); - of_node_get(mtd_get_of_node(mtd)); - error = device_register(&mtd->dev); - if (error) { diff --git a/target/linux/generic/backport-6.1/403-v6.1-mtd-allow-getting-MTD-device-associated-with-a-speci.patch b/target/linux/generic/backport-6.1/403-v6.1-mtd-allow-getting-MTD-device-associated-with-a-speci.patch deleted file mode 100644 index 3039eabea502..000000000000 --- a/target/linux/generic/backport-6.1/403-v6.1-mtd-allow-getting-MTD-device-associated-with-a-speci.patch +++ /dev/null @@ -1,72 +0,0 @@ -From b0321721be50b80c03a51866a94fde4f94690e18 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= -Date: Wed, 15 Jun 2022 21:42:59 +0200 -Subject: [PATCH] mtd: allow getting MTD device associated with a specific DT - node -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -MTD subsystem API allows interacting with MTD devices (e.g. reading, -writing, handling bad blocks). So far a random driver could get MTD -device only by its name (get_mtd_device_nm()). This change allows -getting them also by a DT node. - -This API is required for drivers handling DT defined MTD partitions in a -specific way (e.g. U-Boot (sub)partition with environment variables). - -Signed-off-by: Rafał Miłecki -Acked-by: Miquel Raynal -Signed-off-by: Srinivas Kandagatla ---- - drivers/mtd/mtdcore.c | 28 ++++++++++++++++++++++++++++ - include/linux/mtd/mtd.h | 1 + - 2 files changed, 29 insertions(+) - ---- a/drivers/mtd/mtdcore.c -+++ b/drivers/mtd/mtdcore.c -@@ -1236,6 +1236,34 @@ int __get_mtd_device(struct mtd_info *mt - EXPORT_SYMBOL_GPL(__get_mtd_device); - - /** -+ * of_get_mtd_device_by_node - obtain an MTD device associated with a given node -+ * -+ * @np: device tree node -+ */ -+struct mtd_info *of_get_mtd_device_by_node(struct device_node *np) -+{ -+ struct mtd_info *mtd = NULL; -+ struct mtd_info *tmp; -+ int err; -+ -+ mutex_lock(&mtd_table_mutex); -+ -+ err = -EPROBE_DEFER; -+ mtd_for_each_device(tmp) { -+ if (mtd_get_of_node(tmp) == np) { -+ mtd = tmp; -+ err = __get_mtd_device(mtd); -+ break; -+ } -+ } -+ -+ mutex_unlock(&mtd_table_mutex); -+ -+ return err ? 
ERR_PTR(err) : mtd; -+} -+EXPORT_SYMBOL_GPL(of_get_mtd_device_by_node); -+ -+/** - * get_mtd_device_nm - obtain a validated handle for an MTD device by - * device name - * @name: MTD device name to open ---- a/include/linux/mtd/mtd.h -+++ b/include/linux/mtd/mtd.h -@@ -682,6 +682,7 @@ extern int mtd_device_unregister(struct - extern struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num); - extern int __get_mtd_device(struct mtd_info *mtd); - extern void __put_mtd_device(struct mtd_info *mtd); -+extern struct mtd_info *of_get_mtd_device_by_node(struct device_node *np); - extern struct mtd_info *get_mtd_device_nm(const char *name); - extern void put_mtd_device(struct mtd_info *mtd); - diff --git a/target/linux/generic/backport-6.1/404-v6.0-mtd-core-check-partition-before-dereference.patch b/target/linux/generic/backport-6.1/404-v6.0-mtd-core-check-partition-before-dereference.patch deleted file mode 100644 index e45e2ab48e39..000000000000 --- a/target/linux/generic/backport-6.1/404-v6.0-mtd-core-check-partition-before-dereference.patch +++ /dev/null @@ -1,30 +0,0 @@ -From 7ec4cdb321738d44ae5d405e7b6ac73dfbf99caa Mon Sep 17 00:00:00 2001 -From: Tetsuo Handa -Date: Mon, 25 Jul 2022 22:49:25 +0900 -Subject: [PATCH] mtd: core: check partition before dereference - -syzbot is reporting NULL pointer dereference at mtd_check_of_node() [1], -for mtdram test device (CONFIG_MTD_MTDRAM) is not partition. - -Link: https://syzkaller.appspot.com/bug?extid=fe013f55a2814a9e8cfd [1] -Reported-by: syzbot -Reported-by: kernel test robot -Fixes: ad9b10d1eaada169 ("mtd: core: introduce of support for dynamic partitions") -Signed-off-by: Tetsuo Handa -CC: stable@vger.kernel.org -Signed-off-by: Richard Weinberger ---- - drivers/mtd/mtdcore.c | 2 ++ - 1 file changed, 2 insertions(+) - ---- a/drivers/mtd/mtdcore.c -+++ b/drivers/mtd/mtdcore.c -@@ -577,6 +577,8 @@ static void mtd_check_of_node(struct mtd - return; - - /* Check if a partitions node exist */ -+ if (!mtd_is_partition(mtd)) -+ return; - parent = mtd->parent; - parent_dn = dev_of_node(&parent->dev); - if (!parent_dn) diff --git a/target/linux/generic/backport-6.1/405-v6.1-mtd-core-add-missing-of_node_get-in-dynamic-partitio.patch b/target/linux/generic/backport-6.1/405-v6.1-mtd-core-add-missing-of_node_get-in-dynamic-partitio.patch deleted file mode 100644 index 9399a00aa165..000000000000 --- a/target/linux/generic/backport-6.1/405-v6.1-mtd-core-add-missing-of_node_get-in-dynamic-partitio.patch +++ /dev/null @@ -1,101 +0,0 @@ -From 12b58961de0bd88b3c7dfa5d21f6d67f4678b780 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= -Date: Tue, 18 Oct 2022 07:18:22 +0200 -Subject: [PATCH] mtd: core: add missing of_node_get() in dynamic partitions - code -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -This fixes unbalanced of_node_put(): -[ 1.078910] 6 cmdlinepart partitions found on MTD device gpmi-nand -[ 1.085116] Creating 6 MTD partitions on "gpmi-nand": -[ 1.090181] 0x000000000000-0x000008000000 : "nandboot" -[ 1.096952] 0x000008000000-0x000009000000 : "nandfit" -[ 1.103547] 0x000009000000-0x00000b000000 : "nandkernel" -[ 1.110317] 0x00000b000000-0x00000c000000 : "nanddtb" -[ 1.115525] ------------[ cut here ]------------ -[ 1.120141] refcount_t: addition on 0; use-after-free. 
-[ 1.125328] WARNING: CPU: 0 PID: 1 at lib/refcount.c:25 refcount_warn_saturate+0xdc/0x148 -[ 1.133528] Modules linked in: -[ 1.136589] CPU: 0 PID: 1 Comm: swapper/0 Not tainted 6.0.0-rc7-next-20220930-04543-g8cf3f7 -[ 1.146342] Hardware name: Freescale i.MX8DXL DDR3L EVK (DT) -[ 1.151999] pstate: 600000c5 (nZCv daIF -PAN -UAO -TCO -DIT -SSBS BTYPE=--) -[ 1.158965] pc : refcount_warn_saturate+0xdc/0x148 -[ 1.163760] lr : refcount_warn_saturate+0xdc/0x148 -[ 1.168556] sp : ffff800009ddb080 -[ 1.171866] x29: ffff800009ddb080 x28: ffff800009ddb35a x27: 0000000000000002 -[ 1.179015] x26: ffff8000098b06ad x25: ffffffffffffffff x24: ffff0a00ffffff05 -[ 1.186165] x23: ffff00001fdf6470 x22: ffff800009ddb367 x21: 0000000000000000 -[ 1.193314] x20: ffff00001fdfebe8 x19: ffff00001fdfec50 x18: ffffffffffffffff -[ 1.200464] x17: 0000000000000000 x16: 0000000000000118 x15: 0000000000000004 -[ 1.207614] x14: 0000000000000fff x13: ffff800009bca248 x12: 0000000000000003 -[ 1.214764] x11: 00000000ffffefff x10: c0000000ffffefff x9 : 4762cb2ccb52de00 -[ 1.221914] x8 : 4762cb2ccb52de00 x7 : 205d313431303231 x6 : 312e31202020205b -[ 1.229063] x5 : ffff800009d55c1f x4 : 0000000000000001 x3 : 0000000000000000 -[ 1.236213] x2 : 0000000000000000 x1 : ffff800009954be6 x0 : 000000000000002a -[ 1.243365] Call trace: -[ 1.245806] refcount_warn_saturate+0xdc/0x148 -[ 1.250253] kobject_get+0x98/0x9c -[ 1.253658] of_node_get+0x20/0x34 -[ 1.257072] of_fwnode_get+0x3c/0x54 -[ 1.260652] fwnode_get_nth_parent+0xd8/0xf4 -[ 1.264926] fwnode_full_name_string+0x3c/0xb4 -[ 1.269373] device_node_string+0x498/0x5b4 -[ 1.273561] pointer+0x41c/0x5d0 -[ 1.276793] vsnprintf+0x4d8/0x694 -[ 1.280198] vprintk_store+0x164/0x528 -[ 1.283951] vprintk_emit+0x98/0x164 -[ 1.287530] vprintk_default+0x44/0x6c -[ 1.291284] vprintk+0xf0/0x134 -[ 1.294428] _printk+0x54/0x7c -[ 1.297486] of_node_release+0xe8/0x128 -[ 1.301326] kobject_put+0x98/0xfc -[ 1.304732] of_node_put+0x1c/0x28 -[ 1.308137] add_mtd_device+0x484/0x6d4 -[ 1.311977] add_mtd_partitions+0xf0/0x1d0 -[ 1.316078] parse_mtd_partitions+0x45c/0x518 -[ 1.320439] mtd_device_parse_register+0xb0/0x274 -[ 1.325147] gpmi_nand_probe+0x51c/0x650 -[ 1.329074] platform_probe+0xa8/0xd0 -[ 1.332740] really_probe+0x130/0x334 -[ 1.336406] __driver_probe_device+0xb4/0xe0 -[ 1.340681] driver_probe_device+0x3c/0x1f8 -[ 1.344869] __driver_attach+0xdc/0x1a4 -[ 1.348708] bus_for_each_dev+0x80/0xcc -[ 1.352548] driver_attach+0x24/0x30 -[ 1.356127] bus_add_driver+0x108/0x1f4 -[ 1.359967] driver_register+0x78/0x114 -[ 1.363807] __platform_driver_register+0x24/0x30 -[ 1.368515] gpmi_nand_driver_init+0x1c/0x28 -[ 1.372798] do_one_initcall+0xbc/0x238 -[ 1.376638] do_initcall_level+0x94/0xb4 -[ 1.380565] do_initcalls+0x54/0x94 -[ 1.384058] do_basic_setup+0x1c/0x28 -[ 1.387724] kernel_init_freeable+0x110/0x188 -[ 1.392084] kernel_init+0x20/0x1a0 -[ 1.395578] ret_from_fork+0x10/0x20 -[ 1.399157] ---[ end trace 0000000000000000 ]--- -[ 1.403782] ------------[ cut here ]------------ - -Reported-by: Han Xu -Fixes: ad9b10d1eaada169 ("mtd: core: introduce of support for dynamic partitions") -Signed-off-by: Rafał Miłecki -Tested-by: Han Xu -Signed-off-by: Miquel Raynal -Link: https://lore.kernel.org/linux-mtd/20221018051822.28685-1-zajec5@gmail.com ---- - drivers/mtd/mtdcore.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/mtd/mtdcore.c -+++ b/drivers/mtd/mtdcore.c -@@ -580,7 +580,7 @@ static void mtd_check_of_node(struct mtd - if (!mtd_is_partition(mtd)) - return; - parent = mtd->parent; -- 
parent_dn = dev_of_node(&parent->dev); -+ parent_dn = of_node_get(dev_of_node(&parent->dev)); - if (!parent_dn) - return; - diff --git a/target/linux/generic/backport-6.1/407-v5.17-mtd-parsers-qcom-Don-t-print-error-message-on-EPROBE.patch b/target/linux/generic/backport-6.1/407-v5.17-mtd-parsers-qcom-Don-t-print-error-message-on-EPROBE.patch deleted file mode 100644 index 0efad99157a6..000000000000 --- a/target/linux/generic/backport-6.1/407-v5.17-mtd-parsers-qcom-Don-t-print-error-message-on-EPROBE.patch +++ /dev/null @@ -1,32 +0,0 @@ -From 26bccc9671ba5e01f7153addbe94e7dc3f677375 Mon Sep 17 00:00:00 2001 -From: Bryan O'Donoghue -Date: Mon, 3 Jan 2022 03:03:16 +0000 -Subject: [PATCH 13/14] mtd: parsers: qcom: Don't print error message on - -EPROBE_DEFER - -Its possible for the main smem driver to not be loaded by the time we come -along to parse the smem partition description but, this is a perfectly -normal thing. - -No need to print out an error message in this case. - -Signed-off-by: Bryan O'Donoghue -Reviewed-by: Manivannan Sadhasivam -Signed-off-by: Miquel Raynal -Link: https://lore.kernel.org/linux-mtd/20220103030316.58301-3-bryan.odonoghue@linaro.org ---- - drivers/mtd/parsers/qcomsmempart.c | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - ---- a/drivers/mtd/parsers/qcomsmempart.c -+++ b/drivers/mtd/parsers/qcomsmempart.c -@@ -75,7 +75,8 @@ static int parse_qcomsmem_part(struct mt - pr_debug("Parsing partition table info from SMEM\n"); - ptable = qcom_smem_get(SMEM_APPS, SMEM_AARM_PARTITION_TABLE, &len); - if (IS_ERR(ptable)) { -- pr_err("Error reading partition table header\n"); -+ if (PTR_ERR(ptable) != -EPROBE_DEFER) -+ pr_err("Error reading partition table header\n"); - return PTR_ERR(ptable); - } - diff --git a/target/linux/generic/backport-6.1/410-v5.18-mtd-parsers-trx-allow-to-use-on-MediaTek-MIPS-SoCs.patch b/target/linux/generic/backport-6.1/410-v5.18-mtd-parsers-trx-allow-to-use-on-MediaTek-MIPS-SoCs.patch deleted file mode 100644 index 5c49841760f1..000000000000 --- a/target/linux/generic/backport-6.1/410-v5.18-mtd-parsers-trx-allow-to-use-on-MediaTek-MIPS-SoCs.patch +++ /dev/null @@ -1,33 +0,0 @@ -From 2365f91c861cbfeef7141c69842848c7b2d3c2db Mon Sep 17 00:00:00 2001 -From: INAGAKI Hiroshi -Date: Sun, 13 Feb 2022 15:40:44 +0900 -Subject: [PATCH] mtd: parsers: trx: allow to use on MediaTek MIPS SoCs - -Buffalo sells some router devices which have trx-formatted firmware, -based on MediaTek MIPS SoCs. To use parser_trx on those devices, add -"RALINK" to dependency and allow to compile for MediaTek MIPS SoCs. - -examples: - -- WCR-1166DS (MT7628) -- WSR-1166DHP (MT7621) -- WSR-2533DHP (MT7621) - -Signed-off-by: INAGAKI Hiroshi -Signed-off-by: Miquel Raynal -Link: https://lore.kernel.org/linux-mtd/20220213064045.1781-1-musashino.open@gmail.com ---- - drivers/mtd/parsers/Kconfig | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/mtd/parsers/Kconfig -+++ b/drivers/mtd/parsers/Kconfig -@@ -115,7 +115,7 @@ config MTD_AFS_PARTS - - config MTD_PARSER_TRX - tristate "Parser for TRX format partitions" -- depends on MTD && (BCM47XX || ARCH_BCM_5301X || ARCH_MEDIATEK || COMPILE_TEST) -+ depends on MTD && (BCM47XX || ARCH_BCM_5301X || ARCH_MEDIATEK || RALINK || COMPILE_TEST) - help - TRX is a firmware format used by Broadcom on their devices. It - may contain up to 3/4 partitions (depending on the version). 
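As a consumer-side illustration of the of_get_mtd_device_by_node() helper introduced by the patch dropped a little further up, the sketch below resolves an MTD device from a device-tree phandle and reads from it. The property name "example,mtd" and the fixed read offset are made up for this example; only the MTD calls themselves come from the API shown in that patch.

/*
 * Sketch of a driver that resolves an MTD device from a DT phandle using
 * of_get_mtd_device_by_node(). The "example,mtd" property and the read
 * parameters are hypothetical.
 */
#include <linux/err.h>
#include <linux/mtd/mtd.h>
#include <linux/of.h>

static int example_read_from_mtd(struct device_node *np, u8 *buf, size_t len)
{
	struct device_node *mtd_np;
	struct mtd_info *mtd;
	size_t retlen;
	int ret;

	mtd_np = of_parse_phandle(np, "example,mtd", 0);
	if (!mtd_np)
		return -ENOENT;

	mtd = of_get_mtd_device_by_node(mtd_np);
	of_node_put(mtd_np);
	if (IS_ERR(mtd))
		return PTR_ERR(mtd);	/* may be -EPROBE_DEFER */

	ret = mtd_read(mtd, 0, len, &retlen, buf);
	put_mtd_device(mtd);

	return ret;
}

Propagating the raw error is deliberate: of_get_mtd_device_by_node() hands back -EPROBE_DEFER when the MTD has not been registered yet, so a probe function can simply return it and be retried later.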
diff --git a/target/linux/generic/backport-6.1/420-v5.19-02-mtd-spinand-gigadevice-add-support-for-GD5FxGQ4xExxG.patch b/target/linux/generic/backport-6.1/420-v5.19-02-mtd-spinand-gigadevice-add-support-for-GD5FxGQ4xExxG.patch deleted file mode 100644 index 181c912fbfcf..000000000000 --- a/target/linux/generic/backport-6.1/420-v5.19-02-mtd-spinand-gigadevice-add-support-for-GD5FxGQ4xExxG.patch +++ /dev/null @@ -1,58 +0,0 @@ -From 573eec222bc82fb5e724586267fbbb1aed9ffd03 Mon Sep 17 00:00:00 2001 -From: Chuanhong Guo -Date: Sun, 20 Mar 2022 17:59:58 +0800 -Subject: [PATCH 2/5] mtd: spinand: gigadevice: add support for GD5FxGQ4xExxG - -Add support for: - GD5F1GQ4RExxG - GD5F2GQ4{U,R}ExxG - -These chips differ from GD5F1GQ4UExxG only in chip ID, voltage -and capacity. - -Signed-off-by: Chuanhong Guo -Signed-off-by: Miquel Raynal -Link: https://lore.kernel.org/linux-mtd/20220320100001.247905-3-gch981213@gmail.com ---- - drivers/mtd/nand/spi/gigadevice.c | 30 ++++++++++++++++++++++++++++++ - 1 file changed, 30 insertions(+) - ---- a/drivers/mtd/nand/spi/gigadevice.c -+++ b/drivers/mtd/nand/spi/gigadevice.c -@@ -333,6 +333,36 @@ static const struct spinand_info gigadev - SPINAND_HAS_QE_BIT, - SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout, - gd5fxgq4uexxg_ecc_get_status)), -+ SPINAND_INFO("GD5F1GQ4RExxG", -+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xc1), -+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), -+ NAND_ECCREQ(8, 512), -+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants, -+ &write_cache_variants, -+ &update_cache_variants), -+ SPINAND_HAS_QE_BIT, -+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout, -+ gd5fxgq4uexxg_ecc_get_status)), -+ SPINAND_INFO("GD5F2GQ4UExxG", -+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xd2), -+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), -+ NAND_ECCREQ(8, 512), -+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants, -+ &write_cache_variants, -+ &update_cache_variants), -+ SPINAND_HAS_QE_BIT, -+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout, -+ gd5fxgq4uexxg_ecc_get_status)), -+ SPINAND_INFO("GD5F2GQ4RExxG", -+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xc2), -+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), -+ NAND_ECCREQ(8, 512), -+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants, -+ &write_cache_variants, -+ &update_cache_variants), -+ SPINAND_HAS_QE_BIT, -+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout, -+ gd5fxgq4uexxg_ecc_get_status)), - SPINAND_INFO("GD5F1GQ4UFxxG", - SPINAND_ID(SPINAND_READID_METHOD_OPCODE, 0xb1, 0x48), - NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), diff --git a/target/linux/generic/backport-6.1/420-v5.19-03-mtd-spinand-gigadevice-add-support-for-GD5F1GQ5RExxG.patch b/target/linux/generic/backport-6.1/420-v5.19-03-mtd-spinand-gigadevice-add-support-for-GD5F1GQ5RExxG.patch deleted file mode 100644 index 3a1cc9efcfbc..000000000000 --- a/target/linux/generic/backport-6.1/420-v5.19-03-mtd-spinand-gigadevice-add-support-for-GD5F1GQ5RExxG.patch +++ /dev/null @@ -1,33 +0,0 @@ -From 620a988813403318023296b61228ee8f3fcdb8e0 Mon Sep 17 00:00:00 2001 -From: Chuanhong Guo -Date: Sun, 20 Mar 2022 17:59:59 +0800 -Subject: [PATCH 3/5] mtd: spinand: gigadevice: add support for GD5F1GQ5RExxG - -This chip is the 1.8v version of GD5F1GQ5UExxG. 
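The NAND_MEMORG() arguments in these SPINAND_INFO entries are positional, which makes the tables hard to read at a glance. Below is the GD5F1GQ4RExxG geometry from the hunk above with each argument annotated; the labels follow my reading of the macro's parameter order in include/linux/mtd/nand.h (bits per cell, page size, OOB size, pages per block, blocks per LUN, maximum bad blocks per LUN, planes, LUNs, targets), so treat them as a gloss rather than authoritative documentation.

/* GD5F1GQ4RExxG geometry, annotated (excerpt of the entry above). */
NAND_MEMORG(1,		/* 1 bit per cell (SLC) */
	    2048,	/* 2 KiB page */
	    128,	/* 128 bytes of OOB per page */
	    64,		/* 64 pages per erase block (128 KiB blocks) */
	    1024,	/* 1024 blocks per LUN -> 128 MiB, i.e. 1 Gbit */
	    20,		/* up to 20 bad blocks per LUN (manufacturer spec) */
	    1,		/* 1 plane per LUN */
	    1,		/* 1 LUN per target */
	    1)		/* 1 target */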
- -Signed-off-by: Chuanhong Guo -Signed-off-by: Miquel Raynal -Link: https://lore.kernel.org/linux-mtd/20220320100001.247905-4-gch981213@gmail.com ---- - drivers/mtd/nand/spi/gigadevice.c | 10 ++++++++++ - 1 file changed, 10 insertions(+) - ---- a/drivers/mtd/nand/spi/gigadevice.c -+++ b/drivers/mtd/nand/spi/gigadevice.c -@@ -383,6 +383,16 @@ static const struct spinand_info gigadev - SPINAND_HAS_QE_BIT, - SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout, - gd5fxgq5xexxg_ecc_get_status)), -+ SPINAND_INFO("GD5F1GQ5RExxG", -+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x41), -+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), -+ NAND_ECCREQ(4, 512), -+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5, -+ &write_cache_variants, -+ &update_cache_variants), -+ SPINAND_HAS_QE_BIT, -+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout, -+ gd5fxgq5xexxg_ecc_get_status)), - }; - - static const struct spinand_manufacturer_ops gigadevice_spinand_manuf_ops = { diff --git a/target/linux/generic/backport-6.1/420-v5.19-04-mtd-spinand-gigadevice-add-support-for-GD5F-2-4-GQ5x.patch b/target/linux/generic/backport-6.1/420-v5.19-04-mtd-spinand-gigadevice-add-support-for-GD5F-2-4-GQ5x.patch deleted file mode 100644 index cee9d9db3eb4..000000000000 --- a/target/linux/generic/backport-6.1/420-v5.19-04-mtd-spinand-gigadevice-add-support-for-GD5F-2-4-GQ5x.patch +++ /dev/null @@ -1,84 +0,0 @@ -From 194ec04b3a9e7fa97d1fbef296410631bc3cf1c8 Mon Sep 17 00:00:00 2001 -From: Chuanhong Guo -Date: Sun, 20 Mar 2022 18:00:00 +0800 -Subject: [PATCH 4/5] mtd: spinand: gigadevice: add support for GD5F{2, - 4}GQ5xExxG - -Add support for: - GD5F2GQ5{U,R}ExxG - GD5F4GQ6{U,R}ExxG - -These chips uses 4 dummy bytes for quad io and 2 dummy bytes for dual io. -Besides that and memory layout, they are identical to their 1G variant. 
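The dummy-byte difference called out in this commit message is encoded in the new read_cache_variants_2gq5 op list added below: in each SPINAND_PAGE_READ_FROM_CACHE_*_OP() entry, the argument just before the NULL buffer pointer is the dummy-byte count (this is my reading of the macros in include/linux/mtd/spinand.h, not something stated in the patch). An annotated copy for reference:

/* read_cache_variants_2gq5 from the hunk below, with dummy bytes annotated. */
static SPINAND_OP_VARIANTS(read_cache_variants_2gq5,
	SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 4, NULL, 0),	 /* x4 I/O, 4 dummy bytes */
	SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),	 /* x4 data out, 1 dummy byte */
	SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 2, NULL, 0),	 /* x2 I/O, 2 dummy bytes */
	SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),	 /* x2 data out, 1 dummy byte */
	SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),	 /* fast x1 read, 1 dummy byte */
	SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); /* plain x1 read, 1 dummy byte */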
- -Signed-off-by: Chuanhong Guo -Signed-off-by: Miquel Raynal -Link: https://lore.kernel.org/linux-mtd/20220320100001.247905-5-gch981213@gmail.com ---- - drivers/mtd/nand/spi/gigadevice.c | 48 +++++++++++++++++++++++++++++++ - 1 file changed, 48 insertions(+) - ---- a/drivers/mtd/nand/spi/gigadevice.c -+++ b/drivers/mtd/nand/spi/gigadevice.c -@@ -47,6 +47,14 @@ static SPINAND_OP_VARIANTS(read_cache_va - SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); - -+static SPINAND_OP_VARIANTS(read_cache_variants_2gq5, -+ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 4, NULL, 0), -+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), -+ SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 2, NULL, 0), -+ SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), -+ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), -+ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); -+ - static SPINAND_OP_VARIANTS(write_cache_variants, - SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), - SPINAND_PROG_LOAD(true, 0, NULL, 0)); -@@ -391,6 +399,46 @@ static const struct spinand_info gigadev - &write_cache_variants, - &update_cache_variants), - SPINAND_HAS_QE_BIT, -+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout, -+ gd5fxgq5xexxg_ecc_get_status)), -+ SPINAND_INFO("GD5F2GQ5UExxG", -+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x52), -+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), -+ NAND_ECCREQ(4, 512), -+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_2gq5, -+ &write_cache_variants, -+ &update_cache_variants), -+ SPINAND_HAS_QE_BIT, -+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout, -+ gd5fxgq5xexxg_ecc_get_status)), -+ SPINAND_INFO("GD5F2GQ5RExxG", -+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x42), -+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), -+ NAND_ECCREQ(4, 512), -+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_2gq5, -+ &write_cache_variants, -+ &update_cache_variants), -+ SPINAND_HAS_QE_BIT, -+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout, -+ gd5fxgq5xexxg_ecc_get_status)), -+ SPINAND_INFO("GD5F4GQ6UExxG", -+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x55), -+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 2, 1), -+ NAND_ECCREQ(4, 512), -+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_2gq5, -+ &write_cache_variants, -+ &update_cache_variants), -+ SPINAND_HAS_QE_BIT, -+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout, -+ gd5fxgq5xexxg_ecc_get_status)), -+ SPINAND_INFO("GD5F4GQ6RExxG", -+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x45), -+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 2, 1), -+ NAND_ECCREQ(4, 512), -+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_2gq5, -+ &write_cache_variants, -+ &update_cache_variants), -+ SPINAND_HAS_QE_BIT, - SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout, - gd5fxgq5xexxg_ecc_get_status)), - }; diff --git a/target/linux/generic/backport-6.1/420-v5.19-05-mtd-spinand-gigadevice-add-support-for-GD5FxGM7xExxG.patch b/target/linux/generic/backport-6.1/420-v5.19-05-mtd-spinand-gigadevice-add-support-for-GD5FxGM7xExxG.patch deleted file mode 100644 index d63113e1a66f..000000000000 --- a/target/linux/generic/backport-6.1/420-v5.19-05-mtd-spinand-gigadevice-add-support-for-GD5FxGM7xExxG.patch +++ /dev/null @@ -1,91 +0,0 @@ -From 54647cd003c08b714474a5b599a147ec6a160486 Mon Sep 17 00:00:00 2001 -From: Chuanhong Guo -Date: Sun, 20 Mar 2022 18:00:01 +0800 -Subject: [PATCH 5/5] mtd: spinand: gigadevice: add support for GD5FxGM7xExxG - -Add support for: - GD5F{1,2}GM7{U,R}ExxG - GD5F4GM8{U,R}ExxG - -These are new 27nm counterparts for the 
GD5FxGQ4 chips from GigaDevice -with 8b/512b on-die ECC capability. -These chips (and currently supported GD5FxGQ5 chips) have QIO DTR -instruction for reading page cache. It isn't added in this patch because -I don't have a DTR spi controller for testing. - -Signed-off-by: Chuanhong Guo -Signed-off-by: Miquel Raynal -Link: https://lore.kernel.org/linux-mtd/20220320100001.247905-6-gch981213@gmail.com ---- - drivers/mtd/nand/spi/gigadevice.c | 60 +++++++++++++++++++++++++++++++ - 1 file changed, 60 insertions(+) - ---- a/drivers/mtd/nand/spi/gigadevice.c -+++ b/drivers/mtd/nand/spi/gigadevice.c -@@ -441,6 +441,66 @@ static const struct spinand_info gigadev - SPINAND_HAS_QE_BIT, - SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout, - gd5fxgq5xexxg_ecc_get_status)), -+ SPINAND_INFO("GD5F1GM7UExxG", -+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x91), -+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), -+ NAND_ECCREQ(8, 512), -+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5, -+ &write_cache_variants, -+ &update_cache_variants), -+ SPINAND_HAS_QE_BIT, -+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout, -+ gd5fxgq4uexxg_ecc_get_status)), -+ SPINAND_INFO("GD5F1GM7RExxG", -+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x81), -+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), -+ NAND_ECCREQ(8, 512), -+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5, -+ &write_cache_variants, -+ &update_cache_variants), -+ SPINAND_HAS_QE_BIT, -+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout, -+ gd5fxgq4uexxg_ecc_get_status)), -+ SPINAND_INFO("GD5F2GM7UExxG", -+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x92), -+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), -+ NAND_ECCREQ(8, 512), -+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5, -+ &write_cache_variants, -+ &update_cache_variants), -+ SPINAND_HAS_QE_BIT, -+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout, -+ gd5fxgq4uexxg_ecc_get_status)), -+ SPINAND_INFO("GD5F2GM7RExxG", -+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x82), -+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), -+ NAND_ECCREQ(8, 512), -+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5, -+ &write_cache_variants, -+ &update_cache_variants), -+ SPINAND_HAS_QE_BIT, -+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout, -+ gd5fxgq4uexxg_ecc_get_status)), -+ SPINAND_INFO("GD5F4GM8UExxG", -+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x95), -+ NAND_MEMORG(1, 2048, 128, 64, 4096, 80, 1, 1, 1), -+ NAND_ECCREQ(8, 512), -+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5, -+ &write_cache_variants, -+ &update_cache_variants), -+ SPINAND_HAS_QE_BIT, -+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout, -+ gd5fxgq4uexxg_ecc_get_status)), -+ SPINAND_INFO("GD5F4GM8RExxG", -+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x85), -+ NAND_MEMORG(1, 2048, 128, 64, 4096, 80, 1, 1, 1), -+ NAND_ECCREQ(8, 512), -+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5, -+ &write_cache_variants, -+ &update_cache_variants), -+ SPINAND_HAS_QE_BIT, -+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout, -+ gd5fxgq4uexxg_ecc_get_status)), - }; - - static const struct spinand_manufacturer_ops gigadevice_spinand_manuf_ops = { diff --git a/target/linux/generic/backport-6.1/422-v5.19-mtd-spi-nor-support-eon-en25qh256a.patch b/target/linux/generic/backport-6.1/422-v5.19-mtd-spi-nor-support-eon-en25qh256a.patch deleted file mode 100644 index 2358352e93c9..000000000000 --- a/target/linux/generic/backport-6.1/422-v5.19-mtd-spi-nor-support-eon-en25qh256a.patch +++ /dev/null @@ -1,49 +0,0 @@ -From 
6abef37d16d0c570ef5a149e63762fba2a30804b Mon Sep 17 00:00:00 2001 -From: "Leon M. George" -Date: Wed, 30 Mar 2022 16:16:56 +0200 -Subject: [PATCH] mtd: spi-nor: support eon en25qh256a variant - -The EN25QH256A variant of the EN25QH256 doesn't initialize correctly from SFDP -alone and only accesses memory below 8m (addr_width is 4 but read_opcode takes -only 3 bytes). - -Set SNOR_F_4B_OPCODES if the flash chip variant was detected using hwcaps. - -The fix submitted upstream uses the PARSE_SFDP initializer that is not -available in the kernel used with Openwrt. - -Signed-off-by: Leon M. George ---- - drivers/mtd/spi-nor/eon.c | 11 +++++++++++ - 1 file changed, 11 insertions(+) - ---- a/drivers/mtd/spi-nor/eon.c -+++ b/drivers/mtd/spi-nor/eon.c -@@ -8,6 +8,16 @@ - - #include "core.h" - -+static void en25qh256_post_sfdp_fixups(struct spi_nor *nor) -+{ -+ if (nor->params->hwcaps.mask & SNOR_HWCAPS_READ_1_1_4) -+ nor->flags |= SNOR_F_4B_OPCODES; -+} -+ -+static const struct spi_nor_fixups en25qh256_fixups = { -+ .post_sfdp = en25qh256_post_sfdp_fixups, -+}; -+ - static const struct flash_info eon_parts[] = { - /* EON -- en25xxx */ - { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) }, -@@ -23,7 +33,9 @@ static const struct flash_info eon_parts - { "en25qh64", INFO(0x1c7017, 0, 64 * 1024, 128, - SECT_4K | SPI_NOR_DUAL_READ) }, - { "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256, 0) }, -- { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) }, -+ { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, -+ SPI_NOR_DUAL_READ) -+ .fixups = &en25qh256_fixups }, - { "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128, SECT_4K) }, - }; - diff --git a/target/linux/generic/backport-6.1/423-v6.1-0001-mtd-track-maximum-number-of-bitflips-for-each-read-r.patch b/target/linux/generic/backport-6.1/423-v6.1-0001-mtd-track-maximum-number-of-bitflips-for-each-read-r.patch deleted file mode 100644 index 9f1757caa797..000000000000 --- a/target/linux/generic/backport-6.1/423-v6.1-0001-mtd-track-maximum-number-of-bitflips-for-each-read-r.patch +++ /dev/null @@ -1,73 +0,0 @@ -From e237285113963bd1dd2e925770aa8b3aa8a1894c Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Micha=C5=82=20K=C4=99pie=C5=84?= -Date: Wed, 29 Jun 2022 14:57:34 +0200 -Subject: [PATCH 1/4] mtd: track maximum number of bitflips for each read - request -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -mtd_read_oob() callers are currently oblivious to the details of ECC -errors detected during the read operation - they only learn (through the -return value) whether any corrected bitflips or uncorrectable errors -occurred. More detailed ECC information can be useful to user-space -applications for making better-informed choices about moving data -around. - -Extend struct mtd_oob_ops with a pointer to a newly-introduced struct -mtd_req_stats and set its 'max_bitflips' field to the maximum number of -bitflips found in a single ECC step during the read operation performed -by mtd_read_oob(). This is a prerequisite for ultimately passing that -value back to user space. 
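As a sketch of how an in-kernel caller would opt in to the new statistics: struct mtd_req_stats and the ops.stats pointer are the additions shown in the hunks below, while the caller itself (offset, length, buffer handling) is hypothetical.

/*
 * Hypothetical caller opting in to per-request read statistics. Only
 * struct mtd_req_stats and the ops.stats pointer come from the patch
 * below; the rest is invented.
 */
#include <linux/mtd/mtd.h>
#include <linux/printk.h>

static int example_read_with_stats(struct mtd_info *mtd, loff_t from,
				   u8 *buf, size_t len)
{
	struct mtd_req_stats stats;
	struct mtd_oob_ops ops = { };	/* zero-init so unused fields are clear */
	int ret;

	ops.mode = MTD_OPS_PLACE_OOB;
	ops.datbuf = buf;
	ops.len = len;
	ops.stats = &stats;

	ret = mtd_read_oob(mtd, from, &ops);

	/* max_bitflips is reported even when the read returns -EUCLEAN. */
	pr_info("read %zu bytes, worst ECC step had %u bitflips\n",
		ops.retlen, stats.max_bitflips);

	return ret;
}

Zero-initializing the whole mtd_oob_ops structure matters here, which is exactly what the follow-up patch in this series enforces tree-wide.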
- -Suggested-by: Boris Brezillon -Signed-off-by: Michał Kępień -Signed-off-by: Miquel Raynal -Link: https://lore.kernel.org/linux-mtd/20220629125737.14418-2-kernel@kempniu.pl ---- - drivers/mtd/mtdcore.c | 5 +++++ - include/linux/mtd/mtd.h | 5 +++++ - 2 files changed, 10 insertions(+) - ---- a/drivers/mtd/mtdcore.c -+++ b/drivers/mtd/mtdcore.c -@@ -1676,6 +1676,9 @@ int mtd_read_oob(struct mtd_info *mtd, l - if (!master->_read_oob && (!master->_read || ops->oobbuf)) - return -EOPNOTSUPP; - -+ if (ops->stats) -+ memset(ops->stats, 0, sizeof(*ops->stats)); -+ - if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) - ret_code = mtd_io_emulated_slc(mtd, from, true, ops); - else -@@ -1693,6 +1696,8 @@ int mtd_read_oob(struct mtd_info *mtd, l - return ret_code; - if (mtd->ecc_strength == 0) - return 0; /* device lacks ecc */ -+ if (ops->stats) -+ ops->stats->max_bitflips = ret_code; - return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0; - } - EXPORT_SYMBOL_GPL(mtd_read_oob); ---- a/include/linux/mtd/mtd.h -+++ b/include/linux/mtd/mtd.h -@@ -40,6 +40,10 @@ struct mtd_erase_region_info { - unsigned long *lockmap; /* If keeping bitmap of locks */ - }; - -+struct mtd_req_stats { -+ unsigned int max_bitflips; -+}; -+ - /** - * struct mtd_oob_ops - oob operation operands - * @mode: operation mode -@@ -70,6 +74,7 @@ struct mtd_oob_ops { - uint32_t ooboffs; - uint8_t *datbuf; - uint8_t *oobbuf; -+ struct mtd_req_stats *stats; - }; - - #define MTD_MAX_OOBFREE_ENTRIES_LARGE 32 diff --git a/target/linux/generic/backport-6.1/423-v6.1-0002-mtd-always-initialize-stats-in-struct-mtd_oob_ops.patch b/target/linux/generic/backport-6.1/423-v6.1-0002-mtd-always-initialize-stats-in-struct-mtd_oob_ops.patch deleted file mode 100644 index 1484624e4e51..000000000000 --- a/target/linux/generic/backport-6.1/423-v6.1-0002-mtd-always-initialize-stats-in-struct-mtd_oob_ops.patch +++ /dev/null @@ -1,325 +0,0 @@ -From e97709c9d18903f5acd5fbe2985dd054da0432b1 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Micha=C5=82=20K=C4=99pie=C5=84?= -Date: Wed, 29 Jun 2022 14:57:35 +0200 -Subject: [PATCH 2/4] mtd: always initialize 'stats' in struct mtd_oob_ops -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -As the 'stats' field in struct mtd_oob_ops is used in conditional -expressions, ensure it is always zero-initialized in all such structures -to prevent random stack garbage from being interpreted as a pointer. - -Strictly speaking, this problem currently only needs to be fixed for -struct mtd_oob_ops structures subsequently passed to mtd_read_oob(). -However, this commit goes a step further and makes all instances of -struct mtd_oob_ops in the tree zero-initialized, in hope of preventing -future problems, e.g. if struct mtd_req_stats gets extended with write -statistics at some point. 
- -Signed-off-by: Michał Kępień -Signed-off-by: Miquel Raynal -Link: https://lore.kernel.org/linux-mtd/20220629125737.14418-3-kernel@kempniu.pl ---- - drivers/mtd/inftlcore.c | 6 +++--- - drivers/mtd/mtdswap.c | 6 +++--- - drivers/mtd/nand/onenand/onenand_base.c | 4 ++-- - drivers/mtd/nand/onenand/onenand_bbt.c | 2 +- - drivers/mtd/nand/raw/nand_bbt.c | 8 ++++---- - drivers/mtd/nand/raw/sm_common.c | 2 +- - drivers/mtd/nftlcore.c | 6 +++--- - drivers/mtd/sm_ftl.c | 4 ++-- - drivers/mtd/ssfdc.c | 2 +- - drivers/mtd/tests/nandbiterrs.c | 2 +- - drivers/mtd/tests/oobtest.c | 8 ++++---- - drivers/mtd/tests/readtest.c | 2 +- - fs/jffs2/wbuf.c | 6 +++--- - 13 files changed, 29 insertions(+), 29 deletions(-) - ---- a/drivers/mtd/inftlcore.c -+++ b/drivers/mtd/inftlcore.c -@@ -136,7 +136,7 @@ static void inftl_remove_dev(struct mtd_ - int inftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len, - size_t *retlen, uint8_t *buf) - { -- struct mtd_oob_ops ops; -+ struct mtd_oob_ops ops = { }; - int res; - - ops.mode = MTD_OPS_PLACE_OOB; -@@ -156,7 +156,7 @@ int inftl_read_oob(struct mtd_info *mtd, - int inftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len, - size_t *retlen, uint8_t *buf) - { -- struct mtd_oob_ops ops; -+ struct mtd_oob_ops ops = { }; - int res; - - ops.mode = MTD_OPS_PLACE_OOB; -@@ -176,7 +176,7 @@ int inftl_write_oob(struct mtd_info *mtd - static int inftl_write(struct mtd_info *mtd, loff_t offs, size_t len, - size_t *retlen, uint8_t *buf, uint8_t *oob) - { -- struct mtd_oob_ops ops; -+ struct mtd_oob_ops ops = { }; - int res; - - ops.mode = MTD_OPS_PLACE_OOB; ---- a/drivers/mtd/mtdswap.c -+++ b/drivers/mtd/mtdswap.c -@@ -323,7 +323,7 @@ static int mtdswap_read_markers(struct m - struct mtdswap_oobdata *data, *data2; - int ret; - loff_t offset; -- struct mtd_oob_ops ops; -+ struct mtd_oob_ops ops = { }; - - offset = mtdswap_eb_offset(d, eb); - -@@ -370,7 +370,7 @@ static int mtdswap_write_marker(struct m - struct mtdswap_oobdata n; - int ret; - loff_t offset; -- struct mtd_oob_ops ops; -+ struct mtd_oob_ops ops = { }; - - ops.ooboffs = 0; - ops.oobbuf = (uint8_t *)&n; -@@ -879,7 +879,7 @@ static unsigned int mtdswap_eblk_passes( - loff_t base, pos; - unsigned int *p1 = (unsigned int *)d->page_buf; - unsigned char *p2 = (unsigned char *)d->oob_buf; -- struct mtd_oob_ops ops; -+ struct mtd_oob_ops ops = { }; - int ret; - - ops.mode = MTD_OPS_AUTO_OOB; ---- a/drivers/mtd/nand/onenand/onenand_base.c -+++ b/drivers/mtd/nand/onenand/onenand_base.c -@@ -2935,7 +2935,7 @@ static int do_otp_write(struct mtd_info - struct onenand_chip *this = mtd->priv; - unsigned char *pbuf = buf; - int ret; -- struct mtd_oob_ops ops; -+ struct mtd_oob_ops ops = { }; - - /* Force buffer page aligned */ - if (len < mtd->writesize) { -@@ -2977,7 +2977,7 @@ static int do_otp_lock(struct mtd_info * - size_t *retlen, u_char *buf) - { - struct onenand_chip *this = mtd->priv; -- struct mtd_oob_ops ops; -+ struct mtd_oob_ops ops = { }; - int ret; - - if (FLEXONENAND(this)) { ---- a/drivers/mtd/nand/onenand/onenand_bbt.c -+++ b/drivers/mtd/nand/onenand/onenand_bbt.c -@@ -61,7 +61,7 @@ static int create_bbt(struct mtd_info *m - int startblock; - loff_t from; - size_t readlen, ooblen; -- struct mtd_oob_ops ops; -+ struct mtd_oob_ops ops = { }; - int rgn; - - printk(KERN_INFO "Scanning device for bad blocks\n"); ---- a/drivers/mtd/nand/raw/nand_bbt.c -+++ b/drivers/mtd/nand/raw/nand_bbt.c -@@ -313,7 +313,7 @@ static int scan_read_oob(struct nand_chi - size_t len) - { - struct mtd_info *mtd = 
nand_to_mtd(this); -- struct mtd_oob_ops ops; -+ struct mtd_oob_ops ops = { }; - int res, ret = 0; - - ops.mode = MTD_OPS_PLACE_OOB; -@@ -354,7 +354,7 @@ static int scan_write_bbt(struct nand_ch - uint8_t *buf, uint8_t *oob) - { - struct mtd_info *mtd = nand_to_mtd(this); -- struct mtd_oob_ops ops; -+ struct mtd_oob_ops ops = { }; - - ops.mode = MTD_OPS_PLACE_OOB; - ops.ooboffs = 0; -@@ -416,7 +416,7 @@ static int scan_block_fast(struct nand_c - { - struct mtd_info *mtd = nand_to_mtd(this); - -- struct mtd_oob_ops ops; -+ struct mtd_oob_ops ops = { }; - int ret, page_offset; - - ops.ooblen = mtd->oobsize; -@@ -756,7 +756,7 @@ static int write_bbt(struct nand_chip *t - uint8_t rcode = td->reserved_block_code; - size_t retlen, len = 0; - loff_t to; -- struct mtd_oob_ops ops; -+ struct mtd_oob_ops ops = { }; - - ops.ooblen = mtd->oobsize; - ops.ooboffs = 0; ---- a/drivers/mtd/nand/raw/sm_common.c -+++ b/drivers/mtd/nand/raw/sm_common.c -@@ -99,7 +99,7 @@ static const struct mtd_ooblayout_ops oo - static int sm_block_markbad(struct nand_chip *chip, loff_t ofs) - { - struct mtd_info *mtd = nand_to_mtd(chip); -- struct mtd_oob_ops ops; -+ struct mtd_oob_ops ops = { }; - struct sm_oob oob; - int ret; - ---- a/drivers/mtd/nftlcore.c -+++ b/drivers/mtd/nftlcore.c -@@ -124,7 +124,7 @@ int nftl_read_oob(struct mtd_info *mtd, - size_t *retlen, uint8_t *buf) - { - loff_t mask = mtd->writesize - 1; -- struct mtd_oob_ops ops; -+ struct mtd_oob_ops ops = { }; - int res; - - ops.mode = MTD_OPS_PLACE_OOB; -@@ -145,7 +145,7 @@ int nftl_write_oob(struct mtd_info *mtd, - size_t *retlen, uint8_t *buf) - { - loff_t mask = mtd->writesize - 1; -- struct mtd_oob_ops ops; -+ struct mtd_oob_ops ops = { }; - int res; - - ops.mode = MTD_OPS_PLACE_OOB; -@@ -168,7 +168,7 @@ static int nftl_write(struct mtd_info *m - size_t *retlen, uint8_t *buf, uint8_t *oob) - { - loff_t mask = mtd->writesize - 1; -- struct mtd_oob_ops ops; -+ struct mtd_oob_ops ops = { }; - int res; - - ops.mode = MTD_OPS_PLACE_OOB; ---- a/drivers/mtd/sm_ftl.c -+++ b/drivers/mtd/sm_ftl.c -@@ -239,7 +239,7 @@ static int sm_read_sector(struct sm_ftl - uint8_t *buffer, struct sm_oob *oob) - { - struct mtd_info *mtd = ftl->trans->mtd; -- struct mtd_oob_ops ops; -+ struct mtd_oob_ops ops = { }; - struct sm_oob tmp_oob; - int ret = -EIO; - int try = 0; -@@ -323,7 +323,7 @@ static int sm_write_sector(struct sm_ftl - int zone, int block, int boffset, - uint8_t *buffer, struct sm_oob *oob) - { -- struct mtd_oob_ops ops; -+ struct mtd_oob_ops ops = { }; - struct mtd_info *mtd = ftl->trans->mtd; - int ret; - ---- a/drivers/mtd/ssfdc.c -+++ b/drivers/mtd/ssfdc.c -@@ -163,7 +163,7 @@ static int read_physical_sector(struct m - /* Read redundancy area (wrapper to MTD_READ_OOB */ - static int read_raw_oob(struct mtd_info *mtd, loff_t offs, uint8_t *buf) - { -- struct mtd_oob_ops ops; -+ struct mtd_oob_ops ops = { }; - int ret; - - ops.mode = MTD_OPS_RAW; ---- a/drivers/mtd/tests/nandbiterrs.c -+++ b/drivers/mtd/tests/nandbiterrs.c -@@ -99,7 +99,7 @@ static int write_page(int log) - static int rewrite_page(int log) - { - int err = 0; -- struct mtd_oob_ops ops; -+ struct mtd_oob_ops ops = { }; - - if (log) - pr_info("rewrite page\n"); ---- a/drivers/mtd/tests/oobtest.c -+++ b/drivers/mtd/tests/oobtest.c -@@ -56,7 +56,7 @@ static void do_vary_offset(void) - static int write_eraseblock(int ebnum) - { - int i; -- struct mtd_oob_ops ops; -+ struct mtd_oob_ops ops = { }; - int err = 0; - loff_t addr = (loff_t)ebnum * mtd->erasesize; - -@@ -165,7 +165,7 @@ static size_t 
memffshow(loff_t addr, lof - static int verify_eraseblock(int ebnum) - { - int i; -- struct mtd_oob_ops ops; -+ struct mtd_oob_ops ops = { }; - int err = 0; - loff_t addr = (loff_t)ebnum * mtd->erasesize; - size_t bitflips; -@@ -260,7 +260,7 @@ static int verify_eraseblock(int ebnum) - - static int verify_eraseblock_in_one_go(int ebnum) - { -- struct mtd_oob_ops ops; -+ struct mtd_oob_ops ops = { }; - int err = 0; - loff_t addr = (loff_t)ebnum * mtd->erasesize; - size_t len = mtd->oobavail * pgcnt; -@@ -338,7 +338,7 @@ static int __init mtd_oobtest_init(void) - int err = 0; - unsigned int i; - uint64_t tmp; -- struct mtd_oob_ops ops; -+ struct mtd_oob_ops ops = { }; - loff_t addr = 0, addr0; - - printk(KERN_INFO "\n"); ---- a/drivers/mtd/tests/readtest.c -+++ b/drivers/mtd/tests/readtest.c -@@ -47,7 +47,7 @@ static int read_eraseblock_by_page(int e - err = ret; - } - if (mtd->oobsize) { -- struct mtd_oob_ops ops; -+ struct mtd_oob_ops ops = { }; - - ops.mode = MTD_OPS_PLACE_OOB; - ops.len = 0; ---- a/fs/jffs2/wbuf.c -+++ b/fs/jffs2/wbuf.c -@@ -1035,7 +1035,7 @@ int jffs2_check_oob_empty(struct jffs2_s - { - int i, ret; - int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE); -- struct mtd_oob_ops ops; -+ struct mtd_oob_ops ops = { }; - - ops.mode = MTD_OPS_AUTO_OOB; - ops.ooblen = NR_OOB_SCAN_PAGES * c->oobavail; -@@ -1076,7 +1076,7 @@ int jffs2_check_oob_empty(struct jffs2_s - int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c, - struct jffs2_eraseblock *jeb) - { -- struct mtd_oob_ops ops; -+ struct mtd_oob_ops ops = { }; - int ret, cmlen = min_t(int, c->oobavail, OOB_CM_SIZE); - - ops.mode = MTD_OPS_AUTO_OOB; -@@ -1101,7 +1101,7 @@ int jffs2_write_nand_cleanmarker(struct - struct jffs2_eraseblock *jeb) - { - int ret; -- struct mtd_oob_ops ops; -+ struct mtd_oob_ops ops = { }; - int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE); - - ops.mode = MTD_OPS_AUTO_OOB; diff --git a/target/linux/generic/backport-6.1/423-v6.1-0003-mtd-add-ECC-error-accounting-for-each-read-request.patch b/target/linux/generic/backport-6.1/423-v6.1-0003-mtd-add-ECC-error-accounting-for-each-read-request.patch deleted file mode 100644 index f2f45eb7bc7a..000000000000 --- a/target/linux/generic/backport-6.1/423-v6.1-0003-mtd-add-ECC-error-accounting-for-each-read-request.patch +++ /dev/null @@ -1,172 +0,0 @@ -From 2ed18d818d1f7492172f8dd5904344c7d367e8ed Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Micha=C5=82=20K=C4=99pie=C5=84?= -Date: Wed, 29 Jun 2022 14:57:36 +0200 -Subject: [PATCH 3/4] mtd: add ECC error accounting for each read request -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Extend struct mtd_req_stats with two new fields holding the number of -corrected bitflips and uncorrectable errors detected during a read -operation. This is a prerequisite for ultimately passing those counters -to user space, where they can be useful to applications for making -better-informed choices about moving data around. - -Unlike 'max_bitflips' (which is set - in a common code path - to the -return value of a function called while the MTD device's mutex is held), -these counters have to be maintained in each MTD driver which defines -the '_read_oob' callback because the statistics need to be calculated -while the MTD device's mutex is held. 
- -Suggested-by: Boris Brezillon -Signed-off-by: Michał Kępień -Signed-off-by: Miquel Raynal -Link: https://lore.kernel.org/linux-mtd/20220629125737.14418-4-kernel@kempniu.pl ---- - drivers/mtd/devices/docg3.c | 8 ++++++++ - drivers/mtd/nand/onenand/onenand_base.c | 12 ++++++++++++ - drivers/mtd/nand/raw/nand_base.c | 10 ++++++++++ - drivers/mtd/nand/spi/core.c | 10 ++++++++++ - include/linux/mtd/mtd.h | 2 ++ - 5 files changed, 42 insertions(+) - ---- a/drivers/mtd/devices/docg3.c -+++ b/drivers/mtd/devices/docg3.c -@@ -871,6 +871,7 @@ static int doc_read_oob(struct mtd_info - u8 *buf = ops->datbuf; - size_t len, ooblen, nbdata, nboob; - u8 hwecc[DOC_ECC_BCH_SIZE], eccconf1; -+ struct mtd_ecc_stats old_stats; - int max_bitflips = 0; - - if (buf) -@@ -895,6 +896,7 @@ static int doc_read_oob(struct mtd_info - ret = 0; - skip = from % DOC_LAYOUT_PAGE_SIZE; - mutex_lock(&docg3->cascade->lock); -+ old_stats = mtd->ecc_stats; - while (ret >= 0 && (len > 0 || ooblen > 0)) { - calc_block_sector(from - skip, &block0, &block1, &page, &ofs, - docg3->reliable); -@@ -966,6 +968,12 @@ static int doc_read_oob(struct mtd_info - } - - out: -+ if (ops->stats) { -+ ops->stats->uncorrectable_errors += -+ mtd->ecc_stats.failed - old_stats.failed; -+ ops->stats->corrected_bitflips += -+ mtd->ecc_stats.corrected - old_stats.corrected; -+ } - mutex_unlock(&docg3->cascade->lock); - return ret; - err_in_read: ---- a/drivers/mtd/nand/onenand/onenand_base.c -+++ b/drivers/mtd/nand/onenand/onenand_base.c -@@ -1440,6 +1440,7 @@ static int onenand_read_oob(struct mtd_i - struct mtd_oob_ops *ops) - { - struct onenand_chip *this = mtd->priv; -+ struct mtd_ecc_stats old_stats; - int ret; - - switch (ops->mode) { -@@ -1453,12 +1454,23 @@ static int onenand_read_oob(struct mtd_i - } - - onenand_get_device(mtd, FL_READING); -+ -+ old_stats = mtd->ecc_stats; -+ - if (ops->datbuf) - ret = ONENAND_IS_4KB_PAGE(this) ? 
- onenand_mlc_read_ops_nolock(mtd, from, ops) : - onenand_read_ops_nolock(mtd, from, ops); - else - ret = onenand_read_oob_nolock(mtd, from, ops); -+ -+ if (ops->stats) { -+ ops->stats->uncorrectable_errors += -+ mtd->ecc_stats.failed - old_stats.failed; -+ ops->stats->corrected_bitflips += -+ mtd->ecc_stats.corrected - old_stats.corrected; -+ } -+ - onenand_release_device(mtd); - - return ret; ---- a/drivers/mtd/nand/raw/nand_base.c -+++ b/drivers/mtd/nand/raw/nand_base.c -@@ -3815,6 +3815,7 @@ static int nand_read_oob(struct mtd_info - struct mtd_oob_ops *ops) - { - struct nand_chip *chip = mtd_to_nand(mtd); -+ struct mtd_ecc_stats old_stats; - int ret; - - ops->retlen = 0; -@@ -3826,11 +3827,20 @@ static int nand_read_oob(struct mtd_info - - nand_get_device(chip); - -+ old_stats = mtd->ecc_stats; -+ - if (!ops->datbuf) - ret = nand_do_read_oob(chip, from, ops); - else - ret = nand_do_read_ops(chip, from, ops); - -+ if (ops->stats) { -+ ops->stats->uncorrectable_errors += -+ mtd->ecc_stats.failed - old_stats.failed; -+ ops->stats->corrected_bitflips += -+ mtd->ecc_stats.corrected - old_stats.corrected; -+ } -+ - nand_release_device(chip); - return ret; - } ---- a/drivers/mtd/nand/spi/core.c -+++ b/drivers/mtd/nand/spi/core.c -@@ -629,6 +629,7 @@ static int spinand_mtd_read(struct mtd_i - { - struct spinand_device *spinand = mtd_to_spinand(mtd); - struct nand_device *nand = mtd_to_nanddev(mtd); -+ struct mtd_ecc_stats old_stats; - unsigned int max_bitflips = 0; - struct nand_io_iter iter; - bool disable_ecc = false; -@@ -640,6 +641,8 @@ static int spinand_mtd_read(struct mtd_i - - mutex_lock(&spinand->lock); - -+ old_stats = mtd->ecc_stats; -+ - nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) { - if (disable_ecc) - iter.req.mode = MTD_OPS_RAW; -@@ -662,6 +665,13 @@ static int spinand_mtd_read(struct mtd_i - ops->oobretlen += iter.req.ooblen; - } - -+ if (ops->stats) { -+ ops->stats->uncorrectable_errors += -+ mtd->ecc_stats.failed - old_stats.failed; -+ ops->stats->corrected_bitflips += -+ mtd->ecc_stats.corrected - old_stats.corrected; -+ } -+ - mutex_unlock(&spinand->lock); - - if (ecc_failed && !ret) ---- a/include/linux/mtd/mtd.h -+++ b/include/linux/mtd/mtd.h -@@ -41,6 +41,8 @@ struct mtd_erase_region_info { - }; - - struct mtd_req_stats { -+ unsigned int uncorrectable_errors; -+ unsigned int corrected_bitflips; - unsigned int max_bitflips; - }; - diff --git a/target/linux/generic/backport-6.1/423-v6.1-0004-mtdchar-add-MEMREAD-ioctl.patch b/target/linux/generic/backport-6.1/423-v6.1-0004-mtdchar-add-MEMREAD-ioctl.patch deleted file mode 100644 index 182e4f6ab568..000000000000 --- a/target/linux/generic/backport-6.1/423-v6.1-0004-mtdchar-add-MEMREAD-ioctl.patch +++ /dev/null @@ -1,321 +0,0 @@ -From 2c9745d36e04ac27161acd78514f647b9b587ad4 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Micha=C5=82=20K=C4=99pie=C5=84?= -Date: Wed, 29 Jun 2022 14:57:37 +0200 -Subject: [PATCH 4/4] mtdchar: add MEMREAD ioctl -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -User-space applications making use of MTD devices via /dev/mtd* -character devices currently have limited capabilities for reading data: - - - only deprecated methods of accessing OOB layout information exist, - - - there is no way to explicitly specify MTD operation mode to use; it - is auto-selected based on the MTD file mode (MTD_FILE_MODE_*) set - for the character device; in particular, this prevents using - MTD_OPS_AUTO_OOB for reads, - - - all existing user-space 
interfaces which cause mtd_read() or - mtd_read_oob() to be called (via mtdchar_read() and - mtdchar_read_oob(), respectively) return success even when those - functions return -EUCLEAN or -EBADMSG; this renders user-space - applications using these interfaces unaware of any corrected - bitflips or uncorrectable ECC errors detected during reads. - -Note that the existing MEMWRITE ioctl allows the MTD operation mode to -be explicitly set, allowing user-space applications to write page data -and OOB data without requiring them to know anything about the OOB -layout of the MTD device they are writing to (MTD_OPS_AUTO_OOB). Also, -the MEMWRITE ioctl does not mangle the return value of mtd_write_oob(). - -Add a new ioctl, MEMREAD, which addresses the above issues. It is -intended to be a read-side counterpart of the existing MEMWRITE ioctl. -Similarly to the latter, the read operation is performed in a loop which -processes at most mtd->erasesize bytes in each iteration. This is done -to prevent unbounded memory allocations caused by calling kmalloc() with -the 'size' argument taken directly from the struct mtd_read_req provided -by user space. However, the new ioctl is implemented so that the values -it returns match those that would have been returned if just a single -mtd_read_oob() call was issued to handle the entire read operation in -one go. - -Note that while just returning -EUCLEAN or -EBADMSG to user space would -already be a valid and useful indication of the ECC algorithm detecting -errors during a read operation, that signal would not be granular enough -to cover all use cases. For example, knowing the maximum number of -bitflips detected in a single ECC step during a read operation performed -on a given page may be useful when dealing with an MTD partition whose -ECC layout varies across pages (e.g. a partition consisting of a -bootloader area using a "custom" ECC layout followed by data pages using -a "standard" ECC layout). To address that, include ECC statistics in -the structure returned to user space by the new MEMREAD ioctl. 
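A hedged user-space sketch of driving the new ioctl follows; it assumes kernel headers that already carry this patch, uses /dev/mtd0 purely as an example node, and trims error handling:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <mtd/mtd-abi.h>	/* MEMREAD and struct mtd_read_req (with this patch) */

int main(void)
{
	uint8_t data[2048], oob[64];
	struct mtd_read_req req;
	int fd = open("/dev/mtd0", O_RDONLY);

	memset(&req, 0, sizeof(req));	/* also zeroes the padding field */
	req.start = 0;
	req.len = sizeof(data);
	req.ooblen = sizeof(oob);
	req.usr_data = (uintptr_t)data;
	req.usr_oob = (uintptr_t)oob;
	req.mode = MTD_OPS_AUTO_OOB;

	/* Corrected/uncorrectable errors surface as EUCLEAN/EBADMSG, but the
	 * request structure is still written back, so the statistics remain usable.
	 */
	if (ioctl(fd, MEMREAD, &req) < 0)
		perror("MEMREAD");

	printf("corrected=%u uncorrectable=%u max_bitflips=%u\n",
	       req.ecc_stats.corrected_bitflips,
	       req.ecc_stats.uncorrectable_errors,
	       req.ecc_stats.max_bitflips);
	return 0;
}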
- -Link: https://www.infradead.org/pipermail/linux-mtd/2016-April/067085.html - -Suggested-by: Boris Brezillon -Signed-off-by: Michał Kępień -Acked-by: Richard Weinberger -Signed-off-by: Miquel Raynal -Link: https://lore.kernel.org/linux-mtd/20220629125737.14418-5-kernel@kempniu.pl ---- - drivers/mtd/mtdchar.c | 139 +++++++++++++++++++++++++++++++++++++ - include/uapi/mtd/mtd-abi.h | 64 +++++++++++++++-- - 2 files changed, 198 insertions(+), 5 deletions(-) - ---- a/drivers/mtd/mtdchar.c -+++ b/drivers/mtd/mtdchar.c -@@ -621,6 +621,137 @@ static int mtdchar_write_ioctl(struct mt - return ret; - } - -+static int mtdchar_read_ioctl(struct mtd_info *mtd, -+ struct mtd_read_req __user *argp) -+{ -+ struct mtd_info *master = mtd_get_master(mtd); -+ struct mtd_read_req req; -+ void __user *usr_data, *usr_oob; -+ uint8_t *datbuf = NULL, *oobbuf = NULL; -+ size_t datbuf_len, oobbuf_len; -+ size_t orig_len, orig_ooblen; -+ int ret = 0; -+ -+ if (copy_from_user(&req, argp, sizeof(req))) -+ return -EFAULT; -+ -+ orig_len = req.len; -+ orig_ooblen = req.ooblen; -+ -+ usr_data = (void __user *)(uintptr_t)req.usr_data; -+ usr_oob = (void __user *)(uintptr_t)req.usr_oob; -+ -+ if (!master->_read_oob) -+ return -EOPNOTSUPP; -+ -+ if (!usr_data) -+ req.len = 0; -+ -+ if (!usr_oob) -+ req.ooblen = 0; -+ -+ req.ecc_stats.uncorrectable_errors = 0; -+ req.ecc_stats.corrected_bitflips = 0; -+ req.ecc_stats.max_bitflips = 0; -+ -+ req.len &= 0xffffffff; -+ req.ooblen &= 0xffffffff; -+ -+ if (req.start + req.len > mtd->size) { -+ ret = -EINVAL; -+ goto out; -+ } -+ -+ datbuf_len = min_t(size_t, req.len, mtd->erasesize); -+ if (datbuf_len > 0) { -+ datbuf = kvmalloc(datbuf_len, GFP_KERNEL); -+ if (!datbuf) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ } -+ -+ oobbuf_len = min_t(size_t, req.ooblen, mtd->erasesize); -+ if (oobbuf_len > 0) { -+ oobbuf = kvmalloc(oobbuf_len, GFP_KERNEL); -+ if (!oobbuf) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ } -+ -+ while (req.len > 0 || (!usr_data && req.ooblen > 0)) { -+ struct mtd_req_stats stats; -+ struct mtd_oob_ops ops = { -+ .mode = req.mode, -+ .len = min_t(size_t, req.len, datbuf_len), -+ .ooblen = min_t(size_t, req.ooblen, oobbuf_len), -+ .datbuf = datbuf, -+ .oobbuf = oobbuf, -+ .stats = &stats, -+ }; -+ -+ /* -+ * Shorten non-page-aligned, eraseblock-sized reads so that the -+ * read ends on an eraseblock boundary. This is necessary in -+ * order to prevent OOB data for some pages from being -+ * duplicated in the output of non-page-aligned reads requiring -+ * multiple mtd_read_oob() calls to be completed. -+ */ -+ if (ops.len == mtd->erasesize) -+ ops.len -= mtd_mod_by_ws(req.start + ops.len, mtd); -+ -+ ret = mtd_read_oob(mtd, (loff_t)req.start, &ops); -+ -+ req.ecc_stats.uncorrectable_errors += -+ stats.uncorrectable_errors; -+ req.ecc_stats.corrected_bitflips += stats.corrected_bitflips; -+ req.ecc_stats.max_bitflips = -+ max(req.ecc_stats.max_bitflips, stats.max_bitflips); -+ -+ if (ret && !mtd_is_bitflip_or_eccerr(ret)) -+ break; -+ -+ if (copy_to_user(usr_data, ops.datbuf, ops.retlen) || -+ copy_to_user(usr_oob, ops.oobbuf, ops.oobretlen)) { -+ ret = -EFAULT; -+ break; -+ } -+ -+ req.start += ops.retlen; -+ req.len -= ops.retlen; -+ usr_data += ops.retlen; -+ -+ req.ooblen -= ops.oobretlen; -+ usr_oob += ops.oobretlen; -+ } -+ -+ /* -+ * As multiple iterations of the above loop (and therefore multiple -+ * mtd_read_oob() calls) may be necessary to complete the read request, -+ * adjust the final return code to ensure it accounts for all detected -+ * ECC errors. 
-+ */ -+ if (!ret || mtd_is_bitflip(ret)) { -+ if (req.ecc_stats.uncorrectable_errors > 0) -+ ret = -EBADMSG; -+ else if (req.ecc_stats.corrected_bitflips > 0) -+ ret = -EUCLEAN; -+ } -+ -+out: -+ req.len = orig_len - req.len; -+ req.ooblen = orig_ooblen - req.ooblen; -+ -+ if (copy_to_user(argp, &req, sizeof(req))) -+ ret = -EFAULT; -+ -+ kvfree(datbuf); -+ kvfree(oobbuf); -+ -+ return ret; -+} -+ - static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg) - { - struct mtd_file_info *mfi = file->private_data; -@@ -643,6 +774,7 @@ static int mtdchar_ioctl(struct file *fi - case MEMGETINFO: - case MEMREADOOB: - case MEMREADOOB64: -+ case MEMREAD: - case MEMISLOCKED: - case MEMGETOOBSEL: - case MEMGETBADBLOCK: -@@ -817,6 +949,13 @@ static int mtdchar_ioctl(struct file *fi - break; - } - -+ case MEMREAD: -+ { -+ ret = mtdchar_read_ioctl(mtd, -+ (struct mtd_read_req __user *)arg); -+ break; -+ } -+ - case MEMLOCK: - { - struct erase_info_user einfo; ---- a/include/uapi/mtd/mtd-abi.h -+++ b/include/uapi/mtd/mtd-abi.h -@@ -55,9 +55,9 @@ struct mtd_oob_buf64 { - * @MTD_OPS_RAW: data are transferred as-is, with no error correction; - * this mode implies %MTD_OPS_PLACE_OOB - * -- * These modes can be passed to ioctl(MEMWRITE) and are also used internally. -- * See notes on "MTD file modes" for discussion on %MTD_OPS_RAW vs. -- * %MTD_FILE_MODE_RAW. -+ * These modes can be passed to ioctl(MEMWRITE) and ioctl(MEMREAD); they are -+ * also used internally. See notes on "MTD file modes" for discussion on -+ * %MTD_OPS_RAW vs. %MTD_FILE_MODE_RAW. - */ - enum { - MTD_OPS_PLACE_OOB = 0, -@@ -91,6 +91,53 @@ struct mtd_write_req { - __u8 padding[7]; - }; - -+/** -+ * struct mtd_read_req_ecc_stats - ECC statistics for a read operation -+ * -+ * @uncorrectable_errors: the number of uncorrectable errors that happened -+ * during the read operation -+ * @corrected_bitflips: the number of bitflips corrected during the read -+ * operation -+ * @max_bitflips: the maximum number of bitflips detected in any single ECC -+ * step for the data read during the operation; this information -+ * can be used to decide whether the data stored in a specific -+ * region of the MTD device should be moved somewhere else to -+ * avoid data loss. -+ */ -+struct mtd_read_req_ecc_stats { -+ __u32 uncorrectable_errors; -+ __u32 corrected_bitflips; -+ __u32 max_bitflips; -+}; -+ -+/** -+ * struct mtd_read_req - data structure for requesting a read operation -+ * -+ * @start: start address -+ * @len: length of data buffer (only lower 32 bits are used) -+ * @ooblen: length of OOB buffer (only lower 32 bits are used) -+ * @usr_data: user-provided data buffer -+ * @usr_oob: user-provided OOB buffer -+ * @mode: MTD mode (see "MTD operation modes") -+ * @padding: reserved, must be set to 0 -+ * @ecc_stats: ECC statistics for the read operation -+ * -+ * This structure supports ioctl(MEMREAD) operations, allowing data and/or OOB -+ * reads in various modes. To read from OOB-only, set @usr_data == NULL, and to -+ * read data-only, set @usr_oob == NULL. However, setting both @usr_data and -+ * @usr_oob to NULL is not allowed. 
-+ */ -+struct mtd_read_req { -+ __u64 start; -+ __u64 len; -+ __u64 ooblen; -+ __u64 usr_data; -+ __u64 usr_oob; -+ __u8 mode; -+ __u8 padding[7]; -+ struct mtd_read_req_ecc_stats ecc_stats; -+}; -+ - #define MTD_ABSENT 0 - #define MTD_RAM 1 - #define MTD_ROM 2 -@@ -207,6 +254,12 @@ struct otp_info { - #define MEMWRITE _IOWR('M', 24, struct mtd_write_req) - /* Erase a given range of user data (must be in mode %MTD_FILE_MODE_OTP_USER) */ - #define OTPERASE _IOW('M', 25, struct otp_info) -+/* -+ * Most generic read interface; can read in-band and/or out-of-band in various -+ * modes (see "struct mtd_read_req"). This ioctl is not supported for flashes -+ * without OOB, e.g., NOR flash. -+ */ -+#define MEMREAD _IOWR('M', 26, struct mtd_read_req) - - /* - * Obsolete legacy interface. Keep it in order not to break userspace -@@ -270,8 +323,9 @@ struct mtd_ecc_stats { - * Note: %MTD_FILE_MODE_RAW provides the same functionality as %MTD_OPS_RAW - - * raw access to the flash, without error correction or autoplacement schemes. - * Wherever possible, the MTD_OPS_* mode will override the MTD_FILE_MODE_* mode -- * (e.g., when using ioctl(MEMWRITE)), but in some cases, the MTD_FILE_MODE is -- * used out of necessity (e.g., `write()', ioctl(MEMWRITEOOB64)). -+ * (e.g., when using ioctl(MEMWRITE) or ioctl(MEMREAD)), but in some cases, the -+ * MTD_FILE_MODE is used out of necessity (e.g., `write()', -+ * ioctl(MEMWRITEOOB64)). - */ - enum mtd_file_modes { - MTD_FILE_MODE_NORMAL = MTD_OTP_OFF, diff --git a/target/linux/generic/backport-6.1/600-v5.18-page_pool-Add-allocation-stats.patch b/target/linux/generic/backport-6.1/600-v5.18-page_pool-Add-allocation-stats.patch deleted file mode 100644 index 9e383de92cda..000000000000 --- a/target/linux/generic/backport-6.1/600-v5.18-page_pool-Add-allocation-stats.patch +++ /dev/null @@ -1,165 +0,0 @@ -From 8610037e8106b48c79cfe0afb92b2b2466e51c3d Mon Sep 17 00:00:00 2001 -From: Joe Damato -Date: Tue, 1 Mar 2022 23:55:47 -0800 -Subject: [PATCH] page_pool: Add allocation stats - -Add per-pool statistics counters for the allocation path of a page pool. -These stats are incremented in softirq context, so no locking or per-cpu -variables are needed. - -This code is disabled by default and a kernel config option is provided for -users who wish to enable them. - -The statistics added are: - - fast: successful fast path allocations - - slow: slow path order-0 allocations - - slow_high_order: slow path high order allocations - - empty: ptr ring is empty, so a slow path allocation was forced. - - refill: an allocation which triggered a refill of the cache - - waive: pages obtained from the ptr ring that cannot be added to - the cache due to a NUMA mismatch. - -Signed-off-by: Joe Damato -Acked-by: Jesper Dangaard Brouer -Reviewed-by: Ilias Apalodimas -Signed-off-by: David S. 
Miller ---- - include/net/page_pool.h | 18 ++++++++++++++++++ - net/Kconfig | 13 +++++++++++++ - net/core/page_pool.c | 24 ++++++++++++++++++++---- - 3 files changed, 51 insertions(+), 4 deletions(-) - ---- a/include/net/page_pool.h -+++ b/include/net/page_pool.h -@@ -82,6 +82,19 @@ struct page_pool_params { - unsigned int offset; /* DMA addr offset */ - }; - -+#ifdef CONFIG_PAGE_POOL_STATS -+struct page_pool_alloc_stats { -+ u64 fast; /* fast path allocations */ -+ u64 slow; /* slow-path order 0 allocations */ -+ u64 slow_high_order; /* slow-path high order allocations */ -+ u64 empty; /* failed refills due to empty ptr ring, forcing -+ * slow path allocation -+ */ -+ u64 refill; /* allocations via successful refill */ -+ u64 waive; /* failed refills due to numa zone mismatch */ -+}; -+#endif -+ - struct page_pool { - struct page_pool_params p; - -@@ -132,6 +145,11 @@ struct page_pool { - refcount_t user_cnt; - - u64 destroy_cnt; -+ -+#ifdef CONFIG_PAGE_POOL_STATS -+ /* these stats are incremented while in softirq context */ -+ struct page_pool_alloc_stats alloc_stats; -+#endif - }; - - struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp); ---- a/net/Kconfig -+++ b/net/Kconfig -@@ -434,6 +434,19 @@ config NET_DEVLINK - config PAGE_POOL - bool - -+config PAGE_POOL_STATS -+ default n -+ bool "Page pool stats" -+ depends on PAGE_POOL -+ help -+ Enable page pool statistics to track page allocation and recycling -+ in page pools. This option incurs additional CPU cost in allocation -+ and recycle paths and additional memory cost to store the statistics. -+ These statistics are only available if this option is enabled and if -+ the driver using the page pool supports exporting this data. -+ -+ If unsure, say N. -+ - config FAILOVER - tristate "Generic failover module" - help ---- a/net/core/page_pool.c -+++ b/net/core/page_pool.c -@@ -26,6 +26,13 @@ - - #define BIAS_MAX LONG_MAX - -+#ifdef CONFIG_PAGE_POOL_STATS -+/* alloc_stat_inc is intended to be used in softirq context */ -+#define alloc_stat_inc(pool, __stat) (pool->alloc_stats.__stat++) -+#else -+#define alloc_stat_inc(pool, __stat) -+#endif -+ - static int page_pool_init(struct page_pool *pool, - const struct page_pool_params *params) - { -@@ -117,8 +124,10 @@ static struct page *page_pool_refill_all - int pref_nid; /* preferred NUMA node */ - - /* Quicker fallback, avoid locks when ring is empty */ -- if (__ptr_ring_empty(r)) -+ if (__ptr_ring_empty(r)) { -+ alloc_stat_inc(pool, empty); - return NULL; -+ } - - /* Softirq guarantee CPU and thus NUMA node is stable. This, - * assumes CPU refilling driver RX-ring will also run RX-NAPI. -@@ -148,14 +157,17 @@ static struct page *page_pool_refill_all - * This limit stress on page buddy alloactor. 
- */ - page_pool_return_page(pool, page); -+ alloc_stat_inc(pool, waive); - page = NULL; - break; - } - } while (pool->alloc.count < PP_ALLOC_CACHE_REFILL); - - /* Return last page */ -- if (likely(pool->alloc.count > 0)) -+ if (likely(pool->alloc.count > 0)) { - page = pool->alloc.cache[--pool->alloc.count]; -+ alloc_stat_inc(pool, refill); -+ } - - spin_unlock(&r->consumer_lock); - return page; -@@ -170,6 +182,7 @@ static struct page *__page_pool_get_cach - if (likely(pool->alloc.count)) { - /* Fast-path */ - page = pool->alloc.cache[--pool->alloc.count]; -+ alloc_stat_inc(pool, fast); - } else { - page = page_pool_refill_alloc_cache(pool); - } -@@ -241,6 +254,7 @@ static struct page *__page_pool_alloc_pa - return NULL; - } - -+ alloc_stat_inc(pool, slow_high_order); - page_pool_set_pp_info(pool, page); - - /* Track how many pages are held 'in-flight' */ -@@ -295,10 +309,12 @@ static struct page *__page_pool_alloc_pa - } - - /* Return last page */ -- if (likely(pool->alloc.count > 0)) -+ if (likely(pool->alloc.count > 0)) { - page = pool->alloc.cache[--pool->alloc.count]; -- else -+ alloc_stat_inc(pool, slow); -+ } else { - page = NULL; -+ } - - /* When page just alloc'ed is should/must have refcnt 1. */ - return page; diff --git a/target/linux/generic/backport-6.1/601-v5.18-page_pool-Add-recycle-stats.patch b/target/linux/generic/backport-6.1/601-v5.18-page_pool-Add-recycle-stats.patch deleted file mode 100644 index fb11f0035f86..000000000000 --- a/target/linux/generic/backport-6.1/601-v5.18-page_pool-Add-recycle-stats.patch +++ /dev/null @@ -1,140 +0,0 @@ -From ad6fa1e1ab1b8164f1ba296b1b4dc556a483bcad Mon Sep 17 00:00:00 2001 -From: Joe Damato -Date: Tue, 1 Mar 2022 23:55:48 -0800 -Subject: [PATCH 2/3] page_pool: Add recycle stats - -Add per-cpu stats tracking page pool recycling events: - - cached: recycling placed page in the page pool cache - - cache_full: page pool cache was full - - ring: page placed into the ptr ring - - ring_full: page released from page pool because the ptr ring was full - - released_refcnt: page released (and not recycled) because refcnt > 1 - -Signed-off-by: Joe Damato -Acked-by: Jesper Dangaard Brouer -Reviewed-by: Ilias Apalodimas -Signed-off-by: David S. Miller ---- - include/net/page_pool.h | 16 ++++++++++++++++ - net/core/page_pool.c | 30 ++++++++++++++++++++++++++++-- - 2 files changed, 44 insertions(+), 2 deletions(-) - ---- a/include/net/page_pool.h -+++ b/include/net/page_pool.h -@@ -93,6 +93,18 @@ struct page_pool_alloc_stats { - u64 refill; /* allocations via successful refill */ - u64 waive; /* failed refills due to numa zone mismatch */ - }; -+ -+struct page_pool_recycle_stats { -+ u64 cached; /* recycling placed page in the cache. */ -+ u64 cache_full; /* cache was full */ -+ u64 ring; /* recycling placed page back into ptr ring */ -+ u64 ring_full; /* page was released from page-pool because -+ * PTR ring was full. 
-+ */ -+ u64 released_refcnt; /* page released because of elevated -+ * refcnt -+ */ -+}; - #endif - - struct page_pool { -@@ -136,6 +148,10 @@ struct page_pool { - */ - struct ptr_ring ring; - -+#ifdef CONFIG_PAGE_POOL_STATS -+ /* recycle stats are per-cpu to avoid locking */ -+ struct page_pool_recycle_stats __percpu *recycle_stats; -+#endif - atomic_t pages_state_release_cnt; - - /* A page_pool is strictly tied to a single RX-queue being ---- a/net/core/page_pool.c -+++ b/net/core/page_pool.c -@@ -29,8 +29,15 @@ - #ifdef CONFIG_PAGE_POOL_STATS - /* alloc_stat_inc is intended to be used in softirq context */ - #define alloc_stat_inc(pool, __stat) (pool->alloc_stats.__stat++) -+/* recycle_stat_inc is safe to use when preemption is possible. */ -+#define recycle_stat_inc(pool, __stat) \ -+ do { \ -+ struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \ -+ this_cpu_inc(s->__stat); \ -+ } while (0) - #else - #define alloc_stat_inc(pool, __stat) -+#define recycle_stat_inc(pool, __stat) - #endif - - static int page_pool_init(struct page_pool *pool, -@@ -80,6 +87,12 @@ static int page_pool_init(struct page_po - pool->p.flags & PP_FLAG_PAGE_FRAG) - return -EINVAL; - -+#ifdef CONFIG_PAGE_POOL_STATS -+ pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats); -+ if (!pool->recycle_stats) -+ return -ENOMEM; -+#endif -+ - if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) - return -ENOMEM; - -@@ -412,7 +425,12 @@ static bool page_pool_recycle_in_ring(st - else - ret = ptr_ring_produce_bh(&pool->ring, page); - -- return (ret == 0) ? true : false; -+ if (!ret) { -+ recycle_stat_inc(pool, ring); -+ return true; -+ } -+ -+ return false; - } - - /* Only allow direct recycling in special circumstances, into the -@@ -423,11 +441,14 @@ static bool page_pool_recycle_in_ring(st - static bool page_pool_recycle_in_cache(struct page *page, - struct page_pool *pool) - { -- if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE)) -+ if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE)) { -+ recycle_stat_inc(pool, cache_full); - return false; -+ } - - /* Caller MUST have verified/know (page_ref_count(page) == 1) */ - pool->alloc.cache[pool->alloc.count++] = page; -+ recycle_stat_inc(pool, cached); - return true; - } - -@@ -482,6 +503,7 @@ __page_pool_put_page(struct page_pool *p - * doing refcnt based recycle tricks, meaning another process - * will be invoking put_page. 
- */ -+ recycle_stat_inc(pool, released_refcnt); - /* Do not replace this with page_pool_return_page() */ - page_pool_release_page(pool, page); - put_page(page); -@@ -495,6 +517,7 @@ void page_pool_put_page(struct page_pool - page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct); - if (page && !page_pool_recycle_in_ring(pool, page)) { - /* Cache full, fallback to free pages */ -+ recycle_stat_inc(pool, ring_full); - page_pool_return_page(pool, page); - } - } -@@ -641,6 +664,9 @@ static void page_pool_free(struct page_p - if (pool->p.flags & PP_FLAG_DMA_MAP) - put_device(pool->p.dev); - -+#ifdef CONFIG_PAGE_POOL_STATS -+ free_percpu(pool->recycle_stats); -+#endif - kfree(pool); - } - diff --git a/target/linux/generic/backport-6.1/602-v5.18-page_pool-Add-function-to-batch-and-return-stats.patch b/target/linux/generic/backport-6.1/602-v5.18-page_pool-Add-function-to-batch-and-return-stats.patch deleted file mode 100644 index 41188fb7f279..000000000000 --- a/target/linux/generic/backport-6.1/602-v5.18-page_pool-Add-function-to-batch-and-return-stats.patch +++ /dev/null @@ -1,77 +0,0 @@ -From 6b95e3388b1ea0ca63500c5a6e39162dbf828433 Mon Sep 17 00:00:00 2001 -From: Joe Damato -Date: Tue, 1 Mar 2022 23:55:49 -0800 -Subject: [PATCH 3/3] page_pool: Add function to batch and return stats - -Adds a function page_pool_get_stats which can be used by drivers to obtain -stats for a specified page_pool. - -Signed-off-by: Joe Damato -Acked-by: Jesper Dangaard Brouer -Reviewed-by: Ilias Apalodimas -Signed-off-by: David S. Miller ---- - include/net/page_pool.h | 17 +++++++++++++++++ - net/core/page_pool.c | 25 +++++++++++++++++++++++++ - 2 files changed, 42 insertions(+) - ---- a/include/net/page_pool.h -+++ b/include/net/page_pool.h -@@ -105,6 +105,23 @@ struct page_pool_recycle_stats { - * refcnt - */ - }; -+ -+/* This struct wraps the above stats structs so users of the -+ * page_pool_get_stats API can pass a single argument when requesting the -+ * stats for the page pool. -+ */ -+struct page_pool_stats { -+ struct page_pool_alloc_stats alloc_stats; -+ struct page_pool_recycle_stats recycle_stats; -+}; -+ -+/* -+ * Drivers that wish to harvest page pool stats and report them to users -+ * (perhaps via ethtool, debugfs, or another mechanism) can allocate a -+ * struct page_pool_stats call page_pool_get_stats to get stats for the specified pool. 
-+ */ -+bool page_pool_get_stats(struct page_pool *pool, -+ struct page_pool_stats *stats); - #endif - - struct page_pool { ---- a/net/core/page_pool.c -+++ b/net/core/page_pool.c -@@ -35,6 +35,31 @@ - struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \ - this_cpu_inc(s->__stat); \ - } while (0) -+ -+bool page_pool_get_stats(struct page_pool *pool, -+ struct page_pool_stats *stats) -+{ -+ int cpu = 0; -+ -+ if (!stats) -+ return false; -+ -+ memcpy(&stats->alloc_stats, &pool->alloc_stats, sizeof(pool->alloc_stats)); -+ -+ for_each_possible_cpu(cpu) { -+ const struct page_pool_recycle_stats *pcpu = -+ per_cpu_ptr(pool->recycle_stats, cpu); -+ -+ stats->recycle_stats.cached += pcpu->cached; -+ stats->recycle_stats.cache_full += pcpu->cache_full; -+ stats->recycle_stats.ring += pcpu->ring; -+ stats->recycle_stats.ring_full += pcpu->ring_full; -+ stats->recycle_stats.released_refcnt += pcpu->released_refcnt; -+ } -+ -+ return true; -+} -+EXPORT_SYMBOL(page_pool_get_stats); - #else - #define alloc_stat_inc(pool, __stat) - #define recycle_stat_inc(pool, __stat) diff --git a/target/linux/generic/backport-6.1/603-v5.19-page_pool-Add-recycle-stats-to-page_pool_put_page_bu.patch b/target/linux/generic/backport-6.1/603-v5.19-page_pool-Add-recycle-stats-to-page_pool_put_page_bu.patch deleted file mode 100644 index 6ae3fb71331b..000000000000 --- a/target/linux/generic/backport-6.1/603-v5.19-page_pool-Add-recycle-stats-to-page_pool_put_page_bu.patch +++ /dev/null @@ -1,55 +0,0 @@ -From 590032a4d2133ecc10d3078a8db1d85a4842f12c Mon Sep 17 00:00:00 2001 -From: Lorenzo Bianconi -Date: Mon, 11 Apr 2022 16:05:26 +0200 -Subject: [PATCH] page_pool: Add recycle stats to page_pool_put_page_bulk - -Add missing recycle stats to page_pool_put_page_bulk routine. 
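Taken together, the allocation and recycle counters introduced above are typically harvested by a driver in a single call; a minimal sketch (requires CONFIG_PAGE_POOL_STATS=y, and "pool" stands for a driver-owned page pool instance):

#include <linux/printk.h>
#include <net/page_pool.h>

static void example_report_pp_stats(struct page_pool *pool)
{
	struct page_pool_stats stats = { };	/* caller initializes the stats */

	if (!page_pool_get_stats(pool, &stats))
		return;

	pr_info("pp: alloc fast=%llu slow=%llu recycle ring=%llu ring_full=%llu\n",
		(unsigned long long)stats.alloc_stats.fast,
		(unsigned long long)stats.alloc_stats.slow,
		(unsigned long long)stats.recycle_stats.ring,
		(unsigned long long)stats.recycle_stats.ring_full);
}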
- -Reviewed-by: Joe Damato -Signed-off-by: Lorenzo Bianconi -Reviewed-by: Ilias Apalodimas -Link: https://lore.kernel.org/r/3712178b51c007cfaed910ea80e68f00c916b1fa.1649685634.git.lorenzo@kernel.org -Signed-off-by: Paolo Abeni ---- - net/core/page_pool.c | 15 +++++++++++++-- - 1 file changed, 13 insertions(+), 2 deletions(-) - ---- a/net/core/page_pool.c -+++ b/net/core/page_pool.c -@@ -36,6 +36,12 @@ - this_cpu_inc(s->__stat); \ - } while (0) - -+#define recycle_stat_add(pool, __stat, val) \ -+ do { \ -+ struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \ -+ this_cpu_add(s->__stat, val); \ -+ } while (0) -+ - bool page_pool_get_stats(struct page_pool *pool, - struct page_pool_stats *stats) - { -@@ -63,6 +69,7 @@ EXPORT_SYMBOL(page_pool_get_stats); - #else - #define alloc_stat_inc(pool, __stat) - #define recycle_stat_inc(pool, __stat) -+#define recycle_stat_add(pool, __stat, val) - #endif - - static int page_pool_init(struct page_pool *pool, -@@ -569,9 +576,13 @@ void page_pool_put_page_bulk(struct page - /* Bulk producer into ptr_ring page_pool cache */ - page_pool_ring_lock(pool); - for (i = 0; i < bulk_len; i++) { -- if (__ptr_ring_produce(&pool->ring, data[i])) -- break; /* ring full */ -+ if (__ptr_ring_produce(&pool->ring, data[i])) { -+ /* ring full */ -+ recycle_stat_inc(pool, ring_full); -+ break; -+ } - } -+ recycle_stat_add(pool, ring, i); - page_pool_ring_unlock(pool); - - /* Hopefully all pages was return into ptr_ring */ diff --git a/target/linux/generic/backport-6.1/604-v5.19-net-page_pool-introduce-ethtool-stats.patch b/target/linux/generic/backport-6.1/604-v5.19-net-page_pool-introduce-ethtool-stats.patch deleted file mode 100644 index e5cf91ceeede..000000000000 --- a/target/linux/generic/backport-6.1/604-v5.19-net-page_pool-introduce-ethtool-stats.patch +++ /dev/null @@ -1,147 +0,0 @@ -From f3c5264f452a5b0ac1de1f2f657efbabdea3c76a Mon Sep 17 00:00:00 2001 -From: Lorenzo Bianconi -Date: Tue, 12 Apr 2022 18:31:58 +0200 -Subject: [PATCH] net: page_pool: introduce ethtool stats - -Introduce page_pool APIs to report stats through ethtool and reduce -duplicated code in each driver. - -Signed-off-by: Lorenzo Bianconi -Reviewed-by: Jakub Kicinski -Reviewed-by: Ilias Apalodimas -Signed-off-by: David S. 
Miller ---- - include/net/page_pool.h | 21 ++++++++++++++ - net/core/page_pool.c | 63 ++++++++++++++++++++++++++++++++++++++++- - 2 files changed, 83 insertions(+), 1 deletion(-) - ---- a/include/net/page_pool.h -+++ b/include/net/page_pool.h -@@ -115,6 +115,10 @@ struct page_pool_stats { - struct page_pool_recycle_stats recycle_stats; - }; - -+int page_pool_ethtool_stats_get_count(void); -+u8 *page_pool_ethtool_stats_get_strings(u8 *data); -+u64 *page_pool_ethtool_stats_get(u64 *data, void *stats); -+ - /* - * Drivers that wish to harvest page pool stats and report them to users - * (perhaps via ethtool, debugfs, or another mechanism) can allocate a -@@ -122,6 +126,23 @@ struct page_pool_stats { - */ - bool page_pool_get_stats(struct page_pool *pool, - struct page_pool_stats *stats); -+#else -+ -+static inline int page_pool_ethtool_stats_get_count(void) -+{ -+ return 0; -+} -+ -+static inline u8 *page_pool_ethtool_stats_get_strings(u8 *data) -+{ -+ return data; -+} -+ -+static inline u64 *page_pool_ethtool_stats_get(u64 *data, void *stats) -+{ -+ return data; -+} -+ - #endif - - struct page_pool { ---- a/net/core/page_pool.c -+++ b/net/core/page_pool.c -@@ -18,6 +18,7 @@ - #include - #include /* for __put_page() */ - #include -+#include - - #include - -@@ -42,6 +43,20 @@ - this_cpu_add(s->__stat, val); \ - } while (0) - -+static const char pp_stats[][ETH_GSTRING_LEN] = { -+ "rx_pp_alloc_fast", -+ "rx_pp_alloc_slow", -+ "rx_pp_alloc_slow_ho", -+ "rx_pp_alloc_empty", -+ "rx_pp_alloc_refill", -+ "rx_pp_alloc_waive", -+ "rx_pp_recycle_cached", -+ "rx_pp_recycle_cache_full", -+ "rx_pp_recycle_ring", -+ "rx_pp_recycle_ring_full", -+ "rx_pp_recycle_released_ref", -+}; -+ - bool page_pool_get_stats(struct page_pool *pool, - struct page_pool_stats *stats) - { -@@ -50,7 +65,13 @@ bool page_pool_get_stats(struct page_poo - if (!stats) - return false; - -- memcpy(&stats->alloc_stats, &pool->alloc_stats, sizeof(pool->alloc_stats)); -+ /* The caller is responsible to initialize stats. 
*/ -+ stats->alloc_stats.fast += pool->alloc_stats.fast; -+ stats->alloc_stats.slow += pool->alloc_stats.slow; -+ stats->alloc_stats.slow_high_order += pool->alloc_stats.slow_high_order; -+ stats->alloc_stats.empty += pool->alloc_stats.empty; -+ stats->alloc_stats.refill += pool->alloc_stats.refill; -+ stats->alloc_stats.waive += pool->alloc_stats.waive; - - for_each_possible_cpu(cpu) { - const struct page_pool_recycle_stats *pcpu = -@@ -66,6 +87,46 @@ bool page_pool_get_stats(struct page_poo - return true; - } - EXPORT_SYMBOL(page_pool_get_stats); -+ -+u8 *page_pool_ethtool_stats_get_strings(u8 *data) -+{ -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(pp_stats); i++) { -+ memcpy(data, pp_stats[i], ETH_GSTRING_LEN); -+ data += ETH_GSTRING_LEN; -+ } -+ -+ return data; -+} -+EXPORT_SYMBOL(page_pool_ethtool_stats_get_strings); -+ -+int page_pool_ethtool_stats_get_count(void) -+{ -+ return ARRAY_SIZE(pp_stats); -+} -+EXPORT_SYMBOL(page_pool_ethtool_stats_get_count); -+ -+u64 *page_pool_ethtool_stats_get(u64 *data, void *stats) -+{ -+ struct page_pool_stats *pool_stats = stats; -+ -+ *data++ = pool_stats->alloc_stats.fast; -+ *data++ = pool_stats->alloc_stats.slow; -+ *data++ = pool_stats->alloc_stats.slow_high_order; -+ *data++ = pool_stats->alloc_stats.empty; -+ *data++ = pool_stats->alloc_stats.refill; -+ *data++ = pool_stats->alloc_stats.waive; -+ *data++ = pool_stats->recycle_stats.cached; -+ *data++ = pool_stats->recycle_stats.cache_full; -+ *data++ = pool_stats->recycle_stats.ring; -+ *data++ = pool_stats->recycle_stats.ring_full; -+ *data++ = pool_stats->recycle_stats.released_refcnt; -+ -+ return data; -+} -+EXPORT_SYMBOL(page_pool_ethtool_stats_get); -+ - #else - #define alloc_stat_inc(pool, __stat) - #define recycle_stat_inc(pool, __stat) diff --git a/target/linux/generic/backport-6.1/605-v5.18-xdp-introduce-flags-field-in-xdp_buff-xdp_frame.patch b/target/linux/generic/backport-6.1/605-v5.18-xdp-introduce-flags-field-in-xdp_buff-xdp_frame.patch deleted file mode 100644 index 4a914404a2f9..000000000000 --- a/target/linux/generic/backport-6.1/605-v5.18-xdp-introduce-flags-field-in-xdp_buff-xdp_frame.patch +++ /dev/null @@ -1,99 +0,0 @@ -From 2e88d4ff03013937028f5397268b21e10cf68713 Mon Sep 17 00:00:00 2001 -From: Lorenzo Bianconi -Date: Fri, 21 Jan 2022 11:09:45 +0100 -Subject: [PATCH] xdp: introduce flags field in xdp_buff/xdp_frame - -Introduce flags field in xdp_frame and xdp_buffer data structures -to define additional buffer features. At the moment the only -supported buffer feature is frags bit (XDP_FLAGS_HAS_FRAGS). -frags bit is used to specify if this is a linear buffer -(XDP_FLAGS_HAS_FRAGS not set) or a frags frame (XDP_FLAGS_HAS_FRAGS -set). In the latter case the driver is expected to initialize the -skb_shared_info structure at the end of the first buffer to link together -subsequent buffers belonging to the same frame. 
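As a hedged illustration of how a driver RX path is expected to use the new flag, the sketch below marks a buffer as multi-buffer before appending a fragment to its tail skb_shared_info; the function and parameter names are hypothetical:

#include <linux/skbuff.h>
#include <net/xdp.h>

static void example_add_rx_frag(struct xdp_buff *xdp, struct page *page,
				unsigned int offset, unsigned int len)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
	skb_frag_t *frag;

	/* First fragment: reset the tail and flag the buff as non-linear */
	if (!xdp_buff_has_frags(xdp)) {
		sinfo->nr_frags = 0;
		xdp_buff_set_frags_flag(xdp);
	}

	frag = &sinfo->frags[sinfo->nr_frags++];
	__skb_frag_set_page(frag, page);
	skb_frag_off_set(frag, offset);
	skb_frag_size_set(frag, len);
}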
- -Acked-by: Toke Hoiland-Jorgensen -Acked-by: John Fastabend -Acked-by: Jesper Dangaard Brouer -Signed-off-by: Lorenzo Bianconi -Link: https://lore.kernel.org/r/e389f14f3a162c0a5bc6a2e1aa8dd01a90be117d.1642758637.git.lorenzo@kernel.org -Signed-off-by: Alexei Starovoitov ---- - include/net/xdp.h | 29 +++++++++++++++++++++++++++++ - 1 file changed, 29 insertions(+) - ---- a/include/net/xdp.h -+++ b/include/net/xdp.h -@@ -66,6 +66,10 @@ struct xdp_txq_info { - struct net_device *dev; - }; - -+enum xdp_buff_flags { -+ XDP_FLAGS_HAS_FRAGS = BIT(0), /* non-linear xdp buff */ -+}; -+ - struct xdp_buff { - void *data; - void *data_end; -@@ -74,13 +78,30 @@ struct xdp_buff { - struct xdp_rxq_info *rxq; - struct xdp_txq_info *txq; - u32 frame_sz; /* frame size to deduce data_hard_end/reserved tailroom*/ -+ u32 flags; /* supported values defined in xdp_buff_flags */ - }; - -+static __always_inline bool xdp_buff_has_frags(struct xdp_buff *xdp) -+{ -+ return !!(xdp->flags & XDP_FLAGS_HAS_FRAGS); -+} -+ -+static __always_inline void xdp_buff_set_frags_flag(struct xdp_buff *xdp) -+{ -+ xdp->flags |= XDP_FLAGS_HAS_FRAGS; -+} -+ -+static __always_inline void xdp_buff_clear_frags_flag(struct xdp_buff *xdp) -+{ -+ xdp->flags &= ~XDP_FLAGS_HAS_FRAGS; -+} -+ - static __always_inline void - xdp_init_buff(struct xdp_buff *xdp, u32 frame_sz, struct xdp_rxq_info *rxq) - { - xdp->frame_sz = frame_sz; - xdp->rxq = rxq; -+ xdp->flags = 0; - } - - static __always_inline void -@@ -122,8 +143,14 @@ struct xdp_frame { - */ - struct xdp_mem_info mem; - struct net_device *dev_rx; /* used by cpumap */ -+ u32 flags; /* supported values defined in xdp_buff_flags */ - }; - -+static __always_inline bool xdp_frame_has_frags(struct xdp_frame *frame) -+{ -+ return !!(frame->flags & XDP_FLAGS_HAS_FRAGS); -+} -+ - #define XDP_BULK_QUEUE_SIZE 16 - struct xdp_frame_bulk { - int count; -@@ -180,6 +207,7 @@ void xdp_convert_frame_to_buff(struct xd - xdp->data_end = frame->data + frame->len; - xdp->data_meta = frame->data - frame->metasize; - xdp->frame_sz = frame->frame_sz; -+ xdp->flags = frame->flags; - } - - static inline -@@ -206,6 +234,7 @@ int xdp_update_frame_from_buff(struct xd - xdp_frame->headroom = headroom - sizeof(*xdp_frame); - xdp_frame->metasize = metasize; - xdp_frame->frame_sz = xdp->frame_sz; -+ xdp_frame->flags = xdp->flags; - - return 0; - } diff --git a/target/linux/generic/backport-6.1/606-v5.18-xdp-add-frags-support-to-xdp_return_-buff-frame.patch b/target/linux/generic/backport-6.1/606-v5.18-xdp-add-frags-support-to-xdp_return_-buff-frame.patch deleted file mode 100644 index 86d24367dd69..000000000000 --- a/target/linux/generic/backport-6.1/606-v5.18-xdp-add-frags-support-to-xdp_return_-buff-frame.patch +++ /dev/null @@ -1,137 +0,0 @@ -From 7c48cb0176c6d6d3b55029f7ff4ffa05faee6446 Mon Sep 17 00:00:00 2001 -From: Lorenzo Bianconi -Date: Fri, 21 Jan 2022 11:09:50 +0100 -Subject: [PATCH] xdp: add frags support to xdp_return_{buff/frame} - -Take into account if the received xdp_buff/xdp_frame is non-linear -recycling/returning the frame memory to the allocator or into -xdp_frame_bulk. 
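A hedged sketch of what this buys a driver's TX-completion path: one (bulk) return call now also frees every fragment page of a multi-buffer frame. The ring structure and indexing are hypothetical:

#include <linux/rcupdate.h>
#include <net/xdp.h>

struct example_txq {			/* hypothetical driver TX ring */
	struct xdp_frame **pending;
	unsigned int to_clean, tail, mask;
};

static void example_clean_tx_ring(struct example_txq *txq)
{
	struct xdp_frame_bulk bq;

	xdp_frame_bulk_init(&bq);

	rcu_read_lock();	/* the bulk path looks up the mem id under RCU */
	while (txq->to_clean != txq->tail) {
		struct xdp_frame *xdpf = txq->pending[txq->to_clean];

		/* frags-aware: head page and all fragments are returned */
		xdp_return_frame_bulk(xdpf, &bq);
		txq->to_clean = (txq->to_clean + 1) & txq->mask;
	}
	xdp_flush_frame_bulk(&bq);
	rcu_read_unlock();
}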
- -Acked-by: Toke Hoiland-Jorgensen -Acked-by: John Fastabend -Signed-off-by: Lorenzo Bianconi -Link: https://lore.kernel.org/r/a961069febc868508ce1bdf5e53a343eb4e57cb2.1642758637.git.lorenzo@kernel.org -Signed-off-by: Alexei Starovoitov ---- - include/net/xdp.h | 18 ++++++++++++++-- - net/core/xdp.c | 54 ++++++++++++++++++++++++++++++++++++++++++++++- - 2 files changed, 69 insertions(+), 3 deletions(-) - ---- a/include/net/xdp.h -+++ b/include/net/xdp.h -@@ -275,10 +275,24 @@ void __xdp_release_frame(void *data, str - static inline void xdp_release_frame(struct xdp_frame *xdpf) - { - struct xdp_mem_info *mem = &xdpf->mem; -+ struct skb_shared_info *sinfo; -+ int i; - - /* Curr only page_pool needs this */ -- if (mem->type == MEM_TYPE_PAGE_POOL) -- __xdp_release_frame(xdpf->data, mem); -+ if (mem->type != MEM_TYPE_PAGE_POOL) -+ return; -+ -+ if (likely(!xdp_frame_has_frags(xdpf))) -+ goto out; -+ -+ sinfo = xdp_get_shared_info_from_frame(xdpf); -+ for (i = 0; i < sinfo->nr_frags; i++) { -+ struct page *page = skb_frag_page(&sinfo->frags[i]); -+ -+ __xdp_release_frame(page_address(page), mem); -+ } -+out: -+ __xdp_release_frame(xdpf->data, mem); - } - - int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq, ---- a/net/core/xdp.c -+++ b/net/core/xdp.c -@@ -376,12 +376,38 @@ static void __xdp_return(void *data, str - - void xdp_return_frame(struct xdp_frame *xdpf) - { -+ struct skb_shared_info *sinfo; -+ int i; -+ -+ if (likely(!xdp_frame_has_frags(xdpf))) -+ goto out; -+ -+ sinfo = xdp_get_shared_info_from_frame(xdpf); -+ for (i = 0; i < sinfo->nr_frags; i++) { -+ struct page *page = skb_frag_page(&sinfo->frags[i]); -+ -+ __xdp_return(page_address(page), &xdpf->mem, false, NULL); -+ } -+out: - __xdp_return(xdpf->data, &xdpf->mem, false, NULL); - } - EXPORT_SYMBOL_GPL(xdp_return_frame); - - void xdp_return_frame_rx_napi(struct xdp_frame *xdpf) - { -+ struct skb_shared_info *sinfo; -+ int i; -+ -+ if (likely(!xdp_frame_has_frags(xdpf))) -+ goto out; -+ -+ sinfo = xdp_get_shared_info_from_frame(xdpf); -+ for (i = 0; i < sinfo->nr_frags; i++) { -+ struct page *page = skb_frag_page(&sinfo->frags[i]); -+ -+ __xdp_return(page_address(page), &xdpf->mem, true, NULL); -+ } -+out: - __xdp_return(xdpf->data, &xdpf->mem, true, NULL); - } - EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi); -@@ -417,7 +443,7 @@ void xdp_return_frame_bulk(struct xdp_fr - struct xdp_mem_allocator *xa; - - if (mem->type != MEM_TYPE_PAGE_POOL) { -- __xdp_return(xdpf->data, &xdpf->mem, false, NULL); -+ xdp_return_frame(xdpf); - return; - } - -@@ -436,12 +462,38 @@ void xdp_return_frame_bulk(struct xdp_fr - bq->xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params); - } - -+ if (unlikely(xdp_frame_has_frags(xdpf))) { -+ struct skb_shared_info *sinfo; -+ int i; -+ -+ sinfo = xdp_get_shared_info_from_frame(xdpf); -+ for (i = 0; i < sinfo->nr_frags; i++) { -+ skb_frag_t *frag = &sinfo->frags[i]; -+ -+ bq->q[bq->count++] = skb_frag_address(frag); -+ if (bq->count == XDP_BULK_QUEUE_SIZE) -+ xdp_flush_frame_bulk(bq); -+ } -+ } - bq->q[bq->count++] = xdpf->data; - } - EXPORT_SYMBOL_GPL(xdp_return_frame_bulk); - - void xdp_return_buff(struct xdp_buff *xdp) - { -+ struct skb_shared_info *sinfo; -+ int i; -+ -+ if (likely(!xdp_buff_has_frags(xdp))) -+ goto out; -+ -+ sinfo = xdp_get_shared_info_from_buff(xdp); -+ for (i = 0; i < sinfo->nr_frags; i++) { -+ struct page *page = skb_frag_page(&sinfo->frags[i]); -+ -+ __xdp_return(page_address(page), &xdp->rxq->mem, true, xdp); -+ } -+out: - __xdp_return(xdp->data, &xdp->rxq->mem, true, xdp); - 
} - diff --git a/target/linux/generic/backport-6.1/607-v5.18-net-skbuff-add-size-metadata-to-skb_shared_info-for-.patch b/target/linux/generic/backport-6.1/607-v5.18-net-skbuff-add-size-metadata-to-skb_shared_info-for-.patch deleted file mode 100644 index 36f55d511ad9..000000000000 --- a/target/linux/generic/backport-6.1/607-v5.18-net-skbuff-add-size-metadata-to-skb_shared_info-for-.patch +++ /dev/null @@ -1,31 +0,0 @@ -From d16697cb6261d4cc23422e6b1cb2759df8aa76d0 Mon Sep 17 00:00:00 2001 -From: Lorenzo Bianconi -Date: Fri, 21 Jan 2022 11:09:44 +0100 -Subject: [PATCH] net: skbuff: add size metadata to skb_shared_info for xdp - -Introduce xdp_frags_size field in skb_shared_info data structure -to store xdp_buff/xdp_frame frame paged size (xdp_frags_size will -be used in xdp frags support). In order to not increase -skb_shared_info size we will use a hole due to skb_shared_info -alignment. - -Acked-by: Toke Hoiland-Jorgensen -Acked-by: John Fastabend -Acked-by: Jesper Dangaard Brouer -Signed-off-by: Lorenzo Bianconi -Link: https://lore.kernel.org/r/8a849819a3e0a143d540f78a3a5add76e17e980d.1642758637.git.lorenzo@kernel.org -Signed-off-by: Alexei Starovoitov ---- - include/linux/skbuff.h | 1 + - 1 file changed, 1 insertion(+) - ---- a/include/linux/skbuff.h -+++ b/include/linux/skbuff.h -@@ -568,6 +568,7 @@ struct skb_shared_info { - * Warning : all fields before dataref are cleared in __alloc_skb() - */ - atomic_t dataref; -+ unsigned int xdp_frags_size; - - /* Intermediate layers must ensure that destructor_arg - * remains valid until skb destructor */ diff --git a/target/linux/generic/backport-6.1/608-v5.18-net-veth-Account-total-xdp_frame-len-running-ndo_xdp.patch b/target/linux/generic/backport-6.1/608-v5.18-net-veth-Account-total-xdp_frame-len-running-ndo_xdp.patch deleted file mode 100644 index 3bdba8728cfa..000000000000 --- a/target/linux/generic/backport-6.1/608-v5.18-net-veth-Account-total-xdp_frame-len-running-ndo_xdp.patch +++ /dev/null @@ -1,65 +0,0 @@ -From 5142239a22219921a7863cf00c9ab853c00689d8 Mon Sep 17 00:00:00 2001 -From: Lorenzo Bianconi -Date: Fri, 11 Mar 2022 10:14:18 +0100 -Subject: [PATCH] net: veth: Account total xdp_frame len running ndo_xdp_xmit - -Even if this is a theoretical issue since it is not possible to perform -XDP_REDIRECT on a non-linear xdp_frame, veth driver does not account -paged area in ndo_xdp_xmit function pointer. -Introduce xdp_get_frame_len utility routine to get the xdp_frame full -length and account total frame size running XDP_REDIRECT of a -non-linear xdp frame into a veth device. 
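The helper is useful beyond veth; a hedged sketch of the kind of check it enables in an .ndo_xdp_xmit() implementation (the ring type, enqueue helper and length limit are hypothetical):

#include <net/xdp.h>

struct example_ring;					/* hypothetical */
int example_ring_enqueue(struct example_ring *ring,
			 struct xdp_frame *xdpf);	/* hypothetical */

static int example_xmit_one(struct example_ring *ring, struct xdp_frame *xdpf,
			    unsigned int max_len)
{
	/* Account head and fragment bytes, not just xdpf->len */
	if (xdp_get_frame_len(xdpf) > max_len)
		return -EINVAL;

	return example_ring_enqueue(ring, xdpf);
}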
- -Signed-off-by: Lorenzo Bianconi -Signed-off-by: Daniel Borkmann -Acked-by: Toke Hoiland-Jorgensen -Acked-by: John Fastabend -Link: https://lore.kernel.org/bpf/54f9fd3bb65d190daf2c0bbae2f852ff16cfbaa0.1646989407.git.lorenzo@kernel.org ---- - drivers/net/veth.c | 4 ++-- - include/net/xdp.h | 14 ++++++++++++++ - 2 files changed, 16 insertions(+), 2 deletions(-) - ---- a/drivers/net/veth.c -+++ b/drivers/net/veth.c -@@ -501,7 +501,7 @@ static int veth_xdp_xmit(struct net_devi - struct xdp_frame *frame = frames[i]; - void *ptr = veth_xdp_to_ptr(frame); - -- if (unlikely(frame->len > max_len || -+ if (unlikely(xdp_get_frame_len(frame) > max_len || - __ptr_ring_produce(&rq->xdp_ring, ptr))) - break; - nxmit++; -@@ -862,7 +862,7 @@ static int veth_xdp_rcv(struct veth_rq * - /* ndo_xdp_xmit */ - struct xdp_frame *frame = veth_ptr_to_xdp(ptr); - -- stats->xdp_bytes += frame->len; -+ stats->xdp_bytes += xdp_get_frame_len(frame); - frame = veth_xdp_rcv_one(rq, frame, bq, stats); - if (frame) { - /* XDP_PASS */ ---- a/include/net/xdp.h -+++ b/include/net/xdp.h -@@ -295,6 +295,20 @@ out: - __xdp_release_frame(xdpf->data, mem); - } - -+static __always_inline unsigned int xdp_get_frame_len(struct xdp_frame *xdpf) -+{ -+ struct skb_shared_info *sinfo; -+ unsigned int len = xdpf->len; -+ -+ if (likely(!xdp_frame_has_frags(xdpf))) -+ goto out; -+ -+ sinfo = xdp_get_shared_info_from_frame(xdpf); -+ len += sinfo->xdp_frags_size; -+out: -+ return len; -+} -+ - int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq, - struct net_device *dev, u32 queue_index, unsigned int napi_id); - void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq); diff --git a/target/linux/generic/backport-6.1/609-v5.18-veth-Allow-jumbo-frames-in-xdp-mode.patch b/target/linux/generic/backport-6.1/609-v5.18-veth-Allow-jumbo-frames-in-xdp-mode.patch deleted file mode 100644 index 31b44f117079..000000000000 --- a/target/linux/generic/backport-6.1/609-v5.18-veth-Allow-jumbo-frames-in-xdp-mode.patch +++ /dev/null @@ -1,40 +0,0 @@ -From 7cda76d858a4e71ac4a04066c093679a12e1312c Mon Sep 17 00:00:00 2001 -From: Lorenzo Bianconi -Date: Fri, 11 Mar 2022 10:14:20 +0100 -Subject: [PATCH] veth: Allow jumbo frames in xdp mode -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Allow increasing the MTU over page boundaries on veth devices -if the attached xdp program declares to support xdp fragments. - -Signed-off-by: Lorenzo Bianconi -Signed-off-by: Daniel Borkmann -Acked-by: Toke Høiland-Jørgensen -Acked-by: John Fastabend -Link: https://lore.kernel.org/bpf/d5dc039c3d4123426e7023a488c449181a7bc57f.1646989407.git.lorenzo@kernel.org ---- - drivers/net/veth.c | 11 ++++++++--- - 1 file changed, 8 insertions(+), 3 deletions(-) - ---- a/drivers/net/veth.c -+++ b/drivers/net/veth.c -@@ -1471,9 +1471,14 @@ static int veth_xdp_set(struct net_devic - goto err; - } - -- max_mtu = PAGE_SIZE - VETH_XDP_HEADROOM - -- peer->hard_header_len - -- SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); -+ max_mtu = SKB_WITH_OVERHEAD(PAGE_SIZE - VETH_XDP_HEADROOM) - -+ peer->hard_header_len; -+ /* Allow increasing the max_mtu if the program supports -+ * XDP fragments. 
-+ */ -+ //if (prog->aux->xdp_has_frags) -+ max_mtu += PAGE_SIZE * MAX_SKB_FRAGS; -+ - if (peer->mtu > max_mtu) { - NL_SET_ERR_MSG_MOD(extack, "Peer MTU is too large to set XDP"); - err = -ERANGE; diff --git a/target/linux/generic/backport-6.1/700-v5.17-net-dsa-introduce-tagger-owned-storage-for-private.patch b/target/linux/generic/backport-6.1/700-v5.17-net-dsa-introduce-tagger-owned-storage-for-private.patch deleted file mode 100644 index f2b651f0c613..000000000000 --- a/target/linux/generic/backport-6.1/700-v5.17-net-dsa-introduce-tagger-owned-storage-for-private.patch +++ /dev/null @@ -1,279 +0,0 @@ -From dc452a471dbae8aca8257c565174212620880093 Mon Sep 17 00:00:00 2001 -From: Vladimir Oltean -Date: Fri, 10 Dec 2021 01:34:37 +0200 -Subject: net: dsa: introduce tagger-owned storage for private and shared data - -Ansuel is working on register access over Ethernet for the qca8k switch -family. This requires the qca8k tagging protocol driver to receive -frames which aren't intended for the network stack, but instead for the -qca8k switch driver itself. - -The dp->priv is currently the prevailing method for passing data back -and forth between the tagging protocol driver and the switch driver. -However, this method is riddled with caveats. - -The DSA design allows in principle for any switch driver to return any -protocol it desires in ->get_tag_protocol(). The dsa_loop driver can be -modified to do just that. But in the current design, the memory behind -dp->priv has to be allocated by the switch driver, so if the tagging -protocol is paired to an unexpected switch driver, we may end up in NULL -pointer dereferences inside the kernel, or worse (a switch driver may -allocate dp->priv according to the expectations of a different tagger). - -The latter possibility is even more plausible considering that DSA -switches can dynamically change tagging protocols in certain cases -(dsa <-> edsa, ocelot <-> ocelot-8021q), and the current design lends -itself to mistakes that are all too easy to make. - -This patch proposes that the tagging protocol driver should manage its -own memory, instead of relying on the switch driver to do so. -After analyzing the different in-tree needs, it can be observed that the -required tagger storage is per switch, therefore a ds->tagger_data -pointer is introduced. In principle, per-port storage could also be -introduced, although there is no need for it at the moment. Future -changes will replace the current usage of dp->priv with ds->tagger_data. - -We define a "binding" event between the DSA switch tree and the tagging -protocol. During this binding event, the tagging protocol's ->connect() -method is called first, and this may allocate some memory for each -switch of the tree. Then a cross-chip notifier is emitted for the -switches within that tree, and they are given the opportunity to fix up -the tagger's memory (for example, they might set up some function -pointers that represent virtual methods for consuming packets). -Because the memory is owned by the tagger, there exists a ->disconnect() -method for the tagger (which is the place to free the resources), but -there doesn't exist a ->disconnect() method for the switch driver. -This is part of the design. The switch driver should make minimal use of -the public part of the tagger data, and only after type-checking it -using the supplied "proto" argument. - -In the code there are in fact two binding events, one is the initial -event in dsa_switch_setup_tag_protocol(). 
At this stage, the cross chip -notifier chains aren't initialized, so we call each switch's connect() -method by hand. Then there is dsa_tree_bind_tag_proto() during -dsa_tree_change_tag_proto(), and here we have an old protocol and a new -one. We first connect to the new one before disconnecting from the old -one, to simplify error handling a bit and to ensure we remain in a valid -state at all times. - -Co-developed-by: Ansuel Smith -Signed-off-by: Ansuel Smith -Signed-off-by: Vladimir Oltean -Signed-off-by: David S. Miller ---- - include/net/dsa.h | 12 +++++++++ - net/dsa/dsa2.c | 73 +++++++++++++++++++++++++++++++++++++++++++++++++++--- - net/dsa/dsa_priv.h | 1 + - net/dsa/switch.c | 14 +++++++++++ - 4 files changed, 96 insertions(+), 4 deletions(-) - ---- a/include/net/dsa.h -+++ b/include/net/dsa.h -@@ -80,12 +80,15 @@ enum dsa_tag_protocol { - }; - - struct dsa_switch; -+struct dsa_switch_tree; - - struct dsa_device_ops { - struct sk_buff *(*xmit)(struct sk_buff *skb, struct net_device *dev); - struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev); - void (*flow_dissect)(const struct sk_buff *skb, __be16 *proto, - int *offset); -+ int (*connect)(struct dsa_switch_tree *dst); -+ void (*disconnect)(struct dsa_switch_tree *dst); - unsigned int needed_headroom; - unsigned int needed_tailroom; - const char *name; -@@ -329,6 +332,8 @@ struct dsa_switch { - */ - void *priv; - -+ void *tagger_data; -+ - /* - * Configuration data for this switch. - */ -@@ -584,6 +589,13 @@ struct dsa_switch_ops { - enum dsa_tag_protocol mprot); - int (*change_tag_protocol)(struct dsa_switch *ds, int port, - enum dsa_tag_protocol proto); -+ /* -+ * Method for switch drivers to connect to the tagging protocol driver -+ * in current use. The switch driver can provide handlers for certain -+ * types of packets for switch management. 
-+ */ -+ int (*connect_tag_protocol)(struct dsa_switch *ds, -+ enum dsa_tag_protocol proto); - - /* Optional switch-wide initialization and destruction methods */ - int (*setup)(struct dsa_switch *ds); ---- a/net/dsa/dsa2.c -+++ b/net/dsa/dsa2.c -@@ -230,8 +230,12 @@ static struct dsa_switch_tree *dsa_tree_ - - static void dsa_tree_free(struct dsa_switch_tree *dst) - { -- if (dst->tag_ops) -+ if (dst->tag_ops) { -+ if (dst->tag_ops->disconnect) -+ dst->tag_ops->disconnect(dst); -+ - dsa_tag_driver_put(dst->tag_ops); -+ } - list_del(&dst->list); - kfree(dst); - } -@@ -805,7 +809,7 @@ static int dsa_switch_setup_tag_protocol - int port, err; - - if (tag_ops->proto == dst->default_proto) -- return 0; -+ goto connect; - - for (port = 0; port < ds->num_ports; port++) { - if (!dsa_is_cpu_port(ds, port)) -@@ -821,6 +825,17 @@ static int dsa_switch_setup_tag_protocol - } - } - -+connect: -+ if (ds->ops->connect_tag_protocol) { -+ err = ds->ops->connect_tag_protocol(ds, tag_ops->proto); -+ if (err) { -+ dev_err(ds->dev, -+ "Unable to connect to tag protocol \"%s\": %pe\n", -+ tag_ops->name, ERR_PTR(err)); -+ return err; -+ } -+ } -+ - return 0; - } - -@@ -1132,6 +1147,46 @@ static void dsa_tree_teardown(struct dsa - dst->setup = false; - } - -+static int dsa_tree_bind_tag_proto(struct dsa_switch_tree *dst, -+ const struct dsa_device_ops *tag_ops) -+{ -+ const struct dsa_device_ops *old_tag_ops = dst->tag_ops; -+ struct dsa_notifier_tag_proto_info info; -+ int err; -+ -+ dst->tag_ops = tag_ops; -+ -+ /* Notify the new tagger about the connection to this tree */ -+ if (tag_ops->connect) { -+ err = tag_ops->connect(dst); -+ if (err) -+ goto out_revert; -+ } -+ -+ /* Notify the switches from this tree about the connection -+ * to the new tagger -+ */ -+ info.tag_ops = tag_ops; -+ err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_CONNECT, &info); -+ if (err && err != -EOPNOTSUPP) -+ goto out_disconnect; -+ -+ /* Notify the old tagger about the disconnection from this tree */ -+ if (old_tag_ops->disconnect) -+ old_tag_ops->disconnect(dst); -+ -+ return 0; -+ -+out_disconnect: -+ /* Revert the new tagger's connection to this tree */ -+ if (tag_ops->disconnect) -+ tag_ops->disconnect(dst); -+out_revert: -+ dst->tag_ops = old_tag_ops; -+ -+ return err; -+} -+ - /* Since the dsa/tagging sysfs device attribute is per master, the assumption - * is that all DSA switches within a tree share the same tagger, otherwise - * they would have formed disjoint trees (different "dsa,member" values). -@@ -1164,12 +1219,15 @@ int dsa_tree_change_tag_proto(struct dsa - goto out_unlock; - } - -+ /* Notify the tag protocol change */ - info.tag_ops = tag_ops; - err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info); - if (err) -- goto out_unwind_tagger; -+ return err; - -- dst->tag_ops = tag_ops; -+ err = dsa_tree_bind_tag_proto(dst, tag_ops); -+ if (err) -+ goto out_unwind_tagger; - - rtnl_unlock(); - -@@ -1257,6 +1315,7 @@ static int dsa_port_parse_cpu(struct dsa - struct dsa_switch *ds = dp->ds; - struct dsa_switch_tree *dst = ds->dst; - enum dsa_tag_protocol default_proto; -+ int err; - - /* Find out which protocol the switch would prefer. 
*/ - default_proto = dsa_get_tag_protocol(dp, master); -@@ -1311,6 +1370,12 @@ static int dsa_port_parse_cpu(struct dsa - */ - dsa_tag_driver_put(tag_ops); - } else { -+ if (tag_ops->connect) { -+ err = tag_ops->connect(dst); -+ if (err) -+ return err; -+ } -+ - dst->tag_ops = tag_ops; - } - ---- a/net/dsa/dsa_priv.h -+++ b/net/dsa/dsa_priv.h -@@ -37,6 +37,7 @@ enum { - DSA_NOTIFIER_VLAN_DEL, - DSA_NOTIFIER_MTU, - DSA_NOTIFIER_TAG_PROTO, -+ DSA_NOTIFIER_TAG_PROTO_CONNECT, - DSA_NOTIFIER_MRP_ADD, - DSA_NOTIFIER_MRP_DEL, - DSA_NOTIFIER_MRP_ADD_RING_ROLE, ---- a/net/dsa/switch.c -+++ b/net/dsa/switch.c -@@ -616,6 +616,17 @@ static int dsa_switch_change_tag_proto(s - return 0; - } - -+static int dsa_switch_connect_tag_proto(struct dsa_switch *ds, -+ struct dsa_notifier_tag_proto_info *info) -+{ -+ const struct dsa_device_ops *tag_ops = info->tag_ops; -+ -+ if (!ds->ops->connect_tag_protocol) -+ return -EOPNOTSUPP; -+ -+ return ds->ops->connect_tag_protocol(ds, tag_ops->proto); -+} -+ - static int dsa_switch_mrp_add(struct dsa_switch *ds, - struct dsa_notifier_mrp_info *info) - { -@@ -735,6 +746,9 @@ static int dsa_switch_event(struct notif - case DSA_NOTIFIER_TAG_PROTO: - err = dsa_switch_change_tag_proto(ds, info); - break; -+ case DSA_NOTIFIER_TAG_PROTO_CONNECT: -+ err = dsa_switch_connect_tag_proto(ds, info); -+ break; - case DSA_NOTIFIER_MRP_ADD: - err = dsa_switch_mrp_add(ds, info); - break; diff --git a/target/linux/generic/backport-6.1/701-v5.17-dsa-make-tagging-protocols-connect-to-individual-switches.patch b/target/linux/generic/backport-6.1/701-v5.17-dsa-make-tagging-protocols-connect-to-individual-switches.patch deleted file mode 100644 index 0c50ae6fb9de..000000000000 --- a/target/linux/generic/backport-6.1/701-v5.17-dsa-make-tagging-protocols-connect-to-individual-switches.patch +++ /dev/null @@ -1,274 +0,0 @@ -From 7f2973149c22e7a6fee4c0c9fa6b8e4108e9c208 Mon Sep 17 00:00:00 2001 -From: Vladimir Oltean -Date: Tue, 14 Dec 2021 03:45:36 +0200 -Subject: net: dsa: make tagging protocols connect to individual switches from - a tree - -On the NXP Bluebox 3 board which uses a multi-switch setup with sja1105, -the mechanism through which the tagger connects to the switch tree is -broken, due to improper DSA code design. At the time when tag_ops->connect() -is called in dsa_port_parse_cpu(), DSA hasn't finished "touching" all -the ports, so it doesn't know how large the tree is and how many ports -it has. It has just seen the first CPU port by this time. As a result, -this function will call the tagger's ->connect method too early, and the -tagger will connect only to the first switch from the tree. - -This could be perhaps addressed a bit more simply by just moving the -tag_ops->connect(dst) call a bit later (for example in dsa_tree_setup), -but there is already a design inconsistency at present: on the switch -side, the notification is on a per-switch basis, but on the tagger side, -it is on a per-tree basis. Furthermore, the persistent storage itself is -per switch (ds->tagger_data). And the tagger connect and disconnect -procedures (at least the ones that exist currently) could see a fair bit -of simplification if they didn't have to iterate through the switches of -a tree. - -To fix the issue, this change transforms tag_ops->connect(dst) into -tag_ops->connect(ds) and moves it somewhere where we already iterate -over all switches of a tree. 
That is in dsa_switch_setup_tag_protocol(), -which is a good placement because we already have there the connection -call to the switch side of things. - -As for the dsa_tree_bind_tag_proto() method (called from the code path -that changes the tag protocol), things are a bit more complicated -because we receive the tree as argument, yet when we unwind on errors, -it would be nice to not call tag_ops->disconnect(ds) where we didn't -previously call tag_ops->connect(ds). We didn't have this problem before -because the tag_ops connection operations passed the entire dst before, -and this is more fine grained now. To solve the error rewind case using -the new API, we have to create yet one more cross-chip notifier for -disconnection, and stay connected with the old tag protocol to all the -switches in the tree until we've succeeded to connect with the new one -as well. So if something fails half way, the whole tree is still -connected to the old tagger. But there may still be leaks if the tagger -fails to connect to the 2nd out of 3 switches in a tree: somebody needs -to tell the tagger to disconnect from the first switch. Nothing comes -for free, and this was previously handled privately by the tagging -protocol driver before, but now we need to emit a disconnect cross-chip -notifier for that, because DSA has to take care of the unwind path. We -assume that the tagging protocol has connected to a switch if it has set -ds->tagger_data to something, otherwise we avoid calling its -disconnection method in the error rewind path. - -The rest of the changes are in the tagging protocol drivers, and have to -do with the replacement of dst with ds. The iteration is removed and the -error unwind path is simplified, as mentioned above. - -Signed-off-by: Vladimir Oltean -Signed-off-by: David S. 
Miller ---- - include/net/dsa.h | 5 ++-- - net/dsa/dsa2.c | 44 +++++++++++++----------------- - net/dsa/dsa_priv.h | 1 + - net/dsa/switch.c | 52 ++++++++++++++++++++++++++++++++--- - net/dsa/tag_ocelot_8021q.c | 53 +++++++++++------------------------- - net/dsa/tag_sja1105.c | 67 ++++++++++++++++------------------------------ - 6 files changed, 109 insertions(+), 113 deletions(-) - ---- a/include/net/dsa.h -+++ b/include/net/dsa.h -@@ -80,15 +80,14 @@ enum dsa_tag_protocol { - }; - - struct dsa_switch; --struct dsa_switch_tree; - - struct dsa_device_ops { - struct sk_buff *(*xmit)(struct sk_buff *skb, struct net_device *dev); - struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev); - void (*flow_dissect)(const struct sk_buff *skb, __be16 *proto, - int *offset); -- int (*connect)(struct dsa_switch_tree *dst); -- void (*disconnect)(struct dsa_switch_tree *dst); -+ int (*connect)(struct dsa_switch *ds); -+ void (*disconnect)(struct dsa_switch *ds); - unsigned int needed_headroom; - unsigned int needed_tailroom; - const char *name; ---- a/net/dsa/dsa2.c -+++ b/net/dsa/dsa2.c -@@ -230,12 +230,8 @@ static struct dsa_switch_tree *dsa_tree_ - - static void dsa_tree_free(struct dsa_switch_tree *dst) - { -- if (dst->tag_ops) { -- if (dst->tag_ops->disconnect) -- dst->tag_ops->disconnect(dst); -- -+ if (dst->tag_ops) - dsa_tag_driver_put(dst->tag_ops); -- } - list_del(&dst->list); - kfree(dst); - } -@@ -826,17 +822,29 @@ static int dsa_switch_setup_tag_protocol - } - - connect: -+ if (tag_ops->connect) { -+ err = tag_ops->connect(ds); -+ if (err) -+ return err; -+ } -+ - if (ds->ops->connect_tag_protocol) { - err = ds->ops->connect_tag_protocol(ds, tag_ops->proto); - if (err) { - dev_err(ds->dev, - "Unable to connect to tag protocol \"%s\": %pe\n", - tag_ops->name, ERR_PTR(err)); -- return err; -+ goto disconnect; - } - } - - return 0; -+ -+disconnect: -+ if (tag_ops->disconnect) -+ tag_ops->disconnect(ds); -+ -+ return err; - } - - static int dsa_switch_setup(struct dsa_switch *ds) -@@ -1156,13 +1164,6 @@ static int dsa_tree_bind_tag_proto(struc - - dst->tag_ops = tag_ops; - -- /* Notify the new tagger about the connection to this tree */ -- if (tag_ops->connect) { -- err = tag_ops->connect(dst); -- if (err) -- goto out_revert; -- } -- - /* Notify the switches from this tree about the connection - * to the new tagger - */ -@@ -1172,16 +1173,14 @@ static int dsa_tree_bind_tag_proto(struc - goto out_disconnect; - - /* Notify the old tagger about the disconnection from this tree */ -- if (old_tag_ops->disconnect) -- old_tag_ops->disconnect(dst); -+ info.tag_ops = old_tag_ops; -+ dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info); - - return 0; - - out_disconnect: -- /* Revert the new tagger's connection to this tree */ -- if (tag_ops->disconnect) -- tag_ops->disconnect(dst); --out_revert: -+ info.tag_ops = tag_ops; -+ dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info); - dst->tag_ops = old_tag_ops; - - return err; -@@ -1315,7 +1314,6 @@ static int dsa_port_parse_cpu(struct dsa - struct dsa_switch *ds = dp->ds; - struct dsa_switch_tree *dst = ds->dst; - enum dsa_tag_protocol default_proto; -- int err; - - /* Find out which protocol the switch would prefer. 
*/ - default_proto = dsa_get_tag_protocol(dp, master); -@@ -1370,12 +1368,6 @@ static int dsa_port_parse_cpu(struct dsa - */ - dsa_tag_driver_put(tag_ops); - } else { -- if (tag_ops->connect) { -- err = tag_ops->connect(dst); -- if (err) -- return err; -- } -- - dst->tag_ops = tag_ops; - } - ---- a/net/dsa/dsa_priv.h -+++ b/net/dsa/dsa_priv.h -@@ -38,6 +38,7 @@ enum { - DSA_NOTIFIER_MTU, - DSA_NOTIFIER_TAG_PROTO, - DSA_NOTIFIER_TAG_PROTO_CONNECT, -+ DSA_NOTIFIER_TAG_PROTO_DISCONNECT, - DSA_NOTIFIER_MRP_ADD, - DSA_NOTIFIER_MRP_DEL, - DSA_NOTIFIER_MRP_ADD_RING_ROLE, ---- a/net/dsa/switch.c -+++ b/net/dsa/switch.c -@@ -616,15 +616,58 @@ static int dsa_switch_change_tag_proto(s - return 0; - } - --static int dsa_switch_connect_tag_proto(struct dsa_switch *ds, -- struct dsa_notifier_tag_proto_info *info) -+/* We use the same cross-chip notifiers to inform both the tagger side, as well -+ * as the switch side, of connection and disconnection events. -+ * Since ds->tagger_data is owned by the tagger, it isn't a hard error if the -+ * switch side doesn't support connecting to this tagger, and therefore, the -+ * fact that we don't disconnect the tagger side doesn't constitute a memory -+ * leak: the tagger will still operate with persistent per-switch memory, just -+ * with the switch side unconnected to it. What does constitute a hard error is -+ * when the switch side supports connecting but fails. -+ */ -+static int -+dsa_switch_connect_tag_proto(struct dsa_switch *ds, -+ struct dsa_notifier_tag_proto_info *info) - { - const struct dsa_device_ops *tag_ops = info->tag_ops; -+ int err; -+ -+ /* Notify the new tagger about the connection to this switch */ -+ if (tag_ops->connect) { -+ err = tag_ops->connect(ds); -+ if (err) -+ return err; -+ } - - if (!ds->ops->connect_tag_protocol) - return -EOPNOTSUPP; - -- return ds->ops->connect_tag_protocol(ds, tag_ops->proto); -+ /* Notify the switch about the connection to the new tagger */ -+ err = ds->ops->connect_tag_protocol(ds, tag_ops->proto); -+ if (err) { -+ /* Revert the new tagger's connection to this tree */ -+ if (tag_ops->disconnect) -+ tag_ops->disconnect(ds); -+ return err; -+ } -+ -+ return 0; -+} -+ -+static int -+dsa_switch_disconnect_tag_proto(struct dsa_switch *ds, -+ struct dsa_notifier_tag_proto_info *info) -+{ -+ const struct dsa_device_ops *tag_ops = info->tag_ops; -+ -+ /* Notify the tagger about the disconnection from this switch */ -+ if (tag_ops->disconnect && ds->tagger_data) -+ tag_ops->disconnect(ds); -+ -+ /* No need to notify the switch, since it shouldn't have any -+ * resources to tear down -+ */ -+ return 0; - } - - static int dsa_switch_mrp_add(struct dsa_switch *ds, -@@ -749,6 +792,9 @@ static int dsa_switch_event(struct notif - case DSA_NOTIFIER_TAG_PROTO_CONNECT: - err = dsa_switch_connect_tag_proto(ds, info); - break; -+ case DSA_NOTIFIER_TAG_PROTO_DISCONNECT: -+ err = dsa_switch_disconnect_tag_proto(ds, info); -+ break; - case DSA_NOTIFIER_MRP_ADD: - err = dsa_switch_mrp_add(ds, info); - break; diff --git a/target/linux/generic/backport-6.1/702-v5.19-00-net-ethernet-mtk_eth_soc-add-support-for-coherent-DM.patch b/target/linux/generic/backport-6.1/702-v5.19-00-net-ethernet-mtk_eth_soc-add-support-for-coherent-DM.patch deleted file mode 100644 index c83d659d1c06..000000000000 --- a/target/linux/generic/backport-6.1/702-v5.19-00-net-ethernet-mtk_eth_soc-add-support-for-coherent-DM.patch +++ /dev/null @@ -1,327 +0,0 @@ -From: Felix Fietkau -Date: Sat, 5 Feb 2022 17:59:07 +0100 -Subject: [PATCH] net: ethernet: 
mtk_eth_soc: add support for coherent - DMA - -It improves performance by eliminating the need for a cache flush on rx and tx -In preparation for supporting WED (Wireless Ethernet Dispatch), also add a -function for disabling coherent DMA at runtime. - -Signed-off-by: Felix Fietkau ---- - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -9,6 +9,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -840,7 +841,7 @@ static int mtk_init_fq_dma(struct mtk_et - dma_addr_t dma_addr; - int i; - -- eth->scratch_ring = dma_alloc_coherent(eth->dev, -+ eth->scratch_ring = dma_alloc_coherent(eth->dma_dev, - cnt * sizeof(struct mtk_tx_dma), - ð->phy_scratch_ring, - GFP_ATOMIC); -@@ -852,10 +853,10 @@ static int mtk_init_fq_dma(struct mtk_et - if (unlikely(!eth->scratch_head)) - return -ENOMEM; - -- dma_addr = dma_map_single(eth->dev, -+ dma_addr = dma_map_single(eth->dma_dev, - eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE, - DMA_FROM_DEVICE); -- if (unlikely(dma_mapping_error(eth->dev, dma_addr))) -+ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) - return -ENOMEM; - - phy_ring_tail = eth->phy_scratch_ring + -@@ -909,26 +910,26 @@ static void mtk_tx_unmap(struct mtk_eth - { - if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { - if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) { -- dma_unmap_single(eth->dev, -+ dma_unmap_single(eth->dma_dev, - dma_unmap_addr(tx_buf, dma_addr0), - dma_unmap_len(tx_buf, dma_len0), - DMA_TO_DEVICE); - } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) { -- dma_unmap_page(eth->dev, -+ dma_unmap_page(eth->dma_dev, - dma_unmap_addr(tx_buf, dma_addr0), - dma_unmap_len(tx_buf, dma_len0), - DMA_TO_DEVICE); - } - } else { - if (dma_unmap_len(tx_buf, dma_len0)) { -- dma_unmap_page(eth->dev, -+ dma_unmap_page(eth->dma_dev, - dma_unmap_addr(tx_buf, dma_addr0), - dma_unmap_len(tx_buf, dma_len0), - DMA_TO_DEVICE); - } - - if (dma_unmap_len(tx_buf, dma_len1)) { -- dma_unmap_page(eth->dev, -+ dma_unmap_page(eth->dma_dev, - dma_unmap_addr(tx_buf, dma_addr1), - dma_unmap_len(tx_buf, dma_len1), - DMA_TO_DEVICE); -@@ -1006,9 +1007,9 @@ static int mtk_tx_map(struct sk_buff *sk - if (skb_vlan_tag_present(skb)) - txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb); - -- mapped_addr = dma_map_single(eth->dev, skb->data, -+ mapped_addr = dma_map_single(eth->dma_dev, skb->data, - skb_headlen(skb), DMA_TO_DEVICE); -- if (unlikely(dma_mapping_error(eth->dev, mapped_addr))) -+ if (unlikely(dma_mapping_error(eth->dma_dev, mapped_addr))) - return -ENOMEM; - - WRITE_ONCE(itxd->txd1, mapped_addr); -@@ -1047,10 +1048,10 @@ static int mtk_tx_map(struct sk_buff *sk - - - frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN); -- mapped_addr = skb_frag_dma_map(eth->dev, frag, offset, -+ mapped_addr = skb_frag_dma_map(eth->dma_dev, frag, offset, - frag_map_size, - DMA_TO_DEVICE); -- if (unlikely(dma_mapping_error(eth->dev, mapped_addr))) -+ if (unlikely(dma_mapping_error(eth->dma_dev, mapped_addr))) - goto err_dma; - - if (i == nr_frags - 1 && -@@ -1331,18 +1332,18 @@ static int mtk_poll_rx(struct napi_struc - netdev->stats.rx_dropped++; - goto release_desc; - } -- dma_addr = dma_map_single(eth->dev, -+ dma_addr = dma_map_single(eth->dma_dev, - new_data + NET_SKB_PAD + - eth->ip_align, - ring->buf_size, - DMA_FROM_DEVICE); -- if (unlikely(dma_mapping_error(eth->dev, dma_addr))) { -+ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) { - skb_free_frag(new_data); - netdev->stats.rx_dropped++; - goto release_desc; - } - -- 
dma_unmap_single(eth->dev, trxd.rxd1, -+ dma_unmap_single(eth->dma_dev, trxd.rxd1, - ring->buf_size, DMA_FROM_DEVICE); - - /* receive data */ -@@ -1615,7 +1616,7 @@ static int mtk_tx_alloc(struct mtk_eth * - if (!ring->buf) - goto no_tx_mem; - -- ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz, -+ ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz, - &ring->phys, GFP_ATOMIC); - if (!ring->dma) - goto no_tx_mem; -@@ -1633,7 +1634,7 @@ static int mtk_tx_alloc(struct mtk_eth * - * descriptors in ring->dma_pdma. - */ - if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { -- ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz, -+ ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz, - &ring->phys_pdma, - GFP_ATOMIC); - if (!ring->dma_pdma) -@@ -1692,7 +1693,7 @@ static void mtk_tx_clean(struct mtk_eth - } - - if (ring->dma) { -- dma_free_coherent(eth->dev, -+ dma_free_coherent(eth->dma_dev, - MTK_DMA_SIZE * sizeof(*ring->dma), - ring->dma, - ring->phys); -@@ -1700,7 +1701,7 @@ static void mtk_tx_clean(struct mtk_eth - } - - if (ring->dma_pdma) { -- dma_free_coherent(eth->dev, -+ dma_free_coherent(eth->dma_dev, - MTK_DMA_SIZE * sizeof(*ring->dma_pdma), - ring->dma_pdma, - ring->phys_pdma); -@@ -1748,18 +1749,18 @@ static int mtk_rx_alloc(struct mtk_eth * - return -ENOMEM; - } - -- ring->dma = dma_alloc_coherent(eth->dev, -+ ring->dma = dma_alloc_coherent(eth->dma_dev, - rx_dma_size * sizeof(*ring->dma), - &ring->phys, GFP_ATOMIC); - if (!ring->dma) - return -ENOMEM; - - for (i = 0; i < rx_dma_size; i++) { -- dma_addr_t dma_addr = dma_map_single(eth->dev, -+ dma_addr_t dma_addr = dma_map_single(eth->dma_dev, - ring->data[i] + NET_SKB_PAD + eth->ip_align, - ring->buf_size, - DMA_FROM_DEVICE); -- if (unlikely(dma_mapping_error(eth->dev, dma_addr))) -+ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) - return -ENOMEM; - ring->dma[i].rxd1 = (unsigned int)dma_addr; - -@@ -1795,7 +1796,7 @@ static void mtk_rx_clean(struct mtk_eth - continue; - if (!ring->dma[i].rxd1) - continue; -- dma_unmap_single(eth->dev, -+ dma_unmap_single(eth->dma_dev, - ring->dma[i].rxd1, - ring->buf_size, - DMA_FROM_DEVICE); -@@ -1806,7 +1807,7 @@ static void mtk_rx_clean(struct mtk_eth - } - - if (ring->dma) { -- dma_free_coherent(eth->dev, -+ dma_free_coherent(eth->dma_dev, - ring->dma_size * sizeof(*ring->dma), - ring->dma, - ring->phys); -@@ -2162,7 +2163,7 @@ static void mtk_dma_free(struct mtk_eth - if (eth->netdev[i]) - netdev_reset_queue(eth->netdev[i]); - if (eth->scratch_ring) { -- dma_free_coherent(eth->dev, -+ dma_free_coherent(eth->dma_dev, - MTK_DMA_SIZE * sizeof(struct mtk_tx_dma), - eth->scratch_ring, - eth->phy_scratch_ring); -@@ -2514,6 +2515,8 @@ static void mtk_dim_tx(struct work_struc - - static int mtk_hw_init(struct mtk_eth *eth) - { -+ u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA | -+ ETHSYS_DMA_AG_MAP_PPE; - int i, val, ret; - - if (test_and_set_bit(MTK_HW_INIT, ð->state)) -@@ -2526,6 +2529,10 @@ static int mtk_hw_init(struct mtk_eth *e - if (ret) - goto err_disable_pm; - -+ if (eth->ethsys) -+ regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask, -+ of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask); -+ - if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { - ret = device_reset(eth->dev); - if (ret) { -@@ -3079,6 +3086,35 @@ free_netdev: - return err; - } - -+void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev) -+{ -+ struct net_device *dev, *tmp; -+ LIST_HEAD(dev_list); -+ int i; -+ -+ rtnl_lock(); -+ -+ 
for (i = 0; i < MTK_MAC_COUNT; i++) { -+ dev = eth->netdev[i]; -+ -+ if (!dev || !(dev->flags & IFF_UP)) -+ continue; -+ -+ list_add_tail(&dev->close_list, &dev_list); -+ } -+ -+ dev_close_many(&dev_list, false); -+ -+ eth->dma_dev = dma_dev; -+ -+ list_for_each_entry_safe(dev, tmp, &dev_list, close_list) { -+ list_del_init(&dev->close_list); -+ dev_open(dev, NULL); -+ } -+ -+ rtnl_unlock(); -+} -+ - static int mtk_probe(struct platform_device *pdev) - { - struct device_node *mac_np; -@@ -3092,6 +3128,7 @@ static int mtk_probe(struct platform_dev - eth->soc = of_device_get_match_data(&pdev->dev); - - eth->dev = &pdev->dev; -+ eth->dma_dev = &pdev->dev; - eth->base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(eth->base)) - return PTR_ERR(eth->base); -@@ -3140,6 +3177,16 @@ static int mtk_probe(struct platform_dev - } - } - -+ if (of_dma_is_coherent(pdev->dev.of_node)) { -+ struct regmap *cci; -+ -+ cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, -+ "mediatek,cci-control"); -+ /* enable CPU/bus coherency */ -+ if (!IS_ERR(cci)) -+ regmap_write(cci, 0, 3); -+ } -+ - if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) { - eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii), - GFP_KERNEL); ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -463,6 +463,12 @@ - #define RSTCTRL_FE BIT(6) - #define RSTCTRL_PPE BIT(31) - -+/* ethernet dma channel agent map */ -+#define ETHSYS_DMA_AG_MAP 0x408 -+#define ETHSYS_DMA_AG_MAP_PDMA BIT(0) -+#define ETHSYS_DMA_AG_MAP_QDMA BIT(1) -+#define ETHSYS_DMA_AG_MAP_PPE BIT(2) -+ - /* SGMII subsystem config registers */ - /* Register to auto-negotiation restart */ - #define SGMSYS_PCS_CONTROL_1 0x0 -@@ -880,6 +886,7 @@ struct mtk_sgmii { - /* struct mtk_eth - This is the main datasructure for holding the state - * of the driver - * @dev: The device pointer -+ * @dev: The device pointer used for dma mapping/alloc - * @base: The mapped register i/o base - * @page_lock: Make sure that register operations are atomic - * @tx_irq__lock: Make sure that IRQ register operations are atomic -@@ -923,6 +930,7 @@ struct mtk_sgmii { - - struct mtk_eth { - struct device *dev; -+ struct device *dma_dev; - void __iomem *base; - spinlock_t page_lock; - spinlock_t tx_irq_lock; -@@ -1021,6 +1029,7 @@ int mtk_gmac_rgmii_path_setup(struct mtk - int mtk_eth_offload_init(struct mtk_eth *eth); - int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type, - void *type_data); -+void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev); - - - #endif /* MTK_ETH_H */ diff --git a/target/linux/generic/backport-6.1/702-v5.19-01-arm64-dts-mediatek-mt7622-add-support-for-coherent-D.patch b/target/linux/generic/backport-6.1/702-v5.19-01-arm64-dts-mediatek-mt7622-add-support-for-coherent-D.patch deleted file mode 100644 index 9f2512a1d050..000000000000 --- a/target/linux/generic/backport-6.1/702-v5.19-01-arm64-dts-mediatek-mt7622-add-support-for-coherent-D.patch +++ /dev/null @@ -1,30 +0,0 @@ -From: Felix Fietkau -Date: Mon, 7 Feb 2022 10:27:22 +0100 -Subject: [PATCH] arm64: dts: mediatek: mt7622: add support for coherent - DMA - -It improves performance by eliminating the need for a cache flush on rx and tx - -Signed-off-by: Felix Fietkau ---- - ---- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi -+++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi -@@ -357,7 +357,7 @@ - }; - - cci_control2: slave-if@5000 { -- compatible = "arm,cci-400-ctrl-if"; -+ compatible = "arm,cci-400-ctrl-if", "syscon"; - interface-type = 
"ace"; - reg = <0x5000 0x1000>; - }; -@@ -938,6 +938,8 @@ - power-domains = <&scpsys MT7622_POWER_DOMAIN_ETHSYS>; - mediatek,ethsys = <ðsys>; - mediatek,sgmiisys = <&sgmiisys>; -+ mediatek,cci-control = <&cci_control2>; -+ dma-coherent; - #address-cells = <1>; - #size-cells = <0>; - status = "disabled"; diff --git a/target/linux/generic/backport-6.1/702-v5.19-02-net-ethernet-mtk_eth_soc-add-support-for-Wireless-Et.patch b/target/linux/generic/backport-6.1/702-v5.19-02-net-ethernet-mtk_eth_soc-add-support-for-Wireless-Et.patch deleted file mode 100644 index 2f3a0827fe45..000000000000 --- a/target/linux/generic/backport-6.1/702-v5.19-02-net-ethernet-mtk_eth_soc-add-support-for-Wireless-Et.patch +++ /dev/null @@ -1,1679 +0,0 @@ -From: Felix Fietkau -Date: Sat, 5 Feb 2022 17:56:08 +0100 -Subject: [PATCH] net: ethernet: mtk_eth_soc: add support for Wireless - Ethernet Dispatch (WED) - -The Wireless Ethernet Dispatch subsystem on the MT7622 SoC can be -configured to intercept and handle access to the DMA queues and -PCIe interrupts for a MT7615/MT7915 wireless card. -It can manage the internal WDMA (Wireless DMA) controller, which allows -ethernet packets to be passed from the packet switch engine (PSE) to the -wireless card, bypassing the CPU entirely. -This can be used to implement hardware flow offloading from ethernet to -WLAN. - -Signed-off-by: Felix Fietkau ---- - create mode 100644 drivers/net/ethernet/mediatek/mtk_wed.c - create mode 100644 drivers/net/ethernet/mediatek/mtk_wed.h - create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_debugfs.c - create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_ops.c - create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_regs.h - create mode 100644 include/linux/soc/mediatek/mtk_wed.h - ---- a/drivers/net/ethernet/mediatek/Kconfig -+++ b/drivers/net/ethernet/mediatek/Kconfig -@@ -7,6 +7,10 @@ config NET_VENDOR_MEDIATEK - - if NET_VENDOR_MEDIATEK - -+config NET_MEDIATEK_SOC_WED -+ depends on ARCH_MEDIATEK || COMPILE_TEST -+ def_bool NET_MEDIATEK_SOC != n -+ - config NET_MEDIATEK_SOC - tristate "MediaTek SoC Gigabit Ethernet support" - depends on NET_DSA || !NET_DSA ---- a/drivers/net/ethernet/mediatek/Makefile -+++ b/drivers/net/ethernet/mediatek/Makefile -@@ -5,4 +5,9 @@ - - obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o - mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o -+mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o -+ifdef CONFIG_DEBUG_FS -+mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o -+endif -+obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o - obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -24,6 +24,7 @@ - #include - - #include "mtk_eth_soc.h" -+#include "mtk_wed.h" - - static int mtk_msg_level = -1; - module_param_named(msg_level, mtk_msg_level, int, 0); -@@ -3209,6 +3210,22 @@ static int mtk_probe(struct platform_dev - } - } - -+ for (i = 0;; i++) { -+ struct device_node *np = of_parse_phandle(pdev->dev.of_node, -+ "mediatek,wed", i); -+ static const u32 wdma_regs[] = { -+ MTK_WDMA0_BASE, -+ MTK_WDMA1_BASE -+ }; -+ void __iomem *wdma; -+ -+ if (!np || i >= ARRAY_SIZE(wdma_regs)) -+ break; -+ -+ wdma = eth->base + wdma_regs[i]; -+ mtk_wed_add_hw(np, eth, wdma, i); -+ } -+ - for (i = 0; i < 3; i++) { - if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0) - eth->irq[i] = eth->irq[0]; ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h -+++ 
b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -295,6 +295,9 @@ - #define MTK_GDM1_TX_GPCNT 0x2438 - #define MTK_STAT_OFFSET 0x40 - -+#define MTK_WDMA0_BASE 0x2800 -+#define MTK_WDMA1_BASE 0x2c00 -+ - /* QDMA descriptor txd4 */ - #define TX_DMA_CHKSUM (0x7 << 29) - #define TX_DMA_TSO BIT(28) ---- /dev/null -+++ b/drivers/net/ethernet/mediatek/mtk_wed.c -@@ -0,0 +1,875 @@ -+// SPDX-License-Identifier: GPL-2.0-only -+/* Copyright (C) 2021 Felix Fietkau */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "mtk_eth_soc.h" -+#include "mtk_wed_regs.h" -+#include "mtk_wed.h" -+#include "mtk_ppe.h" -+ -+#define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000) -+ -+#define MTK_WED_PKT_SIZE 1900 -+#define MTK_WED_BUF_SIZE 2048 -+#define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048) -+ -+#define MTK_WED_TX_RING_SIZE 2048 -+#define MTK_WED_WDMA_RING_SIZE 1024 -+ -+static struct mtk_wed_hw *hw_list[2]; -+static DEFINE_MUTEX(hw_lock); -+ -+static void -+wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val) -+{ -+ regmap_update_bits(dev->hw->regs, reg, mask | val, val); -+} -+ -+static void -+wed_set(struct mtk_wed_device *dev, u32 reg, u32 mask) -+{ -+ return wed_m32(dev, reg, 0, mask); -+} -+ -+static void -+wed_clr(struct mtk_wed_device *dev, u32 reg, u32 mask) -+{ -+ return wed_m32(dev, reg, mask, 0); -+} -+ -+static void -+wdma_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val) -+{ -+ wdma_w32(dev, reg, (wdma_r32(dev, reg) & ~mask) | val); -+} -+ -+static void -+wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask) -+{ -+ wdma_m32(dev, reg, 0, mask); -+} -+ -+static u32 -+mtk_wed_read_reset(struct mtk_wed_device *dev) -+{ -+ return wed_r32(dev, MTK_WED_RESET); -+} -+ -+static void -+mtk_wed_reset(struct mtk_wed_device *dev, u32 mask) -+{ -+ u32 status; -+ -+ wed_w32(dev, MTK_WED_RESET, mask); -+ if (readx_poll_timeout(mtk_wed_read_reset, dev, status, -+ !(status & mask), 0, 1000)) -+ WARN_ON_ONCE(1); -+} -+ -+static struct mtk_wed_hw * -+mtk_wed_assign(struct mtk_wed_device *dev) -+{ -+ struct mtk_wed_hw *hw; -+ -+ hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)]; -+ if (!hw || hw->wed_dev) -+ return NULL; -+ -+ hw->wed_dev = dev; -+ return hw; -+} -+ -+static int -+mtk_wed_buffer_alloc(struct mtk_wed_device *dev) -+{ -+ struct mtk_wdma_desc *desc; -+ dma_addr_t desc_phys; -+ void **page_list; -+ int token = dev->wlan.token_start; -+ int ring_size; -+ int n_pages; -+ int i, page_idx; -+ -+ ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1); -+ n_pages = ring_size / MTK_WED_BUF_PER_PAGE; -+ -+ page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL); -+ if (!page_list) -+ return -ENOMEM; -+ -+ dev->buf_ring.size = ring_size; -+ dev->buf_ring.pages = page_list; -+ -+ desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc), -+ &desc_phys, GFP_KERNEL); -+ if (!desc) -+ return -ENOMEM; -+ -+ dev->buf_ring.desc = desc; -+ dev->buf_ring.desc_phys = desc_phys; -+ -+ for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) { -+ dma_addr_t page_phys, buf_phys; -+ struct page *page; -+ void *buf; -+ int s; -+ -+ page = __dev_alloc_pages(GFP_KERNEL, 0); -+ if (!page) -+ return -ENOMEM; -+ -+ page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE, -+ DMA_BIDIRECTIONAL); -+ if (dma_mapping_error(dev->hw->dev, page_phys)) { -+ __free_page(page); -+ return -ENOMEM; -+ } -+ -+ page_list[page_idx++] = page; -+ dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE, -+ 
DMA_BIDIRECTIONAL); -+ -+ buf = page_to_virt(page); -+ buf_phys = page_phys; -+ -+ for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) { -+ u32 txd_size; -+ -+ txd_size = dev->wlan.init_buf(buf, buf_phys, token++); -+ -+ desc->buf0 = buf_phys; -+ desc->buf1 = buf_phys + txd_size; -+ desc->ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, -+ txd_size) | -+ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1, -+ MTK_WED_BUF_SIZE - txd_size) | -+ MTK_WDMA_DESC_CTRL_LAST_SEG1; -+ desc->info = 0; -+ desc++; -+ -+ buf += MTK_WED_BUF_SIZE; -+ buf_phys += MTK_WED_BUF_SIZE; -+ } -+ -+ dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE, -+ DMA_BIDIRECTIONAL); -+ } -+ -+ return 0; -+} -+ -+static void -+mtk_wed_free_buffer(struct mtk_wed_device *dev) -+{ -+ struct mtk_wdma_desc *desc = dev->buf_ring.desc; -+ void **page_list = dev->buf_ring.pages; -+ int page_idx; -+ int i; -+ -+ if (!page_list) -+ return; -+ -+ if (!desc) -+ goto free_pagelist; -+ -+ for (i = 0, page_idx = 0; i < dev->buf_ring.size; i += MTK_WED_BUF_PER_PAGE) { -+ void *page = page_list[page_idx++]; -+ -+ if (!page) -+ break; -+ -+ dma_unmap_page(dev->hw->dev, desc[i].buf0, -+ PAGE_SIZE, DMA_BIDIRECTIONAL); -+ __free_page(page); -+ } -+ -+ dma_free_coherent(dev->hw->dev, dev->buf_ring.size * sizeof(*desc), -+ desc, dev->buf_ring.desc_phys); -+ -+free_pagelist: -+ kfree(page_list); -+} -+ -+static void -+mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring) -+{ -+ if (!ring->desc) -+ return; -+ -+ dma_free_coherent(dev->hw->dev, ring->size * sizeof(*ring->desc), -+ ring->desc, ring->desc_phys); -+} -+ -+static void -+mtk_wed_free_tx_rings(struct mtk_wed_device *dev) -+{ -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) -+ mtk_wed_free_ring(dev, &dev->tx_ring[i]); -+ for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++) -+ mtk_wed_free_ring(dev, &dev->tx_wdma[i]); -+} -+ -+static void -+mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en) -+{ -+ u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK; -+ -+ if (!dev->hw->num_flows) -+ mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; -+ -+ wed_w32(dev, MTK_WED_EXT_INT_MASK, en ? 
mask : 0); -+ wed_r32(dev, MTK_WED_EXT_INT_MASK); -+} -+ -+static void -+mtk_wed_stop(struct mtk_wed_device *dev) -+{ -+ regmap_write(dev->hw->mirror, dev->hw->index * 4, 0); -+ mtk_wed_set_ext_int(dev, false); -+ -+ wed_clr(dev, MTK_WED_CTRL, -+ MTK_WED_CTRL_WDMA_INT_AGENT_EN | -+ MTK_WED_CTRL_WPDMA_INT_AGENT_EN | -+ MTK_WED_CTRL_WED_TX_BM_EN | -+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); -+ wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0); -+ wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0); -+ wdma_w32(dev, MTK_WDMA_INT_MASK, 0); -+ wdma_w32(dev, MTK_WDMA_INT_GRP2, 0); -+ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0); -+ -+ wed_clr(dev, MTK_WED_GLO_CFG, -+ MTK_WED_GLO_CFG_TX_DMA_EN | -+ MTK_WED_GLO_CFG_RX_DMA_EN); -+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, -+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | -+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN); -+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG, -+ MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); -+} -+ -+static void -+mtk_wed_detach(struct mtk_wed_device *dev) -+{ -+ struct device_node *wlan_node = dev->wlan.pci_dev->dev.of_node; -+ struct mtk_wed_hw *hw = dev->hw; -+ -+ mutex_lock(&hw_lock); -+ -+ mtk_wed_stop(dev); -+ -+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX); -+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); -+ -+ mtk_wed_reset(dev, MTK_WED_RESET_WED); -+ -+ mtk_wed_free_buffer(dev); -+ mtk_wed_free_tx_rings(dev); -+ -+ if (of_dma_is_coherent(wlan_node)) -+ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, -+ BIT(hw->index), BIT(hw->index)); -+ -+ if (!hw_list[!hw->index]->wed_dev && -+ hw->eth->dma_dev != hw->eth->dev) -+ mtk_eth_set_dma_device(hw->eth, hw->eth->dev); -+ -+ memset(dev, 0, sizeof(*dev)); -+ module_put(THIS_MODULE); -+ -+ hw->wed_dev = NULL; -+ mutex_unlock(&hw_lock); -+} -+ -+static void -+mtk_wed_hw_init_early(struct mtk_wed_device *dev) -+{ -+ u32 mask, set; -+ u32 offset; -+ -+ mtk_wed_stop(dev); -+ mtk_wed_reset(dev, MTK_WED_RESET_WED); -+ -+ mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE | -+ MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE | -+ MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE; -+ set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) | -+ MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP | -+ MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY; -+ wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set); -+ -+ wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_INFO_PRERES); -+ -+ offset = dev->hw->index ? 
0x04000400 : 0; -+ wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset); -+ wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset); -+ -+ wed_w32(dev, MTK_WED_PCIE_CFG_BASE, MTK_PCIE_BASE(dev->hw->index)); -+ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys); -+} -+ -+static void -+mtk_wed_hw_init(struct mtk_wed_device *dev) -+{ -+ if (dev->init_done) -+ return; -+ -+ dev->init_done = true; -+ mtk_wed_set_ext_int(dev, false); -+ wed_w32(dev, MTK_WED_TX_BM_CTRL, -+ MTK_WED_TX_BM_CTRL_PAUSE | -+ FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM, -+ dev->buf_ring.size / 128) | -+ FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM, -+ MTK_WED_TX_RING_SIZE / 256)); -+ -+ wed_w32(dev, MTK_WED_TX_BM_BASE, dev->buf_ring.desc_phys); -+ -+ wed_w32(dev, MTK_WED_TX_BM_TKID, -+ FIELD_PREP(MTK_WED_TX_BM_TKID_START, -+ dev->wlan.token_start) | -+ FIELD_PREP(MTK_WED_TX_BM_TKID_END, -+ dev->wlan.token_start + dev->wlan.nbuf - 1)); -+ -+ wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE); -+ -+ wed_w32(dev, MTK_WED_TX_BM_DYN_THR, -+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) | -+ MTK_WED_TX_BM_DYN_THR_HI); -+ -+ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM); -+ -+ wed_set(dev, MTK_WED_CTRL, -+ MTK_WED_CTRL_WED_TX_BM_EN | -+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); -+ -+ wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE); -+} -+ -+static void -+mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size) -+{ -+ int i; -+ -+ for (i = 0; i < size; i++) { -+ desc[i].buf0 = 0; -+ desc[i].ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE); -+ desc[i].buf1 = 0; -+ desc[i].info = 0; -+ } -+} -+ -+static u32 -+mtk_wed_check_busy(struct mtk_wed_device *dev) -+{ -+ if (wed_r32(dev, MTK_WED_GLO_CFG) & MTK_WED_GLO_CFG_TX_DMA_BUSY) -+ return true; -+ -+ if (wed_r32(dev, MTK_WED_WPDMA_GLO_CFG) & -+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY) -+ return true; -+ -+ if (wed_r32(dev, MTK_WED_CTRL) & MTK_WED_CTRL_WDMA_INT_AGENT_BUSY) -+ return true; -+ -+ if (wed_r32(dev, MTK_WED_WDMA_GLO_CFG) & -+ MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY) -+ return true; -+ -+ if (wdma_r32(dev, MTK_WDMA_GLO_CFG) & -+ MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY) -+ return true; -+ -+ if (wed_r32(dev, MTK_WED_CTRL) & -+ (MTK_WED_CTRL_WED_TX_BM_BUSY | MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY)) -+ return true; -+ -+ return false; -+} -+ -+static int -+mtk_wed_poll_busy(struct mtk_wed_device *dev) -+{ -+ int sleep = 15000; -+ int timeout = 100 * sleep; -+ u32 val; -+ -+ return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep, -+ timeout, false, dev); -+} -+ -+static void -+mtk_wed_reset_dma(struct mtk_wed_device *dev) -+{ -+ bool busy = false; -+ u32 val; -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) { -+ struct mtk_wdma_desc *desc = dev->tx_ring[i].desc; -+ -+ if (!desc) -+ continue; -+ -+ mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE); -+ } -+ -+ if (mtk_wed_poll_busy(dev)) -+ busy = mtk_wed_check_busy(dev); -+ -+ if (busy) { -+ mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA); -+ } else { -+ wed_w32(dev, MTK_WED_RESET_IDX, -+ MTK_WED_RESET_IDX_TX | -+ MTK_WED_RESET_IDX_RX); -+ wed_w32(dev, MTK_WED_RESET_IDX, 0); -+ } -+ -+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX); -+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); -+ -+ if (busy) { -+ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT); -+ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV); -+ } else { -+ wed_w32(dev, MTK_WED_WDMA_RESET_IDX, -+ MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV); -+ wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0); -+ -+ wed_set(dev, MTK_WED_WDMA_GLO_CFG, -+ 
MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE); -+ -+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG, -+ MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE); -+ } -+ -+ for (i = 0; i < 100; i++) { -+ val = wed_r32(dev, MTK_WED_TX_BM_INTF); -+ if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40) -+ break; -+ } -+ -+ mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT); -+ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM); -+ -+ if (busy) { -+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT); -+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV); -+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV); -+ } else { -+ wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, -+ MTK_WED_WPDMA_RESET_IDX_TX | -+ MTK_WED_WPDMA_RESET_IDX_RX); -+ wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0); -+ } -+ -+} -+ -+static int -+mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, -+ int size) -+{ -+ ring->desc = dma_alloc_coherent(dev->hw->dev, -+ size * sizeof(*ring->desc), -+ &ring->desc_phys, GFP_KERNEL); -+ if (!ring->desc) -+ return -ENOMEM; -+ -+ ring->size = size; -+ mtk_wed_ring_reset(ring->desc, size); -+ -+ return 0; -+} -+ -+static int -+mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size) -+{ -+ struct mtk_wed_ring *wdma = &dev->tx_wdma[idx]; -+ -+ if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE)) -+ return -ENOMEM; -+ -+ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE, -+ wdma->desc_phys); -+ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT, -+ size); -+ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0); -+ -+ wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE, -+ wdma->desc_phys); -+ wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT, -+ size); -+ -+ return 0; -+} -+ -+static void -+mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask) -+{ -+ u32 wdma_mask; -+ u32 val; -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++) -+ if (!dev->tx_wdma[i].desc) -+ mtk_wed_wdma_ring_setup(dev, i, 16); -+ -+ wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0)); -+ -+ mtk_wed_hw_init(dev); -+ -+ wed_set(dev, MTK_WED_CTRL, -+ MTK_WED_CTRL_WDMA_INT_AGENT_EN | -+ MTK_WED_CTRL_WPDMA_INT_AGENT_EN | -+ MTK_WED_CTRL_WED_TX_BM_EN | -+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); -+ -+ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, MTK_WED_PCIE_INT_TRIGGER_STATUS); -+ -+ wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, -+ MTK_WED_WPDMA_INT_TRIGGER_RX_DONE | -+ MTK_WED_WPDMA_INT_TRIGGER_TX_DONE); -+ -+ wed_set(dev, MTK_WED_WPDMA_INT_CTRL, -+ MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV); -+ -+ wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask); -+ wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask); -+ -+ wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask); -+ wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask); -+ -+ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask); -+ wed_w32(dev, MTK_WED_INT_MASK, irq_mask); -+ -+ wed_set(dev, MTK_WED_GLO_CFG, -+ MTK_WED_GLO_CFG_TX_DMA_EN | -+ MTK_WED_GLO_CFG_RX_DMA_EN); -+ wed_set(dev, MTK_WED_WPDMA_GLO_CFG, -+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | -+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN); -+ wed_set(dev, MTK_WED_WDMA_GLO_CFG, -+ MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); -+ -+ mtk_wed_set_ext_int(dev, true); -+ val = dev->wlan.wpdma_phys | -+ MTK_PCIE_MIRROR_MAP_EN | -+ FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID, dev->hw->index); -+ -+ if (dev->hw->index) -+ val |= BIT(1); -+ val |= BIT(0); -+ regmap_write(dev->hw->mirror, dev->hw->index * 4, val); -+ -+ dev->running = true; -+} -+ -+static int -+mtk_wed_attach(struct mtk_wed_device *dev) -+ __releases(RCU) -+{ -+ struct mtk_wed_hw *hw; -+ int ret 
= 0; -+ -+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(), -+ "mtk_wed_attach without holding the RCU read lock"); -+ -+ if (pci_domain_nr(dev->wlan.pci_dev->bus) > 1 || -+ !try_module_get(THIS_MODULE)) -+ ret = -ENODEV; -+ -+ rcu_read_unlock(); -+ -+ if (ret) -+ return ret; -+ -+ mutex_lock(&hw_lock); -+ -+ hw = mtk_wed_assign(dev); -+ if (!hw) { -+ module_put(THIS_MODULE); -+ ret = -ENODEV; -+ goto out; -+ } -+ -+ dev_info(&dev->wlan.pci_dev->dev, "attaching wed device %d\n", hw->index); -+ -+ dev->hw = hw; -+ dev->dev = hw->dev; -+ dev->irq = hw->irq; -+ dev->wdma_idx = hw->index; -+ -+ if (hw->eth->dma_dev == hw->eth->dev && -+ of_dma_is_coherent(hw->eth->dev->of_node)) -+ mtk_eth_set_dma_device(hw->eth, hw->dev); -+ -+ ret = mtk_wed_buffer_alloc(dev); -+ if (ret) { -+ mtk_wed_detach(dev); -+ goto out; -+ } -+ -+ mtk_wed_hw_init_early(dev); -+ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, BIT(hw->index), 0); -+ -+out: -+ mutex_unlock(&hw_lock); -+ -+ return ret; -+} -+ -+static int -+mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs) -+{ -+ struct mtk_wed_ring *ring = &dev->tx_ring[idx]; -+ -+ /* -+ * Tx ring redirection: -+ * Instead of configuring the WLAN PDMA TX ring directly, the WLAN -+ * driver allocated DMA ring gets configured into WED MTK_WED_RING_TX(n) -+ * registers. -+ * -+ * WED driver posts its own DMA ring as WLAN PDMA TX and configures it -+ * into MTK_WED_WPDMA_RING_TX(n) registers. -+ * It gets filled with packets picked up from WED TX ring and from -+ * WDMA RX. -+ */ -+ -+ BUG_ON(idx > ARRAY_SIZE(dev->tx_ring)); -+ -+ if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE)) -+ return -ENOMEM; -+ -+ if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE)) -+ return -ENOMEM; -+ -+ ring->reg_base = MTK_WED_RING_TX(idx); -+ ring->wpdma = regs; -+ -+ /* WED -> WPDMA */ -+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys); -+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE); -+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_CPU_IDX, 0); -+ -+ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE, -+ ring->desc_phys); -+ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT, -+ MTK_WED_TX_RING_SIZE); -+ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0); -+ -+ return 0; -+} -+ -+static int -+mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs) -+{ -+ struct mtk_wed_ring *ring = &dev->txfree_ring; -+ int i; -+ -+ /* -+ * For txfree event handling, the same DMA ring is shared between WED -+ * and WLAN. 
The WLAN driver accesses the ring index registers through -+ * WED -+ */ -+ ring->reg_base = MTK_WED_RING_RX(1); -+ ring->wpdma = regs; -+ -+ for (i = 0; i < 12; i += 4) { -+ u32 val = readl(regs + i); -+ -+ wed_w32(dev, MTK_WED_RING_RX(1) + i, val); -+ wed_w32(dev, MTK_WED_WPDMA_RING_RX(1) + i, val); -+ } -+ -+ return 0; -+} -+ -+static u32 -+mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask) -+{ -+ u32 val; -+ -+ val = wed_r32(dev, MTK_WED_EXT_INT_STATUS); -+ wed_w32(dev, MTK_WED_EXT_INT_STATUS, val); -+ val &= MTK_WED_EXT_INT_STATUS_ERROR_MASK; -+ if (!dev->hw->num_flows) -+ val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; -+ if (val && net_ratelimit()) -+ pr_err("mtk_wed%d: error status=%08x\n", dev->hw->index, val); -+ -+ val = wed_r32(dev, MTK_WED_INT_STATUS); -+ val &= mask; -+ wed_w32(dev, MTK_WED_INT_STATUS, val); /* ACK */ -+ -+ return val; -+} -+ -+static void -+mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask) -+{ -+ if (!dev->running) -+ return; -+ -+ mtk_wed_set_ext_int(dev, !!mask); -+ wed_w32(dev, MTK_WED_INT_MASK, mask); -+} -+ -+int mtk_wed_flow_add(int index) -+{ -+ struct mtk_wed_hw *hw = hw_list[index]; -+ int ret; -+ -+ if (!hw || !hw->wed_dev) -+ return -ENODEV; -+ -+ if (hw->num_flows) { -+ hw->num_flows++; -+ return 0; -+ } -+ -+ mutex_lock(&hw_lock); -+ if (!hw->wed_dev) { -+ ret = -ENODEV; -+ goto out; -+ } -+ -+ ret = hw->wed_dev->wlan.offload_enable(hw->wed_dev); -+ if (!ret) -+ hw->num_flows++; -+ mtk_wed_set_ext_int(hw->wed_dev, true); -+ -+out: -+ mutex_unlock(&hw_lock); -+ -+ return ret; -+} -+ -+void mtk_wed_flow_remove(int index) -+{ -+ struct mtk_wed_hw *hw = hw_list[index]; -+ -+ if (!hw) -+ return; -+ -+ if (--hw->num_flows) -+ return; -+ -+ mutex_lock(&hw_lock); -+ if (!hw->wed_dev) -+ goto out; -+ -+ hw->wed_dev->wlan.offload_disable(hw->wed_dev); -+ mtk_wed_set_ext_int(hw->wed_dev, true); -+ -+out: -+ mutex_unlock(&hw_lock); -+} -+ -+void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, -+ void __iomem *wdma, int index) -+{ -+ static const struct mtk_wed_ops wed_ops = { -+ .attach = mtk_wed_attach, -+ .tx_ring_setup = mtk_wed_tx_ring_setup, -+ .txfree_ring_setup = mtk_wed_txfree_ring_setup, -+ .start = mtk_wed_start, -+ .stop = mtk_wed_stop, -+ .reset_dma = mtk_wed_reset_dma, -+ .reg_read = wed_r32, -+ .reg_write = wed_w32, -+ .irq_get = mtk_wed_irq_get, -+ .irq_set_mask = mtk_wed_irq_set_mask, -+ .detach = mtk_wed_detach, -+ }; -+ struct device_node *eth_np = eth->dev->of_node; -+ struct platform_device *pdev; -+ struct mtk_wed_hw *hw; -+ struct regmap *regs; -+ int irq; -+ -+ if (!np) -+ return; -+ -+ pdev = of_find_device_by_node(np); -+ if (!pdev) -+ return; -+ -+ get_device(&pdev->dev); -+ irq = platform_get_irq(pdev, 0); -+ if (irq < 0) -+ return; -+ -+ regs = syscon_regmap_lookup_by_phandle(np, NULL); -+ if (!regs) -+ return; -+ -+ rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops); -+ -+ mutex_lock(&hw_lock); -+ -+ if (WARN_ON(hw_list[index])) -+ goto unlock; -+ -+ hw = kzalloc(sizeof(*hw), GFP_KERNEL); -+ hw->node = np; -+ hw->regs = regs; -+ hw->eth = eth; -+ hw->dev = &pdev->dev; -+ hw->wdma = wdma; -+ hw->index = index; -+ hw->irq = irq; -+ hw->mirror = syscon_regmap_lookup_by_phandle(eth_np, -+ "mediatek,pcie-mirror"); -+ hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np, -+ "mediatek,hifsys"); -+ if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) { -+ kfree(hw); -+ goto unlock; -+ } -+ -+ if (!index) { -+ regmap_write(hw->mirror, 0, 0); -+ regmap_write(hw->mirror, 4, 0); -+ } -+ mtk_wed_hw_add_debugfs(hw); -+ -+ 
hw_list[index] = hw; -+ -+unlock: -+ mutex_unlock(&hw_lock); -+} -+ -+void mtk_wed_exit(void) -+{ -+ int i; -+ -+ rcu_assign_pointer(mtk_soc_wed_ops, NULL); -+ -+ synchronize_rcu(); -+ -+ for (i = 0; i < ARRAY_SIZE(hw_list); i++) { -+ struct mtk_wed_hw *hw; -+ -+ hw = hw_list[i]; -+ if (!hw) -+ continue; -+ -+ hw_list[i] = NULL; -+ debugfs_remove(hw->debugfs_dir); -+ put_device(hw->dev); -+ kfree(hw); -+ } -+} ---- /dev/null -+++ b/drivers/net/ethernet/mediatek/mtk_wed.h -@@ -0,0 +1,128 @@ -+// SPDX-License-Identifier: GPL-2.0-only -+/* Copyright (C) 2021 Felix Fietkau */ -+ -+#ifndef __MTK_WED_PRIV_H -+#define __MTK_WED_PRIV_H -+ -+#include -+#include -+#include -+ -+struct mtk_eth; -+ -+struct mtk_wed_hw { -+ struct device_node *node; -+ struct mtk_eth *eth; -+ struct regmap *regs; -+ struct regmap *hifsys; -+ struct device *dev; -+ void __iomem *wdma; -+ struct regmap *mirror; -+ struct dentry *debugfs_dir; -+ struct mtk_wed_device *wed_dev; -+ u32 debugfs_reg; -+ u32 num_flows; -+ char dirname[5]; -+ int irq; -+ int index; -+}; -+ -+ -+#ifdef CONFIG_NET_MEDIATEK_SOC_WED -+static inline void -+wed_w32(struct mtk_wed_device *dev, u32 reg, u32 val) -+{ -+ regmap_write(dev->hw->regs, reg, val); -+} -+ -+static inline u32 -+wed_r32(struct mtk_wed_device *dev, u32 reg) -+{ -+ unsigned int val; -+ -+ regmap_read(dev->hw->regs, reg, &val); -+ -+ return val; -+} -+ -+static inline void -+wdma_w32(struct mtk_wed_device *dev, u32 reg, u32 val) -+{ -+ writel(val, dev->hw->wdma + reg); -+} -+ -+static inline u32 -+wdma_r32(struct mtk_wed_device *dev, u32 reg) -+{ -+ return readl(dev->hw->wdma + reg); -+} -+ -+static inline u32 -+wpdma_tx_r32(struct mtk_wed_device *dev, int ring, u32 reg) -+{ -+ if (!dev->tx_ring[ring].wpdma) -+ return 0; -+ -+ return readl(dev->tx_ring[ring].wpdma + reg); -+} -+ -+static inline void -+wpdma_tx_w32(struct mtk_wed_device *dev, int ring, u32 reg, u32 val) -+{ -+ if (!dev->tx_ring[ring].wpdma) -+ return; -+ -+ writel(val, dev->tx_ring[ring].wpdma + reg); -+} -+ -+static inline u32 -+wpdma_txfree_r32(struct mtk_wed_device *dev, u32 reg) -+{ -+ if (!dev->txfree_ring.wpdma) -+ return 0; -+ -+ return readl(dev->txfree_ring.wpdma + reg); -+} -+ -+static inline void -+wpdma_txfree_w32(struct mtk_wed_device *dev, u32 reg, u32 val) -+{ -+ if (!dev->txfree_ring.wpdma) -+ return; -+ -+ writel(val, dev->txfree_ring.wpdma + reg); -+} -+ -+void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, -+ void __iomem *wdma, int index); -+void mtk_wed_exit(void); -+int mtk_wed_flow_add(int index); -+void mtk_wed_flow_remove(int index); -+#else -+static inline void -+mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, -+ void __iomem *wdma, int index) -+{ -+} -+static inline void -+mtk_wed_exit(void) -+{ -+} -+static inline int mtk_wed_flow_add(int index) -+{ -+ return -EINVAL; -+} -+static inline void mtk_wed_flow_remove(int index) -+{ -+} -+#endif -+ -+#ifdef CONFIG_DEBUG_FS -+void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw); -+#else -+static inline void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw) -+{ -+} -+#endif -+ -+#endif ---- /dev/null -+++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c -@@ -0,0 +1,175 @@ -+// SPDX-License-Identifier: GPL-2.0-only -+/* Copyright (C) 2021 Felix Fietkau */ -+ -+#include -+#include "mtk_wed.h" -+#include "mtk_wed_regs.h" -+ -+struct reg_dump { -+ const char *name; -+ u16 offset; -+ u8 type; -+ u8 base; -+}; -+ -+enum { -+ DUMP_TYPE_STRING, -+ DUMP_TYPE_WED, -+ DUMP_TYPE_WDMA, -+ DUMP_TYPE_WPDMA_TX, -+ DUMP_TYPE_WPDMA_TXFREE, -+}; 
-+ -+#define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING } -+#define DUMP_REG(_reg, ...) { #_reg, MTK_##_reg, __VA_ARGS__ } -+#define DUMP_RING(_prefix, _base, ...) \ -+ { _prefix " BASE", _base, __VA_ARGS__ }, \ -+ { _prefix " CNT", _base + 0x4, __VA_ARGS__ }, \ -+ { _prefix " CIDX", _base + 0x8, __VA_ARGS__ }, \ -+ { _prefix " DIDX", _base + 0xc, __VA_ARGS__ } -+ -+#define DUMP_WED(_reg) DUMP_REG(_reg, DUMP_TYPE_WED) -+#define DUMP_WED_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WED) -+ -+#define DUMP_WDMA(_reg) DUMP_REG(_reg, DUMP_TYPE_WDMA) -+#define DUMP_WDMA_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WDMA) -+ -+#define DUMP_WPDMA_TX_RING(_n) DUMP_RING("WPDMA_TX" #_n, 0, DUMP_TYPE_WPDMA_TX, _n) -+#define DUMP_WPDMA_TXFREE_RING DUMP_RING("WPDMA_RX1", 0, DUMP_TYPE_WPDMA_TXFREE) -+ -+static void -+print_reg_val(struct seq_file *s, const char *name, u32 val) -+{ -+ seq_printf(s, "%-32s %08x\n", name, val); -+} -+ -+static void -+dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev, -+ const struct reg_dump *regs, int n_regs) -+{ -+ const struct reg_dump *cur; -+ u32 val; -+ -+ for (cur = regs; cur < ®s[n_regs]; cur++) { -+ switch (cur->type) { -+ case DUMP_TYPE_STRING: -+ seq_printf(s, "%s======== %s:\n", -+ cur > regs ? "\n" : "", -+ cur->name); -+ continue; -+ case DUMP_TYPE_WED: -+ val = wed_r32(dev, cur->offset); -+ break; -+ case DUMP_TYPE_WDMA: -+ val = wdma_r32(dev, cur->offset); -+ break; -+ case DUMP_TYPE_WPDMA_TX: -+ val = wpdma_tx_r32(dev, cur->base, cur->offset); -+ break; -+ case DUMP_TYPE_WPDMA_TXFREE: -+ val = wpdma_txfree_r32(dev, cur->offset); -+ break; -+ } -+ print_reg_val(s, cur->name, val); -+ } -+} -+ -+ -+static int -+wed_txinfo_show(struct seq_file *s, void *data) -+{ -+ static const struct reg_dump regs[] = { -+ DUMP_STR("WED TX"), -+ DUMP_WED(WED_TX_MIB(0)), -+ DUMP_WED_RING(WED_RING_TX(0)), -+ -+ DUMP_WED(WED_TX_MIB(1)), -+ DUMP_WED_RING(WED_RING_TX(1)), -+ -+ DUMP_STR("WPDMA TX"), -+ DUMP_WED(WED_WPDMA_TX_MIB(0)), -+ DUMP_WED_RING(WED_WPDMA_RING_TX(0)), -+ DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(0)), -+ -+ DUMP_WED(WED_WPDMA_TX_MIB(1)), -+ DUMP_WED_RING(WED_WPDMA_RING_TX(1)), -+ DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(1)), -+ -+ DUMP_STR("WPDMA TX"), -+ DUMP_WPDMA_TX_RING(0), -+ DUMP_WPDMA_TX_RING(1), -+ -+ DUMP_STR("WED WDMA RX"), -+ DUMP_WED(WED_WDMA_RX_MIB(0)), -+ DUMP_WED_RING(WED_WDMA_RING_RX(0)), -+ DUMP_WED(WED_WDMA_RX_THRES(0)), -+ DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(0)), -+ DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(0)), -+ -+ DUMP_WED(WED_WDMA_RX_MIB(1)), -+ DUMP_WED_RING(WED_WDMA_RING_RX(1)), -+ DUMP_WED(WED_WDMA_RX_THRES(1)), -+ DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(1)), -+ DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(1)), -+ -+ DUMP_STR("WDMA RX"), -+ DUMP_WDMA(WDMA_GLO_CFG), -+ DUMP_WDMA_RING(WDMA_RING_RX(0)), -+ DUMP_WDMA_RING(WDMA_RING_RX(1)), -+ }; -+ struct mtk_wed_hw *hw = s->private; -+ struct mtk_wed_device *dev = hw->wed_dev; -+ -+ if (!dev) -+ return 0; -+ -+ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs)); -+ -+ return 0; -+} -+DEFINE_SHOW_ATTRIBUTE(wed_txinfo); -+ -+ -+static int -+mtk_wed_reg_set(void *data, u64 val) -+{ -+ struct mtk_wed_hw *hw = data; -+ -+ regmap_write(hw->regs, hw->debugfs_reg, val); -+ -+ return 0; -+} -+ -+static int -+mtk_wed_reg_get(void *data, u64 *val) -+{ -+ struct mtk_wed_hw *hw = data; -+ unsigned int regval; -+ int ret; -+ -+ ret = regmap_read(hw->regs, hw->debugfs_reg, ®val); -+ if (ret) -+ return ret; -+ -+ *val = regval; -+ -+ return 0; -+} -+ -+DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mtk_wed_reg_get, 
mtk_wed_reg_set, -+ "0x%08llx\n"); -+ -+void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw) -+{ -+ struct dentry *dir; -+ -+ snprintf(hw->dirname, sizeof(hw->dirname), "wed%d", hw->index); -+ dir = debugfs_create_dir(hw->dirname, NULL); -+ if (!dir) -+ return; -+ -+ hw->debugfs_dir = dir; -+ debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg); -+ debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval); -+ debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops); -+} ---- /dev/null -+++ b/drivers/net/ethernet/mediatek/mtk_wed_ops.c -@@ -0,0 +1,8 @@ -+// SPDX-License-Identifier: GPL-2.0-only -+/* Copyright (C) 2020 Felix Fietkau */ -+ -+#include -+#include -+ -+const struct mtk_wed_ops __rcu *mtk_soc_wed_ops; -+EXPORT_SYMBOL_GPL(mtk_soc_wed_ops); ---- /dev/null -+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h -@@ -0,0 +1,251 @@ -+// SPDX-License-Identifier: GPL-2.0-only -+/* Copyright (C) 2020 Felix Fietkau */ -+ -+#ifndef __MTK_WED_REGS_H -+#define __MTK_WED_REGS_H -+ -+#define MTK_WDMA_DESC_CTRL_LEN1 GENMASK(14, 0) -+#define MTK_WDMA_DESC_CTRL_LAST_SEG1 BIT(15) -+#define MTK_WDMA_DESC_CTRL_BURST BIT(16) -+#define MTK_WDMA_DESC_CTRL_LEN0 GENMASK(29, 16) -+#define MTK_WDMA_DESC_CTRL_LAST_SEG0 BIT(30) -+#define MTK_WDMA_DESC_CTRL_DMA_DONE BIT(31) -+ -+struct mtk_wdma_desc { -+ __le32 buf0; -+ __le32 ctrl; -+ __le32 buf1; -+ __le32 info; -+} __packed __aligned(4); -+ -+#define MTK_WED_RESET 0x008 -+#define MTK_WED_RESET_TX_BM BIT(0) -+#define MTK_WED_RESET_TX_FREE_AGENT BIT(4) -+#define MTK_WED_RESET_WPDMA_TX_DRV BIT(8) -+#define MTK_WED_RESET_WPDMA_RX_DRV BIT(9) -+#define MTK_WED_RESET_WPDMA_INT_AGENT BIT(11) -+#define MTK_WED_RESET_WED_TX_DMA BIT(12) -+#define MTK_WED_RESET_WDMA_RX_DRV BIT(17) -+#define MTK_WED_RESET_WDMA_INT_AGENT BIT(19) -+#define MTK_WED_RESET_WED BIT(31) -+ -+#define MTK_WED_CTRL 0x00c -+#define MTK_WED_CTRL_WPDMA_INT_AGENT_EN BIT(0) -+#define MTK_WED_CTRL_WPDMA_INT_AGENT_BUSY BIT(1) -+#define MTK_WED_CTRL_WDMA_INT_AGENT_EN BIT(2) -+#define MTK_WED_CTRL_WDMA_INT_AGENT_BUSY BIT(3) -+#define MTK_WED_CTRL_WED_TX_BM_EN BIT(8) -+#define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9) -+#define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10) -+#define MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY BIT(11) -+#define MTK_WED_CTRL_RESERVE_EN BIT(12) -+#define MTK_WED_CTRL_RESERVE_BUSY BIT(13) -+#define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24) -+#define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28) -+ -+#define MTK_WED_EXT_INT_STATUS 0x020 -+#define MTK_WED_EXT_INT_STATUS_TF_LEN_ERR BIT(0) -+#define MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD BIT(1) -+#define MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID BIT(4) -+#define MTK_WED_EXT_INT_STATUS_TX_FBUF_LO_TH BIT(8) -+#define MTK_WED_EXT_INT_STATUS_TX_FBUF_HI_TH BIT(9) -+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(12) -+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(13) -+#define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR BIT(16) -+#define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR BIT(17) -+#define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT BIT(18) -+#define MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN BIT(19) -+#define MTK_WED_EXT_INT_STATUS_RX_DRV_BM_DMAD_COHERENT BIT(20) -+#define MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR BIT(21) -+#define MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR BIT(22) -+#define MTK_WED_EXT_INT_STATUS_RX_DRV_DMA_RECYCLE BIT(24) -+#define MTK_WED_EXT_INT_STATUS_ERROR_MASK (MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \ -+ MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \ -+ MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID | \ -+ 
MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR | \ -+ MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR | \ -+ MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN | \ -+ MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR | \ -+ MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR) -+ -+#define MTK_WED_EXT_INT_MASK 0x028 -+ -+#define MTK_WED_STATUS 0x060 -+#define MTK_WED_STATUS_TX GENMASK(15, 8) -+ -+#define MTK_WED_TX_BM_CTRL 0x080 -+#define MTK_WED_TX_BM_CTRL_VLD_GRP_NUM GENMASK(6, 0) -+#define MTK_WED_TX_BM_CTRL_RSV_GRP_NUM GENMASK(22, 16) -+#define MTK_WED_TX_BM_CTRL_PAUSE BIT(28) -+ -+#define MTK_WED_TX_BM_BASE 0x084 -+ -+#define MTK_WED_TX_BM_TKID 0x088 -+#define MTK_WED_TX_BM_TKID_START GENMASK(15, 0) -+#define MTK_WED_TX_BM_TKID_END GENMASK(31, 16) -+ -+#define MTK_WED_TX_BM_BUF_LEN 0x08c -+ -+#define MTK_WED_TX_BM_INTF 0x09c -+#define MTK_WED_TX_BM_INTF_TKID GENMASK(15, 0) -+#define MTK_WED_TX_BM_INTF_TKFIFO_FDEP GENMASK(23, 16) -+#define MTK_WED_TX_BM_INTF_TKID_VALID BIT(28) -+#define MTK_WED_TX_BM_INTF_TKID_READ BIT(29) -+ -+#define MTK_WED_TX_BM_DYN_THR 0x0a0 -+#define MTK_WED_TX_BM_DYN_THR_LO GENMASK(6, 0) -+#define MTK_WED_TX_BM_DYN_THR_HI GENMASK(22, 16) -+ -+#define MTK_WED_INT_STATUS 0x200 -+#define MTK_WED_INT_MASK 0x204 -+ -+#define MTK_WED_GLO_CFG 0x208 -+#define MTK_WED_GLO_CFG_TX_DMA_EN BIT(0) -+#define MTK_WED_GLO_CFG_TX_DMA_BUSY BIT(1) -+#define MTK_WED_GLO_CFG_RX_DMA_EN BIT(2) -+#define MTK_WED_GLO_CFG_RX_DMA_BUSY BIT(3) -+#define MTK_WED_GLO_CFG_RX_BT_SIZE GENMASK(5, 4) -+#define MTK_WED_GLO_CFG_TX_WB_DDONE BIT(6) -+#define MTK_WED_GLO_CFG_BIG_ENDIAN BIT(7) -+#define MTK_WED_GLO_CFG_DIS_BT_SIZE_ALIGN BIT(8) -+#define MTK_WED_GLO_CFG_TX_BT_SIZE_LO BIT(9) -+#define MTK_WED_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10) -+#define MTK_WED_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12) -+#define MTK_WED_GLO_CFG_MI_DEPTH_RD GENMASK(21, 13) -+#define MTK_WED_GLO_CFG_TX_BT_SIZE_HI GENMASK(23, 22) -+#define MTK_WED_GLO_CFG_SW_RESET BIT(24) -+#define MTK_WED_GLO_CFG_FIRST_TOKEN_ONLY BIT(26) -+#define MTK_WED_GLO_CFG_OMIT_RX_INFO BIT(27) -+#define MTK_WED_GLO_CFG_OMIT_TX_INFO BIT(28) -+#define MTK_WED_GLO_CFG_BYTE_SWAP BIT(29) -+#define MTK_WED_GLO_CFG_RX_2B_OFFSET BIT(31) -+ -+#define MTK_WED_RESET_IDX 0x20c -+#define MTK_WED_RESET_IDX_TX GENMASK(3, 0) -+#define MTK_WED_RESET_IDX_RX GENMASK(17, 16) -+ -+#define MTK_WED_TX_MIB(_n) (0x2a0 + (_n) * 4) -+ -+#define MTK_WED_RING_TX(_n) (0x300 + (_n) * 0x10) -+ -+#define MTK_WED_RING_RX(_n) (0x400 + (_n) * 0x10) -+ -+#define MTK_WED_WPDMA_INT_TRIGGER 0x504 -+#define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE BIT(1) -+#define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE GENMASK(5, 4) -+ -+#define MTK_WED_WPDMA_GLO_CFG 0x508 -+#define MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN BIT(0) -+#define MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY BIT(1) -+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN BIT(2) -+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY BIT(3) -+#define MTK_WED_WPDMA_GLO_CFG_RX_BT_SIZE GENMASK(5, 4) -+#define MTK_WED_WPDMA_GLO_CFG_TX_WB_DDONE BIT(6) -+#define MTK_WED_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7) -+#define MTK_WED_WPDMA_GLO_CFG_DIS_BT_SIZE_ALIGN BIT(8) -+#define MTK_WED_WPDMA_GLO_CFG_TX_BT_SIZE_LO BIT(9) -+#define MTK_WED_WPDMA_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10) -+#define MTK_WED_WPDMA_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12) -+#define MTK_WED_WPDMA_GLO_CFG_MI_DEPTH_RD GENMASK(21, 13) -+#define MTK_WED_WPDMA_GLO_CFG_TX_BT_SIZE_HI GENMASK(23, 22) -+#define MTK_WED_WPDMA_GLO_CFG_SW_RESET BIT(24) -+#define MTK_WED_WPDMA_GLO_CFG_FIRST_TOKEN_ONLY BIT(26) -+#define MTK_WED_WPDMA_GLO_CFG_OMIT_RX_INFO BIT(27) -+#define 
MTK_WED_WPDMA_GLO_CFG_OMIT_TX_INFO BIT(28) -+#define MTK_WED_WPDMA_GLO_CFG_BYTE_SWAP BIT(29) -+#define MTK_WED_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31) -+ -+#define MTK_WED_WPDMA_RESET_IDX 0x50c -+#define MTK_WED_WPDMA_RESET_IDX_TX GENMASK(3, 0) -+#define MTK_WED_WPDMA_RESET_IDX_RX GENMASK(17, 16) -+ -+#define MTK_WED_WPDMA_INT_CTRL 0x520 -+#define MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV BIT(21) -+ -+#define MTK_WED_WPDMA_INT_MASK 0x524 -+ -+#define MTK_WED_PCIE_CFG_BASE 0x560 -+ -+#define MTK_WED_PCIE_INT_TRIGGER 0x570 -+#define MTK_WED_PCIE_INT_TRIGGER_STATUS BIT(16) -+ -+#define MTK_WED_WPDMA_CFG_BASE 0x580 -+ -+#define MTK_WED_WPDMA_TX_MIB(_n) (0x5a0 + (_n) * 4) -+#define MTK_WED_WPDMA_TX_COHERENT_MIB(_n) (0x5d0 + (_n) * 4) -+ -+#define MTK_WED_WPDMA_RING_TX(_n) (0x600 + (_n) * 0x10) -+#define MTK_WED_WPDMA_RING_RX(_n) (0x700 + (_n) * 0x10) -+#define MTK_WED_WDMA_RING_RX(_n) (0x900 + (_n) * 0x10) -+#define MTK_WED_WDMA_RX_THRES(_n) (0x940 + (_n) * 0x4) -+ -+#define MTK_WED_WDMA_GLO_CFG 0xa04 -+#define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN BIT(0) -+#define MTK_WED_WDMA_GLO_CFG_RX_DRV_EN BIT(2) -+#define MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY BIT(3) -+#define MTK_WED_WDMA_GLO_CFG_BT_SIZE GENMASK(5, 4) -+#define MTK_WED_WDMA_GLO_CFG_TX_WB_DDONE BIT(6) -+#define MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE BIT(13) -+#define MTK_WED_WDMA_GLO_CFG_WCOMPLETE_SEL BIT(16) -+#define MTK_WED_WDMA_GLO_CFG_INIT_PHASE_RXDMA_BYPASS BIT(17) -+#define MTK_WED_WDMA_GLO_CFG_INIT_PHASE_BYPASS BIT(18) -+#define MTK_WED_WDMA_GLO_CFG_FSM_RETURN_IDLE BIT(19) -+#define MTK_WED_WDMA_GLO_CFG_WAIT_COHERENT BIT(20) -+#define MTK_WED_WDMA_GLO_CFG_AXI_W_AFTER_AW BIT(21) -+#define MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY_SINGLE_W BIT(22) -+#define MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY BIT(23) -+#define MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP BIT(24) -+#define MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE BIT(25) -+#define MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE BIT(26) -+#define MTK_WED_WDMA_GLO_CFG_RXDRV_CLKGATE_BYPASS BIT(30) -+ -+#define MTK_WED_WDMA_RESET_IDX 0xa08 -+#define MTK_WED_WDMA_RESET_IDX_RX GENMASK(17, 16) -+#define MTK_WED_WDMA_RESET_IDX_DRV GENMASK(25, 24) -+ -+#define MTK_WED_WDMA_INT_TRIGGER 0xa28 -+#define MTK_WED_WDMA_INT_TRIGGER_RX_DONE GENMASK(17, 16) -+ -+#define MTK_WED_WDMA_INT_CTRL 0xa2c -+#define MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL GENMASK(17, 16) -+ -+#define MTK_WED_WDMA_OFFSET0 0xaa4 -+#define MTK_WED_WDMA_OFFSET1 0xaa8 -+ -+#define MTK_WED_WDMA_RX_MIB(_n) (0xae0 + (_n) * 4) -+#define MTK_WED_WDMA_RX_RECYCLE_MIB(_n) (0xae8 + (_n) * 4) -+#define MTK_WED_WDMA_RX_PROCESSED_MIB(_n) (0xaf0 + (_n) * 4) -+ -+#define MTK_WED_RING_OFS_BASE 0x00 -+#define MTK_WED_RING_OFS_COUNT 0x04 -+#define MTK_WED_RING_OFS_CPU_IDX 0x08 -+#define MTK_WED_RING_OFS_DMA_IDX 0x0c -+ -+#define MTK_WDMA_RING_RX(_n) (0x100 + (_n) * 0x10) -+ -+#define MTK_WDMA_GLO_CFG 0x204 -+#define MTK_WDMA_GLO_CFG_RX_INFO_PRERES GENMASK(28, 26) -+ -+#define MTK_WDMA_RESET_IDX 0x208 -+#define MTK_WDMA_RESET_IDX_TX GENMASK(3, 0) -+#define MTK_WDMA_RESET_IDX_RX GENMASK(17, 16) -+ -+#define MTK_WDMA_INT_MASK 0x228 -+#define MTK_WDMA_INT_MASK_TX_DONE GENMASK(3, 0) -+#define MTK_WDMA_INT_MASK_RX_DONE GENMASK(17, 16) -+#define MTK_WDMA_INT_MASK_TX_DELAY BIT(28) -+#define MTK_WDMA_INT_MASK_TX_COHERENT BIT(29) -+#define MTK_WDMA_INT_MASK_RX_DELAY BIT(30) -+#define MTK_WDMA_INT_MASK_RX_COHERENT BIT(31) -+ -+#define MTK_WDMA_INT_GRP1 0x250 -+#define MTK_WDMA_INT_GRP2 0x254 -+ -+#define MTK_PCIE_MIRROR_MAP(n) ((n) ? 
0x4 : 0x0) -+#define MTK_PCIE_MIRROR_MAP_EN BIT(0) -+#define MTK_PCIE_MIRROR_MAP_WED_ID BIT(1) -+ -+/* DMA channel mapping */ -+#define HIFSYS_DMA_AG_MAP 0x008 -+ -+#endif ---- /dev/null -+++ b/include/linux/soc/mediatek/mtk_wed.h -@@ -0,0 +1,131 @@ -+#ifndef __MTK_WED_H -+#define __MTK_WED_H -+ -+#include -+#include -+#include -+#include -+ -+#define MTK_WED_TX_QUEUES 2 -+ -+struct mtk_wed_hw; -+struct mtk_wdma_desc; -+ -+struct mtk_wed_ring { -+ struct mtk_wdma_desc *desc; -+ dma_addr_t desc_phys; -+ int size; -+ -+ u32 reg_base; -+ void __iomem *wpdma; -+}; -+ -+struct mtk_wed_device { -+#ifdef CONFIG_NET_MEDIATEK_SOC_WED -+ const struct mtk_wed_ops *ops; -+ struct device *dev; -+ struct mtk_wed_hw *hw; -+ bool init_done, running; -+ int wdma_idx; -+ int irq; -+ -+ struct mtk_wed_ring tx_ring[MTK_WED_TX_QUEUES]; -+ struct mtk_wed_ring txfree_ring; -+ struct mtk_wed_ring tx_wdma[MTK_WED_TX_QUEUES]; -+ -+ struct { -+ int size; -+ void **pages; -+ struct mtk_wdma_desc *desc; -+ dma_addr_t desc_phys; -+ } buf_ring; -+ -+ /* filled by driver: */ -+ struct { -+ struct pci_dev *pci_dev; -+ -+ u32 wpdma_phys; -+ -+ u16 token_start; -+ unsigned int nbuf; -+ -+ u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id); -+ int (*offload_enable)(struct mtk_wed_device *wed); -+ void (*offload_disable)(struct mtk_wed_device *wed); -+ } wlan; -+#endif -+}; -+ -+struct mtk_wed_ops { -+ int (*attach)(struct mtk_wed_device *dev); -+ int (*tx_ring_setup)(struct mtk_wed_device *dev, int ring, -+ void __iomem *regs); -+ int (*txfree_ring_setup)(struct mtk_wed_device *dev, -+ void __iomem *regs); -+ void (*detach)(struct mtk_wed_device *dev); -+ -+ void (*stop)(struct mtk_wed_device *dev); -+ void (*start)(struct mtk_wed_device *dev, u32 irq_mask); -+ void (*reset_dma)(struct mtk_wed_device *dev); -+ -+ u32 (*reg_read)(struct mtk_wed_device *dev, u32 reg); -+ void (*reg_write)(struct mtk_wed_device *dev, u32 reg, u32 val); -+ -+ u32 (*irq_get)(struct mtk_wed_device *dev, u32 mask); -+ void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask); -+}; -+ -+extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops; -+ -+static inline int -+mtk_wed_device_attach(struct mtk_wed_device *dev) -+{ -+ int ret = -ENODEV; -+ -+#ifdef CONFIG_NET_MEDIATEK_SOC_WED -+ rcu_read_lock(); -+ dev->ops = rcu_dereference(mtk_soc_wed_ops); -+ if (dev->ops) -+ ret = dev->ops->attach(dev); -+ else -+ rcu_read_unlock(); -+ -+ if (ret) -+ dev->ops = NULL; -+#endif -+ -+ return ret; -+} -+ -+#ifdef CONFIG_NET_MEDIATEK_SOC_WED -+#define mtk_wed_device_active(_dev) !!(_dev)->ops -+#define mtk_wed_device_detach(_dev) (_dev)->ops->detach(_dev) -+#define mtk_wed_device_start(_dev, _mask) (_dev)->ops->start(_dev, _mask) -+#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) \ -+ (_dev)->ops->tx_ring_setup(_dev, _ring, _regs) -+#define mtk_wed_device_txfree_ring_setup(_dev, _regs) \ -+ (_dev)->ops->txfree_ring_setup(_dev, _regs) -+#define mtk_wed_device_reg_read(_dev, _reg) \ -+ (_dev)->ops->reg_read(_dev, _reg) -+#define mtk_wed_device_reg_write(_dev, _reg, _val) \ -+ (_dev)->ops->reg_write(_dev, _reg, _val) -+#define mtk_wed_device_irq_get(_dev, _mask) \ -+ (_dev)->ops->irq_get(_dev, _mask) -+#define mtk_wed_device_irq_set_mask(_dev, _mask) \ -+ (_dev)->ops->irq_set_mask(_dev, _mask) -+#else -+static inline bool mtk_wed_device_active(struct mtk_wed_device *dev) -+{ -+ return false; -+} -+#define mtk_wed_device_detach(_dev) do {} while (0) -+#define mtk_wed_device_start(_dev, _mask) do {} while (0) -+#define 
mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) -ENODEV -+#define mtk_wed_device_txfree_ring_setup(_dev, _ring, _regs) -ENODEV -+#define mtk_wed_device_reg_read(_dev, _reg) 0 -+#define mtk_wed_device_reg_write(_dev, _reg, _val) do {} while (0) -+#define mtk_wed_device_irq_get(_dev, _mask) 0 -+#define mtk_wed_device_irq_set_mask(_dev, _mask) do {} while (0) -+#endif -+ -+#endif diff --git a/target/linux/generic/backport-6.1/702-v5.19-03-net-ethernet-mtk_eth_soc-implement-flow-offloading-t.patch b/target/linux/generic/backport-6.1/702-v5.19-03-net-ethernet-mtk_eth_soc-implement-flow-offloading-t.patch deleted file mode 100644 index 50d65b1eb6ff..000000000000 --- a/target/linux/generic/backport-6.1/702-v5.19-03-net-ethernet-mtk_eth_soc-implement-flow-offloading-t.patch +++ /dev/null @@ -1,269 +0,0 @@ -From: Felix Fietkau -Date: Sat, 5 Feb 2022 18:29:22 +0100 -Subject: [PATCH] net: ethernet: mtk_eth_soc: implement flow offloading - to WED devices - -This allows hardware flow offloading from Ethernet to WLAN on MT7622 SoC - -Co-developed-by: Lorenzo Bianconi -Signed-off-by: Lorenzo Bianconi -Signed-off-by: Felix Fietkau ---- - ---- a/drivers/net/ethernet/mediatek/mtk_ppe.c -+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c -@@ -329,6 +329,24 @@ int mtk_foe_entry_set_pppoe(struct mtk_f - return 0; - } - -+int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq, -+ int bss, int wcid) -+{ -+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry); -+ u32 *ib2 = mtk_foe_entry_ib2(entry); -+ -+ *ib2 &= ~MTK_FOE_IB2_PORT_MG; -+ *ib2 |= MTK_FOE_IB2_WDMA_WINFO; -+ if (wdma_idx) -+ *ib2 |= MTK_FOE_IB2_WDMA_DEVIDX; -+ -+ l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) | -+ FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) | -+ FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq); -+ -+ return 0; -+} -+ - static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry) - { - return !(entry->ib1 & MTK_FOE_IB1_STATIC) && ---- a/drivers/net/ethernet/mediatek/mtk_ppe.h -+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h -@@ -48,9 +48,9 @@ enum { - #define MTK_FOE_IB2_DEST_PORT GENMASK(7, 5) - #define MTK_FOE_IB2_MULTICAST BIT(8) - --#define MTK_FOE_IB2_WHNAT_QID2 GENMASK(13, 12) --#define MTK_FOE_IB2_WHNAT_DEVIDX BIT(16) --#define MTK_FOE_IB2_WHNAT_NAT BIT(17) -+#define MTK_FOE_IB2_WDMA_QID2 GENMASK(13, 12) -+#define MTK_FOE_IB2_WDMA_DEVIDX BIT(16) -+#define MTK_FOE_IB2_WDMA_WINFO BIT(17) - - #define MTK_FOE_IB2_PORT_MG GENMASK(17, 12) - -@@ -58,9 +58,9 @@ enum { - - #define MTK_FOE_IB2_DSCP GENMASK(31, 24) - --#define MTK_FOE_VLAN2_WHNAT_BSS GEMMASK(5, 0) --#define MTK_FOE_VLAN2_WHNAT_WCID GENMASK(13, 6) --#define MTK_FOE_VLAN2_WHNAT_RING GENMASK(15, 14) -+#define MTK_FOE_VLAN2_WINFO_BSS GENMASK(5, 0) -+#define MTK_FOE_VLAN2_WINFO_WCID GENMASK(13, 6) -+#define MTK_FOE_VLAN2_WINFO_RING GENMASK(15, 14) - - enum { - MTK_FOE_STATE_INVALID, -@@ -281,6 +281,8 @@ int mtk_foe_entry_set_ipv6_tuple(struct - int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port); - int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid); - int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid); -+int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq, -+ int bss, int wcid); - int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry, - u16 timestamp); - int mtk_ppe_debugfs_init(struct mtk_ppe *ppe); ---- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c -+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c -@@ -10,6 +10,7 @@ - #include - #include - #include 
"mtk_eth_soc.h" -+#include "mtk_wed.h" - - struct mtk_flow_data { - struct ethhdr eth; -@@ -39,6 +40,7 @@ struct mtk_flow_entry { - struct rhash_head node; - unsigned long cookie; - u16 hash; -+ s8 wed_index; - }; - - static const struct rhashtable_params mtk_flow_ht_params = { -@@ -80,6 +82,35 @@ mtk_flow_offload_mangle_eth(const struct - memcpy(dest, src, act->mangle.mask ? 2 : 4); - } - -+static int -+mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_info *info) -+{ -+ struct net_device_path_ctx ctx = { -+ .dev = dev, -+ .daddr = addr, -+ }; -+ struct net_device_path path = {}; -+ -+ if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)) -+ return -1; -+ -+ if (!dev->netdev_ops->ndo_fill_forward_path) -+ return -1; -+ -+ if (dev->netdev_ops->ndo_fill_forward_path(&ctx, &path)) -+ return -1; -+ -+ if (path.type != DEV_PATH_MTK_WDMA) -+ return -1; -+ -+ info->wdma_idx = path.mtk_wdma.wdma_idx; -+ info->queue = path.mtk_wdma.queue; -+ info->bss = path.mtk_wdma.bss; -+ info->wcid = path.mtk_wdma.wcid; -+ -+ return 0; -+} -+ - - static int - mtk_flow_mangle_ports(const struct flow_action_entry *act, -@@ -149,10 +180,20 @@ mtk_flow_get_dsa_port(struct net_device - - static int - mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe, -- struct net_device *dev) -+ struct net_device *dev, const u8 *dest_mac, -+ int *wed_index) - { -+ struct mtk_wdma_info info = {}; - int pse_port, dsa_port; - -+ if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) { -+ mtk_foe_entry_set_wdma(foe, info.wdma_idx, info.queue, info.bss, -+ info.wcid); -+ pse_port = 3; -+ *wed_index = info.wdma_idx; -+ goto out; -+ } -+ - dsa_port = mtk_flow_get_dsa_port(&dev); - if (dsa_port >= 0) - mtk_foe_entry_set_dsa(foe, dsa_port); -@@ -164,6 +205,7 @@ mtk_flow_set_output_device(struct mtk_et - else - return -EOPNOTSUPP; - -+out: - mtk_foe_entry_set_pse_port(foe, pse_port); - - return 0; -@@ -179,6 +221,7 @@ mtk_flow_offload_replace(struct mtk_eth - struct net_device *odev = NULL; - struct mtk_flow_entry *entry; - int offload_type = 0; -+ int wed_index = -1; - u16 addr_type = 0; - u32 timestamp; - u8 l4proto = 0; -@@ -326,10 +369,14 @@ mtk_flow_offload_replace(struct mtk_eth - if (data.pppoe.num == 1) - mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid); - -- err = mtk_flow_set_output_device(eth, &foe, odev); -+ err = mtk_flow_set_output_device(eth, &foe, odev, data.eth.h_dest, -+ &wed_index); - if (err) - return err; - -+ if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0) -+ return err; -+ - entry = kzalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) - return -ENOMEM; -@@ -343,6 +390,7 @@ mtk_flow_offload_replace(struct mtk_eth - } - - entry->hash = hash; -+ entry->wed_index = wed_index; - err = rhashtable_insert_fast(ð->flow_table, &entry->node, - mtk_flow_ht_params); - if (err < 0) -@@ -353,6 +401,8 @@ clear_flow: - mtk_foe_entry_clear(ð->ppe, hash); - free: - kfree(entry); -+ if (wed_index >= 0) -+ mtk_wed_flow_remove(wed_index); - return err; - } - -@@ -369,6 +419,8 @@ mtk_flow_offload_destroy(struct mtk_eth - mtk_foe_entry_clear(ð->ppe, entry->hash); - rhashtable_remove_fast(ð->flow_table, &entry->node, - mtk_flow_ht_params); -+ if (entry->wed_index >= 0) -+ mtk_wed_flow_remove(entry->wed_index); - kfree(entry); - - return 0; ---- a/drivers/net/ethernet/mediatek/mtk_wed.h -+++ b/drivers/net/ethernet/mediatek/mtk_wed.h -@@ -7,6 +7,7 @@ - #include - #include - #include -+#include - - struct mtk_eth; - -@@ -27,6 +28,12 @@ struct mtk_wed_hw { - int index; - }; - -+struct 
mtk_wdma_info { -+ u8 wdma_idx; -+ u8 queue; -+ u16 wcid; -+ u8 bss; -+}; - - #ifdef CONFIG_NET_MEDIATEK_SOC_WED - static inline void ---- a/include/linux/netdevice.h -+++ b/include/linux/netdevice.h -@@ -872,6 +872,7 @@ enum net_device_path_type { - DEV_PATH_BRIDGE, - DEV_PATH_PPPOE, - DEV_PATH_DSA, -+ DEV_PATH_MTK_WDMA, - }; - - struct net_device_path { -@@ -897,6 +898,12 @@ struct net_device_path { - int port; - u16 proto; - } dsa; -+ struct { -+ u8 wdma_idx; -+ u8 queue; -+ u16 wcid; -+ u8 bss; -+ } mtk_wdma; - }; - }; - ---- a/net/core/dev.c -+++ b/net/core/dev.c -@@ -763,6 +763,10 @@ int dev_fill_forward_path(const struct n - if (WARN_ON_ONCE(last_dev == ctx.dev)) - return -1; - } -+ -+ if (!ctx.dev) -+ return ret; -+ - path = dev_fwd_path(stack); - if (!path) - return -1; diff --git a/target/linux/generic/backport-6.1/702-v5.19-04-arm64-dts-mediatek-mt7622-introduce-nodes-for-Wirele.patch b/target/linux/generic/backport-6.1/702-v5.19-04-arm64-dts-mediatek-mt7622-introduce-nodes-for-Wirele.patch deleted file mode 100644 index 2c6e3fd3cd3e..000000000000 --- a/target/linux/generic/backport-6.1/702-v5.19-04-arm64-dts-mediatek-mt7622-introduce-nodes-for-Wirele.patch +++ /dev/null @@ -1,62 +0,0 @@ -From: Felix Fietkau -Date: Sat, 5 Feb 2022 18:36:36 +0100 -Subject: [PATCH] arm64: dts: mediatek: mt7622: introduce nodes for - Wireless Ethernet Dispatch - -Introduce wed0 and wed1 nodes in order to enable offloading forwarding -between ethernet and wireless devices on the mt7622 chipset. - -Signed-off-by: Felix Fietkau ---- - ---- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi -+++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi -@@ -894,6 +894,11 @@ - }; - }; - -+ hifsys: syscon@1af00000 { -+ compatible = "mediatek,mt7622-hifsys", "syscon"; -+ reg = <0 0x1af00000 0 0x70>; -+ }; -+ - ethsys: syscon@1b000000 { - compatible = "mediatek,mt7622-ethsys", - "syscon"; -@@ -912,6 +917,26 @@ - #dma-cells = <1>; - }; - -+ pcie_mirror: pcie-mirror@10000400 { -+ compatible = "mediatek,mt7622-pcie-mirror", -+ "syscon"; -+ reg = <0 0x10000400 0 0x10>; -+ }; -+ -+ wed0: wed@1020a000 { -+ compatible = "mediatek,mt7622-wed", -+ "syscon"; -+ reg = <0 0x1020a000 0 0x1000>; -+ interrupts = ; -+ }; -+ -+ wed1: wed@1020b000 { -+ compatible = "mediatek,mt7622-wed", -+ "syscon"; -+ reg = <0 0x1020b000 0 0x1000>; -+ interrupts = ; -+ }; -+ - eth: ethernet@1b100000 { - compatible = "mediatek,mt7622-eth", - "mediatek,mt2701-eth", -@@ -939,6 +964,9 @@ - mediatek,ethsys = <ðsys>; - mediatek,sgmiisys = <&sgmiisys>; - mediatek,cci-control = <&cci_control2>; -+ mediatek,wed = <&wed0>, <&wed1>; -+ mediatek,pcie-mirror = <&pcie_mirror>; -+ mediatek,hifsys = <&hifsys>; - dma-coherent; - #address-cells = <1>; - #size-cells = <0>; diff --git a/target/linux/generic/backport-6.1/702-v5.19-05-net-ethernet-mtk_eth_soc-add-ipv6-flow-offload-suppo.patch b/target/linux/generic/backport-6.1/702-v5.19-05-net-ethernet-mtk_eth_soc-add-ipv6-flow-offload-suppo.patch deleted file mode 100644 index 9adb067015ee..000000000000 --- a/target/linux/generic/backport-6.1/702-v5.19-05-net-ethernet-mtk_eth_soc-add-ipv6-flow-offload-suppo.patch +++ /dev/null @@ -1,79 +0,0 @@ -From: David Bentham -Date: Mon, 21 Feb 2022 15:36:16 +0100 -Subject: [PATCH] net: ethernet: mtk_eth_soc: add ipv6 flow offload - support - -Add the missing IPv6 flow offloading support for routing only. -Hardware flow offloading is done by the packet processing engine (PPE) -of the Ethernet MAC and as it doesn't support mangling of IPv6 packets, -IPv6 NAT cannot be supported. 
- -Signed-off-by: David Bentham -Signed-off-by: Felix Fietkau ---- - ---- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c -+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c -@@ -6,6 +6,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -20,6 +21,11 @@ struct mtk_flow_data { - __be32 src_addr; - __be32 dst_addr; - } v4; -+ -+ struct { -+ struct in6_addr src_addr; -+ struct in6_addr dst_addr; -+ } v6; - }; - - __be16 src_port; -@@ -65,6 +71,14 @@ mtk_flow_set_ipv4_addr(struct mtk_foe_en - data->v4.dst_addr, data->dst_port); - } - -+static int -+mtk_flow_set_ipv6_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data) -+{ -+ return mtk_foe_entry_set_ipv6_tuple(foe, -+ data->v6.src_addr.s6_addr32, data->src_port, -+ data->v6.dst_addr.s6_addr32, data->dst_port); -+} -+ - static void - mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth) - { -@@ -296,6 +310,9 @@ mtk_flow_offload_replace(struct mtk_eth - case FLOW_DISSECTOR_KEY_IPV4_ADDRS: - offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT; - break; -+ case FLOW_DISSECTOR_KEY_IPV6_ADDRS: -+ offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T; -+ break; - default: - return -EOPNOTSUPP; - } -@@ -331,6 +348,17 @@ mtk_flow_offload_replace(struct mtk_eth - mtk_flow_set_ipv4_addr(&foe, &data, false); - } - -+ if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { -+ struct flow_match_ipv6_addrs addrs; -+ -+ flow_rule_match_ipv6_addrs(rule, &addrs); -+ -+ data.v6.src_addr = addrs.key->src; -+ data.v6.dst_addr = addrs.key->dst; -+ -+ mtk_flow_set_ipv6_addr(&foe, &data); -+ } -+ - flow_action_for_each(i, act, &rule->action) { - if (act->id != FLOW_ACTION_MANGLE) - continue; diff --git a/target/linux/generic/backport-6.1/702-v5.19-06-net-ethernet-mtk_eth_soc-support-TC_SETUP_BLOCK-for-.patch b/target/linux/generic/backport-6.1/702-v5.19-06-net-ethernet-mtk_eth_soc-support-TC_SETUP_BLOCK-for-.patch deleted file mode 100644 index 1950d81ebbac..000000000000 --- a/target/linux/generic/backport-6.1/702-v5.19-06-net-ethernet-mtk_eth_soc-support-TC_SETUP_BLOCK-for-.patch +++ /dev/null @@ -1,29 +0,0 @@ -From: Felix Fietkau -Date: Mon, 21 Feb 2022 15:37:21 +0100 -Subject: [PATCH] net: ethernet: mtk_eth_soc: support TC_SETUP_BLOCK for - PPE offload - -This allows offload entries to be created from user space - -Signed-off-by: Felix Fietkau ---- - ---- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c -+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c -@@ -564,10 +564,13 @@ mtk_eth_setup_tc_block(struct net_device - int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type, - void *type_data) - { -- if (type == TC_SETUP_FT) -+ switch (type) { -+ case TC_SETUP_BLOCK: -+ case TC_SETUP_FT: - return mtk_eth_setup_tc_block(dev, type_data); -- -- return -EOPNOTSUPP; -+ default: -+ return -EOPNOTSUPP; -+ } - } - - int mtk_eth_offload_init(struct mtk_eth *eth) diff --git a/target/linux/generic/backport-6.1/702-v5.19-07-net-ethernet-mtk_eth_soc-allocate-struct-mtk_ppe-sep.patch b/target/linux/generic/backport-6.1/702-v5.19-07-net-ethernet-mtk_eth_soc-allocate-struct-mtk_ppe-sep.patch deleted file mode 100644 index f18a816b7030..000000000000 --- a/target/linux/generic/backport-6.1/702-v5.19-07-net-ethernet-mtk_eth_soc-allocate-struct-mtk_ppe-sep.patch +++ /dev/null @@ -1,159 +0,0 @@ -From: Felix Fietkau -Date: Mon, 21 Feb 2022 15:38:20 +0100 -Subject: [PATCH] net: ethernet: mtk_eth_soc: allocate struct mtk_ppe - separately - -Preparation for adding more data to it, which will increase its size. 
- -Signed-off-by: Felix Fietkau ---- - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -2335,7 +2335,7 @@ static int mtk_open(struct net_device *d - return err; - } - -- if (eth->soc->offload_version && mtk_ppe_start(ð->ppe) == 0) -+ if (eth->soc->offload_version && mtk_ppe_start(eth->ppe) == 0) - gdm_config = MTK_GDMA_TO_PPE; - - mtk_gdm_config(eth, gdm_config); -@@ -2409,7 +2409,7 @@ static int mtk_stop(struct net_device *d - mtk_dma_free(eth); - - if (eth->soc->offload_version) -- mtk_ppe_stop(ð->ppe); -+ mtk_ppe_stop(eth->ppe); - - return 0; - } -@@ -3301,10 +3301,11 @@ static int mtk_probe(struct platform_dev - } - - if (eth->soc->offload_version) { -- err = mtk_ppe_init(ð->ppe, eth->dev, -- eth->base + MTK_ETH_PPE_BASE, 2); -- if (err) -+ eth->ppe = mtk_ppe_init(eth->dev, eth->base + MTK_ETH_PPE_BASE, 2); -+ if (!eth->ppe) { -+ err = -ENOMEM; - goto err_free_dev; -+ } - - err = mtk_eth_offload_init(eth); - if (err) ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -983,7 +983,7 @@ struct mtk_eth { - u32 rx_dma_l4_valid; - int ip_align; - -- struct mtk_ppe ppe; -+ struct mtk_ppe *ppe; - struct rhashtable flow_table; - }; - ---- a/drivers/net/ethernet/mediatek/mtk_ppe.c -+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c -@@ -384,10 +384,15 @@ int mtk_foe_entry_commit(struct mtk_ppe - return hash; - } - --int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base, -+struct mtk_ppe *mtk_ppe_init(struct device *dev, void __iomem *base, - int version) - { - struct mtk_foe_entry *foe; -+ struct mtk_ppe *ppe; -+ -+ ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL); -+ if (!ppe) -+ return NULL; - - /* need to allocate a separate device, since it PPE DMA access is - * not coherent. 
-@@ -399,13 +404,13 @@ int mtk_ppe_init(struct mtk_ppe *ppe, st - foe = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*foe), - &ppe->foe_phys, GFP_KERNEL); - if (!foe) -- return -ENOMEM; -+ return NULL; - - ppe->foe_table = foe; - - mtk_ppe_debugfs_init(ppe); - -- return 0; -+ return ppe; - } - - static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe) ---- a/drivers/net/ethernet/mediatek/mtk_ppe.h -+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h -@@ -246,8 +246,7 @@ struct mtk_ppe { - void *acct_table; - }; - --int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base, -- int version); -+struct mtk_ppe *mtk_ppe_init(struct device *dev, void __iomem *base, int version); - int mtk_ppe_start(struct mtk_ppe *ppe); - int mtk_ppe_stop(struct mtk_ppe *ppe); - ---- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c -+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c -@@ -411,7 +411,7 @@ mtk_flow_offload_replace(struct mtk_eth - - entry->cookie = f->cookie; - timestamp = mtk_eth_timestamp(eth); -- hash = mtk_foe_entry_commit(ð->ppe, &foe, timestamp); -+ hash = mtk_foe_entry_commit(eth->ppe, &foe, timestamp); - if (hash < 0) { - err = hash; - goto free; -@@ -426,7 +426,7 @@ mtk_flow_offload_replace(struct mtk_eth - - return 0; - clear_flow: -- mtk_foe_entry_clear(ð->ppe, hash); -+ mtk_foe_entry_clear(eth->ppe, hash); - free: - kfree(entry); - if (wed_index >= 0) -@@ -444,7 +444,7 @@ mtk_flow_offload_destroy(struct mtk_eth - if (!entry) - return -ENOENT; - -- mtk_foe_entry_clear(ð->ppe, entry->hash); -+ mtk_foe_entry_clear(eth->ppe, entry->hash); - rhashtable_remove_fast(ð->flow_table, &entry->node, - mtk_flow_ht_params); - if (entry->wed_index >= 0) -@@ -466,7 +466,7 @@ mtk_flow_offload_stats(struct mtk_eth *e - if (!entry) - return -ENOENT; - -- timestamp = mtk_foe_entry_timestamp(ð->ppe, entry->hash); -+ timestamp = mtk_foe_entry_timestamp(eth->ppe, entry->hash); - if (timestamp < 0) - return -ETIMEDOUT; - -@@ -522,7 +522,7 @@ mtk_eth_setup_tc_block(struct net_device - struct flow_block_cb *block_cb; - flow_setup_cb_t *cb; - -- if (!eth->ppe.foe_table) -+ if (!eth->ppe || !eth->ppe->foe_table) - return -EOPNOTSUPP; - - if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) -@@ -575,7 +575,7 @@ int mtk_eth_setup_tc(struct net_device * - - int mtk_eth_offload_init(struct mtk_eth *eth) - { -- if (!eth->ppe.foe_table) -+ if (!eth->ppe || !eth->ppe->foe_table) - return 0; - - return rhashtable_init(ð->flow_table, &mtk_flow_ht_params); diff --git a/target/linux/generic/backport-6.1/702-v5.19-08-net-ethernet-mtk_eth_soc-rework-hardware-flow-table-.patch b/target/linux/generic/backport-6.1/702-v5.19-08-net-ethernet-mtk_eth_soc-rework-hardware-flow-table-.patch deleted file mode 100644 index 2609cbedec2e..000000000000 --- a/target/linux/generic/backport-6.1/702-v5.19-08-net-ethernet-mtk_eth_soc-rework-hardware-flow-table-.patch +++ /dev/null @@ -1,424 +0,0 @@ -From: Felix Fietkau -Date: Mon, 21 Feb 2022 15:39:18 +0100 -Subject: [PATCH] net: ethernet: mtk_eth_soc: rework hardware flow table - management - -The hardware was designed to handle flow detection and creation of flow entries -by itself, relying on the software primarily for filling in egress routing -information. -When there is a hash collision between multiple flows, this allows the hardware -to maintain the entry for the most active flow. -Additionally, the hardware only keeps offloading active for entries with at -least 30 packets per second. 
- -With this rework, the code no longer creates a hardware entries directly. -Instead, the hardware entry is only created when the PPE reports a matching -unbound flow with the minimum target rate. -In order to reduce CPU overhead, looking for flows belonging to a hash entry -is rate limited to once every 100ms. - -This rework is also used as preparation for emulating bridge offload by -managing L4 offload entries on demand. - -Signed-off-by: Felix Fietkau ---- - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -21,6 +21,7 @@ - #include - #include - #include -+#include - #include - - #include "mtk_eth_soc.h" -@@ -1293,7 +1294,7 @@ static int mtk_poll_rx(struct napi_struc - struct net_device *netdev; - unsigned int pktlen; - dma_addr_t dma_addr; -- u32 hash; -+ u32 hash, reason; - int mac; - - ring = mtk_get_rx_ring(eth); -@@ -1372,6 +1373,11 @@ static int mtk_poll_rx(struct napi_struc - skb_set_hash(skb, hash, PKT_HASH_TYPE_L4); - } - -+ reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4); -+ if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED) -+ mtk_ppe_check_skb(eth->ppe, skb, -+ trxd.rxd4 & MTK_RXD4_FOE_ENTRY); -+ - if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX && - (trxd.rxd2 & RX_DMA_VTAG)) - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), -@@ -3301,7 +3307,7 @@ static int mtk_probe(struct platform_dev - } - - if (eth->soc->offload_version) { -- eth->ppe = mtk_ppe_init(eth->dev, eth->base + MTK_ETH_PPE_BASE, 2); -+ eth->ppe = mtk_ppe_init(eth, eth->base + MTK_ETH_PPE_BASE, 2); - if (!eth->ppe) { - err = -ENOMEM; - goto err_free_dev; ---- a/drivers/net/ethernet/mediatek/mtk_ppe.c -+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c -@@ -6,9 +6,12 @@ - #include - #include - #include -+#include "mtk_eth_soc.h" - #include "mtk_ppe.h" - #include "mtk_ppe_regs.h" - -+static DEFINE_SPINLOCK(ppe_lock); -+ - static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val) - { - writel(val, ppe->base + reg); -@@ -41,6 +44,11 @@ static u32 ppe_clear(struct mtk_ppe *ppe - return ppe_m32(ppe, reg, val, 0); - } - -+static u32 mtk_eth_timestamp(struct mtk_eth *eth) -+{ -+ return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP; -+} -+ - static int mtk_ppe_wait_busy(struct mtk_ppe *ppe) - { - int ret; -@@ -353,26 +361,59 @@ static inline bool mtk_foe_entry_usable( - FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND; - } - --int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry, -- u16 timestamp) -+static bool -+mtk_flow_entry_match(struct mtk_flow_entry *entry, struct mtk_foe_entry *data) -+{ -+ int type, len; -+ -+ if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP) -+ return false; -+ -+ type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1); -+ if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE) -+ len = offsetof(struct mtk_foe_entry, ipv6._rsv); -+ else -+ len = offsetof(struct mtk_foe_entry, ipv4.ib2); -+ -+ return !memcmp(&entry->data.data, &data->data, len - 4); -+} -+ -+static void -+mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) - { - struct mtk_foe_entry *hwe; -- u32 hash; -+ struct mtk_foe_entry foe; - -+ spin_lock_bh(&ppe_lock); -+ if (entry->hash == 0xffff) -+ goto out; -+ -+ hwe = &ppe->foe_table[entry->hash]; -+ memcpy(&foe, hwe, sizeof(foe)); -+ if (!mtk_flow_entry_match(entry, &foe)) { -+ entry->hash = 0xffff; -+ goto out; -+ } -+ -+ entry->data.ib1 = foe.ib1; -+ -+out: -+ spin_unlock_bh(&ppe_lock); -+} -+ -+static void -+__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct 
mtk_foe_entry *entry, -+ u16 hash) -+{ -+ struct mtk_foe_entry *hwe; -+ u16 timestamp; -+ -+ timestamp = mtk_eth_timestamp(ppe->eth); - timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP; - entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP; - entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp); - -- hash = mtk_ppe_hash_entry(entry); - hwe = &ppe->foe_table[hash]; -- if (!mtk_foe_entry_usable(hwe)) { -- hwe++; -- hash++; -- -- if (!mtk_foe_entry_usable(hwe)) -- return -ENOSPC; -- } -- - memcpy(&hwe->data, &entry->data, sizeof(hwe->data)); - wmb(); - hwe->ib1 = entry->ib1; -@@ -380,13 +421,77 @@ int mtk_foe_entry_commit(struct mtk_ppe - dma_wmb(); - - mtk_ppe_cache_clear(ppe); -+} - -- return hash; -+void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) -+{ -+ spin_lock_bh(&ppe_lock); -+ hlist_del_init(&entry->list); -+ if (entry->hash != 0xffff) { -+ ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE; -+ ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, -+ MTK_FOE_STATE_BIND); -+ dma_wmb(); -+ } -+ entry->hash = 0xffff; -+ spin_unlock_bh(&ppe_lock); -+} -+ -+int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) -+{ -+ u32 hash = mtk_ppe_hash_entry(&entry->data); -+ -+ entry->hash = 0xffff; -+ spin_lock_bh(&ppe_lock); -+ hlist_add_head(&entry->list, &ppe->foe_flow[hash / 2]); -+ spin_unlock_bh(&ppe_lock); -+ -+ return 0; -+} -+ -+void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash) -+{ -+ struct hlist_head *head = &ppe->foe_flow[hash / 2]; -+ struct mtk_flow_entry *entry; -+ struct mtk_foe_entry *hwe = &ppe->foe_table[hash]; -+ bool found = false; -+ -+ if (hlist_empty(head)) -+ return; -+ -+ spin_lock_bh(&ppe_lock); -+ hlist_for_each_entry(entry, head, list) { -+ if (found || !mtk_flow_entry_match(entry, hwe)) { -+ if (entry->hash != 0xffff) -+ entry->hash = 0xffff; -+ continue; -+ } -+ -+ entry->hash = hash; -+ __mtk_foe_entry_commit(ppe, &entry->data, hash); -+ found = true; -+ } -+ spin_unlock_bh(&ppe_lock); -+} -+ -+int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) -+{ -+ u16 now = mtk_eth_timestamp(ppe->eth) & MTK_FOE_IB1_BIND_TIMESTAMP; -+ u16 timestamp; -+ -+ mtk_flow_entry_update(ppe, entry); -+ timestamp = entry->data.ib1 & MTK_FOE_IB1_BIND_TIMESTAMP; -+ -+ if (timestamp > now) -+ return MTK_FOE_IB1_BIND_TIMESTAMP + 1 - timestamp + now; -+ else -+ return now - timestamp; - } - --struct mtk_ppe *mtk_ppe_init(struct device *dev, void __iomem *base, -+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, - int version) - { -+ struct device *dev = eth->dev; - struct mtk_foe_entry *foe; - struct mtk_ppe *ppe; - -@@ -398,6 +503,7 @@ struct mtk_ppe *mtk_ppe_init(struct devi - * not coherent. 
- */ - ppe->base = base; -+ ppe->eth = eth; - ppe->dev = dev; - ppe->version = version; - ---- a/drivers/net/ethernet/mediatek/mtk_ppe.h -+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h -@@ -235,7 +235,17 @@ enum { - MTK_PPE_CPU_REASON_INVALID = 0x1f, - }; - -+struct mtk_flow_entry { -+ struct rhash_head node; -+ struct hlist_node list; -+ unsigned long cookie; -+ struct mtk_foe_entry data; -+ u16 hash; -+ s8 wed_index; -+}; -+ - struct mtk_ppe { -+ struct mtk_eth *eth; - struct device *dev; - void __iomem *base; - int version; -@@ -243,18 +253,33 @@ struct mtk_ppe { - struct mtk_foe_entry *foe_table; - dma_addr_t foe_phys; - -+ u16 foe_check_time[MTK_PPE_ENTRIES]; -+ struct hlist_head foe_flow[MTK_PPE_ENTRIES / 2]; -+ - void *acct_table; - }; - --struct mtk_ppe *mtk_ppe_init(struct device *dev, void __iomem *base, int version); -+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int version); - int mtk_ppe_start(struct mtk_ppe *ppe); - int mtk_ppe_stop(struct mtk_ppe *ppe); - -+void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash); -+ - static inline void --mtk_foe_entry_clear(struct mtk_ppe *ppe, u16 hash) -+mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash) - { -- ppe->foe_table[hash].ib1 = 0; -- dma_wmb(); -+ u16 now, diff; -+ -+ if (!ppe) -+ return; -+ -+ now = (u16)jiffies; -+ diff = now - ppe->foe_check_time[hash]; -+ if (diff < HZ / 10) -+ return; -+ -+ ppe->foe_check_time[hash] = now; -+ __mtk_ppe_check_skb(ppe, skb, hash); - } - - static inline int -@@ -282,8 +307,9 @@ int mtk_foe_entry_set_vlan(struct mtk_fo - int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid); - int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq, - int bss, int wcid); --int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry, -- u16 timestamp); -+int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry); -+void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry); -+int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry); - int mtk_ppe_debugfs_init(struct mtk_ppe *ppe); - - #endif ---- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c -+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c -@@ -42,13 +42,6 @@ struct mtk_flow_data { - } pppoe; - }; - --struct mtk_flow_entry { -- struct rhash_head node; -- unsigned long cookie; -- u16 hash; -- s8 wed_index; --}; -- - static const struct rhashtable_params mtk_flow_ht_params = { - .head_offset = offsetof(struct mtk_flow_entry, node), - .key_offset = offsetof(struct mtk_flow_entry, cookie), -@@ -56,12 +49,6 @@ static const struct rhashtable_params mt - .automatic_shrinking = true, - }; - --static u32 --mtk_eth_timestamp(struct mtk_eth *eth) --{ -- return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP; --} -- - static int - mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data, - bool egress) -@@ -237,10 +224,8 @@ mtk_flow_offload_replace(struct mtk_eth - int offload_type = 0; - int wed_index = -1; - u16 addr_type = 0; -- u32 timestamp; - u8 l4proto = 0; - int err = 0; -- int hash; - int i; - - if (rhashtable_lookup(ð->flow_table, &f->cookie, mtk_flow_ht_params)) -@@ -410,23 +395,21 @@ mtk_flow_offload_replace(struct mtk_eth - return -ENOMEM; - - entry->cookie = f->cookie; -- timestamp = mtk_eth_timestamp(eth); -- hash = mtk_foe_entry_commit(eth->ppe, &foe, timestamp); -- if (hash < 0) { -- err = hash; -+ memcpy(&entry->data, &foe, sizeof(entry->data)); -+ 
entry->wed_index = wed_index; -+ -+ if (mtk_foe_entry_commit(eth->ppe, entry) < 0) - goto free; -- } - -- entry->hash = hash; -- entry->wed_index = wed_index; - err = rhashtable_insert_fast(ð->flow_table, &entry->node, - mtk_flow_ht_params); - if (err < 0) -- goto clear_flow; -+ goto clear; - - return 0; --clear_flow: -- mtk_foe_entry_clear(eth->ppe, hash); -+ -+clear: -+ mtk_foe_entry_clear(eth->ppe, entry); - free: - kfree(entry); - if (wed_index >= 0) -@@ -444,7 +427,7 @@ mtk_flow_offload_destroy(struct mtk_eth - if (!entry) - return -ENOENT; - -- mtk_foe_entry_clear(eth->ppe, entry->hash); -+ mtk_foe_entry_clear(eth->ppe, entry); - rhashtable_remove_fast(ð->flow_table, &entry->node, - mtk_flow_ht_params); - if (entry->wed_index >= 0) -@@ -458,7 +441,6 @@ static int - mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f) - { - struct mtk_flow_entry *entry; -- int timestamp; - u32 idle; - - entry = rhashtable_lookup(ð->flow_table, &f->cookie, -@@ -466,11 +448,7 @@ mtk_flow_offload_stats(struct mtk_eth *e - if (!entry) - return -ENOENT; - -- timestamp = mtk_foe_entry_timestamp(eth->ppe, entry->hash); -- if (timestamp < 0) -- return -ETIMEDOUT; -- -- idle = mtk_eth_timestamp(eth) - timestamp; -+ idle = mtk_foe_entry_idle_time(eth->ppe, entry); - f->stats.lastused = jiffies - idle * HZ; - - return 0; diff --git a/target/linux/generic/backport-6.1/702-v5.19-09-net-ethernet-mtk_eth_soc-remove-bridge-flow-offload-.patch b/target/linux/generic/backport-6.1/702-v5.19-09-net-ethernet-mtk_eth_soc-remove-bridge-flow-offload-.patch deleted file mode 100644 index 2ff0b341f915..000000000000 --- a/target/linux/generic/backport-6.1/702-v5.19-09-net-ethernet-mtk_eth_soc-remove-bridge-flow-offload-.patch +++ /dev/null @@ -1,44 +0,0 @@ -From: Felix Fietkau -Date: Mon, 21 Feb 2022 15:55:19 +0100 -Subject: [PATCH] net: ethernet: mtk_eth_soc: remove bridge flow offload - type entry support - -According to MediaTek, this feature is not supported in current hardware - -Signed-off-by: Felix Fietkau ---- - ---- a/drivers/net/ethernet/mediatek/mtk_ppe.c -+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c -@@ -84,13 +84,6 @@ static u32 mtk_ppe_hash_entry(struct mtk - u32 hash; - - switch (FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, e->ib1)) { -- case MTK_PPE_PKT_TYPE_BRIDGE: -- hv1 = e->bridge.src_mac_lo; -- hv1 ^= ((e->bridge.src_mac_hi & 0xffff) << 16); -- hv2 = e->bridge.src_mac_hi >> 16; -- hv2 ^= e->bridge.dest_mac_lo; -- hv3 = e->bridge.dest_mac_hi; -- break; - case MTK_PPE_PKT_TYPE_IPV4_ROUTE: - case MTK_PPE_PKT_TYPE_IPV4_HNAPT: - hv1 = e->ipv4.orig.ports; -@@ -572,7 +565,6 @@ int mtk_ppe_start(struct mtk_ppe *ppe) - MTK_PPE_FLOW_CFG_IP4_NAT | - MTK_PPE_FLOW_CFG_IP4_NAPT | - MTK_PPE_FLOW_CFG_IP4_DSLITE | -- MTK_PPE_FLOW_CFG_L2_BRIDGE | - MTK_PPE_FLOW_CFG_IP4_NAT_FRAG; - ppe_w32(ppe, MTK_PPE_FLOW_CFG, val); - ---- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c -+++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c -@@ -32,7 +32,6 @@ static const char *mtk_foe_pkt_type_str( - static const char * const type_str[] = { - [MTK_PPE_PKT_TYPE_IPV4_HNAPT] = "IPv4 5T", - [MTK_PPE_PKT_TYPE_IPV4_ROUTE] = "IPv4 3T", -- [MTK_PPE_PKT_TYPE_BRIDGE] = "L2", - [MTK_PPE_PKT_TYPE_IPV4_DSLITE] = "DS-LITE", - [MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T] = "IPv6 3T", - [MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T] = "IPv6 5T", diff --git a/target/linux/generic/backport-6.1/702-v5.19-10-net-ethernet-mtk_eth_soc-support-creating-mac-addres.patch b/target/linux/generic/backport-6.1/702-v5.19-10-net-ethernet-mtk_eth_soc-support-creating-mac-addres.patch 
deleted file mode 100644 index 209c65e66aa0..000000000000 --- a/target/linux/generic/backport-6.1/702-v5.19-10-net-ethernet-mtk_eth_soc-support-creating-mac-addres.patch +++ /dev/null @@ -1,553 +0,0 @@ -From: Felix Fietkau -Date: Wed, 23 Feb 2022 10:56:34 +0100 -Subject: [PATCH] net: ethernet: mtk_eth_soc: support creating mac - address based offload entries - -This will be used to implement a limited form of bridge offloading. -Since the hardware does not support flow table entries with just source -and destination MAC address, the driver has to emulate it. - -The hardware automatically creates entries entries for incoming flows, even -when they are bridged instead of routed, and reports when packets for these -flows have reached the minimum PPS rate for offloading. - -After this happens, we look up the L2 flow offload entry based on the MAC -header and fill in the output routing information in the flow table. -The dynamically created per-flow entries are automatically removed when -either the hardware flowtable entry expires, is replaced, or if the offload -rule they belong to is removed - -Signed-off-by: Felix Fietkau ---- - ---- a/drivers/net/ethernet/mediatek/mtk_ppe.c -+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c -@@ -6,12 +6,22 @@ - #include - #include - #include -+#include -+#include -+#include - #include "mtk_eth_soc.h" - #include "mtk_ppe.h" - #include "mtk_ppe_regs.h" - - static DEFINE_SPINLOCK(ppe_lock); - -+static const struct rhashtable_params mtk_flow_l2_ht_params = { -+ .head_offset = offsetof(struct mtk_flow_entry, l2_node), -+ .key_offset = offsetof(struct mtk_flow_entry, data.bridge), -+ .key_len = offsetof(struct mtk_foe_bridge, key_end), -+ .automatic_shrinking = true, -+}; -+ - static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val) - { - writel(val, ppe->base + reg); -@@ -123,6 +133,9 @@ mtk_foe_entry_l2(struct mtk_foe_entry *e - { - int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1); - -+ if (type == MTK_PPE_PKT_TYPE_BRIDGE) -+ return &entry->bridge.l2; -+ - if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) - return &entry->ipv6.l2; - -@@ -134,6 +147,9 @@ mtk_foe_entry_ib2(struct mtk_foe_entry * - { - int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1); - -+ if (type == MTK_PPE_PKT_TYPE_BRIDGE) -+ return &entry->bridge.ib2; -+ - if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) - return &entry->ipv6.ib2; - -@@ -168,7 +184,12 @@ int mtk_foe_entry_prepare(struct mtk_foe - if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T) - entry->ipv6.ports = ports_pad; - -- if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) { -+ if (type == MTK_PPE_PKT_TYPE_BRIDGE) { -+ ether_addr_copy(entry->bridge.src_mac, src_mac); -+ ether_addr_copy(entry->bridge.dest_mac, dest_mac); -+ entry->bridge.ib2 = val; -+ l2 = &entry->bridge.l2; -+ } else if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) { - entry->ipv6.ib2 = val; - l2 = &entry->ipv6.l2; - } else { -@@ -372,12 +393,96 @@ mtk_flow_entry_match(struct mtk_flow_ent - } - - static void -+__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) -+{ -+ struct hlist_head *head; -+ struct hlist_node *tmp; -+ -+ if (entry->type == MTK_FLOW_TYPE_L2) { -+ rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node, -+ mtk_flow_l2_ht_params); -+ -+ head = &entry->l2_flows; -+ hlist_for_each_entry_safe(entry, tmp, head, l2_data.list) -+ __mtk_foe_entry_clear(ppe, entry); -+ return; -+ } -+ -+ hlist_del_init(&entry->list); -+ if (entry->hash != 0xffff) { -+ ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE; -+ ppe->foe_table[entry->hash].ib1 |= 
FIELD_PREP(MTK_FOE_IB1_STATE, -+ MTK_FOE_STATE_BIND); -+ dma_wmb(); -+ } -+ entry->hash = 0xffff; -+ -+ if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW) -+ return; -+ -+ hlist_del_init(&entry->l2_data.list); -+ kfree(entry); -+} -+ -+static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1) -+{ -+ u16 timestamp; -+ u16 now; -+ -+ now = mtk_eth_timestamp(ppe->eth) & MTK_FOE_IB1_BIND_TIMESTAMP; -+ timestamp = ib1 & MTK_FOE_IB1_BIND_TIMESTAMP; -+ -+ if (timestamp > now) -+ return MTK_FOE_IB1_BIND_TIMESTAMP + 1 - timestamp + now; -+ else -+ return now - timestamp; -+} -+ -+static void -+mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) -+{ -+ struct mtk_flow_entry *cur; -+ struct mtk_foe_entry *hwe; -+ struct hlist_node *tmp; -+ int idle; -+ -+ idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1); -+ hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) { -+ int cur_idle; -+ u32 ib1; -+ -+ hwe = &ppe->foe_table[cur->hash]; -+ ib1 = READ_ONCE(hwe->ib1); -+ -+ if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) { -+ cur->hash = 0xffff; -+ __mtk_foe_entry_clear(ppe, cur); -+ continue; -+ } -+ -+ cur_idle = __mtk_foe_entry_idle_time(ppe, ib1); -+ if (cur_idle >= idle) -+ continue; -+ -+ idle = cur_idle; -+ entry->data.ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP; -+ entry->data.ib1 |= hwe->ib1 & MTK_FOE_IB1_BIND_TIMESTAMP; -+ } -+} -+ -+static void - mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) - { - struct mtk_foe_entry *hwe; - struct mtk_foe_entry foe; - - spin_lock_bh(&ppe_lock); -+ -+ if (entry->type == MTK_FLOW_TYPE_L2) { -+ mtk_flow_entry_update_l2(ppe, entry); -+ goto out; -+ } -+ - if (entry->hash == 0xffff) - goto out; - -@@ -419,21 +524,28 @@ __mtk_foe_entry_commit(struct mtk_ppe *p - void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) - { - spin_lock_bh(&ppe_lock); -- hlist_del_init(&entry->list); -- if (entry->hash != 0xffff) { -- ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE; -- ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, -- MTK_FOE_STATE_BIND); -- dma_wmb(); -- } -- entry->hash = 0xffff; -+ __mtk_foe_entry_clear(ppe, entry); - spin_unlock_bh(&ppe_lock); - } - -+static int -+mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) -+{ -+ entry->type = MTK_FLOW_TYPE_L2; -+ -+ return rhashtable_insert_fast(&ppe->l2_flows, &entry->l2_node, -+ mtk_flow_l2_ht_params); -+} -+ - int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) - { -- u32 hash = mtk_ppe_hash_entry(&entry->data); -+ int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1); -+ u32 hash; -+ -+ if (type == MTK_PPE_PKT_TYPE_BRIDGE) -+ return mtk_foe_entry_commit_l2(ppe, entry); - -+ hash = mtk_ppe_hash_entry(&entry->data); - entry->hash = 0xffff; - spin_lock_bh(&ppe_lock); - hlist_add_head(&entry->list, &ppe->foe_flow[hash / 2]); -@@ -442,18 +554,72 @@ int mtk_foe_entry_commit(struct mtk_ppe - return 0; - } - -+static void -+mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry, -+ u16 hash) -+{ -+ struct mtk_flow_entry *flow_info; -+ struct mtk_foe_entry foe, *hwe; -+ struct mtk_foe_mac_info *l2; -+ u32 ib1_mask = MTK_FOE_IB1_PACKET_TYPE | MTK_FOE_IB1_UDP; -+ int type; -+ -+ flow_info = kzalloc(offsetof(struct mtk_flow_entry, l2_data.end), -+ GFP_ATOMIC); -+ if (!flow_info) -+ return; -+ -+ flow_info->l2_data.base_flow = entry; -+ flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW; -+ flow_info->hash = hash; -+ 
hlist_add_head(&flow_info->list, &ppe->foe_flow[hash / 2]); -+ hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows); -+ -+ hwe = &ppe->foe_table[hash]; -+ memcpy(&foe, hwe, sizeof(foe)); -+ foe.ib1 &= ib1_mask; -+ foe.ib1 |= entry->data.ib1 & ~ib1_mask; -+ -+ l2 = mtk_foe_entry_l2(&foe); -+ memcpy(l2, &entry->data.bridge.l2, sizeof(*l2)); -+ -+ type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, foe.ib1); -+ if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT) -+ memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new)); -+ else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP) -+ l2->etype = ETH_P_IPV6; -+ -+ *mtk_foe_entry_ib2(&foe) = entry->data.bridge.ib2; -+ -+ __mtk_foe_entry_commit(ppe, &foe, hash); -+} -+ - void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash) - { - struct hlist_head *head = &ppe->foe_flow[hash / 2]; -- struct mtk_flow_entry *entry; - struct mtk_foe_entry *hwe = &ppe->foe_table[hash]; -+ struct mtk_flow_entry *entry; -+ struct mtk_foe_bridge key = {}; -+ struct ethhdr *eh; - bool found = false; -- -- if (hlist_empty(head)) -- return; -+ u8 *tag; - - spin_lock_bh(&ppe_lock); -+ -+ if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND) -+ goto out; -+ - hlist_for_each_entry(entry, head, list) { -+ if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) { -+ if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == -+ MTK_FOE_STATE_BIND)) -+ continue; -+ -+ entry->hash = 0xffff; -+ __mtk_foe_entry_clear(ppe, entry); -+ continue; -+ } -+ - if (found || !mtk_flow_entry_match(entry, hwe)) { - if (entry->hash != 0xffff) - entry->hash = 0xffff; -@@ -464,21 +630,50 @@ void __mtk_ppe_check_skb(struct mtk_ppe - __mtk_foe_entry_commit(ppe, &entry->data, hash); - found = true; - } -+ -+ if (found) -+ goto out; -+ -+ eh = eth_hdr(skb); -+ ether_addr_copy(key.dest_mac, eh->h_dest); -+ ether_addr_copy(key.src_mac, eh->h_source); -+ tag = skb->data - 2; -+ key.vlan = 0; -+ switch (skb->protocol) { -+#if IS_ENABLED(CONFIG_NET_DSA) -+ case htons(ETH_P_XDSA): -+ if (!netdev_uses_dsa(skb->dev) || -+ skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK) -+ goto out; -+ -+ tag += 4; -+ if (get_unaligned_be16(tag) != ETH_P_8021Q) -+ break; -+ -+ fallthrough; -+#endif -+ case htons(ETH_P_8021Q): -+ key.vlan = get_unaligned_be16(tag + 2) & VLAN_VID_MASK; -+ break; -+ default: -+ break; -+ } -+ -+ entry = rhashtable_lookup_fast(&ppe->l2_flows, &key, mtk_flow_l2_ht_params); -+ if (!entry) -+ goto out; -+ -+ mtk_foe_entry_commit_subflow(ppe, entry, hash); -+ -+out: - spin_unlock_bh(&ppe_lock); - } - - int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) - { -- u16 now = mtk_eth_timestamp(ppe->eth) & MTK_FOE_IB1_BIND_TIMESTAMP; -- u16 timestamp; -- - mtk_flow_entry_update(ppe, entry); -- timestamp = entry->data.ib1 & MTK_FOE_IB1_BIND_TIMESTAMP; - -- if (timestamp > now) -- return MTK_FOE_IB1_BIND_TIMESTAMP + 1 - timestamp + now; -- else -- return now - timestamp; -+ return __mtk_foe_entry_idle_time(ppe, entry->data.ib1); - } - - struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, -@@ -492,6 +687,8 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_ - if (!ppe) - return NULL; - -+ rhashtable_init(&ppe->l2_flows, &mtk_flow_l2_ht_params); -+ - /* need to allocate a separate device, since it PPE DMA access is - * not coherent. 
- */ ---- a/drivers/net/ethernet/mediatek/mtk_ppe.h -+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h -@@ -6,6 +6,7 @@ - - #include - #include -+#include - - #define MTK_ETH_PPE_BASE 0xc00 - -@@ -84,19 +85,16 @@ struct mtk_foe_mac_info { - u16 src_mac_lo; - }; - -+/* software-only entry type */ - struct mtk_foe_bridge { -- u32 dest_mac_hi; -- -- u16 src_mac_lo; -- u16 dest_mac_lo; -+ u8 dest_mac[ETH_ALEN]; -+ u8 src_mac[ETH_ALEN]; -+ u16 vlan; - -- u32 src_mac_hi; -+ struct {} key_end; - - u32 ib2; - -- u32 _rsv[5]; -- -- u32 udf_tsid; - struct mtk_foe_mac_info l2; - }; - -@@ -235,13 +233,33 @@ enum { - MTK_PPE_CPU_REASON_INVALID = 0x1f, - }; - -+enum { -+ MTK_FLOW_TYPE_L4, -+ MTK_FLOW_TYPE_L2, -+ MTK_FLOW_TYPE_L2_SUBFLOW, -+}; -+ - struct mtk_flow_entry { -+ union { -+ struct hlist_node list; -+ struct { -+ struct rhash_head l2_node; -+ struct hlist_head l2_flows; -+ }; -+ }; -+ u8 type; -+ s8 wed_index; -+ u16 hash; -+ union { -+ struct mtk_foe_entry data; -+ struct { -+ struct mtk_flow_entry *base_flow; -+ struct hlist_node list; -+ struct {} end; -+ } l2_data; -+ }; - struct rhash_head node; -- struct hlist_node list; - unsigned long cookie; -- struct mtk_foe_entry data; -- u16 hash; -- s8 wed_index; - }; - - struct mtk_ppe { -@@ -256,6 +274,8 @@ struct mtk_ppe { - u16 foe_check_time[MTK_PPE_ENTRIES]; - struct hlist_head foe_flow[MTK_PPE_ENTRIES / 2]; - -+ struct rhashtable l2_flows; -+ - void *acct_table; - }; - ---- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c -+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c -@@ -31,6 +31,8 @@ struct mtk_flow_data { - __be16 src_port; - __be16 dst_port; - -+ u16 vlan_in; -+ - struct { - u16 id; - __be16 proto; -@@ -257,9 +259,45 @@ mtk_flow_offload_replace(struct mtk_eth - return -EOPNOTSUPP; - } - -+ switch (addr_type) { -+ case 0: -+ offload_type = MTK_PPE_PKT_TYPE_BRIDGE; -+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { -+ struct flow_match_eth_addrs match; -+ -+ flow_rule_match_eth_addrs(rule, &match); -+ memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN); -+ memcpy(data.eth.h_source, match.key->src, ETH_ALEN); -+ } else { -+ return -EOPNOTSUPP; -+ } -+ -+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { -+ struct flow_match_vlan match; -+ -+ flow_rule_match_vlan(rule, &match); -+ -+ if (match.key->vlan_tpid != cpu_to_be16(ETH_P_8021Q)) -+ return -EOPNOTSUPP; -+ -+ data.vlan_in = match.key->vlan_id; -+ } -+ break; -+ case FLOW_DISSECTOR_KEY_IPV4_ADDRS: -+ offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT; -+ break; -+ case FLOW_DISSECTOR_KEY_IPV6_ADDRS: -+ offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T; -+ break; -+ default: -+ return -EOPNOTSUPP; -+ } -+ - flow_action_for_each(i, act, &rule->action) { - switch (act->id) { - case FLOW_ACTION_MANGLE: -+ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE) -+ return -EOPNOTSUPP; - if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH) - mtk_flow_offload_mangle_eth(act, &data.eth); - break; -@@ -291,17 +329,6 @@ mtk_flow_offload_replace(struct mtk_eth - } - } - -- switch (addr_type) { -- case FLOW_DISSECTOR_KEY_IPV4_ADDRS: -- offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT; -- break; -- case FLOW_DISSECTOR_KEY_IPV6_ADDRS: -- offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T; -- break; -- default: -- return -EOPNOTSUPP; -- } -- - if (!is_valid_ether_addr(data.eth.h_source) || - !is_valid_ether_addr(data.eth.h_dest)) - return -EINVAL; -@@ -315,10 +342,13 @@ mtk_flow_offload_replace(struct mtk_eth - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { - struct flow_match_ports ports; - -+ 
if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE) -+ return -EOPNOTSUPP; -+ - flow_rule_match_ports(rule, &ports); - data.src_port = ports.key->src; - data.dst_port = ports.key->dst; -- } else { -+ } else if (offload_type != MTK_PPE_PKT_TYPE_BRIDGE) { - return -EOPNOTSUPP; - } - -@@ -348,6 +378,9 @@ mtk_flow_offload_replace(struct mtk_eth - if (act->id != FLOW_ACTION_MANGLE) - continue; - -+ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE) -+ return -EOPNOTSUPP; -+ - switch (act->mangle.htype) { - case FLOW_ACT_MANGLE_HDR_TYPE_TCP: - case FLOW_ACT_MANGLE_HDR_TYPE_UDP: -@@ -373,6 +406,9 @@ mtk_flow_offload_replace(struct mtk_eth - return err; - } - -+ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE) -+ foe.bridge.vlan = data.vlan_in; -+ - if (data.vlan.num == 1) { - if (data.vlan.proto != htons(ETH_P_8021Q)) - return -EOPNOTSUPP; diff --git a/target/linux/generic/backport-6.1/702-v5.19-11-net-ethernet-mtk_eth_soc-wed-fix-sparse-endian-warni.patch b/target/linux/generic/backport-6.1/702-v5.19-11-net-ethernet-mtk_eth_soc-wed-fix-sparse-endian-warni.patch deleted file mode 100644 index 8f3dfe82399f..000000000000 --- a/target/linux/generic/backport-6.1/702-v5.19-11-net-ethernet-mtk_eth_soc-wed-fix-sparse-endian-warni.patch +++ /dev/null @@ -1,56 +0,0 @@ -From: Felix Fietkau -Date: Fri, 8 Apr 2022 10:59:45 +0200 -Subject: [PATCH] net: ethernet: mtk_eth_soc/wed: fix sparse endian warnings - -Descriptor fields are little-endian - -Fixes: 804775dfc288 ("net: ethernet: mtk_eth_soc: add support for Wireless Ethernet Dispatch (WED)") -Reported-by: kernel test robot -Signed-off-by: Felix Fietkau -Signed-off-by: David S. Miller ---- - ---- a/drivers/net/ethernet/mediatek/mtk_wed.c -+++ b/drivers/net/ethernet/mediatek/mtk_wed.c -@@ -144,16 +144,17 @@ mtk_wed_buffer_alloc(struct mtk_wed_devi - - for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) { - u32 txd_size; -+ u32 ctrl; - - txd_size = dev->wlan.init_buf(buf, buf_phys, token++); - -- desc->buf0 = buf_phys; -- desc->buf1 = buf_phys + txd_size; -- desc->ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, -- txd_size) | -- FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1, -- MTK_WED_BUF_SIZE - txd_size) | -- MTK_WDMA_DESC_CTRL_LAST_SEG1; -+ desc->buf0 = cpu_to_le32(buf_phys); -+ desc->buf1 = cpu_to_le32(buf_phys + txd_size); -+ ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) | -+ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1, -+ MTK_WED_BUF_SIZE - txd_size) | -+ MTK_WDMA_DESC_CTRL_LAST_SEG1; -+ desc->ctrl = cpu_to_le32(ctrl); - desc->info = 0; - desc++; - -@@ -184,12 +185,14 @@ mtk_wed_free_buffer(struct mtk_wed_devic - - for (i = 0, page_idx = 0; i < dev->buf_ring.size; i += MTK_WED_BUF_PER_PAGE) { - void *page = page_list[page_idx++]; -+ dma_addr_t buf_addr; - - if (!page) - break; - -- dma_unmap_page(dev->hw->dev, desc[i].buf0, -- PAGE_SIZE, DMA_BIDIRECTIONAL); -+ buf_addr = le32_to_cpu(desc[i].buf0); -+ dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE, -+ DMA_BIDIRECTIONAL); - __free_page(page); - } - diff --git a/target/linux/generic/backport-6.1/702-v5.19-12-net-ethernet-mtk_eth_soc-fix-return-value-check-in-m.patch b/target/linux/generic/backport-6.1/702-v5.19-12-net-ethernet-mtk_eth_soc-fix-return-value-check-in-m.patch deleted file mode 100644 index 4ec8fe74bc7d..000000000000 --- a/target/linux/generic/backport-6.1/702-v5.19-12-net-ethernet-mtk_eth_soc-fix-return-value-check-in-m.patch +++ /dev/null @@ -1,25 +0,0 @@ -From: Yang Yingliang -Date: Fri, 8 Apr 2022 11:22:46 +0800 -Subject: [PATCH] net: ethernet: mtk_eth_soc: fix return value check in - mtk_wed_add_hw() - -If 
syscon_regmap_lookup_by_phandle() fails, it never return NULL pointer, -change the check to IS_ERR(). - -Fixes: 804775dfc288 ("net: ethernet: mtk_eth_soc: add support for Wireless Ethernet Dispatch (WED)") -Reported-by: Hulk Robot -Signed-off-by: Yang Yingliang -Signed-off-by: David S. Miller ---- - ---- a/drivers/net/ethernet/mediatek/mtk_wed.c -+++ b/drivers/net/ethernet/mediatek/mtk_wed.c -@@ -816,7 +816,7 @@ void mtk_wed_add_hw(struct device_node * - return; - - regs = syscon_regmap_lookup_by_phandle(np, NULL); -- if (!regs) -+ if (IS_ERR(regs)) - return; - - rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops); diff --git a/target/linux/generic/backport-6.1/702-v5.19-13-net-ethernet-mtk_eth_soc-use-standard-property-for-c.patch b/target/linux/generic/backport-6.1/702-v5.19-13-net-ethernet-mtk_eth_soc-use-standard-property-for-c.patch deleted file mode 100644 index a7c5f08f100d..000000000000 --- a/target/linux/generic/backport-6.1/702-v5.19-13-net-ethernet-mtk_eth_soc-use-standard-property-for-c.patch +++ /dev/null @@ -1,35 +0,0 @@ -From: Lorenzo Bianconi -Date: Mon, 11 Apr 2022 12:13:25 +0200 -Subject: [PATCH] net: ethernet: mtk_eth_soc: use standard property for - cci-control-port - -Rely on standard cci-control-port property to identify CCI port -reference. -Update mt7622 dts binding. - -Signed-off-by: Lorenzo Bianconi -Signed-off-by: David S. Miller ---- - ---- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi -+++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi -@@ -963,7 +963,7 @@ - power-domains = <&scpsys MT7622_POWER_DOMAIN_ETHSYS>; - mediatek,ethsys = <ðsys>; - mediatek,sgmiisys = <&sgmiisys>; -- mediatek,cci-control = <&cci_control2>; -+ cci-control-port = <&cci_control2>; - mediatek,wed = <&wed0>, <&wed1>; - mediatek,pcie-mirror = <&pcie_mirror>; - mediatek,hifsys = <&hifsys>; ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -3188,7 +3188,7 @@ static int mtk_probe(struct platform_dev - struct regmap *cci; - - cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, -- "mediatek,cci-control"); -+ "cci-control-port"); - /* enable CPU/bus coherency */ - if (!IS_ERR(cci)) - regmap_write(cci, 0, 3); diff --git a/target/linux/generic/backport-6.1/702-v5.19-14-net-ethernet-mtk_eth_soc-use-after-free-in-__mtk_ppe.patch b/target/linux/generic/backport-6.1/702-v5.19-14-net-ethernet-mtk_eth_soc-use-after-free-in-__mtk_ppe.patch deleted file mode 100644 index 656b3a159e8d..000000000000 --- a/target/linux/generic/backport-6.1/702-v5.19-14-net-ethernet-mtk_eth_soc-use-after-free-in-__mtk_ppe.patch +++ /dev/null @@ -1,33 +0,0 @@ -From: Dan Carpenter -Date: Tue, 12 Apr 2022 12:24:19 +0300 -Subject: [PATCH] net: ethernet: mtk_eth_soc: use after free in - __mtk_ppe_check_skb() - -The __mtk_foe_entry_clear() function frees "entry" so we have to use -the _safe() version of hlist_for_each_entry() to prevent a use after -free. - -Fixes: 33fc42de3327 ("net: ethernet: mtk_eth_soc: support creating mac address based offload entries") -Signed-off-by: Dan Carpenter -Signed-off-by: David S. 
Miller ---- - ---- a/drivers/net/ethernet/mediatek/mtk_ppe.c -+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c -@@ -600,6 +600,7 @@ void __mtk_ppe_check_skb(struct mtk_ppe - struct mtk_foe_entry *hwe = &ppe->foe_table[hash]; - struct mtk_flow_entry *entry; - struct mtk_foe_bridge key = {}; -+ struct hlist_node *n; - struct ethhdr *eh; - bool found = false; - u8 *tag; -@@ -609,7 +610,7 @@ void __mtk_ppe_check_skb(struct mtk_ppe - if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND) - goto out; - -- hlist_for_each_entry(entry, head, list) { -+ hlist_for_each_entry_safe(entry, n, head, list) { - if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) { - if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == - MTK_FOE_STATE_BIND)) diff --git a/target/linux/generic/backport-6.1/702-v5.19-15-net-ethernet-mtk_eth_soc-add-check-for-allocation-fa.patch b/target/linux/generic/backport-6.1/702-v5.19-15-net-ethernet-mtk_eth_soc-add-check-for-allocation-fa.patch deleted file mode 100644 index 714163c86bb0..000000000000 --- a/target/linux/generic/backport-6.1/702-v5.19-15-net-ethernet-mtk_eth_soc-add-check-for-allocation-fa.patch +++ /dev/null @@ -1,22 +0,0 @@ -From: Dan Carpenter -Date: Thu, 21 Apr 2022 18:49:02 +0300 -Subject: [PATCH] net: ethernet: mtk_eth_soc: add check for allocation failure - -Check if the kzalloc() failed. - -Fixes: 804775dfc288 ("net: ethernet: mtk_eth_soc: add support for Wireless Ethernet Dispatch (WED)") -Signed-off-by: Dan Carpenter -Signed-off-by: David S. Miller ---- - ---- a/drivers/net/ethernet/mediatek/mtk_wed.c -+++ b/drivers/net/ethernet/mediatek/mtk_wed.c -@@ -827,6 +827,8 @@ void mtk_wed_add_hw(struct device_node * - goto unlock; - - hw = kzalloc(sizeof(*hw), GFP_KERNEL); -+ if (!hw) -+ goto unlock; - hw->node = np; - hw->regs = regs; - hw->eth = eth; diff --git a/target/linux/generic/backport-6.1/702-v5.19-16-eth-mtk_eth_soc-silence-the-GCC-12-array-bounds-warn.patch b/target/linux/generic/backport-6.1/702-v5.19-16-eth-mtk_eth_soc-silence-the-GCC-12-array-bounds-warn.patch deleted file mode 100644 index aa98745ac60e..000000000000 --- a/target/linux/generic/backport-6.1/702-v5.19-16-eth-mtk_eth_soc-silence-the-GCC-12-array-bounds-warn.patch +++ /dev/null @@ -1,26 +0,0 @@ -From: Jakub Kicinski -Date: Fri, 20 May 2022 12:56:03 -0700 -Subject: [PATCH] eth: mtk_eth_soc: silence the GCC 12 array-bounds warning - -GCC 12 gets upset because in mtk_foe_entry_commit_subflow() -this driver allocates a partial structure. The writes are -within bounds. - -Silence these warnings for now, our build bot runs GCC 12 -so we won't allow any new instances. - -Signed-off-by: Jakub Kicinski -Signed-off-by: David S. 
Miller ---- - ---- a/drivers/net/ethernet/mediatek/Makefile -+++ b/drivers/net/ethernet/mediatek/Makefile -@@ -11,3 +11,8 @@ mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) + - endif - obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o - obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o -+ -+# FIXME: temporarily silence -Warray-bounds on non W=1+ builds -+ifndef KBUILD_EXTRA_WARN -+CFLAGS_mtk_ppe.o += -Wno-array-bounds -+endif diff --git a/target/linux/generic/backport-6.1/702-v5.19-17-net-ethernet-mtk_eth_soc-rely-on-GFP_KERNEL-for-dma_.patch b/target/linux/generic/backport-6.1/702-v5.19-17-net-ethernet-mtk_eth_soc-rely-on-GFP_KERNEL-for-dma_.patch deleted file mode 100644 index 9b6321f1dafb..000000000000 --- a/target/linux/generic/backport-6.1/702-v5.19-17-net-ethernet-mtk_eth_soc-rely-on-GFP_KERNEL-for-dma_.patch +++ /dev/null @@ -1,52 +0,0 @@ -From: Lorenzo Bianconi -Date: Fri, 20 May 2022 20:11:26 +0200 -Subject: [PATCH] net: ethernet: mtk_eth_soc: rely on GFP_KERNEL for - dma_alloc_coherent whenever possible - -Rely on GFP_KERNEL for dma descriptors mappings in mtk_tx_alloc(), -mtk_rx_alloc() and mtk_init_fq_dma() since they are run in non-irq -context. - -Signed-off-by: Lorenzo Bianconi -Signed-off-by: David S. Miller ---- - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -846,7 +846,7 @@ static int mtk_init_fq_dma(struct mtk_et - eth->scratch_ring = dma_alloc_coherent(eth->dma_dev, - cnt * sizeof(struct mtk_tx_dma), - ð->phy_scratch_ring, -- GFP_ATOMIC); -+ GFP_KERNEL); - if (unlikely(!eth->scratch_ring)) - return -ENOMEM; - -@@ -1624,7 +1624,7 @@ static int mtk_tx_alloc(struct mtk_eth * - goto no_tx_mem; - - ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz, -- &ring->phys, GFP_ATOMIC); -+ &ring->phys, GFP_KERNEL); - if (!ring->dma) - goto no_tx_mem; - -@@ -1642,8 +1642,7 @@ static int mtk_tx_alloc(struct mtk_eth * - */ - if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { - ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz, -- &ring->phys_pdma, -- GFP_ATOMIC); -+ &ring->phys_pdma, GFP_KERNEL); - if (!ring->dma_pdma) - goto no_tx_mem; - -@@ -1758,7 +1757,7 @@ static int mtk_rx_alloc(struct mtk_eth * - - ring->dma = dma_alloc_coherent(eth->dma_dev, - rx_dma_size * sizeof(*ring->dma), -- &ring->phys, GFP_ATOMIC); -+ &ring->phys, GFP_KERNEL); - if (!ring->dma) - return -ENOMEM; - diff --git a/target/linux/generic/backport-6.1/702-v5.19-18-net-ethernet-mtk_eth_soc-move-tx-dma-desc-configurat.patch b/target/linux/generic/backport-6.1/702-v5.19-18-net-ethernet-mtk_eth_soc-move-tx-dma-desc-configurat.patch deleted file mode 100644 index 8e16ea255669..000000000000 --- a/target/linux/generic/backport-6.1/702-v5.19-18-net-ethernet-mtk_eth_soc-move-tx-dma-desc-configurat.patch +++ /dev/null @@ -1,206 +0,0 @@ -From: Lorenzo Bianconi -Date: Fri, 20 May 2022 20:11:27 +0200 -Subject: [PATCH] net: ethernet: mtk_eth_soc: move tx dma desc configuration in - mtk_tx_set_dma_desc - -Move tx dma descriptor configuration in mtk_tx_set_dma_desc routine. -This is a preliminary patch to introduce mt7986 ethernet support since -it relies on a different tx dma descriptor layout. - -Tested-by: Sam Shih -Signed-off-by: Lorenzo Bianconi -Signed-off-by: David S. 
Miller ---- - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -972,18 +972,51 @@ static void setup_tx_buf(struct mtk_eth - } - } - -+static void mtk_tx_set_dma_desc(struct net_device *dev, struct mtk_tx_dma *desc, -+ struct mtk_tx_dma_desc_info *info) -+{ -+ struct mtk_mac *mac = netdev_priv(dev); -+ u32 data; -+ -+ WRITE_ONCE(desc->txd1, info->addr); -+ -+ data = TX_DMA_SWC | TX_DMA_PLEN0(info->size); -+ if (info->last) -+ data |= TX_DMA_LS0; -+ WRITE_ONCE(desc->txd3, data); -+ -+ data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */ -+ if (info->first) { -+ if (info->gso) -+ data |= TX_DMA_TSO; -+ /* tx checksum offload */ -+ if (info->csum) -+ data |= TX_DMA_CHKSUM; -+ /* vlan header offload */ -+ if (info->vlan) -+ data |= TX_DMA_INS_VLAN | info->vlan_tci; -+ } -+ WRITE_ONCE(desc->txd4, data); -+} -+ - static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, - int tx_num, struct mtk_tx_ring *ring, bool gso) - { -+ struct mtk_tx_dma_desc_info txd_info = { -+ .size = skb_headlen(skb), -+ .gso = gso, -+ .csum = skb->ip_summed == CHECKSUM_PARTIAL, -+ .vlan = skb_vlan_tag_present(skb), -+ .vlan_tci = skb_vlan_tag_get(skb), -+ .first = true, -+ .last = !skb_is_nonlinear(skb), -+ }; - struct mtk_mac *mac = netdev_priv(dev); - struct mtk_eth *eth = mac->hw; - struct mtk_tx_dma *itxd, *txd; - struct mtk_tx_dma *itxd_pdma, *txd_pdma; - struct mtk_tx_buf *itx_buf, *tx_buf; -- dma_addr_t mapped_addr; -- unsigned int nr_frags; - int i, n_desc = 1; -- u32 txd4 = 0, fport; - int k = 0; - - itxd = ring->next_free; -@@ -991,49 +1024,32 @@ static int mtk_tx_map(struct sk_buff *sk - if (itxd == ring->last_free) - return -ENOMEM; - -- /* set the forward port */ -- fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT; -- txd4 |= fport; -- - itx_buf = mtk_desc_to_tx_buf(ring, itxd); - memset(itx_buf, 0, sizeof(*itx_buf)); - -- if (gso) -- txd4 |= TX_DMA_TSO; -- -- /* TX Checksum offload */ -- if (skb->ip_summed == CHECKSUM_PARTIAL) -- txd4 |= TX_DMA_CHKSUM; -- -- /* VLAN header offload */ -- if (skb_vlan_tag_present(skb)) -- txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb); -- -- mapped_addr = dma_map_single(eth->dma_dev, skb->data, -- skb_headlen(skb), DMA_TO_DEVICE); -- if (unlikely(dma_mapping_error(eth->dma_dev, mapped_addr))) -+ txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size, -+ DMA_TO_DEVICE); -+ if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr))) - return -ENOMEM; - -- WRITE_ONCE(itxd->txd1, mapped_addr); -+ mtk_tx_set_dma_desc(dev, itxd, &txd_info); -+ - itx_buf->flags |= MTK_TX_FLAGS_SINGLE0; - itx_buf->flags |= (!mac->id) ? 
MTK_TX_FLAGS_FPORT0 : - MTK_TX_FLAGS_FPORT1; -- setup_tx_buf(eth, itx_buf, itxd_pdma, mapped_addr, skb_headlen(skb), -+ setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size, - k++); - - /* TX SG offload */ - txd = itxd; - txd_pdma = qdma_to_pdma(ring, txd); -- nr_frags = skb_shinfo(skb)->nr_frags; - -- for (i = 0; i < nr_frags; i++) { -+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - unsigned int offset = 0; - int frag_size = skb_frag_size(frag); - - while (frag_size) { -- bool last_frag = false; -- unsigned int frag_map_size; - bool new_desc = true; - - if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA) || -@@ -1048,23 +1064,17 @@ static int mtk_tx_map(struct sk_buff *sk - new_desc = false; - } - -- -- frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN); -- mapped_addr = skb_frag_dma_map(eth->dma_dev, frag, offset, -- frag_map_size, -- DMA_TO_DEVICE); -- if (unlikely(dma_mapping_error(eth->dma_dev, mapped_addr))) -+ memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info)); -+ txd_info.size = min(frag_size, MTK_TX_DMA_BUF_LEN); -+ txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 && -+ !(frag_size - txd_info.size); -+ txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag, -+ offset, txd_info.size, -+ DMA_TO_DEVICE); -+ if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr))) - goto err_dma; - -- if (i == nr_frags - 1 && -- (frag_size - frag_map_size) == 0) -- last_frag = true; -- -- WRITE_ONCE(txd->txd1, mapped_addr); -- WRITE_ONCE(txd->txd3, (TX_DMA_SWC | -- TX_DMA_PLEN0(frag_map_size) | -- last_frag * TX_DMA_LS0)); -- WRITE_ONCE(txd->txd4, fport); -+ mtk_tx_set_dma_desc(dev, txd, &txd_info); - - tx_buf = mtk_desc_to_tx_buf(ring, txd); - if (new_desc) -@@ -1074,20 +1084,17 @@ static int mtk_tx_map(struct sk_buff *sk - tx_buf->flags |= (!mac->id) ? 
MTK_TX_FLAGS_FPORT0 : - MTK_TX_FLAGS_FPORT1; - -- setup_tx_buf(eth, tx_buf, txd_pdma, mapped_addr, -- frag_map_size, k++); -+ setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr, -+ txd_info.size, k++); - -- frag_size -= frag_map_size; -- offset += frag_map_size; -+ frag_size -= txd_info.size; -+ offset += txd_info.size; - } - } - - /* store skb to cleanup */ - itx_buf->skb = skb; - -- WRITE_ONCE(itxd->txd4, txd4); -- WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) | -- (!nr_frags * TX_DMA_LS0))); - if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { - if (k & 0x1) - txd_pdma->txd2 |= TX_DMA_LS0; ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -843,6 +843,17 @@ enum mkt_eth_capabilities { - MTK_MUX_U3_GMAC2_TO_QPHY | \ - MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA) - -+struct mtk_tx_dma_desc_info { -+ dma_addr_t addr; -+ u32 size; -+ u16 vlan_tci; -+ u8 gso:1; -+ u8 csum:1; -+ u8 vlan:1; -+ u8 first:1; -+ u8 last:1; -+}; -+ - /* struct mtk_eth_data - This is the structure holding all differences - * among various plaforms - * @ana_rgc3: The offset for register ANA_RGC3 related to diff --git a/target/linux/generic/backport-6.1/702-v5.19-19-net-ethernet-mtk_eth_soc-add-txd_size-to-mtk_soc_dat.patch b/target/linux/generic/backport-6.1/702-v5.19-19-net-ethernet-mtk_eth_soc-add-txd_size-to-mtk_soc_dat.patch deleted file mode 100644 index f5206bba00b3..000000000000 --- a/target/linux/generic/backport-6.1/702-v5.19-19-net-ethernet-mtk_eth_soc-add-txd_size-to-mtk_soc_dat.patch +++ /dev/null @@ -1,167 +0,0 @@ -From: Lorenzo Bianconi -Date: Fri, 20 May 2022 20:11:28 +0200 -Subject: [PATCH] net: ethernet: mtk_eth_soc: add txd_size to mtk_soc_data - -In order to remove mtk_tx_dma size dependency, introduce txd_size in -mtk_soc_data data structure. Rely on txd_size in mtk_init_fq_dma() and -mtk_dma_free() routines. -This is a preliminary patch to add mt7986 ethernet support. - -Tested-by: Sam Shih -Signed-off-by: Lorenzo Bianconi -Signed-off-by: David S. 
Miller ---- - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -838,20 +838,20 @@ static void *mtk_max_lro_buf_alloc(gfp_t - /* the qdma core needs scratch memory to be setup */ - static int mtk_init_fq_dma(struct mtk_eth *eth) - { -+ const struct mtk_soc_data *soc = eth->soc; - dma_addr_t phy_ring_tail; - int cnt = MTK_DMA_SIZE; - dma_addr_t dma_addr; - int i; - - eth->scratch_ring = dma_alloc_coherent(eth->dma_dev, -- cnt * sizeof(struct mtk_tx_dma), -+ cnt * soc->txrx.txd_size, - ð->phy_scratch_ring, - GFP_KERNEL); - if (unlikely(!eth->scratch_ring)) - return -ENOMEM; - -- eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, -- GFP_KERNEL); -+ eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL); - if (unlikely(!eth->scratch_head)) - return -ENOMEM; - -@@ -861,16 +861,19 @@ static int mtk_init_fq_dma(struct mtk_et - if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) - return -ENOMEM; - -- phy_ring_tail = eth->phy_scratch_ring + -- (sizeof(struct mtk_tx_dma) * (cnt - 1)); -+ phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1); - - for (i = 0; i < cnt; i++) { -- eth->scratch_ring[i].txd1 = -- (dma_addr + (i * MTK_QDMA_PAGE_SIZE)); -+ struct mtk_tx_dma *txd; -+ -+ txd = (void *)eth->scratch_ring + i * soc->txrx.txd_size; -+ txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE; - if (i < cnt - 1) -- eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring + -- ((i + 1) * sizeof(struct mtk_tx_dma))); -- eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE); -+ txd->txd2 = eth->phy_scratch_ring + -+ (i + 1) * soc->txrx.txd_size; -+ -+ txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE); -+ txd->txd4 = 0; - } - - mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD); -@@ -2170,6 +2173,7 @@ static int mtk_dma_init(struct mtk_eth * - - static void mtk_dma_free(struct mtk_eth *eth) - { -+ const struct mtk_soc_data *soc = eth->soc; - int i; - - for (i = 0; i < MTK_MAC_COUNT; i++) -@@ -2177,9 +2181,8 @@ static void mtk_dma_free(struct mtk_eth - netdev_reset_queue(eth->netdev[i]); - if (eth->scratch_ring) { - dma_free_coherent(eth->dma_dev, -- MTK_DMA_SIZE * sizeof(struct mtk_tx_dma), -- eth->scratch_ring, -- eth->phy_scratch_ring); -+ MTK_DMA_SIZE * soc->txrx.txd_size, -+ eth->scratch_ring, eth->phy_scratch_ring); - eth->scratch_ring = NULL; - eth->phy_scratch_ring = 0; - } -@@ -3391,6 +3394,9 @@ static const struct mtk_soc_data mt2701_ - .hw_features = MTK_HW_FEATURES, - .required_clks = MT7623_CLKS_BITMAP, - .required_pctl = true, -+ .txrx = { -+ .txd_size = sizeof(struct mtk_tx_dma), -+ }, - }; - - static const struct mtk_soc_data mt7621_data = { -@@ -3399,6 +3405,9 @@ static const struct mtk_soc_data mt7621_ - .required_clks = MT7621_CLKS_BITMAP, - .required_pctl = false, - .offload_version = 2, -+ .txrx = { -+ .txd_size = sizeof(struct mtk_tx_dma), -+ }, - }; - - static const struct mtk_soc_data mt7622_data = { -@@ -3408,6 +3417,9 @@ static const struct mtk_soc_data mt7622_ - .required_clks = MT7622_CLKS_BITMAP, - .required_pctl = false, - .offload_version = 2, -+ .txrx = { -+ .txd_size = sizeof(struct mtk_tx_dma), -+ }, - }; - - static const struct mtk_soc_data mt7623_data = { -@@ -3416,6 +3428,9 @@ static const struct mtk_soc_data mt7623_ - .required_clks = MT7623_CLKS_BITMAP, - .required_pctl = true, - .offload_version = 2, -+ .txrx = { -+ .txd_size = sizeof(struct mtk_tx_dma), -+ }, - }; - - static const struct mtk_soc_data mt7629_data = { -@@ -3424,6 +3439,9 @@ static const struct mtk_soc_data mt7629_ - 
.hw_features = MTK_HW_FEATURES, - .required_clks = MT7629_CLKS_BITMAP, - .required_pctl = false, -+ .txrx = { -+ .txd_size = sizeof(struct mtk_tx_dma), -+ }, - }; - - static const struct mtk_soc_data rt5350_data = { -@@ -3431,6 +3449,9 @@ static const struct mtk_soc_data rt5350_ - .hw_features = MTK_HW_FEATURES_MT7628, - .required_clks = MT7628_CLKS_BITMAP, - .required_pctl = false, -+ .txrx = { -+ .txd_size = sizeof(struct mtk_tx_dma), -+ }, - }; - - const struct of_device_id of_mtk_match[] = { ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -864,6 +864,7 @@ struct mtk_tx_dma_desc_info { - * the target SoC - * @required_pctl A bool value to show whether the SoC requires - * the extra setup for those pins used by GMAC. -+ * @txd_size Tx DMA descriptor size. - */ - struct mtk_soc_data { - u32 ana_rgc3; -@@ -872,6 +873,9 @@ struct mtk_soc_data { - bool required_pctl; - u8 offload_version; - netdev_features_t hw_features; -+ struct { -+ u32 txd_size; -+ } txrx; - }; - - /* currently no SoC has more than 2 macs */ diff --git a/target/linux/generic/backport-6.1/702-v5.19-20-net-ethernet-mtk_eth_soc-rely-on-txd_size-in-mtk_tx_.patch b/target/linux/generic/backport-6.1/702-v5.19-20-net-ethernet-mtk_eth_soc-rely-on-txd_size-in-mtk_tx_.patch deleted file mode 100644 index ebe1ee3d59a4..000000000000 --- a/target/linux/generic/backport-6.1/702-v5.19-20-net-ethernet-mtk_eth_soc-rely-on-txd_size-in-mtk_tx_.patch +++ /dev/null @@ -1,78 +0,0 @@ -From: Lorenzo Bianconi -Date: Fri, 20 May 2022 20:11:29 +0200 -Subject: [PATCH] net: ethernet: mtk_eth_soc: rely on txd_size in - mtk_tx_alloc/mtk_tx_clean - -This is a preliminary patch to add mt7986 ethernet support. - -Tested-by: Sam Shih -Signed-off-by: Lorenzo Bianconi -Signed-off-by: David S. 
Miller ---- - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -1625,8 +1625,10 @@ static int mtk_napi_rx(struct napi_struc - - static int mtk_tx_alloc(struct mtk_eth *eth) - { -+ const struct mtk_soc_data *soc = eth->soc; - struct mtk_tx_ring *ring = ð->tx_ring; -- int i, sz = sizeof(*ring->dma); -+ int i, sz = soc->txrx.txd_size; -+ struct mtk_tx_dma *txd; - - ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf), - GFP_KERNEL); -@@ -1642,8 +1644,10 @@ static int mtk_tx_alloc(struct mtk_eth * - int next = (i + 1) % MTK_DMA_SIZE; - u32 next_ptr = ring->phys + next * sz; - -- ring->dma[i].txd2 = next_ptr; -- ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; -+ txd = (void *)ring->dma + i * sz; -+ txd->txd2 = next_ptr; -+ txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; -+ txd->txd4 = 0; - } - - /* On MT7688 (PDMA only) this driver uses the ring->dma structs -@@ -1665,7 +1669,7 @@ static int mtk_tx_alloc(struct mtk_eth * - ring->dma_size = MTK_DMA_SIZE; - atomic_set(&ring->free_count, MTK_DMA_SIZE - 2); - ring->next_free = &ring->dma[0]; -- ring->last_free = &ring->dma[MTK_DMA_SIZE - 1]; -+ ring->last_free = (void *)txd; - ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz)); - ring->thresh = MAX_SKB_FRAGS; - -@@ -1698,6 +1702,7 @@ no_tx_mem: - - static void mtk_tx_clean(struct mtk_eth *eth) - { -+ const struct mtk_soc_data *soc = eth->soc; - struct mtk_tx_ring *ring = ð->tx_ring; - int i; - -@@ -1710,17 +1715,15 @@ static void mtk_tx_clean(struct mtk_eth - - if (ring->dma) { - dma_free_coherent(eth->dma_dev, -- MTK_DMA_SIZE * sizeof(*ring->dma), -- ring->dma, -- ring->phys); -+ MTK_DMA_SIZE * soc->txrx.txd_size, -+ ring->dma, ring->phys); - ring->dma = NULL; - } - - if (ring->dma_pdma) { - dma_free_coherent(eth->dma_dev, -- MTK_DMA_SIZE * sizeof(*ring->dma_pdma), -- ring->dma_pdma, -- ring->phys_pdma); -+ MTK_DMA_SIZE * soc->txrx.txd_size, -+ ring->dma_pdma, ring->phys_pdma); - ring->dma_pdma = NULL; - } - } diff --git a/target/linux/generic/backport-6.1/702-v5.19-21-net-ethernet-mtk_eth_soc-rely-on-txd_size-in-mtk_des.patch b/target/linux/generic/backport-6.1/702-v5.19-21-net-ethernet-mtk_eth_soc-rely-on-txd_size-in-mtk_des.patch deleted file mode 100644 index 053412e7490a..000000000000 --- a/target/linux/generic/backport-6.1/702-v5.19-21-net-ethernet-mtk_eth_soc-rely-on-txd_size-in-mtk_des.patch +++ /dev/null @@ -1,109 +0,0 @@ -From: Lorenzo Bianconi -Date: Fri, 20 May 2022 20:11:30 +0200 -Subject: [PATCH] net: ethernet: mtk_eth_soc: rely on txd_size in - mtk_desc_to_tx_buf - -This is a preliminary patch to add mt7986 ethernet support. - -Tested-by: Sam Shih -Signed-off-by: Lorenzo Bianconi -Signed-off-by: David S. 
Miller ---- - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -891,10 +891,11 @@ static inline void *mtk_qdma_phys_to_vir - return ret + (desc - ring->phys); - } - --static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring, -- struct mtk_tx_dma *txd) -+static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring, -+ struct mtk_tx_dma *txd, -+ u32 txd_size) - { -- int idx = txd - ring->dma; -+ int idx = ((void *)txd - (void *)ring->dma) / txd_size; - - return &ring->buf[idx]; - } -@@ -1016,6 +1017,7 @@ static int mtk_tx_map(struct sk_buff *sk - }; - struct mtk_mac *mac = netdev_priv(dev); - struct mtk_eth *eth = mac->hw; -+ const struct mtk_soc_data *soc = eth->soc; - struct mtk_tx_dma *itxd, *txd; - struct mtk_tx_dma *itxd_pdma, *txd_pdma; - struct mtk_tx_buf *itx_buf, *tx_buf; -@@ -1027,7 +1029,7 @@ static int mtk_tx_map(struct sk_buff *sk - if (itxd == ring->last_free) - return -ENOMEM; - -- itx_buf = mtk_desc_to_tx_buf(ring, itxd); -+ itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size); - memset(itx_buf, 0, sizeof(*itx_buf)); - - txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size, -@@ -1055,7 +1057,7 @@ static int mtk_tx_map(struct sk_buff *sk - while (frag_size) { - bool new_desc = true; - -- if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA) || -+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || - (i & 0x1)) { - txd = mtk_qdma_phys_to_virt(ring, txd->txd2); - txd_pdma = qdma_to_pdma(ring, txd); -@@ -1079,7 +1081,8 @@ static int mtk_tx_map(struct sk_buff *sk - - mtk_tx_set_dma_desc(dev, txd, &txd_info); - -- tx_buf = mtk_desc_to_tx_buf(ring, txd); -+ tx_buf = mtk_desc_to_tx_buf(ring, txd, -+ soc->txrx.txd_size); - if (new_desc) - memset(tx_buf, 0, sizeof(*tx_buf)); - tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC; -@@ -1098,7 +1101,7 @@ static int mtk_tx_map(struct sk_buff *sk - /* store skb to cleanup */ - itx_buf->skb = skb; - -- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { -+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { - if (k & 0x1) - txd_pdma->txd2 |= TX_DMA_LS0; - else -@@ -1116,7 +1119,7 @@ static int mtk_tx_map(struct sk_buff *sk - */ - wmb(); - -- if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { -+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { - if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || - !netdev_xmit_more()) - mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR); -@@ -1130,13 +1133,13 @@ static int mtk_tx_map(struct sk_buff *sk - - err_dma: - do { -- tx_buf = mtk_desc_to_tx_buf(ring, itxd); -+ tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size); - - /* unmap dma */ - mtk_tx_unmap(eth, tx_buf, false); - - itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; -- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) -+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) - itxd_pdma->txd2 = TX_DMA_DESP2_DEF; - - itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2); -@@ -1450,7 +1453,8 @@ static int mtk_poll_tx_qdma(struct mtk_e - if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0) - break; - -- tx_buf = mtk_desc_to_tx_buf(ring, desc); -+ tx_buf = mtk_desc_to_tx_buf(ring, desc, -+ eth->soc->txrx.txd_size); - if (tx_buf->flags & MTK_TX_FLAGS_FPORT1) - mac = 1; - diff --git a/target/linux/generic/backport-6.1/702-v5.19-22-net-ethernet-mtk_eth_soc-rely-on-txd_size-in-txd_to_.patch b/target/linux/generic/backport-6.1/702-v5.19-22-net-ethernet-mtk_eth_soc-rely-on-txd_size-in-txd_to_.patch deleted file mode 100644 index 251d583f2966..000000000000 --- 
a/target/linux/generic/backport-6.1/702-v5.19-22-net-ethernet-mtk_eth_soc-rely-on-txd_size-in-txd_to_.patch +++ /dev/null @@ -1,39 +0,0 @@ -From: Lorenzo Bianconi -Date: Fri, 20 May 2022 20:11:31 +0200 -Subject: [PATCH] net: ethernet: mtk_eth_soc: rely on txd_size in txd_to_idx - -This is a preliminary patch to add mt7986 ethernet support. - -Tested-by: Sam Shih -Signed-off-by: Lorenzo Bianconi -Signed-off-by: David S. Miller ---- - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -906,9 +906,10 @@ static struct mtk_tx_dma *qdma_to_pdma(s - return ring->dma_pdma - ring->dma + dma; - } - --static int txd_to_idx(struct mtk_tx_ring *ring, struct mtk_tx_dma *dma) -+static int txd_to_idx(struct mtk_tx_ring *ring, struct mtk_tx_dma *dma, -+ u32 txd_size) - { -- return ((void *)dma - (void *)ring->dma) / sizeof(*dma); -+ return ((void *)dma - (void *)ring->dma) / txd_size; - } - - static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf, -@@ -1124,8 +1125,10 @@ static int mtk_tx_map(struct sk_buff *sk - !netdev_xmit_more()) - mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR); - } else { -- int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd), -- ring->dma_size); -+ int next_idx; -+ -+ next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size), -+ ring->dma_size); - mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0); - } - diff --git a/target/linux/generic/backport-6.1/702-v5.19-23-net-ethernet-mtk_eth_soc-add-rxd_size-to-mtk_soc_dat.patch b/target/linux/generic/backport-6.1/702-v5.19-23-net-ethernet-mtk_eth_soc-add-rxd_size-to-mtk_soc_dat.patch deleted file mode 100644 index ec206f28d64c..000000000000 --- a/target/linux/generic/backport-6.1/702-v5.19-23-net-ethernet-mtk_eth_soc-add-rxd_size-to-mtk_soc_dat.patch +++ /dev/null @@ -1,102 +0,0 @@ -From: Lorenzo Bianconi -Date: Fri, 20 May 2022 20:11:32 +0200 -Subject: [PATCH] net: ethernet: mtk_eth_soc: add rxd_size to mtk_soc_data - -Similar to tx counterpart, introduce rxd_size in mtk_soc_data data -structure. -This is a preliminary patch to add mt7986 ethernet support. - -Tested-by: Sam Shih -Signed-off-by: Lorenzo Bianconi -Signed-off-by: David S. 
Miller ---- - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -1776,7 +1776,7 @@ static int mtk_rx_alloc(struct mtk_eth * - } - - ring->dma = dma_alloc_coherent(eth->dma_dev, -- rx_dma_size * sizeof(*ring->dma), -+ rx_dma_size * eth->soc->txrx.rxd_size, - &ring->phys, GFP_KERNEL); - if (!ring->dma) - return -ENOMEM; -@@ -1834,9 +1834,8 @@ static void mtk_rx_clean(struct mtk_eth - - if (ring->dma) { - dma_free_coherent(eth->dma_dev, -- ring->dma_size * sizeof(*ring->dma), -- ring->dma, -- ring->phys); -+ ring->dma_size * eth->soc->txrx.rxd_size, -+ ring->dma, ring->phys); - ring->dma = NULL; - } - } -@@ -3406,6 +3405,7 @@ static const struct mtk_soc_data mt2701_ - .required_pctl = true, - .txrx = { - .txd_size = sizeof(struct mtk_tx_dma), -+ .rxd_size = sizeof(struct mtk_rx_dma), - }, - }; - -@@ -3417,6 +3417,7 @@ static const struct mtk_soc_data mt7621_ - .offload_version = 2, - .txrx = { - .txd_size = sizeof(struct mtk_tx_dma), -+ .rxd_size = sizeof(struct mtk_rx_dma), - }, - }; - -@@ -3429,6 +3430,7 @@ static const struct mtk_soc_data mt7622_ - .offload_version = 2, - .txrx = { - .txd_size = sizeof(struct mtk_tx_dma), -+ .rxd_size = sizeof(struct mtk_rx_dma), - }, - }; - -@@ -3440,6 +3442,7 @@ static const struct mtk_soc_data mt7623_ - .offload_version = 2, - .txrx = { - .txd_size = sizeof(struct mtk_tx_dma), -+ .rxd_size = sizeof(struct mtk_rx_dma), - }, - }; - -@@ -3451,6 +3454,7 @@ static const struct mtk_soc_data mt7629_ - .required_pctl = false, - .txrx = { - .txd_size = sizeof(struct mtk_tx_dma), -+ .rxd_size = sizeof(struct mtk_rx_dma), - }, - }; - -@@ -3461,6 +3465,7 @@ static const struct mtk_soc_data rt5350_ - .required_pctl = false, - .txrx = { - .txd_size = sizeof(struct mtk_tx_dma), -+ .rxd_size = sizeof(struct mtk_rx_dma), - }, - }; - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -865,6 +865,7 @@ struct mtk_tx_dma_desc_info { - * @required_pctl A bool value to show whether the SoC requires - * the extra setup for those pins used by GMAC. - * @txd_size Tx DMA descriptor size. -+ * @rxd_size Rx DMA descriptor size. - */ - struct mtk_soc_data { - u32 ana_rgc3; -@@ -875,6 +876,7 @@ struct mtk_soc_data { - netdev_features_t hw_features; - struct { - u32 txd_size; -+ u32 rxd_size; - } txrx; - }; - diff --git a/target/linux/generic/backport-6.1/702-v5.19-24-net-ethernet-mtk_eth_soc-rely-on-txd_size-field-in-m.patch b/target/linux/generic/backport-6.1/702-v5.19-24-net-ethernet-mtk_eth_soc-rely-on-txd_size-field-in-m.patch deleted file mode 100644 index eb92b8c7a22e..000000000000 --- a/target/linux/generic/backport-6.1/702-v5.19-24-net-ethernet-mtk_eth_soc-rely-on-txd_size-field-in-m.patch +++ /dev/null @@ -1,46 +0,0 @@ -From: Lorenzo Bianconi -Date: Fri, 20 May 2022 20:11:33 +0200 -Subject: [PATCH] net: ethernet: mtk_eth_soc: rely on txd_size field in - mtk_poll_tx/mtk_poll_rx - -This is a preliminary to ad mt7986 ethernet support. - -Tested-by: Sam Shih -Signed-off-by: Lorenzo Bianconi -Signed-off-by: David S. 
Miller ---- - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -1265,9 +1265,12 @@ static struct mtk_rx_ring *mtk_get_rx_ri - return ð->rx_ring[0]; - - for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) { -+ struct mtk_rx_dma *rxd; -+ - ring = ð->rx_ring[i]; - idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size); -- if (ring->dma[idx].rxd2 & RX_DMA_DONE) { -+ rxd = (void *)ring->dma + idx * eth->soc->txrx.rxd_size; -+ if (rxd->rxd2 & RX_DMA_DONE) { - ring->calc_idx_update = true; - return ring; - } -@@ -1318,7 +1321,7 @@ static int mtk_poll_rx(struct napi_struc - goto rx_done; - - idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size); -- rxd = &ring->dma[idx]; -+ rxd = (void *)ring->dma + idx * eth->soc->txrx.rxd_size; - data = ring->data[idx]; - - if (!mtk_rx_get_desc(&trxd, rxd)) -@@ -1510,7 +1513,7 @@ static int mtk_poll_tx_pdma(struct mtk_e - - mtk_tx_unmap(eth, tx_buf, true); - -- desc = &ring->dma[cpu]; -+ desc = (void *)ring->dma + cpu * eth->soc->txrx.txd_size; - ring->last_free = desc; - atomic_inc(&ring->free_count); - diff --git a/target/linux/generic/backport-6.1/702-v5.19-25-net-ethernet-mtk_eth_soc-rely-on-rxd_size-field-in-m.patch b/target/linux/generic/backport-6.1/702-v5.19-25-net-ethernet-mtk_eth_soc-rely-on-rxd_size-field-in-m.patch deleted file mode 100644 index 456eec247c59..000000000000 --- a/target/linux/generic/backport-6.1/702-v5.19-25-net-ethernet-mtk_eth_soc-rely-on-rxd_size-field-in-m.patch +++ /dev/null @@ -1,68 +0,0 @@ -From: Lorenzo Bianconi -Date: Fri, 20 May 2022 20:11:34 +0200 -Subject: [PATCH] net: ethernet: mtk_eth_soc: rely on rxd_size field in - mtk_rx_alloc/mtk_rx_clean - -Remove mtk_rx_dma structure layout dependency in mtk_rx_alloc/mtk_rx_clean. -Initialize to 0 rxd3 and rxd4 in mtk_rx_alloc. -This is a preliminary patch to add mt7986 ethernet support. - -Tested-by: Sam Shih -Signed-off-by: Lorenzo Bianconi -Signed-off-by: David S. 
Miller ---- - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -1785,18 +1785,25 @@ static int mtk_rx_alloc(struct mtk_eth * - return -ENOMEM; - - for (i = 0; i < rx_dma_size; i++) { -+ struct mtk_rx_dma *rxd; -+ - dma_addr_t dma_addr = dma_map_single(eth->dma_dev, - ring->data[i] + NET_SKB_PAD + eth->ip_align, - ring->buf_size, - DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) - return -ENOMEM; -- ring->dma[i].rxd1 = (unsigned int)dma_addr; -+ -+ rxd = (void *)ring->dma + i * eth->soc->txrx.rxd_size; -+ rxd->rxd1 = (unsigned int)dma_addr; - - if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) -- ring->dma[i].rxd2 = RX_DMA_LSO; -+ rxd->rxd2 = RX_DMA_LSO; - else -- ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size); -+ rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size); -+ -+ rxd->rxd3 = 0; -+ rxd->rxd4 = 0; - } - ring->dma_size = rx_dma_size; - ring->calc_idx_update = false; -@@ -1821,14 +1828,17 @@ static void mtk_rx_clean(struct mtk_eth - - if (ring->data && ring->dma) { - for (i = 0; i < ring->dma_size; i++) { -+ struct mtk_rx_dma *rxd; -+ - if (!ring->data[i]) - continue; -- if (!ring->dma[i].rxd1) -+ -+ rxd = (void *)ring->dma + i * eth->soc->txrx.rxd_size; -+ if (!rxd->rxd1) - continue; -- dma_unmap_single(eth->dma_dev, -- ring->dma[i].rxd1, -- ring->buf_size, -- DMA_FROM_DEVICE); -+ -+ dma_unmap_single(eth->dma_dev, rxd->rxd1, -+ ring->buf_size, DMA_FROM_DEVICE); - skb_free_frag(ring->data[i]); - } - kfree(ring->data); diff --git a/target/linux/generic/backport-6.1/702-v5.19-26-net-ethernet-mtk_eth_soc-introduce-device-register-m.patch b/target/linux/generic/backport-6.1/702-v5.19-26-net-ethernet-mtk_eth_soc-introduce-device-register-m.patch deleted file mode 100644 index 272f782877f7..000000000000 --- a/target/linux/generic/backport-6.1/702-v5.19-26-net-ethernet-mtk_eth_soc-introduce-device-register-m.patch +++ /dev/null @@ -1,814 +0,0 @@ -From: Lorenzo Bianconi -Date: Fri, 20 May 2022 20:11:35 +0200 -Subject: [PATCH] net: ethernet: mtk_eth_soc: introduce device register map - -Introduce reg_map structure to add the capability to support different -register definitions. Move register definitions in mtk_regmap structure. -This is a preliminary patch to introduce mt7986 ethernet support. - -Tested-by: Sam Shih -Signed-off-by: Lorenzo Bianconi -Signed-off-by: David S. 
Miller ---- - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -34,6 +34,59 @@ MODULE_PARM_DESC(msg_level, "Message lev - #define MTK_ETHTOOL_STAT(x) { #x, \ - offsetof(struct mtk_hw_stats, x) / sizeof(u64) } - -+static const struct mtk_reg_map mtk_reg_map = { -+ .tx_irq_mask = 0x1a1c, -+ .tx_irq_status = 0x1a18, -+ .pdma = { -+ .rx_ptr = 0x0900, -+ .rx_cnt_cfg = 0x0904, -+ .pcrx_ptr = 0x0908, -+ .glo_cfg = 0x0a04, -+ .rst_idx = 0x0a08, -+ .delay_irq = 0x0a0c, -+ .irq_status = 0x0a20, -+ .irq_mask = 0x0a28, -+ .int_grp = 0x0a50, -+ }, -+ .qdma = { -+ .qtx_cfg = 0x1800, -+ .rx_ptr = 0x1900, -+ .rx_cnt_cfg = 0x1904, -+ .qcrx_ptr = 0x1908, -+ .glo_cfg = 0x1a04, -+ .rst_idx = 0x1a08, -+ .delay_irq = 0x1a0c, -+ .fc_th = 0x1a10, -+ .int_grp = 0x1a20, -+ .hred = 0x1a44, -+ .ctx_ptr = 0x1b00, -+ .dtx_ptr = 0x1b04, -+ .crx_ptr = 0x1b10, -+ .drx_ptr = 0x1b14, -+ .fq_head = 0x1b20, -+ .fq_tail = 0x1b24, -+ .fq_count = 0x1b28, -+ .fq_blen = 0x1b2c, -+ }, -+ .gdm1_cnt = 0x2400, -+}; -+ -+static const struct mtk_reg_map mt7628_reg_map = { -+ .tx_irq_mask = 0x0a28, -+ .tx_irq_status = 0x0a20, -+ .pdma = { -+ .rx_ptr = 0x0900, -+ .rx_cnt_cfg = 0x0904, -+ .pcrx_ptr = 0x0908, -+ .glo_cfg = 0x0a04, -+ .rst_idx = 0x0a08, -+ .delay_irq = 0x0a0c, -+ .irq_status = 0x0a20, -+ .irq_mask = 0x0a28, -+ .int_grp = 0x0a50, -+ }, -+}; -+ - /* strings used by ethtool */ - static const struct mtk_ethtool_stats { - char str[ETH_GSTRING_LEN]; -@@ -619,8 +672,8 @@ static inline void mtk_tx_irq_disable(st - u32 val; - - spin_lock_irqsave(ð->tx_irq_lock, flags); -- val = mtk_r32(eth, eth->tx_int_mask_reg); -- mtk_w32(eth, val & ~mask, eth->tx_int_mask_reg); -+ val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask); -+ mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask); - spin_unlock_irqrestore(ð->tx_irq_lock, flags); - } - -@@ -630,8 +683,8 @@ static inline void mtk_tx_irq_enable(str - u32 val; - - spin_lock_irqsave(ð->tx_irq_lock, flags); -- val = mtk_r32(eth, eth->tx_int_mask_reg); -- mtk_w32(eth, val | mask, eth->tx_int_mask_reg); -+ val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask); -+ mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask); - spin_unlock_irqrestore(ð->tx_irq_lock, flags); - } - -@@ -641,8 +694,8 @@ static inline void mtk_rx_irq_disable(st - u32 val; - - spin_lock_irqsave(ð->rx_irq_lock, flags); -- val = mtk_r32(eth, MTK_PDMA_INT_MASK); -- mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK); -+ val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask); -+ mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask); - spin_unlock_irqrestore(ð->rx_irq_lock, flags); - } - -@@ -652,8 +705,8 @@ static inline void mtk_rx_irq_enable(str - u32 val; - - spin_lock_irqsave(ð->rx_irq_lock, flags); -- val = mtk_r32(eth, MTK_PDMA_INT_MASK); -- mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK); -+ val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask); -+ mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask); - spin_unlock_irqrestore(ð->rx_irq_lock, flags); - } - -@@ -704,39 +757,39 @@ void mtk_stats_update_mac(struct mtk_mac - hw_stats->rx_checksum_errors += - mtk_r32(mac->hw, MT7628_SDM_CS_ERR); - } else { -+ const struct mtk_reg_map *reg_map = eth->soc->reg_map; - unsigned int offs = hw_stats->reg_offset; - u64 stats; - -- hw_stats->rx_bytes += mtk_r32(mac->hw, -- MTK_GDM1_RX_GBCNT_L + offs); -- stats = mtk_r32(mac->hw, MTK_GDM1_RX_GBCNT_H + offs); -+ hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs); -+ stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + 
offs); - if (stats) - hw_stats->rx_bytes += (stats << 32); - hw_stats->rx_packets += -- mtk_r32(mac->hw, MTK_GDM1_RX_GPCNT + offs); -+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x8 + offs); - hw_stats->rx_overflow += -- mtk_r32(mac->hw, MTK_GDM1_RX_OERCNT + offs); -+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs); - hw_stats->rx_fcs_errors += -- mtk_r32(mac->hw, MTK_GDM1_RX_FERCNT + offs); -+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs); - hw_stats->rx_short_errors += -- mtk_r32(mac->hw, MTK_GDM1_RX_SERCNT + offs); -+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs); - hw_stats->rx_long_errors += -- mtk_r32(mac->hw, MTK_GDM1_RX_LENCNT + offs); -+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs); - hw_stats->rx_checksum_errors += -- mtk_r32(mac->hw, MTK_GDM1_RX_CERCNT + offs); -+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs); - hw_stats->rx_flow_control_packets += -- mtk_r32(mac->hw, MTK_GDM1_RX_FCCNT + offs); -+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs); - hw_stats->tx_skip += -- mtk_r32(mac->hw, MTK_GDM1_TX_SKIPCNT + offs); -+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs); - hw_stats->tx_collisions += -- mtk_r32(mac->hw, MTK_GDM1_TX_COLCNT + offs); -+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs); - hw_stats->tx_bytes += -- mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_L + offs); -- stats = mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_H + offs); -+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs); -+ stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs); - if (stats) - hw_stats->tx_bytes += (stats << 32); - hw_stats->tx_packets += -- mtk_r32(mac->hw, MTK_GDM1_TX_GPCNT + offs); -+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs); - } - - u64_stats_update_end(&hw_stats->syncp); -@@ -876,10 +929,10 @@ static int mtk_init_fq_dma(struct mtk_et - txd->txd4 = 0; - } - -- mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD); -- mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL); -- mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT); -- mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN); -+ mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head); -+ mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail); -+ mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count); -+ mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen); - - return 0; - } -@@ -1123,7 +1176,7 @@ static int mtk_tx_map(struct sk_buff *sk - if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { - if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || - !netdev_xmit_more()) -- mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR); -+ mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr); - } else { - int next_idx; - -@@ -1440,6 +1493,7 @@ rx_done: - static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget, - unsigned int *done, unsigned int *bytes) - { -+ const struct mtk_reg_map *reg_map = eth->soc->reg_map; - struct mtk_tx_ring *ring = ð->tx_ring; - struct mtk_tx_dma *desc; - struct sk_buff *skb; -@@ -1447,7 +1501,7 @@ static int mtk_poll_tx_qdma(struct mtk_e - u32 cpu, dma; - - cpu = ring->last_free_ptr; -- dma = mtk_r32(eth, MTK_QTX_DRX_PTR); -+ dma = mtk_r32(eth, reg_map->qdma.drx_ptr); - - desc = mtk_qdma_phys_to_virt(ring, cpu); - -@@ -1482,7 +1536,7 @@ static int mtk_poll_tx_qdma(struct mtk_e - } - - ring->last_free_ptr = cpu; -- mtk_w32(eth, cpu, MTK_QTX_CRX_PTR); -+ mtk_w32(eth, cpu, reg_map->qdma.crx_ptr); - - return budget; - } -@@ -1575,24 +1629,25 @@ static void mtk_handle_status_irq(struct - static int mtk_napi_tx(struct napi_struct *napi, int budget) - { - struct mtk_eth *eth = container_of(napi, struct 
mtk_eth, tx_napi); -+ const struct mtk_reg_map *reg_map = eth->soc->reg_map; - int tx_done = 0; - - if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) - mtk_handle_status_irq(eth); -- mtk_w32(eth, MTK_TX_DONE_INT, eth->tx_int_status_reg); -+ mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status); - tx_done = mtk_poll_tx(eth, budget); - - if (unlikely(netif_msg_intr(eth))) { - dev_info(eth->dev, - "done tx %d, intr 0x%08x/0x%x\n", tx_done, -- mtk_r32(eth, eth->tx_int_status_reg), -- mtk_r32(eth, eth->tx_int_mask_reg)); -+ mtk_r32(eth, reg_map->tx_irq_status), -+ mtk_r32(eth, reg_map->tx_irq_mask)); - } - - if (tx_done == budget) - return budget; - -- if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT) -+ if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT) - return budget; - - if (napi_complete_done(napi, tx_done)) -@@ -1604,6 +1659,7 @@ static int mtk_napi_tx(struct napi_struc - static int mtk_napi_rx(struct napi_struct *napi, int budget) - { - struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi); -+ const struct mtk_reg_map *reg_map = eth->soc->reg_map; - int rx_done_total = 0; - - mtk_handle_status_irq(eth); -@@ -1611,21 +1667,21 @@ static int mtk_napi_rx(struct napi_struc - do { - int rx_done; - -- mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS); -+ mtk_w32(eth, MTK_RX_DONE_INT, reg_map->pdma.irq_status); - rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth); - rx_done_total += rx_done; - - if (unlikely(netif_msg_intr(eth))) { - dev_info(eth->dev, - "done rx %d, intr 0x%08x/0x%x\n", rx_done, -- mtk_r32(eth, MTK_PDMA_INT_STATUS), -- mtk_r32(eth, MTK_PDMA_INT_MASK)); -+ mtk_r32(eth, reg_map->pdma.irq_status), -+ mtk_r32(eth, reg_map->pdma.irq_mask)); - } - - if (rx_done_total == budget) - return budget; - -- } while (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT); -+ } while (mtk_r32(eth, reg_map->pdma.irq_status) & MTK_RX_DONE_INT); - - if (napi_complete_done(napi, rx_done_total)) - mtk_rx_irq_enable(eth, MTK_RX_DONE_INT); -@@ -1688,20 +1744,20 @@ static int mtk_tx_alloc(struct mtk_eth * - */ - wmb(); - -- if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { -- mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR); -- mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR); -+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { -+ mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr); -+ mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr); - mtk_w32(eth, - ring->phys + ((MTK_DMA_SIZE - 1) * sz), -- MTK_QTX_CRX_PTR); -- mtk_w32(eth, ring->last_free_ptr, MTK_QTX_DRX_PTR); -+ soc->reg_map->qdma.crx_ptr); -+ mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr); - mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, -- MTK_QTX_CFG(0)); -+ soc->reg_map->qdma.qtx_cfg); - } else { - mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0); - mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0); - mtk_w32(eth, 0, MT7628_TX_CTX_IDX0); -- mtk_w32(eth, MT7628_PST_DTX_IDX0, MTK_PDMA_RST_IDX); -+ mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx); - } - - return 0; -@@ -1740,6 +1796,7 @@ static void mtk_tx_clean(struct mtk_eth - - static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) - { -+ const struct mtk_reg_map *reg_map = eth->soc->reg_map; - struct mtk_rx_ring *ring; - int rx_data_len, rx_dma_size; - int i; -@@ -1808,16 +1865,18 @@ static int mtk_rx_alloc(struct mtk_eth * - ring->dma_size = rx_dma_size; - ring->calc_idx_update = false; - ring->calc_idx = rx_dma_size - 1; -- ring->crx_idx_reg = MTK_PRX_CRX_IDX_CFG(ring_no); -+ ring->crx_idx_reg = reg_map->pdma.pcrx_ptr + ring_no * 
MTK_QRX_OFFSET; - /* make sure that all changes to the dma ring are flushed before we - * continue - */ - wmb(); - -- mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no) + offset); -- mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no) + offset); -+ mtk_w32(eth, ring->phys, -+ reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET + offset); -+ mtk_w32(eth, rx_dma_size, -+ reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET + offset); - mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg + offset); -- mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX + offset); -+ mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), reg_map->pdma.rst_idx + offset); - - return 0; - } -@@ -2126,9 +2185,9 @@ static int mtk_dma_busy_wait(struct mtk_ - u32 val; - - if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) -- reg = MTK_QDMA_GLO_CFG; -+ reg = eth->soc->reg_map->qdma.glo_cfg; - else -- reg = MTK_PDMA_GLO_CFG; -+ reg = eth->soc->reg_map->pdma.glo_cfg; - - ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val, - !(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)), -@@ -2186,8 +2245,8 @@ static int mtk_dma_init(struct mtk_eth * - * automatically - */ - mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | -- FC_THRES_MIN, MTK_QDMA_FC_THRES); -- mtk_w32(eth, 0x0, MTK_QDMA_HRED2); -+ FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th); -+ mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred); - } - - return 0; -@@ -2261,13 +2320,14 @@ static irqreturn_t mtk_handle_irq_tx(int - static irqreturn_t mtk_handle_irq(int irq, void *_eth) - { - struct mtk_eth *eth = _eth; -+ const struct mtk_reg_map *reg_map = eth->soc->reg_map; - -- if (mtk_r32(eth, MTK_PDMA_INT_MASK) & MTK_RX_DONE_INT) { -- if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT) -+ if (mtk_r32(eth, reg_map->pdma.irq_mask) & MTK_RX_DONE_INT) { -+ if (mtk_r32(eth, reg_map->pdma.irq_status) & MTK_RX_DONE_INT) - mtk_handle_irq_rx(irq, _eth); - } -- if (mtk_r32(eth, eth->tx_int_mask_reg) & MTK_TX_DONE_INT) { -- if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT) -+ if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) { -+ if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT) - mtk_handle_irq_tx(irq, _eth); - } - -@@ -2291,6 +2351,7 @@ static void mtk_poll_controller(struct n - static int mtk_start_dma(struct mtk_eth *eth) - { - u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? 
MTK_RX_2B_OFFSET : 0; -+ const struct mtk_reg_map *reg_map = eth->soc->reg_map; - int err; - - err = mtk_dma_init(eth); -@@ -2305,16 +2366,15 @@ static int mtk_start_dma(struct mtk_eth - MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO | - MTK_RX_DMA_EN | MTK_RX_2B_OFFSET | - MTK_RX_BT_32DWORDS, -- MTK_QDMA_GLO_CFG); -- -+ reg_map->qdma.glo_cfg); - mtk_w32(eth, - MTK_RX_DMA_EN | rx_2b_offset | - MTK_RX_BT_32DWORDS | MTK_MULTI_EN, -- MTK_PDMA_GLO_CFG); -+ reg_map->pdma.glo_cfg); - } else { - mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN | - MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS, -- MTK_PDMA_GLO_CFG); -+ reg_map->pdma.glo_cfg); - } - - return 0; -@@ -2440,8 +2500,8 @@ static int mtk_stop(struct net_device *d - cancel_work_sync(ð->tx_dim.work); - - if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) -- mtk_stop_dma(eth, MTK_QDMA_GLO_CFG); -- mtk_stop_dma(eth, MTK_PDMA_GLO_CFG); -+ mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg); -+ mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg); - - mtk_dma_free(eth); - -@@ -2495,6 +2555,7 @@ static void mtk_dim_rx(struct work_struc - { - struct dim *dim = container_of(work, struct dim, work); - struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim); -+ const struct mtk_reg_map *reg_map = eth->soc->reg_map; - struct dim_cq_moder cur_profile; - u32 val, cur; - -@@ -2502,7 +2563,7 @@ static void mtk_dim_rx(struct work_struc - dim->profile_ix); - spin_lock_bh(ð->dim_lock); - -- val = mtk_r32(eth, MTK_PDMA_DELAY_INT); -+ val = mtk_r32(eth, reg_map->pdma.delay_irq); - val &= MTK_PDMA_DELAY_TX_MASK; - val |= MTK_PDMA_DELAY_RX_EN; - -@@ -2512,9 +2573,9 @@ static void mtk_dim_rx(struct work_struc - cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK); - val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT; - -- mtk_w32(eth, val, MTK_PDMA_DELAY_INT); -+ mtk_w32(eth, val, reg_map->pdma.delay_irq); - if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) -- mtk_w32(eth, val, MTK_QDMA_DELAY_INT); -+ mtk_w32(eth, val, reg_map->qdma.delay_irq); - - spin_unlock_bh(ð->dim_lock); - -@@ -2525,6 +2586,7 @@ static void mtk_dim_tx(struct work_struc - { - struct dim *dim = container_of(work, struct dim, work); - struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim); -+ const struct mtk_reg_map *reg_map = eth->soc->reg_map; - struct dim_cq_moder cur_profile; - u32 val, cur; - -@@ -2532,7 +2594,7 @@ static void mtk_dim_tx(struct work_struc - dim->profile_ix); - spin_lock_bh(ð->dim_lock); - -- val = mtk_r32(eth, MTK_PDMA_DELAY_INT); -+ val = mtk_r32(eth, reg_map->pdma.delay_irq); - val &= MTK_PDMA_DELAY_RX_MASK; - val |= MTK_PDMA_DELAY_TX_EN; - -@@ -2542,9 +2604,9 @@ static void mtk_dim_tx(struct work_struc - cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK); - val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT; - -- mtk_w32(eth, val, MTK_PDMA_DELAY_INT); -+ mtk_w32(eth, val, reg_map->pdma.delay_irq); - if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) -- mtk_w32(eth, val, MTK_QDMA_DELAY_INT); -+ mtk_w32(eth, val, reg_map->qdma.delay_irq); - - spin_unlock_bh(ð->dim_lock); - -@@ -2555,6 +2617,7 @@ static int mtk_hw_init(struct mtk_eth *e - { - u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA | - ETHSYS_DMA_AG_MAP_PPE; -+ const struct mtk_reg_map *reg_map = eth->soc->reg_map; - int i, val, ret; - - if (test_and_set_bit(MTK_HW_INIT, ð->state)) -@@ -2629,10 +2692,10 @@ static int mtk_hw_init(struct mtk_eth *e - mtk_rx_irq_disable(eth, ~0); - - /* FE int grouping */ -- mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1); -- mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2); 
-- mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1); -- mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2); -+ mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp); -+ mtk_w32(eth, MTK_RX_DONE_INT, reg_map->pdma.int_grp + 4); -+ mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp); -+ mtk_w32(eth, MTK_RX_DONE_INT, reg_map->qdma.int_grp + 4); - mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP); - - return 0; -@@ -3171,14 +3234,6 @@ static int mtk_probe(struct platform_dev - if (IS_ERR(eth->base)) - return PTR_ERR(eth->base); - -- if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { -- eth->tx_int_mask_reg = MTK_QDMA_INT_MASK; -- eth->tx_int_status_reg = MTK_QDMA_INT_STATUS; -- } else { -- eth->tx_int_mask_reg = MTK_PDMA_INT_MASK; -- eth->tx_int_status_reg = MTK_PDMA_INT_STATUS; -- } -- - if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { - eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA; - eth->ip_align = NET_IP_ALIGN; -@@ -3412,6 +3467,7 @@ static int mtk_remove(struct platform_de - } - - static const struct mtk_soc_data mt2701_data = { -+ .reg_map = &mtk_reg_map, - .caps = MT7623_CAPS | MTK_HWLRO, - .hw_features = MTK_HW_FEATURES, - .required_clks = MT7623_CLKS_BITMAP, -@@ -3423,6 +3479,7 @@ static const struct mtk_soc_data mt2701_ - }; - - static const struct mtk_soc_data mt7621_data = { -+ .reg_map = &mtk_reg_map, - .caps = MT7621_CAPS, - .hw_features = MTK_HW_FEATURES, - .required_clks = MT7621_CLKS_BITMAP, -@@ -3435,6 +3492,7 @@ static const struct mtk_soc_data mt7621_ - }; - - static const struct mtk_soc_data mt7622_data = { -+ .reg_map = &mtk_reg_map, - .ana_rgc3 = 0x2028, - .caps = MT7622_CAPS | MTK_HWLRO, - .hw_features = MTK_HW_FEATURES, -@@ -3448,6 +3506,7 @@ static const struct mtk_soc_data mt7622_ - }; - - static const struct mtk_soc_data mt7623_data = { -+ .reg_map = &mtk_reg_map, - .caps = MT7623_CAPS | MTK_HWLRO, - .hw_features = MTK_HW_FEATURES, - .required_clks = MT7623_CLKS_BITMAP, -@@ -3460,6 +3519,7 @@ static const struct mtk_soc_data mt7623_ - }; - - static const struct mtk_soc_data mt7629_data = { -+ .reg_map = &mtk_reg_map, - .ana_rgc3 = 0x128, - .caps = MT7629_CAPS | MTK_HWLRO, - .hw_features = MTK_HW_FEATURES, -@@ -3472,6 +3532,7 @@ static const struct mtk_soc_data mt7629_ - }; - - static const struct mtk_soc_data rt5350_data = { -+ .reg_map = &mt7628_reg_map, - .caps = MT7628_CAPS, - .hw_features = MTK_HW_FEATURES_MT7628, - .required_clks = MT7628_CLKS_BITMAP, ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -48,6 +48,8 @@ - #define MTK_HW_FEATURES_MT7628 (NETIF_F_SG | NETIF_F_RXCSUM) - #define NEXT_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1)) - -+#define MTK_QRX_OFFSET 0x10 -+ - #define MTK_MAX_RX_RING_NUM 4 - #define MTK_HW_LRO_DMA_SIZE 8 - -@@ -100,18 +102,6 @@ - /* Unicast Filter MAC Address Register - High */ - #define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000)) - --/* PDMA RX Base Pointer Register */ --#define MTK_PRX_BASE_PTR0 0x900 --#define MTK_PRX_BASE_PTR_CFG(x) (MTK_PRX_BASE_PTR0 + (x * 0x10)) -- --/* PDMA RX Maximum Count Register */ --#define MTK_PRX_MAX_CNT0 0x904 --#define MTK_PRX_MAX_CNT_CFG(x) (MTK_PRX_MAX_CNT0 + (x * 0x10)) -- --/* PDMA RX CPU Pointer Register */ --#define MTK_PRX_CRX_IDX0 0x908 --#define MTK_PRX_CRX_IDX_CFG(x) (MTK_PRX_CRX_IDX0 + (x * 0x10)) -- - /* PDMA HW LRO Control Registers */ - #define MTK_PDMA_LRO_CTRL_DW0 0x980 - #define MTK_LRO_EN BIT(0) -@@ -126,18 +116,19 @@ - #define MTK_ADMA_MODE BIT(15) - #define MTK_LRO_MIN_RXD_SDL (MTK_HW_LRO_SDL_REMAIN_ROOM << 16) - --/* PDMA Global Configuration 
Register */ --#define MTK_PDMA_GLO_CFG 0xa04 -+#define MTK_RX_DMA_LRO_EN BIT(8) - #define MTK_MULTI_EN BIT(10) - #define MTK_PDMA_SIZE_8DWORDS (1 << 4) - -+/* PDMA Global Configuration Register */ -+#define MTK_PDMA_LRO_SDL 0x3000 -+#define MTK_RX_CFG_SDL_OFFSET 16 -+ - /* PDMA Reset Index Register */ --#define MTK_PDMA_RST_IDX 0xa08 - #define MTK_PST_DRX_IDX0 BIT(16) - #define MTK_PST_DRX_IDX_CFG(x) (MTK_PST_DRX_IDX0 << (x)) - - /* PDMA Delay Interrupt Register */ --#define MTK_PDMA_DELAY_INT 0xa0c - #define MTK_PDMA_DELAY_RX_MASK GENMASK(15, 0) - #define MTK_PDMA_DELAY_RX_EN BIT(15) - #define MTK_PDMA_DELAY_RX_PINT_SHIFT 8 -@@ -151,19 +142,9 @@ - #define MTK_PDMA_DELAY_PINT_MASK 0x7f - #define MTK_PDMA_DELAY_PTIME_MASK 0xff - --/* PDMA Interrupt Status Register */ --#define MTK_PDMA_INT_STATUS 0xa20 -- --/* PDMA Interrupt Mask Register */ --#define MTK_PDMA_INT_MASK 0xa28 -- - /* PDMA HW LRO Alter Flow Delta Register */ - #define MTK_PDMA_LRO_ALT_SCORE_DELTA 0xa4c - --/* PDMA Interrupt grouping registers */ --#define MTK_PDMA_INT_GRP1 0xa50 --#define MTK_PDMA_INT_GRP2 0xa54 -- - /* PDMA HW LRO IP Setting Registers */ - #define MTK_LRO_RX_RING0_DIP_DW0 0xb04 - #define MTK_LRO_DIP_DW0_CFG(x) (MTK_LRO_RX_RING0_DIP_DW0 + (x * 0x40)) -@@ -185,26 +166,9 @@ - #define MTK_RING_MAX_AGG_CNT_H ((MTK_HW_LRO_MAX_AGG_CNT >> 6) & 0x3) - - /* QDMA TX Queue Configuration Registers */ --#define MTK_QTX_CFG(x) (0x1800 + (x * 0x10)) - #define QDMA_RES_THRES 4 - --/* QDMA TX Queue Scheduler Registers */ --#define MTK_QTX_SCH(x) (0x1804 + (x * 0x10)) -- --/* QDMA RX Base Pointer Register */ --#define MTK_QRX_BASE_PTR0 0x1900 -- --/* QDMA RX Maximum Count Register */ --#define MTK_QRX_MAX_CNT0 0x1904 -- --/* QDMA RX CPU Pointer Register */ --#define MTK_QRX_CRX_IDX0 0x1908 -- --/* QDMA RX DMA Pointer Register */ --#define MTK_QRX_DRX_IDX0 0x190C -- - /* QDMA Global Configuration Register */ --#define MTK_QDMA_GLO_CFG 0x1A04 - #define MTK_RX_2B_OFFSET BIT(31) - #define MTK_RX_BT_32DWORDS (3 << 11) - #define MTK_NDP_CO_PRO BIT(10) -@@ -216,20 +180,12 @@ - #define MTK_TX_DMA_EN BIT(0) - #define MTK_DMA_BUSY_TIMEOUT_US 1000000 - --/* QDMA Reset Index Register */ --#define MTK_QDMA_RST_IDX 0x1A08 -- --/* QDMA Delay Interrupt Register */ --#define MTK_QDMA_DELAY_INT 0x1A0C -- - /* QDMA Flow Control Register */ --#define MTK_QDMA_FC_THRES 0x1A10 - #define FC_THRES_DROP_MODE BIT(20) - #define FC_THRES_DROP_EN (7 << 16) - #define FC_THRES_MIN 0x4444 - - /* QDMA Interrupt Status Register */ --#define MTK_QDMA_INT_STATUS 0x1A18 - #define MTK_RX_DONE_DLY BIT(30) - #define MTK_TX_DONE_DLY BIT(28) - #define MTK_RX_DONE_INT3 BIT(19) -@@ -244,55 +200,8 @@ - #define MTK_TX_DONE_INT MTK_TX_DONE_DLY - - /* QDMA Interrupt grouping registers */ --#define MTK_QDMA_INT_GRP1 0x1a20 --#define MTK_QDMA_INT_GRP2 0x1a24 - #define MTK_RLS_DONE_INT BIT(0) - --/* QDMA Interrupt Status Register */ --#define MTK_QDMA_INT_MASK 0x1A1C -- --/* QDMA Interrupt Mask Register */ --#define MTK_QDMA_HRED2 0x1A44 -- --/* QDMA TX Forward CPU Pointer Register */ --#define MTK_QTX_CTX_PTR 0x1B00 -- --/* QDMA TX Forward DMA Pointer Register */ --#define MTK_QTX_DTX_PTR 0x1B04 -- --/* QDMA TX Release CPU Pointer Register */ --#define MTK_QTX_CRX_PTR 0x1B10 -- --/* QDMA TX Release DMA Pointer Register */ --#define MTK_QTX_DRX_PTR 0x1B14 -- --/* QDMA FQ Head Pointer Register */ --#define MTK_QDMA_FQ_HEAD 0x1B20 -- --/* QDMA FQ Head Pointer Register */ --#define MTK_QDMA_FQ_TAIL 0x1B24 -- --/* QDMA FQ Free Page Counter Register */ --#define MTK_QDMA_FQ_CNT 
0x1B28 -- --/* QDMA FQ Free Page Buffer Length Register */ --#define MTK_QDMA_FQ_BLEN 0x1B2C -- --/* GMA1 counter / statics register */ --#define MTK_GDM1_RX_GBCNT_L 0x2400 --#define MTK_GDM1_RX_GBCNT_H 0x2404 --#define MTK_GDM1_RX_GPCNT 0x2408 --#define MTK_GDM1_RX_OERCNT 0x2410 --#define MTK_GDM1_RX_FERCNT 0x2414 --#define MTK_GDM1_RX_SERCNT 0x2418 --#define MTK_GDM1_RX_LENCNT 0x241c --#define MTK_GDM1_RX_CERCNT 0x2420 --#define MTK_GDM1_RX_FCCNT 0x2424 --#define MTK_GDM1_TX_SKIPCNT 0x2428 --#define MTK_GDM1_TX_COLCNT 0x242c --#define MTK_GDM1_TX_GBCNT_L 0x2430 --#define MTK_GDM1_TX_GBCNT_H 0x2434 --#define MTK_GDM1_TX_GPCNT 0x2438 - #define MTK_STAT_OFFSET 0x40 - - #define MTK_WDMA0_BASE 0x2800 -@@ -854,8 +763,46 @@ struct mtk_tx_dma_desc_info { - u8 last:1; - }; - -+struct mtk_reg_map { -+ u32 tx_irq_mask; -+ u32 tx_irq_status; -+ struct { -+ u32 rx_ptr; /* rx base pointer */ -+ u32 rx_cnt_cfg; /* rx max count configuration */ -+ u32 pcrx_ptr; /* rx cpu pointer */ -+ u32 glo_cfg; /* global configuration */ -+ u32 rst_idx; /* reset index */ -+ u32 delay_irq; /* delay interrupt */ -+ u32 irq_status; /* interrupt status */ -+ u32 irq_mask; /* interrupt mask */ -+ u32 int_grp; -+ } pdma; -+ struct { -+ u32 qtx_cfg; /* tx queue configuration */ -+ u32 rx_ptr; /* rx base pointer */ -+ u32 rx_cnt_cfg; /* rx max count configuration */ -+ u32 qcrx_ptr; /* rx cpu pointer */ -+ u32 glo_cfg; /* global configuration */ -+ u32 rst_idx; /* reset index */ -+ u32 delay_irq; /* delay interrupt */ -+ u32 fc_th; /* flow control */ -+ u32 int_grp; -+ u32 hred; /* interrupt mask */ -+ u32 ctx_ptr; /* tx acquire cpu pointer */ -+ u32 dtx_ptr; /* tx acquire dma pointer */ -+ u32 crx_ptr; /* tx release cpu pointer */ -+ u32 drx_ptr; /* tx release dma pointer */ -+ u32 fq_head; /* fq head pointer */ -+ u32 fq_tail; /* fq tail pointer */ -+ u32 fq_count; /* fq free page count */ -+ u32 fq_blen; /* fq free page buffer length */ -+ } qdma; -+ u32 gdm1_cnt; -+}; -+ - /* struct mtk_eth_data - This is the structure holding all differences - * among various plaforms -+ * @reg_map Soc register map. - * @ana_rgc3: The offset for register ANA_RGC3 related to - * sgmiisys syscon - * @caps Flags shown the extra capability for the SoC -@@ -868,6 +815,7 @@ struct mtk_tx_dma_desc_info { - * @rxd_size Rx DMA descriptor size. - */ - struct mtk_soc_data { -+ const struct mtk_reg_map *reg_map; - u32 ana_rgc3; - u32 caps; - u32 required_clks; -@@ -995,8 +943,6 @@ struct mtk_eth { - u32 tx_bytes; - struct dim tx_dim; - -- u32 tx_int_mask_reg; -- u32 tx_int_status_reg; - u32 rx_dma_l4_valid; - int ip_align; - diff --git a/target/linux/generic/backport-6.1/702-v5.19-27-net-ethernet-mtk_eth_soc-introduce-MTK_NETSYS_V2-sup.patch b/target/linux/generic/backport-6.1/702-v5.19-27-net-ethernet-mtk_eth_soc-introduce-MTK_NETSYS_V2-sup.patch deleted file mode 100644 index 4d6c94b13b8e..000000000000 --- a/target/linux/generic/backport-6.1/702-v5.19-27-net-ethernet-mtk_eth_soc-introduce-MTK_NETSYS_V2-sup.patch +++ /dev/null @@ -1,917 +0,0 @@ -From: Lorenzo Bianconi -Date: Fri, 20 May 2022 20:11:36 +0200 -Subject: [PATCH] net: ethernet: mtk_eth_soc: introduce MTK_NETSYS_V2 support - -Introduce MTK_NETSYS_V2 support. MTK_NETSYS_V2 defines 32B TX/RX DMA -descriptors. -This is a preliminary patch to add mt7986 ethernet support. - -Tested-by: Sam Shih -Signed-off-by: Lorenzo Bianconi -Signed-off-by: David S. 
Miller ---- - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -863,8 +863,8 @@ static inline int mtk_max_buf_size(int f - return buf_size; - } - --static inline bool mtk_rx_get_desc(struct mtk_rx_dma *rxd, -- struct mtk_rx_dma *dma_rxd) -+static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd, -+ struct mtk_rx_dma_v2 *dma_rxd) - { - rxd->rxd2 = READ_ONCE(dma_rxd->rxd2); - if (!(rxd->rxd2 & RX_DMA_DONE)) -@@ -873,6 +873,10 @@ static inline bool mtk_rx_get_desc(struc - rxd->rxd1 = READ_ONCE(dma_rxd->rxd1); - rxd->rxd3 = READ_ONCE(dma_rxd->rxd3); - rxd->rxd4 = READ_ONCE(dma_rxd->rxd4); -+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { -+ rxd->rxd5 = READ_ONCE(dma_rxd->rxd5); -+ rxd->rxd6 = READ_ONCE(dma_rxd->rxd6); -+ } - - return true; - } -@@ -917,7 +921,7 @@ static int mtk_init_fq_dma(struct mtk_et - phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1); - - for (i = 0; i < cnt; i++) { -- struct mtk_tx_dma *txd; -+ struct mtk_tx_dma_v2 *txd; - - txd = (void *)eth->scratch_ring + i * soc->txrx.txd_size; - txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE; -@@ -927,6 +931,12 @@ static int mtk_init_fq_dma(struct mtk_et - - txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE); - txd->txd4 = 0; -+ if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) { -+ txd->txd5 = 0; -+ txd->txd6 = 0; -+ txd->txd7 = 0; -+ txd->txd8 = 0; -+ } - } - - mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head); -@@ -1030,10 +1040,12 @@ static void setup_tx_buf(struct mtk_eth - } - } - --static void mtk_tx_set_dma_desc(struct net_device *dev, struct mtk_tx_dma *desc, -- struct mtk_tx_dma_desc_info *info) -+static void mtk_tx_set_dma_desc_v1(struct net_device *dev, void *txd, -+ struct mtk_tx_dma_desc_info *info) - { - struct mtk_mac *mac = netdev_priv(dev); -+ struct mtk_eth *eth = mac->hw; -+ struct mtk_tx_dma *desc = txd; - u32 data; - - WRITE_ONCE(desc->txd1, info->addr); -@@ -1057,6 +1069,59 @@ static void mtk_tx_set_dma_desc(struct n - WRITE_ONCE(desc->txd4, data); - } - -+static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd, -+ struct mtk_tx_dma_desc_info *info) -+{ -+ struct mtk_mac *mac = netdev_priv(dev); -+ struct mtk_tx_dma_v2 *desc = txd; -+ struct mtk_eth *eth = mac->hw; -+ u32 data; -+ -+ WRITE_ONCE(desc->txd1, info->addr); -+ -+ data = TX_DMA_PLEN0(info->size); -+ if (info->last) -+ data |= TX_DMA_LS0; -+ WRITE_ONCE(desc->txd3, data); -+ -+ if (!info->qid && mac->id) -+ info->qid = MTK_QDMA_GMAC2_QID; -+ -+ data = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2; /* forward port */ -+ data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid); -+ WRITE_ONCE(desc->txd4, data); -+ -+ data = 0; -+ if (info->first) { -+ if (info->gso) -+ data |= TX_DMA_TSO_V2; -+ /* tx checksum offload */ -+ if (info->csum) -+ data |= TX_DMA_CHKSUM_V2; -+ } -+ WRITE_ONCE(desc->txd5, data); -+ -+ data = 0; -+ if (info->first && info->vlan) -+ data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci; -+ WRITE_ONCE(desc->txd6, data); -+ -+ WRITE_ONCE(desc->txd7, 0); -+ WRITE_ONCE(desc->txd8, 0); -+} -+ -+static void mtk_tx_set_dma_desc(struct net_device *dev, void *txd, -+ struct mtk_tx_dma_desc_info *info) -+{ -+ struct mtk_mac *mac = netdev_priv(dev); -+ struct mtk_eth *eth = mac->hw; -+ -+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) -+ mtk_tx_set_dma_desc_v2(dev, txd, info); -+ else -+ mtk_tx_set_dma_desc_v1(dev, txd, info); -+} -+ - static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, - int tx_num, struct mtk_tx_ring *ring, bool gso) - 
{ -@@ -1065,6 +1130,7 @@ static int mtk_tx_map(struct sk_buff *sk - .gso = gso, - .csum = skb->ip_summed == CHECKSUM_PARTIAL, - .vlan = skb_vlan_tag_present(skb), -+ .qid = skb->mark & MTK_QDMA_TX_MASK, - .vlan_tci = skb_vlan_tag_get(skb), - .first = true, - .last = !skb_is_nonlinear(skb), -@@ -1124,7 +1190,9 @@ static int mtk_tx_map(struct sk_buff *sk - } - - memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info)); -- txd_info.size = min(frag_size, MTK_TX_DMA_BUF_LEN); -+ txd_info.size = min_t(unsigned int, frag_size, -+ soc->txrx.dma_max_len); -+ txd_info.qid = skb->mark & MTK_QDMA_TX_MASK; - txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 && - !(frag_size - txd_info.size); - txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag, -@@ -1205,17 +1273,16 @@ err_dma: - return -ENOMEM; - } - --static inline int mtk_cal_txd_req(struct sk_buff *skb) -+static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb) - { -- int i, nfrags; -+ int i, nfrags = 1; - skb_frag_t *frag; - -- nfrags = 1; - if (skb_is_gso(skb)) { - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - frag = &skb_shinfo(skb)->frags[i]; - nfrags += DIV_ROUND_UP(skb_frag_size(frag), -- MTK_TX_DMA_BUF_LEN); -+ eth->soc->txrx.dma_max_len); - } - } else { - nfrags += skb_shinfo(skb)->nr_frags; -@@ -1267,7 +1334,7 @@ static netdev_tx_t mtk_start_xmit(struct - if (unlikely(test_bit(MTK_RESETTING, ð->state))) - goto drop; - -- tx_num = mtk_cal_txd_req(skb); -+ tx_num = mtk_cal_txd_req(eth, skb); - if (unlikely(atomic_read(&ring->free_count) <= tx_num)) { - netif_stop_queue(dev); - netif_err(eth, tx_queued, dev, -@@ -1359,7 +1426,7 @@ static int mtk_poll_rx(struct napi_struc - int idx; - struct sk_buff *skb; - u8 *data, *new_data; -- struct mtk_rx_dma *rxd, trxd; -+ struct mtk_rx_dma_v2 *rxd, trxd; - int done = 0, bytes = 0; - - while (done < budget) { -@@ -1367,7 +1434,7 @@ static int mtk_poll_rx(struct napi_struc - unsigned int pktlen; - dma_addr_t dma_addr; - u32 hash, reason; -- int mac; -+ int mac = 0; - - ring = mtk_get_rx_ring(eth); - if (unlikely(!ring)) -@@ -1377,16 +1444,15 @@ static int mtk_poll_rx(struct napi_struc - rxd = (void *)ring->dma + idx * eth->soc->txrx.rxd_size; - data = ring->data[idx]; - -- if (!mtk_rx_get_desc(&trxd, rxd)) -+ if (!mtk_rx_get_desc(eth, &trxd, rxd)) - break; - - /* find out which mac the packet come from. 
values start at 1 */ -- if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) || -- (trxd.rxd4 & RX_DMA_SPECIAL_TAG)) -- mac = 0; -- else -- mac = ((trxd.rxd4 >> RX_DMA_FPORT_SHIFT) & -- RX_DMA_FPORT_MASK) - 1; -+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) -+ mac = RX_DMA_GET_SPORT_V2(trxd.rxd5) - 1; -+ else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) && -+ !(trxd.rxd4 & RX_DMA_SPECIAL_TAG)) -+ mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1; - - if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT || - !eth->netdev[mac])) -@@ -1432,7 +1498,7 @@ static int mtk_poll_rx(struct napi_struc - pktlen = RX_DMA_GET_PLEN0(trxd.rxd2); - skb->dev = netdev; - skb_put(skb, pktlen); -- if (trxd.rxd4 & eth->rx_dma_l4_valid) -+ if (trxd.rxd4 & eth->soc->txrx.rx_dma_l4_valid) - skb->ip_summed = CHECKSUM_UNNECESSARY; - else - skb_checksum_none_assert(skb); -@@ -1450,10 +1516,25 @@ static int mtk_poll_rx(struct napi_struc - mtk_ppe_check_skb(eth->ppe, skb, - trxd.rxd4 & MTK_RXD4_FOE_ENTRY); - -- if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX && -- (trxd.rxd2 & RX_DMA_VTAG)) -- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), -- RX_DMA_VID(trxd.rxd3)); -+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { -+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { -+ if (trxd.rxd3 & RX_DMA_VTAG_V2) -+ __vlan_hwaccel_put_tag(skb, -+ htons(RX_DMA_VPID(trxd.rxd4)), -+ RX_DMA_VID(trxd.rxd4)); -+ } else if (trxd.rxd2 & RX_DMA_VTAG) { -+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), -+ RX_DMA_VID(trxd.rxd3)); -+ } -+ -+ /* If the device is attached to a dsa switch, the special -+ * tag inserted in VLAN field by hw switch can * be offloaded -+ * by RX HW VLAN offload. Clear vlan info. -+ */ -+ if (netdev_uses_dsa(netdev)) -+ __vlan_hwaccel_clear_tag(skb); -+ } -+ - skb_record_rx_queue(skb, 0); - napi_gro_receive(napi, skb); - -@@ -1465,7 +1546,7 @@ release_desc: - if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) - rxd->rxd2 = RX_DMA_LSO; - else -- rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size); -+ rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size); - - ring->calc_idx = idx; - -@@ -1667,7 +1748,8 @@ static int mtk_napi_rx(struct napi_struc - do { - int rx_done; - -- mtk_w32(eth, MTK_RX_DONE_INT, reg_map->pdma.irq_status); -+ mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, -+ reg_map->pdma.irq_status); - rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth); - rx_done_total += rx_done; - -@@ -1681,10 +1763,11 @@ static int mtk_napi_rx(struct napi_struc - if (rx_done_total == budget) - return budget; - -- } while (mtk_r32(eth, reg_map->pdma.irq_status) & MTK_RX_DONE_INT); -+ } while (mtk_r32(eth, reg_map->pdma.irq_status) & -+ eth->soc->txrx.rx_irq_done_mask); - - if (napi_complete_done(napi, rx_done_total)) -- mtk_rx_irq_enable(eth, MTK_RX_DONE_INT); -+ mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask); - - return rx_done_total; - } -@@ -1694,7 +1777,7 @@ static int mtk_tx_alloc(struct mtk_eth * - const struct mtk_soc_data *soc = eth->soc; - struct mtk_tx_ring *ring = ð->tx_ring; - int i, sz = soc->txrx.txd_size; -- struct mtk_tx_dma *txd; -+ struct mtk_tx_dma_v2 *txd; - - ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf), - GFP_KERNEL); -@@ -1714,13 +1797,19 @@ static int mtk_tx_alloc(struct mtk_eth * - txd->txd2 = next_ptr; - txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; - txd->txd4 = 0; -+ if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) { -+ txd->txd5 = 0; -+ txd->txd6 = 0; -+ txd->txd7 = 0; -+ txd->txd8 = 0; -+ } - } - - /* On MT7688 (PDMA only) this driver uses the ring->dma structs - * only as the framework. 
The real HW descriptors are the PDMA - * descriptors in ring->dma_pdma. - */ -- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { -+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { - ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz, - &ring->phys_pdma, GFP_KERNEL); - if (!ring->dma_pdma) -@@ -1800,13 +1889,11 @@ static int mtk_rx_alloc(struct mtk_eth * - struct mtk_rx_ring *ring; - int rx_data_len, rx_dma_size; - int i; -- u32 offset = 0; - - if (rx_flag == MTK_RX_FLAGS_QDMA) { - if (ring_no) - return -EINVAL; - ring = ð->rx_ring_qdma; -- offset = 0x1000; - } else { - ring = ð->rx_ring[ring_no]; - } -@@ -1842,7 +1929,7 @@ static int mtk_rx_alloc(struct mtk_eth * - return -ENOMEM; - - for (i = 0; i < rx_dma_size; i++) { -- struct mtk_rx_dma *rxd; -+ struct mtk_rx_dma_v2 *rxd; - - dma_addr_t dma_addr = dma_map_single(eth->dma_dev, - ring->data[i] + NET_SKB_PAD + eth->ip_align, -@@ -1857,26 +1944,47 @@ static int mtk_rx_alloc(struct mtk_eth * - if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) - rxd->rxd2 = RX_DMA_LSO; - else -- rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size); -+ rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size); - - rxd->rxd3 = 0; - rxd->rxd4 = 0; -+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { -+ rxd->rxd5 = 0; -+ rxd->rxd6 = 0; -+ rxd->rxd7 = 0; -+ rxd->rxd8 = 0; -+ } - } - ring->dma_size = rx_dma_size; - ring->calc_idx_update = false; - ring->calc_idx = rx_dma_size - 1; -- ring->crx_idx_reg = reg_map->pdma.pcrx_ptr + ring_no * MTK_QRX_OFFSET; -+ if (rx_flag == MTK_RX_FLAGS_QDMA) -+ ring->crx_idx_reg = reg_map->qdma.qcrx_ptr + -+ ring_no * MTK_QRX_OFFSET; -+ else -+ ring->crx_idx_reg = reg_map->pdma.pcrx_ptr + -+ ring_no * MTK_QRX_OFFSET; - /* make sure that all changes to the dma ring are flushed before we - * continue - */ - wmb(); - -- mtk_w32(eth, ring->phys, -- reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET + offset); -- mtk_w32(eth, rx_dma_size, -- reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET + offset); -- mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg + offset); -- mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), reg_map->pdma.rst_idx + offset); -+ if (rx_flag == MTK_RX_FLAGS_QDMA) { -+ mtk_w32(eth, ring->phys, -+ reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET); -+ mtk_w32(eth, rx_dma_size, -+ reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET); -+ mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), -+ reg_map->qdma.rst_idx); -+ } else { -+ mtk_w32(eth, ring->phys, -+ reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET); -+ mtk_w32(eth, rx_dma_size, -+ reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET); -+ mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), -+ reg_map->pdma.rst_idx); -+ } -+ mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); - - return 0; - } -@@ -2298,7 +2406,7 @@ static irqreturn_t mtk_handle_irq_rx(int - eth->rx_events++; - if (likely(napi_schedule_prep(ð->rx_napi))) { - __napi_schedule(ð->rx_napi); -- mtk_rx_irq_disable(eth, MTK_RX_DONE_INT); -+ mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask); - } - - return IRQ_HANDLED; -@@ -2322,8 +2430,10 @@ static irqreturn_t mtk_handle_irq(int ir - struct mtk_eth *eth = _eth; - const struct mtk_reg_map *reg_map = eth->soc->reg_map; - -- if (mtk_r32(eth, reg_map->pdma.irq_mask) & MTK_RX_DONE_INT) { -- if (mtk_r32(eth, reg_map->pdma.irq_status) & MTK_RX_DONE_INT) -+ if (mtk_r32(eth, reg_map->pdma.irq_mask) & -+ eth->soc->txrx.rx_irq_done_mask) { -+ if (mtk_r32(eth, reg_map->pdma.irq_status) & -+ eth->soc->txrx.rx_irq_done_mask) - mtk_handle_irq_rx(irq, _eth); - } - if (mtk_r32(eth, reg_map->tx_irq_mask) & 
MTK_TX_DONE_INT) { -@@ -2341,16 +2451,16 @@ static void mtk_poll_controller(struct n - struct mtk_eth *eth = mac->hw; - - mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); -- mtk_rx_irq_disable(eth, MTK_RX_DONE_INT); -+ mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask); - mtk_handle_irq_rx(eth->irq[2], dev); - mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); -- mtk_rx_irq_enable(eth, MTK_RX_DONE_INT); -+ mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask); - } - #endif - - static int mtk_start_dma(struct mtk_eth *eth) - { -- u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0; -+ u32 val, rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0; - const struct mtk_reg_map *reg_map = eth->soc->reg_map; - int err; - -@@ -2361,12 +2471,19 @@ static int mtk_start_dma(struct mtk_eth - } - - if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { -- mtk_w32(eth, -- MTK_TX_WB_DDONE | MTK_TX_DMA_EN | -- MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO | -- MTK_RX_DMA_EN | MTK_RX_2B_OFFSET | -- MTK_RX_BT_32DWORDS, -- reg_map->qdma.glo_cfg); -+ val = mtk_r32(eth, reg_map->qdma.glo_cfg); -+ val |= MTK_TX_DMA_EN | MTK_RX_DMA_EN | -+ MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO | -+ MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE; -+ -+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) -+ val |= MTK_MUTLI_CNT | MTK_RESV_BUF | -+ MTK_WCOMP_EN | MTK_DMAD_WR_WDONE | -+ MTK_CHK_DDONE_EN; -+ else -+ val |= MTK_RX_BT_32DWORDS; -+ mtk_w32(eth, val, reg_map->qdma.glo_cfg); -+ - mtk_w32(eth, - MTK_RX_DMA_EN | rx_2b_offset | - MTK_RX_BT_32DWORDS | MTK_MULTI_EN, -@@ -2440,7 +2557,7 @@ static int mtk_open(struct net_device *d - napi_enable(ð->tx_napi); - napi_enable(ð->rx_napi); - mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); -- mtk_rx_irq_enable(eth, MTK_RX_DONE_INT); -+ mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask); - refcount_set(ð->dma_refcnt, 1); - } - else -@@ -2492,7 +2609,7 @@ static int mtk_stop(struct net_device *d - mtk_gdm_config(eth, MTK_GDMA_DROP_ALL); - - mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); -- mtk_rx_irq_disable(eth, MTK_RX_DONE_INT); -+ mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask); - napi_disable(ð->tx_napi); - napi_disable(ð->rx_napi); - -@@ -2652,9 +2769,25 @@ static int mtk_hw_init(struct mtk_eth *e - return 0; - } - -- /* Non-MT7628 handling... 
*/ -- ethsys_reset(eth, RSTCTRL_FE); -- ethsys_reset(eth, RSTCTRL_PPE); -+ val = RSTCTRL_FE | RSTCTRL_PPE; -+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { -+ regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0); -+ -+ val |= RSTCTRL_ETH; -+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) -+ val |= RSTCTRL_PPE1; -+ } -+ -+ ethsys_reset(eth, val); -+ -+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { -+ regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, -+ 0x3ffffff); -+ -+ /* Set FE to PDMAv2 if necessary */ -+ val = mtk_r32(eth, MTK_FE_GLO_MISC); -+ mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC); -+ } - - if (eth->pctl) { - /* Set GE2 driving and slew rate */ -@@ -2693,11 +2826,47 @@ static int mtk_hw_init(struct mtk_eth *e - - /* FE int grouping */ - mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp); -- mtk_w32(eth, MTK_RX_DONE_INT, reg_map->pdma.int_grp + 4); -+ mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4); - mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp); -- mtk_w32(eth, MTK_RX_DONE_INT, reg_map->qdma.int_grp + 4); -+ mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4); - mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP); - -+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { -+ /* PSE should not drop port8 and port9 packets */ -+ mtk_w32(eth, 0x00000300, PSE_DROP_CFG); -+ -+ /* PSE Free Queue Flow Control */ -+ mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2); -+ -+ /* PSE config input queue threshold */ -+ mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1)); -+ mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2)); -+ mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3)); -+ mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4)); -+ mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5)); -+ mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6)); -+ mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7)); -+ mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8)); -+ -+ /* PSE config output queue threshold */ -+ mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1)); -+ mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2)); -+ mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3)); -+ mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4)); -+ mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5)); -+ mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6)); -+ mtk_w32(eth, 0x00060006, PSE_OQ_TH(7)); -+ mtk_w32(eth, 0x00060006, PSE_OQ_TH(8)); -+ -+ /* GDM and CDM Threshold */ -+ mtk_w32(eth, 0x00000004, MTK_GDM2_THRES); -+ mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES); -+ mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES); -+ mtk_w32(eth, 0x00000004, MTK_CDME0_THRES); -+ mtk_w32(eth, 0x00000004, MTK_CDME1_THRES); -+ mtk_w32(eth, 0x00000004, MTK_CDMM_THRES); -+ } -+ - return 0; - - err_disable_pm: -@@ -3234,12 +3403,8 @@ static int mtk_probe(struct platform_dev - if (IS_ERR(eth->base)) - return PTR_ERR(eth->base); - -- if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { -- eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA; -+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) - eth->ip_align = NET_IP_ALIGN; -- } else { -- eth->rx_dma_l4_valid = RX_DMA_L4_VALID; -- } - - spin_lock_init(ð->page_lock); - spin_lock_init(ð->tx_irq_lock); -@@ -3475,6 +3640,10 @@ static const struct mtk_soc_data mt2701_ - .txrx = { - .txd_size = sizeof(struct mtk_tx_dma), - .rxd_size = sizeof(struct mtk_rx_dma), -+ .rx_irq_done_mask = MTK_RX_DONE_INT, -+ .rx_dma_l4_valid = RX_DMA_L4_VALID, -+ .dma_max_len = MTK_TX_DMA_BUF_LEN, -+ .dma_len_offset = 16, - }, - }; - -@@ -3488,6 +3657,10 @@ static const struct mtk_soc_data mt7621_ - .txrx = { - .txd_size = sizeof(struct mtk_tx_dma), - .rxd_size = sizeof(struct mtk_rx_dma), -+ .rx_irq_done_mask = MTK_RX_DONE_INT, -+ .rx_dma_l4_valid = 
RX_DMA_L4_VALID, -+ .dma_max_len = MTK_TX_DMA_BUF_LEN, -+ .dma_len_offset = 16, - }, - }; - -@@ -3502,6 +3675,10 @@ static const struct mtk_soc_data mt7622_ - .txrx = { - .txd_size = sizeof(struct mtk_tx_dma), - .rxd_size = sizeof(struct mtk_rx_dma), -+ .rx_irq_done_mask = MTK_RX_DONE_INT, -+ .rx_dma_l4_valid = RX_DMA_L4_VALID, -+ .dma_max_len = MTK_TX_DMA_BUF_LEN, -+ .dma_len_offset = 16, - }, - }; - -@@ -3515,6 +3692,10 @@ static const struct mtk_soc_data mt7623_ - .txrx = { - .txd_size = sizeof(struct mtk_tx_dma), - .rxd_size = sizeof(struct mtk_rx_dma), -+ .rx_irq_done_mask = MTK_RX_DONE_INT, -+ .rx_dma_l4_valid = RX_DMA_L4_VALID, -+ .dma_max_len = MTK_TX_DMA_BUF_LEN, -+ .dma_len_offset = 16, - }, - }; - -@@ -3528,6 +3709,10 @@ static const struct mtk_soc_data mt7629_ - .txrx = { - .txd_size = sizeof(struct mtk_tx_dma), - .rxd_size = sizeof(struct mtk_rx_dma), -+ .rx_irq_done_mask = MTK_RX_DONE_INT, -+ .rx_dma_l4_valid = RX_DMA_L4_VALID, -+ .dma_max_len = MTK_TX_DMA_BUF_LEN, -+ .dma_len_offset = 16, - }, - }; - -@@ -3540,6 +3725,10 @@ static const struct mtk_soc_data rt5350_ - .txrx = { - .txd_size = sizeof(struct mtk_tx_dma), - .rxd_size = sizeof(struct mtk_rx_dma), -+ .rx_irq_done_mask = MTK_RX_DONE_INT, -+ .rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA, -+ .dma_max_len = MTK_TX_DMA_BUF_LEN, -+ .dma_len_offset = 16, - }, - }; - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -23,6 +23,7 @@ - #define MTK_MAX_RX_LENGTH 1536 - #define MTK_MAX_RX_LENGTH_2K 2048 - #define MTK_TX_DMA_BUF_LEN 0x3fff -+#define MTK_TX_DMA_BUF_LEN_V2 0xffff - #define MTK_DMA_SIZE 512 - #define MTK_NAPI_WEIGHT 64 - #define MTK_MAC_COUNT 2 -@@ -83,6 +84,10 @@ - #define MTK_CDMQ_IG_CTRL 0x1400 - #define MTK_CDMQ_STAG_EN BIT(0) - -+/* CDMP Ingress Control Register */ -+#define MTK_CDMP_IG_CTRL 0x400 -+#define MTK_CDMP_STAG_EN BIT(0) -+ - /* CDMP Exgress Control Register */ - #define MTK_CDMP_EG_CTRL 0x404 - -@@ -102,13 +107,38 @@ - /* Unicast Filter MAC Address Register - High */ - #define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000)) - -+/* FE global misc reg*/ -+#define MTK_FE_GLO_MISC 0x124 -+ -+/* PSE Free Queue Flow Control */ -+#define PSE_FQFC_CFG1 0x100 -+#define PSE_FQFC_CFG2 0x104 -+#define PSE_DROP_CFG 0x108 -+ -+/* PSE Input Queue Reservation Register*/ -+#define PSE_IQ_REV(x) (0x140 + (((x) - 1) << 2)) -+ -+/* PSE Output Queue Threshold Register*/ -+#define PSE_OQ_TH(x) (0x160 + (((x) - 1) << 2)) -+ -+/* GDM and CDM Threshold */ -+#define MTK_GDM2_THRES 0x1530 -+#define MTK_CDMW0_THRES 0x164c -+#define MTK_CDMW1_THRES 0x1650 -+#define MTK_CDME0_THRES 0x1654 -+#define MTK_CDME1_THRES 0x1658 -+#define MTK_CDMM_THRES 0x165c -+ - /* PDMA HW LRO Control Registers */ - #define MTK_PDMA_LRO_CTRL_DW0 0x980 - #define MTK_LRO_EN BIT(0) - #define MTK_L3_CKS_UPD_EN BIT(7) -+#define MTK_L3_CKS_UPD_EN_V2 BIT(19) - #define MTK_LRO_ALT_PKT_CNT_MODE BIT(21) - #define MTK_LRO_RING_RELINQUISH_REQ (0x7 << 26) -+#define MTK_LRO_RING_RELINQUISH_REQ_V2 (0xf << 24) - #define MTK_LRO_RING_RELINQUISH_DONE (0x7 << 29) -+#define MTK_LRO_RING_RELINQUISH_DONE_V2 (0xf << 28) - - #define MTK_PDMA_LRO_CTRL_DW1 0x984 - #define MTK_PDMA_LRO_CTRL_DW2 0x988 -@@ -180,6 +210,13 @@ - #define MTK_TX_DMA_EN BIT(0) - #define MTK_DMA_BUSY_TIMEOUT_US 1000000 - -+/* QDMA V2 Global Configuration Register */ -+#define MTK_CHK_DDONE_EN BIT(28) -+#define MTK_DMAD_WR_WDONE BIT(26) -+#define MTK_WCOMP_EN BIT(24) -+#define MTK_RESV_BUF (0x40 << 16) -+#define MTK_MUTLI_CNT (0x4 << 12) -+ - /* QDMA Flow 
Control Register */ - #define FC_THRES_DROP_MODE BIT(20) - #define FC_THRES_DROP_EN (7 << 16) -@@ -199,11 +236,32 @@ - #define MTK_RX_DONE_INT MTK_RX_DONE_DLY - #define MTK_TX_DONE_INT MTK_TX_DONE_DLY - -+#define MTK_RX_DONE_INT_V2 BIT(14) -+ - /* QDMA Interrupt grouping registers */ - #define MTK_RLS_DONE_INT BIT(0) - - #define MTK_STAT_OFFSET 0x40 - -+/* QDMA TX NUM */ -+#define MTK_QDMA_TX_NUM 16 -+#define MTK_QDMA_TX_MASK (MTK_QDMA_TX_NUM - 1) -+#define QID_BITS_V2(x) (((x) & 0x3f) << 16) -+#define MTK_QDMA_GMAC2_QID 8 -+ -+#define MTK_TX_DMA_BUF_SHIFT 8 -+ -+/* QDMA V2 descriptor txd6 */ -+#define TX_DMA_INS_VLAN_V2 BIT(16) -+/* QDMA V2 descriptor txd5 */ -+#define TX_DMA_CHKSUM_V2 (0x7 << 28) -+#define TX_DMA_TSO_V2 BIT(31) -+ -+/* QDMA V2 descriptor txd4 */ -+#define TX_DMA_FPORT_SHIFT_V2 8 -+#define TX_DMA_FPORT_MASK_V2 0xf -+#define TX_DMA_SWC_V2 BIT(30) -+ - #define MTK_WDMA0_BASE 0x2800 - #define MTK_WDMA1_BASE 0x2c00 - -@@ -217,10 +275,9 @@ - /* QDMA descriptor txd3 */ - #define TX_DMA_OWNER_CPU BIT(31) - #define TX_DMA_LS0 BIT(30) --#define TX_DMA_PLEN0(_x) (((_x) & MTK_TX_DMA_BUF_LEN) << 16) --#define TX_DMA_PLEN1(_x) ((_x) & MTK_TX_DMA_BUF_LEN) -+#define TX_DMA_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset) -+#define TX_DMA_PLEN1(x) ((x) & eth->soc->txrx.dma_max_len) - #define TX_DMA_SWC BIT(14) --#define TX_DMA_SDL(_x) (((_x) & 0x3fff) << 16) - - /* PDMA on MT7628 */ - #define TX_DMA_DONE BIT(31) -@@ -230,12 +287,14 @@ - /* QDMA descriptor rxd2 */ - #define RX_DMA_DONE BIT(31) - #define RX_DMA_LSO BIT(30) --#define RX_DMA_PLEN0(_x) (((_x) & 0x3fff) << 16) --#define RX_DMA_GET_PLEN0(_x) (((_x) >> 16) & 0x3fff) -+#define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset) -+#define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->txrx.dma_len_offset) & eth->soc->txrx.dma_max_len) - #define RX_DMA_VTAG BIT(15) - - /* QDMA descriptor rxd3 */ --#define RX_DMA_VID(_x) ((_x) & 0xfff) -+#define RX_DMA_VID(x) ((x) & VLAN_VID_MASK) -+#define RX_DMA_TCI(x) ((x) & (VLAN_PRIO_MASK | VLAN_VID_MASK)) -+#define RX_DMA_VPID(x) (((x) >> 16) & 0xffff) - - /* QDMA descriptor rxd4 */ - #define MTK_RXD4_FOE_ENTRY GENMASK(13, 0) -@@ -246,10 +305,15 @@ - /* QDMA descriptor rxd4 */ - #define RX_DMA_L4_VALID BIT(24) - #define RX_DMA_L4_VALID_PDMA BIT(30) /* when PDMA is used */ --#define RX_DMA_FPORT_SHIFT 19 --#define RX_DMA_FPORT_MASK 0x7 - #define RX_DMA_SPECIAL_TAG BIT(22) - -+#define RX_DMA_GET_SPORT(x) (((x) >> 19) & 0xf) -+#define RX_DMA_GET_SPORT_V2(x) (((x) >> 26) & 0x7) -+ -+/* PDMA V2 descriptor rxd3 */ -+#define RX_DMA_VTAG_V2 BIT(0) -+#define RX_DMA_L4_VALID_V2 BIT(2) -+ - /* PHY Indirect Access Control registers */ - #define MTK_PHY_IAC 0x10004 - #define PHY_IAC_ACCESS BIT(31) -@@ -371,6 +435,16 @@ - #define ETHSYS_TRGMII_MT7621_DDR_PLL BIT(5) - - /* ethernet reset control register */ -+#define ETHSYS_RSTCTRL 0x34 -+#define RSTCTRL_FE BIT(6) -+#define RSTCTRL_PPE BIT(31) -+#define RSTCTRL_PPE1 BIT(30) -+#define RSTCTRL_ETH BIT(23) -+ -+/* ethernet reset check idle register */ -+#define ETHSYS_FE_RST_CHK_IDLE_EN 0x28 -+ -+/* ethernet reset control register */ - #define ETHSYS_RSTCTRL 0x34 - #define RSTCTRL_FE BIT(6) - #define RSTCTRL_PPE BIT(31) -@@ -454,6 +528,17 @@ struct mtk_rx_dma { - unsigned int rxd4; - } __packed __aligned(4); - -+struct mtk_rx_dma_v2 { -+ unsigned int rxd1; -+ unsigned int rxd2; -+ unsigned int rxd3; -+ unsigned int rxd4; -+ unsigned int rxd5; -+ unsigned int rxd6; -+ unsigned int rxd7; -+ unsigned int 
rxd8; -+} __packed __aligned(4); -+ - struct mtk_tx_dma { - unsigned int txd1; - unsigned int txd2; -@@ -461,6 +546,17 @@ struct mtk_tx_dma { - unsigned int txd4; - } __packed __aligned(4); - -+struct mtk_tx_dma_v2 { -+ unsigned int txd1; -+ unsigned int txd2; -+ unsigned int txd3; -+ unsigned int txd4; -+ unsigned int txd5; -+ unsigned int txd6; -+ unsigned int txd7; -+ unsigned int txd8; -+} __packed __aligned(4); -+ - struct mtk_eth; - struct mtk_mac; - -@@ -647,7 +743,9 @@ enum mkt_eth_capabilities { - MTK_SHARED_INT_BIT, - MTK_TRGMII_MT7621_CLK_BIT, - MTK_QDMA_BIT, -+ MTK_NETSYS_V2_BIT, - MTK_SOC_MT7628_BIT, -+ MTK_RSTCTRL_PPE1_BIT, - - /* MUX BITS*/ - MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT, -@@ -679,7 +777,9 @@ enum mkt_eth_capabilities { - #define MTK_SHARED_INT BIT(MTK_SHARED_INT_BIT) - #define MTK_TRGMII_MT7621_CLK BIT(MTK_TRGMII_MT7621_CLK_BIT) - #define MTK_QDMA BIT(MTK_QDMA_BIT) -+#define MTK_NETSYS_V2 BIT(MTK_NETSYS_V2_BIT) - #define MTK_SOC_MT7628 BIT(MTK_SOC_MT7628_BIT) -+#define MTK_RSTCTRL_PPE1 BIT(MTK_RSTCTRL_PPE1_BIT) - - #define MTK_ETH_MUX_GDM1_TO_GMAC1_ESW \ - BIT(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT) -@@ -756,6 +856,7 @@ struct mtk_tx_dma_desc_info { - dma_addr_t addr; - u32 size; - u16 vlan_tci; -+ u16 qid; - u8 gso:1; - u8 csum:1; - u8 vlan:1; -@@ -813,6 +914,10 @@ struct mtk_reg_map { - * the extra setup for those pins used by GMAC. - * @txd_size Tx DMA descriptor size. - * @rxd_size Rx DMA descriptor size. -+ * @rx_irq_done_mask Rx irq done register mask. -+ * @rx_dma_l4_valid Rx DMA valid register mask. -+ * @dma_max_len Max DMA tx/rx buffer length. -+ * @dma_len_offset Tx/Rx DMA length field offset. - */ - struct mtk_soc_data { - const struct mtk_reg_map *reg_map; -@@ -825,6 +930,10 @@ struct mtk_soc_data { - struct { - u32 txd_size; - u32 rxd_size; -+ u32 rx_irq_done_mask; -+ u32 rx_dma_l4_valid; -+ u32 dma_max_len; -+ u32 dma_len_offset; - } txrx; - }; - -@@ -943,7 +1052,6 @@ struct mtk_eth { - u32 tx_bytes; - struct dim tx_dim; - -- u32 rx_dma_l4_valid; - int ip_align; - - struct mtk_ppe *ppe; diff --git a/target/linux/generic/backport-6.1/702-v5.19-28-net-ethernet-mtk_eth_soc-convert-ring-dma-pointer-to.patch b/target/linux/generic/backport-6.1/702-v5.19-28-net-ethernet-mtk_eth_soc-convert-ring-dma-pointer-to.patch deleted file mode 100644 index 1ecb5e71b3a7..000000000000 --- a/target/linux/generic/backport-6.1/702-v5.19-28-net-ethernet-mtk_eth_soc-convert-ring-dma-pointer-to.patch +++ /dev/null @@ -1,135 +0,0 @@ -From: Lorenzo Bianconi -Date: Fri, 20 May 2022 20:11:37 +0200 -Subject: [PATCH] net: ethernet: mtk_eth_soc: convert ring dma pointer to void - -Simplify the code converting {tx,rx} ring dma pointer to void - -Signed-off-by: Lorenzo Bianconi -Signed-off-by: David S. 
Miller ---- - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -947,18 +947,15 @@ static int mtk_init_fq_dma(struct mtk_et - return 0; - } - --static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc) -+static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc) - { -- void *ret = ring->dma; -- -- return ret + (desc - ring->phys); -+ return ring->dma + (desc - ring->phys); - } - - static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring, -- struct mtk_tx_dma *txd, -- u32 txd_size) -+ void *txd, u32 txd_size) - { -- int idx = ((void *)txd - (void *)ring->dma) / txd_size; -+ int idx = (txd - ring->dma) / txd_size; - - return &ring->buf[idx]; - } -@@ -966,13 +963,12 @@ static struct mtk_tx_buf *mtk_desc_to_tx - static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring, - struct mtk_tx_dma *dma) - { -- return ring->dma_pdma - ring->dma + dma; -+ return ring->dma_pdma - (struct mtk_tx_dma *)ring->dma + dma; - } - --static int txd_to_idx(struct mtk_tx_ring *ring, struct mtk_tx_dma *dma, -- u32 txd_size) -+static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size) - { -- return ((void *)dma - (void *)ring->dma) / txd_size; -+ return (dma - ring->dma) / txd_size; - } - - static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf, -@@ -1389,7 +1385,7 @@ static struct mtk_rx_ring *mtk_get_rx_ri - - ring = ð->rx_ring[i]; - idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size); -- rxd = (void *)ring->dma + idx * eth->soc->txrx.rxd_size; -+ rxd = ring->dma + idx * eth->soc->txrx.rxd_size; - if (rxd->rxd2 & RX_DMA_DONE) { - ring->calc_idx_update = true; - return ring; -@@ -1441,7 +1437,7 @@ static int mtk_poll_rx(struct napi_struc - goto rx_done; - - idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size); -- rxd = (void *)ring->dma + idx * eth->soc->txrx.rxd_size; -+ rxd = ring->dma + idx * eth->soc->txrx.rxd_size; - data = ring->data[idx]; - - if (!mtk_rx_get_desc(eth, &trxd, rxd)) -@@ -1648,7 +1644,7 @@ static int mtk_poll_tx_pdma(struct mtk_e - - mtk_tx_unmap(eth, tx_buf, true); - -- desc = (void *)ring->dma + cpu * eth->soc->txrx.txd_size; -+ desc = ring->dma + cpu * eth->soc->txrx.txd_size; - ring->last_free = desc; - atomic_inc(&ring->free_count); - -@@ -1793,7 +1789,7 @@ static int mtk_tx_alloc(struct mtk_eth * - int next = (i + 1) % MTK_DMA_SIZE; - u32 next_ptr = ring->phys + next * sz; - -- txd = (void *)ring->dma + i * sz; -+ txd = ring->dma + i * sz; - txd->txd2 = next_ptr; - txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; - txd->txd4 = 0; -@@ -1823,7 +1819,7 @@ static int mtk_tx_alloc(struct mtk_eth * - - ring->dma_size = MTK_DMA_SIZE; - atomic_set(&ring->free_count, MTK_DMA_SIZE - 2); -- ring->next_free = &ring->dma[0]; -+ ring->next_free = ring->dma; - ring->last_free = (void *)txd; - ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz)); - ring->thresh = MAX_SKB_FRAGS; -@@ -1938,7 +1934,7 @@ static int mtk_rx_alloc(struct mtk_eth * - if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) - return -ENOMEM; - -- rxd = (void *)ring->dma + i * eth->soc->txrx.rxd_size; -+ rxd = ring->dma + i * eth->soc->txrx.rxd_size; - rxd->rxd1 = (unsigned int)dma_addr; - - if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) -@@ -2000,7 +1996,7 @@ static void mtk_rx_clean(struct mtk_eth - if (!ring->data[i]) - continue; - -- rxd = (void *)ring->dma + i * eth->soc->txrx.rxd_size; -+ rxd = ring->dma + i * eth->soc->txrx.rxd_size; - if (!rxd->rxd1) - continue; - ---- 
a/drivers/net/ethernet/mediatek/mtk_eth_soc.h -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -689,7 +689,7 @@ struct mtk_tx_buf { - * are present - */ - struct mtk_tx_ring { -- struct mtk_tx_dma *dma; -+ void *dma; - struct mtk_tx_buf *buf; - dma_addr_t phys; - struct mtk_tx_dma *next_free; -@@ -719,7 +719,7 @@ enum mtk_rx_flags { - * @calc_idx: The current head of ring - */ - struct mtk_rx_ring { -- struct mtk_rx_dma *dma; -+ void *dma; - u8 **data; - dma_addr_t phys; - u16 frag_size; diff --git a/target/linux/generic/backport-6.1/702-v5.19-29-net-ethernet-mtk_eth_soc-convert-scratch_ring-pointe.patch b/target/linux/generic/backport-6.1/702-v5.19-29-net-ethernet-mtk_eth_soc-convert-scratch_ring-pointe.patch deleted file mode 100644 index f7318e68bbdd..000000000000 --- a/target/linux/generic/backport-6.1/702-v5.19-29-net-ethernet-mtk_eth_soc-convert-scratch_ring-pointe.patch +++ /dev/null @@ -1,33 +0,0 @@ -From: Lorenzo Bianconi -Date: Fri, 20 May 2022 20:11:38 +0200 -Subject: [PATCH] net: ethernet: mtk_eth_soc: convert scratch_ring pointer to - void - -Simplify the code converting scratch_ring pointer to void - -Signed-off-by: Lorenzo Bianconi -Signed-off-by: David S. Miller ---- - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -923,7 +923,7 @@ static int mtk_init_fq_dma(struct mtk_et - for (i = 0; i < cnt; i++) { - struct mtk_tx_dma_v2 *txd; - -- txd = (void *)eth->scratch_ring + i * soc->txrx.txd_size; -+ txd = eth->scratch_ring + i * soc->txrx.txd_size; - txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE; - if (i < cnt - 1) - txd->txd2 = eth->phy_scratch_ring + ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -1029,7 +1029,7 @@ struct mtk_eth { - struct mtk_rx_ring rx_ring_qdma; - struct napi_struct tx_napi; - struct napi_struct rx_napi; -- struct mtk_tx_dma *scratch_ring; -+ void *scratch_ring; - dma_addr_t phy_scratch_ring; - void *scratch_head; - struct clk *clks[MTK_CLK_MAX]; diff --git a/target/linux/generic/backport-6.1/702-v5.19-30-net-ethernet-mtk_eth_soc-introduce-support-for-mt798.patch b/target/linux/generic/backport-6.1/702-v5.19-30-net-ethernet-mtk_eth_soc-introduce-support-for-mt798.patch deleted file mode 100644 index a4698d7814d9..000000000000 --- a/target/linux/generic/backport-6.1/702-v5.19-30-net-ethernet-mtk_eth_soc-introduce-support-for-mt798.patch +++ /dev/null @@ -1,138 +0,0 @@ -From: Lorenzo Bianconi -Date: Fri, 20 May 2022 20:11:39 +0200 -Subject: [PATCH] net: ethernet: mtk_eth_soc: introduce support for mt7986 - chipset - -Add support for mt7986-eth driver available on mt7986 soc. - -Tested-by: Sam Shih -Signed-off-by: Lorenzo Bianconi -Signed-off-by: David S. 
Miller ---- - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -87,6 +87,43 @@ static const struct mtk_reg_map mt7628_r - }, - }; - -+static const struct mtk_reg_map mt7986_reg_map = { -+ .tx_irq_mask = 0x461c, -+ .tx_irq_status = 0x4618, -+ .pdma = { -+ .rx_ptr = 0x6100, -+ .rx_cnt_cfg = 0x6104, -+ .pcrx_ptr = 0x6108, -+ .glo_cfg = 0x6204, -+ .rst_idx = 0x6208, -+ .delay_irq = 0x620c, -+ .irq_status = 0x6220, -+ .irq_mask = 0x6228, -+ .int_grp = 0x6250, -+ }, -+ .qdma = { -+ .qtx_cfg = 0x4400, -+ .rx_ptr = 0x4500, -+ .rx_cnt_cfg = 0x4504, -+ .qcrx_ptr = 0x4508, -+ .glo_cfg = 0x4604, -+ .rst_idx = 0x4608, -+ .delay_irq = 0x460c, -+ .fc_th = 0x4610, -+ .int_grp = 0x4620, -+ .hred = 0x4644, -+ .ctx_ptr = 0x4700, -+ .dtx_ptr = 0x4704, -+ .crx_ptr = 0x4710, -+ .drx_ptr = 0x4714, -+ .fq_head = 0x4720, -+ .fq_tail = 0x4724, -+ .fq_count = 0x4728, -+ .fq_blen = 0x472c, -+ }, -+ .gdm1_cnt = 0x1c00, -+}; -+ - /* strings used by ethtool */ - static const struct mtk_ethtool_stats { - char str[ETH_GSTRING_LEN]; -@@ -110,7 +147,7 @@ static const char * const mtk_clks_sourc - "ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll", - "sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb", - "sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb", -- "sgmii_ck", "eth2pll", -+ "sgmii_ck", "eth2pll", "wocpu0", "wocpu1", "netsys0", "netsys1" - }; - - void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg) -@@ -3712,6 +3749,21 @@ static const struct mtk_soc_data mt7629_ - }, - }; - -+static const struct mtk_soc_data mt7986_data = { -+ .reg_map = &mt7986_reg_map, -+ .ana_rgc3 = 0x128, -+ .caps = MT7986_CAPS, -+ .required_clks = MT7986_CLKS_BITMAP, -+ .required_pctl = false, -+ .txrx = { -+ .txd_size = sizeof(struct mtk_tx_dma_v2), -+ .rxd_size = sizeof(struct mtk_rx_dma_v2), -+ .rx_irq_done_mask = MTK_RX_DONE_INT_V2, -+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2, -+ .dma_len_offset = 8, -+ }, -+}; -+ - static const struct mtk_soc_data rt5350_data = { - .reg_map = &mt7628_reg_map, - .caps = MT7628_CAPS, -@@ -3734,6 +3786,7 @@ const struct of_device_id of_mtk_match[] - { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data}, - { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data}, - { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data}, -+ { .compatible = "mediatek,mt7986-eth", .data = &mt7986_data}, - { .compatible = "ralink,rt5350-eth", .data = &rt5350_data}, - {}, - }; ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -624,6 +624,10 @@ enum mtk_clks_map { - MTK_CLK_SGMII2_CDR_FB, - MTK_CLK_SGMII_CK, - MTK_CLK_ETH2PLL, -+ MTK_CLK_WOCPU0, -+ MTK_CLK_WOCPU1, -+ MTK_CLK_NETSYS0, -+ MTK_CLK_NETSYS1, - MTK_CLK_MAX - }; - -@@ -654,6 +658,16 @@ enum mtk_clks_map { - BIT(MTK_CLK_SGMII2_CDR_FB) | \ - BIT(MTK_CLK_SGMII_CK) | \ - BIT(MTK_CLK_ETH2PLL) | BIT(MTK_CLK_SGMIITOP)) -+#define MT7986_CLKS_BITMAP (BIT(MTK_CLK_FE) | BIT(MTK_CLK_GP2) | BIT(MTK_CLK_GP1) | \ -+ BIT(MTK_CLK_WOCPU1) | BIT(MTK_CLK_WOCPU0) | \ -+ BIT(MTK_CLK_SGMII_TX_250M) | \ -+ BIT(MTK_CLK_SGMII_RX_250M) | \ -+ BIT(MTK_CLK_SGMII_CDR_REF) | \ -+ BIT(MTK_CLK_SGMII_CDR_FB) | \ -+ BIT(MTK_CLK_SGMII2_TX_250M) | \ -+ BIT(MTK_CLK_SGMII2_RX_250M) | \ -+ BIT(MTK_CLK_SGMII2_CDR_REF) | \ -+ BIT(MTK_CLK_SGMII2_CDR_FB)) - - enum mtk_dev_state { - MTK_HW_INIT, -@@ -852,6 +866,10 @@ enum mkt_eth_capabilities { - MTK_MUX_U3_GMAC2_TO_QPHY | \ - MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA) - -+#define MT7986_CAPS 
(MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | \ -+ MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \ -+ MTK_NETSYS_V2 | MTK_RSTCTRL_PPE1) -+ - struct mtk_tx_dma_desc_info { - dma_addr_t addr; - u32 size; diff --git a/target/linux/generic/backport-6.1/702-v5.19-31-net-ethernet-mtk_eth_soc-fix-error-code-in-mtk_flow_.patch b/target/linux/generic/backport-6.1/702-v5.19-31-net-ethernet-mtk_eth_soc-fix-error-code-in-mtk_flow_.patch deleted file mode 100644 index e490333a9bbf..000000000000 --- a/target/linux/generic/backport-6.1/702-v5.19-31-net-ethernet-mtk_eth_soc-fix-error-code-in-mtk_flow_.patch +++ /dev/null @@ -1,25 +0,0 @@ -From: Dan Carpenter -Date: Thu, 19 May 2022 17:08:00 +0300 -Subject: [PATCH] net: ethernet: mtk_eth_soc: fix error code in - mtk_flow_offload_replace() - -Preserve the error code from mtk_foe_entry_commit(). Do not return -success. - -Fixes: c4f033d9e03e ("net: ethernet: mtk_eth_soc: rework hardware flow table management") -Signed-off-by: Dan Carpenter -Signed-off-by: David S. Miller ---- - ---- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c -+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c -@@ -434,7 +434,8 @@ mtk_flow_offload_replace(struct mtk_eth - memcpy(&entry->data, &foe, sizeof(entry->data)); - entry->wed_index = wed_index; - -- if (mtk_foe_entry_commit(eth->ppe, entry) < 0) -+ err = mtk_foe_entry_commit(eth->ppe, entry); -+ if (err < 0) - goto free; - - err = rhashtable_insert_fast(ð->flow_table, &entry->node, diff --git a/target/linux/generic/backport-6.1/702-v5.19-33-net-ethernet-mtk_eth_soc-enable-rx-cksum-offload-for.patch b/target/linux/generic/backport-6.1/702-v5.19-33-net-ethernet-mtk_eth_soc-enable-rx-cksum-offload-for.patch deleted file mode 100644 index d76df75dda8e..000000000000 --- a/target/linux/generic/backport-6.1/702-v5.19-33-net-ethernet-mtk_eth_soc-enable-rx-cksum-offload-for.patch +++ /dev/null @@ -1,47 +0,0 @@ -From: Lorenzo Bianconi -Date: Mon, 6 Jun 2022 21:49:00 +0200 -Subject: [PATCH] net: ethernet: mtk_eth_soc: enable rx cksum offload for - MTK_NETSYS_V2 - -Enable rx checksum offload for mt7986 chipset. 
- -Signed-off-by: Lorenzo Bianconi -Link: https://lore.kernel.org/r/c8699805c18f7fd38315fcb8da2787676d83a32c.1654544585.git.lorenzo@kernel.org -Signed-off-by: Jakub Kicinski ---- - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -1463,8 +1463,8 @@ static int mtk_poll_rx(struct napi_struc - int done = 0, bytes = 0; - - while (done < budget) { -+ unsigned int pktlen, *rxdcsum; - struct net_device *netdev; -- unsigned int pktlen; - dma_addr_t dma_addr; - u32 hash, reason; - int mac = 0; -@@ -1531,7 +1531,13 @@ static int mtk_poll_rx(struct napi_struc - pktlen = RX_DMA_GET_PLEN0(trxd.rxd2); - skb->dev = netdev; - skb_put(skb, pktlen); -- if (trxd.rxd4 & eth->soc->txrx.rx_dma_l4_valid) -+ -+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) -+ rxdcsum = &trxd.rxd3; -+ else -+ rxdcsum = &trxd.rxd4; -+ -+ if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid) - skb->ip_summed = CHECKSUM_UNNECESSARY; - else - skb_checksum_none_assert(skb); -@@ -3759,6 +3765,7 @@ static const struct mtk_soc_data mt7986_ - .txd_size = sizeof(struct mtk_tx_dma_v2), - .rxd_size = sizeof(struct mtk_rx_dma_v2), - .rx_irq_done_mask = MTK_RX_DONE_INT_V2, -+ .rx_dma_l4_valid = RX_DMA_L4_VALID_V2, - .dma_max_len = MTK_TX_DMA_BUF_LEN_V2, - .dma_len_offset = 8, - }, diff --git a/target/linux/generic/backport-6.1/702-v5.19-34-eth-mtk_ppe-fix-up-after-merge.patch b/target/linux/generic/backport-6.1/702-v5.19-34-eth-mtk_ppe-fix-up-after-merge.patch deleted file mode 100644 index 5303ca48a7e3..000000000000 --- a/target/linux/generic/backport-6.1/702-v5.19-34-eth-mtk_ppe-fix-up-after-merge.patch +++ /dev/null @@ -1,28 +0,0 @@ -From: Jakub Kicinski -Date: Thu, 19 May 2022 18:25:55 -0700 -Subject: [PATCH] eth: mtk_ppe: fix up after merge - -I missed this in the barrage of GCC 12 warnings. Commit cf2df74e202d -("net: fix dev_fill_forward_path with pppoe + bridge") changed -the pointer into an array. - -Fixes: d7e6f5836038 ("Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net") -Link: https://lore.kernel.org/r/20220520012555.2262461-1-kuba@kernel.org -Signed-off-by: Jakub Kicinski ---- - ---- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c -+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c -@@ -90,10 +90,11 @@ mtk_flow_get_wdma_info(struct net_device - { - struct net_device_path_ctx ctx = { - .dev = dev, -- .daddr = addr, - }; - struct net_device_path path = {}; - -+ memcpy(ctx.daddr, addr, sizeof(ctx.daddr)); -+ - if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)) - return -1; - diff --git a/target/linux/generic/backport-6.1/703-00-v5.16-net-convert-users-of-bitmap_foo-to-linkmode_foo.patch b/target/linux/generic/backport-6.1/703-00-v5.16-net-convert-users-of-bitmap_foo-to-linkmode_foo.patch deleted file mode 100644 index 919da4d1ef19..000000000000 --- a/target/linux/generic/backport-6.1/703-00-v5.16-net-convert-users-of-bitmap_foo-to-linkmode_foo.patch +++ /dev/null @@ -1,948 +0,0 @@ -From 4973056cceacc70966396039fae99867dfafd796 Mon Sep 17 00:00:00 2001 -From: Sean Anderson -Date: Fri, 22 Oct 2021 18:41:04 -0400 -Subject: [PATCH] net: convert users of bitmap_foo() to linkmode_foo() - -This converts instances of - bitmap_foo(args..., __ETHTOOL_LINK_MODE_MASK_NBITS) -to - linkmode_foo(args...) - -I manually fixed up some lines to prevent them from being excessively -long. 
Otherwise, this change was generated with the following semantic -patch: - -// Generated with -// echo linux/linkmode.h > includes -// git grep -Flf includes include/ | cut -f 2- -d / | cat includes - \ -// | sort | uniq | tee new_includes | wc -l && mv new_includes includes -// and repeating until the number stopped going up -@i@ -@@ - -( - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -| - #include -) - -@depends on i@ -expression list args; -@@ - -( -- bitmap_zero(args, __ETHTOOL_LINK_MODE_MASK_NBITS) -+ linkmode_zero(args) -| -- bitmap_copy(args, __ETHTOOL_LINK_MODE_MASK_NBITS) -+ linkmode_copy(args) -| -- bitmap_and(args, __ETHTOOL_LINK_MODE_MASK_NBITS) -+ linkmode_and(args) -| -- bitmap_or(args, __ETHTOOL_LINK_MODE_MASK_NBITS) -+ linkmode_or(args) -| -- bitmap_empty(args, ETHTOOL_LINK_MODE_MASK_NBITS) -+ linkmode_empty(args) -| -- bitmap_andnot(args, __ETHTOOL_LINK_MODE_MASK_NBITS) -+ linkmode_andnot(args) -| -- bitmap_equal(args, __ETHTOOL_LINK_MODE_MASK_NBITS) -+ linkmode_equal(args) -| -- bitmap_intersects(args, __ETHTOOL_LINK_MODE_MASK_NBITS) -+ linkmode_intersects(args) -| -- bitmap_subset(args, __ETHTOOL_LINK_MODE_MASK_NBITS) -+ linkmode_subset(args) -) - -Add missing linux/mii.h include to mellanox. -DaveM - -Signed-off-by: Sean Anderson -Signed-off-by: David S. 
Miller ---- - drivers/net/dsa/b53/b53_common.c | 6 ++---- - drivers/net/dsa/bcm_sf2.c | 8 +++---- - drivers/net/dsa/hirschmann/hellcreek.c | 6 ++---- - drivers/net/dsa/lantiq_gswip.c | 14 ++++++------- - drivers/net/dsa/microchip/ksz8795.c | 8 +++---- - drivers/net/dsa/mv88e6xxx/chip.c | 5 ++--- - drivers/net/dsa/ocelot/felix_vsc9959.c | 8 +++---- - drivers/net/dsa/ocelot/seville_vsc9953.c | 8 +++---- - drivers/net/dsa/qca/ar9331.c | 10 ++++----- - drivers/net/dsa/sja1105/sja1105_main.c | 7 +++---- - drivers/net/dsa/xrs700x/xrs700x.c | 8 +++---- - drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 8 +++---- - drivers/net/ethernet/atheros/ag71xx.c | 8 +++---- - drivers/net/ethernet/cadence/macb_main.c | 11 +++++----- - .../net/ethernet/freescale/enetc/enetc_pf.c | 8 +++---- - .../net/ethernet/huawei/hinic/hinic_ethtool.c | 10 ++++----- - .../net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 5 ++--- - drivers/net/ethernet/marvell/mvneta.c | 10 ++++----- - .../net/ethernet/marvell/mvpp2/mvpp2_main.c | 7 +++---- - .../marvell/octeontx2/nic/otx2_ethtool.c | 5 ++--- - drivers/net/ethernet/marvell/pxa168_eth.c | 3 +-- - .../net/ethernet/mellanox/mlx4/en_ethtool.c | 21 +++++++------------ - .../microchip/sparx5/sparx5_phylink.c | 7 +++---- - drivers/net/ethernet/mscc/ocelot_net.c | 7 +++---- - .../ethernet/pensando/ionic/ionic_ethtool.c | 3 +-- - .../net/ethernet/xilinx/xilinx_axienet_main.c | 8 +++---- - drivers/net/pcs/pcs-xpcs.c | 2 +- - drivers/net/phy/sfp-bus.c | 2 +- - net/ethtool/ioctl.c | 7 +++---- - 29 files changed, 87 insertions(+), 133 deletions(-) - ---- a/drivers/net/dsa/b53/b53_common.c -+++ b/drivers/net/dsa/b53/b53_common.c -@@ -1349,10 +1349,8 @@ void b53_phylink_validate(struct dsa_swi - phylink_set(mask, 100baseT_Full); - } - -- bitmap_and(supported, supported, mask, -- __ETHTOOL_LINK_MODE_MASK_NBITS); -- bitmap_and(state->advertising, state->advertising, mask, -- __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_and(supported, supported, mask); -+ linkmode_and(state->advertising, state->advertising, mask); - - phylink_helper_basex_speed(state); - } ---- a/drivers/net/dsa/bcm_sf2.c -+++ b/drivers/net/dsa/bcm_sf2.c -@@ -686,7 +686,7 @@ static void bcm_sf2_sw_validate(struct d - state->interface != PHY_INTERFACE_MODE_GMII && - state->interface != PHY_INTERFACE_MODE_INTERNAL && - state->interface != PHY_INTERFACE_MODE_MOCA) { -- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_zero(supported); - if (port != core_readl(priv, CORE_IMP0_PRT_ID)) - dev_err(ds->dev, - "Unsupported interface: %d for port %d\n", -@@ -714,10 +714,8 @@ static void bcm_sf2_sw_validate(struct d - phylink_set(mask, 100baseT_Half); - phylink_set(mask, 100baseT_Full); - -- bitmap_and(supported, supported, mask, -- __ETHTOOL_LINK_MODE_MASK_NBITS); -- bitmap_and(state->advertising, state->advertising, mask, -- __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_and(supported, supported, mask); -+ linkmode_and(state->advertising, state->advertising, mask); - } - - static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port, ---- a/drivers/net/dsa/hirschmann/hellcreek.c -+++ b/drivers/net/dsa/hirschmann/hellcreek.c -@@ -1476,10 +1476,8 @@ static void hellcreek_phylink_validate(s - else - phylink_set(mask, 1000baseT_Full); - -- bitmap_and(supported, supported, mask, -- __ETHTOOL_LINK_MODE_MASK_NBITS); -- bitmap_and(state->advertising, state->advertising, mask, -- __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_and(supported, supported, mask); -+ linkmode_and(state->advertising, state->advertising, mask); - } - - 
static int ---- a/drivers/net/dsa/lantiq_gswip.c -+++ b/drivers/net/dsa/lantiq_gswip.c -@@ -1452,10 +1452,8 @@ static void gswip_phylink_set_capab(unsi - phylink_set(mask, 100baseT_Half); - phylink_set(mask, 100baseT_Full); - -- bitmap_and(supported, supported, mask, -- __ETHTOOL_LINK_MODE_MASK_NBITS); -- bitmap_and(state->advertising, state->advertising, mask, -- __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_and(supported, supported, mask); -+ linkmode_and(state->advertising, state->advertising, mask); - } - - static void gswip_xrx200_phylink_validate(struct dsa_switch *ds, int port, -@@ -1483,7 +1481,7 @@ static void gswip_xrx200_phylink_validat - goto unsupported; - break; - default: -- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_zero(supported); - dev_err(ds->dev, "Unsupported port: %i\n", port); - return; - } -@@ -1493,7 +1491,7 @@ static void gswip_xrx200_phylink_validat - return; - - unsupported: -- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_zero(supported); - dev_err(ds->dev, "Unsupported interface '%s' for port %d\n", - phy_modes(state->interface), port); - } -@@ -1523,7 +1521,7 @@ static void gswip_xrx300_phylink_validat - goto unsupported; - break; - default: -- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_zero(supported); - dev_err(ds->dev, "Unsupported port: %i\n", port); - return; - } -@@ -1533,7 +1531,7 @@ static void gswip_xrx300_phylink_validat - return; - - unsupported: -- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_zero(supported); - dev_err(ds->dev, "Unsupported interface '%s' for port %d\n", - phy_modes(state->interface), port); - } ---- a/drivers/net/dsa/microchip/ksz8795.c -+++ b/drivers/net/dsa/microchip/ksz8795.c -@@ -1542,15 +1542,13 @@ static void ksz8_validate(struct dsa_swi - phylink_set(mask, 100baseT_Half); - phylink_set(mask, 100baseT_Full); - -- bitmap_and(supported, supported, mask, -- __ETHTOOL_LINK_MODE_MASK_NBITS); -- bitmap_and(state->advertising, state->advertising, mask, -- __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_and(supported, supported, mask); -+ linkmode_and(state->advertising, state->advertising, mask); - - return; - - unsupported: -- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_zero(supported); - dev_err(ds->dev, "Unsupported interface: %s, port: %d\n", - phy_modes(state->interface), port); - } ---- a/drivers/net/dsa/mv88e6xxx/chip.c -+++ b/drivers/net/dsa/mv88e6xxx/chip.c -@@ -683,9 +683,8 @@ static void mv88e6xxx_validate(struct ds - if (chip->info->ops->phylink_validate) - chip->info->ops->phylink_validate(chip, port, mask, state); - -- bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); -- bitmap_and(state->advertising, state->advertising, mask, -- __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_and(supported, supported, mask); -+ linkmode_and(state->advertising, state->advertising, mask); - - /* We can only operate at 2500BaseX or 1000BaseX. If requested - * to advertise both, only report advertising at 2500BaseX. 
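The driver hunks above (and the ones that follow) are mechanical applications of that semantic patch: every open-coded bitmap_*() call on an ethtool link-mode mask becomes the corresponding linkmode_*() helper, which carries the __ETHTOOL_LINK_MODE_MASK_NBITS length implicitly. As a minimal sketch of the resulting shape of a phylink validate callback (hypothetical foo_* driver with a single RGMII-only port; not taken from any of the deleted patches):

    /* assumes <linux/linkmode.h> and <linux/phylink.h> */
    static void foo_phylink_validate(struct dsa_switch *ds, int port,
                                     unsigned long *supported,
                                     struct phylink_link_state *state)
    {
            __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

            if (state->interface != PHY_INTERFACE_MODE_NA &&
                state->interface != PHY_INTERFACE_MODE_RGMII) {
                    /* unsupported interface: clear the whole mask */
                    linkmode_zero(supported);
                    return;
            }

            phylink_set(mask, Autoneg);
            phylink_set(mask, 100baseT_Full);
            phylink_set(mask, 1000baseT_Full);

            /* no more __ETHTOOL_LINK_MODE_MASK_NBITS bookkeeping */
            linkmode_and(supported, supported, mask);
            linkmode_and(state->advertising, state->advertising, mask);
    }
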
---- a/drivers/net/dsa/ocelot/felix_vsc9959.c -+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c -@@ -944,7 +944,7 @@ static void vsc9959_phylink_validate(str - - if (state->interface != PHY_INTERFACE_MODE_NA && - state->interface != ocelot_port->phy_mode) { -- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_zero(supported); - return; - } - -@@ -966,10 +966,8 @@ static void vsc9959_phylink_validate(str - phylink_set(mask, 2500baseX_Full); - } - -- bitmap_and(supported, supported, mask, -- __ETHTOOL_LINK_MODE_MASK_NBITS); -- bitmap_and(state->advertising, state->advertising, mask, -- __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_and(supported, supported, mask); -+ linkmode_and(state->advertising, state->advertising, mask); - } - - static int vsc9959_prevalidate_phy_mode(struct ocelot *ocelot, int port, ---- a/drivers/net/dsa/ocelot/seville_vsc9953.c -+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c -@@ -1000,7 +1000,7 @@ static void vsc9953_phylink_validate(str - - if (state->interface != PHY_INTERFACE_MODE_NA && - state->interface != ocelot_port->phy_mode) { -- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_zero(supported); - return; - } - -@@ -1019,10 +1019,8 @@ static void vsc9953_phylink_validate(str - phylink_set(mask, 2500baseX_Full); - } - -- bitmap_and(supported, supported, mask, -- __ETHTOOL_LINK_MODE_MASK_NBITS); -- bitmap_and(state->advertising, state->advertising, mask, -- __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_and(supported, supported, mask); -+ linkmode_and(state->advertising, state->advertising, mask); - } - - static int vsc9953_prevalidate_phy_mode(struct ocelot *ocelot, int port, ---- a/drivers/net/dsa/qca/ar9331.c -+++ b/drivers/net/dsa/qca/ar9331.c -@@ -522,7 +522,7 @@ static void ar9331_sw_phylink_validate(s - goto unsupported; - break; - default: -- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_zero(supported); - dev_err(ds->dev, "Unsupported port: %i\n", port); - return; - } -@@ -536,15 +536,13 @@ static void ar9331_sw_phylink_validate(s - phylink_set(mask, 100baseT_Half); - phylink_set(mask, 100baseT_Full); - -- bitmap_and(supported, supported, mask, -- __ETHTOOL_LINK_MODE_MASK_NBITS); -- bitmap_and(state->advertising, state->advertising, mask, -- __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_and(supported, supported, mask); -+ linkmode_and(state->advertising, state->advertising, mask); - - return; - - unsupported: -- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_zero(supported); - dev_err(ds->dev, "Unsupported interface: %d, port: %d\n", - state->interface, port); - } ---- a/drivers/net/dsa/sja1105/sja1105_main.c -+++ b/drivers/net/dsa/sja1105/sja1105_main.c -@@ -1360,7 +1360,7 @@ static void sja1105_phylink_validate(str - */ - if (state->interface != PHY_INTERFACE_MODE_NA && - sja1105_phy_mode_mismatch(priv, port, state->interface)) { -- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_zero(supported); - return; - } - -@@ -1380,9 +1380,8 @@ static void sja1105_phylink_validate(str - phylink_set(mask, 2500baseX_Full); - } - -- bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); -- bitmap_and(state->advertising, state->advertising, mask, -- __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_and(supported, supported, mask); -+ linkmode_and(state->advertising, state->advertising, mask); - } - - static int ---- a/drivers/net/dsa/xrs700x/xrs700x.c -+++ b/drivers/net/dsa/xrs700x/xrs700x.c -@@ -457,7 +457,7 @@ static void xrs700x_phylink_validate(str - 
phylink_set(mask, 1000baseT_Full); - break; - default: -- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_zero(supported); - dev_err(ds->dev, "Unsupported port: %i\n", port); - return; - } -@@ -468,10 +468,8 @@ static void xrs700x_phylink_validate(str - phylink_set(mask, 10baseT_Full); - phylink_set(mask, 100baseT_Full); - -- bitmap_and(supported, supported, mask, -- __ETHTOOL_LINK_MODE_MASK_NBITS); -- bitmap_and(state->advertising, state->advertising, mask, -- __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_and(supported, supported, mask); -+ linkmode_and(state->advertising, state->advertising, mask); - } - - static void xrs700x_mac_link_up(struct dsa_switch *ds, int port, ---- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c -+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c -@@ -369,9 +369,8 @@ static int xgbe_set_link_ksettings(struc - __ETHTOOL_LINK_MODE_MASK_NBITS, cmd->link_modes.advertising, - __ETHTOOL_LINK_MODE_MASK_NBITS, lks->link_modes.supported); - -- bitmap_and(advertising, -- cmd->link_modes.advertising, lks->link_modes.supported, -- __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_and(advertising, cmd->link_modes.advertising, -+ lks->link_modes.supported); - - if ((cmd->base.autoneg == AUTONEG_ENABLE) && - bitmap_empty(advertising, __ETHTOOL_LINK_MODE_MASK_NBITS)) { -@@ -384,8 +383,7 @@ static int xgbe_set_link_ksettings(struc - pdata->phy.autoneg = cmd->base.autoneg; - pdata->phy.speed = speed; - pdata->phy.duplex = cmd->base.duplex; -- bitmap_copy(lks->link_modes.advertising, advertising, -- __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_copy(lks->link_modes.advertising, advertising); - - if (cmd->base.autoneg == AUTONEG_ENABLE) - XGBE_SET_ADV(lks, Autoneg); ---- a/drivers/net/ethernet/atheros/ag71xx.c -+++ b/drivers/net/ethernet/atheros/ag71xx.c -@@ -1082,14 +1082,12 @@ static void ag71xx_mac_validate(struct p - phylink_set(mask, 1000baseX_Full); - } - -- bitmap_and(supported, supported, mask, -- __ETHTOOL_LINK_MODE_MASK_NBITS); -- bitmap_and(state->advertising, state->advertising, mask, -- __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_and(supported, supported, mask); -+ linkmode_and(state->advertising, state->advertising, mask); - - return; - unsupported: -- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_zero(supported); - } - - static void ag71xx_mac_pcs_get_state(struct phylink_config *config, ---- a/drivers/net/ethernet/cadence/macb_main.c -+++ b/drivers/net/ethernet/cadence/macb_main.c -@@ -523,21 +523,21 @@ static void macb_validate(struct phylink - state->interface != PHY_INTERFACE_MODE_SGMII && - state->interface != PHY_INTERFACE_MODE_10GBASER && - !phy_interface_mode_is_rgmii(state->interface)) { -- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_zero(supported); - return; - } - - if (!macb_is_gem(bp) && - (state->interface == PHY_INTERFACE_MODE_GMII || - phy_interface_mode_is_rgmii(state->interface))) { -- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_zero(supported); - return; - } - - if (state->interface == PHY_INTERFACE_MODE_10GBASER && - !(bp->caps & MACB_CAPS_HIGH_SPEED && - bp->caps & MACB_CAPS_PCS)) { -- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_zero(supported); - return; - } - -@@ -576,9 +576,8 @@ static void macb_validate(struct phylink - phylink_set(mask, 1000baseT_Half); - } - out: -- bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); -- bitmap_and(state->advertising, state->advertising, mask, -- __ETHTOOL_LINK_MODE_MASK_NBITS); -+ 
linkmode_and(supported, supported, mask); -+ linkmode_and(state->advertising, state->advertising, mask); - } - - static void macb_usx_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode, ---- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c -+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c -@@ -965,7 +965,7 @@ static void enetc_pl_mac_validate(struct - state->interface != PHY_INTERFACE_MODE_2500BASEX && - state->interface != PHY_INTERFACE_MODE_USXGMII && - !phy_interface_mode_is_rgmii(state->interface)) { -- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_zero(supported); - return; - } - -@@ -988,10 +988,8 @@ static void enetc_pl_mac_validate(struct - phylink_set(mask, 2500baseX_Full); - } - -- bitmap_and(supported, supported, mask, -- __ETHTOOL_LINK_MODE_MASK_NBITS); -- bitmap_and(state->advertising, state->advertising, mask, -- __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_and(supported, supported, mask); -+ linkmode_and(state->advertising, state->advertising, mask); - } - - static void enetc_pl_mac_config(struct phylink_config *config, ---- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c -+++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c -@@ -322,12 +322,10 @@ static int hinic_get_link_ksettings(stru - } - } - -- bitmap_copy(link_ksettings->link_modes.supported, -- (unsigned long *)&settings.supported, -- __ETHTOOL_LINK_MODE_MASK_NBITS); -- bitmap_copy(link_ksettings->link_modes.advertising, -- (unsigned long *)&settings.advertising, -- __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_copy(link_ksettings->link_modes.supported, -+ (unsigned long *)&settings.supported); -+ linkmode_copy(link_ksettings->link_modes.advertising, -+ (unsigned long *)&settings.advertising); - - return 0; - } ---- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c -+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c -@@ -467,9 +467,8 @@ static int ixgbe_set_link_ksettings(stru - * this function does not support duplex forcing, but can - * limit the advertising of the adapter to the specified speed - */ -- if (!bitmap_subset(cmd->link_modes.advertising, -- cmd->link_modes.supported, -- __ETHTOOL_LINK_MODE_MASK_NBITS)) -+ if (!linkmode_subset(cmd->link_modes.advertising, -+ cmd->link_modes.supported)) - return -EINVAL; - - /* only allow one speed at a time if no autoneg */ ---- a/drivers/net/ethernet/marvell/mvneta.c -+++ b/drivers/net/ethernet/marvell/mvneta.c -@@ -3835,14 +3835,14 @@ static void mvneta_validate(struct phyli - */ - if (phy_interface_mode_is_8023z(state->interface)) { - if (!phylink_test(state->advertising, Autoneg)) { -- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_zero(supported); - return; - } - } else if (state->interface != PHY_INTERFACE_MODE_NA && - state->interface != PHY_INTERFACE_MODE_QSGMII && - state->interface != PHY_INTERFACE_MODE_SGMII && - !phy_interface_mode_is_rgmii(state->interface)) { -- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_zero(supported); - return; - } - -@@ -3871,10 +3871,8 @@ static void mvneta_validate(struct phyli - phylink_set(mask, 100baseT_Full); - } - -- bitmap_and(supported, supported, mask, -- __ETHTOOL_LINK_MODE_MASK_NBITS); -- bitmap_and(state->advertising, state->advertising, mask, -- __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_and(supported, supported, mask); -+ linkmode_and(state->advertising, state->advertising, mask); - - /* We can only operate at 2500BaseX or 1000BaseX. If requested - * to advertise both, only report advertising at 2500BaseX. 
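The ethtool-facing hunks (xgbe, ixgbe, otx2 and friends) apply the same helpers to the link_ksettings masks. A hedged sketch of the usual set_link_ksettings() checks after the conversion (hypothetical foo_* driver; only the helpers shown in the hunks above are assumed):

    static int foo_set_link_ksettings(struct net_device *dev,
                                      const struct ethtool_link_ksettings *cmd)
    {
            __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };

            /* reject modes the hardware cannot do at all */
            if (!linkmode_subset(cmd->link_modes.advertising,
                                 cmd->link_modes.supported))
                    return -EINVAL;

            /* clamp the request to what is actually supported */
            linkmode_and(advertising, cmd->link_modes.advertising,
                         cmd->link_modes.supported);

            /* autoneg needs at least one remaining mode to advertise */
            if (cmd->base.autoneg == AUTONEG_ENABLE &&
                linkmode_empty(advertising))
                    return -EINVAL;

            /* ... program the MAC/PHY from the clamped mask here ... */
            return 0;
    }
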
---- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c -+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c -@@ -6347,15 +6347,14 @@ static void mvpp2_phylink_validate(struc - goto empty_set; - } - -- bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); -- bitmap_and(state->advertising, state->advertising, mask, -- __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_and(supported, supported, mask); -+ linkmode_and(state->advertising, state->advertising, mask); - - phylink_helper_basex_speed(state); - return; - - empty_set: -- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_zero(supported); - } - - static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode, ---- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c -+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c -@@ -1168,9 +1168,8 @@ static int otx2_set_link_ksettings(struc - otx2_get_link_ksettings(netdev, &cur_ks); - - /* Check requested modes against supported modes by hardware */ -- if (!bitmap_subset(cmd->link_modes.advertising, -- cur_ks.link_modes.supported, -- __ETHTOOL_LINK_MODE_MASK_NBITS)) -+ if (!linkmode_subset(cmd->link_modes.advertising, -+ cur_ks.link_modes.supported)) - return -EINVAL; - - mutex_lock(&mbox->lock); ---- a/drivers/net/ethernet/marvell/pxa168_eth.c -+++ b/drivers/net/ethernet/marvell/pxa168_eth.c -@@ -977,8 +977,7 @@ static int pxa168_init_phy(struct net_de - cmd.base.phy_address = pep->phy_addr; - cmd.base.speed = pep->phy_speed; - cmd.base.duplex = pep->phy_duplex; -- bitmap_copy(cmd.link_modes.advertising, PHY_BASIC_FEATURES, -- __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_copy(cmd.link_modes.advertising, PHY_BASIC_FEATURES); - cmd.base.autoneg = AUTONEG_ENABLE; - - if (cmd.base.speed != 0) ---- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c -+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c -@@ -39,6 +39,7 @@ - #include - #include - #include -+#include - - #include "mlx4_en.h" - #include "en_port.h" -@@ -643,10 +644,8 @@ static unsigned long *ptys2ethtool_link_ - unsigned int i; \ - cfg = &ptys2ethtool_map[reg_]; \ - cfg->speed = speed_; \ -- bitmap_zero(cfg->supported, \ -- __ETHTOOL_LINK_MODE_MASK_NBITS); \ -- bitmap_zero(cfg->advertised, \ -- __ETHTOOL_LINK_MODE_MASK_NBITS); \ -+ linkmode_zero(cfg->supported); \ -+ linkmode_zero(cfg->advertised); \ - for (i = 0 ; i < ARRAY_SIZE(modes) ; ++i) { \ - __set_bit(modes[i], cfg->supported); \ - __set_bit(modes[i], cfg->advertised); \ -@@ -702,10 +701,8 @@ static void ptys2ethtool_update_link_mod - int i; - for (i = 0; i < MLX4_LINK_MODES_SZ; i++) { - if (eth_proto & MLX4_PROT_MASK(i)) -- bitmap_or(link_modes, link_modes, -- ptys2ethtool_link_mode(&ptys2ethtool_map[i], -- report), -- __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_or(link_modes, link_modes, -+ ptys2ethtool_link_mode(&ptys2ethtool_map[i], report)); - } - } - -@@ -716,11 +713,9 @@ static u32 ethtool2ptys_link_modes(const - u32 ptys_modes = 0; - - for (i = 0; i < MLX4_LINK_MODES_SZ; i++) { -- if (bitmap_intersects( -- ptys2ethtool_link_mode(&ptys2ethtool_map[i], -- report), -- link_modes, -- __ETHTOOL_LINK_MODE_MASK_NBITS)) -+ ulong *map_mode = ptys2ethtool_link_mode(&ptys2ethtool_map[i], -+ report); -+ if (linkmode_intersects(map_mode, link_modes)) - ptys_modes |= 1 << i; - } - return ptys_modes; ---- a/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c -+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c -@@ -92,12 +92,11 @@ static void sparx5_phylink_validate(stru - } - break; - default: -- 
bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_zero(supported); - return; - } -- bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); -- bitmap_and(state->advertising, state->advertising, mask, -- __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_and(supported, supported, mask); -+ linkmode_and(state->advertising, state->advertising, mask); - } - - static void sparx5_phylink_mac_config(struct phylink_config *config, ---- a/drivers/net/ethernet/mscc/ocelot_net.c -+++ b/drivers/net/ethernet/mscc/ocelot_net.c -@@ -1509,7 +1509,7 @@ static void vsc7514_phylink_validate(str - - if (state->interface != PHY_INTERFACE_MODE_NA && - state->interface != ocelot_port->phy_mode) { -- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_zero(supported); - return; - } - -@@ -1528,9 +1528,8 @@ static void vsc7514_phylink_validate(str - phylink_set(mask, 2500baseT_Full); - phylink_set(mask, 2500baseX_Full); - -- bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); -- bitmap_and(state->advertising, state->advertising, mask, -- __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_and(supported, supported, mask); -+ linkmode_and(state->advertising, state->advertising, mask); - } - - static void vsc7514_phylink_mac_config(struct phylink_config *config, ---- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c -+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c -@@ -228,8 +228,7 @@ static int ionic_get_link_ksettings(stru - break; - } - -- bitmap_copy(ks->link_modes.advertising, ks->link_modes.supported, -- __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_copy(ks->link_modes.advertising, ks->link_modes.supported); - - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); - ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); ---- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c -+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c -@@ -1565,7 +1565,7 @@ static void axienet_validate(struct phyl - netdev_warn(ndev, "Cannot use PHY mode %s, supported: %s\n", - phy_modes(state->interface), - phy_modes(lp->phy_mode)); -- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_zero(supported); - return; - } - } -@@ -1598,10 +1598,8 @@ static void axienet_validate(struct phyl - break; - } - -- bitmap_and(supported, supported, mask, -- __ETHTOOL_LINK_MODE_MASK_NBITS); -- bitmap_and(state->advertising, state->advertising, mask, -- __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_and(supported, supported, mask); -+ linkmode_and(state->advertising, state->advertising, mask); - } - - static void axienet_mac_pcs_get_state(struct phylink_config *config, ---- a/drivers/net/pcs/pcs-xpcs.c -+++ b/drivers/net/pcs/pcs-xpcs.c -@@ -637,7 +637,7 @@ void xpcs_validate(struct dw_xpcs *xpcs, - if (state->interface == PHY_INTERFACE_MODE_NA) - return; - -- bitmap_zero(xpcs_supported, __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_zero(xpcs_supported); - - compat = xpcs_find_compat(xpcs->id, state->interface); - ---- a/drivers/net/phy/sfp-bus.c -+++ b/drivers/net/phy/sfp-bus.c -@@ -379,7 +379,7 @@ void sfp_parse_support(struct sfp_bus *b - if (bus->sfp_quirk) - bus->sfp_quirk->modes(id, modes); - -- bitmap_or(support, support, modes, __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_or(support, support, modes); - - phylink_set(support, Autoneg); - phylink_set(support, Pause); ---- a/net/ethtool/ioctl.c -+++ b/net/ethtool/ioctl.c -@@ -335,7 +335,7 @@ EXPORT_SYMBOL(ethtool_intersect_link_mas - void ethtool_convert_legacy_u32_to_link_mode(unsigned long 
*dst, - u32 legacy_u32) - { -- bitmap_zero(dst, __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_zero(dst); - dst[0] = legacy_u32; - } - EXPORT_SYMBOL(ethtool_convert_legacy_u32_to_link_mode); -@@ -350,11 +350,10 @@ bool ethtool_convert_link_mode_to_legacy - if (__ETHTOOL_LINK_MODE_MASK_NBITS > 32) { - __ETHTOOL_DECLARE_LINK_MODE_MASK(ext); - -- bitmap_zero(ext, __ETHTOOL_LINK_MODE_MASK_NBITS); -+ linkmode_zero(ext); - bitmap_fill(ext, 32); - bitmap_complement(ext, ext, __ETHTOOL_LINK_MODE_MASK_NBITS); -- if (bitmap_intersects(ext, src, -- __ETHTOOL_LINK_MODE_MASK_NBITS)) { -+ if (linkmode_intersects(ext, src)) { - /* src mask goes beyond bit 31 */ - retval = false; - } diff --git a/target/linux/generic/backport-6.1/703-01-v5.16-net-phylink-add-MAC-phy_interface_t-bitmap.patch b/target/linux/generic/backport-6.1/703-01-v5.16-net-phylink-add-MAC-phy_interface_t-bitmap.patch deleted file mode 100644 index 885c2fcf2527..000000000000 --- a/target/linux/generic/backport-6.1/703-01-v5.16-net-phylink-add-MAC-phy_interface_t-bitmap.patch +++ /dev/null @@ -1,24 +0,0 @@ -From 38c310eb46f5f80213a92093af11af270c209a76 Mon Sep 17 00:00:00 2001 -From: Russell King -Date: Tue, 26 Oct 2021 11:06:06 +0100 -Subject: [PATCH] net: phylink: add MAC phy_interface_t bitmap - -Add a phy_interface_t bitmap so the MAC driver can specifiy which PHY -interface modes it supports. - -Signed-off-by: Russell King -Signed-off-by: David S. Miller ---- - include/linux/phylink.h | 1 + - 1 file changed, 1 insertion(+) - ---- a/include/linux/phylink.h -+++ b/include/linux/phylink.h -@@ -78,6 +78,7 @@ struct phylink_config { - bool ovr_an_inband; - void (*get_fixed_state)(struct phylink_config *config, - struct phylink_link_state *state); -+ DECLARE_PHY_INTERFACE_MASK(supported_interfaces); - }; - - /** diff --git a/target/linux/generic/backport-6.1/703-02-v5.16-net-phylink-use-supported_interfaces-for-phylink-val.patch b/target/linux/generic/backport-6.1/703-02-v5.16-net-phylink-use-supported_interfaces-for-phylink-val.patch deleted file mode 100644 index 9800884f6e9a..000000000000 --- a/target/linux/generic/backport-6.1/703-02-v5.16-net-phylink-use-supported_interfaces-for-phylink-val.patch +++ /dev/null @@ -1,98 +0,0 @@ -From d25f3a74f30aace819163dfa54f2a4b8ca1dc932 Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Tue, 26 Oct 2021 11:06:11 +0100 -Subject: [PATCH] net: phylink: use supported_interfaces for phylink - validation - -If the network device supplies a supported interface bitmap, we can use -that during phylink's validation to simplify MAC drivers in two ways by -using the supported_interfaces bitmap to: - -1. reject unsupported interfaces before calling into the MAC driver. -2. generate the set of all supported link modes across all supported - interfaces (used mainly for SFP, but also some 10G PHYs.) - -Suggested-by: Sean Anderson -Signed-off-by: Russell King (Oracle) -Signed-off-by: David S. Miller ---- - drivers/net/phy/phylink.c | 36 ++++++++++++++++++++++++++++++++++++ - include/linux/phylink.h | 12 ++++++++++-- - 2 files changed, 46 insertions(+), 2 deletions(-) - ---- a/drivers/net/phy/phylink.c -+++ b/drivers/net/phy/phylink.c -@@ -155,9 +155,45 @@ static const char *phylink_an_mode_str(u - return mode < ARRAY_SIZE(modestr) ? 
modestr[mode] : "unknown"; - } - -+static int phylink_validate_any(struct phylink *pl, unsigned long *supported, -+ struct phylink_link_state *state) -+{ -+ __ETHTOOL_DECLARE_LINK_MODE_MASK(all_adv) = { 0, }; -+ __ETHTOOL_DECLARE_LINK_MODE_MASK(all_s) = { 0, }; -+ __ETHTOOL_DECLARE_LINK_MODE_MASK(s); -+ struct phylink_link_state t; -+ int intf; -+ -+ for (intf = 0; intf < PHY_INTERFACE_MODE_MAX; intf++) { -+ if (test_bit(intf, pl->config->supported_interfaces)) { -+ linkmode_copy(s, supported); -+ -+ t = *state; -+ t.interface = intf; -+ pl->mac_ops->validate(pl->config, s, &t); -+ linkmode_or(all_s, all_s, s); -+ linkmode_or(all_adv, all_adv, t.advertising); -+ } -+ } -+ -+ linkmode_copy(supported, all_s); -+ linkmode_copy(state->advertising, all_adv); -+ -+ return phylink_is_empty_linkmode(supported) ? -EINVAL : 0; -+} -+ - static int phylink_validate(struct phylink *pl, unsigned long *supported, - struct phylink_link_state *state) - { -+ if (!phy_interface_empty(pl->config->supported_interfaces)) { -+ if (state->interface == PHY_INTERFACE_MODE_NA) -+ return phylink_validate_any(pl, supported, state); -+ -+ if (!test_bit(state->interface, -+ pl->config->supported_interfaces)) -+ return -EINVAL; -+ } -+ - pl->mac_ops->validate(pl->config, supported, state); - - return phylink_is_empty_linkmode(supported) ? -EINVAL : 0; ---- a/include/linux/phylink.h -+++ b/include/linux/phylink.h -@@ -68,6 +68,8 @@ enum phylink_op_type { - * @ovr_an_inband: if true, override PCS to MLO_AN_INBAND - * @get_fixed_state: callback to execute to determine the fixed link state, - * if MAC link is at %MLO_AN_FIXED mode. -+ * @supported_interfaces: bitmap describing which PHY_INTERFACE_MODE_xxx -+ * are supported by the MAC/PCS. - */ - struct phylink_config { - struct device *dev; -@@ -136,8 +138,14 @@ struct phylink_mac_ops { - * based on @state->advertising and/or @state->speed and update - * @state->interface accordingly. See phylink_helper_basex_speed(). - * -- * When @state->interface is %PHY_INTERFACE_MODE_NA, phylink expects the -- * MAC driver to return all supported link modes. -+ * When @config->supported_interfaces has been set, phylink will iterate -+ * over the supported interfaces to determine the full capability of the -+ * MAC. The validation function must not print errors if @state->interface -+ * is set to an unexpected value. -+ * -+ * When @config->supported_interfaces is empty, phylink will call this -+ * function with @state->interface set to %PHY_INTERFACE_MODE_NA, and -+ * expects the MAC driver to return all supported link modes. - * - * If the @state->interface mode is not supported, then the @supported - * mask must be cleared. diff --git a/target/linux/generic/backport-6.1/703-03-v5.16-net-dsa-populate-supported_interfaces-member.patch b/target/linux/generic/backport-6.1/703-03-v5.16-net-dsa-populate-supported_interfaces-member.patch deleted file mode 100644 index b10e6da0e9a7..000000000000 --- a/target/linux/generic/backport-6.1/703-03-v5.16-net-dsa-populate-supported_interfaces-member.patch +++ /dev/null @@ -1,63 +0,0 @@ -From c07c6e8eb4b38bae921f9e2f108d1e7f8e14226e Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Marek=20Beh=C3=BAn?= -Date: Thu, 28 Oct 2021 18:00:14 +0100 -Subject: [PATCH] net: dsa: populate supported_interfaces member -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Add a new DSA switch operation, phylink_get_interfaces, which should -fill in which PHY_INTERFACE_MODE_* are supported by given port. 
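As a loose sketch of what such an operation might look like in a switch driver (hypothetical foo_* hardware with internal PHYs on the user ports and an RGMII CPU port on index 5; not taken from the deleted patch):

    static void foo_phylink_get_interfaces(struct dsa_switch *ds, int port,
                                           unsigned long *supported_interfaces)
    {
            /* user ports only talk to the built-in PHYs */
            __set_bit(PHY_INTERFACE_MODE_INTERNAL, supported_interfaces);

            /* the CPU port is wired as RGMII, any delay variant */
            if (port == 5) {
                    __set_bit(PHY_INTERFACE_MODE_RGMII, supported_interfaces);
                    __set_bit(PHY_INTERFACE_MODE_RGMII_ID, supported_interfaces);
                    __set_bit(PHY_INTERFACE_MODE_RGMII_RXID, supported_interfaces);
                    __set_bit(PHY_INTERFACE_MODE_RGMII_TXID, supported_interfaces);
            }
    }
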
- -Use this before phylink_create() to fill phylinks supported_interfaces -member, allowing phylink to determine which PHY_INTERFACE_MODEs are -supported. - -Signed-off-by: Marek Behún -[tweaked patch and description to add more complete support -- rmk] -Signed-off-by: Russell King -Signed-off-by: Russell King (Oracle) -Signed-off-by: David S. Miller ---- - include/net/dsa.h | 2 ++ - net/dsa/port.c | 4 ++++ - net/dsa/slave.c | 4 ++++ - 3 files changed, 10 insertions(+) - ---- a/include/net/dsa.h -+++ b/include/net/dsa.h -@@ -626,6 +626,8 @@ struct dsa_switch_ops { - /* - * PHYLINK integration - */ -+ void (*phylink_get_interfaces)(struct dsa_switch *ds, int port, -+ unsigned long *supported_interfaces); - void (*phylink_validate)(struct dsa_switch *ds, int port, - unsigned long *supported, - struct phylink_link_state *state); ---- a/net/dsa/port.c -+++ b/net/dsa/port.c -@@ -1188,6 +1188,10 @@ static int dsa_port_phylink_register(str - dp->pl_config.type = PHYLINK_DEV; - dp->pl_config.pcs_poll = ds->pcs_poll; - -+ if (ds->ops->phylink_get_interfaces) -+ ds->ops->phylink_get_interfaces(ds, dp->index, -+ dp->pl_config.supported_interfaces); -+ - dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn), - mode, &dsa_port_phylink_mac_ops); - if (IS_ERR(dp->pl)) { ---- a/net/dsa/slave.c -+++ b/net/dsa/slave.c -@@ -1837,6 +1837,10 @@ static int dsa_slave_phy_setup(struct ne - dp->pl_config.poll_fixed_state = true; - } - -+ if (ds->ops->phylink_get_interfaces) -+ ds->ops->phylink_get_interfaces(ds, dp->index, -+ dp->pl_config.supported_interfaces); -+ - dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn), mode, - &dsa_port_phylink_mac_ops); - if (IS_ERR(dp->pl)) { diff --git a/target/linux/generic/backport-6.1/703-04-v5.17-net-dsa-consolidate-phylink-creation.patch b/target/linux/generic/backport-6.1/703-04-v5.17-net-dsa-consolidate-phylink-creation.patch deleted file mode 100644 index 9b67a8a51857..000000000000 --- a/target/linux/generic/backport-6.1/703-04-v5.17-net-dsa-consolidate-phylink-creation.patch +++ /dev/null @@ -1,149 +0,0 @@ -From 21bd64bd717dedac96f53b668144cbe37d3c12d4 Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Tue, 30 Nov 2021 13:09:55 +0000 -Subject: [PATCH] net: dsa: consolidate phylink creation -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -The code in port.c and slave.c creating the phylink instance is very -similar - let's consolidate this into a single function. 
- -Signed-off-by: Russell King (Oracle) -Reviewed-by: Marek Behún -Reviewed-by: Andrew Lunn -Signed-off-by: Jakub Kicinski ---- - net/dsa/dsa_priv.h | 2 +- - net/dsa/port.c | 44 ++++++++++++++++++++++++++++---------------- - net/dsa/slave.c | 19 +++---------------- - 3 files changed, 32 insertions(+), 33 deletions(-) - ---- a/net/dsa/dsa_priv.h -+++ b/net/dsa/dsa_priv.h -@@ -261,13 +261,13 @@ int dsa_port_mrp_add_ring_role(const str - const struct switchdev_obj_ring_role_mrp *mrp); - int dsa_port_mrp_del_ring_role(const struct dsa_port *dp, - const struct switchdev_obj_ring_role_mrp *mrp); -+int dsa_port_phylink_create(struct dsa_port *dp); - int dsa_port_link_register_of(struct dsa_port *dp); - void dsa_port_link_unregister_of(struct dsa_port *dp); - int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr); - void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr); - int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast); - void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast); --extern const struct phylink_mac_ops dsa_port_phylink_mac_ops; - - static inline bool dsa_port_offloads_bridge_port(struct dsa_port *dp, - const struct net_device *dev) ---- a/net/dsa/port.c -+++ b/net/dsa/port.c -@@ -1092,7 +1092,7 @@ static void dsa_port_phylink_mac_link_up - speed, duplex, tx_pause, rx_pause); - } - --const struct phylink_mac_ops dsa_port_phylink_mac_ops = { -+static const struct phylink_mac_ops dsa_port_phylink_mac_ops = { - .validate = dsa_port_phylink_validate, - .mac_pcs_get_state = dsa_port_phylink_mac_pcs_get_state, - .mac_config = dsa_port_phylink_mac_config, -@@ -1101,6 +1101,30 @@ const struct phylink_mac_ops dsa_port_ph - .mac_link_up = dsa_port_phylink_mac_link_up, - }; - -+int dsa_port_phylink_create(struct dsa_port *dp) -+{ -+ struct dsa_switch *ds = dp->ds; -+ phy_interface_t mode; -+ int err; -+ -+ err = of_get_phy_mode(dp->dn, &mode); -+ if (err) -+ mode = PHY_INTERFACE_MODE_NA; -+ -+ if (ds->ops->phylink_get_interfaces) -+ ds->ops->phylink_get_interfaces(ds, dp->index, -+ dp->pl_config.supported_interfaces); -+ -+ dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(dp->dn), -+ mode, &dsa_port_phylink_mac_ops); -+ if (IS_ERR(dp->pl)) { -+ pr_err("error creating PHYLINK: %ld\n", PTR_ERR(dp->pl)); -+ return PTR_ERR(dp->pl); -+ } -+ -+ return 0; -+} -+ - static int dsa_port_setup_phy_of(struct dsa_port *dp, bool enable) - { - struct dsa_switch *ds = dp->ds; -@@ -1177,27 +1201,15 @@ static int dsa_port_phylink_register(str - { - struct dsa_switch *ds = dp->ds; - struct device_node *port_dn = dp->dn; -- phy_interface_t mode; - int err; - -- err = of_get_phy_mode(port_dn, &mode); -- if (err) -- mode = PHY_INTERFACE_MODE_NA; -- - dp->pl_config.dev = ds->dev; - dp->pl_config.type = PHYLINK_DEV; - dp->pl_config.pcs_poll = ds->pcs_poll; - -- if (ds->ops->phylink_get_interfaces) -- ds->ops->phylink_get_interfaces(ds, dp->index, -- dp->pl_config.supported_interfaces); -- -- dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn), -- mode, &dsa_port_phylink_mac_ops); -- if (IS_ERR(dp->pl)) { -- pr_err("error creating PHYLINK: %ld\n", PTR_ERR(dp->pl)); -- return PTR_ERR(dp->pl); -- } -+ err = dsa_port_phylink_create(dp); -+ if (err) -+ return err; - - err = phylink_of_phy_connect(dp->pl, port_dn, 0); - if (err && err != -ENODEV) { ---- a/net/dsa/slave.c -+++ b/net/dsa/slave.c -@@ -1817,14 +1817,9 @@ static int dsa_slave_phy_setup(struct ne - struct dsa_port *dp = dsa_slave_to_port(slave_dev); - struct 
device_node *port_dn = dp->dn; - struct dsa_switch *ds = dp->ds; -- phy_interface_t mode; - u32 phy_flags = 0; - int ret; - -- ret = of_get_phy_mode(port_dn, &mode); -- if (ret) -- mode = PHY_INTERFACE_MODE_NA; -- - dp->pl_config.dev = &slave_dev->dev; - dp->pl_config.type = PHYLINK_NETDEV; - -@@ -1837,17 +1832,9 @@ static int dsa_slave_phy_setup(struct ne - dp->pl_config.poll_fixed_state = true; - } - -- if (ds->ops->phylink_get_interfaces) -- ds->ops->phylink_get_interfaces(ds, dp->index, -- dp->pl_config.supported_interfaces); -- -- dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn), mode, -- &dsa_port_phylink_mac_ops); -- if (IS_ERR(dp->pl)) { -- netdev_err(slave_dev, -- "error creating PHYLINK: %ld\n", PTR_ERR(dp->pl)); -- return PTR_ERR(dp->pl); -- } -+ ret = dsa_port_phylink_create(dp); -+ if (ret) -+ return ret; - - if (ds->ops->get_phy_flags) - phy_flags = ds->ops->get_phy_flags(ds, dp->index); diff --git a/target/linux/generic/backport-6.1/703-05-v5.17-net-dsa-replace-phylink_get_interfaces-with-phylink_.patch b/target/linux/generic/backport-6.1/703-05-v5.17-net-dsa-replace-phylink_get_interfaces-with-phylink_.patch deleted file mode 100644 index d70b9aa6a471..000000000000 --- a/target/linux/generic/backport-6.1/703-05-v5.17-net-dsa-replace-phylink_get_interfaces-with-phylink_.patch +++ /dev/null @@ -1,51 +0,0 @@ -From 072eea6c22b2af680c3949e64f9adde278c71e68 Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Tue, 30 Nov 2021 13:10:01 +0000 -Subject: [PATCH] net: dsa: replace phylink_get_interfaces() with - phylink_get_caps() -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Phylink needs slightly more information than phylink_get_interfaces() -allows us to get from the DSA drivers - we need the MAC capabilities. -Replace the phylink_get_interfaces() method with phylink_get_caps() to -allow DSA drivers to fill in the phylink_config MAC capabilities field -as well. 
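A rough idea of the shape this gives a driver (hypothetical foo_* switch with made-up capabilities; the MAC_* capability flags and the mac_capabilities field come from the phylink generic-validate patch removed further down):

    static void foo_phylink_get_caps(struct dsa_switch *ds, int port,
                                     struct phylink_config *config)
    {
            /* pause plus 10/100 and gigabit full duplex on every port */
            config->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
                                       MAC_10 | MAC_100 | MAC_1000FD;

            __set_bit(PHY_INTERFACE_MODE_INTERNAL,
                      config->supported_interfaces);
            __set_bit(PHY_INTERFACE_MODE_SGMII,
                      config->supported_interfaces);
    }
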
- -Signed-off-by: Russell King (Oracle) -Reviewed-by: Marek Behún -Reviewed-by: Andrew Lunn -Signed-off-by: Jakub Kicinski ---- - include/net/dsa.h | 4 ++-- - net/dsa/port.c | 5 ++--- - 2 files changed, 4 insertions(+), 5 deletions(-) - ---- a/include/net/dsa.h -+++ b/include/net/dsa.h -@@ -626,8 +626,8 @@ struct dsa_switch_ops { - /* - * PHYLINK integration - */ -- void (*phylink_get_interfaces)(struct dsa_switch *ds, int port, -- unsigned long *supported_interfaces); -+ void (*phylink_get_caps)(struct dsa_switch *ds, int port, -+ struct phylink_config *config); - void (*phylink_validate)(struct dsa_switch *ds, int port, - unsigned long *supported, - struct phylink_link_state *state); ---- a/net/dsa/port.c -+++ b/net/dsa/port.c -@@ -1111,9 +1111,8 @@ int dsa_port_phylink_create(struct dsa_p - if (err) - mode = PHY_INTERFACE_MODE_NA; - -- if (ds->ops->phylink_get_interfaces) -- ds->ops->phylink_get_interfaces(ds, dp->index, -- dp->pl_config.supported_interfaces); -+ if (ds->ops->phylink_get_caps) -+ ds->ops->phylink_get_caps(ds, dp->index, &dp->pl_config); - - dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(dp->dn), - mode, &dsa_port_phylink_mac_ops); diff --git a/target/linux/generic/backport-6.1/703-06-v5.18-net-dsa-add-support-for-phylink-mac_select_pcs.patch b/target/linux/generic/backport-6.1/703-06-v5.18-net-dsa-add-support-for-phylink-mac_select_pcs.patch deleted file mode 100644 index 2af5cb9d9db7..000000000000 --- a/target/linux/generic/backport-6.1/703-06-v5.18-net-dsa-add-support-for-phylink-mac_select_pcs.patch +++ /dev/null @@ -1,59 +0,0 @@ -From bde018222c6b084ac32933a9f933581dd83da18e Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Thu, 17 Feb 2022 18:30:35 +0000 -Subject: [PATCH] net: dsa: add support for phylink mac_select_pcs() - -Add DSA support for the phylink mac_select_pcs() method so DSA drivers -can return provide phylink with the appropriate PCS for the PHY -interface mode. - -Signed-off-by: Russell King (Oracle) -Signed-off-by: David S. 
Miller ---- - include/net/dsa.h | 3 +++ - net/dsa/port.c | 15 +++++++++++++++ - 2 files changed, 18 insertions(+) - ---- a/include/net/dsa.h -+++ b/include/net/dsa.h -@@ -631,6 +631,9 @@ struct dsa_switch_ops { - void (*phylink_validate)(struct dsa_switch *ds, int port, - unsigned long *supported, - struct phylink_link_state *state); -+ struct phylink_pcs *(*phylink_mac_select_pcs)(struct dsa_switch *ds, -+ int port, -+ phy_interface_t iface); - int (*phylink_mac_link_state)(struct dsa_switch *ds, int port, - struct phylink_link_state *state); - void (*phylink_mac_config)(struct dsa_switch *ds, int port, ---- a/net/dsa/port.c -+++ b/net/dsa/port.c -@@ -1028,6 +1028,20 @@ static void dsa_port_phylink_mac_pcs_get - } - } - -+static struct phylink_pcs * -+dsa_port_phylink_mac_select_pcs(struct phylink_config *config, -+ phy_interface_t interface) -+{ -+ struct dsa_port *dp = container_of(config, struct dsa_port, pl_config); -+ struct dsa_switch *ds = dp->ds; -+ struct phylink_pcs *pcs = NULL; -+ -+ if (ds->ops->phylink_mac_select_pcs) -+ pcs = ds->ops->phylink_mac_select_pcs(ds, dp->index, interface); -+ -+ return pcs; -+} -+ - static void dsa_port_phylink_mac_config(struct phylink_config *config, - unsigned int mode, - const struct phylink_link_state *state) -@@ -1094,6 +1108,7 @@ static void dsa_port_phylink_mac_link_up - - static const struct phylink_mac_ops dsa_port_phylink_mac_ops = { - .validate = dsa_port_phylink_validate, -+ .mac_select_pcs = dsa_port_phylink_mac_select_pcs, - .mac_pcs_get_state = dsa_port_phylink_mac_pcs_get_state, - .mac_config = dsa_port_phylink_mac_config, - .mac_an_restart = dsa_port_phylink_mac_an_restart, diff --git a/target/linux/generic/backport-6.1/703-07-v5.16-net-phy-add-phy_interface_t-bitmap-support.patch b/target/linux/generic/backport-6.1/703-07-v5.16-net-phy-add-phy_interface_t-bitmap-support.patch deleted file mode 100644 index 1a7817b0f96d..000000000000 --- a/target/linux/generic/backport-6.1/703-07-v5.16-net-phy-add-phy_interface_t-bitmap-support.patch +++ /dev/null @@ -1,61 +0,0 @@ -From 8e20f591f204f8db7f1182918f8e2285d3f589e0 Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Tue, 26 Oct 2021 11:06:01 +0100 -Subject: [PATCH] net: phy: add phy_interface_t bitmap support - -Add support for a bitmap for phy interface modes, which includes: -- a macro to declare the interface bitmap -- an inline helper to zero the interface bitmap -- an inline helper to detect an empty interface bitmap -- inline helpers to do a bitwise AND and OR operations on two interface - bitmaps - -Signed-off-by: Russell King (Oracle) -Signed-off-by: David S. 
Miller ---- - include/linux/phy.h | 34 ++++++++++++++++++++++++++++++++++ - 1 file changed, 34 insertions(+) - ---- a/include/linux/phy.h -+++ b/include/linux/phy.h -@@ -155,6 +155,40 @@ typedef enum { - PHY_INTERFACE_MODE_MAX, - } phy_interface_t; - -+/* PHY interface mode bitmap handling */ -+#define DECLARE_PHY_INTERFACE_MASK(name) \ -+ DECLARE_BITMAP(name, PHY_INTERFACE_MODE_MAX) -+ -+static inline void phy_interface_zero(unsigned long *intf) -+{ -+ bitmap_zero(intf, PHY_INTERFACE_MODE_MAX); -+} -+ -+static inline bool phy_interface_empty(const unsigned long *intf) -+{ -+ return bitmap_empty(intf, PHY_INTERFACE_MODE_MAX); -+} -+ -+static inline void phy_interface_and(unsigned long *dst, const unsigned long *a, -+ const unsigned long *b) -+{ -+ bitmap_and(dst, a, b, PHY_INTERFACE_MODE_MAX); -+} -+ -+static inline void phy_interface_or(unsigned long *dst, const unsigned long *a, -+ const unsigned long *b) -+{ -+ bitmap_or(dst, a, b, PHY_INTERFACE_MODE_MAX); -+} -+ -+static inline void phy_interface_set_rgmii(unsigned long *intf) -+{ -+ __set_bit(PHY_INTERFACE_MODE_RGMII, intf); -+ __set_bit(PHY_INTERFACE_MODE_RGMII_ID, intf); -+ __set_bit(PHY_INTERFACE_MODE_RGMII_RXID, intf); -+ __set_bit(PHY_INTERFACE_MODE_RGMII_TXID, intf); -+} -+ - /* - * phy_supported_speeds - return all speeds currently supported by a PHY device - */ diff --git a/target/linux/generic/backport-6.1/703-08-v5.17-net-phylink-add-mac_select_pcs-method-to-phylink_mac.patch b/target/linux/generic/backport-6.1/703-08-v5.17-net-phylink-add-mac_select_pcs-method-to-phylink_mac.patch deleted file mode 100644 index d826877e7dc6..000000000000 --- a/target/linux/generic/backport-6.1/703-08-v5.17-net-phylink-add-mac_select_pcs-method-to-phylink_mac.patch +++ /dev/null @@ -1,197 +0,0 @@ -From d1e86325af377129adb7fc6f34eb044ca6068b47 Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Wed, 15 Dec 2021 15:34:15 +0000 -Subject: [PATCH] net: phylink: add mac_select_pcs() method to phylink_mac_ops - -mac_select_pcs() allows us to have an explicit point to query which -PCS the MAC wishes to use for a particular PHY interface mode, thereby -allowing us to add support to validate the link settings with the PCS. - -Phylink will also use this to select the PCS to be used during a major -configuration event without the MAC driver needing to call -phylink_set_pcs(). - -Note that if mac_select_pcs() is present, the supported_interfaces -bitmap must be filled in; this avoids mac_select_pcs() being called -with PHY_INTERFACE_MODE_NA when we want to get support for all -interface types. Phylink will return an error in phylink_create() -unless this condition is satisfied. - -Signed-off-by: Russell King (Oracle) -Signed-off-by: David S. Miller ---- - drivers/net/phy/phylink.c | 68 +++++++++++++++++++++++++++++++++------ - include/linux/phylink.h | 18 +++++++++++ - 2 files changed, 77 insertions(+), 9 deletions(-) - ---- a/drivers/net/phy/phylink.c -+++ b/drivers/net/phy/phylink.c -@@ -155,6 +155,23 @@ static const char *phylink_an_mode_str(u - return mode < ARRAY_SIZE(modestr) ? modestr[mode] : "unknown"; - } - -+static int phylink_validate_mac_and_pcs(struct phylink *pl, -+ unsigned long *supported, -+ struct phylink_link_state *state) -+{ -+ struct phylink_pcs *pcs; -+ -+ if (pl->mac_ops->mac_select_pcs) { -+ pcs = pl->mac_ops->mac_select_pcs(pl->config, state->interface); -+ if (IS_ERR(pcs)) -+ return PTR_ERR(pcs); -+ } -+ -+ pl->mac_ops->validate(pl->config, supported, state); -+ -+ return phylink_is_empty_linkmode(supported) ? 
-EINVAL : 0; -+} -+ - static int phylink_validate_any(struct phylink *pl, unsigned long *supported, - struct phylink_link_state *state) - { -@@ -170,9 +187,10 @@ static int phylink_validate_any(struct p - - t = *state; - t.interface = intf; -- pl->mac_ops->validate(pl->config, s, &t); -- linkmode_or(all_s, all_s, s); -- linkmode_or(all_adv, all_adv, t.advertising); -+ if (!phylink_validate_mac_and_pcs(pl, s, &t)) { -+ linkmode_or(all_s, all_s, s); -+ linkmode_or(all_adv, all_adv, t.advertising); -+ } - } - } - -@@ -194,9 +212,7 @@ static int phylink_validate(struct phyli - return -EINVAL; - } - -- pl->mac_ops->validate(pl->config, supported, state); -- -- return phylink_is_empty_linkmode(supported) ? -EINVAL : 0; -+ return phylink_validate_mac_and_pcs(pl, supported, state); - } - - static int phylink_parse_fixedlink(struct phylink *pl, -@@ -486,10 +502,21 @@ static void phylink_mac_pcs_an_restart(s - static void phylink_major_config(struct phylink *pl, bool restart, - const struct phylink_link_state *state) - { -+ struct phylink_pcs *pcs = NULL; - int err; - - phylink_dbg(pl, "major config %s\n", phy_modes(state->interface)); - -+ if (pl->mac_ops->mac_select_pcs) { -+ pcs = pl->mac_ops->mac_select_pcs(pl->config, state->interface); -+ if (IS_ERR(pcs)) { -+ phylink_err(pl, -+ "mac_select_pcs unexpectedly failed: %pe\n", -+ pcs); -+ return; -+ } -+ } -+ - if (pl->mac_ops->mac_prepare) { - err = pl->mac_ops->mac_prepare(pl->config, pl->cur_link_an_mode, - state->interface); -@@ -500,6 +527,12 @@ static void phylink_major_config(struct - } - } - -+ /* If we have a new PCS, switch to the new PCS after preparing the MAC -+ * for the change. -+ */ -+ if (pcs) -+ phylink_set_pcs(pl, pcs); -+ - phylink_mac_config(pl, state); - - if (pl->pcs_ops) { -@@ -879,6 +912,14 @@ struct phylink *phylink_create(struct ph - struct phylink *pl; - int ret; - -+ /* Validate the supplied configuration */ -+ if (mac_ops->mac_select_pcs && -+ phy_interface_empty(config->supported_interfaces)) { -+ dev_err(config->dev, -+ "phylink: error: empty supported_interfaces but mac_select_pcs() method present\n"); -+ return ERR_PTR(-EINVAL); -+ } -+ - pl = kzalloc(sizeof(*pl), GFP_KERNEL); - if (!pl) - return ERR_PTR(-ENOMEM); -@@ -946,9 +987,10 @@ EXPORT_SYMBOL_GPL(phylink_create); - * @pl: a pointer to a &struct phylink returned from phylink_create() - * @pcs: a pointer to the &struct phylink_pcs - * -- * Bind the MAC PCS to phylink. This may be called after phylink_create(), -- * in mac_prepare() or mac_config() methods if it is desired to dynamically -- * change the PCS. -+ * Bind the MAC PCS to phylink. This may be called after phylink_create(). -+ * If it is desired to dynamically change the PCS, then the preferred method -+ * is to use mac_select_pcs(), but it may also be called in mac_prepare() -+ * or mac_config(). - * - * Please note that there are behavioural changes with the mac_config() - * callback if a PCS is present (denoting a newer setup) so removing a PCS -@@ -959,6 +1001,14 @@ void phylink_set_pcs(struct phylink *pl, - { - pl->pcs = pcs; - pl->pcs_ops = pcs->ops; -+ -+ if (!pl->phylink_disable_state && -+ pl->cfg_link_an_mode == MLO_AN_INBAND) { -+ if (pl->config->pcs_poll || pcs->poll) -+ mod_timer(&pl->link_poll, jiffies + HZ); -+ else -+ del_timer(&pl->link_poll); -+ } - } - EXPORT_SYMBOL_GPL(phylink_set_pcs); - ---- a/include/linux/phylink.h -+++ b/include/linux/phylink.h -@@ -86,6 +86,7 @@ struct phylink_config { - /** - * struct phylink_mac_ops - MAC operations structure. 
- * @validate: Validate and update the link configuration. -+ * @mac_select_pcs: Select a PCS for the interface mode. - * @mac_pcs_get_state: Read the current link state from the hardware. - * @mac_prepare: prepare for a major reconfiguration of the interface. - * @mac_config: configure the MAC for the selected mode and state. -@@ -100,6 +101,8 @@ struct phylink_mac_ops { - void (*validate)(struct phylink_config *config, - unsigned long *supported, - struct phylink_link_state *state); -+ struct phylink_pcs *(*mac_select_pcs)(struct phylink_config *config, -+ phy_interface_t interface); - void (*mac_pcs_get_state)(struct phylink_config *config, - struct phylink_link_state *state); - int (*mac_prepare)(struct phylink_config *config, unsigned int mode, -@@ -152,6 +155,21 @@ struct phylink_mac_ops { - */ - void validate(struct phylink_config *config, unsigned long *supported, - struct phylink_link_state *state); -+/** -+ * mac_select_pcs: Select a PCS for the interface mode. -+ * @config: a pointer to a &struct phylink_config. -+ * @interface: PHY interface mode for PCS -+ * -+ * Return the &struct phylink_pcs for the specified interface mode, or -+ * NULL if none is required, or an error pointer on error. -+ * -+ * This must not modify any state. It is used to query which PCS should -+ * be used. Phylink will use this during validation to ensure that the -+ * configuration is valid, and when setting a configuration to internally -+ * set the PCS that will be used. -+ */ -+struct phylink_pcs *mac_select_pcs(struct phylink_config *config, -+ phy_interface_t interface); - - /** - * mac_pcs_get_state() - Read the current inband link state from the hardware diff --git a/target/linux/generic/backport-6.1/703-09-v5.17-net-phylink-add-generic-validate-implementation.patch b/target/linux/generic/backport-6.1/703-09-v5.17-net-phylink-add-generic-validate-implementation.patch deleted file mode 100644 index f30a566c81e9..000000000000 --- a/target/linux/generic/backport-6.1/703-09-v5.17-net-phylink-add-generic-validate-implementation.patch +++ /dev/null @@ -1,341 +0,0 @@ -From 34ae2c09d46a2d0abd907e139b466f798e4095a8 Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Mon, 15 Nov 2021 10:00:27 +0000 -Subject: [PATCH] net: phylink: add generic validate implementation - -Add a generic validate() implementation using the supported_interfaces -and a bitmask of MAC pause/speed/duplex capabilities. This allows us -to entirely eliminate many driver private validate() implementations. - -We expose the underlying phylink_get_linkmodes() function so that -drivers which have special needs can still benefit from conversion. - -Signed-off-by: Russell King (Oracle) -Signed-off-by: David S. Miller ---- - drivers/net/phy/phylink.c | 252 ++++++++++++++++++++++++++++++++++++++ - include/linux/phylink.h | 31 +++++ - 2 files changed, 283 insertions(+) - ---- a/drivers/net/phy/phylink.c -+++ b/drivers/net/phy/phylink.c -@@ -172,6 +172,258 @@ static int phylink_validate_mac_and_pcs( - return phylink_is_empty_linkmode(supported) ? 
-EINVAL : 0; - } - -+static void phylink_caps_to_linkmodes(unsigned long *linkmodes, -+ unsigned long caps) -+{ -+ if (caps & MAC_SYM_PAUSE) -+ __set_bit(ETHTOOL_LINK_MODE_Pause_BIT, linkmodes); -+ -+ if (caps & MAC_ASYM_PAUSE) -+ __set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, linkmodes); -+ -+ if (caps & MAC_10HD) -+ __set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, linkmodes); -+ -+ if (caps & MAC_10FD) -+ __set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, linkmodes); -+ -+ if (caps & MAC_100HD) { -+ __set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_100baseFX_Half_BIT, linkmodes); -+ } -+ -+ if (caps & MAC_100FD) { -+ __set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_100baseT1_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_100baseFX_Full_BIT, linkmodes); -+ } -+ -+ if (caps & MAC_1000HD) -+ __set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, linkmodes); -+ -+ if (caps & MAC_1000FD) { -+ __set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_1000baseT1_Full_BIT, linkmodes); -+ } -+ -+ if (caps & MAC_2500FD) { -+ __set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT, linkmodes); -+ } -+ -+ if (caps & MAC_5000FD) -+ __set_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT, linkmodes); -+ -+ if (caps & MAC_10000FD) { -+ __set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_10000baseER_Full_BIT, linkmodes); -+ } -+ -+ if (caps & MAC_25000FD) { -+ __set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, linkmodes); -+ } -+ -+ if (caps & MAC_40000FD) { -+ __set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, linkmodes); -+ } -+ -+ if (caps & MAC_50000FD) { -+ __set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, -+ linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_50000baseDR_Full_BIT, linkmodes); -+ } -+ -+ if (caps & MAC_56000FD) { -+ __set_bit(ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT, linkmodes); -+ } -+ -+ if (caps & MAC_100000FD) { -+ __set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, linkmodes); -+ 
__set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, -+ linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT, -+ linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_100000baseKR_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_100000baseSR_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT, -+ linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_100000baseCR_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_100000baseDR_Full_BIT, linkmodes); -+ } -+ -+ if (caps & MAC_200000FD) { -+ __set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT, -+ linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT, -+ linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_200000baseDR2_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT, linkmodes); -+ } -+ -+ if (caps & MAC_400000FD) { -+ __set_bit(ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT, -+ linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT, -+ linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT, linkmodes); -+ __set_bit(ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT, linkmodes); -+ } -+} -+ -+/** -+ * phylink_get_linkmodes() - get acceptable link modes -+ * @linkmodes: ethtool linkmode mask (must be already initialised) -+ * @interface: phy interface mode defined by &typedef phy_interface_t -+ * @mac_capabilities: bitmask of MAC capabilities -+ * -+ * Set all possible pause, speed and duplex linkmodes in @linkmodes that -+ * are supported by the @interface mode and @mac_capabilities. @linkmodes -+ * must have been initialised previously. 
-+ */ -+void phylink_get_linkmodes(unsigned long *linkmodes, phy_interface_t interface, -+ unsigned long mac_capabilities) -+{ -+ unsigned long caps = MAC_SYM_PAUSE | MAC_ASYM_PAUSE; -+ -+ switch (interface) { -+ case PHY_INTERFACE_MODE_USXGMII: -+ caps |= MAC_10000FD | MAC_5000FD | MAC_2500FD; -+ fallthrough; -+ -+ case PHY_INTERFACE_MODE_RGMII_TXID: -+ case PHY_INTERFACE_MODE_RGMII_RXID: -+ case PHY_INTERFACE_MODE_RGMII_ID: -+ case PHY_INTERFACE_MODE_RGMII: -+ case PHY_INTERFACE_MODE_QSGMII: -+ case PHY_INTERFACE_MODE_SGMII: -+ case PHY_INTERFACE_MODE_GMII: -+ caps |= MAC_1000HD | MAC_1000FD; -+ fallthrough; -+ -+ case PHY_INTERFACE_MODE_REVRMII: -+ case PHY_INTERFACE_MODE_RMII: -+ case PHY_INTERFACE_MODE_REVMII: -+ case PHY_INTERFACE_MODE_MII: -+ caps |= MAC_10HD | MAC_10FD; -+ fallthrough; -+ -+ case PHY_INTERFACE_MODE_100BASEX: -+ caps |= MAC_100HD | MAC_100FD; -+ break; -+ -+ case PHY_INTERFACE_MODE_TBI: -+ case PHY_INTERFACE_MODE_MOCA: -+ case PHY_INTERFACE_MODE_RTBI: -+ case PHY_INTERFACE_MODE_1000BASEX: -+ caps |= MAC_1000HD; -+ fallthrough; -+ case PHY_INTERFACE_MODE_TRGMII: -+ caps |= MAC_1000FD; -+ break; -+ -+ case PHY_INTERFACE_MODE_2500BASEX: -+ caps |= MAC_2500FD; -+ break; -+ -+ case PHY_INTERFACE_MODE_5GBASER: -+ caps |= MAC_5000FD; -+ break; -+ -+ case PHY_INTERFACE_MODE_XGMII: -+ case PHY_INTERFACE_MODE_RXAUI: -+ case PHY_INTERFACE_MODE_XAUI: -+ case PHY_INTERFACE_MODE_10GBASER: -+ case PHY_INTERFACE_MODE_10GKR: -+ caps |= MAC_10000FD; -+ break; -+ -+ case PHY_INTERFACE_MODE_25GBASER: -+ caps |= MAC_25000FD; -+ break; -+ -+ case PHY_INTERFACE_MODE_XLGMII: -+ caps |= MAC_40000FD; -+ break; -+ -+ case PHY_INTERFACE_MODE_INTERNAL: -+ caps |= ~0; -+ break; -+ -+ case PHY_INTERFACE_MODE_NA: -+ case PHY_INTERFACE_MODE_MAX: -+ case PHY_INTERFACE_MODE_SMII: -+ break; -+ } -+ -+ phylink_caps_to_linkmodes(linkmodes, caps & mac_capabilities); -+} -+EXPORT_SYMBOL_GPL(phylink_get_linkmodes); -+ -+/** -+ * phylink_generic_validate() - generic validate() callback implementation -+ * @config: a pointer to a &struct phylink_config. -+ * @supported: ethtool bitmask for supported link modes. -+ * @state: a pointer to a &struct phylink_link_state. -+ * -+ * Generic implementation of the validate() callback that MAC drivers can -+ * use when they pass the range of supported interfaces and MAC capabilities. -+ * This makes use of phylink_get_linkmodes(). 
-+ */ -+void phylink_generic_validate(struct phylink_config *config, -+ unsigned long *supported, -+ struct phylink_link_state *state) -+{ -+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; -+ -+ phylink_set_port_modes(mask); -+ phylink_set(mask, Autoneg); -+ phylink_get_linkmodes(mask, state->interface, config->mac_capabilities); -+ -+ linkmode_and(supported, supported, mask); -+ linkmode_and(state->advertising, state->advertising, mask); -+} -+EXPORT_SYMBOL_GPL(phylink_generic_validate); -+ - static int phylink_validate_any(struct phylink *pl, unsigned long *supported, - struct phylink_link_state *state) - { ---- a/include/linux/phylink.h -+++ b/include/linux/phylink.h -@@ -20,6 +20,29 @@ enum { - MLO_AN_PHY = 0, /* Conventional PHY */ - MLO_AN_FIXED, /* Fixed-link mode */ - MLO_AN_INBAND, /* In-band protocol */ -+ -+ MAC_SYM_PAUSE = BIT(0), -+ MAC_ASYM_PAUSE = BIT(1), -+ MAC_10HD = BIT(2), -+ MAC_10FD = BIT(3), -+ MAC_10 = MAC_10HD | MAC_10FD, -+ MAC_100HD = BIT(4), -+ MAC_100FD = BIT(5), -+ MAC_100 = MAC_100HD | MAC_100FD, -+ MAC_1000HD = BIT(6), -+ MAC_1000FD = BIT(7), -+ MAC_1000 = MAC_1000HD | MAC_1000FD, -+ MAC_2500FD = BIT(8), -+ MAC_5000FD = BIT(9), -+ MAC_10000FD = BIT(10), -+ MAC_20000FD = BIT(11), -+ MAC_25000FD = BIT(12), -+ MAC_40000FD = BIT(13), -+ MAC_50000FD = BIT(14), -+ MAC_56000FD = BIT(15), -+ MAC_100000FD = BIT(16), -+ MAC_200000FD = BIT(17), -+ MAC_400000FD = BIT(18), - }; - - static inline bool phylink_autoneg_inband(unsigned int mode) -@@ -70,6 +93,7 @@ enum phylink_op_type { - * if MAC link is at %MLO_AN_FIXED mode. - * @supported_interfaces: bitmap describing which PHY_INTERFACE_MODE_xxx - * are supported by the MAC/PCS. -+ * @mac_capabilities: MAC pause/speed/duplex capabilities. - */ - struct phylink_config { - struct device *dev; -@@ -81,6 +105,7 @@ struct phylink_config { - void (*get_fixed_state)(struct phylink_config *config, - struct phylink_link_state *state); - DECLARE_PHY_INTERFACE_MASK(supported_interfaces); -+ unsigned long mac_capabilities; - }; - - /** -@@ -462,6 +487,12 @@ void pcs_link_up(struct phylink_pcs *pcs - phy_interface_t interface, int speed, int duplex); - #endif - -+void phylink_get_linkmodes(unsigned long *linkmodes, phy_interface_t interface, -+ unsigned long mac_capabilities); -+void phylink_generic_validate(struct phylink_config *config, -+ unsigned long *supported, -+ struct phylink_link_state *state); -+ - struct phylink *phylink_create(struct phylink_config *, struct fwnode_handle *, - phy_interface_t iface, - const struct phylink_mac_ops *mac_ops); diff --git a/target/linux/generic/backport-6.1/703-10-v5.16-net-dsa-introduce-helpers-for-iterating-through-port.patch b/target/linux/generic/backport-6.1/703-10-v5.16-net-dsa-introduce-helpers-for-iterating-through-port.patch deleted file mode 100644 index a55623519ce9..000000000000 --- a/target/linux/generic/backport-6.1/703-10-v5.16-net-dsa-introduce-helpers-for-iterating-through-port.patch +++ /dev/null @@ -1,68 +0,0 @@ -From 82b318983c515f29b8b3a0dad9f6a5fe8a68a7f4 Mon Sep 17 00:00:00 2001 -From: Vladimir Oltean -Date: Wed, 20 Oct 2021 20:49:49 +0300 -Subject: [PATCH] net: dsa: introduce helpers for iterating through ports using - dp - -Since the DSA conversion from the ds->ports array into the dst->ports -list, the DSA API has encouraged driver writers, as well as the core -itself, to write inefficient code. - -Currently, code that wants to filter by a specific type of port when -iterating, like {!unused, user, cpu, dsa}, uses the dsa_is_*_port helper. 
-Under the hood, this uses dsa_to_port which iterates again through -dst->ports. But the driver iterates through the port list already, so -the complexity is quadratic for the typical case of a single-switch -tree. - -This patch introduces some iteration helpers where the iterator is -already a struct dsa_port *dp, so that the other variant of the -filtering functions, dsa_port_is_{unused,user,cpu_dsa}, can be used -directly on the iterator. This eliminates the second lookup. - -These functions can be used both by the core and by drivers. - -Signed-off-by: Vladimir Oltean -Reviewed-by: Florian Fainelli -Signed-off-by: David S. Miller ---- - include/net/dsa.h | 28 ++++++++++++++++++++++++++++ - 1 file changed, 28 insertions(+) - ---- a/include/net/dsa.h -+++ b/include/net/dsa.h -@@ -476,6 +476,34 @@ static inline bool dsa_is_user_port(stru - return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_USER; - } - -+#define dsa_tree_for_each_user_port(_dp, _dst) \ -+ list_for_each_entry((_dp), &(_dst)->ports, list) \ -+ if (dsa_port_is_user((_dp))) -+ -+#define dsa_switch_for_each_port(_dp, _ds) \ -+ list_for_each_entry((_dp), &(_ds)->dst->ports, list) \ -+ if ((_dp)->ds == (_ds)) -+ -+#define dsa_switch_for_each_port_safe(_dp, _next, _ds) \ -+ list_for_each_entry_safe((_dp), (_next), &(_ds)->dst->ports, list) \ -+ if ((_dp)->ds == (_ds)) -+ -+#define dsa_switch_for_each_port_continue_reverse(_dp, _ds) \ -+ list_for_each_entry_continue_reverse((_dp), &(_ds)->dst->ports, list) \ -+ if ((_dp)->ds == (_ds)) -+ -+#define dsa_switch_for_each_available_port(_dp, _ds) \ -+ dsa_switch_for_each_port((_dp), (_ds)) \ -+ if (!dsa_port_is_unused((_dp))) -+ -+#define dsa_switch_for_each_user_port(_dp, _ds) \ -+ dsa_switch_for_each_port((_dp), (_ds)) \ -+ if (dsa_port_is_user((_dp))) -+ -+#define dsa_switch_for_each_cpu_port(_dp, _ds) \ -+ dsa_switch_for_each_port((_dp), (_ds)) \ -+ if (dsa_port_is_cpu((_dp))) -+ - static inline u32 dsa_user_ports(struct dsa_switch *ds) - { - u32 mask = 0; diff --git a/target/linux/generic/backport-6.1/703-11-v5.17-net-phylink-add-pcs_validate-method.patch b/target/linux/generic/backport-6.1/703-11-v5.17-net-phylink-add-pcs_validate-method.patch deleted file mode 100644 index 524ce9bd9265..000000000000 --- a/target/linux/generic/backport-6.1/703-11-v5.17-net-phylink-add-pcs_validate-method.patch +++ /dev/null @@ -1,106 +0,0 @@ -From 0d22d4b626a4eaa3196019092eb6c1919e9f8caa Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Wed, 15 Dec 2021 15:34:20 +0000 -Subject: [PATCH] net: phylink: add pcs_validate() method - -Add a hook for PCS to validate the link parameters. This avoids MAC -drivers having to have knowledge of their PCS in their validate() -method, thereby allowing several MAC drivers to be simplfied. - -Signed-off-by: Russell King (Oracle) -Signed-off-by: David S. Miller ---- - drivers/net/phy/phylink.c | 31 +++++++++++++++++++++++++++++++ - include/linux/phylink.h | 20 ++++++++++++++++++++ - 2 files changed, 51 insertions(+) - ---- a/drivers/net/phy/phylink.c -+++ b/drivers/net/phy/phylink.c -@@ -160,13 +160,44 @@ static int phylink_validate_mac_and_pcs( - struct phylink_link_state *state) - { - struct phylink_pcs *pcs; -+ int ret; - -+ /* Get the PCS for this interface mode */ - if (pl->mac_ops->mac_select_pcs) { - pcs = pl->mac_ops->mac_select_pcs(pl->config, state->interface); - if (IS_ERR(pcs)) - return PTR_ERR(pcs); -+ } else { -+ pcs = pl->pcs; - } - -+ if (pcs) { -+ /* The PCS, if present, must be setup before phylink_create() -+ * has been called. 
If the ops is not initialised, print an -+ * error and backtrace rather than oopsing the kernel. -+ */ -+ if (!pcs->ops) { -+ phylink_err(pl, "interface %s: uninitialised PCS\n", -+ phy_modes(state->interface)); -+ dump_stack(); -+ return -EINVAL; -+ } -+ -+ /* Validate the link parameters with the PCS */ -+ if (pcs->ops->pcs_validate) { -+ ret = pcs->ops->pcs_validate(pcs, supported, state); -+ if (ret < 0 || phylink_is_empty_linkmode(supported)) -+ return -EINVAL; -+ -+ /* Ensure the advertising mask is a subset of the -+ * supported mask. -+ */ -+ linkmode_and(state->advertising, state->advertising, -+ supported); -+ } -+ } -+ -+ /* Then validate the link parameters with the MAC */ - pl->mac_ops->validate(pl->config, supported, state); - - return phylink_is_empty_linkmode(supported) ? -EINVAL : 0; ---- a/include/linux/phylink.h -+++ b/include/linux/phylink.h -@@ -398,6 +398,7 @@ struct phylink_pcs { - - /** - * struct phylink_pcs_ops - MAC PCS operations structure. -+ * @pcs_validate: validate the link configuration. - * @pcs_get_state: read the current MAC PCS link state from the hardware. - * @pcs_config: configure the MAC PCS for the selected mode and state. - * @pcs_an_restart: restart 802.3z BaseX autonegotiation. -@@ -405,6 +406,8 @@ struct phylink_pcs { - * (where necessary). - */ - struct phylink_pcs_ops { -+ int (*pcs_validate)(struct phylink_pcs *pcs, unsigned long *supported, -+ const struct phylink_link_state *state); - void (*pcs_get_state)(struct phylink_pcs *pcs, - struct phylink_link_state *state); - int (*pcs_config)(struct phylink_pcs *pcs, unsigned int mode, -@@ -418,6 +421,23 @@ struct phylink_pcs_ops { - - #if 0 /* For kernel-doc purposes only. */ - /** -+ * pcs_validate() - validate the link configuration. -+ * @pcs: a pointer to a &struct phylink_pcs. -+ * @supported: ethtool bitmask for supported link modes. -+ * @state: a const pointer to a &struct phylink_link_state. -+ * -+ * Validate the interface mode, and advertising's autoneg bit, removing any -+ * media ethtool link modes that would not be supportable from the supported -+ * mask. Phylink will propagate the changes to the advertising mask. See the -+ * &struct phylink_mac_ops validate() method. -+ * -+ * Returns -EINVAL if the interface mode/autoneg mode is not supported. -+ * Returns non-zero positive if the link state can be supported. -+ */ -+int pcs_validate(struct phylink_pcs *pcs, unsigned long *supported, -+ const struct phylink_link_state *state); -+ -+/** - * pcs_get_state() - Read the current inband link state from the hardware - * @pcs: a pointer to a &struct phylink_pcs. - * @state: a pointer to a &struct phylink_link_state. diff --git a/target/linux/generic/backport-6.1/703-12-v5.17-net-phylink-add-legacy_pre_march2020-indicator.patch b/target/linux/generic/backport-6.1/703-12-v5.17-net-phylink-add-legacy_pre_march2020-indicator.patch deleted file mode 100644 index 16d5da9c70e3..000000000000 --- a/target/linux/generic/backport-6.1/703-12-v5.17-net-phylink-add-legacy_pre_march2020-indicator.patch +++ /dev/null @@ -1,43 +0,0 @@ -From 3e5b1feccea7db576353ffc302f78d522e4116e6 Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Thu, 9 Dec 2021 13:11:32 +0000 -Subject: [PATCH] net: phylink: add legacy_pre_march2020 indicator - -Add a boolean to phylink_config to indicate whether a driver has not -been updated for the changes in commit 7cceb599d15d ("net: phylink: -avoid mac_config calls"), and thus are reliant on the old behaviour. 
- -We were currently keying the phylink behaviour on the presence of a -PCS, but this is sub-optimal for modern drivers that may not have a -PCS. - -This commit merely introduces the new flag, but does not add any use, -since we need all legacy drivers to set this flag before it can be -used. Once these legacy drivers have been updated, we can remove this -flag. - -Signed-off-by: Russell King (Oracle) -Signed-off-by: Jakub Kicinski ---- - include/linux/phylink.h | 3 +++ - 1 file changed, 3 insertions(+) - ---- a/include/linux/phylink.h -+++ b/include/linux/phylink.h -@@ -84,6 +84,8 @@ enum phylink_op_type { - * struct phylink_config - PHYLINK configuration structure - * @dev: a pointer to a struct device associated with the MAC - * @type: operation type of PHYLINK instance -+ * @legacy_pre_march2020: driver has not been updated for March 2020 updates -+ * (See commit 7cceb599d15d ("net: phylink: avoid mac_config calls") - * @pcs_poll: MAC PCS cannot provide link change interrupt - * @poll_fixed_state: if true, starts link_poll, - * if MAC link is at %MLO_AN_FIXED mode. -@@ -98,6 +100,7 @@ enum phylink_op_type { - struct phylink_config { - struct device *dev; - enum phylink_op_type type; -+ bool legacy_pre_march2020; - bool pcs_poll; - bool poll_fixed_state; - bool mac_managed_pm; diff --git a/target/linux/generic/backport-6.1/703-13-v5.17-net-dsa-mark-DSA-phylink-as-legacy_pre_march2020.patch b/target/linux/generic/backport-6.1/703-13-v5.17-net-dsa-mark-DSA-phylink-as-legacy_pre_march2020.patch deleted file mode 100644 index 849881942e1a..000000000000 --- a/target/linux/generic/backport-6.1/703-13-v5.17-net-dsa-mark-DSA-phylink-as-legacy_pre_march2020.patch +++ /dev/null @@ -1,36 +0,0 @@ -From 0a9f0794d9bd67e590a9488afe87fbb0419d9539 Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Thu, 9 Dec 2021 13:11:38 +0000 -Subject: [PATCH] net: dsa: mark DSA phylink as legacy_pre_march2020 - -The majority of DSA drivers do not make use of the PCS support, and -thus operate in legacy mode. In order to preserve this behaviour in -future, we need to set the legacy_pre_march2020 flag so phylink knows -this may require the legacy calls. - -There are some DSA drivers that do make use of PCS support, and these -will continue operating as before - legacy_pre_march2020 will not -prevent split-PCS support enabling the newer phylink behaviour. - -Signed-off-by: Russell King (Oracle) -Signed-off-by: Jakub Kicinski ---- - net/dsa/port.c | 7 +++++++ - 1 file changed, 7 insertions(+) - ---- a/net/dsa/port.c -+++ b/net/dsa/port.c -@@ -1126,6 +1126,13 @@ int dsa_port_phylink_create(struct dsa_p - if (err) - mode = PHY_INTERFACE_MODE_NA; - -+ /* Presence of phylink_mac_link_state or phylink_mac_an_restart is -+ * an indicator of a legacy phylink driver. 
-+ */ -+ if (ds->ops->phylink_mac_link_state || -+ ds->ops->phylink_mac_an_restart) -+ dp->pl_config.legacy_pre_march2020 = true; -+ - if (ds->ops->phylink_get_caps) - ds->ops->phylink_get_caps(ds, dp->index, &dp->pl_config); - diff --git a/target/linux/generic/backport-6.1/703-14-v5.17-net-phylink-use-legacy_pre_march2020.patch b/target/linux/generic/backport-6.1/703-14-v5.17-net-phylink-use-legacy_pre_march2020.patch deleted file mode 100644 index 73e53068b8a1..000000000000 --- a/target/linux/generic/backport-6.1/703-14-v5.17-net-phylink-use-legacy_pre_march2020.patch +++ /dev/null @@ -1,115 +0,0 @@ -From 001f4261fe4d5ae710cf1f445b6cae6d9d3ae26e Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Thu, 9 Dec 2021 13:11:48 +0000 -Subject: [PATCH] net: phylink: use legacy_pre_march2020 - -Use the legacy flag to indicate whether we should operate in legacy -mode. This allows us to stop using the presence of a PCS as an -indicator to the age of the phylink user, and make PCS presence -optional. - -Legacy mode involves: -1) calling mac_config() whenever the link comes up -2) calling mac_config() whenever the inband advertisement changes, - possibly followed by a call to mac_an_restart() -3) making use of mac_an_restart() -4) making use of mac_pcs_get_state() - -All the above functionality was moved to a seperate "PCS" block of -operations in March 2020. - -Update the documents to indicate that the differences that this flag -makes. - -Signed-off-by: Russell King (Oracle) -Signed-off-by: Jakub Kicinski ---- - drivers/net/phy/phylink.c | 12 ++++++------ - include/linux/phylink.h | 17 +++++++++++++++++ - 2 files changed, 23 insertions(+), 6 deletions(-) - ---- a/drivers/net/phy/phylink.c -+++ b/drivers/net/phy/phylink.c -@@ -777,7 +777,7 @@ static void phylink_mac_pcs_an_restart(s - phylink_autoneg_inband(pl->cur_link_an_mode)) { - if (pl->pcs_ops) - pl->pcs_ops->pcs_an_restart(pl->pcs); -- else -+ else if (pl->config->legacy_pre_march2020) - pl->mac_ops->mac_an_restart(pl->config); - } - } -@@ -855,7 +855,7 @@ static int phylink_change_inband_advert( - if (test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) - return 0; - -- if (!pl->pcs_ops) { -+ if (!pl->pcs_ops && pl->config->legacy_pre_march2020) { - /* Legacy method */ - phylink_mac_config(pl, &pl->link_config); - phylink_mac_pcs_an_restart(pl); -@@ -900,7 +900,8 @@ static void phylink_mac_pcs_get_state(st - - if (pl->pcs_ops) - pl->pcs_ops->pcs_get_state(pl->pcs, state); -- else if (pl->mac_ops->mac_pcs_get_state) -+ else if (pl->mac_ops->mac_pcs_get_state && -+ pl->config->legacy_pre_march2020) - pl->mac_ops->mac_pcs_get_state(pl->config, state); - else - state->link = 0; -@@ -1094,12 +1095,11 @@ static void phylink_resolve(struct work_ - } - phylink_major_config(pl, false, &link_state); - pl->link_config.interface = link_state.interface; -- } else if (!pl->pcs_ops) { -+ } else if (!pl->pcs_ops && pl->config->legacy_pre_march2020) { - /* The interface remains unchanged, only the speed, - * duplex or pause settings have changed. Call the - * old mac_config() method to configure the MAC/PCS -- * only if we do not have a PCS installed (an -- * unconverted user.) -+ * only if we do not have a legacy MAC driver. - */ - phylink_mac_config(pl, &link_state); - } ---- a/include/linux/phylink.h -+++ b/include/linux/phylink.h -@@ -210,6 +210,10 @@ struct phylink_pcs *mac_select_pcs(struc - * negotiation completion state in @state->an_complete, and link up state - * in @state->link. 
If possible, @state->lp_advertising should also be - * populated. -+ * -+ * Note: This is a legacy method. This function will not be called unless -+ * legacy_pre_march2020 is set in &struct phylink_config and there is no -+ * PCS attached. - */ - void mac_pcs_get_state(struct phylink_config *config, - struct phylink_link_state *state); -@@ -250,6 +254,15 @@ int mac_prepare(struct phylink_config *c - * guaranteed to be correct, and so any mac_config() implementation must - * never reference these fields. - * -+ * Note: For legacy March 2020 drivers (drivers with legacy_pre_march2020 set -+ * in their &phylnk_config and which don't have a PCS), this function will be -+ * called on each link up event, and to also change the in-band advert. For -+ * non-legacy drivers, it will only be called to reconfigure the MAC for a -+ * "major" change in e.g. interface mode. It will not be called for changes -+ * in speed, duplex or pause modes or to change the in-band advertisement. -+ * In any case, it is strongly preferred that speed, duplex and pause settings -+ * are handled in the mac_link_up() method and not in this method. -+ * - * (this requires a rewrite - please refer to mac_link_up() for situations - * where the PCS and MAC are not tightly integrated.) - * -@@ -334,6 +347,10 @@ int mac_finish(struct phylink_config *co - /** - * mac_an_restart() - restart 802.3z BaseX autonegotiation - * @config: a pointer to a &struct phylink_config. -+ * -+ * Note: This is a legacy method. This function will not be called unless -+ * legacy_pre_march2020 is set in &struct phylink_config and there is no -+ * PCS attached. - */ - void mac_an_restart(struct phylink_config *config); - diff --git a/target/linux/generic/backport-6.1/703-15-v5.18-net-phy-phylink-fix-DSA-mac_select_pcs-introduction.patch b/target/linux/generic/backport-6.1/703-15-v5.18-net-phy-phylink-fix-DSA-mac_select_pcs-introduction.patch deleted file mode 100644 index 9e5061aaed83..000000000000 --- a/target/linux/generic/backport-6.1/703-15-v5.18-net-phy-phylink-fix-DSA-mac_select_pcs-introduction.patch +++ /dev/null @@ -1,88 +0,0 @@ -From 1054457006d4a14de4ae4132030e33d7eedaeba1 Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Mon, 21 Feb 2022 17:10:52 +0000 -Subject: [PATCH] net: phy: phylink: fix DSA mac_select_pcs() introduction - -Vladimir Oltean reports that probing on DSA drivers that aren't yet -populating supported_interfaces now fails. Fix this by allowing -phylink to detect whether DSA actually provides an underlying -mac_select_pcs() implementation. 
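As an aside, the convention this fix relies on can be sketched in driver terms. The foo_* names below are invented for illustration; only the ERR_PTR(-EOPNOTSUPP) convention comes from the hunks that follow, where phylink_create() probes mac_select_pcs() with PHY_INTERFACE_MODE_NA and treats -EOPNOTSUPP as "no real mac_select_pcs() backing":

	/* Hypothetical driver glue, for illustration only */
	static struct phylink_pcs *
	foo_mac_select_pcs(struct phylink_config *config, phy_interface_t interface)
	{
		struct foo_port *fp = container_of(config, struct foo_port, phylink_config);

		/* No PCS wired up: report "not implemented" rather than NULL,
		 * so the PHY_INTERFACE_MODE_NA probe in phylink_create() can
		 * tell the difference.
		 */
		if (!fp->pcs)
			return ERR_PTR(-EOPNOTSUPP);

		return fp->pcs;
	}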
- -Reported-by: Vladimir Oltean -Fixes: bde018222c6b ("net: dsa: add support for phylink mac_select_pcs()") -Signed-off-by: Russell King (Oracle) -Tested-by: Vladimir Oltean -Link: https://lore.kernel.org/r/E1nMCD6-00A0wC-FG@rmk-PC.armlinux.org.uk -Signed-off-by: Jakub Kicinski ---- - drivers/net/phy/phylink.c | 14 +++++++++++--- - net/dsa/port.c | 2 +- - 2 files changed, 12 insertions(+), 4 deletions(-) - ---- a/drivers/net/phy/phylink.c -+++ b/drivers/net/phy/phylink.c -@@ -74,6 +74,7 @@ struct phylink { - struct work_struct resolve; - - bool mac_link_dropped; -+ bool using_mac_select_pcs; - - struct sfp_bus *sfp_bus; - bool sfp_may_have_phy; -@@ -163,7 +164,7 @@ static int phylink_validate_mac_and_pcs( - int ret; - - /* Get the PCS for this interface mode */ -- if (pl->mac_ops->mac_select_pcs) { -+ if (pl->using_mac_select_pcs) { - pcs = pl->mac_ops->mac_select_pcs(pl->config, state->interface); - if (IS_ERR(pcs)) - return PTR_ERR(pcs); -@@ -790,7 +791,7 @@ static void phylink_major_config(struct - - phylink_dbg(pl, "major config %s\n", phy_modes(state->interface)); - -- if (pl->mac_ops->mac_select_pcs) { -+ if (pl->using_mac_select_pcs) { - pcs = pl->mac_ops->mac_select_pcs(pl->config, state->interface); - if (IS_ERR(pcs)) { - phylink_err(pl, -@@ -1192,11 +1193,17 @@ struct phylink *phylink_create(struct ph - phy_interface_t iface, - const struct phylink_mac_ops *mac_ops) - { -+ bool using_mac_select_pcs = false; - struct phylink *pl; - int ret; - -- /* Validate the supplied configuration */ - if (mac_ops->mac_select_pcs && -+ mac_ops->mac_select_pcs(config, PHY_INTERFACE_MODE_NA) != -+ ERR_PTR(-EOPNOTSUPP)) -+ using_mac_select_pcs = true; -+ -+ /* Validate the supplied configuration */ -+ if (using_mac_select_pcs && - phy_interface_empty(config->supported_interfaces)) { - dev_err(config->dev, - "phylink: error: empty supported_interfaces but mac_select_pcs() method present\n"); -@@ -1220,6 +1227,7 @@ struct phylink *phylink_create(struct ph - return ERR_PTR(-EINVAL); - } - -+ pl->using_mac_select_pcs = using_mac_select_pcs; - pl->phy_state.interface = iface; - pl->link_interface = iface; - if (iface == PHY_INTERFACE_MODE_MOCA) ---- a/net/dsa/port.c -+++ b/net/dsa/port.c -@@ -1033,8 +1033,8 @@ dsa_port_phylink_mac_select_pcs(struct p - phy_interface_t interface) - { - struct dsa_port *dp = container_of(config, struct dsa_port, pl_config); -+ struct phylink_pcs *pcs = ERR_PTR(-EOPNOTSUPP); - struct dsa_switch *ds = dp->ds; -- struct phylink_pcs *pcs = NULL; - - if (ds->ops->phylink_mac_select_pcs) - pcs = ds->ops->phylink_mac_select_pcs(ds, dp->index, interface); diff --git a/target/linux/generic/backport-6.1/703-16-v5.16-net-mvneta-populate-supported_interfaces-member.patch b/target/linux/generic/backport-6.1/703-16-v5.16-net-mvneta-populate-supported_interfaces-member.patch deleted file mode 100644 index f21fa4b2778e..000000000000 --- a/target/linux/generic/backport-6.1/703-16-v5.16-net-mvneta-populate-supported_interfaces-member.patch +++ /dev/null @@ -1,48 +0,0 @@ -From fdedb695e6a8657302341cda81d519ef04f9acaa Mon Sep 17 00:00:00 2001 -From: Russell King -Date: Wed, 27 Oct 2021 10:03:43 +0100 -Subject: [PATCH] net: mvneta: populate supported_interfaces member - -Populate the phy_interface_t bitmap for the Marvell mvneta driver with -interfaces modes supported by the MAC. - -Signed-off-by: Russell King -Signed-off-by: David S. 
Miller ---- - drivers/net/ethernet/marvell/mvneta.c | 25 +++++++++++++++++++++++++ - 1 file changed, 25 insertions(+) - ---- a/drivers/net/ethernet/marvell/mvneta.c -+++ b/drivers/net/ethernet/marvell/mvneta.c -@@ -5180,6 +5180,31 @@ static int mvneta_probe(struct platform_ - - pp->phylink_config.dev = &dev->dev; - pp->phylink_config.type = PHYLINK_NETDEV; -+ phy_interface_set_rgmii(pp->phylink_config.supported_interfaces); -+ __set_bit(PHY_INTERFACE_MODE_QSGMII, -+ pp->phylink_config.supported_interfaces); -+ if (comphy) { -+ /* If a COMPHY is present, we can support any of the serdes -+ * modes and switch between them. -+ */ -+ __set_bit(PHY_INTERFACE_MODE_SGMII, -+ pp->phylink_config.supported_interfaces); -+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, -+ pp->phylink_config.supported_interfaces); -+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, -+ pp->phylink_config.supported_interfaces); -+ } else if (phy_mode == PHY_INTERFACE_MODE_2500BASEX) { -+ /* No COMPHY, with only 2500BASE-X mode supported */ -+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, -+ pp->phylink_config.supported_interfaces); -+ } else if (phy_mode == PHY_INTERFACE_MODE_1000BASEX || -+ phy_mode == PHY_INTERFACE_MODE_SGMII) { -+ /* No COMPHY, we can switch between 1000BASE-X and SGMII */ -+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, -+ pp->phylink_config.supported_interfaces); -+ __set_bit(PHY_INTERFACE_MODE_SGMII, -+ pp->phylink_config.supported_interfaces); -+ } - - phylink = phylink_create(&pp->phylink_config, pdev->dev.fwnode, - phy_mode, &mvneta_phylink_ops); diff --git a/target/linux/generic/backport-6.1/703-17-v5.16-net-mvneta-remove-interface-checks-in-mvneta_validat.patch b/target/linux/generic/backport-6.1/703-17-v5.16-net-mvneta-remove-interface-checks-in-mvneta_validat.patch deleted file mode 100644 index e287e39d6a4e..000000000000 --- a/target/linux/generic/backport-6.1/703-17-v5.16-net-mvneta-remove-interface-checks-in-mvneta_validat.patch +++ /dev/null @@ -1,35 +0,0 @@ -From d9ca72807ecb236f679b960c70ef5b7d4a5f0222 Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Wed, 27 Oct 2021 10:03:48 +0100 -Subject: [PATCH] net: mvneta: remove interface checks in mvneta_validate() - -As phylink checks the interface mode against the supported_interfaces -bitmap, we no longer need to validate the interface mode in the -validation function. Remove this to simplify it. - -Signed-off-by: Russell King (Oracle) -Signed-off-by: David S. Miller ---- - drivers/net/ethernet/marvell/mvneta.c | 11 ++--------- - 1 file changed, 2 insertions(+), 9 deletions(-) - ---- a/drivers/net/ethernet/marvell/mvneta.c -+++ b/drivers/net/ethernet/marvell/mvneta.c -@@ -3833,15 +3833,8 @@ static void mvneta_validate(struct phyli - * "Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ... - * When = 1 (1000BASE-X) this field must be set to 1." 
- */ -- if (phy_interface_mode_is_8023z(state->interface)) { -- if (!phylink_test(state->advertising, Autoneg)) { -- linkmode_zero(supported); -- return; -- } -- } else if (state->interface != PHY_INTERFACE_MODE_NA && -- state->interface != PHY_INTERFACE_MODE_QSGMII && -- state->interface != PHY_INTERFACE_MODE_SGMII && -- !phy_interface_mode_is_rgmii(state->interface)) { -+ if (phy_interface_mode_is_8023z(state->interface) && -+ !phylink_test(state->advertising, Autoneg)) { - linkmode_zero(supported); - return; - } diff --git a/target/linux/generic/backport-6.1/703-18-v5.16-net-mvneta-drop-use-of-phylink_helper_basex_speed.patch b/target/linux/generic/backport-6.1/703-18-v5.16-net-mvneta-drop-use-of-phylink_helper_basex_speed.patch deleted file mode 100644 index 9121612bf829..000000000000 --- a/target/linux/generic/backport-6.1/703-18-v5.16-net-mvneta-drop-use-of-phylink_helper_basex_speed.patch +++ /dev/null @@ -1,55 +0,0 @@ -From 099cbfa286ab937d8213c2dc5c0b401969b78042 Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Wed, 27 Oct 2021 10:03:53 +0100 -Subject: [PATCH] net: mvneta: drop use of phylink_helper_basex_speed() - -Now that we have a better method to select SFP interface modes, we -no longer need to use phylink_helper_basex_speed() in a driver's -validation function, and we can also get rid of our hack to indicate -both 1000base-X and 2500base-X if the comphy is present to make that -work. Remove this hack and use of phylink_helper_basex_speed(). - -Signed-off-by: Russell King (Oracle) -Signed-off-by: David S. Miller ---- - drivers/net/ethernet/marvell/mvneta.c | 12 +++--------- - 1 file changed, 3 insertions(+), 9 deletions(-) - ---- a/drivers/net/ethernet/marvell/mvneta.c -+++ b/drivers/net/ethernet/marvell/mvneta.c -@@ -3824,8 +3824,6 @@ static void mvneta_validate(struct phyli - unsigned long *supported, - struct phylink_link_state *state) - { -- struct net_device *ndev = to_net_dev(config->dev); -- struct mvneta_port *pp = netdev_priv(ndev); - __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; - - /* We only support QSGMII, SGMII, 802.3z and RGMII modes. -@@ -3847,11 +3845,12 @@ static void mvneta_validate(struct phyli - phylink_set(mask, Pause); - - /* Half-duplex at speeds higher than 100Mbit is unsupported */ -- if (pp->comphy || state->interface != PHY_INTERFACE_MODE_2500BASEX) { -+ if (state->interface != PHY_INTERFACE_MODE_2500BASEX) { - phylink_set(mask, 1000baseT_Full); - phylink_set(mask, 1000baseX_Full); - } -- if (pp->comphy || state->interface == PHY_INTERFACE_MODE_2500BASEX) { -+ -+ if (state->interface == PHY_INTERFACE_MODE_2500BASEX) { - phylink_set(mask, 2500baseT_Full); - phylink_set(mask, 2500baseX_Full); - } -@@ -3866,11 +3865,6 @@ static void mvneta_validate(struct phyli - - linkmode_and(supported, supported, mask); - linkmode_and(state->advertising, state->advertising, mask); -- -- /* We can only operate at 2500BaseX or 1000BaseX. If requested -- * to advertise both, only report advertising at 2500BaseX. 
-- */ -- phylink_helper_basex_speed(state); - } - - static void mvneta_mac_pcs_get_state(struct phylink_config *config, diff --git a/target/linux/generic/backport-6.1/703-19-v5.17-net-mvneta-use-phylink_generic_validate.patch b/target/linux/generic/backport-6.1/703-19-v5.17-net-mvneta-use-phylink_generic_validate.patch deleted file mode 100644 index 209dfbc0de93..000000000000 --- a/target/linux/generic/backport-6.1/703-19-v5.17-net-mvneta-use-phylink_generic_validate.patch +++ /dev/null @@ -1,72 +0,0 @@ -From 02a0988b98930491db95966fb8086072e47dabb6 Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Mon, 15 Nov 2021 10:00:32 +0000 -Subject: [PATCH] net: mvneta: use phylink_generic_validate() - -Convert mvneta to use phylink_generic_validate() for the bulk of its -validate() implementation. This network adapter has a restriction -that for 802.3z links, autonegotiation must be enabled. - -Signed-off-by: Russell King (Oracle) -Signed-off-by: David S. Miller ---- - drivers/net/ethernet/marvell/mvneta.c | 34 ++++----------------------- - 1 file changed, 4 insertions(+), 30 deletions(-) - ---- a/drivers/net/ethernet/marvell/mvneta.c -+++ b/drivers/net/ethernet/marvell/mvneta.c -@@ -3824,8 +3824,6 @@ static void mvneta_validate(struct phyli - unsigned long *supported, - struct phylink_link_state *state) - { -- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; -- - /* We only support QSGMII, SGMII, 802.3z and RGMII modes. - * When in 802.3z mode, we must have AN enabled: - * "Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ... -@@ -3837,34 +3835,7 @@ static void mvneta_validate(struct phyli - return; - } - -- /* Allow all the expected bits */ -- phylink_set(mask, Autoneg); -- phylink_set_port_modes(mask); -- -- /* Asymmetric pause is unsupported */ -- phylink_set(mask, Pause); -- -- /* Half-duplex at speeds higher than 100Mbit is unsupported */ -- if (state->interface != PHY_INTERFACE_MODE_2500BASEX) { -- phylink_set(mask, 1000baseT_Full); -- phylink_set(mask, 1000baseX_Full); -- } -- -- if (state->interface == PHY_INTERFACE_MODE_2500BASEX) { -- phylink_set(mask, 2500baseT_Full); -- phylink_set(mask, 2500baseX_Full); -- } -- -- if (!phy_interface_mode_is_8023z(state->interface)) { -- /* 10M and 100M are only supported in non-802.3z mode */ -- phylink_set(mask, 10baseT_Half); -- phylink_set(mask, 10baseT_Full); -- phylink_set(mask, 100baseT_Half); -- phylink_set(mask, 100baseT_Full); -- } -- -- linkmode_and(supported, supported, mask); -- linkmode_and(state->advertising, state->advertising, mask); -+ phylink_generic_validate(config, supported, state); - } - - static void mvneta_mac_pcs_get_state(struct phylink_config *config, -@@ -5167,6 +5138,9 @@ static int mvneta_probe(struct platform_ - - pp->phylink_config.dev = &dev->dev; - pp->phylink_config.type = PHYLINK_NETDEV; -+ pp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 | -+ MAC_100 | MAC_1000FD | MAC_2500FD; -+ - phy_interface_set_rgmii(pp->phylink_config.supported_interfaces); - __set_bit(PHY_INTERFACE_MODE_QSGMII, - pp->phylink_config.supported_interfaces); diff --git a/target/linux/generic/backport-6.1/703-20-v5.17-net-mvneta-mark-as-a-legacy_pre_march2020-driver.patch b/target/linux/generic/backport-6.1/703-20-v5.17-net-mvneta-mark-as-a-legacy_pre_march2020-driver.patch deleted file mode 100644 index 31717565bf17..000000000000 --- a/target/linux/generic/backport-6.1/703-20-v5.17-net-mvneta-mark-as-a-legacy_pre_march2020-driver.patch +++ /dev/null @@ -1,29 +0,0 @@ -From 2106be4fdf3223d9c5bd485e6ef094139e3197ba 
Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Sun, 12 Dec 2021 13:01:21 +0000 -Subject: [PATCH] net: mvneta: mark as a legacy_pre_march2020 driver -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -mvneta provides mac_an_restart and mac_pcs_get_state methods, so needs -to be marked as a legacy driver. Marek spotted that mvneta had stopped -working in 2500base-X mode - thanks for reporting. - -Reported-by: Marek Behún -Signed-off-by: Russell King (Oracle) -Signed-off-by: David S. Miller ---- - drivers/net/ethernet/marvell/mvneta.c | 1 + - 1 file changed, 1 insertion(+) - ---- a/drivers/net/ethernet/marvell/mvneta.c -+++ b/drivers/net/ethernet/marvell/mvneta.c -@@ -5138,6 +5138,7 @@ static int mvneta_probe(struct platform_ - - pp->phylink_config.dev = &dev->dev; - pp->phylink_config.type = PHYLINK_NETDEV; -+ pp->phylink_config.legacy_pre_march2020 = true; - pp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 | - MAC_100 | MAC_1000FD | MAC_2500FD; - diff --git a/target/linux/generic/backport-6.1/704-01-v5.17-net-mtk_eth_soc-populate-supported_interfaces-member.patch b/target/linux/generic/backport-6.1/704-01-v5.17-net-mtk_eth_soc-populate-supported_interfaces-member.patch deleted file mode 100644 index b6fe0dad4c87..000000000000 --- a/target/linux/generic/backport-6.1/704-01-v5.17-net-mtk_eth_soc-populate-supported_interfaces-member.patch +++ /dev/null @@ -1,43 +0,0 @@ -From 83800d29f0c578e82554e7d4c6bfdbdf9b6cf428 Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Tue, 16 Nov 2021 10:06:43 +0000 -Subject: [PATCH] net: mtk_eth_soc: populate supported_interfaces member - -Populate the phy interface mode bitmap for the Mediatek driver with -interfaces modes supported by the MAC. - -Signed-off-by: Russell King (Oracle) -Signed-off-by: David S. 
Miller ---- - drivers/net/ethernet/mediatek/mtk_eth_soc.c | 20 ++++++++++++++++++++ - 1 file changed, 20 insertions(+) - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -3355,6 +3355,26 @@ static int mtk_add_mac(struct mtk_eth *e - - mac->phylink_config.dev = ð->netdev[id]->dev; - mac->phylink_config.type = PHYLINK_NETDEV; -+ __set_bit(PHY_INTERFACE_MODE_MII, -+ mac->phylink_config.supported_interfaces); -+ __set_bit(PHY_INTERFACE_MODE_GMII, -+ mac->phylink_config.supported_interfaces); -+ -+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) -+ phy_interface_set_rgmii(mac->phylink_config.supported_interfaces); -+ -+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && !mac->id) -+ __set_bit(PHY_INTERFACE_MODE_TRGMII, -+ mac->phylink_config.supported_interfaces); -+ -+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) { -+ __set_bit(PHY_INTERFACE_MODE_SGMII, -+ mac->phylink_config.supported_interfaces); -+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, -+ mac->phylink_config.supported_interfaces); -+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, -+ mac->phylink_config.supported_interfaces); -+ } - - phylink = phylink_create(&mac->phylink_config, - of_fwnode_handle(mac->of_node), diff --git a/target/linux/generic/backport-6.1/704-02-v5.17-net-mtk_eth_soc-remove-interface-checks-in-mtk_valid.patch b/target/linux/generic/backport-6.1/704-02-v5.17-net-mtk_eth_soc-remove-interface-checks-in-mtk_valid.patch deleted file mode 100644 index 0a33ab009340..000000000000 --- a/target/linux/generic/backport-6.1/704-02-v5.17-net-mtk_eth_soc-remove-interface-checks-in-mtk_valid.patch +++ /dev/null @@ -1,75 +0,0 @@ -From db81ca153814475d7e07365d46a4d1134bd122e2 Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Tue, 16 Nov 2021 10:06:48 +0000 -Subject: [PATCH] net: mtk_eth_soc: remove interface checks in mtk_validate() - -As phylink checks the interface mode against the supported_interfaces -bitmap, we no longer need to validate the interface mode, nor handle -PHY_INTERFACE_MODE_NA in the validation function. Remove these to -simplify the implementation. - -Signed-off-by: Russell King (Oracle) -Signed-off-by: David S. 
Miller ---- - drivers/net/ethernet/mediatek/mtk_eth_soc.c | 34 --------------------- - 1 file changed, 34 deletions(-) - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -568,24 +568,8 @@ static void mtk_validate(struct phylink_ - unsigned long *supported, - struct phylink_link_state *state) - { -- struct mtk_mac *mac = container_of(config, struct mtk_mac, -- phylink_config); - __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; - -- if (state->interface != PHY_INTERFACE_MODE_NA && -- state->interface != PHY_INTERFACE_MODE_MII && -- state->interface != PHY_INTERFACE_MODE_GMII && -- !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII) && -- phy_interface_mode_is_rgmii(state->interface)) && -- !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && -- !mac->id && state->interface == PHY_INTERFACE_MODE_TRGMII) && -- !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII) && -- (state->interface == PHY_INTERFACE_MODE_SGMII || -- phy_interface_mode_is_8023z(state->interface)))) { -- linkmode_zero(supported); -- return; -- } -- - phylink_set_port_modes(mask); - phylink_set(mask, Autoneg); - -@@ -612,7 +596,6 @@ static void mtk_validate(struct phylink_ - case PHY_INTERFACE_MODE_MII: - case PHY_INTERFACE_MODE_RMII: - case PHY_INTERFACE_MODE_REVMII: -- case PHY_INTERFACE_MODE_NA: - default: - phylink_set(mask, 10baseT_Half); - phylink_set(mask, 10baseT_Full); -@@ -621,23 +604,6 @@ static void mtk_validate(struct phylink_ - break; - } - -- if (state->interface == PHY_INTERFACE_MODE_NA) { -- if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) { -- phylink_set(mask, 1000baseT_Full); -- phylink_set(mask, 1000baseX_Full); -- phylink_set(mask, 2500baseX_Full); -- } -- if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) { -- phylink_set(mask, 1000baseT_Full); -- phylink_set(mask, 1000baseT_Half); -- phylink_set(mask, 1000baseX_Full); -- } -- if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GEPHY)) { -- phylink_set(mask, 1000baseT_Full); -- phylink_set(mask, 1000baseT_Half); -- } -- } -- - phylink_set(mask, Pause); - phylink_set(mask, Asym_Pause); - diff --git a/target/linux/generic/backport-6.1/704-03-v5.17-net-mtk_eth_soc-drop-use-of-phylink_helper_basex_spe.patch b/target/linux/generic/backport-6.1/704-03-v5.17-net-mtk_eth_soc-drop-use-of-phylink_helper_basex_spe.patch deleted file mode 100644 index f8cc8105a4c0..000000000000 --- a/target/linux/generic/backport-6.1/704-03-v5.17-net-mtk_eth_soc-drop-use-of-phylink_helper_basex_spe.patch +++ /dev/null @@ -1,42 +0,0 @@ -From 71d927494463c4f016d828e1134da26b7e961af5 Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Tue, 16 Nov 2021 10:06:53 +0000 -Subject: [PATCH] net: mtk_eth_soc: drop use of phylink_helper_basex_speed() - -Now that we have a better method to select SFP interface modes, we -no longer need to use phylink_helper_basex_speed() in a driver's -validation function, and we can also get rid of our hack to indicate -both 1000base-X and 2500base-X if the comphy is present to make that -work. Remove this hack and use of phylink_helper_basex_speed(). - -Signed-off-by: Russell King (Oracle) -Signed-off-by: David S. 
Miller ---- - drivers/net/ethernet/mediatek/mtk_eth_soc.c | 8 ++------ - 1 file changed, 2 insertions(+), 6 deletions(-) - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -578,8 +578,9 @@ static void mtk_validate(struct phylink_ - phylink_set(mask, 1000baseT_Full); - break; - case PHY_INTERFACE_MODE_1000BASEX: -- case PHY_INTERFACE_MODE_2500BASEX: - phylink_set(mask, 1000baseX_Full); -+ break; -+ case PHY_INTERFACE_MODE_2500BASEX: - phylink_set(mask, 2500baseX_Full); - break; - case PHY_INTERFACE_MODE_GMII: -@@ -609,11 +610,6 @@ static void mtk_validate(struct phylink_ - - linkmode_and(supported, supported, mask); - linkmode_and(state->advertising, state->advertising, mask); -- -- /* We can only operate at 2500BaseX or 1000BaseX. If requested -- * to advertise both, only report advertising at 2500BaseX. -- */ -- phylink_helper_basex_speed(state); - } - - static const struct phylink_mac_ops mtk_phylink_ops = { diff --git a/target/linux/generic/backport-6.1/704-04-v5.17-net-mtk_eth_soc-use-phylink_generic_validate.patch b/target/linux/generic/backport-6.1/704-04-v5.17-net-mtk_eth_soc-use-phylink_generic_validate.patch deleted file mode 100644 index f695991ec1ff..000000000000 --- a/target/linux/generic/backport-6.1/704-04-v5.17-net-mtk_eth_soc-use-phylink_generic_validate.patch +++ /dev/null @@ -1,84 +0,0 @@ -From a4238f6ce151afa331375d74a5033b76da637644 Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Tue, 16 Nov 2021 10:06:58 +0000 -Subject: [PATCH] net: mtk_eth_soc: use phylink_generic_validate() - -mtk_eth_soc has no special behaviour in its validation implementation, -so can be switched to phylink_generic_validate(). - -Signed-off-by: Russell King (Oracle) -Signed-off-by: David S. 
Miller ---- - drivers/net/ethernet/mediatek/mtk_eth_soc.c | 53 ++------------------- - 1 file changed, 4 insertions(+), 49 deletions(-) - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -564,56 +564,8 @@ static void mtk_mac_link_up(struct phyli - mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); - } - --static void mtk_validate(struct phylink_config *config, -- unsigned long *supported, -- struct phylink_link_state *state) --{ -- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; -- -- phylink_set_port_modes(mask); -- phylink_set(mask, Autoneg); -- -- switch (state->interface) { -- case PHY_INTERFACE_MODE_TRGMII: -- phylink_set(mask, 1000baseT_Full); -- break; -- case PHY_INTERFACE_MODE_1000BASEX: -- phylink_set(mask, 1000baseX_Full); -- break; -- case PHY_INTERFACE_MODE_2500BASEX: -- phylink_set(mask, 2500baseX_Full); -- break; -- case PHY_INTERFACE_MODE_GMII: -- case PHY_INTERFACE_MODE_RGMII: -- case PHY_INTERFACE_MODE_RGMII_ID: -- case PHY_INTERFACE_MODE_RGMII_RXID: -- case PHY_INTERFACE_MODE_RGMII_TXID: -- phylink_set(mask, 1000baseT_Half); -- fallthrough; -- case PHY_INTERFACE_MODE_SGMII: -- phylink_set(mask, 1000baseT_Full); -- phylink_set(mask, 1000baseX_Full); -- fallthrough; -- case PHY_INTERFACE_MODE_MII: -- case PHY_INTERFACE_MODE_RMII: -- case PHY_INTERFACE_MODE_REVMII: -- default: -- phylink_set(mask, 10baseT_Half); -- phylink_set(mask, 10baseT_Full); -- phylink_set(mask, 100baseT_Half); -- phylink_set(mask, 100baseT_Full); -- break; -- } -- -- phylink_set(mask, Pause); -- phylink_set(mask, Asym_Pause); -- -- linkmode_and(supported, supported, mask); -- linkmode_and(state->advertising, state->advertising, mask); --} -- - static const struct phylink_mac_ops mtk_phylink_ops = { -- .validate = mtk_validate, -+ .validate = phylink_generic_validate, - .mac_pcs_get_state = mtk_mac_pcs_get_state, - .mac_an_restart = mtk_mac_an_restart, - .mac_config = mtk_mac_config, -@@ -3317,6 +3269,9 @@ static int mtk_add_mac(struct mtk_eth *e - - mac->phylink_config.dev = ð->netdev[id]->dev; - mac->phylink_config.type = PHYLINK_NETDEV; -+ mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | -+ MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD; -+ - __set_bit(PHY_INTERFACE_MODE_MII, - mac->phylink_config.supported_interfaces); - __set_bit(PHY_INTERFACE_MODE_GMII, diff --git a/target/linux/generic/backport-6.1/704-05-v5.17-net-mtk_eth_soc-mark-as-a-legacy_pre_march2020-drive.patch b/target/linux/generic/backport-6.1/704-05-v5.17-net-mtk_eth_soc-mark-as-a-legacy_pre_march2020-drive.patch deleted file mode 100644 index cbff1bfbbc4c..000000000000 --- a/target/linux/generic/backport-6.1/704-05-v5.17-net-mtk_eth_soc-mark-as-a-legacy_pre_march2020-drive.patch +++ /dev/null @@ -1,29 +0,0 @@ -From b06515367facfadcf5e70cf6f39db749cf4eb5e3 Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Thu, 9 Dec 2021 13:11:43 +0000 -Subject: [PATCH] net: mtk_eth_soc: mark as a legacy_pre_march2020 driver - -mtk_eth_soc has not been updated for commit 7cceb599d15d ("net: phylink: -avoid mac_config calls"), and makes use of state->speed and -state->duplex in contravention of the phylink documentation. This makes -reliant on the legacy behaviours, so mark it as a legacy driver. 
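For contrast with the legacy behaviour described above, a converted (non-legacy) driver takes speed and duplex from mac_link_up(), where phylink guarantees they are valid. A minimal sketch of that pattern follows; the foo_* helpers are hypothetical stand-ins for real register programming:

	static void foo_mac_link_up(struct phylink_config *config,
				    struct phy_device *phy, unsigned int mode,
				    phy_interface_t interface, int speed,
				    int duplex, bool tx_pause, bool rx_pause)
	{
		/* Unlike in mac_config(), speed/duplex/pause are authoritative
		 * here, so this is where the MAC gets programmed.
		 */
		foo_hw_set_speed_duplex(config, speed, duplex);
		foo_hw_set_pause(config, tx_pause, rx_pause);
	}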
- -Signed-off-by: Russell King (Oracle) -Signed-off-by: Jakub Kicinski ---- - drivers/net/ethernet/mediatek/mtk_eth_soc.c | 4 ++++ - 1 file changed, 4 insertions(+) - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -3269,6 +3269,10 @@ static int mtk_add_mac(struct mtk_eth *e - - mac->phylink_config.dev = ð->netdev[id]->dev; - mac->phylink_config.type = PHYLINK_NETDEV; -+ /* This driver makes use of state->speed/state->duplex in -+ * mac_config -+ */ -+ mac->phylink_config.legacy_pre_march2020 = true; - mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | - MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD; - diff --git a/target/linux/generic/backport-6.1/704-06-v5.19-eth-mtk_eth_soc-remove-a-copy-of-the-NAPI_POLL_WEIGH.patch b/target/linux/generic/backport-6.1/704-06-v5.19-eth-mtk_eth_soc-remove-a-copy-of-the-NAPI_POLL_WEIGH.patch deleted file mode 100644 index c0b4a61cb6e1..000000000000 --- a/target/linux/generic/backport-6.1/704-06-v5.19-eth-mtk_eth_soc-remove-a-copy-of-the-NAPI_POLL_WEIGH.patch +++ /dev/null @@ -1,40 +0,0 @@ -From 889e3691b9d6573de133da1f5e78f590e52152cd Mon Sep 17 00:00:00 2001 -From: Jakub Kicinski -Date: Thu, 28 Apr 2022 14:23:13 -0700 -Subject: [PATCH] eth: mtk_eth_soc: remove a copy of the NAPI_POLL_WEIGHT - define - -Defining local versions of NAPI_POLL_WEIGHT with the same -values in the drivers just makes refactoring harder. - -Signed-off-by: Jakub Kicinski -Signed-off-by: David S. Miller ---- - drivers/net/ethernet/mediatek/mtk_eth_soc.c | 4 ++-- - drivers/net/ethernet/mediatek/mtk_eth_soc.h | 1 - - 2 files changed, 2 insertions(+), 3 deletions(-) - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -3568,9 +3568,9 @@ static int mtk_probe(struct platform_dev - */ - init_dummy_netdev(ð->dummy_dev); - netif_napi_add(ð->dummy_dev, ð->tx_napi, mtk_napi_tx, -- MTK_NAPI_WEIGHT); -+ NAPI_POLL_WEIGHT); - netif_napi_add(ð->dummy_dev, ð->rx_napi, mtk_napi_rx, -- MTK_NAPI_WEIGHT); -+ NAPI_POLL_WEIGHT); - - platform_set_drvdata(pdev, eth); - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -25,7 +25,6 @@ - #define MTK_TX_DMA_BUF_LEN 0x3fff - #define MTK_TX_DMA_BUF_LEN_V2 0xffff - #define MTK_DMA_SIZE 512 --#define MTK_NAPI_WEIGHT 64 - #define MTK_MAC_COUNT 2 - #define MTK_RX_ETH_HLEN (ETH_HLEN + ETH_FCS_LEN) - #define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN) diff --git a/target/linux/generic/backport-6.1/704-07-v5.19-mtk_eth_soc-remove-unused-mac-mode.patch b/target/linux/generic/backport-6.1/704-07-v5.19-mtk_eth_soc-remove-unused-mac-mode.patch deleted file mode 100644 index 5940ac27df9c..000000000000 --- a/target/linux/generic/backport-6.1/704-07-v5.19-mtk_eth_soc-remove-unused-mac-mode.patch +++ /dev/null @@ -1,35 +0,0 @@ -From 0600bdde1fae75fb9bad72033d28edddc72b44b2 Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Wed, 18 May 2022 15:54:31 +0100 -Subject: [PATCH 01/12] net: mtk_eth_soc: remove unused mac->mode - -mac->mode is only ever written to in one location, and is thus -superflous. Remove it. 
- -Signed-off-by: Russell King (Oracle) -Signed-off-by: Jakub Kicinski ---- - drivers/net/ethernet/mediatek/mtk_eth_soc.c | 1 - - drivers/net/ethernet/mediatek/mtk_eth_soc.h | 1 - - 2 files changed, 2 deletions(-) - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -3264,7 +3264,6 @@ static int mtk_add_mac(struct mtk_eth *e - - /* mac config is not set */ - mac->interface = PHY_INTERFACE_MODE_NA; -- mac->mode = MLO_AN_PHY; - mac->speed = SPEED_UNKNOWN; - - mac->phylink_config.dev = ð->netdev[id]->dev; ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -1086,7 +1086,6 @@ struct mtk_eth { - struct mtk_mac { - int id; - phy_interface_t interface; -- unsigned int mode; - int speed; - struct device_node *of_node; - struct phylink *phylink; diff --git a/target/linux/generic/backport-6.1/704-08-v5.19-net-mtk_eth_soc-remove-unused-sgmii-flags.patch b/target/linux/generic/backport-6.1/704-08-v5.19-net-mtk_eth_soc-remove-unused-sgmii-flags.patch deleted file mode 100644 index a15914bd5531..000000000000 --- a/target/linux/generic/backport-6.1/704-08-v5.19-net-mtk_eth_soc-remove-unused-sgmii-flags.patch +++ /dev/null @@ -1,40 +0,0 @@ -From 5a7a2f4b29d7546244da7d8bbc1962fce5b230f2 Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Wed, 18 May 2022 15:54:36 +0100 -Subject: [PATCH 02/12] net: mtk_eth_soc: remove unused sgmii flags - -The "flags" member of struct mtk_sgmii appears to be unused, as are -the MTK_SGMII_PHYSPEED_* and MTK_HAS_FLAGS() macros. Remove them. - -Signed-off-by: Russell King (Oracle) -Signed-off-by: Jakub Kicinski ---- - drivers/net/ethernet/mediatek/mtk_eth_soc.h | 8 -------- - 1 file changed, 8 deletions(-) - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -957,23 +957,15 @@ struct mtk_soc_data { - /* currently no SoC has more than 2 macs */ - #define MTK_MAX_DEVS 2 - --#define MTK_SGMII_PHYSPEED_AN BIT(31) --#define MTK_SGMII_PHYSPEED_MASK GENMASK(2, 0) --#define MTK_SGMII_PHYSPEED_1000 BIT(0) --#define MTK_SGMII_PHYSPEED_2500 BIT(1) --#define MTK_HAS_FLAGS(flags, _x) (((flags) & (_x)) == (_x)) -- - /* struct mtk_sgmii - This is the structure holding sgmii regmap and its - * characteristics - * @regmap: The register map pointing at the range used to setup - * SGMII modes -- * @flags: The enum refers to which mode the sgmii wants to run on - * @ana_rgc3: The offset refers to register ANA_RGC3 related to regmap - */ - - struct mtk_sgmii { - struct regmap *regmap[MTK_MAX_DEVS]; -- u32 flags[MTK_MAX_DEVS]; - u32 ana_rgc3; - }; - diff --git a/target/linux/generic/backport-6.1/704-09-v5.19-net-mtk_eth_soc-add-mask-and-update-PCS-speed-defini.patch b/target/linux/generic/backport-6.1/704-09-v5.19-net-mtk_eth_soc-add-mask-and-update-PCS-speed-defini.patch deleted file mode 100644 index e16bc875e523..000000000000 --- a/target/linux/generic/backport-6.1/704-09-v5.19-net-mtk_eth_soc-add-mask-and-update-PCS-speed-defini.patch +++ /dev/null @@ -1,40 +0,0 @@ -From bc5e93e0cd22e360eda23859b939280205567580 Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Wed, 18 May 2022 15:54:42 +0100 -Subject: [PATCH 03/12] net: mtk_eth_soc: add mask and update PCS speed - definitions - -The PCS speed setting is a two bit field, but it is defined as two -separate bits. Add a bitfield mask for the speed definitions, an - use the FIELD_PREP() macro to define each PCS speed. 
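To make the intent of the new definitions concrete, here is a small illustrative helper (the foo_* name is invented; the masks and regmap layout are the ones used by mtk_sgmii.c in this series) showing the usual read-modify-write of the two-bit speed field:

	static void foo_sgmii_force_speed_1000(struct mtk_sgmii *ss, int id)
	{
		unsigned int val;

		regmap_read(ss->regmap[id], SGMSYS_SGMII_MODE, &val);
		val &= ~SGMII_SPEED_MASK;
		val |= SGMII_SPEED_1000;	/* FIELD_PREP(SGMII_SPEED_MASK, 2) */
		regmap_write(ss->regmap[id], SGMSYS_SGMII_MODE, val);
	}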
- -Signed-off-by: Russell King (Oracle) -Signed-off-by: Jakub Kicinski ---- - drivers/net/ethernet/mediatek/mtk_eth_soc.h | 8 +++++--- - 1 file changed, 5 insertions(+), 3 deletions(-) - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -17,6 +17,7 @@ - #include - #include - #include -+#include - #include "mtk_ppe.h" - - #define MTK_QDMA_PAGE_SIZE 2048 -@@ -474,9 +475,10 @@ - #define SGMSYS_SGMII_MODE 0x20 - #define SGMII_IF_MODE_BIT0 BIT(0) - #define SGMII_SPEED_DUPLEX_AN BIT(1) --#define SGMII_SPEED_10 0x0 --#define SGMII_SPEED_100 BIT(2) --#define SGMII_SPEED_1000 BIT(3) -+#define SGMII_SPEED_MASK GENMASK(3, 2) -+#define SGMII_SPEED_10 FIELD_PREP(SGMII_SPEED_MASK, 0) -+#define SGMII_SPEED_100 FIELD_PREP(SGMII_SPEED_MASK, 1) -+#define SGMII_SPEED_1000 FIELD_PREP(SGMII_SPEED_MASK, 2) - #define SGMII_DUPLEX_FULL BIT(4) - #define SGMII_IF_MODE_BIT5 BIT(5) - #define SGMII_REMOTE_FAULT_DIS BIT(8) diff --git a/target/linux/generic/backport-6.1/704-10-v5.19-net-mtk_eth_soc-correct-802.3z-speed-setting.patch b/target/linux/generic/backport-6.1/704-10-v5.19-net-mtk_eth_soc-correct-802.3z-speed-setting.patch deleted file mode 100644 index fb1ee4e310eb..000000000000 --- a/target/linux/generic/backport-6.1/704-10-v5.19-net-mtk_eth_soc-correct-802.3z-speed-setting.patch +++ /dev/null @@ -1,60 +0,0 @@ -From 7da3f901f8ecb425105fad39a0f5de73306abe52 Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Wed, 18 May 2022 15:54:47 +0100 -Subject: [PATCH 04/12] net: mtk_eth_soc: correct 802.3z speed setting - -Phylink does not guarantee that state->speed will be set correctly in -the mac_config() call, so it's a bug that the driver makes use of it. -Moreover, it is making use of it in a function that is only ever called -for 1000BASE-X and 2500BASE-X which operate at a fixed speed which -happens to be the same setting irrespective of the interface mode. We -can simply remove the switch statement and just set the SGMII interface -speed. - -Signed-off-by: Russell King (Oracle) -Signed-off-by: Jakub Kicinski ---- - drivers/net/ethernet/mediatek/mtk_sgmii.c | 18 +++++------------- - 1 file changed, 5 insertions(+), 13 deletions(-) - ---- a/drivers/net/ethernet/mediatek/mtk_sgmii.c -+++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c -@@ -34,6 +34,7 @@ int mtk_sgmii_init(struct mtk_sgmii *ss, - return 0; - } - -+/* For SGMII interface mode */ - int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id) - { - unsigned int val; -@@ -60,6 +61,9 @@ int mtk_sgmii_setup_mode_an(struct mtk_s - return 0; - } - -+/* For 1000BASE-X and 2500BASE-X interface modes, which operate at a -+ * fixed speed. 
-+ */ - int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id, - const struct phylink_link_state *state) - { -@@ -82,19 +86,7 @@ int mtk_sgmii_setup_mode_force(struct mt - /* SGMII force mode setting */ - regmap_read(ss->regmap[id], SGMSYS_SGMII_MODE, &val); - val &= ~SGMII_IF_MODE_MASK; -- -- switch (state->speed) { -- case SPEED_10: -- val |= SGMII_SPEED_10; -- break; -- case SPEED_100: -- val |= SGMII_SPEED_100; -- break; -- case SPEED_2500: -- case SPEED_1000: -- val |= SGMII_SPEED_1000; -- break; -- } -+ val |= SGMII_SPEED_1000; - - if (state->duplex == DUPLEX_FULL) - val |= SGMII_DUPLEX_FULL; diff --git a/target/linux/generic/backport-6.1/704-11-v5.19-net-mtk_eth_soc-correct-802.3z-duplex-setting.patch b/target/linux/generic/backport-6.1/704-11-v5.19-net-mtk_eth_soc-correct-802.3z-duplex-setting.patch deleted file mode 100644 index 78444903a8c2..000000000000 --- a/target/linux/generic/backport-6.1/704-11-v5.19-net-mtk_eth_soc-correct-802.3z-duplex-setting.patch +++ /dev/null @@ -1,101 +0,0 @@ -From a459187390bb221827f9c07866c3a5ffbdf9622b Mon Sep 17 00:00:00 2001 -From: Russell King -Date: Wed, 18 May 2022 15:54:52 +0100 -Subject: [PATCH 05/12] net: mtk_eth_soc: correct 802.3z duplex setting - -Phylink does not guarantee that state->duplex will be set correctly in -the mac_config() call, so it's a bug that the driver makes use of it. - -Move the 802.3z PCS duplex configuration to mac_link_up(). - -Signed-off-by: Russell King -Signed-off-by: Jakub Kicinski ---- - drivers/net/ethernet/mediatek/mtk_eth_soc.c | 16 +++++++++++---- - drivers/net/ethernet/mediatek/mtk_eth_soc.h | 1 + - drivers/net/ethernet/mediatek/mtk_sgmii.c | 22 +++++++++++++++------ - 3 files changed, 29 insertions(+), 10 deletions(-) - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -533,8 +533,18 @@ static void mtk_mac_link_up(struct phyli - { - struct mtk_mac *mac = container_of(config, struct mtk_mac, - phylink_config); -- u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); -+ u32 mcr; - -+ if (phy_interface_mode_is_8023z(interface)) { -+ struct mtk_eth *eth = mac->hw; -+ -+ /* Decide how GMAC and SGMIISYS be mapped */ -+ int sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ? 
-+ 0 : mac->id; -+ mtk_sgmii_link_up(eth->sgmii, sid, speed, duplex); -+ } -+ -+ mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); - mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 | - MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC | - MAC_MCR_FORCE_RX_FC); -@@ -3268,9 +3278,7 @@ static int mtk_add_mac(struct mtk_eth *e - - mac->phylink_config.dev = ð->netdev[id]->dev; - mac->phylink_config.type = PHYLINK_NETDEV; -- /* This driver makes use of state->speed/state->duplex in -- * mac_config -- */ -+ /* This driver makes use of state->speed in mac_config */ - mac->phylink_config.legacy_pre_march2020 = true; - mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | - MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD; ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -1104,6 +1104,7 @@ int mtk_sgmii_init(struct mtk_sgmii *ss, - int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id); - int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id, - const struct phylink_link_state *state); -+void mtk_sgmii_link_up(struct mtk_sgmii *ss, int id, int speed, int duplex); - void mtk_sgmii_restart_an(struct mtk_eth *eth, int mac_id); - - int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id); ---- a/drivers/net/ethernet/mediatek/mtk_sgmii.c -+++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c -@@ -83,14 +83,10 @@ int mtk_sgmii_setup_mode_force(struct mt - val &= ~SGMII_AN_ENABLE; - regmap_write(ss->regmap[id], SGMSYS_PCS_CONTROL_1, val); - -- /* SGMII force mode setting */ -+ /* Set the speed etc but leave the duplex unchanged */ - regmap_read(ss->regmap[id], SGMSYS_SGMII_MODE, &val); -- val &= ~SGMII_IF_MODE_MASK; -+ val &= SGMII_DUPLEX_FULL | ~SGMII_IF_MODE_MASK; - val |= SGMII_SPEED_1000; -- -- if (state->duplex == DUPLEX_FULL) -- val |= SGMII_DUPLEX_FULL; -- - regmap_write(ss->regmap[id], SGMSYS_SGMII_MODE, val); - - /* Release PHYA power down state */ -@@ -101,6 +97,20 @@ int mtk_sgmii_setup_mode_force(struct mt - return 0; - } - -+/* For 1000BASE-X and 2500BASE-X interface modes */ -+void mtk_sgmii_link_up(struct mtk_sgmii *ss, int id, int speed, int duplex) -+{ -+ unsigned int val; -+ -+ /* SGMII force duplex setting */ -+ regmap_read(ss->regmap[id], SGMSYS_SGMII_MODE, &val); -+ val &= ~SGMII_DUPLEX_FULL; -+ if (duplex == DUPLEX_FULL) -+ val |= SGMII_DUPLEX_FULL; -+ -+ regmap_write(ss->regmap[id], SGMSYS_SGMII_MODE, val); -+} -+ - void mtk_sgmii_restart_an(struct mtk_eth *eth, int mac_id) - { - struct mtk_sgmii *ss = eth->sgmii; diff --git a/target/linux/generic/backport-6.1/704-12-v5.19-net-mtk_eth_soc-stop-passing-phylink-state-to-sgmii-.patch b/target/linux/generic/backport-6.1/704-12-v5.19-net-mtk_eth_soc-stop-passing-phylink-state-to-sgmii-.patch deleted file mode 100644 index 6556bb7d07d6..000000000000 --- a/target/linux/generic/backport-6.1/704-12-v5.19-net-mtk_eth_soc-stop-passing-phylink-state-to-sgmii-.patch +++ /dev/null @@ -1,60 +0,0 @@ -From 4ce5a0bd3958ed248f0325bfcb95339f7c74feb2 Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Wed, 18 May 2022 15:54:57 +0100 -Subject: [PATCH 06/12] net: mtk_eth_soc: stop passing phylink state to sgmii - setup - -Now that mtk_sgmii_setup_mode_force() only uses the interface mode -from the phylink state, pass just the interface mode into this -function. 
- -Signed-off-by: Russell King (Oracle) -Signed-off-by: Jakub Kicinski ---- - drivers/net/ethernet/mediatek/mtk_eth_soc.c | 2 +- - drivers/net/ethernet/mediatek/mtk_eth_soc.h | 2 +- - drivers/net/ethernet/mediatek/mtk_sgmii.c | 4 ++-- - 3 files changed, 4 insertions(+), 4 deletions(-) - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -437,7 +437,7 @@ static void mtk_mac_config(struct phylin - /* Setup SGMIISYS with the determined property */ - if (state->interface != PHY_INTERFACE_MODE_SGMII) - err = mtk_sgmii_setup_mode_force(eth->sgmii, sid, -- state); -+ state->interface); - else if (phylink_autoneg_inband(mode)) - err = mtk_sgmii_setup_mode_an(eth->sgmii, sid); - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -1103,7 +1103,7 @@ int mtk_sgmii_init(struct mtk_sgmii *ss, - u32 ana_rgc3); - int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id); - int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id, -- const struct phylink_link_state *state); -+ phy_interface_t interface); - void mtk_sgmii_link_up(struct mtk_sgmii *ss, int id, int speed, int duplex); - void mtk_sgmii_restart_an(struct mtk_eth *eth, int mac_id); - ---- a/drivers/net/ethernet/mediatek/mtk_sgmii.c -+++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c -@@ -65,7 +65,7 @@ int mtk_sgmii_setup_mode_an(struct mtk_s - * fixed speed. - */ - int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id, -- const struct phylink_link_state *state) -+ phy_interface_t interface) - { - unsigned int val; - -@@ -74,7 +74,7 @@ int mtk_sgmii_setup_mode_force(struct mt - - regmap_read(ss->regmap[id], ss->ana_rgc3, &val); - val &= ~RG_PHY_SPEED_MASK; -- if (state->interface == PHY_INTERFACE_MODE_2500BASEX) -+ if (interface == PHY_INTERFACE_MODE_2500BASEX) - val |= RG_PHY_SPEED_3_125G; - regmap_write(ss->regmap[id], ss->ana_rgc3, val); - diff --git a/target/linux/generic/backport-6.1/704-13-v5.19-net-mtk_eth_soc-provide-mtk_sgmii_config.patch b/target/linux/generic/backport-6.1/704-13-v5.19-net-mtk_eth_soc-provide-mtk_sgmii_config.patch deleted file mode 100644 index 0e22c7fd6741..000000000000 --- a/target/linux/generic/backport-6.1/704-13-v5.19-net-mtk_eth_soc-provide-mtk_sgmii_config.patch +++ /dev/null @@ -1,89 +0,0 @@ -From 1ec619ee4a052fb9ac48b57554ac2722a0bfe73c Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Wed, 18 May 2022 15:55:02 +0100 -Subject: [PATCH 07/12] net: mtk_eth_soc: provide mtk_sgmii_config() - -Provide mtk_sgmii_config() to wrap up the decisions about which SGMII -configuration will be called. 
- -Signed-off-by: Russell King (Oracle) -Signed-off-by: Jakub Kicinski ---- - drivers/net/ethernet/mediatek/mtk_eth_soc.c | 7 +------ - drivers/net/ethernet/mediatek/mtk_eth_soc.h | 5 ++--- - drivers/net/ethernet/mediatek/mtk_sgmii.c | 20 +++++++++++++++++--- - 3 files changed, 20 insertions(+), 12 deletions(-) - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -435,12 +435,7 @@ static void mtk_mac_config(struct phylin - 0 : mac->id; - - /* Setup SGMIISYS with the determined property */ -- if (state->interface != PHY_INTERFACE_MODE_SGMII) -- err = mtk_sgmii_setup_mode_force(eth->sgmii, sid, -- state->interface); -- else if (phylink_autoneg_inband(mode)) -- err = mtk_sgmii_setup_mode_an(eth->sgmii, sid); -- -+ err = mtk_sgmii_config(eth->sgmii, sid, mode, state->interface); - if (err) - goto init_err; - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -1101,9 +1101,8 @@ u32 mtk_r32(struct mtk_eth *eth, unsigne - - int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *np, - u32 ana_rgc3); --int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id); --int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id, -- phy_interface_t interface); -+int mtk_sgmii_config(struct mtk_sgmii *ss, int id, unsigned int mode, -+ phy_interface_t interface); - void mtk_sgmii_link_up(struct mtk_sgmii *ss, int id, int speed, int duplex); - void mtk_sgmii_restart_an(struct mtk_eth *eth, int mac_id); - ---- a/drivers/net/ethernet/mediatek/mtk_sgmii.c -+++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c -@@ -35,7 +35,7 @@ int mtk_sgmii_init(struct mtk_sgmii *ss, - } - - /* For SGMII interface mode */ --int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id) -+static int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id) - { - unsigned int val; - -@@ -64,8 +64,8 @@ int mtk_sgmii_setup_mode_an(struct mtk_s - /* For 1000BASE-X and 2500BASE-X interface modes, which operate at a - * fixed speed. 
- */ --int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id, -- phy_interface_t interface) -+static int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id, -+ phy_interface_t interface) - { - unsigned int val; - -@@ -97,6 +97,20 @@ int mtk_sgmii_setup_mode_force(struct mt - return 0; - } - -+int mtk_sgmii_config(struct mtk_sgmii *ss, int id, unsigned int mode, -+ phy_interface_t interface) -+{ -+ int err = 0; -+ -+ /* Setup SGMIISYS with the determined property */ -+ if (interface != PHY_INTERFACE_MODE_SGMII) -+ err = mtk_sgmii_setup_mode_force(ss, id, interface); -+ else if (phylink_autoneg_inband(mode)) -+ err = mtk_sgmii_setup_mode_an(ss, id); -+ -+ return err; -+} -+ - /* For 1000BASE-X and 2500BASE-X interface modes */ - void mtk_sgmii_link_up(struct mtk_sgmii *ss, int id, int speed, int duplex) - { diff --git a/target/linux/generic/backport-6.1/704-14-v5.19-net-mtk_eth_soc-add-fixme-comment-for-state-speed-us.patch b/target/linux/generic/backport-6.1/704-14-v5.19-net-mtk_eth_soc-add-fixme-comment-for-state-speed-us.patch deleted file mode 100644 index 8080a2ca441e..000000000000 --- a/target/linux/generic/backport-6.1/704-14-v5.19-net-mtk_eth_soc-add-fixme-comment-for-state-speed-us.patch +++ /dev/null @@ -1,38 +0,0 @@ -From 650a49bc65df6b0e0051a8f62d7c22d95a8f350d Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Wed, 18 May 2022 15:55:07 +0100 -Subject: [PATCH 08/12] net: mtk_eth_soc: add fixme comment for state->speed - use - -Add a fixme comment for the last remaining incorrect usage of -state->speed in the mac_config() method, which is strangely in a code -path which is only run when the PHY interface mode changes. - -This means if we are in RGMII mode, changes in state->speed will not -cause the INTF_MODE, TRGMII_RCK_CTRL and TRGMII_TCK_CTRL registers to -be set according to the speed, nor will the TRGPLL clock be set to the -correct value. - -Signed-off-by: Russell King (Oracle) -Signed-off-by: Jakub Kicinski ---- - drivers/net/ethernet/mediatek/mtk_eth_soc.c | 8 ++++++++ - 1 file changed, 8 insertions(+) - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -374,6 +374,14 @@ static void mtk_mac_config(struct phylin - state->interface)) - goto err_phy; - } else { -+ /* FIXME: this is incorrect. Not only does it -+ * use state->speed (which is not guaranteed -+ * to be correct) but it also makes use of it -+ * in a code path that will only be reachable -+ * when the PHY interface mode changes, not -+ * when the speed changes. Consequently, RGMII -+ * is probably broken. -+ */ - mtk_gmac0_rgmii_adjust(mac->hw, - state->interface, - state->speed); diff --git a/target/linux/generic/backport-6.1/704-15-v5.19-net-mtk_eth_soc-move-MAC_MCR-setting-to-mac_finish.patch b/target/linux/generic/backport-6.1/704-15-v5.19-net-mtk_eth_soc-move-MAC_MCR-setting-to-mac_finish.patch deleted file mode 100644 index 337c6112d66f..000000000000 --- a/target/linux/generic/backport-6.1/704-15-v5.19-net-mtk_eth_soc-move-MAC_MCR-setting-to-mac_finish.patch +++ /dev/null @@ -1,79 +0,0 @@ -From 0e37ad71b2ff772009595002da2860999e98e14e Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Wed, 18 May 2022 15:55:12 +0100 -Subject: [PATCH 09/12] net: mtk_eth_soc: move MAC_MCR setting to mac_finish() - -Move the setting of the MTK_MAC_MCR register from the end of mac_config -into the phylink mac_finish() method, to keep it as the very last write -that is done during configuration. 
- -Signed-off-by: Russell King (Oracle) -Signed-off-by: Jakub Kicinski ---- - drivers/net/ethernet/mediatek/mtk_eth_soc.c | 33 ++++++++++++++------- - 1 file changed, 22 insertions(+), 11 deletions(-) - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -316,8 +316,8 @@ static void mtk_mac_config(struct phylin - struct mtk_mac *mac = container_of(config, struct mtk_mac, - phylink_config); - struct mtk_eth *eth = mac->hw; -- u32 mcr_cur, mcr_new, sid, i; - int val, ge_mode, err = 0; -+ u32 sid, i; - - /* MT76x8 has no hardware settings between for the MAC */ - if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) && -@@ -455,6 +455,25 @@ static void mtk_mac_config(struct phylin - return; - } - -+ return; -+ -+err_phy: -+ dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__, -+ mac->id, phy_modes(state->interface)); -+ return; -+ -+init_err: -+ dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__, -+ mac->id, phy_modes(state->interface), err); -+} -+ -+static int mtk_mac_finish(struct phylink_config *config, unsigned int mode, -+ phy_interface_t interface) -+{ -+ struct mtk_mac *mac = container_of(config, struct mtk_mac, -+ phylink_config); -+ u32 mcr_cur, mcr_new; -+ - /* Setup gmac */ - mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); - mcr_new = mcr_cur; -@@ -466,16 +485,7 @@ static void mtk_mac_config(struct phylin - if (mcr_new != mcr_cur) - mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id)); - -- return; -- --err_phy: -- dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__, -- mac->id, phy_modes(state->interface)); -- return; -- --init_err: -- dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__, -- mac->id, phy_modes(state->interface), err); -+ return 0; - } - - static void mtk_mac_pcs_get_state(struct phylink_config *config, -@@ -582,6 +592,7 @@ static const struct phylink_mac_ops mtk_ - .mac_pcs_get_state = mtk_mac_pcs_get_state, - .mac_an_restart = mtk_mac_an_restart, - .mac_config = mtk_mac_config, -+ .mac_finish = mtk_mac_finish, - .mac_link_down = mtk_mac_link_down, - .mac_link_up = mtk_mac_link_up, - }; diff --git a/target/linux/generic/backport-6.1/704-16-v5.19-net-mtk_eth_soc-move-restoration-of-SYSCFG0-to-mac_f.patch b/target/linux/generic/backport-6.1/704-16-v5.19-net-mtk_eth_soc-move-restoration-of-SYSCFG0-to-mac_f.patch deleted file mode 100644 index b03ef436bdbb..000000000000 --- a/target/linux/generic/backport-6.1/704-16-v5.19-net-mtk_eth_soc-move-restoration-of-SYSCFG0-to-mac_f.patch +++ /dev/null @@ -1,57 +0,0 @@ -From 21089867278deb2a110b685e3cd33f64f9ce41e2 Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Wed, 18 May 2022 15:55:17 +0100 -Subject: [PATCH 10/12] net: mtk_eth_soc: move restoration of SYSCFG0 to - mac_finish() - -The SGMIISYS configuration is performed while ETHSYS_SYSCFG0 is in a -disabled state. In order to preserve this when we switch to phylink_pcs -we need to move the restoration of this register to the mac_finish() -callback. 
- -Signed-off-by: Russell King (Oracle) -Signed-off-by: Jakub Kicinski ---- - drivers/net/ethernet/mediatek/mtk_eth_soc.c | 11 +++++++++-- - drivers/net/ethernet/mediatek/mtk_eth_soc.h | 1 + - 2 files changed, 10 insertions(+), 2 deletions(-) - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -447,8 +447,8 @@ static void mtk_mac_config(struct phylin - if (err) - goto init_err; - -- regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0, -- SYSCFG0_SGMII_MASK, val); -+ /* Save the syscfg0 value for mac_finish */ -+ mac->syscfg0 = val; - } else if (phylink_autoneg_inband(mode)) { - dev_err(eth->dev, - "In-band mode not supported in non SGMII mode!\n"); -@@ -472,8 +472,15 @@ static int mtk_mac_finish(struct phylink - { - struct mtk_mac *mac = container_of(config, struct mtk_mac, - phylink_config); -+ struct mtk_eth *eth = mac->hw; - u32 mcr_cur, mcr_new; - -+ /* Enable SGMII */ -+ if (interface == PHY_INTERFACE_MODE_SGMII || -+ phy_interface_mode_is_8023z(interface)) -+ regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0, -+ SYSCFG0_SGMII_MASK, mac->syscfg0); -+ - /* Setup gmac */ - mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); - mcr_new = mcr_cur; ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -1088,6 +1088,7 @@ struct mtk_mac { - struct mtk_hw_stats *hw_stats; - __be32 hwlro_ip[MTK_MAX_LRO_IP_CNT]; - int hwlro_ip_cnt; -+ unsigned int syscfg0; - }; - - /* the struct describing the SoC. these are declared in the soc_xyz.c files */ diff --git a/target/linux/generic/backport-6.1/704-17-v5.19-net-mtk_eth_soc-convert-code-structure-to-suit-split.patch b/target/linux/generic/backport-6.1/704-17-v5.19-net-mtk_eth_soc-convert-code-structure-to-suit-split.patch deleted file mode 100644 index 4c84703cd94c..000000000000 --- a/target/linux/generic/backport-6.1/704-17-v5.19-net-mtk_eth_soc-convert-code-structure-to-suit-split.patch +++ /dev/null @@ -1,254 +0,0 @@ -From 901f3fbe13c3e56f0742e02717ccbfabbc95c463 Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Wed, 18 May 2022 15:55:22 +0100 -Subject: [PATCH 11/12] net: mtk_eth_soc: convert code structure to suit split - PCS support - -Provide a mtk_pcs structure which encapsulates everything that the PCS -functions need (the regmap and ana_rgc3 offset), and use this in the -PCS functions. Provide shim functions to convert from the existing -"mtk_sgmii_*" interface to the converted PCS functions. 
- -Signed-off-by: Russell King (Oracle) -Signed-off-by: Jakub Kicinski ---- - drivers/net/ethernet/mediatek/mtk_eth_soc.h | 15 ++- - drivers/net/ethernet/mediatek/mtk_sgmii.c | 123 +++++++++++--------- - 2 files changed, 79 insertions(+), 59 deletions(-) - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -959,16 +959,23 @@ struct mtk_soc_data { - /* currently no SoC has more than 2 macs */ - #define MTK_MAX_DEVS 2 - --/* struct mtk_sgmii - This is the structure holding sgmii regmap and its -- * characteristics -+/* struct mtk_pcs - This structure holds each sgmii regmap and associated -+ * data - * @regmap: The register map pointing at the range used to setup - * SGMII modes - * @ana_rgc3: The offset refers to register ANA_RGC3 related to regmap - */ -+struct mtk_pcs { -+ struct regmap *regmap; -+ u32 ana_rgc3; -+}; - -+/* struct mtk_sgmii - This is the structure holding sgmii regmap and its -+ * characteristics -+ * @pcs Array of individual PCS structures -+ */ - struct mtk_sgmii { -- struct regmap *regmap[MTK_MAX_DEVS]; -- u32 ana_rgc3; -+ struct mtk_pcs pcs[MTK_MAX_DEVS]; - }; - - /* struct mtk_eth - This is the main datasructure for holding the state ---- a/drivers/net/ethernet/mediatek/mtk_sgmii.c -+++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c -@@ -9,90 +9,71 @@ - - #include - #include -+#include - #include - - #include "mtk_eth_soc.h" - --int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3) --{ -- struct device_node *np; -- int i; -- -- ss->ana_rgc3 = ana_rgc3; -- -- for (i = 0; i < MTK_MAX_DEVS; i++) { -- np = of_parse_phandle(r, "mediatek,sgmiisys", i); -- if (!np) -- break; -- -- ss->regmap[i] = syscon_node_to_regmap(np); -- of_node_put(np); -- if (IS_ERR(ss->regmap[i])) -- return PTR_ERR(ss->regmap[i]); -- } -- -- return 0; --} -- - /* For SGMII interface mode */ --static int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id) -+static int mtk_pcs_setup_mode_an(struct mtk_pcs *mpcs) - { - unsigned int val; - -- if (!ss->regmap[id]) -+ if (!mpcs->regmap) - return -EINVAL; - - /* Setup the link timer and QPHY power up inside SGMIISYS */ -- regmap_write(ss->regmap[id], SGMSYS_PCS_LINK_TIMER, -+ regmap_write(mpcs->regmap, SGMSYS_PCS_LINK_TIMER, - SGMII_LINK_TIMER_DEFAULT); - -- regmap_read(ss->regmap[id], SGMSYS_SGMII_MODE, &val); -+ regmap_read(mpcs->regmap, SGMSYS_SGMII_MODE, &val); - val |= SGMII_REMOTE_FAULT_DIS; -- regmap_write(ss->regmap[id], SGMSYS_SGMII_MODE, val); -+ regmap_write(mpcs->regmap, SGMSYS_SGMII_MODE, val); - -- regmap_read(ss->regmap[id], SGMSYS_PCS_CONTROL_1, &val); -+ regmap_read(mpcs->regmap, SGMSYS_PCS_CONTROL_1, &val); - val |= SGMII_AN_RESTART; -- regmap_write(ss->regmap[id], SGMSYS_PCS_CONTROL_1, val); -+ regmap_write(mpcs->regmap, SGMSYS_PCS_CONTROL_1, val); - -- regmap_read(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, &val); -+ regmap_read(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, &val); - val &= ~SGMII_PHYA_PWD; -- regmap_write(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, val); -+ regmap_write(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, val); - - return 0; -+ - } - - /* For 1000BASE-X and 2500BASE-X interface modes, which operate at a - * fixed speed. 
- */ --static int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id, -- phy_interface_t interface) -+static int mtk_pcs_setup_mode_force(struct mtk_pcs *mpcs, -+ phy_interface_t interface) - { - unsigned int val; - -- if (!ss->regmap[id]) -+ if (!mpcs->regmap) - return -EINVAL; - -- regmap_read(ss->regmap[id], ss->ana_rgc3, &val); -+ regmap_read(mpcs->regmap, mpcs->ana_rgc3, &val); - val &= ~RG_PHY_SPEED_MASK; - if (interface == PHY_INTERFACE_MODE_2500BASEX) - val |= RG_PHY_SPEED_3_125G; -- regmap_write(ss->regmap[id], ss->ana_rgc3, val); -+ regmap_write(mpcs->regmap, mpcs->ana_rgc3, val); - - /* Disable SGMII AN */ -- regmap_read(ss->regmap[id], SGMSYS_PCS_CONTROL_1, &val); -+ regmap_read(mpcs->regmap, SGMSYS_PCS_CONTROL_1, &val); - val &= ~SGMII_AN_ENABLE; -- regmap_write(ss->regmap[id], SGMSYS_PCS_CONTROL_1, val); -+ regmap_write(mpcs->regmap, SGMSYS_PCS_CONTROL_1, val); - - /* Set the speed etc but leave the duplex unchanged */ -- regmap_read(ss->regmap[id], SGMSYS_SGMII_MODE, &val); -+ regmap_read(mpcs->regmap, SGMSYS_SGMII_MODE, &val); - val &= SGMII_DUPLEX_FULL | ~SGMII_IF_MODE_MASK; - val |= SGMII_SPEED_1000; -- regmap_write(ss->regmap[id], SGMSYS_SGMII_MODE, val); -+ regmap_write(mpcs->regmap, SGMSYS_SGMII_MODE, val); - - /* Release PHYA power down state */ -- regmap_read(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, &val); -+ regmap_read(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, &val); - val &= ~SGMII_PHYA_PWD; -- regmap_write(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, val); -+ regmap_write(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, val); - - return 0; - } -@@ -100,44 +81,76 @@ static int mtk_sgmii_setup_mode_force(st - int mtk_sgmii_config(struct mtk_sgmii *ss, int id, unsigned int mode, - phy_interface_t interface) - { -+ struct mtk_pcs *mpcs = &ss->pcs[id]; - int err = 0; - - /* Setup SGMIISYS with the determined property */ - if (interface != PHY_INTERFACE_MODE_SGMII) -- err = mtk_sgmii_setup_mode_force(ss, id, interface); -+ err = mtk_pcs_setup_mode_force(mpcs, interface); - else if (phylink_autoneg_inband(mode)) -- err = mtk_sgmii_setup_mode_an(ss, id); -+ err = mtk_pcs_setup_mode_an(mpcs); - - return err; - } - --/* For 1000BASE-X and 2500BASE-X interface modes */ --void mtk_sgmii_link_up(struct mtk_sgmii *ss, int id, int speed, int duplex) -+static void mtk_pcs_restart_an(struct mtk_pcs *mpcs) -+{ -+ unsigned int val; -+ -+ if (!mpcs->regmap) -+ return; -+ -+ regmap_read(mpcs->regmap, SGMSYS_PCS_CONTROL_1, &val); -+ val |= SGMII_AN_RESTART; -+ regmap_write(mpcs->regmap, SGMSYS_PCS_CONTROL_1, val); -+} -+ -+static void mtk_pcs_link_up(struct mtk_pcs *mpcs, int speed, int duplex) - { - unsigned int val; - - /* SGMII force duplex setting */ -- regmap_read(ss->regmap[id], SGMSYS_SGMII_MODE, &val); -+ regmap_read(mpcs->regmap, SGMSYS_SGMII_MODE, &val); - val &= ~SGMII_DUPLEX_FULL; - if (duplex == DUPLEX_FULL) - val |= SGMII_DUPLEX_FULL; - -- regmap_write(ss->regmap[id], SGMSYS_SGMII_MODE, val); -+ regmap_write(mpcs->regmap, SGMSYS_SGMII_MODE, val); -+} -+ -+/* For 1000BASE-X and 2500BASE-X interface modes */ -+void mtk_sgmii_link_up(struct mtk_sgmii *ss, int id, int speed, int duplex) -+{ -+ mtk_pcs_link_up(&ss->pcs[id], speed, duplex); -+} -+ -+int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3) -+{ -+ struct device_node *np; -+ int i; -+ -+ for (i = 0; i < MTK_MAX_DEVS; i++) { -+ np = of_parse_phandle(r, "mediatek,sgmiisys", i); -+ if (!np) -+ break; -+ -+ ss->pcs[i].ana_rgc3 = ana_rgc3; -+ ss->pcs[i].regmap = syscon_node_to_regmap(np); -+ 
of_node_put(np); -+ if (IS_ERR(ss->pcs[i].regmap)) -+ return PTR_ERR(ss->pcs[i].regmap); -+ } -+ -+ return 0; - } - - void mtk_sgmii_restart_an(struct mtk_eth *eth, int mac_id) - { -- struct mtk_sgmii *ss = eth->sgmii; -- unsigned int val, sid; -+ unsigned int sid; - - /* Decide how GMAC and SGMIISYS be mapped */ - sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ? - 0 : mac_id; - -- if (!ss->regmap[sid]) -- return; -- -- regmap_read(ss->regmap[sid], SGMSYS_PCS_CONTROL_1, &val); -- val |= SGMII_AN_RESTART; -- regmap_write(ss->regmap[sid], SGMSYS_PCS_CONTROL_1, val); -+ mtk_pcs_restart_an(ð->sgmii->pcs[sid]); - } diff --git a/target/linux/generic/backport-6.1/704-18-v5.19-net-mtk_eth_soc-partially-convert-to-phylink_pcs.patch b/target/linux/generic/backport-6.1/704-18-v5.19-net-mtk_eth_soc-partially-convert-to-phylink_pcs.patch deleted file mode 100644 index b585867935e3..000000000000 --- a/target/linux/generic/backport-6.1/704-18-v5.19-net-mtk_eth_soc-partially-convert-to-phylink_pcs.patch +++ /dev/null @@ -1,262 +0,0 @@ -From 14a44ab0330d290fade1403a920e299cc56d7300 Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Wed, 18 May 2022 15:55:28 +0100 -Subject: [PATCH 12/12] net: mtk_eth_soc: partially convert to phylink_pcs - -Partially convert mtk_eth_soc to phylink_pcs, moving the configuration, -link up and AN restart over. However, it seems mac_pcs_get_state() -doesn't actually get the state from the PCS, so we can't convert that -over without a better understanding of the hardware. - -Signed-off-by: Russell King (Oracle) -Signed-off-by: Jakub Kicinski ---- - drivers/net/ethernet/mediatek/mtk_eth_soc.c | 49 ++++++++---------- - drivers/net/ethernet/mediatek/mtk_eth_soc.h | 7 ++- - drivers/net/ethernet/mediatek/mtk_sgmii.c | 55 +++++++++++---------- - 3 files changed, 53 insertions(+), 58 deletions(-) - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -310,6 +310,25 @@ static void mtk_gmac0_rgmii_adjust(struc - mtk_w32(eth, val, TRGMII_TCK_CTRL); - } - -+static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config, -+ phy_interface_t interface) -+{ -+ struct mtk_mac *mac = container_of(config, struct mtk_mac, -+ phylink_config); -+ struct mtk_eth *eth = mac->hw; -+ unsigned int sid; -+ -+ if (interface == PHY_INTERFACE_MODE_SGMII || -+ phy_interface_mode_is_8023z(interface)) { -+ sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ? -+ 0 : mac->id; -+ -+ return mtk_sgmii_select_pcs(eth->sgmii, sid); -+ } -+ -+ return NULL; -+} -+ - static void mtk_mac_config(struct phylink_config *config, unsigned int mode, - const struct phylink_link_state *state) - { -@@ -317,7 +336,7 @@ static void mtk_mac_config(struct phylin - phylink_config); - struct mtk_eth *eth = mac->hw; - int val, ge_mode, err = 0; -- u32 sid, i; -+ u32 i; - - /* MT76x8 has no hardware settings between for the MAC */ - if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) && -@@ -438,15 +457,6 @@ static void mtk_mac_config(struct phylin - SYSCFG0_SGMII_MASK, - ~(u32)SYSCFG0_SGMII_MASK); - -- /* Decide how GMAC and SGMIISYS be mapped */ -- sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ? 
-- 0 : mac->id; -- -- /* Setup SGMIISYS with the determined property */ -- err = mtk_sgmii_config(eth->sgmii, sid, mode, state->interface); -- if (err) -- goto init_err; -- - /* Save the syscfg0 value for mac_finish */ - mac->syscfg0 = val; - } else if (phylink_autoneg_inband(mode)) { -@@ -527,14 +537,6 @@ static void mtk_mac_pcs_get_state(struct - state->pause |= MLO_PAUSE_TX; - } - --static void mtk_mac_an_restart(struct phylink_config *config) --{ -- struct mtk_mac *mac = container_of(config, struct mtk_mac, -- phylink_config); -- -- mtk_sgmii_restart_an(mac->hw, mac->id); --} -- - static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode, - phy_interface_t interface) - { -@@ -555,15 +557,6 @@ static void mtk_mac_link_up(struct phyli - phylink_config); - u32 mcr; - -- if (phy_interface_mode_is_8023z(interface)) { -- struct mtk_eth *eth = mac->hw; -- -- /* Decide how GMAC and SGMIISYS be mapped */ -- int sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ? -- 0 : mac->id; -- mtk_sgmii_link_up(eth->sgmii, sid, speed, duplex); -- } -- - mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); - mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 | - MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC | -@@ -596,8 +589,8 @@ static void mtk_mac_link_up(struct phyli - - static const struct phylink_mac_ops mtk_phylink_ops = { - .validate = phylink_generic_validate, -+ .mac_select_pcs = mtk_mac_select_pcs, - .mac_pcs_get_state = mtk_mac_pcs_get_state, -- .mac_an_restart = mtk_mac_an_restart, - .mac_config = mtk_mac_config, - .mac_finish = mtk_mac_finish, - .mac_link_down = mtk_mac_link_down, ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -964,10 +964,12 @@ struct mtk_soc_data { - * @regmap: The register map pointing at the range used to setup - * SGMII modes - * @ana_rgc3: The offset refers to register ANA_RGC3 related to regmap -+ * @pcs: Phylink PCS structure - */ - struct mtk_pcs { - struct regmap *regmap; - u32 ana_rgc3; -+ struct phylink_pcs pcs; - }; - - /* struct mtk_sgmii - This is the structure holding sgmii regmap and its -@@ -1107,12 +1109,9 @@ void mtk_stats_update_mac(struct mtk_mac - void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg); - u32 mtk_r32(struct mtk_eth *eth, unsigned reg); - -+struct phylink_pcs *mtk_sgmii_select_pcs(struct mtk_sgmii *ss, int id); - int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *np, - u32 ana_rgc3); --int mtk_sgmii_config(struct mtk_sgmii *ss, int id, unsigned int mode, -- phy_interface_t interface); --void mtk_sgmii_link_up(struct mtk_sgmii *ss, int id, int speed, int duplex); --void mtk_sgmii_restart_an(struct mtk_eth *eth, int mac_id); - - int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id); - int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id); ---- a/drivers/net/ethernet/mediatek/mtk_sgmii.c -+++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c -@@ -14,14 +14,16 @@ - - #include "mtk_eth_soc.h" - -+static struct mtk_pcs *pcs_to_mtk_pcs(struct phylink_pcs *pcs) -+{ -+ return container_of(pcs, struct mtk_pcs, pcs); -+} -+ - /* For SGMII interface mode */ - static int mtk_pcs_setup_mode_an(struct mtk_pcs *mpcs) - { - unsigned int val; - -- if (!mpcs->regmap) -- return -EINVAL; -- - /* Setup the link timer and QPHY power up inside SGMIISYS */ - regmap_write(mpcs->regmap, SGMSYS_PCS_LINK_TIMER, - SGMII_LINK_TIMER_DEFAULT); -@@ -50,9 +52,6 @@ static int mtk_pcs_setup_mode_force(stru - { - unsigned int val; - -- if (!mpcs->regmap) -- return -EINVAL; -- - 
regmap_read(mpcs->regmap, mpcs->ana_rgc3, &val); - val &= ~RG_PHY_SPEED_MASK; - if (interface == PHY_INTERFACE_MODE_2500BASEX) -@@ -78,10 +77,12 @@ static int mtk_pcs_setup_mode_force(stru - return 0; - } - --int mtk_sgmii_config(struct mtk_sgmii *ss, int id, unsigned int mode, -- phy_interface_t interface) -+static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode, -+ phy_interface_t interface, -+ const unsigned long *advertising, -+ bool permit_pause_to_mac) - { -- struct mtk_pcs *mpcs = &ss->pcs[id]; -+ struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs); - int err = 0; - - /* Setup SGMIISYS with the determined property */ -@@ -93,22 +94,25 @@ int mtk_sgmii_config(struct mtk_sgmii *s - return err; - } - --static void mtk_pcs_restart_an(struct mtk_pcs *mpcs) -+static void mtk_pcs_restart_an(struct phylink_pcs *pcs) - { -+ struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs); - unsigned int val; - -- if (!mpcs->regmap) -- return; -- - regmap_read(mpcs->regmap, SGMSYS_PCS_CONTROL_1, &val); - val |= SGMII_AN_RESTART; - regmap_write(mpcs->regmap, SGMSYS_PCS_CONTROL_1, val); - } - --static void mtk_pcs_link_up(struct mtk_pcs *mpcs, int speed, int duplex) -+static void mtk_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode, -+ phy_interface_t interface, int speed, int duplex) - { -+ struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs); - unsigned int val; - -+ if (!phy_interface_mode_is_8023z(interface)) -+ return; -+ - /* SGMII force duplex setting */ - regmap_read(mpcs->regmap, SGMSYS_SGMII_MODE, &val); - val &= ~SGMII_DUPLEX_FULL; -@@ -118,11 +122,11 @@ static void mtk_pcs_link_up(struct mtk_p - regmap_write(mpcs->regmap, SGMSYS_SGMII_MODE, val); - } - --/* For 1000BASE-X and 2500BASE-X interface modes */ --void mtk_sgmii_link_up(struct mtk_sgmii *ss, int id, int speed, int duplex) --{ -- mtk_pcs_link_up(&ss->pcs[id], speed, duplex); --} -+static const struct phylink_pcs_ops mtk_pcs_ops = { -+ .pcs_config = mtk_pcs_config, -+ .pcs_an_restart = mtk_pcs_restart_an, -+ .pcs_link_up = mtk_pcs_link_up, -+}; - - int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3) - { -@@ -139,18 +143,17 @@ int mtk_sgmii_init(struct mtk_sgmii *ss, - of_node_put(np); - if (IS_ERR(ss->pcs[i].regmap)) - return PTR_ERR(ss->pcs[i].regmap); -+ -+ ss->pcs[i].pcs.ops = &mtk_pcs_ops; - } - - return 0; - } - --void mtk_sgmii_restart_an(struct mtk_eth *eth, int mac_id) -+struct phylink_pcs *mtk_sgmii_select_pcs(struct mtk_sgmii *ss, int id) - { -- unsigned int sid; -- -- /* Decide how GMAC and SGMIISYS be mapped */ -- sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ? 
-- 0 : mac_id; -+ if (!ss->pcs[id].regmap) -+ return NULL; - -- mtk_pcs_restart_an(ð->sgmii->pcs[sid]); -+ return &ss->pcs[id].pcs; - } diff --git a/target/linux/generic/backport-6.1/705-01-v5.17-net-dsa-mt7530-iterate-using-dsa_switch_for_each_use.patch b/target/linux/generic/backport-6.1/705-01-v5.17-net-dsa-mt7530-iterate-using-dsa_switch_for_each_use.patch deleted file mode 100644 index 4142fd1d5081..000000000000 --- a/target/linux/generic/backport-6.1/705-01-v5.17-net-dsa-mt7530-iterate-using-dsa_switch_for_each_use.patch +++ /dev/null @@ -1,106 +0,0 @@ -From 505560028b6deb9b4385cf6100f05ca6f4aacaf8 Mon Sep 17 00:00:00 2001 -From: Vladimir Oltean -Date: Mon, 6 Dec 2021 18:57:49 +0200 -Subject: [PATCH 01/13] net: dsa: mt7530: iterate using - dsa_switch_for_each_user_port in bridging ops - -Avoid repeated calls to dsa_to_port() (some hidden behind dsa_is_user_port -and some in plain sight) by keeping two struct dsa_port references: one -to the port passed as argument, and another to the other ports of the -switch that we're iterating over. - -dsa_to_port(ds, i) gets replaced by other_dp, i gets replaced by -other_port which is derived from other_dp->index, dsa_is_user_port is -handled by the DSA iterator. - -Signed-off-by: Vladimir Oltean -Signed-off-by: Jakub Kicinski ---- - drivers/net/dsa/mt7530.c | 52 +++++++++++++++++++++++----------------- - 1 file changed, 30 insertions(+), 22 deletions(-) - ---- a/drivers/net/dsa/mt7530.c -+++ b/drivers/net/dsa/mt7530.c -@@ -1190,27 +1190,31 @@ static int - mt7530_port_bridge_join(struct dsa_switch *ds, int port, - struct net_device *bridge) - { -- struct mt7530_priv *priv = ds->priv; -+ struct dsa_port *dp = dsa_to_port(ds, port), *other_dp; - u32 port_bitmap = BIT(MT7530_CPU_PORT); -- int i; -+ struct mt7530_priv *priv = ds->priv; - - mutex_lock(&priv->reg_mutex); - -- for (i = 0; i < MT7530_NUM_PORTS; i++) { -+ dsa_switch_for_each_user_port(other_dp, ds) { -+ int other_port = other_dp->index; -+ -+ if (dp == other_dp) -+ continue; -+ - /* Add this port to the port matrix of the other ports in the - * same bridge. If the port is disabled, port matrix is kept - * and not being setup until the port becomes enabled. - */ -- if (dsa_is_user_port(ds, i) && i != port) { -- if (dsa_to_port(ds, i)->bridge_dev != bridge) -- continue; -- if (priv->ports[i].enable) -- mt7530_set(priv, MT7530_PCR_P(i), -- PCR_MATRIX(BIT(port))); -- priv->ports[i].pm |= PCR_MATRIX(BIT(port)); -+ if (other_dp->bridge_dev != bridge) -+ continue; - -- port_bitmap |= BIT(i); -- } -+ if (priv->ports[other_port].enable) -+ mt7530_set(priv, MT7530_PCR_P(other_port), -+ PCR_MATRIX(BIT(port))); -+ priv->ports[other_port].pm |= PCR_MATRIX(BIT(port)); -+ -+ port_bitmap |= BIT(other_port); - } - - /* Add the all other ports to this port matrix. */ -@@ -1315,24 +1319,28 @@ static void - mt7530_port_bridge_leave(struct dsa_switch *ds, int port, - struct net_device *bridge) - { -+ struct dsa_port *dp = dsa_to_port(ds, port), *other_dp; - struct mt7530_priv *priv = ds->priv; -- int i; - - mutex_lock(&priv->reg_mutex); - -- for (i = 0; i < MT7530_NUM_PORTS; i++) { -+ dsa_switch_for_each_user_port(other_dp, ds) { -+ int other_port = other_dp->index; -+ -+ if (dp == other_dp) -+ continue; -+ - /* Remove this port from the port matrix of the other ports - * in the same bridge. If the port is disabled, port matrix - * is kept and not being setup until the port becomes enabled. 
- */ -- if (dsa_is_user_port(ds, i) && i != port) { -- if (dsa_to_port(ds, i)->bridge_dev != bridge) -- continue; -- if (priv->ports[i].enable) -- mt7530_clear(priv, MT7530_PCR_P(i), -- PCR_MATRIX(BIT(port))); -- priv->ports[i].pm &= ~PCR_MATRIX(BIT(port)); -- } -+ if (other_dp->bridge_dev != bridge) -+ continue; -+ -+ if (priv->ports[other_port].enable) -+ mt7530_clear(priv, MT7530_PCR_P(other_port), -+ PCR_MATRIX(BIT(port))); -+ priv->ports[other_port].pm &= ~PCR_MATRIX(BIT(port)); - } - - /* Set the cpu port to be the only one in the port matrix of diff --git a/target/linux/generic/backport-6.1/705-02-v5.19-net-dsa-mt7530-populate-supported_interfaces-and-mac.patch b/target/linux/generic/backport-6.1/705-02-v5.19-net-dsa-mt7530-populate-supported_interfaces-and-mac.patch deleted file mode 100644 index 435f282845ff..000000000000 --- a/target/linux/generic/backport-6.1/705-02-v5.19-net-dsa-mt7530-populate-supported_interfaces-and-mac.patch +++ /dev/null @@ -1,166 +0,0 @@ -From a1da54bcd664fc27169386db966575675ac3ccb0 Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Mon, 11 Apr 2022 10:46:01 +0100 -Subject: [PATCH 02/13] net: dsa: mt7530: populate supported_interfaces and - mac_capabilities -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Populate the supported interfaces and MAC capabilities for mt7530, -mt7531 and mt7621 DSA switches. Filling this in will enable phylink -to pre-check the PHY interface mode against the the supported -interfaces bitmap prior to calling the validate function, and will -eventually allow us to convert to using the generic validation. - -Tested-by: Marek Behún -Signed-off-by: Russell King (Oracle) -Signed-off-by: Paolo Abeni ---- - drivers/net/dsa/mt7530.c | 74 ++++++++++++++++++++++++++++++++++++++++ - drivers/net/dsa/mt7530.h | 2 ++ - 2 files changed, 76 insertions(+) - ---- a/drivers/net/dsa/mt7530.c -+++ b/drivers/net/dsa/mt7530.c -@@ -2412,6 +2412,32 @@ mt7531_setup(struct dsa_switch *ds) - return 0; - } - -+static void mt7530_mac_port_get_caps(struct dsa_switch *ds, int port, -+ struct phylink_config *config) -+{ -+ switch (port) { -+ case 0 ... 4: /* Internal phy */ -+ __set_bit(PHY_INTERFACE_MODE_GMII, -+ config->supported_interfaces); -+ break; -+ -+ case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */ -+ phy_interface_set_rgmii(config->supported_interfaces); -+ __set_bit(PHY_INTERFACE_MODE_MII, -+ config->supported_interfaces); -+ __set_bit(PHY_INTERFACE_MODE_GMII, -+ config->supported_interfaces); -+ break; -+ -+ case 6: /* 1st cpu port */ -+ __set_bit(PHY_INTERFACE_MODE_RGMII, -+ config->supported_interfaces); -+ __set_bit(PHY_INTERFACE_MODE_TRGMII, -+ config->supported_interfaces); -+ break; -+ } -+} -+ - static bool - mt7530_phy_mode_supported(struct dsa_switch *ds, int port, - const struct phylink_link_state *state) -@@ -2448,6 +2474,37 @@ static bool mt7531_is_rgmii_port(struct - return (port == 5) && (priv->p5_intf_sel != P5_INTF_SEL_GMAC5_SGMII); - } - -+static void mt7531_mac_port_get_caps(struct dsa_switch *ds, int port, -+ struct phylink_config *config) -+{ -+ struct mt7530_priv *priv = ds->priv; -+ -+ switch (port) { -+ case 0 ... 
4: /* Internal phy */ -+ __set_bit(PHY_INTERFACE_MODE_GMII, -+ config->supported_interfaces); -+ break; -+ -+ case 5: /* 2nd cpu port supports either rgmii or sgmii/8023z */ -+ if (mt7531_is_rgmii_port(priv, port)) { -+ phy_interface_set_rgmii(config->supported_interfaces); -+ break; -+ } -+ fallthrough; -+ -+ case 6: /* 1st cpu port supports sgmii/8023z only */ -+ __set_bit(PHY_INTERFACE_MODE_SGMII, -+ config->supported_interfaces); -+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, -+ config->supported_interfaces); -+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, -+ config->supported_interfaces); -+ -+ config->mac_capabilities |= MAC_2500FD; -+ break; -+ } -+} -+ - static bool - mt7531_phy_mode_supported(struct dsa_switch *ds, int port, - const struct phylink_link_state *state) -@@ -2924,6 +2981,18 @@ mt7531_cpu_port_config(struct dsa_switch - return 0; - } - -+static void mt753x_phylink_get_caps(struct dsa_switch *ds, int port, -+ struct phylink_config *config) -+{ -+ struct mt7530_priv *priv = ds->priv; -+ -+ /* This switch only supports full-duplex at 1Gbps */ -+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | -+ MAC_10 | MAC_100 | MAC_1000FD; -+ -+ priv->info->mac_port_get_caps(ds, port, config); -+} -+ - static void - mt7530_mac_port_validate(struct dsa_switch *ds, int port, - unsigned long *supported) -@@ -3159,6 +3228,7 @@ static const struct dsa_switch_ops mt753 - .port_vlan_del = mt7530_port_vlan_del, - .port_mirror_add = mt753x_port_mirror_add, - .port_mirror_del = mt753x_port_mirror_del, -+ .phylink_get_caps = mt753x_phylink_get_caps, - .phylink_validate = mt753x_phylink_validate, - .phylink_mac_link_state = mt753x_phylink_mac_link_state, - .phylink_mac_config = mt753x_phylink_mac_config, -@@ -3176,6 +3246,7 @@ static const struct mt753x_info mt753x_t - .phy_read = mt7530_phy_read, - .phy_write = mt7530_phy_write, - .pad_setup = mt7530_pad_clk_setup, -+ .mac_port_get_caps = mt7530_mac_port_get_caps, - .phy_mode_supported = mt7530_phy_mode_supported, - .mac_port_validate = mt7530_mac_port_validate, - .mac_port_get_state = mt7530_phylink_mac_link_state, -@@ -3187,6 +3258,7 @@ static const struct mt753x_info mt753x_t - .phy_read = mt7530_phy_read, - .phy_write = mt7530_phy_write, - .pad_setup = mt7530_pad_clk_setup, -+ .mac_port_get_caps = mt7530_mac_port_get_caps, - .phy_mode_supported = mt7530_phy_mode_supported, - .mac_port_validate = mt7530_mac_port_validate, - .mac_port_get_state = mt7530_phylink_mac_link_state, -@@ -3199,6 +3271,7 @@ static const struct mt753x_info mt753x_t - .phy_write = mt7531_ind_phy_write, - .pad_setup = mt7531_pad_setup, - .cpu_port_config = mt7531_cpu_port_config, -+ .mac_port_get_caps = mt7531_mac_port_get_caps, - .phy_mode_supported = mt7531_phy_mode_supported, - .mac_port_validate = mt7531_mac_port_validate, - .mac_port_get_state = mt7531_phylink_mac_link_state, -@@ -3261,6 +3334,7 @@ mt7530_probe(struct mdio_device *mdiodev - */ - if (!priv->info->sw_setup || !priv->info->pad_setup || - !priv->info->phy_read || !priv->info->phy_write || -+ !priv->info->mac_port_get_caps || - !priv->info->phy_mode_supported || - !priv->info->mac_port_validate || - !priv->info->mac_port_get_state || !priv->info->mac_port_config) ---- a/drivers/net/dsa/mt7530.h -+++ b/drivers/net/dsa/mt7530.h -@@ -769,6 +769,8 @@ struct mt753x_info { - int (*phy_write)(struct mt7530_priv *priv, int port, int regnum, u16 val); - int (*pad_setup)(struct dsa_switch *ds, phy_interface_t interface); - int (*cpu_port_config)(struct dsa_switch *ds, int port); -+ void 
(*mac_port_get_caps)(struct dsa_switch *ds, int port, -+ struct phylink_config *config); - bool (*phy_mode_supported)(struct dsa_switch *ds, int port, - const struct phylink_link_state *state); - void (*mac_port_validate)(struct dsa_switch *ds, int port, diff --git a/target/linux/generic/backport-6.1/705-03-v5.19-net-dsa-mt7530-remove-interface-checks.patch b/target/linux/generic/backport-6.1/705-03-v5.19-net-dsa-mt7530-remove-interface-checks.patch deleted file mode 100644 index 38a24dc46b0d..000000000000 --- a/target/linux/generic/backport-6.1/705-03-v5.19-net-dsa-mt7530-remove-interface-checks.patch +++ /dev/null @@ -1,172 +0,0 @@ -From e3f6719e2269868ca129b05da50cd55786848954 Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Mon, 11 Apr 2022 10:46:06 +0100 -Subject: [PATCH 03/13] net: dsa: mt7530: remove interface checks -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -As phylink checks the interface mode against the supported_interfaces -bitmap, we no longer need to validate the interface mode, nor handle -PHY_INTERFACE_MODE_NA in the validation function. Remove these to -simplify the implementation. - -Tested-by: Marek Behún -Signed-off-by: Russell King (Oracle) -Signed-off-by: Paolo Abeni ---- - drivers/net/dsa/mt7530.c | 82 ---------------------------------------- - drivers/net/dsa/mt7530.h | 2 - - 2 files changed, 84 deletions(-) - ---- a/drivers/net/dsa/mt7530.c -+++ b/drivers/net/dsa/mt7530.c -@@ -2438,37 +2438,6 @@ static void mt7530_mac_port_get_caps(str - } - } - --static bool --mt7530_phy_mode_supported(struct dsa_switch *ds, int port, -- const struct phylink_link_state *state) --{ -- struct mt7530_priv *priv = ds->priv; -- -- switch (port) { -- case 0 ... 4: /* Internal phy */ -- if (state->interface != PHY_INTERFACE_MODE_GMII) -- return false; -- break; -- case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */ -- if (!phy_interface_mode_is_rgmii(state->interface) && -- state->interface != PHY_INTERFACE_MODE_MII && -- state->interface != PHY_INTERFACE_MODE_GMII) -- return false; -- break; -- case 6: /* 1st cpu port */ -- if (state->interface != PHY_INTERFACE_MODE_RGMII && -- state->interface != PHY_INTERFACE_MODE_TRGMII) -- return false; -- break; -- default: -- dev_err(priv->dev, "%s: unsupported port: %i\n", __func__, -- port); -- return false; -- } -- -- return true; --} -- - static bool mt7531_is_rgmii_port(struct mt7530_priv *priv, u32 port) - { - return (port == 5) && (priv->p5_intf_sel != P5_INTF_SEL_GMAC5_SGMII); -@@ -2505,44 +2474,6 @@ static void mt7531_mac_port_get_caps(str - } - } - --static bool --mt7531_phy_mode_supported(struct dsa_switch *ds, int port, -- const struct phylink_link_state *state) --{ -- struct mt7530_priv *priv = ds->priv; -- -- switch (port) { -- case 0 ... 
4: /* Internal phy */ -- if (state->interface != PHY_INTERFACE_MODE_GMII) -- return false; -- break; -- case 5: /* 2nd cpu port supports either rgmii or sgmii/8023z */ -- if (mt7531_is_rgmii_port(priv, port)) -- return phy_interface_mode_is_rgmii(state->interface); -- fallthrough; -- case 6: /* 1st cpu port supports sgmii/8023z only */ -- if (state->interface != PHY_INTERFACE_MODE_SGMII && -- !phy_interface_mode_is_8023z(state->interface)) -- return false; -- break; -- default: -- dev_err(priv->dev, "%s: unsupported port: %i\n", __func__, -- port); -- return false; -- } -- -- return true; --} -- --static bool --mt753x_phy_mode_supported(struct dsa_switch *ds, int port, -- const struct phylink_link_state *state) --{ -- struct mt7530_priv *priv = ds->priv; -- -- return priv->info->phy_mode_supported(ds, port, state); --} -- - static int - mt753x_pad_setup(struct dsa_switch *ds, const struct phylink_link_state *state) - { -@@ -2797,9 +2728,6 @@ mt753x_phylink_mac_config(struct dsa_swi - struct mt7530_priv *priv = ds->priv; - u32 mcr_cur, mcr_new; - -- if (!mt753x_phy_mode_supported(ds, port, state)) -- goto unsupported; -- - switch (port) { - case 0 ... 4: /* Internal phy */ - if (state->interface != PHY_INTERFACE_MODE_GMII) -@@ -3015,12 +2943,6 @@ mt753x_phylink_validate(struct dsa_switc - __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; - struct mt7530_priv *priv = ds->priv; - -- if (state->interface != PHY_INTERFACE_MODE_NA && -- !mt753x_phy_mode_supported(ds, port, state)) { -- linkmode_zero(supported); -- return; -- } -- - phylink_set_port_modes(mask); - - if (state->interface != PHY_INTERFACE_MODE_TRGMII && -@@ -3247,7 +3169,6 @@ static const struct mt753x_info mt753x_t - .phy_write = mt7530_phy_write, - .pad_setup = mt7530_pad_clk_setup, - .mac_port_get_caps = mt7530_mac_port_get_caps, -- .phy_mode_supported = mt7530_phy_mode_supported, - .mac_port_validate = mt7530_mac_port_validate, - .mac_port_get_state = mt7530_phylink_mac_link_state, - .mac_port_config = mt7530_mac_config, -@@ -3259,7 +3180,6 @@ static const struct mt753x_info mt753x_t - .phy_write = mt7530_phy_write, - .pad_setup = mt7530_pad_clk_setup, - .mac_port_get_caps = mt7530_mac_port_get_caps, -- .phy_mode_supported = mt7530_phy_mode_supported, - .mac_port_validate = mt7530_mac_port_validate, - .mac_port_get_state = mt7530_phylink_mac_link_state, - .mac_port_config = mt7530_mac_config, -@@ -3272,7 +3192,6 @@ static const struct mt753x_info mt753x_t - .pad_setup = mt7531_pad_setup, - .cpu_port_config = mt7531_cpu_port_config, - .mac_port_get_caps = mt7531_mac_port_get_caps, -- .phy_mode_supported = mt7531_phy_mode_supported, - .mac_port_validate = mt7531_mac_port_validate, - .mac_port_get_state = mt7531_phylink_mac_link_state, - .mac_port_config = mt7531_mac_config, -@@ -3335,7 +3254,6 @@ mt7530_probe(struct mdio_device *mdiodev - if (!priv->info->sw_setup || !priv->info->pad_setup || - !priv->info->phy_read || !priv->info->phy_write || - !priv->info->mac_port_get_caps || -- !priv->info->phy_mode_supported || - !priv->info->mac_port_validate || - !priv->info->mac_port_get_state || !priv->info->mac_port_config) - return -EINVAL; ---- a/drivers/net/dsa/mt7530.h -+++ b/drivers/net/dsa/mt7530.h -@@ -771,8 +771,6 @@ struct mt753x_info { - int (*cpu_port_config)(struct dsa_switch *ds, int port); - void (*mac_port_get_caps)(struct dsa_switch *ds, int port, - struct phylink_config *config); -- bool (*phy_mode_supported)(struct dsa_switch *ds, int port, -- const struct phylink_link_state *state); - void (*mac_port_validate)(struct 
dsa_switch *ds, int port, - unsigned long *supported); - int (*mac_port_get_state)(struct dsa_switch *ds, int port, diff --git a/target/linux/generic/backport-6.1/705-04-v5.19-net-dsa-mt7530-drop-use-of-phylink_helper_basex_spee.patch b/target/linux/generic/backport-6.1/705-04-v5.19-net-dsa-mt7530-drop-use-of-phylink_helper_basex_spee.patch deleted file mode 100644 index e607a3c7d2cf..000000000000 --- a/target/linux/generic/backport-6.1/705-04-v5.19-net-dsa-mt7530-drop-use-of-phylink_helper_basex_spee.patch +++ /dev/null @@ -1,34 +0,0 @@ -From 58344a3b85f1bd5ffddfc2c11f6f2bf688b5f990 Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Mon, 11 Apr 2022 10:46:12 +0100 -Subject: [PATCH 04/13] net: dsa: mt7530: drop use of - phylink_helper_basex_speed() -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Now that we have a better method to select SFP interface modes, we -no longer need to use phylink_helper_basex_speed() in a driver's -validation function. - -Tested-by: Marek Behún -Signed-off-by: Russell King (Oracle) -Signed-off-by: Paolo Abeni ---- - drivers/net/dsa/mt7530.c | 5 ----- - 1 file changed, 5 deletions(-) - ---- a/drivers/net/dsa/mt7530.c -+++ b/drivers/net/dsa/mt7530.c -@@ -2967,11 +2967,6 @@ mt753x_phylink_validate(struct dsa_switc - - linkmode_and(supported, supported, mask); - linkmode_and(state->advertising, state->advertising, mask); -- -- /* We can only operate at 2500BaseX or 1000BaseX. If requested -- * to advertise both, only report advertising at 2500BaseX. -- */ -- phylink_helper_basex_speed(state); - } - - static int diff --git a/target/linux/generic/backport-6.1/705-05-v5.19-net-dsa-mt7530-only-indicate-linkmodes-that-can-be-s.patch b/target/linux/generic/backport-6.1/705-05-v5.19-net-dsa-mt7530-only-indicate-linkmodes-that-can-be-s.patch deleted file mode 100644 index 63a331763030..000000000000 --- a/target/linux/generic/backport-6.1/705-05-v5.19-net-dsa-mt7530-only-indicate-linkmodes-that-can-be-s.patch +++ /dev/null @@ -1,86 +0,0 @@ -From 3c1d788a62dc648d1846049b66119ebb69dedd52 Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Mon, 11 Apr 2022 10:46:17 +0100 -Subject: [PATCH 05/13] net: dsa: mt7530: only indicate linkmodes that can be - supported -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Now that mt7530 is not using the basex helper, it becomes unnecessary to -indicate support for both 1000baseX and 2500baseX when one of the 803.3z -PHY interface modes is being selected. Ensure that the driver indicates -only those linkmodes that can actually be supported by the PHY interface -mode. - -Tested-by: Marek Behún -Signed-off-by: Russell King (Oracle) -Signed-off-by: Paolo Abeni ---- - drivers/net/dsa/mt7530.c | 12 ++++++++---- - drivers/net/dsa/mt7530.h | 1 + - 2 files changed, 9 insertions(+), 4 deletions(-) - ---- a/drivers/net/dsa/mt7530.c -+++ b/drivers/net/dsa/mt7530.c -@@ -2545,12 +2545,13 @@ static int mt7531_rgmii_setup(struct mt7 - } - - static void mt7531_sgmii_validate(struct mt7530_priv *priv, int port, -+ phy_interface_t interface, - unsigned long *supported) - { - /* Port5 supports ethier RGMII or SGMII. - * Port6 supports SGMII only. 
- */ -- if (port == 6) { -+ if (port == 6 && interface == PHY_INTERFACE_MODE_2500BASEX) { - phylink_set(supported, 2500baseX_Full); - phylink_set(supported, 2500baseT_Full); - } -@@ -2923,16 +2924,18 @@ static void mt753x_phylink_get_caps(stru - - static void - mt7530_mac_port_validate(struct dsa_switch *ds, int port, -+ phy_interface_t interface, - unsigned long *supported) - { - } - - static void mt7531_mac_port_validate(struct dsa_switch *ds, int port, -+ phy_interface_t interface, - unsigned long *supported) - { - struct mt7530_priv *priv = ds->priv; - -- mt7531_sgmii_validate(priv, port, supported); -+ mt7531_sgmii_validate(priv, port, interface, supported); - } - - static void -@@ -2955,12 +2958,13 @@ mt753x_phylink_validate(struct dsa_switc - } - - /* This switch only supports 1G full-duplex. */ -- if (state->interface != PHY_INTERFACE_MODE_MII) { -+ if (state->interface != PHY_INTERFACE_MODE_MII && -+ state->interface != PHY_INTERFACE_MODE_2500BASEX) { - phylink_set(mask, 1000baseT_Full); - phylink_set(mask, 1000baseX_Full); - } - -- priv->info->mac_port_validate(ds, port, mask); -+ priv->info->mac_port_validate(ds, port, state->interface, mask); - - phylink_set(mask, Pause); - phylink_set(mask, Asym_Pause); ---- a/drivers/net/dsa/mt7530.h -+++ b/drivers/net/dsa/mt7530.h -@@ -772,6 +772,7 @@ struct mt753x_info { - void (*mac_port_get_caps)(struct dsa_switch *ds, int port, - struct phylink_config *config); - void (*mac_port_validate)(struct dsa_switch *ds, int port, -+ phy_interface_t interface, - unsigned long *supported); - int (*mac_port_get_state)(struct dsa_switch *ds, int port, - struct phylink_link_state *state); diff --git a/target/linux/generic/backport-6.1/705-06-v5.19-net-dsa-mt7530-switch-to-use-phylink_get_linkmodes.patch b/target/linux/generic/backport-6.1/705-06-v5.19-net-dsa-mt7530-switch-to-use-phylink_get_linkmodes.patch deleted file mode 100644 index 97699d4c5e33..000000000000 --- a/target/linux/generic/backport-6.1/705-06-v5.19-net-dsa-mt7530-switch-to-use-phylink_get_linkmodes.patch +++ /dev/null @@ -1,131 +0,0 @@ -From 1c2211cb15dd3957fb26c0e1615eceb5db851ad6 Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Mon, 11 Apr 2022 10:46:22 +0100 -Subject: [PATCH 06/13] net: dsa: mt7530: switch to use phylink_get_linkmodes() -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Switch mt7530 to use phylink_get_linkmodes() to generate the ethtool -linkmodes that can be supported. We are unable to use the generic -helper for this as pause modes are dependent on the interface as -the Autoneg bit depends on the interface mode. - -Tested-by: Marek Behún -Signed-off-by: Russell King (Oracle) -Signed-off-by: Paolo Abeni ---- - drivers/net/dsa/mt7530.c | 57 ++++------------------------------------ - 1 file changed, 5 insertions(+), 52 deletions(-) - ---- a/drivers/net/dsa/mt7530.c -+++ b/drivers/net/dsa/mt7530.c -@@ -2544,19 +2544,6 @@ static int mt7531_rgmii_setup(struct mt7 - return 0; - } - --static void mt7531_sgmii_validate(struct mt7530_priv *priv, int port, -- phy_interface_t interface, -- unsigned long *supported) --{ -- /* Port5 supports ethier RGMII or SGMII. -- * Port6 supports SGMII only. 
-- */ -- if (port == 6 && interface == PHY_INTERFACE_MODE_2500BASEX) { -- phylink_set(supported, 2500baseX_Full); -- phylink_set(supported, 2500baseT_Full); -- } --} -- - static void - mt7531_sgmii_link_up_force(struct dsa_switch *ds, int port, - unsigned int mode, phy_interface_t interface, -@@ -2923,51 +2910,21 @@ static void mt753x_phylink_get_caps(stru - } - - static void --mt7530_mac_port_validate(struct dsa_switch *ds, int port, -- phy_interface_t interface, -- unsigned long *supported) --{ --} -- --static void mt7531_mac_port_validate(struct dsa_switch *ds, int port, -- phy_interface_t interface, -- unsigned long *supported) --{ -- struct mt7530_priv *priv = ds->priv; -- -- mt7531_sgmii_validate(priv, port, interface, supported); --} -- --static void - mt753x_phylink_validate(struct dsa_switch *ds, int port, - unsigned long *supported, - struct phylink_link_state *state) - { - __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; -- struct mt7530_priv *priv = ds->priv; -+ u32 caps; -+ -+ caps = dsa_to_port(ds, port)->pl_config.mac_capabilities; - - phylink_set_port_modes(mask); -+ phylink_get_linkmodes(mask, state->interface, caps); - - if (state->interface != PHY_INTERFACE_MODE_TRGMII && -- !phy_interface_mode_is_8023z(state->interface)) { -- phylink_set(mask, 10baseT_Half); -- phylink_set(mask, 10baseT_Full); -- phylink_set(mask, 100baseT_Half); -- phylink_set(mask, 100baseT_Full); -+ !phy_interface_mode_is_8023z(state->interface)) - phylink_set(mask, Autoneg); -- } -- -- /* This switch only supports 1G full-duplex. */ -- if (state->interface != PHY_INTERFACE_MODE_MII && -- state->interface != PHY_INTERFACE_MODE_2500BASEX) { -- phylink_set(mask, 1000baseT_Full); -- phylink_set(mask, 1000baseX_Full); -- } -- -- priv->info->mac_port_validate(ds, port, state->interface, mask); -- -- phylink_set(mask, Pause); -- phylink_set(mask, Asym_Pause); - - linkmode_and(supported, supported, mask); - linkmode_and(state->advertising, state->advertising, mask); -@@ -3168,7 +3125,6 @@ static const struct mt753x_info mt753x_t - .phy_write = mt7530_phy_write, - .pad_setup = mt7530_pad_clk_setup, - .mac_port_get_caps = mt7530_mac_port_get_caps, -- .mac_port_validate = mt7530_mac_port_validate, - .mac_port_get_state = mt7530_phylink_mac_link_state, - .mac_port_config = mt7530_mac_config, - }, -@@ -3179,7 +3135,6 @@ static const struct mt753x_info mt753x_t - .phy_write = mt7530_phy_write, - .pad_setup = mt7530_pad_clk_setup, - .mac_port_get_caps = mt7530_mac_port_get_caps, -- .mac_port_validate = mt7530_mac_port_validate, - .mac_port_get_state = mt7530_phylink_mac_link_state, - .mac_port_config = mt7530_mac_config, - }, -@@ -3191,7 +3146,6 @@ static const struct mt753x_info mt753x_t - .pad_setup = mt7531_pad_setup, - .cpu_port_config = mt7531_cpu_port_config, - .mac_port_get_caps = mt7531_mac_port_get_caps, -- .mac_port_validate = mt7531_mac_port_validate, - .mac_port_get_state = mt7531_phylink_mac_link_state, - .mac_port_config = mt7531_mac_config, - .mac_pcs_an_restart = mt7531_sgmii_restart_an, -@@ -3253,7 +3207,6 @@ mt7530_probe(struct mdio_device *mdiodev - if (!priv->info->sw_setup || !priv->info->pad_setup || - !priv->info->phy_read || !priv->info->phy_write || - !priv->info->mac_port_get_caps || -- !priv->info->mac_port_validate || - !priv->info->mac_port_get_state || !priv->info->mac_port_config) - return -EINVAL; - diff --git a/target/linux/generic/backport-6.1/705-07-v5.19-net-dsa-mt7530-partially-convert-to-phylink_pcs.patch 
b/target/linux/generic/backport-6.1/705-07-v5.19-net-dsa-mt7530-partially-convert-to-phylink_pcs.patch deleted file mode 100644 index b9b74833d223..000000000000 --- a/target/linux/generic/backport-6.1/705-07-v5.19-net-dsa-mt7530-partially-convert-to-phylink_pcs.patch +++ /dev/null @@ -1,385 +0,0 @@ -From fd993fd59d96d5e2d5972ec4ca1f9651025c987b Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Mon, 11 Apr 2022 10:46:27 +0100 -Subject: [PATCH 07/13] net: dsa: mt7530: partially convert to phylink_pcs -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Partially convert the mt7530 driver to use phylink's PCS support. This -is a partial implementation as we don't move anything into the -pcs_config method yet - this driver supports SGMII or 1000BASE-X -without in-band. - -Tested-by: Marek Behún -Signed-off-by: Russell King (Oracle) -Signed-off-by: Paolo Abeni ---- - drivers/net/dsa/mt7530.c | 144 +++++++++++++++++++++++---------------- - drivers/net/dsa/mt7530.h | 21 +++--- - 2 files changed, 95 insertions(+), 70 deletions(-) - ---- a/drivers/net/dsa/mt7530.c -+++ b/drivers/net/dsa/mt7530.c -@@ -24,6 +24,11 @@ - - #include "mt7530.h" - -+static struct mt753x_pcs *pcs_to_mt753x_pcs(struct phylink_pcs *pcs) -+{ -+ return container_of(pcs, struct mt753x_pcs, pcs); -+} -+ - /* String, offset, and register size in bytes if different from 4 bytes */ - static const struct mt7530_mib_desc mt7530_mib[] = { - MIB_DESC(1, 0x00, "TxDrop"), -@@ -2544,12 +2549,11 @@ static int mt7531_rgmii_setup(struct mt7 - return 0; - } - --static void --mt7531_sgmii_link_up_force(struct dsa_switch *ds, int port, -- unsigned int mode, phy_interface_t interface, -- int speed, int duplex) -+static void mt7531_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode, -+ phy_interface_t interface, int speed, int duplex) - { -- struct mt7530_priv *priv = ds->priv; -+ struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv; -+ int port = pcs_to_mt753x_pcs(pcs)->port; - unsigned int val; - - /* For adjusting speed and duplex of SGMII force mode. */ -@@ -2575,6 +2579,9 @@ mt7531_sgmii_link_up_force(struct dsa_sw - - /* MT7531 SGMII 1G force mode can only work in full duplex mode, - * no matter MT7531_SGMII_FORCE_HALF_DUPLEX is set or not. -+ * -+ * The speed check is unnecessary as the MAC capabilities apply -+ * this restriction. 
--rmk - */ - if ((speed == SPEED_10 || speed == SPEED_100) && - duplex != DUPLEX_FULL) -@@ -2650,9 +2657,10 @@ static int mt7531_sgmii_setup_mode_an(st - return 0; - } - --static void mt7531_sgmii_restart_an(struct dsa_switch *ds, int port) -+static void mt7531_pcs_an_restart(struct phylink_pcs *pcs) - { -- struct mt7530_priv *priv = ds->priv; -+ struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv; -+ int port = pcs_to_mt753x_pcs(pcs)->port; - u32 val; - - /* Only restart AN when AN is enabled */ -@@ -2709,6 +2717,24 @@ mt753x_mac_config(struct dsa_switch *ds, - return priv->info->mac_port_config(ds, port, mode, state->interface); - } - -+static struct phylink_pcs * -+mt753x_phylink_mac_select_pcs(struct dsa_switch *ds, int port, -+ phy_interface_t interface) -+{ -+ struct mt7530_priv *priv = ds->priv; -+ -+ switch (interface) { -+ case PHY_INTERFACE_MODE_TRGMII: -+ case PHY_INTERFACE_MODE_SGMII: -+ case PHY_INTERFACE_MODE_1000BASEX: -+ case PHY_INTERFACE_MODE_2500BASEX: -+ return &priv->pcs[port].pcs; -+ -+ default: -+ return NULL; -+ } -+} -+ - static void - mt753x_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode, - const struct phylink_link_state *state) -@@ -2770,17 +2796,6 @@ unsupported: - mt7530_write(priv, MT7530_PMCR_P(port), mcr_new); - } - --static void --mt753x_phylink_mac_an_restart(struct dsa_switch *ds, int port) --{ -- struct mt7530_priv *priv = ds->priv; -- -- if (!priv->info->mac_pcs_an_restart) -- return; -- -- priv->info->mac_pcs_an_restart(ds, port); --} -- - static void mt753x_phylink_mac_link_down(struct dsa_switch *ds, int port, - unsigned int mode, - phy_interface_t interface) -@@ -2790,16 +2805,13 @@ static void mt753x_phylink_mac_link_down - mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK); - } - --static void mt753x_mac_pcs_link_up(struct dsa_switch *ds, int port, -- unsigned int mode, phy_interface_t interface, -- int speed, int duplex) -+static void mt753x_phylink_pcs_link_up(struct phylink_pcs *pcs, -+ unsigned int mode, -+ phy_interface_t interface, -+ int speed, int duplex) - { -- struct mt7530_priv *priv = ds->priv; -- -- if (!priv->info->mac_pcs_link_up) -- return; -- -- priv->info->mac_pcs_link_up(ds, port, mode, interface, speed, duplex); -+ if (pcs->ops->pcs_link_up) -+ pcs->ops->pcs_link_up(pcs, mode, interface, speed, duplex); - } - - static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port, -@@ -2812,8 +2824,6 @@ static void mt753x_phylink_mac_link_up(s - struct mt7530_priv *priv = ds->priv; - u32 mcr; - -- mt753x_mac_pcs_link_up(ds, port, mode, interface, speed, duplex); -- - mcr = PMCR_RX_EN | PMCR_TX_EN | PMCR_FORCE_LNK; - - /* MT753x MAC works in 1G full duplex mode for all up-clocked -@@ -2891,6 +2901,8 @@ mt7531_cpu_port_config(struct dsa_switch - return ret; - mt7530_write(priv, MT7530_PMCR_P(port), - PMCR_CPU_PORT_SETTING(priv->id)); -+ mt753x_phylink_pcs_link_up(&priv->pcs[port].pcs, MLO_AN_FIXED, -+ interface, speed, DUPLEX_FULL); - mt753x_phylink_mac_link_up(ds, port, MLO_AN_FIXED, interface, NULL, - speed, DUPLEX_FULL, true, true); - -@@ -2930,16 +2942,13 @@ mt753x_phylink_validate(struct dsa_switc - linkmode_and(state->advertising, state->advertising, mask); - } - --static int --mt7530_phylink_mac_link_state(struct dsa_switch *ds, int port, -- struct phylink_link_state *state) -+static void mt7530_pcs_get_state(struct phylink_pcs *pcs, -+ struct phylink_link_state *state) - { -- struct mt7530_priv *priv = ds->priv; -+ struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv; -+ int 
port = pcs_to_mt753x_pcs(pcs)->port; - u32 pmsr; - -- if (port < 0 || port >= MT7530_NUM_PORTS) -- return -EINVAL; -- - pmsr = mt7530_read(priv, MT7530_PMSR_P(port)); - - state->link = (pmsr & PMSR_LINK); -@@ -2966,8 +2975,6 @@ mt7530_phylink_mac_link_state(struct dsa - state->pause |= MLO_PAUSE_RX; - if (pmsr & PMSR_TX_FC) - state->pause |= MLO_PAUSE_TX; -- -- return 1; - } - - static int -@@ -3009,32 +3016,49 @@ mt7531_sgmii_pcs_get_state_an(struct mt7 - return 0; - } - --static int --mt7531_phylink_mac_link_state(struct dsa_switch *ds, int port, -- struct phylink_link_state *state) -+static void mt7531_pcs_get_state(struct phylink_pcs *pcs, -+ struct phylink_link_state *state) - { -- struct mt7530_priv *priv = ds->priv; -+ struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv; -+ int port = pcs_to_mt753x_pcs(pcs)->port; - - if (state->interface == PHY_INTERFACE_MODE_SGMII) -- return mt7531_sgmii_pcs_get_state_an(priv, port, state); -- -- return -EOPNOTSUPP; -+ mt7531_sgmii_pcs_get_state_an(priv, port, state); -+ else -+ state->link = false; - } - --static int --mt753x_phylink_mac_link_state(struct dsa_switch *ds, int port, -- struct phylink_link_state *state) -+static int mt753x_pcs_config(struct phylink_pcs *pcs, unsigned int mode, -+ phy_interface_t interface, -+ const unsigned long *advertising, -+ bool permit_pause_to_mac) - { -- struct mt7530_priv *priv = ds->priv; -+ return 0; -+} - -- return priv->info->mac_port_get_state(ds, port, state); -+static void mt7530_pcs_an_restart(struct phylink_pcs *pcs) -+{ - } - -+static const struct phylink_pcs_ops mt7530_pcs_ops = { -+ .pcs_get_state = mt7530_pcs_get_state, -+ .pcs_config = mt753x_pcs_config, -+ .pcs_an_restart = mt7530_pcs_an_restart, -+}; -+ -+static const struct phylink_pcs_ops mt7531_pcs_ops = { -+ .pcs_get_state = mt7531_pcs_get_state, -+ .pcs_config = mt753x_pcs_config, -+ .pcs_an_restart = mt7531_pcs_an_restart, -+ .pcs_link_up = mt7531_pcs_link_up, -+}; -+ - static int - mt753x_setup(struct dsa_switch *ds) - { - struct mt7530_priv *priv = ds->priv; - int ret = priv->info->sw_setup(ds); -+ int i; - - if (ret) - return ret; -@@ -3047,6 +3071,13 @@ mt753x_setup(struct dsa_switch *ds) - if (ret && priv->irq) - mt7530_free_irq_common(priv); - -+ /* Initialise the PCS devices */ -+ for (i = 0; i < priv->ds->num_ports; i++) { -+ priv->pcs[i].pcs.ops = priv->info->pcs_ops; -+ priv->pcs[i].priv = priv; -+ priv->pcs[i].port = i; -+ } -+ - return ret; - } - -@@ -3108,9 +3139,8 @@ static const struct dsa_switch_ops mt753 - .port_mirror_del = mt753x_port_mirror_del, - .phylink_get_caps = mt753x_phylink_get_caps, - .phylink_validate = mt753x_phylink_validate, -- .phylink_mac_link_state = mt753x_phylink_mac_link_state, -+ .phylink_mac_select_pcs = mt753x_phylink_mac_select_pcs, - .phylink_mac_config = mt753x_phylink_mac_config, -- .phylink_mac_an_restart = mt753x_phylink_mac_an_restart, - .phylink_mac_link_down = mt753x_phylink_mac_link_down, - .phylink_mac_link_up = mt753x_phylink_mac_link_up, - .get_mac_eee = mt753x_get_mac_eee, -@@ -3120,36 +3150,34 @@ static const struct dsa_switch_ops mt753 - static const struct mt753x_info mt753x_table[] = { - [ID_MT7621] = { - .id = ID_MT7621, -+ .pcs_ops = &mt7530_pcs_ops, - .sw_setup = mt7530_setup, - .phy_read = mt7530_phy_read, - .phy_write = mt7530_phy_write, - .pad_setup = mt7530_pad_clk_setup, - .mac_port_get_caps = mt7530_mac_port_get_caps, -- .mac_port_get_state = mt7530_phylink_mac_link_state, - .mac_port_config = mt7530_mac_config, - }, - [ID_MT7530] = { - .id = ID_MT7530, -+ .pcs_ops 
= &mt7530_pcs_ops, - .sw_setup = mt7530_setup, - .phy_read = mt7530_phy_read, - .phy_write = mt7530_phy_write, - .pad_setup = mt7530_pad_clk_setup, - .mac_port_get_caps = mt7530_mac_port_get_caps, -- .mac_port_get_state = mt7530_phylink_mac_link_state, - .mac_port_config = mt7530_mac_config, - }, - [ID_MT7531] = { - .id = ID_MT7531, -+ .pcs_ops = &mt7531_pcs_ops, - .sw_setup = mt7531_setup, - .phy_read = mt7531_ind_phy_read, - .phy_write = mt7531_ind_phy_write, - .pad_setup = mt7531_pad_setup, - .cpu_port_config = mt7531_cpu_port_config, - .mac_port_get_caps = mt7531_mac_port_get_caps, -- .mac_port_get_state = mt7531_phylink_mac_link_state, - .mac_port_config = mt7531_mac_config, -- .mac_pcs_an_restart = mt7531_sgmii_restart_an, -- .mac_pcs_link_up = mt7531_sgmii_link_up_force, - }, - }; - -@@ -3207,7 +3235,7 @@ mt7530_probe(struct mdio_device *mdiodev - if (!priv->info->sw_setup || !priv->info->pad_setup || - !priv->info->phy_read || !priv->info->phy_write || - !priv->info->mac_port_get_caps || -- !priv->info->mac_port_get_state || !priv->info->mac_port_config) -+ !priv->info->mac_port_config) - return -EINVAL; - - priv->id = priv->info->id; ---- a/drivers/net/dsa/mt7530.h -+++ b/drivers/net/dsa/mt7530.h -@@ -741,6 +741,12 @@ static const char *p5_intf_modes(unsigne - - struct mt7530_priv; - -+struct mt753x_pcs { -+ struct phylink_pcs pcs; -+ struct mt7530_priv *priv; -+ int port; -+}; -+ - /* struct mt753x_info - This is the main data structure for holding the specific - * part for each supported device - * @sw_setup: Holding the handler to a device initialization -@@ -752,18 +758,14 @@ struct mt7530_priv; - * port - * @mac_port_validate: Holding the way to set addition validate type for a - * certan MAC port -- * @mac_port_get_state: Holding the way getting the MAC/PCS state for a certain -- * MAC port - * @mac_port_config: Holding the way setting up the PHY attribute to a - * certain MAC port -- * @mac_pcs_an_restart Holding the way restarting PCS autonegotiation for a -- * certain MAC port -- * @mac_pcs_link_up: Holding the way setting up the PHY attribute to the pcs -- * of the certain MAC port - */ - struct mt753x_info { - enum mt753x_id id; - -+ const struct phylink_pcs_ops *pcs_ops; -+ - int (*sw_setup)(struct dsa_switch *ds); - int (*phy_read)(struct mt7530_priv *priv, int port, int regnum); - int (*phy_write)(struct mt7530_priv *priv, int port, int regnum, u16 val); -@@ -774,15 +776,9 @@ struct mt753x_info { - void (*mac_port_validate)(struct dsa_switch *ds, int port, - phy_interface_t interface, - unsigned long *supported); -- int (*mac_port_get_state)(struct dsa_switch *ds, int port, -- struct phylink_link_state *state); - int (*mac_port_config)(struct dsa_switch *ds, int port, - unsigned int mode, - phy_interface_t interface); -- void (*mac_pcs_an_restart)(struct dsa_switch *ds, int port); -- void (*mac_pcs_link_up)(struct dsa_switch *ds, int port, -- unsigned int mode, phy_interface_t interface, -- int speed, int duplex); - }; - - /* struct mt7530_priv - This is the main data structure for holding the state -@@ -824,6 +820,7 @@ struct mt7530_priv { - u8 mirror_tx; - - struct mt7530_port ports[MT7530_NUM_PORTS]; -+ struct mt753x_pcs pcs[MT7530_NUM_PORTS]; - /* protect among processes for registers access*/ - struct mutex reg_mutex; - int irq; diff --git a/target/linux/generic/backport-6.1/705-08-v5.19-net-dsa-mt7530-move-autoneg-handling-to-PCS-validati.patch b/target/linux/generic/backport-6.1/705-08-v5.19-net-dsa-mt7530-move-autoneg-handling-to-PCS-validati.patch deleted 
file mode 100644 index 4aa013aad791..000000000000 --- a/target/linux/generic/backport-6.1/705-08-v5.19-net-dsa-mt7530-move-autoneg-handling-to-PCS-validati.patch +++ /dev/null @@ -1,80 +0,0 @@ -From 2b0ee6768f3ac09072e5fd60b36580924e1cfa1c Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Mon, 11 Apr 2022 10:46:32 +0100 -Subject: [PATCH 08/13] net: dsa: mt7530: move autoneg handling to PCS - validation -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Move the autoneg bit handling to the PCS validation, which allows us to -get rid of mt753x_phylink_validate() and rely on the default -phylink_generic_validate() implementation for the MAC side. - -Tested-by: Marek Behún -Signed-off-by: Russell King (Oracle) -Signed-off-by: Paolo Abeni ---- - drivers/net/dsa/mt7530.c | 28 ++++++++++------------------ - 1 file changed, 10 insertions(+), 18 deletions(-) - ---- a/drivers/net/dsa/mt7530.c -+++ b/drivers/net/dsa/mt7530.c -@@ -2921,25 +2921,16 @@ static void mt753x_phylink_get_caps(stru - priv->info->mac_port_get_caps(ds, port, config); - } - --static void --mt753x_phylink_validate(struct dsa_switch *ds, int port, -- unsigned long *supported, -- struct phylink_link_state *state) --{ -- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; -- u32 caps; -- -- caps = dsa_to_port(ds, port)->pl_config.mac_capabilities; -- -- phylink_set_port_modes(mask); -- phylink_get_linkmodes(mask, state->interface, caps); -+static int mt753x_pcs_validate(struct phylink_pcs *pcs, -+ unsigned long *supported, -+ const struct phylink_link_state *state) -+{ -+ /* Autonegotiation is not supported in TRGMII nor 802.3z modes */ -+ if (state->interface == PHY_INTERFACE_MODE_TRGMII || -+ phy_interface_mode_is_8023z(state->interface)) -+ phylink_clear(supported, Autoneg); - -- if (state->interface != PHY_INTERFACE_MODE_TRGMII && -- !phy_interface_mode_is_8023z(state->interface)) -- phylink_set(mask, Autoneg); -- -- linkmode_and(supported, supported, mask); -- linkmode_and(state->advertising, state->advertising, mask); -+ return 0; - } - - static void mt7530_pcs_get_state(struct phylink_pcs *pcs, -@@ -3041,12 +3032,14 @@ static void mt7530_pcs_an_restart(struct - } - - static const struct phylink_pcs_ops mt7530_pcs_ops = { -+ .pcs_validate = mt753x_pcs_validate, - .pcs_get_state = mt7530_pcs_get_state, - .pcs_config = mt753x_pcs_config, - .pcs_an_restart = mt7530_pcs_an_restart, - }; - - static const struct phylink_pcs_ops mt7531_pcs_ops = { -+ .pcs_validate = mt753x_pcs_validate, - .pcs_get_state = mt7531_pcs_get_state, - .pcs_config = mt753x_pcs_config, - .pcs_an_restart = mt7531_pcs_an_restart, -@@ -3138,7 +3131,6 @@ static const struct dsa_switch_ops mt753 - .port_mirror_add = mt753x_port_mirror_add, - .port_mirror_del = mt753x_port_mirror_del, - .phylink_get_caps = mt753x_phylink_get_caps, -- .phylink_validate = mt753x_phylink_validate, - .phylink_mac_select_pcs = mt753x_phylink_mac_select_pcs, - .phylink_mac_config = mt753x_phylink_mac_config, - .phylink_mac_link_down = mt753x_phylink_mac_link_down, diff --git a/target/linux/generic/backport-6.1/705-09-v5.19-net-dsa-mt7530-mark-as-non-legacy.patch b/target/linux/generic/backport-6.1/705-09-v5.19-net-dsa-mt7530-mark-as-non-legacy.patch deleted file mode 100644 index d921d78733d4..000000000000 --- a/target/linux/generic/backport-6.1/705-09-v5.19-net-dsa-mt7530-mark-as-non-legacy.patch +++ /dev/null @@ -1,34 +0,0 @@ -From 5bc26de9bfaa6bb5539c09d4435dced98f429cfc Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" 
-Date: Mon, 11 Apr 2022 10:46:37 +0100 -Subject: [PATCH 09/13] net: dsa: mt7530: mark as non-legacy -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -The mt7530 driver does not make use of the speed, duplex, pause or -advertisement in its phylink_mac_config() implementation, so it can be -marked as a non-legacy driver. - -Tested-by: Marek Behún -Signed-off-by: Russell King (Oracle) -Signed-off-by: Paolo Abeni ---- - drivers/net/dsa/mt7530.c | 6 ++++++ - 1 file changed, 6 insertions(+) - ---- a/drivers/net/dsa/mt7530.c -+++ b/drivers/net/dsa/mt7530.c -@@ -2918,6 +2918,12 @@ static void mt753x_phylink_get_caps(stru - config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | - MAC_10 | MAC_100 | MAC_1000FD; - -+ /* This driver does not make use of the speed, duplex, pause or the -+ * advertisement in its mac_config, so it is safe to mark this driver -+ * as non-legacy. -+ */ -+ config->legacy_pre_march2020 = false; -+ - priv->info->mac_port_get_caps(ds, port, config); - } - diff --git a/target/linux/generic/backport-6.1/705-10-v5.19-net-dsa-mt753x-fix-pcs-conversion-regression.patch deleted file mode 100644 index e797b1ecb425..000000000000 --- a/target/linux/generic/backport-6.1/705-10-v5.19-net-dsa-mt753x-fix-pcs-conversion-regression.patch +++ /dev/null @@ -1,116 +0,0 @@ -From 1f15b5e8733115cee65342bcaafeaf0368809fae Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Mon, 25 Apr 2022 22:28:02 +0100 -Subject: [PATCH 10/13] net: dsa: mt753x: fix pcs conversion regression - -Daniel Golle reports that the conversion of mt753x to phylink PCS caused -an oops as below. - -The problem is with the placement of the PCS initialisation, which -occurs after mt7531_setup() has been called. However, buried in this -function is a call to set up the CPU port, which requires the PCS -structure to be already set up. - -Fix this by changing the initialisation order.
- -Unable to handle kernel NULL pointer dereference at virtual address 0000000000000020 -Mem abort info: - ESR = 0x96000005 - EC = 0x25: DABT (current EL), IL = 32 bits - SET = 0, FnV = 0 - EA = 0, S1PTW = 0 - FSC = 0x05: level 1 translation fault -Data abort info: - ISV = 0, ISS = 0x00000005 - CM = 0, WnR = 0 -user pgtable: 4k pages, 39-bit VAs, pgdp=0000000046057000 -[0000000000000020] pgd=0000000000000000, p4d=0000000000000000, pud=0000000000000000 -Internal error: Oops: 96000005 [#1] SMP -Modules linked in: -CPU: 0 PID: 32 Comm: kworker/u4:1 Tainted: G S 5.18.0-rc3-next-20220422+ #0 -Hardware name: Bananapi BPI-R64 (DT) -Workqueue: events_unbound deferred_probe_work_func -pstate: 60000005 (nZCv daif -PAN -UAO -TCO -DIT -SSBS BTYPE=--) -pc : mt7531_cpu_port_config+0xcc/0x1b0 -lr : mt7531_cpu_port_config+0xc0/0x1b0 -sp : ffffffc008d5b980 -x29: ffffffc008d5b990 x28: ffffff80060562c8 x27: 00000000f805633b -x26: ffffff80001a8880 x25: 00000000000009c4 x24: 0000000000000016 -x23: ffffff8005eb6470 x22: 0000000000003600 x21: ffffff8006948080 -x20: 0000000000000000 x19: 0000000000000006 x18: 0000000000000000 -x17: 0000000000000001 x16: 0000000000000001 x15: 02963607fcee069e -x14: 0000000000000000 x13: 0000000000000030 x12: 0101010101010101 -x11: ffffffc037302000 x10: 0000000000000870 x9 : ffffffc008d5b800 -x8 : ffffff800028f950 x7 : 0000000000000001 x6 : 00000000662b3000 -x5 : 00000000000002f0 x4 : 0000000000000000 x3 : ffffff800028f080 -x2 : 0000000000000000 x1 : ffffff800028f080 x0 : 0000000000000000 -Call trace: - mt7531_cpu_port_config+0xcc/0x1b0 - mt753x_cpu_port_enable+0x24/0x1f0 - mt7531_setup+0x49c/0x5c0 - mt753x_setup+0x20/0x31c - dsa_register_switch+0x8bc/0x1020 - mt7530_probe+0x118/0x200 - mdio_probe+0x30/0x64 - really_probe.part.0+0x98/0x280 - __driver_probe_device+0x94/0x140 - driver_probe_device+0x40/0x114 - __device_attach_driver+0xb0/0x10c - bus_for_each_drv+0x64/0xa0 - __device_attach+0xa8/0x16c - device_initial_probe+0x10/0x20 - bus_probe_device+0x94/0x9c - deferred_probe_work_func+0x80/0xb4 - process_one_work+0x200/0x3a0 - worker_thread+0x260/0x4c0 - kthread+0xd4/0xe0 - ret_from_fork+0x10/0x20 -Code: 9409e911 937b7e60 8b0002a0 f9405800 (f9401005) ----[ end trace 0000000000000000 ]--- - -Reported-by: Daniel Golle -Tested-by: Daniel Golle -Fixes: cbd1f243bc41 ("net: dsa: mt7530: partially convert to phylink_pcs") -Signed-off-by: Russell King (Oracle) -Reviewed-by: Florian Fainelli -Link: https://lore.kernel.org/r/E1nj6FW-007WZB-5Y@rmk-PC.armlinux.org.uk -Signed-off-by: Jakub Kicinski ---- - drivers/net/dsa/mt7530.c | 18 +++++++++--------- - 1 file changed, 9 insertions(+), 9 deletions(-) - ---- a/drivers/net/dsa/mt7530.c -+++ b/drivers/net/dsa/mt7530.c -@@ -3056,9 +3056,16 @@ static int - mt753x_setup(struct dsa_switch *ds) - { - struct mt7530_priv *priv = ds->priv; -- int ret = priv->info->sw_setup(ds); -- int i; -+ int i, ret; - -+ /* Initialise the PCS devices */ -+ for (i = 0; i < priv->ds->num_ports; i++) { -+ priv->pcs[i].pcs.ops = priv->info->pcs_ops; -+ priv->pcs[i].priv = priv; -+ priv->pcs[i].port = i; -+ } -+ -+ ret = priv->info->sw_setup(ds); - if (ret) - return ret; - -@@ -3070,13 +3077,6 @@ mt753x_setup(struct dsa_switch *ds) - if (ret && priv->irq) - mt7530_free_irq_common(priv); - -- /* Initialise the PCS devices */ -- for (i = 0; i < priv->ds->num_ports; i++) { -- priv->pcs[i].pcs.ops = priv->info->pcs_ops; -- priv->pcs[i].priv = priv; -- priv->pcs[i].port = i; -- } -- - return ret; - } - diff --git 
a/target/linux/generic/backport-6.1/705-11-v6.0-net-dsa-mt7530-rework-mt7530_hw_vlan_-add-del.patch b/target/linux/generic/backport-6.1/705-11-v6.0-net-dsa-mt7530-rework-mt7530_hw_vlan_-add-del.patch deleted file mode 100644 index 26d207ca6ab7..000000000000 --- a/target/linux/generic/backport-6.1/705-11-v6.0-net-dsa-mt7530-rework-mt7530_hw_vlan_-add-del.patch +++ /dev/null @@ -1,87 +0,0 @@ -From e26be16262e1fc1e9f1798c12762663bd9c265c6 Mon Sep 17 00:00:00 2001 -From: Frank Wunderlich -Date: Fri, 10 Jun 2022 19:05:37 +0200 -Subject: [PATCH 11/13] net: dsa: mt7530: rework mt7530_hw_vlan_{add,del} - -Rework vlan_add/vlan_del functions in preparation for dynamic cpu port. - -Currently BIT(MT7530_CPU_PORT) is added to new_members, even though -mt7530_port_vlan_add() will be called on the CPU port too. - -Let DSA core decide when to call port_vlan_add for the CPU port, rather -than doing it implicitly. - -We can do autonomous forwarding in a certain VLAN, but not add br0 to that -VLAN and avoid flooding the CPU with those packets, if software knows it -doesn't need to process them. - -Suggested-by: Vladimir Oltean -Signed-off-by: Frank Wunderlich -Reviewed-by: Vladimir Oltean -Reviewed-by: Florian Fainelli -Signed-off-by: Jakub Kicinski ---- - drivers/net/dsa/mt7530.c | 30 ++++++++++++------------------ - 1 file changed, 12 insertions(+), 18 deletions(-) - ---- a/drivers/net/dsa/mt7530.c -+++ b/drivers/net/dsa/mt7530.c -@@ -1536,11 +1536,11 @@ static void - mt7530_hw_vlan_add(struct mt7530_priv *priv, - struct mt7530_hw_vlan_entry *entry) - { -+ struct dsa_port *dp = dsa_to_port(priv->ds, entry->port); - u8 new_members; - u32 val; - -- new_members = entry->old_members | BIT(entry->port) | -- BIT(MT7530_CPU_PORT); -+ new_members = entry->old_members | BIT(entry->port); - - /* Validate the entry with independent learning, create egress tag per - * VLAN and joining the port as one of the port members. -@@ -1551,22 +1551,20 @@ mt7530_hw_vlan_add(struct mt7530_priv *p - - /* Decide whether adding tag or not for those outgoing packets from the - * port inside the VLAN. -- */ -- val = entry->untagged ? MT7530_VLAN_EGRESS_UNTAG : -- MT7530_VLAN_EGRESS_TAG; -- mt7530_rmw(priv, MT7530_VAWD2, -- ETAG_CTRL_P_MASK(entry->port), -- ETAG_CTRL_P(entry->port, val)); -- -- /* CPU port is always taken as a tagged port for serving more than one -+ * CPU port is always taken as a tagged port for serving more than one - * VLANs across and also being applied with egress type stack mode for - * that VLAN tags would be appended after hardware special tag used as - * DSA tag. - */ -+ if (dsa_port_is_cpu(dp)) -+ val = MT7530_VLAN_EGRESS_STACK; -+ else if (entry->untagged) -+ val = MT7530_VLAN_EGRESS_UNTAG; -+ else -+ val = MT7530_VLAN_EGRESS_TAG; - mt7530_rmw(priv, MT7530_VAWD2, -- ETAG_CTRL_P_MASK(MT7530_CPU_PORT), -- ETAG_CTRL_P(MT7530_CPU_PORT, -- MT7530_VLAN_EGRESS_STACK)); -+ ETAG_CTRL_P_MASK(entry->port), -+ ETAG_CTRL_P(entry->port, val)); - } - - static void -@@ -1585,11 +1583,7 @@ mt7530_hw_vlan_del(struct mt7530_priv *p - return; - } - -- /* If certain member apart from CPU port is still alive in the VLAN, -- * the entry would be kept valid. Otherwise, the entry is got to be -- * disabled. 
-- */ -- if (new_members && new_members != BIT(MT7530_CPU_PORT)) { -+ if (new_members) { - val = IVL_MAC | VTAG_EN | PORT_MEM(new_members) | - VLAN_VALID; - mt7530_write(priv, MT7530_VAWD1, val); diff --git a/target/linux/generic/backport-6.1/705-12-v6.0-net-dsa-mt7530-rework-mt753-01-_setup.patch deleted file mode 100644 index b02705cc7011..000000000000 --- a/target/linux/generic/backport-6.1/705-12-v6.0-net-dsa-mt7530-rework-mt753-01-_setup.patch +++ /dev/null @@ -1,75 +0,0 @@ -From 1f0dfd443eea7fc3e818e96f7c8264913ba41859 Mon Sep 17 00:00:00 2001 -From: Frank Wunderlich -Date: Fri, 10 Jun 2022 19:05:38 +0200 -Subject: [PATCH 12/13] net: dsa: mt7530: rework mt753[01]_setup - -Enumerate available cpu-ports instead of using hardcoded constant. - -Suggested-by: Vladimir Oltean -Signed-off-by: Frank Wunderlich -Reviewed-by: Vladimir Oltean -Reviewed-by: Florian Fainelli -Signed-off-by: Jakub Kicinski ---- - drivers/net/dsa/mt7530.c | 25 +++++++++++++++++++++---- - 1 file changed, 21 insertions(+), 4 deletions(-) - ---- a/drivers/net/dsa/mt7530.c -+++ b/drivers/net/dsa/mt7530.c -@@ -2101,11 +2101,12 @@ static int - mt7530_setup(struct dsa_switch *ds) - { - struct mt7530_priv *priv = ds->priv; -+ struct device_node *dn = NULL; - struct device_node *phy_node; - struct device_node *mac_np; - struct mt7530_dummy_poll p; - phy_interface_t interface; -- struct device_node *dn; -+ struct dsa_port *cpu_dp; - u32 id, val; - int ret, i; - -@@ -2113,7 +2114,19 @@ mt7530_setup(struct dsa_switch *ds) - * controller also is the container for two GMACs nodes representing - * as two netdev instances. - */ -- dn = dsa_to_port(ds, MT7530_CPU_PORT)->master->dev.of_node->parent; -+ dsa_switch_for_each_cpu_port(cpu_dp, ds) { -+ dn = cpu_dp->master->dev.of_node->parent; -+ /* It doesn't matter which CPU port is found first, -+ * their masters should share the same parent OF node -+ */ -+ break; -+ } -+ -+ if (!dn) { -+ dev_err(ds->dev, "parent OF node of DSA master not found"); -+ return -EINVAL; -+ } -+ - ds->assisted_learning_on_cpu_port = true; - ds->mtu_enforcement_ingress = true; - -@@ -2286,6 +2299,7 @@ mt7531_setup(struct dsa_switch *ds) - { - struct mt7530_priv *priv = ds->priv; - struct mt7530_dummy_poll p; -+ struct dsa_port *cpu_dp; - u32 val, id; - int ret, i; - -@@ -2360,8 +2374,11 @@ mt7531_setup(struct dsa_switch *ds) - CORE_PLL_GROUP4, val); - - /* BPDU to CPU port */ -- mt7530_rmw(priv, MT7531_CFC, MT7531_CPU_PMAP_MASK, -- BIT(MT7530_CPU_PORT)); -+ dsa_switch_for_each_cpu_port(cpu_dp, ds) { -+ mt7530_rmw(priv, MT7531_CFC, MT7531_CPU_PMAP_MASK, -+ BIT(cpu_dp->index)); -+ break; -+ } - mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK, - MT753X_BPDU_CPU_ONLY); - diff --git a/target/linux/generic/backport-6.1/705-13-v6.0-net-dsa-mt7530-get-cpu-port-via-dp-cpu_dp-instead-of.patch deleted file mode 100644 index 2bfa1ef438e7..000000000000 --- a/target/linux/generic/backport-6.1/705-13-v6.0-net-dsa-mt7530-get-cpu-port-via-dp-cpu_dp-instead-of.patch +++ /dev/null @@ -1,117 +0,0 @@ -From ad2606f6fafae3a7d41c4f2af5554c8f6adecbc7 Mon Sep 17 00:00:00 2001 -From: Frank Wunderlich -Date: Fri, 10 Jun 2022 19:05:39 +0200 -Subject: [PATCH 13/13] net: dsa: mt7530: get cpu-port via dp->cpu_dp instead - of constant - -Replace last occurrences of hardcoded cpu-port by cpu_dp member of -dsa_port struct. 
- -Now the constant can be dropped. - -Suggested-by: Vladimir Oltean -Signed-off-by: Frank Wunderlich -Reviewed-by: Vladimir Oltean -Reviewed-by: Florian Fainelli -Signed-off-by: Jakub Kicinski ---- - drivers/net/dsa/mt7530.c | 27 ++++++++++++++++++++------- - drivers/net/dsa/mt7530.h | 1 - - 2 files changed, 20 insertions(+), 8 deletions(-) - ---- a/drivers/net/dsa/mt7530.c -+++ b/drivers/net/dsa/mt7530.c -@@ -1040,6 +1040,7 @@ static int - mt7530_port_enable(struct dsa_switch *ds, int port, - struct phy_device *phy) - { -+ struct dsa_port *dp = dsa_to_port(ds, port); - struct mt7530_priv *priv = ds->priv; - - mutex_lock(&priv->reg_mutex); -@@ -1048,7 +1049,11 @@ mt7530_port_enable(struct dsa_switch *ds - * restore the port matrix if the port is the member of a certain - * bridge. - */ -- priv->ports[port].pm |= PCR_MATRIX(BIT(MT7530_CPU_PORT)); -+ if (dsa_port_is_user(dp)) { -+ struct dsa_port *cpu_dp = dp->cpu_dp; -+ -+ priv->ports[port].pm |= PCR_MATRIX(BIT(cpu_dp->index)); -+ } - priv->ports[port].enable = true; - mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK, - priv->ports[port].pm); -@@ -1196,7 +1201,8 @@ mt7530_port_bridge_join(struct dsa_switc - struct net_device *bridge) - { - struct dsa_port *dp = dsa_to_port(ds, port), *other_dp; -- u32 port_bitmap = BIT(MT7530_CPU_PORT); -+ struct dsa_port *cpu_dp = dp->cpu_dp; -+ u32 port_bitmap = BIT(cpu_dp->index); - struct mt7530_priv *priv = ds->priv; - - mutex_lock(&priv->reg_mutex); -@@ -1273,9 +1279,12 @@ mt7530_port_set_vlan_unaware(struct dsa_ - * the CPU port get out of VLAN filtering mode. - */ - if (all_user_ports_removed) { -- mt7530_write(priv, MT7530_PCR_P(MT7530_CPU_PORT), -+ struct dsa_port *dp = dsa_to_port(ds, port); -+ struct dsa_port *cpu_dp = dp->cpu_dp; -+ -+ mt7530_write(priv, MT7530_PCR_P(cpu_dp->index), - PCR_MATRIX(dsa_user_ports(priv->ds))); -- mt7530_write(priv, MT7530_PVC_P(MT7530_CPU_PORT), PORT_SPEC_TAG -+ mt7530_write(priv, MT7530_PVC_P(cpu_dp->index), PORT_SPEC_TAG - | PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT)); - } - } -@@ -1325,6 +1334,7 @@ mt7530_port_bridge_leave(struct dsa_swit - struct net_device *bridge) - { - struct dsa_port *dp = dsa_to_port(ds, port), *other_dp; -+ struct dsa_port *cpu_dp = dp->cpu_dp; - struct mt7530_priv *priv = ds->priv; - - mutex_lock(&priv->reg_mutex); -@@ -1353,8 +1363,8 @@ mt7530_port_bridge_leave(struct dsa_swit - */ - if (priv->ports[port].enable) - mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK, -- PCR_MATRIX(BIT(MT7530_CPU_PORT))); -- priv->ports[port].pm = PCR_MATRIX(BIT(MT7530_CPU_PORT)); -+ PCR_MATRIX(BIT(cpu_dp->index))); -+ priv->ports[port].pm = PCR_MATRIX(BIT(cpu_dp->index)); - - /* When a port is removed from the bridge, the port would be set up - * back to the default as is at initial boot which is a VLAN-unaware -@@ -1517,6 +1527,9 @@ static int - mt7530_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, - struct netlink_ext_ack *extack) - { -+ struct dsa_port *dp = dsa_to_port(ds, port); -+ struct dsa_port *cpu_dp = dp->cpu_dp; -+ - if (vlan_filtering) { - /* The port is being kept as VLAN-unaware port when bridge is - * set up with vlan_filtering not being set, Otherwise, the -@@ -1524,7 +1537,7 @@ mt7530_port_vlan_filtering(struct dsa_sw - * for becoming a VLAN-aware port. 
- */ - mt7530_port_set_vlan_aware(ds, port); -- mt7530_port_set_vlan_aware(ds, MT7530_CPU_PORT); -+ mt7530_port_set_vlan_aware(ds, cpu_dp->index); - } else { - mt7530_port_set_vlan_unaware(ds, port); - } ---- a/drivers/net/dsa/mt7530.h -+++ b/drivers/net/dsa/mt7530.h -@@ -8,7 +8,6 @@ - - #define MT7530_NUM_PORTS 7 - #define MT7530_NUM_PHYS 5 --#define MT7530_CPU_PORT 6 - #define MT7530_NUM_FDB_RECORDS 2048 - #define MT7530_ALL_MEMBERS 0xff - diff --git a/target/linux/generic/backport-6.1/706-00-v6.0-net-ethernet-mtk_eth_soc-rely-on-page_pool-for-singl.patch b/target/linux/generic/backport-6.1/706-00-v6.0-net-ethernet-mtk_eth_soc-rely-on-page_pool-for-singl.patch deleted file mode 100644 index c5501fb04023..000000000000 --- a/target/linux/generic/backport-6.1/706-00-v6.0-net-ethernet-mtk_eth_soc-rely-on-page_pool-for-singl.patch +++ /dev/null @@ -1,330 +0,0 @@ -From 23233e577ef973c2c5d0dd757a0a4605e34ecb57 Mon Sep 17 00:00:00 2001 -From: Lorenzo Bianconi -Date: Fri, 22 Jul 2022 09:19:36 +0200 -Subject: [PATCH] net: ethernet: mtk_eth_soc: rely on page_pool for single page - buffers - -Rely on page_pool allocator for single page buffers in order to keep -them dma mapped and add skb recycling support. - -Signed-off-by: Lorenzo Bianconi -Signed-off-by: David S. Miller ---- - drivers/net/ethernet/mediatek/Kconfig | 1 + - drivers/net/ethernet/mediatek/mtk_eth_soc.c | 185 +++++++++++++++----- - drivers/net/ethernet/mediatek/mtk_eth_soc.h | 10 ++ - 3 files changed, 156 insertions(+), 40 deletions(-) - ---- a/drivers/net/ethernet/mediatek/Kconfig -+++ b/drivers/net/ethernet/mediatek/Kconfig -@@ -16,6 +16,7 @@ config NET_MEDIATEK_SOC - depends on NET_DSA || !NET_DSA - select PHYLINK - select DIMLIB -+ select PAGE_POOL - help - This driver supports the gigabit ethernet MACs in the - MediaTek SoC family. 
---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -1389,6 +1389,68 @@ static void mtk_update_rx_cpu_idx(struct - } - } - -+static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth, -+ struct xdp_rxq_info *xdp_q, -+ int id, int size) -+{ -+ struct page_pool_params pp_params = { -+ .order = 0, -+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, -+ .pool_size = size, -+ .nid = NUMA_NO_NODE, -+ .dev = eth->dma_dev, -+ .dma_dir = DMA_FROM_DEVICE, -+ .offset = MTK_PP_HEADROOM, -+ .max_len = MTK_PP_MAX_BUF_SIZE, -+ }; -+ struct page_pool *pp; -+ int err; -+ -+ pp = page_pool_create(&pp_params); -+ if (IS_ERR(pp)) -+ return pp; -+ -+ err = xdp_rxq_info_reg(xdp_q, ð->dummy_dev, id, -+ eth->rx_napi.napi_id); -+ if (err < 0) -+ goto err_free_pp; -+ -+ err = xdp_rxq_info_reg_mem_model(xdp_q, MEM_TYPE_PAGE_POOL, pp); -+ if (err) -+ goto err_unregister_rxq; -+ -+ return pp; -+ -+err_unregister_rxq: -+ xdp_rxq_info_unreg(xdp_q); -+err_free_pp: -+ page_pool_destroy(pp); -+ -+ return ERR_PTR(err); -+} -+ -+static void *mtk_page_pool_get_buff(struct page_pool *pp, dma_addr_t *dma_addr, -+ gfp_t gfp_mask) -+{ -+ struct page *page; -+ -+ page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN); -+ if (!page) -+ return NULL; -+ -+ *dma_addr = page_pool_get_dma_addr(page) + MTK_PP_HEADROOM; -+ return page_address(page); -+} -+ -+static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi) -+{ -+ if (ring->page_pool) -+ page_pool_put_full_page(ring->page_pool, -+ virt_to_head_page(data), napi); -+ else -+ skb_free_frag(data); -+} -+ - static int mtk_poll_rx(struct napi_struct *napi, int budget, - struct mtk_eth *eth) - { -@@ -1402,9 +1464,9 @@ static int mtk_poll_rx(struct napi_struc - - while (done < budget) { - unsigned int pktlen, *rxdcsum; -+ u32 hash, reason, reserve_len; - struct net_device *netdev; - dma_addr_t dma_addr; -- u32 hash, reason; - int mac = 0; - - ring = mtk_get_rx_ring(eth); -@@ -1435,36 +1497,54 @@ static int mtk_poll_rx(struct napi_struc - goto release_desc; - - /* alloc new buffer */ -- if (ring->frag_size <= PAGE_SIZE) -- new_data = napi_alloc_frag(ring->frag_size); -- else -- new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC); -- if (unlikely(!new_data)) { -- netdev->stats.rx_dropped++; -- goto release_desc; -- } -- dma_addr = dma_map_single(eth->dma_dev, -- new_data + NET_SKB_PAD + -- eth->ip_align, -- ring->buf_size, -- DMA_FROM_DEVICE); -- if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) { -- skb_free_frag(new_data); -- netdev->stats.rx_dropped++; -- goto release_desc; -- } -+ if (ring->page_pool) { -+ new_data = mtk_page_pool_get_buff(ring->page_pool, -+ &dma_addr, -+ GFP_ATOMIC); -+ if (unlikely(!new_data)) { -+ netdev->stats.rx_dropped++; -+ goto release_desc; -+ } -+ } else { -+ if (ring->frag_size <= PAGE_SIZE) -+ new_data = napi_alloc_frag(ring->frag_size); -+ else -+ new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC); -+ -+ if (unlikely(!new_data)) { -+ netdev->stats.rx_dropped++; -+ goto release_desc; -+ } - -- dma_unmap_single(eth->dma_dev, trxd.rxd1, -- ring->buf_size, DMA_FROM_DEVICE); -+ dma_addr = dma_map_single(eth->dma_dev, -+ new_data + NET_SKB_PAD + eth->ip_align, -+ ring->buf_size, DMA_FROM_DEVICE); -+ if (unlikely(dma_mapping_error(eth->dma_dev, -+ dma_addr))) { -+ skb_free_frag(new_data); -+ netdev->stats.rx_dropped++; -+ goto release_desc; -+ } -+ -+ dma_unmap_single(eth->dma_dev, trxd.rxd1, -+ ring->buf_size, DMA_FROM_DEVICE); -+ } - - /* receive data */ - skb = build_skb(data, 
ring->frag_size); - if (unlikely(!skb)) { -- skb_free_frag(data); -+ mtk_rx_put_buff(ring, data, true); - netdev->stats.rx_dropped++; - goto skip_rx; - } -- skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); -+ -+ if (ring->page_pool) { -+ reserve_len = MTK_PP_HEADROOM; -+ skb_mark_for_recycle(skb); -+ } else { -+ reserve_len = NET_SKB_PAD + NET_IP_ALIGN; -+ } -+ skb_reserve(skb, reserve_len); - - pktlen = RX_DMA_GET_PLEN0(trxd.rxd2); - skb->dev = netdev; -@@ -1518,7 +1598,6 @@ static int mtk_poll_rx(struct napi_struc - skip_rx: - ring->data[idx] = new_data; - rxd->rxd1 = (unsigned int)dma_addr; -- - release_desc: - if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) - rxd->rxd2 = RX_DMA_LSO; -@@ -1526,7 +1605,6 @@ release_desc: - rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size); - - ring->calc_idx = idx; -- - done++; - } - -@@ -1890,13 +1968,15 @@ static int mtk_rx_alloc(struct mtk_eth * - if (!ring->data) - return -ENOMEM; - -- for (i = 0; i < rx_dma_size; i++) { -- if (ring->frag_size <= PAGE_SIZE) -- ring->data[i] = netdev_alloc_frag(ring->frag_size); -- else -- ring->data[i] = mtk_max_lro_buf_alloc(GFP_KERNEL); -- if (!ring->data[i]) -- return -ENOMEM; -+ if (!eth->hwlro) { -+ struct page_pool *pp; -+ -+ pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no, -+ rx_dma_size); -+ if (IS_ERR(pp)) -+ return PTR_ERR(pp); -+ -+ ring->page_pool = pp; - } - - ring->dma = dma_alloc_coherent(eth->dma_dev, -@@ -1907,16 +1987,33 @@ static int mtk_rx_alloc(struct mtk_eth * - - for (i = 0; i < rx_dma_size; i++) { - struct mtk_rx_dma_v2 *rxd; -- -- dma_addr_t dma_addr = dma_map_single(eth->dma_dev, -- ring->data[i] + NET_SKB_PAD + eth->ip_align, -- ring->buf_size, -- DMA_FROM_DEVICE); -- if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) -- return -ENOMEM; -+ dma_addr_t dma_addr; -+ void *data; - - rxd = ring->dma + i * eth->soc->txrx.rxd_size; -+ if (ring->page_pool) { -+ data = mtk_page_pool_get_buff(ring->page_pool, -+ &dma_addr, GFP_KERNEL); -+ if (!data) -+ return -ENOMEM; -+ } else { -+ if (ring->frag_size <= PAGE_SIZE) -+ data = netdev_alloc_frag(ring->frag_size); -+ else -+ data = mtk_max_lro_buf_alloc(GFP_KERNEL); -+ -+ if (!data) -+ return -ENOMEM; -+ -+ dma_addr = dma_map_single(eth->dma_dev, -+ data + NET_SKB_PAD + eth->ip_align, -+ ring->buf_size, DMA_FROM_DEVICE); -+ if (unlikely(dma_mapping_error(eth->dma_dev, -+ dma_addr))) -+ return -ENOMEM; -+ } - rxd->rxd1 = (unsigned int)dma_addr; -+ ring->data[i] = data; - - if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) - rxd->rxd2 = RX_DMA_LSO; -@@ -1932,6 +2029,7 @@ static int mtk_rx_alloc(struct mtk_eth * - rxd->rxd8 = 0; - } - } -+ - ring->dma_size = rx_dma_size; - ring->calc_idx_update = false; - ring->calc_idx = rx_dma_size - 1; -@@ -1983,7 +2081,7 @@ static void mtk_rx_clean(struct mtk_eth - - dma_unmap_single(eth->dma_dev, rxd->rxd1, - ring->buf_size, DMA_FROM_DEVICE); -- skb_free_frag(ring->data[i]); -+ mtk_rx_put_buff(ring, ring->data[i], false); - } - kfree(ring->data); - ring->data = NULL; -@@ -1995,6 +2093,13 @@ static void mtk_rx_clean(struct mtk_eth - ring->dma, ring->phys); - ring->dma = NULL; - } -+ -+ if (ring->page_pool) { -+ if (xdp_rxq_info_is_reg(&ring->xdp_q)) -+ xdp_rxq_info_unreg(&ring->xdp_q); -+ page_pool_destroy(ring->page_pool); -+ ring->page_pool = NULL; -+ } - } - - static int mtk_hwlro_rx_init(struct mtk_eth *eth) ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -18,6 +18,8 @@ - #include - #include - #include -+#include -+#include - #include "mtk_ppe.h" - - 
#define MTK_QDMA_PAGE_SIZE 2048 -@@ -49,6 +51,11 @@ - #define MTK_HW_FEATURES_MT7628 (NETIF_F_SG | NETIF_F_RXCSUM) - #define NEXT_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1)) - -+#define MTK_PP_HEADROOM XDP_PACKET_HEADROOM -+#define MTK_PP_PAD (MTK_PP_HEADROOM + \ -+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) -+#define MTK_PP_MAX_BUF_SIZE (PAGE_SIZE - MTK_PP_PAD) -+ - #define MTK_QRX_OFFSET 0x10 - - #define MTK_MAX_RX_RING_NUM 4 -@@ -743,6 +750,9 @@ struct mtk_rx_ring { - bool calc_idx_update; - u16 calc_idx; - u32 crx_idx_reg; -+ /* page_pool */ -+ struct page_pool *page_pool; -+ struct xdp_rxq_info xdp_q; - }; - - enum mkt_eth_capabilities { diff --git a/target/linux/generic/backport-6.1/706-01-v6.0-net-ethernet-mtk_eth_soc-add-basic-XDP-support.patch b/target/linux/generic/backport-6.1/706-01-v6.0-net-ethernet-mtk_eth_soc-add-basic-XDP-support.patch deleted file mode 100644 index d94bdabd714f..000000000000 --- a/target/linux/generic/backport-6.1/706-01-v6.0-net-ethernet-mtk_eth_soc-add-basic-XDP-support.patch +++ /dev/null @@ -1,291 +0,0 @@ -From 7c26c20da5d420cde55618263be4aa2f6de53056 Mon Sep 17 00:00:00 2001 -From: Lorenzo Bianconi -Date: Fri, 22 Jul 2022 09:19:37 +0200 -Subject: [PATCH] net: ethernet: mtk_eth_soc: add basic XDP support - -Introduce basic XDP support to mtk_eth_soc driver. -Supported XDP verdicts: -- XDP_PASS -- XDP_DROP -- XDP_REDIRECT - -Signed-off-by: Lorenzo Bianconi -Signed-off-by: David S. Miller ---- - drivers/net/ethernet/mediatek/mtk_eth_soc.c | 162 +++++++++++++++++--- - drivers/net/ethernet/mediatek/mtk_eth_soc.h | 2 + - 2 files changed, 145 insertions(+), 19 deletions(-) - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -1389,6 +1389,11 @@ static void mtk_update_rx_cpu_idx(struct - } - } - -+static bool mtk_page_pool_enabled(struct mtk_eth *eth) -+{ -+ return !eth->hwlro; -+} -+ - static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth, - struct xdp_rxq_info *xdp_q, - int id, int size) -@@ -1451,11 +1456,52 @@ static void mtk_rx_put_buff(struct mtk_r - skb_free_frag(data); - } - -+static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring, -+ struct xdp_buff *xdp, struct net_device *dev) -+{ -+ struct bpf_prog *prog; -+ u32 act = XDP_PASS; -+ -+ rcu_read_lock(); -+ -+ prog = rcu_dereference(eth->prog); -+ if (!prog) -+ goto out; -+ -+ act = bpf_prog_run_xdp(prog, xdp); -+ switch (act) { -+ case XDP_PASS: -+ goto out; -+ case XDP_REDIRECT: -+ if (unlikely(xdp_do_redirect(dev, xdp, prog))) { -+ act = XDP_DROP; -+ break; -+ } -+ goto out; -+ default: -+ bpf_warn_invalid_xdp_action(act); -+ fallthrough; -+ case XDP_ABORTED: -+ trace_xdp_exception(dev, prog, act); -+ fallthrough; -+ case XDP_DROP: -+ break; -+ } -+ -+ page_pool_put_full_page(ring->page_pool, -+ virt_to_head_page(xdp->data), true); -+out: -+ rcu_read_unlock(); -+ -+ return act; -+} -+ - static int mtk_poll_rx(struct napi_struct *napi, int budget, - struct mtk_eth *eth) - { - struct dim_sample dim_sample = {}; - struct mtk_rx_ring *ring; -+ bool xdp_flush = false; - int idx; - struct sk_buff *skb; - u8 *data, *new_data; -@@ -1464,9 +1510,9 @@ static int mtk_poll_rx(struct napi_struc - - while (done < budget) { - unsigned int pktlen, *rxdcsum; -- u32 hash, reason, reserve_len; - struct net_device *netdev; - dma_addr_t dma_addr; -+ u32 hash, reason; - int mac = 0; - - ring = mtk_get_rx_ring(eth); -@@ -1496,8 +1542,14 @@ static int mtk_poll_rx(struct napi_struc - if (unlikely(test_bit(MTK_RESETTING, ð->state))) - goto 
release_desc; - -+ pktlen = RX_DMA_GET_PLEN0(trxd.rxd2); -+ - /* alloc new buffer */ - if (ring->page_pool) { -+ struct page *page = virt_to_head_page(data); -+ struct xdp_buff xdp; -+ u32 ret; -+ - new_data = mtk_page_pool_get_buff(ring->page_pool, - &dma_addr, - GFP_ATOMIC); -@@ -1505,6 +1557,34 @@ static int mtk_poll_rx(struct napi_struc - netdev->stats.rx_dropped++; - goto release_desc; - } -+ -+ dma_sync_single_for_cpu(eth->dma_dev, -+ page_pool_get_dma_addr(page) + MTK_PP_HEADROOM, -+ pktlen, page_pool_get_dma_dir(ring->page_pool)); -+ -+ xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q); -+ xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen, -+ false); -+ xdp_buff_clear_frags_flag(&xdp); -+ -+ ret = mtk_xdp_run(eth, ring, &xdp, netdev); -+ if (ret == XDP_REDIRECT) -+ xdp_flush = true; -+ -+ if (ret != XDP_PASS) -+ goto skip_rx; -+ -+ skb = build_skb(data, PAGE_SIZE); -+ if (unlikely(!skb)) { -+ page_pool_put_full_page(ring->page_pool, -+ page, true); -+ netdev->stats.rx_dropped++; -+ goto skip_rx; -+ } -+ -+ skb_reserve(skb, xdp.data - xdp.data_hard_start); -+ skb_put(skb, xdp.data_end - xdp.data); -+ skb_mark_for_recycle(skb); - } else { - if (ring->frag_size <= PAGE_SIZE) - new_data = napi_alloc_frag(ring->frag_size); -@@ -1528,27 +1608,20 @@ static int mtk_poll_rx(struct napi_struc - - dma_unmap_single(eth->dma_dev, trxd.rxd1, - ring->buf_size, DMA_FROM_DEVICE); -- } - -- /* receive data */ -- skb = build_skb(data, ring->frag_size); -- if (unlikely(!skb)) { -- mtk_rx_put_buff(ring, data, true); -- netdev->stats.rx_dropped++; -- goto skip_rx; -- } -+ skb = build_skb(data, ring->frag_size); -+ if (unlikely(!skb)) { -+ netdev->stats.rx_dropped++; -+ skb_free_frag(data); -+ goto skip_rx; -+ } - -- if (ring->page_pool) { -- reserve_len = MTK_PP_HEADROOM; -- skb_mark_for_recycle(skb); -- } else { -- reserve_len = NET_SKB_PAD + NET_IP_ALIGN; -+ skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); -+ skb_put(skb, pktlen); - } -- skb_reserve(skb, reserve_len); - -- pktlen = RX_DMA_GET_PLEN0(trxd.rxd2); - skb->dev = netdev; -- skb_put(skb, pktlen); -+ bytes += skb->len; - - if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) - rxdcsum = &trxd.rxd3; -@@ -1560,7 +1633,6 @@ static int mtk_poll_rx(struct napi_struc - else - skb_checksum_none_assert(skb); - skb->protocol = eth_type_trans(skb, netdev); -- bytes += pktlen; - - hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY; - if (hash != MTK_RXD4_FOE_ENTRY) { -@@ -1623,6 +1695,9 @@ rx_done: - &dim_sample); - net_dim(ð->rx_dim, dim_sample); - -+ if (xdp_flush) -+ xdp_do_flush_map(); -+ - return done; - } - -@@ -1968,7 +2043,7 @@ static int mtk_rx_alloc(struct mtk_eth * - if (!ring->data) - return -ENOMEM; - -- if (!eth->hwlro) { -+ if (mtk_page_pool_enabled(eth)) { - struct page_pool *pp; - - pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no, -@@ -2710,6 +2785,48 @@ static int mtk_stop(struct net_device *d - return 0; - } - -+static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog, -+ struct netlink_ext_ack *extack) -+{ -+ struct mtk_mac *mac = netdev_priv(dev); -+ struct mtk_eth *eth = mac->hw; -+ struct bpf_prog *old_prog; -+ bool need_update; -+ -+ if (eth->hwlro) { -+ NL_SET_ERR_MSG_MOD(extack, "XDP not supported with HWLRO"); -+ return -EOPNOTSUPP; -+ } -+ -+ if (dev->mtu > MTK_PP_MAX_BUF_SIZE) { -+ NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP"); -+ return -EOPNOTSUPP; -+ } -+ -+ need_update = !!eth->prog != !!prog; -+ if (netif_running(dev) && need_update) -+ mtk_stop(dev); -+ -+ old_prog = rcu_replace_pointer(eth->prog, prog, 
lockdep_rtnl_is_held()); -+ if (old_prog) -+ bpf_prog_put(old_prog); -+ -+ if (netif_running(dev) && need_update) -+ return mtk_open(dev); -+ -+ return 0; -+} -+ -+static int mtk_xdp(struct net_device *dev, struct netdev_bpf *xdp) -+{ -+ switch (xdp->command) { -+ case XDP_SETUP_PROG: -+ return mtk_xdp_setup(dev, xdp->prog, xdp->extack); -+ default: -+ return -EINVAL; -+ } -+} -+ - static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits) - { - regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, -@@ -3005,6 +3122,12 @@ static int mtk_change_mtu(struct net_dev - struct mtk_eth *eth = mac->hw; - u32 mcr_cur, mcr_new; - -+ if (rcu_access_pointer(eth->prog) && -+ length > MTK_PP_MAX_BUF_SIZE) { -+ netdev_err(dev, "Invalid MTU for XDP mode\n"); -+ return -EINVAL; -+ } -+ - if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { - mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); - mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK; -@@ -3332,6 +3455,7 @@ static const struct net_device_ops mtk_n - .ndo_poll_controller = mtk_poll_controller, - #endif - .ndo_setup_tc = mtk_eth_setup_tc, -+ .ndo_bpf = mtk_xdp, - }; - - static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -1086,6 +1086,8 @@ struct mtk_eth { - - struct mtk_ppe *ppe; - struct rhashtable flow_table; -+ -+ struct bpf_prog __rcu *prog; - }; - - /* struct mtk_mac - the structure that holds the info about the MACs of the diff --git a/target/linux/generic/backport-6.1/706-02-v6.0-net-ethernet-mtk_eth_soc-introduce-xdp-ethtool-count.patch b/target/linux/generic/backport-6.1/706-02-v6.0-net-ethernet-mtk_eth_soc-introduce-xdp-ethtool-count.patch deleted file mode 100644 index cf4d658684ba..000000000000 --- a/target/linux/generic/backport-6.1/706-02-v6.0-net-ethernet-mtk_eth_soc-introduce-xdp-ethtool-count.patch +++ /dev/null @@ -1,110 +0,0 @@ -From 916a6ee836d6b7b8ef1ed5f0515e256ca60e9968 Mon Sep 17 00:00:00 2001 -From: Lorenzo Bianconi -Date: Fri, 22 Jul 2022 09:19:38 +0200 -Subject: [PATCH] net: ethernet: mtk_eth_soc: introduce xdp ethtool counters - -Report xdp stats through ethtool - -Signed-off-by: Lorenzo Bianconi -Signed-off-by: David S. 
Miller ---- - drivers/net/ethernet/mediatek/mtk_eth_soc.c | 26 +++++++++++++++++++-- - drivers/net/ethernet/mediatek/mtk_eth_soc.h | 12 ++++++++++ - 2 files changed, 36 insertions(+), 2 deletions(-) - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -34,6 +34,10 @@ MODULE_PARM_DESC(msg_level, "Message lev - #define MTK_ETHTOOL_STAT(x) { #x, \ - offsetof(struct mtk_hw_stats, x) / sizeof(u64) } - -+#define MTK_ETHTOOL_XDP_STAT(x) { #x, \ -+ offsetof(struct mtk_hw_stats, xdp_stats.x) / \ -+ sizeof(u64) } -+ - static const struct mtk_reg_map mtk_reg_map = { - .tx_irq_mask = 0x1a1c, - .tx_irq_status = 0x1a18, -@@ -141,6 +145,13 @@ static const struct mtk_ethtool_stats { - MTK_ETHTOOL_STAT(rx_long_errors), - MTK_ETHTOOL_STAT(rx_checksum_errors), - MTK_ETHTOOL_STAT(rx_flow_control_packets), -+ MTK_ETHTOOL_XDP_STAT(rx_xdp_redirect), -+ MTK_ETHTOOL_XDP_STAT(rx_xdp_pass), -+ MTK_ETHTOOL_XDP_STAT(rx_xdp_drop), -+ MTK_ETHTOOL_XDP_STAT(rx_xdp_tx), -+ MTK_ETHTOOL_XDP_STAT(rx_xdp_tx_errors), -+ MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit), -+ MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit_errors), - }; - - static const char * const mtk_clks_source_name[] = { -@@ -1459,6 +1470,9 @@ static void mtk_rx_put_buff(struct mtk_r - static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring, - struct xdp_buff *xdp, struct net_device *dev) - { -+ struct mtk_mac *mac = netdev_priv(dev); -+ struct mtk_hw_stats *hw_stats = mac->hw_stats; -+ u64 *count = &hw_stats->xdp_stats.rx_xdp_drop; - struct bpf_prog *prog; - u32 act = XDP_PASS; - -@@ -1471,13 +1485,16 @@ static u32 mtk_xdp_run(struct mtk_eth *e - act = bpf_prog_run_xdp(prog, xdp); - switch (act) { - case XDP_PASS: -- goto out; -+ count = &hw_stats->xdp_stats.rx_xdp_pass; -+ goto update_stats; - case XDP_REDIRECT: - if (unlikely(xdp_do_redirect(dev, xdp, prog))) { - act = XDP_DROP; - break; - } -- goto out; -+ -+ count = &hw_stats->xdp_stats.rx_xdp_redirect; -+ goto update_stats; - default: - bpf_warn_invalid_xdp_action(act); - fallthrough; -@@ -1490,6 +1507,11 @@ static u32 mtk_xdp_run(struct mtk_eth *e - - page_pool_put_full_page(ring->page_pool, - virt_to_head_page(xdp->data), true); -+ -+update_stats: -+ u64_stats_update_begin(&hw_stats->syncp); -+ *count = *count + 1; -+ u64_stats_update_end(&hw_stats->syncp); - out: - rcu_read_unlock(); - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -568,6 +568,16 @@ struct mtk_tx_dma_v2 { - struct mtk_eth; - struct mtk_mac; - -+struct mtk_xdp_stats { -+ u64 rx_xdp_redirect; -+ u64 rx_xdp_pass; -+ u64 rx_xdp_drop; -+ u64 rx_xdp_tx; -+ u64 rx_xdp_tx_errors; -+ u64 tx_xdp_xmit; -+ u64 tx_xdp_xmit_errors; -+}; -+ - /* struct mtk_hw_stats - the structure that holds the traffic statistics. 
- * @stats_lock: make sure that stats operations are atomic - * @reg_offset: the status register offset of the SoC -@@ -591,6 +601,8 @@ struct mtk_hw_stats { - u64 rx_checksum_errors; - u64 rx_flow_control_packets; - -+ struct mtk_xdp_stats xdp_stats; -+ - spinlock_t stats_lock; - u32 reg_offset; - struct u64_stats_sync syncp; diff --git a/target/linux/generic/backport-6.1/706-03-v6.0-net-ethernet-mtk_eth_soc-add-xmit-XDP-support.patch b/target/linux/generic/backport-6.1/706-03-v6.0-net-ethernet-mtk_eth_soc-add-xmit-XDP-support.patch deleted file mode 100644 index 58b49f28a25f..000000000000 --- a/target/linux/generic/backport-6.1/706-03-v6.0-net-ethernet-mtk_eth_soc-add-xmit-XDP-support.patch +++ /dev/null @@ -1,340 +0,0 @@ -From 5886d26fd25bbe26130e3e5f7474b9b3e98a3469 Mon Sep 17 00:00:00 2001 -From: Lorenzo Bianconi -Date: Fri, 22 Jul 2022 09:19:39 +0200 -Subject: [PATCH] net: ethernet: mtk_eth_soc: add xmit XDP support - -Introduce XDP support for XDP_TX verdict and ndo_xdp_xmit function -pointer. - -Signed-off-by: Lorenzo Bianconi -Signed-off-by: David S. Miller ---- - drivers/net/ethernet/mediatek/mtk_eth_soc.c | 192 +++++++++++++++++--- - drivers/net/ethernet/mediatek/mtk_eth_soc.h | 10 +- - 2 files changed, 180 insertions(+), 22 deletions(-) - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -988,15 +988,26 @@ static void mtk_tx_unmap(struct mtk_eth - } - } - -- tx_buf->flags = 0; -- if (tx_buf->skb && -- (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) { -- if (napi) -- napi_consume_skb(tx_buf->skb, napi); -+ if (tx_buf->type == MTK_TYPE_SKB) { -+ if (tx_buf->data && -+ tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) { -+ struct sk_buff *skb = tx_buf->data; -+ -+ if (napi) -+ napi_consume_skb(skb, napi); -+ else -+ dev_kfree_skb_any(skb); -+ } -+ } else if (tx_buf->data) { -+ struct xdp_frame *xdpf = tx_buf->data; -+ -+ if (napi && tx_buf->type == MTK_TYPE_XDP_TX) -+ xdp_return_frame_rx_napi(xdpf); - else -- dev_kfree_skb_any(tx_buf->skb); -+ xdp_return_frame(xdpf); - } -- tx_buf->skb = NULL; -+ tx_buf->flags = 0; -+ tx_buf->data = NULL; - } - - static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf, -@@ -1013,7 +1024,7 @@ static void setup_tx_buf(struct mtk_eth - dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr); - dma_unmap_len_set(tx_buf, dma_len1, size); - } else { -- tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC; -+ tx_buf->data = (void *)MTK_DMA_DUMMY_DESC; - txd->txd1 = mapped_addr; - txd->txd2 = TX_DMA_PLEN0(size); - dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr); -@@ -1189,7 +1200,7 @@ static int mtk_tx_map(struct sk_buff *sk - soc->txrx.txd_size); - if (new_desc) - memset(tx_buf, 0, sizeof(*tx_buf)); -- tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC; -+ tx_buf->data = (void *)MTK_DMA_DUMMY_DESC; - tx_buf->flags |= MTK_TX_FLAGS_PAGE0; - tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 : - MTK_TX_FLAGS_FPORT1; -@@ -1203,7 +1214,8 @@ static int mtk_tx_map(struct sk_buff *sk - } - - /* store skb to cleanup */ -- itx_buf->skb = skb; -+ itx_buf->type = MTK_TYPE_SKB; -+ itx_buf->data = skb; - - if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { - if (k & 0x1) -@@ -1415,13 +1427,14 @@ static struct page_pool *mtk_create_page - .pool_size = size, - .nid = NUMA_NO_NODE, - .dev = eth->dma_dev, -- .dma_dir = DMA_FROM_DEVICE, - .offset = MTK_PP_HEADROOM, - .max_len = MTK_PP_MAX_BUF_SIZE, - }; - struct page_pool *pp; - int err; - -+ pp_params.dma_dir = rcu_access_pointer(eth->prog) ? 
DMA_BIDIRECTIONAL -+ : DMA_FROM_DEVICE; - pp = page_pool_create(&pp_params); - if (IS_ERR(pp)) - return pp; -@@ -1467,6 +1480,122 @@ static void mtk_rx_put_buff(struct mtk_r - skb_free_frag(data); - } - -+static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf, -+ struct net_device *dev, bool dma_map) -+{ -+ const struct mtk_soc_data *soc = eth->soc; -+ struct mtk_tx_ring *ring = ð->tx_ring; -+ struct mtk_tx_dma_desc_info txd_info = { -+ .size = xdpf->len, -+ .first = true, -+ .last = true, -+ }; -+ struct mtk_mac *mac = netdev_priv(dev); -+ struct mtk_tx_dma *txd, *txd_pdma; -+ int err = 0, index = 0, n_desc = 1; -+ struct mtk_tx_buf *tx_buf; -+ -+ if (unlikely(test_bit(MTK_RESETTING, ð->state))) -+ return -EBUSY; -+ -+ if (unlikely(atomic_read(&ring->free_count) <= 1)) -+ return -EBUSY; -+ -+ spin_lock(ð->page_lock); -+ -+ txd = ring->next_free; -+ if (txd == ring->last_free) { -+ err = -ENOMEM; -+ goto out; -+ } -+ -+ tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size); -+ memset(tx_buf, 0, sizeof(*tx_buf)); -+ -+ if (dma_map) { /* ndo_xdp_xmit */ -+ txd_info.addr = dma_map_single(eth->dma_dev, xdpf->data, -+ txd_info.size, DMA_TO_DEVICE); -+ if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr))) { -+ err = -ENOMEM; -+ goto out; -+ } -+ tx_buf->flags |= MTK_TX_FLAGS_SINGLE0; -+ } else { -+ struct page *page = virt_to_head_page(xdpf->data); -+ -+ txd_info.addr = page_pool_get_dma_addr(page) + -+ sizeof(*xdpf) + xdpf->headroom; -+ dma_sync_single_for_device(eth->dma_dev, txd_info.addr, -+ txd_info.size, -+ DMA_BIDIRECTIONAL); -+ } -+ mtk_tx_set_dma_desc(dev, txd, &txd_info); -+ -+ tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1; -+ -+ txd_pdma = qdma_to_pdma(ring, txd); -+ setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr, txd_info.size, -+ index++); -+ -+ /* store xdpf for cleanup */ -+ tx_buf->type = dma_map ? 
MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX; -+ tx_buf->data = xdpf; -+ -+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { -+ if (index & 1) -+ txd_pdma->txd2 |= TX_DMA_LS0; -+ else -+ txd_pdma->txd2 |= TX_DMA_LS1; -+ } -+ -+ ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2); -+ atomic_sub(n_desc, &ring->free_count); -+ -+ /* make sure that all changes to the dma ring are flushed before we -+ * continue -+ */ -+ wmb(); -+ -+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { -+ mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr); -+ } else { -+ int idx; -+ -+ idx = txd_to_idx(ring, txd, soc->txrx.txd_size); -+ mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size), -+ MT7628_TX_CTX_IDX0); -+ } -+out: -+ spin_unlock(ð->page_lock); -+ -+ return err; -+} -+ -+static int mtk_xdp_xmit(struct net_device *dev, int num_frame, -+ struct xdp_frame **frames, u32 flags) -+{ -+ struct mtk_mac *mac = netdev_priv(dev); -+ struct mtk_hw_stats *hw_stats = mac->hw_stats; -+ struct mtk_eth *eth = mac->hw; -+ int i, nxmit = 0; -+ -+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) -+ return -EINVAL; -+ -+ for (i = 0; i < num_frame; i++) { -+ if (mtk_xdp_submit_frame(eth, frames[i], dev, true)) -+ break; -+ nxmit++; -+ } -+ -+ u64_stats_update_begin(&hw_stats->syncp); -+ hw_stats->xdp_stats.tx_xdp_xmit += nxmit; -+ hw_stats->xdp_stats.tx_xdp_xmit_errors += num_frame - nxmit; -+ u64_stats_update_end(&hw_stats->syncp); -+ -+ return nxmit; -+} -+ - static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring, - struct xdp_buff *xdp, struct net_device *dev) - { -@@ -1495,6 +1624,18 @@ static u32 mtk_xdp_run(struct mtk_eth *e - - count = &hw_stats->xdp_stats.rx_xdp_redirect; - goto update_stats; -+ case XDP_TX: { -+ struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); -+ -+ if (mtk_xdp_submit_frame(eth, xdpf, dev, false)) { -+ count = &hw_stats->xdp_stats.rx_xdp_tx_errors; -+ act = XDP_DROP; -+ break; -+ } -+ -+ count = &hw_stats->xdp_stats.rx_xdp_tx; -+ goto update_stats; -+ } - default: - bpf_warn_invalid_xdp_action(act); - fallthrough; -@@ -1728,9 +1869,8 @@ static int mtk_poll_tx_qdma(struct mtk_e - { - const struct mtk_reg_map *reg_map = eth->soc->reg_map; - struct mtk_tx_ring *ring = ð->tx_ring; -- struct mtk_tx_dma *desc; -- struct sk_buff *skb; - struct mtk_tx_buf *tx_buf; -+ struct mtk_tx_dma *desc; - u32 cpu, dma; - - cpu = ring->last_free_ptr; -@@ -1751,15 +1891,21 @@ static int mtk_poll_tx_qdma(struct mtk_e - if (tx_buf->flags & MTK_TX_FLAGS_FPORT1) - mac = 1; - -- skb = tx_buf->skb; -- if (!skb) -+ if (!tx_buf->data) - break; - -- if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) { -+ if (tx_buf->type == MTK_TYPE_SKB && -+ tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) { -+ struct sk_buff *skb = tx_buf->data; -+ - bytes[mac] += skb->len; - done[mac]++; - budget--; -+ } else if (tx_buf->type == MTK_TYPE_XDP_TX || -+ tx_buf->type == MTK_TYPE_XDP_NDO) { -+ budget--; - } -+ - mtk_tx_unmap(eth, tx_buf, true); - - ring->last_free = desc; -@@ -1778,9 +1924,8 @@ static int mtk_poll_tx_pdma(struct mtk_e - unsigned int *done, unsigned int *bytes) - { - struct mtk_tx_ring *ring = ð->tx_ring; -- struct mtk_tx_dma *desc; -- struct sk_buff *skb; - struct mtk_tx_buf *tx_buf; -+ struct mtk_tx_dma *desc; - u32 cpu, dma; - - cpu = ring->cpu_idx; -@@ -1788,14 +1933,18 @@ static int mtk_poll_tx_pdma(struct mtk_e - - while ((cpu != dma) && budget) { - tx_buf = &ring->buf[cpu]; -- skb = tx_buf->skb; -- if (!skb) -+ if (!tx_buf->data) - break; - -- if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) { -+ if (tx_buf->type == MTK_TYPE_SKB && -+ 
tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) { -+ struct sk_buff *skb = tx_buf->data; - bytes[0] += skb->len; - done[0]++; - budget--; -+ } else if (tx_buf->type == MTK_TYPE_XDP_TX || -+ tx_buf->type == MTK_TYPE_XDP_NDO) { -+ budget--; - } - - mtk_tx_unmap(eth, tx_buf, true); -@@ -3478,6 +3627,7 @@ static const struct net_device_ops mtk_n - #endif - .ndo_setup_tc = mtk_eth_setup_tc, - .ndo_bpf = mtk_xdp, -+ .ndo_xdp_xmit = mtk_xdp_xmit, - }; - - static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -694,6 +694,12 @@ enum mtk_dev_state { - MTK_RESETTING - }; - -+enum mtk_tx_buf_type { -+ MTK_TYPE_SKB, -+ MTK_TYPE_XDP_TX, -+ MTK_TYPE_XDP_NDO, -+}; -+ - /* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at - * by the TX descriptor s - * @skb: The SKB pointer of the packet being sent -@@ -703,7 +709,9 @@ enum mtk_dev_state { - * @dma_len1: The length of the second segment - */ - struct mtk_tx_buf { -- struct sk_buff *skb; -+ enum mtk_tx_buf_type type; -+ void *data; -+ - u32 flags; - DEFINE_DMA_UNMAP_ADDR(dma_addr0); - DEFINE_DMA_UNMAP_LEN(dma_len0); diff --git a/target/linux/generic/backport-6.1/706-04-v6.0-net-ethernet-mtk_eth_soc-add-support-for-page_pool_g.patch b/target/linux/generic/backport-6.1/706-04-v6.0-net-ethernet-mtk_eth_soc-add-support-for-page_pool_g.patch deleted file mode 100644 index e93e0df544ac..000000000000 --- a/target/linux/generic/backport-6.1/706-04-v6.0-net-ethernet-mtk_eth_soc-add-support-for-page_pool_g.patch +++ /dev/null @@ -1,95 +0,0 @@ -From 84b9cd389036d4a262d8cee794d56c04095358a7 Mon Sep 17 00:00:00 2001 -From: Lorenzo Bianconi -Date: Fri, 22 Jul 2022 09:19:40 +0200 -Subject: [PATCH] net: ethernet: mtk_eth_soc: add support for - page_pool_get_stats - -Introduce support for the page_pool stats API into mtk_eth_soc driver. -Report page_pool stats through ethtool. - -Signed-off-by: Lorenzo Bianconi -Signed-off-by: David S. Miller ---- - drivers/net/ethernet/mediatek/Kconfig | 1 + - drivers/net/ethernet/mediatek/mtk_eth_soc.c | 37 +++++++++++++++++++-- - 2 files changed, 35 insertions(+), 3 deletions(-) - ---- a/drivers/net/ethernet/mediatek/Kconfig -+++ b/drivers/net/ethernet/mediatek/Kconfig -@@ -17,6 +17,7 @@ config NET_MEDIATEK_SOC - select PHYLINK - select DIMLIB - select PAGE_POOL -+ select PAGE_POOL_STATS - help - This driver supports the gigabit ethernet MACs in the - MediaTek SoC family. 
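For orientation, the ethtool hooks in the hunks that follow use a fixed shape: report extra string/count slots for the page_pool counters, then fold every ring's pool into one struct page_pool_stats and let the core serialize it. A rough, generic sketch of that shape (the page_pool_* helpers are the real kernel API of this era; sketch_priv, rx_pool, n_rings and N_DRIVER_STATS are placeholders, not mtk_eth_soc names), with the authoritative driver hunks quoted right below; the select of PAGE_POOL_STATS in the Kconfig hunk above is what builds the counting code behind these helpers:

    #include <linux/ethtool.h>
    #include <net/page_pool.h>

    #define N_DRIVER_STATS 16   /* placeholder: number of driver-native counters */

    struct sketch_priv {
        struct page_pool *rx_pool[4];   /* placeholder per-ring pools */
        int n_rings;
    };

    static int sketch_get_sset_count(struct net_device *dev, int sset)
    {
        if (sset != ETH_SS_STATS)
            return -EOPNOTSUPP;
        /* driver counters first, page_pool counters appended after them */
        return N_DRIVER_STATS + page_pool_ethtool_stats_get_count();
    }

    static void sketch_get_pp_stats(struct net_device *dev, u64 *data)
    {
        struct sketch_priv *priv = netdev_priv(dev);
        struct page_pool_stats stats = {};
        int i;

        /* accumulate all rx rings into a single stats block */
        for (i = 0; i < priv->n_rings; i++)
            page_pool_get_stats(priv->rx_pool[i], &stats);
        /* the core lays these counters out after the driver's own ones */
        page_pool_ethtool_stats_get(data + N_DRIVER_STATS, &stats);
    }
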
---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -3488,11 +3488,18 @@ static void mtk_get_strings(struct net_d - int i; - - switch (stringset) { -- case ETH_SS_STATS: -+ case ETH_SS_STATS: { -+ struct mtk_mac *mac = netdev_priv(dev); -+ - for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) { - memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN); - data += ETH_GSTRING_LEN; - } -+ if (mtk_page_pool_enabled(mac->hw)) -+ page_pool_ethtool_stats_get_strings(data); -+ break; -+ } -+ default: - break; - } - } -@@ -3500,13 +3507,35 @@ static void mtk_get_strings(struct net_d - static int mtk_get_sset_count(struct net_device *dev, int sset) - { - switch (sset) { -- case ETH_SS_STATS: -- return ARRAY_SIZE(mtk_ethtool_stats); -+ case ETH_SS_STATS: { -+ int count = ARRAY_SIZE(mtk_ethtool_stats); -+ struct mtk_mac *mac = netdev_priv(dev); -+ -+ if (mtk_page_pool_enabled(mac->hw)) -+ count += page_pool_ethtool_stats_get_count(); -+ return count; -+ } - default: - return -EOPNOTSUPP; - } - } - -+static void mtk_ethtool_pp_stats(struct mtk_eth *eth, u64 *data) -+{ -+ struct page_pool_stats stats = {}; -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(eth->rx_ring); i++) { -+ struct mtk_rx_ring *ring = ð->rx_ring[i]; -+ -+ if (!ring->page_pool) -+ continue; -+ -+ page_pool_get_stats(ring->page_pool, &stats); -+ } -+ page_pool_ethtool_stats_get(data, &stats); -+} -+ - static void mtk_get_ethtool_stats(struct net_device *dev, - struct ethtool_stats *stats, u64 *data) - { -@@ -3534,6 +3563,8 @@ static void mtk_get_ethtool_stats(struct - - for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) - *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset); -+ if (mtk_page_pool_enabled(mac->hw)) -+ mtk_ethtool_pp_stats(mac->hw, data_dst); - } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start)); - } - diff --git a/target/linux/generic/backport-6.1/706-05-v6.0-net-ethernet-mtk_eth_soc-introduce-mtk_xdp_frame_map.patch b/target/linux/generic/backport-6.1/706-05-v6.0-net-ethernet-mtk_eth_soc-introduce-mtk_xdp_frame_map.patch deleted file mode 100644 index 8e6895fe97fc..000000000000 --- a/target/linux/generic/backport-6.1/706-05-v6.0-net-ethernet-mtk_eth_soc-introduce-mtk_xdp_frame_map.patch +++ /dev/null @@ -1,113 +0,0 @@ -From b16fe6d82b71fa0dd5c957bc22d66a694976d6eb Mon Sep 17 00:00:00 2001 -From: Lorenzo Bianconi -Date: Wed, 27 Jul 2022 23:20:50 +0200 -Subject: [PATCH] net: ethernet: mtk_eth_soc: introduce mtk_xdp_frame_map - utility routine - -This is a preliminary patch to add xdp multi-frag support to mtk_eth_soc -driver - -Signed-off-by: Lorenzo Bianconi -Signed-off-by: David S. 
Miller ---- - drivers/net/ethernet/mediatek/mtk_eth_soc.c | 68 +++++++++++++-------- - 1 file changed, 42 insertions(+), 26 deletions(-) - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -1480,6 +1480,41 @@ static void mtk_rx_put_buff(struct mtk_r - skb_free_frag(data); - } - -+static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev, -+ struct mtk_tx_dma_desc_info *txd_info, -+ struct mtk_tx_dma *txd, struct mtk_tx_buf *tx_buf, -+ void *data, u16 headroom, int index, bool dma_map) -+{ -+ struct mtk_tx_ring *ring = ð->tx_ring; -+ struct mtk_mac *mac = netdev_priv(dev); -+ struct mtk_tx_dma *txd_pdma; -+ -+ if (dma_map) { /* ndo_xdp_xmit */ -+ txd_info->addr = dma_map_single(eth->dma_dev, data, -+ txd_info->size, DMA_TO_DEVICE); -+ if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr))) -+ return -ENOMEM; -+ -+ tx_buf->flags |= MTK_TX_FLAGS_SINGLE0; -+ } else { -+ struct page *page = virt_to_head_page(data); -+ -+ txd_info->addr = page_pool_get_dma_addr(page) + -+ sizeof(struct xdp_frame) + headroom; -+ dma_sync_single_for_device(eth->dma_dev, txd_info->addr, -+ txd_info->size, DMA_BIDIRECTIONAL); -+ } -+ mtk_tx_set_dma_desc(dev, txd, txd_info); -+ -+ tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1; -+ -+ txd_pdma = qdma_to_pdma(ring, txd); -+ setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size, -+ index); -+ -+ return 0; -+} -+ - static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf, - struct net_device *dev, bool dma_map) - { -@@ -1490,9 +1525,8 @@ static int mtk_xdp_submit_frame(struct m - .first = true, - .last = true, - }; -- struct mtk_mac *mac = netdev_priv(dev); -- struct mtk_tx_dma *txd, *txd_pdma; - int err = 0, index = 0, n_desc = 1; -+ struct mtk_tx_dma *txd, *txd_pdma; - struct mtk_tx_buf *tx_buf; - - if (unlikely(test_bit(MTK_RESETTING, ð->state))) -@@ -1512,36 +1546,18 @@ static int mtk_xdp_submit_frame(struct m - tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size); - memset(tx_buf, 0, sizeof(*tx_buf)); - -- if (dma_map) { /* ndo_xdp_xmit */ -- txd_info.addr = dma_map_single(eth->dma_dev, xdpf->data, -- txd_info.size, DMA_TO_DEVICE); -- if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr))) { -- err = -ENOMEM; -- goto out; -- } -- tx_buf->flags |= MTK_TX_FLAGS_SINGLE0; -- } else { -- struct page *page = virt_to_head_page(xdpf->data); -- -- txd_info.addr = page_pool_get_dma_addr(page) + -- sizeof(*xdpf) + xdpf->headroom; -- dma_sync_single_for_device(eth->dma_dev, txd_info.addr, -- txd_info.size, -- DMA_BIDIRECTIONAL); -- } -- mtk_tx_set_dma_desc(dev, txd, &txd_info); -- -- tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1; -- -- txd_pdma = qdma_to_pdma(ring, txd); -- setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr, txd_info.size, -- index++); -+ err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf, -+ xdpf->data, xdpf->headroom, index, -+ dma_map); -+ if (err < 0) -+ goto out; - - /* store xdpf for cleanup */ - tx_buf->type = dma_map ? 
MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX; - tx_buf->data = xdpf; - - if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { -+ txd_pdma = qdma_to_pdma(ring, txd); - if (index & 1) - txd_pdma->txd2 |= TX_DMA_LS0; - else diff --git a/target/linux/generic/backport-6.1/706-06-v6.0-net-ethernet-mtk_eth_soc-introduce-xdp-multi-frag-su.patch b/target/linux/generic/backport-6.1/706-06-v6.0-net-ethernet-mtk_eth_soc-introduce-xdp-multi-frag-su.patch deleted file mode 100644 index 23e4a4dfcbf4..000000000000 --- a/target/linux/generic/backport-6.1/706-06-v6.0-net-ethernet-mtk_eth_soc-introduce-xdp-multi-frag-su.patch +++ /dev/null @@ -1,218 +0,0 @@ -From 155738a4f319538a09f734ce1f5a2eac3ada1de2 Mon Sep 17 00:00:00 2001 -From: Lorenzo Bianconi -Date: Wed, 27 Jul 2022 23:20:51 +0200 -Subject: [PATCH] net: ethernet: mtk_eth_soc: introduce xdp multi-frag support - -Add the capability to map non-linear xdp frames in XDP_TX and -ndo_xdp_xmit callback. - -Signed-off-by: Lorenzo Bianconi -Signed-off-by: David S. Miller ---- - drivers/net/ethernet/mediatek/mtk_eth_soc.c | 125 +++++++++++++------- - 1 file changed, 82 insertions(+), 43 deletions(-) - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -988,23 +988,22 @@ static void mtk_tx_unmap(struct mtk_eth - } - } - -- if (tx_buf->type == MTK_TYPE_SKB) { -- if (tx_buf->data && -- tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) { -+ if (tx_buf->data && tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) { -+ if (tx_buf->type == MTK_TYPE_SKB) { - struct sk_buff *skb = tx_buf->data; - - if (napi) - napi_consume_skb(skb, napi); - else - dev_kfree_skb_any(skb); -- } -- } else if (tx_buf->data) { -- struct xdp_frame *xdpf = tx_buf->data; -+ } else { -+ struct xdp_frame *xdpf = tx_buf->data; - -- if (napi && tx_buf->type == MTK_TYPE_XDP_TX) -- xdp_return_frame_rx_napi(xdpf); -- else -- xdp_return_frame(xdpf); -+ if (napi && tx_buf->type == MTK_TYPE_XDP_TX) -+ xdp_return_frame_rx_napi(xdpf); -+ else -+ xdp_return_frame(xdpf); -+ } - } - tx_buf->flags = 0; - tx_buf->data = NULL; -@@ -1507,6 +1506,8 @@ static int mtk_xdp_frame_map(struct mtk_ - mtk_tx_set_dma_desc(dev, txd, txd_info); - - tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1; -+ tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX; -+ tx_buf->data = (void *)MTK_DMA_DUMMY_DESC; - - txd_pdma = qdma_to_pdma(ring, txd); - setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size, -@@ -1518,43 +1519,69 @@ static int mtk_xdp_frame_map(struct mtk_ - static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf, - struct net_device *dev, bool dma_map) - { -+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf); - const struct mtk_soc_data *soc = eth->soc; - struct mtk_tx_ring *ring = ð->tx_ring; - struct mtk_tx_dma_desc_info txd_info = { - .size = xdpf->len, - .first = true, -- .last = true, -+ .last = !xdp_frame_has_frags(xdpf), - }; -- int err = 0, index = 0, n_desc = 1; -- struct mtk_tx_dma *txd, *txd_pdma; -- struct mtk_tx_buf *tx_buf; -+ int err, index = 0, n_desc = 1, nr_frags; -+ struct mtk_tx_dma *htxd, *txd, *txd_pdma; -+ struct mtk_tx_buf *htx_buf, *tx_buf; -+ void *data = xdpf->data; - - if (unlikely(test_bit(MTK_RESETTING, ð->state))) - return -EBUSY; - -- if (unlikely(atomic_read(&ring->free_count) <= 1)) -+ nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? 
sinfo->nr_frags : 0; -+ if (unlikely(atomic_read(&ring->free_count) <= 1 + nr_frags)) - return -EBUSY; - - spin_lock(ð->page_lock); - - txd = ring->next_free; - if (txd == ring->last_free) { -- err = -ENOMEM; -- goto out; -+ spin_unlock(ð->page_lock); -+ return -ENOMEM; - } -+ htxd = txd; - - tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size); - memset(tx_buf, 0, sizeof(*tx_buf)); -+ htx_buf = tx_buf; - -- err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf, -- xdpf->data, xdpf->headroom, index, -- dma_map); -- if (err < 0) -- goto out; -+ for (;;) { -+ err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf, -+ data, xdpf->headroom, index, dma_map); -+ if (err < 0) -+ goto unmap; -+ -+ if (txd_info.last) -+ break; - -+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) { -+ txd = mtk_qdma_phys_to_virt(ring, txd->txd2); -+ txd_pdma = qdma_to_pdma(ring, txd); -+ if (txd == ring->last_free) -+ goto unmap; -+ -+ tx_buf = mtk_desc_to_tx_buf(ring, txd, -+ soc->txrx.txd_size); -+ memset(tx_buf, 0, sizeof(*tx_buf)); -+ n_desc++; -+ } -+ -+ memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info)); -+ txd_info.size = skb_frag_size(&sinfo->frags[index]); -+ txd_info.last = index + 1 == nr_frags; -+ data = skb_frag_address(&sinfo->frags[index]); -+ -+ index++; -+ } - /* store xdpf for cleanup */ -- tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX; -- tx_buf->data = xdpf; -+ htx_buf->data = xdpf; - - if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { - txd_pdma = qdma_to_pdma(ring, txd); -@@ -1581,7 +1608,24 @@ static int mtk_xdp_submit_frame(struct m - mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size), - MT7628_TX_CTX_IDX0); - } --out: -+ -+ spin_unlock(ð->page_lock); -+ -+ return 0; -+ -+unmap: -+ while (htxd != txd) { -+ txd_pdma = qdma_to_pdma(ring, htxd); -+ tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size); -+ mtk_tx_unmap(eth, tx_buf, false); -+ -+ htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; -+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) -+ txd_pdma->txd2 = TX_DMA_DESP2_DEF; -+ -+ htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2); -+ } -+ - spin_unlock(ð->page_lock); - - return err; -@@ -1910,18 +1954,15 @@ static int mtk_poll_tx_qdma(struct mtk_e - if (!tx_buf->data) - break; - -- if (tx_buf->type == MTK_TYPE_SKB && -- tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) { -- struct sk_buff *skb = tx_buf->data; -+ if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) { -+ if (tx_buf->type == MTK_TYPE_SKB) { -+ struct sk_buff *skb = tx_buf->data; - -- bytes[mac] += skb->len; -- done[mac]++; -- budget--; -- } else if (tx_buf->type == MTK_TYPE_XDP_TX || -- tx_buf->type == MTK_TYPE_XDP_NDO) { -+ bytes[mac] += skb->len; -+ done[mac]++; -+ } - budget--; - } -- - mtk_tx_unmap(eth, tx_buf, true); - - ring->last_free = desc; -@@ -1952,17 +1993,15 @@ static int mtk_poll_tx_pdma(struct mtk_e - if (!tx_buf->data) - break; - -- if (tx_buf->type == MTK_TYPE_SKB && -- tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) { -- struct sk_buff *skb = tx_buf->data; -- bytes[0] += skb->len; -- done[0]++; -- budget--; -- } else if (tx_buf->type == MTK_TYPE_XDP_TX || -- tx_buf->type == MTK_TYPE_XDP_NDO) { -+ if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) { -+ if (tx_buf->type == MTK_TYPE_SKB) { -+ struct sk_buff *skb = tx_buf->data; -+ -+ bytes[0] += skb->len; -+ done[0]++; -+ } - budget--; - } -- - mtk_tx_unmap(eth, tx_buf, true); - - desc = ring->dma + cpu * eth->soc->txrx.txd_size; diff --git a/target/linux/generic/backport-6.1/710-v6.0-net-ethernet-mtk_eth_soc-fix-hw-hash-reporting-for-M.patch 
b/target/linux/generic/backport-6.1/710-v6.0-net-ethernet-mtk_eth_soc-fix-hw-hash-reporting-for-M.patch deleted file mode 100644 index 817b3e10fdf1..000000000000 --- a/target/linux/generic/backport-6.1/710-v6.0-net-ethernet-mtk_eth_soc-fix-hw-hash-reporting-for-M.patch +++ /dev/null @@ -1,74 +0,0 @@ -From 0cf731f9ebb5bf6f252055bebf4463a5c0bd490b Mon Sep 17 00:00:00 2001 -From: Lorenzo Bianconi -Date: Tue, 23 Aug 2022 14:24:07 +0200 -Subject: [PATCH] net: ethernet: mtk_eth_soc: fix hw hash reporting for - MTK_NETSYS_V2 - -Properly report hw rx hash for mt7986 chipset accroding to the new dma -descriptor layout. - -Fixes: 197c9e9b17b11 ("net: ethernet: mtk_eth_soc: introduce support for mt7986 chipset") -Signed-off-by: Lorenzo Bianconi -Link: https://lore.kernel.org/r/091394ea4e705fbb35f828011d98d0ba33808f69.1661257293.git.lorenzo@kernel.org -Signed-off-by: Paolo Abeni ---- - drivers/net/ethernet/mediatek/mtk_eth_soc.c | 22 +++++++++++---------- - drivers/net/ethernet/mediatek/mtk_eth_soc.h | 5 +++++ - 2 files changed, 17 insertions(+), 10 deletions(-) - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -1846,10 +1846,19 @@ static int mtk_poll_rx(struct napi_struc - skb->dev = netdev; - bytes += skb->len; - -- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) -+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { -+ hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY; -+ if (hash != MTK_RXD5_FOE_ENTRY) -+ skb_set_hash(skb, jhash_1word(hash, 0), -+ PKT_HASH_TYPE_L4); - rxdcsum = &trxd.rxd3; -- else -+ } else { -+ hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY; -+ if (hash != MTK_RXD4_FOE_ENTRY) -+ skb_set_hash(skb, jhash_1word(hash, 0), -+ PKT_HASH_TYPE_L4); - rxdcsum = &trxd.rxd4; -+ } - - if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid) - skb->ip_summed = CHECKSUM_UNNECESSARY; -@@ -1857,16 +1866,9 @@ static int mtk_poll_rx(struct napi_struc - skb_checksum_none_assert(skb); - skb->protocol = eth_type_trans(skb, netdev); - -- hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY; -- if (hash != MTK_RXD4_FOE_ENTRY) { -- hash = jhash_1word(hash, 0); -- skb_set_hash(skb, hash, PKT_HASH_TYPE_L4); -- } -- - reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4); - if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED) -- mtk_ppe_check_skb(eth->ppe, skb, -- trxd.rxd4 & MTK_RXD4_FOE_ENTRY); -+ mtk_ppe_check_skb(eth->ppe, skb, hash); - - if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { - if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -314,6 +314,11 @@ - #define RX_DMA_L4_VALID_PDMA BIT(30) /* when PDMA is used */ - #define RX_DMA_SPECIAL_TAG BIT(22) - -+/* PDMA descriptor rxd5 */ -+#define MTK_RXD5_FOE_ENTRY GENMASK(14, 0) -+#define MTK_RXD5_PPE_CPU_REASON GENMASK(22, 18) -+#define MTK_RXD5_SRC_PORT GENMASK(29, 26) -+ - #define RX_DMA_GET_SPORT(x) (((x) >> 19) & 0xf) - #define RX_DMA_GET_SPORT_V2(x) (((x) >> 26) & 0x7) - diff --git a/target/linux/generic/backport-6.1/711-v6.0-01-net-ethernet-mtk_eth_soc-fix-off-by-one-check-of-ARR.patch b/target/linux/generic/backport-6.1/711-v6.0-01-net-ethernet-mtk_eth_soc-fix-off-by-one-check-of-ARR.patch deleted file mode 100644 index 0de8ab4376c0..000000000000 --- a/target/linux/generic/backport-6.1/711-v6.0-01-net-ethernet-mtk_eth_soc-fix-off-by-one-check-of-ARR.patch +++ /dev/null @@ -1,31 +0,0 @@ -From: Tom Rix -Date: Sat, 16 Jul 2022 17:46:54 -0400 -Subject: [PATCH] net: ethernet: mtk_eth_soc: fix off by one check of - ARRAY_SIZE - 
-In mtk_wed_tx_ring_setup(.., int idx, ..), idx is used as an index here - struct mtk_wed_ring *ring = &dev->tx_ring[idx]; - -The bounds of idx are checked here - BUG_ON(idx > ARRAY_SIZE(dev->tx_ring)); - -If idx is the size of the array, it will pass this check and overflow. -So change the check to >= . - -Fixes: 804775dfc288 ("net: ethernet: mtk_eth_soc: add support for Wireless Ethernet Dispatch (WED)") -Signed-off-by: Tom Rix -Link: https://lore.kernel.org/r/20220716214654.1540240-1-trix@redhat.com -Signed-off-by: Jakub Kicinski ---- - ---- a/drivers/net/ethernet/mediatek/mtk_wed.c -+++ b/drivers/net/ethernet/mediatek/mtk_wed.c -@@ -651,7 +651,7 @@ mtk_wed_tx_ring_setup(struct mtk_wed_dev - * WDMA RX. - */ - -- BUG_ON(idx > ARRAY_SIZE(dev->tx_ring)); -+ BUG_ON(idx >= ARRAY_SIZE(dev->tx_ring)); - - if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE)) - return -ENOMEM; diff --git a/target/linux/generic/backport-6.1/711-v6.0-02-net-ethernet-mtk_ppe-fix-possible-NULL-pointer-deref.patch b/target/linux/generic/backport-6.1/711-v6.0-02-net-ethernet-mtk_ppe-fix-possible-NULL-pointer-deref.patch deleted file mode 100644 index fc6e2464688c..000000000000 --- a/target/linux/generic/backport-6.1/711-v6.0-02-net-ethernet-mtk_ppe-fix-possible-NULL-pointer-deref.patch +++ /dev/null @@ -1,27 +0,0 @@ -From: Lorenzo Bianconi -Date: Mon, 18 Jul 2022 11:51:53 +0200 -Subject: [PATCH] net: ethernet: mtk_ppe: fix possible NULL pointer dereference - in mtk_flow_get_wdma_info - -odev pointer can be NULL in mtk_flow_offload_replace routine according -to the flower action rules. Fix possible NULL pointer dereference in -mtk_flow_get_wdma_info. - -Fixes: a333215e10cb5 ("net: ethernet: mtk_eth_soc: implement flow offloading to WED devices") -Signed-off-by: Lorenzo Bianconi -Link: https://lore.kernel.org/r/4e1685bc4976e21e364055f6bee86261f8f9ee93.1658137753.git.lorenzo@kernel.org -Signed-off-by: Jakub Kicinski ---- - ---- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c -+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c -@@ -93,6 +93,9 @@ mtk_flow_get_wdma_info(struct net_device - }; - struct net_device_path path = {}; - -+ if (!ctx.dev) -+ return -ENODEV; -+ - memcpy(ctx.daddr, addr, sizeof(ctx.daddr)); - - if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)) diff --git a/target/linux/generic/backport-6.1/711-v6.0-03-net-ethernet-mtk-ppe-fix-traffic-offload-with-bridge.patch b/target/linux/generic/backport-6.1/711-v6.0-03-net-ethernet-mtk-ppe-fix-traffic-offload-with-bridge.patch deleted file mode 100644 index c0720152d65d..000000000000 --- a/target/linux/generic/backport-6.1/711-v6.0-03-net-ethernet-mtk-ppe-fix-traffic-offload-with-bridge.patch +++ /dev/null @@ -1,64 +0,0 @@ -From: Lorenzo Bianconi -Date: Fri, 22 Jul 2022 09:06:19 +0200 -Subject: [PATCH] net: ethernet: mtk-ppe: fix traffic offload with bridged wlan - -A typical flow offload scenario for OpenWrt users is routed traffic -received by the wan interface that is redirected to a wlan device -belonging to the lan bridge. Current implementation fails to -fill wdma offload info in mtk_flow_get_wdma_info() since odev device is -the local bridge. Fix the issue running dev_fill_forward_path routine in -mtk_flow_get_wdma_info in order to identify the wlan device. - -Tested-by: Paolo Valerio -Signed-off-by: Lorenzo Bianconi -Signed-off-by: David S. 
Miller ---- - ---- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c -+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c -@@ -88,32 +88,28 @@ mtk_flow_offload_mangle_eth(const struct - static int - mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_info *info) - { -- struct net_device_path_ctx ctx = { -- .dev = dev, -- }; -- struct net_device_path path = {}; -+ struct net_device_path_stack stack; -+ struct net_device_path *path; -+ int err; - -- if (!ctx.dev) -+ if (!dev) - return -ENODEV; - -- memcpy(ctx.daddr, addr, sizeof(ctx.daddr)); -- - if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)) - return -1; - -- if (!dev->netdev_ops->ndo_fill_forward_path) -- return -1; -- -- if (dev->netdev_ops->ndo_fill_forward_path(&ctx, &path)) -- return -1; -+ err = dev_fill_forward_path(dev, addr, &stack); -+ if (err) -+ return err; - -- if (path.type != DEV_PATH_MTK_WDMA) -+ path = &stack.path[stack.num_paths - 1]; -+ if (path->type != DEV_PATH_MTK_WDMA) - return -1; - -- info->wdma_idx = path.mtk_wdma.wdma_idx; -- info->queue = path.mtk_wdma.queue; -- info->bss = path.mtk_wdma.bss; -- info->wcid = path.mtk_wdma.wcid; -+ info->wdma_idx = path->mtk_wdma.wdma_idx; -+ info->queue = path->mtk_wdma.queue; -+ info->bss = path->mtk_wdma.bss; -+ info->wcid = path->mtk_wdma.wcid; - - return 0; - } diff --git a/target/linux/generic/backport-6.1/711-v6.0-04-net-ethernet-mtk_eth_soc-remove-mtk_foe_entry_timest.patch b/target/linux/generic/backport-6.1/711-v6.0-04-net-ethernet-mtk_eth_soc-remove-mtk_foe_entry_timest.patch deleted file mode 100644 index 3c28e8355182..000000000000 --- a/target/linux/generic/backport-6.1/711-v6.0-04-net-ethernet-mtk_eth_soc-remove-mtk_foe_entry_timest.patch +++ /dev/null @@ -1,33 +0,0 @@ -From c9daab322313087afde8c46f41df3c628410ae20 Mon Sep 17 00:00:00 2001 -From: Lorenzo Bianconi -Date: Mon, 5 Sep 2022 14:46:01 +0200 -Subject: [PATCH] net: ethernet: mtk_eth_soc: remove mtk_foe_entry_timestamp - -Get rid of mtk_foe_entry_timestamp routine since it is no longer used. - -Signed-off-by: Lorenzo Bianconi -Signed-off-by: David S. 
Miller ---- - drivers/net/ethernet/mediatek/mtk_ppe.h | 11 ----------- - 1 file changed, 11 deletions(-) - ---- a/drivers/net/ethernet/mediatek/mtk_ppe.h -+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h -@@ -302,17 +302,6 @@ mtk_ppe_check_skb(struct mtk_ppe *ppe, s - __mtk_ppe_check_skb(ppe, skb, hash); - } - --static inline int --mtk_foe_entry_timestamp(struct mtk_ppe *ppe, u16 hash) --{ -- u32 ib1 = READ_ONCE(ppe->foe_table[hash].ib1); -- -- if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) -- return -1; -- -- return FIELD_GET(MTK_FOE_IB1_BIND_TIMESTAMP, ib1); --} -- - int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto, - u8 pse_port, u8 *src_mac, u8 *dest_mac); - int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port); diff --git a/target/linux/generic/backport-6.1/712-v6.0-net-ethernet-mtk_eth_soc-enable-XDP-support-just-for.patch b/target/linux/generic/backport-6.1/712-v6.0-net-ethernet-mtk_eth_soc-enable-XDP-support-just-for.patch deleted file mode 100644 index f6fc7340794b..000000000000 --- a/target/linux/generic/backport-6.1/712-v6.0-net-ethernet-mtk_eth_soc-enable-XDP-support-just-for.patch +++ /dev/null @@ -1,35 +0,0 @@ -From 5e69163d3b9931098922b3fc2f8e786af8c1f37e Mon Sep 17 00:00:00 2001 -From: Lorenzo Bianconi -Date: Tue, 13 Sep 2022 15:03:05 +0200 -Subject: [PATCH] net: ethernet: mtk_eth_soc: enable XDP support just for - MT7986 SoC -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Disable page_pool/XDP support for MT7621 SoC in order fix a regression -introduce adding XDP for MT7986 SoC. There is no a real use case for XDP -on MT7621 since it is a low-end cpu. Moreover this patch reduces the -memory footprint. - -Tested-by: Sergio Paracuellos -Tested-by: Arınç ÜNAL -Fixes: 23233e577ef9 ("net: ethernet: mtk_eth_soc: rely on page_pool for single page buffers") -Signed-off-by: Lorenzo Bianconi -Link: https://lore.kernel.org/r/2bf31e27b888c43228b0d84dd2ef5033338269e2.1663074002.git.lorenzo@kernel.org -Signed-off-by: Jakub Kicinski ---- - drivers/net/ethernet/mediatek/mtk_eth_soc.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -1413,7 +1413,7 @@ static void mtk_update_rx_cpu_idx(struct - - static bool mtk_page_pool_enabled(struct mtk_eth *eth) - { -- return !eth->hwlro; -+ return MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2); - } - - static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth, diff --git a/target/linux/generic/backport-6.1/713-v6.0-net-ethernet-mtk_eth_soc-move-gdma_to_ppe-and-ppe_ba.patch b/target/linux/generic/backport-6.1/713-v6.0-net-ethernet-mtk_eth_soc-move-gdma_to_ppe-and-ppe_ba.patch deleted file mode 100644 index f8bbea6c8586..000000000000 --- a/target/linux/generic/backport-6.1/713-v6.0-net-ethernet-mtk_eth_soc-move-gdma_to_ppe-and-ppe_ba.patch +++ /dev/null @@ -1,127 +0,0 @@ -From patchwork Thu Sep 8 19:33:38 2022 -Content-Type: text/plain; charset="utf-8" -MIME-Version: 1.0 -Content-Transfer-Encoding: 7bit -X-Patchwork-Submitter: Lorenzo Bianconi -X-Patchwork-Id: 12970556 -X-Patchwork-Delegate: kuba@kernel.org -From: Lorenzo Bianconi -To: netdev@vger.kernel.org -Cc: nbd@nbd.name, john@phrozen.org, sean.wang@mediatek.com, - Mark-MC.Lee@mediatek.com, davem@davemloft.net, edumazet@google.com, - kuba@kernel.org, pabeni@redhat.com, matthias.bgg@gmail.com, - linux-mediatek@lists.infradead.org, lorenzo.bianconi@redhat.com, - Bo.Jiao@mediatek.com, 
sujuan.chen@mediatek.com, - ryder.Lee@mediatek.com, evelyn.tsai@mediatek.com, - devicetree@vger.kernel.org, robh@kernel.org -Subject: [PATCH net-next 03/12] net: ethernet: mtk_eth_soc: move gdma_to_ppe - and ppe_base definitions in mtk register map -Date: Thu, 8 Sep 2022 21:33:37 +0200 -Message-Id: - <95938fc9cbe0223714be2658a49ca58e9baace00.1662661555.git.lorenzo@kernel.org> -X-Mailer: git-send-email 2.37.3 -In-Reply-To: -References: -MIME-Version: 1.0 -Precedence: bulk -List-ID: -X-Mailing-List: netdev@vger.kernel.org -X-Patchwork-Delegate: kuba@kernel.org - -This is a preliminary patch to introduce mt7986 hw packet engine. - -Signed-off-by: Lorenzo Bianconi ---- - drivers/net/ethernet/mediatek/mtk_eth_soc.c | 15 +++++++++++---- - drivers/net/ethernet/mediatek/mtk_eth_soc.h | 3 ++- - drivers/net/ethernet/mediatek/mtk_ppe.h | 2 -- - 3 files changed, 13 insertions(+), 7 deletions(-) - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -73,6 +73,8 @@ static const struct mtk_reg_map mtk_reg_ - .fq_blen = 0x1b2c, - }, - .gdm1_cnt = 0x2400, -+ .gdma_to_ppe0 = 0x4444, -+ .ppe_base = 0x0c00, - }; - - static const struct mtk_reg_map mt7628_reg_map = { -@@ -126,6 +128,8 @@ static const struct mtk_reg_map mt7986_r - .fq_blen = 0x472c, - }, - .gdm1_cnt = 0x1c00, -+ .gdma_to_ppe0 = 0x3333, -+ .ppe_base = 0x2000, - }; - - /* strings used by ethtool */ -@@ -2925,6 +2929,7 @@ static int mtk_open(struct net_device *d - - /* we run 2 netdevs on the same dma ring so we only bring it up once */ - if (!refcount_read(ð->dma_refcnt)) { -+ const struct mtk_soc_data *soc = eth->soc; - u32 gdm_config = MTK_GDMA_TO_PDMA; - int err; - -@@ -2934,15 +2939,15 @@ static int mtk_open(struct net_device *d - return err; - } - -- if (eth->soc->offload_version && mtk_ppe_start(eth->ppe) == 0) -- gdm_config = MTK_GDMA_TO_PPE; -+ if (soc->offload_version && mtk_ppe_start(eth->ppe) == 0) -+ gdm_config = soc->reg_map->gdma_to_ppe0; - - mtk_gdm_config(eth, gdm_config); - - napi_enable(ð->tx_napi); - napi_enable(ð->rx_napi); - mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); -- mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask); -+ mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask); - refcount_set(ð->dma_refcnt, 1); - } - else -@@ -4048,7 +4053,9 @@ static int mtk_probe(struct platform_dev - } - - if (eth->soc->offload_version) { -- eth->ppe = mtk_ppe_init(eth, eth->base + MTK_ETH_PPE_BASE, 2); -+ u32 ppe_addr = eth->soc->reg_map->ppe_base; -+ -+ eth->ppe = mtk_ppe_init(eth, eth->base + ppe_addr, 2); - if (!eth->ppe) { - err = -ENOMEM; - goto err_free_dev; ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -105,7 +105,6 @@ - #define MTK_GDMA_TCS_EN BIT(21) - #define MTK_GDMA_UCS_EN BIT(20) - #define MTK_GDMA_TO_PDMA 0x0 --#define MTK_GDMA_TO_PPE 0x4444 - #define MTK_GDMA_DROP_ALL 0x7777 - - /* Unicast Filter MAC Address Register - Low */ -@@ -953,6 +952,8 @@ struct mtk_reg_map { - u32 fq_blen; /* fq free page buffer length */ - } qdma; - u32 gdm1_cnt; -+ u32 gdma_to_ppe0; -+ u32 ppe_base; - }; - - /* struct mtk_eth_data - This is the structure holding all differences ---- a/drivers/net/ethernet/mediatek/mtk_ppe.h -+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h -@@ -8,8 +8,6 @@ - #include - #include - --#define MTK_ETH_PPE_BASE 0xc00 -- - #define MTK_PPE_ENTRIES_SHIFT 3 - #define MTK_PPE_ENTRIES (1024 << MTK_PPE_ENTRIES_SHIFT) - #define MTK_PPE_HASH_MASK (MTK_PPE_ENTRIES - 1) diff --git 
a/target/linux/generic/backport-6.1/714-v6.0-net-ethernet-mtk_eth_soc-move-ppe-table-hash-offset-.patch b/target/linux/generic/backport-6.1/714-v6.0-net-ethernet-mtk_eth_soc-move-ppe-table-hash-offset-.patch deleted file mode 100644 index 7bed2785c99b..000000000000 --- a/target/linux/generic/backport-6.1/714-v6.0-net-ethernet-mtk_eth_soc-move-ppe-table-hash-offset-.patch +++ /dev/null @@ -1,199 +0,0 @@ -From patchwork Thu Sep 8 19:33:38 2022 -Content-Type: text/plain; charset="utf-8" -MIME-Version: 1.0 -Content-Transfer-Encoding: 7bit -X-Patchwork-Submitter: Lorenzo Bianconi -X-Patchwork-Id: 12970557 -X-Patchwork-Delegate: kuba@kernel.org -From: Lorenzo Bianconi -To: netdev@vger.kernel.org -Cc: nbd@nbd.name, john@phrozen.org, sean.wang@mediatek.com, - Mark-MC.Lee@mediatek.com, davem@davemloft.net, edumazet@google.com, - kuba@kernel.org, pabeni@redhat.com, matthias.bgg@gmail.com, - linux-mediatek@lists.infradead.org, lorenzo.bianconi@redhat.com, - Bo.Jiao@mediatek.com, sujuan.chen@mediatek.com, - ryder.Lee@mediatek.com, evelyn.tsai@mediatek.com, - devicetree@vger.kernel.org, robh@kernel.org -Subject: [PATCH net-next 04/12] net: ethernet: mtk_eth_soc: move ppe table - hash offset to mtk_soc_data structure -Date: Thu, 8 Sep 2022 21:33:38 +0200 -Message-Id: - -X-Mailer: git-send-email 2.37.3 -In-Reply-To: -References: -MIME-Version: 1.0 -Precedence: bulk -List-ID: -X-Mailing-List: netdev@vger.kernel.org -X-Patchwork-Delegate: kuba@kernel.org - -This is a preliminary patch to introduce mt7986 hw packet engine. - -Co-developed-by: Bo Jiao -Signed-off-by: Bo Jiao -Co-developed-by: Sujuan Chen -Signed-off-by: Sujuan Chen -Signed-off-by: Lorenzo Bianconi ---- - drivers/net/ethernet/mediatek/mtk_eth_soc.c | 4 ++++ - drivers/net/ethernet/mediatek/mtk_eth_soc.h | 2 ++ - drivers/net/ethernet/mediatek/mtk_ppe.c | 24 +++++++++++++++------ - drivers/net/ethernet/mediatek/mtk_ppe.h | 2 +- - 4 files changed, 25 insertions(+), 7 deletions(-) - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -4151,6 +4151,7 @@ static const struct mtk_soc_data mt7621_ - .required_clks = MT7621_CLKS_BITMAP, - .required_pctl = false, - .offload_version = 2, -+ .hash_offset = 2, - .txrx = { - .txd_size = sizeof(struct mtk_tx_dma), - .rxd_size = sizeof(struct mtk_rx_dma), -@@ -4169,6 +4170,7 @@ static const struct mtk_soc_data mt7622_ - .required_clks = MT7622_CLKS_BITMAP, - .required_pctl = false, - .offload_version = 2, -+ .hash_offset = 2, - .txrx = { - .txd_size = sizeof(struct mtk_tx_dma), - .rxd_size = sizeof(struct mtk_rx_dma), -@@ -4186,6 +4188,7 @@ static const struct mtk_soc_data mt7623_ - .required_clks = MT7623_CLKS_BITMAP, - .required_pctl = true, - .offload_version = 2, -+ .hash_offset = 2, - .txrx = { - .txd_size = sizeof(struct mtk_tx_dma), - .rxd_size = sizeof(struct mtk_rx_dma), -@@ -4219,6 +4222,7 @@ static const struct mtk_soc_data mt7986_ - .caps = MT7986_CAPS, - .required_clks = MT7986_CLKS_BITMAP, - .required_pctl = false, -+ .hash_offset = 4, - .txrx = { - .txd_size = sizeof(struct mtk_tx_dma_v2), - .rxd_size = sizeof(struct mtk_rx_dma_v2), ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -967,6 +967,7 @@ struct mtk_reg_map { - * the target SoC - * @required_pctl A bool value to show whether the SoC requires - * the extra setup for those pins used by GMAC. -+ * @hash_offset Flow table hash offset. - * @txd_size Tx DMA descriptor size. - * @rxd_size Rx DMA descriptor size. 
- * @rx_irq_done_mask Rx irq done register mask. -@@ -981,6 +982,7 @@ struct mtk_soc_data { - u32 required_clks; - bool required_pctl; - u8 offload_version; -+ u8 hash_offset; - netdev_features_t hw_features; - struct { - u32 txd_size; ---- a/drivers/net/ethernet/mediatek/mtk_ppe.c -+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c -@@ -88,7 +88,7 @@ static void mtk_ppe_cache_enable(struct - enable * MTK_PPE_CACHE_CTL_EN); - } - --static u32 mtk_ppe_hash_entry(struct mtk_foe_entry *e) -+static u32 mtk_ppe_hash_entry(struct mtk_eth *eth, struct mtk_foe_entry *e) - { - u32 hv1, hv2, hv3; - u32 hash; -@@ -122,7 +122,7 @@ static u32 mtk_ppe_hash_entry(struct mtk - hash = (hash >> 24) | ((hash & 0xffffff) << 8); - hash ^= hv1 ^ hv2 ^ hv3; - hash ^= hash >> 16; -- hash <<= 1; -+ hash <<= (ffs(eth->soc->hash_offset) - 1); - hash &= MTK_PPE_ENTRIES - 1; - - return hash; -@@ -540,15 +540,16 @@ mtk_foe_entry_commit_l2(struct mtk_ppe * - int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) - { - int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1); -+ const struct mtk_soc_data *soc = ppe->eth->soc; - u32 hash; - - if (type == MTK_PPE_PKT_TYPE_BRIDGE) - return mtk_foe_entry_commit_l2(ppe, entry); - -- hash = mtk_ppe_hash_entry(&entry->data); -+ hash = mtk_ppe_hash_entry(ppe->eth, &entry->data); - entry->hash = 0xffff; - spin_lock_bh(&ppe_lock); -- hlist_add_head(&entry->list, &ppe->foe_flow[hash / 2]); -+ hlist_add_head(&entry->list, &ppe->foe_flow[hash / soc->hash_offset]); - spin_unlock_bh(&ppe_lock); - - return 0; -@@ -558,6 +559,7 @@ static void - mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry, - u16 hash) - { -+ const struct mtk_soc_data *soc = ppe->eth->soc; - struct mtk_flow_entry *flow_info; - struct mtk_foe_entry foe, *hwe; - struct mtk_foe_mac_info *l2; -@@ -572,7 +574,8 @@ mtk_foe_entry_commit_subflow(struct mtk_ - flow_info->l2_data.base_flow = entry; - flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW; - flow_info->hash = hash; -- hlist_add_head(&flow_info->list, &ppe->foe_flow[hash / 2]); -+ hlist_add_head(&flow_info->list, -+ &ppe->foe_flow[hash / soc->hash_offset]); - hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows); - - hwe = &ppe->foe_table[hash]; -@@ -596,7 +599,8 @@ mtk_foe_entry_commit_subflow(struct mtk_ - - void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash) - { -- struct hlist_head *head = &ppe->foe_flow[hash / 2]; -+ const struct mtk_soc_data *soc = ppe->eth->soc; -+ struct hlist_head *head = &ppe->foe_flow[hash / soc->hash_offset]; - struct mtk_foe_entry *hwe = &ppe->foe_table[hash]; - struct mtk_flow_entry *entry; - struct mtk_foe_bridge key = {}; -@@ -680,9 +684,11 @@ int mtk_foe_entry_idle_time(struct mtk_p - struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, - int version) - { -+ const struct mtk_soc_data *soc = eth->soc; - struct device *dev = eth->dev; - struct mtk_foe_entry *foe; - struct mtk_ppe *ppe; -+ u32 foe_flow_size; - - ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL); - if (!ppe) -@@ -705,6 +711,12 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_ - - ppe->foe_table = foe; - -+ foe_flow_size = (MTK_PPE_ENTRIES / soc->hash_offset) * -+ sizeof(*ppe->foe_flow); -+ ppe->foe_flow = devm_kzalloc(dev, foe_flow_size, GFP_KERNEL); -+ if (!ppe->foe_flow) -+ return NULL; -+ - mtk_ppe_debugfs_init(ppe); - - return ppe; ---- a/drivers/net/ethernet/mediatek/mtk_ppe.h -+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h -@@ -270,7 +270,7 @@ struct mtk_ppe { - dma_addr_t foe_phys; 
- - u16 foe_check_time[MTK_PPE_ENTRIES]; -- struct hlist_head foe_flow[MTK_PPE_ENTRIES / 2]; -+ struct hlist_head *foe_flow; - - struct rhashtable l2_flows; - diff --git a/target/linux/generic/backport-6.1/715-v6.0-net-ethernet-mtk_eth_soc-add-the-capability-to-run-m.patch b/target/linux/generic/backport-6.1/715-v6.0-net-ethernet-mtk_eth_soc-add-the-capability-to-run-m.patch deleted file mode 100644 index 93860e414573..000000000000 --- a/target/linux/generic/backport-6.1/715-v6.0-net-ethernet-mtk_eth_soc-add-the-capability-to-run-m.patch +++ /dev/null @@ -1,318 +0,0 @@ -From patchwork Thu Sep 8 19:33:39 2022 -Content-Type: text/plain; charset="utf-8" -MIME-Version: 1.0 -Content-Transfer-Encoding: 7bit -X-Patchwork-Submitter: Lorenzo Bianconi -X-Patchwork-Id: 12970559 -X-Patchwork-Delegate: kuba@kernel.org -Return-Path: -From: Lorenzo Bianconi -To: netdev@vger.kernel.org -Cc: nbd@nbd.name, john@phrozen.org, sean.wang@mediatek.com, - Mark-MC.Lee@mediatek.com, davem@davemloft.net, edumazet@google.com, - kuba@kernel.org, pabeni@redhat.com, matthias.bgg@gmail.com, - linux-mediatek@lists.infradead.org, lorenzo.bianconi@redhat.com, - Bo.Jiao@mediatek.com, sujuan.chen@mediatek.com, - ryder.Lee@mediatek.com, evelyn.tsai@mediatek.com, - devicetree@vger.kernel.org, robh@kernel.org -Subject: [PATCH net-next 05/12] net: ethernet: mtk_eth_soc: add the capability - to run multiple ppe -Date: Thu, 8 Sep 2022 21:33:39 +0200 -Message-Id: - -X-Mailer: git-send-email 2.37.3 -In-Reply-To: -References: -MIME-Version: 1.0 -Precedence: bulk -List-ID: -X-Mailing-List: netdev@vger.kernel.org -X-Patchwork-Delegate: kuba@kernel.org - -mt7986 chipset support multiple packet engines for wlan <-> eth -packet forwarding. - -Co-developed-by: Bo Jiao -Signed-off-by: Bo Jiao -Co-developed-by: Sujuan Chen -Signed-off-by: Sujuan Chen -Signed-off-by: Lorenzo Bianconi ---- - drivers/net/ethernet/mediatek/mtk_eth_soc.c | 35 ++++++++++++------- - drivers/net/ethernet/mediatek/mtk_eth_soc.h | 2 +- - drivers/net/ethernet/mediatek/mtk_ppe.c | 14 +++++--- - drivers/net/ethernet/mediatek/mtk_ppe.h | 9 +++-- - .../net/ethernet/mediatek/mtk_ppe_debugfs.c | 8 ++--- - .../net/ethernet/mediatek/mtk_ppe_offload.c | 13 +++---- - 6 files changed, 48 insertions(+), 33 deletions(-) - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -1872,7 +1872,7 @@ static int mtk_poll_rx(struct napi_struc - - reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4); - if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED) -- mtk_ppe_check_skb(eth->ppe, skb, hash); -+ mtk_ppe_check_skb(eth->ppe[0], skb, hash); - - if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { - if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { -@@ -2930,7 +2930,8 @@ static int mtk_open(struct net_device *d - /* we run 2 netdevs on the same dma ring so we only bring it up once */ - if (!refcount_read(ð->dma_refcnt)) { - const struct mtk_soc_data *soc = eth->soc; -- u32 gdm_config = MTK_GDMA_TO_PDMA; -+ u32 gdm_config; -+ int i; - int err; - - err = mtk_start_dma(eth); -@@ -2939,8 +2940,11 @@ static int mtk_open(struct net_device *d - return err; - } - -- if (soc->offload_version && mtk_ppe_start(eth->ppe) == 0) -- gdm_config = soc->reg_map->gdma_to_ppe0; -+ for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) -+ mtk_ppe_start(eth->ppe[i]); -+ -+ gdm_config = soc->offload_version ? 
soc->reg_map->gdma_to_ppe0 -+ : MTK_GDMA_TO_PDMA; - - mtk_gdm_config(eth, gdm_config); - -@@ -2985,6 +2989,7 @@ static int mtk_stop(struct net_device *d - { - struct mtk_mac *mac = netdev_priv(dev); - struct mtk_eth *eth = mac->hw; -+ int i; - - phylink_stop(mac->phylink); - -@@ -3012,8 +3017,8 @@ static int mtk_stop(struct net_device *d - - mtk_dma_free(eth); - -- if (eth->soc->offload_version) -- mtk_ppe_stop(eth->ppe); -+ for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) -+ mtk_ppe_stop(eth->ppe[i]); - - return 0; - } -@@ -4053,12 +4058,19 @@ static int mtk_probe(struct platform_dev - } - - if (eth->soc->offload_version) { -- u32 ppe_addr = eth->soc->reg_map->ppe_base; -+ u32 num_ppe; - -- eth->ppe = mtk_ppe_init(eth, eth->base + ppe_addr, 2); -- if (!eth->ppe) { -- err = -ENOMEM; -- goto err_free_dev; -+ num_ppe = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1; -+ num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe); -+ for (i = 0; i < num_ppe; i++) { -+ u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400; -+ -+ eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr, -+ eth->soc->offload_version, i); -+ if (!eth->ppe[i]) { -+ err = -ENOMEM; -+ goto err_free_dev; -+ } - } - - err = mtk_eth_offload_init(eth); ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -1112,7 +1112,7 @@ struct mtk_eth { - - int ip_align; - -- struct mtk_ppe *ppe; -+ struct mtk_ppe *ppe[2]; - struct rhashtable flow_table; - - struct bpf_prog __rcu *prog; ---- a/drivers/net/ethernet/mediatek/mtk_ppe.c -+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c -@@ -682,7 +682,7 @@ int mtk_foe_entry_idle_time(struct mtk_p - } - - struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, -- int version) -+ int version, int index) - { - const struct mtk_soc_data *soc = eth->soc; - struct device *dev = eth->dev; -@@ -717,7 +717,7 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_ - if (!ppe->foe_flow) - return NULL; - -- mtk_ppe_debugfs_init(ppe); -+ mtk_ppe_debugfs_init(ppe, index); - - return ppe; - } -@@ -738,10 +738,13 @@ static void mtk_ppe_init_foe_table(struc - ppe->foe_table[i + skip[k]].ib1 |= MTK_FOE_IB1_STATIC; - } - --int mtk_ppe_start(struct mtk_ppe *ppe) -+void mtk_ppe_start(struct mtk_ppe *ppe) - { - u32 val; - -+ if (!ppe) -+ return; -+ - mtk_ppe_init_foe_table(ppe); - ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys); - -@@ -809,8 +812,6 @@ int mtk_ppe_start(struct mtk_ppe *ppe) - ppe_w32(ppe, MTK_PPE_GLO_CFG, val); - - ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0); -- -- return 0; - } - - int mtk_ppe_stop(struct mtk_ppe *ppe) -@@ -818,6 +819,9 @@ int mtk_ppe_stop(struct mtk_ppe *ppe) - u32 val; - int i; - -+ if (!ppe) -+ return 0; -+ - for (i = 0; i < MTK_PPE_ENTRIES; i++) - ppe->foe_table[i].ib1 = FIELD_PREP(MTK_FOE_IB1_STATE, - MTK_FOE_STATE_INVALID); ---- a/drivers/net/ethernet/mediatek/mtk_ppe.h -+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h -@@ -247,6 +247,7 @@ struct mtk_flow_entry { - }; - u8 type; - s8 wed_index; -+ u8 ppe_index; - u16 hash; - union { - struct mtk_foe_entry data; -@@ -265,6 +266,7 @@ struct mtk_ppe { - struct device *dev; - void __iomem *base; - int version; -+ char dirname[5]; - - struct mtk_foe_entry *foe_table; - dma_addr_t foe_phys; -@@ -277,8 +279,9 @@ struct mtk_ppe { - void *acct_table; - }; - --struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int version); --int mtk_ppe_start(struct mtk_ppe *ppe); -+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, -+ int version, int index); -+void 
mtk_ppe_start(struct mtk_ppe *ppe); - int mtk_ppe_stop(struct mtk_ppe *ppe); - - void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash); -@@ -317,6 +320,6 @@ int mtk_foe_entry_set_wdma(struct mtk_fo - int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry); - void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry); - int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry); --int mtk_ppe_debugfs_init(struct mtk_ppe *ppe); -+int mtk_ppe_debugfs_init(struct mtk_ppe *ppe, int index); - - #endif ---- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c -+++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c -@@ -187,7 +187,7 @@ mtk_ppe_debugfs_foe_open_bind(struct ino - inode->i_private); - } - --int mtk_ppe_debugfs_init(struct mtk_ppe *ppe) -+int mtk_ppe_debugfs_init(struct mtk_ppe *ppe, int index) - { - static const struct file_operations fops_all = { - .open = mtk_ppe_debugfs_foe_open_all, -@@ -195,17 +195,17 @@ int mtk_ppe_debugfs_init(struct mtk_ppe - .llseek = seq_lseek, - .release = single_release, - }; -- - static const struct file_operations fops_bind = { - .open = mtk_ppe_debugfs_foe_open_bind, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - }; -- - struct dentry *root; - -- root = debugfs_create_dir("mtk_ppe", NULL); -+ snprintf(ppe->dirname, sizeof(ppe->dirname), "ppe%d", index); -+ -+ root = debugfs_create_dir(ppe->dirname, NULL); - if (!root) - return -ENOMEM; - ---- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c -+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c -@@ -434,7 +434,7 @@ mtk_flow_offload_replace(struct mtk_eth - memcpy(&entry->data, &foe, sizeof(entry->data)); - entry->wed_index = wed_index; - -- err = mtk_foe_entry_commit(eth->ppe, entry); -+ err = mtk_foe_entry_commit(eth->ppe[entry->ppe_index], entry); - if (err < 0) - goto free; - -@@ -446,7 +446,7 @@ mtk_flow_offload_replace(struct mtk_eth - return 0; - - clear: -- mtk_foe_entry_clear(eth->ppe, entry); -+ mtk_foe_entry_clear(eth->ppe[entry->ppe_index], entry); - free: - kfree(entry); - if (wed_index >= 0) -@@ -464,7 +464,7 @@ mtk_flow_offload_destroy(struct mtk_eth - if (!entry) - return -ENOENT; - -- mtk_foe_entry_clear(eth->ppe, entry); -+ mtk_foe_entry_clear(eth->ppe[entry->ppe_index], entry); - rhashtable_remove_fast(ð->flow_table, &entry->node, - mtk_flow_ht_params); - if (entry->wed_index >= 0) -@@ -485,7 +485,7 @@ mtk_flow_offload_stats(struct mtk_eth *e - if (!entry) - return -ENOENT; - -- idle = mtk_foe_entry_idle_time(eth->ppe, entry); -+ idle = mtk_foe_entry_idle_time(eth->ppe[entry->ppe_index], entry); - f->stats.lastused = jiffies - idle * HZ; - - return 0; -@@ -537,7 +537,7 @@ mtk_eth_setup_tc_block(struct net_device - struct flow_block_cb *block_cb; - flow_setup_cb_t *cb; - -- if (!eth->ppe || !eth->ppe->foe_table) -+ if (!eth->soc->offload_version) - return -EOPNOTSUPP; - - if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) -@@ -590,8 +590,5 @@ int mtk_eth_setup_tc(struct net_device * - - int mtk_eth_offload_init(struct mtk_eth *eth) - { -- if (!eth->ppe || !eth->ppe->foe_table) -- return 0; -- - return rhashtable_init(ð->flow_table, &mtk_flow_ht_params); - } diff --git a/target/linux/generic/backport-6.1/716-v6.0-net-ethernet-mtk_eth_soc-move-wdma_base-definitions-.patch b/target/linux/generic/backport-6.1/716-v6.0-net-ethernet-mtk_eth_soc-move-wdma_base-definitions-.patch deleted file mode 100644 index a4b285632e1a..000000000000 --- 
a/target/linux/generic/backport-6.1/716-v6.0-net-ethernet-mtk_eth_soc-move-wdma_base-definitions-.patch +++ /dev/null @@ -1,80 +0,0 @@ -From 0dcbe607cec32ccae23b02a641b8bd6191a328ae Mon Sep 17 00:00:00 2001 -Message-Id: <0dcbe607cec32ccae23b02a641b8bd6191a328ae.1662243796.git.lorenzo@kernel.org> -In-Reply-To: <43a21841ce0175d29f23c34a65ceaaf9dd7eb8b7.1662243796.git.lorenzo@kernel.org> -References: <43a21841ce0175d29f23c34a65ceaaf9dd7eb8b7.1662243796.git.lorenzo@kernel.org> -From: Lorenzo Bianconi -Date: Tue, 23 Aug 2022 23:09:05 +0200 -Subject: [PATCH net-next 2/4] net: ethernet: mtk_eth_soc: move wdma_base - definitions in mtk register map - -This is a preliminary patch to introduce mt7986 wed support. - -Signed-off-by: Lorenzo Bianconi ---- - drivers/net/ethernet/mediatek/mtk_eth_soc.c | 16 ++++++++++------ - drivers/net/ethernet/mediatek/mtk_eth_soc.h | 4 +--- - 2 files changed, 11 insertions(+), 9 deletions(-) - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -75,6 +75,10 @@ static const struct mtk_reg_map mtk_reg_ - .gdm1_cnt = 0x2400, - .gdma_to_ppe0 = 0x4444, - .ppe_base = 0x0c00, -+ .wdma_base = { -+ [0] = 0x2800, -+ [1] = 0x2c00, -+ }, - }; - - static const struct mtk_reg_map mt7628_reg_map = { -@@ -130,6 +134,10 @@ static const struct mtk_reg_map mt7986_r - .gdm1_cnt = 0x1c00, - .gdma_to_ppe0 = 0x3333, - .ppe_base = 0x2000, -+ .wdma_base = { -+ [0] = 0x4800, -+ [1] = 0x4c00, -+ }, - }; - - /* strings used by ethtool */ -@@ -3970,16 +3978,12 @@ static int mtk_probe(struct platform_dev - for (i = 0;; i++) { - struct device_node *np = of_parse_phandle(pdev->dev.of_node, - "mediatek,wed", i); -- static const u32 wdma_regs[] = { -- MTK_WDMA0_BASE, -- MTK_WDMA1_BASE -- }; - void __iomem *wdma; - -- if (!np || i >= ARRAY_SIZE(wdma_regs)) -+ if (!np || i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base)) - break; - -- wdma = eth->base + wdma_regs[i]; -+ wdma = eth->base + eth->soc->reg_map->wdma_base[i]; - mtk_wed_add_hw(np, eth, wdma, i); - } - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -268,9 +268,6 @@ - #define TX_DMA_FPORT_MASK_V2 0xf - #define TX_DMA_SWC_V2 BIT(30) - --#define MTK_WDMA0_BASE 0x2800 --#define MTK_WDMA1_BASE 0x2c00 -- - /* QDMA descriptor txd4 */ - #define TX_DMA_CHKSUM (0x7 << 29) - #define TX_DMA_TSO BIT(28) -@@ -954,6 +951,7 @@ struct mtk_reg_map { - u32 gdm1_cnt; - u32 gdma_to_ppe0; - u32 ppe_base; -+ u32 wdma_base[2]; - }; - - /* struct mtk_eth_data - This is the structure holding all differences diff --git a/target/linux/generic/backport-6.1/717-v6.0-net-ethernet-mtk_eth_soc-add-foe_entry_size-to-mtk_e.patch b/target/linux/generic/backport-6.1/717-v6.0-net-ethernet-mtk_eth_soc-add-foe_entry_size-to-mtk_e.patch deleted file mode 100644 index 2bce63c4c01b..000000000000 --- a/target/linux/generic/backport-6.1/717-v6.0-net-ethernet-mtk_eth_soc-add-foe_entry_size-to-mtk_e.patch +++ /dev/null @@ -1,251 +0,0 @@ -From e3c27d869fccc1f2b8d0b4cde4763ab223874e8c Mon Sep 17 00:00:00 2001 -Message-Id: -In-Reply-To: <43a21841ce0175d29f23c34a65ceaaf9dd7eb8b7.1662243796.git.lorenzo@kernel.org> -References: <43a21841ce0175d29f23c34a65ceaaf9dd7eb8b7.1662243796.git.lorenzo@kernel.org> -From: Lorenzo Bianconi -Date: Sun, 21 Aug 2022 17:51:17 +0200 -Subject: [PATCH net-next 3/4] net: ethernet: mtk_eth_soc: add foe_entry_size - to mtk_eth_soc - -Introduce foe_entry_size to mtk_eth_soc data structure since mt7986 -relies on a bigger mtk_foe_entry data structure. 
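The mechanics are plain stride arithmetic: once the FOE table base is treated as an untyped pointer, entry N lives at base plus N times the per-SoC entry size, which is what the mtk_foe_get_entry() helper added below computes. A minimal generic sketch of the same idea (the struct and function names here are illustrative, not the driver's):

    #include <linux/types.h>

    struct foe_tbl {
        void *base;        /* DMA-coherent table memory */
        u16   entry_size;  /* per-SoC descriptor size, e.g. the larger v2 layout */
    };

    static inline void *foe_entry_at(const struct foe_tbl *tbl, u16 hash)
    {
        return (u8 *)tbl->base + (size_t)hash * tbl->entry_size;
    }
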
- -Signed-off-by: Lorenzo Bianconi ---- - drivers/net/ethernet/mediatek/mtk_eth_soc.c | 3 + - drivers/net/ethernet/mediatek/mtk_eth_soc.h | 10 ++++ - drivers/net/ethernet/mediatek/mtk_ppe.c | 55 +++++++++++-------- - drivers/net/ethernet/mediatek/mtk_ppe.h | 2 +- - .../net/ethernet/mediatek/mtk_ppe_debugfs.c | 2 +- - 5 files changed, 48 insertions(+), 24 deletions(-) - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -4168,6 +4168,7 @@ static const struct mtk_soc_data mt7621_ - .required_pctl = false, - .offload_version = 2, - .hash_offset = 2, -+ .foe_entry_size = sizeof(struct mtk_foe_entry), - .txrx = { - .txd_size = sizeof(struct mtk_tx_dma), - .rxd_size = sizeof(struct mtk_rx_dma), -@@ -4187,6 +4188,7 @@ static const struct mtk_soc_data mt7622_ - .required_pctl = false, - .offload_version = 2, - .hash_offset = 2, -+ .foe_entry_size = sizeof(struct mtk_foe_entry), - .txrx = { - .txd_size = sizeof(struct mtk_tx_dma), - .rxd_size = sizeof(struct mtk_rx_dma), -@@ -4205,6 +4207,7 @@ static const struct mtk_soc_data mt7623_ - .required_pctl = true, - .offload_version = 2, - .hash_offset = 2, -+ .foe_entry_size = sizeof(struct mtk_foe_entry), - .txrx = { - .txd_size = sizeof(struct mtk_tx_dma), - .rxd_size = sizeof(struct mtk_rx_dma), ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -966,6 +966,7 @@ struct mtk_reg_map { - * @required_pctl A bool value to show whether the SoC requires - * the extra setup for those pins used by GMAC. - * @hash_offset Flow table hash offset. -+ * @foe_entry_size Foe table entry size. - * @txd_size Tx DMA descriptor size. - * @rxd_size Rx DMA descriptor size. - * @rx_irq_done_mask Rx irq done register mask. -@@ -981,6 +982,7 @@ struct mtk_soc_data { - bool required_pctl; - u8 offload_version; - u8 hash_offset; -+ u16 foe_entry_size; - netdev_features_t hw_features; - struct { - u32 txd_size; -@@ -1141,6 +1143,14 @@ struct mtk_mac { - /* the struct describing the SoC. 
these are declared in the soc_xyz.c files */ - extern const struct of_device_id of_mtk_match[]; - -+static inline struct mtk_foe_entry * -+mtk_foe_get_entry(struct mtk_ppe *ppe, u16 hash) -+{ -+ const struct mtk_soc_data *soc = ppe->eth->soc; -+ -+ return ppe->foe_table + hash * soc->foe_entry_size; -+} -+ - /* read the hardware status register */ - void mtk_stats_update_mac(struct mtk_mac *mac); - ---- a/drivers/net/ethernet/mediatek/mtk_ppe.c -+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c -@@ -410,9 +410,10 @@ __mtk_foe_entry_clear(struct mtk_ppe *pp - - hlist_del_init(&entry->list); - if (entry->hash != 0xffff) { -- ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE; -- ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, -- MTK_FOE_STATE_BIND); -+ struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, entry->hash); -+ -+ hwe->ib1 &= ~MTK_FOE_IB1_STATE; -+ hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND); - dma_wmb(); - } - entry->hash = 0xffff; -@@ -451,7 +452,7 @@ mtk_flow_entry_update_l2(struct mtk_ppe - int cur_idle; - u32 ib1; - -- hwe = &ppe->foe_table[cur->hash]; -+ hwe = mtk_foe_get_entry(ppe, cur->hash); - ib1 = READ_ONCE(hwe->ib1); - - if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) { -@@ -473,8 +474,8 @@ mtk_flow_entry_update_l2(struct mtk_ppe - static void - mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) - { -+ struct mtk_foe_entry foe = {}; - struct mtk_foe_entry *hwe; -- struct mtk_foe_entry foe; - - spin_lock_bh(&ppe_lock); - -@@ -486,8 +487,8 @@ mtk_flow_entry_update(struct mtk_ppe *pp - if (entry->hash == 0xffff) - goto out; - -- hwe = &ppe->foe_table[entry->hash]; -- memcpy(&foe, hwe, sizeof(foe)); -+ hwe = mtk_foe_get_entry(ppe, entry->hash); -+ memcpy(&foe, hwe, ppe->eth->soc->foe_entry_size); - if (!mtk_flow_entry_match(entry, &foe)) { - entry->hash = 0xffff; - goto out; -@@ -511,8 +512,8 @@ __mtk_foe_entry_commit(struct mtk_ppe *p - entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP; - entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp); - -- hwe = &ppe->foe_table[hash]; -- memcpy(&hwe->data, &entry->data, sizeof(hwe->data)); -+ hwe = mtk_foe_get_entry(ppe, hash); -+ memcpy(&hwe->data, &entry->data, ppe->eth->soc->foe_entry_size); - wmb(); - hwe->ib1 = entry->ib1; - -@@ -561,7 +562,7 @@ mtk_foe_entry_commit_subflow(struct mtk_ - { - const struct mtk_soc_data *soc = ppe->eth->soc; - struct mtk_flow_entry *flow_info; -- struct mtk_foe_entry foe, *hwe; -+ struct mtk_foe_entry foe = {}, *hwe; - struct mtk_foe_mac_info *l2; - u32 ib1_mask = MTK_FOE_IB1_PACKET_TYPE | MTK_FOE_IB1_UDP; - int type; -@@ -578,8 +579,8 @@ mtk_foe_entry_commit_subflow(struct mtk_ - &ppe->foe_flow[hash / soc->hash_offset]); - hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows); - -- hwe = &ppe->foe_table[hash]; -- memcpy(&foe, hwe, sizeof(foe)); -+ hwe = mtk_foe_get_entry(ppe, hash); -+ memcpy(&foe, hwe, soc->foe_entry_size); - foe.ib1 &= ib1_mask; - foe.ib1 |= entry->data.ib1 & ~ib1_mask; - -@@ -601,7 +602,7 @@ void __mtk_ppe_check_skb(struct mtk_ppe - { - const struct mtk_soc_data *soc = ppe->eth->soc; - struct hlist_head *head = &ppe->foe_flow[hash / soc->hash_offset]; -- struct mtk_foe_entry *hwe = &ppe->foe_table[hash]; -+ struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, hash); - struct mtk_flow_entry *entry; - struct mtk_foe_bridge key = {}; - struct hlist_node *n; -@@ -686,9 +687,9 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_ - { - const struct mtk_soc_data *soc = eth->soc; - struct device *dev = eth->dev; -- struct 
mtk_foe_entry *foe; - struct mtk_ppe *ppe; - u32 foe_flow_size; -+ void *foe; - - ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL); - if (!ppe) -@@ -704,7 +705,8 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_ - ppe->dev = dev; - ppe->version = version; - -- foe = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*foe), -+ foe = dmam_alloc_coherent(ppe->dev, -+ MTK_PPE_ENTRIES * soc->foe_entry_size, - &ppe->foe_phys, GFP_KERNEL); - if (!foe) - return NULL; -@@ -727,15 +729,21 @@ static void mtk_ppe_init_foe_table(struc - static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 }; - int i, k; - -- memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(*ppe->foe_table)); -+ memset(ppe->foe_table, 0, -+ MTK_PPE_ENTRIES * ppe->eth->soc->foe_entry_size); - - if (!IS_ENABLED(CONFIG_SOC_MT7621)) - return; - - /* skip all entries that cross the 1024 byte boundary */ -- for (i = 0; i < MTK_PPE_ENTRIES; i += 128) -- for (k = 0; k < ARRAY_SIZE(skip); k++) -- ppe->foe_table[i + skip[k]].ib1 |= MTK_FOE_IB1_STATIC; -+ for (i = 0; i < MTK_PPE_ENTRIES; i += 128) { -+ for (k = 0; k < ARRAY_SIZE(skip); k++) { -+ struct mtk_foe_entry *hwe; -+ -+ hwe = mtk_foe_get_entry(ppe, i + skip[k]); -+ hwe->ib1 |= MTK_FOE_IB1_STATIC; -+ } -+ } - } - - void mtk_ppe_start(struct mtk_ppe *ppe) -@@ -822,9 +830,12 @@ int mtk_ppe_stop(struct mtk_ppe *ppe) - if (!ppe) - return 0; - -- for (i = 0; i < MTK_PPE_ENTRIES; i++) -- ppe->foe_table[i].ib1 = FIELD_PREP(MTK_FOE_IB1_STATE, -- MTK_FOE_STATE_INVALID); -+ for (i = 0; i < MTK_PPE_ENTRIES; i++) { -+ struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, i); -+ -+ hwe->ib1 = FIELD_PREP(MTK_FOE_IB1_STATE, -+ MTK_FOE_STATE_INVALID); -+ } - - mtk_ppe_cache_enable(ppe, false); - ---- a/drivers/net/ethernet/mediatek/mtk_ppe.h -+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h -@@ -268,7 +268,7 @@ struct mtk_ppe { - int version; - char dirname[5]; - -- struct mtk_foe_entry *foe_table; -+ void *foe_table; - dma_addr_t foe_phys; - - u16 foe_check_time[MTK_PPE_ENTRIES]; ---- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c -+++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c -@@ -79,7 +79,7 @@ mtk_ppe_debugfs_foe_show(struct seq_file - int i; - - for (i = 0; i < MTK_PPE_ENTRIES; i++) { -- struct mtk_foe_entry *entry = &ppe->foe_table[i]; -+ struct mtk_foe_entry *entry = mtk_foe_get_entry(ppe, i); - struct mtk_foe_mac_info *l2; - struct mtk_flow_addr_info ai = {}; - unsigned char h_source[ETH_ALEN]; diff --git a/target/linux/generic/backport-6.1/718-v6.0-net-ethernet-mtk_eth_soc-fix-typo-in-__mtk_foe_entry.patch b/target/linux/generic/backport-6.1/718-v6.0-net-ethernet-mtk_eth_soc-fix-typo-in-__mtk_foe_entry.patch deleted file mode 100644 index 77daf6c8bfe4..000000000000 --- a/target/linux/generic/backport-6.1/718-v6.0-net-ethernet-mtk_eth_soc-fix-typo-in-__mtk_foe_entry.patch +++ /dev/null @@ -1,27 +0,0 @@ -From 12ff69304c83c679ca01ef3db963ab0db9de19fb Mon Sep 17 00:00:00 2001 -Message-Id: <12ff69304c83c679ca01ef3db963ab0db9de19fb.1662332102.git.lorenzo@kernel.org> -In-Reply-To: <2a60545635c2705312299384f4e9fec2f2a3acd6.1662332102.git.lorenzo@kernel.org> -References: <2a60545635c2705312299384f4e9fec2f2a3acd6.1662332102.git.lorenzo@kernel.org> -From: Lorenzo Bianconi -Date: Mon, 5 Sep 2022 00:43:43 +0200 -Subject: [PATCH net-next 2/6] net: ethernet: mtk_eth_soc: fix typo in - __mtk_foe_entry_clear - -Set ib1 state to MTK_FOE_STATE_UNBIND in __mtk_foe_entry_clear routine. 
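As a rough aid to reading the one-line fix below, here is a standalone C sketch of the FIELD_PREP()-style read-modify-write that the ib1 state update performs. The mask and state values are assumptions chosen only for illustration; the real definitions live in mtk_ppe.h, and this is not code from the patch.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MTK_FOE_IB1_STATE     (0x3u << 28)   /* assumed 2-bit state field */
#define MTK_FOE_STATE_UNBIND  1u
#define MTK_FOE_STATE_BIND    2u

/* minimal FIELD_PREP(): shift a value into the position given by a mask */
static uint32_t field_prep(uint32_t mask, uint32_t val)
{
        return (val * (mask & -mask)) & mask;
}

static void foe_entry_set_state(uint32_t *ib1, uint32_t state)
{
        *ib1 &= ~MTK_FOE_IB1_STATE;                    /* clear the old state bits */
        *ib1 |= field_prep(MTK_FOE_IB1_STATE, state);  /* set the new state */
}

int main(void)
{
        uint32_t ib1 = field_prep(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND);

        foe_entry_set_state(&ib1, MTK_FOE_STATE_UNBIND);
        printf("ib1 = 0x%08" PRIx32 "\n", ib1);
        return 0;
}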
- -Signed-off-by: Lorenzo Bianconi ---- - drivers/net/ethernet/mediatek/mtk_ppe.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/net/ethernet/mediatek/mtk_ppe.c -+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c -@@ -413,7 +413,7 @@ __mtk_foe_entry_clear(struct mtk_ppe *pp - struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, entry->hash); - - hwe->ib1 &= ~MTK_FOE_IB1_STATE; -- hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND); -+ hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_UNBIND); - dma_wmb(); - } - entry->hash = 0xffff; diff --git a/target/linux/generic/backport-6.1/719-v6.0-net-ethernet-mtk_eth_soc-check-max-allowed-value-in-.patch b/target/linux/generic/backport-6.1/719-v6.0-net-ethernet-mtk_eth_soc-check-max-allowed-value-in-.patch deleted file mode 100644 index 7ab6d486b225..000000000000 --- a/target/linux/generic/backport-6.1/719-v6.0-net-ethernet-mtk_eth_soc-check-max-allowed-value-in-.patch +++ /dev/null @@ -1,28 +0,0 @@ -From 4253e6e2b795a18ab534adcd5c313d3fc4150975 Mon Sep 17 00:00:00 2001 -Message-Id: <4253e6e2b795a18ab534adcd5c313d3fc4150975.1662332102.git.lorenzo@kernel.org> -In-Reply-To: <2a60545635c2705312299384f4e9fec2f2a3acd6.1662332102.git.lorenzo@kernel.org> -References: <2a60545635c2705312299384f4e9fec2f2a3acd6.1662332102.git.lorenzo@kernel.org> -From: Lorenzo Bianconi -Date: Mon, 5 Sep 2022 00:48:52 +0200 -Subject: [PATCH net-next 3/6] net: ethernet: mtk_eth_soc: check max allowed - value in mtk_ppe_check_skb - -Check theoretical OOB accesses in mtk_ppe_check_skb routine - -Signed-off-by: Lorenzo Bianconi ---- - drivers/net/ethernet/mediatek/mtk_ppe.h | 3 +++ - 1 file changed, 3 insertions(+) - ---- a/drivers/net/ethernet/mediatek/mtk_ppe.h -+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h -@@ -294,6 +294,9 @@ mtk_ppe_check_skb(struct mtk_ppe *ppe, s - if (!ppe) - return; - -+ if (hash > MTK_PPE_HASH_MASK) -+ return; -+ - now = (u16)jiffies; - diff = now - ppe->foe_check_time[hash]; - if (diff < HZ / 10) diff --git a/target/linux/generic/backport-6.1/720-v6.0-net-ethernet-mtk_eth_wed-add-mtk_wed_configure_irq-a.patch b/target/linux/generic/backport-6.1/720-v6.0-net-ethernet-mtk_eth_wed-add-mtk_wed_configure_irq-a.patch deleted file mode 100644 index a8c88daf1ffc..000000000000 --- a/target/linux/generic/backport-6.1/720-v6.0-net-ethernet-mtk_eth_wed-add-mtk_wed_configure_irq-a.patch +++ /dev/null @@ -1,189 +0,0 @@ -From e5ecb4f619197b93fa682d722452dc8412864cdb Mon Sep 17 00:00:00 2001 -Message-Id: -From: Lorenzo Bianconi -Date: Fri, 26 Aug 2022 01:12:57 +0200 -Subject: [PATCH net-next 1/5] net: ethernet: mtk_eth_wed: add - mtk_wed_configure_irq and mtk_wed_dma_{enable/disable} - -Introduce mtk_wed_configure_irq, mtk_wed_dma_enable and mtk_wed_dma_disable -utility routines. -This is a preliminary patch to introduce mt7986 wed support. 
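The following self-contained sketch shows the shape of that split: start/stop become thin wrappers around the new helpers, which is what the later mt7986 support builds on. Types, register names and bit values here are placeholders invented for the sketch, not the driver's.

#include <stdint.h>
#include <stdio.h>

struct wed_dev {
        int hw_version;
        int running;
};

/* stand-in for wed_w32()/wdma_w32(): just log the register write */
static void reg_write(const char *name, uint32_t val)
{
        printf("%-16s <- 0x%08x\n", name, (unsigned int)val);
}

static void wed_configure_irq(struct wed_dev *dev, uint32_t irq_mask)
{
        reg_write("WED_INT_MASK", irq_mask);
        reg_write("WDMA_INT_MASK", irq_mask);
}

static void wed_dma_enable(struct wed_dev *dev)
{
        reg_write("WED_GLO_CFG", 0x3);          /* TX_DMA_EN | RX_DMA_EN (assumed bits) */
        if (dev->hw_version == 2)               /* v2-only extras slot in here later */
                reg_write("WPDMA_CTRL", 0x1);
}

static void wed_dma_disable(struct wed_dev *dev)
{
        reg_write("WED_GLO_CFG", 0x0);
}

static void wed_start(struct wed_dev *dev, uint32_t irq_mask)
{
        wed_configure_irq(dev, irq_mask);
        wed_dma_enable(dev);
        dev->running = 1;
}

static void wed_stop(struct wed_dev *dev)
{
        wed_dma_disable(dev);
        dev->running = 0;
}

int main(void)
{
        struct wed_dev dev = { .hw_version = 1 };

        wed_start(&dev, 0xffffffffu);
        wed_stop(&dev);
        return 0;
}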
- -Co-developed-by: Bo Jiao -Signed-off-by: Bo Jiao -Co-developed-by: Sujuan Chen -Signed-off-by: Sujuan Chen -Signed-off-by: Lorenzo Bianconi ---- - drivers/net/ethernet/mediatek/mtk_wed.c | 87 +++++++++++++------- - drivers/net/ethernet/mediatek/mtk_wed_regs.h | 6 +- - 2 files changed, 64 insertions(+), 29 deletions(-) - ---- a/drivers/net/ethernet/mediatek/mtk_wed.c -+++ b/drivers/net/ethernet/mediatek/mtk_wed.c -@@ -237,9 +237,30 @@ mtk_wed_set_ext_int(struct mtk_wed_devic - } - - static void --mtk_wed_stop(struct mtk_wed_device *dev) -+mtk_wed_dma_disable(struct mtk_wed_device *dev) - { -+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, -+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | -+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN); -+ -+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); -+ -+ wed_clr(dev, MTK_WED_GLO_CFG, -+ MTK_WED_GLO_CFG_TX_DMA_EN | -+ MTK_WED_GLO_CFG_RX_DMA_EN); -+ - regmap_write(dev->hw->mirror, dev->hw->index * 4, 0); -+ wdma_m32(dev, MTK_WDMA_GLO_CFG, -+ MTK_WDMA_GLO_CFG_TX_DMA_EN | -+ MTK_WDMA_GLO_CFG_RX_INFO1_PRERES | -+ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES | -+ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES, 0); -+} -+ -+static void -+mtk_wed_stop(struct mtk_wed_device *dev) -+{ -+ mtk_wed_dma_disable(dev); - mtk_wed_set_ext_int(dev, false); - - wed_clr(dev, MTK_WED_CTRL, -@@ -252,15 +273,6 @@ mtk_wed_stop(struct mtk_wed_device *dev) - wdma_w32(dev, MTK_WDMA_INT_MASK, 0); - wdma_w32(dev, MTK_WDMA_INT_GRP2, 0); - wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0); -- -- wed_clr(dev, MTK_WED_GLO_CFG, -- MTK_WED_GLO_CFG_TX_DMA_EN | -- MTK_WED_GLO_CFG_RX_DMA_EN); -- wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, -- MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | -- MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN); -- wed_clr(dev, MTK_WED_WDMA_GLO_CFG, -- MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); - } - - static void -@@ -313,7 +325,10 @@ mtk_wed_hw_init_early(struct mtk_wed_dev - MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY; - wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set); - -- wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_INFO_PRERES); -+ wdma_set(dev, MTK_WDMA_GLO_CFG, -+ MTK_WDMA_GLO_CFG_RX_INFO1_PRERES | -+ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES | -+ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES); - - offset = dev->hw->index ? 
0x04000400 : 0; - wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset); -@@ -520,43 +535,38 @@ mtk_wed_wdma_ring_setup(struct mtk_wed_d - } - - static void --mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask) -+mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask) - { -- u32 wdma_mask; -- u32 val; -- int i; -- -- for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++) -- if (!dev->tx_wdma[i].desc) -- mtk_wed_wdma_ring_setup(dev, i, 16); -- -- wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0)); -- -- mtk_wed_hw_init(dev); -+ u32 wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0)); - -+ /* wed control cr set */ - wed_set(dev, MTK_WED_CTRL, - MTK_WED_CTRL_WDMA_INT_AGENT_EN | - MTK_WED_CTRL_WPDMA_INT_AGENT_EN | - MTK_WED_CTRL_WED_TX_BM_EN | - MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); - -- wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, MTK_WED_PCIE_INT_TRIGGER_STATUS); -+ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, -+ MTK_WED_PCIE_INT_TRIGGER_STATUS); - - wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, - MTK_WED_WPDMA_INT_TRIGGER_RX_DONE | - MTK_WED_WPDMA_INT_TRIGGER_TX_DONE); - -- wed_set(dev, MTK_WED_WPDMA_INT_CTRL, -- MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV); -- -+ /* initail wdma interrupt agent */ - wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask); - wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask); - - wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask); - wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask); -- - wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask); - wed_w32(dev, MTK_WED_INT_MASK, irq_mask); -+} -+ -+static void -+mtk_wed_dma_enable(struct mtk_wed_device *dev) -+{ -+ wed_set(dev, MTK_WED_WPDMA_INT_CTRL, MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV); - - wed_set(dev, MTK_WED_GLO_CFG, - MTK_WED_GLO_CFG_TX_DMA_EN | -@@ -567,6 +577,26 @@ mtk_wed_start(struct mtk_wed_device *dev - wed_set(dev, MTK_WED_WDMA_GLO_CFG, - MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); - -+ wdma_set(dev, MTK_WDMA_GLO_CFG, -+ MTK_WDMA_GLO_CFG_TX_DMA_EN | -+ MTK_WDMA_GLO_CFG_RX_INFO1_PRERES | -+ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES | -+ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES); -+} -+ -+static void -+mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask) -+{ -+ u32 val; -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++) -+ if (!dev->tx_wdma[i].desc) -+ mtk_wed_wdma_ring_setup(dev, i, 16); -+ -+ mtk_wed_hw_init(dev); -+ mtk_wed_configure_irq(dev, irq_mask); -+ - mtk_wed_set_ext_int(dev, true); - val = dev->wlan.wpdma_phys | - MTK_PCIE_MIRROR_MAP_EN | -@@ -577,6 +607,7 @@ mtk_wed_start(struct mtk_wed_device *dev - val |= BIT(0); - regmap_write(dev->hw->mirror, dev->hw->index * 4, val); - -+ mtk_wed_dma_enable(dev); - dev->running = true; - } - ---- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h -+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h -@@ -224,7 +224,11 @@ struct mtk_wdma_desc { - #define MTK_WDMA_RING_RX(_n) (0x100 + (_n) * 0x10) - - #define MTK_WDMA_GLO_CFG 0x204 --#define MTK_WDMA_GLO_CFG_RX_INFO_PRERES GENMASK(28, 26) -+#define MTK_WDMA_GLO_CFG_TX_DMA_EN BIT(0) -+#define MTK_WDMA_GLO_CFG_RX_DMA_EN BIT(2) -+#define MTK_WDMA_GLO_CFG_RX_INFO3_PRERES BIT(26) -+#define MTK_WDMA_GLO_CFG_RX_INFO2_PRERES BIT(27) -+#define MTK_WDMA_GLO_CFG_RX_INFO1_PRERES BIT(28) - - #define MTK_WDMA_RESET_IDX 0x208 - #define MTK_WDMA_RESET_IDX_TX GENMASK(3, 0) diff --git a/target/linux/generic/backport-6.1/721-v6.0-net-ethernet-mtk_eth_wed-add-wed-support-for-mt7986-.patch b/target/linux/generic/backport-6.1/721-v6.0-net-ethernet-mtk_eth_wed-add-wed-support-for-mt7986-.patch deleted file mode 100644 index bfca7b20e453..000000000000 --- 
a/target/linux/generic/backport-6.1/721-v6.0-net-ethernet-mtk_eth_wed-add-wed-support-for-mt7986-.patch +++ /dev/null @@ -1,942 +0,0 @@ -From 463a71af080fbc77339bee2037fb1e081e3824f7 Mon Sep 17 00:00:00 2001 -Message-Id: <463a71af080fbc77339bee2037fb1e081e3824f7.1662886034.git.lorenzo@kernel.org> -In-Reply-To: -References: -From: Lorenzo Bianconi -Date: Sat, 27 Aug 2022 16:15:14 +0200 -Subject: [PATCH net-next 2/5] net: ethernet: mtk_eth_wed: add wed support for - mt7986 chipset - -Introduce Wireless Etherne Dispatcher support on transmission side -for mt7986 chipset - -Co-developed-by: Bo Jiao -Signed-off-by: Bo Jiao -Co-developed-by: Sujuan Chen -Signed-off-by: Sujuan Chen -Signed-off-by: Lorenzo Bianconi ---- - drivers/net/ethernet/mediatek/mtk_eth_soc.c | 34 +- - drivers/net/ethernet/mediatek/mtk_wed.c | 371 ++++++++++++++---- - drivers/net/ethernet/mediatek/mtk_wed.h | 8 +- - .../net/ethernet/mediatek/mtk_wed_debugfs.c | 3 + - drivers/net/ethernet/mediatek/mtk_wed_regs.h | 81 +++- - include/linux/soc/mediatek/mtk_wed.h | 8 + - 6 files changed, 408 insertions(+), 97 deletions(-) - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -3895,6 +3895,7 @@ void mtk_eth_set_dma_device(struct mtk_e - - static int mtk_probe(struct platform_device *pdev) - { -+ struct resource *res = NULL; - struct device_node *mac_np; - struct mtk_eth *eth; - int err, i; -@@ -3975,16 +3976,31 @@ static int mtk_probe(struct platform_dev - } - } - -- for (i = 0;; i++) { -- struct device_node *np = of_parse_phandle(pdev->dev.of_node, -- "mediatek,wed", i); -- void __iomem *wdma; -- -- if (!np || i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base)) -- break; -- -- wdma = eth->base + eth->soc->reg_map->wdma_base[i]; -- mtk_wed_add_hw(np, eth, wdma, i); -+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { -+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -+ if (!res) -+ return -EINVAL; -+ } -+ -+ if (eth->soc->offload_version) { -+ for (i = 0;; i++) { -+ struct device_node *np; -+ phys_addr_t wdma_phy; -+ u32 wdma_base; -+ -+ if (i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base)) -+ break; -+ -+ np = of_parse_phandle(pdev->dev.of_node, -+ "mediatek,wed", i); -+ if (!np) -+ break; -+ -+ wdma_base = eth->soc->reg_map->wdma_base[i]; -+ wdma_phy = res ? 
res->start + wdma_base : 0; -+ mtk_wed_add_hw(np, eth, eth->base + wdma_base, -+ wdma_phy, i); -+ } - } - - for (i = 0; i < 3; i++) { ---- a/drivers/net/ethernet/mediatek/mtk_wed.c -+++ b/drivers/net/ethernet/mediatek/mtk_wed.c -@@ -25,6 +25,11 @@ - - #define MTK_WED_TX_RING_SIZE 2048 - #define MTK_WED_WDMA_RING_SIZE 1024 -+#define MTK_WED_MAX_GROUP_SIZE 0x100 -+#define MTK_WED_VLD_GROUP_SIZE 0x40 -+#define MTK_WED_PER_GROUP_PKT 128 -+ -+#define MTK_WED_FBUF_SIZE 128 - - static struct mtk_wed_hw *hw_list[2]; - static DEFINE_MUTEX(hw_lock); -@@ -150,10 +155,17 @@ mtk_wed_buffer_alloc(struct mtk_wed_devi - - desc->buf0 = cpu_to_le32(buf_phys); - desc->buf1 = cpu_to_le32(buf_phys + txd_size); -- ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) | -- FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1, -- MTK_WED_BUF_SIZE - txd_size) | -- MTK_WDMA_DESC_CTRL_LAST_SEG1; -+ -+ if (dev->hw->version == 1) -+ ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) | -+ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1, -+ MTK_WED_BUF_SIZE - txd_size) | -+ MTK_WDMA_DESC_CTRL_LAST_SEG1; -+ else -+ ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) | -+ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2, -+ MTK_WED_BUF_SIZE - txd_size) | -+ MTK_WDMA_DESC_CTRL_LAST_SEG0; - desc->ctrl = cpu_to_le32(ctrl); - desc->info = 0; - desc++; -@@ -209,7 +221,7 @@ mtk_wed_free_ring(struct mtk_wed_device - if (!ring->desc) - return; - -- dma_free_coherent(dev->hw->dev, ring->size * sizeof(*ring->desc), -+ dma_free_coherent(dev->hw->dev, ring->size * ring->desc_size, - ring->desc, ring->desc_phys); - } - -@@ -229,6 +241,14 @@ mtk_wed_set_ext_int(struct mtk_wed_devic - { - u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK; - -+ if (dev->hw->version == 1) -+ mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR; -+ else -+ mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH | -+ MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH | -+ MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT | -+ MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR; -+ - if (!dev->hw->num_flows) - mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; - -@@ -237,6 +257,20 @@ mtk_wed_set_ext_int(struct mtk_wed_devic - } - - static void -+mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable) -+{ -+ if (enable) { -+ wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR); -+ wed_w32(dev, MTK_WED_TXP_DW1, -+ FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0103)); -+ } else { -+ wed_w32(dev, MTK_WED_TXP_DW1, -+ FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0100)); -+ wed_clr(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR); -+ } -+} -+ -+static void - mtk_wed_dma_disable(struct mtk_wed_device *dev) - { - wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, -@@ -249,12 +283,22 @@ mtk_wed_dma_disable(struct mtk_wed_devic - MTK_WED_GLO_CFG_TX_DMA_EN | - MTK_WED_GLO_CFG_RX_DMA_EN); - -- regmap_write(dev->hw->mirror, dev->hw->index * 4, 0); - wdma_m32(dev, MTK_WDMA_GLO_CFG, - MTK_WDMA_GLO_CFG_TX_DMA_EN | - MTK_WDMA_GLO_CFG_RX_INFO1_PRERES | -- MTK_WDMA_GLO_CFG_RX_INFO2_PRERES | -- MTK_WDMA_GLO_CFG_RX_INFO3_PRERES, 0); -+ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES, 0); -+ -+ if (dev->hw->version == 1) { -+ regmap_write(dev->hw->mirror, dev->hw->index * 4, 0); -+ wdma_m32(dev, MTK_WDMA_GLO_CFG, -+ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES, 0); -+ } else { -+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, -+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC | -+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC); -+ -+ mtk_wed_set_512_support(dev, false); -+ } - } - - static void -@@ -293,7 +337,7 @@ mtk_wed_detach(struct mtk_wed_device *de - mtk_wed_free_buffer(dev); - mtk_wed_free_tx_rings(dev); - -- if 
(of_dma_is_coherent(wlan_node)) -+ if (of_dma_is_coherent(wlan_node) && hw->hifsys) - regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, - BIT(hw->index), BIT(hw->index)); - -@@ -308,14 +352,69 @@ mtk_wed_detach(struct mtk_wed_device *de - mutex_unlock(&hw_lock); - } - -+#define PCIE_BASE_ADDR0 0x11280000 -+static void -+mtk_wed_bus_init(struct mtk_wed_device *dev) -+{ -+ struct device_node *np = dev->hw->eth->dev->of_node; -+ struct regmap *regs; -+ u32 val; -+ -+ regs = syscon_regmap_lookup_by_phandle(np, "mediatek,wed-pcie"); -+ if (IS_ERR(regs)) -+ return; -+ -+ regmap_update_bits(regs, 0, BIT(0), BIT(0)); -+ -+ wed_w32(dev, MTK_WED_PCIE_INT_CTRL, -+ FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2)); -+ -+ /* pcie interrupt control: pola/source selection */ -+ wed_set(dev, MTK_WED_PCIE_INT_CTRL, -+ MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA | -+ FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1)); -+ wed_r32(dev, MTK_WED_PCIE_INT_CTRL); -+ -+ val = wed_r32(dev, MTK_WED_PCIE_CFG_INTM); -+ val = wed_r32(dev, MTK_WED_PCIE_CFG_BASE); -+ wed_w32(dev, MTK_WED_PCIE_CFG_INTM, PCIE_BASE_ADDR0 | 0x180); -+ wed_w32(dev, MTK_WED_PCIE_CFG_BASE, PCIE_BASE_ADDR0 | 0x184); -+ -+ val = wed_r32(dev, MTK_WED_PCIE_CFG_INTM); -+ val = wed_r32(dev, MTK_WED_PCIE_CFG_BASE); -+ -+ /* pcie interrupt status trigger register */ -+ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24)); -+ wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER); -+ -+ /* pola setting */ -+ val = wed_r32(dev, MTK_WED_PCIE_INT_CTRL); -+ wed_set(dev, MTK_WED_PCIE_INT_CTRL, MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA); -+} -+ -+static void -+mtk_wed_set_wpdma(struct mtk_wed_device *dev) -+{ -+ if (dev->hw->version == 1) { -+ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys); -+ } else { -+ mtk_wed_bus_init(dev); -+ -+ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int); -+ wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask); -+ wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx); -+ wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree); -+ } -+} -+ - static void - mtk_wed_hw_init_early(struct mtk_wed_device *dev) - { - u32 mask, set; -- u32 offset; - - mtk_wed_stop(dev); - mtk_wed_reset(dev, MTK_WED_RESET_WED); -+ mtk_wed_set_wpdma(dev); - - mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE | - MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE | -@@ -325,17 +424,33 @@ mtk_wed_hw_init_early(struct mtk_wed_dev - MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY; - wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set); - -- wdma_set(dev, MTK_WDMA_GLO_CFG, -- MTK_WDMA_GLO_CFG_RX_INFO1_PRERES | -- MTK_WDMA_GLO_CFG_RX_INFO2_PRERES | -- MTK_WDMA_GLO_CFG_RX_INFO3_PRERES); -- -- offset = dev->hw->index ? 0x04000400 : 0; -- wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset); -- wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset); -+ if (dev->hw->version == 1) { -+ u32 offset = dev->hw->index ? 
0x04000400 : 0; - -- wed_w32(dev, MTK_WED_PCIE_CFG_BASE, MTK_PCIE_BASE(dev->hw->index)); -- wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys); -+ wdma_set(dev, MTK_WDMA_GLO_CFG, -+ MTK_WDMA_GLO_CFG_RX_INFO1_PRERES | -+ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES | -+ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES); -+ -+ wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset); -+ wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset); -+ wed_w32(dev, MTK_WED_PCIE_CFG_BASE, -+ MTK_PCIE_BASE(dev->hw->index)); -+ } else { -+ wed_w32(dev, MTK_WED_WDMA_CFG_BASE, dev->hw->wdma_phy); -+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_ETH_DMAD_FMT); -+ wed_w32(dev, MTK_WED_WDMA_OFFSET0, -+ FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_INTS, -+ MTK_WDMA_INT_STATUS) | -+ FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_CFG, -+ MTK_WDMA_GLO_CFG)); -+ -+ wed_w32(dev, MTK_WED_WDMA_OFFSET1, -+ FIELD_PREP(MTK_WED_WDMA_OFST1_TX_CTRL, -+ MTK_WDMA_RING_TX(0)) | -+ FIELD_PREP(MTK_WED_WDMA_OFST1_RX_CTRL, -+ MTK_WDMA_RING_RX(0))); -+ } - } - - static void -@@ -355,37 +470,65 @@ mtk_wed_hw_init(struct mtk_wed_device *d - - wed_w32(dev, MTK_WED_TX_BM_BASE, dev->buf_ring.desc_phys); - -- wed_w32(dev, MTK_WED_TX_BM_TKID, -- FIELD_PREP(MTK_WED_TX_BM_TKID_START, -- dev->wlan.token_start) | -- FIELD_PREP(MTK_WED_TX_BM_TKID_END, -- dev->wlan.token_start + dev->wlan.nbuf - 1)); -- - wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE); - -- wed_w32(dev, MTK_WED_TX_BM_DYN_THR, -- FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) | -- MTK_WED_TX_BM_DYN_THR_HI); -+ if (dev->hw->version == 1) { -+ wed_w32(dev, MTK_WED_TX_BM_TKID, -+ FIELD_PREP(MTK_WED_TX_BM_TKID_START, -+ dev->wlan.token_start) | -+ FIELD_PREP(MTK_WED_TX_BM_TKID_END, -+ dev->wlan.token_start + -+ dev->wlan.nbuf - 1)); -+ wed_w32(dev, MTK_WED_TX_BM_DYN_THR, -+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) | -+ MTK_WED_TX_BM_DYN_THR_HI); -+ } else { -+ wed_w32(dev, MTK_WED_TX_BM_TKID_V2, -+ FIELD_PREP(MTK_WED_TX_BM_TKID_START, -+ dev->wlan.token_start) | -+ FIELD_PREP(MTK_WED_TX_BM_TKID_END, -+ dev->wlan.token_start + -+ dev->wlan.nbuf - 1)); -+ wed_w32(dev, MTK_WED_TX_BM_DYN_THR, -+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO_V2, 0) | -+ MTK_WED_TX_BM_DYN_THR_HI_V2); -+ wed_w32(dev, MTK_WED_TX_TKID_CTRL, -+ MTK_WED_TX_TKID_CTRL_PAUSE | -+ FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM, -+ dev->buf_ring.size / 128) | -+ FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM, -+ dev->buf_ring.size / 128)); -+ wed_w32(dev, MTK_WED_TX_TKID_DYN_THR, -+ FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) | -+ MTK_WED_TX_TKID_DYN_THR_HI); -+ } - - mtk_wed_reset(dev, MTK_WED_RESET_TX_BM); - -- wed_set(dev, MTK_WED_CTRL, -- MTK_WED_CTRL_WED_TX_BM_EN | -- MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); -+ if (dev->hw->version == 1) -+ wed_set(dev, MTK_WED_CTRL, -+ MTK_WED_CTRL_WED_TX_BM_EN | -+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); -+ else -+ wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE); - - wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE); - } - - static void --mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size) -+mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size) - { -+ void *head = (void *)ring->desc; - int i; - - for (i = 0; i < size; i++) { -- desc[i].buf0 = 0; -- desc[i].ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE); -- desc[i].buf1 = 0; -- desc[i].info = 0; -+ struct mtk_wdma_desc *desc; -+ -+ desc = (struct mtk_wdma_desc *)(head + i * ring->desc_size); -+ desc->buf0 = 0; -+ desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE); -+ desc->buf1 = 0; -+ desc->info = 0; - } - } - -@@ -436,12 +579,10 @@ mtk_wed_reset_dma(struct 
mtk_wed_device - int i; - - for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) { -- struct mtk_wdma_desc *desc = dev->tx_ring[i].desc; -- -- if (!desc) -+ if (!dev->tx_ring[i].desc) - continue; - -- mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE); -+ mtk_wed_ring_reset(&dev->tx_ring[i], MTK_WED_TX_RING_SIZE); - } - - if (mtk_wed_poll_busy(dev)) -@@ -498,16 +639,16 @@ mtk_wed_reset_dma(struct mtk_wed_device - - static int - mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, -- int size) -+ int size, u32 desc_size) - { -- ring->desc = dma_alloc_coherent(dev->hw->dev, -- size * sizeof(*ring->desc), -+ ring->desc = dma_alloc_coherent(dev->hw->dev, size * desc_size, - &ring->desc_phys, GFP_KERNEL); - if (!ring->desc) - return -ENOMEM; - -+ ring->desc_size = desc_size; - ring->size = size; -- mtk_wed_ring_reset(ring->desc, size); -+ mtk_wed_ring_reset(ring, size); - - return 0; - } -@@ -515,9 +656,10 @@ mtk_wed_ring_alloc(struct mtk_wed_device - static int - mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size) - { -+ u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version; - struct mtk_wed_ring *wdma = &dev->tx_wdma[idx]; - -- if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE)) -+ if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, desc_size)) - return -ENOMEM; - - wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE, -@@ -546,16 +688,41 @@ mtk_wed_configure_irq(struct mtk_wed_dev - MTK_WED_CTRL_WED_TX_BM_EN | - MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); - -- wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, -- MTK_WED_PCIE_INT_TRIGGER_STATUS); -+ if (dev->hw->version == 1) { -+ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, -+ MTK_WED_PCIE_INT_TRIGGER_STATUS); -+ -+ wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, -+ MTK_WED_WPDMA_INT_TRIGGER_RX_DONE | -+ MTK_WED_WPDMA_INT_TRIGGER_TX_DONE); - -- wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, -- MTK_WED_WPDMA_INT_TRIGGER_RX_DONE | -- MTK_WED_WPDMA_INT_TRIGGER_TX_DONE); -+ wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask); -+ } else { -+ /* initail tx interrupt trigger */ -+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX, -+ MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN | -+ MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR | -+ MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN | -+ MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR | -+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG, -+ dev->wlan.tx_tbit[0]) | -+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG, -+ dev->wlan.tx_tbit[1])); -+ -+ /* initail txfree interrupt trigger */ -+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX_FREE, -+ MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN | -+ MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR | -+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG, -+ dev->wlan.txfree_tbit)); -+ -+ wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask); -+ wed_set(dev, MTK_WED_WDMA_INT_CTRL, -+ FIELD_PREP(MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL, -+ dev->wdma_idx)); -+ } - -- /* initail wdma interrupt agent */ - wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask); -- wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask); - - wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask); - wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask); -@@ -580,14 +747,28 @@ mtk_wed_dma_enable(struct mtk_wed_device - wdma_set(dev, MTK_WDMA_GLO_CFG, - MTK_WDMA_GLO_CFG_TX_DMA_EN | - MTK_WDMA_GLO_CFG_RX_INFO1_PRERES | -- MTK_WDMA_GLO_CFG_RX_INFO2_PRERES | -- MTK_WDMA_GLO_CFG_RX_INFO3_PRERES); -+ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES); -+ -+ if (dev->hw->version == 1) { -+ wdma_set(dev, MTK_WDMA_GLO_CFG, -+ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES); -+ } else { -+ wed_set(dev, MTK_WED_WPDMA_CTRL, -+ 
MTK_WED_WPDMA_CTRL_SDL1_FIXED); -+ -+ wed_set(dev, MTK_WED_WPDMA_GLO_CFG, -+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC | -+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC); -+ -+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, -+ MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP | -+ MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV); -+ } - } - - static void - mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask) - { -- u32 val; - int i; - - for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++) -@@ -598,14 +779,17 @@ mtk_wed_start(struct mtk_wed_device *dev - mtk_wed_configure_irq(dev, irq_mask); - - mtk_wed_set_ext_int(dev, true); -- val = dev->wlan.wpdma_phys | -- MTK_PCIE_MIRROR_MAP_EN | -- FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID, dev->hw->index); -- -- if (dev->hw->index) -- val |= BIT(1); -- val |= BIT(0); -- regmap_write(dev->hw->mirror, dev->hw->index * 4, val); -+ -+ if (dev->hw->version == 1) { -+ u32 val = dev->wlan.wpdma_phys | MTK_PCIE_MIRROR_MAP_EN | -+ FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID, -+ dev->hw->index); -+ -+ val |= BIT(0) | (BIT(1) * !!dev->hw->index); -+ regmap_write(dev->hw->mirror, dev->hw->index * 4, val); -+ } else { -+ mtk_wed_set_512_support(dev, true); -+ } - - mtk_wed_dma_enable(dev); - dev->running = true; -@@ -639,7 +823,9 @@ mtk_wed_attach(struct mtk_wed_device *de - goto out; - } - -- dev_info(&dev->wlan.pci_dev->dev, "attaching wed device %d\n", hw->index); -+ dev_info(&dev->wlan.pci_dev->dev, -+ "attaching wed device %d version %d\n", -+ hw->index, hw->version); - - dev->hw = hw; - dev->dev = hw->dev; -@@ -657,7 +843,9 @@ mtk_wed_attach(struct mtk_wed_device *de - } - - mtk_wed_hw_init_early(dev); -- regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, BIT(hw->index), 0); -+ if (hw->hifsys) -+ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, -+ BIT(hw->index), 0); - - out: - mutex_unlock(&hw_lock); -@@ -684,7 +872,8 @@ mtk_wed_tx_ring_setup(struct mtk_wed_dev - - BUG_ON(idx >= ARRAY_SIZE(dev->tx_ring)); - -- if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE)) -+ if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE, -+ sizeof(*ring->desc))) - return -ENOMEM; - - if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE)) -@@ -711,21 +900,21 @@ static int - mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs) - { - struct mtk_wed_ring *ring = &dev->txfree_ring; -- int i; -+ int i, index = dev->hw->version == 1; - - /* - * For txfree event handling, the same DMA ring is shared between WED - * and WLAN. 
The WLAN driver accesses the ring index registers through - * WED - */ -- ring->reg_base = MTK_WED_RING_RX(1); -+ ring->reg_base = MTK_WED_RING_RX(index); - ring->wpdma = regs; - - for (i = 0; i < 12; i += 4) { - u32 val = readl(regs + i); - -- wed_w32(dev, MTK_WED_RING_RX(1) + i, val); -- wed_w32(dev, MTK_WED_WPDMA_RING_RX(1) + i, val); -+ wed_w32(dev, MTK_WED_RING_RX(index) + i, val); -+ wed_w32(dev, MTK_WED_WPDMA_RING_RX(index) + i, val); - } - - return 0; -@@ -734,11 +923,19 @@ mtk_wed_txfree_ring_setup(struct mtk_wed - static u32 - mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask) - { -- u32 val; -+ u32 val, ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK; -+ -+ if (dev->hw->version == 1) -+ ext_mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR; -+ else -+ ext_mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH | -+ MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH | -+ MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT | -+ MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR; - - val = wed_r32(dev, MTK_WED_EXT_INT_STATUS); - wed_w32(dev, MTK_WED_EXT_INT_STATUS, val); -- val &= MTK_WED_EXT_INT_STATUS_ERROR_MASK; -+ val &= ext_mask; - if (!dev->hw->num_flows) - val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; - if (val && net_ratelimit()) -@@ -813,7 +1010,8 @@ out: - } - - void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, -- void __iomem *wdma, int index) -+ void __iomem *wdma, phys_addr_t wdma_phy, -+ int index) - { - static const struct mtk_wed_ops wed_ops = { - .attach = mtk_wed_attach, -@@ -860,26 +1058,33 @@ void mtk_wed_add_hw(struct device_node * - hw = kzalloc(sizeof(*hw), GFP_KERNEL); - if (!hw) - goto unlock; -+ - hw->node = np; - hw->regs = regs; - hw->eth = eth; - hw->dev = &pdev->dev; -+ hw->wdma_phy = wdma_phy; - hw->wdma = wdma; - hw->index = index; - hw->irq = irq; -- hw->mirror = syscon_regmap_lookup_by_phandle(eth_np, -- "mediatek,pcie-mirror"); -- hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np, -- "mediatek,hifsys"); -- if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) { -- kfree(hw); -- goto unlock; -- } -+ hw->version = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 
2 : 1; - -- if (!index) { -- regmap_write(hw->mirror, 0, 0); -- regmap_write(hw->mirror, 4, 0); -+ if (hw->version == 1) { -+ hw->mirror = syscon_regmap_lookup_by_phandle(eth_np, -+ "mediatek,pcie-mirror"); -+ hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np, -+ "mediatek,hifsys"); -+ if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) { -+ kfree(hw); -+ goto unlock; -+ } -+ -+ if (!index) { -+ regmap_write(hw->mirror, 0, 0); -+ regmap_write(hw->mirror, 4, 0); -+ } - } -+ - mtk_wed_hw_add_debugfs(hw); - - hw_list[index] = hw; ---- a/drivers/net/ethernet/mediatek/mtk_wed.h -+++ b/drivers/net/ethernet/mediatek/mtk_wed.h -@@ -18,11 +18,13 @@ struct mtk_wed_hw { - struct regmap *hifsys; - struct device *dev; - void __iomem *wdma; -+ phys_addr_t wdma_phy; - struct regmap *mirror; - struct dentry *debugfs_dir; - struct mtk_wed_device *wed_dev; - u32 debugfs_reg; - u32 num_flows; -+ u8 version; - char dirname[5]; - int irq; - int index; -@@ -101,14 +103,16 @@ wpdma_txfree_w32(struct mtk_wed_device * - } - - void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, -- void __iomem *wdma, int index); -+ void __iomem *wdma, phys_addr_t wdma_phy, -+ int index); - void mtk_wed_exit(void); - int mtk_wed_flow_add(int index); - void mtk_wed_flow_remove(int index); - #else - static inline void - mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, -- void __iomem *wdma, int index) -+ void __iomem *wdma, phys_addr_t wdma_phy, -+ int index) - { - } - static inline void ---- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c -+++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c -@@ -116,6 +116,9 @@ wed_txinfo_show(struct seq_file *s, void - DUMP_WDMA(WDMA_GLO_CFG), - DUMP_WDMA_RING(WDMA_RING_RX(0)), - DUMP_WDMA_RING(WDMA_RING_RX(1)), -+ -+ DUMP_STR("TX FREE"), -+ DUMP_WED(WED_RX_MIB(0)), - }; - struct mtk_wed_hw *hw = s->private; - struct mtk_wed_device *dev = hw->wed_dev; ---- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h -+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h -@@ -5,6 +5,7 @@ - #define __MTK_WED_REGS_H - - #define MTK_WDMA_DESC_CTRL_LEN1 GENMASK(14, 0) -+#define MTK_WDMA_DESC_CTRL_LEN1_V2 GENMASK(13, 0) - #define MTK_WDMA_DESC_CTRL_LAST_SEG1 BIT(15) - #define MTK_WDMA_DESC_CTRL_BURST BIT(16) - #define MTK_WDMA_DESC_CTRL_LEN0 GENMASK(29, 16) -@@ -41,6 +42,7 @@ struct mtk_wdma_desc { - #define MTK_WED_CTRL_RESERVE_EN BIT(12) - #define MTK_WED_CTRL_RESERVE_BUSY BIT(13) - #define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24) -+#define MTK_WED_CTRL_ETH_DMAD_FMT BIT(25) - #define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28) - - #define MTK_WED_EXT_INT_STATUS 0x020 -@@ -57,7 +59,8 @@ struct mtk_wdma_desc { - #define MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN BIT(19) - #define MTK_WED_EXT_INT_STATUS_RX_DRV_BM_DMAD_COHERENT BIT(20) - #define MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR BIT(21) --#define MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR BIT(22) -+#define MTK_WED_EXT_INT_STATUS_TX_DMA_R_RESP_ERR BIT(22) -+#define MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR BIT(23) - #define MTK_WED_EXT_INT_STATUS_RX_DRV_DMA_RECYCLE BIT(24) - #define MTK_WED_EXT_INT_STATUS_ERROR_MASK (MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \ - MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \ -@@ -65,8 +68,7 @@ struct mtk_wdma_desc { - MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR | \ - MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR | \ - MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN | \ -- MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR | \ -- MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR) -+ MTK_WED_EXT_INT_STATUS_TX_DMA_R_RESP_ERR) - - #define MTK_WED_EXT_INT_MASK 0x028 - -@@ 
-81,6 +83,7 @@ struct mtk_wdma_desc { - #define MTK_WED_TX_BM_BASE 0x084 - - #define MTK_WED_TX_BM_TKID 0x088 -+#define MTK_WED_TX_BM_TKID_V2 0x0c8 - #define MTK_WED_TX_BM_TKID_START GENMASK(15, 0) - #define MTK_WED_TX_BM_TKID_END GENMASK(31, 16) - -@@ -94,7 +97,25 @@ struct mtk_wdma_desc { - - #define MTK_WED_TX_BM_DYN_THR 0x0a0 - #define MTK_WED_TX_BM_DYN_THR_LO GENMASK(6, 0) -+#define MTK_WED_TX_BM_DYN_THR_LO_V2 GENMASK(8, 0) - #define MTK_WED_TX_BM_DYN_THR_HI GENMASK(22, 16) -+#define MTK_WED_TX_BM_DYN_THR_HI_V2 GENMASK(24, 16) -+ -+#define MTK_WED_TX_TKID_CTRL 0x0c0 -+#define MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM GENMASK(6, 0) -+#define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM GENMASK(22, 16) -+#define MTK_WED_TX_TKID_CTRL_PAUSE BIT(28) -+ -+#define MTK_WED_TX_TKID_DYN_THR 0x0e0 -+#define MTK_WED_TX_TKID_DYN_THR_LO GENMASK(6, 0) -+#define MTK_WED_TX_TKID_DYN_THR_HI GENMASK(22, 16) -+ -+#define MTK_WED_TXP_DW0 0x120 -+#define MTK_WED_TXP_DW1 0x124 -+#define MTK_WED_WPDMA_WRITE_TXP GENMASK(31, 16) -+#define MTK_WED_TXDP_CTRL 0x130 -+#define MTK_WED_TXDP_DW9_OVERWR BIT(9) -+#define MTK_WED_RX_BM_TKID_MIB 0x1cc - - #define MTK_WED_INT_STATUS 0x200 - #define MTK_WED_INT_MASK 0x204 -@@ -125,6 +146,7 @@ struct mtk_wdma_desc { - #define MTK_WED_RESET_IDX_RX GENMASK(17, 16) - - #define MTK_WED_TX_MIB(_n) (0x2a0 + (_n) * 4) -+#define MTK_WED_RX_MIB(_n) (0x2e0 + (_n) * 4) - - #define MTK_WED_RING_TX(_n) (0x300 + (_n) * 0x10) - -@@ -155,21 +177,62 @@ struct mtk_wdma_desc { - #define MTK_WED_WPDMA_GLO_CFG_BYTE_SWAP BIT(29) - #define MTK_WED_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31) - -+/* CONFIG_MEDIATEK_NETSYS_V2 */ -+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC BIT(4) -+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_PKT_PROC BIT(5) -+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC BIT(6) -+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_CRX_SYNC BIT(7) -+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_VER GENMASK(18, 16) -+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNSUPPORT_FMT BIT(19) -+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UEVENT_PKT_FMT_CHK BIT(20) -+#define MTK_WED_WPDMA_GLO_CFG_RX_DDONE2_WR BIT(21) -+#define MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP BIT(24) -+#define MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV BIT(28) -+ - #define MTK_WED_WPDMA_RESET_IDX 0x50c - #define MTK_WED_WPDMA_RESET_IDX_TX GENMASK(3, 0) - #define MTK_WED_WPDMA_RESET_IDX_RX GENMASK(17, 16) - -+#define MTK_WED_WPDMA_CTRL 0x518 -+#define MTK_WED_WPDMA_CTRL_SDL1_FIXED BIT(31) -+ - #define MTK_WED_WPDMA_INT_CTRL 0x520 - #define MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV BIT(21) - - #define MTK_WED_WPDMA_INT_MASK 0x524 - -+#define MTK_WED_WPDMA_INT_CTRL_TX 0x530 -+#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN BIT(0) -+#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR BIT(1) -+#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG GENMASK(6, 2) -+#define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN BIT(8) -+#define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR BIT(9) -+#define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG GENMASK(14, 10) -+ -+#define MTK_WED_WPDMA_INT_CTRL_RX 0x534 -+ -+#define MTK_WED_WPDMA_INT_CTRL_TX_FREE 0x538 -+#define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN BIT(0) -+#define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR BIT(1) -+#define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG GENMASK(6, 2) -+ - #define MTK_WED_PCIE_CFG_BASE 0x560 - -+#define MTK_WED_PCIE_CFG_BASE 0x560 -+#define MTK_WED_PCIE_CFG_INTM 0x564 -+#define MTK_WED_PCIE_CFG_MSIS 0x568 - #define MTK_WED_PCIE_INT_TRIGGER 0x570 - #define MTK_WED_PCIE_INT_TRIGGER_STATUS BIT(16) - -+#define MTK_WED_PCIE_INT_CTRL 0x57c -+#define 
MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA BIT(20) -+#define MTK_WED_PCIE_INT_CTRL_SRC_SEL GENMASK(17, 16) -+#define MTK_WED_PCIE_INT_CTRL_POLL_EN GENMASK(13, 12) -+ - #define MTK_WED_WPDMA_CFG_BASE 0x580 -+#define MTK_WED_WPDMA_CFG_INT_MASK 0x584 -+#define MTK_WED_WPDMA_CFG_TX 0x588 -+#define MTK_WED_WPDMA_CFG_TX_FREE 0x58c - - #define MTK_WED_WPDMA_TX_MIB(_n) (0x5a0 + (_n) * 4) - #define MTK_WED_WPDMA_TX_COHERENT_MIB(_n) (0x5d0 + (_n) * 4) -@@ -203,15 +266,24 @@ struct mtk_wdma_desc { - #define MTK_WED_WDMA_RESET_IDX_RX GENMASK(17, 16) - #define MTK_WED_WDMA_RESET_IDX_DRV GENMASK(25, 24) - -+#define MTK_WED_WDMA_INT_CLR 0xa24 -+#define MTK_WED_WDMA_INT_CLR_RX_DONE GENMASK(17, 16) -+ - #define MTK_WED_WDMA_INT_TRIGGER 0xa28 - #define MTK_WED_WDMA_INT_TRIGGER_RX_DONE GENMASK(17, 16) - - #define MTK_WED_WDMA_INT_CTRL 0xa2c - #define MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL GENMASK(17, 16) - -+#define MTK_WED_WDMA_CFG_BASE 0xaa0 - #define MTK_WED_WDMA_OFFSET0 0xaa4 - #define MTK_WED_WDMA_OFFSET1 0xaa8 - -+#define MTK_WED_WDMA_OFST0_GLO_INTS GENMASK(15, 0) -+#define MTK_WED_WDMA_OFST0_GLO_CFG GENMASK(31, 16) -+#define MTK_WED_WDMA_OFST1_TX_CTRL GENMASK(15, 0) -+#define MTK_WED_WDMA_OFST1_RX_CTRL GENMASK(31, 16) -+ - #define MTK_WED_WDMA_RX_MIB(_n) (0xae0 + (_n) * 4) - #define MTK_WED_WDMA_RX_RECYCLE_MIB(_n) (0xae8 + (_n) * 4) - #define MTK_WED_WDMA_RX_PROCESSED_MIB(_n) (0xaf0 + (_n) * 4) -@@ -221,6 +293,7 @@ struct mtk_wdma_desc { - #define MTK_WED_RING_OFS_CPU_IDX 0x08 - #define MTK_WED_RING_OFS_DMA_IDX 0x0c - -+#define MTK_WDMA_RING_TX(_n) (0x000 + (_n) * 0x10) - #define MTK_WDMA_RING_RX(_n) (0x100 + (_n) * 0x10) - - #define MTK_WDMA_GLO_CFG 0x204 -@@ -234,6 +307,8 @@ struct mtk_wdma_desc { - #define MTK_WDMA_RESET_IDX_TX GENMASK(3, 0) - #define MTK_WDMA_RESET_IDX_RX GENMASK(17, 16) - -+#define MTK_WDMA_INT_STATUS 0x220 -+ - #define MTK_WDMA_INT_MASK 0x228 - #define MTK_WDMA_INT_MASK_TX_DONE GENMASK(3, 0) - #define MTK_WDMA_INT_MASK_RX_DONE GENMASK(17, 16) ---- a/include/linux/soc/mediatek/mtk_wed.h -+++ b/include/linux/soc/mediatek/mtk_wed.h -@@ -14,6 +14,7 @@ struct mtk_wdma_desc; - struct mtk_wed_ring { - struct mtk_wdma_desc *desc; - dma_addr_t desc_phys; -+ u32 desc_size; - int size; - - u32 reg_base; -@@ -45,10 +46,17 @@ struct mtk_wed_device { - struct pci_dev *pci_dev; - - u32 wpdma_phys; -+ u32 wpdma_int; -+ u32 wpdma_mask; -+ u32 wpdma_tx; -+ u32 wpdma_txfree; - - u16 token_start; - unsigned int nbuf; - -+ u8 tx_tbit[MTK_WED_TX_QUEUES]; -+ u8 txfree_tbit; -+ - u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id); - int (*offload_enable)(struct mtk_wed_device *wed); - void (*offload_disable)(struct mtk_wed_device *wed); diff --git a/target/linux/generic/backport-6.1/722-v6.0-net-ethernet-mtk_eth_wed-add-axi-bus-support.patch b/target/linux/generic/backport-6.1/722-v6.0-net-ethernet-mtk_eth_wed-add-axi-bus-support.patch deleted file mode 100644 index 70b7bad31f2d..000000000000 --- a/target/linux/generic/backport-6.1/722-v6.0-net-ethernet-mtk_eth_wed-add-axi-bus-support.patch +++ /dev/null @@ -1,237 +0,0 @@ -From 6e1df49f330dce7c58a39d6772f1385b6887bb03 Mon Sep 17 00:00:00 2001 -Message-Id: <6e1df49f330dce7c58a39d6772f1385b6887bb03.1662990860.git.lorenzo@kernel.org> -From: Lorenzo Bianconi -Date: Thu, 8 Sep 2022 11:26:10 +0200 -Subject: [PATCH net-next] net: ethernet: mtk_eth_wed: add axi bus support - -Other than pcie bus, introduce support for axi bus to mtk wed driver. -Axi bus is used to connect mt7986-wmac soc chip available on mt7986 -device. 
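As a loose model of what "axi bus support" amounts to in the diff that follows, the WLAN side carries a bus-type tag plus a union of device handles, and the bus init path switches on it instead of assuming PCIe. This is an illustration with simplified stand-in types, not code from the patch.

#include <stdio.h>

enum wed_bus_type {
        WED_BUS_PCIE,
        WED_BUS_AXI,
};

struct wed_wlan {
        enum wed_bus_type bus_type;
        union {
                const char *pci_slot;           /* stand-in for struct pci_dev * */
                const char *platform_name;      /* stand-in for struct platform_device * */
        };
};

static void wed_bus_init(const struct wed_wlan *wlan)
{
        switch (wlan->bus_type) {
        case WED_BUS_PCIE:
                printf("set up PCIe interrupt mirroring for %s\n", wlan->pci_slot);
                break;
        case WED_BUS_AXI:
                printf("route WPDMA interrupt source for %s\n", wlan->platform_name);
                break;
        }
}

int main(void)
{
        struct wed_wlan axi = { .bus_type = WED_BUS_AXI, .platform_name = "mt7986-wmac" };

        wed_bus_init(&axi);
        return 0;
}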
- -Co-developed-by: Bo Jiao -Signed-off-by: Bo Jiao -Co-developed-by: Sujuan Chen -Signed-off-by: Sujuan Chen -Signed-off-by: Lorenzo Bianconi ---- - drivers/net/ethernet/mediatek/mtk_wed.c | 116 +++++++++++++------ - drivers/net/ethernet/mediatek/mtk_wed_regs.h | 2 + - include/linux/soc/mediatek/mtk_wed.h | 11 +- - 3 files changed, 91 insertions(+), 38 deletions(-) - ---- a/drivers/net/ethernet/mediatek/mtk_wed.c -+++ b/drivers/net/ethernet/mediatek/mtk_wed.c -@@ -85,11 +85,31 @@ static struct mtk_wed_hw * - mtk_wed_assign(struct mtk_wed_device *dev) - { - struct mtk_wed_hw *hw; -+ int i; -+ -+ if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) { -+ hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)]; -+ if (!hw) -+ return NULL; -+ -+ if (!hw->wed_dev) -+ goto out; -+ -+ if (hw->version == 1) -+ return NULL; -+ -+ /* MT7986 WED devices do not have any pcie slot restrictions */ -+ } -+ /* MT7986 PCIE or AXI */ -+ for (i = 0; i < ARRAY_SIZE(hw_list); i++) { -+ hw = hw_list[i]; -+ if (hw && !hw->wed_dev) -+ goto out; -+ } - -- hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)]; -- if (!hw || hw->wed_dev) -- return NULL; -+ return NULL; - -+out: - hw->wed_dev = dev; - return hw; - } -@@ -322,7 +342,6 @@ mtk_wed_stop(struct mtk_wed_device *dev) - static void - mtk_wed_detach(struct mtk_wed_device *dev) - { -- struct device_node *wlan_node = dev->wlan.pci_dev->dev.of_node; - struct mtk_wed_hw *hw = dev->hw; - - mutex_lock(&hw_lock); -@@ -337,9 +356,14 @@ mtk_wed_detach(struct mtk_wed_device *de - mtk_wed_free_buffer(dev); - mtk_wed_free_tx_rings(dev); - -- if (of_dma_is_coherent(wlan_node) && hw->hifsys) -- regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, -- BIT(hw->index), BIT(hw->index)); -+ if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) { -+ struct device_node *wlan_node; -+ -+ wlan_node = dev->wlan.pci_dev->dev.of_node; -+ if (of_dma_is_coherent(wlan_node) && hw->hifsys) -+ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, -+ BIT(hw->index), BIT(hw->index)); -+ } - - if (!hw_list[!hw->index]->wed_dev && - hw->eth->dma_dev != hw->eth->dev) -@@ -356,40 +380,54 @@ mtk_wed_detach(struct mtk_wed_device *de - static void - mtk_wed_bus_init(struct mtk_wed_device *dev) - { -- struct device_node *np = dev->hw->eth->dev->of_node; -- struct regmap *regs; -- u32 val; -- -- regs = syscon_regmap_lookup_by_phandle(np, "mediatek,wed-pcie"); -- if (IS_ERR(regs)) -- return; -+ switch (dev->wlan.bus_type) { -+ case MTK_WED_BUS_PCIE: { -+ struct device_node *np = dev->hw->eth->dev->of_node; -+ struct regmap *regs; -+ u32 val; -+ -+ regs = syscon_regmap_lookup_by_phandle(np, -+ "mediatek,wed-pcie"); -+ if (IS_ERR(regs)) -+ break; - -- regmap_update_bits(regs, 0, BIT(0), BIT(0)); -+ regmap_update_bits(regs, 0, BIT(0), BIT(0)); - -- wed_w32(dev, MTK_WED_PCIE_INT_CTRL, -- FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2)); -+ wed_w32(dev, MTK_WED_PCIE_INT_CTRL, -+ FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2)); - -- /* pcie interrupt control: pola/source selection */ -- wed_set(dev, MTK_WED_PCIE_INT_CTRL, -- MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA | -- FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1)); -- wed_r32(dev, MTK_WED_PCIE_INT_CTRL); -- -- val = wed_r32(dev, MTK_WED_PCIE_CFG_INTM); -- val = wed_r32(dev, MTK_WED_PCIE_CFG_BASE); -- wed_w32(dev, MTK_WED_PCIE_CFG_INTM, PCIE_BASE_ADDR0 | 0x180); -- wed_w32(dev, MTK_WED_PCIE_CFG_BASE, PCIE_BASE_ADDR0 | 0x184); -- -- val = wed_r32(dev, MTK_WED_PCIE_CFG_INTM); -- val = wed_r32(dev, MTK_WED_PCIE_CFG_BASE); -- -- /* pcie interrupt status trigger register */ -- wed_w32(dev, 
MTK_WED_PCIE_INT_TRIGGER, BIT(24)); -- wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER); -- -- /* pola setting */ -- val = wed_r32(dev, MTK_WED_PCIE_INT_CTRL); -- wed_set(dev, MTK_WED_PCIE_INT_CTRL, MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA); -+ /* pcie interrupt control: pola/source selection */ -+ wed_set(dev, MTK_WED_PCIE_INT_CTRL, -+ MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA | -+ FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1)); -+ wed_r32(dev, MTK_WED_PCIE_INT_CTRL); -+ -+ val = wed_r32(dev, MTK_WED_PCIE_CFG_INTM); -+ val = wed_r32(dev, MTK_WED_PCIE_CFG_BASE); -+ wed_w32(dev, MTK_WED_PCIE_CFG_INTM, PCIE_BASE_ADDR0 | 0x180); -+ wed_w32(dev, MTK_WED_PCIE_CFG_BASE, PCIE_BASE_ADDR0 | 0x184); -+ -+ val = wed_r32(dev, MTK_WED_PCIE_CFG_INTM); -+ val = wed_r32(dev, MTK_WED_PCIE_CFG_BASE); -+ -+ /* pcie interrupt status trigger register */ -+ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24)); -+ wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER); -+ -+ /* pola setting */ -+ val = wed_r32(dev, MTK_WED_PCIE_INT_CTRL); -+ wed_set(dev, MTK_WED_PCIE_INT_CTRL, -+ MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA); -+ break; -+ } -+ case MTK_WED_BUS_AXI: -+ wed_set(dev, MTK_WED_WPDMA_INT_CTRL, -+ MTK_WED_WPDMA_INT_CTRL_SIG_SRC | -+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_SRC_SEL, 0)); -+ break; -+ default: -+ break; -+ } - } - - static void -@@ -800,12 +838,14 @@ mtk_wed_attach(struct mtk_wed_device *de - __releases(RCU) - { - struct mtk_wed_hw *hw; -+ struct device *device; - int ret = 0; - - RCU_LOCKDEP_WARN(!rcu_read_lock_held(), - "mtk_wed_attach without holding the RCU read lock"); - -- if (pci_domain_nr(dev->wlan.pci_dev->bus) > 1 || -+ if ((dev->wlan.bus_type == MTK_WED_BUS_PCIE && -+ pci_domain_nr(dev->wlan.pci_dev->bus) > 1) || - !try_module_get(THIS_MODULE)) - ret = -ENODEV; - -@@ -823,8 +863,10 @@ mtk_wed_attach(struct mtk_wed_device *de - goto out; - } - -- dev_info(&dev->wlan.pci_dev->dev, -- "attaching wed device %d version %d\n", -+ device = dev->wlan.bus_type == MTK_WED_BUS_PCIE -+ ? 
&dev->wlan.pci_dev->dev -+ : &dev->wlan.platform_dev->dev; -+ dev_info(device, "attaching wed device %d version %d\n", - hw->index, hw->version); - - dev->hw = hw; ---- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h -+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h -@@ -198,6 +198,8 @@ struct mtk_wdma_desc { - - #define MTK_WED_WPDMA_INT_CTRL 0x520 - #define MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV BIT(21) -+#define MTK_WED_WPDMA_INT_CTRL_SIG_SRC BIT(22) -+#define MTK_WED_WPDMA_INT_CTRL_SRC_SEL GENMASK(17, 16) - - #define MTK_WED_WPDMA_INT_MASK 0x524 - ---- a/include/linux/soc/mediatek/mtk_wed.h -+++ b/include/linux/soc/mediatek/mtk_wed.h -@@ -11,6 +11,11 @@ - struct mtk_wed_hw; - struct mtk_wdma_desc; - -+enum mtk_wed_bus_tye { -+ MTK_WED_BUS_PCIE, -+ MTK_WED_BUS_AXI, -+}; -+ - struct mtk_wed_ring { - struct mtk_wdma_desc *desc; - dma_addr_t desc_phys; -@@ -43,7 +48,11 @@ struct mtk_wed_device { - - /* filled by driver: */ - struct { -- struct pci_dev *pci_dev; -+ union { -+ struct platform_device *platform_dev; -+ struct pci_dev *pci_dev; -+ }; -+ enum mtk_wed_bus_tye bus_type; - - u32 wpdma_phys; - u32 wpdma_int; diff --git a/target/linux/generic/backport-6.1/723-v6.0-net-ethernet-mtk_eth_soc-introduce-flow-offloading-s.patch b/target/linux/generic/backport-6.1/723-v6.0-net-ethernet-mtk_eth_soc-introduce-flow-offloading-s.patch deleted file mode 100644 index fedcb6ccd899..000000000000 --- a/target/linux/generic/backport-6.1/723-v6.0-net-ethernet-mtk_eth_soc-introduce-flow-offloading-s.patch +++ /dev/null @@ -1,882 +0,0 @@ -From 93408c858e5dc01d97c55efa721268f63fde2ae5 Mon Sep 17 00:00:00 2001 -Message-Id: <93408c858e5dc01d97c55efa721268f63fde2ae5.1662886034.git.lorenzo@kernel.org> -In-Reply-To: -References: -From: Lorenzo Bianconi -Date: Sat, 3 Sep 2022 18:34:09 +0200 -Subject: [PATCH net-next 4/5] net: ethernet: mtk_eth_soc: introduce flow - offloading support for mt7986 - -Introduce hw flow offload support for mt7986 chipset. PPE is not enabled -yet in mt7986 since mt76 support is not available yet. 
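The bulk of the diff below follows one pattern: the v2 PPE keeps the same logical ib1/ib2 fields but at different bit positions, so direct FIELD_GET()/FIELD_PREP() uses are replaced by small accessors that pick the right mask per SoC generation. A standalone sketch of that pattern, with placeholder masks (the real ones are in mtk_eth_soc.h and mtk_ppe.h), looks like this:

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IB1_PKT_TYPE_V1   (0x7u << 27)  /* assumed v1 position */
#define IB1_PKT_TYPE_V2   (0x7u << 28)  /* assumed v2 position */

struct eth_soc {
        bool netsys_v2;         /* stand-in for MTK_HAS_CAPS(..., MTK_NETSYS_V2) */
};

static uint32_t ib1_pkt_type_mask(const struct eth_soc *soc)
{
        return soc->netsys_v2 ? IB1_PKT_TYPE_V2 : IB1_PKT_TYPE_V1;
}

static uint32_t ib1_pkt_type(const struct eth_soc *soc, uint32_t ib1)
{
        uint32_t mask = ib1_pkt_type_mask(soc);

        return (ib1 & mask) / (mask & -mask);   /* FIELD_GET()-style extract */
}

int main(void)
{
        struct eth_soc v2 = { .netsys_v2 = true };
        uint32_t ib1 = 0x5u << 28;              /* packet type 5 in the assumed v2 layout */

        printf("pkt type = %" PRIu32 "\n", ib1_pkt_type(&v2, ib1));
        return 0;
}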
- -Co-developed-by: Bo Jiao -Signed-off-by: Bo Jiao -Co-developed-by: Sujuan Chen -Signed-off-by: Sujuan Chen -Signed-off-by: Lorenzo Bianconi ---- - drivers/net/ethernet/mediatek/mtk_eth_soc.c | 11 +- - drivers/net/ethernet/mediatek/mtk_eth_soc.h | 72 ++++++ - drivers/net/ethernet/mediatek/mtk_ppe.c | 213 +++++++++++------- - drivers/net/ethernet/mediatek/mtk_ppe.h | 52 ++++- - .../net/ethernet/mediatek/mtk_ppe_offload.c | 49 ++-- - drivers/net/ethernet/mediatek/mtk_ppe_regs.h | 8 + - 6 files changed, 289 insertions(+), 116 deletions(-) - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -1859,12 +1859,14 @@ static int mtk_poll_rx(struct napi_struc - bytes += skb->len; - - if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { -+ reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5); - hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY; - if (hash != MTK_RXD5_FOE_ENTRY) - skb_set_hash(skb, jhash_1word(hash, 0), - PKT_HASH_TYPE_L4); - rxdcsum = &trxd.rxd3; - } else { -+ reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4); - hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY; - if (hash != MTK_RXD4_FOE_ENTRY) - skb_set_hash(skb, jhash_1word(hash, 0), -@@ -1878,7 +1880,6 @@ static int mtk_poll_rx(struct napi_struc - skb_checksum_none_assert(skb); - skb->protocol = eth_type_trans(skb, netdev); - -- reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4); - if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED) - mtk_ppe_check_skb(eth->ppe[0], skb, hash); - -@@ -4184,7 +4185,7 @@ static const struct mtk_soc_data mt7621_ - .required_pctl = false, - .offload_version = 2, - .hash_offset = 2, -- .foe_entry_size = sizeof(struct mtk_foe_entry), -+ .foe_entry_size = sizeof(struct mtk_foe_entry) - 16, - .txrx = { - .txd_size = sizeof(struct mtk_tx_dma), - .rxd_size = sizeof(struct mtk_rx_dma), -@@ -4204,7 +4205,7 @@ static const struct mtk_soc_data mt7622_ - .required_pctl = false, - .offload_version = 2, - .hash_offset = 2, -- .foe_entry_size = sizeof(struct mtk_foe_entry), -+ .foe_entry_size = sizeof(struct mtk_foe_entry) - 16, - .txrx = { - .txd_size = sizeof(struct mtk_tx_dma), - .rxd_size = sizeof(struct mtk_rx_dma), -@@ -4223,7 +4224,7 @@ static const struct mtk_soc_data mt7623_ - .required_pctl = true, - .offload_version = 2, - .hash_offset = 2, -- .foe_entry_size = sizeof(struct mtk_foe_entry), -+ .foe_entry_size = sizeof(struct mtk_foe_entry) - 16, - .txrx = { - .txd_size = sizeof(struct mtk_tx_dma), - .rxd_size = sizeof(struct mtk_rx_dma), -@@ -4255,9 +4256,11 @@ static const struct mtk_soc_data mt7986_ - .reg_map = &mt7986_reg_map, - .ana_rgc3 = 0x128, - .caps = MT7986_CAPS, -+ .hw_features = MTK_HW_FEATURES, - .required_clks = MT7986_CLKS_BITMAP, - .required_pctl = false, - .hash_offset = 4, -+ .foe_entry_size = sizeof(struct mtk_foe_entry), - .txrx = { - .txd_size = sizeof(struct mtk_tx_dma_v2), - .rxd_size = sizeof(struct mtk_rx_dma_v2), ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -1151,6 +1151,78 @@ mtk_foe_get_entry(struct mtk_ppe *ppe, u - return ppe->foe_table + hash * soc->foe_entry_size; - } - -+static inline u32 mtk_get_ib1_ts_mask(struct mtk_eth *eth) -+{ -+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) -+ return MTK_FOE_IB1_BIND_TIMESTAMP_V2; -+ -+ return MTK_FOE_IB1_BIND_TIMESTAMP; -+} -+ -+static inline u32 mtk_get_ib1_ppoe_mask(struct mtk_eth *eth) -+{ -+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) -+ return MTK_FOE_IB1_BIND_PPPOE_V2; -+ -+ return MTK_FOE_IB1_BIND_PPPOE; -+} 
-+ -+static inline u32 mtk_get_ib1_vlan_tag_mask(struct mtk_eth *eth) -+{ -+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) -+ return MTK_FOE_IB1_BIND_VLAN_TAG_V2; -+ -+ return MTK_FOE_IB1_BIND_VLAN_TAG; -+} -+ -+static inline u32 mtk_get_ib1_vlan_layer_mask(struct mtk_eth *eth) -+{ -+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) -+ return MTK_FOE_IB1_BIND_VLAN_LAYER_V2; -+ -+ return MTK_FOE_IB1_BIND_VLAN_LAYER; -+} -+ -+static inline u32 mtk_prep_ib1_vlan_layer(struct mtk_eth *eth, u32 val) -+{ -+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) -+ return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER_V2, val); -+ -+ return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, val); -+} -+ -+static inline u32 mtk_get_ib1_vlan_layer(struct mtk_eth *eth, u32 val) -+{ -+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) -+ return FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER_V2, val); -+ -+ return FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, val); -+} -+ -+static inline u32 mtk_get_ib1_pkt_type_mask(struct mtk_eth *eth) -+{ -+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) -+ return MTK_FOE_IB1_PACKET_TYPE_V2; -+ -+ return MTK_FOE_IB1_PACKET_TYPE; -+} -+ -+static inline u32 mtk_get_ib1_pkt_type(struct mtk_eth *eth, u32 val) -+{ -+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) -+ return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE_V2, val); -+ -+ return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, val); -+} -+ -+static inline u32 mtk_get_ib2_multicast_mask(struct mtk_eth *eth) -+{ -+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) -+ return MTK_FOE_IB2_MULTICAST_V2; -+ -+ return MTK_FOE_IB2_MULTICAST; -+} -+ - /* read the hardware status register */ - void mtk_stats_update_mac(struct mtk_mac *mac); - ---- a/drivers/net/ethernet/mediatek/mtk_ppe.c -+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c -@@ -56,7 +56,7 @@ static u32 ppe_clear(struct mtk_ppe *ppe - - static u32 mtk_eth_timestamp(struct mtk_eth *eth) - { -- return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP; -+ return mtk_r32(eth, 0x0010) & mtk_get_ib1_ts_mask(eth); - } - - static int mtk_ppe_wait_busy(struct mtk_ppe *ppe) -@@ -93,7 +93,7 @@ static u32 mtk_ppe_hash_entry(struct mtk - u32 hv1, hv2, hv3; - u32 hash; - -- switch (FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, e->ib1)) { -+ switch (mtk_get_ib1_pkt_type(eth, e->ib1)) { - case MTK_PPE_PKT_TYPE_IPV4_ROUTE: - case MTK_PPE_PKT_TYPE_IPV4_HNAPT: - hv1 = e->ipv4.orig.ports; -@@ -129,9 +129,9 @@ static u32 mtk_ppe_hash_entry(struct mtk - } - - static inline struct mtk_foe_mac_info * --mtk_foe_entry_l2(struct mtk_foe_entry *entry) -+mtk_foe_entry_l2(struct mtk_eth *eth, struct mtk_foe_entry *entry) - { -- int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1); -+ int type = mtk_get_ib1_pkt_type(eth, entry->ib1); - - if (type == MTK_PPE_PKT_TYPE_BRIDGE) - return &entry->bridge.l2; -@@ -143,9 +143,9 @@ mtk_foe_entry_l2(struct mtk_foe_entry *e - } - - static inline u32 * --mtk_foe_entry_ib2(struct mtk_foe_entry *entry) -+mtk_foe_entry_ib2(struct mtk_eth *eth, struct mtk_foe_entry *entry) - { -- int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1); -+ int type = mtk_get_ib1_pkt_type(eth, entry->ib1); - - if (type == MTK_PPE_PKT_TYPE_BRIDGE) - return &entry->bridge.ib2; -@@ -156,27 +156,38 @@ mtk_foe_entry_ib2(struct mtk_foe_entry * - return &entry->ipv4.ib2; - } - --int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto, -- u8 pse_port, u8 *src_mac, u8 *dest_mac) -+int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry, -+ int type, int l4proto, u8 pse_port, u8 *src_mac, -+ u8 *dest_mac) - { 
- struct mtk_foe_mac_info *l2; - u32 ports_pad, val; - - memset(entry, 0, sizeof(*entry)); - -- val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) | -- FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) | -- FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) | -- MTK_FOE_IB1_BIND_TTL | -- MTK_FOE_IB1_BIND_CACHE; -- entry->ib1 = val; -- -- val = FIELD_PREP(MTK_FOE_IB2_PORT_MG, 0x3f) | -- FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f) | -- FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port); -+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { -+ val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) | -+ FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE_V2, type) | -+ FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) | -+ MTK_FOE_IB1_BIND_CACHE_V2 | MTK_FOE_IB1_BIND_TTL_V2; -+ entry->ib1 = val; -+ -+ val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, pse_port) | -+ FIELD_PREP(MTK_FOE_IB2_PORT_AG_V2, 0xf); -+ } else { -+ val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) | -+ FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) | -+ FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) | -+ MTK_FOE_IB1_BIND_CACHE | MTK_FOE_IB1_BIND_TTL; -+ entry->ib1 = val; -+ -+ val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port) | -+ FIELD_PREP(MTK_FOE_IB2_PORT_MG, 0x3f) | -+ FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f); -+ } - - if (is_multicast_ether_addr(dest_mac)) -- val |= MTK_FOE_IB2_MULTICAST; -+ val |= mtk_get_ib2_multicast_mask(eth); - - ports_pad = 0xa5a5a500 | (l4proto & 0xff); - if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE) -@@ -210,24 +221,30 @@ int mtk_foe_entry_prepare(struct mtk_foe - return 0; - } - --int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port) -+int mtk_foe_entry_set_pse_port(struct mtk_eth *eth, -+ struct mtk_foe_entry *entry, u8 port) - { -- u32 *ib2 = mtk_foe_entry_ib2(entry); -- u32 val; -+ u32 *ib2 = mtk_foe_entry_ib2(eth, entry); -+ u32 val = *ib2; - -- val = *ib2; -- val &= ~MTK_FOE_IB2_DEST_PORT; -- val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port); -+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { -+ val &= ~MTK_FOE_IB2_DEST_PORT_V2; -+ val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, port); -+ } else { -+ val &= ~MTK_FOE_IB2_DEST_PORT; -+ val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port); -+ } - *ib2 = val; - - return 0; - } - --int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool egress, -+int mtk_foe_entry_set_ipv4_tuple(struct mtk_eth *eth, -+ struct mtk_foe_entry *entry, bool egress, - __be32 src_addr, __be16 src_port, - __be32 dest_addr, __be16 dest_port) - { -- int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1); -+ int type = mtk_get_ib1_pkt_type(eth, entry->ib1); - struct mtk_ipv4_tuple *t; - - switch (type) { -@@ -262,11 +279,12 @@ int mtk_foe_entry_set_ipv4_tuple(struct - return 0; - } - --int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry, -+int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth, -+ struct mtk_foe_entry *entry, - __be32 *src_addr, __be16 src_port, - __be32 *dest_addr, __be16 dest_port) - { -- int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1); -+ int type = mtk_get_ib1_pkt_type(eth, entry->ib1); - u32 *src, *dest; - int i; - -@@ -297,39 +315,41 @@ int mtk_foe_entry_set_ipv6_tuple(struct - return 0; - } - --int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port) -+int mtk_foe_entry_set_dsa(struct mtk_eth *eth, struct mtk_foe_entry *entry, -+ int port) - { -- struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry); -+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry); - - l2->etype = BIT(port); - -- if (!(entry->ib1 & 
MTK_FOE_IB1_BIND_VLAN_LAYER)) -- entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1); -+ if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth))) -+ entry->ib1 |= mtk_prep_ib1_vlan_layer(eth, 1); - else - l2->etype |= BIT(8); - -- entry->ib1 &= ~MTK_FOE_IB1_BIND_VLAN_TAG; -+ entry->ib1 &= ~mtk_get_ib1_vlan_tag_mask(eth); - - return 0; - } - --int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid) -+int mtk_foe_entry_set_vlan(struct mtk_eth *eth, struct mtk_foe_entry *entry, -+ int vid) - { -- struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry); -+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry); - -- switch (FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, entry->ib1)) { -+ switch (mtk_prep_ib1_vlan_layer(eth, entry->ib1)) { - case 0: -- entry->ib1 |= MTK_FOE_IB1_BIND_VLAN_TAG | -- FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1); -+ entry->ib1 |= mtk_get_ib1_vlan_tag_mask(eth) | -+ mtk_prep_ib1_vlan_layer(eth, 1); - l2->vlan1 = vid; - return 0; - case 1: -- if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG)) { -+ if (!(entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth))) { - l2->vlan1 = vid; - l2->etype |= BIT(8); - } else { - l2->vlan2 = vid; -- entry->ib1 += FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1); -+ entry->ib1 += mtk_prep_ib1_vlan_layer(eth, 1); - } - return 0; - default: -@@ -337,34 +357,42 @@ int mtk_foe_entry_set_vlan(struct mtk_fo - } - } - --int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid) -+int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry, -+ int sid) - { -- struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry); -+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry); - -- if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER) || -- (entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG)) -+ if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)) || -+ (entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth))) - l2->etype = ETH_P_PPP_SES; - -- entry->ib1 |= MTK_FOE_IB1_BIND_PPPOE; -+ entry->ib1 |= mtk_get_ib1_ppoe_mask(eth); - l2->pppoe_id = sid; - - return 0; - } - --int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq, -- int bss, int wcid) -+int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry, -+ int wdma_idx, int txq, int bss, int wcid) - { -- struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry); -- u32 *ib2 = mtk_foe_entry_ib2(entry); -+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry); -+ u32 *ib2 = mtk_foe_entry_ib2(eth, entry); - -- *ib2 &= ~MTK_FOE_IB2_PORT_MG; -- *ib2 |= MTK_FOE_IB2_WDMA_WINFO; -- if (wdma_idx) -- *ib2 |= MTK_FOE_IB2_WDMA_DEVIDX; -- -- l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) | -- FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) | -- FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq); -+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { -+ *ib2 &= ~MTK_FOE_IB2_PORT_MG_V2; -+ *ib2 |= FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) | -+ MTK_FOE_IB2_WDMA_WINFO_V2; -+ l2->winfo = FIELD_PREP(MTK_FOE_WINFO_WCID, wcid) | -+ FIELD_PREP(MTK_FOE_WINFO_BSS, bss); -+ } else { -+ *ib2 &= ~MTK_FOE_IB2_PORT_MG; -+ *ib2 |= MTK_FOE_IB2_WDMA_WINFO; -+ if (wdma_idx) -+ *ib2 |= MTK_FOE_IB2_WDMA_DEVIDX; -+ l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) | -+ FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) | -+ FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq); -+ } - - return 0; - } -@@ -376,14 +404,15 @@ static inline bool mtk_foe_entry_usable( - } - - static bool --mtk_flow_entry_match(struct mtk_flow_entry *entry, struct mtk_foe_entry *data) -+mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry, -+ 
struct mtk_foe_entry *data) - { - int type, len; - - if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP) - return false; - -- type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1); -+ type = mtk_get_ib1_pkt_type(eth, entry->data.ib1); - if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE) - len = offsetof(struct mtk_foe_entry, ipv6._rsv); - else -@@ -427,14 +456,12 @@ __mtk_foe_entry_clear(struct mtk_ppe *pp - - static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1) - { -- u16 timestamp; -- u16 now; -- -- now = mtk_eth_timestamp(ppe->eth) & MTK_FOE_IB1_BIND_TIMESTAMP; -- timestamp = ib1 & MTK_FOE_IB1_BIND_TIMESTAMP; -+ u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth); -+ u16 now = mtk_eth_timestamp(ppe->eth); -+ u16 timestamp = ib1 & ib1_ts_mask; - - if (timestamp > now) -- return MTK_FOE_IB1_BIND_TIMESTAMP + 1 - timestamp + now; -+ return ib1_ts_mask + 1 - timestamp + now; - else - return now - timestamp; - } -@@ -442,6 +469,7 @@ static int __mtk_foe_entry_idle_time(str - static void - mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) - { -+ u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth); - struct mtk_flow_entry *cur; - struct mtk_foe_entry *hwe; - struct hlist_node *tmp; -@@ -466,8 +494,8 @@ mtk_flow_entry_update_l2(struct mtk_ppe - continue; - - idle = cur_idle; -- entry->data.ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP; -- entry->data.ib1 |= hwe->ib1 & MTK_FOE_IB1_BIND_TIMESTAMP; -+ entry->data.ib1 &= ~ib1_ts_mask; -+ entry->data.ib1 |= hwe->ib1 & ib1_ts_mask; - } - } - -@@ -489,7 +517,7 @@ mtk_flow_entry_update(struct mtk_ppe *pp - - hwe = mtk_foe_get_entry(ppe, entry->hash); - memcpy(&foe, hwe, ppe->eth->soc->foe_entry_size); -- if (!mtk_flow_entry_match(entry, &foe)) { -+ if (!mtk_flow_entry_match(ppe->eth, entry, &foe)) { - entry->hash = 0xffff; - goto out; - } -@@ -504,16 +532,22 @@ static void - __mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry, - u16 hash) - { -+ struct mtk_eth *eth = ppe->eth; -+ u16 timestamp = mtk_eth_timestamp(eth); - struct mtk_foe_entry *hwe; -- u16 timestamp; - -- timestamp = mtk_eth_timestamp(ppe->eth); -- timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP; -- entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP; -- entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp); -+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { -+ entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP_V2; -+ entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP_V2, -+ timestamp); -+ } else { -+ entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP; -+ entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, -+ timestamp); -+ } - - hwe = mtk_foe_get_entry(ppe, hash); -- memcpy(&hwe->data, &entry->data, ppe->eth->soc->foe_entry_size); -+ memcpy(&hwe->data, &entry->data, eth->soc->foe_entry_size); - wmb(); - hwe->ib1 = entry->ib1; - -@@ -540,8 +574,8 @@ mtk_foe_entry_commit_l2(struct mtk_ppe * - - int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) - { -- int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1); - const struct mtk_soc_data *soc = ppe->eth->soc; -+ int type = mtk_get_ib1_pkt_type(ppe->eth, entry->data.ib1); - u32 hash; - - if (type == MTK_PPE_PKT_TYPE_BRIDGE) -@@ -564,7 +598,7 @@ mtk_foe_entry_commit_subflow(struct mtk_ - struct mtk_flow_entry *flow_info; - struct mtk_foe_entry foe = {}, *hwe; - struct mtk_foe_mac_info *l2; -- u32 ib1_mask = MTK_FOE_IB1_PACKET_TYPE | MTK_FOE_IB1_UDP; -+ u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP; - int type; - - flow_info = kzalloc(offsetof(struct mtk_flow_entry, 
l2_data.end), -@@ -584,16 +618,16 @@ mtk_foe_entry_commit_subflow(struct mtk_ - foe.ib1 &= ib1_mask; - foe.ib1 |= entry->data.ib1 & ~ib1_mask; - -- l2 = mtk_foe_entry_l2(&foe); -+ l2 = mtk_foe_entry_l2(ppe->eth, &foe); - memcpy(l2, &entry->data.bridge.l2, sizeof(*l2)); - -- type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, foe.ib1); -+ type = mtk_get_ib1_pkt_type(ppe->eth, foe.ib1); - if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT) - memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new)); - else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP) - l2->etype = ETH_P_IPV6; - -- *mtk_foe_entry_ib2(&foe) = entry->data.bridge.ib2; -+ *mtk_foe_entry_ib2(ppe->eth, &foe) = entry->data.bridge.ib2; - - __mtk_foe_entry_commit(ppe, &foe, hash); - } -@@ -626,7 +660,7 @@ void __mtk_ppe_check_skb(struct mtk_ppe - continue; - } - -- if (found || !mtk_flow_entry_match(entry, hwe)) { -+ if (found || !mtk_flow_entry_match(ppe->eth, entry, hwe)) { - if (entry->hash != 0xffff) - entry->hash = 0xffff; - continue; -@@ -771,6 +805,8 @@ void mtk_ppe_start(struct mtk_ppe *ppe) - MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) | - FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM, - MTK_PPE_ENTRIES_SHIFT); -+ if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2)) -+ val |= MTK_PPE_TB_CFG_INFO_SEL; - ppe_w32(ppe, MTK_PPE_TB_CFG, val); - - ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK, -@@ -778,15 +814,21 @@ void mtk_ppe_start(struct mtk_ppe *ppe) - - mtk_ppe_cache_enable(ppe, true); - -- val = MTK_PPE_FLOW_CFG_IP4_TCP_FRAG | -- MTK_PPE_FLOW_CFG_IP4_UDP_FRAG | -- MTK_PPE_FLOW_CFG_IP6_3T_ROUTE | -+ val = MTK_PPE_FLOW_CFG_IP6_3T_ROUTE | - MTK_PPE_FLOW_CFG_IP6_5T_ROUTE | - MTK_PPE_FLOW_CFG_IP6_6RD | - MTK_PPE_FLOW_CFG_IP4_NAT | - MTK_PPE_FLOW_CFG_IP4_NAPT | - MTK_PPE_FLOW_CFG_IP4_DSLITE | - MTK_PPE_FLOW_CFG_IP4_NAT_FRAG; -+ if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2)) -+ val |= MTK_PPE_MD_TOAP_BYP_CRSN0 | -+ MTK_PPE_MD_TOAP_BYP_CRSN1 | -+ MTK_PPE_MD_TOAP_BYP_CRSN2 | -+ MTK_PPE_FLOW_CFG_IP4_HASH_GRE_KEY; -+ else -+ val |= MTK_PPE_FLOW_CFG_IP4_TCP_FRAG | -+ MTK_PPE_FLOW_CFG_IP4_UDP_FRAG; - ppe_w32(ppe, MTK_PPE_FLOW_CFG, val); - - val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) | -@@ -820,6 +862,11 @@ void mtk_ppe_start(struct mtk_ppe *ppe) - ppe_w32(ppe, MTK_PPE_GLO_CFG, val); - - ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0); -+ -+ if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2)) { -+ ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT1, 0xcb777); -+ ppe_w32(ppe, MTK_PPE_SBW_CTRL, 0x7f); -+ } - } - - int mtk_ppe_stop(struct mtk_ppe *ppe) ---- a/drivers/net/ethernet/mediatek/mtk_ppe.h -+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h -@@ -32,6 +32,15 @@ - #define MTK_FOE_IB1_UDP BIT(30) - #define MTK_FOE_IB1_STATIC BIT(31) - -+/* CONFIG_MEDIATEK_NETSYS_V2 */ -+#define MTK_FOE_IB1_BIND_TIMESTAMP_V2 GENMASK(7, 0) -+#define MTK_FOE_IB1_BIND_VLAN_LAYER_V2 GENMASK(16, 14) -+#define MTK_FOE_IB1_BIND_PPPOE_V2 BIT(17) -+#define MTK_FOE_IB1_BIND_VLAN_TAG_V2 BIT(18) -+#define MTK_FOE_IB1_BIND_CACHE_V2 BIT(20) -+#define MTK_FOE_IB1_BIND_TTL_V2 BIT(22) -+#define MTK_FOE_IB1_PACKET_TYPE_V2 GENMASK(27, 23) -+ - enum { - MTK_PPE_PKT_TYPE_IPV4_HNAPT = 0, - MTK_PPE_PKT_TYPE_IPV4_ROUTE = 1, -@@ -53,14 +62,25 @@ enum { - - #define MTK_FOE_IB2_PORT_MG GENMASK(17, 12) - -+#define MTK_FOE_IB2_RX_IDX GENMASK(18, 17) - #define MTK_FOE_IB2_PORT_AG GENMASK(23, 18) - - #define MTK_FOE_IB2_DSCP GENMASK(31, 24) - -+/* CONFIG_MEDIATEK_NETSYS_V2 */ -+#define MTK_FOE_IB2_PORT_MG_V2 BIT(7) -+#define MTK_FOE_IB2_DEST_PORT_V2 GENMASK(12, 9) -+#define MTK_FOE_IB2_MULTICAST_V2 BIT(13) 
-+#define MTK_FOE_IB2_WDMA_WINFO_V2 BIT(19) -+#define MTK_FOE_IB2_PORT_AG_V2 GENMASK(23, 20) -+ - #define MTK_FOE_VLAN2_WINFO_BSS GENMASK(5, 0) - #define MTK_FOE_VLAN2_WINFO_WCID GENMASK(13, 6) - #define MTK_FOE_VLAN2_WINFO_RING GENMASK(15, 14) - -+#define MTK_FOE_WINFO_BSS GENMASK(5, 0) -+#define MTK_FOE_WINFO_WCID GENMASK(15, 6) -+ - enum { - MTK_FOE_STATE_INVALID, - MTK_FOE_STATE_UNBIND, -@@ -81,6 +101,9 @@ struct mtk_foe_mac_info { - - u16 pppoe_id; - u16 src_mac_lo; -+ -+ u16 minfo; -+ u16 winfo; - }; - - /* software-only entry type */ -@@ -198,7 +221,7 @@ struct mtk_foe_entry { - struct mtk_foe_ipv4_dslite dslite; - struct mtk_foe_ipv6 ipv6; - struct mtk_foe_ipv6_6rd ipv6_6rd; -- u32 data[19]; -+ u32 data[23]; - }; - }; - -@@ -306,20 +329,27 @@ mtk_ppe_check_skb(struct mtk_ppe *ppe, s - __mtk_ppe_check_skb(ppe, skb, hash); - } - --int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto, -- u8 pse_port, u8 *src_mac, u8 *dest_mac); --int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port); --int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool orig, -+int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry, -+ int type, int l4proto, u8 pse_port, u8 *src_mac, -+ u8 *dest_mac); -+int mtk_foe_entry_set_pse_port(struct mtk_eth *eth, -+ struct mtk_foe_entry *entry, u8 port); -+int mtk_foe_entry_set_ipv4_tuple(struct mtk_eth *eth, -+ struct mtk_foe_entry *entry, bool orig, - __be32 src_addr, __be16 src_port, - __be32 dest_addr, __be16 dest_port); --int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry, -+int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth, -+ struct mtk_foe_entry *entry, - __be32 *src_addr, __be16 src_port, - __be32 *dest_addr, __be16 dest_port); --int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port); --int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid); --int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid); --int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq, -- int bss, int wcid); -+int mtk_foe_entry_set_dsa(struct mtk_eth *eth, struct mtk_foe_entry *entry, -+ int port); -+int mtk_foe_entry_set_vlan(struct mtk_eth *eth, struct mtk_foe_entry *entry, -+ int vid); -+int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry, -+ int sid); -+int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry, -+ int wdma_idx, int txq, int bss, int wcid); - int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry); - void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry); - int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry); ---- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c -+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c -@@ -52,18 +52,19 @@ static const struct rhashtable_params mt - }; - - static int --mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data, -- bool egress) -+mtk_flow_set_ipv4_addr(struct mtk_eth *eth, struct mtk_foe_entry *foe, -+ struct mtk_flow_data *data, bool egress) - { -- return mtk_foe_entry_set_ipv4_tuple(foe, egress, -+ return mtk_foe_entry_set_ipv4_tuple(eth, foe, egress, - data->v4.src_addr, data->src_port, - data->v4.dst_addr, data->dst_port); - } - - static int --mtk_flow_set_ipv6_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data) -+mtk_flow_set_ipv6_addr(struct mtk_eth *eth, struct mtk_foe_entry *foe, -+ struct mtk_flow_data *data) - { -- return 
mtk_foe_entry_set_ipv6_tuple(foe, -+ return mtk_foe_entry_set_ipv6_tuple(eth, foe, - data->v6.src_addr.s6_addr32, data->src_port, - data->v6.dst_addr.s6_addr32, data->dst_port); - } -@@ -190,16 +191,29 @@ mtk_flow_set_output_device(struct mtk_et - int pse_port, dsa_port; - - if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) { -- mtk_foe_entry_set_wdma(foe, info.wdma_idx, info.queue, info.bss, -- info.wcid); -- pse_port = 3; -+ mtk_foe_entry_set_wdma(eth, foe, info.wdma_idx, info.queue, -+ info.bss, info.wcid); -+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { -+ switch (info.wdma_idx) { -+ case 0: -+ pse_port = 8; -+ break; -+ case 1: -+ pse_port = 9; -+ break; -+ default: -+ return -EINVAL; -+ } -+ } else { -+ pse_port = 3; -+ } - *wed_index = info.wdma_idx; - goto out; - } - - dsa_port = mtk_flow_get_dsa_port(&dev); - if (dsa_port >= 0) -- mtk_foe_entry_set_dsa(foe, dsa_port); -+ mtk_foe_entry_set_dsa(eth, foe, dsa_port); - - if (dev == eth->netdev[0]) - pse_port = 1; -@@ -209,7 +223,7 @@ mtk_flow_set_output_device(struct mtk_et - return -EOPNOTSUPP; - - out: -- mtk_foe_entry_set_pse_port(foe, pse_port); -+ mtk_foe_entry_set_pse_port(eth, foe, pse_port); - - return 0; - } -@@ -333,9 +347,8 @@ mtk_flow_offload_replace(struct mtk_eth - !is_valid_ether_addr(data.eth.h_dest)) - return -EINVAL; - -- err = mtk_foe_entry_prepare(&foe, offload_type, l4proto, 0, -- data.eth.h_source, -- data.eth.h_dest); -+ err = mtk_foe_entry_prepare(eth, &foe, offload_type, l4proto, 0, -+ data.eth.h_source, data.eth.h_dest); - if (err) - return err; - -@@ -360,7 +373,7 @@ mtk_flow_offload_replace(struct mtk_eth - data.v4.src_addr = addrs.key->src; - data.v4.dst_addr = addrs.key->dst; - -- mtk_flow_set_ipv4_addr(&foe, &data, false); -+ mtk_flow_set_ipv4_addr(eth, &foe, &data, false); - } - - if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { -@@ -371,7 +384,7 @@ mtk_flow_offload_replace(struct mtk_eth - data.v6.src_addr = addrs.key->src; - data.v6.dst_addr = addrs.key->dst; - -- mtk_flow_set_ipv6_addr(&foe, &data); -+ mtk_flow_set_ipv6_addr(eth, &foe, &data); - } - - flow_action_for_each(i, act, &rule->action) { -@@ -401,7 +414,7 @@ mtk_flow_offload_replace(struct mtk_eth - } - - if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { -- err = mtk_flow_set_ipv4_addr(&foe, &data, true); -+ err = mtk_flow_set_ipv4_addr(eth, &foe, &data, true); - if (err) - return err; - } -@@ -413,10 +426,10 @@ mtk_flow_offload_replace(struct mtk_eth - if (data.vlan.proto != htons(ETH_P_8021Q)) - return -EOPNOTSUPP; - -- mtk_foe_entry_set_vlan(&foe, data.vlan.id); -+ mtk_foe_entry_set_vlan(eth, &foe, data.vlan.id); - } - if (data.pppoe.num == 1) -- mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid); -+ mtk_foe_entry_set_pppoe(eth, &foe, data.pppoe.sid); - - err = mtk_flow_set_output_device(eth, &foe, odev, data.eth.h_dest, - &wed_index); ---- a/drivers/net/ethernet/mediatek/mtk_ppe_regs.h -+++ b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h -@@ -21,6 +21,9 @@ - #define MTK_PPE_GLO_CFG_BUSY BIT(31) - - #define MTK_PPE_FLOW_CFG 0x204 -+#define MTK_PPE_MD_TOAP_BYP_CRSN0 BIT(1) -+#define MTK_PPE_MD_TOAP_BYP_CRSN1 BIT(2) -+#define MTK_PPE_MD_TOAP_BYP_CRSN2 BIT(3) - #define MTK_PPE_FLOW_CFG_IP4_TCP_FRAG BIT(6) - #define MTK_PPE_FLOW_CFG_IP4_UDP_FRAG BIT(7) - #define MTK_PPE_FLOW_CFG_IP6_3T_ROUTE BIT(8) -@@ -54,6 +57,7 @@ - #define MTK_PPE_TB_CFG_HASH_MODE GENMASK(15, 14) - #define MTK_PPE_TB_CFG_SCAN_MODE GENMASK(17, 16) - #define MTK_PPE_TB_CFG_HASH_DEBUG GENMASK(19, 18) -+#define MTK_PPE_TB_CFG_INFO_SEL BIT(20) - - enum { - 
MTK_PPE_SCAN_MODE_DISABLED, -@@ -112,6 +116,8 @@ enum { - #define MTK_PPE_DEFAULT_CPU_PORT 0x248 - #define MTK_PPE_DEFAULT_CPU_PORT_MASK(_n) (GENMASK(2, 0) << ((_n) * 4)) - -+#define MTK_PPE_DEFAULT_CPU_PORT1 0x24c -+ - #define MTK_PPE_MTU_DROP 0x308 - - #define MTK_PPE_VLAN_MTU0 0x30c -@@ -141,4 +147,6 @@ enum { - #define MTK_PPE_MIB_CACHE_CTL_EN BIT(0) - #define MTK_PPE_MIB_CACHE_CTL_FLUSH BIT(2) - -+#define MTK_PPE_SBW_CTRL 0x374 -+ - #endif diff --git a/target/linux/generic/backport-6.1/724-v6.0-net-ethernet-mtk_eth_soc-enable-flow-offloading-supp.patch b/target/linux/generic/backport-6.1/724-v6.0-net-ethernet-mtk_eth_soc-enable-flow-offloading-supp.patch deleted file mode 100644 index e8bb85ac940e..000000000000 --- a/target/linux/generic/backport-6.1/724-v6.0-net-ethernet-mtk_eth_soc-enable-flow-offloading-supp.patch +++ /dev/null @@ -1,26 +0,0 @@ -From b94b02a270471337bef73c44fa3493a521e31a61 Mon Sep 17 00:00:00 2001 -Message-Id: -In-Reply-To: -References: -From: Lorenzo Bianconi -Date: Mon, 5 Sep 2022 13:56:13 +0200 -Subject: [PATCH net-next 5/5] net: ethernet: mtk_eth_soc: enable flow - offloading support for mt7986 - -Enable hw packet engine and wireless packet dispatcher for mt7986 - -Signed-off-by: Lorenzo Bianconi ---- - drivers/net/ethernet/mediatek/mtk_eth_soc.c | 1 + - 1 file changed, 1 insertion(+) - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -4259,6 +4259,7 @@ static const struct mtk_soc_data mt7986_ - .hw_features = MTK_HW_FEATURES, - .required_clks = MT7986_CLKS_BITMAP, - .required_pctl = false, -+ .offload_version = 2, - .hash_offset = 4, - .foe_entry_size = sizeof(struct mtk_foe_entry), - .txrx = { diff --git a/target/linux/generic/backport-6.1/724-v6.0-net-ethernet-mtk_eth_soc-fix-wrong-use-of-new-helper.patch b/target/linux/generic/backport-6.1/724-v6.0-net-ethernet-mtk_eth_soc-fix-wrong-use-of-new-helper.patch deleted file mode 100644 index c4bd29365c7a..000000000000 --- a/target/linux/generic/backport-6.1/724-v6.0-net-ethernet-mtk_eth_soc-fix-wrong-use-of-new-helper.patch +++ /dev/null @@ -1,46 +0,0 @@ -From 40350ce3ae8701146aafd79c5f7b5582d9955e58 Mon Sep 17 00:00:00 2001 -From: Daniel Golle -Date: Sun, 25 Sep 2022 15:12:35 +0100 -Subject: [PATCH 1/2] net: ethernet: mtk_eth_soc: fix wrong use of new helper - function -To: linux-mediatek@lists.infradead.org, - netdev@vger.kernel.org, - Lorenzo Bianconi -Cc: Sujuan Chen , - Bo Jiao , - Felix Fietkau , - John Crispin , - Sean Wang , - Mark Lee , - David S. Miller , - Eric Dumazet , - Jakub Kicinski , - Paolo Abeni , - Matthias Brugger , - Chen Minqiang - -In function mtk_foe_entry_set_vlan() the call to field accessor macro -FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, entry->ib1) -has been wrongly replaced by -mtk_prep_ib1_vlan_layer(eth, entry->ib1) - -Use correct helper function mtk_get_ib1_vlan_layer instead. 
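For context, a minimal sketch of how the two accessors differ, using the helpers introduced in mtk_eth_soc.h earlier in this series (the wrapper function and its arguments here are illustrative assumptions, not part of the patch):

    /* mtk_get_ib1_vlan_layer() wraps FIELD_GET() and extracts the bound
     * VLAN-layer count from an existing ib1 word, while
     * mtk_prep_ib1_vlan_layer() wraps FIELD_PREP() and packs a small count
     * into that field.  Passing a whole ib1 word to the prep helper, as the
     * buggy hunk did, shifts the word into the narrow bit-field, so the
     * switch () no longer sees the expected 0/1/2.
     */
    static void sketch_add_first_vlan(struct mtk_eth *eth,
                                      struct mtk_foe_entry *entry)
    {
            u32 layers = mtk_get_ib1_vlan_layer(eth, entry->ib1);   /* extract */

            if (!layers)
                    entry->ib1 |= mtk_prep_ib1_vlan_layer(eth, 1);  /* pack */
    }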
- -Reported-by: Chen Minqiang -Fixes: 03a3180e5c09e1 ("net: ethernet: mtk_eth_soc: introduce flow offloading support for mt7986") -Signed-off-by: Daniel Golle ---- - drivers/net/ethernet/mediatek/mtk_ppe.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/net/ethernet/mediatek/mtk_ppe.c -+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c -@@ -337,7 +337,7 @@ int mtk_foe_entry_set_vlan(struct mtk_et - { - struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry); - -- switch (mtk_prep_ib1_vlan_layer(eth, entry->ib1)) { -+ switch (mtk_get_ib1_vlan_layer(eth, entry->ib1)) { - case 0: - entry->ib1 |= mtk_get_ib1_vlan_tag_mask(eth) | - mtk_prep_ib1_vlan_layer(eth, 1); diff --git a/target/linux/generic/backport-6.1/725-v6.0-net-ethernet-mtk_eth_soc-fix-usage-of-foe_entry_size.patch b/target/linux/generic/backport-6.1/725-v6.0-net-ethernet-mtk_eth_soc-fix-usage-of-foe_entry_size.patch deleted file mode 100644 index bb02f401a2d6..000000000000 --- a/target/linux/generic/backport-6.1/725-v6.0-net-ethernet-mtk_eth_soc-fix-usage-of-foe_entry_size.patch +++ /dev/null @@ -1,49 +0,0 @@ -From fcf14c2c5deae8f8c3d25530bab10856f63f8a63 Mon Sep 17 00:00:00 2001 -From: Daniel Golle -Date: Sun, 25 Sep 2022 15:18:54 +0100 -Subject: [PATCH 2/2] net: ethernet: mtk_eth_soc: fix usage of foe_entry_size -To: linux-mediatek@lists.infradead.org, - netdev@vger.kernel.org, - Lorenzo Bianconi -Cc: Sujuan Chen , - Bo Jiao , - Felix Fietkau , - John Crispin , - Sean Wang , - Mark Lee , - David S. Miller , - Eric Dumazet , - Jakub Kicinski , - Paolo Abeni , - Matthias Brugger , - Chen Minqiang - -As sizeof(hwe->data) can now longer be used as the actual size depends -on foe_entry_size, in commit 9d8cb4c096ab02 -("net: ethernet: mtk_eth_soc: add foe_entry_size to mtk_eth_soc") the -use of sizeof(hwe->data) is hence replaced. -However, replacing it with ppe->eth->soc->foe_entry_size is wrong as -foe_entry_size represents the size of the whole descriptor and not just -the 'data' field. -Fix this by subtracing the size of the only other field in the struct -'ib1', so we actually end up with the correct size to be copied to the -data field. - -Reported-by: Chen Minqiang -Fixes: 9d8cb4c096ab02 ("net: ethernet: mtk_eth_soc: add foe_entry_size to mtk_eth_soc") -Signed-off-by: Daniel Golle ---- - drivers/net/ethernet/mediatek/mtk_ppe.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/net/ethernet/mediatek/mtk_ppe.c -+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c -@@ -547,7 +547,7 @@ __mtk_foe_entry_commit(struct mtk_ppe *p - } - - hwe = mtk_foe_get_entry(ppe, hash); -- memcpy(&hwe->data, &entry->data, eth->soc->foe_entry_size); -+ memcpy(&hwe->data, &entry->data, eth->soc->foe_entry_size - sizeof(hwe->ib1)); - wmb(); - hwe->ib1 = entry->ib1; - diff --git a/target/linux/generic/backport-6.1/726-v6.0-net-ethernet-mtk_eth_soc-fix-mask-of-RX_DMA_GET_SPOR.patch b/target/linux/generic/backport-6.1/726-v6.0-net-ethernet-mtk_eth_soc-fix-mask-of-RX_DMA_GET_SPOR.patch deleted file mode 100644 index 27c719b66353..000000000000 --- a/target/linux/generic/backport-6.1/726-v6.0-net-ethernet-mtk_eth_soc-fix-mask-of-RX_DMA_GET_SPOR.patch +++ /dev/null @@ -1,32 +0,0 @@ -From c9da02bfb1112461e048d3b736afb1873f6f4ccf Mon Sep 17 00:00:00 2001 -From: Daniel Golle -Date: Tue, 27 Sep 2022 16:30:02 +0100 -Subject: [PATCH 1/1] net: ethernet: mtk_eth_soc: fix mask of - RX_DMA_GET_SPORT{,_V2} - -The bitmasks applied in RX_DMA_GET_SPORT and RX_DMA_GET_SPORT_V2 macros -were swapped. Fix that. 
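To make the truncation concrete, a small assumed example based on the field widths in the hunk below (the NETSYS_V2 source-port field spans bits 29:26, cf. MTK_RXD5_SRC_PORT, so it needs the 4-bit mask):

    u32 rxd = 9U << 26;                     /* descriptor word reporting WDMA source port 9 */
    u32 sport_bad  = (rxd >> 26) & 0x7;     /* == 1: the swapped 3-bit mask truncates it */
    u32 sport_good = (rxd >> 26) & 0xf;     /* == 9: the corrected 4-bit mask keeps it */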
- -Reported-by: Chen Minqiang -Fixes: 160d3a9b192985 ("net: ethernet: mtk_eth_soc: introduce MTK_NETSYS_V2 support") -Acked-by: Lorenzo Bianconi -Signed-off-by: Daniel Golle -Link: https://lore.kernel.org/r/YzMW+mg9UsaCdKRQ@makrotopia.org -Signed-off-by: Jakub Kicinski ---- - drivers/net/ethernet/mediatek/mtk_eth_soc.h | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -315,8 +315,8 @@ - #define MTK_RXD5_PPE_CPU_REASON GENMASK(22, 18) - #define MTK_RXD5_SRC_PORT GENMASK(29, 26) - --#define RX_DMA_GET_SPORT(x) (((x) >> 19) & 0xf) --#define RX_DMA_GET_SPORT_V2(x) (((x) >> 26) & 0x7) -+#define RX_DMA_GET_SPORT(x) (((x) >> 19) & 0x7) -+#define RX_DMA_GET_SPORT_V2(x) (((x) >> 26) & 0xf) - - /* PDMA V2 descriptor rxd3 */ - #define RX_DMA_VTAG_V2 BIT(0) diff --git a/target/linux/generic/backport-6.1/727-v6.1-net-ethernet-mtk_eth_soc-fix-state-in-__mtk_foe_entr.patch b/target/linux/generic/backport-6.1/727-v6.1-net-ethernet-mtk_eth_soc-fix-state-in-__mtk_foe_entr.patch deleted file mode 100644 index 11465c1c5b71..000000000000 --- a/target/linux/generic/backport-6.1/727-v6.1-net-ethernet-mtk_eth_soc-fix-state-in-__mtk_foe_entr.patch +++ /dev/null @@ -1,37 +0,0 @@ -From ae3ed15da5889263de372ff9df2e83e16acca4cb Mon Sep 17 00:00:00 2001 -From: Daniel Golle -Date: Fri, 30 Sep 2022 01:56:53 +0100 -Subject: [PATCH 1/1] net: ethernet: mtk_eth_soc: fix state in - __mtk_foe_entry_clear - -Setting ib1 state to MTK_FOE_STATE_UNBIND in __mtk_foe_entry_clear -routine as done by commit 0e80707d94e4c8 ("net: ethernet: mtk_eth_soc: -fix typo in __mtk_foe_entry_clear") breaks flow offloading, at least -on older MTK_NETSYS_V1 SoCs, OpenWrt users have confirmed the bug on -MT7622 and MT7621 systems. -Felix Fietkau suggested to use MTK_FOE_STATE_INVALID instead which -works well on both, MTK_NETSYS_V1 and MTK_NETSYS_V2. - -Tested on MT7622 (Linksys E8450) and MT7986 (BananaPi BPI-R3). 
- -Suggested-by: Felix Fietkau -Fixes: 0e80707d94e4c8 ("net: ethernet: mtk_eth_soc: fix typo in __mtk_foe_entry_clear") -Fixes: 33fc42de33278b ("net: ethernet: mtk_eth_soc: support creating mac address based offload entries") -Signed-off-by: Daniel Golle -Link: https://lore.kernel.org/r/YzY+1Yg0FBXcnrtc@makrotopia.org -Signed-off-by: Jakub Kicinski ---- - drivers/net/ethernet/mediatek/mtk_ppe.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/net/ethernet/mediatek/mtk_ppe.c -+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c -@@ -442,7 +442,7 @@ __mtk_foe_entry_clear(struct mtk_ppe *pp - struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, entry->hash); - - hwe->ib1 &= ~MTK_FOE_IB1_STATE; -- hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_UNBIND); -+ hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID); - dma_wmb(); - } - entry->hash = 0xffff; diff --git a/target/linux/generic/backport-6.1/728-v6.1-01-net-ethernet-mtk_eth_soc-fix-possible-memory-leak-in.patch b/target/linux/generic/backport-6.1/728-v6.1-01-net-ethernet-mtk_eth_soc-fix-possible-memory-leak-in.patch deleted file mode 100644 index b41318afd76d..000000000000 --- a/target/linux/generic/backport-6.1/728-v6.1-01-net-ethernet-mtk_eth_soc-fix-possible-memory-leak-in.patch +++ /dev/null @@ -1,73 +0,0 @@ -From b3d0d98179d62f9d55635a600679c4fa362baf8d Mon Sep 17 00:00:00 2001 -From: Yang Yingliang -Date: Mon, 17 Oct 2022 11:51:54 +0800 -Subject: [PATCH 1/3] net: ethernet: mtk_eth_soc: fix possible memory leak in - mtk_probe() - -If mtk_wed_add_hw() has been called, mtk_wed_exit() needs be called -in error path or removing module to free the memory allocated in -mtk_wed_add_hw(). - -Fixes: 804775dfc288 ("net: ethernet: mtk_eth_soc: add support for Wireless Ethernet Dispatch (WED)") -Signed-off-by: Yang Yingliang -Signed-off-by: David S. 
Miller ---- - drivers/net/ethernet/mediatek/mtk_eth_soc.c | 17 ++++++++++++----- - 1 file changed, 12 insertions(+), 5 deletions(-) - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -4011,19 +4011,23 @@ static int mtk_probe(struct platform_dev - eth->irq[i] = platform_get_irq(pdev, i); - if (eth->irq[i] < 0) { - dev_err(&pdev->dev, "no IRQ%d resource found\n", i); -- return -ENXIO; -+ err = -ENXIO; -+ goto err_wed_exit; - } - } - for (i = 0; i < ARRAY_SIZE(eth->clks); i++) { - eth->clks[i] = devm_clk_get(eth->dev, - mtk_clks_source_name[i]); - if (IS_ERR(eth->clks[i])) { -- if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) -- return -EPROBE_DEFER; -+ if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) { -+ err = -EPROBE_DEFER; -+ goto err_wed_exit; -+ } - if (eth->soc->required_clks & BIT(i)) { - dev_err(&pdev->dev, "clock %s not found\n", - mtk_clks_source_name[i]); -- return -EINVAL; -+ err = -EINVAL; -+ goto err_wed_exit; - } - eth->clks[i] = NULL; - } -@@ -4034,7 +4038,7 @@ static int mtk_probe(struct platform_dev - - err = mtk_hw_init(eth); - if (err) -- return err; -+ goto err_wed_exit; - - eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO); - -@@ -4132,6 +4136,8 @@ err_free_dev: - mtk_free_dev(eth); - err_deinit_hw: - mtk_hw_deinit(eth); -+err_wed_exit: -+ mtk_wed_exit(); - - return err; - } -@@ -4151,6 +4157,7 @@ static int mtk_remove(struct platform_de - phylink_disconnect_phy(mac->phylink); - } - -+ mtk_wed_exit(); - mtk_hw_deinit(eth); - - netif_napi_del(ð->tx_napi); diff --git a/target/linux/generic/backport-6.1/728-v6.1-02-net-ethernet-mtk_eth_wed-add-missing-put_device-in-m.patch b/target/linux/generic/backport-6.1/728-v6.1-02-net-ethernet-mtk_eth_wed-add-missing-put_device-in-m.patch deleted file mode 100644 index ef5374dcc574..000000000000 --- a/target/linux/generic/backport-6.1/728-v6.1-02-net-ethernet-mtk_eth_wed-add-missing-put_device-in-m.patch +++ /dev/null @@ -1,47 +0,0 @@ -From 9d4f20a476ca57e4c9246eb1fa2a61bea2354720 Mon Sep 17 00:00:00 2001 -From: Yang Yingliang -Date: Mon, 17 Oct 2022 11:51:55 +0800 -Subject: [PATCH 2/3] net: ethernet: mtk_eth_wed: add missing put_device() in - mtk_wed_add_hw() - -After calling get_device() in mtk_wed_add_hw(), in error path, put_device() -needs be called. - -Fixes: 804775dfc288 ("net: ethernet: mtk_eth_soc: add support for Wireless Ethernet Dispatch (WED)") -Signed-off-by: Yang Yingliang -Signed-off-by: David S. 
Miller ---- - drivers/net/ethernet/mediatek/mtk_wed.c | 10 ++++++++-- - 1 file changed, 8 insertions(+), 2 deletions(-) - ---- a/drivers/net/ethernet/mediatek/mtk_wed.c -+++ b/drivers/net/ethernet/mediatek/mtk_wed.c -@@ -1084,11 +1084,11 @@ void mtk_wed_add_hw(struct device_node * - get_device(&pdev->dev); - irq = platform_get_irq(pdev, 0); - if (irq < 0) -- return; -+ goto err_put_device; - - regs = syscon_regmap_lookup_by_phandle(np, NULL); - if (IS_ERR(regs)) -- return; -+ goto err_put_device; - - rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops); - -@@ -1131,8 +1131,14 @@ void mtk_wed_add_hw(struct device_node * - - hw_list[index] = hw; - -+ mutex_unlock(&hw_lock); -+ -+ return; -+ - unlock: - mutex_unlock(&hw_lock); -+err_put_device: -+ put_device(&pdev->dev); - } - - void mtk_wed_exit(void) diff --git a/target/linux/generic/backport-6.1/728-v6.1-03-net-ethernet-mtk_eth_wed-add-missing-of_node_put.patch b/target/linux/generic/backport-6.1/728-v6.1-03-net-ethernet-mtk_eth_wed-add-missing-of_node_put.patch deleted file mode 100644 index 0a452d4a7d46..000000000000 --- a/target/linux/generic/backport-6.1/728-v6.1-03-net-ethernet-mtk_eth_wed-add-missing-of_node_put.patch +++ /dev/null @@ -1,43 +0,0 @@ -From e0bb4659e235770e6f53b3692e958591f49448f5 Mon Sep 17 00:00:00 2001 -From: Yang Yingliang -Date: Mon, 17 Oct 2022 11:51:56 +0800 -Subject: [PATCH 3/3] net: ethernet: mtk_eth_wed: add missing of_node_put() - -The device_node pointer returned by of_parse_phandle() with refcount -incremented, when finish using it, the refcount need be decreased. - -Fixes: 804775dfc288 ("net: ethernet: mtk_eth_soc: add support for Wireless Ethernet Dispatch (WED)") -Signed-off-by: Yang Yingliang -Signed-off-by: David S. Miller ---- - drivers/net/ethernet/mediatek/mtk_wed.c | 5 ++++- - 1 file changed, 4 insertions(+), 1 deletion(-) - ---- a/drivers/net/ethernet/mediatek/mtk_wed.c -+++ b/drivers/net/ethernet/mediatek/mtk_wed.c -@@ -1079,7 +1079,7 @@ void mtk_wed_add_hw(struct device_node * - - pdev = of_find_device_by_node(np); - if (!pdev) -- return; -+ goto err_of_node_put; - - get_device(&pdev->dev); - irq = platform_get_irq(pdev, 0); -@@ -1139,6 +1139,8 @@ unlock: - mutex_unlock(&hw_lock); - err_put_device: - put_device(&pdev->dev); -+err_of_node_put: -+ of_node_put(np); - } - - void mtk_wed_exit(void) -@@ -1159,6 +1161,7 @@ void mtk_wed_exit(void) - hw_list[i] = NULL; - debugfs_remove(hw->debugfs_dir); - put_device(hw->dev); -+ of_node_put(hw->node); - kfree(hw); - } - } diff --git a/target/linux/generic/backport-6.1/729-06-v6.1-net-ethernet-mtk_eth_soc-do-not-overwrite-mtu-config.patch b/target/linux/generic/backport-6.1/729-06-v6.1-net-ethernet-mtk_eth_soc-do-not-overwrite-mtu-config.patch deleted file mode 100644 index 003ca9bae699..000000000000 --- a/target/linux/generic/backport-6.1/729-06-v6.1-net-ethernet-mtk_eth_soc-do-not-overwrite-mtu-config.patch +++ /dev/null @@ -1,98 +0,0 @@ -From: Lorenzo Bianconi -Date: Thu, 17 Nov 2022 00:35:04 +0100 -Subject: [PATCH] net: ethernet: mtk_eth_soc: do not overwrite mtu - configuration running reset routine - -Restore user configured MTU running mtk_hw_init() during tx timeout routine -since it will be overwritten after a hw reset. - -Reported-by: Felix Fietkau -Fixes: 9ea4d311509f ("net: ethernet: mediatek: add the whole ethernet reset into the reset process") -Signed-off-by: Lorenzo Bianconi -Signed-off-by: David S. 
Miller ---- - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -3176,6 +3176,30 @@ static void mtk_dim_tx(struct work_struc - dim->state = DIM_START_MEASURE; - } - -+static void mtk_set_mcr_max_rx(struct mtk_mac *mac, u32 val) -+{ -+ struct mtk_eth *eth = mac->hw; -+ u32 mcr_cur, mcr_new; -+ -+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) -+ return; -+ -+ mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); -+ mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK; -+ -+ if (val <= 1518) -+ mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518); -+ else if (val <= 1536) -+ mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536); -+ else if (val <= 1552) -+ mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552); -+ else -+ mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048); -+ -+ if (mcr_new != mcr_cur) -+ mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id)); -+} -+ - static int mtk_hw_init(struct mtk_eth *eth) - { - u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA | -@@ -3250,8 +3274,16 @@ static int mtk_hw_init(struct mtk_eth *e - * up with the more appropriate value when mtk_mac_config call is being - * invoked. - */ -- for (i = 0; i < MTK_MAC_COUNT; i++) -+ for (i = 0; i < MTK_MAC_COUNT; i++) { -+ struct net_device *dev = eth->netdev[i]; -+ - mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i)); -+ if (dev) { -+ struct mtk_mac *mac = netdev_priv(dev); -+ -+ mtk_set_mcr_max_rx(mac, dev->mtu + MTK_RX_ETH_HLEN); -+ } -+ } - - /* Indicates CDM to parse the MTK special tag from CPU - * which also is working out for untag packets. -@@ -3367,7 +3399,6 @@ static int mtk_change_mtu(struct net_dev - int length = new_mtu + MTK_RX_ETH_HLEN; - struct mtk_mac *mac = netdev_priv(dev); - struct mtk_eth *eth = mac->hw; -- u32 mcr_cur, mcr_new; - - if (rcu_access_pointer(eth->prog) && - length > MTK_PP_MAX_BUF_SIZE) { -@@ -3375,23 +3406,7 @@ static int mtk_change_mtu(struct net_dev - return -EINVAL; - } - -- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { -- mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); -- mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK; -- -- if (length <= 1518) -- mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518); -- else if (length <= 1536) -- mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536); -- else if (length <= 1552) -- mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552); -- else -- mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048); -- -- if (mcr_new != mcr_cur) -- mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id)); -- } -- -+ mtk_set_mcr_max_rx(mac, length); - dev->mtu = new_mtu; - - return 0; diff --git a/target/linux/generic/backport-6.1/729-08-v6.2-net-ethernet-mtk_eth_soc-fix-RSTCTRL_PPE-0-1-definit.patch b/target/linux/generic/backport-6.1/729-08-v6.2-net-ethernet-mtk_eth_soc-fix-RSTCTRL_PPE-0-1-definit.patch deleted file mode 100644 index 12aa3ebf6a7c..000000000000 --- a/target/linux/generic/backport-6.1/729-08-v6.2-net-ethernet-mtk_eth_soc-fix-RSTCTRL_PPE-0-1-definit.patch +++ /dev/null @@ -1,63 +0,0 @@ -From: Lorenzo Bianconi -Date: Thu, 17 Nov 2022 15:29:53 +0100 -Subject: [PATCH] net: ethernet: mtk_eth_soc: fix RSTCTRL_PPE{0,1} definitions - -Fix RSTCTRL_PPE0 and RSTCTRL_PPE1 register mask definitions for -MTK_NETSYS_V2. -Remove duplicated definitions. - -Fixes: 160d3a9b1929 ("net: ethernet: mtk_eth_soc: introduce MTK_NETSYS_V2 support") -Signed-off-by: Lorenzo Bianconi -Signed-off-by: David S. 
Miller ---- - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -3239,16 +3239,17 @@ static int mtk_hw_init(struct mtk_eth *e - return 0; - } - -- val = RSTCTRL_FE | RSTCTRL_PPE; - if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { - regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0); -- -- val |= RSTCTRL_ETH; -- if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) -- val |= RSTCTRL_PPE1; -+ val = RSTCTRL_PPE0_V2; -+ } else { -+ val = RSTCTRL_PPE0; - } - -- ethsys_reset(eth, val); -+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) -+ val |= RSTCTRL_PPE1; -+ -+ ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val); - - if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { - regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -445,18 +445,14 @@ - /* ethernet reset control register */ - #define ETHSYS_RSTCTRL 0x34 - #define RSTCTRL_FE BIT(6) --#define RSTCTRL_PPE BIT(31) --#define RSTCTRL_PPE1 BIT(30) -+#define RSTCTRL_PPE0 BIT(31) -+#define RSTCTRL_PPE0_V2 BIT(30) -+#define RSTCTRL_PPE1 BIT(31) - #define RSTCTRL_ETH BIT(23) - - /* ethernet reset check idle register */ - #define ETHSYS_FE_RST_CHK_IDLE_EN 0x28 - --/* ethernet reset control register */ --#define ETHSYS_RSTCTRL 0x34 --#define RSTCTRL_FE BIT(6) --#define RSTCTRL_PPE BIT(31) -- - /* ethernet dma channel agent map */ - #define ETHSYS_DMA_AG_MAP 0x408 - #define ETHSYS_DMA_AG_MAP_PDMA BIT(0) diff --git a/target/linux/generic/backport-6.1/730-10-v6.3-net-ethernet-mtk_eth_soc-drop-packets-to-WDMA-if-the.patch b/target/linux/generic/backport-6.1/730-10-v6.3-net-ethernet-mtk_eth_soc-drop-packets-to-WDMA-if-the.patch deleted file mode 100644 index 0bf48b078001..000000000000 --- a/target/linux/generic/backport-6.1/730-10-v6.3-net-ethernet-mtk_eth_soc-drop-packets-to-WDMA-if-the.patch +++ /dev/null @@ -1,37 +0,0 @@ -From: Felix Fietkau -Date: Thu, 3 Nov 2022 17:46:25 +0100 -Subject: [PATCH] net: ethernet: mtk_eth_soc: drop packets to WDMA if the - ring is full - -Improves handling of DMA ring overflow. -Clarify other WDMA drop related comment. 
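As a rough illustration of the new register write (register name taken from the hunk below; the bit interpretation follows the comment added there, so treat it as an assumption):

    /* 0x00000300 is BIT(8) | BIT(9), i.e. the PSE ports used for WDMA, so
     * this asks the PSE to drop traffic towards ports 8/9 instead of
     * back-pressuring it once a WDMA Rx ring is full.
     */
    mtk_w32(eth, BIT(8) | BIT(9), PSE_PPE0_DROP);   /* same value as 0x00000300 */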
- -Signed-off-by: Felix Fietkau ---- - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -3712,9 +3712,12 @@ static int mtk_hw_init(struct mtk_eth *e - mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP); - - if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { -- /* PSE should not drop port8 and port9 packets */ -+ /* PSE should not drop port8 and port9 packets from WDMA Tx */ - mtk_w32(eth, 0x00000300, PSE_DROP_CFG); - -+ /* PSE should drop packets to port 8/9 on WDMA Rx ring full */ -+ mtk_w32(eth, 0x00000300, PSE_PPE0_DROP); -+ - /* PSE Free Queue Flow Control */ - mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2); - ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -140,6 +140,7 @@ - #define PSE_FQFC_CFG1 0x100 - #define PSE_FQFC_CFG2 0x104 - #define PSE_DROP_CFG 0x108 -+#define PSE_PPE0_DROP 0x110 - - /* PSE Input Queue Reservation Register*/ - #define PSE_IQ_REV(x) (0x140 + (((x) - 1) << 2)) diff --git a/target/linux/generic/backport-6.1/733-v6.2-01-net-ethernet-mtk_eth_soc-Avoid-truncating-allocation.patch b/target/linux/generic/backport-6.1/733-v6.2-01-net-ethernet-mtk_eth_soc-Avoid-truncating-allocation.patch deleted file mode 100644 index 460c5c231789..000000000000 --- a/target/linux/generic/backport-6.1/733-v6.2-01-net-ethernet-mtk_eth_soc-Avoid-truncating-allocation.patch +++ /dev/null @@ -1,60 +0,0 @@ -From f3eceaed9edd7c0e0d9fb057613131f92973626f Mon Sep 17 00:00:00 2001 -From: Kees Cook -Date: Fri, 27 Jan 2023 14:38:54 -0800 -Subject: [PATCH] net: ethernet: mtk_eth_soc: Avoid truncating allocation - -There doesn't appear to be a reason to truncate the allocation used for -flow_info, so do a full allocation and remove the unused empty struct. -GCC does not like having a reference to an object that has been -partially allocated, as bounds checking may become impossible when -such an object is passed to other code. Seen with GCC 13: - -../drivers/net/ethernet/mediatek/mtk_ppe.c: In function 'mtk_foe_entry_commit_subflow': -../drivers/net/ethernet/mediatek/mtk_ppe.c:623:18: warning: array subscript 'struct mtk_flow_entry[0]' is partly outside array bounds of 'unsigned char[48]' [-Warray-bounds=] - 623 | flow_info->l2_data.base_flow = entry; - | ^~ - -Cc: Felix Fietkau -Cc: John Crispin -Cc: Sean Wang -Cc: Mark Lee -Cc: Lorenzo Bianconi -Cc: "David S. 
Miller" -Cc: Eric Dumazet -Cc: Jakub Kicinski -Cc: Paolo Abeni -Cc: Matthias Brugger -Cc: netdev@vger.kernel.org -Cc: linux-arm-kernel@lists.infradead.org -Cc: linux-mediatek@lists.infradead.org -Signed-off-by: Kees Cook -Reviewed-by: Simon Horman -Link: https://lore.kernel.org/r/20230127223853.never.014-kees@kernel.org -Signed-off-by: Paolo Abeni ---- - drivers/net/ethernet/mediatek/mtk_ppe.c | 3 +-- - drivers/net/ethernet/mediatek/mtk_ppe.h | 1 - - 2 files changed, 1 insertion(+), 3 deletions(-) - ---- a/drivers/net/ethernet/mediatek/mtk_ppe.c -+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c -@@ -621,8 +621,7 @@ mtk_foe_entry_commit_subflow(struct mtk_ - u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP; - int type; - -- flow_info = kzalloc(offsetof(struct mtk_flow_entry, l2_data.end), -- GFP_ATOMIC); -+ flow_info = kzalloc(sizeof(*flow_info), GFP_ATOMIC); - if (!flow_info) - return; - ---- a/drivers/net/ethernet/mediatek/mtk_ppe.h -+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h -@@ -279,7 +279,6 @@ struct mtk_flow_entry { - struct { - struct mtk_flow_entry *base_flow; - struct hlist_node list; -- struct {} end; - } l2_data; - }; - struct rhash_head node; diff --git a/target/linux/generic/backport-6.1/734-v5.16-0001-net-bgmac-improve-handling-PHY.patch b/target/linux/generic/backport-6.1/734-v5.16-0001-net-bgmac-improve-handling-PHY.patch deleted file mode 100644 index 6788a2ec350e..000000000000 --- a/target/linux/generic/backport-6.1/734-v5.16-0001-net-bgmac-improve-handling-PHY.patch +++ /dev/null @@ -1,84 +0,0 @@ -From b5375509184dc23d2b7fa0c5ed8763899ccc9674 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= -Date: Sat, 2 Oct 2021 19:58:11 +0200 -Subject: [PATCH] net: bgmac: improve handling PHY -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -1. Use info from DT if available - -It allows describing for example a fixed link. It's more accurate than -just guessing there may be one (depending on a chipset). - -2. Verify PHY ID before trying to connect PHY - -PHY addr 0x1e (30) is special in Broadcom routers and means a switch -connected as MDIO devices instead of a real PHY. Don't try connecting to -it. - -Signed-off-by: Rafał Miłecki -Signed-off-by: David S. 
Miller ---- - drivers/net/ethernet/broadcom/bgmac-bcma.c | 33 ++++++++++++++-------- - 1 file changed, 21 insertions(+), 12 deletions(-) - ---- a/drivers/net/ethernet/broadcom/bgmac-bcma.c -+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c -@@ -11,6 +11,7 @@ - #include - #include - #include -+#include - #include - #include "bgmac.h" - -@@ -86,17 +87,28 @@ static int bcma_phy_connect(struct bgmac - struct phy_device *phy_dev; - char bus_id[MII_BUS_ID_SIZE + 3]; - -+ /* DT info should be the most accurate */ -+ phy_dev = of_phy_get_and_connect(bgmac->net_dev, bgmac->dev->of_node, -+ bgmac_adjust_link); -+ if (phy_dev) -+ return 0; -+ - /* Connect to the PHY */ -- snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, bgmac->mii_bus->id, -- bgmac->phyaddr); -- phy_dev = phy_connect(bgmac->net_dev, bus_id, bgmac_adjust_link, -- PHY_INTERFACE_MODE_MII); -- if (IS_ERR(phy_dev)) { -- dev_err(bgmac->dev, "PHY connection failed\n"); -- return PTR_ERR(phy_dev); -+ if (bgmac->mii_bus && bgmac->phyaddr != BGMAC_PHY_NOREGS) { -+ snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, bgmac->mii_bus->id, -+ bgmac->phyaddr); -+ phy_dev = phy_connect(bgmac->net_dev, bus_id, bgmac_adjust_link, -+ PHY_INTERFACE_MODE_MII); -+ if (IS_ERR(phy_dev)) { -+ dev_err(bgmac->dev, "PHY connection failed\n"); -+ return PTR_ERR(phy_dev); -+ } -+ -+ return 0; - } - -- return 0; -+ /* Assume a fixed link to the switch port */ -+ return bgmac_phy_connect_direct(bgmac); - } - - static const struct bcma_device_id bgmac_bcma_tbl[] = { -@@ -297,10 +309,7 @@ static int bgmac_probe(struct bcma_devic - bgmac->cco_ctl_maskset = bcma_bgmac_cco_ctl_maskset; - bgmac->get_bus_clock = bcma_bgmac_get_bus_clock; - bgmac->cmn_maskset32 = bcma_bgmac_cmn_maskset32; -- if (bgmac->mii_bus) -- bgmac->phy_connect = bcma_phy_connect; -- else -- bgmac->phy_connect = bgmac_phy_connect_direct; -+ bgmac->phy_connect = bcma_phy_connect; - - err = bgmac_enet_probe(bgmac); - if (err) diff --git a/target/linux/generic/backport-6.1/734-v5.16-0002-net-bgmac-support-MDIO-described-in-DT.patch b/target/linux/generic/backport-6.1/734-v5.16-0002-net-bgmac-support-MDIO-described-in-DT.patch deleted file mode 100644 index f1348282739d..000000000000 --- a/target/linux/generic/backport-6.1/734-v5.16-0002-net-bgmac-support-MDIO-described-in-DT.patch +++ /dev/null @@ -1,54 +0,0 @@ -From 45c9d966688e7fad7f24bfc450547d91e4304d0b Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= -Date: Sat, 2 Oct 2021 19:58:12 +0200 -Subject: [PATCH] net: bgmac: support MDIO described in DT -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Check ethernet controller DT node for "mdio" subnode and use it with -of_mdiobus_register() when present. That allows specifying MDIO and its -PHY devices in a standard DT based way. - -This is required for BCM53573 SoC support. That family is sometimes -called Northstar (by marketing?) but is quite different from it. It uses -different CPU(s) and many different hw blocks. - -One of shared blocks in BCM53573 is Ethernet controller. Switch however -is not SRAB accessible (as it Northstar) but is MDIO attached. - -Signed-off-by: Rafał Miłecki -Signed-off-by: David S. 
Miller ---- - drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c | 6 +++++- - 1 file changed, 5 insertions(+), 1 deletion(-) - ---- a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c -+++ b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c -@@ -10,6 +10,7 @@ - - #include - #include -+#include - #include "bgmac.h" - - static bool bcma_mdio_wait_value(struct bcma_device *core, u16 reg, u32 mask, -@@ -211,6 +212,7 @@ struct mii_bus *bcma_mdio_mii_register(s - { - struct bcma_device *core = bgmac->bcma.core; - struct mii_bus *mii_bus; -+ struct device_node *np; - int err; - - mii_bus = mdiobus_alloc(); -@@ -229,7 +231,9 @@ struct mii_bus *bcma_mdio_mii_register(s - mii_bus->parent = &core->dev; - mii_bus->phy_mask = ~(1 << bgmac->phyaddr); - -- err = mdiobus_register(mii_bus); -+ np = of_get_child_by_name(core->dev.of_node, "mdio"); -+ -+ err = of_mdiobus_register(mii_bus, np); - if (err) { - dev_err(&core->dev, "Registration of mii bus failed\n"); - goto err_free_bus; diff --git a/target/linux/generic/backport-6.1/742-v5.16-net-phy-at803x-add-support-for-qca-8327-internal-phy.patch b/target/linux/generic/backport-6.1/742-v5.16-net-phy-at803x-add-support-for-qca-8327-internal-phy.patch deleted file mode 100644 index 8f000ba91879..000000000000 --- a/target/linux/generic/backport-6.1/742-v5.16-net-phy-at803x-add-support-for-qca-8327-internal-phy.patch +++ /dev/null @@ -1,48 +0,0 @@ -From 0ccf8511182436183c031e8a2f740ae91a02c625 Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Tue, 14 Sep 2021 14:33:45 +0200 -Subject: net: phy: at803x: add support for qca 8327 internal phy - -Add support for qca8327 internal phy needed for correct init of the -switch port. It does use the same qca8337 function and reg just with a -different id. - -Signed-off-by: Ansuel Smith -Tested-by: Rosen Penev -Tested-by: Andrew Lunn -Signed-off-by: David S. 
Miller ---- - drivers/net/phy/at803x.c | 15 +++++++++++++++ - 1 file changed, 15 insertions(+) - ---- a/drivers/net/phy/at803x.c -+++ b/drivers/net/phy/at803x.c -@@ -1412,6 +1412,19 @@ static struct phy_driver at803x_driver[] - .get_sset_count = at803x_get_sset_count, - .get_strings = at803x_get_strings, - .get_stats = at803x_get_stats, -+}, { -+ /* QCA8327 */ -+ .phy_id = QCA8327_PHY_ID, -+ .phy_id_mask = QCA8K_PHY_ID_MASK, -+ .name = "QCA PHY 8327", -+ /* PHY_GBIT_FEATURES */ -+ .probe = at803x_probe, -+ .flags = PHY_IS_INTERNAL, -+ .config_init = qca83xx_config_init, -+ .soft_reset = genphy_soft_reset, -+ .get_sset_count = at803x_get_sset_count, -+ .get_strings = at803x_get_strings, -+ .get_stats = at803x_get_stats, - }, }; - - module_phy_driver(at803x_driver); -@@ -1422,6 +1435,8 @@ static struct mdio_device_id __maybe_unu - { PHY_ID_MATCH_EXACT(ATH8032_PHY_ID) }, - { PHY_ID_MATCH_EXACT(ATH8035_PHY_ID) }, - { PHY_ID_MATCH_EXACT(ATH9331_PHY_ID) }, -+ { PHY_ID_MATCH_EXACT(QCA8337_PHY_ID) }, -+ { PHY_ID_MATCH_EXACT(QCA8327_PHY_ID) }, - { } - }; - diff --git a/target/linux/generic/backport-6.1/743-v5.16-0001-net-dsa-b53-Include-all-ports-in-enabled_ports.patch b/target/linux/generic/backport-6.1/743-v5.16-0001-net-dsa-b53-Include-all-ports-in-enabled_ports.patch deleted file mode 100644 index dc149a742b1a..000000000000 --- a/target/linux/generic/backport-6.1/743-v5.16-0001-net-dsa-b53-Include-all-ports-in-enabled_ports.patch +++ /dev/null @@ -1,131 +0,0 @@ -From 983d96a9116a328668601555d96736261d33170c Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= -Date: Thu, 16 Sep 2021 14:03:51 +0200 -Subject: [PATCH] net: dsa: b53: Include all ports in "enabled_ports" -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Make "enabled_ports" bitfield contain all available switch ports -including a CPU port. This way there is no need for fixup during -initialization. - -For BCM53010, BCM53018 and BCM53019 include also other available ports. 
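A small worked example of the new bitfield layout, taking the BCM5389 value from the hunks below (fls() comes from <linux/bitops.h>):

    u16 enabled_ports = 0x11f;              /* ports 0-4 plus bit 8 for the IMP/CPU port */
    int num_ports = fls(enabled_ports);     /* == 9, so the port count stays the same
                                             * without the old "enabled_ports |= BIT(cpu_port)"
                                             * fixup that this patch removes */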
- -Signed-off-by: Rafał Miłecki -Reviewed-by: Florian Fainelli -Tested-by: Florian Fainelli -Signed-off-by: Jakub Kicinski ---- - drivers/net/dsa/b53/b53_common.c | 23 +++++++++++------------ - 1 file changed, 11 insertions(+), 12 deletions(-) - ---- a/drivers/net/dsa/b53/b53_common.c -+++ b/drivers/net/dsa/b53/b53_common.c -@@ -2300,7 +2300,7 @@ static const struct b53_chip_data b53_sw - .chip_id = BCM5325_DEVICE_ID, - .dev_name = "BCM5325", - .vlans = 16, -- .enabled_ports = 0x1f, -+ .enabled_ports = 0x3f, - .arl_bins = 2, - .arl_buckets = 1024, - .imp_port = 5, -@@ -2311,7 +2311,7 @@ static const struct b53_chip_data b53_sw - .chip_id = BCM5365_DEVICE_ID, - .dev_name = "BCM5365", - .vlans = 256, -- .enabled_ports = 0x1f, -+ .enabled_ports = 0x3f, - .arl_bins = 2, - .arl_buckets = 1024, - .imp_port = 5, -@@ -2322,7 +2322,7 @@ static const struct b53_chip_data b53_sw - .chip_id = BCM5389_DEVICE_ID, - .dev_name = "BCM5389", - .vlans = 4096, -- .enabled_ports = 0x1f, -+ .enabled_ports = 0x11f, - .arl_bins = 4, - .arl_buckets = 1024, - .imp_port = 8, -@@ -2336,7 +2336,7 @@ static const struct b53_chip_data b53_sw - .chip_id = BCM5395_DEVICE_ID, - .dev_name = "BCM5395", - .vlans = 4096, -- .enabled_ports = 0x1f, -+ .enabled_ports = 0x11f, - .arl_bins = 4, - .arl_buckets = 1024, - .imp_port = 8, -@@ -2350,7 +2350,7 @@ static const struct b53_chip_data b53_sw - .chip_id = BCM5397_DEVICE_ID, - .dev_name = "BCM5397", - .vlans = 4096, -- .enabled_ports = 0x1f, -+ .enabled_ports = 0x11f, - .arl_bins = 4, - .arl_buckets = 1024, - .imp_port = 8, -@@ -2364,7 +2364,7 @@ static const struct b53_chip_data b53_sw - .chip_id = BCM5398_DEVICE_ID, - .dev_name = "BCM5398", - .vlans = 4096, -- .enabled_ports = 0x7f, -+ .enabled_ports = 0x17f, - .arl_bins = 4, - .arl_buckets = 1024, - .imp_port = 8, -@@ -2378,7 +2378,7 @@ static const struct b53_chip_data b53_sw - .chip_id = BCM53115_DEVICE_ID, - .dev_name = "BCM53115", - .vlans = 4096, -- .enabled_ports = 0x1f, -+ .enabled_ports = 0x11f, - .arl_bins = 4, - .arl_buckets = 1024, - .vta_regs = B53_VTA_REGS, -@@ -2392,7 +2392,7 @@ static const struct b53_chip_data b53_sw - .chip_id = BCM53125_DEVICE_ID, - .dev_name = "BCM53125", - .vlans = 4096, -- .enabled_ports = 0xff, -+ .enabled_ports = 0x1ff, - .arl_bins = 4, - .arl_buckets = 1024, - .imp_port = 8, -@@ -2434,7 +2434,7 @@ static const struct b53_chip_data b53_sw - .chip_id = BCM53010_DEVICE_ID, - .dev_name = "BCM53010", - .vlans = 4096, -- .enabled_ports = 0x1f, -+ .enabled_ports = 0x1bf, - .arl_bins = 4, - .arl_buckets = 1024, - .imp_port = 8, -@@ -2476,7 +2476,7 @@ static const struct b53_chip_data b53_sw - .chip_id = BCM53018_DEVICE_ID, - .dev_name = "BCM53018", - .vlans = 4096, -- .enabled_ports = 0x1f, -+ .enabled_ports = 0x1bf, - .arl_bins = 4, - .arl_buckets = 1024, - .imp_port = 8, -@@ -2490,7 +2490,7 @@ static const struct b53_chip_data b53_sw - .chip_id = BCM53019_DEVICE_ID, - .dev_name = "BCM53019", - .vlans = 4096, -- .enabled_ports = 0x1f, -+ .enabled_ports = 0x1bf, - .arl_bins = 4, - .arl_buckets = 1024, - .imp_port = 8, -@@ -2632,7 +2632,6 @@ static int b53_switch_init(struct b53_de - dev->cpu_port = 5; - } - -- dev->enabled_ports |= BIT(dev->cpu_port); - dev->num_ports = fls(dev->enabled_ports); - - dev->ds->num_ports = min_t(unsigned int, dev->num_ports, DSA_MAX_PORTS); diff --git a/target/linux/generic/backport-6.1/743-v5.16-0002-net-dsa-b53-Drop-BCM5301x-workaround-for-a-wrong-CPU.patch 
b/target/linux/generic/backport-6.1/743-v5.16-0002-net-dsa-b53-Drop-BCM5301x-workaround-for-a-wrong-CPU.patch deleted file mode 100644 index 23805a9027b3..000000000000 --- a/target/linux/generic/backport-6.1/743-v5.16-0002-net-dsa-b53-Drop-BCM5301x-workaround-for-a-wrong-CPU.patch +++ /dev/null @@ -1,42 +0,0 @@ -From b290c6384afabbca5ae6e2af72fb1b2bc37922be Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= -Date: Thu, 16 Sep 2021 14:03:52 +0200 -Subject: [PATCH] net: dsa: b53: Drop BCM5301x workaround for a wrong CPU/IMP - port -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -On BCM5301x port 8 requires a fixed link when used. - -Years ago when b53 was an OpenWrt downstream driver (with configuration -based on sometimes bugged NVRAM) there was a need for a fixup. In case -of forcing fixed link for (incorrectly specified) port 5 the code had to -actually setup port 8 link. - -For upstream b53 driver with setup based on DT there is no need for that -workaround. In DT we have and require correct ports setup. - -Signed-off-by: Rafał Miłecki -Reviewed-by: Florian Fainelli -Tested-by: Florian Fainelli -Signed-off-by: Jakub Kicinski ---- - drivers/net/dsa/b53/b53_common.c | 6 ------ - 1 file changed, 6 deletions(-) - ---- a/drivers/net/dsa/b53/b53_common.c -+++ b/drivers/net/dsa/b53/b53_common.c -@@ -1291,12 +1291,6 @@ static void b53_adjust_link(struct dsa_s - return; - } - } -- } else if (is5301x(dev)) { -- if (port != dev->cpu_port) { -- b53_force_port_config(dev, dev->cpu_port, 2000, -- DUPLEX_FULL, true, true); -- b53_force_link(dev, dev->cpu_port, 1); -- } - } - - /* Re-negotiate EEE if it was enabled already */ diff --git a/target/linux/generic/backport-6.1/743-v5.16-0003-net-dsa-b53-Improve-flow-control-setup-on-BCM5301x.patch b/target/linux/generic/backport-6.1/743-v5.16-0003-net-dsa-b53-Improve-flow-control-setup-on-BCM5301x.patch deleted file mode 100644 index 941fa23eb4c6..000000000000 --- a/target/linux/generic/backport-6.1/743-v5.16-0003-net-dsa-b53-Improve-flow-control-setup-on-BCM5301x.patch +++ /dev/null @@ -1,32 +0,0 @@ -From 3ff26b29230c54fea2353b63124c589b61953e14 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= -Date: Thu, 16 Sep 2021 14:03:53 +0200 -Subject: [PATCH] net: dsa: b53: Improve flow control setup on BCM5301x -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -According to the Broadcom's reference driver flow control needs to be -enabled for any CPU switch port (5, 7 or 8 - depending on which one is -used). Current code makes it work only for the port 5. Use -dsa_is_cpu_port() which solved that problem. 
- -Signed-off-by: Rafał Miłecki -Reviewed-by: Florian Fainelli -Tested-by: Florian Fainelli -Signed-off-by: Jakub Kicinski ---- - drivers/net/dsa/b53/b53_common.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/net/dsa/b53/b53_common.c -+++ b/drivers/net/dsa/b53/b53_common.c -@@ -1222,7 +1222,7 @@ static void b53_adjust_link(struct dsa_s - return; - - /* Enable flow control on BCM5301x's CPU port */ -- if (is5301x(dev) && port == dev->cpu_port) -+ if (is5301x(dev) && dsa_is_cpu_port(ds, port)) - tx_pause = rx_pause = true; - - if (phydev->pause) { diff --git a/target/linux/generic/backport-6.1/743-v5.16-0004-net-dsa-b53-Drop-unused-cpu_port-field.patch b/target/linux/generic/backport-6.1/743-v5.16-0004-net-dsa-b53-Drop-unused-cpu_port-field.patch deleted file mode 100644 index 07d0ec03cf17..000000000000 --- a/target/linux/generic/backport-6.1/743-v5.16-0004-net-dsa-b53-Drop-unused-cpu_port-field.patch +++ /dev/null @@ -1,205 +0,0 @@ -From 7d5af56418d7d01e43247a33b6fe6492ea871923 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= -Date: Thu, 16 Sep 2021 14:03:54 +0200 -Subject: [PATCH] net: dsa: b53: Drop unused "cpu_port" field -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -It's set but never used anymore. - -Signed-off-by: Rafał Miłecki -Reviewed-by: Florian Fainelli -Tested-by: Florian Fainelli -Signed-off-by: Jakub Kicinski ---- - drivers/net/dsa/b53/b53_common.c | 28 ---------------------------- - drivers/net/dsa/b53/b53_priv.h | 1 - - 2 files changed, 29 deletions(-) - ---- a/drivers/net/dsa/b53/b53_common.c -+++ b/drivers/net/dsa/b53/b53_common.c -@@ -2298,7 +2298,6 @@ static const struct b53_chip_data b53_sw - .arl_bins = 2, - .arl_buckets = 1024, - .imp_port = 5, -- .cpu_port = B53_CPU_PORT_25, - .duplex_reg = B53_DUPLEX_STAT_FE, - }, - { -@@ -2309,7 +2308,6 @@ static const struct b53_chip_data b53_sw - .arl_bins = 2, - .arl_buckets = 1024, - .imp_port = 5, -- .cpu_port = B53_CPU_PORT_25, - .duplex_reg = B53_DUPLEX_STAT_FE, - }, - { -@@ -2320,7 +2318,6 @@ static const struct b53_chip_data b53_sw - .arl_bins = 4, - .arl_buckets = 1024, - .imp_port = 8, -- .cpu_port = B53_CPU_PORT, - .vta_regs = B53_VTA_REGS, - .duplex_reg = B53_DUPLEX_STAT_GE, - .jumbo_pm_reg = B53_JUMBO_PORT_MASK, -@@ -2334,7 +2331,6 @@ static const struct b53_chip_data b53_sw - .arl_bins = 4, - .arl_buckets = 1024, - .imp_port = 8, -- .cpu_port = B53_CPU_PORT, - .vta_regs = B53_VTA_REGS, - .duplex_reg = B53_DUPLEX_STAT_GE, - .jumbo_pm_reg = B53_JUMBO_PORT_MASK, -@@ -2348,7 +2344,6 @@ static const struct b53_chip_data b53_sw - .arl_bins = 4, - .arl_buckets = 1024, - .imp_port = 8, -- .cpu_port = B53_CPU_PORT, - .vta_regs = B53_VTA_REGS_9798, - .duplex_reg = B53_DUPLEX_STAT_GE, - .jumbo_pm_reg = B53_JUMBO_PORT_MASK, -@@ -2362,7 +2357,6 @@ static const struct b53_chip_data b53_sw - .arl_bins = 4, - .arl_buckets = 1024, - .imp_port = 8, -- .cpu_port = B53_CPU_PORT, - .vta_regs = B53_VTA_REGS_9798, - .duplex_reg = B53_DUPLEX_STAT_GE, - .jumbo_pm_reg = B53_JUMBO_PORT_MASK, -@@ -2377,7 +2371,6 @@ static const struct b53_chip_data b53_sw - .arl_buckets = 1024, - .vta_regs = B53_VTA_REGS, - .imp_port = 8, -- .cpu_port = B53_CPU_PORT, - .duplex_reg = B53_DUPLEX_STAT_GE, - .jumbo_pm_reg = B53_JUMBO_PORT_MASK, - .jumbo_size_reg = B53_JUMBO_MAX_SIZE, -@@ -2390,7 +2383,6 @@ static const struct b53_chip_data b53_sw - .arl_bins = 4, - .arl_buckets = 1024, - .imp_port = 8, -- .cpu_port = B53_CPU_PORT, - .vta_regs = B53_VTA_REGS, - 
.duplex_reg = B53_DUPLEX_STAT_GE, - .jumbo_pm_reg = B53_JUMBO_PORT_MASK, -@@ -2404,7 +2396,6 @@ static const struct b53_chip_data b53_sw - .arl_bins = 4, - .arl_buckets = 1024, - .imp_port = 8, -- .cpu_port = B53_CPU_PORT, - .vta_regs = B53_VTA_REGS, - .duplex_reg = B53_DUPLEX_STAT_GE, - .jumbo_pm_reg = B53_JUMBO_PORT_MASK, -@@ -2418,7 +2409,6 @@ static const struct b53_chip_data b53_sw - .arl_bins = 4, - .arl_buckets = 1024, - .imp_port = 8, -- .cpu_port = B53_CPU_PORT, - .vta_regs = B53_VTA_REGS_63XX, - .duplex_reg = B53_DUPLEX_STAT_63XX, - .jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX, -@@ -2432,7 +2422,6 @@ static const struct b53_chip_data b53_sw - .arl_bins = 4, - .arl_buckets = 1024, - .imp_port = 8, -- .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */ - .vta_regs = B53_VTA_REGS, - .duplex_reg = B53_DUPLEX_STAT_GE, - .jumbo_pm_reg = B53_JUMBO_PORT_MASK, -@@ -2446,7 +2435,6 @@ static const struct b53_chip_data b53_sw - .arl_bins = 4, - .arl_buckets = 1024, - .imp_port = 8, -- .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */ - .vta_regs = B53_VTA_REGS, - .duplex_reg = B53_DUPLEX_STAT_GE, - .jumbo_pm_reg = B53_JUMBO_PORT_MASK, -@@ -2460,7 +2448,6 @@ static const struct b53_chip_data b53_sw - .arl_bins = 4, - .arl_buckets = 1024, - .imp_port = 8, -- .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */ - .vta_regs = B53_VTA_REGS, - .duplex_reg = B53_DUPLEX_STAT_GE, - .jumbo_pm_reg = B53_JUMBO_PORT_MASK, -@@ -2474,7 +2461,6 @@ static const struct b53_chip_data b53_sw - .arl_bins = 4, - .arl_buckets = 1024, - .imp_port = 8, -- .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */ - .vta_regs = B53_VTA_REGS, - .duplex_reg = B53_DUPLEX_STAT_GE, - .jumbo_pm_reg = B53_JUMBO_PORT_MASK, -@@ -2488,7 +2474,6 @@ static const struct b53_chip_data b53_sw - .arl_bins = 4, - .arl_buckets = 1024, - .imp_port = 8, -- .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */ - .vta_regs = B53_VTA_REGS, - .duplex_reg = B53_DUPLEX_STAT_GE, - .jumbo_pm_reg = B53_JUMBO_PORT_MASK, -@@ -2502,7 +2487,6 @@ static const struct b53_chip_data b53_sw - .arl_bins = 4, - .arl_buckets = 1024, - .imp_port = 8, -- .cpu_port = B53_CPU_PORT, - .vta_regs = B53_VTA_REGS, - .duplex_reg = B53_DUPLEX_STAT_GE, - .jumbo_pm_reg = B53_JUMBO_PORT_MASK, -@@ -2516,7 +2500,6 @@ static const struct b53_chip_data b53_sw - .arl_bins = 4, - .arl_buckets = 1024, - .imp_port = 8, -- .cpu_port = B53_CPU_PORT, - .vta_regs = B53_VTA_REGS, - .duplex_reg = B53_DUPLEX_STAT_GE, - .jumbo_pm_reg = B53_JUMBO_PORT_MASK, -@@ -2545,7 +2528,6 @@ static const struct b53_chip_data b53_sw - .arl_bins = 4, - .arl_buckets = 1024, - .imp_port = 8, -- .cpu_port = B53_CPU_PORT, - .vta_regs = B53_VTA_REGS, - .duplex_reg = B53_DUPLEX_STAT_GE, - .jumbo_pm_reg = B53_JUMBO_PORT_MASK, -@@ -2559,7 +2541,6 @@ static const struct b53_chip_data b53_sw - .arl_bins = 4, - .arl_buckets = 256, - .imp_port = 8, -- .cpu_port = B53_CPU_PORT, - .vta_regs = B53_VTA_REGS, - .duplex_reg = B53_DUPLEX_STAT_GE, - .jumbo_pm_reg = B53_JUMBO_PORT_MASK, -@@ -2585,7 +2566,6 @@ static int b53_switch_init(struct b53_de - dev->vta_regs[2] = chip->vta_regs[2]; - dev->jumbo_pm_reg = chip->jumbo_pm_reg; - dev->imp_port = chip->imp_port; -- dev->cpu_port = chip->cpu_port; - dev->num_vlans = chip->vlans; - dev->num_arl_bins = chip->arl_bins; - dev->num_arl_buckets = chip->arl_buckets; -@@ -2617,13 +2597,6 @@ static int b53_switch_init(struct b53_de - break; - #endif - } -- } else if (dev->chip_id == BCM53115_DEVICE_ID) { -- u64 strap_value; -- -- b53_read48(dev, B53_STAT_PAGE, B53_STRAP_VALUE, 
&strap_value); -- /* use second IMP port if GMII is enabled */ -- if (strap_value & SV_GMII_CTRL_115) -- dev->cpu_port = 5; - } - - dev->num_ports = fls(dev->enabled_ports); ---- a/drivers/net/dsa/b53/b53_priv.h -+++ b/drivers/net/dsa/b53/b53_priv.h -@@ -124,7 +124,6 @@ struct b53_device { - /* used ports mask */ - u16 enabled_ports; - unsigned int imp_port; -- unsigned int cpu_port; - - /* connect specific data */ - u8 current_page; diff --git a/target/linux/generic/backport-6.1/745-v5.16-01-net-phy-at803x-add-support-for-qca-8327-A-variant.patch b/target/linux/generic/backport-6.1/745-v5.16-01-net-phy-at803x-add-support-for-qca-8327-A-variant.patch deleted file mode 100644 index 99d91dfa7656..000000000000 --- a/target/linux/generic/backport-6.1/745-v5.16-01-net-phy-at803x-add-support-for-qca-8327-A-variant.patch +++ /dev/null @@ -1,65 +0,0 @@ -From b4df02b562f4aa14ff6811f30e1b4d2159585c59 Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Sun, 19 Sep 2021 18:28:15 +0200 -Subject: net: phy: at803x: add support for qca 8327 A variant internal phy - -For qca8327 internal phy there are 2 different switch variant with 2 -different phy id. Add this missing variant so the internal phy can be -correctly identified and fixed. - -Signed-off-by: Ansuel Smith -Reviewed-by: Andrew Lunn -Signed-off-by: David S. Miller ---- - drivers/net/phy/at803x.c | 25 ++++++++++++++++++++----- - 1 file changed, 20 insertions(+), 5 deletions(-) - ---- a/drivers/net/phy/at803x.c -+++ b/drivers/net/phy/at803x.c -@@ -150,7 +150,8 @@ - #define ATH8035_PHY_ID 0x004dd072 - #define AT8030_PHY_ID_MASK 0xffffffef - --#define QCA8327_PHY_ID 0x004dd034 -+#define QCA8327_A_PHY_ID 0x004dd033 -+#define QCA8327_B_PHY_ID 0x004dd034 - #define QCA8337_PHY_ID 0x004dd036 - #define QCA8K_PHY_ID_MASK 0xffffffff - -@@ -1413,10 +1414,23 @@ static struct phy_driver at803x_driver[] - .get_strings = at803x_get_strings, - .get_stats = at803x_get_stats, - }, { -- /* QCA8327 */ -- .phy_id = QCA8327_PHY_ID, -+ /* QCA8327-A from switch QCA8327-AL1A */ -+ .phy_id = QCA8327_A_PHY_ID, - .phy_id_mask = QCA8K_PHY_ID_MASK, -- .name = "QCA PHY 8327", -+ .name = "QCA PHY 8327-A", -+ /* PHY_GBIT_FEATURES */ -+ .probe = at803x_probe, -+ .flags = PHY_IS_INTERNAL, -+ .config_init = qca83xx_config_init, -+ .soft_reset = genphy_soft_reset, -+ .get_sset_count = at803x_get_sset_count, -+ .get_strings = at803x_get_strings, -+ .get_stats = at803x_get_stats, -+}, { -+ /* QCA8327-B from switch QCA8327-BL1A */ -+ .phy_id = QCA8327_B_PHY_ID, -+ .phy_id_mask = QCA8K_PHY_ID_MASK, -+ .name = "QCA PHY 8327-B", - /* PHY_GBIT_FEATURES */ - .probe = at803x_probe, - .flags = PHY_IS_INTERNAL, -@@ -1436,7 +1450,8 @@ static struct mdio_device_id __maybe_unu - { PHY_ID_MATCH_EXACT(ATH8035_PHY_ID) }, - { PHY_ID_MATCH_EXACT(ATH9331_PHY_ID) }, - { PHY_ID_MATCH_EXACT(QCA8337_PHY_ID) }, -- { PHY_ID_MATCH_EXACT(QCA8327_PHY_ID) }, -+ { PHY_ID_MATCH_EXACT(QCA8327_A_PHY_ID) }, -+ { PHY_ID_MATCH_EXACT(QCA8327_B_PHY_ID) }, - { } - }; - diff --git a/target/linux/generic/backport-6.1/745-v5.16-02-net-phy-at803x-add-resume-suspend-function-to-qca83x.patch b/target/linux/generic/backport-6.1/745-v5.16-02-net-phy-at803x-add-resume-suspend-function-to-qca83x.patch deleted file mode 100644 index cd83fac83c81..000000000000 --- a/target/linux/generic/backport-6.1/745-v5.16-02-net-phy-at803x-add-resume-suspend-function-to-qca83x.patch +++ /dev/null @@ -1,45 +0,0 @@ -From 15b9df4ece17d084f14eb0ca1cf05f2ad497e425 Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Sun, 19 Sep 2021 18:28:16 +0200 
-Subject: net: phy: at803x: add resume/suspend function to qca83xx phy - -Add resume/suspend function to qca83xx internal phy. -We can't use the at803x generic function as the documentation lacks of -any support for WoL regs. - -Signed-off-by: Ansuel Smith -Reviewed-by: Andrew Lunn -Signed-off-by: David S. Miller ---- - drivers/net/phy/at803x.c | 6 ++++++ - 1 file changed, 6 insertions(+) - ---- a/drivers/net/phy/at803x.c -+++ b/drivers/net/phy/at803x.c -@@ -1413,6 +1413,8 @@ static struct phy_driver at803x_driver[] - .get_sset_count = at803x_get_sset_count, - .get_strings = at803x_get_strings, - .get_stats = at803x_get_stats, -+ .suspend = genphy_suspend, -+ .resume = genphy_resume, - }, { - /* QCA8327-A from switch QCA8327-AL1A */ - .phy_id = QCA8327_A_PHY_ID, -@@ -1426,6 +1428,8 @@ static struct phy_driver at803x_driver[] - .get_sset_count = at803x_get_sset_count, - .get_strings = at803x_get_strings, - .get_stats = at803x_get_stats, -+ .suspend = genphy_suspend, -+ .resume = genphy_resume, - }, { - /* QCA8327-B from switch QCA8327-BL1A */ - .phy_id = QCA8327_B_PHY_ID, -@@ -1439,6 +1443,8 @@ static struct phy_driver at803x_driver[] - .get_sset_count = at803x_get_sset_count, - .get_strings = at803x_get_strings, - .get_stats = at803x_get_stats, -+ .suspend = genphy_suspend, -+ .resume = genphy_resume, - }, }; - - module_phy_driver(at803x_driver); diff --git a/target/linux/generic/backport-6.1/745-v5.16-03-net-phy-at803x-fix-spacing-and-improve-name-for-83xx.patch b/target/linux/generic/backport-6.1/745-v5.16-03-net-phy-at803x-fix-spacing-and-improve-name-for-83xx.patch deleted file mode 100644 index 586d8953b2ba..000000000000 --- a/target/linux/generic/backport-6.1/745-v5.16-03-net-phy-at803x-fix-spacing-and-improve-name-for-83xx.patch +++ /dev/null @@ -1,95 +0,0 @@ -From d44fd8604a4ab92119adb35f05fd87612af722b5 Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Sun, 19 Sep 2021 18:28:17 +0200 -Subject: net: phy: at803x: fix spacing and improve name for 83xx phy - -Fix spacing and improve name for 83xx phy following other phy in the -same driver. - -Signed-off-by: Ansuel Smith -Reviewed-by: Andrew Lunn -Signed-off-by: David S. 
Miller ---- - drivers/net/phy/at803x.c | 60 ++++++++++++++++++++++++------------------------ - 1 file changed, 30 insertions(+), 30 deletions(-) - ---- a/drivers/net/phy/at803x.c -+++ b/drivers/net/phy/at803x.c -@@ -1402,47 +1402,47 @@ static struct phy_driver at803x_driver[] - .config_aneg = at803x_config_aneg, - }, { - /* QCA8337 */ -- .phy_id = QCA8337_PHY_ID, -- .phy_id_mask = QCA8K_PHY_ID_MASK, -- .name = "QCA PHY 8337", -+ .phy_id = QCA8337_PHY_ID, -+ .phy_id_mask = QCA8K_PHY_ID_MASK, -+ .name = "Qualcomm Atheros 8337 internal PHY", - /* PHY_GBIT_FEATURES */ -- .probe = at803x_probe, -- .flags = PHY_IS_INTERNAL, -- .config_init = qca83xx_config_init, -- .soft_reset = genphy_soft_reset, -- .get_sset_count = at803x_get_sset_count, -- .get_strings = at803x_get_strings, -- .get_stats = at803x_get_stats, -+ .probe = at803x_probe, -+ .flags = PHY_IS_INTERNAL, -+ .config_init = qca83xx_config_init, -+ .soft_reset = genphy_soft_reset, -+ .get_sset_count = at803x_get_sset_count, -+ .get_strings = at803x_get_strings, -+ .get_stats = at803x_get_stats, - .suspend = genphy_suspend, - .resume = genphy_resume, - }, { - /* QCA8327-A from switch QCA8327-AL1A */ -- .phy_id = QCA8327_A_PHY_ID, -- .phy_id_mask = QCA8K_PHY_ID_MASK, -- .name = "QCA PHY 8327-A", -+ .phy_id = QCA8327_A_PHY_ID, -+ .phy_id_mask = QCA8K_PHY_ID_MASK, -+ .name = "Qualcomm Atheros 8327-A internal PHY", - /* PHY_GBIT_FEATURES */ -- .probe = at803x_probe, -- .flags = PHY_IS_INTERNAL, -- .config_init = qca83xx_config_init, -- .soft_reset = genphy_soft_reset, -- .get_sset_count = at803x_get_sset_count, -- .get_strings = at803x_get_strings, -- .get_stats = at803x_get_stats, -+ .probe = at803x_probe, -+ .flags = PHY_IS_INTERNAL, -+ .config_init = qca83xx_config_init, -+ .soft_reset = genphy_soft_reset, -+ .get_sset_count = at803x_get_sset_count, -+ .get_strings = at803x_get_strings, -+ .get_stats = at803x_get_stats, - .suspend = genphy_suspend, - .resume = genphy_resume, - }, { - /* QCA8327-B from switch QCA8327-BL1A */ -- .phy_id = QCA8327_B_PHY_ID, -- .phy_id_mask = QCA8K_PHY_ID_MASK, -- .name = "QCA PHY 8327-B", -+ .phy_id = QCA8327_B_PHY_ID, -+ .phy_id_mask = QCA8K_PHY_ID_MASK, -+ .name = "Qualcomm Atheros 8327-B internal PHY", - /* PHY_GBIT_FEATURES */ -- .probe = at803x_probe, -- .flags = PHY_IS_INTERNAL, -- .config_init = qca83xx_config_init, -- .soft_reset = genphy_soft_reset, -- .get_sset_count = at803x_get_sset_count, -- .get_strings = at803x_get_strings, -- .get_stats = at803x_get_stats, -+ .probe = at803x_probe, -+ .flags = PHY_IS_INTERNAL, -+ .config_init = qca83xx_config_init, -+ .soft_reset = genphy_soft_reset, -+ .get_sset_count = at803x_get_sset_count, -+ .get_strings = at803x_get_strings, -+ .get_stats = at803x_get_stats, - .suspend = genphy_suspend, - .resume = genphy_resume, - }, }; diff --git a/target/linux/generic/backport-6.1/746-v5.16-01-net-phy-at803x-fix-resume-for-QCA8327-phy.patch b/target/linux/generic/backport-6.1/746-v5.16-01-net-phy-at803x-fix-resume-for-QCA8327-phy.patch deleted file mode 100644 index 09797ae83bcc..000000000000 --- a/target/linux/generic/backport-6.1/746-v5.16-01-net-phy-at803x-fix-resume-for-QCA8327-phy.patch +++ /dev/null @@ -1,131 +0,0 @@ -From ba3c01ee02ed0d821c9f241f179bbc9457542b8f Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Sun, 10 Oct 2021 00:46:15 +0200 -Subject: net: phy: at803x: fix resume for QCA8327 phy - -From Documentation phy resume triggers phy reset and restart -auto-negotiation. 
Add a dedicated function to wait reset to finish as -it was notice a regression where port sometime are not reliable after a -suspend/resume session. The reset wait logic is copied from phy_poll_reset. -Add dedicated suspend function to use genphy_suspend only with QCA8337 -phy and set only additional debug settings for QCA8327. With more test -it was reported that QCA8327 doesn't proprely support this mode and -using this cause the unreliability of the switch ports, especially the -malfunction of the port0. - -Fixes: 15b9df4ece17 ("net: phy: at803x: add resume/suspend function to qca83xx phy") -Signed-off-by: Ansuel Smith -Signed-off-by: David S. Miller ---- - drivers/net/phy/at803x.c | 69 +++++++++++++++++++++++++++++++++++++++++++----- - 1 file changed, 63 insertions(+), 6 deletions(-) - ---- a/drivers/net/phy/at803x.c -+++ b/drivers/net/phy/at803x.c -@@ -92,9 +92,14 @@ - #define AT803X_DEBUG_REG_5 0x05 - #define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8) - -+#define AT803X_DEBUG_REG_HIB_CTRL 0x0b -+#define AT803X_DEBUG_HIB_CTRL_SEL_RST_80U BIT(10) -+#define AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE BIT(13) -+ - #define AT803X_DEBUG_REG_3C 0x3C - - #define AT803X_DEBUG_REG_3D 0x3D -+#define AT803X_DEBUG_GATE_CLK_IN1000 BIT(6) - - #define AT803X_DEBUG_REG_1F 0x1F - #define AT803X_DEBUG_PLL_ON BIT(2) -@@ -1304,6 +1309,58 @@ static int qca83xx_config_init(struct ph - return 0; - } - -+static int qca83xx_resume(struct phy_device *phydev) -+{ -+ int ret, val; -+ -+ /* Skip reset if not suspended */ -+ if (!phydev->suspended) -+ return 0; -+ -+ /* Reinit the port, reset values set by suspend */ -+ qca83xx_config_init(phydev); -+ -+ /* Reset the port on port resume */ -+ phy_set_bits(phydev, MII_BMCR, BMCR_RESET | BMCR_ANENABLE); -+ -+ /* On resume from suspend the switch execute a reset and -+ * restart auto-negotiation. Wait for reset to complete. -+ */ -+ ret = phy_read_poll_timeout(phydev, MII_BMCR, val, !(val & BMCR_RESET), -+ 50000, 600000, true); -+ if (ret) -+ return ret; -+ -+ msleep(1); -+ -+ return 0; -+} -+ -+static int qca83xx_suspend(struct phy_device *phydev) -+{ -+ u16 mask = 0; -+ -+ /* Only QCA8337 support actual suspend. -+ * QCA8327 cause port unreliability when phy suspend -+ * is set. 
-+ */ -+ if (phydev->drv->phy_id == QCA8337_PHY_ID) { -+ genphy_suspend(phydev); -+ } else { -+ mask |= ~(BMCR_SPEED1000 | BMCR_FULLDPLX); -+ phy_modify(phydev, MII_BMCR, mask, 0); -+ } -+ -+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_3D, -+ AT803X_DEBUG_GATE_CLK_IN1000, 0); -+ -+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_HIB_CTRL, -+ AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE | -+ AT803X_DEBUG_HIB_CTRL_SEL_RST_80U, 0); -+ -+ return 0; -+} -+ - static struct phy_driver at803x_driver[] = { - { - /* Qualcomm Atheros AR8035 */ -@@ -1413,8 +1470,8 @@ static struct phy_driver at803x_driver[] - .get_sset_count = at803x_get_sset_count, - .get_strings = at803x_get_strings, - .get_stats = at803x_get_stats, -- .suspend = genphy_suspend, -- .resume = genphy_resume, -+ .suspend = qca83xx_suspend, -+ .resume = qca83xx_resume, - }, { - /* QCA8327-A from switch QCA8327-AL1A */ - .phy_id = QCA8327_A_PHY_ID, -@@ -1428,8 +1485,8 @@ static struct phy_driver at803x_driver[] - .get_sset_count = at803x_get_sset_count, - .get_strings = at803x_get_strings, - .get_stats = at803x_get_stats, -- .suspend = genphy_suspend, -- .resume = genphy_resume, -+ .suspend = qca83xx_suspend, -+ .resume = qca83xx_resume, - }, { - /* QCA8327-B from switch QCA8327-BL1A */ - .phy_id = QCA8327_B_PHY_ID, -@@ -1443,8 +1500,8 @@ static struct phy_driver at803x_driver[] - .get_sset_count = at803x_get_sset_count, - .get_strings = at803x_get_strings, - .get_stats = at803x_get_stats, -- .suspend = genphy_suspend, -- .resume = genphy_resume, -+ .suspend = qca83xx_suspend, -+ .resume = qca83xx_resume, - }, }; - - module_phy_driver(at803x_driver); diff --git a/target/linux/generic/backport-6.1/746-v5.16-02-net-phy-at803x-add-DAC-amplitude-fix-for-8327-phy.patch b/target/linux/generic/backport-6.1/746-v5.16-02-net-phy-at803x-add-DAC-amplitude-fix-for-8327-phy.patch deleted file mode 100644 index c504c37c84f8..000000000000 --- a/target/linux/generic/backport-6.1/746-v5.16-02-net-phy-at803x-add-DAC-amplitude-fix-for-8327-phy.patch +++ /dev/null @@ -1,91 +0,0 @@ -From 1ca8311949aec5c9447645731ef1c6bc5bd71350 Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Sun, 10 Oct 2021 00:46:16 +0200 -Subject: net: phy: at803x: add DAC amplitude fix for 8327 phy - -QCA8327 internal phy require DAC amplitude adjustement set to +6% with -100m speed. Also add additional define to report a change of the same -reg in QCA8337. (different scope it does set 1000m voltage) -Add link_change_notify function to set the proper amplitude adjustement -on PHY_RUNNING state and disable on any other state. - -Fixes: b4df02b562f4 ("net: phy: at803x: add support for qca 8327 A variant internal phy") -Signed-off-by: Ansuel Smith -Signed-off-by: David S. Miller ---- - drivers/net/phy/at803x.c | 33 +++++++++++++++++++++++++++++++++ - 1 file changed, 33 insertions(+) - ---- a/drivers/net/phy/at803x.c -+++ b/drivers/net/phy/at803x.c -@@ -87,6 +87,8 @@ - #define AT803X_PSSR_MR_AN_COMPLETE 0x0200 - - #define AT803X_DEBUG_REG_0 0x00 -+#define QCA8327_DEBUG_MANU_CTRL_EN BIT(2) -+#define QCA8337_DEBUG_MANU_CTRL_EN GENMASK(3, 2) - #define AT803X_DEBUG_RX_CLK_DLY_EN BIT(15) - - #define AT803X_DEBUG_REG_5 0x05 -@@ -1306,9 +1308,37 @@ static int qca83xx_config_init(struct ph - break; - } - -+ /* QCA8327 require DAC amplitude adjustment for 100m set to +6%. -+ * Disable on init and enable only with 100m speed following -+ * qca original source code. 
-+ */ -+ if (phydev->drv->phy_id == QCA8327_A_PHY_ID || -+ phydev->drv->phy_id == QCA8327_B_PHY_ID) -+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_0, -+ QCA8327_DEBUG_MANU_CTRL_EN, 0); -+ - return 0; - } - -+static void qca83xx_link_change_notify(struct phy_device *phydev) -+{ -+ /* QCA8337 doesn't require DAC Amplitude adjustement */ -+ if (phydev->drv->phy_id == QCA8337_PHY_ID) -+ return; -+ -+ /* Set DAC Amplitude adjustment to +6% for 100m on link running */ -+ if (phydev->state == PHY_RUNNING) { -+ if (phydev->speed == SPEED_100) -+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_0, -+ QCA8327_DEBUG_MANU_CTRL_EN, -+ QCA8327_DEBUG_MANU_CTRL_EN); -+ } else { -+ /* Reset DAC Amplitude adjustment */ -+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_0, -+ QCA8327_DEBUG_MANU_CTRL_EN, 0); -+ } -+} -+ - static int qca83xx_resume(struct phy_device *phydev) - { - int ret, val; -@@ -1463,6 +1493,7 @@ static struct phy_driver at803x_driver[] - .phy_id_mask = QCA8K_PHY_ID_MASK, - .name = "Qualcomm Atheros 8337 internal PHY", - /* PHY_GBIT_FEATURES */ -+ .link_change_notify = qca83xx_link_change_notify, - .probe = at803x_probe, - .flags = PHY_IS_INTERNAL, - .config_init = qca83xx_config_init, -@@ -1478,6 +1509,7 @@ static struct phy_driver at803x_driver[] - .phy_id_mask = QCA8K_PHY_ID_MASK, - .name = "Qualcomm Atheros 8327-A internal PHY", - /* PHY_GBIT_FEATURES */ -+ .link_change_notify = qca83xx_link_change_notify, - .probe = at803x_probe, - .flags = PHY_IS_INTERNAL, - .config_init = qca83xx_config_init, -@@ -1493,6 +1525,7 @@ static struct phy_driver at803x_driver[] - .phy_id_mask = QCA8K_PHY_ID_MASK, - .name = "Qualcomm Atheros 8327-B internal PHY", - /* PHY_GBIT_FEATURES */ -+ .link_change_notify = qca83xx_link_change_notify, - .probe = at803x_probe, - .flags = PHY_IS_INTERNAL, - .config_init = qca83xx_config_init, diff --git a/target/linux/generic/backport-6.1/746-v5.16-03-net-phy-at803x-enable-prefer-master-for-83xx-interna.patch b/target/linux/generic/backport-6.1/746-v5.16-03-net-phy-at803x-enable-prefer-master-for-83xx-interna.patch deleted file mode 100644 index 9f880593f12c..000000000000 --- a/target/linux/generic/backport-6.1/746-v5.16-03-net-phy-at803x-enable-prefer-master-for-83xx-interna.patch +++ /dev/null @@ -1,27 +0,0 @@ -From 9d1c29b4028557a496be9c5eb2b4b86063700636 Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Sun, 10 Oct 2021 00:46:17 +0200 -Subject: net: phy: at803x: enable prefer master for 83xx internal phy - -From original QCA source code the port was set to prefer master as port -type in 1000BASE-T mode. Apply the same settings also here. - -Signed-off-by: Ansuel Smith -Reviewed-by: Andrew Lunn -Signed-off-by: David S. 
Miller ---- - drivers/net/phy/at803x.c | 3 +++ - 1 file changed, 3 insertions(+) - ---- a/drivers/net/phy/at803x.c -+++ b/drivers/net/phy/at803x.c -@@ -1317,6 +1317,9 @@ static int qca83xx_config_init(struct ph - at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_0, - QCA8327_DEBUG_MANU_CTRL_EN, 0); - -+ /* Following original QCA sourcecode set port to prefer master */ -+ phy_set_bits(phydev, MII_CTRL1000, CTL1000_PREFER_MASTER); -+ - return 0; - } - diff --git a/target/linux/generic/backport-6.1/746-v5.16-04-net-phy-at803x-better-describe-debug-regs.patch b/target/linux/generic/backport-6.1/746-v5.16-04-net-phy-at803x-better-describe-debug-regs.patch deleted file mode 100644 index 89e9b3f66231..000000000000 --- a/target/linux/generic/backport-6.1/746-v5.16-04-net-phy-at803x-better-describe-debug-regs.patch +++ /dev/null @@ -1,127 +0,0 @@ -From 67999555ff42e91de7654488d9a7735bd9e84555 Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Sun, 10 Oct 2021 00:46:18 +0200 -Subject: net: phy: at803x: better describe debug regs - -Give a name to known debug regs from Documentation instead of using -unknown hex values. - -Signed-off-by: Ansuel Smith -Reviewed-by: Andrew Lunn -Signed-off-by: David S. Miller ---- - drivers/net/phy/at803x.c | 30 +++++++++++++++--------------- - 1 file changed, 15 insertions(+), 15 deletions(-) - ---- a/drivers/net/phy/at803x.c -+++ b/drivers/net/phy/at803x.c -@@ -86,12 +86,12 @@ - #define AT803X_PSSR 0x11 /*PHY-Specific Status Register*/ - #define AT803X_PSSR_MR_AN_COMPLETE 0x0200 - --#define AT803X_DEBUG_REG_0 0x00 -+#define AT803X_DEBUG_ANALOG_TEST_CTRL 0x00 - #define QCA8327_DEBUG_MANU_CTRL_EN BIT(2) - #define QCA8337_DEBUG_MANU_CTRL_EN GENMASK(3, 2) - #define AT803X_DEBUG_RX_CLK_DLY_EN BIT(15) - --#define AT803X_DEBUG_REG_5 0x05 -+#define AT803X_DEBUG_SYSTEM_CTRL_MODE 0x05 - #define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8) - - #define AT803X_DEBUG_REG_HIB_CTRL 0x0b -@@ -100,7 +100,7 @@ - - #define AT803X_DEBUG_REG_3C 0x3C - --#define AT803X_DEBUG_REG_3D 0x3D -+#define AT803X_DEBUG_REG_GREEN 0x3D - #define AT803X_DEBUG_GATE_CLK_IN1000 BIT(6) - - #define AT803X_DEBUG_REG_1F 0x1F -@@ -284,25 +284,25 @@ static int at803x_read_page(struct phy_d - - static int at803x_enable_rx_delay(struct phy_device *phydev) - { -- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_0, 0, -+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, 0, - AT803X_DEBUG_RX_CLK_DLY_EN); - } - - static int at803x_enable_tx_delay(struct phy_device *phydev) - { -- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_5, 0, -+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE, 0, - AT803X_DEBUG_TX_CLK_DLY_EN); - } - - static int at803x_disable_rx_delay(struct phy_device *phydev) - { -- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_0, -+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, - AT803X_DEBUG_RX_CLK_DLY_EN, 0); - } - - static int at803x_disable_tx_delay(struct phy_device *phydev) - { -- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_5, -+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE, - AT803X_DEBUG_TX_CLK_DLY_EN, 0); - } - -@@ -1292,9 +1292,9 @@ static int qca83xx_config_init(struct ph - switch (switch_revision) { - case 1: - /* For 100M waveform */ -- at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_0, 0x02ea); -+ at803x_debug_reg_write(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, 0x02ea); - /* Turn on Gigabit clock */ -- at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_3D, 0x68a0); -+ at803x_debug_reg_write(phydev, 
AT803X_DEBUG_REG_GREEN, 0x68a0); - break; - - case 2: -@@ -1302,8 +1302,8 @@ static int qca83xx_config_init(struct ph - fallthrough; - case 4: - phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_AZ_DEBUG, 0x803f); -- at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_3D, 0x6860); -- at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_5, 0x2c46); -+ at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_GREEN, 0x6860); -+ at803x_debug_reg_write(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE, 0x2c46); - at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_3C, 0x6000); - break; - } -@@ -1314,7 +1314,7 @@ static int qca83xx_config_init(struct ph - */ - if (phydev->drv->phy_id == QCA8327_A_PHY_ID || - phydev->drv->phy_id == QCA8327_B_PHY_ID) -- at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_0, -+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, - QCA8327_DEBUG_MANU_CTRL_EN, 0); - - /* Following original QCA sourcecode set port to prefer master */ -@@ -1332,12 +1332,12 @@ static void qca83xx_link_change_notify(s - /* Set DAC Amplitude adjustment to +6% for 100m on link running */ - if (phydev->state == PHY_RUNNING) { - if (phydev->speed == SPEED_100) -- at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_0, -+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, - QCA8327_DEBUG_MANU_CTRL_EN, - QCA8327_DEBUG_MANU_CTRL_EN); - } else { - /* Reset DAC Amplitude adjustment */ -- at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_0, -+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, - QCA8327_DEBUG_MANU_CTRL_EN, 0); - } - } -@@ -1384,7 +1384,7 @@ static int qca83xx_suspend(struct phy_de - phy_modify(phydev, MII_BMCR, mask, 0); - } - -- at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_3D, -+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_GREEN, - AT803X_DEBUG_GATE_CLK_IN1000, 0); - - at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_HIB_CTRL, diff --git a/target/linux/generic/backport-6.1/747-v5.16-01-dsa-qca8k-add-mac-power-sel-support.patch b/target/linux/generic/backport-6.1/747-v5.16-01-dsa-qca8k-add-mac-power-sel-support.patch deleted file mode 100644 index c8d424de38c5..000000000000 --- a/target/linux/generic/backport-6.1/747-v5.16-01-dsa-qca8k-add-mac-power-sel-support.patch +++ /dev/null @@ -1,80 +0,0 @@ -From d8b6f5bae6d3b648a67b6958cb98e4e97256d652 Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Thu, 14 Oct 2021 00:39:06 +0200 -Subject: dsa: qca8k: add mac_power_sel support - -Add missing mac power sel support needed for ipq8064/5 SoC that require -1.8v for the internal regulator port instead of the default 1.5v. -If other device needs this, consider adding a dedicated binding to -support this. - -Signed-off-by: Ansuel Smith -Reviewed-by: Vladimir Oltean -Reviewed-by: Florian Fainelli -Signed-off-by: David S. Miller ---- - drivers/net/dsa/qca8k.c | 31 +++++++++++++++++++++++++++++++ - drivers/net/dsa/qca8k.h | 5 +++++ - 2 files changed, 36 insertions(+) - ---- a/drivers/net/dsa/qca8k.c -+++ b/drivers/net/dsa/qca8k.c -@@ -951,6 +951,33 @@ qca8k_setup_of_rgmii_delay(struct qca8k_ - } - - static int -+qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv) -+{ -+ u32 mask = 0; -+ int ret = 0; -+ -+ /* SoC specific settings for ipq8064. -+ * If more device require this consider adding -+ * a dedicated binding. 
-+ */ -+ if (of_machine_is_compatible("qcom,ipq8064")) -+ mask |= QCA8K_MAC_PWR_RGMII0_1_8V; -+ -+ /* SoC specific settings for ipq8065 */ -+ if (of_machine_is_compatible("qcom,ipq8065")) -+ mask |= QCA8K_MAC_PWR_RGMII1_1_8V; -+ -+ if (mask) { -+ ret = qca8k_rmw(priv, QCA8K_REG_MAC_PWR_SEL, -+ QCA8K_MAC_PWR_RGMII0_1_8V | -+ QCA8K_MAC_PWR_RGMII1_1_8V, -+ mask); -+ } -+ -+ return ret; -+} -+ -+static int - qca8k_setup(struct dsa_switch *ds) - { - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -@@ -979,6 +1006,10 @@ qca8k_setup(struct dsa_switch *ds) - if (ret) - return ret; - -+ ret = qca8k_setup_mac_pwr_sel(priv); -+ if (ret) -+ return ret; -+ - /* Enable CPU Port */ - ret = qca8k_reg_set(priv, QCA8K_REG_GLOBAL_FW_CTRL0, - QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN); ---- a/drivers/net/dsa/qca8k.h -+++ b/drivers/net/dsa/qca8k.h -@@ -100,6 +100,11 @@ - #define QCA8K_SGMII_MODE_CTRL_PHY (1 << 22) - #define QCA8K_SGMII_MODE_CTRL_MAC (2 << 22) - -+/* MAC_PWR_SEL registers */ -+#define QCA8K_REG_MAC_PWR_SEL 0x0e4 -+#define QCA8K_MAC_PWR_RGMII1_1_8V BIT(18) -+#define QCA8K_MAC_PWR_RGMII0_1_8V BIT(19) -+ - /* EEE control registers */ - #define QCA8K_REG_EEE_CTRL 0x100 - #define QCA8K_REG_EEE_CTRL_LPI_EN(_i) ((_i + 1) * 2) diff --git a/target/linux/generic/backport-6.1/747-v5.16-02-dt-bindings-net-dsa-qca8k-Add-SGMII-clock-phase-prop.patch b/target/linux/generic/backport-6.1/747-v5.16-02-dt-bindings-net-dsa-qca8k-Add-SGMII-clock-phase-prop.patch deleted file mode 100644 index bd768ec27d4d..000000000000 --- a/target/linux/generic/backport-6.1/747-v5.16-02-dt-bindings-net-dsa-qca8k-Add-SGMII-clock-phase-prop.patch +++ /dev/null @@ -1,30 +0,0 @@ -From fdbf35df9c091db9c46e57e9938e3f7a4f603a7c Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Thu, 14 Oct 2021 00:39:07 +0200 -Subject: dt-bindings: net: dsa: qca8k: Add SGMII clock phase properties - -Add names and descriptions of additional PORT0_PAD_CTRL properties. -qca,sgmii-(rx|tx)clk-falling-edge are for setting the respective clock -phase to failling edge. - -Co-developed-by: Matthew Hagan -Signed-off-by: Matthew Hagan -Signed-off-by: Ansuel Smith -Signed-off-by: David S. Miller ---- - Documentation/devicetree/bindings/net/dsa/qca8k.txt | 4 ++++ - 1 file changed, 4 insertions(+) - ---- a/Documentation/devicetree/bindings/net/dsa/qca8k.txt -+++ b/Documentation/devicetree/bindings/net/dsa/qca8k.txt -@@ -37,6 +37,10 @@ A CPU port node has the following option - managed entity. See - Documentation/devicetree/bindings/net/fixed-link.txt - for details. -+- qca,sgmii-rxclk-falling-edge: Set the receive clock phase to falling edge. -+ Mostly used in qca8327 with CPU port 0 set to -+ sgmii. -+- qca,sgmii-txclk-falling-edge: Set the transmit clock phase to falling edge. - - For QCA8K the 'fixed-link' sub-node supports only the following properties: - diff --git a/target/linux/generic/backport-6.1/747-v5.16-03-net-dsa-qca8k-add-support-for-sgmii-falling-edge.patch b/target/linux/generic/backport-6.1/747-v5.16-03-net-dsa-qca8k-add-support-for-sgmii-falling-edge.patch deleted file mode 100644 index e464452d82c1..000000000000 --- a/target/linux/generic/backport-6.1/747-v5.16-03-net-dsa-qca8k-add-support-for-sgmii-falling-edge.patch +++ /dev/null @@ -1,127 +0,0 @@ -From 6c43809bf1bee76c434e365a26546a92a5fbec14 Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Thu, 14 Oct 2021 00:39:08 +0200 -Subject: net: dsa: qca8k: add support for sgmii falling edge - -Add support for this in the qca8k driver. Also add support for SGMII -rx/tx clock falling edge. 
This is only present for pad0, pad5 and -pad6 have these bit reserved from Documentation. Add a comment that this -is hardcoded to PAD0 as qca8327/28/34/37 have an unique sgmii line and -setting falling in port0 applies to both configuration with sgmii used -for port0 or port6. - -Co-developed-by: Matthew Hagan -Signed-off-by: Matthew Hagan -Signed-off-by: Ansuel Smith -Signed-off-by: David S. Miller ---- - drivers/net/dsa/qca8k.c | 63 +++++++++++++++++++++++++++++++++++++++++++++++++ - drivers/net/dsa/qca8k.h | 4 ++++ - 2 files changed, 67 insertions(+) - ---- a/drivers/net/dsa/qca8k.c -+++ b/drivers/net/dsa/qca8k.c -@@ -978,6 +978,42 @@ qca8k_setup_mac_pwr_sel(struct qca8k_pri - } - - static int -+qca8k_parse_port_config(struct qca8k_priv *priv) -+{ -+ struct device_node *port_dn; -+ phy_interface_t mode; -+ struct dsa_port *dp; -+ int port, ret; -+ -+ /* We have 2 CPU port. Check them */ -+ for (port = 0; port < QCA8K_NUM_PORTS; port++) { -+ /* Skip every other port */ -+ if (port != 0 && port != 6) -+ continue; -+ -+ dp = dsa_to_port(priv->ds, port); -+ port_dn = dp->dn; -+ -+ if (!of_device_is_available(port_dn)) -+ continue; -+ -+ ret = of_get_phy_mode(port_dn, &mode); -+ if (ret) -+ continue; -+ -+ if (mode == PHY_INTERFACE_MODE_SGMII) { -+ if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge")) -+ priv->sgmii_tx_clk_falling_edge = true; -+ -+ if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge")) -+ priv->sgmii_rx_clk_falling_edge = true; -+ } -+ } -+ -+ return 0; -+} -+ -+static int - qca8k_setup(struct dsa_switch *ds) - { - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -@@ -990,6 +1026,11 @@ qca8k_setup(struct dsa_switch *ds) - return -EINVAL; - } - -+ /* Parse CPU port config to be later used in phy_link mac_config */ -+ ret = qca8k_parse_port_config(priv); -+ if (ret) -+ return ret; -+ - mutex_init(&priv->reg_mutex); - - /* Start by setting up the register mapping */ -@@ -1274,6 +1315,28 @@ qca8k_phylink_mac_config(struct dsa_swit - } - - qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val); -+ -+ /* For qca8327/qca8328/qca8334/qca8338 sgmii is unique and -+ * falling edge is set writing in the PORT0 PAD reg -+ */ -+ if (priv->switch_id == QCA8K_ID_QCA8327 || -+ priv->switch_id == QCA8K_ID_QCA8337) -+ reg = QCA8K_REG_PORT0_PAD_CTRL; -+ -+ val = 0; -+ -+ /* SGMII Clock phase configuration */ -+ if (priv->sgmii_rx_clk_falling_edge) -+ val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE; -+ -+ if (priv->sgmii_tx_clk_falling_edge) -+ val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE; -+ -+ if (val) -+ ret = qca8k_rmw(priv, reg, -+ QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE | -+ QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE, -+ val); - break; - default: - dev_err(ds->dev, "xMII mode %s not supported for port %d\n", ---- a/drivers/net/dsa/qca8k.h -+++ b/drivers/net/dsa/qca8k.h -@@ -35,6 +35,8 @@ - #define QCA8K_MASK_CTRL_DEVICE_ID_MASK GENMASK(15, 8) - #define QCA8K_MASK_CTRL_DEVICE_ID(x) ((x) >> 8) - #define QCA8K_REG_PORT0_PAD_CTRL 0x004 -+#define QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE BIT(19) -+#define QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE BIT(18) - #define QCA8K_REG_PORT5_PAD_CTRL 0x008 - #define QCA8K_REG_PORT6_PAD_CTRL 0x00c - #define QCA8K_PORT_PAD_RGMII_EN BIT(26) -@@ -260,6 +262,8 @@ struct qca8k_priv { - u8 switch_revision; - u8 rgmii_tx_delay; - u8 rgmii_rx_delay; -+ bool sgmii_rx_clk_falling_edge; -+ bool sgmii_tx_clk_falling_edge; - bool legacy_phy_port_mapping; - struct regmap *regmap; - struct mii_bus *bus; diff --git 
a/target/linux/generic/backport-6.1/747-v5.16-04-dt-bindings-net-dsa-qca8k-Document-support-for-CPU-p.patch b/target/linux/generic/backport-6.1/747-v5.16-04-dt-bindings-net-dsa-qca8k-Document-support-for-CPU-p.patch deleted file mode 100644 index 606ac0af3da2..000000000000 --- a/target/linux/generic/backport-6.1/747-v5.16-04-dt-bindings-net-dsa-qca8k-Document-support-for-CPU-p.patch +++ /dev/null @@ -1,29 +0,0 @@ -From 731d613338ec6de482053ffa3f71be2325b0f8eb Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Thu, 14 Oct 2021 00:39:09 +0200 -Subject: dt-bindings: net: dsa: qca8k: Document support for CPU port 6 - -The switch now support CPU port to be set 6 instead of be hardcoded to -0. Document support for it and describe logic selection. - -Signed-off-by: Ansuel Smith -Signed-off-by: David S. Miller ---- - Documentation/devicetree/bindings/net/dsa/qca8k.txt | 6 +++++- - 1 file changed, 5 insertions(+), 1 deletion(-) - ---- a/Documentation/devicetree/bindings/net/dsa/qca8k.txt -+++ b/Documentation/devicetree/bindings/net/dsa/qca8k.txt -@@ -29,7 +29,11 @@ the mdio MASTER is used as communication - Don't use mixed external and internal mdio-bus configurations, as this is - not supported by the hardware. - --The CPU port of this switch is always port 0. -+This switch support 2 CPU port. Normally and advised configuration is with -+CPU port set to port 0. It is also possible to set the CPU port to port 6 -+if the device requires it. The driver will configure the switch to the defined -+port. With both CPU port declared the first CPU port is selected as primary -+and the secondary CPU ignored. - - A CPU port node has the following optional node: - diff --git a/target/linux/generic/backport-6.1/747-v5.16-05-net-dsa-qca8k-add-support-for-cpu-port-6.patch b/target/linux/generic/backport-6.1/747-v5.16-05-net-dsa-qca8k-add-support-for-cpu-port-6.patch deleted file mode 100644 index 320db8fa9f9f..000000000000 --- a/target/linux/generic/backport-6.1/747-v5.16-05-net-dsa-qca8k-add-support-for-cpu-port-6.patch +++ /dev/null @@ -1,153 +0,0 @@ -From 3fcf734aa482487df83cf8f18608438fcf59127f Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Thu, 14 Oct 2021 00:39:10 +0200 -Subject: net: dsa: qca8k: add support for cpu port 6 - -Currently CPU port is always hardcoded to port 0. This switch have 2 CPU -ports. The original intention of this driver seems to be use the -mac06_exchange bit to swap MAC0 with MAC6 in the strange configuration -where device have connected only the CPU port 6. To skip the -introduction of a new binding, rework the driver to address the -secondary CPU port as primary and drop any reference of hardcoded port. -With configuration of mac06 exchange, just skip the definition of port0 -and define the CPU port as a secondary. The driver will autoconfigure -the switch to use that as the primary CPU port. - -Signed-off-by: Ansuel Smith -Signed-off-by: David S. Miller ---- - drivers/net/dsa/qca8k.c | 51 ++++++++++++++++++++++++++++++++++--------------- - drivers/net/dsa/qca8k.h | 2 -- - 2 files changed, 36 insertions(+), 17 deletions(-) - ---- a/drivers/net/dsa/qca8k.c -+++ b/drivers/net/dsa/qca8k.c -@@ -977,6 +977,22 @@ qca8k_setup_mac_pwr_sel(struct qca8k_pri - return ret; - } - -+static int qca8k_find_cpu_port(struct dsa_switch *ds) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ -+ /* Find the connected cpu port. Valid port are 0 or 6 */ -+ if (dsa_is_cpu_port(ds, 0)) -+ return 0; -+ -+ dev_dbg(priv->dev, "port 0 is not the CPU port. 
Checking port 6"); -+ -+ if (dsa_is_cpu_port(ds, 6)) -+ return 6; -+ -+ return -EINVAL; -+} -+ - static int - qca8k_parse_port_config(struct qca8k_priv *priv) - { -@@ -1017,13 +1033,13 @@ static int - qca8k_setup(struct dsa_switch *ds) - { - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -- int ret, i; -+ int cpu_port, ret, i; - u32 mask; - -- /* Make sure that port 0 is the cpu port */ -- if (!dsa_is_cpu_port(ds, 0)) { -- dev_err(priv->dev, "port 0 is not the CPU port"); -- return -EINVAL; -+ cpu_port = qca8k_find_cpu_port(ds); -+ if (cpu_port < 0) { -+ dev_err(priv->dev, "No cpu port configured in both cpu port0 and port6"); -+ return cpu_port; - } - - /* Parse CPU port config to be later used in phy_link mac_config */ -@@ -1065,7 +1081,7 @@ qca8k_setup(struct dsa_switch *ds) - dev_warn(priv->dev, "mib init failed"); - - /* Enable QCA header mode on the cpu port */ -- ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(QCA8K_CPU_PORT), -+ ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(cpu_port), - QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_TX_S | - QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_RX_S); - if (ret) { -@@ -1087,10 +1103,10 @@ qca8k_setup(struct dsa_switch *ds) - - /* Forward all unknown frames to CPU port for Linux processing */ - ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1, -- BIT(0) << QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S | -- BIT(0) << QCA8K_GLOBAL_FW_CTRL1_BC_DP_S | -- BIT(0) << QCA8K_GLOBAL_FW_CTRL1_MC_DP_S | -- BIT(0) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S); -+ BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S | -+ BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_BC_DP_S | -+ BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_MC_DP_S | -+ BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S); - if (ret) - return ret; - -@@ -1098,7 +1114,7 @@ qca8k_setup(struct dsa_switch *ds) - for (i = 0; i < QCA8K_NUM_PORTS; i++) { - /* CPU port gets connected to all user ports of the switch */ - if (dsa_is_cpu_port(ds, i)) { -- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(QCA8K_CPU_PORT), -+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(cpu_port), - QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds)); - if (ret) - return ret; -@@ -1110,7 +1126,7 @@ qca8k_setup(struct dsa_switch *ds) - - ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i), - QCA8K_PORT_LOOKUP_MEMBER, -- BIT(QCA8K_CPU_PORT)); -+ BIT(cpu_port)); - if (ret) - return ret; - -@@ -1616,9 +1632,12 @@ static int - qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br) - { - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -- int port_mask = BIT(QCA8K_CPU_PORT); -+ int port_mask, cpu_port; - int i, ret; - -+ cpu_port = dsa_to_port(ds, port)->cpu_dp->index; -+ port_mask = BIT(cpu_port); -+ - for (i = 1; i < QCA8K_NUM_PORTS; i++) { - if (dsa_to_port(ds, i)->bridge_dev != br) - continue; -@@ -1645,7 +1664,9 @@ static void - qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br) - { - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -- int i; -+ int cpu_port, i; -+ -+ cpu_port = dsa_to_port(ds, port)->cpu_dp->index; - - for (i = 1; i < QCA8K_NUM_PORTS; i++) { - if (dsa_to_port(ds, i)->bridge_dev != br) -@@ -1662,7 +1683,7 @@ qca8k_port_bridge_leave(struct dsa_switc - * this port - */ - qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), -- QCA8K_PORT_LOOKUP_MEMBER, BIT(QCA8K_CPU_PORT)); -+ QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port)); - } - - static int ---- a/drivers/net/dsa/qca8k.h -+++ b/drivers/net/dsa/qca8k.h -@@ -24,8 +24,6 @@ - - #define QCA8K_NUM_FDB_RECORDS 2048 - --#define QCA8K_CPU_PORT 0 -- - 
#define QCA8K_PORT_VID_DEF 1 - - /* Global control registers */ diff --git a/target/linux/generic/backport-6.1/747-v5.16-06-net-dsa-qca8k-rework-rgmii-delay-logic-and-scan-for-.patch b/target/linux/generic/backport-6.1/747-v5.16-06-net-dsa-qca8k-rework-rgmii-delay-logic-and-scan-for-.patch deleted file mode 100644 index de201764f987..000000000000 --- a/target/linux/generic/backport-6.1/747-v5.16-06-net-dsa-qca8k-rework-rgmii-delay-logic-and-scan-for-.patch +++ /dev/null @@ -1,295 +0,0 @@ -From 5654ec78dd7e64b1e04777b24007344329e6a63b Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Thu, 14 Oct 2021 00:39:11 +0200 -Subject: net: dsa: qca8k: rework rgmii delay logic and scan for cpu port 6 - -Future proof commit. This switch have 2 CPU ports and one valid -configuration is first CPU port set to sgmii and second CPU port set to -rgmii-id. The current implementation detects delay only for CPU port -zero set to rgmii and doesn't count any delay set in a secondary CPU -port. Drop the current delay scan function and move it to the sgmii -parser function to generalize and implicitly add support for secondary -CPU port set to rgmii-id. Introduce new logic where delay is enabled -also with internal delay binding declared and rgmii set as PHY mode. - -Signed-off-by: Ansuel Smith -Signed-off-by: David S. Miller ---- - drivers/net/dsa/qca8k.c | 165 ++++++++++++++++++++++++------------------------ - drivers/net/dsa/qca8k.h | 10 ++- - 2 files changed, 89 insertions(+), 86 deletions(-) - ---- a/drivers/net/dsa/qca8k.c -+++ b/drivers/net/dsa/qca8k.c -@@ -889,68 +889,6 @@ qca8k_setup_mdio_bus(struct qca8k_priv * - } - - static int --qca8k_setup_of_rgmii_delay(struct qca8k_priv *priv) --{ -- struct device_node *port_dn; -- phy_interface_t mode; -- struct dsa_port *dp; -- u32 val; -- -- /* CPU port is already checked */ -- dp = dsa_to_port(priv->ds, 0); -- -- port_dn = dp->dn; -- -- /* Check if port 0 is set to the correct type */ -- of_get_phy_mode(port_dn, &mode); -- if (mode != PHY_INTERFACE_MODE_RGMII_ID && -- mode != PHY_INTERFACE_MODE_RGMII_RXID && -- mode != PHY_INTERFACE_MODE_RGMII_TXID) { -- return 0; -- } -- -- switch (mode) { -- case PHY_INTERFACE_MODE_RGMII_ID: -- case PHY_INTERFACE_MODE_RGMII_RXID: -- if (of_property_read_u32(port_dn, "rx-internal-delay-ps", &val)) -- val = 2; -- else -- /* Switch regs accept value in ns, convert ps to ns */ -- val = val / 1000; -- -- if (val > QCA8K_MAX_DELAY) { -- dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value"); -- val = 3; -- } -- -- priv->rgmii_rx_delay = val; -- /* Stop here if we need to check only for rx delay */ -- if (mode != PHY_INTERFACE_MODE_RGMII_ID) -- break; -- -- fallthrough; -- case PHY_INTERFACE_MODE_RGMII_TXID: -- if (of_property_read_u32(port_dn, "tx-internal-delay-ps", &val)) -- val = 1; -- else -- /* Switch regs accept value in ns, convert ps to ns */ -- val = val / 1000; -- -- if (val > QCA8K_MAX_DELAY) { -- dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value"); -- val = 3; -- } -- -- priv->rgmii_tx_delay = val; -- break; -- default: -- return 0; -- } -- -- return 0; --} -- --static int - qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv) - { - u32 mask = 0; -@@ -996,19 +934,21 @@ static int qca8k_find_cpu_port(struct ds - static int - qca8k_parse_port_config(struct qca8k_priv *priv) - { -+ int port, cpu_port_index = 0, ret; - struct device_node *port_dn; - phy_interface_t mode; - struct dsa_port *dp; -- int port, ret; -+ u32 delay; - - /* We have 2 
CPU port. Check them */ -- for (port = 0; port < QCA8K_NUM_PORTS; port++) { -+ for (port = 0; port < QCA8K_NUM_PORTS && cpu_port_index < QCA8K_NUM_CPU_PORTS; port++) { - /* Skip every other port */ - if (port != 0 && port != 6) - continue; - - dp = dsa_to_port(priv->ds, port); - port_dn = dp->dn; -+ cpu_port_index++; - - if (!of_device_is_available(port_dn)) - continue; -@@ -1017,12 +957,54 @@ qca8k_parse_port_config(struct qca8k_pri - if (ret) - continue; - -- if (mode == PHY_INTERFACE_MODE_SGMII) { -+ switch (mode) { -+ case PHY_INTERFACE_MODE_RGMII: -+ case PHY_INTERFACE_MODE_RGMII_ID: -+ case PHY_INTERFACE_MODE_RGMII_TXID: -+ case PHY_INTERFACE_MODE_RGMII_RXID: -+ delay = 0; -+ -+ if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay)) -+ /* Switch regs accept value in ns, convert ps to ns */ -+ delay = delay / 1000; -+ else if (mode == PHY_INTERFACE_MODE_RGMII_ID || -+ mode == PHY_INTERFACE_MODE_RGMII_TXID) -+ delay = 1; -+ -+ if (delay > QCA8K_MAX_DELAY) { -+ dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value"); -+ delay = 3; -+ } -+ -+ priv->rgmii_tx_delay[cpu_port_index] = delay; -+ -+ delay = 0; -+ -+ if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay)) -+ /* Switch regs accept value in ns, convert ps to ns */ -+ delay = delay / 1000; -+ else if (mode == PHY_INTERFACE_MODE_RGMII_ID || -+ mode == PHY_INTERFACE_MODE_RGMII_RXID) -+ delay = 2; -+ -+ if (delay > QCA8K_MAX_DELAY) { -+ dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value"); -+ delay = 3; -+ } -+ -+ priv->rgmii_rx_delay[cpu_port_index] = delay; -+ -+ break; -+ case PHY_INTERFACE_MODE_SGMII: - if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge")) - priv->sgmii_tx_clk_falling_edge = true; - - if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge")) - priv->sgmii_rx_clk_falling_edge = true; -+ -+ break; -+ default: -+ continue; - } - } - -@@ -1059,10 +1041,6 @@ qca8k_setup(struct dsa_switch *ds) - if (ret) - return ret; - -- ret = qca8k_setup_of_rgmii_delay(priv); -- if (ret) -- return ret; -- - ret = qca8k_setup_mac_pwr_sel(priv); - if (ret) - return ret; -@@ -1229,8 +1207,8 @@ qca8k_phylink_mac_config(struct dsa_swit - const struct phylink_link_state *state) - { - struct qca8k_priv *priv = ds->priv; -- u32 reg, val; -- int ret; -+ int cpu_port_index, ret; -+ u32 reg, val, delay; - - switch (port) { - case 0: /* 1st CPU port */ -@@ -1242,6 +1220,7 @@ qca8k_phylink_mac_config(struct dsa_swit - return; - - reg = QCA8K_REG_PORT0_PAD_CTRL; -+ cpu_port_index = QCA8K_CPU_PORT0; - break; - case 1: - case 2: -@@ -1260,6 +1239,7 @@ qca8k_phylink_mac_config(struct dsa_swit - return; - - reg = QCA8K_REG_PORT6_PAD_CTRL; -+ cpu_port_index = QCA8K_CPU_PORT6; - break; - default: - dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port); -@@ -1274,23 +1254,40 @@ qca8k_phylink_mac_config(struct dsa_swit - - switch (state->interface) { - case PHY_INTERFACE_MODE_RGMII: -- /* RGMII mode means no delay so don't enable the delay */ -- qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN); -- break; - case PHY_INTERFACE_MODE_RGMII_ID: - case PHY_INTERFACE_MODE_RGMII_TXID: - case PHY_INTERFACE_MODE_RGMII_RXID: -- /* RGMII_ID needs internal delay. This is enabled through -- * PORT5_PAD_CTRL for all ports, rather than individual port -- * registers -+ val = QCA8K_PORT_PAD_RGMII_EN; -+ -+ /* Delay can be declared in 3 different way. 
-+ * Mode to rgmii and internal-delay standard binding defined -+ * rgmii-id or rgmii-tx/rx phy mode set. -+ * The parse logic set a delay different than 0 only when one -+ * of the 3 different way is used. In all other case delay is -+ * not enabled. With ID or TX/RXID delay is enabled and set -+ * to the default and recommended value. -+ */ -+ if (priv->rgmii_tx_delay[cpu_port_index]) { -+ delay = priv->rgmii_tx_delay[cpu_port_index]; -+ -+ val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) | -+ QCA8K_PORT_PAD_RGMII_TX_DELAY_EN; -+ } -+ -+ if (priv->rgmii_rx_delay[cpu_port_index]) { -+ delay = priv->rgmii_rx_delay[cpu_port_index]; -+ -+ val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) | -+ QCA8K_PORT_PAD_RGMII_RX_DELAY_EN; -+ } -+ -+ /* Set RGMII delay based on the selected values */ -+ qca8k_write(priv, reg, val); -+ -+ /* QCA8337 requires to set rgmii rx delay for all ports. -+ * This is enabled through PORT5_PAD_CTRL for all ports, -+ * rather than individual port registers. - */ -- qca8k_write(priv, reg, -- QCA8K_PORT_PAD_RGMII_EN | -- QCA8K_PORT_PAD_RGMII_TX_DELAY(priv->rgmii_tx_delay) | -- QCA8K_PORT_PAD_RGMII_RX_DELAY(priv->rgmii_rx_delay) | -- QCA8K_PORT_PAD_RGMII_TX_DELAY_EN | -- QCA8K_PORT_PAD_RGMII_RX_DELAY_EN); -- /* QCA8337 requires to set rgmii rx delay */ - if (priv->switch_id == QCA8K_ID_QCA8337) - qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL, - QCA8K_PORT_PAD_RGMII_RX_DELAY_EN); ---- a/drivers/net/dsa/qca8k.h -+++ b/drivers/net/dsa/qca8k.h -@@ -13,6 +13,7 @@ - #include - - #define QCA8K_NUM_PORTS 7 -+#define QCA8K_NUM_CPU_PORTS 2 - #define QCA8K_MAX_MTU 9000 - - #define PHY_ID_QCA8327 0x004dd034 -@@ -255,13 +256,18 @@ struct qca8k_match_data { - u8 id; - }; - -+enum { -+ QCA8K_CPU_PORT0, -+ QCA8K_CPU_PORT6, -+}; -+ - struct qca8k_priv { - u8 switch_id; - u8 switch_revision; -- u8 rgmii_tx_delay; -- u8 rgmii_rx_delay; - bool sgmii_rx_clk_falling_edge; - bool sgmii_tx_clk_falling_edge; -+ u8 rgmii_rx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */ -+ u8 rgmii_tx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */ - bool legacy_phy_port_mapping; - struct regmap *regmap; - struct mii_bus *bus; diff --git a/target/linux/generic/backport-6.1/747-v5.16-07-dt-bindings-net-dsa-qca8k-Document-qca-sgmii-enable-.patch b/target/linux/generic/backport-6.1/747-v5.16-07-dt-bindings-net-dsa-qca8k-Document-qca-sgmii-enable-.patch deleted file mode 100644 index 8abd264e794f..000000000000 --- a/target/linux/generic/backport-6.1/747-v5.16-07-dt-bindings-net-dsa-qca8k-Document-qca-sgmii-enable-.patch +++ /dev/null @@ -1,33 +0,0 @@ -From 13ad5ccc093ff448b99ac7e138e91e78796adb48 Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Thu, 14 Oct 2021 00:39:12 +0200 -Subject: dt-bindings: net: dsa: qca8k: Document qca,sgmii-enable-pll - -Document qca,sgmii-enable-pll binding used in the CPU nodes to -enable SGMII PLL on MAC config. - -Signed-off-by: Ansuel Smith -Signed-off-by: David S. Miller ---- - Documentation/devicetree/bindings/net/dsa/qca8k.txt | 10 ++++++++++ - 1 file changed, 10 insertions(+) - ---- a/Documentation/devicetree/bindings/net/dsa/qca8k.txt -+++ b/Documentation/devicetree/bindings/net/dsa/qca8k.txt -@@ -45,6 +45,16 @@ A CPU port node has the following option - Mostly used in qca8327 with CPU port 0 set to - sgmii. - - qca,sgmii-txclk-falling-edge: Set the transmit clock phase to falling edge. -+- qca,sgmii-enable-pll : For SGMII CPU port, explicitly enable PLL, TX and RX -+ chain along with Signal Detection. -+ This should NOT be enabled for qca8327. 
If enabled with -+ qca8327 the sgmii port won't correctly init and an err -+ is printed. -+ This can be required for qca8337 switch with revision 2. -+ A warning is displayed when used with revision greater -+ 2. -+ With CPU port set to sgmii and qca8337 it is advised -+ to set this unless a communication problem is observed. - - For QCA8K the 'fixed-link' sub-node supports only the following properties: - diff --git a/target/linux/generic/backport-6.1/747-v5.16-08-net-dsa-qca8k-add-explicit-SGMII-PLL-enable.patch b/target/linux/generic/backport-6.1/747-v5.16-08-net-dsa-qca8k-add-explicit-SGMII-PLL-enable.patch deleted file mode 100644 index 2b5a84a1b0e8..000000000000 --- a/target/linux/generic/backport-6.1/747-v5.16-08-net-dsa-qca8k-add-explicit-SGMII-PLL-enable.patch +++ /dev/null @@ -1,65 +0,0 @@ -From bbc4799e8bb6c397e3b3fec13de68e179f5db9ff Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Thu, 14 Oct 2021 00:39:13 +0200 -Subject: net: dsa: qca8k: add explicit SGMII PLL enable - -Support enabling PLL on the SGMII CPU port. Some device require this -special configuration or no traffic is transmitted and the switch -doesn't work at all. A dedicated binding is added to the CPU node -port to apply the correct reg on mac config. -Fail to correctly configure sgmii with qca8327 switch and warn if pll is -used on qca8337 with a revision greater than 1. - -Signed-off-by: Ansuel Smith -Reviewed-by: Florian Fainelli -Signed-off-by: David S. Miller ---- - drivers/net/dsa/qca8k.c | 19 +++++++++++++++++-- - drivers/net/dsa/qca8k.h | 1 + - 2 files changed, 18 insertions(+), 2 deletions(-) - ---- a/drivers/net/dsa/qca8k.c -+++ b/drivers/net/dsa/qca8k.c -@@ -1002,6 +1002,18 @@ qca8k_parse_port_config(struct qca8k_pri - if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge")) - priv->sgmii_rx_clk_falling_edge = true; - -+ if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) { -+ priv->sgmii_enable_pll = true; -+ -+ if (priv->switch_id == QCA8K_ID_QCA8327) { -+ dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. 
Aborting enabling"); -+ priv->sgmii_enable_pll = false; -+ } -+ -+ if (priv->switch_revision < 2) -+ dev_warn(priv->dev, "SGMII PLL should NOT be enabled for qca8337 with revision 2 or more."); -+ } -+ - break; - default: - continue; -@@ -1312,8 +1324,11 @@ qca8k_phylink_mac_config(struct dsa_swit - if (ret) - return; - -- val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX | -- QCA8K_SGMII_EN_TX | QCA8K_SGMII_EN_SD; -+ val |= QCA8K_SGMII_EN_SD; -+ -+ if (priv->sgmii_enable_pll) -+ val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX | -+ QCA8K_SGMII_EN_TX; - - if (dsa_is_cpu_port(ds, port)) { - /* CPU port, we're talking to the CPU MAC, be a PHY */ ---- a/drivers/net/dsa/qca8k.h -+++ b/drivers/net/dsa/qca8k.h -@@ -266,6 +266,7 @@ struct qca8k_priv { - u8 switch_revision; - bool sgmii_rx_clk_falling_edge; - bool sgmii_tx_clk_falling_edge; -+ bool sgmii_enable_pll; - u8 rgmii_rx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */ - u8 rgmii_tx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */ - bool legacy_phy_port_mapping; diff --git a/target/linux/generic/backport-6.1/747-v5.16-09-dt-bindings-net-dsa-qca8k-Document-qca-led-open-drai.patch b/target/linux/generic/backport-6.1/747-v5.16-09-dt-bindings-net-dsa-qca8k-Document-qca-led-open-drai.patch deleted file mode 100644 index 38dc954e8cb5..000000000000 --- a/target/linux/generic/backport-6.1/747-v5.16-09-dt-bindings-net-dsa-qca8k-Document-qca-led-open-drai.patch +++ /dev/null @@ -1,37 +0,0 @@ -From 924087c5c3d41553700b0eb83ca2a53b91643dca Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Thu, 14 Oct 2021 00:39:14 +0200 -Subject: dt-bindings: net: dsa: qca8k: Document qca,led-open-drain binding - -Document new binding qca,ignore-power-on-sel used to ignore -power on strapping and use sw regs instead. -Document qca,led-open.drain to set led to open drain mode, the -qca,ignore-power-on-sel is mandatory with this enabled or an error will -be reported. - -Signed-off-by: Ansuel Smith -Signed-off-by: David S. Miller ---- - Documentation/devicetree/bindings/net/dsa/qca8k.txt | 11 +++++++++++ - 1 file changed, 11 insertions(+) - ---- a/Documentation/devicetree/bindings/net/dsa/qca8k.txt -+++ b/Documentation/devicetree/bindings/net/dsa/qca8k.txt -@@ -13,6 +13,17 @@ Required properties: - Optional properties: - - - reset-gpios: GPIO to be used to reset the whole device -+- qca,ignore-power-on-sel: Ignore power on pin strapping to configure led open -+ drain or eeprom presence. This is needed for broken -+ devices that have wrong configuration or when the oem -+ decided to not use pin strapping and fallback to sw -+ regs. -+- qca,led-open-drain: Set leds to open-drain mode. This requires the -+ qca,ignore-power-on-sel to be set or the driver will fail -+ to probe. This is needed if the oem doesn't use pin -+ strapping to set this mode and prefers to set it using sw -+ regs. 
The pin strapping related to led open drain mode is -+ the pin B68 for QCA832x and B49 for QCA833x - - Subnodes: - diff --git a/target/linux/generic/backport-6.1/747-v5.16-10-net-dsa-qca8k-add-support-for-pws-config-reg.patch b/target/linux/generic/backport-6.1/747-v5.16-10-net-dsa-qca8k-add-support-for-pws-config-reg.patch deleted file mode 100644 index aa5d92a4fdec..000000000000 --- a/target/linux/generic/backport-6.1/747-v5.16-10-net-dsa-qca8k-add-support-for-pws-config-reg.patch +++ /dev/null @@ -1,92 +0,0 @@ -From 362bb238d8bf1470424214a8a5968d9c6cce68fa Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Thu, 14 Oct 2021 00:39:15 +0200 -Subject: net: dsa: qca8k: add support for pws config reg - -Some qca8327 switch require to force the ignore of power on sel -strapping. Some switch require to set the led open drain mode in regs -instead of using strapping. While most of the device implements this -using the correct way using pin strapping, there are still some broken -device that require to be set using sw regs. -Introduce a new binding and support these special configuration. -As led open drain require to ignore pin strapping to work, the probe -fails with EINVAL error with incorrect configuration. - -Signed-off-by: Ansuel Smith -Reviewed-by: Florian Fainelli -Signed-off-by: David S. Miller ---- - drivers/net/dsa/qca8k.c | 39 +++++++++++++++++++++++++++++++++++++++ - drivers/net/dsa/qca8k.h | 6 ++++++ - 2 files changed, 45 insertions(+) - ---- a/drivers/net/dsa/qca8k.c -+++ b/drivers/net/dsa/qca8k.c -@@ -932,6 +932,41 @@ static int qca8k_find_cpu_port(struct ds - } - - static int -+qca8k_setup_of_pws_reg(struct qca8k_priv *priv) -+{ -+ struct device_node *node = priv->dev->of_node; -+ u32 val = 0; -+ int ret; -+ -+ /* QCA8327 require to set to the correct mode. -+ * His bigger brother QCA8328 have the 172 pin layout. -+ * Should be applied by default but we set this just to make sure. 
-+ */ -+ if (priv->switch_id == QCA8K_ID_QCA8327) { -+ ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN, -+ QCA8327_PWS_PACKAGE148_EN); -+ if (ret) -+ return ret; -+ } -+ -+ if (of_property_read_bool(node, "qca,ignore-power-on-sel")) -+ val |= QCA8K_PWS_POWER_ON_SEL; -+ -+ if (of_property_read_bool(node, "qca,led-open-drain")) { -+ if (!(val & QCA8K_PWS_POWER_ON_SEL)) { -+ dev_err(priv->dev, "qca,led-open-drain require qca,ignore-power-on-sel to be set."); -+ return -EINVAL; -+ } -+ -+ val |= QCA8K_PWS_LED_OPEN_EN_CSR; -+ } -+ -+ return qca8k_rmw(priv, QCA8K_REG_PWS, -+ QCA8K_PWS_LED_OPEN_EN_CSR | QCA8K_PWS_POWER_ON_SEL, -+ val); -+} -+ -+static int - qca8k_parse_port_config(struct qca8k_priv *priv) - { - int port, cpu_port_index = 0, ret; -@@ -1053,6 +1088,10 @@ qca8k_setup(struct dsa_switch *ds) - if (ret) - return ret; - -+ ret = qca8k_setup_of_pws_reg(priv); -+ if (ret) -+ return ret; -+ - ret = qca8k_setup_mac_pwr_sel(priv); - if (ret) - return ret; ---- a/drivers/net/dsa/qca8k.h -+++ b/drivers/net/dsa/qca8k.h -@@ -46,6 +46,12 @@ - #define QCA8K_MAX_DELAY 3 - #define QCA8K_PORT_PAD_SGMII_EN BIT(7) - #define QCA8K_REG_PWS 0x010 -+#define QCA8K_PWS_POWER_ON_SEL BIT(31) -+/* This reg is only valid for QCA832x and toggle the package -+ * type from 176 pin (by default) to 148 pin used on QCA8327 -+ */ -+#define QCA8327_PWS_PACKAGE148_EN BIT(30) -+#define QCA8K_PWS_LED_OPEN_EN_CSR BIT(24) - #define QCA8K_PWS_SERDES_AEN_DIS BIT(7) - #define QCA8K_REG_MODULE_EN 0x030 - #define QCA8K_MODULE_EN_MIB BIT(0) diff --git a/target/linux/generic/backport-6.1/747-v5.16-11-dt-bindings-net-dsa-qca8k-document-support-for-qca83.patch b/target/linux/generic/backport-6.1/747-v5.16-11-dt-bindings-net-dsa-qca8k-document-support-for-qca83.patch deleted file mode 100644 index 1bfb00c5b2c6..000000000000 --- a/target/linux/generic/backport-6.1/747-v5.16-11-dt-bindings-net-dsa-qca8k-document-support-for-qca83.patch +++ /dev/null @@ -1,32 +0,0 @@ -From ed7988d77fbfb79366b68f9e7fa60a6080da23d4 Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Thu, 14 Oct 2021 00:39:16 +0200 -Subject: dt-bindings: net: dsa: qca8k: document support for qca8328 - -QCA8328 is the bigger brother of qca8327. Document the new compatible -binding and add some information to understand the various switch -compatible. - -Signed-off-by: Ansuel Smith -Reviewed-by: Florian Fainelli -Signed-off-by: David S. 
Miller ---- - Documentation/devicetree/bindings/net/dsa/qca8k.txt | 7 ++++--- - 1 file changed, 4 insertions(+), 3 deletions(-) - ---- a/Documentation/devicetree/bindings/net/dsa/qca8k.txt -+++ b/Documentation/devicetree/bindings/net/dsa/qca8k.txt -@@ -3,9 +3,10 @@ - Required properties: - - - compatible: should be one of: -- "qca,qca8327" -- "qca,qca8334" -- "qca,qca8337" -+ "qca,qca8328": referenced as AR8328(N)-AK1(A/B) QFN 176 pin package -+ "qca,qca8327": referenced as AR8327(N)-AL1A DR-QFN 148 pin package -+ "qca,qca8334": referenced as QCA8334-AL3C QFN 88 pin package -+ "qca,qca8337": referenced as QCA8337N-AL3(B/C) DR-QFN 148 pin package - - - #size-cells: must be 0 - - #address-cells: must be 1 diff --git a/target/linux/generic/backport-6.1/747-v5.16-12-net-dsa-qca8k-add-support-for-QCA8328.patch b/target/linux/generic/backport-6.1/747-v5.16-12-net-dsa-qca8k-add-support-for-QCA8328.patch deleted file mode 100644 index b300621e63e8..000000000000 --- a/target/linux/generic/backport-6.1/747-v5.16-12-net-dsa-qca8k-add-support-for-QCA8328.patch +++ /dev/null @@ -1,78 +0,0 @@ -From f477d1c8bdbef4f400718238e350f16f521d2a3e Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Thu, 14 Oct 2021 00:39:17 +0200 -Subject: net: dsa: qca8k: add support for QCA8328 - -QCA8328 switch is the bigger brother of the qca8327. Same regs different -chip. Change the function to set the correct pin layout and introduce a -new match_data to differentiate the 2 switch as they have the same ID -and their internal PHY have the same ID. - -Signed-off-by: Ansuel Smith -Reviewed-by: Florian Fainelli -Signed-off-by: David S. Miller ---- - drivers/net/dsa/qca8k.c | 19 ++++++++++++++++--- - drivers/net/dsa/qca8k.h | 1 + - 2 files changed, 17 insertions(+), 3 deletions(-) - ---- a/drivers/net/dsa/qca8k.c -+++ b/drivers/net/dsa/qca8k.c -@@ -935,6 +935,7 @@ static int - qca8k_setup_of_pws_reg(struct qca8k_priv *priv) - { - struct device_node *node = priv->dev->of_node; -+ const struct qca8k_match_data *data; - u32 val = 0; - int ret; - -@@ -943,8 +944,14 @@ qca8k_setup_of_pws_reg(struct qca8k_priv - * Should be applied by default but we set this just to make sure. 
- */ - if (priv->switch_id == QCA8K_ID_QCA8327) { -+ data = of_device_get_match_data(priv->dev); -+ -+ /* Set the correct package of 148 pin for QCA8327 */ -+ if (data->reduced_package) -+ val |= QCA8327_PWS_PACKAGE148_EN; -+ - ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN, -- QCA8327_PWS_PACKAGE148_EN); -+ val); - if (ret) - return ret; - } -@@ -2124,7 +2131,12 @@ static int qca8k_resume(struct device *d - static SIMPLE_DEV_PM_OPS(qca8k_pm_ops, - qca8k_suspend, qca8k_resume); - --static const struct qca8k_match_data qca832x = { -+static const struct qca8k_match_data qca8327 = { -+ .id = QCA8K_ID_QCA8327, -+ .reduced_package = true, -+}; -+ -+static const struct qca8k_match_data qca8328 = { - .id = QCA8K_ID_QCA8327, - }; - -@@ -2133,7 +2145,8 @@ static const struct qca8k_match_data qca - }; - - static const struct of_device_id qca8k_of_match[] = { -- { .compatible = "qca,qca8327", .data = &qca832x }, -+ { .compatible = "qca,qca8327", .data = &qca8327 }, -+ { .compatible = "qca,qca8328", .data = &qca8328 }, - { .compatible = "qca,qca8334", .data = &qca833x }, - { .compatible = "qca,qca8337", .data = &qca833x }, - { /* sentinel */ }, ---- a/drivers/net/dsa/qca8k.h -+++ b/drivers/net/dsa/qca8k.h -@@ -260,6 +260,7 @@ struct ar8xxx_port_status { - - struct qca8k_match_data { - u8 id; -+ bool reduced_package; - }; - - enum { diff --git a/target/linux/generic/backport-6.1/747-v5.16-13-net-dsa-qca8k-set-internal-delay-also-for-sgmii.patch b/target/linux/generic/backport-6.1/747-v5.16-13-net-dsa-qca8k-set-internal-delay-also-for-sgmii.patch deleted file mode 100644 index 27f94dca0270..000000000000 --- a/target/linux/generic/backport-6.1/747-v5.16-13-net-dsa-qca8k-set-internal-delay-also-for-sgmii.patch +++ /dev/null @@ -1,159 +0,0 @@ -From cef08115846e581f80ff99abf7bf218da1840616 Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Thu, 14 Oct 2021 00:39:18 +0200 -Subject: net: dsa: qca8k: set internal delay also for sgmii - -QCA original code report port instability and sa that SGMII also require -to set internal delay. Generalize the rgmii delay function and apply the -advised value if they are not defined in DT. - -Signed-off-by: Ansuel Smith -Signed-off-by: David S. Miller ---- - drivers/net/dsa/qca8k.c | 88 +++++++++++++++++++++++++++++++++---------------- - drivers/net/dsa/qca8k.h | 2 ++ - 2 files changed, 62 insertions(+), 28 deletions(-) - ---- a/drivers/net/dsa/qca8k.c -+++ b/drivers/net/dsa/qca8k.c -@@ -1004,6 +1004,7 @@ qca8k_parse_port_config(struct qca8k_pri - case PHY_INTERFACE_MODE_RGMII_ID: - case PHY_INTERFACE_MODE_RGMII_TXID: - case PHY_INTERFACE_MODE_RGMII_RXID: -+ case PHY_INTERFACE_MODE_SGMII: - delay = 0; - - if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay)) -@@ -1036,8 +1037,13 @@ qca8k_parse_port_config(struct qca8k_pri - - priv->rgmii_rx_delay[cpu_port_index] = delay; - -- break; -- case PHY_INTERFACE_MODE_SGMII: -+ /* Skip sgmii parsing for rgmii* mode */ -+ if (mode == PHY_INTERFACE_MODE_RGMII || -+ mode == PHY_INTERFACE_MODE_RGMII_ID || -+ mode == PHY_INTERFACE_MODE_RGMII_TXID || -+ mode == PHY_INTERFACE_MODE_RGMII_RXID) -+ break; -+ - if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge")) - priv->sgmii_tx_clk_falling_edge = true; - -@@ -1261,12 +1267,53 @@ qca8k_setup(struct dsa_switch *ds) - } - - static void -+qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index, -+ u32 reg) -+{ -+ u32 delay, val = 0; -+ int ret; -+ -+ /* Delay can be declared in 3 different way. 
-+ * Mode to rgmii and internal-delay standard binding defined -+ * rgmii-id or rgmii-tx/rx phy mode set. -+ * The parse logic set a delay different than 0 only when one -+ * of the 3 different way is used. In all other case delay is -+ * not enabled. With ID or TX/RXID delay is enabled and set -+ * to the default and recommended value. -+ */ -+ if (priv->rgmii_tx_delay[cpu_port_index]) { -+ delay = priv->rgmii_tx_delay[cpu_port_index]; -+ -+ val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) | -+ QCA8K_PORT_PAD_RGMII_TX_DELAY_EN; -+ } -+ -+ if (priv->rgmii_rx_delay[cpu_port_index]) { -+ delay = priv->rgmii_rx_delay[cpu_port_index]; -+ -+ val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) | -+ QCA8K_PORT_PAD_RGMII_RX_DELAY_EN; -+ } -+ -+ /* Set RGMII delay based on the selected values */ -+ ret = qca8k_rmw(priv, reg, -+ QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK | -+ QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK | -+ QCA8K_PORT_PAD_RGMII_TX_DELAY_EN | -+ QCA8K_PORT_PAD_RGMII_RX_DELAY_EN, -+ val); -+ if (ret) -+ dev_err(priv->dev, "Failed to set internal delay for CPU port%d", -+ cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6); -+} -+ -+static void - qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode, - const struct phylink_link_state *state) - { - struct qca8k_priv *priv = ds->priv; - int cpu_port_index, ret; -- u32 reg, val, delay; -+ u32 reg, val; - - switch (port) { - case 0: /* 1st CPU port */ -@@ -1315,32 +1362,10 @@ qca8k_phylink_mac_config(struct dsa_swit - case PHY_INTERFACE_MODE_RGMII_ID: - case PHY_INTERFACE_MODE_RGMII_TXID: - case PHY_INTERFACE_MODE_RGMII_RXID: -- val = QCA8K_PORT_PAD_RGMII_EN; -- -- /* Delay can be declared in 3 different way. -- * Mode to rgmii and internal-delay standard binding defined -- * rgmii-id or rgmii-tx/rx phy mode set. -- * The parse logic set a delay different than 0 only when one -- * of the 3 different way is used. In all other case delay is -- * not enabled. With ID or TX/RXID delay is enabled and set -- * to the default and recommended value. -- */ -- if (priv->rgmii_tx_delay[cpu_port_index]) { -- delay = priv->rgmii_tx_delay[cpu_port_index]; -- -- val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) | -- QCA8K_PORT_PAD_RGMII_TX_DELAY_EN; -- } -- -- if (priv->rgmii_rx_delay[cpu_port_index]) { -- delay = priv->rgmii_rx_delay[cpu_port_index]; -- -- val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) | -- QCA8K_PORT_PAD_RGMII_RX_DELAY_EN; -- } -+ qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN); - -- /* Set RGMII delay based on the selected values */ -- qca8k_write(priv, reg, val); -+ /* Configure rgmii delay */ -+ qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg); - - /* QCA8337 requires to set rgmii rx delay for all ports. - * This is enabled through PORT5_PAD_CTRL for all ports, -@@ -1411,6 +1436,13 @@ qca8k_phylink_mac_config(struct dsa_swit - QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE | - QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE, - val); -+ -+ /* From original code is reported port instability as SGMII also -+ * require delay set. Apply advised values here or take them from DT. 
-+ */ -+ if (state->interface == PHY_INTERFACE_MODE_SGMII) -+ qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg); -+ - break; - default: - dev_err(ds->dev, "xMII mode %s not supported for port %d\n", ---- a/drivers/net/dsa/qca8k.h -+++ b/drivers/net/dsa/qca8k.h -@@ -39,7 +39,9 @@ - #define QCA8K_REG_PORT5_PAD_CTRL 0x008 - #define QCA8K_REG_PORT6_PAD_CTRL 0x00c - #define QCA8K_PORT_PAD_RGMII_EN BIT(26) -+#define QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK GENMASK(23, 22) - #define QCA8K_PORT_PAD_RGMII_TX_DELAY(x) ((x) << 22) -+#define QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK GENMASK(21, 20) - #define QCA8K_PORT_PAD_RGMII_RX_DELAY(x) ((x) << 20) - #define QCA8K_PORT_PAD_RGMII_TX_DELAY_EN BIT(25) - #define QCA8K_PORT_PAD_RGMII_RX_DELAY_EN BIT(24) diff --git a/target/linux/generic/backport-6.1/747-v5.16-14-net-dsa-qca8k-move-port-config-to-dedicated-struct.patch b/target/linux/generic/backport-6.1/747-v5.16-14-net-dsa-qca8k-move-port-config-to-dedicated-struct.patch deleted file mode 100644 index b991798c87fe..000000000000 --- a/target/linux/generic/backport-6.1/747-v5.16-14-net-dsa-qca8k-move-port-config-to-dedicated-struct.patch +++ /dev/null @@ -1,124 +0,0 @@ -From fd0bb28c547f7c8affb1691128cece38f5b626a1 Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Thu, 14 Oct 2021 00:39:19 +0200 -Subject: net: dsa: qca8k: move port config to dedicated struct - -Move ports related config to dedicated struct to keep things organized. - -Signed-off-by: Ansuel Smith -Reviewed-by: Florian Fainelli -Signed-off-by: David S. Miller ---- - drivers/net/dsa/qca8k.c | 26 +++++++++++++------------- - drivers/net/dsa/qca8k.h | 10 +++++++--- - 2 files changed, 20 insertions(+), 16 deletions(-) - ---- a/drivers/net/dsa/qca8k.c -+++ b/drivers/net/dsa/qca8k.c -@@ -1019,7 +1019,7 @@ qca8k_parse_port_config(struct qca8k_pri - delay = 3; - } - -- priv->rgmii_tx_delay[cpu_port_index] = delay; -+ priv->ports_config.rgmii_tx_delay[cpu_port_index] = delay; - - delay = 0; - -@@ -1035,7 +1035,7 @@ qca8k_parse_port_config(struct qca8k_pri - delay = 3; - } - -- priv->rgmii_rx_delay[cpu_port_index] = delay; -+ priv->ports_config.rgmii_rx_delay[cpu_port_index] = delay; - - /* Skip sgmii parsing for rgmii* mode */ - if (mode == PHY_INTERFACE_MODE_RGMII || -@@ -1045,17 +1045,17 @@ qca8k_parse_port_config(struct qca8k_pri - break; - - if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge")) -- priv->sgmii_tx_clk_falling_edge = true; -+ priv->ports_config.sgmii_tx_clk_falling_edge = true; - - if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge")) -- priv->sgmii_rx_clk_falling_edge = true; -+ priv->ports_config.sgmii_rx_clk_falling_edge = true; - - if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) { -- priv->sgmii_enable_pll = true; -+ priv->ports_config.sgmii_enable_pll = true; - - if (priv->switch_id == QCA8K_ID_QCA8327) { - dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. Aborting enabling"); -- priv->sgmii_enable_pll = false; -+ priv->ports_config.sgmii_enable_pll = false; - } - - if (priv->switch_revision < 2) -@@ -1281,15 +1281,15 @@ qca8k_mac_config_setup_internal_delay(st - * not enabled. With ID or TX/RXID delay is enabled and set - * to the default and recommended value. 
- */ -- if (priv->rgmii_tx_delay[cpu_port_index]) { -- delay = priv->rgmii_tx_delay[cpu_port_index]; -+ if (priv->ports_config.rgmii_tx_delay[cpu_port_index]) { -+ delay = priv->ports_config.rgmii_tx_delay[cpu_port_index]; - - val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) | - QCA8K_PORT_PAD_RGMII_TX_DELAY_EN; - } - -- if (priv->rgmii_rx_delay[cpu_port_index]) { -- delay = priv->rgmii_rx_delay[cpu_port_index]; -+ if (priv->ports_config.rgmii_rx_delay[cpu_port_index]) { -+ delay = priv->ports_config.rgmii_rx_delay[cpu_port_index]; - - val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) | - QCA8K_PORT_PAD_RGMII_RX_DELAY_EN; -@@ -1397,7 +1397,7 @@ qca8k_phylink_mac_config(struct dsa_swit - - val |= QCA8K_SGMII_EN_SD; - -- if (priv->sgmii_enable_pll) -+ if (priv->ports_config.sgmii_enable_pll) - val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX | - QCA8K_SGMII_EN_TX; - -@@ -1425,10 +1425,10 @@ qca8k_phylink_mac_config(struct dsa_swit - val = 0; - - /* SGMII Clock phase configuration */ -- if (priv->sgmii_rx_clk_falling_edge) -+ if (priv->ports_config.sgmii_rx_clk_falling_edge) - val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE; - -- if (priv->sgmii_tx_clk_falling_edge) -+ if (priv->ports_config.sgmii_tx_clk_falling_edge) - val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE; - - if (val) ---- a/drivers/net/dsa/qca8k.h -+++ b/drivers/net/dsa/qca8k.h -@@ -270,15 +270,19 @@ enum { - QCA8K_CPU_PORT6, - }; - --struct qca8k_priv { -- u8 switch_id; -- u8 switch_revision; -+struct qca8k_ports_config { - bool sgmii_rx_clk_falling_edge; - bool sgmii_tx_clk_falling_edge; - bool sgmii_enable_pll; - u8 rgmii_rx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */ - u8 rgmii_tx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */ -+}; -+ -+struct qca8k_priv { -+ u8 switch_id; -+ u8 switch_revision; - bool legacy_phy_port_mapping; -+ struct qca8k_ports_config ports_config; - struct regmap *regmap; - struct mii_bus *bus; - struct ar8xxx_port_status port_sts[QCA8K_NUM_PORTS]; diff --git a/target/linux/generic/backport-6.1/747-v5.16-15-dt-bindings-net-ipq8064-mdio-fix-warning-with-new-qc.patch b/target/linux/generic/backport-6.1/747-v5.16-15-dt-bindings-net-ipq8064-mdio-fix-warning-with-new-qc.patch deleted file mode 100644 index f7cb51417633..000000000000 --- a/target/linux/generic/backport-6.1/747-v5.16-15-dt-bindings-net-ipq8064-mdio-fix-warning-with-new-qc.patch +++ /dev/null @@ -1,26 +0,0 @@ -From e52073a8e3086046a098b8a7cbeb282ff0cdb424 Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Thu, 14 Oct 2021 00:39:20 +0200 -Subject: dt-bindings: net: ipq8064-mdio: fix warning with new qca8k switch - -Fix warning now that we have qca8k switch Documentation using yaml. - -Signed-off-by: Ansuel Smith -Signed-off-by: David S. Miller ---- - Documentation/devicetree/bindings/net/qcom,ipq8064-mdio.yaml | 5 ++++- - 1 file changed, 4 insertions(+), 1 deletion(-) - ---- a/Documentation/devicetree/bindings/net/qcom,ipq8064-mdio.yaml -+++ b/Documentation/devicetree/bindings/net/qcom,ipq8064-mdio.yaml -@@ -51,6 +51,9 @@ examples: - switch@10 { - compatible = "qca,qca8337"; - reg = <0x10>; -- /* ... */ -+ -+ ports { -+ /* ... 
*/ -+ }; - }; - }; diff --git a/target/linux/generic/backport-6.1/747-v5.16-16-dt-bindings-net-dsa-qca8k-convert-to-YAML-schema.patch b/target/linux/generic/backport-6.1/747-v5.16-16-dt-bindings-net-dsa-qca8k-convert-to-YAML-schema.patch deleted file mode 100644 index b9bce97dd3da..000000000000 --- a/target/linux/generic/backport-6.1/747-v5.16-16-dt-bindings-net-dsa-qca8k-convert-to-YAML-schema.patch +++ /dev/null @@ -1,631 +0,0 @@ -From d291fbb8245d5ba04979fed85575860a5cea7196 Mon Sep 17 00:00:00 2001 -From: Matthew Hagan -Date: Thu, 14 Oct 2021 00:39:21 +0200 -Subject: dt-bindings: net: dsa: qca8k: convert to YAML schema - -Convert the qca8k bindings to YAML format. - -Signed-off-by: Matthew Hagan -Co-developed-by: Ansuel Smith -Signed-off-by: Ansuel Smith -Signed-off-by: David S. Miller ---- - .../devicetree/bindings/net/dsa/qca8k.txt | 245 -------------- - .../devicetree/bindings/net/dsa/qca8k.yaml | 362 +++++++++++++++++++++ - 2 files changed, 362 insertions(+), 245 deletions(-) - delete mode 100644 Documentation/devicetree/bindings/net/dsa/qca8k.txt - create mode 100644 Documentation/devicetree/bindings/net/dsa/qca8k.yaml - ---- a/Documentation/devicetree/bindings/net/dsa/qca8k.txt -+++ /dev/null -@@ -1,245 +0,0 @@ --* Qualcomm Atheros QCA8xxx switch family -- --Required properties: -- --- compatible: should be one of: -- "qca,qca8328": referenced as AR8328(N)-AK1(A/B) QFN 176 pin package -- "qca,qca8327": referenced as AR8327(N)-AL1A DR-QFN 148 pin package -- "qca,qca8334": referenced as QCA8334-AL3C QFN 88 pin package -- "qca,qca8337": referenced as QCA8337N-AL3(B/C) DR-QFN 148 pin package -- --- #size-cells: must be 0 --- #address-cells: must be 1 -- --Optional properties: -- --- reset-gpios: GPIO to be used to reset the whole device --- qca,ignore-power-on-sel: Ignore power on pin strapping to configure led open -- drain or eeprom presence. This is needed for broken -- devices that have wrong configuration or when the oem -- decided to not use pin strapping and fallback to sw -- regs. --- qca,led-open-drain: Set leds to open-drain mode. This requires the -- qca,ignore-power-on-sel to be set or the driver will fail -- to probe. This is needed if the oem doesn't use pin -- strapping to set this mode and prefers to set it using sw -- regs. The pin strapping related to led open drain mode is -- the pin B68 for QCA832x and B49 for QCA833x -- --Subnodes: -- --The integrated switch subnode should be specified according to the binding --described in dsa/dsa.txt. If the QCA8K switch is connect to a SoC's external --mdio-bus each subnode describing a port needs to have a valid phandle --referencing the internal PHY it is connected to. This is because there's no --N:N mapping of port and PHY id. --To declare the internal mdio-bus configuration, declare a mdio node in the --switch node and declare the phandle for the port referencing the internal --PHY is connected to. In this config a internal mdio-bus is registered and --the mdio MASTER is used as communication. -- --Don't use mixed external and internal mdio-bus configurations, as this is --not supported by the hardware. -- --This switch support 2 CPU port. Normally and advised configuration is with --CPU port set to port 0. It is also possible to set the CPU port to port 6 --if the device requires it. The driver will configure the switch to the defined --port. With both CPU port declared the first CPU port is selected as primary --and the secondary CPU ignored. 
-- --A CPU port node has the following optional node: -- --- fixed-link : Fixed-link subnode describing a link to a non-MDIO -- managed entity. See -- Documentation/devicetree/bindings/net/fixed-link.txt -- for details. --- qca,sgmii-rxclk-falling-edge: Set the receive clock phase to falling edge. -- Mostly used in qca8327 with CPU port 0 set to -- sgmii. --- qca,sgmii-txclk-falling-edge: Set the transmit clock phase to falling edge. --- qca,sgmii-enable-pll : For SGMII CPU port, explicitly enable PLL, TX and RX -- chain along with Signal Detection. -- This should NOT be enabled for qca8327. If enabled with -- qca8327 the sgmii port won't correctly init and an err -- is printed. -- This can be required for qca8337 switch with revision 2. -- A warning is displayed when used with revision greater -- 2. -- With CPU port set to sgmii and qca8337 it is advised -- to set this unless a communication problem is observed. -- --For QCA8K the 'fixed-link' sub-node supports only the following properties: -- --- 'speed' (integer, mandatory), to indicate the link speed. Accepted -- values are 10, 100 and 1000 --- 'full-duplex' (boolean, optional), to indicate that full duplex is -- used. When absent, half duplex is assumed. -- --Examples: -- --for the external mdio-bus configuration: -- -- &mdio0 { -- phy_port1: phy@0 { -- reg = <0>; -- }; -- -- phy_port2: phy@1 { -- reg = <1>; -- }; -- -- phy_port3: phy@2 { -- reg = <2>; -- }; -- -- phy_port4: phy@3 { -- reg = <3>; -- }; -- -- phy_port5: phy@4 { -- reg = <4>; -- }; -- -- switch@10 { -- compatible = "qca,qca8337"; -- #address-cells = <1>; -- #size-cells = <0>; -- -- reset-gpios = <&gpio 42 GPIO_ACTIVE_LOW>; -- reg = <0x10>; -- -- ports { -- #address-cells = <1>; -- #size-cells = <0>; -- port@0 { -- reg = <0>; -- label = "cpu"; -- ethernet = <&gmac1>; -- phy-mode = "rgmii"; -- fixed-link { -- speed = 1000; -- full-duplex; -- }; -- }; -- -- port@1 { -- reg = <1>; -- label = "lan1"; -- phy-handle = <&phy_port1>; -- }; -- -- port@2 { -- reg = <2>; -- label = "lan2"; -- phy-handle = <&phy_port2>; -- }; -- -- port@3 { -- reg = <3>; -- label = "lan3"; -- phy-handle = <&phy_port3>; -- }; -- -- port@4 { -- reg = <4>; -- label = "lan4"; -- phy-handle = <&phy_port4>; -- }; -- -- port@5 { -- reg = <5>; -- label = "wan"; -- phy-handle = <&phy_port5>; -- }; -- }; -- }; -- }; -- --for the internal master mdio-bus configuration: -- -- &mdio0 { -- switch@10 { -- compatible = "qca,qca8337"; -- #address-cells = <1>; -- #size-cells = <0>; -- -- reset-gpios = <&gpio 42 GPIO_ACTIVE_LOW>; -- reg = <0x10>; -- -- ports { -- #address-cells = <1>; -- #size-cells = <0>; -- -- port@0 { -- reg = <0>; -- label = "cpu"; -- ethernet = <&gmac1>; -- phy-mode = "rgmii"; -- fixed-link { -- speed = 1000; -- full-duplex; -- }; -- }; -- -- port@1 { -- reg = <1>; -- label = "lan1"; -- phy-mode = "internal"; -- phy-handle = <&phy_port1>; -- }; -- -- port@2 { -- reg = <2>; -- label = "lan2"; -- phy-mode = "internal"; -- phy-handle = <&phy_port2>; -- }; -- -- port@3 { -- reg = <3>; -- label = "lan3"; -- phy-mode = "internal"; -- phy-handle = <&phy_port3>; -- }; -- -- port@4 { -- reg = <4>; -- label = "lan4"; -- phy-mode = "internal"; -- phy-handle = <&phy_port4>; -- }; -- -- port@5 { -- reg = <5>; -- label = "wan"; -- phy-mode = "internal"; -- phy-handle = <&phy_port5>; -- }; -- }; -- -- mdio { -- #address-cells = <1>; -- #size-cells = <0>; -- -- phy_port1: phy@0 { -- reg = <0>; -- }; -- -- phy_port2: phy@1 { -- reg = <1>; -- }; -- -- phy_port3: phy@2 { -- reg = <2>; -- }; -- -- phy_port4: phy@3 
{ -- reg = <3>; -- }; -- -- phy_port5: phy@4 { -- reg = <4>; -- }; -- }; -- }; -- }; ---- /dev/null -+++ b/Documentation/devicetree/bindings/net/dsa/qca8k.yaml -@@ -0,0 +1,362 @@ -+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) -+%YAML 1.2 -+--- -+$id: http://devicetree.org/schemas/net/dsa/qca8k.yaml# -+$schema: http://devicetree.org/meta-schemas/core.yaml# -+ -+title: Qualcomm Atheros QCA83xx switch family -+ -+maintainers: -+ - John Crispin -+ -+description: -+ If the QCA8K switch is connect to an SoC's external mdio-bus, each subnode -+ describing a port needs to have a valid phandle referencing the internal PHY -+ it is connected to. This is because there is no N:N mapping of port and PHY -+ ID. To declare the internal mdio-bus configuration, declare an MDIO node in -+ the switch node and declare the phandle for the port, referencing the internal -+ PHY it is connected to. In this config, an internal mdio-bus is registered and -+ the MDIO master is used for communication. Mixed external and internal -+ mdio-bus configurations are not supported by the hardware. -+ -+properties: -+ compatible: -+ oneOf: -+ - enum: -+ - qca,qca8327 -+ - qca,qca8328 -+ - qca,qca8334 -+ - qca,qca8337 -+ description: | -+ qca,qca8328: referenced as AR8328(N)-AK1(A/B) QFN 176 pin package -+ qca,qca8327: referenced as AR8327(N)-AL1A DR-QFN 148 pin package -+ qca,qca8334: referenced as QCA8334-AL3C QFN 88 pin package -+ qca,qca8337: referenced as QCA8337N-AL3(B/C) DR-QFN 148 pin package -+ -+ reg: -+ maxItems: 1 -+ -+ reset-gpios: -+ description: -+ GPIO to be used to reset the whole device -+ maxItems: 1 -+ -+ qca,ignore-power-on-sel: -+ $ref: /schemas/types.yaml#/definitions/flag -+ description: -+ Ignore power-on pin strapping to configure LED open-drain or EEPROM -+ presence. This is needed for devices with incorrect configuration or when -+ the OEM has decided not to use pin strapping and falls back to SW regs. -+ -+ qca,led-open-drain: -+ $ref: /schemas/types.yaml#/definitions/flag -+ description: -+ Set LEDs to open-drain mode. This requires the qca,ignore-power-on-sel to -+ be set, otherwise the driver will fail at probe. This is required if the -+ OEM does not use pin strapping to set this mode and prefers to set it -+ using SW regs. The pin strappings related to LED open-drain mode are -+ B68 on the QCA832x and B49 on the QCA833x. -+ -+ mdio: -+ type: object -+ description: Qca8k switch have an internal mdio to access switch port. -+ If this is not present, the legacy mapping is used and the -+ internal mdio access is used. -+ With the legacy mapping the reg corresponding to the internal -+ mdio is the switch reg with an offset of -1. -+ -+ properties: -+ '#address-cells': -+ const: 1 -+ '#size-cells': -+ const: 0 -+ -+ patternProperties: -+ "^(ethernet-)?phy@[0-4]$": -+ type: object -+ -+ allOf: -+ - $ref: "http://devicetree.org/schemas/net/mdio.yaml#" -+ -+ properties: -+ reg: -+ maxItems: 1 -+ -+ required: -+ - reg -+ -+patternProperties: -+ "^(ethernet-)?ports$": -+ type: object -+ properties: -+ '#address-cells': -+ const: 1 -+ '#size-cells': -+ const: 0 -+ -+ patternProperties: -+ "^(ethernet-)?port@[0-6]$": -+ type: object -+ description: Ethernet switch ports -+ -+ properties: -+ reg: -+ description: Port number -+ -+ label: -+ description: -+ Describes the label associated with this port, which will become -+ the netdev name -+ $ref: /schemas/types.yaml#/definitions/string -+ -+ link: -+ description: -+ Should be a list of phandles to other switch's DSA port. 
This -+ port is used as the outgoing port towards the phandle ports. The -+ full routing information must be given, not just the one hop -+ routes to neighbouring switches -+ $ref: /schemas/types.yaml#/definitions/phandle-array -+ -+ ethernet: -+ description: -+ Should be a phandle to a valid Ethernet device node. This host -+ device is what the switch port is connected to -+ $ref: /schemas/types.yaml#/definitions/phandle -+ -+ phy-handle: true -+ -+ phy-mode: true -+ -+ fixed-link: true -+ -+ mac-address: true -+ -+ sfp: true -+ -+ qca,sgmii-rxclk-falling-edge: -+ $ref: /schemas/types.yaml#/definitions/flag -+ description: -+ Set the receive clock phase to falling edge. Mostly commonly used on -+ the QCA8327 with CPU port 0 set to SGMII. -+ -+ qca,sgmii-txclk-falling-edge: -+ $ref: /schemas/types.yaml#/definitions/flag -+ description: -+ Set the transmit clock phase to falling edge. -+ -+ qca,sgmii-enable-pll: -+ $ref: /schemas/types.yaml#/definitions/flag -+ description: -+ For SGMII CPU port, explicitly enable PLL, TX and RX chain along with -+ Signal Detection. On the QCA8327 this should not be enabled, otherwise -+ the SGMII port will not initialize. When used on the QCA8337, revision 3 -+ or greater, a warning will be displayed. When the CPU port is set to -+ SGMII on the QCA8337, it is advised to set this unless a communication -+ issue is observed. -+ -+ required: -+ - reg -+ -+ additionalProperties: false -+ -+oneOf: -+ - required: -+ - ports -+ - required: -+ - ethernet-ports -+ -+required: -+ - compatible -+ - reg -+ -+additionalProperties: true -+ -+examples: -+ - | -+ #include -+ -+ mdio { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ -+ external_phy_port1: ethernet-phy@0 { -+ reg = <0>; -+ }; -+ -+ external_phy_port2: ethernet-phy@1 { -+ reg = <1>; -+ }; -+ -+ external_phy_port3: ethernet-phy@2 { -+ reg = <2>; -+ }; -+ -+ external_phy_port4: ethernet-phy@3 { -+ reg = <3>; -+ }; -+ -+ external_phy_port5: ethernet-phy@4 { -+ reg = <4>; -+ }; -+ -+ switch@10 { -+ compatible = "qca,qca8337"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reset-gpios = <&gpio 42 GPIO_ACTIVE_LOW>; -+ reg = <0x10>; -+ -+ ports { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ -+ port@0 { -+ reg = <0>; -+ label = "cpu"; -+ ethernet = <&gmac1>; -+ phy-mode = "rgmii"; -+ -+ fixed-link { -+ speed = <1000>; -+ full-duplex; -+ }; -+ }; -+ -+ port@1 { -+ reg = <1>; -+ label = "lan1"; -+ phy-handle = <&external_phy_port1>; -+ }; -+ -+ port@2 { -+ reg = <2>; -+ label = "lan2"; -+ phy-handle = <&external_phy_port2>; -+ }; -+ -+ port@3 { -+ reg = <3>; -+ label = "lan3"; -+ phy-handle = <&external_phy_port3>; -+ }; -+ -+ port@4 { -+ reg = <4>; -+ label = "lan4"; -+ phy-handle = <&external_phy_port4>; -+ }; -+ -+ port@5 { -+ reg = <5>; -+ label = "wan"; -+ phy-handle = <&external_phy_port5>; -+ }; -+ }; -+ }; -+ }; -+ - | -+ #include -+ -+ mdio { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ -+ switch@10 { -+ compatible = "qca,qca8337"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reset-gpios = <&gpio 42 GPIO_ACTIVE_LOW>; -+ reg = <0x10>; -+ -+ ports { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ -+ port@0 { -+ reg = <0>; -+ label = "cpu"; -+ ethernet = <&gmac1>; -+ phy-mode = "rgmii"; -+ -+ fixed-link { -+ speed = <1000>; -+ full-duplex; -+ }; -+ }; -+ -+ port@1 { -+ reg = <1>; -+ label = "lan1"; -+ phy-mode = "internal"; -+ phy-handle = <&internal_phy_port1>; -+ }; -+ -+ port@2 { -+ reg = <2>; -+ label = "lan2"; -+ phy-mode = "internal"; -+ phy-handle = <&internal_phy_port2>; -+ }; -+ -+ 
port@3 { -+ reg = <3>; -+ label = "lan3"; -+ phy-mode = "internal"; -+ phy-handle = <&internal_phy_port3>; -+ }; -+ -+ port@4 { -+ reg = <4>; -+ label = "lan4"; -+ phy-mode = "internal"; -+ phy-handle = <&internal_phy_port4>; -+ }; -+ -+ port@5 { -+ reg = <5>; -+ label = "wan"; -+ phy-mode = "internal"; -+ phy-handle = <&internal_phy_port5>; -+ }; -+ -+ port@6 { -+ reg = <0>; -+ label = "cpu"; -+ ethernet = <&gmac1>; -+ phy-mode = "sgmii"; -+ -+ qca,sgmii-rxclk-falling-edge; -+ -+ fixed-link { -+ speed = <1000>; -+ full-duplex; -+ }; -+ }; -+ }; -+ -+ mdio { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ -+ internal_phy_port1: ethernet-phy@0 { -+ reg = <0>; -+ }; -+ -+ internal_phy_port2: ethernet-phy@1 { -+ reg = <1>; -+ }; -+ -+ internal_phy_port3: ethernet-phy@2 { -+ reg = <2>; -+ }; -+ -+ internal_phy_port4: ethernet-phy@3 { -+ reg = <3>; -+ }; -+ -+ internal_phy_port5: ethernet-phy@4 { -+ reg = <4>; -+ }; -+ }; -+ }; -+ }; diff --git a/target/linux/generic/backport-6.1/748-v5.16-net-dsa-qca8k-fix-delay-applied-to-wrong-cpu-in-parse-p.patch b/target/linux/generic/backport-6.1/748-v5.16-net-dsa-qca8k-fix-delay-applied-to-wrong-cpu-in-parse-p.patch deleted file mode 100644 index a510cfdc1831..000000000000 --- a/target/linux/generic/backport-6.1/748-v5.16-net-dsa-qca8k-fix-delay-applied-to-wrong-cpu-in-parse-p.patch +++ /dev/null @@ -1,28 +0,0 @@ -From 06dd34a628ae5b6a839b757e746de165d6789ca8 Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Sun, 17 Oct 2021 16:56:46 +0200 -Subject: net: dsa: qca8k: fix delay applied to wrong cpu in parse_port_config - -Fix delay settings applied to wrong cpu in parse_port_config. The delay -values is set to the wrong index as the cpu_port_index is incremented -too early. Start the cpu_port_index to -1 so the correct value is -applied to address also the case with invalid phy mode and not available -port. - -Signed-off-by: Ansuel Smith -Signed-off-by: David S. Miller ---- - drivers/net/dsa/qca8k.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/net/dsa/qca8k.c -+++ b/drivers/net/dsa/qca8k.c -@@ -976,7 +976,7 @@ qca8k_setup_of_pws_reg(struct qca8k_priv - static int - qca8k_parse_port_config(struct qca8k_priv *priv) - { -- int port, cpu_port_index = 0, ret; -+ int port, cpu_port_index = -1, ret; - struct device_node *port_dn; - phy_interface_t mode; - struct dsa_port *dp; diff --git a/target/linux/generic/backport-6.1/749-v5.16-net-dsa-qca8k-tidy-for-loop-in-setup-and-add-cpu-port-c.patch b/target/linux/generic/backport-6.1/749-v5.16-net-dsa-qca8k-tidy-for-loop-in-setup-and-add-cpu-port-c.patch deleted file mode 100644 index 71fa3022d558..000000000000 --- a/target/linux/generic/backport-6.1/749-v5.16-net-dsa-qca8k-tidy-for-loop-in-setup-and-add-cpu-port-c.patch +++ /dev/null @@ -1,151 +0,0 @@ -From 040e926f5813a5f4cc18dbff7c942d1e52f368f2 Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Tue, 19 Oct 2021 02:08:50 +0200 -Subject: net: dsa: qca8k: tidy for loop in setup and add cpu port check - -Tidy and organize qca8k setup function from multiple for loop. -Change for loop in bridge leave/join to scan all port and skip cpu port. -No functional change intended. - -Signed-off-by: Ansuel Smith -Signed-off-by: David S. 
Miller ---- - drivers/net/dsa/qca8k.c | 74 +++++++++++++++++++++++++++++-------------------- - 1 file changed, 44 insertions(+), 30 deletions(-) - ---- a/drivers/net/dsa/qca8k.c -+++ b/drivers/net/dsa/qca8k.c -@@ -1122,28 +1122,34 @@ qca8k_setup(struct dsa_switch *ds) - if (ret) - dev_warn(priv->dev, "mib init failed"); - -- /* Enable QCA header mode on the cpu port */ -- ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(cpu_port), -- QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_TX_S | -- QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_RX_S); -- if (ret) { -- dev_err(priv->dev, "failed enabling QCA header mode"); -- return ret; -- } -- -- /* Disable forwarding by default on all ports */ -+ /* Initial setup of all ports */ - for (i = 0; i < QCA8K_NUM_PORTS; i++) { -+ /* Disable forwarding by default on all ports */ - ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i), - QCA8K_PORT_LOOKUP_MEMBER, 0); - if (ret) - return ret; -- } - -- /* Disable MAC by default on all ports */ -- for (i = 1; i < QCA8K_NUM_PORTS; i++) -- qca8k_port_set_status(priv, i, 0); -+ /* Enable QCA header mode on all cpu ports */ -+ if (dsa_is_cpu_port(ds, i)) { -+ ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i), -+ QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_TX_S | -+ QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_RX_S); -+ if (ret) { -+ dev_err(priv->dev, "failed enabling QCA header mode"); -+ return ret; -+ } -+ } -+ -+ /* Disable MAC by default on all user ports */ -+ if (dsa_is_user_port(ds, i)) -+ qca8k_port_set_status(priv, i, 0); -+ } - -- /* Forward all unknown frames to CPU port for Linux processing */ -+ /* Forward all unknown frames to CPU port for Linux processing -+ * Notice that in multi-cpu config only one port should be set -+ * for igmp, unknown, multicast and broadcast packet -+ */ - ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1, - BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S | - BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_BC_DP_S | -@@ -1152,11 +1158,13 @@ qca8k_setup(struct dsa_switch *ds) - if (ret) - return ret; - -- /* Setup connection between CPU port & user ports */ -+ /* Setup connection between CPU port & user ports -+ * Configure specific switch configuration for ports -+ */ - for (i = 0; i < QCA8K_NUM_PORTS; i++) { - /* CPU port gets connected to all user ports of the switch */ - if (dsa_is_cpu_port(ds, i)) { -- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(cpu_port), -+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i), - QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds)); - if (ret) - return ret; -@@ -1193,16 +1201,14 @@ qca8k_setup(struct dsa_switch *ds) - if (ret) - return ret; - } -- } - -- /* The port 5 of the qca8337 have some problem in flood condition. The -- * original legacy driver had some specific buffer and priority settings -- * for the different port suggested by the QCA switch team. Add this -- * missing settings to improve switch stability under load condition. -- * This problem is limited to qca8337 and other qca8k switch are not affected. -- */ -- if (priv->switch_id == QCA8K_ID_QCA8337) { -- for (i = 0; i < QCA8K_NUM_PORTS; i++) { -+ /* The port 5 of the qca8337 have some problem in flood condition. The -+ * original legacy driver had some specific buffer and priority settings -+ * for the different port suggested by the QCA switch team. Add this -+ * missing settings to improve switch stability under load condition. -+ * This problem is limited to qca8337 and other qca8k switch are not affected. 
-+ */ -+ if (priv->switch_id == QCA8K_ID_QCA8337) { - switch (i) { - /* The 2 CPU port and port 5 requires some different - * priority than any other ports. -@@ -1238,6 +1244,12 @@ qca8k_setup(struct dsa_switch *ds) - QCA8K_PORT_HOL_CTRL1_WRED_EN, - mask); - } -+ -+ /* Set initial MTU for every port. -+ * We have only have a general MTU setting. So track -+ * every port and set the max across all port. -+ */ -+ priv->port_mtu[i] = ETH_FRAME_LEN + ETH_FCS_LEN; - } - - /* Special GLOBAL_FC_THRESH value are needed for ar8327 switch */ -@@ -1251,8 +1263,6 @@ qca8k_setup(struct dsa_switch *ds) - } - - /* Setup our port MTUs to match power on defaults */ -- for (i = 0; i < QCA8K_NUM_PORTS; i++) -- priv->port_mtu[i] = ETH_FRAME_LEN + ETH_FCS_LEN; - ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN); - if (ret) - dev_warn(priv->dev, "failed setting MTU settings"); -@@ -1728,7 +1738,9 @@ qca8k_port_bridge_join(struct dsa_switch - cpu_port = dsa_to_port(ds, port)->cpu_dp->index; - port_mask = BIT(cpu_port); - -- for (i = 1; i < QCA8K_NUM_PORTS; i++) { -+ for (i = 0; i < QCA8K_NUM_PORTS; i++) { -+ if (dsa_is_cpu_port(ds, i)) -+ continue; - if (dsa_to_port(ds, i)->bridge_dev != br) - continue; - /* Add this port to the portvlan mask of the other ports -@@ -1758,7 +1770,9 @@ qca8k_port_bridge_leave(struct dsa_switc - - cpu_port = dsa_to_port(ds, port)->cpu_dp->index; - -- for (i = 1; i < QCA8K_NUM_PORTS; i++) { -+ for (i = 0; i < QCA8K_NUM_PORTS; i++) { -+ if (dsa_is_cpu_port(ds, i)) -+ continue; - if (dsa_to_port(ds, i)->bridge_dev != br) - continue; - /* Remove this port to the portvlan mask of the other ports diff --git a/target/linux/generic/backport-6.1/750-v5.16-net-dsa-qca8k-make-sure-pad0-mac06-exchange-is-disabled.patch b/target/linux/generic/backport-6.1/750-v5.16-net-dsa-qca8k-make-sure-pad0-mac06-exchange-is-disabled.patch deleted file mode 100644 index 4a61703c5274..000000000000 --- a/target/linux/generic/backport-6.1/750-v5.16-net-dsa-qca8k-make-sure-pad0-mac06-exchange-is-disabled.patch +++ /dev/null @@ -1,47 +0,0 @@ -From 5f15d392dcb4aa250a63d6f2c5adfc26c0aedc78 Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Tue, 2 Nov 2021 19:30:41 +0100 -Subject: net: dsa: qca8k: make sure PAD0 MAC06 exchange is disabled - -Some device set MAC06 exchange in the bootloader. This cause some -problem as we don't support this strange mode and we just set the port6 -as the primary CPU port. With MAC06 exchange, PAD0 reg configure port6 -instead of port0. Add an extra check and explicitly disable MAC06 exchange -to correctly configure the port PAD config. - -Signed-off-by: Ansuel Smith -Fixes: 3fcf734aa482 ("net: dsa: qca8k: add support for cpu port 6") -Reviewed-by: Vladimir Oltean -Signed-off-by: David S. 
Miller ---- - drivers/net/dsa/qca8k.c | 8 ++++++++ - drivers/net/dsa/qca8k.h | 1 + - 2 files changed, 9 insertions(+) - ---- a/drivers/net/dsa/qca8k.c -+++ b/drivers/net/dsa/qca8k.c -@@ -1109,6 +1109,14 @@ qca8k_setup(struct dsa_switch *ds) - if (ret) - return ret; - -+ /* Make sure MAC06 is disabled */ -+ ret = qca8k_reg_clear(priv, QCA8K_REG_PORT0_PAD_CTRL, -+ QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN); -+ if (ret) { -+ dev_err(priv->dev, "failed disabling MAC06 exchange"); -+ return ret; -+ } -+ - /* Enable CPU Port */ - ret = qca8k_reg_set(priv, QCA8K_REG_GLOBAL_FW_CTRL0, - QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN); ---- a/drivers/net/dsa/qca8k.h -+++ b/drivers/net/dsa/qca8k.h -@@ -34,6 +34,7 @@ - #define QCA8K_MASK_CTRL_DEVICE_ID_MASK GENMASK(15, 8) - #define QCA8K_MASK_CTRL_DEVICE_ID(x) ((x) >> 8) - #define QCA8K_REG_PORT0_PAD_CTRL 0x004 -+#define QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN BIT(31) - #define QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE BIT(19) - #define QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE BIT(18) - #define QCA8K_REG_PORT5_PAD_CTRL 0x008 diff --git a/target/linux/generic/backport-6.1/751-v5.16-net-dsa-qca8k-fix-internal-delay-applied-to-the-wrong-PAD.patch b/target/linux/generic/backport-6.1/751-v5.16-net-dsa-qca8k-fix-internal-delay-applied-to-the-wrong-PAD.patch deleted file mode 100644 index df9518d86cd3..000000000000 --- a/target/linux/generic/backport-6.1/751-v5.16-net-dsa-qca8k-fix-internal-delay-applied-to-the-wrong-PAD.patch +++ /dev/null @@ -1,48 +0,0 @@ -From 3b00a07c2443745d62babfe08dbb2ad8e649526e Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Fri, 19 Nov 2021 03:03:49 +0100 -Subject: [PATCH] net: dsa: qca8k: fix internal delay applied to the wrong PAD - config - -With SGMII phy the internal delay is always applied to the PAD0 config. -This is caused by the falling edge configuration that hardcode the reg -to PAD0 (as the falling edge bits are present only in PAD0 reg) -Move the delay configuration before the reg overwrite to correctly apply -the delay. - -Fixes: cef08115846e ("net: dsa: qca8k: set internal delay also for sgmii") -Signed-off-by: Ansuel Smith -Reviewed-by: Vladimir Oltean -Signed-off-by: David S. Miller ---- - drivers/net/dsa/qca8k.c | 12 ++++++------ - 1 file changed, 6 insertions(+), 6 deletions(-) - ---- a/drivers/net/dsa/qca8k.c -+++ b/drivers/net/dsa/qca8k.c -@@ -1433,6 +1433,12 @@ qca8k_phylink_mac_config(struct dsa_swit - - qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val); - -+ /* From original code is reported port instability as SGMII also -+ * require delay set. Apply advised values here or take them from DT. -+ */ -+ if (state->interface == PHY_INTERFACE_MODE_SGMII) -+ qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg); -+ - /* For qca8327/qca8328/qca8334/qca8338 sgmii is unique and - * falling edge is set writing in the PORT0 PAD reg - */ -@@ -1455,12 +1461,6 @@ qca8k_phylink_mac_config(struct dsa_swit - QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE, - val); - -- /* From original code is reported port instability as SGMII also -- * require delay set. Apply advised values here or take them from DT. 
-- */ -- if (state->interface == PHY_INTERFACE_MODE_SGMII) -- qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg); -- - break; - default: - dev_err(ds->dev, "xMII mode %s not supported for port %d\n", diff --git a/target/linux/generic/backport-6.1/752-v5.16-net-dsa-qca8k-fix-MTU-calculation.patch b/target/linux/generic/backport-6.1/752-v5.16-net-dsa-qca8k-fix-MTU-calculation.patch deleted file mode 100644 index 7348d93ec44b..000000000000 --- a/target/linux/generic/backport-6.1/752-v5.16-net-dsa-qca8k-fix-MTU-calculation.patch +++ /dev/null @@ -1,46 +0,0 @@ -From 65258b9d8cde45689bdc86ca39b50f01f983733b Mon Sep 17 00:00:00 2001 -From: Robert Marko -Date: Fri, 19 Nov 2021 03:03:50 +0100 -Subject: [PATCH] net: dsa: qca8k: fix MTU calculation - -qca8k has a global MTU, so its tracking the MTU per port to make sure -that the largest MTU gets applied. -Since it uses the frame size instead of MTU the driver MTU change function -will then add the size of Ethernet header and checksum on top of MTU. - -The driver currently populates the per port MTU size as Ethernet frame -length + checksum which equals 1518. - -The issue is that then MTU change function will go through all of the -ports, find the largest MTU and apply the Ethernet header + checksum on -top of it again, so for a desired MTU of 1500 you will end up with 1536. - -This is obviously incorrect, so to correct it populate the per port struct -MTU with just the MTU and not include the Ethernet header + checksum size -as those will be added by the MTU change function. - -Fixes: f58d2598cf70 ("net: dsa: qca8k: implement the port MTU callbacks") -Signed-off-by: Robert Marko -Signed-off-by: Ansuel Smith -Reviewed-by: Vladimir Oltean -Signed-off-by: David S. Miller ---- - drivers/net/dsa/qca8k.c | 6 +++++- - 1 file changed, 5 insertions(+), 1 deletion(-) - ---- a/drivers/net/dsa/qca8k.c -+++ b/drivers/net/dsa/qca8k.c -@@ -1256,8 +1256,12 @@ qca8k_setup(struct dsa_switch *ds) - /* Set initial MTU for every port. - * We have only have a general MTU setting. So track - * every port and set the max across all port. -+ * Set per port MTU to 1500 as the MTU change function -+ * will add the overhead and if its set to 1518 then it -+ * will apply the overhead again and we will end up with -+ * MTU of 1536 instead of 1518 - */ -- priv->port_mtu[i] = ETH_FRAME_LEN + ETH_FCS_LEN; -+ priv->port_mtu[i] = ETH_DATA_LEN; - } - - /* Special GLOBAL_FC_THRESH value are needed for ar8327 switch */ diff --git a/target/linux/generic/backport-6.1/753-v5.17-net-next-net-dsa-qca8k-remove-redundant-check-in-parse_port_config.patch b/target/linux/generic/backport-6.1/753-v5.17-net-next-net-dsa-qca8k-remove-redundant-check-in-parse_port_config.patch deleted file mode 100644 index f477b1b92985..000000000000 --- a/target/linux/generic/backport-6.1/753-v5.17-net-next-net-dsa-qca8k-remove-redundant-check-in-parse_port_config.patch +++ /dev/null @@ -1,29 +0,0 @@ -From b9133f3ef5a2659730cf47a74bd0a9259f1cf8ff Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Mon, 22 Nov 2021 16:23:40 +0100 -Subject: net: dsa: qca8k: remove redundant check in parse_port_config - -The very next check for port 0 and 6 already makes sure we don't go out -of bounds with the ports_config delay table. -Remove the redundant check. - -Reported-by: kernel test robot -Reported-by: Dan Carpenter -Signed-off-by: Ansuel Smith -Reviewed-by: Vladimir Oltean -Signed-off-by: David S. 
Miller ---- - drivers/net/dsa/qca8k.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/net/dsa/qca8k.c -+++ b/drivers/net/dsa/qca8k.c -@@ -983,7 +983,7 @@ qca8k_parse_port_config(struct qca8k_pri - u32 delay; - - /* We have 2 CPU port. Check them */ -- for (port = 0; port < QCA8K_NUM_PORTS && cpu_port_index < QCA8K_NUM_CPU_PORTS; port++) { -+ for (port = 0; port < QCA8K_NUM_PORTS; port++) { - /* Skip every other port */ - if (port != 0 && port != 6) - continue; diff --git a/target/linux/generic/backport-6.1/754-v5.17-net-next-net-dsa-qca8k-convert-to-GENMASK_FIELD_PREP_FIELD_GET.patch b/target/linux/generic/backport-6.1/754-v5.17-net-next-net-dsa-qca8k-convert-to-GENMASK_FIELD_PREP_FIELD_GET.patch deleted file mode 100644 index 2cea88089dbb..000000000000 --- a/target/linux/generic/backport-6.1/754-v5.17-net-next-net-dsa-qca8k-convert-to-GENMASK_FIELD_PREP_FIELD_GET.patch +++ /dev/null @@ -1,507 +0,0 @@ -From 90ae68bfc2ffcb54a4ba4f64edbeb84a80cbb57c Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Mon, 22 Nov 2021 16:23:41 +0100 -Subject: net: dsa: qca8k: convert to GENMASK/FIELD_PREP/FIELD_GET - -Convert and try to standardize bit fields using -GENMASK/FIELD_PREP/FIELD_GET macros. Rework some logic to support the -standard macro and tidy things up. No functional change intended. - -Signed-off-by: Ansuel Smith -Signed-off-by: David S. Miller ---- - drivers/net/dsa/qca8k.c | 98 +++++++++++++++---------------- - drivers/net/dsa/qca8k.h | 153 ++++++++++++++++++++++++++---------------------- - 2 files changed, 130 insertions(+), 121 deletions(-) - ---- a/drivers/net/dsa/qca8k.c -+++ b/drivers/net/dsa/qca8k.c -@@ -9,6 +9,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -319,18 +320,18 @@ qca8k_fdb_read(struct qca8k_priv *priv, - } - - /* vid - 83:72 */ -- fdb->vid = (reg[2] >> QCA8K_ATU_VID_S) & QCA8K_ATU_VID_M; -+ fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]); - /* aging - 67:64 */ -- fdb->aging = reg[2] & QCA8K_ATU_STATUS_M; -+ fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]); - /* portmask - 54:48 */ -- fdb->port_mask = (reg[1] >> QCA8K_ATU_PORT_S) & QCA8K_ATU_PORT_M; -+ fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]); - /* mac - 47:0 */ -- fdb->mac[0] = (reg[1] >> QCA8K_ATU_ADDR0_S) & 0xff; -- fdb->mac[1] = reg[1] & 0xff; -- fdb->mac[2] = (reg[0] >> QCA8K_ATU_ADDR2_S) & 0xff; -- fdb->mac[3] = (reg[0] >> QCA8K_ATU_ADDR3_S) & 0xff; -- fdb->mac[4] = (reg[0] >> QCA8K_ATU_ADDR4_S) & 0xff; -- fdb->mac[5] = reg[0] & 0xff; -+ fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]); -+ fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]); -+ fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]); -+ fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]); -+ fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]); -+ fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]); - - return 0; - } -@@ -343,18 +344,18 @@ qca8k_fdb_write(struct qca8k_priv *priv, - int i; - - /* vid - 83:72 */ -- reg[2] = (vid & QCA8K_ATU_VID_M) << QCA8K_ATU_VID_S; -+ reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid); - /* aging - 67:64 */ -- reg[2] |= aging & QCA8K_ATU_STATUS_M; -+ reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging); - /* portmask - 54:48 */ -- reg[1] = (port_mask & QCA8K_ATU_PORT_M) << QCA8K_ATU_PORT_S; -+ reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask); - /* mac - 47:0 */ -- reg[1] |= mac[0] << QCA8K_ATU_ADDR0_S; -- reg[1] |= mac[1]; -- reg[0] |= mac[2] << QCA8K_ATU_ADDR2_S; -- reg[0] |= mac[3] << QCA8K_ATU_ADDR3_S; -- reg[0] |= mac[4] << 
QCA8K_ATU_ADDR4_S; -- reg[0] |= mac[5]; -+ reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]); -+ reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]); -+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]); -+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]); -+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]); -+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]); - - /* load the array into the ARL table */ - for (i = 0; i < 3; i++) -@@ -372,7 +373,7 @@ qca8k_fdb_access(struct qca8k_priv *priv - reg |= cmd; - if (port >= 0) { - reg |= QCA8K_ATU_FUNC_PORT_EN; -- reg |= (port & QCA8K_ATU_FUNC_PORT_M) << QCA8K_ATU_FUNC_PORT_S; -+ reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port); - } - - /* Write the function register triggering the table access */ -@@ -454,7 +455,7 @@ qca8k_vlan_access(struct qca8k_priv *pri - /* Set the command and VLAN index */ - reg = QCA8K_VTU_FUNC1_BUSY; - reg |= cmd; -- reg |= vid << QCA8K_VTU_FUNC1_VID_S; -+ reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid); - - /* Write the function register triggering the table access */ - ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg); -@@ -500,13 +501,11 @@ qca8k_vlan_add(struct qca8k_priv *priv, - if (ret < 0) - goto out; - reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN; -- reg &= ~(QCA8K_VTU_FUNC0_EG_MODE_MASK << QCA8K_VTU_FUNC0_EG_MODE_S(port)); -+ reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port); - if (untagged) -- reg |= QCA8K_VTU_FUNC0_EG_MODE_UNTAG << -- QCA8K_VTU_FUNC0_EG_MODE_S(port); -+ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port); - else -- reg |= QCA8K_VTU_FUNC0_EG_MODE_TAG << -- QCA8K_VTU_FUNC0_EG_MODE_S(port); -+ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port); - - ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg); - if (ret) -@@ -534,15 +533,13 @@ qca8k_vlan_del(struct qca8k_priv *priv, - ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, ®); - if (ret < 0) - goto out; -- reg &= ~(3 << QCA8K_VTU_FUNC0_EG_MODE_S(port)); -- reg |= QCA8K_VTU_FUNC0_EG_MODE_NOT << -- QCA8K_VTU_FUNC0_EG_MODE_S(port); -+ reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port); -+ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port); - - /* Check if we're the last member to be removed */ - del = true; - for (i = 0; i < QCA8K_NUM_PORTS; i++) { -- mask = QCA8K_VTU_FUNC0_EG_MODE_NOT; -- mask <<= QCA8K_VTU_FUNC0_EG_MODE_S(i); -+ mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i); - - if ((reg & mask) != mask) { - del = false; -@@ -1014,7 +1011,7 @@ qca8k_parse_port_config(struct qca8k_pri - mode == PHY_INTERFACE_MODE_RGMII_TXID) - delay = 1; - -- if (delay > QCA8K_MAX_DELAY) { -+ if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, delay)) { - dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value"); - delay = 3; - } -@@ -1030,7 +1027,7 @@ qca8k_parse_port_config(struct qca8k_pri - mode == PHY_INTERFACE_MODE_RGMII_RXID) - delay = 2; - -- if (delay > QCA8K_MAX_DELAY) { -+ if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, delay)) { - dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value"); - delay = 3; - } -@@ -1141,8 +1138,8 @@ qca8k_setup(struct dsa_switch *ds) - /* Enable QCA header mode on all cpu ports */ - if (dsa_is_cpu_port(ds, i)) { - ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i), -- QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_TX_S | -- QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_RX_S); -+ FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) | -+ FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL)); - if (ret) { - dev_err(priv->dev, "failed 
enabling QCA header mode"); - return ret; -@@ -1159,10 +1156,10 @@ qca8k_setup(struct dsa_switch *ds) - * for igmp, unknown, multicast and broadcast packet - */ - ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1, -- BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S | -- BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_BC_DP_S | -- BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_MC_DP_S | -- BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S); -+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) | -+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) | -+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) | -+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port))); - if (ret) - return ret; - -@@ -1180,8 +1177,6 @@ qca8k_setup(struct dsa_switch *ds) - - /* Individual user ports get connected to CPU port only */ - if (dsa_is_user_port(ds, i)) { -- int shift = 16 * (i % 2); -- - ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i), - QCA8K_PORT_LOOKUP_MEMBER, - BIT(cpu_port)); -@@ -1198,8 +1193,8 @@ qca8k_setup(struct dsa_switch *ds) - * default egress vid - */ - ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i), -- 0xfff << shift, -- QCA8K_PORT_VID_DEF << shift); -+ QCA8K_EGREES_VLAN_PORT_MASK(i), -+ QCA8K_EGREES_VLAN_PORT(i, QCA8K_PORT_VID_DEF)); - if (ret) - return ret; - -@@ -1246,7 +1241,7 @@ qca8k_setup(struct dsa_switch *ds) - QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN | - QCA8K_PORT_HOL_CTRL1_WRED_EN; - qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i), -- QCA8K_PORT_HOL_CTRL1_ING_BUF | -+ QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK | - QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN | - QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN | - QCA8K_PORT_HOL_CTRL1_WRED_EN, -@@ -1269,8 +1264,8 @@ qca8k_setup(struct dsa_switch *ds) - mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) | - QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496); - qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH, -- QCA8K_GLOBAL_FC_GOL_XON_THRES_S | -- QCA8K_GLOBAL_FC_GOL_XOFF_THRES_S, -+ QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK | -+ QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK, - mask); - } - -@@ -1935,11 +1930,11 @@ qca8k_port_vlan_filtering(struct dsa_swi - - if (vlan_filtering) { - ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), -- QCA8K_PORT_LOOKUP_VLAN_MODE, -+ QCA8K_PORT_LOOKUP_VLAN_MODE_MASK, - QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE); - } else { - ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), -- QCA8K_PORT_LOOKUP_VLAN_MODE, -+ QCA8K_PORT_LOOKUP_VLAN_MODE_MASK, - QCA8K_PORT_LOOKUP_VLAN_MODE_NONE); - } - -@@ -1963,10 +1958,9 @@ qca8k_port_vlan_add(struct dsa_switch *d - } - - if (pvid) { -- int shift = 16 * (port % 2); -- - ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port), -- 0xfff << shift, vlan->vid << shift); -+ QCA8K_EGREES_VLAN_PORT_MASK(port), -+ QCA8K_EGREES_VLAN_PORT(port, vlan->vid)); - if (ret) - return ret; - -@@ -2060,7 +2054,7 @@ static int qca8k_read_switch_id(struct q - if (ret < 0) - return -ENODEV; - -- id = QCA8K_MASK_CTRL_DEVICE_ID(val & QCA8K_MASK_CTRL_DEVICE_ID_MASK); -+ id = QCA8K_MASK_CTRL_DEVICE_ID(val); - if (id != data->id) { - dev_err(priv->dev, "Switch id detected %x but expected %x", id, data->id); - return -ENODEV; -@@ -2069,7 +2063,7 @@ static int qca8k_read_switch_id(struct q - priv->switch_id = id; - - /* Save revision to communicate to the internal PHY driver */ -- priv->switch_revision = (val & QCA8K_MASK_CTRL_REV_ID_MASK); -+ priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val); - - return 0; - } ---- a/drivers/net/dsa/qca8k.h -+++ b/drivers/net/dsa/qca8k.h -@@ -30,9 +30,9 @@ - /* Global control registers */ - #define QCA8K_REG_MASK_CTRL 0x000 - #define 
QCA8K_MASK_CTRL_REV_ID_MASK GENMASK(7, 0) --#define QCA8K_MASK_CTRL_REV_ID(x) ((x) >> 0) -+#define QCA8K_MASK_CTRL_REV_ID(x) FIELD_GET(QCA8K_MASK_CTRL_REV_ID_MASK, x) - #define QCA8K_MASK_CTRL_DEVICE_ID_MASK GENMASK(15, 8) --#define QCA8K_MASK_CTRL_DEVICE_ID(x) ((x) >> 8) -+#define QCA8K_MASK_CTRL_DEVICE_ID(x) FIELD_GET(QCA8K_MASK_CTRL_DEVICE_ID_MASK, x) - #define QCA8K_REG_PORT0_PAD_CTRL 0x004 - #define QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN BIT(31) - #define QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE BIT(19) -@@ -41,12 +41,11 @@ - #define QCA8K_REG_PORT6_PAD_CTRL 0x00c - #define QCA8K_PORT_PAD_RGMII_EN BIT(26) - #define QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK GENMASK(23, 22) --#define QCA8K_PORT_PAD_RGMII_TX_DELAY(x) ((x) << 22) -+#define QCA8K_PORT_PAD_RGMII_TX_DELAY(x) FIELD_PREP(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, x) - #define QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK GENMASK(21, 20) --#define QCA8K_PORT_PAD_RGMII_RX_DELAY(x) ((x) << 20) -+#define QCA8K_PORT_PAD_RGMII_RX_DELAY(x) FIELD_PREP(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, x) - #define QCA8K_PORT_PAD_RGMII_TX_DELAY_EN BIT(25) - #define QCA8K_PORT_PAD_RGMII_RX_DELAY_EN BIT(24) --#define QCA8K_MAX_DELAY 3 - #define QCA8K_PORT_PAD_SGMII_EN BIT(7) - #define QCA8K_REG_PWS 0x010 - #define QCA8K_PWS_POWER_ON_SEL BIT(31) -@@ -68,10 +67,12 @@ - #define QCA8K_MDIO_MASTER_READ BIT(27) - #define QCA8K_MDIO_MASTER_WRITE 0 - #define QCA8K_MDIO_MASTER_SUP_PRE BIT(26) --#define QCA8K_MDIO_MASTER_PHY_ADDR(x) ((x) << 21) --#define QCA8K_MDIO_MASTER_REG_ADDR(x) ((x) << 16) --#define QCA8K_MDIO_MASTER_DATA(x) (x) -+#define QCA8K_MDIO_MASTER_PHY_ADDR_MASK GENMASK(25, 21) -+#define QCA8K_MDIO_MASTER_PHY_ADDR(x) FIELD_PREP(QCA8K_MDIO_MASTER_PHY_ADDR_MASK, x) -+#define QCA8K_MDIO_MASTER_REG_ADDR_MASK GENMASK(20, 16) -+#define QCA8K_MDIO_MASTER_REG_ADDR(x) FIELD_PREP(QCA8K_MDIO_MASTER_REG_ADDR_MASK, x) - #define QCA8K_MDIO_MASTER_DATA_MASK GENMASK(15, 0) -+#define QCA8K_MDIO_MASTER_DATA(x) FIELD_PREP(QCA8K_MDIO_MASTER_DATA_MASK, x) - #define QCA8K_MDIO_MASTER_MAX_PORTS 5 - #define QCA8K_MDIO_MASTER_MAX_REG 32 - #define QCA8K_GOL_MAC_ADDR0 0x60 -@@ -93,9 +94,7 @@ - #define QCA8K_PORT_STATUS_FLOW_AUTO BIT(12) - #define QCA8K_REG_PORT_HDR_CTRL(_i) (0x9c + (_i * 4)) - #define QCA8K_PORT_HDR_CTRL_RX_MASK GENMASK(3, 2) --#define QCA8K_PORT_HDR_CTRL_RX_S 2 - #define QCA8K_PORT_HDR_CTRL_TX_MASK GENMASK(1, 0) --#define QCA8K_PORT_HDR_CTRL_TX_S 0 - #define QCA8K_PORT_HDR_CTRL_ALL 2 - #define QCA8K_PORT_HDR_CTRL_MGMT 1 - #define QCA8K_PORT_HDR_CTRL_NONE 0 -@@ -105,10 +104,11 @@ - #define QCA8K_SGMII_EN_TX BIT(3) - #define QCA8K_SGMII_EN_SD BIT(4) - #define QCA8K_SGMII_CLK125M_DELAY BIT(7) --#define QCA8K_SGMII_MODE_CTRL_MASK (BIT(22) | BIT(23)) --#define QCA8K_SGMII_MODE_CTRL_BASEX (0 << 22) --#define QCA8K_SGMII_MODE_CTRL_PHY (1 << 22) --#define QCA8K_SGMII_MODE_CTRL_MAC (2 << 22) -+#define QCA8K_SGMII_MODE_CTRL_MASK GENMASK(23, 22) -+#define QCA8K_SGMII_MODE_CTRL(x) FIELD_PREP(QCA8K_SGMII_MODE_CTRL_MASK, x) -+#define QCA8K_SGMII_MODE_CTRL_BASEX QCA8K_SGMII_MODE_CTRL(0x0) -+#define QCA8K_SGMII_MODE_CTRL_PHY QCA8K_SGMII_MODE_CTRL(0x1) -+#define QCA8K_SGMII_MODE_CTRL_MAC QCA8K_SGMII_MODE_CTRL(0x2) - - /* MAC_PWR_SEL registers */ - #define QCA8K_REG_MAC_PWR_SEL 0x0e4 -@@ -121,100 +121,115 @@ - - /* ACL registers */ - #define QCA8K_REG_PORT_VLAN_CTRL0(_i) (0x420 + (_i * 8)) --#define QCA8K_PORT_VLAN_CVID(x) (x << 16) --#define QCA8K_PORT_VLAN_SVID(x) x -+#define QCA8K_PORT_VLAN_CVID_MASK GENMASK(27, 16) -+#define QCA8K_PORT_VLAN_CVID(x) FIELD_PREP(QCA8K_PORT_VLAN_CVID_MASK, x) -+#define 
QCA8K_PORT_VLAN_SVID_MASK GENMASK(11, 0) -+#define QCA8K_PORT_VLAN_SVID(x) FIELD_PREP(QCA8K_PORT_VLAN_SVID_MASK, x) - #define QCA8K_REG_PORT_VLAN_CTRL1(_i) (0x424 + (_i * 8)) - #define QCA8K_REG_IPV4_PRI_BASE_ADDR 0x470 - #define QCA8K_REG_IPV4_PRI_ADDR_MASK 0x474 - - /* Lookup registers */ - #define QCA8K_REG_ATU_DATA0 0x600 --#define QCA8K_ATU_ADDR2_S 24 --#define QCA8K_ATU_ADDR3_S 16 --#define QCA8K_ATU_ADDR4_S 8 -+#define QCA8K_ATU_ADDR2_MASK GENMASK(31, 24) -+#define QCA8K_ATU_ADDR3_MASK GENMASK(23, 16) -+#define QCA8K_ATU_ADDR4_MASK GENMASK(15, 8) -+#define QCA8K_ATU_ADDR5_MASK GENMASK(7, 0) - #define QCA8K_REG_ATU_DATA1 0x604 --#define QCA8K_ATU_PORT_M 0x7f --#define QCA8K_ATU_PORT_S 16 --#define QCA8K_ATU_ADDR0_S 8 -+#define QCA8K_ATU_PORT_MASK GENMASK(22, 16) -+#define QCA8K_ATU_ADDR0_MASK GENMASK(15, 8) -+#define QCA8K_ATU_ADDR1_MASK GENMASK(7, 0) - #define QCA8K_REG_ATU_DATA2 0x608 --#define QCA8K_ATU_VID_M 0xfff --#define QCA8K_ATU_VID_S 8 --#define QCA8K_ATU_STATUS_M 0xf -+#define QCA8K_ATU_VID_MASK GENMASK(19, 8) -+#define QCA8K_ATU_STATUS_MASK GENMASK(3, 0) - #define QCA8K_ATU_STATUS_STATIC 0xf - #define QCA8K_REG_ATU_FUNC 0x60c - #define QCA8K_ATU_FUNC_BUSY BIT(31) - #define QCA8K_ATU_FUNC_PORT_EN BIT(14) - #define QCA8K_ATU_FUNC_MULTI_EN BIT(13) - #define QCA8K_ATU_FUNC_FULL BIT(12) --#define QCA8K_ATU_FUNC_PORT_M 0xf --#define QCA8K_ATU_FUNC_PORT_S 8 -+#define QCA8K_ATU_FUNC_PORT_MASK GENMASK(11, 8) - #define QCA8K_REG_VTU_FUNC0 0x610 - #define QCA8K_VTU_FUNC0_VALID BIT(20) - #define QCA8K_VTU_FUNC0_IVL_EN BIT(19) --#define QCA8K_VTU_FUNC0_EG_MODE_S(_i) (4 + (_i) * 2) --#define QCA8K_VTU_FUNC0_EG_MODE_MASK 3 --#define QCA8K_VTU_FUNC0_EG_MODE_UNMOD 0 --#define QCA8K_VTU_FUNC0_EG_MODE_UNTAG 1 --#define QCA8K_VTU_FUNC0_EG_MODE_TAG 2 --#define QCA8K_VTU_FUNC0_EG_MODE_NOT 3 -+/* QCA8K_VTU_FUNC0_EG_MODE_MASK GENMASK(17, 4) -+ * It does contain VLAN_MODE for each port [5:4] for port0, -+ * [7:6] for port1 ... [17:16] for port6. Use virtual port -+ * define to handle this. 
-+ */ -+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i) (4 + (_i) * 2) -+#define QCA8K_VTU_FUNC0_EG_MODE_MASK GENMASK(1, 0) -+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(_i) (GENMASK(1, 0) << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i)) -+#define QCA8K_VTU_FUNC0_EG_MODE_UNMOD FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x0) -+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_UNMOD(_i) (QCA8K_VTU_FUNC0_EG_MODE_UNMOD << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i)) -+#define QCA8K_VTU_FUNC0_EG_MODE_UNTAG FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x1) -+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(_i) (QCA8K_VTU_FUNC0_EG_MODE_UNTAG << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i)) -+#define QCA8K_VTU_FUNC0_EG_MODE_TAG FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x2) -+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(_i) (QCA8K_VTU_FUNC0_EG_MODE_TAG << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i)) -+#define QCA8K_VTU_FUNC0_EG_MODE_NOT FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x3) -+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(_i) (QCA8K_VTU_FUNC0_EG_MODE_NOT << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i)) - #define QCA8K_REG_VTU_FUNC1 0x614 - #define QCA8K_VTU_FUNC1_BUSY BIT(31) --#define QCA8K_VTU_FUNC1_VID_S 16 -+#define QCA8K_VTU_FUNC1_VID_MASK GENMASK(27, 16) - #define QCA8K_VTU_FUNC1_FULL BIT(4) - #define QCA8K_REG_GLOBAL_FW_CTRL0 0x620 - #define QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN BIT(10) - #define QCA8K_REG_GLOBAL_FW_CTRL1 0x624 --#define QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S 24 --#define QCA8K_GLOBAL_FW_CTRL1_BC_DP_S 16 --#define QCA8K_GLOBAL_FW_CTRL1_MC_DP_S 8 --#define QCA8K_GLOBAL_FW_CTRL1_UC_DP_S 0 -+#define QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK GENMASK(30, 24) -+#define QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK GENMASK(22, 16) -+#define QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK GENMASK(14, 8) -+#define QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK GENMASK(6, 0) - #define QCA8K_PORT_LOOKUP_CTRL(_i) (0x660 + (_i) * 0xc) - #define QCA8K_PORT_LOOKUP_MEMBER GENMASK(6, 0) --#define QCA8K_PORT_LOOKUP_VLAN_MODE GENMASK(9, 8) --#define QCA8K_PORT_LOOKUP_VLAN_MODE_NONE (0 << 8) --#define QCA8K_PORT_LOOKUP_VLAN_MODE_FALLBACK (1 << 8) --#define QCA8K_PORT_LOOKUP_VLAN_MODE_CHECK (2 << 8) --#define QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE (3 << 8) -+#define QCA8K_PORT_LOOKUP_VLAN_MODE_MASK GENMASK(9, 8) -+#define QCA8K_PORT_LOOKUP_VLAN_MODE(x) FIELD_PREP(QCA8K_PORT_LOOKUP_VLAN_MODE_MASK, x) -+#define QCA8K_PORT_LOOKUP_VLAN_MODE_NONE QCA8K_PORT_LOOKUP_VLAN_MODE(0x0) -+#define QCA8K_PORT_LOOKUP_VLAN_MODE_FALLBACK QCA8K_PORT_LOOKUP_VLAN_MODE(0x1) -+#define QCA8K_PORT_LOOKUP_VLAN_MODE_CHECK QCA8K_PORT_LOOKUP_VLAN_MODE(0x2) -+#define QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE QCA8K_PORT_LOOKUP_VLAN_MODE(0x3) - #define QCA8K_PORT_LOOKUP_STATE_MASK GENMASK(18, 16) --#define QCA8K_PORT_LOOKUP_STATE_DISABLED (0 << 16) --#define QCA8K_PORT_LOOKUP_STATE_BLOCKING (1 << 16) --#define QCA8K_PORT_LOOKUP_STATE_LISTENING (2 << 16) --#define QCA8K_PORT_LOOKUP_STATE_LEARNING (3 << 16) --#define QCA8K_PORT_LOOKUP_STATE_FORWARD (4 << 16) --#define QCA8K_PORT_LOOKUP_STATE GENMASK(18, 16) -+#define QCA8K_PORT_LOOKUP_STATE(x) FIELD_PREP(QCA8K_PORT_LOOKUP_STATE_MASK, x) -+#define QCA8K_PORT_LOOKUP_STATE_DISABLED QCA8K_PORT_LOOKUP_STATE(0x0) -+#define QCA8K_PORT_LOOKUP_STATE_BLOCKING QCA8K_PORT_LOOKUP_STATE(0x1) -+#define QCA8K_PORT_LOOKUP_STATE_LISTENING QCA8K_PORT_LOOKUP_STATE(0x2) -+#define QCA8K_PORT_LOOKUP_STATE_LEARNING QCA8K_PORT_LOOKUP_STATE(0x3) -+#define QCA8K_PORT_LOOKUP_STATE_FORWARD QCA8K_PORT_LOOKUP_STATE(0x4) - #define QCA8K_PORT_LOOKUP_LEARN BIT(20) - - #define QCA8K_REG_GLOBAL_FC_THRESH 0x800 --#define 
QCA8K_GLOBAL_FC_GOL_XON_THRES(x) ((x) << 16) --#define QCA8K_GLOBAL_FC_GOL_XON_THRES_S GENMASK(24, 16) --#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES(x) ((x) << 0) --#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES_S GENMASK(8, 0) -+#define QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK GENMASK(24, 16) -+#define QCA8K_GLOBAL_FC_GOL_XON_THRES(x) FIELD_PREP(QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK, x) -+#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK GENMASK(8, 0) -+#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES(x) FIELD_PREP(QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK, x) - - #define QCA8K_REG_PORT_HOL_CTRL0(_i) (0x970 + (_i) * 0x8) --#define QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF GENMASK(3, 0) --#define QCA8K_PORT_HOL_CTRL0_EG_PRI0(x) ((x) << 0) --#define QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF GENMASK(7, 4) --#define QCA8K_PORT_HOL_CTRL0_EG_PRI1(x) ((x) << 4) --#define QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF GENMASK(11, 8) --#define QCA8K_PORT_HOL_CTRL0_EG_PRI2(x) ((x) << 8) --#define QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF GENMASK(15, 12) --#define QCA8K_PORT_HOL_CTRL0_EG_PRI3(x) ((x) << 12) --#define QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF GENMASK(19, 16) --#define QCA8K_PORT_HOL_CTRL0_EG_PRI4(x) ((x) << 16) --#define QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF GENMASK(23, 20) --#define QCA8K_PORT_HOL_CTRL0_EG_PRI5(x) ((x) << 20) --#define QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF GENMASK(29, 24) --#define QCA8K_PORT_HOL_CTRL0_EG_PORT(x) ((x) << 24) -+#define QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF_MASK GENMASK(3, 0) -+#define QCA8K_PORT_HOL_CTRL0_EG_PRI0(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF_MASK, x) -+#define QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF_MASK GENMASK(7, 4) -+#define QCA8K_PORT_HOL_CTRL0_EG_PRI1(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF_MASK, x) -+#define QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF_MASK GENMASK(11, 8) -+#define QCA8K_PORT_HOL_CTRL0_EG_PRI2(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF_MASK, x) -+#define QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF_MASK GENMASK(15, 12) -+#define QCA8K_PORT_HOL_CTRL0_EG_PRI3(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF_MASK, x) -+#define QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF_MASK GENMASK(19, 16) -+#define QCA8K_PORT_HOL_CTRL0_EG_PRI4(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF_MASK, x) -+#define QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF_MASK GENMASK(23, 20) -+#define QCA8K_PORT_HOL_CTRL0_EG_PRI5(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF_MASK, x) -+#define QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF_MASK GENMASK(29, 24) -+#define QCA8K_PORT_HOL_CTRL0_EG_PORT(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF_MASK, x) - - #define QCA8K_REG_PORT_HOL_CTRL1(_i) (0x974 + (_i) * 0x8) --#define QCA8K_PORT_HOL_CTRL1_ING_BUF GENMASK(3, 0) --#define QCA8K_PORT_HOL_CTRL1_ING(x) ((x) << 0) -+#define QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK GENMASK(3, 0) -+#define QCA8K_PORT_HOL_CTRL1_ING(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK, x) - #define QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN BIT(6) - #define QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN BIT(7) - #define QCA8K_PORT_HOL_CTRL1_WRED_EN BIT(8) - #define QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN BIT(16) - - /* Pkt edit registers */ -+#define QCA8K_EGREES_VLAN_PORT_SHIFT(_i) (16 * ((_i) % 2)) -+#define QCA8K_EGREES_VLAN_PORT_MASK(_i) (GENMASK(11, 0) << QCA8K_EGREES_VLAN_PORT_SHIFT(_i)) -+#define QCA8K_EGREES_VLAN_PORT(_i, x) ((x) << QCA8K_EGREES_VLAN_PORT_SHIFT(_i)) - #define QCA8K_EGRESS_VLAN(x) (0x0c70 + (4 * (x / 2))) - - /* L3 registers */ diff --git a/target/linux/generic/backport-6.1/755-v5.17-net-next-net-dsa-qca8k-remove-extra-mutex_init-in-qca8k_setup.patch 
b/target/linux/generic/backport-6.1/755-v5.17-net-next-net-dsa-qca8k-remove-extra-mutex_init-in-qca8k_setup.patch deleted file mode 100644 index 8c39b8ea295a..000000000000 --- a/target/linux/generic/backport-6.1/755-v5.17-net-next-net-dsa-qca8k-remove-extra-mutex_init-in-qca8k_setup.patch +++ /dev/null @@ -1,25 +0,0 @@ -From 994c28b6f971fa5db8ae977daea37eee87d93d51 Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Mon, 22 Nov 2021 16:23:42 +0100 -Subject: net: dsa: qca8k: remove extra mutex_init in qca8k_setup - -Mutex is already init in sw_probe. Remove the extra init in qca8k_setup. - -Signed-off-by: Ansuel Smith -Reviewed-by: Vladimir Oltean -Signed-off-by: David S. Miller ---- - drivers/net/dsa/qca8k.c | 2 -- - 1 file changed, 2 deletions(-) - ---- a/drivers/net/dsa/qca8k.c -+++ b/drivers/net/dsa/qca8k.c -@@ -1086,8 +1086,6 @@ qca8k_setup(struct dsa_switch *ds) - if (ret) - return ret; - -- mutex_init(&priv->reg_mutex); -- - /* Start by setting up the register mapping */ - priv->regmap = devm_regmap_init(ds->dev, NULL, priv, - &qca8k_regmap_config); diff --git a/target/linux/generic/backport-6.1/756-v5.17-net-next-net-dsa-qca8k-move-regmap-init-in-probe-and-set-it.patch b/target/linux/generic/backport-6.1/756-v5.17-net-next-net-dsa-qca8k-move-regmap-init-in-probe-and-set-it.patch deleted file mode 100644 index 44d938c53e85..000000000000 --- a/target/linux/generic/backport-6.1/756-v5.17-net-next-net-dsa-qca8k-move-regmap-init-in-probe-and-set-it.patch +++ /dev/null @@ -1,46 +0,0 @@ -From 36b8af12f424e7a7f60a935c60a0fd4aa0822378 Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Mon, 22 Nov 2021 16:23:43 +0100 -Subject: net: dsa: qca8k: move regmap init in probe and set it mandatory - -In preparation for regmap conversion, move regmap init in the probe -function and make it mandatory as any read/write/rmw operation will be -converted to regmap API. - -Signed-off-by: Ansuel Smith -Reviewed-by: Vladimir Oltean -Signed-off-by: David S. 
Miller ---- - drivers/net/dsa/qca8k.c | 14 ++++++++------ - 1 file changed, 8 insertions(+), 6 deletions(-) - ---- a/drivers/net/dsa/qca8k.c -+++ b/drivers/net/dsa/qca8k.c -@@ -1086,12 +1086,6 @@ qca8k_setup(struct dsa_switch *ds) - if (ret) - return ret; - -- /* Start by setting up the register mapping */ -- priv->regmap = devm_regmap_init(ds->dev, NULL, priv, -- &qca8k_regmap_config); -- if (IS_ERR(priv->regmap)) -- dev_warn(priv->dev, "regmap initialization failed"); -- - ret = qca8k_setup_mdio_bus(priv); - if (ret) - return ret; -@@ -2096,6 +2090,14 @@ qca8k_sw_probe(struct mdio_device *mdiod - gpiod_set_value_cansleep(priv->reset_gpio, 0); - } - -+ /* Start by setting up the register mapping */ -+ priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, priv, -+ &qca8k_regmap_config); -+ if (IS_ERR(priv->regmap)) { -+ dev_err(priv->dev, "regmap initialization failed"); -+ return PTR_ERR(priv->regmap); -+ } -+ - /* Check the detected switch id */ - ret = qca8k_read_switch_id(priv); - if (ret) diff --git a/target/linux/generic/backport-6.1/757-v5.17-net-next-net-dsa-qca8k-initial-conversion-to-regmap-heper.patch b/target/linux/generic/backport-6.1/757-v5.17-net-next-net-dsa-qca8k-initial-conversion-to-regmap-heper.patch deleted file mode 100644 index 4ca9c8ba41d6..000000000000 --- a/target/linux/generic/backport-6.1/757-v5.17-net-next-net-dsa-qca8k-initial-conversion-to-regmap-heper.patch +++ /dev/null @@ -1,249 +0,0 @@ -From 8b5f3f29a81a71934d004e21a1292c1148b05926 Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Mon, 22 Nov 2021 16:23:44 +0100 -Subject: net: dsa: qca8k: initial conversion to regmap helper - -Convert any qca8k set/clear/pool to regmap helper and add -missing config to regmap_config struct. -Read/write/rmw operation are reworked to use the regmap helper -internally to keep the delta of this patch low. These additional -function will then be dropped when the code split will be proposed. - -Ipq40xx SoC have the internal switch based on the qca8k regmap but use -mmio for read/write/rmw operation instead of mdio. -In preparation for the support of this internal switch, convert the -driver to regmap API to later split the driver to common and specific -code. The overhead introduced by the use of regamp API is marginal as the -internal mdio will bypass it by using its direct access and regmap will be -used only by configuration functions or fdb access. - -Signed-off-by: Ansuel Smith -Signed-off-by: David S. 
Miller ---- - drivers/net/dsa/qca8k.c | 107 +++++++++++++++++++++--------------------------- - 1 file changed, 47 insertions(+), 60 deletions(-) - ---- a/drivers/net/dsa/qca8k.c -+++ b/drivers/net/dsa/qca8k.c -@@ -10,6 +10,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -152,6 +153,25 @@ qca8k_set_page(struct mii_bus *bus, u16 - static int - qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val) - { -+ return regmap_read(priv->regmap, reg, val); -+} -+ -+static int -+qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val) -+{ -+ return regmap_write(priv->regmap, reg, val); -+} -+ -+static int -+qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val) -+{ -+ return regmap_update_bits(priv->regmap, reg, mask, write_val); -+} -+ -+static int -+qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val) -+{ -+ struct qca8k_priv *priv = (struct qca8k_priv *)ctx; - struct mii_bus *bus = priv->bus; - u16 r1, r2, page; - int ret; -@@ -172,8 +192,9 @@ exit: - } - - static int --qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val) -+qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val) - { -+ struct qca8k_priv *priv = (struct qca8k_priv *)ctx; - struct mii_bus *bus = priv->bus; - u16 r1, r2, page; - int ret; -@@ -194,8 +215,9 @@ exit: - } - - static int --qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val) -+qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val) - { -+ struct qca8k_priv *priv = (struct qca8k_priv *)ctx; - struct mii_bus *bus = priv->bus; - u16 r1, r2, page; - u32 val; -@@ -223,34 +245,6 @@ exit: - return ret; - } - --static int --qca8k_reg_set(struct qca8k_priv *priv, u32 reg, u32 val) --{ -- return qca8k_rmw(priv, reg, 0, val); --} -- --static int --qca8k_reg_clear(struct qca8k_priv *priv, u32 reg, u32 val) --{ -- return qca8k_rmw(priv, reg, val, 0); --} -- --static int --qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val) --{ -- struct qca8k_priv *priv = (struct qca8k_priv *)ctx; -- -- return qca8k_read(priv, reg, val); --} -- --static int --qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val) --{ -- struct qca8k_priv *priv = (struct qca8k_priv *)ctx; -- -- return qca8k_write(priv, reg, val); --} -- - static const struct regmap_range qca8k_readable_ranges[] = { - regmap_reg_range(0x0000, 0x00e4), /* Global control */ - regmap_reg_range(0x0100, 0x0168), /* EEE control */ -@@ -282,26 +276,19 @@ static struct regmap_config qca8k_regmap - .max_register = 0x16ac, /* end MIB - Port6 range */ - .reg_read = qca8k_regmap_read, - .reg_write = qca8k_regmap_write, -+ .reg_update_bits = qca8k_regmap_update_bits, - .rd_table = &qca8k_readable_table, -+ .disable_locking = true, /* Locking is handled by qca8k read/write */ -+ .cache_type = REGCACHE_NONE, /* Explicitly disable CACHE */ - }; - - static int - qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask) - { -- int ret, ret1; - u32 val; - -- ret = read_poll_timeout(qca8k_read, ret1, !(val & mask), -- 0, QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false, -- priv, reg, &val); -- -- /* Check if qca8k_read has failed for a different reason -- * before returning -ETIMEDOUT -- */ -- if (ret < 0 && ret1 < 0) -- return ret1; -- -- return ret; -+ return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0, -+ QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC); - } - - static int -@@ -568,7 +555,7 @@ qca8k_mib_init(struct qca8k_priv *priv) - int ret; - - mutex_lock(&priv->reg_mutex); -- ret = qca8k_reg_set(priv, QCA8K_REG_MIB, 
QCA8K_MIB_FLUSH | QCA8K_MIB_BUSY); -+ ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_FLUSH | QCA8K_MIB_BUSY); - if (ret) - goto exit; - -@@ -576,7 +563,7 @@ qca8k_mib_init(struct qca8k_priv *priv) - if (ret) - goto exit; - -- ret = qca8k_reg_set(priv, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP); -+ ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP); - if (ret) - goto exit; - -@@ -597,9 +584,9 @@ qca8k_port_set_status(struct qca8k_priv - mask |= QCA8K_PORT_STATUS_LINK_AUTO; - - if (enable) -- qca8k_reg_set(priv, QCA8K_REG_PORT_STATUS(port), mask); -+ regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask); - else -- qca8k_reg_clear(priv, QCA8K_REG_PORT_STATUS(port), mask); -+ regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask); - } - - static u32 -@@ -861,8 +848,8 @@ qca8k_setup_mdio_bus(struct qca8k_priv * - * a dt-overlay and driver reload changed the configuration - */ - -- return qca8k_reg_clear(priv, QCA8K_MDIO_MASTER_CTRL, -- QCA8K_MDIO_MASTER_EN); -+ return regmap_clear_bits(priv->regmap, QCA8K_MDIO_MASTER_CTRL, -+ QCA8K_MDIO_MASTER_EN); - } - - /* Check if the devicetree declare the port:phy mapping */ -@@ -1099,16 +1086,16 @@ qca8k_setup(struct dsa_switch *ds) - return ret; - - /* Make sure MAC06 is disabled */ -- ret = qca8k_reg_clear(priv, QCA8K_REG_PORT0_PAD_CTRL, -- QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN); -+ ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL, -+ QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN); - if (ret) { - dev_err(priv->dev, "failed disabling MAC06 exchange"); - return ret; - } - - /* Enable CPU Port */ -- ret = qca8k_reg_set(priv, QCA8K_REG_GLOBAL_FW_CTRL0, -- QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN); -+ ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, -+ QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN); - if (ret) { - dev_err(priv->dev, "failed enabling CPU port"); - return ret; -@@ -1176,8 +1163,8 @@ qca8k_setup(struct dsa_switch *ds) - return ret; - - /* Enable ARP Auto-learning by default */ -- ret = qca8k_reg_set(priv, QCA8K_PORT_LOOKUP_CTRL(i), -- QCA8K_PORT_LOOKUP_LEARN); -+ ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(i), -+ QCA8K_PORT_LOOKUP_LEARN); - if (ret) - return ret; - -@@ -1745,9 +1732,9 @@ qca8k_port_bridge_join(struct dsa_switch - /* Add this port to the portvlan mask of the other ports - * in the bridge - */ -- ret = qca8k_reg_set(priv, -- QCA8K_PORT_LOOKUP_CTRL(i), -- BIT(port)); -+ ret = regmap_set_bits(priv->regmap, -+ QCA8K_PORT_LOOKUP_CTRL(i), -+ BIT(port)); - if (ret) - return ret; - if (i != port) -@@ -1777,9 +1764,9 @@ qca8k_port_bridge_leave(struct dsa_switc - /* Remove this port to the portvlan mask of the other ports - * in the bridge - */ -- qca8k_reg_clear(priv, -- QCA8K_PORT_LOOKUP_CTRL(i), -- BIT(port)); -+ regmap_clear_bits(priv->regmap, -+ QCA8K_PORT_LOOKUP_CTRL(i), -+ BIT(port)); - } - - /* Set the cpu port to be the only one in the portvlan mask of diff --git a/target/linux/generic/backport-6.1/758-v5.17-net-next-net-dsa-qca8k-add-additional-MIB-counter-and-.patch b/target/linux/generic/backport-6.1/758-v5.17-net-next-net-dsa-qca8k-add-additional-MIB-counter-and-.patch deleted file mode 100644 index c8c050933b68..000000000000 --- a/target/linux/generic/backport-6.1/758-v5.17-net-next-net-dsa-qca8k-add-additional-MIB-counter-and-.patch +++ /dev/null @@ -1,120 +0,0 @@ -From c126f118b330ccf0db0dda4a4bd6c729865a205f Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Mon, 22 Nov 2021 16:23:45 +0100 -Subject: net: dsa: qca8k: add additional MIB counter and make it dynamic - 
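As a rough illustration of the pattern the preceding qca8k patches converge on (GENMASK/FIELD_PREP field handling plus thin regmap accessors), here is a minimal, self-contained sketch. The struct, register offset and field names are placeholders for the example only, not the driver's real definitions; only the regmap and bitfield helpers themselves are taken from the quoted code.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/regmap.h>

/* Placeholder register layout for the sketch */
#define EXAMPLE_REG_PORT_STATUS(p)	(0x07c + (p) * 4)
#define EXAMPLE_PORT_SPEED_MASK		GENMASK(1, 0)

struct example_priv {
	struct regmap *regmap;
};

/* Thin wrapper: call sites stay short, regmap handles the bus access */
static int example_read(struct example_priv *priv, u32 reg, u32 *val)
{
	return regmap_read(priv->regmap, reg, val);
}

static int example_set_port_speed(struct example_priv *priv, int port, u32 speed)
{
	/* FIELD_PREP shifts the value into the GENMASK-defined field,
	 * regmap_update_bits performs the read-modify-write
	 */
	return regmap_update_bits(priv->regmap, EXAMPLE_REG_PORT_STATUS(port),
				  EXAMPLE_PORT_SPEED_MASK,
				  FIELD_PREP(EXAMPLE_PORT_SPEED_MASK, speed));
}

The point of the conversion is visible in the wrapper: no hand-rolled shift constants, and the mask definition is the single source of truth for both placing and extracting the field.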
-We are currently missing 2 additionals MIB counter present in QCA833x -switch. -QC832x switch have 39 MIB counter and QCA833X have 41 MIB counter. -Add the additional MIB counter and rework the MIB function to print the -correct supported counter from the match_data struct. - -Signed-off-by: Ansuel Smith -Reviewed-by: Vladimir Oltean -Signed-off-by: David S. Miller ---- - drivers/net/dsa/qca8k.c | 23 ++++++++++++++++++++--- - drivers/net/dsa/qca8k.h | 4 ++++ - 2 files changed, 24 insertions(+), 3 deletions(-) - ---- a/drivers/net/dsa/qca8k.c -+++ b/drivers/net/dsa/qca8k.c -@@ -70,6 +70,8 @@ static const struct qca8k_mib_desc ar832 - MIB_DESC(1, 0x9c, "TxExcDefer"), - MIB_DESC(1, 0xa0, "TxDefer"), - MIB_DESC(1, 0xa4, "TxLateCol"), -+ MIB_DESC(1, 0xa8, "RXUnicast"), -+ MIB_DESC(1, 0xac, "TXUnicast"), - }; - - /* The 32bit switch registers are accessed indirectly. To achieve this we need -@@ -1605,12 +1607,16 @@ qca8k_phylink_mac_link_up(struct dsa_swi - static void - qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data) - { -+ const struct qca8k_match_data *match_data; -+ struct qca8k_priv *priv = ds->priv; - int i; - - if (stringset != ETH_SS_STATS) - return; - -- for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++) -+ match_data = of_device_get_match_data(priv->dev); -+ -+ for (i = 0; i < match_data->mib_count; i++) - strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name, - ETH_GSTRING_LEN); - } -@@ -1620,12 +1626,15 @@ qca8k_get_ethtool_stats(struct dsa_switc - uint64_t *data) - { - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -+ const struct qca8k_match_data *match_data; - const struct qca8k_mib_desc *mib; - u32 reg, i, val; - u32 hi = 0; - int ret; - -- for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++) { -+ match_data = of_device_get_match_data(priv->dev); -+ -+ for (i = 0; i < match_data->mib_count; i++) { - mib = &ar8327_mib[i]; - reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset; - -@@ -1648,10 +1657,15 @@ qca8k_get_ethtool_stats(struct dsa_switc - static int - qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset) - { -+ const struct qca8k_match_data *match_data; -+ struct qca8k_priv *priv = ds->priv; -+ - if (sset != ETH_SS_STATS) - return 0; - -- return ARRAY_SIZE(ar8327_mib); -+ match_data = of_device_get_match_data(priv->dev); -+ -+ return match_data->mib_count; - } - - static int -@@ -2173,14 +2187,17 @@ static SIMPLE_DEV_PM_OPS(qca8k_pm_ops, - static const struct qca8k_match_data qca8327 = { - .id = QCA8K_ID_QCA8327, - .reduced_package = true, -+ .mib_count = QCA8K_QCA832X_MIB_COUNT, - }; - - static const struct qca8k_match_data qca8328 = { - .id = QCA8K_ID_QCA8327, -+ .mib_count = QCA8K_QCA832X_MIB_COUNT, - }; - - static const struct qca8k_match_data qca833x = { - .id = QCA8K_ID_QCA8337, -+ .mib_count = QCA8K_QCA833X_MIB_COUNT, - }; - - static const struct of_device_id qca8k_of_match[] = { ---- a/drivers/net/dsa/qca8k.h -+++ b/drivers/net/dsa/qca8k.h -@@ -21,6 +21,9 @@ - #define PHY_ID_QCA8337 0x004dd036 - #define QCA8K_ID_QCA8337 0x13 - -+#define QCA8K_QCA832X_MIB_COUNT 39 -+#define QCA8K_QCA833X_MIB_COUNT 41 -+ - #define QCA8K_BUSY_WAIT_TIMEOUT 2000 - - #define QCA8K_NUM_FDB_RECORDS 2048 -@@ -279,6 +282,7 @@ struct ar8xxx_port_status { - struct qca8k_match_data { - u8 id; - bool reduced_package; -+ u8 mib_count; - }; - - enum { diff --git a/target/linux/generic/backport-6.1/759-v5.17-net-next-net-dsa-qca8k-add-support-for-port-fast-aging.patch 
b/target/linux/generic/backport-6.1/759-v5.17-net-next-net-dsa-qca8k-add-support-for-port-fast-aging.patch deleted file mode 100644 index 8ad7ab472d4f..000000000000 --- a/target/linux/generic/backport-6.1/759-v5.17-net-next-net-dsa-qca8k-add-support-for-port-fast-aging.patch +++ /dev/null @@ -1,53 +0,0 @@ -From 4592538bfb0d5d3c3c8a1d7071724d081412ac91 Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Mon, 22 Nov 2021 16:23:46 +0100 -Subject: net: dsa: qca8k: add support for port fast aging - -The switch supports fast aging by flushing any rule in the ARL -table for a specific port. - -Signed-off-by: Ansuel Smith -Reviewed-by: Vladimir Oltean -Signed-off-by: David S. Miller ---- - drivers/net/dsa/qca8k.c | 11 +++++++++++ - drivers/net/dsa/qca8k.h | 1 + - 2 files changed, 12 insertions(+) - ---- a/drivers/net/dsa/qca8k.c -+++ b/drivers/net/dsa/qca8k.c -@@ -1790,6 +1790,16 @@ qca8k_port_bridge_leave(struct dsa_switc - QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port)); - } - -+static void -+qca8k_port_fast_age(struct dsa_switch *ds, int port) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ -+ mutex_lock(&priv->reg_mutex); -+ qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port); -+ mutex_unlock(&priv->reg_mutex); -+} -+ - static int - qca8k_port_enable(struct dsa_switch *ds, int port, - struct phy_device *phy) -@@ -2017,6 +2027,7 @@ static const struct dsa_switch_ops qca8k - .port_stp_state_set = qca8k_port_stp_state_set, - .port_bridge_join = qca8k_port_bridge_join, - .port_bridge_leave = qca8k_port_bridge_leave, -+ .port_fast_age = qca8k_port_fast_age, - .port_fdb_add = qca8k_port_fdb_add, - .port_fdb_del = qca8k_port_fdb_del, - .port_fdb_dump = qca8k_port_fdb_dump, ---- a/drivers/net/dsa/qca8k.h -+++ b/drivers/net/dsa/qca8k.h -@@ -262,6 +262,7 @@ enum qca8k_fdb_cmd { - QCA8K_FDB_FLUSH = 1, - QCA8K_FDB_LOAD = 2, - QCA8K_FDB_PURGE = 3, -+ QCA8K_FDB_FLUSH_PORT = 5, - QCA8K_FDB_NEXT = 6, - QCA8K_FDB_SEARCH = 7, - }; diff --git a/target/linux/generic/backport-6.1/760-v5.17-net-next-net-dsa-qca8k-add-set_ageing_time-support.patch b/target/linux/generic/backport-6.1/760-v5.17-net-next-net-dsa-qca8k-add-set_ageing_time-support.patch deleted file mode 100644 index 659e482405a5..000000000000 --- a/target/linux/generic/backport-6.1/760-v5.17-net-next-net-dsa-qca8k-add-set_ageing_time-support.patch +++ /dev/null @@ -1,78 +0,0 @@ -From 6a3bdc5209f45d2af83aa92433ab6e5cf2297aa4 Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Mon, 22 Nov 2021 16:23:47 +0100 -Subject: net: dsa: qca8k: add set_ageing_time support - -qca8k support setting ageing time in step of 7s. Add support for it and -set the max value accepted of 7645m. -Documentation talks about support for 10000m but that values doesn't -make sense as the value doesn't match the max value in the reg. - -Signed-off-by: Ansuel Smith -Reviewed-by: Vladimir Oltean -Signed-off-by: David S. 
Miller ---- - drivers/net/dsa/qca8k.c | 25 +++++++++++++++++++++++++ - drivers/net/dsa/qca8k.h | 3 +++ - 2 files changed, 28 insertions(+) - ---- a/drivers/net/dsa/qca8k.c -+++ b/drivers/net/dsa/qca8k.c -@@ -1261,6 +1261,10 @@ qca8k_setup(struct dsa_switch *ds) - /* We don't have interrupts for link changes, so we need to poll */ - ds->pcs_poll = true; - -+ /* Set min a max ageing value supported */ -+ ds->ageing_time_min = 7000; -+ ds->ageing_time_max = 458745000; -+ - return 0; - } - -@@ -1801,6 +1805,26 @@ qca8k_port_fast_age(struct dsa_switch *d - } - - static int -+qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ unsigned int secs = msecs / 1000; -+ u32 val; -+ -+ /* AGE_TIME reg is set in 7s step */ -+ val = secs / 7; -+ -+ /* Handle case with 0 as val to NOT disable -+ * learning -+ */ -+ if (!val) -+ val = 1; -+ -+ return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL, QCA8K_ATU_AGE_TIME_MASK, -+ QCA8K_ATU_AGE_TIME(val)); -+} -+ -+static int - qca8k_port_enable(struct dsa_switch *ds, int port, - struct phy_device *phy) - { -@@ -2018,6 +2042,7 @@ static const struct dsa_switch_ops qca8k - .get_strings = qca8k_get_strings, - .get_ethtool_stats = qca8k_get_ethtool_stats, - .get_sset_count = qca8k_get_sset_count, -+ .set_ageing_time = qca8k_set_ageing_time, - .get_mac_eee = qca8k_get_mac_eee, - .set_mac_eee = qca8k_set_mac_eee, - .port_enable = qca8k_port_enable, ---- a/drivers/net/dsa/qca8k.h -+++ b/drivers/net/dsa/qca8k.h -@@ -175,6 +175,9 @@ - #define QCA8K_VTU_FUNC1_BUSY BIT(31) - #define QCA8K_VTU_FUNC1_VID_MASK GENMASK(27, 16) - #define QCA8K_VTU_FUNC1_FULL BIT(4) -+#define QCA8K_REG_ATU_CTRL 0x618 -+#define QCA8K_ATU_AGE_TIME_MASK GENMASK(15, 0) -+#define QCA8K_ATU_AGE_TIME(x) FIELD_PREP(QCA8K_ATU_AGE_TIME_MASK, (x)) - #define QCA8K_REG_GLOBAL_FW_CTRL0 0x620 - #define QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN BIT(10) - #define QCA8K_REG_GLOBAL_FW_CTRL1 0x624 diff --git a/target/linux/generic/backport-6.1/761-v5.17-net-next-net-dsa-qca8k-add-support-for-mdb_add-del.patch b/target/linux/generic/backport-6.1/761-v5.17-net-next-net-dsa-qca8k-add-support-for-mdb_add-del.patch deleted file mode 100644 index 8b97939ecb29..000000000000 --- a/target/linux/generic/backport-6.1/761-v5.17-net-next-net-dsa-qca8k-add-support-for-mdb_add-del.patch +++ /dev/null @@ -1,142 +0,0 @@ -From ba8f870dfa635113ce6e8095a5eb1835ecde2e9e Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Mon, 22 Nov 2021 16:23:48 +0100 -Subject: net: dsa: qca8k: add support for mdb_add/del - -Add support for mdb add/del function. The ARL table is used to insert -the rule. The rule will be searched, deleted and reinserted with the -port mask updated. The function will check if the rule has to be updated -or insert directly with no deletion of the old rule. -If every port is removed from the port mask, the rule is removed. -The rule is set STATIC in the ARL table (aka it doesn't age) to not be -flushed by fast age function. - -Signed-off-by: Ansuel Smith -Reviewed-by: Vladimir Oltean -Signed-off-by: David S. 
Miller ---- - drivers/net/dsa/qca8k.c | 99 +++++++++++++++++++++++++++++++++++++++++++++++++ - 1 file changed, 99 insertions(+) - ---- a/drivers/net/dsa/qca8k.c -+++ b/drivers/net/dsa/qca8k.c -@@ -436,6 +436,81 @@ qca8k_fdb_flush(struct qca8k_priv *priv) - } - - static int -+qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask, -+ const u8 *mac, u16 vid) -+{ -+ struct qca8k_fdb fdb = { 0 }; -+ int ret; -+ -+ mutex_lock(&priv->reg_mutex); -+ -+ qca8k_fdb_write(priv, vid, 0, mac, 0); -+ ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1); -+ if (ret < 0) -+ goto exit; -+ -+ ret = qca8k_fdb_read(priv, &fdb); -+ if (ret < 0) -+ goto exit; -+ -+ /* Rule exist. Delete first */ -+ if (!fdb.aging) { -+ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1); -+ if (ret) -+ goto exit; -+ } -+ -+ /* Add port to fdb portmask */ -+ fdb.port_mask |= port_mask; -+ -+ qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging); -+ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1); -+ -+exit: -+ mutex_unlock(&priv->reg_mutex); -+ return ret; -+} -+ -+static int -+qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask, -+ const u8 *mac, u16 vid) -+{ -+ struct qca8k_fdb fdb = { 0 }; -+ int ret; -+ -+ mutex_lock(&priv->reg_mutex); -+ -+ qca8k_fdb_write(priv, vid, 0, mac, 0); -+ ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1); -+ if (ret < 0) -+ goto exit; -+ -+ /* Rule doesn't exist. Why delete? */ -+ if (!fdb.aging) { -+ ret = -EINVAL; -+ goto exit; -+ } -+ -+ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1); -+ if (ret) -+ goto exit; -+ -+ /* Only port in the rule is this port. Don't re insert */ -+ if (fdb.port_mask == port_mask) -+ goto exit; -+ -+ /* Remove port from port mask */ -+ fdb.port_mask &= ~port_mask; -+ -+ qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging); -+ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1); -+ -+exit: -+ mutex_unlock(&priv->reg_mutex); -+ return ret; -+} -+ -+static int - qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid) - { - u32 reg; -@@ -1949,6 +2024,28 @@ qca8k_port_fdb_dump(struct dsa_switch *d - } - - static int -+qca8k_port_mdb_add(struct dsa_switch *ds, int port, -+ const struct switchdev_obj_port_mdb *mdb) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ const u8 *addr = mdb->addr; -+ u16 vid = mdb->vid; -+ -+ return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid); -+} -+ -+static int -+qca8k_port_mdb_del(struct dsa_switch *ds, int port, -+ const struct switchdev_obj_port_mdb *mdb) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ const u8 *addr = mdb->addr; -+ u16 vid = mdb->vid; -+ -+ return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid); -+} -+ -+static int - qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, - struct netlink_ext_ack *extack) - { -@@ -2056,6 +2153,8 @@ static const struct dsa_switch_ops qca8k - .port_fdb_add = qca8k_port_fdb_add, - .port_fdb_del = qca8k_port_fdb_del, - .port_fdb_dump = qca8k_port_fdb_dump, -+ .port_mdb_add = qca8k_port_mdb_add, -+ .port_mdb_del = qca8k_port_mdb_del, - .port_vlan_filtering = qca8k_port_vlan_filtering, - .port_vlan_add = qca8k_port_vlan_add, - .port_vlan_del = qca8k_port_vlan_del, diff --git a/target/linux/generic/backport-6.1/762-v5.17-net-next-net-dsa-qca8k-add-support-for-mirror-mode.patch b/target/linux/generic/backport-6.1/762-v5.17-net-next-net-dsa-qca8k-add-support-for-mirror-mode.patch deleted file mode 100644 index dc5a22935f1d..000000000000 --- 
a/target/linux/generic/backport-6.1/762-v5.17-net-next-net-dsa-qca8k-add-support-for-mirror-mode.patch +++ /dev/null @@ -1,155 +0,0 @@ -From 2c1bdbc7e7560d7de754cad277d968d56bb1899e Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Tue, 23 Nov 2021 03:59:10 +0100 -Subject: net: dsa: qca8k: add support for mirror mode - -The switch supports mirror mode. Only one port can set as mirror port and -every other port can set to both ingress and egress mode. The mirror -port is disabled and reverted to normal operation once every port is -removed from sending packet to it. - -Signed-off-by: Ansuel Smith -Signed-off-by: David S. Miller ---- - drivers/net/dsa/qca8k.c | 95 +++++++++++++++++++++++++++++++++++++++++++++++++ - drivers/net/dsa/qca8k.h | 4 +++ - 2 files changed, 99 insertions(+) - ---- a/drivers/net/dsa/qca8k.c -+++ b/drivers/net/dsa/qca8k.c -@@ -2046,6 +2046,99 @@ qca8k_port_mdb_del(struct dsa_switch *ds - } - - static int -+qca8k_port_mirror_add(struct dsa_switch *ds, int port, -+ struct dsa_mall_mirror_tc_entry *mirror, -+ bool ingress) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ int monitor_port, ret; -+ u32 reg, val; -+ -+ /* Check for existent entry */ -+ if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port)) -+ return -EEXIST; -+ -+ ret = regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val); -+ if (ret) -+ return ret; -+ -+ /* QCA83xx can have only one port set to mirror mode. -+ * Check that the correct port is requested and return error otherwise. -+ * When no mirror port is set, the values is set to 0xF -+ */ -+ monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val); -+ if (monitor_port != 0xF && monitor_port != mirror->to_local_port) -+ return -EEXIST; -+ -+ /* Set the monitor port */ -+ val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, -+ mirror->to_local_port); -+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, -+ QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val); -+ if (ret) -+ return ret; -+ -+ if (ingress) { -+ reg = QCA8K_PORT_LOOKUP_CTRL(port); -+ val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN; -+ } else { -+ reg = QCA8K_REG_PORT_HOL_CTRL1(port); -+ val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN; -+ } -+ -+ ret = regmap_update_bits(priv->regmap, reg, val, val); -+ if (ret) -+ return ret; -+ -+ /* Track mirror port for tx and rx to decide when the -+ * mirror port has to be disabled. -+ */ -+ if (ingress) -+ priv->mirror_rx |= BIT(port); -+ else -+ priv->mirror_tx |= BIT(port); -+ -+ return 0; -+} -+ -+static void -+qca8k_port_mirror_del(struct dsa_switch *ds, int port, -+ struct dsa_mall_mirror_tc_entry *mirror) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ u32 reg, val; -+ int ret; -+ -+ if (mirror->ingress) { -+ reg = QCA8K_PORT_LOOKUP_CTRL(port); -+ val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN; -+ } else { -+ reg = QCA8K_REG_PORT_HOL_CTRL1(port); -+ val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN; -+ } -+ -+ ret = regmap_clear_bits(priv->regmap, reg, val); -+ if (ret) -+ goto err; -+ -+ if (mirror->ingress) -+ priv->mirror_rx &= ~BIT(port); -+ else -+ priv->mirror_tx &= ~BIT(port); -+ -+ /* No port set to send packet to mirror port. 
Disable mirror port */ -+ if (!priv->mirror_rx && !priv->mirror_tx) { -+ val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, 0xF); -+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, -+ QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val); -+ if (ret) -+ goto err; -+ } -+err: -+ dev_err(priv->dev, "Failed to del mirror port from %d", port); -+} -+ -+static int - qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, - struct netlink_ext_ack *extack) - { -@@ -2155,6 +2248,8 @@ static const struct dsa_switch_ops qca8k - .port_fdb_dump = qca8k_port_fdb_dump, - .port_mdb_add = qca8k_port_mdb_add, - .port_mdb_del = qca8k_port_mdb_del, -+ .port_mirror_add = qca8k_port_mirror_add, -+ .port_mirror_del = qca8k_port_mirror_del, - .port_vlan_filtering = qca8k_port_vlan_filtering, - .port_vlan_add = qca8k_port_vlan_add, - .port_vlan_del = qca8k_port_vlan_del, ---- a/drivers/net/dsa/qca8k.h -+++ b/drivers/net/dsa/qca8k.h -@@ -180,6 +180,7 @@ - #define QCA8K_ATU_AGE_TIME(x) FIELD_PREP(QCA8K_ATU_AGE_TIME_MASK, (x)) - #define QCA8K_REG_GLOBAL_FW_CTRL0 0x620 - #define QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN BIT(10) -+#define QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM GENMASK(7, 4) - #define QCA8K_REG_GLOBAL_FW_CTRL1 0x624 - #define QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK GENMASK(30, 24) - #define QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK GENMASK(22, 16) -@@ -201,6 +202,7 @@ - #define QCA8K_PORT_LOOKUP_STATE_LEARNING QCA8K_PORT_LOOKUP_STATE(0x3) - #define QCA8K_PORT_LOOKUP_STATE_FORWARD QCA8K_PORT_LOOKUP_STATE(0x4) - #define QCA8K_PORT_LOOKUP_LEARN BIT(20) -+#define QCA8K_PORT_LOOKUP_ING_MIRROR_EN BIT(25) - - #define QCA8K_REG_GLOBAL_FC_THRESH 0x800 - #define QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK GENMASK(24, 16) -@@ -305,6 +307,8 @@ struct qca8k_ports_config { - struct qca8k_priv { - u8 switch_id; - u8 switch_revision; -+ u8 mirror_rx; -+ u8 mirror_tx; - bool legacy_phy_port_mapping; - struct qca8k_ports_config ports_config; - struct regmap *regmap; diff --git a/target/linux/generic/backport-6.1/763-v5.17-net-next-net-dsa-qca8k-add-LAG-support.patch b/target/linux/generic/backport-6.1/763-v5.17-net-next-net-dsa-qca8k-add-LAG-support.patch deleted file mode 100644 index b53f1288d5a0..000000000000 --- a/target/linux/generic/backport-6.1/763-v5.17-net-next-net-dsa-qca8k-add-LAG-support.patch +++ /dev/null @@ -1,288 +0,0 @@ -From def975307c01191b6f0170048c3724b0ed3348af Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Tue, 23 Nov 2021 03:59:11 +0100 -Subject: net: dsa: qca8k: add LAG support - -Add LAG support to this switch. In Documentation this is described as -trunk mode. A max of 4 LAGs are supported and each can support up to 4 -port. The current tx mode supported is Hash mode with both L2 and L2+3 -mode. -When no port are present in the trunk, the trunk is disabled in the -switch. -When a port is disconnected, the traffic is redirected to the other -available port. -The hash mode is global and each LAG require to have the same hash mode -set. To change the hash mode when multiple LAG are configured, it's -required to remove each LAG and set the desired hash mode to the last. -An error is printed when it's asked to set a not supported hadh mode. - -Signed-off-by: Ansuel Smith -Signed-off-by: David S. 
Miller ---- - drivers/net/dsa/qca8k.c | 177 ++++++++++++++++++++++++++++++++++++++++++++++++ - drivers/net/dsa/qca8k.h | 33 +++++++++ - 2 files changed, 210 insertions(+) - ---- a/drivers/net/dsa/qca8k.c -+++ b/drivers/net/dsa/qca8k.c -@@ -1340,6 +1340,9 @@ qca8k_setup(struct dsa_switch *ds) - ds->ageing_time_min = 7000; - ds->ageing_time_max = 458745000; - -+ /* Set max number of LAGs supported */ -+ ds->num_lag_ids = QCA8K_NUM_LAGS; -+ - return 0; - } - -@@ -2226,6 +2229,178 @@ qca8k_get_tag_protocol(struct dsa_switch - return DSA_TAG_PROTO_QCA; - } - -+static bool -+qca8k_lag_can_offload(struct dsa_switch *ds, -+ struct net_device *lag, -+ struct netdev_lag_upper_info *info) -+{ -+ struct dsa_port *dp; -+ int id, members = 0; -+ -+ id = dsa_lag_id(ds->dst, lag); -+ if (id < 0 || id >= ds->num_lag_ids) -+ return false; -+ -+ dsa_lag_foreach_port(dp, ds->dst, lag) -+ /* Includes the port joining the LAG */ -+ members++; -+ -+ if (members > QCA8K_NUM_PORTS_FOR_LAG) -+ return false; -+ -+ if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) -+ return false; -+ -+ if (info->hash_type != NETDEV_LAG_HASH_L2 || -+ info->hash_type != NETDEV_LAG_HASH_L23) -+ return false; -+ -+ return true; -+} -+ -+static int -+qca8k_lag_setup_hash(struct dsa_switch *ds, -+ struct net_device *lag, -+ struct netdev_lag_upper_info *info) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ bool unique_lag = true; -+ int i, id; -+ u32 hash; -+ -+ id = dsa_lag_id(ds->dst, lag); -+ -+ switch (info->hash_type) { -+ case NETDEV_LAG_HASH_L23: -+ hash |= QCA8K_TRUNK_HASH_SIP_EN; -+ hash |= QCA8K_TRUNK_HASH_DIP_EN; -+ fallthrough; -+ case NETDEV_LAG_HASH_L2: -+ hash |= QCA8K_TRUNK_HASH_SA_EN; -+ hash |= QCA8K_TRUNK_HASH_DA_EN; -+ break; -+ default: /* We should NEVER reach this */ -+ return -EOPNOTSUPP; -+ } -+ -+ /* Check if we are the unique configured LAG */ -+ dsa_lags_foreach_id(i, ds->dst) -+ if (i != id && dsa_lag_dev(ds->dst, i)) { -+ unique_lag = false; -+ break; -+ } -+ -+ /* Hash Mode is global. Make sure the same Hash Mode -+ * is set to all the 4 possible lag. -+ * If we are the unique LAG we can set whatever hash -+ * mode we want. -+ * To change hash mode it's needed to remove all LAG -+ * and change the mode with the latest. -+ */ -+ if (unique_lag) { -+ priv->lag_hash_mode = hash; -+ } else if (priv->lag_hash_mode != hash) { -+ netdev_err(lag, "Error: Mismateched Hash Mode across different lag is not supported\n"); -+ return -EOPNOTSUPP; -+ } -+ -+ return regmap_update_bits(priv->regmap, QCA8K_TRUNK_HASH_EN_CTRL, -+ QCA8K_TRUNK_HASH_MASK, hash); -+} -+ -+static int -+qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port, -+ struct net_device *lag, bool delete) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ int ret, id, i; -+ u32 val; -+ -+ id = dsa_lag_id(ds->dst, lag); -+ -+ /* Read current port member */ -+ ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val); -+ if (ret) -+ return ret; -+ -+ /* Shift val to the correct trunk */ -+ val >>= QCA8K_REG_GOL_TRUNK_SHIFT(id); -+ val &= QCA8K_REG_GOL_TRUNK_MEMBER_MASK; -+ if (delete) -+ val &= ~BIT(port); -+ else -+ val |= BIT(port); -+ -+ /* Update port member. 
With empty portmap disable trunk */ -+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, -+ QCA8K_REG_GOL_TRUNK_MEMBER(id) | -+ QCA8K_REG_GOL_TRUNK_EN(id), -+ !val << QCA8K_REG_GOL_TRUNK_SHIFT(id) | -+ val << QCA8K_REG_GOL_TRUNK_SHIFT(id)); -+ -+ /* Search empty member if adding or port on deleting */ -+ for (i = 0; i < QCA8K_NUM_PORTS_FOR_LAG; i++) { -+ ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), &val); -+ if (ret) -+ return ret; -+ -+ val >>= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i); -+ val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK; -+ -+ if (delete) { -+ /* If port flagged to be disabled assume this member is -+ * empty -+ */ -+ if (val != QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK) -+ continue; -+ -+ val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK; -+ if (val != port) -+ continue; -+ } else { -+ /* If port flagged to be enabled assume this member is -+ * already set -+ */ -+ if (val == QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK) -+ continue; -+ } -+ -+ /* We have found the member to add/remove */ -+ break; -+ } -+ -+ /* Set port in the correct port mask or disable port if in delete mode */ -+ return regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), -+ QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(id, i) | -+ QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(id, i), -+ !delete << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i) | -+ port << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i)); -+} -+ -+static int -+qca8k_port_lag_join(struct dsa_switch *ds, int port, -+ struct net_device *lag, -+ struct netdev_lag_upper_info *info) -+{ -+ int ret; -+ -+ if (!qca8k_lag_can_offload(ds, lag, info)) -+ return -EOPNOTSUPP; -+ -+ ret = qca8k_lag_setup_hash(ds, lag, info); -+ if (ret) -+ return ret; -+ -+ return qca8k_lag_refresh_portmap(ds, port, lag, false); -+} -+ -+static int -+qca8k_port_lag_leave(struct dsa_switch *ds, int port, -+ struct net_device *lag) -+{ -+ return qca8k_lag_refresh_portmap(ds, port, lag, true); -+} -+ - static const struct dsa_switch_ops qca8k_switch_ops = { - .get_tag_protocol = qca8k_get_tag_protocol, - .setup = qca8k_setup, -@@ -2259,6 +2434,8 @@ static const struct dsa_switch_ops qca8k - .phylink_mac_link_down = qca8k_phylink_mac_link_down, - .phylink_mac_link_up = qca8k_phylink_mac_link_up, - .get_phy_flags = qca8k_get_phy_flags, -+ .port_lag_join = qca8k_port_lag_join, -+ .port_lag_leave = qca8k_port_lag_leave, - }; - - static int qca8k_read_switch_id(struct qca8k_priv *priv) ---- a/drivers/net/dsa/qca8k.h -+++ b/drivers/net/dsa/qca8k.h -@@ -15,6 +15,8 @@ - #define QCA8K_NUM_PORTS 7 - #define QCA8K_NUM_CPU_PORTS 2 - #define QCA8K_MAX_MTU 9000 -+#define QCA8K_NUM_LAGS 4 -+#define QCA8K_NUM_PORTS_FOR_LAG 4 - - #define PHY_ID_QCA8327 0x004dd034 - #define QCA8K_ID_QCA8327 0x12 -@@ -122,6 +124,14 @@ - #define QCA8K_REG_EEE_CTRL 0x100 - #define QCA8K_REG_EEE_CTRL_LPI_EN(_i) ((_i + 1) * 2) - -+/* TRUNK_HASH_EN registers */ -+#define QCA8K_TRUNK_HASH_EN_CTRL 0x270 -+#define QCA8K_TRUNK_HASH_SIP_EN BIT(3) -+#define QCA8K_TRUNK_HASH_DIP_EN BIT(2) -+#define QCA8K_TRUNK_HASH_SA_EN BIT(1) -+#define QCA8K_TRUNK_HASH_DA_EN BIT(0) -+#define QCA8K_TRUNK_HASH_MASK GENMASK(3, 0) -+ - /* ACL registers */ - #define QCA8K_REG_PORT_VLAN_CTRL0(_i) (0x420 + (_i * 8)) - #define QCA8K_PORT_VLAN_CVID_MASK GENMASK(27, 16) -@@ -204,6 +214,28 @@ - #define QCA8K_PORT_LOOKUP_LEARN BIT(20) - #define QCA8K_PORT_LOOKUP_ING_MIRROR_EN BIT(25) - -+#define QCA8K_REG_GOL_TRUNK_CTRL0 0x700 -+/* 4 max trunk first -+ * first 6 bit for member bitmap -+ * 7th bit is to enable trunk port -+ */ -+#define 
QCA8K_REG_GOL_TRUNK_SHIFT(_i) ((_i) * 8) -+#define QCA8K_REG_GOL_TRUNK_EN_MASK BIT(7) -+#define QCA8K_REG_GOL_TRUNK_EN(_i) (QCA8K_REG_GOL_TRUNK_EN_MASK << QCA8K_REG_GOL_TRUNK_SHIFT(_i)) -+#define QCA8K_REG_GOL_TRUNK_MEMBER_MASK GENMASK(6, 0) -+#define QCA8K_REG_GOL_TRUNK_MEMBER(_i) (QCA8K_REG_GOL_TRUNK_MEMBER_MASK << QCA8K_REG_GOL_TRUNK_SHIFT(_i)) -+/* 0x704 for TRUNK 0-1 --- 0x708 for TRUNK 2-3 */ -+#define QCA8K_REG_GOL_TRUNK_CTRL(_i) (0x704 + (((_i) / 2) * 4)) -+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK GENMASK(3, 0) -+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK BIT(3) -+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK GENMASK(2, 0) -+#define QCA8K_REG_GOL_TRUNK_ID_SHIFT(_i) (((_i) / 2) * 16) -+#define QCA8K_REG_GOL_MEM_ID_SHIFT(_i) ((_i) * 4) -+/* Complex shift: FIRST shift for port THEN shift for trunk */ -+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j) (QCA8K_REG_GOL_MEM_ID_SHIFT(_j) + QCA8K_REG_GOL_TRUNK_ID_SHIFT(_i)) -+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(_i, _j) (QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j)) -+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(_i, _j) (QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j)) -+ - #define QCA8K_REG_GLOBAL_FC_THRESH 0x800 - #define QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK GENMASK(24, 16) - #define QCA8K_GLOBAL_FC_GOL_XON_THRES(x) FIELD_PREP(QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK, x) -@@ -309,6 +341,7 @@ struct qca8k_priv { - u8 switch_revision; - u8 mirror_rx; - u8 mirror_tx; -+ u8 lag_hash_mode; - bool legacy_phy_port_mapping; - struct qca8k_ports_config ports_config; - struct regmap *regmap; diff --git a/target/linux/generic/backport-6.1/764-v5.17-net-next-net-dsa-qca8k-fix-warning-in-LAG-feature.patch b/target/linux/generic/backport-6.1/764-v5.17-net-next-net-dsa-qca8k-fix-warning-in-LAG-feature.patch deleted file mode 100644 index 7d811be11ce9..000000000000 --- a/target/linux/generic/backport-6.1/764-v5.17-net-next-net-dsa-qca8k-fix-warning-in-LAG-feature.patch +++ /dev/null @@ -1,40 +0,0 @@ -From 0898ca67b86e14207d4feb3f3fea8b87cec5aab1 Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Tue, 23 Nov 2021 16:44:46 +0100 -Subject: net: dsa: qca8k: fix warning in LAG feature - -Fix warning reported by bot. -Make sure hash is init to 0 and fix wrong logic for hash_type in -qca8k_lag_can_offload. 
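In short: the check being fixed used a logical OR, which is always true (any hash_type necessarily differs from at least one of the two allowed constants), so qca8k_lag_can_offload() ended up rejecting every hash type. The corrected condition, shown in the hunk below, only rejects hash types that are neither L2 nor L23:

    if (info->hash_type != NETDEV_LAG_HASH_L2 &&
        info->hash_type != NETDEV_LAG_HASH_L23)
            return false;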
- -Reported-by: kernel test robot -Fixes: def975307c01 ("net: dsa: qca8k: add LAG support") -Signed-off-by: Ansuel Smith -Reviewed-by: Florian Fainelli -Link: https://lore.kernel.org/r/20211123154446.31019-1-ansuelsmth@gmail.com -Signed-off-by: Jakub Kicinski ---- - drivers/net/dsa/qca8k.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/drivers/net/dsa/qca8k.c -+++ b/drivers/net/dsa/qca8k.c -@@ -2251,7 +2251,7 @@ qca8k_lag_can_offload(struct dsa_switch - if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) - return false; - -- if (info->hash_type != NETDEV_LAG_HASH_L2 || -+ if (info->hash_type != NETDEV_LAG_HASH_L2 && - info->hash_type != NETDEV_LAG_HASH_L23) - return false; - -@@ -2265,8 +2265,8 @@ qca8k_lag_setup_hash(struct dsa_switch * - { - struct qca8k_priv *priv = ds->priv; - bool unique_lag = true; -+ u32 hash = 0; - int i, id; -- u32 hash; - - id = dsa_lag_id(ds->dst, lag); - diff --git a/target/linux/generic/backport-6.1/765-v5.17-01-net-next-net-dsa-reorder-PHY-initialization-with-MTU-setup-in.patch b/target/linux/generic/backport-6.1/765-v5.17-01-net-next-net-dsa-reorder-PHY-initialization-with-MTU-setup-in.patch deleted file mode 100644 index 77cf63b809be..000000000000 --- a/target/linux/generic/backport-6.1/765-v5.17-01-net-next-net-dsa-reorder-PHY-initialization-with-MTU-setup-in.patch +++ /dev/null @@ -1,52 +0,0 @@ -From 904e112ad431492b34f235f59738e8312802bbf9 Mon Sep 17 00:00:00 2001 -From: Vladimir Oltean -Date: Thu, 6 Jan 2022 01:11:12 +0200 -Subject: [PATCH 1/6] net: dsa: reorder PHY initialization with MTU setup in - slave.c - -In dsa_slave_create() there are 2 sections that take rtnl_lock(): -MTU change and netdev registration. They are separated by PHY -initialization. - -There isn't any strict ordering requirement except for the fact that -netdev registration should be last. Therefore, we can perform the MTU -change a bit later, after the PHY setup. A future change will then be -able to merge the two rtnl_lock sections into one. - -Signed-off-by: Vladimir Oltean -Reviewed-by: Florian Fainelli -Signed-off-by: David S. 
Miller ---- - net/dsa/slave.c | 14 +++++++------- - 1 file changed, 7 insertions(+), 7 deletions(-) - ---- a/net/dsa/slave.c -+++ b/net/dsa/slave.c -@@ -1977,13 +1977,6 @@ int dsa_slave_create(struct dsa_port *po - port->slave = slave_dev; - dsa_slave_setup_tagger(slave_dev); - -- rtnl_lock(); -- ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN); -- rtnl_unlock(); -- if (ret && ret != -EOPNOTSUPP) -- dev_warn(ds->dev, "nonfatal error %d setting MTU to %d on port %d\n", -- ret, ETH_DATA_LEN, port->index); -- - netif_carrier_off(slave_dev); - - ret = dsa_slave_phy_setup(slave_dev); -@@ -1995,6 +1988,13 @@ int dsa_slave_create(struct dsa_port *po - } - - rtnl_lock(); -+ ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN); -+ rtnl_unlock(); -+ if (ret && ret != -EOPNOTSUPP) -+ dev_warn(ds->dev, "nonfatal error %d setting MTU to %d on port %d\n", -+ ret, ETH_DATA_LEN, port->index); -+ -+ rtnl_lock(); - - ret = register_netdevice(slave_dev); - if (ret) { diff --git a/target/linux/generic/backport-6.1/765-v5.17-02-net-next-net-dsa-merge-rtnl_lock-sections-in-dsa_slave_create.patch b/target/linux/generic/backport-6.1/765-v5.17-02-net-next-net-dsa-merge-rtnl_lock-sections-in-dsa_slave_create.patch deleted file mode 100644 index 50aa5d8f0dd5..000000000000 --- a/target/linux/generic/backport-6.1/765-v5.17-02-net-next-net-dsa-merge-rtnl_lock-sections-in-dsa_slave_create.patch +++ /dev/null @@ -1,34 +0,0 @@ -From e31dbd3b6aba585231cd84a87adeb22e7c6a8c19 Mon Sep 17 00:00:00 2001 -From: Vladimir Oltean -Date: Thu, 6 Jan 2022 01:11:13 +0200 -Subject: [PATCH 2/6] net: dsa: merge rtnl_lock sections in dsa_slave_create - -Currently dsa_slave_create() has two sequences of rtnl_lock/rtnl_unlock -in a row. Remove the rtnl_unlock() and rtnl_lock() in between, such that -the operation can execute slighly faster. - -Signed-off-by: Vladimir Oltean -Reviewed-by: Florian Fainelli -Signed-off-by: David S. Miller ---- - net/dsa/slave.c | 4 +--- - 1 file changed, 1 insertion(+), 3 deletions(-) - ---- a/net/dsa/slave.c -+++ b/net/dsa/slave.c -@@ -1988,14 +1988,12 @@ int dsa_slave_create(struct dsa_port *po - } - - rtnl_lock(); -+ - ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN); -- rtnl_unlock(); - if (ret && ret != -EOPNOTSUPP) - dev_warn(ds->dev, "nonfatal error %d setting MTU to %d on port %d\n", - ret, ETH_DATA_LEN, port->index); - -- rtnl_lock(); -- - ret = register_netdevice(slave_dev); - if (ret) { - netdev_err(master, "error %d registering interface %s\n", diff --git a/target/linux/generic/backport-6.1/765-v5.17-03-net-next-net-dsa-stop-updating-master-MTU-from-master.c.patch b/target/linux/generic/backport-6.1/765-v5.17-03-net-next-net-dsa-stop-updating-master-MTU-from-master.c.patch deleted file mode 100644 index 6c7aad692885..000000000000 --- a/target/linux/generic/backport-6.1/765-v5.17-03-net-next-net-dsa-stop-updating-master-MTU-from-master.c.patch +++ /dev/null @@ -1,91 +0,0 @@ -From a1ff94c2973c43bc1e2677ac63ebb15b1d1ff846 Mon Sep 17 00:00:00 2001 -From: Vladimir Oltean -Date: Thu, 6 Jan 2022 01:11:14 +0200 -Subject: [PATCH 3/6] net: dsa: stop updating master MTU from master.c - -At present there are two paths for changing the MTU of the DSA master. 
- -The first is: - -dsa_tree_setup --> dsa_tree_setup_ports - -> dsa_port_setup - -> dsa_slave_create - -> dsa_slave_change_mtu - -> dev_set_mtu(master) - -The second is: - -dsa_tree_setup --> dsa_tree_setup_master - -> dsa_master_setup - -> dev_set_mtu(dev) - -So the dev_set_mtu() call from dsa_master_setup() has been effectively -superseded by the dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN) that is -done from dsa_slave_create() for each user port. The later function also -updates the master MTU according to the largest user port MTU from the -tree. Therefore, updating the master MTU through a separate code path -isn't needed. - -Signed-off-by: Vladimir Oltean -Reviewed-by: Florian Fainelli -Signed-off-by: David S. Miller ---- - net/dsa/master.c | 25 +------------------------ - 1 file changed, 1 insertion(+), 24 deletions(-) - ---- a/net/dsa/master.c -+++ b/net/dsa/master.c -@@ -329,28 +329,13 @@ static const struct attribute_group dsa_ - .attrs = dsa_slave_attrs, - }; - --static void dsa_master_reset_mtu(struct net_device *dev) --{ -- int err; -- -- rtnl_lock(); -- err = dev_set_mtu(dev, ETH_DATA_LEN); -- if (err) -- netdev_dbg(dev, -- "Unable to reset MTU to exclude DSA overheads\n"); -- rtnl_unlock(); --} -- - static struct lock_class_key dsa_master_addr_list_lock_key; - - int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp) - { -- const struct dsa_device_ops *tag_ops = cpu_dp->tag_ops; - struct dsa_switch *ds = cpu_dp->ds; - struct device_link *consumer_link; -- int mtu, ret; -- -- mtu = ETH_DATA_LEN + dsa_tag_protocol_overhead(tag_ops); -+ int ret; - - /* The DSA master must use SET_NETDEV_DEV for this to work. */ - consumer_link = device_link_add(ds->dev, dev->dev.parent, -@@ -360,13 +345,6 @@ int dsa_master_setup(struct net_device * - "Failed to create a device link to DSA switch %s\n", - dev_name(ds->dev)); - -- rtnl_lock(); -- ret = dev_set_mtu(dev, mtu); -- rtnl_unlock(); -- if (ret) -- netdev_warn(dev, "error %d setting MTU to %d to include DSA overhead\n", -- ret, mtu); -- - /* If we use a tagging format that doesn't have an ethertype - * field, make sure that all packets from this point on get - * sent to the tag format's receive function. -@@ -404,7 +382,6 @@ void dsa_master_teardown(struct net_devi - sysfs_remove_group(&dev->dev.kobj, &dsa_group); - dsa_netdev_ops_set(dev, NULL); - dsa_master_ethtool_teardown(dev); -- dsa_master_reset_mtu(dev); - dsa_master_set_promiscuity(dev, -1); - - dev->dsa_ptr = NULL; diff --git a/target/linux/generic/backport-6.1/765-v5.17-04-net-next-net-dsa-hold-rtnl_mutex-when-calling-dsa_master_-set.patch b/target/linux/generic/backport-6.1/765-v5.17-04-net-next-net-dsa-hold-rtnl_mutex-when-calling-dsa_master_-set.patch deleted file mode 100644 index e331226fc41d..000000000000 --- a/target/linux/generic/backport-6.1/765-v5.17-04-net-next-net-dsa-hold-rtnl_mutex-when-calling-dsa_master_-set.patch +++ /dev/null @@ -1,78 +0,0 @@ -From c146f9bc195a9dc3ad7fd000a14540e7c9df952d Mon Sep 17 00:00:00 2001 -From: Vladimir Oltean -Date: Thu, 6 Jan 2022 01:11:15 +0200 -Subject: [PATCH 4/6] net: dsa: hold rtnl_mutex when calling - dsa_master_{setup,teardown} - -DSA needs to simulate master tracking events when a binding is first -with a DSA master established and torn down, in order to give drivers -the simplifying guarantee that ->master_state_change calls are made -only when the master's readiness state to pass traffic changes. 
-master_state_change() provide a operational bool that DSA driver can use -to understand if DSA master is operational or not. -To avoid races, we need to block the reception of -NETDEV_UP/NETDEV_CHANGE/NETDEV_GOING_DOWN events in the netdev notifier -chain while we are changing the master's dev->dsa_ptr (this changes what -netdev_uses_dsa(dev) reports). - -The dsa_master_setup() and dsa_master_teardown() functions optionally -require the rtnl_mutex to be held, if the tagger needs the master to be -promiscuous, these functions call dev_set_promiscuity(). Move the -rtnl_lock() from that function and make it top-level. - -Signed-off-by: Vladimir Oltean -Reviewed-by: Florian Fainelli -Signed-off-by: David S. Miller ---- - net/dsa/dsa2.c | 8 ++++++++ - net/dsa/master.c | 4 ++-- - 2 files changed, 10 insertions(+), 2 deletions(-) - ---- a/net/dsa/dsa2.c -+++ b/net/dsa/dsa2.c -@@ -1034,6 +1034,8 @@ static int dsa_tree_setup_master(struct - struct dsa_port *dp; - int err; - -+ rtnl_lock(); -+ - list_for_each_entry(dp, &dst->ports, list) { - if (dsa_port_is_cpu(dp)) { - err = dsa_master_setup(dp->master, dp); -@@ -1042,6 +1044,8 @@ static int dsa_tree_setup_master(struct - } - } - -+ rtnl_unlock(); -+ - return 0; - } - -@@ -1049,9 +1053,13 @@ static void dsa_tree_teardown_master(str - { - struct dsa_port *dp; - -+ rtnl_lock(); -+ - list_for_each_entry(dp, &dst->ports, list) - if (dsa_port_is_cpu(dp)) - dsa_master_teardown(dp->master); -+ -+ rtnl_unlock(); - } - - static int dsa_tree_setup_lags(struct dsa_switch_tree *dst) ---- a/net/dsa/master.c -+++ b/net/dsa/master.c -@@ -266,9 +266,9 @@ static void dsa_master_set_promiscuity(s - if (!ops->promisc_on_master) - return; - -- rtnl_lock(); -+ ASSERT_RTNL(); -+ - dev_set_promiscuity(dev, inc); -- rtnl_unlock(); - } - - static ssize_t tagging_show(struct device *d, struct device_attribute *attr, diff --git a/target/linux/generic/backport-6.1/765-v5.17-05-net-next-net-dsa-first-set-up-shared-ports-then-non-shared-po.patch b/target/linux/generic/backport-6.1/765-v5.17-05-net-next-net-dsa-first-set-up-shared-ports-then-non-shared-po.patch deleted file mode 100644 index e6472c61da67..000000000000 --- a/target/linux/generic/backport-6.1/765-v5.17-05-net-next-net-dsa-first-set-up-shared-ports-then-non-shared-po.patch +++ /dev/null @@ -1,118 +0,0 @@ -From 1e3f407f3cacc5dcfe27166c412ed9bc263d82bf Mon Sep 17 00:00:00 2001 -From: Vladimir Oltean -Date: Thu, 6 Jan 2022 01:11:16 +0200 -Subject: [PATCH 5/6] net: dsa: first set up shared ports, then non-shared - ports - -After commit a57d8c217aad ("net: dsa: flush switchdev workqueue before -tearing down CPU/DSA ports"), the port setup and teardown procedure -became asymmetric. - -The fact of the matter is that user ports need the shared ports to be up -before they can be used for CPU-initiated termination. And since we -register net devices for the user ports, those won't be functional until -we also call the setup for the shared (CPU, DSA) ports. But we may do -that later, depending on the port numbering scheme of the hardware we -are dealing with. - -It just makes sense that all shared ports are brought up before any user -port is. I can't pinpoint any issue due to the current behavior, but -let's change it nonetheless, for consistency's sake. - -Signed-off-by: Vladimir Oltean -Signed-off-by: David S. 
Miller ---- - net/dsa/dsa2.c | 50 +++++++++++++++++++++++++++++++++++++------------- - 1 file changed, 37 insertions(+), 13 deletions(-) - ---- a/net/dsa/dsa2.c -+++ b/net/dsa/dsa2.c -@@ -999,23 +999,28 @@ static void dsa_tree_teardown_switches(s - dsa_switch_teardown(dp->ds); - } - --static int dsa_tree_setup_switches(struct dsa_switch_tree *dst) -+/* Bring shared ports up first, then non-shared ports */ -+static int dsa_tree_setup_ports(struct dsa_switch_tree *dst) - { - struct dsa_port *dp; -- int err; -+ int err = 0; - - list_for_each_entry(dp, &dst->ports, list) { -- err = dsa_switch_setup(dp->ds); -- if (err) -- goto teardown; -+ if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp)) { -+ err = dsa_port_setup(dp); -+ if (err) -+ goto teardown; -+ } - } - - list_for_each_entry(dp, &dst->ports, list) { -- err = dsa_port_setup(dp); -- if (err) { -- err = dsa_port_reinit_as_unused(dp); -- if (err) -- goto teardown; -+ if (dsa_port_is_user(dp) || dsa_port_is_unused(dp)) { -+ err = dsa_port_setup(dp); -+ if (err) { -+ err = dsa_port_reinit_as_unused(dp); -+ if (err) -+ goto teardown; -+ } - } - } - -@@ -1024,7 +1029,21 @@ static int dsa_tree_setup_switches(struc - teardown: - dsa_tree_teardown_ports(dst); - -- dsa_tree_teardown_switches(dst); -+ return err; -+} -+ -+static int dsa_tree_setup_switches(struct dsa_switch_tree *dst) -+{ -+ struct dsa_port *dp; -+ int err = 0; -+ -+ list_for_each_entry(dp, &dst->ports, list) { -+ err = dsa_switch_setup(dp->ds); -+ if (err) { -+ dsa_tree_teardown_switches(dst); -+ break; -+ } -+ } - - return err; - } -@@ -1111,10 +1130,14 @@ static int dsa_tree_setup(struct dsa_swi - if (err) - goto teardown_cpu_ports; - -- err = dsa_tree_setup_master(dst); -+ err = dsa_tree_setup_ports(dst); - if (err) - goto teardown_switches; - -+ err = dsa_tree_setup_master(dst); -+ if (err) -+ goto teardown_ports; -+ - err = dsa_tree_setup_lags(dst); - if (err) - goto teardown_master; -@@ -1127,8 +1150,9 @@ static int dsa_tree_setup(struct dsa_swi - - teardown_master: - dsa_tree_teardown_master(dst); --teardown_switches: -+teardown_ports: - dsa_tree_teardown_ports(dst); -+teardown_switches: - dsa_tree_teardown_switches(dst); - teardown_cpu_ports: - dsa_tree_teardown_cpu_ports(dst); diff --git a/target/linux/generic/backport-6.1/765-v5.17-06-net-next-net-dsa-setup-master-before-ports.patch b/target/linux/generic/backport-6.1/765-v5.17-06-net-next-net-dsa-setup-master-before-ports.patch deleted file mode 100644 index 93cad0c98aa0..000000000000 --- a/target/linux/generic/backport-6.1/765-v5.17-06-net-next-net-dsa-setup-master-before-ports.patch +++ /dev/null @@ -1,115 +0,0 @@ -From 11fd667dac315ea3f2469961f6d2869271a46cae Mon Sep 17 00:00:00 2001 -From: Vladimir Oltean -Date: Thu, 6 Jan 2022 01:11:17 +0200 -Subject: [PATCH 6/6] net: dsa: setup master before ports - -It is said that as soon as a network interface is registered, all its -resources should have already been prepared, so that it is available for -sending and receiving traffic. One of the resources needed by a DSA -slave interface is the master. - -dsa_tree_setup --> dsa_tree_setup_ports - -> dsa_port_setup - -> dsa_slave_create - -> register_netdevice --> dsa_tree_setup_master - -> dsa_master_setup - -> sets up master->dsa_ptr, which enables reception - -Therefore, there is a short period of time after register_netdevice() -during which the master isn't prepared to pass traffic to the DSA layer -(master->dsa_ptr is checked by eth_type_trans). 
Same thing during -unregistration, there is a time frame in which packets might be missed. - -Note that this change opens us to another race: dsa_master_find_slave() -will get invoked potentially earlier than the slave creation, and later -than the slave deletion. Since dp->slave starts off as a NULL pointer, -the earlier calls aren't a problem, but the later calls are. To avoid -use-after-free, we should zeroize dp->slave before calling -dsa_slave_destroy(). - -In practice I cannot really test real life improvements brought by this -change, since in my systems, netdevice creation races with PHY autoneg -which takes a few seconds to complete, and that masks quite a few races. -Effects might be noticeable in a setup with fixed links all the way to -an external system. - -Signed-off-by: Vladimir Oltean -Signed-off-by: David S. Miller ---- - net/dsa/dsa2.c | 23 +++++++++++++---------- - 1 file changed, 13 insertions(+), 10 deletions(-) - ---- a/net/dsa/dsa2.c -+++ b/net/dsa/dsa2.c -@@ -545,6 +545,7 @@ static void dsa_port_teardown(struct dsa - struct devlink_port *dlp = &dp->devlink_port; - struct dsa_switch *ds = dp->ds; - struct dsa_mac_addr *a, *tmp; -+ struct net_device *slave; - - if (!dp->setup) - return; -@@ -566,9 +567,11 @@ static void dsa_port_teardown(struct dsa - dsa_port_link_unregister_of(dp); - break; - case DSA_PORT_TYPE_USER: -- if (dp->slave) { -- dsa_slave_destroy(dp->slave); -+ slave = dp->slave; -+ -+ if (slave) { - dp->slave = NULL; -+ dsa_slave_destroy(slave); - } - break; - } -@@ -1130,17 +1133,17 @@ static int dsa_tree_setup(struct dsa_swi - if (err) - goto teardown_cpu_ports; - -- err = dsa_tree_setup_ports(dst); -+ err = dsa_tree_setup_master(dst); - if (err) - goto teardown_switches; - -- err = dsa_tree_setup_master(dst); -+ err = dsa_tree_setup_ports(dst); - if (err) -- goto teardown_ports; -+ goto teardown_master; - - err = dsa_tree_setup_lags(dst); - if (err) -- goto teardown_master; -+ goto teardown_ports; - - dst->setup = true; - -@@ -1148,10 +1151,10 @@ static int dsa_tree_setup(struct dsa_swi - - return 0; - --teardown_master: -- dsa_tree_teardown_master(dst); - teardown_ports: - dsa_tree_teardown_ports(dst); -+teardown_master: -+ dsa_tree_teardown_master(dst); - teardown_switches: - dsa_tree_teardown_switches(dst); - teardown_cpu_ports: -@@ -1169,10 +1172,10 @@ static void dsa_tree_teardown(struct dsa - - dsa_tree_teardown_lags(dst); - -- dsa_tree_teardown_master(dst); -- - dsa_tree_teardown_ports(dst); - -+ dsa_tree_teardown_master(dst); -+ - dsa_tree_teardown_switches(dst); - - dsa_tree_teardown_cpu_ports(dst); diff --git a/target/linux/generic/backport-6.1/766-v5.18-01-net-dsa-provide-switch-operations-for-tracking-the-m.patch b/target/linux/generic/backport-6.1/766-v5.18-01-net-dsa-provide-switch-operations-for-tracking-the-m.patch deleted file mode 100644 index bffdcb28819f..000000000000 --- a/target/linux/generic/backport-6.1/766-v5.18-01-net-dsa-provide-switch-operations-for-tracking-the-m.patch +++ /dev/null @@ -1,254 +0,0 @@ -From 295ab96f478d0fa56393e85406f19a867e26ce22 Mon Sep 17 00:00:00 2001 -From: Vladimir Oltean -Date: Wed, 2 Feb 2022 01:03:20 +0100 -Subject: [PATCH 01/16] net: dsa: provide switch operations for tracking the - master state - -Certain drivers may need to send management traffic to the switch for -things like register access, FDB dump, etc, to accelerate what their -slow bus (SPI, I2C, MDIO) can already do. 
- -Ethernet is faster (especially in bulk transactions) but is also more -unreliable, since the user may decide to bring the DSA master down (or -not bring it up), therefore severing the link between the host and the -attached switch. - -Drivers needing Ethernet-based register access already should have -fallback logic to the slow bus if the Ethernet method fails, but that -fallback may be based on a timeout, and the I/O to the switch may slow -down to a halt if the master is down, because every Ethernet packet will -have to time out. The driver also doesn't have the option to turn off -Ethernet-based I/O momentarily, because it wouldn't know when to turn it -back on. - -Which is where this change comes in. By tracking NETDEV_CHANGE, -NETDEV_UP and NETDEV_GOING_DOWN events on the DSA master, we should know -the exact interval of time during which this interface is reliably -available for traffic. Provide this information to switches so they can -use it as they wish. - -An helper is added dsa_port_master_is_operational() to check if a master -port is operational. - -Signed-off-by: Vladimir Oltean -Signed-off-by: Ansuel Smith -Reviewed-by: Florian Fainelli -Signed-off-by: David S. Miller ---- - include/net/dsa.h | 17 +++++++++++++++++ - net/dsa/dsa2.c | 46 ++++++++++++++++++++++++++++++++++++++++++++++ - net/dsa/dsa_priv.h | 13 +++++++++++++ - net/dsa/slave.c | 32 ++++++++++++++++++++++++++++++++ - net/dsa/switch.c | 15 +++++++++++++++ - 5 files changed, 123 insertions(+) - ---- a/include/net/dsa.h -+++ b/include/net/dsa.h -@@ -291,6 +291,10 @@ struct dsa_port { - struct list_head mdbs; - - bool setup; -+ /* Master state bits, valid only on CPU ports */ -+ u8 master_admin_up:1; -+ u8 master_oper_up:1; -+ - }; - - /* TODO: ideally DSA ports would have a single dp->link_dp member, -@@ -456,6 +460,12 @@ static inline bool dsa_port_is_unused(st - return dp->type == DSA_PORT_TYPE_UNUSED; - } - -+static inline bool dsa_port_master_is_operational(struct dsa_port *dp) -+{ -+ return dsa_port_is_cpu(dp) && dp->master_admin_up && -+ dp->master_oper_up; -+} -+ - static inline bool dsa_is_unused_port(struct dsa_switch *ds, int p) - { - return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_UNUSED; -@@ -949,6 +959,13 @@ struct dsa_switch_ops { - int (*tag_8021q_vlan_add)(struct dsa_switch *ds, int port, u16 vid, - u16 flags); - int (*tag_8021q_vlan_del)(struct dsa_switch *ds, int port, u16 vid); -+ -+ /* -+ * DSA master tracking operations -+ */ -+ void (*master_state_change)(struct dsa_switch *ds, -+ const struct net_device *master, -+ bool operational); - }; - - #define DSA_DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes) \ ---- a/net/dsa/dsa2.c -+++ b/net/dsa/dsa2.c -@@ -1275,6 +1275,52 @@ out_unlock: - return err; - } - -+static void dsa_tree_master_state_change(struct dsa_switch_tree *dst, -+ struct net_device *master) -+{ -+ struct dsa_notifier_master_state_info info; -+ struct dsa_port *cpu_dp = master->dsa_ptr; -+ -+ info.master = master; -+ info.operational = dsa_port_master_is_operational(cpu_dp); -+ -+ dsa_tree_notify(dst, DSA_NOTIFIER_MASTER_STATE_CHANGE, &info); -+} -+ -+void dsa_tree_master_admin_state_change(struct dsa_switch_tree *dst, -+ struct net_device *master, -+ bool up) -+{ -+ struct dsa_port *cpu_dp = master->dsa_ptr; -+ bool notify = false; -+ -+ if ((dsa_port_master_is_operational(cpu_dp)) != -+ (up && cpu_dp->master_oper_up)) -+ notify = true; -+ -+ cpu_dp->master_admin_up = up; -+ -+ if (notify) -+ dsa_tree_master_state_change(dst, master); -+} -+ -+void 
dsa_tree_master_oper_state_change(struct dsa_switch_tree *dst, -+ struct net_device *master, -+ bool up) -+{ -+ struct dsa_port *cpu_dp = master->dsa_ptr; -+ bool notify = false; -+ -+ if ((dsa_port_master_is_operational(cpu_dp)) != -+ (cpu_dp->master_admin_up && up)) -+ notify = true; -+ -+ cpu_dp->master_oper_up = up; -+ -+ if (notify) -+ dsa_tree_master_state_change(dst, master); -+} -+ - static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index) - { - struct dsa_switch_tree *dst = ds->dst; ---- a/net/dsa/dsa_priv.h -+++ b/net/dsa/dsa_priv.h -@@ -45,6 +45,7 @@ enum { - DSA_NOTIFIER_MRP_DEL_RING_ROLE, - DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, - DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, -+ DSA_NOTIFIER_MASTER_STATE_CHANGE, - }; - - /* DSA_NOTIFIER_AGEING_TIME */ -@@ -127,6 +128,12 @@ struct dsa_notifier_tag_8021q_vlan_info - u16 vid; - }; - -+/* DSA_NOTIFIER_MASTER_STATE_CHANGE */ -+struct dsa_notifier_master_state_info { -+ const struct net_device *master; -+ bool operational; -+}; -+ - struct dsa_switchdev_event_work { - struct dsa_switch *ds; - int port; -@@ -549,6 +556,12 @@ int dsa_tree_change_tag_proto(struct dsa - struct net_device *master, - const struct dsa_device_ops *tag_ops, - const struct dsa_device_ops *old_tag_ops); -+void dsa_tree_master_admin_state_change(struct dsa_switch_tree *dst, -+ struct net_device *master, -+ bool up); -+void dsa_tree_master_oper_state_change(struct dsa_switch_tree *dst, -+ struct net_device *master, -+ bool up); - int dsa_bridge_num_get(const struct net_device *bridge_dev, int max); - void dsa_bridge_num_put(const struct net_device *bridge_dev, int bridge_num); - ---- a/net/dsa/slave.c -+++ b/net/dsa/slave.c -@@ -2311,6 +2311,36 @@ static int dsa_slave_netdevice_event(str - err = dsa_port_lag_change(dp, info->lower_state_info); - return notifier_from_errno(err); - } -+ case NETDEV_CHANGE: -+ case NETDEV_UP: { -+ /* Track state of master port. -+ * DSA driver may require the master port (and indirectly -+ * the tagger) to be available for some special operation. -+ */ -+ if (netdev_uses_dsa(dev)) { -+ struct dsa_port *cpu_dp = dev->dsa_ptr; -+ struct dsa_switch_tree *dst = cpu_dp->ds->dst; -+ -+ /* Track when the master port is UP */ -+ dsa_tree_master_oper_state_change(dst, dev, -+ netif_oper_up(dev)); -+ -+ /* Track when the master port is ready and can accept -+ * packet. -+ * NETDEV_UP event is not enough to flag a port as ready. -+ * We also have to wait for linkwatch_do_dev to dev_activate -+ * and emit a NETDEV_CHANGE event. -+ * We check if a master port is ready by checking if the dev -+ * have a qdisc assigned and is not noop. 
-+ */ -+ dsa_tree_master_admin_state_change(dst, dev, -+ !qdisc_tx_is_noop(dev)); -+ -+ return NOTIFY_OK; -+ } -+ -+ return NOTIFY_DONE; -+ } - case NETDEV_GOING_DOWN: { - struct dsa_port *dp, *cpu_dp; - struct dsa_switch_tree *dst; -@@ -2322,6 +2352,8 @@ static int dsa_slave_netdevice_event(str - cpu_dp = dev->dsa_ptr; - dst = cpu_dp->ds->dst; - -+ dsa_tree_master_admin_state_change(dst, dev, false); -+ - list_for_each_entry(dp, &dst->ports, list) { - if (!dsa_is_user_port(dp->ds, dp->index)) - continue; ---- a/net/dsa/switch.c -+++ b/net/dsa/switch.c -@@ -722,6 +722,18 @@ dsa_switch_mrp_del_ring_role(struct dsa_ - return 0; - } - -+static int -+dsa_switch_master_state_change(struct dsa_switch *ds, -+ struct dsa_notifier_master_state_info *info) -+{ -+ if (!ds->ops->master_state_change) -+ return 0; -+ -+ ds->ops->master_state_change(ds, info->master, info->operational); -+ -+ return 0; -+} -+ - static int dsa_switch_event(struct notifier_block *nb, - unsigned long event, void *info) - { -@@ -813,6 +825,9 @@ static int dsa_switch_event(struct notif - case DSA_NOTIFIER_TAG_8021Q_VLAN_DEL: - err = dsa_switch_tag_8021q_vlan_del(ds, info); - break; -+ case DSA_NOTIFIER_MASTER_STATE_CHANGE: -+ err = dsa_switch_master_state_change(ds, info); -+ break; - default: - err = -EOPNOTSUPP; - break; diff --git a/target/linux/generic/backport-6.1/766-v5.18-02-net-dsa-replay-master-state-events-in-dsa_tree_-setu.patch b/target/linux/generic/backport-6.1/766-v5.18-02-net-dsa-replay-master-state-events-in-dsa_tree_-setu.patch deleted file mode 100644 index 6478d580c017..000000000000 --- a/target/linux/generic/backport-6.1/766-v5.18-02-net-dsa-replay-master-state-events-in-dsa_tree_-setu.patch +++ /dev/null @@ -1,89 +0,0 @@ -From e83d56537859849f2223b90749e554831b1f3c27 Mon Sep 17 00:00:00 2001 -From: Vladimir Oltean -Date: Wed, 2 Feb 2022 01:03:21 +0100 -Subject: [PATCH 02/16] net: dsa: replay master state events in - dsa_tree_{setup,teardown}_master - -In order for switch driver to be able to make simple and reliable use of -the master tracking operations, they must also be notified of the -initial state of the DSA master, not just of the changes. This is -because they might enable certain features only during the time when -they know that the DSA master is up and running. - -Therefore, this change explicitly checks the state of the DSA master -under the same rtnl_mutex as we were holding during the -dsa_master_setup() and dsa_master_teardown() call. The idea being that -if the DSA master became operational in between the moment in which it -became a DSA master (dsa_master_setup set dev->dsa_ptr) and the moment -when we checked for the master being up, there is a chance that we -would emit a ->master_state_change() call with no actual state change. -We need to avoid that by serializing the concurrent netdevice event with -us. If the netdevice event started before, we force it to finish before -we begin, because we take rtnl_lock before making netdev_uses_dsa() -return true. So we also handle that early event and do nothing on it. -Similarly, if the dev_open() attempt is concurrent with us, it will -attempt to take the rtnl_mutex, but we're holding it. We'll see that -the master flag IFF_UP isn't set, then when we release the rtnl_mutex -we'll process the NETDEV_UP notifier. - -Signed-off-by: Vladimir Oltean -Signed-off-by: Ansuel Smith -Reviewed-by: Florian Fainelli -Signed-off-by: David S. 
Miller ---- - net/dsa/dsa2.c | 28 ++++++++++++++++++++++++---- - 1 file changed, 24 insertions(+), 4 deletions(-) - ---- a/net/dsa/dsa2.c -+++ b/net/dsa/dsa2.c -@@ -15,6 +15,7 @@ - #include - #include - #include -+#include - - #include "dsa_priv.h" - -@@ -1060,9 +1061,18 @@ static int dsa_tree_setup_master(struct - - list_for_each_entry(dp, &dst->ports, list) { - if (dsa_port_is_cpu(dp)) { -- err = dsa_master_setup(dp->master, dp); -+ struct net_device *master = dp->master; -+ bool admin_up = (master->flags & IFF_UP) && -+ !qdisc_tx_is_noop(master); -+ -+ err = dsa_master_setup(master, dp); - if (err) - return err; -+ -+ /* Replay master state event */ -+ dsa_tree_master_admin_state_change(dst, master, admin_up); -+ dsa_tree_master_oper_state_change(dst, master, -+ netif_oper_up(master)); - } - } - -@@ -1077,9 +1087,19 @@ static void dsa_tree_teardown_master(str - - rtnl_lock(); - -- list_for_each_entry(dp, &dst->ports, list) -- if (dsa_port_is_cpu(dp)) -- dsa_master_teardown(dp->master); -+ list_for_each_entry(dp, &dst->ports, list) { -+ if (dsa_port_is_cpu(dp)) { -+ struct net_device *master = dp->master; -+ -+ /* Synthesizing an "admin down" state is sufficient for -+ * the switches to get a notification if the master is -+ * currently up and running. -+ */ -+ dsa_tree_master_admin_state_change(dst, master, false); -+ -+ dsa_master_teardown(master); -+ } -+ } - - rtnl_unlock(); - } diff --git a/target/linux/generic/backport-6.1/766-v5.18-03-net-dsa-tag_qca-convert-to-FIELD-macro.patch b/target/linux/generic/backport-6.1/766-v5.18-03-net-dsa-tag_qca-convert-to-FIELD-macro.patch deleted file mode 100644 index 82c94b385b5f..000000000000 --- a/target/linux/generic/backport-6.1/766-v5.18-03-net-dsa-tag_qca-convert-to-FIELD-macro.patch +++ /dev/null @@ -1,86 +0,0 @@ -From 6b0458299297ca4ab6fb295800e29a4e501d50c1 Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Wed, 2 Feb 2022 01:03:22 +0100 -Subject: [PATCH 03/16] net: dsa: tag_qca: convert to FIELD macro - -Convert driver to FIELD macro to drop redundant define. - -Signed-off-by: Ansuel Smith -Reviewed-by: Vladimir Oltean -Reviewed-by: Florian Fainelli -Signed-off-by: David S. 
Miller ---- - net/dsa/tag_qca.c | 34 +++++++++++++++------------------- - 1 file changed, 15 insertions(+), 19 deletions(-) - ---- a/net/dsa/tag_qca.c -+++ b/net/dsa/tag_qca.c -@@ -4,29 +4,24 @@ - */ - - #include -+#include - - #include "dsa_priv.h" - - #define QCA_HDR_LEN 2 - #define QCA_HDR_VERSION 0x2 - --#define QCA_HDR_RECV_VERSION_MASK GENMASK(15, 14) --#define QCA_HDR_RECV_VERSION_S 14 --#define QCA_HDR_RECV_PRIORITY_MASK GENMASK(13, 11) --#define QCA_HDR_RECV_PRIORITY_S 11 --#define QCA_HDR_RECV_TYPE_MASK GENMASK(10, 6) --#define QCA_HDR_RECV_TYPE_S 6 -+#define QCA_HDR_RECV_VERSION GENMASK(15, 14) -+#define QCA_HDR_RECV_PRIORITY GENMASK(13, 11) -+#define QCA_HDR_RECV_TYPE GENMASK(10, 6) - #define QCA_HDR_RECV_FRAME_IS_TAGGED BIT(3) --#define QCA_HDR_RECV_SOURCE_PORT_MASK GENMASK(2, 0) -+#define QCA_HDR_RECV_SOURCE_PORT GENMASK(2, 0) - --#define QCA_HDR_XMIT_VERSION_MASK GENMASK(15, 14) --#define QCA_HDR_XMIT_VERSION_S 14 --#define QCA_HDR_XMIT_PRIORITY_MASK GENMASK(13, 11) --#define QCA_HDR_XMIT_PRIORITY_S 11 --#define QCA_HDR_XMIT_CONTROL_MASK GENMASK(10, 8) --#define QCA_HDR_XMIT_CONTROL_S 8 -+#define QCA_HDR_XMIT_VERSION GENMASK(15, 14) -+#define QCA_HDR_XMIT_PRIORITY GENMASK(13, 11) -+#define QCA_HDR_XMIT_CONTROL GENMASK(10, 8) - #define QCA_HDR_XMIT_FROM_CPU BIT(7) --#define QCA_HDR_XMIT_DP_BIT_MASK GENMASK(6, 0) -+#define QCA_HDR_XMIT_DP_BIT GENMASK(6, 0) - - static struct sk_buff *qca_tag_xmit(struct sk_buff *skb, struct net_device *dev) - { -@@ -40,8 +35,9 @@ static struct sk_buff *qca_tag_xmit(stru - phdr = dsa_etype_header_pos_tx(skb); - - /* Set the version field, and set destination port information */ -- hdr = QCA_HDR_VERSION << QCA_HDR_XMIT_VERSION_S | -- QCA_HDR_XMIT_FROM_CPU | BIT(dp->index); -+ hdr = FIELD_PREP(QCA_HDR_XMIT_VERSION, QCA_HDR_VERSION); -+ hdr |= QCA_HDR_XMIT_FROM_CPU; -+ hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(dp->index)); - - *phdr = htons(hdr); - -@@ -62,7 +58,7 @@ static struct sk_buff *qca_tag_rcv(struc - hdr = ntohs(*phdr); - - /* Make sure the version is correct */ -- ver = (hdr & QCA_HDR_RECV_VERSION_MASK) >> QCA_HDR_RECV_VERSION_S; -+ ver = FIELD_GET(QCA_HDR_RECV_VERSION, hdr); - if (unlikely(ver != QCA_HDR_VERSION)) - return NULL; - -@@ -71,7 +67,7 @@ static struct sk_buff *qca_tag_rcv(struc - dsa_strip_etype_header(skb, QCA_HDR_LEN); - - /* Get source port information */ -- port = (hdr & QCA_HDR_RECV_SOURCE_PORT_MASK); -+ port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, hdr); - - skb->dev = dsa_master_find_slave(dev, 0, port); - if (!skb->dev) diff --git a/target/linux/generic/backport-6.1/766-v5.18-04-net-dsa-tag_qca-move-define-to-include-linux-dsa.patch b/target/linux/generic/backport-6.1/766-v5.18-04-net-dsa-tag_qca-move-define-to-include-linux-dsa.patch deleted file mode 100644 index c1e74ceeeb5b..000000000000 --- a/target/linux/generic/backport-6.1/766-v5.18-04-net-dsa-tag_qca-move-define-to-include-linux-dsa.patch +++ /dev/null @@ -1,71 +0,0 @@ -From 3ec762fb13c7e7273800b94c80db1c2cc37590d1 Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Wed, 2 Feb 2022 01:03:23 +0100 -Subject: [PATCH 04/16] net: dsa: tag_qca: move define to include linux/dsa - -Move tag_qca define to include dir linux/dsa as the qca8k require access -to the tagger define to support in-band mdio read/write using ethernet -packet. - -Signed-off-by: Ansuel Smith -Reviewed-by: Vladimir Oltean -Reviewed-by: Florian Fainelli -Signed-off-by: David S. 
Miller ---- - include/linux/dsa/tag_qca.h | 21 +++++++++++++++++++++ - net/dsa/tag_qca.c | 16 +--------------- - 2 files changed, 22 insertions(+), 15 deletions(-) - create mode 100644 include/linux/dsa/tag_qca.h - ---- /dev/null -+++ b/include/linux/dsa/tag_qca.h -@@ -0,0 +1,21 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+ -+#ifndef __TAG_QCA_H -+#define __TAG_QCA_H -+ -+#define QCA_HDR_LEN 2 -+#define QCA_HDR_VERSION 0x2 -+ -+#define QCA_HDR_RECV_VERSION GENMASK(15, 14) -+#define QCA_HDR_RECV_PRIORITY GENMASK(13, 11) -+#define QCA_HDR_RECV_TYPE GENMASK(10, 6) -+#define QCA_HDR_RECV_FRAME_IS_TAGGED BIT(3) -+#define QCA_HDR_RECV_SOURCE_PORT GENMASK(2, 0) -+ -+#define QCA_HDR_XMIT_VERSION GENMASK(15, 14) -+#define QCA_HDR_XMIT_PRIORITY GENMASK(13, 11) -+#define QCA_HDR_XMIT_CONTROL GENMASK(10, 8) -+#define QCA_HDR_XMIT_FROM_CPU BIT(7) -+#define QCA_HDR_XMIT_DP_BIT GENMASK(6, 0) -+ -+#endif /* __TAG_QCA_H */ ---- a/net/dsa/tag_qca.c -+++ b/net/dsa/tag_qca.c -@@ -5,24 +5,10 @@ - - #include - #include -+#include - - #include "dsa_priv.h" - --#define QCA_HDR_LEN 2 --#define QCA_HDR_VERSION 0x2 -- --#define QCA_HDR_RECV_VERSION GENMASK(15, 14) --#define QCA_HDR_RECV_PRIORITY GENMASK(13, 11) --#define QCA_HDR_RECV_TYPE GENMASK(10, 6) --#define QCA_HDR_RECV_FRAME_IS_TAGGED BIT(3) --#define QCA_HDR_RECV_SOURCE_PORT GENMASK(2, 0) -- --#define QCA_HDR_XMIT_VERSION GENMASK(15, 14) --#define QCA_HDR_XMIT_PRIORITY GENMASK(13, 11) --#define QCA_HDR_XMIT_CONTROL GENMASK(10, 8) --#define QCA_HDR_XMIT_FROM_CPU BIT(7) --#define QCA_HDR_XMIT_DP_BIT GENMASK(6, 0) -- - static struct sk_buff *qca_tag_xmit(struct sk_buff *skb, struct net_device *dev) - { - struct dsa_port *dp = dsa_slave_to_port(dev); diff --git a/target/linux/generic/backport-6.1/766-v5.18-05-net-dsa-tag_qca-enable-promisc_on_master-flag.patch b/target/linux/generic/backport-6.1/766-v5.18-05-net-dsa-tag_qca-enable-promisc_on_master-flag.patch deleted file mode 100644 index 9394a0dabbe6..000000000000 --- a/target/linux/generic/backport-6.1/766-v5.18-05-net-dsa-tag_qca-enable-promisc_on_master-flag.patch +++ /dev/null @@ -1,27 +0,0 @@ -From 101c04c3463b87061e6a3d4f72c1bc57670685a6 Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Wed, 2 Feb 2022 01:03:24 +0100 -Subject: [PATCH 05/16] net: dsa: tag_qca: enable promisc_on_master flag - -Ethernet MDIO packets are non-standard and DSA master expects the first -6 octets to be the MAC DA. To address these kind of packet, enable -promisc_on_master flag for the tagger. - -Signed-off-by: Ansuel Smith -Reviewed-by: Vladimir Oltean -Reviewed-by: Florian Fainelli -Signed-off-by: David S. 
Miller ---- - net/dsa/tag_qca.c | 1 + - 1 file changed, 1 insertion(+) - ---- a/net/dsa/tag_qca.c -+++ b/net/dsa/tag_qca.c -@@ -68,6 +68,7 @@ static const struct dsa_device_ops qca_n - .xmit = qca_tag_xmit, - .rcv = qca_tag_rcv, - .needed_headroom = QCA_HDR_LEN, -+ .promisc_on_master = true, - }; - - MODULE_LICENSE("GPL"); diff --git a/target/linux/generic/backport-6.1/766-v5.18-06-net-dsa-tag_qca-add-define-for-handling-mgmt-Etherne.patch b/target/linux/generic/backport-6.1/766-v5.18-06-net-dsa-tag_qca-add-define-for-handling-mgmt-Etherne.patch deleted file mode 100644 index 459454e03ba5..000000000000 --- a/target/linux/generic/backport-6.1/766-v5.18-06-net-dsa-tag_qca-add-define-for-handling-mgmt-Etherne.patch +++ /dev/null @@ -1,110 +0,0 @@ -From c2ee8181fddb293d296477f60b3eb4fa3ce4e1a6 Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Wed, 2 Feb 2022 01:03:25 +0100 -Subject: [PATCH 06/16] net: dsa: tag_qca: add define for handling mgmt - Ethernet packet - -Add all the required define to prepare support for mgmt read/write in -Ethernet packet. Any packet of this type has to be dropped as the only -use of these special packet is receive ack for an mgmt write request or -receive data for an mgmt read request. -A struct is used that emulates the Ethernet header but is used for a -different purpose. - -Signed-off-by: Ansuel Smith -Reviewed-by: Florian Fainelli -Signed-off-by: David S. Miller ---- - include/linux/dsa/tag_qca.h | 44 +++++++++++++++++++++++++++++++++++++ - net/dsa/tag_qca.c | 15 ++++++++++--- - 2 files changed, 56 insertions(+), 3 deletions(-) - ---- a/include/linux/dsa/tag_qca.h -+++ b/include/linux/dsa/tag_qca.h -@@ -12,10 +12,54 @@ - #define QCA_HDR_RECV_FRAME_IS_TAGGED BIT(3) - #define QCA_HDR_RECV_SOURCE_PORT GENMASK(2, 0) - -+/* Packet type for recv */ -+#define QCA_HDR_RECV_TYPE_NORMAL 0x0 -+#define QCA_HDR_RECV_TYPE_MIB 0x1 -+#define QCA_HDR_RECV_TYPE_RW_REG_ACK 0x2 -+ - #define QCA_HDR_XMIT_VERSION GENMASK(15, 14) - #define QCA_HDR_XMIT_PRIORITY GENMASK(13, 11) - #define QCA_HDR_XMIT_CONTROL GENMASK(10, 8) - #define QCA_HDR_XMIT_FROM_CPU BIT(7) - #define QCA_HDR_XMIT_DP_BIT GENMASK(6, 0) - -+/* Packet type for xmit */ -+#define QCA_HDR_XMIT_TYPE_NORMAL 0x0 -+#define QCA_HDR_XMIT_TYPE_RW_REG 0x1 -+ -+/* Check code for a valid mgmt packet. Switch will ignore the packet -+ * with this wrong. 
-+ */ -+#define QCA_HDR_MGMT_CHECK_CODE_VAL 0x5 -+ -+/* Specific define for in-band MDIO read/write with Ethernet packet */ -+#define QCA_HDR_MGMT_SEQ_LEN 4 /* 4 byte for the seq */ -+#define QCA_HDR_MGMT_COMMAND_LEN 4 /* 4 byte for the command */ -+#define QCA_HDR_MGMT_DATA1_LEN 4 /* First 4 byte for the mdio data */ -+#define QCA_HDR_MGMT_HEADER_LEN (QCA_HDR_MGMT_SEQ_LEN + \ -+ QCA_HDR_MGMT_COMMAND_LEN + \ -+ QCA_HDR_MGMT_DATA1_LEN) -+ -+#define QCA_HDR_MGMT_DATA2_LEN 12 /* Other 12 byte for the mdio data */ -+#define QCA_HDR_MGMT_PADDING_LEN 34 /* Padding to reach the min Ethernet packet */ -+ -+#define QCA_HDR_MGMT_PKT_LEN (QCA_HDR_MGMT_HEADER_LEN + \ -+ QCA_HDR_LEN + \ -+ QCA_HDR_MGMT_DATA2_LEN + \ -+ QCA_HDR_MGMT_PADDING_LEN) -+ -+#define QCA_HDR_MGMT_SEQ_NUM GENMASK(31, 0) /* 63, 32 */ -+#define QCA_HDR_MGMT_CHECK_CODE GENMASK(31, 29) /* 31, 29 */ -+#define QCA_HDR_MGMT_CMD BIT(28) /* 28 */ -+#define QCA_HDR_MGMT_LENGTH GENMASK(23, 20) /* 23, 20 */ -+#define QCA_HDR_MGMT_ADDR GENMASK(18, 0) /* 18, 0 */ -+ -+/* Special struct emulating a Ethernet header */ -+struct qca_mgmt_ethhdr { -+ u32 command; /* command bit 31:0 */ -+ u32 seq; /* seq 63:32 */ -+ u32 mdio_data; /* first 4byte mdio */ -+ __be16 hdr; /* qca hdr */ -+} __packed; -+ - #endif /* __TAG_QCA_H */ ---- a/net/dsa/tag_qca.c -+++ b/net/dsa/tag_qca.c -@@ -32,10 +32,12 @@ static struct sk_buff *qca_tag_xmit(stru - - static struct sk_buff *qca_tag_rcv(struct sk_buff *skb, struct net_device *dev) - { -- u8 ver; -- u16 hdr; -- int port; -+ u8 ver, pk_type; - __be16 *phdr; -+ int port; -+ u16 hdr; -+ -+ BUILD_BUG_ON(sizeof(struct qca_mgmt_ethhdr) != QCA_HDR_MGMT_HEADER_LEN + QCA_HDR_LEN); - - if (unlikely(!pskb_may_pull(skb, QCA_HDR_LEN))) - return NULL; -@@ -48,6 +50,13 @@ static struct sk_buff *qca_tag_rcv(struc - if (unlikely(ver != QCA_HDR_VERSION)) - return NULL; - -+ /* Get pk type */ -+ pk_type = FIELD_GET(QCA_HDR_RECV_TYPE, hdr); -+ -+ /* Ethernet MDIO read/write packet */ -+ if (pk_type == QCA_HDR_RECV_TYPE_RW_REG_ACK) -+ return NULL; -+ - /* Remove QCA tag and recalculate checksum */ - skb_pull_rcsum(skb, QCA_HDR_LEN); - dsa_strip_etype_header(skb, QCA_HDR_LEN); diff --git a/target/linux/generic/backport-6.1/766-v5.18-07-net-dsa-tag_qca-add-define-for-handling-MIB-packet.patch b/target/linux/generic/backport-6.1/766-v5.18-07-net-dsa-tag_qca-add-define-for-handling-MIB-packet.patch deleted file mode 100644 index 7e5dc6573068..000000000000 --- a/target/linux/generic/backport-6.1/766-v5.18-07-net-dsa-tag_qca-add-define-for-handling-MIB-packet.patch +++ /dev/null @@ -1,45 +0,0 @@ -From 18be654a4345f7d937b4bfbad74bea8093e3a93c Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Wed, 2 Feb 2022 01:03:26 +0100 -Subject: [PATCH 07/16] net: dsa: tag_qca: add define for handling MIB packet - -Add struct to correctly parse a mib Ethernet packet. - -Signed-off-by: Ansuel Smith -Reviewed-by: Florian Fainelli -Signed-off-by: David S. 
Miller ---- - include/linux/dsa/tag_qca.h | 10 ++++++++++ - net/dsa/tag_qca.c | 4 ++++ - 2 files changed, 14 insertions(+) - ---- a/include/linux/dsa/tag_qca.h -+++ b/include/linux/dsa/tag_qca.h -@@ -62,4 +62,14 @@ struct qca_mgmt_ethhdr { - __be16 hdr; /* qca hdr */ - } __packed; - -+enum mdio_cmd { -+ MDIO_WRITE = 0x0, -+ MDIO_READ -+}; -+ -+struct mib_ethhdr { -+ u32 data[3]; /* first 3 mib counter */ -+ __be16 hdr; /* qca hdr */ -+} __packed; -+ - #endif /* __TAG_QCA_H */ ---- a/net/dsa/tag_qca.c -+++ b/net/dsa/tag_qca.c -@@ -57,6 +57,10 @@ static struct sk_buff *qca_tag_rcv(struc - if (pk_type == QCA_HDR_RECV_TYPE_RW_REG_ACK) - return NULL; - -+ /* Ethernet MIB counter packet */ -+ if (pk_type == QCA_HDR_RECV_TYPE_MIB) -+ return NULL; -+ - /* Remove QCA tag and recalculate checksum */ - skb_pull_rcsum(skb, QCA_HDR_LEN); - dsa_strip_etype_header(skb, QCA_HDR_LEN); diff --git a/target/linux/generic/backport-6.1/766-v5.18-08-net-dsa-tag_qca-add-support-for-handling-mgmt-and-MI.patch b/target/linux/generic/backport-6.1/766-v5.18-08-net-dsa-tag_qca-add-support-for-handling-mgmt-and-MI.patch deleted file mode 100644 index ad25da30e6d8..000000000000 --- a/target/linux/generic/backport-6.1/766-v5.18-08-net-dsa-tag_qca-add-support-for-handling-mgmt-and-MI.patch +++ /dev/null @@ -1,116 +0,0 @@ -From 31eb6b4386ad91930417e3f5c8157a4b5e31cbd5 Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Wed, 2 Feb 2022 01:03:27 +0100 -Subject: [PATCH 08/16] net: dsa: tag_qca: add support for handling mgmt and - MIB Ethernet packet - -Add connect/disconnect helper to assign private struct to the DSA switch. -Add support for Ethernet mgmt and MIB if the DSA driver provide an handler -to correctly parse and elaborate the data. - -Signed-off-by: Ansuel Smith -Reviewed-by: Vladimir Oltean -Reviewed-by: Florian Fainelli -Signed-off-by: David S. 
Miller ---- - include/linux/dsa/tag_qca.h | 7 +++++++ - net/dsa/tag_qca.c | 39 ++++++++++++++++++++++++++++++++++--- - 2 files changed, 43 insertions(+), 3 deletions(-) - ---- a/include/linux/dsa/tag_qca.h -+++ b/include/linux/dsa/tag_qca.h -@@ -72,4 +72,11 @@ struct mib_ethhdr { - __be16 hdr; /* qca hdr */ - } __packed; - -+struct qca_tagger_data { -+ void (*rw_reg_ack_handler)(struct dsa_switch *ds, -+ struct sk_buff *skb); -+ void (*mib_autocast_handler)(struct dsa_switch *ds, -+ struct sk_buff *skb); -+}; -+ - #endif /* __TAG_QCA_H */ ---- a/net/dsa/tag_qca.c -+++ b/net/dsa/tag_qca.c -@@ -5,6 +5,7 @@ - - #include - #include -+#include - #include - - #include "dsa_priv.h" -@@ -32,6 +33,9 @@ static struct sk_buff *qca_tag_xmit(stru - - static struct sk_buff *qca_tag_rcv(struct sk_buff *skb, struct net_device *dev) - { -+ struct qca_tagger_data *tagger_data; -+ struct dsa_port *dp = dev->dsa_ptr; -+ struct dsa_switch *ds = dp->ds; - u8 ver, pk_type; - __be16 *phdr; - int port; -@@ -39,6 +43,8 @@ static struct sk_buff *qca_tag_rcv(struc - - BUILD_BUG_ON(sizeof(struct qca_mgmt_ethhdr) != QCA_HDR_MGMT_HEADER_LEN + QCA_HDR_LEN); - -+ tagger_data = ds->tagger_data; -+ - if (unlikely(!pskb_may_pull(skb, QCA_HDR_LEN))) - return NULL; - -@@ -53,13 +59,19 @@ static struct sk_buff *qca_tag_rcv(struc - /* Get pk type */ - pk_type = FIELD_GET(QCA_HDR_RECV_TYPE, hdr); - -- /* Ethernet MDIO read/write packet */ -- if (pk_type == QCA_HDR_RECV_TYPE_RW_REG_ACK) -+ /* Ethernet mgmt read/write packet */ -+ if (pk_type == QCA_HDR_RECV_TYPE_RW_REG_ACK) { -+ if (likely(tagger_data->rw_reg_ack_handler)) -+ tagger_data->rw_reg_ack_handler(ds, skb); - return NULL; -+ } - - /* Ethernet MIB counter packet */ -- if (pk_type == QCA_HDR_RECV_TYPE_MIB) -+ if (pk_type == QCA_HDR_RECV_TYPE_MIB) { -+ if (likely(tagger_data->mib_autocast_handler)) -+ tagger_data->mib_autocast_handler(ds, skb); - return NULL; -+ } - - /* Remove QCA tag and recalculate checksum */ - skb_pull_rcsum(skb, QCA_HDR_LEN); -@@ -75,9 +87,30 @@ static struct sk_buff *qca_tag_rcv(struc - return skb; - } - -+static int qca_tag_connect(struct dsa_switch *ds) -+{ -+ struct qca_tagger_data *tagger_data; -+ -+ tagger_data = kzalloc(sizeof(*tagger_data), GFP_KERNEL); -+ if (!tagger_data) -+ return -ENOMEM; -+ -+ ds->tagger_data = tagger_data; -+ -+ return 0; -+} -+ -+static void qca_tag_disconnect(struct dsa_switch *ds) -+{ -+ kfree(ds->tagger_data); -+ ds->tagger_data = NULL; -+} -+ - static const struct dsa_device_ops qca_netdev_ops = { - .name = "qca", - .proto = DSA_TAG_PROTO_QCA, -+ .connect = qca_tag_connect, -+ .disconnect = qca_tag_disconnect, - .xmit = qca_tag_xmit, - .rcv = qca_tag_rcv, - .needed_headroom = QCA_HDR_LEN, diff --git a/target/linux/generic/backport-6.1/766-v5.18-09-net-dsa-qca8k-add-tracking-state-of-master-port.patch b/target/linux/generic/backport-6.1/766-v5.18-09-net-dsa-qca8k-add-tracking-state-of-master-port.patch deleted file mode 100644 index eb21cc391246..000000000000 --- a/target/linux/generic/backport-6.1/766-v5.18-09-net-dsa-qca8k-add-tracking-state-of-master-port.patch +++ /dev/null @@ -1,67 +0,0 @@ -From cddbec19466a1dfb4d45ddd507d9f09f991d54ae Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Wed, 2 Feb 2022 01:03:28 +0100 -Subject: [PATCH 09/16] net: dsa: qca8k: add tracking state of master port - -MDIO/MIB Ethernet require the master port and the tagger availabale to -correctly work. Use the new api master_state_change to track when master -is operational or not and set a bool in qca8k_priv. 
-We cache the first cached master available and we check if other cpu -port are operational when the cached one goes down. -This cached master will later be used by mdio read/write and mib request to -correctly use the working function. - -qca8k implementation for MDIO/MIB Ethernet is bad. CPU port0 is the only -one that answers with the ack packet or sends MIB Ethernet packets. For -this reason the master_state_change ignore CPU port6 and only checks -CPU port0 if it's operational and enables this mode. - -Signed-off-by: Ansuel Smith -Reviewed-by: Florian Fainelli -Signed-off-by: David S. Miller ---- - drivers/net/dsa/qca8k.c | 15 +++++++++++++++ - drivers/net/dsa/qca8k.h | 1 + - 2 files changed, 16 insertions(+) - ---- a/drivers/net/dsa/qca8k.c -+++ b/drivers/net/dsa/qca8k.c -@@ -2401,6 +2401,20 @@ qca8k_port_lag_leave(struct dsa_switch * - return qca8k_lag_refresh_portmap(ds, port, lag, true); - } - -+static void -+qca8k_master_change(struct dsa_switch *ds, const struct net_device *master, -+ bool operational) -+{ -+ struct dsa_port *dp = master->dsa_ptr; -+ struct qca8k_priv *priv = ds->priv; -+ -+ /* Ethernet MIB/MDIO is only supported for CPU port 0 */ -+ if (dp->index != 0) -+ return; -+ -+ priv->mgmt_master = operational ? (struct net_device *)master : NULL; -+} -+ - static const struct dsa_switch_ops qca8k_switch_ops = { - .get_tag_protocol = qca8k_get_tag_protocol, - .setup = qca8k_setup, -@@ -2436,6 +2450,7 @@ static const struct dsa_switch_ops qca8k - .get_phy_flags = qca8k_get_phy_flags, - .port_lag_join = qca8k_port_lag_join, - .port_lag_leave = qca8k_port_lag_leave, -+ .master_state_change = qca8k_master_change, - }; - - static int qca8k_read_switch_id(struct qca8k_priv *priv) ---- a/drivers/net/dsa/qca8k.h -+++ b/drivers/net/dsa/qca8k.h -@@ -353,6 +353,7 @@ struct qca8k_priv { - struct dsa_switch_ops ops; - struct gpio_desc *reset_gpio; - unsigned int port_mtu[QCA8K_NUM_PORTS]; -+ struct net_device *mgmt_master; /* Track if mdio/mib Ethernet is available */ - }; - - struct qca8k_mib_desc { diff --git a/target/linux/generic/backport-6.1/766-v5.18-10-net-dsa-qca8k-add-support-for-mgmt-read-write-in-Eth.patch b/target/linux/generic/backport-6.1/766-v5.18-10-net-dsa-qca8k-add-support-for-mgmt-read-write-in-Eth.patch deleted file mode 100644 index 07c5ba462100..000000000000 --- a/target/linux/generic/backport-6.1/766-v5.18-10-net-dsa-qca8k-add-support-for-mgmt-read-write-in-Eth.patch +++ /dev/null @@ -1,363 +0,0 @@ -From 5950c7c0a68c915b336c70f79388626e2d576ab7 Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Wed, 2 Feb 2022 01:03:29 +0100 -Subject: [PATCH 10/16] net: dsa: qca8k: add support for mgmt read/write in - Ethernet packet - -Add qca8k side support for mgmt read/write in Ethernet packet. -qca8k supports some specially crafted Ethernet packet that can be used -for mgmt read/write instead of the legacy method uart/internal mdio. -This add support for the qca8k side to craft the packet and enqueue it. -Each port and the qca8k_priv have a special struct to put data in it. -The completion API is used to wait for the packet to be received back -with the requested data. - -The various steps are: -1. Craft the special packet with the qca hdr set to mgmt read/write - mode. -2. Set the lock in the dedicated mgmt struct. -3. Increment the seq number and set it in the mgmt pkt -4. Reinit the completion. -5. Enqueue the packet. -6. Wait the packet to be received. -7. Use the data set by the tagger to complete the mdio operation. 
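The sketch below condenses how those steps map onto the completion API in the read path. It is simplified from qca8k_read_eth() in the hunk that follows: the mgmt_master check and error paths are trimmed, and the local variables (priv, reg, val, skb, ret, ack, mgmt_eth_data) are assumed to be declared as in the driver.

    /* Minimal sketch of the Ethernet mgmt read flow, see qca8k_read_eth() below */
    skb = qca8k_alloc_mdio_header(MDIO_READ, reg, NULL,
                                  QCA8K_ETHERNET_MDIO_PRIORITY); /* 1. craft mgmt packet */
    mutex_lock(&mgmt_eth_data->mutex);                           /* 2. take the mgmt lock */
    skb->dev = priv->mgmt_master;
    reinit_completion(&mgmt_eth_data->rw_done);                  /* 4. reinit the completion */
    mgmt_eth_data->seq++;                                        /* 3. bump the seq number */
    qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
    mgmt_eth_data->ack = false;
    dev_queue_xmit(skb);                                         /* 5. enqueue the packet */
    ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,   /* 6. wait for the ack */
                                      msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
    *val = mgmt_eth_data->data[0];                               /* 7. data filled in by the tagger handler */
    ack = mgmt_eth_data->ack;
    mutex_unlock(&mgmt_eth_data->mutex);
    /* ret <= 0 (timeout) or !ack makes the caller fall back to legacy mdio */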
- -If the completion timeouts or the ack value is not true, the legacy -mdio way is used. - -It has to be considered that in the initial setup mdio is still used and -mdio is still used until DSA is ready to accept and tag packet. - -tag_proto_connect() is used to fill the required handler for the tagger -to correctly parse and elaborate the special Ethernet mdio packet. - -Locking is added to qca8k_master_change() to make sure no mgmt Ethernet -are in progress. - -Signed-off-by: Ansuel Smith -Signed-off-by: David S. Miller ---- - drivers/net/dsa/qca8k.c | 225 ++++++++++++++++++++++++++++++++++++++++ - drivers/net/dsa/qca8k.h | 13 +++ - 2 files changed, 238 insertions(+) - ---- a/drivers/net/dsa/qca8k.c -+++ b/drivers/net/dsa/qca8k.c -@@ -20,6 +20,7 @@ - #include - #include - #include -+#include - - #include "qca8k.h" - -@@ -170,6 +171,194 @@ qca8k_rmw(struct qca8k_priv *priv, u32 r - return regmap_update_bits(priv->regmap, reg, mask, write_val); - } - -+static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb) -+{ -+ struct qca8k_mgmt_eth_data *mgmt_eth_data; -+ struct qca8k_priv *priv = ds->priv; -+ struct qca_mgmt_ethhdr *mgmt_ethhdr; -+ u8 len, cmd; -+ -+ mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb_mac_header(skb); -+ mgmt_eth_data = &priv->mgmt_eth_data; -+ -+ cmd = FIELD_GET(QCA_HDR_MGMT_CMD, mgmt_ethhdr->command); -+ len = FIELD_GET(QCA_HDR_MGMT_LENGTH, mgmt_ethhdr->command); -+ -+ /* Make sure the seq match the requested packet */ -+ if (mgmt_ethhdr->seq == mgmt_eth_data->seq) -+ mgmt_eth_data->ack = true; -+ -+ if (cmd == MDIO_READ) { -+ mgmt_eth_data->data[0] = mgmt_ethhdr->mdio_data; -+ -+ /* Get the rest of the 12 byte of data */ -+ if (len > QCA_HDR_MGMT_DATA1_LEN) -+ memcpy(mgmt_eth_data->data + 1, skb->data, -+ QCA_HDR_MGMT_DATA2_LEN); -+ } -+ -+ complete(&mgmt_eth_data->rw_done); -+} -+ -+static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *val, -+ int priority) -+{ -+ struct qca_mgmt_ethhdr *mgmt_ethhdr; -+ struct sk_buff *skb; -+ u16 hdr; -+ -+ skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN); -+ if (!skb) -+ return NULL; -+ -+ skb_reset_mac_header(skb); -+ skb_set_network_header(skb, skb->len); -+ -+ mgmt_ethhdr = skb_push(skb, QCA_HDR_MGMT_HEADER_LEN + QCA_HDR_LEN); -+ -+ hdr = FIELD_PREP(QCA_HDR_XMIT_VERSION, QCA_HDR_VERSION); -+ hdr |= FIELD_PREP(QCA_HDR_XMIT_PRIORITY, priority); -+ hdr |= QCA_HDR_XMIT_FROM_CPU; -+ hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(0)); -+ hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG); -+ -+ mgmt_ethhdr->command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg); -+ mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, 4); -+ mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd); -+ mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE, -+ QCA_HDR_MGMT_CHECK_CODE_VAL); -+ -+ if (cmd == MDIO_WRITE) -+ mgmt_ethhdr->mdio_data = *val; -+ -+ mgmt_ethhdr->hdr = htons(hdr); -+ -+ skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN); -+ -+ return skb; -+} -+ -+static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num) -+{ -+ struct qca_mgmt_ethhdr *mgmt_ethhdr; -+ -+ mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb->data; -+ mgmt_ethhdr->seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num); -+} -+ -+static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val) -+{ -+ struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data; -+ struct sk_buff *skb; -+ bool ack; -+ int ret; -+ -+ skb = qca8k_alloc_mdio_header(MDIO_READ, reg, NULL, -+ 
QCA8K_ETHERNET_MDIO_PRIORITY); -+ if (!skb) -+ return -ENOMEM; -+ -+ mutex_lock(&mgmt_eth_data->mutex); -+ -+ /* Check mgmt_master if is operational */ -+ if (!priv->mgmt_master) { -+ kfree_skb(skb); -+ mutex_unlock(&mgmt_eth_data->mutex); -+ return -EINVAL; -+ } -+ -+ skb->dev = priv->mgmt_master; -+ -+ reinit_completion(&mgmt_eth_data->rw_done); -+ -+ /* Increment seq_num and set it in the mdio pkt */ -+ mgmt_eth_data->seq++; -+ qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq); -+ mgmt_eth_data->ack = false; -+ -+ dev_queue_xmit(skb); -+ -+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done, -+ msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT)); -+ -+ *val = mgmt_eth_data->data[0]; -+ ack = mgmt_eth_data->ack; -+ -+ mutex_unlock(&mgmt_eth_data->mutex); -+ -+ if (ret <= 0) -+ return -ETIMEDOUT; -+ -+ if (!ack) -+ return -EINVAL; -+ -+ return 0; -+} -+ -+static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 val) -+{ -+ struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data; -+ struct sk_buff *skb; -+ bool ack; -+ int ret; -+ -+ skb = qca8k_alloc_mdio_header(MDIO_WRITE, reg, &val, -+ QCA8K_ETHERNET_MDIO_PRIORITY); -+ if (!skb) -+ return -ENOMEM; -+ -+ mutex_lock(&mgmt_eth_data->mutex); -+ -+ /* Check mgmt_master if is operational */ -+ if (!priv->mgmt_master) { -+ kfree_skb(skb); -+ mutex_unlock(&mgmt_eth_data->mutex); -+ return -EINVAL; -+ } -+ -+ skb->dev = priv->mgmt_master; -+ -+ reinit_completion(&mgmt_eth_data->rw_done); -+ -+ /* Increment seq_num and set it in the mdio pkt */ -+ mgmt_eth_data->seq++; -+ qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq); -+ mgmt_eth_data->ack = false; -+ -+ dev_queue_xmit(skb); -+ -+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done, -+ msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT)); -+ -+ ack = mgmt_eth_data->ack; -+ -+ mutex_unlock(&mgmt_eth_data->mutex); -+ -+ if (ret <= 0) -+ return -ETIMEDOUT; -+ -+ if (!ack) -+ return -EINVAL; -+ -+ return 0; -+} -+ -+static int -+qca8k_regmap_update_bits_eth(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val) -+{ -+ u32 val = 0; -+ int ret; -+ -+ ret = qca8k_read_eth(priv, reg, &val); -+ if (ret) -+ return ret; -+ -+ val &= ~mask; -+ val |= write_val; -+ -+ return qca8k_write_eth(priv, reg, val); -+} -+ - static int - qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val) - { -@@ -178,6 +367,9 @@ qca8k_regmap_read(void *ctx, uint32_t re - u16 r1, r2, page; - int ret; - -+ if (!qca8k_read_eth(priv, reg, val)) -+ return 0; -+ - qca8k_split_addr(reg, &r1, &r2, &page); - - mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); -@@ -201,6 +393,9 @@ qca8k_regmap_write(void *ctx, uint32_t r - u16 r1, r2, page; - int ret; - -+ if (!qca8k_write_eth(priv, reg, val)) -+ return 0; -+ - qca8k_split_addr(reg, &r1, &r2, &page); - - mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); -@@ -225,6 +420,9 @@ qca8k_regmap_update_bits(void *ctx, uint - u32 val; - int ret; - -+ if (!qca8k_regmap_update_bits_eth(priv, reg, mask, write_val)) -+ return 0; -+ - qca8k_split_addr(reg, &r1, &r2, &page); - - mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); -@@ -2412,7 +2610,30 @@ qca8k_master_change(struct dsa_switch *d - if (dp->index != 0) - return; - -+ mutex_lock(&priv->mgmt_eth_data.mutex); -+ - priv->mgmt_master = operational ? 
(struct net_device *)master : NULL; -+ -+ mutex_unlock(&priv->mgmt_eth_data.mutex); -+} -+ -+static int qca8k_connect_tag_protocol(struct dsa_switch *ds, -+ enum dsa_tag_protocol proto) -+{ -+ struct qca_tagger_data *tagger_data; -+ -+ switch (proto) { -+ case DSA_TAG_PROTO_QCA: -+ tagger_data = ds->tagger_data; -+ -+ tagger_data->rw_reg_ack_handler = qca8k_rw_reg_ack_handler; -+ -+ break; -+ default: -+ return -EOPNOTSUPP; -+ } -+ -+ return 0; - } - - static const struct dsa_switch_ops qca8k_switch_ops = { -@@ -2451,6 +2672,7 @@ static const struct dsa_switch_ops qca8k - .port_lag_join = qca8k_port_lag_join, - .port_lag_leave = qca8k_port_lag_leave, - .master_state_change = qca8k_master_change, -+ .connect_tag_protocol = qca8k_connect_tag_protocol, - }; - - static int qca8k_read_switch_id(struct qca8k_priv *priv) -@@ -2530,6 +2752,9 @@ qca8k_sw_probe(struct mdio_device *mdiod - if (!priv->ds) - return -ENOMEM; - -+ mutex_init(&priv->mgmt_eth_data.mutex); -+ init_completion(&priv->mgmt_eth_data.rw_done); -+ - priv->ds->dev = &mdiodev->dev; - priv->ds->num_ports = QCA8K_NUM_PORTS; - priv->ds->priv = priv; ---- a/drivers/net/dsa/qca8k.h -+++ b/drivers/net/dsa/qca8k.h -@@ -11,6 +11,10 @@ - #include - #include - #include -+#include -+ -+#define QCA8K_ETHERNET_MDIO_PRIORITY 7 -+#define QCA8K_ETHERNET_TIMEOUT 100 - - #define QCA8K_NUM_PORTS 7 - #define QCA8K_NUM_CPU_PORTS 2 -@@ -328,6 +332,14 @@ enum { - QCA8K_CPU_PORT6, - }; - -+struct qca8k_mgmt_eth_data { -+ struct completion rw_done; -+ struct mutex mutex; /* Enforce one mdio read/write at time */ -+ bool ack; -+ u32 seq; -+ u32 data[4]; -+}; -+ - struct qca8k_ports_config { - bool sgmii_rx_clk_falling_edge; - bool sgmii_tx_clk_falling_edge; -@@ -354,6 +366,7 @@ struct qca8k_priv { - struct gpio_desc *reset_gpio; - unsigned int port_mtu[QCA8K_NUM_PORTS]; - struct net_device *mgmt_master; /* Track if mdio/mib Ethernet is available */ -+ struct qca8k_mgmt_eth_data mgmt_eth_data; - }; - - struct qca8k_mib_desc { diff --git a/target/linux/generic/backport-6.1/766-v5.18-11-net-dsa-qca8k-add-support-for-mib-autocast-in-Ethern.patch b/target/linux/generic/backport-6.1/766-v5.18-11-net-dsa-qca8k-add-support-for-mib-autocast-in-Ethern.patch deleted file mode 100644 index 0dcf27943383..000000000000 --- a/target/linux/generic/backport-6.1/766-v5.18-11-net-dsa-qca8k-add-support-for-mib-autocast-in-Ethern.patch +++ /dev/null @@ -1,226 +0,0 @@ -From 5c957c7ca78cce5e4b96866722b0115bd758d945 Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Wed, 2 Feb 2022 01:03:30 +0100 -Subject: [PATCH 11/16] net: dsa: qca8k: add support for mib autocast in - Ethernet packet - -The switch can autocast MIB counter using Ethernet packet. -Add support for this and provide a handler for the tagger. -The switch will send packet with MIB counter for each port, the switch -will use completion API to wait for the correct packet to be received -and will complete the task only when each packet is received. -Although the handler will drop all the other packet, we still have to -consume each MIB packet to complete the request. This is done to prevent -mixed data with concurrent ethtool request. - -connect_tag_protocol() is used to add the handler to the tag_qca tagger, -master_state_change() use the MIB lock to make sure no MIB Ethernet is -in progress. - -Signed-off-by: Ansuel Smith -Signed-off-by: David S. 
Miller ---- - drivers/net/dsa/qca8k.c | 106 +++++++++++++++++++++++++++++++++++++++- - drivers/net/dsa/qca8k.h | 17 ++++++- - 2 files changed, 121 insertions(+), 2 deletions(-) - ---- a/drivers/net/dsa/qca8k.c -+++ b/drivers/net/dsa/qca8k.c -@@ -830,7 +830,10 @@ qca8k_mib_init(struct qca8k_priv *priv) - int ret; - - mutex_lock(&priv->reg_mutex); -- ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_FLUSH | QCA8K_MIB_BUSY); -+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB, -+ QCA8K_MIB_FUNC | QCA8K_MIB_BUSY, -+ FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_FLUSH) | -+ QCA8K_MIB_BUSY); - if (ret) - goto exit; - -@@ -1901,6 +1904,97 @@ qca8k_get_strings(struct dsa_switch *ds, - ETH_GSTRING_LEN); - } - -+static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *skb) -+{ -+ const struct qca8k_match_data *match_data; -+ struct qca8k_mib_eth_data *mib_eth_data; -+ struct qca8k_priv *priv = ds->priv; -+ const struct qca8k_mib_desc *mib; -+ struct mib_ethhdr *mib_ethhdr; -+ int i, mib_len, offset = 0; -+ u64 *data; -+ u8 port; -+ -+ mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb); -+ mib_eth_data = &priv->mib_eth_data; -+ -+ /* The switch autocast every port. Ignore other packet and -+ * parse only the requested one. -+ */ -+ port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, ntohs(mib_ethhdr->hdr)); -+ if (port != mib_eth_data->req_port) -+ goto exit; -+ -+ match_data = device_get_match_data(priv->dev); -+ data = mib_eth_data->data; -+ -+ for (i = 0; i < match_data->mib_count; i++) { -+ mib = &ar8327_mib[i]; -+ -+ /* First 3 mib are present in the skb head */ -+ if (i < 3) { -+ data[i] = mib_ethhdr->data[i]; -+ continue; -+ } -+ -+ mib_len = sizeof(uint32_t); -+ -+ /* Some mib are 64 bit wide */ -+ if (mib->size == 2) -+ mib_len = sizeof(uint64_t); -+ -+ /* Copy the mib value from packet to the */ -+ memcpy(data + i, skb->data + offset, mib_len); -+ -+ /* Set the offset for the next mib */ -+ offset += mib_len; -+ } -+ -+exit: -+ /* Complete on receiving all the mib packet */ -+ if (refcount_dec_and_test(&mib_eth_data->port_parsed)) -+ complete(&mib_eth_data->rw_done); -+} -+ -+static int -+qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data) -+{ -+ struct dsa_port *dp = dsa_to_port(ds, port); -+ struct qca8k_mib_eth_data *mib_eth_data; -+ struct qca8k_priv *priv = ds->priv; -+ int ret; -+ -+ mib_eth_data = &priv->mib_eth_data; -+ -+ mutex_lock(&mib_eth_data->mutex); -+ -+ reinit_completion(&mib_eth_data->rw_done); -+ -+ mib_eth_data->req_port = dp->index; -+ mib_eth_data->data = data; -+ refcount_set(&mib_eth_data->port_parsed, QCA8K_NUM_PORTS); -+ -+ mutex_lock(&priv->reg_mutex); -+ -+ /* Send mib autocast request */ -+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB, -+ QCA8K_MIB_FUNC | QCA8K_MIB_BUSY, -+ FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_CAST) | -+ QCA8K_MIB_BUSY); -+ -+ mutex_unlock(&priv->reg_mutex); -+ -+ if (ret) -+ goto exit; -+ -+ ret = wait_for_completion_timeout(&mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT); -+ -+exit: -+ mutex_unlock(&mib_eth_data->mutex); -+ -+ return ret; -+} -+ - static void - qca8k_get_ethtool_stats(struct dsa_switch *ds, int port, - uint64_t *data) -@@ -1912,6 +2006,10 @@ qca8k_get_ethtool_stats(struct dsa_switc - u32 hi = 0; - int ret; - -+ if (priv->mgmt_master && -+ qca8k_get_ethtool_stats_eth(ds, port, data) > 0) -+ return; -+ - match_data = of_device_get_match_data(priv->dev); - - for (i = 0; i < match_data->mib_count; i++) { -@@ -2611,9 +2709,11 @@ qca8k_master_change(struct dsa_switch *d - return; - - 
mutex_lock(&priv->mgmt_eth_data.mutex); -+ mutex_lock(&priv->mib_eth_data.mutex); - - priv->mgmt_master = operational ? (struct net_device *)master : NULL; - -+ mutex_unlock(&priv->mib_eth_data.mutex); - mutex_unlock(&priv->mgmt_eth_data.mutex); - } - -@@ -2627,6 +2727,7 @@ static int qca8k_connect_tag_protocol(st - tagger_data = ds->tagger_data; - - tagger_data->rw_reg_ack_handler = qca8k_rw_reg_ack_handler; -+ tagger_data->mib_autocast_handler = qca8k_mib_autocast_handler; - - break; - default: -@@ -2755,6 +2856,9 @@ qca8k_sw_probe(struct mdio_device *mdiod - mutex_init(&priv->mgmt_eth_data.mutex); - init_completion(&priv->mgmt_eth_data.rw_done); - -+ mutex_init(&priv->mib_eth_data.mutex); -+ init_completion(&priv->mib_eth_data.rw_done); -+ - priv->ds->dev = &mdiodev->dev; - priv->ds->num_ports = QCA8K_NUM_PORTS; - priv->ds->priv = priv; ---- a/drivers/net/dsa/qca8k.h -+++ b/drivers/net/dsa/qca8k.h -@@ -67,7 +67,7 @@ - #define QCA8K_REG_MODULE_EN 0x030 - #define QCA8K_MODULE_EN_MIB BIT(0) - #define QCA8K_REG_MIB 0x034 --#define QCA8K_MIB_FLUSH BIT(24) -+#define QCA8K_MIB_FUNC GENMASK(26, 24) - #define QCA8K_MIB_CPU_KEEP BIT(20) - #define QCA8K_MIB_BUSY BIT(17) - #define QCA8K_MDIO_MASTER_CTRL 0x3c -@@ -317,6 +317,12 @@ enum qca8k_vlan_cmd { - QCA8K_VLAN_READ = 6, - }; - -+enum qca8k_mid_cmd { -+ QCA8K_MIB_FLUSH = 1, -+ QCA8K_MIB_FLUSH_PORT = 2, -+ QCA8K_MIB_CAST = 3, -+}; -+ - struct ar8xxx_port_status { - int enabled; - }; -@@ -340,6 +346,14 @@ struct qca8k_mgmt_eth_data { - u32 data[4]; - }; - -+struct qca8k_mib_eth_data { -+ struct completion rw_done; -+ struct mutex mutex; /* Process one command at time */ -+ refcount_t port_parsed; /* Counter to track parsed port */ -+ u8 req_port; -+ u64 *data; /* pointer to ethtool data */ -+}; -+ - struct qca8k_ports_config { - bool sgmii_rx_clk_falling_edge; - bool sgmii_tx_clk_falling_edge; -@@ -367,6 +381,7 @@ struct qca8k_priv { - unsigned int port_mtu[QCA8K_NUM_PORTS]; - struct net_device *mgmt_master; /* Track if mdio/mib Ethernet is available */ - struct qca8k_mgmt_eth_data mgmt_eth_data; -+ struct qca8k_mib_eth_data mib_eth_data; - }; - - struct qca8k_mib_desc { diff --git a/target/linux/generic/backport-6.1/766-v5.18-12-net-dsa-qca8k-add-support-for-phy-read-write-with-mg.patch b/target/linux/generic/backport-6.1/766-v5.18-12-net-dsa-qca8k-add-support-for-phy-read-write-with-mg.patch deleted file mode 100644 index f5899eb5901a..000000000000 --- a/target/linux/generic/backport-6.1/766-v5.18-12-net-dsa-qca8k-add-support-for-phy-read-write-with-mg.patch +++ /dev/null @@ -1,287 +0,0 @@ -From 2cd5485663847d468dc207b3ff85fb1fab44d97f Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Wed, 2 Feb 2022 01:03:31 +0100 -Subject: [PATCH 12/16] net: dsa: qca8k: add support for phy read/write with - mgmt Ethernet - -Use mgmt Ethernet also for phy read/write if availabale. Use a different -seq number to make sure we receive the correct packet. -On any error, we fallback to the legacy mdio read/write. - -Signed-off-by: Ansuel Smith -Signed-off-by: David S. 
Miller ---- - drivers/net/dsa/qca8k.c | 216 ++++++++++++++++++++++++++++++++++++++++ - drivers/net/dsa/qca8k.h | 1 + - 2 files changed, 217 insertions(+) - ---- a/drivers/net/dsa/qca8k.c -+++ b/drivers/net/dsa/qca8k.c -@@ -867,6 +867,199 @@ qca8k_port_set_status(struct qca8k_priv - regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask); - } - -+static int -+qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data, -+ struct sk_buff *read_skb, u32 *val) -+{ -+ struct sk_buff *skb = skb_copy(read_skb, GFP_KERNEL); -+ bool ack; -+ int ret; -+ -+ reinit_completion(&mgmt_eth_data->rw_done); -+ -+ /* Increment seq_num and set it in the copy pkt */ -+ mgmt_eth_data->seq++; -+ qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq); -+ mgmt_eth_data->ack = false; -+ -+ dev_queue_xmit(skb); -+ -+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done, -+ QCA8K_ETHERNET_TIMEOUT); -+ -+ ack = mgmt_eth_data->ack; -+ -+ if (ret <= 0) -+ return -ETIMEDOUT; -+ -+ if (!ack) -+ return -EINVAL; -+ -+ *val = mgmt_eth_data->data[0]; -+ -+ return 0; -+} -+ -+static int -+qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy, -+ int regnum, u16 data) -+{ -+ struct sk_buff *write_skb, *clear_skb, *read_skb; -+ struct qca8k_mgmt_eth_data *mgmt_eth_data; -+ u32 write_val, clear_val = 0, val; -+ struct net_device *mgmt_master; -+ int ret, ret1; -+ bool ack; -+ -+ if (regnum >= QCA8K_MDIO_MASTER_MAX_REG) -+ return -EINVAL; -+ -+ mgmt_eth_data = &priv->mgmt_eth_data; -+ -+ write_val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN | -+ QCA8K_MDIO_MASTER_PHY_ADDR(phy) | -+ QCA8K_MDIO_MASTER_REG_ADDR(regnum); -+ -+ if (read) { -+ write_val |= QCA8K_MDIO_MASTER_READ; -+ } else { -+ write_val |= QCA8K_MDIO_MASTER_WRITE; -+ write_val |= QCA8K_MDIO_MASTER_DATA(data); -+ } -+ -+ /* Prealloc all the needed skb before the lock */ -+ write_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, -+ &write_val, QCA8K_ETHERNET_PHY_PRIORITY); -+ if (!write_skb) -+ return -ENOMEM; -+ -+ clear_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, -+ &clear_val, QCA8K_ETHERNET_PHY_PRIORITY); -+ if (!write_skb) { -+ ret = -ENOMEM; -+ goto err_clear_skb; -+ } -+ -+ read_skb = qca8k_alloc_mdio_header(MDIO_READ, QCA8K_MDIO_MASTER_CTRL, -+ &clear_val, QCA8K_ETHERNET_PHY_PRIORITY); -+ if (!write_skb) { -+ ret = -ENOMEM; -+ goto err_read_skb; -+ } -+ -+ /* Actually start the request: -+ * 1. Send mdio master packet -+ * 2. Busy Wait for mdio master command -+ * 3. Get the data if we are reading -+ * 4. 
Reset the mdio master (even with error) -+ */ -+ mutex_lock(&mgmt_eth_data->mutex); -+ -+ /* Check if mgmt_master is operational */ -+ mgmt_master = priv->mgmt_master; -+ if (!mgmt_master) { -+ mutex_unlock(&mgmt_eth_data->mutex); -+ ret = -EINVAL; -+ goto err_mgmt_master; -+ } -+ -+ read_skb->dev = mgmt_master; -+ clear_skb->dev = mgmt_master; -+ write_skb->dev = mgmt_master; -+ -+ reinit_completion(&mgmt_eth_data->rw_done); -+ -+ /* Increment seq_num and set it in the write pkt */ -+ mgmt_eth_data->seq++; -+ qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq); -+ mgmt_eth_data->ack = false; -+ -+ dev_queue_xmit(write_skb); -+ -+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done, -+ QCA8K_ETHERNET_TIMEOUT); -+ -+ ack = mgmt_eth_data->ack; -+ -+ if (ret <= 0) { -+ ret = -ETIMEDOUT; -+ kfree_skb(read_skb); -+ goto exit; -+ } -+ -+ if (!ack) { -+ ret = -EINVAL; -+ kfree_skb(read_skb); -+ goto exit; -+ } -+ -+ ret = read_poll_timeout(qca8k_phy_eth_busy_wait, ret1, -+ !(val & QCA8K_MDIO_MASTER_BUSY), 0, -+ QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false, -+ mgmt_eth_data, read_skb, &val); -+ -+ if (ret < 0 && ret1 < 0) { -+ ret = ret1; -+ goto exit; -+ } -+ -+ if (read) { -+ reinit_completion(&mgmt_eth_data->rw_done); -+ -+ /* Increment seq_num and set it in the read pkt */ -+ mgmt_eth_data->seq++; -+ qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq); -+ mgmt_eth_data->ack = false; -+ -+ dev_queue_xmit(read_skb); -+ -+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done, -+ QCA8K_ETHERNET_TIMEOUT); -+ -+ ack = mgmt_eth_data->ack; -+ -+ if (ret <= 0) { -+ ret = -ETIMEDOUT; -+ goto exit; -+ } -+ -+ if (!ack) { -+ ret = -EINVAL; -+ goto exit; -+ } -+ -+ ret = mgmt_eth_data->data[0] & QCA8K_MDIO_MASTER_DATA_MASK; -+ } else { -+ kfree_skb(read_skb); -+ } -+exit: -+ reinit_completion(&mgmt_eth_data->rw_done); -+ -+ /* Increment seq_num and set it in the clear pkt */ -+ mgmt_eth_data->seq++; -+ qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq); -+ mgmt_eth_data->ack = false; -+ -+ dev_queue_xmit(clear_skb); -+ -+ wait_for_completion_timeout(&mgmt_eth_data->rw_done, -+ QCA8K_ETHERNET_TIMEOUT); -+ -+ mutex_unlock(&mgmt_eth_data->mutex); -+ -+ return ret; -+ -+ /* Error handling before lock */ -+err_mgmt_master: -+ kfree_skb(read_skb); -+err_read_skb: -+ kfree_skb(clear_skb); -+err_clear_skb: -+ kfree_skb(write_skb); -+ -+ return ret; -+} -+ - static u32 - qca8k_port_to_phy(int port) - { -@@ -989,6 +1182,12 @@ qca8k_internal_mdio_write(struct mii_bus - { - struct qca8k_priv *priv = slave_bus->priv; - struct mii_bus *bus = priv->bus; -+ int ret; -+ -+ /* Use mdio Ethernet when available, fallback to legacy one on error */ -+ ret = qca8k_phy_eth_command(priv, false, phy, regnum, data); -+ if (!ret) -+ return 0; - - return qca8k_mdio_write(bus, phy, regnum, data); - } -@@ -998,6 +1197,12 @@ qca8k_internal_mdio_read(struct mii_bus - { - struct qca8k_priv *priv = slave_bus->priv; - struct mii_bus *bus = priv->bus; -+ int ret; -+ -+ /* Use mdio Ethernet when available, fallback to legacy one on error */ -+ ret = qca8k_phy_eth_command(priv, true, phy, regnum, 0); -+ if (ret >= 0) -+ return ret; - - return qca8k_mdio_read(bus, phy, regnum); - } -@@ -1006,6 +1211,7 @@ static int - qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data) - { - struct qca8k_priv *priv = ds->priv; -+ int ret; - - /* Check if the legacy mapping should be used and the - * port is not correctly mapped to the right PHY in the -@@ -1014,6 +1220,11 @@ qca8k_phy_write(struct 
dsa_switch *ds, i - if (priv->legacy_phy_port_mapping) - port = qca8k_port_to_phy(port) % PHY_MAX_ADDR; - -+ /* Use mdio Ethernet when available, fallback to legacy one on error */ -+ ret = qca8k_phy_eth_command(priv, false, port, regnum, 0); -+ if (!ret) -+ return ret; -+ - return qca8k_mdio_write(priv->bus, port, regnum, data); - } - -@@ -1030,6 +1241,11 @@ qca8k_phy_read(struct dsa_switch *ds, in - if (priv->legacy_phy_port_mapping) - port = qca8k_port_to_phy(port) % PHY_MAX_ADDR; - -+ /* Use mdio Ethernet when available, fallback to legacy one on error */ -+ ret = qca8k_phy_eth_command(priv, true, port, regnum, 0); -+ if (ret >= 0) -+ return ret; -+ - ret = qca8k_mdio_read(priv->bus, port, regnum); - - if (ret < 0) ---- a/drivers/net/dsa/qca8k.h -+++ b/drivers/net/dsa/qca8k.h -@@ -14,6 +14,7 @@ - #include - - #define QCA8K_ETHERNET_MDIO_PRIORITY 7 -+#define QCA8K_ETHERNET_PHY_PRIORITY 6 - #define QCA8K_ETHERNET_TIMEOUT 100 - - #define QCA8K_NUM_PORTS 7 diff --git a/target/linux/generic/backport-6.1/766-v5.18-13-net-dsa-qca8k-move-page-cache-to-driver-priv.patch b/target/linux/generic/backport-6.1/766-v5.18-13-net-dsa-qca8k-move-page-cache-to-driver-priv.patch deleted file mode 100644 index 4ac0bc32fdbe..000000000000 --- a/target/linux/generic/backport-6.1/766-v5.18-13-net-dsa-qca8k-move-page-cache-to-driver-priv.patch +++ /dev/null @@ -1,208 +0,0 @@ -From 4264350acb75430d5021a1d7de56a33faf69a097 Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Wed, 2 Feb 2022 01:03:32 +0100 -Subject: [PATCH 13/16] net: dsa: qca8k: move page cache to driver priv - -There can be multiple qca8k switch on the same system. Move the static -qca8k_current_page to qca8k_priv and make it specific for each switch. - -Signed-off-by: Ansuel Smith -Reviewed-by: Florian Fainelli -Signed-off-by: David S. Miller ---- - drivers/net/dsa/qca8k.c | 42 ++++++++++++++++++++--------------------- - drivers/net/dsa/qca8k.h | 9 +++++++++ - 2 files changed, 29 insertions(+), 22 deletions(-) - ---- a/drivers/net/dsa/qca8k.c -+++ b/drivers/net/dsa/qca8k.c -@@ -75,12 +75,6 @@ static const struct qca8k_mib_desc ar832 - MIB_DESC(1, 0xac, "TXUnicast"), - }; - --/* The 32bit switch registers are accessed indirectly. To achieve this we need -- * to set the page of the register. 
Track the last page that was set to reduce -- * mdio writes -- */ --static u16 qca8k_current_page = 0xffff; -- - static void - qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page) - { -@@ -134,11 +128,13 @@ qca8k_mii_write32(struct mii_bus *bus, i - } - - static int --qca8k_set_page(struct mii_bus *bus, u16 page) -+qca8k_set_page(struct qca8k_priv *priv, u16 page) - { -+ u16 *cached_page = &priv->mdio_cache.page; -+ struct mii_bus *bus = priv->bus; - int ret; - -- if (page == qca8k_current_page) -+ if (page == *cached_page) - return 0; - - ret = bus->write(bus, 0x18, 0, page); -@@ -148,7 +144,7 @@ qca8k_set_page(struct mii_bus *bus, u16 - return ret; - } - -- qca8k_current_page = page; -+ *cached_page = page; - usleep_range(1000, 2000); - return 0; - } -@@ -374,7 +370,7 @@ qca8k_regmap_read(void *ctx, uint32_t re - - mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); - -- ret = qca8k_set_page(bus, page); -+ ret = qca8k_set_page(priv, page); - if (ret < 0) - goto exit; - -@@ -400,7 +396,7 @@ qca8k_regmap_write(void *ctx, uint32_t r - - mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); - -- ret = qca8k_set_page(bus, page); -+ ret = qca8k_set_page(priv, page); - if (ret < 0) - goto exit; - -@@ -427,7 +423,7 @@ qca8k_regmap_update_bits(void *ctx, uint - - mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); - -- ret = qca8k_set_page(bus, page); -+ ret = qca8k_set_page(priv, page); - if (ret < 0) - goto exit; - -@@ -1098,8 +1094,9 @@ qca8k_mdio_busy_wait(struct mii_bus *bus - } - - static int --qca8k_mdio_write(struct mii_bus *bus, int phy, int regnum, u16 data) -+qca8k_mdio_write(struct qca8k_priv *priv, int phy, int regnum, u16 data) - { -+ struct mii_bus *bus = priv->bus; - u16 r1, r2, page; - u32 val; - int ret; -@@ -1116,7 +1113,7 @@ qca8k_mdio_write(struct mii_bus *bus, in - - mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); - -- ret = qca8k_set_page(bus, page); -+ ret = qca8k_set_page(priv, page); - if (ret) - goto exit; - -@@ -1135,8 +1132,9 @@ exit: - } - - static int --qca8k_mdio_read(struct mii_bus *bus, int phy, int regnum) -+qca8k_mdio_read(struct qca8k_priv *priv, int phy, int regnum) - { -+ struct mii_bus *bus = priv->bus; - u16 r1, r2, page; - u32 val; - int ret; -@@ -1152,7 +1150,7 @@ qca8k_mdio_read(struct mii_bus *bus, int - - mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); - -- ret = qca8k_set_page(bus, page); -+ ret = qca8k_set_page(priv, page); - if (ret) - goto exit; - -@@ -1181,7 +1179,6 @@ static int - qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data) - { - struct qca8k_priv *priv = slave_bus->priv; -- struct mii_bus *bus = priv->bus; - int ret; - - /* Use mdio Ethernet when available, fallback to legacy one on error */ -@@ -1189,14 +1186,13 @@ qca8k_internal_mdio_write(struct mii_bus - if (!ret) - return 0; - -- return qca8k_mdio_write(bus, phy, regnum, data); -+ return qca8k_mdio_write(priv, phy, regnum, data); - } - - static int - qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum) - { - struct qca8k_priv *priv = slave_bus->priv; -- struct mii_bus *bus = priv->bus; - int ret; - - /* Use mdio Ethernet when available, fallback to legacy one on error */ -@@ -1204,7 +1200,7 @@ qca8k_internal_mdio_read(struct mii_bus - if (ret >= 0) - return ret; - -- return qca8k_mdio_read(bus, phy, regnum); -+ return qca8k_mdio_read(priv, phy, regnum); - } - - static int -@@ -1225,7 +1221,7 @@ qca8k_phy_write(struct dsa_switch *ds, i - if (!ret) - return ret; - -- return qca8k_mdio_write(priv->bus, 
port, regnum, data); -+ return qca8k_mdio_write(priv, port, regnum, data); - } - - static int -@@ -1246,7 +1242,7 @@ qca8k_phy_read(struct dsa_switch *ds, in - if (ret >= 0) - return ret; - -- ret = qca8k_mdio_read(priv->bus, port, regnum); -+ ret = qca8k_mdio_read(priv, port, regnum); - - if (ret < 0) - return 0xffff; -@@ -3060,6 +3056,8 @@ qca8k_sw_probe(struct mdio_device *mdiod - return PTR_ERR(priv->regmap); - } - -+ priv->mdio_cache.page = 0xffff; -+ - /* Check the detected switch id */ - ret = qca8k_read_switch_id(priv); - if (ret) ---- a/drivers/net/dsa/qca8k.h -+++ b/drivers/net/dsa/qca8k.h -@@ -363,6 +363,14 @@ struct qca8k_ports_config { - u8 rgmii_tx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */ - }; - -+struct qca8k_mdio_cache { -+/* The 32bit switch registers are accessed indirectly. To achieve this we need -+ * to set the page of the register. Track the last page that was set to reduce -+ * mdio writes -+ */ -+ u16 page; -+}; -+ - struct qca8k_priv { - u8 switch_id; - u8 switch_revision; -@@ -383,6 +391,7 @@ struct qca8k_priv { - struct net_device *mgmt_master; /* Track if mdio/mib Ethernet is available */ - struct qca8k_mgmt_eth_data mgmt_eth_data; - struct qca8k_mib_eth_data mib_eth_data; -+ struct qca8k_mdio_cache mdio_cache; - }; - - struct qca8k_mib_desc { diff --git a/target/linux/generic/backport-6.1/766-v5.18-14-net-dsa-qca8k-cache-lo-and-hi-for-mdio-write.patch b/target/linux/generic/backport-6.1/766-v5.18-14-net-dsa-qca8k-cache-lo-and-hi-for-mdio-write.patch deleted file mode 100644 index e2cb2721ce92..000000000000 --- a/target/linux/generic/backport-6.1/766-v5.18-14-net-dsa-qca8k-cache-lo-and-hi-for-mdio-write.patch +++ /dev/null @@ -1,164 +0,0 @@ -From 2481d206fae7884cd07014fd1318e63af35e99eb Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Wed, 2 Feb 2022 01:03:33 +0100 -Subject: [PATCH 14/16] net: dsa: qca8k: cache lo and hi for mdio write - -From Documentation, we can cache lo and hi the same way we do with the -page. This massively reduce the mdio write as 3/4 of the time as we only -require to write the lo or hi part for a mdio write. - -Signed-off-by: Ansuel Smith -Reviewed-by: Florian Fainelli -Signed-off-by: David S. 
Miller ---- - drivers/net/dsa/qca8k.c | 61 +++++++++++++++++++++++++++++++++-------- - drivers/net/dsa/qca8k.h | 5 ++++ - 2 files changed, 54 insertions(+), 12 deletions(-) - ---- a/drivers/net/dsa/qca8k.c -+++ b/drivers/net/dsa/qca8k.c -@@ -89,6 +89,44 @@ qca8k_split_addr(u32 regaddr, u16 *r1, u - } - - static int -+qca8k_set_lo(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 lo) -+{ -+ u16 *cached_lo = &priv->mdio_cache.lo; -+ struct mii_bus *bus = priv->bus; -+ int ret; -+ -+ if (lo == *cached_lo) -+ return 0; -+ -+ ret = bus->write(bus, phy_id, regnum, lo); -+ if (ret < 0) -+ dev_err_ratelimited(&bus->dev, -+ "failed to write qca8k 32bit lo register\n"); -+ -+ *cached_lo = lo; -+ return 0; -+} -+ -+static int -+qca8k_set_hi(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 hi) -+{ -+ u16 *cached_hi = &priv->mdio_cache.hi; -+ struct mii_bus *bus = priv->bus; -+ int ret; -+ -+ if (hi == *cached_hi) -+ return 0; -+ -+ ret = bus->write(bus, phy_id, regnum, hi); -+ if (ret < 0) -+ dev_err_ratelimited(&bus->dev, -+ "failed to write qca8k 32bit hi register\n"); -+ -+ *cached_hi = hi; -+ return 0; -+} -+ -+static int - qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val) - { - int ret; -@@ -111,7 +149,7 @@ qca8k_mii_read32(struct mii_bus *bus, in - } - - static void --qca8k_mii_write32(struct mii_bus *bus, int phy_id, u32 regnum, u32 val) -+qca8k_mii_write32(struct qca8k_priv *priv, int phy_id, u32 regnum, u32 val) - { - u16 lo, hi; - int ret; -@@ -119,12 +157,9 @@ qca8k_mii_write32(struct mii_bus *bus, i - lo = val & 0xffff; - hi = (u16)(val >> 16); - -- ret = bus->write(bus, phy_id, regnum, lo); -+ ret = qca8k_set_lo(priv, phy_id, regnum, lo); - if (ret >= 0) -- ret = bus->write(bus, phy_id, regnum + 1, hi); -- if (ret < 0) -- dev_err_ratelimited(&bus->dev, -- "failed to write qca8k 32bit register\n"); -+ ret = qca8k_set_hi(priv, phy_id, regnum + 1, hi); - } - - static int -@@ -400,7 +435,7 @@ qca8k_regmap_write(void *ctx, uint32_t r - if (ret < 0) - goto exit; - -- qca8k_mii_write32(bus, 0x10 | r2, r1, val); -+ qca8k_mii_write32(priv, 0x10 | r2, r1, val); - - exit: - mutex_unlock(&bus->mdio_lock); -@@ -433,7 +468,7 @@ qca8k_regmap_update_bits(void *ctx, uint - - val &= ~mask; - val |= write_val; -- qca8k_mii_write32(bus, 0x10 | r2, r1, val); -+ qca8k_mii_write32(priv, 0x10 | r2, r1, val); - - exit: - mutex_unlock(&bus->mdio_lock); -@@ -1117,14 +1152,14 @@ qca8k_mdio_write(struct qca8k_priv *priv - if (ret) - goto exit; - -- qca8k_mii_write32(bus, 0x10 | r2, r1, val); -+ qca8k_mii_write32(priv, 0x10 | r2, r1, val); - - ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL, - QCA8K_MDIO_MASTER_BUSY); - - exit: - /* even if the busy_wait timeouts try to clear the MASTER_EN */ -- qca8k_mii_write32(bus, 0x10 | r2, r1, 0); -+ qca8k_mii_write32(priv, 0x10 | r2, r1, 0); - - mutex_unlock(&bus->mdio_lock); - -@@ -1154,7 +1189,7 @@ qca8k_mdio_read(struct qca8k_priv *priv, - if (ret) - goto exit; - -- qca8k_mii_write32(bus, 0x10 | r2, r1, val); -+ qca8k_mii_write32(priv, 0x10 | r2, r1, val); - - ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL, - QCA8K_MDIO_MASTER_BUSY); -@@ -1165,7 +1200,7 @@ qca8k_mdio_read(struct qca8k_priv *priv, - - exit: - /* even if the busy_wait timeouts try to clear the MASTER_EN */ -- qca8k_mii_write32(bus, 0x10 | r2, r1, 0); -+ qca8k_mii_write32(priv, 0x10 | r2, r1, 0); - - mutex_unlock(&bus->mdio_lock); - -@@ -3057,6 +3092,8 @@ qca8k_sw_probe(struct mdio_device *mdiod - } - - priv->mdio_cache.page = 0xffff; -+ priv->mdio_cache.lo = 0xffff; 
-+ priv->mdio_cache.hi = 0xffff; - - /* Check the detected switch id */ - ret = qca8k_read_switch_id(priv); ---- a/drivers/net/dsa/qca8k.h -+++ b/drivers/net/dsa/qca8k.h -@@ -369,6 +369,11 @@ struct qca8k_mdio_cache { - * mdio writes - */ - u16 page; -+/* lo and hi can also be cached and from Documentation we can skip one -+ * extra mdio write if lo or hi is didn't change. -+ */ -+ u16 lo; -+ u16 hi; - }; - - struct qca8k_priv { diff --git a/target/linux/generic/backport-6.1/766-v5.18-15-net-dsa-qca8k-add-support-for-larger-read-write-size.patch b/target/linux/generic/backport-6.1/766-v5.18-15-net-dsa-qca8k-add-support-for-larger-read-write-size.patch deleted file mode 100644 index 5acd13dbad8e..000000000000 --- a/target/linux/generic/backport-6.1/766-v5.18-15-net-dsa-qca8k-add-support-for-larger-read-write-size.patch +++ /dev/null @@ -1,206 +0,0 @@ -From 90386223f44e2a751d7e9e9ac8f78ea33358a891 Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Wed, 2 Feb 2022 01:03:34 +0100 -Subject: [PATCH 15/16] net: dsa: qca8k: add support for larger read/write size - with mgmt Ethernet - -mgmt Ethernet packet can read/write up to 16byte at times. The len reg -is limited to 15 (0xf). The switch actually sends and accepts data in 4 -different steps of len values. -Len steps: -- 0: nothing -- 1-4: first 4 byte -- 5-6: first 12 byte -- 7-15: all 16 byte - -In the alloc skb function we check if the len is 16 and we fix it to a -len of 15. It the read/write function interest to extract the real asked -data. The tagger handler will always copy the fully 16byte with a READ -command. This is useful for some big regs like the fdb reg that are -more than 4byte of data. This permits to introduce a bulk function that -will send and request the entire entry in one go. -Write function is changed and it does now require to pass the pointer to -val to also handle array val. - -Signed-off-by: Ansuel Smith -Signed-off-by: David S. Miller ---- - drivers/net/dsa/qca8k.c | 61 +++++++++++++++++++++++++++-------------- - 1 file changed, 41 insertions(+), 20 deletions(-) - ---- a/drivers/net/dsa/qca8k.c -+++ b/drivers/net/dsa/qca8k.c -@@ -222,7 +222,9 @@ static void qca8k_rw_reg_ack_handler(str - if (cmd == MDIO_READ) { - mgmt_eth_data->data[0] = mgmt_ethhdr->mdio_data; - -- /* Get the rest of the 12 byte of data */ -+ /* Get the rest of the 12 byte of data. -+ * The read/write function will extract the requested data. 
-+ */ - if (len > QCA_HDR_MGMT_DATA1_LEN) - memcpy(mgmt_eth_data->data + 1, skb->data, - QCA_HDR_MGMT_DATA2_LEN); -@@ -232,16 +234,30 @@ static void qca8k_rw_reg_ack_handler(str - } - - static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *val, -- int priority) -+ int priority, unsigned int len) - { - struct qca_mgmt_ethhdr *mgmt_ethhdr; -+ unsigned int real_len; - struct sk_buff *skb; -+ u32 *data2; - u16 hdr; - - skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN); - if (!skb) - return NULL; - -+ /* Max value for len reg is 15 (0xf) but the switch actually return 16 byte -+ * Actually for some reason the steps are: -+ * 0: nothing -+ * 1-4: first 4 byte -+ * 5-6: first 12 byte -+ * 7-15: all 16 byte -+ */ -+ if (len == 16) -+ real_len = 15; -+ else -+ real_len = len; -+ - skb_reset_mac_header(skb); - skb_set_network_header(skb, skb->len); - -@@ -254,7 +270,7 @@ static struct sk_buff *qca8k_alloc_mdio_ - hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG); - - mgmt_ethhdr->command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg); -- mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, 4); -+ mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len); - mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd); - mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE, - QCA_HDR_MGMT_CHECK_CODE_VAL); -@@ -264,7 +280,9 @@ static struct sk_buff *qca8k_alloc_mdio_ - - mgmt_ethhdr->hdr = htons(hdr); - -- skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN); -+ data2 = skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN); -+ if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN) -+ memcpy(data2, val + 1, len - QCA_HDR_MGMT_DATA1_LEN); - - return skb; - } -@@ -277,7 +295,7 @@ static void qca8k_mdio_header_fill_seq_n - mgmt_ethhdr->seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num); - } - --static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val) -+static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len) - { - struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data; - struct sk_buff *skb; -@@ -285,7 +303,7 @@ static int qca8k_read_eth(struct qca8k_p - int ret; - - skb = qca8k_alloc_mdio_header(MDIO_READ, reg, NULL, -- QCA8K_ETHERNET_MDIO_PRIORITY); -+ QCA8K_ETHERNET_MDIO_PRIORITY, len); - if (!skb) - return -ENOMEM; - -@@ -313,6 +331,9 @@ static int qca8k_read_eth(struct qca8k_p - msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT)); - - *val = mgmt_eth_data->data[0]; -+ if (len > QCA_HDR_MGMT_DATA1_LEN) -+ memcpy(val + 1, mgmt_eth_data->data + 1, len - QCA_HDR_MGMT_DATA1_LEN); -+ - ack = mgmt_eth_data->ack; - - mutex_unlock(&mgmt_eth_data->mutex); -@@ -326,15 +347,15 @@ static int qca8k_read_eth(struct qca8k_p - return 0; - } - --static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 val) -+static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len) - { - struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data; - struct sk_buff *skb; - bool ack; - int ret; - -- skb = qca8k_alloc_mdio_header(MDIO_WRITE, reg, &val, -- QCA8K_ETHERNET_MDIO_PRIORITY); -+ skb = qca8k_alloc_mdio_header(MDIO_WRITE, reg, val, -+ QCA8K_ETHERNET_MDIO_PRIORITY, len); - if (!skb) - return -ENOMEM; - -@@ -380,14 +401,14 @@ qca8k_regmap_update_bits_eth(struct qca8 - u32 val = 0; - int ret; - -- ret = qca8k_read_eth(priv, reg, &val); -+ ret = qca8k_read_eth(priv, reg, &val, sizeof(val)); - if (ret) - return ret; - - val &= ~mask; - val |= write_val; - -- return qca8k_write_eth(priv, reg, val); -+ 
return qca8k_write_eth(priv, reg, &val, sizeof(val)); - } - - static int -@@ -398,7 +419,7 @@ qca8k_regmap_read(void *ctx, uint32_t re - u16 r1, r2, page; - int ret; - -- if (!qca8k_read_eth(priv, reg, val)) -+ if (!qca8k_read_eth(priv, reg, val, sizeof(val))) - return 0; - - qca8k_split_addr(reg, &r1, &r2, &page); -@@ -424,7 +445,7 @@ qca8k_regmap_write(void *ctx, uint32_t r - u16 r1, r2, page; - int ret; - -- if (!qca8k_write_eth(priv, reg, val)) -+ if (!qca8k_write_eth(priv, reg, &val, sizeof(val))) - return 0; - - qca8k_split_addr(reg, &r1, &r2, &page); -@@ -959,21 +980,21 @@ qca8k_phy_eth_command(struct qca8k_priv - } - - /* Prealloc all the needed skb before the lock */ -- write_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, -- &write_val, QCA8K_ETHERNET_PHY_PRIORITY); -+ write_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &write_val, -+ QCA8K_ETHERNET_PHY_PRIORITY, sizeof(write_val)); - if (!write_skb) - return -ENOMEM; - -- clear_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, -- &clear_val, QCA8K_ETHERNET_PHY_PRIORITY); -+ clear_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &clear_val, -+ QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val)); - if (!write_skb) { - ret = -ENOMEM; - goto err_clear_skb; - } - -- read_skb = qca8k_alloc_mdio_header(MDIO_READ, QCA8K_MDIO_MASTER_CTRL, -- &clear_val, QCA8K_ETHERNET_PHY_PRIORITY); -- if (!write_skb) { -+ read_skb = qca8k_alloc_mdio_header(MDIO_READ, QCA8K_MDIO_MASTER_CTRL, &clear_val, -+ QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val)); -+ if (!read_skb) { - ret = -ENOMEM; - goto err_read_skb; - } diff --git a/target/linux/generic/backport-6.1/766-v5.18-16-net-dsa-qca8k-introduce-qca8k_bulk_read-write-functi.patch b/target/linux/generic/backport-6.1/766-v5.18-16-net-dsa-qca8k-introduce-qca8k_bulk_read-write-functi.patch deleted file mode 100644 index f26c6b91ac59..000000000000 --- a/target/linux/generic/backport-6.1/766-v5.18-16-net-dsa-qca8k-introduce-qca8k_bulk_read-write-functi.patch +++ /dev/null @@ -1,104 +0,0 @@ -From 4f3701fc599820568ba4395070d34e4248800fc0 Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Wed, 2 Feb 2022 01:03:35 +0100 -Subject: [PATCH 16/16] net: dsa: qca8k: introduce qca8k_bulk_read/write - function - -Introduce qca8k_bulk_read/write() function to use mgmt Ethernet way to -read/write packet in bulk. Make use of this new function in the fdb -function and while at it reduce the reg for fdb_read from 4 to 3 as the -max bit for the ARL(fdb) table is 83 bits. - -Signed-off-by: Ansuel Smith -Signed-off-by: David S. 
Miller ---- - drivers/net/dsa/qca8k.c | 55 ++++++++++++++++++++++++++++++++--------- - 1 file changed, 43 insertions(+), 12 deletions(-) - ---- a/drivers/net/dsa/qca8k.c -+++ b/drivers/net/dsa/qca8k.c -@@ -412,6 +412,43 @@ qca8k_regmap_update_bits_eth(struct qca8 - } - - static int -+qca8k_bulk_read(struct qca8k_priv *priv, u32 reg, u32 *val, int len) -+{ -+ int i, count = len / sizeof(u32), ret; -+ -+ if (priv->mgmt_master && !qca8k_read_eth(priv, reg, val, len)) -+ return 0; -+ -+ for (i = 0; i < count; i++) { -+ ret = regmap_read(priv->regmap, reg + (i * 4), val + i); -+ if (ret < 0) -+ return ret; -+ } -+ -+ return 0; -+} -+ -+static int -+qca8k_bulk_write(struct qca8k_priv *priv, u32 reg, u32 *val, int len) -+{ -+ int i, count = len / sizeof(u32), ret; -+ u32 tmp; -+ -+ if (priv->mgmt_master && !qca8k_write_eth(priv, reg, val, len)) -+ return 0; -+ -+ for (i = 0; i < count; i++) { -+ tmp = val[i]; -+ -+ ret = regmap_write(priv->regmap, reg + (i * 4), tmp); -+ if (ret < 0) -+ return ret; -+ } -+ -+ return 0; -+} -+ -+static int - qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val) - { - struct qca8k_priv *priv = (struct qca8k_priv *)ctx; -@@ -546,17 +583,13 @@ qca8k_busy_wait(struct qca8k_priv *priv, - static int - qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb) - { -- u32 reg[4], val; -- int i, ret; -+ u32 reg[3]; -+ int ret; - - /* load the ARL table into an array */ -- for (i = 0; i < 4; i++) { -- ret = qca8k_read(priv, QCA8K_REG_ATU_DATA0 + (i * 4), &val); -- if (ret < 0) -- return ret; -- -- reg[i] = val; -- } -+ ret = qca8k_bulk_read(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg)); -+ if (ret) -+ return ret; - - /* vid - 83:72 */ - fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]); -@@ -580,7 +613,6 @@ qca8k_fdb_write(struct qca8k_priv *priv, - u8 aging) - { - u32 reg[3] = { 0 }; -- int i; - - /* vid - 83:72 */ - reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid); -@@ -597,8 +629,7 @@ qca8k_fdb_write(struct qca8k_priv *priv, - reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]); - - /* load the array into the ARL table */ -- for (i = 0; i < 3; i++) -- qca8k_write(priv, QCA8K_REG_ATU_DATA0 + (i * 4), reg[i]); -+ qca8k_bulk_write(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg)); - } - - static int diff --git a/target/linux/generic/backport-6.1/767-v5.18-net-dsa-qca8k-check-correct-variable-in-qca8k_phy_et.patch b/target/linux/generic/backport-6.1/767-v5.18-net-dsa-qca8k-check-correct-variable-in-qca8k_phy_et.patch deleted file mode 100644 index 34607c223ce4..000000000000 --- a/target/linux/generic/backport-6.1/767-v5.18-net-dsa-qca8k-check-correct-variable-in-qca8k_phy_et.patch +++ /dev/null @@ -1,28 +0,0 @@ -From c3664d913dc115cab4a5fdb5634df4887048000e Mon Sep 17 00:00:00 2001 -From: Dan Carpenter -Date: Fri, 4 Feb 2022 13:03:36 +0300 -Subject: [PATCH 1/1] net: dsa: qca8k: check correct variable in - qca8k_phy_eth_command() - -This is a copy and paste bug. It was supposed to check "clear_skb" -instead of "write_skb". - -Fixes: 2cd548566384 ("net: dsa: qca8k: add support for phy read/write with mgmt Ethernet") -Signed-off-by: Dan Carpenter -Reviewed-by: Florian Fainelli -Signed-off-by: David S. 
Miller ---- - drivers/net/dsa/qca8k.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/net/dsa/qca8k.c -+++ b/drivers/net/dsa/qca8k.c -@@ -1018,7 +1018,7 @@ qca8k_phy_eth_command(struct qca8k_priv - - clear_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &clear_val, - QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val)); -- if (!write_skb) { -+ if (!clear_skb) { - ret = -ENOMEM; - goto err_clear_skb; - } diff --git a/target/linux/generic/backport-6.1/768-v5.18-net-dsa-qca8k-fix-noderef.cocci-warnings.patch b/target/linux/generic/backport-6.1/768-v5.18-net-dsa-qca8k-fix-noderef.cocci-warnings.patch deleted file mode 100644 index d8cf26630906..000000000000 --- a/target/linux/generic/backport-6.1/768-v5.18-net-dsa-qca8k-fix-noderef.cocci-warnings.patch +++ /dev/null @@ -1,34 +0,0 @@ -From 4f5e483b8c7a644733db941a1ae00173baa7b463 Mon Sep 17 00:00:00 2001 -From: kernel test robot -Date: Thu, 10 Feb 2022 06:13:04 +0800 -Subject: [PATCH 1/1] net: dsa: qca8k: fix noderef.cocci warnings - -drivers/net/dsa/qca8k.c:422:37-43: ERROR: application of sizeof to pointer - - sizeof when applied to a pointer typed expression gives the size of - the pointer - -Generated by: scripts/coccinelle/misc/noderef.cocci - -Fixes: 90386223f44e ("net: dsa: qca8k: add support for larger read/write size with mgmt Ethernet") -CC: Ansuel Smith -Reported-by: kernel test robot -Signed-off-by: kernel test robot -Reviewed-by: Florian Fainelli -Link: https://lore.kernel.org/r/20220209221304.GA17529@d2214a582157 -Signed-off-by: Jakub Kicinski ---- - drivers/net/dsa/qca8k.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/net/dsa/qca8k.c -+++ b/drivers/net/dsa/qca8k.c -@@ -456,7 +456,7 @@ qca8k_regmap_read(void *ctx, uint32_t re - u16 r1, r2, page; - int ret; - -- if (!qca8k_read_eth(priv, reg, val, sizeof(val))) -+ if (!qca8k_read_eth(priv, reg, val, sizeof(*val))) - return 0; - - qca8k_split_addr(reg, &r1, &r2, &page); diff --git a/target/linux/generic/backport-6.1/769-v5.19-01-net-dsa-qca8k-drop-MTU-tracking-from-qca8k_priv.patch b/target/linux/generic/backport-6.1/769-v5.19-01-net-dsa-qca8k-drop-MTU-tracking-from-qca8k_priv.patch deleted file mode 100644 index 57df4c126e49..000000000000 --- a/target/linux/generic/backport-6.1/769-v5.19-01-net-dsa-qca8k-drop-MTU-tracking-from-qca8k_priv.patch +++ /dev/null @@ -1,79 +0,0 @@ -From 69fd055957a02309ffdc23d887a01988b6e5bab1 Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Sat, 16 Apr 2022 01:30:12 +0200 -Subject: [PATCH 1/6] net: dsa: qca8k: drop MTU tracking from qca8k_priv - -DSA set the CPU port based on the largest MTU of all the slave ports. -Based on this we can drop the MTU array from qca8k_priv and set the -port_change_mtu logic on DSA changing MTU of the CPU port as the switch -have a global MTU settingfor each port. - -Signed-off-by: Ansuel Smith -Reviewed-by: Vladimir Oltean -Signed-off-by: David S. Miller ---- - drivers/net/dsa/qca8k.c | 26 +++++++++----------------- - drivers/net/dsa/qca8k.h | 1 - - 2 files changed, 9 insertions(+), 18 deletions(-) - ---- a/drivers/net/dsa/qca8k.c -+++ b/drivers/net/dsa/qca8k.c -@@ -1803,16 +1803,6 @@ qca8k_setup(struct dsa_switch *ds) - QCA8K_PORT_HOL_CTRL1_WRED_EN, - mask); - } -- -- /* Set initial MTU for every port. -- * We have only have a general MTU setting. So track -- * every port and set the max across all port. 
-- * Set per port MTU to 1500 as the MTU change function -- * will add the overhead and if its set to 1518 then it -- * will apply the overhead again and we will end up with -- * MTU of 1536 instead of 1518 -- */ -- priv->port_mtu[i] = ETH_DATA_LEN; - } - - /* Special GLOBAL_FC_THRESH value are needed for ar8327 switch */ -@@ -2525,13 +2515,16 @@ static int - qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu) - { - struct qca8k_priv *priv = ds->priv; -- int ret, i, mtu = 0; -- -- priv->port_mtu[port] = new_mtu; -+ int ret; - -- for (i = 0; i < QCA8K_NUM_PORTS; i++) -- if (priv->port_mtu[i] > mtu) -- mtu = priv->port_mtu[i]; -+ /* We have only have a general MTU setting. -+ * DSA always set the CPU port's MTU to the largest MTU of the slave -+ * ports. -+ * Setting MTU just for the CPU port is sufficient to correctly set a -+ * value for every port. -+ */ -+ if (!dsa_is_cpu_port(ds, port)) -+ return 0; - - /* To change the MAX_FRAME_SIZE the cpu ports must be off or - * the switch panics. -@@ -2545,7 +2538,7 @@ qca8k_port_change_mtu(struct dsa_switch - qca8k_port_set_status(priv, 6, 0); - - /* Include L2 header / FCS length */ -- ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, mtu + ETH_HLEN + ETH_FCS_LEN); -+ ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu + ETH_HLEN + ETH_FCS_LEN); - - if (priv->port_sts[0].enabled) - qca8k_port_set_status(priv, 0, 1); ---- a/drivers/net/dsa/qca8k.h -+++ b/drivers/net/dsa/qca8k.h -@@ -392,7 +392,6 @@ struct qca8k_priv { - struct device *dev; - struct dsa_switch_ops ops; - struct gpio_desc *reset_gpio; -- unsigned int port_mtu[QCA8K_NUM_PORTS]; - struct net_device *mgmt_master; /* Track if mdio/mib Ethernet is available */ - struct qca8k_mgmt_eth_data mgmt_eth_data; - struct qca8k_mib_eth_data mib_eth_data; diff --git a/target/linux/generic/backport-6.1/769-v5.19-02-net-dsa-qca8k-drop-port_sts-from-qca8k_priv.patch b/target/linux/generic/backport-6.1/769-v5.19-02-net-dsa-qca8k-drop-port_sts-from-qca8k_priv.patch deleted file mode 100644 index 3cacd7e4fdb7..000000000000 --- a/target/linux/generic/backport-6.1/769-v5.19-02-net-dsa-qca8k-drop-port_sts-from-qca8k_priv.patch +++ /dev/null @@ -1,116 +0,0 @@ -From 2b8fd87af7f156942971789abac8ee2bb60c03bc Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Sat, 16 Apr 2022 01:30:13 +0200 -Subject: [PATCH 2/6] net: dsa: qca8k: drop port_sts from qca8k_priv - -Port_sts is a thing of the past for this driver. It was something -present on the initial implementation of this driver and parts of the -original struct were dropped over time. Using an array of int to store if -a port is enabled or not to handle PM operation seems overkill. Switch -and use a simple u8 to store the port status where each bit correspond -to a port. (bit is set port is enabled, bit is not set, port is disabled) -Also add some comments to better describe why we need to track port -status. - -Signed-off-by: Ansuel Smith -Reviewed-by: Vladimir Oltean -Signed-off-by: David S. 
Miller ---- - drivers/net/dsa/qca8k.c | 15 +++++++++------ - drivers/net/dsa/qca8k.h | 9 ++++----- - 2 files changed, 13 insertions(+), 11 deletions(-) - ---- a/drivers/net/dsa/qca8k.c -+++ b/drivers/net/dsa/qca8k.c -@@ -2494,7 +2494,7 @@ qca8k_port_enable(struct dsa_switch *ds, - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; - - qca8k_port_set_status(priv, port, 1); -- priv->port_sts[port].enabled = 1; -+ priv->port_enabled_map |= BIT(port); - - if (dsa_is_user_port(ds, port)) - phy_support_asym_pause(phy); -@@ -2508,7 +2508,7 @@ qca8k_port_disable(struct dsa_switch *ds - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; - - qca8k_port_set_status(priv, port, 0); -- priv->port_sts[port].enabled = 0; -+ priv->port_enabled_map &= ~BIT(port); - } - - static int -@@ -2531,19 +2531,19 @@ qca8k_port_change_mtu(struct dsa_switch - * Turn off both cpu ports before applying the new value to prevent - * this. - */ -- if (priv->port_sts[0].enabled) -+ if (priv->port_enabled_map & BIT(0)) - qca8k_port_set_status(priv, 0, 0); - -- if (priv->port_sts[6].enabled) -+ if (priv->port_enabled_map & BIT(6)) - qca8k_port_set_status(priv, 6, 0); - - /* Include L2 header / FCS length */ - ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu + ETH_HLEN + ETH_FCS_LEN); - -- if (priv->port_sts[0].enabled) -+ if (priv->port_enabled_map & BIT(0)) - qca8k_port_set_status(priv, 0, 1); - -- if (priv->port_sts[6].enabled) -+ if (priv->port_enabled_map & BIT(6)) - qca8k_port_set_status(priv, 6, 1); - - return ret; -@@ -3199,13 +3199,16 @@ static void qca8k_sw_shutdown(struct mdi - static void - qca8k_set_pm(struct qca8k_priv *priv, int enable) - { -- int i; -+ int port; - -- for (i = 0; i < QCA8K_NUM_PORTS; i++) { -- if (!priv->port_sts[i].enabled) -+ for (port = 0; port < QCA8K_NUM_PORTS; port++) { -+ /* Do not enable on resume if the port was -+ * disabled before. -+ */ -+ if (!(priv->port_enabled_map & BIT(port))) - continue; - -- qca8k_port_set_status(priv, i, enable); -+ qca8k_port_set_status(priv, port, enable); - } - } - ---- a/drivers/net/dsa/qca8k.h -+++ b/drivers/net/dsa/qca8k.h -@@ -324,10 +324,6 @@ enum qca8k_mid_cmd { - QCA8K_MIB_CAST = 3, - }; - --struct ar8xxx_port_status { -- int enabled; --}; -- - struct qca8k_match_data { - u8 id; - bool reduced_package; -@@ -382,11 +378,14 @@ struct qca8k_priv { - u8 mirror_rx; - u8 mirror_tx; - u8 lag_hash_mode; -+ /* Each bit correspond to a port. This switch can support a max of 7 port. -+ * Bit 1: port enabled. Bit 0: port disabled. -+ */ -+ u8 port_enabled_map; - bool legacy_phy_port_mapping; - struct qca8k_ports_config ports_config; - struct regmap *regmap; - struct mii_bus *bus; -- struct ar8xxx_port_status port_sts[QCA8K_NUM_PORTS]; - struct dsa_switch *ds; - struct mutex reg_mutex; - struct device *dev; diff --git a/target/linux/generic/backport-6.1/769-v5.19-03-net-dsa-qca8k-rework-and-simplify-mdiobus-logic.patch b/target/linux/generic/backport-6.1/769-v5.19-03-net-dsa-qca8k-rework-and-simplify-mdiobus-logic.patch deleted file mode 100644 index 12c3221077ca..000000000000 --- a/target/linux/generic/backport-6.1/769-v5.19-03-net-dsa-qca8k-rework-and-simplify-mdiobus-logic.patch +++ /dev/null @@ -1,173 +0,0 @@ -From 8255212e4130bd2dc1463286a3dddb74797bbdc1 Mon Sep 17 00:00:00 2001 -From: Ansuel Smith -Date: Sat, 16 Apr 2022 01:30:14 +0200 -Subject: [PATCH 3/6] net: dsa: qca8k: rework and simplify mdiobus logic - -In an attempt to reduce qca8k_priv space, rework and simplify mdiobus -logic. 
-We now declare a mdiobus instead of relying on DSA phy_read/write even -if a mdio node is not present. This is all to make the qca8k ops static -and not switch specific. With a legacy implementation where port doesn't -have a phy map declared in the dts with a mdio node, we declare a -'qca8k-legacy' mdiobus. The conversion logic is used as legacy read and -write ops are used instead of the internal one. -Also drop the legacy_phy_port_mapping as we now declare mdiobus with ops -that already address the workaround. - -Signed-off-by: Ansuel Smith -Signed-off-by: David S. Miller ---- - drivers/net/dsa/qca8k.c | 95 +++++++++++++---------------------------- - drivers/net/dsa/qca8k.h | 1 - - 2 files changed, 29 insertions(+), 67 deletions(-) - ---- a/drivers/net/dsa/qca8k.c -+++ b/drivers/net/dsa/qca8k.c -@@ -1291,83 +1291,63 @@ qca8k_internal_mdio_read(struct mii_bus - } - - static int --qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data) -+qca8k_legacy_mdio_write(struct mii_bus *slave_bus, int port, int regnum, u16 data) - { -- struct qca8k_priv *priv = ds->priv; -- int ret; -+ port = qca8k_port_to_phy(port) % PHY_MAX_ADDR; - -- /* Check if the legacy mapping should be used and the -- * port is not correctly mapped to the right PHY in the -- * devicetree -- */ -- if (priv->legacy_phy_port_mapping) -- port = qca8k_port_to_phy(port) % PHY_MAX_ADDR; -- -- /* Use mdio Ethernet when available, fallback to legacy one on error */ -- ret = qca8k_phy_eth_command(priv, false, port, regnum, 0); -- if (!ret) -- return ret; -- -- return qca8k_mdio_write(priv, port, regnum, data); -+ return qca8k_internal_mdio_write(slave_bus, port, regnum, data); - } - - static int --qca8k_phy_read(struct dsa_switch *ds, int port, int regnum) -+qca8k_legacy_mdio_read(struct mii_bus *slave_bus, int port, int regnum) - { -- struct qca8k_priv *priv = ds->priv; -- int ret; -- -- /* Check if the legacy mapping should be used and the -- * port is not correctly mapped to the right PHY in the -- * devicetree -- */ -- if (priv->legacy_phy_port_mapping) -- port = qca8k_port_to_phy(port) % PHY_MAX_ADDR; -- -- /* Use mdio Ethernet when available, fallback to legacy one on error */ -- ret = qca8k_phy_eth_command(priv, true, port, regnum, 0); -- if (ret >= 0) -- return ret; -- -- ret = qca8k_mdio_read(priv, port, regnum); -- -- if (ret < 0) -- return 0xffff; -+ port = qca8k_port_to_phy(port) % PHY_MAX_ADDR; - -- return ret; -+ return qca8k_internal_mdio_read(slave_bus, port, regnum); - } - - static int --qca8k_mdio_register(struct qca8k_priv *priv, struct device_node *mdio) -+qca8k_mdio_register(struct qca8k_priv *priv) - { - struct dsa_switch *ds = priv->ds; -+ struct device_node *mdio; - struct mii_bus *bus; - - bus = devm_mdiobus_alloc(ds->dev); -- - if (!bus) - return -ENOMEM; - - bus->priv = (void *)priv; -- bus->name = "qca8k slave mii"; -- bus->read = qca8k_internal_mdio_read; -- bus->write = qca8k_internal_mdio_write; -- snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d", -- ds->index); -- - bus->parent = ds->dev; - bus->phy_mask = ~ds->phys_mii_mask; -- - ds->slave_mii_bus = bus; - -- return devm_of_mdiobus_register(priv->dev, bus, mdio); -+ /* Check if the devicetree declare the port:phy mapping */ -+ mdio = of_get_child_by_name(priv->dev->of_node, "mdio"); -+ if (of_device_is_available(mdio)) { -+ snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d", ds->index); -+ bus->name = "qca8k slave mii"; -+ bus->read = qca8k_internal_mdio_read; -+ bus->write = qca8k_internal_mdio_write; -+ return 
devm_of_mdiobus_register(priv->dev, bus, mdio);
-+ }
-+
-+ /* If a mapping can't be found the legacy mapping is used,
-+ * using the qca8k_port_to_phy function
-+ */
-+ snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d.%d",
-+ ds->dst->index, ds->index);
-+ bus->name = "qca8k-legacy slave mii";
-+ bus->read = qca8k_legacy_mdio_read;
-+ bus->write = qca8k_legacy_mdio_write;
-+ return devm_mdiobus_register(priv->dev, bus);
- }
-
- static int
- qca8k_setup_mdio_bus(struct qca8k_priv *priv)
- {
- u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
-- struct device_node *ports, *port, *mdio;
-+ struct device_node *ports, *port;
- phy_interface_t mode;
- int err;
-
-@@ -1429,24 +1409,7 @@ qca8k_setup_mdio_bus(struct qca8k_priv *
- QCA8K_MDIO_MASTER_EN);
- }
-
-- /* Check if the devicetree declare the port:phy mapping */
-- mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
-- if (of_device_is_available(mdio)) {
-- err = qca8k_mdio_register(priv, mdio);
-- if (err)
-- of_node_put(mdio);
--
-- return err;
-- }
--
-- /* If a mapping can't be found the legacy mapping is used,
-- * using the qca8k_port_to_phy function
-- */
-- priv->legacy_phy_port_mapping = true;
-- priv->ops.phy_read = qca8k_phy_read;
-- priv->ops.phy_write = qca8k_phy_write;
--
-- return 0;
-+ return qca8k_mdio_register(priv);
- }
-
- static int
---- a/drivers/net/dsa/qca8k.h
-+++ b/drivers/net/dsa/qca8k.h
-@@ -382,7 +382,6 @@ struct qca8k_priv {
- * Bit 1: port enabled. Bit 0: port disabled.
- */
- u8 port_enabled_map;
-- bool legacy_phy_port_mapping;
- struct qca8k_ports_config ports_config;
- struct regmap *regmap;
- struct mii_bus *bus;
diff --git a/target/linux/generic/backport-6.1/769-v5.19-04-net-dsa-qca8k-drop-dsa_switch_ops-from-qca8k_priv.patch b/target/linux/generic/backport-6.1/769-v5.19-04-net-dsa-qca8k-drop-dsa_switch_ops-from-qca8k_priv.patch
deleted file mode 100644
index 8641000abbc6..000000000000
--- a/target/linux/generic/backport-6.1/769-v5.19-04-net-dsa-qca8k-drop-dsa_switch_ops-from-qca8k_priv.patch
+++ /dev/null
@@ -1,39 +0,0 @@
-From 2349b83a2486c55b9dd225326f0172a84a43c5e4 Mon Sep 17 00:00:00 2001
-From: Ansuel Smith
-Date: Sat, 16 Apr 2022 01:30:15 +0200
-Subject: [PATCH 4/6] net: dsa: qca8k: drop dsa_switch_ops from qca8k_priv
-
-Now that dsa_switch_ops is not switch specific anymore, we can drop it
-from qca8k_priv and use the static ops directly for the dsa_switch
-pointer.
-
-Signed-off-by: Ansuel Smith
-Reviewed-by: Vladimir Oltean
-Signed-off-by: David S. Miller
---
- drivers/net/dsa/qca8k.c | 3 +--
- drivers/net/dsa/qca8k.h | 1 -
- 2 files changed, 1 insertion(+), 3 deletions(-)
-
---- a/drivers/net/dsa/qca8k.c
-+++ b/drivers/net/dsa/qca8k.c
-@@ -3121,8 +3121,7 @@ qca8k_sw_probe(struct mdio_device *mdiod
- priv->ds->dev = &mdiodev->dev;
- priv->ds->num_ports = QCA8K_NUM_PORTS;
- priv->ds->priv = priv;
-- priv->ops = qca8k_switch_ops;
-- priv->ds->ops = &priv->ops;
-+ priv->ds->ops = &qca8k_switch_ops;
- mutex_init(&priv->reg_mutex);
- dev_set_drvdata(&mdiodev->dev, priv);
-
---- a/drivers/net/dsa/qca8k.h
-+++ b/drivers/net/dsa/qca8k.h
-@@ -388,7 +388,6 @@ struct qca8k_priv {
- struct dsa_switch *ds;
- struct mutex reg_mutex;
- struct device *dev;
-- struct dsa_switch_ops ops;
- struct gpio_desc *reset_gpio;
- struct net_device *mgmt_master; /* Track if mdio/mib Ethernet is available */
- struct qca8k_mgmt_eth_data mgmt_eth_data;
diff --git a/target/linux/generic/backport-6.1/769-v5.19-05-net-dsa-qca8k-correctly-handle-mdio-read-error.patch b/target/linux/generic/backport-6.1/769-v5.19-05-net-dsa-qca8k-correctly-handle-mdio-read-error.patch
deleted file mode 100644
index b14b22091bc1..000000000000
--- a/target/linux/generic/backport-6.1/769-v5.19-05-net-dsa-qca8k-correctly-handle-mdio-read-error.patch
+++ /dev/null
@@ -1,33 +0,0 @@
-From 6cfc03b602200c5cbbd8d906fd905547814e83df Mon Sep 17 00:00:00 2001
-From: Ansuel Smith
-Date: Sat, 16 Apr 2022 01:30:16 +0200
-Subject: [PATCH 5/6] net: dsa: qca8k: correctly handle mdio read error
-
-Restore original way to handle mdio read error by returning 0xffff.
-This was wrongly changed when the internal_mdio_read was introduced,
-now that both legacy and internal use the same function, make sure that
-they behave the same way.
-
-Fixes: ce062a0adbfe ("net: dsa: qca8k: fix kernel panic with legacy mdio mapping")
-Signed-off-by: Ansuel Smith
-Signed-off-by: David S. Miller
---
- drivers/net/dsa/qca8k.c | 7 ++++++-
- 1 file changed, 6 insertions(+), 1 deletion(-)
-
---- a/drivers/net/dsa/qca8k.c
-+++ b/drivers/net/dsa/qca8k.c
-@@ -1287,7 +1287,12 @@ qca8k_internal_mdio_read(struct mii_bus
- if (ret >= 0)
- return ret;
-
-- return qca8k_mdio_read(priv, phy, regnum);
-+ ret = qca8k_mdio_read(priv, phy, regnum);
-+
-+ if (ret < 0)
-+ return 0xffff;
-+
-+ return ret;
- }
-
- static int
diff --git a/target/linux/generic/backport-6.1/769-v5.19-06-net-dsa-qca8k-unify-bus-id-naming-with-legacy-and-OF.patch b/target/linux/generic/backport-6.1/769-v5.19-06-net-dsa-qca8k-unify-bus-id-naming-with-legacy-and-OF.patch
deleted file mode 100644
index 094686f11bef..000000000000
--- a/target/linux/generic/backport-6.1/769-v5.19-06-net-dsa-qca8k-unify-bus-id-naming-with-legacy-and-OF.patch
+++ /dev/null
@@ -1,44 +0,0 @@
-From 8d1af50842bf2774f4edc57054206e909117469b Mon Sep 17 00:00:00 2001
-From: Ansuel Smith
-Date: Sat, 16 Apr 2022 01:30:17 +0200
-Subject: [PATCH 6/6] net: dsa: qca8k: unify bus id naming with legacy and OF
- mdio bus
-
-Add support for multiple switch with OF mdio bus declaration.
-Unify the bus id naming and use the same logic for both legacy and OF
-mdio bus.
-
-Signed-off-by: Ansuel Smith
-Signed-off-by: David S. Miller
---
- drivers/net/dsa/qca8k.c | 5 ++---
- 1 file changed, 2 insertions(+), 3 deletions(-)
-
---- a/drivers/net/dsa/qca8k.c
-+++ b/drivers/net/dsa/qca8k.c
-@@ -1323,6 +1323,8 @@ qca8k_mdio_register(struct qca8k_priv *p
- return -ENOMEM;
-
- bus->priv = (void *)priv;
-+ snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d.%d",
-+ ds->dst->index, ds->index);
- bus->parent = ds->dev;
- bus->phy_mask = ~ds->phys_mii_mask;
- ds->slave_mii_bus = bus;
-@@ -1330,7 +1332,6 @@ qca8k_mdio_register(struct qca8k_priv *p
- /* Check if the devicetree declare the port:phy mapping */
- mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
- if (of_device_is_available(mdio)) {
-- snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d", ds->index);
- bus->name = "qca8k slave mii";
- bus->read = qca8k_internal_mdio_read;
- bus->write = qca8k_internal_mdio_write;
-@@ -1340,8 +1341,6 @@ qca8k_mdio_register(struct qca8k_priv *p
- /* If a mapping can't be found the legacy mapping is used,
- * using the qca8k_port_to_phy function
- */
-- snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d.%d",
-- ds->dst->index, ds->index);
- bus->name = "qca8k-legacy slave mii";
- bus->read = qca8k_legacy_mdio_read;
- bus->write = qca8k_legacy_mdio_write;
diff --git a/target/linux/generic/backport-6.1/770-v6.0-net-dsa-qca8k-move-driver-to-qca-dir.patch b/target/linux/generic/backport-6.1/770-v6.0-net-dsa-qca8k-move-driver-to-qca-dir.patch
deleted file mode 100644
index 1534113f1df6..000000000000
--- a/target/linux/generic/backport-6.1/770-v6.0-net-dsa-qca8k-move-driver-to-qca-dir.patch
+++ /dev/null
@@ -1,7389 +0,0 @@
-From 4bbaf764e1e1786eb937fdb62172f656f512e116 Mon Sep 17 00:00:00 2001
-From: Christian Marangi
-Date: Wed, 13 Jul 2022 22:53:50 +0200
-Subject: [PATCH 1/1] net: dsa: qca8k: move driver to qca dir
-
-Move qca8k driver to qca dir in preparation for code split and
-introduction of ipq4019 switch based on qca8k.
-
-Signed-off-by: Christian Marangi
-Reviewed-by: Florian Fainelli
-Signed-off-by: David S. Miller
---
- drivers/net/dsa/Kconfig | 8 --------
- drivers/net/dsa/Makefile | 1 -
- drivers/net/dsa/qca/Kconfig | 8 ++++++++
- drivers/net/dsa/qca/Makefile | 1 +
- drivers/net/dsa/{ => qca}/qca8k.c | 0
- drivers/net/dsa/{ => qca}/qca8k.h | 0
- 6 files changed, 9 insertions(+), 9 deletions(-)
- rename drivers/net/dsa/{ => qca}/qca8k.c (100%)
- rename drivers/net/dsa/{ => qca}/qca8k.h (100%)
-
---- a/drivers/net/dsa/Kconfig
-+++ b/drivers/net/dsa/Kconfig
-@@ -60,14 +60,6 @@ source "drivers/net/dsa/sja1105/Kconfig"
-
- source "drivers/net/dsa/xrs700x/Kconfig"
-
--config NET_DSA_QCA8K
-- tristate "Qualcomm Atheros QCA8K Ethernet switch family support"
-- select NET_DSA_TAG_QCA
-- select REGMAP
-- help
-- This enables support for the Qualcomm Atheros QCA8K Ethernet
-- switch chips.
--
- config NET_DSA_REALTEK_SMI
- tristate "Realtek SMI Ethernet switch family support"
- select NET_DSA_TAG_RTL4_A
---- a/drivers/net/dsa/Makefile
-+++ b/drivers/net/dsa/Makefile
-@@ -8,7 +8,6 @@ endif
- obj-$(CONFIG_NET_DSA_LANTIQ_GSWIP) += lantiq_gswip.o
- obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o
- obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
--obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
- obj-$(CONFIG_NET_DSA_REALTEK_SMI) += realtek-smi.o
- realtek-smi-objs := realtek-smi-core.o rtl8366.o rtl8366rb.o
- obj-$(CONFIG_NET_DSA_SMSC_LAN9303) += lan9303-core.o
---- a/drivers/net/dsa/qca/Kconfig
-+++ b/drivers/net/dsa/qca/Kconfig
-@@ -7,3 +7,11 @@ config NET_DSA_AR9331
- help
- This enables support for the Qualcomm Atheros AR9331 built-in Ethernet switch.
-+ -+config NET_DSA_QCA8K -+ tristate "Qualcomm Atheros QCA8K Ethernet switch family support" -+ select NET_DSA_TAG_QCA -+ select REGMAP -+ help -+ This enables support for the Qualcomm Atheros QCA8K Ethernet -+ switch chips. ---- a/drivers/net/dsa/qca/Makefile -+++ b/drivers/net/dsa/qca/Makefile -@@ -1,2 +1,3 @@ - # SPDX-License-Identifier: GPL-2.0-only - obj-$(CONFIG_NET_DSA_AR9331) += ar9331.o -+obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o ---- /dev/null -+++ b/drivers/net/dsa/qca/qca8k.c -@@ -0,0 +1,3243 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (C) 2009 Felix Fietkau -+ * Copyright (C) 2011-2012 Gabor Juhos -+ * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved. -+ * Copyright (c) 2016 John Crispin -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "qca8k.h" -+ -+#define MIB_DESC(_s, _o, _n) \ -+ { \ -+ .size = (_s), \ -+ .offset = (_o), \ -+ .name = (_n), \ -+ } -+ -+static const struct qca8k_mib_desc ar8327_mib[] = { -+ MIB_DESC(1, 0x00, "RxBroad"), -+ MIB_DESC(1, 0x04, "RxPause"), -+ MIB_DESC(1, 0x08, "RxMulti"), -+ MIB_DESC(1, 0x0c, "RxFcsErr"), -+ MIB_DESC(1, 0x10, "RxAlignErr"), -+ MIB_DESC(1, 0x14, "RxRunt"), -+ MIB_DESC(1, 0x18, "RxFragment"), -+ MIB_DESC(1, 0x1c, "Rx64Byte"), -+ MIB_DESC(1, 0x20, "Rx128Byte"), -+ MIB_DESC(1, 0x24, "Rx256Byte"), -+ MIB_DESC(1, 0x28, "Rx512Byte"), -+ MIB_DESC(1, 0x2c, "Rx1024Byte"), -+ MIB_DESC(1, 0x30, "Rx1518Byte"), -+ MIB_DESC(1, 0x34, "RxMaxByte"), -+ MIB_DESC(1, 0x38, "RxTooLong"), -+ MIB_DESC(2, 0x3c, "RxGoodByte"), -+ MIB_DESC(2, 0x44, "RxBadByte"), -+ MIB_DESC(1, 0x4c, "RxOverFlow"), -+ MIB_DESC(1, 0x50, "Filtered"), -+ MIB_DESC(1, 0x54, "TxBroad"), -+ MIB_DESC(1, 0x58, "TxPause"), -+ MIB_DESC(1, 0x5c, "TxMulti"), -+ MIB_DESC(1, 0x60, "TxUnderRun"), -+ MIB_DESC(1, 0x64, "Tx64Byte"), -+ MIB_DESC(1, 0x68, "Tx128Byte"), -+ MIB_DESC(1, 0x6c, "Tx256Byte"), -+ MIB_DESC(1, 0x70, "Tx512Byte"), -+ MIB_DESC(1, 0x74, "Tx1024Byte"), -+ MIB_DESC(1, 0x78, "Tx1518Byte"), -+ MIB_DESC(1, 0x7c, "TxMaxByte"), -+ MIB_DESC(1, 0x80, "TxOverSize"), -+ MIB_DESC(2, 0x84, "TxByte"), -+ MIB_DESC(1, 0x8c, "TxCollision"), -+ MIB_DESC(1, 0x90, "TxAbortCol"), -+ MIB_DESC(1, 0x94, "TxMultiCol"), -+ MIB_DESC(1, 0x98, "TxSingleCol"), -+ MIB_DESC(1, 0x9c, "TxExcDefer"), -+ MIB_DESC(1, 0xa0, "TxDefer"), -+ MIB_DESC(1, 0xa4, "TxLateCol"), -+ MIB_DESC(1, 0xa8, "RXUnicast"), -+ MIB_DESC(1, 0xac, "TXUnicast"), -+}; -+ -+static void -+qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page) -+{ -+ regaddr >>= 1; -+ *r1 = regaddr & 0x1e; -+ -+ regaddr >>= 5; -+ *r2 = regaddr & 0x7; -+ -+ regaddr >>= 3; -+ *page = regaddr & 0x3ff; -+} -+ -+static int -+qca8k_set_lo(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 lo) -+{ -+ u16 *cached_lo = &priv->mdio_cache.lo; -+ struct mii_bus *bus = priv->bus; -+ int ret; -+ -+ if (lo == *cached_lo) -+ return 0; -+ -+ ret = bus->write(bus, phy_id, regnum, lo); -+ if (ret < 0) -+ dev_err_ratelimited(&bus->dev, -+ "failed to write qca8k 32bit lo register\n"); -+ -+ *cached_lo = lo; -+ return 0; -+} -+ -+static int -+qca8k_set_hi(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 hi) -+{ -+ u16 *cached_hi = &priv->mdio_cache.hi; -+ struct mii_bus *bus = priv->bus; -+ int ret; -+ -+ if (hi == *cached_hi) -+ return 0; -+ -+ ret = bus->write(bus, phy_id, regnum, hi); -+ if (ret < 0) -+ dev_err_ratelimited(&bus->dev, -+ "failed to write qca8k 32bit hi register\n"); -+ -+ 
*cached_hi = hi; -+ return 0; -+} -+ -+static int -+qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val) -+{ -+ int ret; -+ -+ ret = bus->read(bus, phy_id, regnum); -+ if (ret >= 0) { -+ *val = ret; -+ ret = bus->read(bus, phy_id, regnum + 1); -+ *val |= ret << 16; -+ } -+ -+ if (ret < 0) { -+ dev_err_ratelimited(&bus->dev, -+ "failed to read qca8k 32bit register\n"); -+ *val = 0; -+ return ret; -+ } -+ -+ return 0; -+} -+ -+static void -+qca8k_mii_write32(struct qca8k_priv *priv, int phy_id, u32 regnum, u32 val) -+{ -+ u16 lo, hi; -+ int ret; -+ -+ lo = val & 0xffff; -+ hi = (u16)(val >> 16); -+ -+ ret = qca8k_set_lo(priv, phy_id, regnum, lo); -+ if (ret >= 0) -+ ret = qca8k_set_hi(priv, phy_id, regnum + 1, hi); -+} -+ -+static int -+qca8k_set_page(struct qca8k_priv *priv, u16 page) -+{ -+ u16 *cached_page = &priv->mdio_cache.page; -+ struct mii_bus *bus = priv->bus; -+ int ret; -+ -+ if (page == *cached_page) -+ return 0; -+ -+ ret = bus->write(bus, 0x18, 0, page); -+ if (ret < 0) { -+ dev_err_ratelimited(&bus->dev, -+ "failed to set qca8k page\n"); -+ return ret; -+ } -+ -+ *cached_page = page; -+ usleep_range(1000, 2000); -+ return 0; -+} -+ -+static int -+qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val) -+{ -+ return regmap_read(priv->regmap, reg, val); -+} -+ -+static int -+qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val) -+{ -+ return regmap_write(priv->regmap, reg, val); -+} -+ -+static int -+qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val) -+{ -+ return regmap_update_bits(priv->regmap, reg, mask, write_val); -+} -+ -+static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb) -+{ -+ struct qca8k_mgmt_eth_data *mgmt_eth_data; -+ struct qca8k_priv *priv = ds->priv; -+ struct qca_mgmt_ethhdr *mgmt_ethhdr; -+ u8 len, cmd; -+ -+ mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb_mac_header(skb); -+ mgmt_eth_data = &priv->mgmt_eth_data; -+ -+ cmd = FIELD_GET(QCA_HDR_MGMT_CMD, mgmt_ethhdr->command); -+ len = FIELD_GET(QCA_HDR_MGMT_LENGTH, mgmt_ethhdr->command); -+ -+ /* Make sure the seq match the requested packet */ -+ if (mgmt_ethhdr->seq == mgmt_eth_data->seq) -+ mgmt_eth_data->ack = true; -+ -+ if (cmd == MDIO_READ) { -+ mgmt_eth_data->data[0] = mgmt_ethhdr->mdio_data; -+ -+ /* Get the rest of the 12 byte of data. -+ * The read/write function will extract the requested data. 
-+ */ -+ if (len > QCA_HDR_MGMT_DATA1_LEN) -+ memcpy(mgmt_eth_data->data + 1, skb->data, -+ QCA_HDR_MGMT_DATA2_LEN); -+ } -+ -+ complete(&mgmt_eth_data->rw_done); -+} -+ -+static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *val, -+ int priority, unsigned int len) -+{ -+ struct qca_mgmt_ethhdr *mgmt_ethhdr; -+ unsigned int real_len; -+ struct sk_buff *skb; -+ u32 *data2; -+ u16 hdr; -+ -+ skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN); -+ if (!skb) -+ return NULL; -+ -+ /* Max value for len reg is 15 (0xf) but the switch actually return 16 byte -+ * Actually for some reason the steps are: -+ * 0: nothing -+ * 1-4: first 4 byte -+ * 5-6: first 12 byte -+ * 7-15: all 16 byte -+ */ -+ if (len == 16) -+ real_len = 15; -+ else -+ real_len = len; -+ -+ skb_reset_mac_header(skb); -+ skb_set_network_header(skb, skb->len); -+ -+ mgmt_ethhdr = skb_push(skb, QCA_HDR_MGMT_HEADER_LEN + QCA_HDR_LEN); -+ -+ hdr = FIELD_PREP(QCA_HDR_XMIT_VERSION, QCA_HDR_VERSION); -+ hdr |= FIELD_PREP(QCA_HDR_XMIT_PRIORITY, priority); -+ hdr |= QCA_HDR_XMIT_FROM_CPU; -+ hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(0)); -+ hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG); -+ -+ mgmt_ethhdr->command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg); -+ mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len); -+ mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd); -+ mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE, -+ QCA_HDR_MGMT_CHECK_CODE_VAL); -+ -+ if (cmd == MDIO_WRITE) -+ mgmt_ethhdr->mdio_data = *val; -+ -+ mgmt_ethhdr->hdr = htons(hdr); -+ -+ data2 = skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN); -+ if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN) -+ memcpy(data2, val + 1, len - QCA_HDR_MGMT_DATA1_LEN); -+ -+ return skb; -+} -+ -+static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num) -+{ -+ struct qca_mgmt_ethhdr *mgmt_ethhdr; -+ -+ mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb->data; -+ mgmt_ethhdr->seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num); -+} -+ -+static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len) -+{ -+ struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data; -+ struct sk_buff *skb; -+ bool ack; -+ int ret; -+ -+ skb = qca8k_alloc_mdio_header(MDIO_READ, reg, NULL, -+ QCA8K_ETHERNET_MDIO_PRIORITY, len); -+ if (!skb) -+ return -ENOMEM; -+ -+ mutex_lock(&mgmt_eth_data->mutex); -+ -+ /* Check mgmt_master if is operational */ -+ if (!priv->mgmt_master) { -+ kfree_skb(skb); -+ mutex_unlock(&mgmt_eth_data->mutex); -+ return -EINVAL; -+ } -+ -+ skb->dev = priv->mgmt_master; -+ -+ reinit_completion(&mgmt_eth_data->rw_done); -+ -+ /* Increment seq_num and set it in the mdio pkt */ -+ mgmt_eth_data->seq++; -+ qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq); -+ mgmt_eth_data->ack = false; -+ -+ dev_queue_xmit(skb); -+ -+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done, -+ msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT)); -+ -+ *val = mgmt_eth_data->data[0]; -+ if (len > QCA_HDR_MGMT_DATA1_LEN) -+ memcpy(val + 1, mgmt_eth_data->data + 1, len - QCA_HDR_MGMT_DATA1_LEN); -+ -+ ack = mgmt_eth_data->ack; -+ -+ mutex_unlock(&mgmt_eth_data->mutex); -+ -+ if (ret <= 0) -+ return -ETIMEDOUT; -+ -+ if (!ack) -+ return -EINVAL; -+ -+ return 0; -+} -+ -+static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len) -+{ -+ struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data; -+ struct sk_buff *skb; -+ bool ack; -+ int ret; -+ -+ skb = 
qca8k_alloc_mdio_header(MDIO_WRITE, reg, val, -+ QCA8K_ETHERNET_MDIO_PRIORITY, len); -+ if (!skb) -+ return -ENOMEM; -+ -+ mutex_lock(&mgmt_eth_data->mutex); -+ -+ /* Check mgmt_master if is operational */ -+ if (!priv->mgmt_master) { -+ kfree_skb(skb); -+ mutex_unlock(&mgmt_eth_data->mutex); -+ return -EINVAL; -+ } -+ -+ skb->dev = priv->mgmt_master; -+ -+ reinit_completion(&mgmt_eth_data->rw_done); -+ -+ /* Increment seq_num and set it in the mdio pkt */ -+ mgmt_eth_data->seq++; -+ qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq); -+ mgmt_eth_data->ack = false; -+ -+ dev_queue_xmit(skb); -+ -+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done, -+ msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT)); -+ -+ ack = mgmt_eth_data->ack; -+ -+ mutex_unlock(&mgmt_eth_data->mutex); -+ -+ if (ret <= 0) -+ return -ETIMEDOUT; -+ -+ if (!ack) -+ return -EINVAL; -+ -+ return 0; -+} -+ -+static int -+qca8k_regmap_update_bits_eth(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val) -+{ -+ u32 val = 0; -+ int ret; -+ -+ ret = qca8k_read_eth(priv, reg, &val, sizeof(val)); -+ if (ret) -+ return ret; -+ -+ val &= ~mask; -+ val |= write_val; -+ -+ return qca8k_write_eth(priv, reg, &val, sizeof(val)); -+} -+ -+static int -+qca8k_bulk_read(struct qca8k_priv *priv, u32 reg, u32 *val, int len) -+{ -+ int i, count = len / sizeof(u32), ret; -+ -+ if (priv->mgmt_master && !qca8k_read_eth(priv, reg, val, len)) -+ return 0; -+ -+ for (i = 0; i < count; i++) { -+ ret = regmap_read(priv->regmap, reg + (i * 4), val + i); -+ if (ret < 0) -+ return ret; -+ } -+ -+ return 0; -+} -+ -+static int -+qca8k_bulk_write(struct qca8k_priv *priv, u32 reg, u32 *val, int len) -+{ -+ int i, count = len / sizeof(u32), ret; -+ u32 tmp; -+ -+ if (priv->mgmt_master && !qca8k_write_eth(priv, reg, val, len)) -+ return 0; -+ -+ for (i = 0; i < count; i++) { -+ tmp = val[i]; -+ -+ ret = regmap_write(priv->regmap, reg + (i * 4), tmp); -+ if (ret < 0) -+ return ret; -+ } -+ -+ return 0; -+} -+ -+static int -+qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val) -+{ -+ struct qca8k_priv *priv = (struct qca8k_priv *)ctx; -+ struct mii_bus *bus = priv->bus; -+ u16 r1, r2, page; -+ int ret; -+ -+ if (!qca8k_read_eth(priv, reg, val, sizeof(*val))) -+ return 0; -+ -+ qca8k_split_addr(reg, &r1, &r2, &page); -+ -+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); -+ -+ ret = qca8k_set_page(priv, page); -+ if (ret < 0) -+ goto exit; -+ -+ ret = qca8k_mii_read32(bus, 0x10 | r2, r1, val); -+ -+exit: -+ mutex_unlock(&bus->mdio_lock); -+ return ret; -+} -+ -+static int -+qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val) -+{ -+ struct qca8k_priv *priv = (struct qca8k_priv *)ctx; -+ struct mii_bus *bus = priv->bus; -+ u16 r1, r2, page; -+ int ret; -+ -+ if (!qca8k_write_eth(priv, reg, &val, sizeof(val))) -+ return 0; -+ -+ qca8k_split_addr(reg, &r1, &r2, &page); -+ -+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); -+ -+ ret = qca8k_set_page(priv, page); -+ if (ret < 0) -+ goto exit; -+ -+ qca8k_mii_write32(priv, 0x10 | r2, r1, val); -+ -+exit: -+ mutex_unlock(&bus->mdio_lock); -+ return ret; -+} -+ -+static int -+qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val) -+{ -+ struct qca8k_priv *priv = (struct qca8k_priv *)ctx; -+ struct mii_bus *bus = priv->bus; -+ u16 r1, r2, page; -+ u32 val; -+ int ret; -+ -+ if (!qca8k_regmap_update_bits_eth(priv, reg, mask, write_val)) -+ return 0; -+ -+ qca8k_split_addr(reg, &r1, &r2, &page); -+ -+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); -+ -+ 
ret = qca8k_set_page(priv, page); -+ if (ret < 0) -+ goto exit; -+ -+ ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val); -+ if (ret < 0) -+ goto exit; -+ -+ val &= ~mask; -+ val |= write_val; -+ qca8k_mii_write32(priv, 0x10 | r2, r1, val); -+ -+exit: -+ mutex_unlock(&bus->mdio_lock); -+ -+ return ret; -+} -+ -+static const struct regmap_range qca8k_readable_ranges[] = { -+ regmap_reg_range(0x0000, 0x00e4), /* Global control */ -+ regmap_reg_range(0x0100, 0x0168), /* EEE control */ -+ regmap_reg_range(0x0200, 0x0270), /* Parser control */ -+ regmap_reg_range(0x0400, 0x0454), /* ACL */ -+ regmap_reg_range(0x0600, 0x0718), /* Lookup */ -+ regmap_reg_range(0x0800, 0x0b70), /* QM */ -+ regmap_reg_range(0x0c00, 0x0c80), /* PKT */ -+ regmap_reg_range(0x0e00, 0x0e98), /* L3 */ -+ regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */ -+ regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */ -+ regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */ -+ regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */ -+ regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */ -+ regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */ -+ regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */ -+ -+}; -+ -+static const struct regmap_access_table qca8k_readable_table = { -+ .yes_ranges = qca8k_readable_ranges, -+ .n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges), -+}; -+ -+static struct regmap_config qca8k_regmap_config = { -+ .reg_bits = 16, -+ .val_bits = 32, -+ .reg_stride = 4, -+ .max_register = 0x16ac, /* end MIB - Port6 range */ -+ .reg_read = qca8k_regmap_read, -+ .reg_write = qca8k_regmap_write, -+ .reg_update_bits = qca8k_regmap_update_bits, -+ .rd_table = &qca8k_readable_table, -+ .disable_locking = true, /* Locking is handled by qca8k read/write */ -+ .cache_type = REGCACHE_NONE, /* Explicitly disable CACHE */ -+}; -+ -+static int -+qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask) -+{ -+ u32 val; -+ -+ return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0, -+ QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC); -+} -+ -+static int -+qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb) -+{ -+ u32 reg[3]; -+ int ret; -+ -+ /* load the ARL table into an array */ -+ ret = qca8k_bulk_read(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg)); -+ if (ret) -+ return ret; -+ -+ /* vid - 83:72 */ -+ fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]); -+ /* aging - 67:64 */ -+ fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]); -+ /* portmask - 54:48 */ -+ fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]); -+ /* mac - 47:0 */ -+ fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]); -+ fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]); -+ fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]); -+ fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]); -+ fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]); -+ fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]); -+ -+ return 0; -+} -+ -+static void -+qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask, const u8 *mac, -+ u8 aging) -+{ -+ u32 reg[3] = { 0 }; -+ -+ /* vid - 83:72 */ -+ reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid); -+ /* aging - 67:64 */ -+ reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging); -+ /* portmask - 54:48 */ -+ reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask); -+ /* mac - 47:0 */ -+ reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]); -+ reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]); -+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]); -+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]); -+ reg[0] |= 
FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]); -+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]); -+ -+ /* load the array into the ARL table */ -+ qca8k_bulk_write(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg)); -+} -+ -+static int -+qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd, int port) -+{ -+ u32 reg; -+ int ret; -+ -+ /* Set the command and FDB index */ -+ reg = QCA8K_ATU_FUNC_BUSY; -+ reg |= cmd; -+ if (port >= 0) { -+ reg |= QCA8K_ATU_FUNC_PORT_EN; -+ reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port); -+ } -+ -+ /* Write the function register triggering the table access */ -+ ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg); -+ if (ret) -+ return ret; -+ -+ /* wait for completion */ -+ ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY); -+ if (ret) -+ return ret; -+ -+ /* Check for table full violation when adding an entry */ -+ if (cmd == QCA8K_FDB_LOAD) { -+ ret = qca8k_read(priv, QCA8K_REG_ATU_FUNC, ®); -+ if (ret < 0) -+ return ret; -+ if (reg & QCA8K_ATU_FUNC_FULL) -+ return -1; -+ } -+ -+ return 0; -+} -+ -+static int -+qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb, int port) -+{ -+ int ret; -+ -+ qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging); -+ ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port); -+ if (ret < 0) -+ return ret; -+ -+ return qca8k_fdb_read(priv, fdb); -+} -+ -+static int -+qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac, u16 port_mask, -+ u16 vid, u8 aging) -+{ -+ int ret; -+ -+ mutex_lock(&priv->reg_mutex); -+ qca8k_fdb_write(priv, vid, port_mask, mac, aging); -+ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1); -+ mutex_unlock(&priv->reg_mutex); -+ -+ return ret; -+} -+ -+static int -+qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac, u16 port_mask, u16 vid) -+{ -+ int ret; -+ -+ mutex_lock(&priv->reg_mutex); -+ qca8k_fdb_write(priv, vid, port_mask, mac, 0); -+ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1); -+ mutex_unlock(&priv->reg_mutex); -+ -+ return ret; -+} -+ -+static void -+qca8k_fdb_flush(struct qca8k_priv *priv) -+{ -+ mutex_lock(&priv->reg_mutex); -+ qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1); -+ mutex_unlock(&priv->reg_mutex); -+} -+ -+static int -+qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask, -+ const u8 *mac, u16 vid) -+{ -+ struct qca8k_fdb fdb = { 0 }; -+ int ret; -+ -+ mutex_lock(&priv->reg_mutex); -+ -+ qca8k_fdb_write(priv, vid, 0, mac, 0); -+ ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1); -+ if (ret < 0) -+ goto exit; -+ -+ ret = qca8k_fdb_read(priv, &fdb); -+ if (ret < 0) -+ goto exit; -+ -+ /* Rule exist. Delete first */ -+ if (!fdb.aging) { -+ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1); -+ if (ret) -+ goto exit; -+ } -+ -+ /* Add port to fdb portmask */ -+ fdb.port_mask |= port_mask; -+ -+ qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging); -+ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1); -+ -+exit: -+ mutex_unlock(&priv->reg_mutex); -+ return ret; -+} -+ -+static int -+qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask, -+ const u8 *mac, u16 vid) -+{ -+ struct qca8k_fdb fdb = { 0 }; -+ int ret; -+ -+ mutex_lock(&priv->reg_mutex); -+ -+ qca8k_fdb_write(priv, vid, 0, mac, 0); -+ ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1); -+ if (ret < 0) -+ goto exit; -+ -+ /* Rule doesn't exist. Why delete? */ -+ if (!fdb.aging) { -+ ret = -EINVAL; -+ goto exit; -+ } -+ -+ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1); -+ if (ret) -+ goto exit; -+ -+ /* Only port in the rule is this port. 
Don't re insert */ -+ if (fdb.port_mask == port_mask) -+ goto exit; -+ -+ /* Remove port from port mask */ -+ fdb.port_mask &= ~port_mask; -+ -+ qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging); -+ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1); -+ -+exit: -+ mutex_unlock(&priv->reg_mutex); -+ return ret; -+} -+ -+static int -+qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid) -+{ -+ u32 reg; -+ int ret; -+ -+ /* Set the command and VLAN index */ -+ reg = QCA8K_VTU_FUNC1_BUSY; -+ reg |= cmd; -+ reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid); -+ -+ /* Write the function register triggering the table access */ -+ ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg); -+ if (ret) -+ return ret; -+ -+ /* wait for completion */ -+ ret = qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY); -+ if (ret) -+ return ret; -+ -+ /* Check for table full violation when adding an entry */ -+ if (cmd == QCA8K_VLAN_LOAD) { -+ ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC1, ®); -+ if (ret < 0) -+ return ret; -+ if (reg & QCA8K_VTU_FUNC1_FULL) -+ return -ENOMEM; -+ } -+ -+ return 0; -+} -+ -+static int -+qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid, bool untagged) -+{ -+ u32 reg; -+ int ret; -+ -+ /* -+ We do the right thing with VLAN 0 and treat it as untagged while -+ preserving the tag on egress. -+ */ -+ if (vid == 0) -+ return 0; -+ -+ mutex_lock(&priv->reg_mutex); -+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid); -+ if (ret < 0) -+ goto out; -+ -+ ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, ®); -+ if (ret < 0) -+ goto out; -+ reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN; -+ reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port); -+ if (untagged) -+ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port); -+ else -+ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port); -+ -+ ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg); -+ if (ret) -+ goto out; -+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid); -+ -+out: -+ mutex_unlock(&priv->reg_mutex); -+ -+ return ret; -+} -+ -+static int -+qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid) -+{ -+ u32 reg, mask; -+ int ret, i; -+ bool del; -+ -+ mutex_lock(&priv->reg_mutex); -+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid); -+ if (ret < 0) -+ goto out; -+ -+ ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, ®); -+ if (ret < 0) -+ goto out; -+ reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port); -+ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port); -+ -+ /* Check if we're the last member to be removed */ -+ del = true; -+ for (i = 0; i < QCA8K_NUM_PORTS; i++) { -+ mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i); -+ -+ if ((reg & mask) != mask) { -+ del = false; -+ break; -+ } -+ } -+ -+ if (del) { -+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid); -+ } else { -+ ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg); -+ if (ret) -+ goto out; -+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid); -+ } -+ -+out: -+ mutex_unlock(&priv->reg_mutex); -+ -+ return ret; -+} -+ -+static int -+qca8k_mib_init(struct qca8k_priv *priv) -+{ -+ int ret; -+ -+ mutex_lock(&priv->reg_mutex); -+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB, -+ QCA8K_MIB_FUNC | QCA8K_MIB_BUSY, -+ FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_FLUSH) | -+ QCA8K_MIB_BUSY); -+ if (ret) -+ goto exit; -+ -+ ret = qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY); -+ if (ret) -+ goto exit; -+ -+ ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP); -+ if (ret) -+ goto exit; -+ -+ ret = qca8k_write(priv, QCA8K_REG_MODULE_EN, 
QCA8K_MODULE_EN_MIB); -+ -+exit: -+ mutex_unlock(&priv->reg_mutex); -+ return ret; -+} -+ -+static void -+qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable) -+{ -+ u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC; -+ -+ /* Port 0 and 6 have no internal PHY */ -+ if (port > 0 && port < 6) -+ mask |= QCA8K_PORT_STATUS_LINK_AUTO; -+ -+ if (enable) -+ regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask); -+ else -+ regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask); -+} -+ -+static int -+qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data, -+ struct sk_buff *read_skb, u32 *val) -+{ -+ struct sk_buff *skb = skb_copy(read_skb, GFP_KERNEL); -+ bool ack; -+ int ret; -+ -+ reinit_completion(&mgmt_eth_data->rw_done); -+ -+ /* Increment seq_num and set it in the copy pkt */ -+ mgmt_eth_data->seq++; -+ qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq); -+ mgmt_eth_data->ack = false; -+ -+ dev_queue_xmit(skb); -+ -+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done, -+ QCA8K_ETHERNET_TIMEOUT); -+ -+ ack = mgmt_eth_data->ack; -+ -+ if (ret <= 0) -+ return -ETIMEDOUT; -+ -+ if (!ack) -+ return -EINVAL; -+ -+ *val = mgmt_eth_data->data[0]; -+ -+ return 0; -+} -+ -+static int -+qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy, -+ int regnum, u16 data) -+{ -+ struct sk_buff *write_skb, *clear_skb, *read_skb; -+ struct qca8k_mgmt_eth_data *mgmt_eth_data; -+ u32 write_val, clear_val = 0, val; -+ struct net_device *mgmt_master; -+ int ret, ret1; -+ bool ack; -+ -+ if (regnum >= QCA8K_MDIO_MASTER_MAX_REG) -+ return -EINVAL; -+ -+ mgmt_eth_data = &priv->mgmt_eth_data; -+ -+ write_val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN | -+ QCA8K_MDIO_MASTER_PHY_ADDR(phy) | -+ QCA8K_MDIO_MASTER_REG_ADDR(regnum); -+ -+ if (read) { -+ write_val |= QCA8K_MDIO_MASTER_READ; -+ } else { -+ write_val |= QCA8K_MDIO_MASTER_WRITE; -+ write_val |= QCA8K_MDIO_MASTER_DATA(data); -+ } -+ -+ /* Prealloc all the needed skb before the lock */ -+ write_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &write_val, -+ QCA8K_ETHERNET_PHY_PRIORITY, sizeof(write_val)); -+ if (!write_skb) -+ return -ENOMEM; -+ -+ clear_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &clear_val, -+ QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val)); -+ if (!clear_skb) { -+ ret = -ENOMEM; -+ goto err_clear_skb; -+ } -+ -+ read_skb = qca8k_alloc_mdio_header(MDIO_READ, QCA8K_MDIO_MASTER_CTRL, &clear_val, -+ QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val)); -+ if (!read_skb) { -+ ret = -ENOMEM; -+ goto err_read_skb; -+ } -+ -+ /* Actually start the request: -+ * 1. Send mdio master packet -+ * 2. Busy Wait for mdio master command -+ * 3. Get the data if we are reading -+ * 4. 
Reset the mdio master (even with error) -+ */ -+ mutex_lock(&mgmt_eth_data->mutex); -+ -+ /* Check if mgmt_master is operational */ -+ mgmt_master = priv->mgmt_master; -+ if (!mgmt_master) { -+ mutex_unlock(&mgmt_eth_data->mutex); -+ ret = -EINVAL; -+ goto err_mgmt_master; -+ } -+ -+ read_skb->dev = mgmt_master; -+ clear_skb->dev = mgmt_master; -+ write_skb->dev = mgmt_master; -+ -+ reinit_completion(&mgmt_eth_data->rw_done); -+ -+ /* Increment seq_num and set it in the write pkt */ -+ mgmt_eth_data->seq++; -+ qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq); -+ mgmt_eth_data->ack = false; -+ -+ dev_queue_xmit(write_skb); -+ -+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done, -+ QCA8K_ETHERNET_TIMEOUT); -+ -+ ack = mgmt_eth_data->ack; -+ -+ if (ret <= 0) { -+ ret = -ETIMEDOUT; -+ kfree_skb(read_skb); -+ goto exit; -+ } -+ -+ if (!ack) { -+ ret = -EINVAL; -+ kfree_skb(read_skb); -+ goto exit; -+ } -+ -+ ret = read_poll_timeout(qca8k_phy_eth_busy_wait, ret1, -+ !(val & QCA8K_MDIO_MASTER_BUSY), 0, -+ QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false, -+ mgmt_eth_data, read_skb, &val); -+ -+ if (ret < 0 && ret1 < 0) { -+ ret = ret1; -+ goto exit; -+ } -+ -+ if (read) { -+ reinit_completion(&mgmt_eth_data->rw_done); -+ -+ /* Increment seq_num and set it in the read pkt */ -+ mgmt_eth_data->seq++; -+ qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq); -+ mgmt_eth_data->ack = false; -+ -+ dev_queue_xmit(read_skb); -+ -+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done, -+ QCA8K_ETHERNET_TIMEOUT); -+ -+ ack = mgmt_eth_data->ack; -+ -+ if (ret <= 0) { -+ ret = -ETIMEDOUT; -+ goto exit; -+ } -+ -+ if (!ack) { -+ ret = -EINVAL; -+ goto exit; -+ } -+ -+ ret = mgmt_eth_data->data[0] & QCA8K_MDIO_MASTER_DATA_MASK; -+ } else { -+ kfree_skb(read_skb); -+ } -+exit: -+ reinit_completion(&mgmt_eth_data->rw_done); -+ -+ /* Increment seq_num and set it in the clear pkt */ -+ mgmt_eth_data->seq++; -+ qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq); -+ mgmt_eth_data->ack = false; -+ -+ dev_queue_xmit(clear_skb); -+ -+ wait_for_completion_timeout(&mgmt_eth_data->rw_done, -+ QCA8K_ETHERNET_TIMEOUT); -+ -+ mutex_unlock(&mgmt_eth_data->mutex); -+ -+ return ret; -+ -+ /* Error handling before lock */ -+err_mgmt_master: -+ kfree_skb(read_skb); -+err_read_skb: -+ kfree_skb(clear_skb); -+err_clear_skb: -+ kfree_skb(write_skb); -+ -+ return ret; -+} -+ -+static u32 -+qca8k_port_to_phy(int port) -+{ -+ /* From Andrew Lunn: -+ * Port 0 has no internal phy. -+ * Port 1 has an internal PHY at MDIO address 0. -+ * Port 2 has an internal PHY at MDIO address 1. -+ * ... -+ * Port 5 has an internal PHY at MDIO address 4. -+ * Port 6 has no internal PHY. 
-+ */ -+ -+ return port - 1; -+} -+ -+static int -+qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask) -+{ -+ u16 r1, r2, page; -+ u32 val; -+ int ret, ret1; -+ -+ qca8k_split_addr(reg, &r1, &r2, &page); -+ -+ ret = read_poll_timeout(qca8k_mii_read32, ret1, !(val & mask), 0, -+ QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false, -+ bus, 0x10 | r2, r1, &val); -+ -+ /* Check if qca8k_read has failed for a different reason -+ * before returnting -ETIMEDOUT -+ */ -+ if (ret < 0 && ret1 < 0) -+ return ret1; -+ -+ return ret; -+} -+ -+static int -+qca8k_mdio_write(struct qca8k_priv *priv, int phy, int regnum, u16 data) -+{ -+ struct mii_bus *bus = priv->bus; -+ u16 r1, r2, page; -+ u32 val; -+ int ret; -+ -+ if (regnum >= QCA8K_MDIO_MASTER_MAX_REG) -+ return -EINVAL; -+ -+ val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN | -+ QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) | -+ QCA8K_MDIO_MASTER_REG_ADDR(regnum) | -+ QCA8K_MDIO_MASTER_DATA(data); -+ -+ qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page); -+ -+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); -+ -+ ret = qca8k_set_page(priv, page); -+ if (ret) -+ goto exit; -+ -+ qca8k_mii_write32(priv, 0x10 | r2, r1, val); -+ -+ ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL, -+ QCA8K_MDIO_MASTER_BUSY); -+ -+exit: -+ /* even if the busy_wait timeouts try to clear the MASTER_EN */ -+ qca8k_mii_write32(priv, 0x10 | r2, r1, 0); -+ -+ mutex_unlock(&bus->mdio_lock); -+ -+ return ret; -+} -+ -+static int -+qca8k_mdio_read(struct qca8k_priv *priv, int phy, int regnum) -+{ -+ struct mii_bus *bus = priv->bus; -+ u16 r1, r2, page; -+ u32 val; -+ int ret; -+ -+ if (regnum >= QCA8K_MDIO_MASTER_MAX_REG) -+ return -EINVAL; -+ -+ val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN | -+ QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) | -+ QCA8K_MDIO_MASTER_REG_ADDR(regnum); -+ -+ qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page); -+ -+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); -+ -+ ret = qca8k_set_page(priv, page); -+ if (ret) -+ goto exit; -+ -+ qca8k_mii_write32(priv, 0x10 | r2, r1, val); -+ -+ ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL, -+ QCA8K_MDIO_MASTER_BUSY); -+ if (ret) -+ goto exit; -+ -+ ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val); -+ -+exit: -+ /* even if the busy_wait timeouts try to clear the MASTER_EN */ -+ qca8k_mii_write32(priv, 0x10 | r2, r1, 0); -+ -+ mutex_unlock(&bus->mdio_lock); -+ -+ if (ret >= 0) -+ ret = val & QCA8K_MDIO_MASTER_DATA_MASK; -+ -+ return ret; -+} -+ -+static int -+qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data) -+{ -+ struct qca8k_priv *priv = slave_bus->priv; -+ int ret; -+ -+ /* Use mdio Ethernet when available, fallback to legacy one on error */ -+ ret = qca8k_phy_eth_command(priv, false, phy, regnum, data); -+ if (!ret) -+ return 0; -+ -+ return qca8k_mdio_write(priv, phy, regnum, data); -+} -+ -+static int -+qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum) -+{ -+ struct qca8k_priv *priv = slave_bus->priv; -+ int ret; -+ -+ /* Use mdio Ethernet when available, fallback to legacy one on error */ -+ ret = qca8k_phy_eth_command(priv, true, phy, regnum, 0); -+ if (ret >= 0) -+ return ret; -+ -+ ret = qca8k_mdio_read(priv, phy, regnum); -+ -+ if (ret < 0) -+ return 0xffff; -+ -+ return ret; -+} -+ -+static int -+qca8k_legacy_mdio_write(struct mii_bus *slave_bus, int port, int regnum, u16 data) -+{ -+ port = qca8k_port_to_phy(port) % PHY_MAX_ADDR; -+ -+ return 
qca8k_internal_mdio_write(slave_bus, port, regnum, data); -+} -+ -+static int -+qca8k_legacy_mdio_read(struct mii_bus *slave_bus, int port, int regnum) -+{ -+ port = qca8k_port_to_phy(port) % PHY_MAX_ADDR; -+ -+ return qca8k_internal_mdio_read(slave_bus, port, regnum); -+} -+ -+static int -+qca8k_mdio_register(struct qca8k_priv *priv) -+{ -+ struct dsa_switch *ds = priv->ds; -+ struct device_node *mdio; -+ struct mii_bus *bus; -+ -+ bus = devm_mdiobus_alloc(ds->dev); -+ if (!bus) -+ return -ENOMEM; -+ -+ bus->priv = (void *)priv; -+ snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d.%d", -+ ds->dst->index, ds->index); -+ bus->parent = ds->dev; -+ bus->phy_mask = ~ds->phys_mii_mask; -+ ds->slave_mii_bus = bus; -+ -+ /* Check if the devicetree declare the port:phy mapping */ -+ mdio = of_get_child_by_name(priv->dev->of_node, "mdio"); -+ if (of_device_is_available(mdio)) { -+ bus->name = "qca8k slave mii"; -+ bus->read = qca8k_internal_mdio_read; -+ bus->write = qca8k_internal_mdio_write; -+ return devm_of_mdiobus_register(priv->dev, bus, mdio); -+ } -+ -+ /* If a mapping can't be found the legacy mapping is used, -+ * using the qca8k_port_to_phy function -+ */ -+ bus->name = "qca8k-legacy slave mii"; -+ bus->read = qca8k_legacy_mdio_read; -+ bus->write = qca8k_legacy_mdio_write; -+ return devm_mdiobus_register(priv->dev, bus); -+} -+ -+static int -+qca8k_setup_mdio_bus(struct qca8k_priv *priv) -+{ -+ u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg; -+ struct device_node *ports, *port; -+ phy_interface_t mode; -+ int err; -+ -+ ports = of_get_child_by_name(priv->dev->of_node, "ports"); -+ if (!ports) -+ ports = of_get_child_by_name(priv->dev->of_node, "ethernet-ports"); -+ -+ if (!ports) -+ return -EINVAL; -+ -+ for_each_available_child_of_node(ports, port) { -+ err = of_property_read_u32(port, "reg", ®); -+ if (err) { -+ of_node_put(port); -+ of_node_put(ports); -+ return err; -+ } -+ -+ if (!dsa_is_user_port(priv->ds, reg)) -+ continue; -+ -+ of_get_phy_mode(port, &mode); -+ -+ if (of_property_read_bool(port, "phy-handle") && -+ mode != PHY_INTERFACE_MODE_INTERNAL) -+ external_mdio_mask |= BIT(reg); -+ else -+ internal_mdio_mask |= BIT(reg); -+ } -+ -+ of_node_put(ports); -+ if (!external_mdio_mask && !internal_mdio_mask) { -+ dev_err(priv->dev, "no PHYs are defined.\n"); -+ return -EINVAL; -+ } -+ -+ /* The QCA8K_MDIO_MASTER_EN Bit, which grants access to PHYs through -+ * the MDIO_MASTER register also _disconnects_ the external MDC -+ * passthrough to the internal PHYs. It's not possible to use both -+ * configurations at the same time! -+ * -+ * Because this came up during the review process: -+ * If the external mdio-bus driver is capable magically disabling -+ * the QCA8K_MDIO_MASTER_EN and mutex/spin-locking out the qca8k's -+ * accessors for the time being, it would be possible to pull this -+ * off. -+ */ -+ if (!!external_mdio_mask && !!internal_mdio_mask) { -+ dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n"); -+ return -EINVAL; -+ } -+ -+ if (external_mdio_mask) { -+ /* Make sure to disable the internal mdio bus in cases -+ * a dt-overlay and driver reload changed the configuration -+ */ -+ -+ return regmap_clear_bits(priv->regmap, QCA8K_MDIO_MASTER_CTRL, -+ QCA8K_MDIO_MASTER_EN); -+ } -+ -+ return qca8k_mdio_register(priv); -+} -+ -+static int -+qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv) -+{ -+ u32 mask = 0; -+ int ret = 0; -+ -+ /* SoC specific settings for ipq8064. 
-+ * If more device require this consider adding -+ * a dedicated binding. -+ */ -+ if (of_machine_is_compatible("qcom,ipq8064")) -+ mask |= QCA8K_MAC_PWR_RGMII0_1_8V; -+ -+ /* SoC specific settings for ipq8065 */ -+ if (of_machine_is_compatible("qcom,ipq8065")) -+ mask |= QCA8K_MAC_PWR_RGMII1_1_8V; -+ -+ if (mask) { -+ ret = qca8k_rmw(priv, QCA8K_REG_MAC_PWR_SEL, -+ QCA8K_MAC_PWR_RGMII0_1_8V | -+ QCA8K_MAC_PWR_RGMII1_1_8V, -+ mask); -+ } -+ -+ return ret; -+} -+ -+static int qca8k_find_cpu_port(struct dsa_switch *ds) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ -+ /* Find the connected cpu port. Valid port are 0 or 6 */ -+ if (dsa_is_cpu_port(ds, 0)) -+ return 0; -+ -+ dev_dbg(priv->dev, "port 0 is not the CPU port. Checking port 6"); -+ -+ if (dsa_is_cpu_port(ds, 6)) -+ return 6; -+ -+ return -EINVAL; -+} -+ -+static int -+qca8k_setup_of_pws_reg(struct qca8k_priv *priv) -+{ -+ struct device_node *node = priv->dev->of_node; -+ const struct qca8k_match_data *data; -+ u32 val = 0; -+ int ret; -+ -+ /* QCA8327 require to set to the correct mode. -+ * His bigger brother QCA8328 have the 172 pin layout. -+ * Should be applied by default but we set this just to make sure. -+ */ -+ if (priv->switch_id == QCA8K_ID_QCA8327) { -+ data = of_device_get_match_data(priv->dev); -+ -+ /* Set the correct package of 148 pin for QCA8327 */ -+ if (data->reduced_package) -+ val |= QCA8327_PWS_PACKAGE148_EN; -+ -+ ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN, -+ val); -+ if (ret) -+ return ret; -+ } -+ -+ if (of_property_read_bool(node, "qca,ignore-power-on-sel")) -+ val |= QCA8K_PWS_POWER_ON_SEL; -+ -+ if (of_property_read_bool(node, "qca,led-open-drain")) { -+ if (!(val & QCA8K_PWS_POWER_ON_SEL)) { -+ dev_err(priv->dev, "qca,led-open-drain require qca,ignore-power-on-sel to be set."); -+ return -EINVAL; -+ } -+ -+ val |= QCA8K_PWS_LED_OPEN_EN_CSR; -+ } -+ -+ return qca8k_rmw(priv, QCA8K_REG_PWS, -+ QCA8K_PWS_LED_OPEN_EN_CSR | QCA8K_PWS_POWER_ON_SEL, -+ val); -+} -+ -+static int -+qca8k_parse_port_config(struct qca8k_priv *priv) -+{ -+ int port, cpu_port_index = -1, ret; -+ struct device_node *port_dn; -+ phy_interface_t mode; -+ struct dsa_port *dp; -+ u32 delay; -+ -+ /* We have 2 CPU port. 
Check them */ -+ for (port = 0; port < QCA8K_NUM_PORTS; port++) { -+ /* Skip every other port */ -+ if (port != 0 && port != 6) -+ continue; -+ -+ dp = dsa_to_port(priv->ds, port); -+ port_dn = dp->dn; -+ cpu_port_index++; -+ -+ if (!of_device_is_available(port_dn)) -+ continue; -+ -+ ret = of_get_phy_mode(port_dn, &mode); -+ if (ret) -+ continue; -+ -+ switch (mode) { -+ case PHY_INTERFACE_MODE_RGMII: -+ case PHY_INTERFACE_MODE_RGMII_ID: -+ case PHY_INTERFACE_MODE_RGMII_TXID: -+ case PHY_INTERFACE_MODE_RGMII_RXID: -+ case PHY_INTERFACE_MODE_SGMII: -+ delay = 0; -+ -+ if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay)) -+ /* Switch regs accept value in ns, convert ps to ns */ -+ delay = delay / 1000; -+ else if (mode == PHY_INTERFACE_MODE_RGMII_ID || -+ mode == PHY_INTERFACE_MODE_RGMII_TXID) -+ delay = 1; -+ -+ if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, delay)) { -+ dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value"); -+ delay = 3; -+ } -+ -+ priv->ports_config.rgmii_tx_delay[cpu_port_index] = delay; -+ -+ delay = 0; -+ -+ if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay)) -+ /* Switch regs accept value in ns, convert ps to ns */ -+ delay = delay / 1000; -+ else if (mode == PHY_INTERFACE_MODE_RGMII_ID || -+ mode == PHY_INTERFACE_MODE_RGMII_RXID) -+ delay = 2; -+ -+ if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, delay)) { -+ dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value"); -+ delay = 3; -+ } -+ -+ priv->ports_config.rgmii_rx_delay[cpu_port_index] = delay; -+ -+ /* Skip sgmii parsing for rgmii* mode */ -+ if (mode == PHY_INTERFACE_MODE_RGMII || -+ mode == PHY_INTERFACE_MODE_RGMII_ID || -+ mode == PHY_INTERFACE_MODE_RGMII_TXID || -+ mode == PHY_INTERFACE_MODE_RGMII_RXID) -+ break; -+ -+ if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge")) -+ priv->ports_config.sgmii_tx_clk_falling_edge = true; -+ -+ if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge")) -+ priv->ports_config.sgmii_rx_clk_falling_edge = true; -+ -+ if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) { -+ priv->ports_config.sgmii_enable_pll = true; -+ -+ if (priv->switch_id == QCA8K_ID_QCA8327) { -+ dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. 
Aborting enabling"); -+ priv->ports_config.sgmii_enable_pll = false; -+ } -+ -+ if (priv->switch_revision < 2) -+ dev_warn(priv->dev, "SGMII PLL should NOT be enabled for qca8337 with revision 2 or more."); -+ } -+ -+ break; -+ default: -+ continue; -+ } -+ } -+ -+ return 0; -+} -+ -+static int -+qca8k_setup(struct dsa_switch *ds) -+{ -+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -+ int cpu_port, ret, i; -+ u32 mask; -+ -+ cpu_port = qca8k_find_cpu_port(ds); -+ if (cpu_port < 0) { -+ dev_err(priv->dev, "No cpu port configured in both cpu port0 and port6"); -+ return cpu_port; -+ } -+ -+ /* Parse CPU port config to be later used in phy_link mac_config */ -+ ret = qca8k_parse_port_config(priv); -+ if (ret) -+ return ret; -+ -+ ret = qca8k_setup_mdio_bus(priv); -+ if (ret) -+ return ret; -+ -+ ret = qca8k_setup_of_pws_reg(priv); -+ if (ret) -+ return ret; -+ -+ ret = qca8k_setup_mac_pwr_sel(priv); -+ if (ret) -+ return ret; -+ -+ /* Make sure MAC06 is disabled */ -+ ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL, -+ QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN); -+ if (ret) { -+ dev_err(priv->dev, "failed disabling MAC06 exchange"); -+ return ret; -+ } -+ -+ /* Enable CPU Port */ -+ ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, -+ QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN); -+ if (ret) { -+ dev_err(priv->dev, "failed enabling CPU port"); -+ return ret; -+ } -+ -+ /* Enable MIB counters */ -+ ret = qca8k_mib_init(priv); -+ if (ret) -+ dev_warn(priv->dev, "mib init failed"); -+ -+ /* Initial setup of all ports */ -+ for (i = 0; i < QCA8K_NUM_PORTS; i++) { -+ /* Disable forwarding by default on all ports */ -+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i), -+ QCA8K_PORT_LOOKUP_MEMBER, 0); -+ if (ret) -+ return ret; -+ -+ /* Enable QCA header mode on all cpu ports */ -+ if (dsa_is_cpu_port(ds, i)) { -+ ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i), -+ FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) | -+ FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL)); -+ if (ret) { -+ dev_err(priv->dev, "failed enabling QCA header mode"); -+ return ret; -+ } -+ } -+ -+ /* Disable MAC by default on all user ports */ -+ if (dsa_is_user_port(ds, i)) -+ qca8k_port_set_status(priv, i, 0); -+ } -+ -+ /* Forward all unknown frames to CPU port for Linux processing -+ * Notice that in multi-cpu config only one port should be set -+ * for igmp, unknown, multicast and broadcast packet -+ */ -+ ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1, -+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) | -+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) | -+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) | -+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port))); -+ if (ret) -+ return ret; -+ -+ /* Setup connection between CPU port & user ports -+ * Configure specific switch configuration for ports -+ */ -+ for (i = 0; i < QCA8K_NUM_PORTS; i++) { -+ /* CPU port gets connected to all user ports of the switch */ -+ if (dsa_is_cpu_port(ds, i)) { -+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i), -+ QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds)); -+ if (ret) -+ return ret; -+ } -+ -+ /* Individual user ports get connected to CPU port only */ -+ if (dsa_is_user_port(ds, i)) { -+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i), -+ QCA8K_PORT_LOOKUP_MEMBER, -+ BIT(cpu_port)); -+ if (ret) -+ return ret; -+ -+ /* Enable ARP Auto-learning by default */ -+ ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(i), -+ 
QCA8K_PORT_LOOKUP_LEARN); -+ if (ret) -+ return ret; -+ -+ /* For port based vlans to work we need to set the -+ * default egress vid -+ */ -+ ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i), -+ QCA8K_EGREES_VLAN_PORT_MASK(i), -+ QCA8K_EGREES_VLAN_PORT(i, QCA8K_PORT_VID_DEF)); -+ if (ret) -+ return ret; -+ -+ ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i), -+ QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) | -+ QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF)); -+ if (ret) -+ return ret; -+ } -+ -+ /* The port 5 of the qca8337 have some problem in flood condition. The -+ * original legacy driver had some specific buffer and priority settings -+ * for the different port suggested by the QCA switch team. Add this -+ * missing settings to improve switch stability under load condition. -+ * This problem is limited to qca8337 and other qca8k switch are not affected. -+ */ -+ if (priv->switch_id == QCA8K_ID_QCA8337) { -+ switch (i) { -+ /* The 2 CPU port and port 5 requires some different -+ * priority than any other ports. -+ */ -+ case 0: -+ case 5: -+ case 6: -+ mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) | -+ QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) | -+ QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) | -+ QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) | -+ QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) | -+ QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) | -+ QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e); -+ break; -+ default: -+ mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) | -+ QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) | -+ QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) | -+ QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) | -+ QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19); -+ } -+ qca8k_write(priv, QCA8K_REG_PORT_HOL_CTRL0(i), mask); -+ -+ mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) | -+ QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN | -+ QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN | -+ QCA8K_PORT_HOL_CTRL1_WRED_EN; -+ qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i), -+ QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK | -+ QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN | -+ QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN | -+ QCA8K_PORT_HOL_CTRL1_WRED_EN, -+ mask); -+ } -+ } -+ -+ /* Special GLOBAL_FC_THRESH value are needed for ar8327 switch */ -+ if (priv->switch_id == QCA8K_ID_QCA8327) { -+ mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) | -+ QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496); -+ qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH, -+ QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK | -+ QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK, -+ mask); -+ } -+ -+ /* Setup our port MTUs to match power on defaults */ -+ ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN); -+ if (ret) -+ dev_warn(priv->dev, "failed setting MTU settings"); -+ -+ /* Flush the FDB table */ -+ qca8k_fdb_flush(priv); -+ -+ /* We don't have interrupts for link changes, so we need to poll */ -+ ds->pcs_poll = true; -+ -+ /* Set min a max ageing value supported */ -+ ds->ageing_time_min = 7000; -+ ds->ageing_time_max = 458745000; -+ -+ /* Set max number of LAGs supported */ -+ ds->num_lag_ids = QCA8K_NUM_LAGS; -+ -+ return 0; -+} -+ -+static void -+qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index, -+ u32 reg) -+{ -+ u32 delay, val = 0; -+ int ret; -+ -+ /* Delay can be declared in 3 different way. -+ * Mode to rgmii and internal-delay standard binding defined -+ * rgmii-id or rgmii-tx/rx phy mode set. -+ * The parse logic set a delay different than 0 only when one -+ * of the 3 different way is used. In all other case delay is -+ * not enabled. With ID or TX/RXID delay is enabled and set -+ * to the default and recommended value. 
-+ */ -+ if (priv->ports_config.rgmii_tx_delay[cpu_port_index]) { -+ delay = priv->ports_config.rgmii_tx_delay[cpu_port_index]; -+ -+ val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) | -+ QCA8K_PORT_PAD_RGMII_TX_DELAY_EN; -+ } -+ -+ if (priv->ports_config.rgmii_rx_delay[cpu_port_index]) { -+ delay = priv->ports_config.rgmii_rx_delay[cpu_port_index]; -+ -+ val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) | -+ QCA8K_PORT_PAD_RGMII_RX_DELAY_EN; -+ } -+ -+ /* Set RGMII delay based on the selected values */ -+ ret = qca8k_rmw(priv, reg, -+ QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK | -+ QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK | -+ QCA8K_PORT_PAD_RGMII_TX_DELAY_EN | -+ QCA8K_PORT_PAD_RGMII_RX_DELAY_EN, -+ val); -+ if (ret) -+ dev_err(priv->dev, "Failed to set internal delay for CPU port%d", -+ cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6); -+} -+ -+static void -+qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode, -+ const struct phylink_link_state *state) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ int cpu_port_index, ret; -+ u32 reg, val; -+ -+ switch (port) { -+ case 0: /* 1st CPU port */ -+ if (state->interface != PHY_INTERFACE_MODE_RGMII && -+ state->interface != PHY_INTERFACE_MODE_RGMII_ID && -+ state->interface != PHY_INTERFACE_MODE_RGMII_TXID && -+ state->interface != PHY_INTERFACE_MODE_RGMII_RXID && -+ state->interface != PHY_INTERFACE_MODE_SGMII) -+ return; -+ -+ reg = QCA8K_REG_PORT0_PAD_CTRL; -+ cpu_port_index = QCA8K_CPU_PORT0; -+ break; -+ case 1: -+ case 2: -+ case 3: -+ case 4: -+ case 5: -+ /* Internal PHY, nothing to do */ -+ return; -+ case 6: /* 2nd CPU port / external PHY */ -+ if (state->interface != PHY_INTERFACE_MODE_RGMII && -+ state->interface != PHY_INTERFACE_MODE_RGMII_ID && -+ state->interface != PHY_INTERFACE_MODE_RGMII_TXID && -+ state->interface != PHY_INTERFACE_MODE_RGMII_RXID && -+ state->interface != PHY_INTERFACE_MODE_SGMII && -+ state->interface != PHY_INTERFACE_MODE_1000BASEX) -+ return; -+ -+ reg = QCA8K_REG_PORT6_PAD_CTRL; -+ cpu_port_index = QCA8K_CPU_PORT6; -+ break; -+ default: -+ dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port); -+ return; -+ } -+ -+ if (port != 6 && phylink_autoneg_inband(mode)) { -+ dev_err(ds->dev, "%s: in-band negotiation unsupported\n", -+ __func__); -+ return; -+ } -+ -+ switch (state->interface) { -+ case PHY_INTERFACE_MODE_RGMII: -+ case PHY_INTERFACE_MODE_RGMII_ID: -+ case PHY_INTERFACE_MODE_RGMII_TXID: -+ case PHY_INTERFACE_MODE_RGMII_RXID: -+ qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN); -+ -+ /* Configure rgmii delay */ -+ qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg); -+ -+ /* QCA8337 requires to set rgmii rx delay for all ports. -+ * This is enabled through PORT5_PAD_CTRL for all ports, -+ * rather than individual port registers. 
-+ */ -+ if (priv->switch_id == QCA8K_ID_QCA8337) -+ qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL, -+ QCA8K_PORT_PAD_RGMII_RX_DELAY_EN); -+ break; -+ case PHY_INTERFACE_MODE_SGMII: -+ case PHY_INTERFACE_MODE_1000BASEX: -+ /* Enable SGMII on the port */ -+ qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN); -+ -+ /* Enable/disable SerDes auto-negotiation as necessary */ -+ ret = qca8k_read(priv, QCA8K_REG_PWS, &val); -+ if (ret) -+ return; -+ if (phylink_autoneg_inband(mode)) -+ val &= ~QCA8K_PWS_SERDES_AEN_DIS; -+ else -+ val |= QCA8K_PWS_SERDES_AEN_DIS; -+ qca8k_write(priv, QCA8K_REG_PWS, val); -+ -+ /* Configure the SGMII parameters */ -+ ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val); -+ if (ret) -+ return; -+ -+ val |= QCA8K_SGMII_EN_SD; -+ -+ if (priv->ports_config.sgmii_enable_pll) -+ val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX | -+ QCA8K_SGMII_EN_TX; -+ -+ if (dsa_is_cpu_port(ds, port)) { -+ /* CPU port, we're talking to the CPU MAC, be a PHY */ -+ val &= ~QCA8K_SGMII_MODE_CTRL_MASK; -+ val |= QCA8K_SGMII_MODE_CTRL_PHY; -+ } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { -+ val &= ~QCA8K_SGMII_MODE_CTRL_MASK; -+ val |= QCA8K_SGMII_MODE_CTRL_MAC; -+ } else if (state->interface == PHY_INTERFACE_MODE_1000BASEX) { -+ val &= ~QCA8K_SGMII_MODE_CTRL_MASK; -+ val |= QCA8K_SGMII_MODE_CTRL_BASEX; -+ } -+ -+ qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val); -+ -+ /* From original code is reported port instability as SGMII also -+ * require delay set. Apply advised values here or take them from DT. -+ */ -+ if (state->interface == PHY_INTERFACE_MODE_SGMII) -+ qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg); -+ -+ /* For qca8327/qca8328/qca8334/qca8338 sgmii is unique and -+ * falling edge is set writing in the PORT0 PAD reg -+ */ -+ if (priv->switch_id == QCA8K_ID_QCA8327 || -+ priv->switch_id == QCA8K_ID_QCA8337) -+ reg = QCA8K_REG_PORT0_PAD_CTRL; -+ -+ val = 0; -+ -+ /* SGMII Clock phase configuration */ -+ if (priv->ports_config.sgmii_rx_clk_falling_edge) -+ val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE; -+ -+ if (priv->ports_config.sgmii_tx_clk_falling_edge) -+ val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE; -+ -+ if (val) -+ ret = qca8k_rmw(priv, reg, -+ QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE | -+ QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE, -+ val); -+ -+ break; -+ default: -+ dev_err(ds->dev, "xMII mode %s not supported for port %d\n", -+ phy_modes(state->interface), port); -+ return; -+ } -+} -+ -+static void -+qca8k_phylink_validate(struct dsa_switch *ds, int port, -+ unsigned long *supported, -+ struct phylink_link_state *state) -+{ -+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; -+ -+ switch (port) { -+ case 0: /* 1st CPU port */ -+ if (state->interface != PHY_INTERFACE_MODE_NA && -+ state->interface != PHY_INTERFACE_MODE_RGMII && -+ state->interface != PHY_INTERFACE_MODE_RGMII_ID && -+ state->interface != PHY_INTERFACE_MODE_RGMII_TXID && -+ state->interface != PHY_INTERFACE_MODE_RGMII_RXID && -+ state->interface != PHY_INTERFACE_MODE_SGMII) -+ goto unsupported; -+ break; -+ case 1: -+ case 2: -+ case 3: -+ case 4: -+ case 5: -+ /* Internal PHY */ -+ if (state->interface != PHY_INTERFACE_MODE_NA && -+ state->interface != PHY_INTERFACE_MODE_GMII && -+ state->interface != PHY_INTERFACE_MODE_INTERNAL) -+ goto unsupported; -+ break; -+ case 6: /* 2nd CPU port / external PHY */ -+ if (state->interface != PHY_INTERFACE_MODE_NA && -+ state->interface != PHY_INTERFACE_MODE_RGMII && -+ state->interface != PHY_INTERFACE_MODE_RGMII_ID && -+ state->interface != 
PHY_INTERFACE_MODE_RGMII_TXID && -+ state->interface != PHY_INTERFACE_MODE_RGMII_RXID && -+ state->interface != PHY_INTERFACE_MODE_SGMII && -+ state->interface != PHY_INTERFACE_MODE_1000BASEX) -+ goto unsupported; -+ break; -+ default: -+unsupported: -+ linkmode_zero(supported); -+ return; -+ } -+ -+ phylink_set_port_modes(mask); -+ phylink_set(mask, Autoneg); -+ -+ phylink_set(mask, 1000baseT_Full); -+ phylink_set(mask, 10baseT_Half); -+ phylink_set(mask, 10baseT_Full); -+ phylink_set(mask, 100baseT_Half); -+ phylink_set(mask, 100baseT_Full); -+ -+ if (state->interface == PHY_INTERFACE_MODE_1000BASEX) -+ phylink_set(mask, 1000baseX_Full); -+ -+ phylink_set(mask, Pause); -+ phylink_set(mask, Asym_Pause); -+ -+ linkmode_and(supported, supported, mask); -+ linkmode_and(state->advertising, state->advertising, mask); -+} -+ -+static int -+qca8k_phylink_mac_link_state(struct dsa_switch *ds, int port, -+ struct phylink_link_state *state) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ u32 reg; -+ int ret; -+ -+ ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), ®); -+ if (ret < 0) -+ return ret; -+ -+ state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP); -+ state->an_complete = state->link; -+ state->an_enabled = !!(reg & QCA8K_PORT_STATUS_LINK_AUTO); -+ state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL : -+ DUPLEX_HALF; -+ -+ switch (reg & QCA8K_PORT_STATUS_SPEED) { -+ case QCA8K_PORT_STATUS_SPEED_10: -+ state->speed = SPEED_10; -+ break; -+ case QCA8K_PORT_STATUS_SPEED_100: -+ state->speed = SPEED_100; -+ break; -+ case QCA8K_PORT_STATUS_SPEED_1000: -+ state->speed = SPEED_1000; -+ break; -+ default: -+ state->speed = SPEED_UNKNOWN; -+ break; -+ } -+ -+ state->pause = MLO_PAUSE_NONE; -+ if (reg & QCA8K_PORT_STATUS_RXFLOW) -+ state->pause |= MLO_PAUSE_RX; -+ if (reg & QCA8K_PORT_STATUS_TXFLOW) -+ state->pause |= MLO_PAUSE_TX; -+ -+ return 1; -+} -+ -+static void -+qca8k_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode, -+ phy_interface_t interface) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ -+ qca8k_port_set_status(priv, port, 0); -+} -+ -+static void -+qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode, -+ phy_interface_t interface, struct phy_device *phydev, -+ int speed, int duplex, bool tx_pause, bool rx_pause) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ u32 reg; -+ -+ if (phylink_autoneg_inband(mode)) { -+ reg = QCA8K_PORT_STATUS_LINK_AUTO; -+ } else { -+ switch (speed) { -+ case SPEED_10: -+ reg = QCA8K_PORT_STATUS_SPEED_10; -+ break; -+ case SPEED_100: -+ reg = QCA8K_PORT_STATUS_SPEED_100; -+ break; -+ case SPEED_1000: -+ reg = QCA8K_PORT_STATUS_SPEED_1000; -+ break; -+ default: -+ reg = QCA8K_PORT_STATUS_LINK_AUTO; -+ break; -+ } -+ -+ if (duplex == DUPLEX_FULL) -+ reg |= QCA8K_PORT_STATUS_DUPLEX; -+ -+ if (rx_pause || dsa_is_cpu_port(ds, port)) -+ reg |= QCA8K_PORT_STATUS_RXFLOW; -+ -+ if (tx_pause || dsa_is_cpu_port(ds, port)) -+ reg |= QCA8K_PORT_STATUS_TXFLOW; -+ } -+ -+ reg |= QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC; -+ -+ qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg); -+} -+ -+static void -+qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data) -+{ -+ const struct qca8k_match_data *match_data; -+ struct qca8k_priv *priv = ds->priv; -+ int i; -+ -+ if (stringset != ETH_SS_STATS) -+ return; -+ -+ match_data = of_device_get_match_data(priv->dev); -+ -+ for (i = 0; i < match_data->mib_count; i++) -+ strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name, -+ ETH_GSTRING_LEN); -+} 
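For readers skimming the dropped qca8k code: the ethtool callbacks in this area (qca8k_get_strings() just above, qca8k_get_sset_count() and the get_ethtool_stats helpers just below) all index the same ar8327_mib[] table, so slot i of the string buffer always names data[i], and descriptors with size == 2 are 64-bit counters built from two consecutive 32-bit MIB words (low word first, high word shifted left by 32). Below is a minimal sketch of that contract, assuming the layout shown in the patch; struct example_mib_desc and example_print_mib_stats() are hypothetical names used only for illustration and are not part of the patch.

#include <linux/ethtool.h>	/* ETH_GSTRING_LEN */
#include <linux/printk.h>

struct example_mib_desc {		/* mirrors struct qca8k_mib_desc in qca8k.h below */
	unsigned int size;		/* 1 or 2 32-bit MIB words */
	unsigned int offset;		/* offset inside the per-port MIB block */
	const char *name;
};

/* Print each counter next to the string that get_strings() placed at the
 * same index, ETH_GSTRING_LEN bytes apart.
 */
static void example_print_mib_stats(const struct example_mib_desc *mib,
				    int count, const u8 *strings,
				    const u64 *data)
{
	int i;

	for (i = 0; i < count; i++)
		pr_info("%s: %llu (offset 0x%x, %u word%s)\n",
			(const char *)(strings + i * ETH_GSTRING_LEN),
			(unsigned long long)data[i], mib[i].offset,
			mib[i].size, mib[i].size == 2 ? "s" : "");
}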
-+ -+static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *skb) -+{ -+ const struct qca8k_match_data *match_data; -+ struct qca8k_mib_eth_data *mib_eth_data; -+ struct qca8k_priv *priv = ds->priv; -+ const struct qca8k_mib_desc *mib; -+ struct mib_ethhdr *mib_ethhdr; -+ int i, mib_len, offset = 0; -+ u64 *data; -+ u8 port; -+ -+ mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb); -+ mib_eth_data = &priv->mib_eth_data; -+ -+ /* The switch autocast every port. Ignore other packet and -+ * parse only the requested one. -+ */ -+ port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, ntohs(mib_ethhdr->hdr)); -+ if (port != mib_eth_data->req_port) -+ goto exit; -+ -+ match_data = device_get_match_data(priv->dev); -+ data = mib_eth_data->data; -+ -+ for (i = 0; i < match_data->mib_count; i++) { -+ mib = &ar8327_mib[i]; -+ -+ /* First 3 mib are present in the skb head */ -+ if (i < 3) { -+ data[i] = mib_ethhdr->data[i]; -+ continue; -+ } -+ -+ mib_len = sizeof(uint32_t); -+ -+ /* Some mib are 64 bit wide */ -+ if (mib->size == 2) -+ mib_len = sizeof(uint64_t); -+ -+ /* Copy the mib value from packet to the */ -+ memcpy(data + i, skb->data + offset, mib_len); -+ -+ /* Set the offset for the next mib */ -+ offset += mib_len; -+ } -+ -+exit: -+ /* Complete on receiving all the mib packet */ -+ if (refcount_dec_and_test(&mib_eth_data->port_parsed)) -+ complete(&mib_eth_data->rw_done); -+} -+ -+static int -+qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data) -+{ -+ struct dsa_port *dp = dsa_to_port(ds, port); -+ struct qca8k_mib_eth_data *mib_eth_data; -+ struct qca8k_priv *priv = ds->priv; -+ int ret; -+ -+ mib_eth_data = &priv->mib_eth_data; -+ -+ mutex_lock(&mib_eth_data->mutex); -+ -+ reinit_completion(&mib_eth_data->rw_done); -+ -+ mib_eth_data->req_port = dp->index; -+ mib_eth_data->data = data; -+ refcount_set(&mib_eth_data->port_parsed, QCA8K_NUM_PORTS); -+ -+ mutex_lock(&priv->reg_mutex); -+ -+ /* Send mib autocast request */ -+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB, -+ QCA8K_MIB_FUNC | QCA8K_MIB_BUSY, -+ FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_CAST) | -+ QCA8K_MIB_BUSY); -+ -+ mutex_unlock(&priv->reg_mutex); -+ -+ if (ret) -+ goto exit; -+ -+ ret = wait_for_completion_timeout(&mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT); -+ -+exit: -+ mutex_unlock(&mib_eth_data->mutex); -+ -+ return ret; -+} -+ -+static void -+qca8k_get_ethtool_stats(struct dsa_switch *ds, int port, -+ uint64_t *data) -+{ -+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -+ const struct qca8k_match_data *match_data; -+ const struct qca8k_mib_desc *mib; -+ u32 reg, i, val; -+ u32 hi = 0; -+ int ret; -+ -+ if (priv->mgmt_master && -+ qca8k_get_ethtool_stats_eth(ds, port, data) > 0) -+ return; -+ -+ match_data = of_device_get_match_data(priv->dev); -+ -+ for (i = 0; i < match_data->mib_count; i++) { -+ mib = &ar8327_mib[i]; -+ reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset; -+ -+ ret = qca8k_read(priv, reg, &val); -+ if (ret < 0) -+ continue; -+ -+ if (mib->size == 2) { -+ ret = qca8k_read(priv, reg + 4, &hi); -+ if (ret < 0) -+ continue; -+ } -+ -+ data[i] = val; -+ if (mib->size == 2) -+ data[i] |= (u64)hi << 32; -+ } -+} -+ -+static int -+qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset) -+{ -+ const struct qca8k_match_data *match_data; -+ struct qca8k_priv *priv = ds->priv; -+ -+ if (sset != ETH_SS_STATS) -+ return 0; -+ -+ match_data = of_device_get_match_data(priv->dev); -+ -+ return match_data->mib_count; -+} -+ -+static int 
-+qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee) -+{ -+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -+ u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port); -+ u32 reg; -+ int ret; -+ -+ mutex_lock(&priv->reg_mutex); -+ ret = qca8k_read(priv, QCA8K_REG_EEE_CTRL, ®); -+ if (ret < 0) -+ goto exit; -+ -+ if (eee->eee_enabled) -+ reg |= lpi_en; -+ else -+ reg &= ~lpi_en; -+ ret = qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg); -+ -+exit: -+ mutex_unlock(&priv->reg_mutex); -+ return ret; -+} -+ -+static int -+qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e) -+{ -+ /* Nothing to do on the port's MAC */ -+ return 0; -+} -+ -+static void -+qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) -+{ -+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -+ u32 stp_state; -+ -+ switch (state) { -+ case BR_STATE_DISABLED: -+ stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED; -+ break; -+ case BR_STATE_BLOCKING: -+ stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING; -+ break; -+ case BR_STATE_LISTENING: -+ stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING; -+ break; -+ case BR_STATE_LEARNING: -+ stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING; -+ break; -+ case BR_STATE_FORWARDING: -+ default: -+ stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD; -+ break; -+ } -+ -+ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), -+ QCA8K_PORT_LOOKUP_STATE_MASK, stp_state); -+} -+ -+static int -+qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br) -+{ -+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -+ int port_mask, cpu_port; -+ int i, ret; -+ -+ cpu_port = dsa_to_port(ds, port)->cpu_dp->index; -+ port_mask = BIT(cpu_port); -+ -+ for (i = 0; i < QCA8K_NUM_PORTS; i++) { -+ if (dsa_is_cpu_port(ds, i)) -+ continue; -+ if (dsa_to_port(ds, i)->bridge_dev != br) -+ continue; -+ /* Add this port to the portvlan mask of the other ports -+ * in the bridge -+ */ -+ ret = regmap_set_bits(priv->regmap, -+ QCA8K_PORT_LOOKUP_CTRL(i), -+ BIT(port)); -+ if (ret) -+ return ret; -+ if (i != port) -+ port_mask |= BIT(i); -+ } -+ -+ /* Add all other ports to this ports portvlan mask */ -+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), -+ QCA8K_PORT_LOOKUP_MEMBER, port_mask); -+ -+ return ret; -+} -+ -+static void -+qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br) -+{ -+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -+ int cpu_port, i; -+ -+ cpu_port = dsa_to_port(ds, port)->cpu_dp->index; -+ -+ for (i = 0; i < QCA8K_NUM_PORTS; i++) { -+ if (dsa_is_cpu_port(ds, i)) -+ continue; -+ if (dsa_to_port(ds, i)->bridge_dev != br) -+ continue; -+ /* Remove this port to the portvlan mask of the other ports -+ * in the bridge -+ */ -+ regmap_clear_bits(priv->regmap, -+ QCA8K_PORT_LOOKUP_CTRL(i), -+ BIT(port)); -+ } -+ -+ /* Set the cpu port to be the only one in the portvlan mask of -+ * this port -+ */ -+ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), -+ QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port)); -+} -+ -+static void -+qca8k_port_fast_age(struct dsa_switch *ds, int port) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ -+ mutex_lock(&priv->reg_mutex); -+ qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port); -+ mutex_unlock(&priv->reg_mutex); -+} -+ -+static int -+qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ unsigned int secs = msecs / 1000; -+ u32 val; -+ -+ /* AGE_TIME reg is set in 7s step */ -+ val = secs / 7; -+ -+ /* Handle case with 0 as val to NOT 
disable -+ * learning -+ */ -+ if (!val) -+ val = 1; -+ -+ return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL, QCA8K_ATU_AGE_TIME_MASK, -+ QCA8K_ATU_AGE_TIME(val)); -+} -+ -+static int -+qca8k_port_enable(struct dsa_switch *ds, int port, -+ struct phy_device *phy) -+{ -+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -+ -+ qca8k_port_set_status(priv, port, 1); -+ priv->port_enabled_map |= BIT(port); -+ -+ if (dsa_is_user_port(ds, port)) -+ phy_support_asym_pause(phy); -+ -+ return 0; -+} -+ -+static void -+qca8k_port_disable(struct dsa_switch *ds, int port) -+{ -+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -+ -+ qca8k_port_set_status(priv, port, 0); -+ priv->port_enabled_map &= ~BIT(port); -+} -+ -+static int -+qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ int ret; -+ -+ /* We have only have a general MTU setting. -+ * DSA always set the CPU port's MTU to the largest MTU of the slave -+ * ports. -+ * Setting MTU just for the CPU port is sufficient to correctly set a -+ * value for every port. -+ */ -+ if (!dsa_is_cpu_port(ds, port)) -+ return 0; -+ -+ /* To change the MAX_FRAME_SIZE the cpu ports must be off or -+ * the switch panics. -+ * Turn off both cpu ports before applying the new value to prevent -+ * this. -+ */ -+ if (priv->port_enabled_map & BIT(0)) -+ qca8k_port_set_status(priv, 0, 0); -+ -+ if (priv->port_enabled_map & BIT(6)) -+ qca8k_port_set_status(priv, 6, 0); -+ -+ /* Include L2 header / FCS length */ -+ ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu + ETH_HLEN + ETH_FCS_LEN); -+ -+ if (priv->port_enabled_map & BIT(0)) -+ qca8k_port_set_status(priv, 0, 1); -+ -+ if (priv->port_enabled_map & BIT(6)) -+ qca8k_port_set_status(priv, 6, 1); -+ -+ return ret; -+} -+ -+static int -+qca8k_port_max_mtu(struct dsa_switch *ds, int port) -+{ -+ return QCA8K_MAX_MTU; -+} -+ -+static int -+qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr, -+ u16 port_mask, u16 vid) -+{ -+ /* Set the vid to the port vlan id if no vid is set */ -+ if (!vid) -+ vid = QCA8K_PORT_VID_DEF; -+ -+ return qca8k_fdb_add(priv, addr, port_mask, vid, -+ QCA8K_ATU_STATUS_STATIC); -+} -+ -+static int -+qca8k_port_fdb_add(struct dsa_switch *ds, int port, -+ const unsigned char *addr, u16 vid) -+{ -+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -+ u16 port_mask = BIT(port); -+ -+ return qca8k_port_fdb_insert(priv, addr, port_mask, vid); -+} -+ -+static int -+qca8k_port_fdb_del(struct dsa_switch *ds, int port, -+ const unsigned char *addr, u16 vid) -+{ -+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -+ u16 port_mask = BIT(port); -+ -+ if (!vid) -+ vid = QCA8K_PORT_VID_DEF; -+ -+ return qca8k_fdb_del(priv, addr, port_mask, vid); -+} -+ -+static int -+qca8k_port_fdb_dump(struct dsa_switch *ds, int port, -+ dsa_fdb_dump_cb_t *cb, void *data) -+{ -+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -+ struct qca8k_fdb _fdb = { 0 }; -+ int cnt = QCA8K_NUM_FDB_RECORDS; -+ bool is_static; -+ int ret = 0; -+ -+ mutex_lock(&priv->reg_mutex); -+ while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) { -+ if (!_fdb.aging) -+ break; -+ is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC); -+ ret = cb(_fdb.mac, _fdb.vid, is_static, data); -+ if (ret) -+ break; -+ } -+ mutex_unlock(&priv->reg_mutex); -+ -+ return 0; -+} -+ -+static int -+qca8k_port_mdb_add(struct dsa_switch *ds, int port, -+ const struct switchdev_obj_port_mdb *mdb) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ const u8 
*addr = mdb->addr; -+ u16 vid = mdb->vid; -+ -+ return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid); -+} -+ -+static int -+qca8k_port_mdb_del(struct dsa_switch *ds, int port, -+ const struct switchdev_obj_port_mdb *mdb) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ const u8 *addr = mdb->addr; -+ u16 vid = mdb->vid; -+ -+ return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid); -+} -+ -+static int -+qca8k_port_mirror_add(struct dsa_switch *ds, int port, -+ struct dsa_mall_mirror_tc_entry *mirror, -+ bool ingress) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ int monitor_port, ret; -+ u32 reg, val; -+ -+ /* Check for existent entry */ -+ if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port)) -+ return -EEXIST; -+ -+ ret = regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val); -+ if (ret) -+ return ret; -+ -+ /* QCA83xx can have only one port set to mirror mode. -+ * Check that the correct port is requested and return error otherwise. -+ * When no mirror port is set, the values is set to 0xF -+ */ -+ monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val); -+ if (monitor_port != 0xF && monitor_port != mirror->to_local_port) -+ return -EEXIST; -+ -+ /* Set the monitor port */ -+ val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, -+ mirror->to_local_port); -+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, -+ QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val); -+ if (ret) -+ return ret; -+ -+ if (ingress) { -+ reg = QCA8K_PORT_LOOKUP_CTRL(port); -+ val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN; -+ } else { -+ reg = QCA8K_REG_PORT_HOL_CTRL1(port); -+ val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN; -+ } -+ -+ ret = regmap_update_bits(priv->regmap, reg, val, val); -+ if (ret) -+ return ret; -+ -+ /* Track mirror port for tx and rx to decide when the -+ * mirror port has to be disabled. -+ */ -+ if (ingress) -+ priv->mirror_rx |= BIT(port); -+ else -+ priv->mirror_tx |= BIT(port); -+ -+ return 0; -+} -+ -+static void -+qca8k_port_mirror_del(struct dsa_switch *ds, int port, -+ struct dsa_mall_mirror_tc_entry *mirror) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ u32 reg, val; -+ int ret; -+ -+ if (mirror->ingress) { -+ reg = QCA8K_PORT_LOOKUP_CTRL(port); -+ val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN; -+ } else { -+ reg = QCA8K_REG_PORT_HOL_CTRL1(port); -+ val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN; -+ } -+ -+ ret = regmap_clear_bits(priv->regmap, reg, val); -+ if (ret) -+ goto err; -+ -+ if (mirror->ingress) -+ priv->mirror_rx &= ~BIT(port); -+ else -+ priv->mirror_tx &= ~BIT(port); -+ -+ /* No port set to send packet to mirror port. 
Disable mirror port */ -+ if (!priv->mirror_rx && !priv->mirror_tx) { -+ val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, 0xF); -+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, -+ QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val); -+ if (ret) -+ goto err; -+ } -+err: -+ dev_err(priv->dev, "Failed to del mirror port from %d", port); -+} -+ -+static int -+qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, -+ struct netlink_ext_ack *extack) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ int ret; -+ -+ if (vlan_filtering) { -+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), -+ QCA8K_PORT_LOOKUP_VLAN_MODE_MASK, -+ QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE); -+ } else { -+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), -+ QCA8K_PORT_LOOKUP_VLAN_MODE_MASK, -+ QCA8K_PORT_LOOKUP_VLAN_MODE_NONE); -+ } -+ -+ return ret; -+} -+ -+static int -+qca8k_port_vlan_add(struct dsa_switch *ds, int port, -+ const struct switchdev_obj_port_vlan *vlan, -+ struct netlink_ext_ack *extack) -+{ -+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; -+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; -+ struct qca8k_priv *priv = ds->priv; -+ int ret; -+ -+ ret = qca8k_vlan_add(priv, port, vlan->vid, untagged); -+ if (ret) { -+ dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret); -+ return ret; -+ } -+ -+ if (pvid) { -+ ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port), -+ QCA8K_EGREES_VLAN_PORT_MASK(port), -+ QCA8K_EGREES_VLAN_PORT(port, vlan->vid)); -+ if (ret) -+ return ret; -+ -+ ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port), -+ QCA8K_PORT_VLAN_CVID(vlan->vid) | -+ QCA8K_PORT_VLAN_SVID(vlan->vid)); -+ } -+ -+ return ret; -+} -+ -+static int -+qca8k_port_vlan_del(struct dsa_switch *ds, int port, -+ const struct switchdev_obj_port_vlan *vlan) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ int ret; -+ -+ ret = qca8k_vlan_del(priv, port, vlan->vid); -+ if (ret) -+ dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret); -+ -+ return ret; -+} -+ -+static u32 qca8k_get_phy_flags(struct dsa_switch *ds, int port) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ -+ /* Communicate to the phy internal driver the switch revision. -+ * Based on the switch revision different values needs to be -+ * set to the dbg and mmd reg on the phy. -+ * The first 2 bit are used to communicate the switch revision -+ * to the phy driver. 
-+ */ -+ if (port > 0 && port < 6) -+ return priv->switch_revision; -+ -+ return 0; -+} -+ -+static enum dsa_tag_protocol -+qca8k_get_tag_protocol(struct dsa_switch *ds, int port, -+ enum dsa_tag_protocol mp) -+{ -+ return DSA_TAG_PROTO_QCA; -+} -+ -+static bool -+qca8k_lag_can_offload(struct dsa_switch *ds, -+ struct net_device *lag, -+ struct netdev_lag_upper_info *info) -+{ -+ struct dsa_port *dp; -+ int id, members = 0; -+ -+ id = dsa_lag_id(ds->dst, lag); -+ if (id < 0 || id >= ds->num_lag_ids) -+ return false; -+ -+ dsa_lag_foreach_port(dp, ds->dst, lag) -+ /* Includes the port joining the LAG */ -+ members++; -+ -+ if (members > QCA8K_NUM_PORTS_FOR_LAG) -+ return false; -+ -+ if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) -+ return false; -+ -+ if (info->hash_type != NETDEV_LAG_HASH_L2 && -+ info->hash_type != NETDEV_LAG_HASH_L23) -+ return false; -+ -+ return true; -+} -+ -+static int -+qca8k_lag_setup_hash(struct dsa_switch *ds, -+ struct net_device *lag, -+ struct netdev_lag_upper_info *info) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ bool unique_lag = true; -+ u32 hash = 0; -+ int i, id; -+ -+ id = dsa_lag_id(ds->dst, lag); -+ -+ switch (info->hash_type) { -+ case NETDEV_LAG_HASH_L23: -+ hash |= QCA8K_TRUNK_HASH_SIP_EN; -+ hash |= QCA8K_TRUNK_HASH_DIP_EN; -+ fallthrough; -+ case NETDEV_LAG_HASH_L2: -+ hash |= QCA8K_TRUNK_HASH_SA_EN; -+ hash |= QCA8K_TRUNK_HASH_DA_EN; -+ break; -+ default: /* We should NEVER reach this */ -+ return -EOPNOTSUPP; -+ } -+ -+ /* Check if we are the unique configured LAG */ -+ dsa_lags_foreach_id(i, ds->dst) -+ if (i != id && dsa_lag_dev(ds->dst, i)) { -+ unique_lag = false; -+ break; -+ } -+ -+ /* Hash Mode is global. Make sure the same Hash Mode -+ * is set to all the 4 possible lag. -+ * If we are the unique LAG we can set whatever hash -+ * mode we want. -+ * To change hash mode it's needed to remove all LAG -+ * and change the mode with the latest. -+ */ -+ if (unique_lag) { -+ priv->lag_hash_mode = hash; -+ } else if (priv->lag_hash_mode != hash) { -+ netdev_err(lag, "Error: Mismateched Hash Mode across different lag is not supported\n"); -+ return -EOPNOTSUPP; -+ } -+ -+ return regmap_update_bits(priv->regmap, QCA8K_TRUNK_HASH_EN_CTRL, -+ QCA8K_TRUNK_HASH_MASK, hash); -+} -+ -+static int -+qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port, -+ struct net_device *lag, bool delete) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ int ret, id, i; -+ u32 val; -+ -+ id = dsa_lag_id(ds->dst, lag); -+ -+ /* Read current port member */ -+ ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val); -+ if (ret) -+ return ret; -+ -+ /* Shift val to the correct trunk */ -+ val >>= QCA8K_REG_GOL_TRUNK_SHIFT(id); -+ val &= QCA8K_REG_GOL_TRUNK_MEMBER_MASK; -+ if (delete) -+ val &= ~BIT(port); -+ else -+ val |= BIT(port); -+ -+ /* Update port member. 
With empty portmap disable trunk */ -+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, -+ QCA8K_REG_GOL_TRUNK_MEMBER(id) | -+ QCA8K_REG_GOL_TRUNK_EN(id), -+ !val << QCA8K_REG_GOL_TRUNK_SHIFT(id) | -+ val << QCA8K_REG_GOL_TRUNK_SHIFT(id)); -+ -+ /* Search empty member if adding or port on deleting */ -+ for (i = 0; i < QCA8K_NUM_PORTS_FOR_LAG; i++) { -+ ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), &val); -+ if (ret) -+ return ret; -+ -+ val >>= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i); -+ val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK; -+ -+ if (delete) { -+ /* If port flagged to be disabled assume this member is -+ * empty -+ */ -+ if (val != QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK) -+ continue; -+ -+ val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK; -+ if (val != port) -+ continue; -+ } else { -+ /* If port flagged to be enabled assume this member is -+ * already set -+ */ -+ if (val == QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK) -+ continue; -+ } -+ -+ /* We have found the member to add/remove */ -+ break; -+ } -+ -+ /* Set port in the correct port mask or disable port if in delete mode */ -+ return regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), -+ QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(id, i) | -+ QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(id, i), -+ !delete << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i) | -+ port << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i)); -+} -+ -+static int -+qca8k_port_lag_join(struct dsa_switch *ds, int port, -+ struct net_device *lag, -+ struct netdev_lag_upper_info *info) -+{ -+ int ret; -+ -+ if (!qca8k_lag_can_offload(ds, lag, info)) -+ return -EOPNOTSUPP; -+ -+ ret = qca8k_lag_setup_hash(ds, lag, info); -+ if (ret) -+ return ret; -+ -+ return qca8k_lag_refresh_portmap(ds, port, lag, false); -+} -+ -+static int -+qca8k_port_lag_leave(struct dsa_switch *ds, int port, -+ struct net_device *lag) -+{ -+ return qca8k_lag_refresh_portmap(ds, port, lag, true); -+} -+ -+static void -+qca8k_master_change(struct dsa_switch *ds, const struct net_device *master, -+ bool operational) -+{ -+ struct dsa_port *dp = master->dsa_ptr; -+ struct qca8k_priv *priv = ds->priv; -+ -+ /* Ethernet MIB/MDIO is only supported for CPU port 0 */ -+ if (dp->index != 0) -+ return; -+ -+ mutex_lock(&priv->mgmt_eth_data.mutex); -+ mutex_lock(&priv->mib_eth_data.mutex); -+ -+ priv->mgmt_master = operational ? 
(struct net_device *)master : NULL; -+ -+ mutex_unlock(&priv->mib_eth_data.mutex); -+ mutex_unlock(&priv->mgmt_eth_data.mutex); -+} -+ -+static int qca8k_connect_tag_protocol(struct dsa_switch *ds, -+ enum dsa_tag_protocol proto) -+{ -+ struct qca_tagger_data *tagger_data; -+ -+ switch (proto) { -+ case DSA_TAG_PROTO_QCA: -+ tagger_data = ds->tagger_data; -+ -+ tagger_data->rw_reg_ack_handler = qca8k_rw_reg_ack_handler; -+ tagger_data->mib_autocast_handler = qca8k_mib_autocast_handler; -+ -+ break; -+ default: -+ return -EOPNOTSUPP; -+ } -+ -+ return 0; -+} -+ -+static const struct dsa_switch_ops qca8k_switch_ops = { -+ .get_tag_protocol = qca8k_get_tag_protocol, -+ .setup = qca8k_setup, -+ .get_strings = qca8k_get_strings, -+ .get_ethtool_stats = qca8k_get_ethtool_stats, -+ .get_sset_count = qca8k_get_sset_count, -+ .set_ageing_time = qca8k_set_ageing_time, -+ .get_mac_eee = qca8k_get_mac_eee, -+ .set_mac_eee = qca8k_set_mac_eee, -+ .port_enable = qca8k_port_enable, -+ .port_disable = qca8k_port_disable, -+ .port_change_mtu = qca8k_port_change_mtu, -+ .port_max_mtu = qca8k_port_max_mtu, -+ .port_stp_state_set = qca8k_port_stp_state_set, -+ .port_bridge_join = qca8k_port_bridge_join, -+ .port_bridge_leave = qca8k_port_bridge_leave, -+ .port_fast_age = qca8k_port_fast_age, -+ .port_fdb_add = qca8k_port_fdb_add, -+ .port_fdb_del = qca8k_port_fdb_del, -+ .port_fdb_dump = qca8k_port_fdb_dump, -+ .port_mdb_add = qca8k_port_mdb_add, -+ .port_mdb_del = qca8k_port_mdb_del, -+ .port_mirror_add = qca8k_port_mirror_add, -+ .port_mirror_del = qca8k_port_mirror_del, -+ .port_vlan_filtering = qca8k_port_vlan_filtering, -+ .port_vlan_add = qca8k_port_vlan_add, -+ .port_vlan_del = qca8k_port_vlan_del, -+ .phylink_validate = qca8k_phylink_validate, -+ .phylink_mac_link_state = qca8k_phylink_mac_link_state, -+ .phylink_mac_config = qca8k_phylink_mac_config, -+ .phylink_mac_link_down = qca8k_phylink_mac_link_down, -+ .phylink_mac_link_up = qca8k_phylink_mac_link_up, -+ .get_phy_flags = qca8k_get_phy_flags, -+ .port_lag_join = qca8k_port_lag_join, -+ .port_lag_leave = qca8k_port_lag_leave, -+ .master_state_change = qca8k_master_change, -+ .connect_tag_protocol = qca8k_connect_tag_protocol, -+}; -+ -+static int qca8k_read_switch_id(struct qca8k_priv *priv) -+{ -+ const struct qca8k_match_data *data; -+ u32 val; -+ u8 id; -+ int ret; -+ -+ /* get the switches ID from the compatible */ -+ data = of_device_get_match_data(priv->dev); -+ if (!data) -+ return -ENODEV; -+ -+ ret = qca8k_read(priv, QCA8K_REG_MASK_CTRL, &val); -+ if (ret < 0) -+ return -ENODEV; -+ -+ id = QCA8K_MASK_CTRL_DEVICE_ID(val); -+ if (id != data->id) { -+ dev_err(priv->dev, "Switch id detected %x but expected %x", id, data->id); -+ return -ENODEV; -+ } -+ -+ priv->switch_id = id; -+ -+ /* Save revision to communicate to the internal PHY driver */ -+ priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val); -+ -+ return 0; -+} -+ -+static int -+qca8k_sw_probe(struct mdio_device *mdiodev) -+{ -+ struct qca8k_priv *priv; -+ int ret; -+ -+ /* allocate the private data struct so that we can probe the switches -+ * ID register -+ */ -+ priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL); -+ if (!priv) -+ return -ENOMEM; -+ -+ priv->bus = mdiodev->bus; -+ priv->dev = &mdiodev->dev; -+ -+ priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset", -+ GPIOD_ASIS); -+ if (IS_ERR(priv->reset_gpio)) -+ return PTR_ERR(priv->reset_gpio); -+ -+ if (priv->reset_gpio) { -+ gpiod_set_value_cansleep(priv->reset_gpio, 1); -+ /* The active low duration 
must be greater than 10 ms -+ * and checkpatch.pl wants 20 ms. -+ */ -+ msleep(20); -+ gpiod_set_value_cansleep(priv->reset_gpio, 0); -+ } -+ -+ /* Start by setting up the register mapping */ -+ priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, priv, -+ &qca8k_regmap_config); -+ if (IS_ERR(priv->regmap)) { -+ dev_err(priv->dev, "regmap initialization failed"); -+ return PTR_ERR(priv->regmap); -+ } -+ -+ priv->mdio_cache.page = 0xffff; -+ priv->mdio_cache.lo = 0xffff; -+ priv->mdio_cache.hi = 0xffff; -+ -+ /* Check the detected switch id */ -+ ret = qca8k_read_switch_id(priv); -+ if (ret) -+ return ret; -+ -+ priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL); -+ if (!priv->ds) -+ return -ENOMEM; -+ -+ mutex_init(&priv->mgmt_eth_data.mutex); -+ init_completion(&priv->mgmt_eth_data.rw_done); -+ -+ mutex_init(&priv->mib_eth_data.mutex); -+ init_completion(&priv->mib_eth_data.rw_done); -+ -+ priv->ds->dev = &mdiodev->dev; -+ priv->ds->num_ports = QCA8K_NUM_PORTS; -+ priv->ds->priv = priv; -+ priv->ds->ops = &qca8k_switch_ops; -+ mutex_init(&priv->reg_mutex); -+ dev_set_drvdata(&mdiodev->dev, priv); -+ -+ return dsa_register_switch(priv->ds); -+} -+ -+static void -+qca8k_sw_remove(struct mdio_device *mdiodev) -+{ -+ struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev); -+ int i; -+ -+ if (!priv) -+ return; -+ -+ for (i = 0; i < QCA8K_NUM_PORTS; i++) -+ qca8k_port_set_status(priv, i, 0); -+ -+ dsa_unregister_switch(priv->ds); -+ -+ dev_set_drvdata(&mdiodev->dev, NULL); -+} -+ -+static void qca8k_sw_shutdown(struct mdio_device *mdiodev) -+{ -+ struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev); -+ -+ if (!priv) -+ return; -+ -+ dsa_switch_shutdown(priv->ds); -+ -+ dev_set_drvdata(&mdiodev->dev, NULL); -+} -+ -+#ifdef CONFIG_PM_SLEEP -+static void -+qca8k_set_pm(struct qca8k_priv *priv, int enable) -+{ -+ int port; -+ -+ for (port = 0; port < QCA8K_NUM_PORTS; port++) { -+ /* Do not enable on resume if the port was -+ * disabled before. 
-+ */ -+ if (!(priv->port_enabled_map & BIT(port))) -+ continue; -+ -+ qca8k_port_set_status(priv, port, enable); -+ } -+} -+ -+static int qca8k_suspend(struct device *dev) -+{ -+ struct qca8k_priv *priv = dev_get_drvdata(dev); -+ -+ qca8k_set_pm(priv, 0); -+ -+ return dsa_switch_suspend(priv->ds); -+} -+ -+static int qca8k_resume(struct device *dev) -+{ -+ struct qca8k_priv *priv = dev_get_drvdata(dev); -+ -+ qca8k_set_pm(priv, 1); -+ -+ return dsa_switch_resume(priv->ds); -+} -+#endif /* CONFIG_PM_SLEEP */ -+ -+static SIMPLE_DEV_PM_OPS(qca8k_pm_ops, -+ qca8k_suspend, qca8k_resume); -+ -+static const struct qca8k_match_data qca8327 = { -+ .id = QCA8K_ID_QCA8327, -+ .reduced_package = true, -+ .mib_count = QCA8K_QCA832X_MIB_COUNT, -+}; -+ -+static const struct qca8k_match_data qca8328 = { -+ .id = QCA8K_ID_QCA8327, -+ .mib_count = QCA8K_QCA832X_MIB_COUNT, -+}; -+ -+static const struct qca8k_match_data qca833x = { -+ .id = QCA8K_ID_QCA8337, -+ .mib_count = QCA8K_QCA833X_MIB_COUNT, -+}; -+ -+static const struct of_device_id qca8k_of_match[] = { -+ { .compatible = "qca,qca8327", .data = &qca8327 }, -+ { .compatible = "qca,qca8328", .data = &qca8328 }, -+ { .compatible = "qca,qca8334", .data = &qca833x }, -+ { .compatible = "qca,qca8337", .data = &qca833x }, -+ { /* sentinel */ }, -+}; -+ -+static struct mdio_driver qca8kmdio_driver = { -+ .probe = qca8k_sw_probe, -+ .remove = qca8k_sw_remove, -+ .shutdown = qca8k_sw_shutdown, -+ .mdiodrv.driver = { -+ .name = "qca8k", -+ .of_match_table = qca8k_of_match, -+ .pm = &qca8k_pm_ops, -+ }, -+}; -+ -+mdio_module_driver(qca8kmdio_driver); -+ -+MODULE_AUTHOR("Mathieu Olivari, John Crispin "); -+MODULE_DESCRIPTION("Driver for QCA8K ethernet switch family"); -+MODULE_LICENSE("GPL v2"); -+MODULE_ALIAS("platform:qca8k"); ---- /dev/null -+++ b/drivers/net/dsa/qca/qca8k.h -@@ -0,0 +1,411 @@ -+/* SPDX-License-Identifier: GPL-2.0-only */ -+/* -+ * Copyright (C) 2009 Felix Fietkau -+ * Copyright (C) 2011-2012 Gabor Juhos -+ * Copyright (c) 2015, The Linux Foundation. All rights reserved. 
-+ */ -+ -+#ifndef __QCA8K_H -+#define __QCA8K_H -+ -+#include -+#include -+#include -+#include -+ -+#define QCA8K_ETHERNET_MDIO_PRIORITY 7 -+#define QCA8K_ETHERNET_PHY_PRIORITY 6 -+#define QCA8K_ETHERNET_TIMEOUT 100 -+ -+#define QCA8K_NUM_PORTS 7 -+#define QCA8K_NUM_CPU_PORTS 2 -+#define QCA8K_MAX_MTU 9000 -+#define QCA8K_NUM_LAGS 4 -+#define QCA8K_NUM_PORTS_FOR_LAG 4 -+ -+#define PHY_ID_QCA8327 0x004dd034 -+#define QCA8K_ID_QCA8327 0x12 -+#define PHY_ID_QCA8337 0x004dd036 -+#define QCA8K_ID_QCA8337 0x13 -+ -+#define QCA8K_QCA832X_MIB_COUNT 39 -+#define QCA8K_QCA833X_MIB_COUNT 41 -+ -+#define QCA8K_BUSY_WAIT_TIMEOUT 2000 -+ -+#define QCA8K_NUM_FDB_RECORDS 2048 -+ -+#define QCA8K_PORT_VID_DEF 1 -+ -+/* Global control registers */ -+#define QCA8K_REG_MASK_CTRL 0x000 -+#define QCA8K_MASK_CTRL_REV_ID_MASK GENMASK(7, 0) -+#define QCA8K_MASK_CTRL_REV_ID(x) FIELD_GET(QCA8K_MASK_CTRL_REV_ID_MASK, x) -+#define QCA8K_MASK_CTRL_DEVICE_ID_MASK GENMASK(15, 8) -+#define QCA8K_MASK_CTRL_DEVICE_ID(x) FIELD_GET(QCA8K_MASK_CTRL_DEVICE_ID_MASK, x) -+#define QCA8K_REG_PORT0_PAD_CTRL 0x004 -+#define QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN BIT(31) -+#define QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE BIT(19) -+#define QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE BIT(18) -+#define QCA8K_REG_PORT5_PAD_CTRL 0x008 -+#define QCA8K_REG_PORT6_PAD_CTRL 0x00c -+#define QCA8K_PORT_PAD_RGMII_EN BIT(26) -+#define QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK GENMASK(23, 22) -+#define QCA8K_PORT_PAD_RGMII_TX_DELAY(x) FIELD_PREP(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, x) -+#define QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK GENMASK(21, 20) -+#define QCA8K_PORT_PAD_RGMII_RX_DELAY(x) FIELD_PREP(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, x) -+#define QCA8K_PORT_PAD_RGMII_TX_DELAY_EN BIT(25) -+#define QCA8K_PORT_PAD_RGMII_RX_DELAY_EN BIT(24) -+#define QCA8K_PORT_PAD_SGMII_EN BIT(7) -+#define QCA8K_REG_PWS 0x010 -+#define QCA8K_PWS_POWER_ON_SEL BIT(31) -+/* This reg is only valid for QCA832x and toggle the package -+ * type from 176 pin (by default) to 148 pin used on QCA8327 -+ */ -+#define QCA8327_PWS_PACKAGE148_EN BIT(30) -+#define QCA8K_PWS_LED_OPEN_EN_CSR BIT(24) -+#define QCA8K_PWS_SERDES_AEN_DIS BIT(7) -+#define QCA8K_REG_MODULE_EN 0x030 -+#define QCA8K_MODULE_EN_MIB BIT(0) -+#define QCA8K_REG_MIB 0x034 -+#define QCA8K_MIB_FUNC GENMASK(26, 24) -+#define QCA8K_MIB_CPU_KEEP BIT(20) -+#define QCA8K_MIB_BUSY BIT(17) -+#define QCA8K_MDIO_MASTER_CTRL 0x3c -+#define QCA8K_MDIO_MASTER_BUSY BIT(31) -+#define QCA8K_MDIO_MASTER_EN BIT(30) -+#define QCA8K_MDIO_MASTER_READ BIT(27) -+#define QCA8K_MDIO_MASTER_WRITE 0 -+#define QCA8K_MDIO_MASTER_SUP_PRE BIT(26) -+#define QCA8K_MDIO_MASTER_PHY_ADDR_MASK GENMASK(25, 21) -+#define QCA8K_MDIO_MASTER_PHY_ADDR(x) FIELD_PREP(QCA8K_MDIO_MASTER_PHY_ADDR_MASK, x) -+#define QCA8K_MDIO_MASTER_REG_ADDR_MASK GENMASK(20, 16) -+#define QCA8K_MDIO_MASTER_REG_ADDR(x) FIELD_PREP(QCA8K_MDIO_MASTER_REG_ADDR_MASK, x) -+#define QCA8K_MDIO_MASTER_DATA_MASK GENMASK(15, 0) -+#define QCA8K_MDIO_MASTER_DATA(x) FIELD_PREP(QCA8K_MDIO_MASTER_DATA_MASK, x) -+#define QCA8K_MDIO_MASTER_MAX_PORTS 5 -+#define QCA8K_MDIO_MASTER_MAX_REG 32 -+#define QCA8K_GOL_MAC_ADDR0 0x60 -+#define QCA8K_GOL_MAC_ADDR1 0x64 -+#define QCA8K_MAX_FRAME_SIZE 0x78 -+#define QCA8K_REG_PORT_STATUS(_i) (0x07c + (_i) * 4) -+#define QCA8K_PORT_STATUS_SPEED GENMASK(1, 0) -+#define QCA8K_PORT_STATUS_SPEED_10 0 -+#define QCA8K_PORT_STATUS_SPEED_100 0x1 -+#define QCA8K_PORT_STATUS_SPEED_1000 0x2 -+#define QCA8K_PORT_STATUS_TXMAC BIT(2) -+#define QCA8K_PORT_STATUS_RXMAC BIT(3) -+#define 
QCA8K_PORT_STATUS_TXFLOW BIT(4) -+#define QCA8K_PORT_STATUS_RXFLOW BIT(5) -+#define QCA8K_PORT_STATUS_DUPLEX BIT(6) -+#define QCA8K_PORT_STATUS_LINK_UP BIT(8) -+#define QCA8K_PORT_STATUS_LINK_AUTO BIT(9) -+#define QCA8K_PORT_STATUS_LINK_PAUSE BIT(10) -+#define QCA8K_PORT_STATUS_FLOW_AUTO BIT(12) -+#define QCA8K_REG_PORT_HDR_CTRL(_i) (0x9c + (_i * 4)) -+#define QCA8K_PORT_HDR_CTRL_RX_MASK GENMASK(3, 2) -+#define QCA8K_PORT_HDR_CTRL_TX_MASK GENMASK(1, 0) -+#define QCA8K_PORT_HDR_CTRL_ALL 2 -+#define QCA8K_PORT_HDR_CTRL_MGMT 1 -+#define QCA8K_PORT_HDR_CTRL_NONE 0 -+#define QCA8K_REG_SGMII_CTRL 0x0e0 -+#define QCA8K_SGMII_EN_PLL BIT(1) -+#define QCA8K_SGMII_EN_RX BIT(2) -+#define QCA8K_SGMII_EN_TX BIT(3) -+#define QCA8K_SGMII_EN_SD BIT(4) -+#define QCA8K_SGMII_CLK125M_DELAY BIT(7) -+#define QCA8K_SGMII_MODE_CTRL_MASK GENMASK(23, 22) -+#define QCA8K_SGMII_MODE_CTRL(x) FIELD_PREP(QCA8K_SGMII_MODE_CTRL_MASK, x) -+#define QCA8K_SGMII_MODE_CTRL_BASEX QCA8K_SGMII_MODE_CTRL(0x0) -+#define QCA8K_SGMII_MODE_CTRL_PHY QCA8K_SGMII_MODE_CTRL(0x1) -+#define QCA8K_SGMII_MODE_CTRL_MAC QCA8K_SGMII_MODE_CTRL(0x2) -+ -+/* MAC_PWR_SEL registers */ -+#define QCA8K_REG_MAC_PWR_SEL 0x0e4 -+#define QCA8K_MAC_PWR_RGMII1_1_8V BIT(18) -+#define QCA8K_MAC_PWR_RGMII0_1_8V BIT(19) -+ -+/* EEE control registers */ -+#define QCA8K_REG_EEE_CTRL 0x100 -+#define QCA8K_REG_EEE_CTRL_LPI_EN(_i) ((_i + 1) * 2) -+ -+/* TRUNK_HASH_EN registers */ -+#define QCA8K_TRUNK_HASH_EN_CTRL 0x270 -+#define QCA8K_TRUNK_HASH_SIP_EN BIT(3) -+#define QCA8K_TRUNK_HASH_DIP_EN BIT(2) -+#define QCA8K_TRUNK_HASH_SA_EN BIT(1) -+#define QCA8K_TRUNK_HASH_DA_EN BIT(0) -+#define QCA8K_TRUNK_HASH_MASK GENMASK(3, 0) -+ -+/* ACL registers */ -+#define QCA8K_REG_PORT_VLAN_CTRL0(_i) (0x420 + (_i * 8)) -+#define QCA8K_PORT_VLAN_CVID_MASK GENMASK(27, 16) -+#define QCA8K_PORT_VLAN_CVID(x) FIELD_PREP(QCA8K_PORT_VLAN_CVID_MASK, x) -+#define QCA8K_PORT_VLAN_SVID_MASK GENMASK(11, 0) -+#define QCA8K_PORT_VLAN_SVID(x) FIELD_PREP(QCA8K_PORT_VLAN_SVID_MASK, x) -+#define QCA8K_REG_PORT_VLAN_CTRL1(_i) (0x424 + (_i * 8)) -+#define QCA8K_REG_IPV4_PRI_BASE_ADDR 0x470 -+#define QCA8K_REG_IPV4_PRI_ADDR_MASK 0x474 -+ -+/* Lookup registers */ -+#define QCA8K_REG_ATU_DATA0 0x600 -+#define QCA8K_ATU_ADDR2_MASK GENMASK(31, 24) -+#define QCA8K_ATU_ADDR3_MASK GENMASK(23, 16) -+#define QCA8K_ATU_ADDR4_MASK GENMASK(15, 8) -+#define QCA8K_ATU_ADDR5_MASK GENMASK(7, 0) -+#define QCA8K_REG_ATU_DATA1 0x604 -+#define QCA8K_ATU_PORT_MASK GENMASK(22, 16) -+#define QCA8K_ATU_ADDR0_MASK GENMASK(15, 8) -+#define QCA8K_ATU_ADDR1_MASK GENMASK(7, 0) -+#define QCA8K_REG_ATU_DATA2 0x608 -+#define QCA8K_ATU_VID_MASK GENMASK(19, 8) -+#define QCA8K_ATU_STATUS_MASK GENMASK(3, 0) -+#define QCA8K_ATU_STATUS_STATIC 0xf -+#define QCA8K_REG_ATU_FUNC 0x60c -+#define QCA8K_ATU_FUNC_BUSY BIT(31) -+#define QCA8K_ATU_FUNC_PORT_EN BIT(14) -+#define QCA8K_ATU_FUNC_MULTI_EN BIT(13) -+#define QCA8K_ATU_FUNC_FULL BIT(12) -+#define QCA8K_ATU_FUNC_PORT_MASK GENMASK(11, 8) -+#define QCA8K_REG_VTU_FUNC0 0x610 -+#define QCA8K_VTU_FUNC0_VALID BIT(20) -+#define QCA8K_VTU_FUNC0_IVL_EN BIT(19) -+/* QCA8K_VTU_FUNC0_EG_MODE_MASK GENMASK(17, 4) -+ * It does contain VLAN_MODE for each port [5:4] for port0, -+ * [7:6] for port1 ... [17:16] for port6. Use virtual port -+ * define to handle this. 
-+ */ -+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i) (4 + (_i) * 2) -+#define QCA8K_VTU_FUNC0_EG_MODE_MASK GENMASK(1, 0) -+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(_i) (GENMASK(1, 0) << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i)) -+#define QCA8K_VTU_FUNC0_EG_MODE_UNMOD FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x0) -+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_UNMOD(_i) (QCA8K_VTU_FUNC0_EG_MODE_UNMOD << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i)) -+#define QCA8K_VTU_FUNC0_EG_MODE_UNTAG FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x1) -+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(_i) (QCA8K_VTU_FUNC0_EG_MODE_UNTAG << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i)) -+#define QCA8K_VTU_FUNC0_EG_MODE_TAG FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x2) -+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(_i) (QCA8K_VTU_FUNC0_EG_MODE_TAG << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i)) -+#define QCA8K_VTU_FUNC0_EG_MODE_NOT FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x3) -+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(_i) (QCA8K_VTU_FUNC0_EG_MODE_NOT << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i)) -+#define QCA8K_REG_VTU_FUNC1 0x614 -+#define QCA8K_VTU_FUNC1_BUSY BIT(31) -+#define QCA8K_VTU_FUNC1_VID_MASK GENMASK(27, 16) -+#define QCA8K_VTU_FUNC1_FULL BIT(4) -+#define QCA8K_REG_ATU_CTRL 0x618 -+#define QCA8K_ATU_AGE_TIME_MASK GENMASK(15, 0) -+#define QCA8K_ATU_AGE_TIME(x) FIELD_PREP(QCA8K_ATU_AGE_TIME_MASK, (x)) -+#define QCA8K_REG_GLOBAL_FW_CTRL0 0x620 -+#define QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN BIT(10) -+#define QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM GENMASK(7, 4) -+#define QCA8K_REG_GLOBAL_FW_CTRL1 0x624 -+#define QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK GENMASK(30, 24) -+#define QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK GENMASK(22, 16) -+#define QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK GENMASK(14, 8) -+#define QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK GENMASK(6, 0) -+#define QCA8K_PORT_LOOKUP_CTRL(_i) (0x660 + (_i) * 0xc) -+#define QCA8K_PORT_LOOKUP_MEMBER GENMASK(6, 0) -+#define QCA8K_PORT_LOOKUP_VLAN_MODE_MASK GENMASK(9, 8) -+#define QCA8K_PORT_LOOKUP_VLAN_MODE(x) FIELD_PREP(QCA8K_PORT_LOOKUP_VLAN_MODE_MASK, x) -+#define QCA8K_PORT_LOOKUP_VLAN_MODE_NONE QCA8K_PORT_LOOKUP_VLAN_MODE(0x0) -+#define QCA8K_PORT_LOOKUP_VLAN_MODE_FALLBACK QCA8K_PORT_LOOKUP_VLAN_MODE(0x1) -+#define QCA8K_PORT_LOOKUP_VLAN_MODE_CHECK QCA8K_PORT_LOOKUP_VLAN_MODE(0x2) -+#define QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE QCA8K_PORT_LOOKUP_VLAN_MODE(0x3) -+#define QCA8K_PORT_LOOKUP_STATE_MASK GENMASK(18, 16) -+#define QCA8K_PORT_LOOKUP_STATE(x) FIELD_PREP(QCA8K_PORT_LOOKUP_STATE_MASK, x) -+#define QCA8K_PORT_LOOKUP_STATE_DISABLED QCA8K_PORT_LOOKUP_STATE(0x0) -+#define QCA8K_PORT_LOOKUP_STATE_BLOCKING QCA8K_PORT_LOOKUP_STATE(0x1) -+#define QCA8K_PORT_LOOKUP_STATE_LISTENING QCA8K_PORT_LOOKUP_STATE(0x2) -+#define QCA8K_PORT_LOOKUP_STATE_LEARNING QCA8K_PORT_LOOKUP_STATE(0x3) -+#define QCA8K_PORT_LOOKUP_STATE_FORWARD QCA8K_PORT_LOOKUP_STATE(0x4) -+#define QCA8K_PORT_LOOKUP_LEARN BIT(20) -+#define QCA8K_PORT_LOOKUP_ING_MIRROR_EN BIT(25) -+ -+#define QCA8K_REG_GOL_TRUNK_CTRL0 0x700 -+/* 4 max trunk first -+ * first 6 bit for member bitmap -+ * 7th bit is to enable trunk port -+ */ -+#define QCA8K_REG_GOL_TRUNK_SHIFT(_i) ((_i) * 8) -+#define QCA8K_REG_GOL_TRUNK_EN_MASK BIT(7) -+#define QCA8K_REG_GOL_TRUNK_EN(_i) (QCA8K_REG_GOL_TRUNK_EN_MASK << QCA8K_REG_GOL_TRUNK_SHIFT(_i)) -+#define QCA8K_REG_GOL_TRUNK_MEMBER_MASK GENMASK(6, 0) -+#define QCA8K_REG_GOL_TRUNK_MEMBER(_i) (QCA8K_REG_GOL_TRUNK_MEMBER_MASK << QCA8K_REG_GOL_TRUNK_SHIFT(_i)) -+/* 0x704 for TRUNK 0-1 --- 0x708 for TRUNK 2-3 */ -+#define 
QCA8K_REG_GOL_TRUNK_CTRL(_i) (0x704 + (((_i) / 2) * 4)) -+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK GENMASK(3, 0) -+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK BIT(3) -+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK GENMASK(2, 0) -+#define QCA8K_REG_GOL_TRUNK_ID_SHIFT(_i) (((_i) / 2) * 16) -+#define QCA8K_REG_GOL_MEM_ID_SHIFT(_i) ((_i) * 4) -+/* Complex shift: FIRST shift for port THEN shift for trunk */ -+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j) (QCA8K_REG_GOL_MEM_ID_SHIFT(_j) + QCA8K_REG_GOL_TRUNK_ID_SHIFT(_i)) -+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(_i, _j) (QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j)) -+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(_i, _j) (QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j)) -+ -+#define QCA8K_REG_GLOBAL_FC_THRESH 0x800 -+#define QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK GENMASK(24, 16) -+#define QCA8K_GLOBAL_FC_GOL_XON_THRES(x) FIELD_PREP(QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK, x) -+#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK GENMASK(8, 0) -+#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES(x) FIELD_PREP(QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK, x) -+ -+#define QCA8K_REG_PORT_HOL_CTRL0(_i) (0x970 + (_i) * 0x8) -+#define QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF_MASK GENMASK(3, 0) -+#define QCA8K_PORT_HOL_CTRL0_EG_PRI0(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF_MASK, x) -+#define QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF_MASK GENMASK(7, 4) -+#define QCA8K_PORT_HOL_CTRL0_EG_PRI1(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF_MASK, x) -+#define QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF_MASK GENMASK(11, 8) -+#define QCA8K_PORT_HOL_CTRL0_EG_PRI2(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF_MASK, x) -+#define QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF_MASK GENMASK(15, 12) -+#define QCA8K_PORT_HOL_CTRL0_EG_PRI3(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF_MASK, x) -+#define QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF_MASK GENMASK(19, 16) -+#define QCA8K_PORT_HOL_CTRL0_EG_PRI4(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF_MASK, x) -+#define QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF_MASK GENMASK(23, 20) -+#define QCA8K_PORT_HOL_CTRL0_EG_PRI5(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF_MASK, x) -+#define QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF_MASK GENMASK(29, 24) -+#define QCA8K_PORT_HOL_CTRL0_EG_PORT(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF_MASK, x) -+ -+#define QCA8K_REG_PORT_HOL_CTRL1(_i) (0x974 + (_i) * 0x8) -+#define QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK GENMASK(3, 0) -+#define QCA8K_PORT_HOL_CTRL1_ING(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK, x) -+#define QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN BIT(6) -+#define QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN BIT(7) -+#define QCA8K_PORT_HOL_CTRL1_WRED_EN BIT(8) -+#define QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN BIT(16) -+ -+/* Pkt edit registers */ -+#define QCA8K_EGREES_VLAN_PORT_SHIFT(_i) (16 * ((_i) % 2)) -+#define QCA8K_EGREES_VLAN_PORT_MASK(_i) (GENMASK(11, 0) << QCA8K_EGREES_VLAN_PORT_SHIFT(_i)) -+#define QCA8K_EGREES_VLAN_PORT(_i, x) ((x) << QCA8K_EGREES_VLAN_PORT_SHIFT(_i)) -+#define QCA8K_EGRESS_VLAN(x) (0x0c70 + (4 * (x / 2))) -+ -+/* L3 registers */ -+#define QCA8K_HROUTER_CONTROL 0xe00 -+#define QCA8K_HROUTER_CONTROL_GLB_LOCKTIME_M GENMASK(17, 16) -+#define QCA8K_HROUTER_CONTROL_GLB_LOCKTIME_S 16 -+#define QCA8K_HROUTER_CONTROL_ARP_AGE_MODE 1 -+#define QCA8K_HROUTER_PBASED_CONTROL1 0xe08 -+#define QCA8K_HROUTER_PBASED_CONTROL2 0xe0c -+#define QCA8K_HNAT_CONTROL 0xe38 -+ -+/* MIB registers */ -+#define QCA8K_PORT_MIB_COUNTER(_i) (0x1000 + (_i) * 0x100) -+ -+/* QCA specific MII registers */ 
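One detail in the pkt-edit defines above that is easy to misread: QCA8K_EGRESS_VLAN() packs the default egress VID of two ports into a single 32-bit register, the even-numbered port in bits 11:0 and the odd-numbered port in bits 27:16, which is what qca8k_port_vlan_add() earlier in this patch relies on when programming the PVID. A small worked sketch under that reading follows; the EX_* macros and example_egress_vlan_layout() are hypothetical stand-ins mirroring the defines above, not part of the patch.

#include <linux/bits.h>		/* GENMASK() */
#include <linux/printk.h>

/* Hypothetical copies of the pkt-edit macros above, for illustration. */
#define EX_EGREES_VLAN_PORT_SHIFT(_i)	(16 * ((_i) % 2))
#define EX_EGREES_VLAN_PORT_MASK(_i)	(GENMASK(11, 0) << EX_EGREES_VLAN_PORT_SHIFT(_i))
#define EX_EGREES_VLAN_PORT(_i, x)	((x) << EX_EGREES_VLAN_PORT_SHIFT(_i))
#define EX_EGRESS_VLAN(x)		(0x0c70 + (4 * ((x) / 2)))

static void example_egress_vlan_layout(void)
{
	/* Port 3, default VID 100: odd port lands in the high half of 0x0c74 */
	pr_info("reg=0x%04x mask=0x%08lx val=0x%08x\n",
		EX_EGRESS_VLAN(3),		/* 0x0c74 */
		EX_EGREES_VLAN_PORT_MASK(3),	/* 0x0fff0000 */
		EX_EGREES_VLAN_PORT(3, 100));	/* 0x00640000 */

	/* Port 2, default VID 5: even port lands in the low half of the same reg */
	pr_info("reg=0x%04x mask=0x%08lx val=0x%08x\n",
		EX_EGRESS_VLAN(2),		/* 0x0c74 */
		EX_EGREES_VLAN_PORT_MASK(2),	/* 0x00000fff */
		EX_EGREES_VLAN_PORT(2, 5));	/* 0x00000005 */
}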
-+#define MII_ATH_MMD_ADDR 0x0d -+#define MII_ATH_MMD_DATA 0x0e -+ -+enum { -+ QCA8K_PORT_SPEED_10M = 0, -+ QCA8K_PORT_SPEED_100M = 1, -+ QCA8K_PORT_SPEED_1000M = 2, -+ QCA8K_PORT_SPEED_ERR = 3, -+}; -+ -+enum qca8k_fdb_cmd { -+ QCA8K_FDB_FLUSH = 1, -+ QCA8K_FDB_LOAD = 2, -+ QCA8K_FDB_PURGE = 3, -+ QCA8K_FDB_FLUSH_PORT = 5, -+ QCA8K_FDB_NEXT = 6, -+ QCA8K_FDB_SEARCH = 7, -+}; -+ -+enum qca8k_vlan_cmd { -+ QCA8K_VLAN_FLUSH = 1, -+ QCA8K_VLAN_LOAD = 2, -+ QCA8K_VLAN_PURGE = 3, -+ QCA8K_VLAN_REMOVE_PORT = 4, -+ QCA8K_VLAN_NEXT = 5, -+ QCA8K_VLAN_READ = 6, -+}; -+ -+enum qca8k_mid_cmd { -+ QCA8K_MIB_FLUSH = 1, -+ QCA8K_MIB_FLUSH_PORT = 2, -+ QCA8K_MIB_CAST = 3, -+}; -+ -+struct qca8k_match_data { -+ u8 id; -+ bool reduced_package; -+ u8 mib_count; -+}; -+ -+enum { -+ QCA8K_CPU_PORT0, -+ QCA8K_CPU_PORT6, -+}; -+ -+struct qca8k_mgmt_eth_data { -+ struct completion rw_done; -+ struct mutex mutex; /* Enforce one mdio read/write at time */ -+ bool ack; -+ u32 seq; -+ u32 data[4]; -+}; -+ -+struct qca8k_mib_eth_data { -+ struct completion rw_done; -+ struct mutex mutex; /* Process one command at time */ -+ refcount_t port_parsed; /* Counter to track parsed port */ -+ u8 req_port; -+ u64 *data; /* pointer to ethtool data */ -+}; -+ -+struct qca8k_ports_config { -+ bool sgmii_rx_clk_falling_edge; -+ bool sgmii_tx_clk_falling_edge; -+ bool sgmii_enable_pll; -+ u8 rgmii_rx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */ -+ u8 rgmii_tx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */ -+}; -+ -+struct qca8k_mdio_cache { -+/* The 32bit switch registers are accessed indirectly. To achieve this we need -+ * to set the page of the register. Track the last page that was set to reduce -+ * mdio writes -+ */ -+ u16 page; -+/* lo and hi can also be cached and from Documentation we can skip one -+ * extra mdio write if lo or hi is didn't change. -+ */ -+ u16 lo; -+ u16 hi; -+}; -+ -+struct qca8k_priv { -+ u8 switch_id; -+ u8 switch_revision; -+ u8 mirror_rx; -+ u8 mirror_tx; -+ u8 lag_hash_mode; -+ /* Each bit correspond to a port. This switch can support a max of 7 port. -+ * Bit 1: port enabled. Bit 0: port disabled. -+ */ -+ u8 port_enabled_map; -+ struct qca8k_ports_config ports_config; -+ struct regmap *regmap; -+ struct mii_bus *bus; -+ struct dsa_switch *ds; -+ struct mutex reg_mutex; -+ struct device *dev; -+ struct gpio_desc *reset_gpio; -+ struct net_device *mgmt_master; /* Track if mdio/mib Ethernet is available */ -+ struct qca8k_mgmt_eth_data mgmt_eth_data; -+ struct qca8k_mib_eth_data mib_eth_data; -+ struct qca8k_mdio_cache mdio_cache; -+}; -+ -+struct qca8k_mib_desc { -+ unsigned int size; -+ unsigned int offset; -+ const char *name; -+}; -+ -+struct qca8k_fdb { -+ u16 vid; -+ u8 port_mask; -+ u8 aging; -+ u8 mac[6]; -+}; -+ -+#endif /* __QCA8K_H */ ---- a/drivers/net/dsa/qca8k.c -+++ /dev/null -@@ -1,3243 +0,0 @@ --// SPDX-License-Identifier: GPL-2.0 --/* -- * Copyright (C) 2009 Felix Fietkau -- * Copyright (C) 2011-2012 Gabor Juhos -- * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved. 
-- * Copyright (c) 2016 John Crispin -- */ -- --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include -- --#include "qca8k.h" -- --#define MIB_DESC(_s, _o, _n) \ -- { \ -- .size = (_s), \ -- .offset = (_o), \ -- .name = (_n), \ -- } -- --static const struct qca8k_mib_desc ar8327_mib[] = { -- MIB_DESC(1, 0x00, "RxBroad"), -- MIB_DESC(1, 0x04, "RxPause"), -- MIB_DESC(1, 0x08, "RxMulti"), -- MIB_DESC(1, 0x0c, "RxFcsErr"), -- MIB_DESC(1, 0x10, "RxAlignErr"), -- MIB_DESC(1, 0x14, "RxRunt"), -- MIB_DESC(1, 0x18, "RxFragment"), -- MIB_DESC(1, 0x1c, "Rx64Byte"), -- MIB_DESC(1, 0x20, "Rx128Byte"), -- MIB_DESC(1, 0x24, "Rx256Byte"), -- MIB_DESC(1, 0x28, "Rx512Byte"), -- MIB_DESC(1, 0x2c, "Rx1024Byte"), -- MIB_DESC(1, 0x30, "Rx1518Byte"), -- MIB_DESC(1, 0x34, "RxMaxByte"), -- MIB_DESC(1, 0x38, "RxTooLong"), -- MIB_DESC(2, 0x3c, "RxGoodByte"), -- MIB_DESC(2, 0x44, "RxBadByte"), -- MIB_DESC(1, 0x4c, "RxOverFlow"), -- MIB_DESC(1, 0x50, "Filtered"), -- MIB_DESC(1, 0x54, "TxBroad"), -- MIB_DESC(1, 0x58, "TxPause"), -- MIB_DESC(1, 0x5c, "TxMulti"), -- MIB_DESC(1, 0x60, "TxUnderRun"), -- MIB_DESC(1, 0x64, "Tx64Byte"), -- MIB_DESC(1, 0x68, "Tx128Byte"), -- MIB_DESC(1, 0x6c, "Tx256Byte"), -- MIB_DESC(1, 0x70, "Tx512Byte"), -- MIB_DESC(1, 0x74, "Tx1024Byte"), -- MIB_DESC(1, 0x78, "Tx1518Byte"), -- MIB_DESC(1, 0x7c, "TxMaxByte"), -- MIB_DESC(1, 0x80, "TxOverSize"), -- MIB_DESC(2, 0x84, "TxByte"), -- MIB_DESC(1, 0x8c, "TxCollision"), -- MIB_DESC(1, 0x90, "TxAbortCol"), -- MIB_DESC(1, 0x94, "TxMultiCol"), -- MIB_DESC(1, 0x98, "TxSingleCol"), -- MIB_DESC(1, 0x9c, "TxExcDefer"), -- MIB_DESC(1, 0xa0, "TxDefer"), -- MIB_DESC(1, 0xa4, "TxLateCol"), -- MIB_DESC(1, 0xa8, "RXUnicast"), -- MIB_DESC(1, 0xac, "TXUnicast"), --}; -- --static void --qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page) --{ -- regaddr >>= 1; -- *r1 = regaddr & 0x1e; -- -- regaddr >>= 5; -- *r2 = regaddr & 0x7; -- -- regaddr >>= 3; -- *page = regaddr & 0x3ff; --} -- --static int --qca8k_set_lo(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 lo) --{ -- u16 *cached_lo = &priv->mdio_cache.lo; -- struct mii_bus *bus = priv->bus; -- int ret; -- -- if (lo == *cached_lo) -- return 0; -- -- ret = bus->write(bus, phy_id, regnum, lo); -- if (ret < 0) -- dev_err_ratelimited(&bus->dev, -- "failed to write qca8k 32bit lo register\n"); -- -- *cached_lo = lo; -- return 0; --} -- --static int --qca8k_set_hi(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 hi) --{ -- u16 *cached_hi = &priv->mdio_cache.hi; -- struct mii_bus *bus = priv->bus; -- int ret; -- -- if (hi == *cached_hi) -- return 0; -- -- ret = bus->write(bus, phy_id, regnum, hi); -- if (ret < 0) -- dev_err_ratelimited(&bus->dev, -- "failed to write qca8k 32bit hi register\n"); -- -- *cached_hi = hi; -- return 0; --} -- --static int --qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val) --{ -- int ret; -- -- ret = bus->read(bus, phy_id, regnum); -- if (ret >= 0) { -- *val = ret; -- ret = bus->read(bus, phy_id, regnum + 1); -- *val |= ret << 16; -- } -- -- if (ret < 0) { -- dev_err_ratelimited(&bus->dev, -- "failed to read qca8k 32bit register\n"); -- *val = 0; -- return ret; -- } -- -- return 0; --} -- --static void --qca8k_mii_write32(struct qca8k_priv *priv, int phy_id, u32 regnum, u32 val) --{ -- u16 lo, hi; -- int ret; -- -- lo = val & 0xffff; -- hi = (u16)(val >> 16); -- -- ret = qca8k_set_lo(priv, phy_id, regnum, lo); -- if (ret >= 0) -- ret = 
qca8k_set_hi(priv, phy_id, regnum + 1, hi); --} -- --static int --qca8k_set_page(struct qca8k_priv *priv, u16 page) --{ -- u16 *cached_page = &priv->mdio_cache.page; -- struct mii_bus *bus = priv->bus; -- int ret; -- -- if (page == *cached_page) -- return 0; -- -- ret = bus->write(bus, 0x18, 0, page); -- if (ret < 0) { -- dev_err_ratelimited(&bus->dev, -- "failed to set qca8k page\n"); -- return ret; -- } -- -- *cached_page = page; -- usleep_range(1000, 2000); -- return 0; --} -- --static int --qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val) --{ -- return regmap_read(priv->regmap, reg, val); --} -- --static int --qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val) --{ -- return regmap_write(priv->regmap, reg, val); --} -- --static int --qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val) --{ -- return regmap_update_bits(priv->regmap, reg, mask, write_val); --} -- --static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb) --{ -- struct qca8k_mgmt_eth_data *mgmt_eth_data; -- struct qca8k_priv *priv = ds->priv; -- struct qca_mgmt_ethhdr *mgmt_ethhdr; -- u8 len, cmd; -- -- mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb_mac_header(skb); -- mgmt_eth_data = &priv->mgmt_eth_data; -- -- cmd = FIELD_GET(QCA_HDR_MGMT_CMD, mgmt_ethhdr->command); -- len = FIELD_GET(QCA_HDR_MGMT_LENGTH, mgmt_ethhdr->command); -- -- /* Make sure the seq match the requested packet */ -- if (mgmt_ethhdr->seq == mgmt_eth_data->seq) -- mgmt_eth_data->ack = true; -- -- if (cmd == MDIO_READ) { -- mgmt_eth_data->data[0] = mgmt_ethhdr->mdio_data; -- -- /* Get the rest of the 12 byte of data. -- * The read/write function will extract the requested data. -- */ -- if (len > QCA_HDR_MGMT_DATA1_LEN) -- memcpy(mgmt_eth_data->data + 1, skb->data, -- QCA_HDR_MGMT_DATA2_LEN); -- } -- -- complete(&mgmt_eth_data->rw_done); --} -- --static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *val, -- int priority, unsigned int len) --{ -- struct qca_mgmt_ethhdr *mgmt_ethhdr; -- unsigned int real_len; -- struct sk_buff *skb; -- u32 *data2; -- u16 hdr; -- -- skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN); -- if (!skb) -- return NULL; -- -- /* Max value for len reg is 15 (0xf) but the switch actually return 16 byte -- * Actually for some reason the steps are: -- * 0: nothing -- * 1-4: first 4 byte -- * 5-6: first 12 byte -- * 7-15: all 16 byte -- */ -- if (len == 16) -- real_len = 15; -- else -- real_len = len; -- -- skb_reset_mac_header(skb); -- skb_set_network_header(skb, skb->len); -- -- mgmt_ethhdr = skb_push(skb, QCA_HDR_MGMT_HEADER_LEN + QCA_HDR_LEN); -- -- hdr = FIELD_PREP(QCA_HDR_XMIT_VERSION, QCA_HDR_VERSION); -- hdr |= FIELD_PREP(QCA_HDR_XMIT_PRIORITY, priority); -- hdr |= QCA_HDR_XMIT_FROM_CPU; -- hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(0)); -- hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG); -- -- mgmt_ethhdr->command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg); -- mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len); -- mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd); -- mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE, -- QCA_HDR_MGMT_CHECK_CODE_VAL); -- -- if (cmd == MDIO_WRITE) -- mgmt_ethhdr->mdio_data = *val; -- -- mgmt_ethhdr->hdr = htons(hdr); -- -- data2 = skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN); -- if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN) -- memcpy(data2, val + 1, len - QCA_HDR_MGMT_DATA1_LEN); -- -- return skb; --} -- --static void qca8k_mdio_header_fill_seq_num(struct 
sk_buff *skb, u32 seq_num) --{ -- struct qca_mgmt_ethhdr *mgmt_ethhdr; -- -- mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb->data; -- mgmt_ethhdr->seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num); --} -- --static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len) --{ -- struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data; -- struct sk_buff *skb; -- bool ack; -- int ret; -- -- skb = qca8k_alloc_mdio_header(MDIO_READ, reg, NULL, -- QCA8K_ETHERNET_MDIO_PRIORITY, len); -- if (!skb) -- return -ENOMEM; -- -- mutex_lock(&mgmt_eth_data->mutex); -- -- /* Check mgmt_master if is operational */ -- if (!priv->mgmt_master) { -- kfree_skb(skb); -- mutex_unlock(&mgmt_eth_data->mutex); -- return -EINVAL; -- } -- -- skb->dev = priv->mgmt_master; -- -- reinit_completion(&mgmt_eth_data->rw_done); -- -- /* Increment seq_num and set it in the mdio pkt */ -- mgmt_eth_data->seq++; -- qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq); -- mgmt_eth_data->ack = false; -- -- dev_queue_xmit(skb); -- -- ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done, -- msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT)); -- -- *val = mgmt_eth_data->data[0]; -- if (len > QCA_HDR_MGMT_DATA1_LEN) -- memcpy(val + 1, mgmt_eth_data->data + 1, len - QCA_HDR_MGMT_DATA1_LEN); -- -- ack = mgmt_eth_data->ack; -- -- mutex_unlock(&mgmt_eth_data->mutex); -- -- if (ret <= 0) -- return -ETIMEDOUT; -- -- if (!ack) -- return -EINVAL; -- -- return 0; --} -- --static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len) --{ -- struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data; -- struct sk_buff *skb; -- bool ack; -- int ret; -- -- skb = qca8k_alloc_mdio_header(MDIO_WRITE, reg, val, -- QCA8K_ETHERNET_MDIO_PRIORITY, len); -- if (!skb) -- return -ENOMEM; -- -- mutex_lock(&mgmt_eth_data->mutex); -- -- /* Check mgmt_master if is operational */ -- if (!priv->mgmt_master) { -- kfree_skb(skb); -- mutex_unlock(&mgmt_eth_data->mutex); -- return -EINVAL; -- } -- -- skb->dev = priv->mgmt_master; -- -- reinit_completion(&mgmt_eth_data->rw_done); -- -- /* Increment seq_num and set it in the mdio pkt */ -- mgmt_eth_data->seq++; -- qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq); -- mgmt_eth_data->ack = false; -- -- dev_queue_xmit(skb); -- -- ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done, -- msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT)); -- -- ack = mgmt_eth_data->ack; -- -- mutex_unlock(&mgmt_eth_data->mutex); -- -- if (ret <= 0) -- return -ETIMEDOUT; -- -- if (!ack) -- return -EINVAL; -- -- return 0; --} -- --static int --qca8k_regmap_update_bits_eth(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val) --{ -- u32 val = 0; -- int ret; -- -- ret = qca8k_read_eth(priv, reg, &val, sizeof(val)); -- if (ret) -- return ret; -- -- val &= ~mask; -- val |= write_val; -- -- return qca8k_write_eth(priv, reg, &val, sizeof(val)); --} -- --static int --qca8k_bulk_read(struct qca8k_priv *priv, u32 reg, u32 *val, int len) --{ -- int i, count = len / sizeof(u32), ret; -- -- if (priv->mgmt_master && !qca8k_read_eth(priv, reg, val, len)) -- return 0; -- -- for (i = 0; i < count; i++) { -- ret = regmap_read(priv->regmap, reg + (i * 4), val + i); -- if (ret < 0) -- return ret; -- } -- -- return 0; --} -- --static int --qca8k_bulk_write(struct qca8k_priv *priv, u32 reg, u32 *val, int len) --{ -- int i, count = len / sizeof(u32), ret; -- u32 tmp; -- -- if (priv->mgmt_master && !qca8k_write_eth(priv, reg, val, len)) -- return 0; -- -- for (i = 0; i < count; i++) { -- tmp = val[i]; -- -- ret = 
regmap_write(priv->regmap, reg + (i * 4), tmp); -- if (ret < 0) -- return ret; -- } -- -- return 0; --} -- --static int --qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val) --{ -- struct qca8k_priv *priv = (struct qca8k_priv *)ctx; -- struct mii_bus *bus = priv->bus; -- u16 r1, r2, page; -- int ret; -- -- if (!qca8k_read_eth(priv, reg, val, sizeof(*val))) -- return 0; -- -- qca8k_split_addr(reg, &r1, &r2, &page); -- -- mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); -- -- ret = qca8k_set_page(priv, page); -- if (ret < 0) -- goto exit; -- -- ret = qca8k_mii_read32(bus, 0x10 | r2, r1, val); -- --exit: -- mutex_unlock(&bus->mdio_lock); -- return ret; --} -- --static int --qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val) --{ -- struct qca8k_priv *priv = (struct qca8k_priv *)ctx; -- struct mii_bus *bus = priv->bus; -- u16 r1, r2, page; -- int ret; -- -- if (!qca8k_write_eth(priv, reg, &val, sizeof(val))) -- return 0; -- -- qca8k_split_addr(reg, &r1, &r2, &page); -- -- mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); -- -- ret = qca8k_set_page(priv, page); -- if (ret < 0) -- goto exit; -- -- qca8k_mii_write32(priv, 0x10 | r2, r1, val); -- --exit: -- mutex_unlock(&bus->mdio_lock); -- return ret; --} -- --static int --qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val) --{ -- struct qca8k_priv *priv = (struct qca8k_priv *)ctx; -- struct mii_bus *bus = priv->bus; -- u16 r1, r2, page; -- u32 val; -- int ret; -- -- if (!qca8k_regmap_update_bits_eth(priv, reg, mask, write_val)) -- return 0; -- -- qca8k_split_addr(reg, &r1, &r2, &page); -- -- mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); -- -- ret = qca8k_set_page(priv, page); -- if (ret < 0) -- goto exit; -- -- ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val); -- if (ret < 0) -- goto exit; -- -- val &= ~mask; -- val |= write_val; -- qca8k_mii_write32(priv, 0x10 | r2, r1, val); -- --exit: -- mutex_unlock(&bus->mdio_lock); -- -- return ret; --} -- --static const struct regmap_range qca8k_readable_ranges[] = { -- regmap_reg_range(0x0000, 0x00e4), /* Global control */ -- regmap_reg_range(0x0100, 0x0168), /* EEE control */ -- regmap_reg_range(0x0200, 0x0270), /* Parser control */ -- regmap_reg_range(0x0400, 0x0454), /* ACL */ -- regmap_reg_range(0x0600, 0x0718), /* Lookup */ -- regmap_reg_range(0x0800, 0x0b70), /* QM */ -- regmap_reg_range(0x0c00, 0x0c80), /* PKT */ -- regmap_reg_range(0x0e00, 0x0e98), /* L3 */ -- regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */ -- regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */ -- regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */ -- regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */ -- regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */ -- regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */ -- regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */ -- --}; -- --static const struct regmap_access_table qca8k_readable_table = { -- .yes_ranges = qca8k_readable_ranges, -- .n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges), --}; -- --static struct regmap_config qca8k_regmap_config = { -- .reg_bits = 16, -- .val_bits = 32, -- .reg_stride = 4, -- .max_register = 0x16ac, /* end MIB - Port6 range */ -- .reg_read = qca8k_regmap_read, -- .reg_write = qca8k_regmap_write, -- .reg_update_bits = qca8k_regmap_update_bits, -- .rd_table = &qca8k_readable_table, -- .disable_locking = true, /* Locking is handled by qca8k read/write */ -- .cache_type = REGCACHE_NONE, /* Explicitly disable CACHE */ --}; -- --static int --qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, 
u32 mask) --{ -- u32 val; -- -- return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0, -- QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC); --} -- --static int --qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb) --{ -- u32 reg[3]; -- int ret; -- -- /* load the ARL table into an array */ -- ret = qca8k_bulk_read(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg)); -- if (ret) -- return ret; -- -- /* vid - 83:72 */ -- fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]); -- /* aging - 67:64 */ -- fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]); -- /* portmask - 54:48 */ -- fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]); -- /* mac - 47:0 */ -- fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]); -- fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]); -- fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]); -- fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]); -- fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]); -- fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]); -- -- return 0; --} -- --static void --qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask, const u8 *mac, -- u8 aging) --{ -- u32 reg[3] = { 0 }; -- -- /* vid - 83:72 */ -- reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid); -- /* aging - 67:64 */ -- reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging); -- /* portmask - 54:48 */ -- reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask); -- /* mac - 47:0 */ -- reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]); -- reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]); -- reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]); -- reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]); -- reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]); -- reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]); -- -- /* load the array into the ARL table */ -- qca8k_bulk_write(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg)); --} -- --static int --qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd, int port) --{ -- u32 reg; -- int ret; -- -- /* Set the command and FDB index */ -- reg = QCA8K_ATU_FUNC_BUSY; -- reg |= cmd; -- if (port >= 0) { -- reg |= QCA8K_ATU_FUNC_PORT_EN; -- reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port); -- } -- -- /* Write the function register triggering the table access */ -- ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg); -- if (ret) -- return ret; -- -- /* wait for completion */ -- ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY); -- if (ret) -- return ret; -- -- /* Check for table full violation when adding an entry */ -- if (cmd == QCA8K_FDB_LOAD) { -- ret = qca8k_read(priv, QCA8K_REG_ATU_FUNC, ®); -- if (ret < 0) -- return ret; -- if (reg & QCA8K_ATU_FUNC_FULL) -- return -1; -- } -- -- return 0; --} -- --static int --qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb, int port) --{ -- int ret; -- -- qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging); -- ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port); -- if (ret < 0) -- return ret; -- -- return qca8k_fdb_read(priv, fdb); --} -- --static int --qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac, u16 port_mask, -- u16 vid, u8 aging) --{ -- int ret; -- -- mutex_lock(&priv->reg_mutex); -- qca8k_fdb_write(priv, vid, port_mask, mac, aging); -- ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1); -- mutex_unlock(&priv->reg_mutex); -- -- return ret; --} -- --static int --qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac, u16 port_mask, u16 vid) --{ -- int ret; -- -- mutex_lock(&priv->reg_mutex); -- qca8k_fdb_write(priv, vid, 
port_mask, mac, 0); -- ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1); -- mutex_unlock(&priv->reg_mutex); -- -- return ret; --} -- --static void --qca8k_fdb_flush(struct qca8k_priv *priv) --{ -- mutex_lock(&priv->reg_mutex); -- qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1); -- mutex_unlock(&priv->reg_mutex); --} -- --static int --qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask, -- const u8 *mac, u16 vid) --{ -- struct qca8k_fdb fdb = { 0 }; -- int ret; -- -- mutex_lock(&priv->reg_mutex); -- -- qca8k_fdb_write(priv, vid, 0, mac, 0); -- ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1); -- if (ret < 0) -- goto exit; -- -- ret = qca8k_fdb_read(priv, &fdb); -- if (ret < 0) -- goto exit; -- -- /* Rule exist. Delete first */ -- if (!fdb.aging) { -- ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1); -- if (ret) -- goto exit; -- } -- -- /* Add port to fdb portmask */ -- fdb.port_mask |= port_mask; -- -- qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging); -- ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1); -- --exit: -- mutex_unlock(&priv->reg_mutex); -- return ret; --} -- --static int --qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask, -- const u8 *mac, u16 vid) --{ -- struct qca8k_fdb fdb = { 0 }; -- int ret; -- -- mutex_lock(&priv->reg_mutex); -- -- qca8k_fdb_write(priv, vid, 0, mac, 0); -- ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1); -- if (ret < 0) -- goto exit; -- -- /* Rule doesn't exist. Why delete? */ -- if (!fdb.aging) { -- ret = -EINVAL; -- goto exit; -- } -- -- ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1); -- if (ret) -- goto exit; -- -- /* Only port in the rule is this port. Don't re insert */ -- if (fdb.port_mask == port_mask) -- goto exit; -- -- /* Remove port from port mask */ -- fdb.port_mask &= ~port_mask; -- -- qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging); -- ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1); -- --exit: -- mutex_unlock(&priv->reg_mutex); -- return ret; --} -- --static int --qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid) --{ -- u32 reg; -- int ret; -- -- /* Set the command and VLAN index */ -- reg = QCA8K_VTU_FUNC1_BUSY; -- reg |= cmd; -- reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid); -- -- /* Write the function register triggering the table access */ -- ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg); -- if (ret) -- return ret; -- -- /* wait for completion */ -- ret = qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY); -- if (ret) -- return ret; -- -- /* Check for table full violation when adding an entry */ -- if (cmd == QCA8K_VLAN_LOAD) { -- ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC1, ®); -- if (ret < 0) -- return ret; -- if (reg & QCA8K_VTU_FUNC1_FULL) -- return -ENOMEM; -- } -- -- return 0; --} -- --static int --qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid, bool untagged) --{ -- u32 reg; -- int ret; -- -- /* -- We do the right thing with VLAN 0 and treat it as untagged while -- preserving the tag on egress. 
-- */ -- if (vid == 0) -- return 0; -- -- mutex_lock(&priv->reg_mutex); -- ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid); -- if (ret < 0) -- goto out; -- -- ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, ®); -- if (ret < 0) -- goto out; -- reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN; -- reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port); -- if (untagged) -- reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port); -- else -- reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port); -- -- ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg); -- if (ret) -- goto out; -- ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid); -- --out: -- mutex_unlock(&priv->reg_mutex); -- -- return ret; --} -- --static int --qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid) --{ -- u32 reg, mask; -- int ret, i; -- bool del; -- -- mutex_lock(&priv->reg_mutex); -- ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid); -- if (ret < 0) -- goto out; -- -- ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, ®); -- if (ret < 0) -- goto out; -- reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port); -- reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port); -- -- /* Check if we're the last member to be removed */ -- del = true; -- for (i = 0; i < QCA8K_NUM_PORTS; i++) { -- mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i); -- -- if ((reg & mask) != mask) { -- del = false; -- break; -- } -- } -- -- if (del) { -- ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid); -- } else { -- ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg); -- if (ret) -- goto out; -- ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid); -- } -- --out: -- mutex_unlock(&priv->reg_mutex); -- -- return ret; --} -- --static int --qca8k_mib_init(struct qca8k_priv *priv) --{ -- int ret; -- -- mutex_lock(&priv->reg_mutex); -- ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB, -- QCA8K_MIB_FUNC | QCA8K_MIB_BUSY, -- FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_FLUSH) | -- QCA8K_MIB_BUSY); -- if (ret) -- goto exit; -- -- ret = qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY); -- if (ret) -- goto exit; -- -- ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP); -- if (ret) -- goto exit; -- -- ret = qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB); -- --exit: -- mutex_unlock(&priv->reg_mutex); -- return ret; --} -- --static void --qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable) --{ -- u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC; -- -- /* Port 0 and 6 have no internal PHY */ -- if (port > 0 && port < 6) -- mask |= QCA8K_PORT_STATUS_LINK_AUTO; -- -- if (enable) -- regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask); -- else -- regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask); --} -- --static int --qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data, -- struct sk_buff *read_skb, u32 *val) --{ -- struct sk_buff *skb = skb_copy(read_skb, GFP_KERNEL); -- bool ack; -- int ret; -- -- reinit_completion(&mgmt_eth_data->rw_done); -- -- /* Increment seq_num and set it in the copy pkt */ -- mgmt_eth_data->seq++; -- qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq); -- mgmt_eth_data->ack = false; -- -- dev_queue_xmit(skb); -- -- ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done, -- QCA8K_ETHERNET_TIMEOUT); -- -- ack = mgmt_eth_data->ack; -- -- if (ret <= 0) -- return -ETIMEDOUT; -- -- if (!ack) -- return -EINVAL; -- -- *val = mgmt_eth_data->data[0]; -- -- return 0; --} -- --static int --qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy, -- int regnum, u16 data) --{ -- 
struct sk_buff *write_skb, *clear_skb, *read_skb; -- struct qca8k_mgmt_eth_data *mgmt_eth_data; -- u32 write_val, clear_val = 0, val; -- struct net_device *mgmt_master; -- int ret, ret1; -- bool ack; -- -- if (regnum >= QCA8K_MDIO_MASTER_MAX_REG) -- return -EINVAL; -- -- mgmt_eth_data = &priv->mgmt_eth_data; -- -- write_val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN | -- QCA8K_MDIO_MASTER_PHY_ADDR(phy) | -- QCA8K_MDIO_MASTER_REG_ADDR(regnum); -- -- if (read) { -- write_val |= QCA8K_MDIO_MASTER_READ; -- } else { -- write_val |= QCA8K_MDIO_MASTER_WRITE; -- write_val |= QCA8K_MDIO_MASTER_DATA(data); -- } -- -- /* Prealloc all the needed skb before the lock */ -- write_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &write_val, -- QCA8K_ETHERNET_PHY_PRIORITY, sizeof(write_val)); -- if (!write_skb) -- return -ENOMEM; -- -- clear_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &clear_val, -- QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val)); -- if (!clear_skb) { -- ret = -ENOMEM; -- goto err_clear_skb; -- } -- -- read_skb = qca8k_alloc_mdio_header(MDIO_READ, QCA8K_MDIO_MASTER_CTRL, &clear_val, -- QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val)); -- if (!read_skb) { -- ret = -ENOMEM; -- goto err_read_skb; -- } -- -- /* Actually start the request: -- * 1. Send mdio master packet -- * 2. Busy Wait for mdio master command -- * 3. Get the data if we are reading -- * 4. Reset the mdio master (even with error) -- */ -- mutex_lock(&mgmt_eth_data->mutex); -- -- /* Check if mgmt_master is operational */ -- mgmt_master = priv->mgmt_master; -- if (!mgmt_master) { -- mutex_unlock(&mgmt_eth_data->mutex); -- ret = -EINVAL; -- goto err_mgmt_master; -- } -- -- read_skb->dev = mgmt_master; -- clear_skb->dev = mgmt_master; -- write_skb->dev = mgmt_master; -- -- reinit_completion(&mgmt_eth_data->rw_done); -- -- /* Increment seq_num and set it in the write pkt */ -- mgmt_eth_data->seq++; -- qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq); -- mgmt_eth_data->ack = false; -- -- dev_queue_xmit(write_skb); -- -- ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done, -- QCA8K_ETHERNET_TIMEOUT); -- -- ack = mgmt_eth_data->ack; -- -- if (ret <= 0) { -- ret = -ETIMEDOUT; -- kfree_skb(read_skb); -- goto exit; -- } -- -- if (!ack) { -- ret = -EINVAL; -- kfree_skb(read_skb); -- goto exit; -- } -- -- ret = read_poll_timeout(qca8k_phy_eth_busy_wait, ret1, -- !(val & QCA8K_MDIO_MASTER_BUSY), 0, -- QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false, -- mgmt_eth_data, read_skb, &val); -- -- if (ret < 0 && ret1 < 0) { -- ret = ret1; -- goto exit; -- } -- -- if (read) { -- reinit_completion(&mgmt_eth_data->rw_done); -- -- /* Increment seq_num and set it in the read pkt */ -- mgmt_eth_data->seq++; -- qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq); -- mgmt_eth_data->ack = false; -- -- dev_queue_xmit(read_skb); -- -- ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done, -- QCA8K_ETHERNET_TIMEOUT); -- -- ack = mgmt_eth_data->ack; -- -- if (ret <= 0) { -- ret = -ETIMEDOUT; -- goto exit; -- } -- -- if (!ack) { -- ret = -EINVAL; -- goto exit; -- } -- -- ret = mgmt_eth_data->data[0] & QCA8K_MDIO_MASTER_DATA_MASK; -- } else { -- kfree_skb(read_skb); -- } --exit: -- reinit_completion(&mgmt_eth_data->rw_done); -- -- /* Increment seq_num and set it in the clear pkt */ -- mgmt_eth_data->seq++; -- qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq); -- mgmt_eth_data->ack = false; -- -- dev_queue_xmit(clear_skb); -- -- 
wait_for_completion_timeout(&mgmt_eth_data->rw_done, -- QCA8K_ETHERNET_TIMEOUT); -- -- mutex_unlock(&mgmt_eth_data->mutex); -- -- return ret; -- -- /* Error handling before lock */ --err_mgmt_master: -- kfree_skb(read_skb); --err_read_skb: -- kfree_skb(clear_skb); --err_clear_skb: -- kfree_skb(write_skb); -- -- return ret; --} -- --static u32 --qca8k_port_to_phy(int port) --{ -- /* From Andrew Lunn: -- * Port 0 has no internal phy. -- * Port 1 has an internal PHY at MDIO address 0. -- * Port 2 has an internal PHY at MDIO address 1. -- * ... -- * Port 5 has an internal PHY at MDIO address 4. -- * Port 6 has no internal PHY. -- */ -- -- return port - 1; --} -- --static int --qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask) --{ -- u16 r1, r2, page; -- u32 val; -- int ret, ret1; -- -- qca8k_split_addr(reg, &r1, &r2, &page); -- -- ret = read_poll_timeout(qca8k_mii_read32, ret1, !(val & mask), 0, -- QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false, -- bus, 0x10 | r2, r1, &val); -- -- /* Check if qca8k_read has failed for a different reason -- * before returnting -ETIMEDOUT -- */ -- if (ret < 0 && ret1 < 0) -- return ret1; -- -- return ret; --} -- --static int --qca8k_mdio_write(struct qca8k_priv *priv, int phy, int regnum, u16 data) --{ -- struct mii_bus *bus = priv->bus; -- u16 r1, r2, page; -- u32 val; -- int ret; -- -- if (regnum >= QCA8K_MDIO_MASTER_MAX_REG) -- return -EINVAL; -- -- val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN | -- QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) | -- QCA8K_MDIO_MASTER_REG_ADDR(regnum) | -- QCA8K_MDIO_MASTER_DATA(data); -- -- qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page); -- -- mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); -- -- ret = qca8k_set_page(priv, page); -- if (ret) -- goto exit; -- -- qca8k_mii_write32(priv, 0x10 | r2, r1, val); -- -- ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL, -- QCA8K_MDIO_MASTER_BUSY); -- --exit: -- /* even if the busy_wait timeouts try to clear the MASTER_EN */ -- qca8k_mii_write32(priv, 0x10 | r2, r1, 0); -- -- mutex_unlock(&bus->mdio_lock); -- -- return ret; --} -- --static int --qca8k_mdio_read(struct qca8k_priv *priv, int phy, int regnum) --{ -- struct mii_bus *bus = priv->bus; -- u16 r1, r2, page; -- u32 val; -- int ret; -- -- if (regnum >= QCA8K_MDIO_MASTER_MAX_REG) -- return -EINVAL; -- -- val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN | -- QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) | -- QCA8K_MDIO_MASTER_REG_ADDR(regnum); -- -- qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page); -- -- mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); -- -- ret = qca8k_set_page(priv, page); -- if (ret) -- goto exit; -- -- qca8k_mii_write32(priv, 0x10 | r2, r1, val); -- -- ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL, -- QCA8K_MDIO_MASTER_BUSY); -- if (ret) -- goto exit; -- -- ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val); -- --exit: -- /* even if the busy_wait timeouts try to clear the MASTER_EN */ -- qca8k_mii_write32(priv, 0x10 | r2, r1, 0); -- -- mutex_unlock(&bus->mdio_lock); -- -- if (ret >= 0) -- ret = val & QCA8K_MDIO_MASTER_DATA_MASK; -- -- return ret; --} -- --static int --qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data) --{ -- struct qca8k_priv *priv = slave_bus->priv; -- int ret; -- -- /* Use mdio Ethernet when available, fallback to legacy one on error */ -- ret = qca8k_phy_eth_command(priv, false, phy, regnum, data); -- if (!ret) -- return 0; -- -- return qca8k_mdio_write(priv, phy, regnum, 
data); --} -- --static int --qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum) --{ -- struct qca8k_priv *priv = slave_bus->priv; -- int ret; -- -- /* Use mdio Ethernet when available, fallback to legacy one on error */ -- ret = qca8k_phy_eth_command(priv, true, phy, regnum, 0); -- if (ret >= 0) -- return ret; -- -- ret = qca8k_mdio_read(priv, phy, regnum); -- -- if (ret < 0) -- return 0xffff; -- -- return ret; --} -- --static int --qca8k_legacy_mdio_write(struct mii_bus *slave_bus, int port, int regnum, u16 data) --{ -- port = qca8k_port_to_phy(port) % PHY_MAX_ADDR; -- -- return qca8k_internal_mdio_write(slave_bus, port, regnum, data); --} -- --static int --qca8k_legacy_mdio_read(struct mii_bus *slave_bus, int port, int regnum) --{ -- port = qca8k_port_to_phy(port) % PHY_MAX_ADDR; -- -- return qca8k_internal_mdio_read(slave_bus, port, regnum); --} -- --static int --qca8k_mdio_register(struct qca8k_priv *priv) --{ -- struct dsa_switch *ds = priv->ds; -- struct device_node *mdio; -- struct mii_bus *bus; -- -- bus = devm_mdiobus_alloc(ds->dev); -- if (!bus) -- return -ENOMEM; -- -- bus->priv = (void *)priv; -- snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d.%d", -- ds->dst->index, ds->index); -- bus->parent = ds->dev; -- bus->phy_mask = ~ds->phys_mii_mask; -- ds->slave_mii_bus = bus; -- -- /* Check if the devicetree declare the port:phy mapping */ -- mdio = of_get_child_by_name(priv->dev->of_node, "mdio"); -- if (of_device_is_available(mdio)) { -- bus->name = "qca8k slave mii"; -- bus->read = qca8k_internal_mdio_read; -- bus->write = qca8k_internal_mdio_write; -- return devm_of_mdiobus_register(priv->dev, bus, mdio); -- } -- -- /* If a mapping can't be found the legacy mapping is used, -- * using the qca8k_port_to_phy function -- */ -- bus->name = "qca8k-legacy slave mii"; -- bus->read = qca8k_legacy_mdio_read; -- bus->write = qca8k_legacy_mdio_write; -- return devm_mdiobus_register(priv->dev, bus); --} -- --static int --qca8k_setup_mdio_bus(struct qca8k_priv *priv) --{ -- u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg; -- struct device_node *ports, *port; -- phy_interface_t mode; -- int err; -- -- ports = of_get_child_by_name(priv->dev->of_node, "ports"); -- if (!ports) -- ports = of_get_child_by_name(priv->dev->of_node, "ethernet-ports"); -- -- if (!ports) -- return -EINVAL; -- -- for_each_available_child_of_node(ports, port) { -- err = of_property_read_u32(port, "reg", ®); -- if (err) { -- of_node_put(port); -- of_node_put(ports); -- return err; -- } -- -- if (!dsa_is_user_port(priv->ds, reg)) -- continue; -- -- of_get_phy_mode(port, &mode); -- -- if (of_property_read_bool(port, "phy-handle") && -- mode != PHY_INTERFACE_MODE_INTERNAL) -- external_mdio_mask |= BIT(reg); -- else -- internal_mdio_mask |= BIT(reg); -- } -- -- of_node_put(ports); -- if (!external_mdio_mask && !internal_mdio_mask) { -- dev_err(priv->dev, "no PHYs are defined.\n"); -- return -EINVAL; -- } -- -- /* The QCA8K_MDIO_MASTER_EN Bit, which grants access to PHYs through -- * the MDIO_MASTER register also _disconnects_ the external MDC -- * passthrough to the internal PHYs. It's not possible to use both -- * configurations at the same time! -- * -- * Because this came up during the review process: -- * If the external mdio-bus driver is capable magically disabling -- * the QCA8K_MDIO_MASTER_EN and mutex/spin-locking out the qca8k's -- * accessors for the time being, it would be possible to pull this -- * off. 
-- */ -- if (!!external_mdio_mask && !!internal_mdio_mask) { -- dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n"); -- return -EINVAL; -- } -- -- if (external_mdio_mask) { -- /* Make sure to disable the internal mdio bus in cases -- * a dt-overlay and driver reload changed the configuration -- */ -- -- return regmap_clear_bits(priv->regmap, QCA8K_MDIO_MASTER_CTRL, -- QCA8K_MDIO_MASTER_EN); -- } -- -- return qca8k_mdio_register(priv); --} -- --static int --qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv) --{ -- u32 mask = 0; -- int ret = 0; -- -- /* SoC specific settings for ipq8064. -- * If more device require this consider adding -- * a dedicated binding. -- */ -- if (of_machine_is_compatible("qcom,ipq8064")) -- mask |= QCA8K_MAC_PWR_RGMII0_1_8V; -- -- /* SoC specific settings for ipq8065 */ -- if (of_machine_is_compatible("qcom,ipq8065")) -- mask |= QCA8K_MAC_PWR_RGMII1_1_8V; -- -- if (mask) { -- ret = qca8k_rmw(priv, QCA8K_REG_MAC_PWR_SEL, -- QCA8K_MAC_PWR_RGMII0_1_8V | -- QCA8K_MAC_PWR_RGMII1_1_8V, -- mask); -- } -- -- return ret; --} -- --static int qca8k_find_cpu_port(struct dsa_switch *ds) --{ -- struct qca8k_priv *priv = ds->priv; -- -- /* Find the connected cpu port. Valid port are 0 or 6 */ -- if (dsa_is_cpu_port(ds, 0)) -- return 0; -- -- dev_dbg(priv->dev, "port 0 is not the CPU port. Checking port 6"); -- -- if (dsa_is_cpu_port(ds, 6)) -- return 6; -- -- return -EINVAL; --} -- --static int --qca8k_setup_of_pws_reg(struct qca8k_priv *priv) --{ -- struct device_node *node = priv->dev->of_node; -- const struct qca8k_match_data *data; -- u32 val = 0; -- int ret; -- -- /* QCA8327 require to set to the correct mode. -- * His bigger brother QCA8328 have the 172 pin layout. -- * Should be applied by default but we set this just to make sure. -- */ -- if (priv->switch_id == QCA8K_ID_QCA8327) { -- data = of_device_get_match_data(priv->dev); -- -- /* Set the correct package of 148 pin for QCA8327 */ -- if (data->reduced_package) -- val |= QCA8327_PWS_PACKAGE148_EN; -- -- ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN, -- val); -- if (ret) -- return ret; -- } -- -- if (of_property_read_bool(node, "qca,ignore-power-on-sel")) -- val |= QCA8K_PWS_POWER_ON_SEL; -- -- if (of_property_read_bool(node, "qca,led-open-drain")) { -- if (!(val & QCA8K_PWS_POWER_ON_SEL)) { -- dev_err(priv->dev, "qca,led-open-drain require qca,ignore-power-on-sel to be set."); -- return -EINVAL; -- } -- -- val |= QCA8K_PWS_LED_OPEN_EN_CSR; -- } -- -- return qca8k_rmw(priv, QCA8K_REG_PWS, -- QCA8K_PWS_LED_OPEN_EN_CSR | QCA8K_PWS_POWER_ON_SEL, -- val); --} -- --static int --qca8k_parse_port_config(struct qca8k_priv *priv) --{ -- int port, cpu_port_index = -1, ret; -- struct device_node *port_dn; -- phy_interface_t mode; -- struct dsa_port *dp; -- u32 delay; -- -- /* We have 2 CPU port. 
Check them */ -- for (port = 0; port < QCA8K_NUM_PORTS; port++) { -- /* Skip every other port */ -- if (port != 0 && port != 6) -- continue; -- -- dp = dsa_to_port(priv->ds, port); -- port_dn = dp->dn; -- cpu_port_index++; -- -- if (!of_device_is_available(port_dn)) -- continue; -- -- ret = of_get_phy_mode(port_dn, &mode); -- if (ret) -- continue; -- -- switch (mode) { -- case PHY_INTERFACE_MODE_RGMII: -- case PHY_INTERFACE_MODE_RGMII_ID: -- case PHY_INTERFACE_MODE_RGMII_TXID: -- case PHY_INTERFACE_MODE_RGMII_RXID: -- case PHY_INTERFACE_MODE_SGMII: -- delay = 0; -- -- if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay)) -- /* Switch regs accept value in ns, convert ps to ns */ -- delay = delay / 1000; -- else if (mode == PHY_INTERFACE_MODE_RGMII_ID || -- mode == PHY_INTERFACE_MODE_RGMII_TXID) -- delay = 1; -- -- if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, delay)) { -- dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value"); -- delay = 3; -- } -- -- priv->ports_config.rgmii_tx_delay[cpu_port_index] = delay; -- -- delay = 0; -- -- if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay)) -- /* Switch regs accept value in ns, convert ps to ns */ -- delay = delay / 1000; -- else if (mode == PHY_INTERFACE_MODE_RGMII_ID || -- mode == PHY_INTERFACE_MODE_RGMII_RXID) -- delay = 2; -- -- if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, delay)) { -- dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value"); -- delay = 3; -- } -- -- priv->ports_config.rgmii_rx_delay[cpu_port_index] = delay; -- -- /* Skip sgmii parsing for rgmii* mode */ -- if (mode == PHY_INTERFACE_MODE_RGMII || -- mode == PHY_INTERFACE_MODE_RGMII_ID || -- mode == PHY_INTERFACE_MODE_RGMII_TXID || -- mode == PHY_INTERFACE_MODE_RGMII_RXID) -- break; -- -- if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge")) -- priv->ports_config.sgmii_tx_clk_falling_edge = true; -- -- if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge")) -- priv->ports_config.sgmii_rx_clk_falling_edge = true; -- -- if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) { -- priv->ports_config.sgmii_enable_pll = true; -- -- if (priv->switch_id == QCA8K_ID_QCA8327) { -- dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. 
Aborting enabling"); -- priv->ports_config.sgmii_enable_pll = false; -- } -- -- if (priv->switch_revision < 2) -- dev_warn(priv->dev, "SGMII PLL should NOT be enabled for qca8337 with revision 2 or more."); -- } -- -- break; -- default: -- continue; -- } -- } -- -- return 0; --} -- --static int --qca8k_setup(struct dsa_switch *ds) --{ -- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -- int cpu_port, ret, i; -- u32 mask; -- -- cpu_port = qca8k_find_cpu_port(ds); -- if (cpu_port < 0) { -- dev_err(priv->dev, "No cpu port configured in both cpu port0 and port6"); -- return cpu_port; -- } -- -- /* Parse CPU port config to be later used in phy_link mac_config */ -- ret = qca8k_parse_port_config(priv); -- if (ret) -- return ret; -- -- ret = qca8k_setup_mdio_bus(priv); -- if (ret) -- return ret; -- -- ret = qca8k_setup_of_pws_reg(priv); -- if (ret) -- return ret; -- -- ret = qca8k_setup_mac_pwr_sel(priv); -- if (ret) -- return ret; -- -- /* Make sure MAC06 is disabled */ -- ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL, -- QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN); -- if (ret) { -- dev_err(priv->dev, "failed disabling MAC06 exchange"); -- return ret; -- } -- -- /* Enable CPU Port */ -- ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, -- QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN); -- if (ret) { -- dev_err(priv->dev, "failed enabling CPU port"); -- return ret; -- } -- -- /* Enable MIB counters */ -- ret = qca8k_mib_init(priv); -- if (ret) -- dev_warn(priv->dev, "mib init failed"); -- -- /* Initial setup of all ports */ -- for (i = 0; i < QCA8K_NUM_PORTS; i++) { -- /* Disable forwarding by default on all ports */ -- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i), -- QCA8K_PORT_LOOKUP_MEMBER, 0); -- if (ret) -- return ret; -- -- /* Enable QCA header mode on all cpu ports */ -- if (dsa_is_cpu_port(ds, i)) { -- ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i), -- FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) | -- FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL)); -- if (ret) { -- dev_err(priv->dev, "failed enabling QCA header mode"); -- return ret; -- } -- } -- -- /* Disable MAC by default on all user ports */ -- if (dsa_is_user_port(ds, i)) -- qca8k_port_set_status(priv, i, 0); -- } -- -- /* Forward all unknown frames to CPU port for Linux processing -- * Notice that in multi-cpu config only one port should be set -- * for igmp, unknown, multicast and broadcast packet -- */ -- ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1, -- FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) | -- FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) | -- FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) | -- FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port))); -- if (ret) -- return ret; -- -- /* Setup connection between CPU port & user ports -- * Configure specific switch configuration for ports -- */ -- for (i = 0; i < QCA8K_NUM_PORTS; i++) { -- /* CPU port gets connected to all user ports of the switch */ -- if (dsa_is_cpu_port(ds, i)) { -- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i), -- QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds)); -- if (ret) -- return ret; -- } -- -- /* Individual user ports get connected to CPU port only */ -- if (dsa_is_user_port(ds, i)) { -- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i), -- QCA8K_PORT_LOOKUP_MEMBER, -- BIT(cpu_port)); -- if (ret) -- return ret; -- -- /* Enable ARP Auto-learning by default */ -- ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(i), -- 
QCA8K_PORT_LOOKUP_LEARN); -- if (ret) -- return ret; -- -- /* For port based vlans to work we need to set the -- * default egress vid -- */ -- ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i), -- QCA8K_EGREES_VLAN_PORT_MASK(i), -- QCA8K_EGREES_VLAN_PORT(i, QCA8K_PORT_VID_DEF)); -- if (ret) -- return ret; -- -- ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i), -- QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) | -- QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF)); -- if (ret) -- return ret; -- } -- -- /* The port 5 of the qca8337 have some problem in flood condition. The -- * original legacy driver had some specific buffer and priority settings -- * for the different port suggested by the QCA switch team. Add this -- * missing settings to improve switch stability under load condition. -- * This problem is limited to qca8337 and other qca8k switch are not affected. -- */ -- if (priv->switch_id == QCA8K_ID_QCA8337) { -- switch (i) { -- /* The 2 CPU port and port 5 requires some different -- * priority than any other ports. -- */ -- case 0: -- case 5: -- case 6: -- mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) | -- QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) | -- QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) | -- QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) | -- QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) | -- QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) | -- QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e); -- break; -- default: -- mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) | -- QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) | -- QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) | -- QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) | -- QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19); -- } -- qca8k_write(priv, QCA8K_REG_PORT_HOL_CTRL0(i), mask); -- -- mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) | -- QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN | -- QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN | -- QCA8K_PORT_HOL_CTRL1_WRED_EN; -- qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i), -- QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK | -- QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN | -- QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN | -- QCA8K_PORT_HOL_CTRL1_WRED_EN, -- mask); -- } -- } -- -- /* Special GLOBAL_FC_THRESH value are needed for ar8327 switch */ -- if (priv->switch_id == QCA8K_ID_QCA8327) { -- mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) | -- QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496); -- qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH, -- QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK | -- QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK, -- mask); -- } -- -- /* Setup our port MTUs to match power on defaults */ -- ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN); -- if (ret) -- dev_warn(priv->dev, "failed setting MTU settings"); -- -- /* Flush the FDB table */ -- qca8k_fdb_flush(priv); -- -- /* We don't have interrupts for link changes, so we need to poll */ -- ds->pcs_poll = true; -- -- /* Set min a max ageing value supported */ -- ds->ageing_time_min = 7000; -- ds->ageing_time_max = 458745000; -- -- /* Set max number of LAGs supported */ -- ds->num_lag_ids = QCA8K_NUM_LAGS; -- -- return 0; --} -- --static void --qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index, -- u32 reg) --{ -- u32 delay, val = 0; -- int ret; -- -- /* Delay can be declared in 3 different way. -- * Mode to rgmii and internal-delay standard binding defined -- * rgmii-id or rgmii-tx/rx phy mode set. -- * The parse logic set a delay different than 0 only when one -- * of the 3 different way is used. In all other case delay is -- * not enabled. With ID or TX/RXID delay is enabled and set -- * to the default and recommended value. 
-- */ -- if (priv->ports_config.rgmii_tx_delay[cpu_port_index]) { -- delay = priv->ports_config.rgmii_tx_delay[cpu_port_index]; -- -- val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) | -- QCA8K_PORT_PAD_RGMII_TX_DELAY_EN; -- } -- -- if (priv->ports_config.rgmii_rx_delay[cpu_port_index]) { -- delay = priv->ports_config.rgmii_rx_delay[cpu_port_index]; -- -- val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) | -- QCA8K_PORT_PAD_RGMII_RX_DELAY_EN; -- } -- -- /* Set RGMII delay based on the selected values */ -- ret = qca8k_rmw(priv, reg, -- QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK | -- QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK | -- QCA8K_PORT_PAD_RGMII_TX_DELAY_EN | -- QCA8K_PORT_PAD_RGMII_RX_DELAY_EN, -- val); -- if (ret) -- dev_err(priv->dev, "Failed to set internal delay for CPU port%d", -- cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6); --} -- --static void --qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode, -- const struct phylink_link_state *state) --{ -- struct qca8k_priv *priv = ds->priv; -- int cpu_port_index, ret; -- u32 reg, val; -- -- switch (port) { -- case 0: /* 1st CPU port */ -- if (state->interface != PHY_INTERFACE_MODE_RGMII && -- state->interface != PHY_INTERFACE_MODE_RGMII_ID && -- state->interface != PHY_INTERFACE_MODE_RGMII_TXID && -- state->interface != PHY_INTERFACE_MODE_RGMII_RXID && -- state->interface != PHY_INTERFACE_MODE_SGMII) -- return; -- -- reg = QCA8K_REG_PORT0_PAD_CTRL; -- cpu_port_index = QCA8K_CPU_PORT0; -- break; -- case 1: -- case 2: -- case 3: -- case 4: -- case 5: -- /* Internal PHY, nothing to do */ -- return; -- case 6: /* 2nd CPU port / external PHY */ -- if (state->interface != PHY_INTERFACE_MODE_RGMII && -- state->interface != PHY_INTERFACE_MODE_RGMII_ID && -- state->interface != PHY_INTERFACE_MODE_RGMII_TXID && -- state->interface != PHY_INTERFACE_MODE_RGMII_RXID && -- state->interface != PHY_INTERFACE_MODE_SGMII && -- state->interface != PHY_INTERFACE_MODE_1000BASEX) -- return; -- -- reg = QCA8K_REG_PORT6_PAD_CTRL; -- cpu_port_index = QCA8K_CPU_PORT6; -- break; -- default: -- dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port); -- return; -- } -- -- if (port != 6 && phylink_autoneg_inband(mode)) { -- dev_err(ds->dev, "%s: in-band negotiation unsupported\n", -- __func__); -- return; -- } -- -- switch (state->interface) { -- case PHY_INTERFACE_MODE_RGMII: -- case PHY_INTERFACE_MODE_RGMII_ID: -- case PHY_INTERFACE_MODE_RGMII_TXID: -- case PHY_INTERFACE_MODE_RGMII_RXID: -- qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN); -- -- /* Configure rgmii delay */ -- qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg); -- -- /* QCA8337 requires to set rgmii rx delay for all ports. -- * This is enabled through PORT5_PAD_CTRL for all ports, -- * rather than individual port registers. 
-- */ -- if (priv->switch_id == QCA8K_ID_QCA8337) -- qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL, -- QCA8K_PORT_PAD_RGMII_RX_DELAY_EN); -- break; -- case PHY_INTERFACE_MODE_SGMII: -- case PHY_INTERFACE_MODE_1000BASEX: -- /* Enable SGMII on the port */ -- qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN); -- -- /* Enable/disable SerDes auto-negotiation as necessary */ -- ret = qca8k_read(priv, QCA8K_REG_PWS, &val); -- if (ret) -- return; -- if (phylink_autoneg_inband(mode)) -- val &= ~QCA8K_PWS_SERDES_AEN_DIS; -- else -- val |= QCA8K_PWS_SERDES_AEN_DIS; -- qca8k_write(priv, QCA8K_REG_PWS, val); -- -- /* Configure the SGMII parameters */ -- ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val); -- if (ret) -- return; -- -- val |= QCA8K_SGMII_EN_SD; -- -- if (priv->ports_config.sgmii_enable_pll) -- val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX | -- QCA8K_SGMII_EN_TX; -- -- if (dsa_is_cpu_port(ds, port)) { -- /* CPU port, we're talking to the CPU MAC, be a PHY */ -- val &= ~QCA8K_SGMII_MODE_CTRL_MASK; -- val |= QCA8K_SGMII_MODE_CTRL_PHY; -- } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { -- val &= ~QCA8K_SGMII_MODE_CTRL_MASK; -- val |= QCA8K_SGMII_MODE_CTRL_MAC; -- } else if (state->interface == PHY_INTERFACE_MODE_1000BASEX) { -- val &= ~QCA8K_SGMII_MODE_CTRL_MASK; -- val |= QCA8K_SGMII_MODE_CTRL_BASEX; -- } -- -- qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val); -- -- /* From original code is reported port instability as SGMII also -- * require delay set. Apply advised values here or take them from DT. -- */ -- if (state->interface == PHY_INTERFACE_MODE_SGMII) -- qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg); -- -- /* For qca8327/qca8328/qca8334/qca8338 sgmii is unique and -- * falling edge is set writing in the PORT0 PAD reg -- */ -- if (priv->switch_id == QCA8K_ID_QCA8327 || -- priv->switch_id == QCA8K_ID_QCA8337) -- reg = QCA8K_REG_PORT0_PAD_CTRL; -- -- val = 0; -- -- /* SGMII Clock phase configuration */ -- if (priv->ports_config.sgmii_rx_clk_falling_edge) -- val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE; -- -- if (priv->ports_config.sgmii_tx_clk_falling_edge) -- val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE; -- -- if (val) -- ret = qca8k_rmw(priv, reg, -- QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE | -- QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE, -- val); -- -- break; -- default: -- dev_err(ds->dev, "xMII mode %s not supported for port %d\n", -- phy_modes(state->interface), port); -- return; -- } --} -- --static void --qca8k_phylink_validate(struct dsa_switch *ds, int port, -- unsigned long *supported, -- struct phylink_link_state *state) --{ -- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; -- -- switch (port) { -- case 0: /* 1st CPU port */ -- if (state->interface != PHY_INTERFACE_MODE_NA && -- state->interface != PHY_INTERFACE_MODE_RGMII && -- state->interface != PHY_INTERFACE_MODE_RGMII_ID && -- state->interface != PHY_INTERFACE_MODE_RGMII_TXID && -- state->interface != PHY_INTERFACE_MODE_RGMII_RXID && -- state->interface != PHY_INTERFACE_MODE_SGMII) -- goto unsupported; -- break; -- case 1: -- case 2: -- case 3: -- case 4: -- case 5: -- /* Internal PHY */ -- if (state->interface != PHY_INTERFACE_MODE_NA && -- state->interface != PHY_INTERFACE_MODE_GMII && -- state->interface != PHY_INTERFACE_MODE_INTERNAL) -- goto unsupported; -- break; -- case 6: /* 2nd CPU port / external PHY */ -- if (state->interface != PHY_INTERFACE_MODE_NA && -- state->interface != PHY_INTERFACE_MODE_RGMII && -- state->interface != PHY_INTERFACE_MODE_RGMII_ID && -- state->interface != 
PHY_INTERFACE_MODE_RGMII_TXID && -- state->interface != PHY_INTERFACE_MODE_RGMII_RXID && -- state->interface != PHY_INTERFACE_MODE_SGMII && -- state->interface != PHY_INTERFACE_MODE_1000BASEX) -- goto unsupported; -- break; -- default: --unsupported: -- linkmode_zero(supported); -- return; -- } -- -- phylink_set_port_modes(mask); -- phylink_set(mask, Autoneg); -- -- phylink_set(mask, 1000baseT_Full); -- phylink_set(mask, 10baseT_Half); -- phylink_set(mask, 10baseT_Full); -- phylink_set(mask, 100baseT_Half); -- phylink_set(mask, 100baseT_Full); -- -- if (state->interface == PHY_INTERFACE_MODE_1000BASEX) -- phylink_set(mask, 1000baseX_Full); -- -- phylink_set(mask, Pause); -- phylink_set(mask, Asym_Pause); -- -- linkmode_and(supported, supported, mask); -- linkmode_and(state->advertising, state->advertising, mask); --} -- --static int --qca8k_phylink_mac_link_state(struct dsa_switch *ds, int port, -- struct phylink_link_state *state) --{ -- struct qca8k_priv *priv = ds->priv; -- u32 reg; -- int ret; -- -- ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), ®); -- if (ret < 0) -- return ret; -- -- state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP); -- state->an_complete = state->link; -- state->an_enabled = !!(reg & QCA8K_PORT_STATUS_LINK_AUTO); -- state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL : -- DUPLEX_HALF; -- -- switch (reg & QCA8K_PORT_STATUS_SPEED) { -- case QCA8K_PORT_STATUS_SPEED_10: -- state->speed = SPEED_10; -- break; -- case QCA8K_PORT_STATUS_SPEED_100: -- state->speed = SPEED_100; -- break; -- case QCA8K_PORT_STATUS_SPEED_1000: -- state->speed = SPEED_1000; -- break; -- default: -- state->speed = SPEED_UNKNOWN; -- break; -- } -- -- state->pause = MLO_PAUSE_NONE; -- if (reg & QCA8K_PORT_STATUS_RXFLOW) -- state->pause |= MLO_PAUSE_RX; -- if (reg & QCA8K_PORT_STATUS_TXFLOW) -- state->pause |= MLO_PAUSE_TX; -- -- return 1; --} -- --static void --qca8k_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode, -- phy_interface_t interface) --{ -- struct qca8k_priv *priv = ds->priv; -- -- qca8k_port_set_status(priv, port, 0); --} -- --static void --qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode, -- phy_interface_t interface, struct phy_device *phydev, -- int speed, int duplex, bool tx_pause, bool rx_pause) --{ -- struct qca8k_priv *priv = ds->priv; -- u32 reg; -- -- if (phylink_autoneg_inband(mode)) { -- reg = QCA8K_PORT_STATUS_LINK_AUTO; -- } else { -- switch (speed) { -- case SPEED_10: -- reg = QCA8K_PORT_STATUS_SPEED_10; -- break; -- case SPEED_100: -- reg = QCA8K_PORT_STATUS_SPEED_100; -- break; -- case SPEED_1000: -- reg = QCA8K_PORT_STATUS_SPEED_1000; -- break; -- default: -- reg = QCA8K_PORT_STATUS_LINK_AUTO; -- break; -- } -- -- if (duplex == DUPLEX_FULL) -- reg |= QCA8K_PORT_STATUS_DUPLEX; -- -- if (rx_pause || dsa_is_cpu_port(ds, port)) -- reg |= QCA8K_PORT_STATUS_RXFLOW; -- -- if (tx_pause || dsa_is_cpu_port(ds, port)) -- reg |= QCA8K_PORT_STATUS_TXFLOW; -- } -- -- reg |= QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC; -- -- qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg); --} -- --static void --qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data) --{ -- const struct qca8k_match_data *match_data; -- struct qca8k_priv *priv = ds->priv; -- int i; -- -- if (stringset != ETH_SS_STATS) -- return; -- -- match_data = of_device_get_match_data(priv->dev); -- -- for (i = 0; i < match_data->mib_count; i++) -- strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name, -- ETH_GSTRING_LEN); --} 
-- --static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *skb) --{ -- const struct qca8k_match_data *match_data; -- struct qca8k_mib_eth_data *mib_eth_data; -- struct qca8k_priv *priv = ds->priv; -- const struct qca8k_mib_desc *mib; -- struct mib_ethhdr *mib_ethhdr; -- int i, mib_len, offset = 0; -- u64 *data; -- u8 port; -- -- mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb); -- mib_eth_data = &priv->mib_eth_data; -- -- /* The switch autocast every port. Ignore other packet and -- * parse only the requested one. -- */ -- port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, ntohs(mib_ethhdr->hdr)); -- if (port != mib_eth_data->req_port) -- goto exit; -- -- match_data = device_get_match_data(priv->dev); -- data = mib_eth_data->data; -- -- for (i = 0; i < match_data->mib_count; i++) { -- mib = &ar8327_mib[i]; -- -- /* First 3 mib are present in the skb head */ -- if (i < 3) { -- data[i] = mib_ethhdr->data[i]; -- continue; -- } -- -- mib_len = sizeof(uint32_t); -- -- /* Some mib are 64 bit wide */ -- if (mib->size == 2) -- mib_len = sizeof(uint64_t); -- -- /* Copy the mib value from packet to the */ -- memcpy(data + i, skb->data + offset, mib_len); -- -- /* Set the offset for the next mib */ -- offset += mib_len; -- } -- --exit: -- /* Complete on receiving all the mib packet */ -- if (refcount_dec_and_test(&mib_eth_data->port_parsed)) -- complete(&mib_eth_data->rw_done); --} -- --static int --qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data) --{ -- struct dsa_port *dp = dsa_to_port(ds, port); -- struct qca8k_mib_eth_data *mib_eth_data; -- struct qca8k_priv *priv = ds->priv; -- int ret; -- -- mib_eth_data = &priv->mib_eth_data; -- -- mutex_lock(&mib_eth_data->mutex); -- -- reinit_completion(&mib_eth_data->rw_done); -- -- mib_eth_data->req_port = dp->index; -- mib_eth_data->data = data; -- refcount_set(&mib_eth_data->port_parsed, QCA8K_NUM_PORTS); -- -- mutex_lock(&priv->reg_mutex); -- -- /* Send mib autocast request */ -- ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB, -- QCA8K_MIB_FUNC | QCA8K_MIB_BUSY, -- FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_CAST) | -- QCA8K_MIB_BUSY); -- -- mutex_unlock(&priv->reg_mutex); -- -- if (ret) -- goto exit; -- -- ret = wait_for_completion_timeout(&mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT); -- --exit: -- mutex_unlock(&mib_eth_data->mutex); -- -- return ret; --} -- --static void --qca8k_get_ethtool_stats(struct dsa_switch *ds, int port, -- uint64_t *data) --{ -- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -- const struct qca8k_match_data *match_data; -- const struct qca8k_mib_desc *mib; -- u32 reg, i, val; -- u32 hi = 0; -- int ret; -- -- if (priv->mgmt_master && -- qca8k_get_ethtool_stats_eth(ds, port, data) > 0) -- return; -- -- match_data = of_device_get_match_data(priv->dev); -- -- for (i = 0; i < match_data->mib_count; i++) { -- mib = &ar8327_mib[i]; -- reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset; -- -- ret = qca8k_read(priv, reg, &val); -- if (ret < 0) -- continue; -- -- if (mib->size == 2) { -- ret = qca8k_read(priv, reg + 4, &hi); -- if (ret < 0) -- continue; -- } -- -- data[i] = val; -- if (mib->size == 2) -- data[i] |= (u64)hi << 32; -- } --} -- --static int --qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset) --{ -- const struct qca8k_match_data *match_data; -- struct qca8k_priv *priv = ds->priv; -- -- if (sset != ETH_SS_STATS) -- return 0; -- -- match_data = of_device_get_match_data(priv->dev); -- -- return match_data->mib_count; --} -- --static int 
--qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee) --{ -- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -- u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port); -- u32 reg; -- int ret; -- -- mutex_lock(&priv->reg_mutex); -- ret = qca8k_read(priv, QCA8K_REG_EEE_CTRL, ®); -- if (ret < 0) -- goto exit; -- -- if (eee->eee_enabled) -- reg |= lpi_en; -- else -- reg &= ~lpi_en; -- ret = qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg); -- --exit: -- mutex_unlock(&priv->reg_mutex); -- return ret; --} -- --static int --qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e) --{ -- /* Nothing to do on the port's MAC */ -- return 0; --} -- --static void --qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) --{ -- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -- u32 stp_state; -- -- switch (state) { -- case BR_STATE_DISABLED: -- stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED; -- break; -- case BR_STATE_BLOCKING: -- stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING; -- break; -- case BR_STATE_LISTENING: -- stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING; -- break; -- case BR_STATE_LEARNING: -- stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING; -- break; -- case BR_STATE_FORWARDING: -- default: -- stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD; -- break; -- } -- -- qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), -- QCA8K_PORT_LOOKUP_STATE_MASK, stp_state); --} -- --static int --qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br) --{ -- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -- int port_mask, cpu_port; -- int i, ret; -- -- cpu_port = dsa_to_port(ds, port)->cpu_dp->index; -- port_mask = BIT(cpu_port); -- -- for (i = 0; i < QCA8K_NUM_PORTS; i++) { -- if (dsa_is_cpu_port(ds, i)) -- continue; -- if (dsa_to_port(ds, i)->bridge_dev != br) -- continue; -- /* Add this port to the portvlan mask of the other ports -- * in the bridge -- */ -- ret = regmap_set_bits(priv->regmap, -- QCA8K_PORT_LOOKUP_CTRL(i), -- BIT(port)); -- if (ret) -- return ret; -- if (i != port) -- port_mask |= BIT(i); -- } -- -- /* Add all other ports to this ports portvlan mask */ -- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), -- QCA8K_PORT_LOOKUP_MEMBER, port_mask); -- -- return ret; --} -- --static void --qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br) --{ -- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -- int cpu_port, i; -- -- cpu_port = dsa_to_port(ds, port)->cpu_dp->index; -- -- for (i = 0; i < QCA8K_NUM_PORTS; i++) { -- if (dsa_is_cpu_port(ds, i)) -- continue; -- if (dsa_to_port(ds, i)->bridge_dev != br) -- continue; -- /* Remove this port to the portvlan mask of the other ports -- * in the bridge -- */ -- regmap_clear_bits(priv->regmap, -- QCA8K_PORT_LOOKUP_CTRL(i), -- BIT(port)); -- } -- -- /* Set the cpu port to be the only one in the portvlan mask of -- * this port -- */ -- qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), -- QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port)); --} -- --static void --qca8k_port_fast_age(struct dsa_switch *ds, int port) --{ -- struct qca8k_priv *priv = ds->priv; -- -- mutex_lock(&priv->reg_mutex); -- qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port); -- mutex_unlock(&priv->reg_mutex); --} -- --static int --qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs) --{ -- struct qca8k_priv *priv = ds->priv; -- unsigned int secs = msecs / 1000; -- u32 val; -- -- /* AGE_TIME reg is set in 7s step */ -- val = secs / 7; -- -- /* Handle case with 0 as val to NOT 
disable -- * learning -- */ -- if (!val) -- val = 1; -- -- return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL, QCA8K_ATU_AGE_TIME_MASK, -- QCA8K_ATU_AGE_TIME(val)); --} -- --static int --qca8k_port_enable(struct dsa_switch *ds, int port, -- struct phy_device *phy) --{ -- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -- -- qca8k_port_set_status(priv, port, 1); -- priv->port_enabled_map |= BIT(port); -- -- if (dsa_is_user_port(ds, port)) -- phy_support_asym_pause(phy); -- -- return 0; --} -- --static void --qca8k_port_disable(struct dsa_switch *ds, int port) --{ -- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -- -- qca8k_port_set_status(priv, port, 0); -- priv->port_enabled_map &= ~BIT(port); --} -- --static int --qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu) --{ -- struct qca8k_priv *priv = ds->priv; -- int ret; -- -- /* We have only have a general MTU setting. -- * DSA always set the CPU port's MTU to the largest MTU of the slave -- * ports. -- * Setting MTU just for the CPU port is sufficient to correctly set a -- * value for every port. -- */ -- if (!dsa_is_cpu_port(ds, port)) -- return 0; -- -- /* To change the MAX_FRAME_SIZE the cpu ports must be off or -- * the switch panics. -- * Turn off both cpu ports before applying the new value to prevent -- * this. -- */ -- if (priv->port_enabled_map & BIT(0)) -- qca8k_port_set_status(priv, 0, 0); -- -- if (priv->port_enabled_map & BIT(6)) -- qca8k_port_set_status(priv, 6, 0); -- -- /* Include L2 header / FCS length */ -- ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu + ETH_HLEN + ETH_FCS_LEN); -- -- if (priv->port_enabled_map & BIT(0)) -- qca8k_port_set_status(priv, 0, 1); -- -- if (priv->port_enabled_map & BIT(6)) -- qca8k_port_set_status(priv, 6, 1); -- -- return ret; --} -- --static int --qca8k_port_max_mtu(struct dsa_switch *ds, int port) --{ -- return QCA8K_MAX_MTU; --} -- --static int --qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr, -- u16 port_mask, u16 vid) --{ -- /* Set the vid to the port vlan id if no vid is set */ -- if (!vid) -- vid = QCA8K_PORT_VID_DEF; -- -- return qca8k_fdb_add(priv, addr, port_mask, vid, -- QCA8K_ATU_STATUS_STATIC); --} -- --static int --qca8k_port_fdb_add(struct dsa_switch *ds, int port, -- const unsigned char *addr, u16 vid) --{ -- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -- u16 port_mask = BIT(port); -- -- return qca8k_port_fdb_insert(priv, addr, port_mask, vid); --} -- --static int --qca8k_port_fdb_del(struct dsa_switch *ds, int port, -- const unsigned char *addr, u16 vid) --{ -- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -- u16 port_mask = BIT(port); -- -- if (!vid) -- vid = QCA8K_PORT_VID_DEF; -- -- return qca8k_fdb_del(priv, addr, port_mask, vid); --} -- --static int --qca8k_port_fdb_dump(struct dsa_switch *ds, int port, -- dsa_fdb_dump_cb_t *cb, void *data) --{ -- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -- struct qca8k_fdb _fdb = { 0 }; -- int cnt = QCA8K_NUM_FDB_RECORDS; -- bool is_static; -- int ret = 0; -- -- mutex_lock(&priv->reg_mutex); -- while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) { -- if (!_fdb.aging) -- break; -- is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC); -- ret = cb(_fdb.mac, _fdb.vid, is_static, data); -- if (ret) -- break; -- } -- mutex_unlock(&priv->reg_mutex); -- -- return 0; --} -- --static int --qca8k_port_mdb_add(struct dsa_switch *ds, int port, -- const struct switchdev_obj_port_mdb *mdb) --{ -- struct qca8k_priv *priv = ds->priv; -- const u8 
*addr = mdb->addr; -- u16 vid = mdb->vid; -- -- return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid); --} -- --static int --qca8k_port_mdb_del(struct dsa_switch *ds, int port, -- const struct switchdev_obj_port_mdb *mdb) --{ -- struct qca8k_priv *priv = ds->priv; -- const u8 *addr = mdb->addr; -- u16 vid = mdb->vid; -- -- return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid); --} -- --static int --qca8k_port_mirror_add(struct dsa_switch *ds, int port, -- struct dsa_mall_mirror_tc_entry *mirror, -- bool ingress) --{ -- struct qca8k_priv *priv = ds->priv; -- int monitor_port, ret; -- u32 reg, val; -- -- /* Check for existent entry */ -- if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port)) -- return -EEXIST; -- -- ret = regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val); -- if (ret) -- return ret; -- -- /* QCA83xx can have only one port set to mirror mode. -- * Check that the correct port is requested and return error otherwise. -- * When no mirror port is set, the values is set to 0xF -- */ -- monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val); -- if (monitor_port != 0xF && monitor_port != mirror->to_local_port) -- return -EEXIST; -- -- /* Set the monitor port */ -- val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, -- mirror->to_local_port); -- ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, -- QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val); -- if (ret) -- return ret; -- -- if (ingress) { -- reg = QCA8K_PORT_LOOKUP_CTRL(port); -- val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN; -- } else { -- reg = QCA8K_REG_PORT_HOL_CTRL1(port); -- val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN; -- } -- -- ret = regmap_update_bits(priv->regmap, reg, val, val); -- if (ret) -- return ret; -- -- /* Track mirror port for tx and rx to decide when the -- * mirror port has to be disabled. -- */ -- if (ingress) -- priv->mirror_rx |= BIT(port); -- else -- priv->mirror_tx |= BIT(port); -- -- return 0; --} -- --static void --qca8k_port_mirror_del(struct dsa_switch *ds, int port, -- struct dsa_mall_mirror_tc_entry *mirror) --{ -- struct qca8k_priv *priv = ds->priv; -- u32 reg, val; -- int ret; -- -- if (mirror->ingress) { -- reg = QCA8K_PORT_LOOKUP_CTRL(port); -- val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN; -- } else { -- reg = QCA8K_REG_PORT_HOL_CTRL1(port); -- val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN; -- } -- -- ret = regmap_clear_bits(priv->regmap, reg, val); -- if (ret) -- goto err; -- -- if (mirror->ingress) -- priv->mirror_rx &= ~BIT(port); -- else -- priv->mirror_tx &= ~BIT(port); -- -- /* No port set to send packet to mirror port. 
Disable mirror port */ -- if (!priv->mirror_rx && !priv->mirror_tx) { -- val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, 0xF); -- ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, -- QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val); -- if (ret) -- goto err; -- } --err: -- dev_err(priv->dev, "Failed to del mirror port from %d", port); --} -- --static int --qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, -- struct netlink_ext_ack *extack) --{ -- struct qca8k_priv *priv = ds->priv; -- int ret; -- -- if (vlan_filtering) { -- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), -- QCA8K_PORT_LOOKUP_VLAN_MODE_MASK, -- QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE); -- } else { -- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), -- QCA8K_PORT_LOOKUP_VLAN_MODE_MASK, -- QCA8K_PORT_LOOKUP_VLAN_MODE_NONE); -- } -- -- return ret; --} -- --static int --qca8k_port_vlan_add(struct dsa_switch *ds, int port, -- const struct switchdev_obj_port_vlan *vlan, -- struct netlink_ext_ack *extack) --{ -- bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; -- bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; -- struct qca8k_priv *priv = ds->priv; -- int ret; -- -- ret = qca8k_vlan_add(priv, port, vlan->vid, untagged); -- if (ret) { -- dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret); -- return ret; -- } -- -- if (pvid) { -- ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port), -- QCA8K_EGREES_VLAN_PORT_MASK(port), -- QCA8K_EGREES_VLAN_PORT(port, vlan->vid)); -- if (ret) -- return ret; -- -- ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port), -- QCA8K_PORT_VLAN_CVID(vlan->vid) | -- QCA8K_PORT_VLAN_SVID(vlan->vid)); -- } -- -- return ret; --} -- --static int --qca8k_port_vlan_del(struct dsa_switch *ds, int port, -- const struct switchdev_obj_port_vlan *vlan) --{ -- struct qca8k_priv *priv = ds->priv; -- int ret; -- -- ret = qca8k_vlan_del(priv, port, vlan->vid); -- if (ret) -- dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret); -- -- return ret; --} -- --static u32 qca8k_get_phy_flags(struct dsa_switch *ds, int port) --{ -- struct qca8k_priv *priv = ds->priv; -- -- /* Communicate to the phy internal driver the switch revision. -- * Based on the switch revision different values needs to be -- * set to the dbg and mmd reg on the phy. -- * The first 2 bit are used to communicate the switch revision -- * to the phy driver. 
-- */ -- if (port > 0 && port < 6) -- return priv->switch_revision; -- -- return 0; --} -- --static enum dsa_tag_protocol --qca8k_get_tag_protocol(struct dsa_switch *ds, int port, -- enum dsa_tag_protocol mp) --{ -- return DSA_TAG_PROTO_QCA; --} -- --static bool --qca8k_lag_can_offload(struct dsa_switch *ds, -- struct net_device *lag, -- struct netdev_lag_upper_info *info) --{ -- struct dsa_port *dp; -- int id, members = 0; -- -- id = dsa_lag_id(ds->dst, lag); -- if (id < 0 || id >= ds->num_lag_ids) -- return false; -- -- dsa_lag_foreach_port(dp, ds->dst, lag) -- /* Includes the port joining the LAG */ -- members++; -- -- if (members > QCA8K_NUM_PORTS_FOR_LAG) -- return false; -- -- if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) -- return false; -- -- if (info->hash_type != NETDEV_LAG_HASH_L2 && -- info->hash_type != NETDEV_LAG_HASH_L23) -- return false; -- -- return true; --} -- --static int --qca8k_lag_setup_hash(struct dsa_switch *ds, -- struct net_device *lag, -- struct netdev_lag_upper_info *info) --{ -- struct qca8k_priv *priv = ds->priv; -- bool unique_lag = true; -- u32 hash = 0; -- int i, id; -- -- id = dsa_lag_id(ds->dst, lag); -- -- switch (info->hash_type) { -- case NETDEV_LAG_HASH_L23: -- hash |= QCA8K_TRUNK_HASH_SIP_EN; -- hash |= QCA8K_TRUNK_HASH_DIP_EN; -- fallthrough; -- case NETDEV_LAG_HASH_L2: -- hash |= QCA8K_TRUNK_HASH_SA_EN; -- hash |= QCA8K_TRUNK_HASH_DA_EN; -- break; -- default: /* We should NEVER reach this */ -- return -EOPNOTSUPP; -- } -- -- /* Check if we are the unique configured LAG */ -- dsa_lags_foreach_id(i, ds->dst) -- if (i != id && dsa_lag_dev(ds->dst, i)) { -- unique_lag = false; -- break; -- } -- -- /* Hash Mode is global. Make sure the same Hash Mode -- * is set to all the 4 possible lag. -- * If we are the unique LAG we can set whatever hash -- * mode we want. -- * To change hash mode it's needed to remove all LAG -- * and change the mode with the latest. -- */ -- if (unique_lag) { -- priv->lag_hash_mode = hash; -- } else if (priv->lag_hash_mode != hash) { -- netdev_err(lag, "Error: Mismateched Hash Mode across different lag is not supported\n"); -- return -EOPNOTSUPP; -- } -- -- return regmap_update_bits(priv->regmap, QCA8K_TRUNK_HASH_EN_CTRL, -- QCA8K_TRUNK_HASH_MASK, hash); --} -- --static int --qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port, -- struct net_device *lag, bool delete) --{ -- struct qca8k_priv *priv = ds->priv; -- int ret, id, i; -- u32 val; -- -- id = dsa_lag_id(ds->dst, lag); -- -- /* Read current port member */ -- ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val); -- if (ret) -- return ret; -- -- /* Shift val to the correct trunk */ -- val >>= QCA8K_REG_GOL_TRUNK_SHIFT(id); -- val &= QCA8K_REG_GOL_TRUNK_MEMBER_MASK; -- if (delete) -- val &= ~BIT(port); -- else -- val |= BIT(port); -- -- /* Update port member. 
With empty portmap disable trunk */ -- ret = regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, -- QCA8K_REG_GOL_TRUNK_MEMBER(id) | -- QCA8K_REG_GOL_TRUNK_EN(id), -- !val << QCA8K_REG_GOL_TRUNK_SHIFT(id) | -- val << QCA8K_REG_GOL_TRUNK_SHIFT(id)); -- -- /* Search empty member if adding or port on deleting */ -- for (i = 0; i < QCA8K_NUM_PORTS_FOR_LAG; i++) { -- ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), &val); -- if (ret) -- return ret; -- -- val >>= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i); -- val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK; -- -- if (delete) { -- /* If port flagged to be disabled assume this member is -- * empty -- */ -- if (val != QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK) -- continue; -- -- val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK; -- if (val != port) -- continue; -- } else { -- /* If port flagged to be enabled assume this member is -- * already set -- */ -- if (val == QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK) -- continue; -- } -- -- /* We have found the member to add/remove */ -- break; -- } -- -- /* Set port in the correct port mask or disable port if in delete mode */ -- return regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), -- QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(id, i) | -- QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(id, i), -- !delete << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i) | -- port << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i)); --} -- --static int --qca8k_port_lag_join(struct dsa_switch *ds, int port, -- struct net_device *lag, -- struct netdev_lag_upper_info *info) --{ -- int ret; -- -- if (!qca8k_lag_can_offload(ds, lag, info)) -- return -EOPNOTSUPP; -- -- ret = qca8k_lag_setup_hash(ds, lag, info); -- if (ret) -- return ret; -- -- return qca8k_lag_refresh_portmap(ds, port, lag, false); --} -- --static int --qca8k_port_lag_leave(struct dsa_switch *ds, int port, -- struct net_device *lag) --{ -- return qca8k_lag_refresh_portmap(ds, port, lag, true); --} -- --static void --qca8k_master_change(struct dsa_switch *ds, const struct net_device *master, -- bool operational) --{ -- struct dsa_port *dp = master->dsa_ptr; -- struct qca8k_priv *priv = ds->priv; -- -- /* Ethernet MIB/MDIO is only supported for CPU port 0 */ -- if (dp->index != 0) -- return; -- -- mutex_lock(&priv->mgmt_eth_data.mutex); -- mutex_lock(&priv->mib_eth_data.mutex); -- -- priv->mgmt_master = operational ? 
(struct net_device *)master : NULL; -- -- mutex_unlock(&priv->mib_eth_data.mutex); -- mutex_unlock(&priv->mgmt_eth_data.mutex); --} -- --static int qca8k_connect_tag_protocol(struct dsa_switch *ds, -- enum dsa_tag_protocol proto) --{ -- struct qca_tagger_data *tagger_data; -- -- switch (proto) { -- case DSA_TAG_PROTO_QCA: -- tagger_data = ds->tagger_data; -- -- tagger_data->rw_reg_ack_handler = qca8k_rw_reg_ack_handler; -- tagger_data->mib_autocast_handler = qca8k_mib_autocast_handler; -- -- break; -- default: -- return -EOPNOTSUPP; -- } -- -- return 0; --} -- --static const struct dsa_switch_ops qca8k_switch_ops = { -- .get_tag_protocol = qca8k_get_tag_protocol, -- .setup = qca8k_setup, -- .get_strings = qca8k_get_strings, -- .get_ethtool_stats = qca8k_get_ethtool_stats, -- .get_sset_count = qca8k_get_sset_count, -- .set_ageing_time = qca8k_set_ageing_time, -- .get_mac_eee = qca8k_get_mac_eee, -- .set_mac_eee = qca8k_set_mac_eee, -- .port_enable = qca8k_port_enable, -- .port_disable = qca8k_port_disable, -- .port_change_mtu = qca8k_port_change_mtu, -- .port_max_mtu = qca8k_port_max_mtu, -- .port_stp_state_set = qca8k_port_stp_state_set, -- .port_bridge_join = qca8k_port_bridge_join, -- .port_bridge_leave = qca8k_port_bridge_leave, -- .port_fast_age = qca8k_port_fast_age, -- .port_fdb_add = qca8k_port_fdb_add, -- .port_fdb_del = qca8k_port_fdb_del, -- .port_fdb_dump = qca8k_port_fdb_dump, -- .port_mdb_add = qca8k_port_mdb_add, -- .port_mdb_del = qca8k_port_mdb_del, -- .port_mirror_add = qca8k_port_mirror_add, -- .port_mirror_del = qca8k_port_mirror_del, -- .port_vlan_filtering = qca8k_port_vlan_filtering, -- .port_vlan_add = qca8k_port_vlan_add, -- .port_vlan_del = qca8k_port_vlan_del, -- .phylink_validate = qca8k_phylink_validate, -- .phylink_mac_link_state = qca8k_phylink_mac_link_state, -- .phylink_mac_config = qca8k_phylink_mac_config, -- .phylink_mac_link_down = qca8k_phylink_mac_link_down, -- .phylink_mac_link_up = qca8k_phylink_mac_link_up, -- .get_phy_flags = qca8k_get_phy_flags, -- .port_lag_join = qca8k_port_lag_join, -- .port_lag_leave = qca8k_port_lag_leave, -- .master_state_change = qca8k_master_change, -- .connect_tag_protocol = qca8k_connect_tag_protocol, --}; -- --static int qca8k_read_switch_id(struct qca8k_priv *priv) --{ -- const struct qca8k_match_data *data; -- u32 val; -- u8 id; -- int ret; -- -- /* get the switches ID from the compatible */ -- data = of_device_get_match_data(priv->dev); -- if (!data) -- return -ENODEV; -- -- ret = qca8k_read(priv, QCA8K_REG_MASK_CTRL, &val); -- if (ret < 0) -- return -ENODEV; -- -- id = QCA8K_MASK_CTRL_DEVICE_ID(val); -- if (id != data->id) { -- dev_err(priv->dev, "Switch id detected %x but expected %x", id, data->id); -- return -ENODEV; -- } -- -- priv->switch_id = id; -- -- /* Save revision to communicate to the internal PHY driver */ -- priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val); -- -- return 0; --} -- --static int --qca8k_sw_probe(struct mdio_device *mdiodev) --{ -- struct qca8k_priv *priv; -- int ret; -- -- /* allocate the private data struct so that we can probe the switches -- * ID register -- */ -- priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL); -- if (!priv) -- return -ENOMEM; -- -- priv->bus = mdiodev->bus; -- priv->dev = &mdiodev->dev; -- -- priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset", -- GPIOD_ASIS); -- if (IS_ERR(priv->reset_gpio)) -- return PTR_ERR(priv->reset_gpio); -- -- if (priv->reset_gpio) { -- gpiod_set_value_cansleep(priv->reset_gpio, 1); -- /* The active low duration 
must be greater than 10 ms -- * and checkpatch.pl wants 20 ms. -- */ -- msleep(20); -- gpiod_set_value_cansleep(priv->reset_gpio, 0); -- } -- -- /* Start by setting up the register mapping */ -- priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, priv, -- &qca8k_regmap_config); -- if (IS_ERR(priv->regmap)) { -- dev_err(priv->dev, "regmap initialization failed"); -- return PTR_ERR(priv->regmap); -- } -- -- priv->mdio_cache.page = 0xffff; -- priv->mdio_cache.lo = 0xffff; -- priv->mdio_cache.hi = 0xffff; -- -- /* Check the detected switch id */ -- ret = qca8k_read_switch_id(priv); -- if (ret) -- return ret; -- -- priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL); -- if (!priv->ds) -- return -ENOMEM; -- -- mutex_init(&priv->mgmt_eth_data.mutex); -- init_completion(&priv->mgmt_eth_data.rw_done); -- -- mutex_init(&priv->mib_eth_data.mutex); -- init_completion(&priv->mib_eth_data.rw_done); -- -- priv->ds->dev = &mdiodev->dev; -- priv->ds->num_ports = QCA8K_NUM_PORTS; -- priv->ds->priv = priv; -- priv->ds->ops = &qca8k_switch_ops; -- mutex_init(&priv->reg_mutex); -- dev_set_drvdata(&mdiodev->dev, priv); -- -- return dsa_register_switch(priv->ds); --} -- --static void --qca8k_sw_remove(struct mdio_device *mdiodev) --{ -- struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev); -- int i; -- -- if (!priv) -- return; -- -- for (i = 0; i < QCA8K_NUM_PORTS; i++) -- qca8k_port_set_status(priv, i, 0); -- -- dsa_unregister_switch(priv->ds); -- -- dev_set_drvdata(&mdiodev->dev, NULL); --} -- --static void qca8k_sw_shutdown(struct mdio_device *mdiodev) --{ -- struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev); -- -- if (!priv) -- return; -- -- dsa_switch_shutdown(priv->ds); -- -- dev_set_drvdata(&mdiodev->dev, NULL); --} -- --#ifdef CONFIG_PM_SLEEP --static void --qca8k_set_pm(struct qca8k_priv *priv, int enable) --{ -- int port; -- -- for (port = 0; port < QCA8K_NUM_PORTS; port++) { -- /* Do not enable on resume if the port was -- * disabled before. 
-- */ -- if (!(priv->port_enabled_map & BIT(port))) -- continue; -- -- qca8k_port_set_status(priv, port, enable); -- } --} -- --static int qca8k_suspend(struct device *dev) --{ -- struct qca8k_priv *priv = dev_get_drvdata(dev); -- -- qca8k_set_pm(priv, 0); -- -- return dsa_switch_suspend(priv->ds); --} -- --static int qca8k_resume(struct device *dev) --{ -- struct qca8k_priv *priv = dev_get_drvdata(dev); -- -- qca8k_set_pm(priv, 1); -- -- return dsa_switch_resume(priv->ds); --} --#endif /* CONFIG_PM_SLEEP */ -- --static SIMPLE_DEV_PM_OPS(qca8k_pm_ops, -- qca8k_suspend, qca8k_resume); -- --static const struct qca8k_match_data qca8327 = { -- .id = QCA8K_ID_QCA8327, -- .reduced_package = true, -- .mib_count = QCA8K_QCA832X_MIB_COUNT, --}; -- --static const struct qca8k_match_data qca8328 = { -- .id = QCA8K_ID_QCA8327, -- .mib_count = QCA8K_QCA832X_MIB_COUNT, --}; -- --static const struct qca8k_match_data qca833x = { -- .id = QCA8K_ID_QCA8337, -- .mib_count = QCA8K_QCA833X_MIB_COUNT, --}; -- --static const struct of_device_id qca8k_of_match[] = { -- { .compatible = "qca,qca8327", .data = &qca8327 }, -- { .compatible = "qca,qca8328", .data = &qca8328 }, -- { .compatible = "qca,qca8334", .data = &qca833x }, -- { .compatible = "qca,qca8337", .data = &qca833x }, -- { /* sentinel */ }, --}; -- --static struct mdio_driver qca8kmdio_driver = { -- .probe = qca8k_sw_probe, -- .remove = qca8k_sw_remove, -- .shutdown = qca8k_sw_shutdown, -- .mdiodrv.driver = { -- .name = "qca8k", -- .of_match_table = qca8k_of_match, -- .pm = &qca8k_pm_ops, -- }, --}; -- --mdio_module_driver(qca8kmdio_driver); -- --MODULE_AUTHOR("Mathieu Olivari, John Crispin "); --MODULE_DESCRIPTION("Driver for QCA8K ethernet switch family"); --MODULE_LICENSE("GPL v2"); --MODULE_ALIAS("platform:qca8k"); ---- a/drivers/net/dsa/qca8k.h -+++ /dev/null -@@ -1,411 +0,0 @@ --/* SPDX-License-Identifier: GPL-2.0-only */ --/* -- * Copyright (C) 2009 Felix Fietkau -- * Copyright (C) 2011-2012 Gabor Juhos -- * Copyright (c) 2015, The Linux Foundation. All rights reserved. 
-- */ -- --#ifndef __QCA8K_H --#define __QCA8K_H -- --#include --#include --#include --#include -- --#define QCA8K_ETHERNET_MDIO_PRIORITY 7 --#define QCA8K_ETHERNET_PHY_PRIORITY 6 --#define QCA8K_ETHERNET_TIMEOUT 100 -- --#define QCA8K_NUM_PORTS 7 --#define QCA8K_NUM_CPU_PORTS 2 --#define QCA8K_MAX_MTU 9000 --#define QCA8K_NUM_LAGS 4 --#define QCA8K_NUM_PORTS_FOR_LAG 4 -- --#define PHY_ID_QCA8327 0x004dd034 --#define QCA8K_ID_QCA8327 0x12 --#define PHY_ID_QCA8337 0x004dd036 --#define QCA8K_ID_QCA8337 0x13 -- --#define QCA8K_QCA832X_MIB_COUNT 39 --#define QCA8K_QCA833X_MIB_COUNT 41 -- --#define QCA8K_BUSY_WAIT_TIMEOUT 2000 -- --#define QCA8K_NUM_FDB_RECORDS 2048 -- --#define QCA8K_PORT_VID_DEF 1 -- --/* Global control registers */ --#define QCA8K_REG_MASK_CTRL 0x000 --#define QCA8K_MASK_CTRL_REV_ID_MASK GENMASK(7, 0) --#define QCA8K_MASK_CTRL_REV_ID(x) FIELD_GET(QCA8K_MASK_CTRL_REV_ID_MASK, x) --#define QCA8K_MASK_CTRL_DEVICE_ID_MASK GENMASK(15, 8) --#define QCA8K_MASK_CTRL_DEVICE_ID(x) FIELD_GET(QCA8K_MASK_CTRL_DEVICE_ID_MASK, x) --#define QCA8K_REG_PORT0_PAD_CTRL 0x004 --#define QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN BIT(31) --#define QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE BIT(19) --#define QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE BIT(18) --#define QCA8K_REG_PORT5_PAD_CTRL 0x008 --#define QCA8K_REG_PORT6_PAD_CTRL 0x00c --#define QCA8K_PORT_PAD_RGMII_EN BIT(26) --#define QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK GENMASK(23, 22) --#define QCA8K_PORT_PAD_RGMII_TX_DELAY(x) FIELD_PREP(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, x) --#define QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK GENMASK(21, 20) --#define QCA8K_PORT_PAD_RGMII_RX_DELAY(x) FIELD_PREP(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, x) --#define QCA8K_PORT_PAD_RGMII_TX_DELAY_EN BIT(25) --#define QCA8K_PORT_PAD_RGMII_RX_DELAY_EN BIT(24) --#define QCA8K_PORT_PAD_SGMII_EN BIT(7) --#define QCA8K_REG_PWS 0x010 --#define QCA8K_PWS_POWER_ON_SEL BIT(31) --/* This reg is only valid for QCA832x and toggle the package -- * type from 176 pin (by default) to 148 pin used on QCA8327 -- */ --#define QCA8327_PWS_PACKAGE148_EN BIT(30) --#define QCA8K_PWS_LED_OPEN_EN_CSR BIT(24) --#define QCA8K_PWS_SERDES_AEN_DIS BIT(7) --#define QCA8K_REG_MODULE_EN 0x030 --#define QCA8K_MODULE_EN_MIB BIT(0) --#define QCA8K_REG_MIB 0x034 --#define QCA8K_MIB_FUNC GENMASK(26, 24) --#define QCA8K_MIB_CPU_KEEP BIT(20) --#define QCA8K_MIB_BUSY BIT(17) --#define QCA8K_MDIO_MASTER_CTRL 0x3c --#define QCA8K_MDIO_MASTER_BUSY BIT(31) --#define QCA8K_MDIO_MASTER_EN BIT(30) --#define QCA8K_MDIO_MASTER_READ BIT(27) --#define QCA8K_MDIO_MASTER_WRITE 0 --#define QCA8K_MDIO_MASTER_SUP_PRE BIT(26) --#define QCA8K_MDIO_MASTER_PHY_ADDR_MASK GENMASK(25, 21) --#define QCA8K_MDIO_MASTER_PHY_ADDR(x) FIELD_PREP(QCA8K_MDIO_MASTER_PHY_ADDR_MASK, x) --#define QCA8K_MDIO_MASTER_REG_ADDR_MASK GENMASK(20, 16) --#define QCA8K_MDIO_MASTER_REG_ADDR(x) FIELD_PREP(QCA8K_MDIO_MASTER_REG_ADDR_MASK, x) --#define QCA8K_MDIO_MASTER_DATA_MASK GENMASK(15, 0) --#define QCA8K_MDIO_MASTER_DATA(x) FIELD_PREP(QCA8K_MDIO_MASTER_DATA_MASK, x) --#define QCA8K_MDIO_MASTER_MAX_PORTS 5 --#define QCA8K_MDIO_MASTER_MAX_REG 32 --#define QCA8K_GOL_MAC_ADDR0 0x60 --#define QCA8K_GOL_MAC_ADDR1 0x64 --#define QCA8K_MAX_FRAME_SIZE 0x78 --#define QCA8K_REG_PORT_STATUS(_i) (0x07c + (_i) * 4) --#define QCA8K_PORT_STATUS_SPEED GENMASK(1, 0) --#define QCA8K_PORT_STATUS_SPEED_10 0 --#define QCA8K_PORT_STATUS_SPEED_100 0x1 --#define QCA8K_PORT_STATUS_SPEED_1000 0x2 --#define QCA8K_PORT_STATUS_TXMAC BIT(2) --#define QCA8K_PORT_STATUS_RXMAC BIT(3) --#define 
QCA8K_PORT_STATUS_TXFLOW BIT(4) --#define QCA8K_PORT_STATUS_RXFLOW BIT(5) --#define QCA8K_PORT_STATUS_DUPLEX BIT(6) --#define QCA8K_PORT_STATUS_LINK_UP BIT(8) --#define QCA8K_PORT_STATUS_LINK_AUTO BIT(9) --#define QCA8K_PORT_STATUS_LINK_PAUSE BIT(10) --#define QCA8K_PORT_STATUS_FLOW_AUTO BIT(12) --#define QCA8K_REG_PORT_HDR_CTRL(_i) (0x9c + (_i * 4)) --#define QCA8K_PORT_HDR_CTRL_RX_MASK GENMASK(3, 2) --#define QCA8K_PORT_HDR_CTRL_TX_MASK GENMASK(1, 0) --#define QCA8K_PORT_HDR_CTRL_ALL 2 --#define QCA8K_PORT_HDR_CTRL_MGMT 1 --#define QCA8K_PORT_HDR_CTRL_NONE 0 --#define QCA8K_REG_SGMII_CTRL 0x0e0 --#define QCA8K_SGMII_EN_PLL BIT(1) --#define QCA8K_SGMII_EN_RX BIT(2) --#define QCA8K_SGMII_EN_TX BIT(3) --#define QCA8K_SGMII_EN_SD BIT(4) --#define QCA8K_SGMII_CLK125M_DELAY BIT(7) --#define QCA8K_SGMII_MODE_CTRL_MASK GENMASK(23, 22) --#define QCA8K_SGMII_MODE_CTRL(x) FIELD_PREP(QCA8K_SGMII_MODE_CTRL_MASK, x) --#define QCA8K_SGMII_MODE_CTRL_BASEX QCA8K_SGMII_MODE_CTRL(0x0) --#define QCA8K_SGMII_MODE_CTRL_PHY QCA8K_SGMII_MODE_CTRL(0x1) --#define QCA8K_SGMII_MODE_CTRL_MAC QCA8K_SGMII_MODE_CTRL(0x2) -- --/* MAC_PWR_SEL registers */ --#define QCA8K_REG_MAC_PWR_SEL 0x0e4 --#define QCA8K_MAC_PWR_RGMII1_1_8V BIT(18) --#define QCA8K_MAC_PWR_RGMII0_1_8V BIT(19) -- --/* EEE control registers */ --#define QCA8K_REG_EEE_CTRL 0x100 --#define QCA8K_REG_EEE_CTRL_LPI_EN(_i) ((_i + 1) * 2) -- --/* TRUNK_HASH_EN registers */ --#define QCA8K_TRUNK_HASH_EN_CTRL 0x270 --#define QCA8K_TRUNK_HASH_SIP_EN BIT(3) --#define QCA8K_TRUNK_HASH_DIP_EN BIT(2) --#define QCA8K_TRUNK_HASH_SA_EN BIT(1) --#define QCA8K_TRUNK_HASH_DA_EN BIT(0) --#define QCA8K_TRUNK_HASH_MASK GENMASK(3, 0) -- --/* ACL registers */ --#define QCA8K_REG_PORT_VLAN_CTRL0(_i) (0x420 + (_i * 8)) --#define QCA8K_PORT_VLAN_CVID_MASK GENMASK(27, 16) --#define QCA8K_PORT_VLAN_CVID(x) FIELD_PREP(QCA8K_PORT_VLAN_CVID_MASK, x) --#define QCA8K_PORT_VLAN_SVID_MASK GENMASK(11, 0) --#define QCA8K_PORT_VLAN_SVID(x) FIELD_PREP(QCA8K_PORT_VLAN_SVID_MASK, x) --#define QCA8K_REG_PORT_VLAN_CTRL1(_i) (0x424 + (_i * 8)) --#define QCA8K_REG_IPV4_PRI_BASE_ADDR 0x470 --#define QCA8K_REG_IPV4_PRI_ADDR_MASK 0x474 -- --/* Lookup registers */ --#define QCA8K_REG_ATU_DATA0 0x600 --#define QCA8K_ATU_ADDR2_MASK GENMASK(31, 24) --#define QCA8K_ATU_ADDR3_MASK GENMASK(23, 16) --#define QCA8K_ATU_ADDR4_MASK GENMASK(15, 8) --#define QCA8K_ATU_ADDR5_MASK GENMASK(7, 0) --#define QCA8K_REG_ATU_DATA1 0x604 --#define QCA8K_ATU_PORT_MASK GENMASK(22, 16) --#define QCA8K_ATU_ADDR0_MASK GENMASK(15, 8) --#define QCA8K_ATU_ADDR1_MASK GENMASK(7, 0) --#define QCA8K_REG_ATU_DATA2 0x608 --#define QCA8K_ATU_VID_MASK GENMASK(19, 8) --#define QCA8K_ATU_STATUS_MASK GENMASK(3, 0) --#define QCA8K_ATU_STATUS_STATIC 0xf --#define QCA8K_REG_ATU_FUNC 0x60c --#define QCA8K_ATU_FUNC_BUSY BIT(31) --#define QCA8K_ATU_FUNC_PORT_EN BIT(14) --#define QCA8K_ATU_FUNC_MULTI_EN BIT(13) --#define QCA8K_ATU_FUNC_FULL BIT(12) --#define QCA8K_ATU_FUNC_PORT_MASK GENMASK(11, 8) --#define QCA8K_REG_VTU_FUNC0 0x610 --#define QCA8K_VTU_FUNC0_VALID BIT(20) --#define QCA8K_VTU_FUNC0_IVL_EN BIT(19) --/* QCA8K_VTU_FUNC0_EG_MODE_MASK GENMASK(17, 4) -- * It does contain VLAN_MODE for each port [5:4] for port0, -- * [7:6] for port1 ... [17:16] for port6. Use virtual port -- * define to handle this. 
-- */ --#define QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i) (4 + (_i) * 2) --#define QCA8K_VTU_FUNC0_EG_MODE_MASK GENMASK(1, 0) --#define QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(_i) (GENMASK(1, 0) << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i)) --#define QCA8K_VTU_FUNC0_EG_MODE_UNMOD FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x0) --#define QCA8K_VTU_FUNC0_EG_MODE_PORT_UNMOD(_i) (QCA8K_VTU_FUNC0_EG_MODE_UNMOD << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i)) --#define QCA8K_VTU_FUNC0_EG_MODE_UNTAG FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x1) --#define QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(_i) (QCA8K_VTU_FUNC0_EG_MODE_UNTAG << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i)) --#define QCA8K_VTU_FUNC0_EG_MODE_TAG FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x2) --#define QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(_i) (QCA8K_VTU_FUNC0_EG_MODE_TAG << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i)) --#define QCA8K_VTU_FUNC0_EG_MODE_NOT FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x3) --#define QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(_i) (QCA8K_VTU_FUNC0_EG_MODE_NOT << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i)) --#define QCA8K_REG_VTU_FUNC1 0x614 --#define QCA8K_VTU_FUNC1_BUSY BIT(31) --#define QCA8K_VTU_FUNC1_VID_MASK GENMASK(27, 16) --#define QCA8K_VTU_FUNC1_FULL BIT(4) --#define QCA8K_REG_ATU_CTRL 0x618 --#define QCA8K_ATU_AGE_TIME_MASK GENMASK(15, 0) --#define QCA8K_ATU_AGE_TIME(x) FIELD_PREP(QCA8K_ATU_AGE_TIME_MASK, (x)) --#define QCA8K_REG_GLOBAL_FW_CTRL0 0x620 --#define QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN BIT(10) --#define QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM GENMASK(7, 4) --#define QCA8K_REG_GLOBAL_FW_CTRL1 0x624 --#define QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK GENMASK(30, 24) --#define QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK GENMASK(22, 16) --#define QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK GENMASK(14, 8) --#define QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK GENMASK(6, 0) --#define QCA8K_PORT_LOOKUP_CTRL(_i) (0x660 + (_i) * 0xc) --#define QCA8K_PORT_LOOKUP_MEMBER GENMASK(6, 0) --#define QCA8K_PORT_LOOKUP_VLAN_MODE_MASK GENMASK(9, 8) --#define QCA8K_PORT_LOOKUP_VLAN_MODE(x) FIELD_PREP(QCA8K_PORT_LOOKUP_VLAN_MODE_MASK, x) --#define QCA8K_PORT_LOOKUP_VLAN_MODE_NONE QCA8K_PORT_LOOKUP_VLAN_MODE(0x0) --#define QCA8K_PORT_LOOKUP_VLAN_MODE_FALLBACK QCA8K_PORT_LOOKUP_VLAN_MODE(0x1) --#define QCA8K_PORT_LOOKUP_VLAN_MODE_CHECK QCA8K_PORT_LOOKUP_VLAN_MODE(0x2) --#define QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE QCA8K_PORT_LOOKUP_VLAN_MODE(0x3) --#define QCA8K_PORT_LOOKUP_STATE_MASK GENMASK(18, 16) --#define QCA8K_PORT_LOOKUP_STATE(x) FIELD_PREP(QCA8K_PORT_LOOKUP_STATE_MASK, x) --#define QCA8K_PORT_LOOKUP_STATE_DISABLED QCA8K_PORT_LOOKUP_STATE(0x0) --#define QCA8K_PORT_LOOKUP_STATE_BLOCKING QCA8K_PORT_LOOKUP_STATE(0x1) --#define QCA8K_PORT_LOOKUP_STATE_LISTENING QCA8K_PORT_LOOKUP_STATE(0x2) --#define QCA8K_PORT_LOOKUP_STATE_LEARNING QCA8K_PORT_LOOKUP_STATE(0x3) --#define QCA8K_PORT_LOOKUP_STATE_FORWARD QCA8K_PORT_LOOKUP_STATE(0x4) --#define QCA8K_PORT_LOOKUP_LEARN BIT(20) --#define QCA8K_PORT_LOOKUP_ING_MIRROR_EN BIT(25) -- --#define QCA8K_REG_GOL_TRUNK_CTRL0 0x700 --/* 4 max trunk first -- * first 6 bit for member bitmap -- * 7th bit is to enable trunk port -- */ --#define QCA8K_REG_GOL_TRUNK_SHIFT(_i) ((_i) * 8) --#define QCA8K_REG_GOL_TRUNK_EN_MASK BIT(7) --#define QCA8K_REG_GOL_TRUNK_EN(_i) (QCA8K_REG_GOL_TRUNK_EN_MASK << QCA8K_REG_GOL_TRUNK_SHIFT(_i)) --#define QCA8K_REG_GOL_TRUNK_MEMBER_MASK GENMASK(6, 0) --#define QCA8K_REG_GOL_TRUNK_MEMBER(_i) (QCA8K_REG_GOL_TRUNK_MEMBER_MASK << QCA8K_REG_GOL_TRUNK_SHIFT(_i)) --/* 0x704 for TRUNK 0-1 --- 0x708 for TRUNK 2-3 */ --#define 
QCA8K_REG_GOL_TRUNK_CTRL(_i) (0x704 + (((_i) / 2) * 4)) --#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK GENMASK(3, 0) --#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK BIT(3) --#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK GENMASK(2, 0) --#define QCA8K_REG_GOL_TRUNK_ID_SHIFT(_i) (((_i) / 2) * 16) --#define QCA8K_REG_GOL_MEM_ID_SHIFT(_i) ((_i) * 4) --/* Complex shift: FIRST shift for port THEN shift for trunk */ --#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j) (QCA8K_REG_GOL_MEM_ID_SHIFT(_j) + QCA8K_REG_GOL_TRUNK_ID_SHIFT(_i)) --#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(_i, _j) (QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j)) --#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(_i, _j) (QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j)) -- --#define QCA8K_REG_GLOBAL_FC_THRESH 0x800 --#define QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK GENMASK(24, 16) --#define QCA8K_GLOBAL_FC_GOL_XON_THRES(x) FIELD_PREP(QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK, x) --#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK GENMASK(8, 0) --#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES(x) FIELD_PREP(QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK, x) -- --#define QCA8K_REG_PORT_HOL_CTRL0(_i) (0x970 + (_i) * 0x8) --#define QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF_MASK GENMASK(3, 0) --#define QCA8K_PORT_HOL_CTRL0_EG_PRI0(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF_MASK, x) --#define QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF_MASK GENMASK(7, 4) --#define QCA8K_PORT_HOL_CTRL0_EG_PRI1(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF_MASK, x) --#define QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF_MASK GENMASK(11, 8) --#define QCA8K_PORT_HOL_CTRL0_EG_PRI2(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF_MASK, x) --#define QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF_MASK GENMASK(15, 12) --#define QCA8K_PORT_HOL_CTRL0_EG_PRI3(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF_MASK, x) --#define QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF_MASK GENMASK(19, 16) --#define QCA8K_PORT_HOL_CTRL0_EG_PRI4(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF_MASK, x) --#define QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF_MASK GENMASK(23, 20) --#define QCA8K_PORT_HOL_CTRL0_EG_PRI5(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF_MASK, x) --#define QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF_MASK GENMASK(29, 24) --#define QCA8K_PORT_HOL_CTRL0_EG_PORT(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF_MASK, x) -- --#define QCA8K_REG_PORT_HOL_CTRL1(_i) (0x974 + (_i) * 0x8) --#define QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK GENMASK(3, 0) --#define QCA8K_PORT_HOL_CTRL1_ING(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK, x) --#define QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN BIT(6) --#define QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN BIT(7) --#define QCA8K_PORT_HOL_CTRL1_WRED_EN BIT(8) --#define QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN BIT(16) -- --/* Pkt edit registers */ --#define QCA8K_EGREES_VLAN_PORT_SHIFT(_i) (16 * ((_i) % 2)) --#define QCA8K_EGREES_VLAN_PORT_MASK(_i) (GENMASK(11, 0) << QCA8K_EGREES_VLAN_PORT_SHIFT(_i)) --#define QCA8K_EGREES_VLAN_PORT(_i, x) ((x) << QCA8K_EGREES_VLAN_PORT_SHIFT(_i)) --#define QCA8K_EGRESS_VLAN(x) (0x0c70 + (4 * (x / 2))) -- --/* L3 registers */ --#define QCA8K_HROUTER_CONTROL 0xe00 --#define QCA8K_HROUTER_CONTROL_GLB_LOCKTIME_M GENMASK(17, 16) --#define QCA8K_HROUTER_CONTROL_GLB_LOCKTIME_S 16 --#define QCA8K_HROUTER_CONTROL_ARP_AGE_MODE 1 --#define QCA8K_HROUTER_PBASED_CONTROL1 0xe08 --#define QCA8K_HROUTER_PBASED_CONTROL2 0xe0c --#define QCA8K_HNAT_CONTROL 0xe38 -- --/* MIB registers */ --#define QCA8K_PORT_MIB_COUNTER(_i) (0x1000 + (_i) * 0x100) -- --/* QCA specific MII registers */ 
--#define MII_ATH_MMD_ADDR 0x0d --#define MII_ATH_MMD_DATA 0x0e -- --enum { -- QCA8K_PORT_SPEED_10M = 0, -- QCA8K_PORT_SPEED_100M = 1, -- QCA8K_PORT_SPEED_1000M = 2, -- QCA8K_PORT_SPEED_ERR = 3, --}; -- --enum qca8k_fdb_cmd { -- QCA8K_FDB_FLUSH = 1, -- QCA8K_FDB_LOAD = 2, -- QCA8K_FDB_PURGE = 3, -- QCA8K_FDB_FLUSH_PORT = 5, -- QCA8K_FDB_NEXT = 6, -- QCA8K_FDB_SEARCH = 7, --}; -- --enum qca8k_vlan_cmd { -- QCA8K_VLAN_FLUSH = 1, -- QCA8K_VLAN_LOAD = 2, -- QCA8K_VLAN_PURGE = 3, -- QCA8K_VLAN_REMOVE_PORT = 4, -- QCA8K_VLAN_NEXT = 5, -- QCA8K_VLAN_READ = 6, --}; -- --enum qca8k_mid_cmd { -- QCA8K_MIB_FLUSH = 1, -- QCA8K_MIB_FLUSH_PORT = 2, -- QCA8K_MIB_CAST = 3, --}; -- --struct qca8k_match_data { -- u8 id; -- bool reduced_package; -- u8 mib_count; --}; -- --enum { -- QCA8K_CPU_PORT0, -- QCA8K_CPU_PORT6, --}; -- --struct qca8k_mgmt_eth_data { -- struct completion rw_done; -- struct mutex mutex; /* Enforce one mdio read/write at time */ -- bool ack; -- u32 seq; -- u32 data[4]; --}; -- --struct qca8k_mib_eth_data { -- struct completion rw_done; -- struct mutex mutex; /* Process one command at time */ -- refcount_t port_parsed; /* Counter to track parsed port */ -- u8 req_port; -- u64 *data; /* pointer to ethtool data */ --}; -- --struct qca8k_ports_config { -- bool sgmii_rx_clk_falling_edge; -- bool sgmii_tx_clk_falling_edge; -- bool sgmii_enable_pll; -- u8 rgmii_rx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */ -- u8 rgmii_tx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */ --}; -- --struct qca8k_mdio_cache { --/* The 32bit switch registers are accessed indirectly. To achieve this we need -- * to set the page of the register. Track the last page that was set to reduce -- * mdio writes -- */ -- u16 page; --/* lo and hi can also be cached and from Documentation we can skip one -- * extra mdio write if lo or hi is didn't change. -- */ -- u16 lo; -- u16 hi; --}; -- --struct qca8k_priv { -- u8 switch_id; -- u8 switch_revision; -- u8 mirror_rx; -- u8 mirror_tx; -- u8 lag_hash_mode; -- /* Each bit correspond to a port. This switch can support a max of 7 port. -- * Bit 1: port enabled. Bit 0: port disabled. -- */ -- u8 port_enabled_map; -- struct qca8k_ports_config ports_config; -- struct regmap *regmap; -- struct mii_bus *bus; -- struct dsa_switch *ds; -- struct mutex reg_mutex; -- struct device *dev; -- struct gpio_desc *reset_gpio; -- struct net_device *mgmt_master; /* Track if mdio/mib Ethernet is available */ -- struct qca8k_mgmt_eth_data mgmt_eth_data; -- struct qca8k_mib_eth_data mib_eth_data; -- struct qca8k_mdio_cache mdio_cache; --}; -- --struct qca8k_mib_desc { -- unsigned int size; -- unsigned int offset; -- const char *name; --}; -- --struct qca8k_fdb { -- u16 vid; -- u8 port_mask; -- u8 aging; -- u8 mac[6]; --}; -- --#endif /* __QCA8K_H */ diff --git a/target/linux/generic/backport-6.1/771-v6.0-01-net-dsa-qca8k-cache-match-data-to-speed-up-access.patch b/target/linux/generic/backport-6.1/771-v6.0-01-net-dsa-qca8k-cache-match-data-to-speed-up-access.patch deleted file mode 100644 index 77fe64632f40..000000000000 --- a/target/linux/generic/backport-6.1/771-v6.0-01-net-dsa-qca8k-cache-match-data-to-speed-up-access.patch +++ /dev/null @@ -1,157 +0,0 @@ -From 3bb0844e7bcd0fb0bcfab6202b5edd349ef5250a Mon Sep 17 00:00:00 2001 -From: Christian Marangi -Date: Wed, 27 Jul 2022 13:35:10 +0200 -Subject: [PATCH 01/14] net: dsa: qca8k: cache match data to speed up access - -Using of_device_get_match_data is expensive. 
Cache match data to speed -up access and rework user of match data to use the new cached value. - -Signed-off-by: Christian Marangi -Reviewed-by: Vladimir Oltean -Signed-off-by: Jakub Kicinski ---- - drivers/net/dsa/qca/qca8k.c | 35 +++++++++++------------------------ - drivers/net/dsa/qca/qca8k.h | 1 + - 2 files changed, 12 insertions(+), 24 deletions(-) - ---- a/drivers/net/dsa/qca/qca8k.c -+++ b/drivers/net/dsa/qca/qca8k.c -@@ -1462,8 +1462,8 @@ static int qca8k_find_cpu_port(struct ds - static int - qca8k_setup_of_pws_reg(struct qca8k_priv *priv) - { -+ const struct qca8k_match_data *data = priv->info; - struct device_node *node = priv->dev->of_node; -- const struct qca8k_match_data *data; - u32 val = 0; - int ret; - -@@ -1472,8 +1472,6 @@ qca8k_setup_of_pws_reg(struct qca8k_priv - * Should be applied by default but we set this just to make sure. - */ - if (priv->switch_id == QCA8K_ID_QCA8327) { -- data = of_device_get_match_data(priv->dev); -- - /* Set the correct package of 148 pin for QCA8327 */ - if (data->reduced_package) - val |= QCA8327_PWS_PACKAGE148_EN; -@@ -2146,23 +2144,19 @@ qca8k_phylink_mac_link_up(struct dsa_swi - static void - qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data) - { -- const struct qca8k_match_data *match_data; - struct qca8k_priv *priv = ds->priv; - int i; - - if (stringset != ETH_SS_STATS) - return; - -- match_data = of_device_get_match_data(priv->dev); -- -- for (i = 0; i < match_data->mib_count; i++) -+ for (i = 0; i < priv->info->mib_count; i++) - strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name, - ETH_GSTRING_LEN); - } - - static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *skb) - { -- const struct qca8k_match_data *match_data; - struct qca8k_mib_eth_data *mib_eth_data; - struct qca8k_priv *priv = ds->priv; - const struct qca8k_mib_desc *mib; -@@ -2181,10 +2175,9 @@ static void qca8k_mib_autocast_handler(s - if (port != mib_eth_data->req_port) - goto exit; - -- match_data = device_get_match_data(priv->dev); - data = mib_eth_data->data; - -- for (i = 0; i < match_data->mib_count; i++) { -+ for (i = 0; i < priv->info->mib_count; i++) { - mib = &ar8327_mib[i]; - - /* First 3 mib are present in the skb head */ -@@ -2256,7 +2249,6 @@ qca8k_get_ethtool_stats(struct dsa_switc - uint64_t *data) - { - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -- const struct qca8k_match_data *match_data; - const struct qca8k_mib_desc *mib; - u32 reg, i, val; - u32 hi = 0; -@@ -2266,9 +2258,7 @@ qca8k_get_ethtool_stats(struct dsa_switc - qca8k_get_ethtool_stats_eth(ds, port, data) > 0) - return; - -- match_data = of_device_get_match_data(priv->dev); -- -- for (i = 0; i < match_data->mib_count; i++) { -+ for (i = 0; i < priv->info->mib_count; i++) { - mib = &ar8327_mib[i]; - reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset; - -@@ -2291,15 +2281,12 @@ qca8k_get_ethtool_stats(struct dsa_switc - static int - qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset) - { -- const struct qca8k_match_data *match_data; - struct qca8k_priv *priv = ds->priv; - - if (sset != ETH_SS_STATS) - return 0; - -- match_data = of_device_get_match_data(priv->dev); -- -- return match_data->mib_count; -+ return priv->info->mib_count; - } - - static int -@@ -3037,14 +3024,11 @@ static const struct dsa_switch_ops qca8k - - static int qca8k_read_switch_id(struct qca8k_priv *priv) - { -- const struct qca8k_match_data *data; - u32 val; - u8 id; - int ret; - -- /* get the switches ID from the compatible */ -- data = 
of_device_get_match_data(priv->dev); -- if (!data) -+ if (!priv->info) - return -ENODEV; - - ret = qca8k_read(priv, QCA8K_REG_MASK_CTRL, &val); -@@ -3052,8 +3036,10 @@ static int qca8k_read_switch_id(struct q - return -ENODEV; - - id = QCA8K_MASK_CTRL_DEVICE_ID(val); -- if (id != data->id) { -- dev_err(priv->dev, "Switch id detected %x but expected %x", id, data->id); -+ if (id != priv->info->id) { -+ dev_err(priv->dev, -+ "Switch id detected %x but expected %x", -+ id, priv->info->id); - return -ENODEV; - } - -@@ -3078,6 +3064,7 @@ qca8k_sw_probe(struct mdio_device *mdiod - if (!priv) - return -ENOMEM; - -+ priv->info = of_device_get_match_data(priv->dev); - priv->bus = mdiodev->bus; - priv->dev = &mdiodev->dev; - ---- a/drivers/net/dsa/qca/qca8k.h -+++ b/drivers/net/dsa/qca/qca8k.h -@@ -393,6 +393,7 @@ struct qca8k_priv { - struct qca8k_mgmt_eth_data mgmt_eth_data; - struct qca8k_mib_eth_data mib_eth_data; - struct qca8k_mdio_cache mdio_cache; -+ const struct qca8k_match_data *info; - }; - - struct qca8k_mib_desc { diff --git a/target/linux/generic/backport-6.1/771-v6.0-02-net-dsa-qca8k-make-mib-autocast-feature-optional.patch b/target/linux/generic/backport-6.1/771-v6.0-02-net-dsa-qca8k-make-mib-autocast-feature-optional.patch deleted file mode 100644 index 5b2dce4c556c..000000000000 --- a/target/linux/generic/backport-6.1/771-v6.0-02-net-dsa-qca8k-make-mib-autocast-feature-optional.patch +++ /dev/null @@ -1,77 +0,0 @@ -From 533c64bca62a8654f00698bc893f639013e38c7b Mon Sep 17 00:00:00 2001 -From: Christian Marangi -Date: Wed, 27 Jul 2022 13:35:11 +0200 -Subject: [PATCH 02/14] net: dsa: qca8k: make mib autocast feature optional - -Some switch may not support mib autocast feature and require the legacy -way of reading the regs directly. -Make the mib autocast feature optional and permit to declare support for -it using match_data struct in a dedicated qca8k_info_ops struct. 
- -Signed-off-by: Christian Marangi -Reviewed-by: Vladimir Oltean -Signed-off-by: Jakub Kicinski ---- - drivers/net/dsa/qca/qca8k.c | 11 +++++++++-- - drivers/net/dsa/qca/qca8k.h | 5 +++++ - 2 files changed, 14 insertions(+), 2 deletions(-) - ---- a/drivers/net/dsa/qca/qca8k.c -+++ b/drivers/net/dsa/qca/qca8k.c -@@ -2254,8 +2254,8 @@ qca8k_get_ethtool_stats(struct dsa_switc - u32 hi = 0; - int ret; - -- if (priv->mgmt_master && -- qca8k_get_ethtool_stats_eth(ds, port, data) > 0) -+ if (priv->mgmt_master && priv->info->ops->autocast_mib && -+ priv->info->ops->autocast_mib(ds, port, data) > 0) - return; - - for (i = 0; i < priv->info->mib_count; i++) { -@@ -3187,20 +3187,27 @@ static int qca8k_resume(struct device *d - static SIMPLE_DEV_PM_OPS(qca8k_pm_ops, - qca8k_suspend, qca8k_resume); - -+static const struct qca8k_info_ops qca8xxx_ops = { -+ .autocast_mib = qca8k_get_ethtool_stats_eth, -+}; -+ - static const struct qca8k_match_data qca8327 = { - .id = QCA8K_ID_QCA8327, - .reduced_package = true, - .mib_count = QCA8K_QCA832X_MIB_COUNT, -+ .ops = &qca8xxx_ops, - }; - - static const struct qca8k_match_data qca8328 = { - .id = QCA8K_ID_QCA8327, - .mib_count = QCA8K_QCA832X_MIB_COUNT, -+ .ops = &qca8xxx_ops, - }; - - static const struct qca8k_match_data qca833x = { - .id = QCA8K_ID_QCA8337, - .mib_count = QCA8K_QCA833X_MIB_COUNT, -+ .ops = &qca8xxx_ops, - }; - - static const struct of_device_id qca8k_of_match[] = { ---- a/drivers/net/dsa/qca/qca8k.h -+++ b/drivers/net/dsa/qca/qca8k.h -@@ -324,10 +324,15 @@ enum qca8k_mid_cmd { - QCA8K_MIB_CAST = 3, - }; - -+struct qca8k_info_ops { -+ int (*autocast_mib)(struct dsa_switch *ds, int port, u64 *data); -+}; -+ - struct qca8k_match_data { - u8 id; - bool reduced_package; - u8 mib_count; -+ const struct qca8k_info_ops *ops; - }; - - enum { diff --git a/target/linux/generic/backport-6.1/771-v6.0-03-net-dsa-qca8k-move-mib-struct-to-common-code.patch b/target/linux/generic/backport-6.1/771-v6.0-03-net-dsa-qca8k-move-mib-struct-to-common-code.patch deleted file mode 100644 index afa466693a98..000000000000 --- a/target/linux/generic/backport-6.1/771-v6.0-03-net-dsa-qca8k-move-mib-struct-to-common-code.patch +++ /dev/null @@ -1,6532 +0,0 @@ -From 027152b830434e3632ad5dd678cc5d4740358dbb Mon Sep 17 00:00:00 2001 -From: Christian Marangi -Date: Wed, 27 Jul 2022 13:35:12 +0200 -Subject: [PATCH 03/14] net: dsa: qca8k: move mib struct to common code - -The same MIB struct is used by drivers based on qca8k family switch. Move -it to common code to make it accessible also by other drivers. - -Signed-off-by: Christian Marangi -Reviewed-by: Vladimir Oltean -Signed-off-by: Jakub Kicinski ---- - drivers/net/dsa/qca/Makefile | 1 + - drivers/net/dsa/qca/{qca8k.c => qca8k-8xxx.c} | 51 --------------- - drivers/net/dsa/qca/qca8k-common.c | 63 +++++++++++++++++++ - drivers/net/dsa/qca/qca8k.h | 3 + - 4 files changed, 67 insertions(+), 51 deletions(-) - rename drivers/net/dsa/qca/{qca8k.c => qca8k-8xxx.c} (98%) - create mode 100644 drivers/net/dsa/qca/qca8k-common.c - ---- a/drivers/net/dsa/qca/Makefile -+++ b/drivers/net/dsa/qca/Makefile -@@ -1,3 +1,4 @@ - # SPDX-License-Identifier: GPL-2.0-only - obj-$(CONFIG_NET_DSA_AR9331) += ar9331.o - obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o -+qca8k-y += qca8k-common.o qca8k-8xxx.o ---- a/drivers/net/dsa/qca/qca8k.c -+++ /dev/null -@@ -1,3237 +0,0 @@ --// SPDX-License-Identifier: GPL-2.0 --/* -- * Copyright (C) 2009 Felix Fietkau -- * Copyright (C) 2011-2012 Gabor Juhos -- * Copyright (c) 2015, 2019, The Linux Foundation. 
All rights reserved. -- * Copyright (c) 2016 John Crispin -- */ -- --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include -- --#include "qca8k.h" -- --#define MIB_DESC(_s, _o, _n) \ -- { \ -- .size = (_s), \ -- .offset = (_o), \ -- .name = (_n), \ -- } -- --static const struct qca8k_mib_desc ar8327_mib[] = { -- MIB_DESC(1, 0x00, "RxBroad"), -- MIB_DESC(1, 0x04, "RxPause"), -- MIB_DESC(1, 0x08, "RxMulti"), -- MIB_DESC(1, 0x0c, "RxFcsErr"), -- MIB_DESC(1, 0x10, "RxAlignErr"), -- MIB_DESC(1, 0x14, "RxRunt"), -- MIB_DESC(1, 0x18, "RxFragment"), -- MIB_DESC(1, 0x1c, "Rx64Byte"), -- MIB_DESC(1, 0x20, "Rx128Byte"), -- MIB_DESC(1, 0x24, "Rx256Byte"), -- MIB_DESC(1, 0x28, "Rx512Byte"), -- MIB_DESC(1, 0x2c, "Rx1024Byte"), -- MIB_DESC(1, 0x30, "Rx1518Byte"), -- MIB_DESC(1, 0x34, "RxMaxByte"), -- MIB_DESC(1, 0x38, "RxTooLong"), -- MIB_DESC(2, 0x3c, "RxGoodByte"), -- MIB_DESC(2, 0x44, "RxBadByte"), -- MIB_DESC(1, 0x4c, "RxOverFlow"), -- MIB_DESC(1, 0x50, "Filtered"), -- MIB_DESC(1, 0x54, "TxBroad"), -- MIB_DESC(1, 0x58, "TxPause"), -- MIB_DESC(1, 0x5c, "TxMulti"), -- MIB_DESC(1, 0x60, "TxUnderRun"), -- MIB_DESC(1, 0x64, "Tx64Byte"), -- MIB_DESC(1, 0x68, "Tx128Byte"), -- MIB_DESC(1, 0x6c, "Tx256Byte"), -- MIB_DESC(1, 0x70, "Tx512Byte"), -- MIB_DESC(1, 0x74, "Tx1024Byte"), -- MIB_DESC(1, 0x78, "Tx1518Byte"), -- MIB_DESC(1, 0x7c, "TxMaxByte"), -- MIB_DESC(1, 0x80, "TxOverSize"), -- MIB_DESC(2, 0x84, "TxByte"), -- MIB_DESC(1, 0x8c, "TxCollision"), -- MIB_DESC(1, 0x90, "TxAbortCol"), -- MIB_DESC(1, 0x94, "TxMultiCol"), -- MIB_DESC(1, 0x98, "TxSingleCol"), -- MIB_DESC(1, 0x9c, "TxExcDefer"), -- MIB_DESC(1, 0xa0, "TxDefer"), -- MIB_DESC(1, 0xa4, "TxLateCol"), -- MIB_DESC(1, 0xa8, "RXUnicast"), -- MIB_DESC(1, 0xac, "TXUnicast"), --}; -- --static void --qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page) --{ -- regaddr >>= 1; -- *r1 = regaddr & 0x1e; -- -- regaddr >>= 5; -- *r2 = regaddr & 0x7; -- -- regaddr >>= 3; -- *page = regaddr & 0x3ff; --} -- --static int --qca8k_set_lo(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 lo) --{ -- u16 *cached_lo = &priv->mdio_cache.lo; -- struct mii_bus *bus = priv->bus; -- int ret; -- -- if (lo == *cached_lo) -- return 0; -- -- ret = bus->write(bus, phy_id, regnum, lo); -- if (ret < 0) -- dev_err_ratelimited(&bus->dev, -- "failed to write qca8k 32bit lo register\n"); -- -- *cached_lo = lo; -- return 0; --} -- --static int --qca8k_set_hi(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 hi) --{ -- u16 *cached_hi = &priv->mdio_cache.hi; -- struct mii_bus *bus = priv->bus; -- int ret; -- -- if (hi == *cached_hi) -- return 0; -- -- ret = bus->write(bus, phy_id, regnum, hi); -- if (ret < 0) -- dev_err_ratelimited(&bus->dev, -- "failed to write qca8k 32bit hi register\n"); -- -- *cached_hi = hi; -- return 0; --} -- --static int --qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val) --{ -- int ret; -- -- ret = bus->read(bus, phy_id, regnum); -- if (ret >= 0) { -- *val = ret; -- ret = bus->read(bus, phy_id, regnum + 1); -- *val |= ret << 16; -- } -- -- if (ret < 0) { -- dev_err_ratelimited(&bus->dev, -- "failed to read qca8k 32bit register\n"); -- *val = 0; -- return ret; -- } -- -- return 0; --} -- --static void --qca8k_mii_write32(struct qca8k_priv *priv, int phy_id, u32 regnum, u32 val) --{ -- u16 lo, hi; -- int ret; -- -- lo = val & 0xffff; -- hi = (u16)(val >> 16); -- -- ret = qca8k_set_lo(priv, phy_id, regnum, lo); -- if (ret >= 0) 
-- ret = qca8k_set_hi(priv, phy_id, regnum + 1, hi); --} -- --static int --qca8k_set_page(struct qca8k_priv *priv, u16 page) --{ -- u16 *cached_page = &priv->mdio_cache.page; -- struct mii_bus *bus = priv->bus; -- int ret; -- -- if (page == *cached_page) -- return 0; -- -- ret = bus->write(bus, 0x18, 0, page); -- if (ret < 0) { -- dev_err_ratelimited(&bus->dev, -- "failed to set qca8k page\n"); -- return ret; -- } -- -- *cached_page = page; -- usleep_range(1000, 2000); -- return 0; --} -- --static int --qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val) --{ -- return regmap_read(priv->regmap, reg, val); --} -- --static int --qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val) --{ -- return regmap_write(priv->regmap, reg, val); --} -- --static int --qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val) --{ -- return regmap_update_bits(priv->regmap, reg, mask, write_val); --} -- --static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb) --{ -- struct qca8k_mgmt_eth_data *mgmt_eth_data; -- struct qca8k_priv *priv = ds->priv; -- struct qca_mgmt_ethhdr *mgmt_ethhdr; -- u8 len, cmd; -- -- mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb_mac_header(skb); -- mgmt_eth_data = &priv->mgmt_eth_data; -- -- cmd = FIELD_GET(QCA_HDR_MGMT_CMD, mgmt_ethhdr->command); -- len = FIELD_GET(QCA_HDR_MGMT_LENGTH, mgmt_ethhdr->command); -- -- /* Make sure the seq match the requested packet */ -- if (mgmt_ethhdr->seq == mgmt_eth_data->seq) -- mgmt_eth_data->ack = true; -- -- if (cmd == MDIO_READ) { -- mgmt_eth_data->data[0] = mgmt_ethhdr->mdio_data; -- -- /* Get the rest of the 12 byte of data. -- * The read/write function will extract the requested data. -- */ -- if (len > QCA_HDR_MGMT_DATA1_LEN) -- memcpy(mgmt_eth_data->data + 1, skb->data, -- QCA_HDR_MGMT_DATA2_LEN); -- } -- -- complete(&mgmt_eth_data->rw_done); --} -- --static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *val, -- int priority, unsigned int len) --{ -- struct qca_mgmt_ethhdr *mgmt_ethhdr; -- unsigned int real_len; -- struct sk_buff *skb; -- u32 *data2; -- u16 hdr; -- -- skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN); -- if (!skb) -- return NULL; -- -- /* Max value for len reg is 15 (0xf) but the switch actually return 16 byte -- * Actually for some reason the steps are: -- * 0: nothing -- * 1-4: first 4 byte -- * 5-6: first 12 byte -- * 7-15: all 16 byte -- */ -- if (len == 16) -- real_len = 15; -- else -- real_len = len; -- -- skb_reset_mac_header(skb); -- skb_set_network_header(skb, skb->len); -- -- mgmt_ethhdr = skb_push(skb, QCA_HDR_MGMT_HEADER_LEN + QCA_HDR_LEN); -- -- hdr = FIELD_PREP(QCA_HDR_XMIT_VERSION, QCA_HDR_VERSION); -- hdr |= FIELD_PREP(QCA_HDR_XMIT_PRIORITY, priority); -- hdr |= QCA_HDR_XMIT_FROM_CPU; -- hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(0)); -- hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG); -- -- mgmt_ethhdr->command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg); -- mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len); -- mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd); -- mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE, -- QCA_HDR_MGMT_CHECK_CODE_VAL); -- -- if (cmd == MDIO_WRITE) -- mgmt_ethhdr->mdio_data = *val; -- -- mgmt_ethhdr->hdr = htons(hdr); -- -- data2 = skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN); -- if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN) -- memcpy(data2, val + 1, len - QCA_HDR_MGMT_DATA1_LEN); -- -- return skb; --} -- --static void 
qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num) --{ -- struct qca_mgmt_ethhdr *mgmt_ethhdr; -- -- mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb->data; -- mgmt_ethhdr->seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num); --} -- --static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len) --{ -- struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data; -- struct sk_buff *skb; -- bool ack; -- int ret; -- -- skb = qca8k_alloc_mdio_header(MDIO_READ, reg, NULL, -- QCA8K_ETHERNET_MDIO_PRIORITY, len); -- if (!skb) -- return -ENOMEM; -- -- mutex_lock(&mgmt_eth_data->mutex); -- -- /* Check mgmt_master if is operational */ -- if (!priv->mgmt_master) { -- kfree_skb(skb); -- mutex_unlock(&mgmt_eth_data->mutex); -- return -EINVAL; -- } -- -- skb->dev = priv->mgmt_master; -- -- reinit_completion(&mgmt_eth_data->rw_done); -- -- /* Increment seq_num and set it in the mdio pkt */ -- mgmt_eth_data->seq++; -- qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq); -- mgmt_eth_data->ack = false; -- -- dev_queue_xmit(skb); -- -- ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done, -- msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT)); -- -- *val = mgmt_eth_data->data[0]; -- if (len > QCA_HDR_MGMT_DATA1_LEN) -- memcpy(val + 1, mgmt_eth_data->data + 1, len - QCA_HDR_MGMT_DATA1_LEN); -- -- ack = mgmt_eth_data->ack; -- -- mutex_unlock(&mgmt_eth_data->mutex); -- -- if (ret <= 0) -- return -ETIMEDOUT; -- -- if (!ack) -- return -EINVAL; -- -- return 0; --} -- --static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len) --{ -- struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data; -- struct sk_buff *skb; -- bool ack; -- int ret; -- -- skb = qca8k_alloc_mdio_header(MDIO_WRITE, reg, val, -- QCA8K_ETHERNET_MDIO_PRIORITY, len); -- if (!skb) -- return -ENOMEM; -- -- mutex_lock(&mgmt_eth_data->mutex); -- -- /* Check mgmt_master if is operational */ -- if (!priv->mgmt_master) { -- kfree_skb(skb); -- mutex_unlock(&mgmt_eth_data->mutex); -- return -EINVAL; -- } -- -- skb->dev = priv->mgmt_master; -- -- reinit_completion(&mgmt_eth_data->rw_done); -- -- /* Increment seq_num and set it in the mdio pkt */ -- mgmt_eth_data->seq++; -- qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq); -- mgmt_eth_data->ack = false; -- -- dev_queue_xmit(skb); -- -- ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done, -- msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT)); -- -- ack = mgmt_eth_data->ack; -- -- mutex_unlock(&mgmt_eth_data->mutex); -- -- if (ret <= 0) -- return -ETIMEDOUT; -- -- if (!ack) -- return -EINVAL; -- -- return 0; --} -- --static int --qca8k_regmap_update_bits_eth(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val) --{ -- u32 val = 0; -- int ret; -- -- ret = qca8k_read_eth(priv, reg, &val, sizeof(val)); -- if (ret) -- return ret; -- -- val &= ~mask; -- val |= write_val; -- -- return qca8k_write_eth(priv, reg, &val, sizeof(val)); --} -- --static int --qca8k_bulk_read(struct qca8k_priv *priv, u32 reg, u32 *val, int len) --{ -- int i, count = len / sizeof(u32), ret; -- -- if (priv->mgmt_master && !qca8k_read_eth(priv, reg, val, len)) -- return 0; -- -- for (i = 0; i < count; i++) { -- ret = regmap_read(priv->regmap, reg + (i * 4), val + i); -- if (ret < 0) -- return ret; -- } -- -- return 0; --} -- --static int --qca8k_bulk_write(struct qca8k_priv *priv, u32 reg, u32 *val, int len) --{ -- int i, count = len / sizeof(u32), ret; -- u32 tmp; -- -- if (priv->mgmt_master && !qca8k_write_eth(priv, reg, val, len)) -- return 0; -- -- for (i = 0; i < count; i++) { 
-- tmp = val[i]; -- -- ret = regmap_write(priv->regmap, reg + (i * 4), tmp); -- if (ret < 0) -- return ret; -- } -- -- return 0; --} -- --static int --qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val) --{ -- struct qca8k_priv *priv = (struct qca8k_priv *)ctx; -- struct mii_bus *bus = priv->bus; -- u16 r1, r2, page; -- int ret; -- -- if (!qca8k_read_eth(priv, reg, val, sizeof(*val))) -- return 0; -- -- qca8k_split_addr(reg, &r1, &r2, &page); -- -- mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); -- -- ret = qca8k_set_page(priv, page); -- if (ret < 0) -- goto exit; -- -- ret = qca8k_mii_read32(bus, 0x10 | r2, r1, val); -- --exit: -- mutex_unlock(&bus->mdio_lock); -- return ret; --} -- --static int --qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val) --{ -- struct qca8k_priv *priv = (struct qca8k_priv *)ctx; -- struct mii_bus *bus = priv->bus; -- u16 r1, r2, page; -- int ret; -- -- if (!qca8k_write_eth(priv, reg, &val, sizeof(val))) -- return 0; -- -- qca8k_split_addr(reg, &r1, &r2, &page); -- -- mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); -- -- ret = qca8k_set_page(priv, page); -- if (ret < 0) -- goto exit; -- -- qca8k_mii_write32(priv, 0x10 | r2, r1, val); -- --exit: -- mutex_unlock(&bus->mdio_lock); -- return ret; --} -- --static int --qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val) --{ -- struct qca8k_priv *priv = (struct qca8k_priv *)ctx; -- struct mii_bus *bus = priv->bus; -- u16 r1, r2, page; -- u32 val; -- int ret; -- -- if (!qca8k_regmap_update_bits_eth(priv, reg, mask, write_val)) -- return 0; -- -- qca8k_split_addr(reg, &r1, &r2, &page); -- -- mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); -- -- ret = qca8k_set_page(priv, page); -- if (ret < 0) -- goto exit; -- -- ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val); -- if (ret < 0) -- goto exit; -- -- val &= ~mask; -- val |= write_val; -- qca8k_mii_write32(priv, 0x10 | r2, r1, val); -- --exit: -- mutex_unlock(&bus->mdio_lock); -- -- return ret; --} -- --static const struct regmap_range qca8k_readable_ranges[] = { -- regmap_reg_range(0x0000, 0x00e4), /* Global control */ -- regmap_reg_range(0x0100, 0x0168), /* EEE control */ -- regmap_reg_range(0x0200, 0x0270), /* Parser control */ -- regmap_reg_range(0x0400, 0x0454), /* ACL */ -- regmap_reg_range(0x0600, 0x0718), /* Lookup */ -- regmap_reg_range(0x0800, 0x0b70), /* QM */ -- regmap_reg_range(0x0c00, 0x0c80), /* PKT */ -- regmap_reg_range(0x0e00, 0x0e98), /* L3 */ -- regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */ -- regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */ -- regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */ -- regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */ -- regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */ -- regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */ -- regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */ -- --}; -- --static const struct regmap_access_table qca8k_readable_table = { -- .yes_ranges = qca8k_readable_ranges, -- .n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges), --}; -- --static struct regmap_config qca8k_regmap_config = { -- .reg_bits = 16, -- .val_bits = 32, -- .reg_stride = 4, -- .max_register = 0x16ac, /* end MIB - Port6 range */ -- .reg_read = qca8k_regmap_read, -- .reg_write = qca8k_regmap_write, -- .reg_update_bits = qca8k_regmap_update_bits, -- .rd_table = &qca8k_readable_table, -- .disable_locking = true, /* Locking is handled by qca8k read/write */ -- .cache_type = REGCACHE_NONE, /* Explicitly disable CACHE */ --}; -- --static int 
--qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask) --{ -- u32 val; -- -- return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0, -- QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC); --} -- --static int --qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb) --{ -- u32 reg[3]; -- int ret; -- -- /* load the ARL table into an array */ -- ret = qca8k_bulk_read(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg)); -- if (ret) -- return ret; -- -- /* vid - 83:72 */ -- fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]); -- /* aging - 67:64 */ -- fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]); -- /* portmask - 54:48 */ -- fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]); -- /* mac - 47:0 */ -- fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]); -- fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]); -- fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]); -- fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]); -- fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]); -- fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]); -- -- return 0; --} -- --static void --qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask, const u8 *mac, -- u8 aging) --{ -- u32 reg[3] = { 0 }; -- -- /* vid - 83:72 */ -- reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid); -- /* aging - 67:64 */ -- reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging); -- /* portmask - 54:48 */ -- reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask); -- /* mac - 47:0 */ -- reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]); -- reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]); -- reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]); -- reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]); -- reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]); -- reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]); -- -- /* load the array into the ARL table */ -- qca8k_bulk_write(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg)); --} -- --static int --qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd, int port) --{ -- u32 reg; -- int ret; -- -- /* Set the command and FDB index */ -- reg = QCA8K_ATU_FUNC_BUSY; -- reg |= cmd; -- if (port >= 0) { -- reg |= QCA8K_ATU_FUNC_PORT_EN; -- reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port); -- } -- -- /* Write the function register triggering the table access */ -- ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg); -- if (ret) -- return ret; -- -- /* wait for completion */ -- ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY); -- if (ret) -- return ret; -- -- /* Check for table full violation when adding an entry */ -- if (cmd == QCA8K_FDB_LOAD) { -- ret = qca8k_read(priv, QCA8K_REG_ATU_FUNC, ®); -- if (ret < 0) -- return ret; -- if (reg & QCA8K_ATU_FUNC_FULL) -- return -1; -- } -- -- return 0; --} -- --static int --qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb, int port) --{ -- int ret; -- -- qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging); -- ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port); -- if (ret < 0) -- return ret; -- -- return qca8k_fdb_read(priv, fdb); --} -- --static int --qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac, u16 port_mask, -- u16 vid, u8 aging) --{ -- int ret; -- -- mutex_lock(&priv->reg_mutex); -- qca8k_fdb_write(priv, vid, port_mask, mac, aging); -- ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1); -- mutex_unlock(&priv->reg_mutex); -- -- return ret; --} -- --static int --qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac, u16 port_mask, u16 vid) --{ -- int ret; -- -- 
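-- /* An entry is removed by loading its MAC/vid key with aging 0 and
-- * issuing a PURGE operation on the ATU.
-- */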
mutex_lock(&priv->reg_mutex); -- qca8k_fdb_write(priv, vid, port_mask, mac, 0); -- ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1); -- mutex_unlock(&priv->reg_mutex); -- -- return ret; --} -- --static void --qca8k_fdb_flush(struct qca8k_priv *priv) --{ -- mutex_lock(&priv->reg_mutex); -- qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1); -- mutex_unlock(&priv->reg_mutex); --} -- --static int --qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask, -- const u8 *mac, u16 vid) --{ -- struct qca8k_fdb fdb = { 0 }; -- int ret; -- -- mutex_lock(&priv->reg_mutex); -- -- qca8k_fdb_write(priv, vid, 0, mac, 0); -- ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1); -- if (ret < 0) -- goto exit; -- -- ret = qca8k_fdb_read(priv, &fdb); -- if (ret < 0) -- goto exit; -- -- /* Rule exist. Delete first */ -- if (!fdb.aging) { -- ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1); -- if (ret) -- goto exit; -- } -- -- /* Add port to fdb portmask */ -- fdb.port_mask |= port_mask; -- -- qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging); -- ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1); -- --exit: -- mutex_unlock(&priv->reg_mutex); -- return ret; --} -- --static int --qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask, -- const u8 *mac, u16 vid) --{ -- struct qca8k_fdb fdb = { 0 }; -- int ret; -- -- mutex_lock(&priv->reg_mutex); -- -- qca8k_fdb_write(priv, vid, 0, mac, 0); -- ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1); -- if (ret < 0) -- goto exit; -- -- /* Rule doesn't exist. Why delete? */ -- if (!fdb.aging) { -- ret = -EINVAL; -- goto exit; -- } -- -- ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1); -- if (ret) -- goto exit; -- -- /* Only port in the rule is this port. Don't re insert */ -- if (fdb.port_mask == port_mask) -- goto exit; -- -- /* Remove port from port mask */ -- fdb.port_mask &= ~port_mask; -- -- qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging); -- ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1); -- --exit: -- mutex_unlock(&priv->reg_mutex); -- return ret; --} -- --static int --qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid) --{ -- u32 reg; -- int ret; -- -- /* Set the command and VLAN index */ -- reg = QCA8K_VTU_FUNC1_BUSY; -- reg |= cmd; -- reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid); -- -- /* Write the function register triggering the table access */ -- ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg); -- if (ret) -- return ret; -- -- /* wait for completion */ -- ret = qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY); -- if (ret) -- return ret; -- -- /* Check for table full violation when adding an entry */ -- if (cmd == QCA8K_VLAN_LOAD) { -- ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC1, ®); -- if (ret < 0) -- return ret; -- if (reg & QCA8K_VTU_FUNC1_FULL) -- return -ENOMEM; -- } -- -- return 0; --} -- --static int --qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid, bool untagged) --{ -- u32 reg; -- int ret; -- -- /* -- We do the right thing with VLAN 0 and treat it as untagged while -- preserving the tag on egress. 
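-- In practice this makes the function a no-op for vid 0: the early
-- return below means no VTU entry is ever loaded for it.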
-- */ -- if (vid == 0) -- return 0; -- -- mutex_lock(&priv->reg_mutex); -- ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid); -- if (ret < 0) -- goto out; -- -- ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, ®); -- if (ret < 0) -- goto out; -- reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN; -- reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port); -- if (untagged) -- reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port); -- else -- reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port); -- -- ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg); -- if (ret) -- goto out; -- ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid); -- --out: -- mutex_unlock(&priv->reg_mutex); -- -- return ret; --} -- --static int --qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid) --{ -- u32 reg, mask; -- int ret, i; -- bool del; -- -- mutex_lock(&priv->reg_mutex); -- ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid); -- if (ret < 0) -- goto out; -- -- ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, ®); -- if (ret < 0) -- goto out; -- reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port); -- reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port); -- -- /* Check if we're the last member to be removed */ -- del = true; -- for (i = 0; i < QCA8K_NUM_PORTS; i++) { -- mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i); -- -- if ((reg & mask) != mask) { -- del = false; -- break; -- } -- } -- -- if (del) { -- ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid); -- } else { -- ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg); -- if (ret) -- goto out; -- ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid); -- } -- --out: -- mutex_unlock(&priv->reg_mutex); -- -- return ret; --} -- --static int --qca8k_mib_init(struct qca8k_priv *priv) --{ -- int ret; -- -- mutex_lock(&priv->reg_mutex); -- ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB, -- QCA8K_MIB_FUNC | QCA8K_MIB_BUSY, -- FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_FLUSH) | -- QCA8K_MIB_BUSY); -- if (ret) -- goto exit; -- -- ret = qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY); -- if (ret) -- goto exit; -- -- ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP); -- if (ret) -- goto exit; -- -- ret = qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB); -- --exit: -- mutex_unlock(&priv->reg_mutex); -- return ret; --} -- --static void --qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable) --{ -- u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC; -- -- /* Port 0 and 6 have no internal PHY */ -- if (port > 0 && port < 6) -- mask |= QCA8K_PORT_STATUS_LINK_AUTO; -- -- if (enable) -- regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask); -- else -- regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask); --} -- --static int --qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data, -- struct sk_buff *read_skb, u32 *val) --{ -- struct sk_buff *skb = skb_copy(read_skb, GFP_KERNEL); -- bool ack; -- int ret; -- -- reinit_completion(&mgmt_eth_data->rw_done); -- -- /* Increment seq_num and set it in the copy pkt */ -- mgmt_eth_data->seq++; -- qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq); -- mgmt_eth_data->ack = false; -- -- dev_queue_xmit(skb); -- -- ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done, -- QCA8K_ETHERNET_TIMEOUT); -- -- ack = mgmt_eth_data->ack; -- -- if (ret <= 0) -- return -ETIMEDOUT; -- -- if (!ack) -- return -EINVAL; -- -- *val = mgmt_eth_data->data[0]; -- -- return 0; --} -- --static int --qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy, -- int regnum, u16 data) --{ -- 
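-- /* Drive the switch MDIO master over the Ethernet mgmt protocol: a write
-- * packet to QCA8K_MDIO_MASTER_CTRL starts the access, a read packet polls
-- * for completion (and fetches the data for reads), and a final write
-- * clears the register again.
-- */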
struct sk_buff *write_skb, *clear_skb, *read_skb; -- struct qca8k_mgmt_eth_data *mgmt_eth_data; -- u32 write_val, clear_val = 0, val; -- struct net_device *mgmt_master; -- int ret, ret1; -- bool ack; -- -- if (regnum >= QCA8K_MDIO_MASTER_MAX_REG) -- return -EINVAL; -- -- mgmt_eth_data = &priv->mgmt_eth_data; -- -- write_val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN | -- QCA8K_MDIO_MASTER_PHY_ADDR(phy) | -- QCA8K_MDIO_MASTER_REG_ADDR(regnum); -- -- if (read) { -- write_val |= QCA8K_MDIO_MASTER_READ; -- } else { -- write_val |= QCA8K_MDIO_MASTER_WRITE; -- write_val |= QCA8K_MDIO_MASTER_DATA(data); -- } -- -- /* Prealloc all the needed skb before the lock */ -- write_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &write_val, -- QCA8K_ETHERNET_PHY_PRIORITY, sizeof(write_val)); -- if (!write_skb) -- return -ENOMEM; -- -- clear_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &clear_val, -- QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val)); -- if (!clear_skb) { -- ret = -ENOMEM; -- goto err_clear_skb; -- } -- -- read_skb = qca8k_alloc_mdio_header(MDIO_READ, QCA8K_MDIO_MASTER_CTRL, &clear_val, -- QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val)); -- if (!read_skb) { -- ret = -ENOMEM; -- goto err_read_skb; -- } -- -- /* Actually start the request: -- * 1. Send mdio master packet -- * 2. Busy Wait for mdio master command -- * 3. Get the data if we are reading -- * 4. Reset the mdio master (even with error) -- */ -- mutex_lock(&mgmt_eth_data->mutex); -- -- /* Check if mgmt_master is operational */ -- mgmt_master = priv->mgmt_master; -- if (!mgmt_master) { -- mutex_unlock(&mgmt_eth_data->mutex); -- ret = -EINVAL; -- goto err_mgmt_master; -- } -- -- read_skb->dev = mgmt_master; -- clear_skb->dev = mgmt_master; -- write_skb->dev = mgmt_master; -- -- reinit_completion(&mgmt_eth_data->rw_done); -- -- /* Increment seq_num and set it in the write pkt */ -- mgmt_eth_data->seq++; -- qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq); -- mgmt_eth_data->ack = false; -- -- dev_queue_xmit(write_skb); -- -- ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done, -- QCA8K_ETHERNET_TIMEOUT); -- -- ack = mgmt_eth_data->ack; -- -- if (ret <= 0) { -- ret = -ETIMEDOUT; -- kfree_skb(read_skb); -- goto exit; -- } -- -- if (!ack) { -- ret = -EINVAL; -- kfree_skb(read_skb); -- goto exit; -- } -- -- ret = read_poll_timeout(qca8k_phy_eth_busy_wait, ret1, -- !(val & QCA8K_MDIO_MASTER_BUSY), 0, -- QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false, -- mgmt_eth_data, read_skb, &val); -- -- if (ret < 0 && ret1 < 0) { -- ret = ret1; -- goto exit; -- } -- -- if (read) { -- reinit_completion(&mgmt_eth_data->rw_done); -- -- /* Increment seq_num and set it in the read pkt */ -- mgmt_eth_data->seq++; -- qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq); -- mgmt_eth_data->ack = false; -- -- dev_queue_xmit(read_skb); -- -- ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done, -- QCA8K_ETHERNET_TIMEOUT); -- -- ack = mgmt_eth_data->ack; -- -- if (ret <= 0) { -- ret = -ETIMEDOUT; -- goto exit; -- } -- -- if (!ack) { -- ret = -EINVAL; -- goto exit; -- } -- -- ret = mgmt_eth_data->data[0] & QCA8K_MDIO_MASTER_DATA_MASK; -- } else { -- kfree_skb(read_skb); -- } --exit: -- reinit_completion(&mgmt_eth_data->rw_done); -- -- /* Increment seq_num and set it in the clear pkt */ -- mgmt_eth_data->seq++; -- qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq); -- mgmt_eth_data->ack = false; -- -- dev_queue_xmit(clear_skb); -- -- 
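-- /* Best effort: wait for the clear packet to be acked, but don't let its
-- * outcome override the result of the PHY access itself.
-- */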
wait_for_completion_timeout(&mgmt_eth_data->rw_done, -- QCA8K_ETHERNET_TIMEOUT); -- -- mutex_unlock(&mgmt_eth_data->mutex); -- -- return ret; -- -- /* Error handling before lock */ --err_mgmt_master: -- kfree_skb(read_skb); --err_read_skb: -- kfree_skb(clear_skb); --err_clear_skb: -- kfree_skb(write_skb); -- -- return ret; --} -- --static u32 --qca8k_port_to_phy(int port) --{ -- /* From Andrew Lunn: -- * Port 0 has no internal phy. -- * Port 1 has an internal PHY at MDIO address 0. -- * Port 2 has an internal PHY at MDIO address 1. -- * ... -- * Port 5 has an internal PHY at MDIO address 4. -- * Port 6 has no internal PHY. -- */ -- -- return port - 1; --} -- --static int --qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask) --{ -- u16 r1, r2, page; -- u32 val; -- int ret, ret1; -- -- qca8k_split_addr(reg, &r1, &r2, &page); -- -- ret = read_poll_timeout(qca8k_mii_read32, ret1, !(val & mask), 0, -- QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false, -- bus, 0x10 | r2, r1, &val); -- -- /* Check if qca8k_read has failed for a different reason -- * before returnting -ETIMEDOUT -- */ -- if (ret < 0 && ret1 < 0) -- return ret1; -- -- return ret; --} -- --static int --qca8k_mdio_write(struct qca8k_priv *priv, int phy, int regnum, u16 data) --{ -- struct mii_bus *bus = priv->bus; -- u16 r1, r2, page; -- u32 val; -- int ret; -- -- if (regnum >= QCA8K_MDIO_MASTER_MAX_REG) -- return -EINVAL; -- -- val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN | -- QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) | -- QCA8K_MDIO_MASTER_REG_ADDR(regnum) | -- QCA8K_MDIO_MASTER_DATA(data); -- -- qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page); -- -- mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); -- -- ret = qca8k_set_page(priv, page); -- if (ret) -- goto exit; -- -- qca8k_mii_write32(priv, 0x10 | r2, r1, val); -- -- ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL, -- QCA8K_MDIO_MASTER_BUSY); -- --exit: -- /* even if the busy_wait timeouts try to clear the MASTER_EN */ -- qca8k_mii_write32(priv, 0x10 | r2, r1, 0); -- -- mutex_unlock(&bus->mdio_lock); -- -- return ret; --} -- --static int --qca8k_mdio_read(struct qca8k_priv *priv, int phy, int regnum) --{ -- struct mii_bus *bus = priv->bus; -- u16 r1, r2, page; -- u32 val; -- int ret; -- -- if (regnum >= QCA8K_MDIO_MASTER_MAX_REG) -- return -EINVAL; -- -- val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN | -- QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) | -- QCA8K_MDIO_MASTER_REG_ADDR(regnum); -- -- qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page); -- -- mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); -- -- ret = qca8k_set_page(priv, page); -- if (ret) -- goto exit; -- -- qca8k_mii_write32(priv, 0x10 | r2, r1, val); -- -- ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL, -- QCA8K_MDIO_MASTER_BUSY); -- if (ret) -- goto exit; -- -- ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val); -- --exit: -- /* even if the busy_wait timeouts try to clear the MASTER_EN */ -- qca8k_mii_write32(priv, 0x10 | r2, r1, 0); -- -- mutex_unlock(&bus->mdio_lock); -- -- if (ret >= 0) -- ret = val & QCA8K_MDIO_MASTER_DATA_MASK; -- -- return ret; --} -- --static int --qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data) --{ -- struct qca8k_priv *priv = slave_bus->priv; -- int ret; -- -- /* Use mdio Ethernet when available, fallback to legacy one on error */ -- ret = qca8k_phy_eth_command(priv, false, phy, regnum, data); -- if (!ret) -- return 0; -- -- return qca8k_mdio_write(priv, phy, regnum, 
data); --} -- --static int --qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum) --{ -- struct qca8k_priv *priv = slave_bus->priv; -- int ret; -- -- /* Use mdio Ethernet when available, fallback to legacy one on error */ -- ret = qca8k_phy_eth_command(priv, true, phy, regnum, 0); -- if (ret >= 0) -- return ret; -- -- ret = qca8k_mdio_read(priv, phy, regnum); -- -- if (ret < 0) -- return 0xffff; -- -- return ret; --} -- --static int --qca8k_legacy_mdio_write(struct mii_bus *slave_bus, int port, int regnum, u16 data) --{ -- port = qca8k_port_to_phy(port) % PHY_MAX_ADDR; -- -- return qca8k_internal_mdio_write(slave_bus, port, regnum, data); --} -- --static int --qca8k_legacy_mdio_read(struct mii_bus *slave_bus, int port, int regnum) --{ -- port = qca8k_port_to_phy(port) % PHY_MAX_ADDR; -- -- return qca8k_internal_mdio_read(slave_bus, port, regnum); --} -- --static int --qca8k_mdio_register(struct qca8k_priv *priv) --{ -- struct dsa_switch *ds = priv->ds; -- struct device_node *mdio; -- struct mii_bus *bus; -- -- bus = devm_mdiobus_alloc(ds->dev); -- if (!bus) -- return -ENOMEM; -- -- bus->priv = (void *)priv; -- snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d.%d", -- ds->dst->index, ds->index); -- bus->parent = ds->dev; -- bus->phy_mask = ~ds->phys_mii_mask; -- ds->slave_mii_bus = bus; -- -- /* Check if the devicetree declare the port:phy mapping */ -- mdio = of_get_child_by_name(priv->dev->of_node, "mdio"); -- if (of_device_is_available(mdio)) { -- bus->name = "qca8k slave mii"; -- bus->read = qca8k_internal_mdio_read; -- bus->write = qca8k_internal_mdio_write; -- return devm_of_mdiobus_register(priv->dev, bus, mdio); -- } -- -- /* If a mapping can't be found the legacy mapping is used, -- * using the qca8k_port_to_phy function -- */ -- bus->name = "qca8k-legacy slave mii"; -- bus->read = qca8k_legacy_mdio_read; -- bus->write = qca8k_legacy_mdio_write; -- return devm_mdiobus_register(priv->dev, bus); --} -- --static int --qca8k_setup_mdio_bus(struct qca8k_priv *priv) --{ -- u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg; -- struct device_node *ports, *port; -- phy_interface_t mode; -- int err; -- -- ports = of_get_child_by_name(priv->dev->of_node, "ports"); -- if (!ports) -- ports = of_get_child_by_name(priv->dev->of_node, "ethernet-ports"); -- -- if (!ports) -- return -EINVAL; -- -- for_each_available_child_of_node(ports, port) { -- err = of_property_read_u32(port, "reg", ®); -- if (err) { -- of_node_put(port); -- of_node_put(ports); -- return err; -- } -- -- if (!dsa_is_user_port(priv->ds, reg)) -- continue; -- -- of_get_phy_mode(port, &mode); -- -- if (of_property_read_bool(port, "phy-handle") && -- mode != PHY_INTERFACE_MODE_INTERNAL) -- external_mdio_mask |= BIT(reg); -- else -- internal_mdio_mask |= BIT(reg); -- } -- -- of_node_put(ports); -- if (!external_mdio_mask && !internal_mdio_mask) { -- dev_err(priv->dev, "no PHYs are defined.\n"); -- return -EINVAL; -- } -- -- /* The QCA8K_MDIO_MASTER_EN Bit, which grants access to PHYs through -- * the MDIO_MASTER register also _disconnects_ the external MDC -- * passthrough to the internal PHYs. It's not possible to use both -- * configurations at the same time! -- * -- * Because this came up during the review process: -- * If the external mdio-bus driver is capable magically disabling -- * the QCA8K_MDIO_MASTER_EN and mutex/spin-locking out the qca8k's -- * accessors for the time being, it would be possible to pull this -- * off. 
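-- * Until then the two configurations are treated as mutually exclusive,
-- * which is what the check below enforces.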
-- */ -- if (!!external_mdio_mask && !!internal_mdio_mask) { -- dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n"); -- return -EINVAL; -- } -- -- if (external_mdio_mask) { -- /* Make sure to disable the internal mdio bus in cases -- * a dt-overlay and driver reload changed the configuration -- */ -- -- return regmap_clear_bits(priv->regmap, QCA8K_MDIO_MASTER_CTRL, -- QCA8K_MDIO_MASTER_EN); -- } -- -- return qca8k_mdio_register(priv); --} -- --static int --qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv) --{ -- u32 mask = 0; -- int ret = 0; -- -- /* SoC specific settings for ipq8064. -- * If more device require this consider adding -- * a dedicated binding. -- */ -- if (of_machine_is_compatible("qcom,ipq8064")) -- mask |= QCA8K_MAC_PWR_RGMII0_1_8V; -- -- /* SoC specific settings for ipq8065 */ -- if (of_machine_is_compatible("qcom,ipq8065")) -- mask |= QCA8K_MAC_PWR_RGMII1_1_8V; -- -- if (mask) { -- ret = qca8k_rmw(priv, QCA8K_REG_MAC_PWR_SEL, -- QCA8K_MAC_PWR_RGMII0_1_8V | -- QCA8K_MAC_PWR_RGMII1_1_8V, -- mask); -- } -- -- return ret; --} -- --static int qca8k_find_cpu_port(struct dsa_switch *ds) --{ -- struct qca8k_priv *priv = ds->priv; -- -- /* Find the connected cpu port. Valid port are 0 or 6 */ -- if (dsa_is_cpu_port(ds, 0)) -- return 0; -- -- dev_dbg(priv->dev, "port 0 is not the CPU port. Checking port 6"); -- -- if (dsa_is_cpu_port(ds, 6)) -- return 6; -- -- return -EINVAL; --} -- --static int --qca8k_setup_of_pws_reg(struct qca8k_priv *priv) --{ -- const struct qca8k_match_data *data = priv->info; -- struct device_node *node = priv->dev->of_node; -- u32 val = 0; -- int ret; -- -- /* QCA8327 require to set to the correct mode. -- * His bigger brother QCA8328 have the 172 pin layout. -- * Should be applied by default but we set this just to make sure. -- */ -- if (priv->switch_id == QCA8K_ID_QCA8327) { -- /* Set the correct package of 148 pin for QCA8327 */ -- if (data->reduced_package) -- val |= QCA8327_PWS_PACKAGE148_EN; -- -- ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN, -- val); -- if (ret) -- return ret; -- } -- -- if (of_property_read_bool(node, "qca,ignore-power-on-sel")) -- val |= QCA8K_PWS_POWER_ON_SEL; -- -- if (of_property_read_bool(node, "qca,led-open-drain")) { -- if (!(val & QCA8K_PWS_POWER_ON_SEL)) { -- dev_err(priv->dev, "qca,led-open-drain require qca,ignore-power-on-sel to be set."); -- return -EINVAL; -- } -- -- val |= QCA8K_PWS_LED_OPEN_EN_CSR; -- } -- -- return qca8k_rmw(priv, QCA8K_REG_PWS, -- QCA8K_PWS_LED_OPEN_EN_CSR | QCA8K_PWS_POWER_ON_SEL, -- val); --} -- --static int --qca8k_parse_port_config(struct qca8k_priv *priv) --{ -- int port, cpu_port_index = -1, ret; -- struct device_node *port_dn; -- phy_interface_t mode; -- struct dsa_port *dp; -- u32 delay; -- -- /* We have 2 CPU port. 
Check them */ -- for (port = 0; port < QCA8K_NUM_PORTS; port++) { -- /* Skip every other port */ -- if (port != 0 && port != 6) -- continue; -- -- dp = dsa_to_port(priv->ds, port); -- port_dn = dp->dn; -- cpu_port_index++; -- -- if (!of_device_is_available(port_dn)) -- continue; -- -- ret = of_get_phy_mode(port_dn, &mode); -- if (ret) -- continue; -- -- switch (mode) { -- case PHY_INTERFACE_MODE_RGMII: -- case PHY_INTERFACE_MODE_RGMII_ID: -- case PHY_INTERFACE_MODE_RGMII_TXID: -- case PHY_INTERFACE_MODE_RGMII_RXID: -- case PHY_INTERFACE_MODE_SGMII: -- delay = 0; -- -- if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay)) -- /* Switch regs accept value in ns, convert ps to ns */ -- delay = delay / 1000; -- else if (mode == PHY_INTERFACE_MODE_RGMII_ID || -- mode == PHY_INTERFACE_MODE_RGMII_TXID) -- delay = 1; -- -- if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, delay)) { -- dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value"); -- delay = 3; -- } -- -- priv->ports_config.rgmii_tx_delay[cpu_port_index] = delay; -- -- delay = 0; -- -- if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay)) -- /* Switch regs accept value in ns, convert ps to ns */ -- delay = delay / 1000; -- else if (mode == PHY_INTERFACE_MODE_RGMII_ID || -- mode == PHY_INTERFACE_MODE_RGMII_RXID) -- delay = 2; -- -- if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, delay)) { -- dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value"); -- delay = 3; -- } -- -- priv->ports_config.rgmii_rx_delay[cpu_port_index] = delay; -- -- /* Skip sgmii parsing for rgmii* mode */ -- if (mode == PHY_INTERFACE_MODE_RGMII || -- mode == PHY_INTERFACE_MODE_RGMII_ID || -- mode == PHY_INTERFACE_MODE_RGMII_TXID || -- mode == PHY_INTERFACE_MODE_RGMII_RXID) -- break; -- -- if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge")) -- priv->ports_config.sgmii_tx_clk_falling_edge = true; -- -- if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge")) -- priv->ports_config.sgmii_rx_clk_falling_edge = true; -- -- if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) { -- priv->ports_config.sgmii_enable_pll = true; -- -- if (priv->switch_id == QCA8K_ID_QCA8327) { -- dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. 
Aborting enabling"); -- priv->ports_config.sgmii_enable_pll = false; -- } -- -- if (priv->switch_revision < 2) -- dev_warn(priv->dev, "SGMII PLL should NOT be enabled for qca8337 with revision 2 or more."); -- } -- -- break; -- default: -- continue; -- } -- } -- -- return 0; --} -- --static int --qca8k_setup(struct dsa_switch *ds) --{ -- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -- int cpu_port, ret, i; -- u32 mask; -- -- cpu_port = qca8k_find_cpu_port(ds); -- if (cpu_port < 0) { -- dev_err(priv->dev, "No cpu port configured in both cpu port0 and port6"); -- return cpu_port; -- } -- -- /* Parse CPU port config to be later used in phy_link mac_config */ -- ret = qca8k_parse_port_config(priv); -- if (ret) -- return ret; -- -- ret = qca8k_setup_mdio_bus(priv); -- if (ret) -- return ret; -- -- ret = qca8k_setup_of_pws_reg(priv); -- if (ret) -- return ret; -- -- ret = qca8k_setup_mac_pwr_sel(priv); -- if (ret) -- return ret; -- -- /* Make sure MAC06 is disabled */ -- ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL, -- QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN); -- if (ret) { -- dev_err(priv->dev, "failed disabling MAC06 exchange"); -- return ret; -- } -- -- /* Enable CPU Port */ -- ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, -- QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN); -- if (ret) { -- dev_err(priv->dev, "failed enabling CPU port"); -- return ret; -- } -- -- /* Enable MIB counters */ -- ret = qca8k_mib_init(priv); -- if (ret) -- dev_warn(priv->dev, "mib init failed"); -- -- /* Initial setup of all ports */ -- for (i = 0; i < QCA8K_NUM_PORTS; i++) { -- /* Disable forwarding by default on all ports */ -- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i), -- QCA8K_PORT_LOOKUP_MEMBER, 0); -- if (ret) -- return ret; -- -- /* Enable QCA header mode on all cpu ports */ -- if (dsa_is_cpu_port(ds, i)) { -- ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i), -- FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) | -- FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL)); -- if (ret) { -- dev_err(priv->dev, "failed enabling QCA header mode"); -- return ret; -- } -- } -- -- /* Disable MAC by default on all user ports */ -- if (dsa_is_user_port(ds, i)) -- qca8k_port_set_status(priv, i, 0); -- } -- -- /* Forward all unknown frames to CPU port for Linux processing -- * Notice that in multi-cpu config only one port should be set -- * for igmp, unknown, multicast and broadcast packet -- */ -- ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1, -- FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) | -- FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) | -- FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) | -- FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port))); -- if (ret) -- return ret; -- -- /* Setup connection between CPU port & user ports -- * Configure specific switch configuration for ports -- */ -- for (i = 0; i < QCA8K_NUM_PORTS; i++) { -- /* CPU port gets connected to all user ports of the switch */ -- if (dsa_is_cpu_port(ds, i)) { -- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i), -- QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds)); -- if (ret) -- return ret; -- } -- -- /* Individual user ports get connected to CPU port only */ -- if (dsa_is_user_port(ds, i)) { -- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i), -- QCA8K_PORT_LOOKUP_MEMBER, -- BIT(cpu_port)); -- if (ret) -- return ret; -- -- /* Enable ARP Auto-learning by default */ -- ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(i), -- 
QCA8K_PORT_LOOKUP_LEARN); -- if (ret) -- return ret; -- -- /* For port based vlans to work we need to set the -- * default egress vid -- */ -- ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i), -- QCA8K_EGREES_VLAN_PORT_MASK(i), -- QCA8K_EGREES_VLAN_PORT(i, QCA8K_PORT_VID_DEF)); -- if (ret) -- return ret; -- -- ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i), -- QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) | -- QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF)); -- if (ret) -- return ret; -- } -- -- /* The port 5 of the qca8337 have some problem in flood condition. The -- * original legacy driver had some specific buffer and priority settings -- * for the different port suggested by the QCA switch team. Add this -- * missing settings to improve switch stability under load condition. -- * This problem is limited to qca8337 and other qca8k switch are not affected. -- */ -- if (priv->switch_id == QCA8K_ID_QCA8337) { -- switch (i) { -- /* The 2 CPU port and port 5 requires some different -- * priority than any other ports. -- */ -- case 0: -- case 5: -- case 6: -- mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) | -- QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) | -- QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) | -- QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) | -- QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) | -- QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) | -- QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e); -- break; -- default: -- mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) | -- QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) | -- QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) | -- QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) | -- QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19); -- } -- qca8k_write(priv, QCA8K_REG_PORT_HOL_CTRL0(i), mask); -- -- mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) | -- QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN | -- QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN | -- QCA8K_PORT_HOL_CTRL1_WRED_EN; -- qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i), -- QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK | -- QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN | -- QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN | -- QCA8K_PORT_HOL_CTRL1_WRED_EN, -- mask); -- } -- } -- -- /* Special GLOBAL_FC_THRESH value are needed for ar8327 switch */ -- if (priv->switch_id == QCA8K_ID_QCA8327) { -- mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) | -- QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496); -- qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH, -- QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK | -- QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK, -- mask); -- } -- -- /* Setup our port MTUs to match power on defaults */ -- ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN); -- if (ret) -- dev_warn(priv->dev, "failed setting MTU settings"); -- -- /* Flush the FDB table */ -- qca8k_fdb_flush(priv); -- -- /* We don't have interrupts for link changes, so we need to poll */ -- ds->pcs_poll = true; -- -- /* Set min a max ageing value supported */ -- ds->ageing_time_min = 7000; -- ds->ageing_time_max = 458745000; -- -- /* Set max number of LAGs supported */ -- ds->num_lag_ids = QCA8K_NUM_LAGS; -- -- return 0; --} -- --static void --qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index, -- u32 reg) --{ -- u32 delay, val = 0; -- int ret; -- -- /* Delay can be declared in 3 different way. -- * Mode to rgmii and internal-delay standard binding defined -- * rgmii-id or rgmii-tx/rx phy mode set. -- * The parse logic set a delay different than 0 only when one -- * of the 3 different way is used. In all other case delay is -- * not enabled. With ID or TX/RXID delay is enabled and set -- * to the default and recommended value. 
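-- * For example, plain "rgmii-id" with no *-internal-delay-ps properties is
-- * parsed as 1 ns TX / 2 ns RX, while "tx-internal-delay-ps = <2000>" is
-- * parsed as a 2 ns TX delay.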
-- */ -- if (priv->ports_config.rgmii_tx_delay[cpu_port_index]) { -- delay = priv->ports_config.rgmii_tx_delay[cpu_port_index]; -- -- val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) | -- QCA8K_PORT_PAD_RGMII_TX_DELAY_EN; -- } -- -- if (priv->ports_config.rgmii_rx_delay[cpu_port_index]) { -- delay = priv->ports_config.rgmii_rx_delay[cpu_port_index]; -- -- val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) | -- QCA8K_PORT_PAD_RGMII_RX_DELAY_EN; -- } -- -- /* Set RGMII delay based on the selected values */ -- ret = qca8k_rmw(priv, reg, -- QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK | -- QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK | -- QCA8K_PORT_PAD_RGMII_TX_DELAY_EN | -- QCA8K_PORT_PAD_RGMII_RX_DELAY_EN, -- val); -- if (ret) -- dev_err(priv->dev, "Failed to set internal delay for CPU port%d", -- cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6); --} -- --static void --qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode, -- const struct phylink_link_state *state) --{ -- struct qca8k_priv *priv = ds->priv; -- int cpu_port_index, ret; -- u32 reg, val; -- -- switch (port) { -- case 0: /* 1st CPU port */ -- if (state->interface != PHY_INTERFACE_MODE_RGMII && -- state->interface != PHY_INTERFACE_MODE_RGMII_ID && -- state->interface != PHY_INTERFACE_MODE_RGMII_TXID && -- state->interface != PHY_INTERFACE_MODE_RGMII_RXID && -- state->interface != PHY_INTERFACE_MODE_SGMII) -- return; -- -- reg = QCA8K_REG_PORT0_PAD_CTRL; -- cpu_port_index = QCA8K_CPU_PORT0; -- break; -- case 1: -- case 2: -- case 3: -- case 4: -- case 5: -- /* Internal PHY, nothing to do */ -- return; -- case 6: /* 2nd CPU port / external PHY */ -- if (state->interface != PHY_INTERFACE_MODE_RGMII && -- state->interface != PHY_INTERFACE_MODE_RGMII_ID && -- state->interface != PHY_INTERFACE_MODE_RGMII_TXID && -- state->interface != PHY_INTERFACE_MODE_RGMII_RXID && -- state->interface != PHY_INTERFACE_MODE_SGMII && -- state->interface != PHY_INTERFACE_MODE_1000BASEX) -- return; -- -- reg = QCA8K_REG_PORT6_PAD_CTRL; -- cpu_port_index = QCA8K_CPU_PORT6; -- break; -- default: -- dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port); -- return; -- } -- -- if (port != 6 && phylink_autoneg_inband(mode)) { -- dev_err(ds->dev, "%s: in-band negotiation unsupported\n", -- __func__); -- return; -- } -- -- switch (state->interface) { -- case PHY_INTERFACE_MODE_RGMII: -- case PHY_INTERFACE_MODE_RGMII_ID: -- case PHY_INTERFACE_MODE_RGMII_TXID: -- case PHY_INTERFACE_MODE_RGMII_RXID: -- qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN); -- -- /* Configure rgmii delay */ -- qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg); -- -- /* QCA8337 requires to set rgmii rx delay for all ports. -- * This is enabled through PORT5_PAD_CTRL for all ports, -- * rather than individual port registers. 
-- */ -- if (priv->switch_id == QCA8K_ID_QCA8337) -- qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL, -- QCA8K_PORT_PAD_RGMII_RX_DELAY_EN); -- break; -- case PHY_INTERFACE_MODE_SGMII: -- case PHY_INTERFACE_MODE_1000BASEX: -- /* Enable SGMII on the port */ -- qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN); -- -- /* Enable/disable SerDes auto-negotiation as necessary */ -- ret = qca8k_read(priv, QCA8K_REG_PWS, &val); -- if (ret) -- return; -- if (phylink_autoneg_inband(mode)) -- val &= ~QCA8K_PWS_SERDES_AEN_DIS; -- else -- val |= QCA8K_PWS_SERDES_AEN_DIS; -- qca8k_write(priv, QCA8K_REG_PWS, val); -- -- /* Configure the SGMII parameters */ -- ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val); -- if (ret) -- return; -- -- val |= QCA8K_SGMII_EN_SD; -- -- if (priv->ports_config.sgmii_enable_pll) -- val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX | -- QCA8K_SGMII_EN_TX; -- -- if (dsa_is_cpu_port(ds, port)) { -- /* CPU port, we're talking to the CPU MAC, be a PHY */ -- val &= ~QCA8K_SGMII_MODE_CTRL_MASK; -- val |= QCA8K_SGMII_MODE_CTRL_PHY; -- } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { -- val &= ~QCA8K_SGMII_MODE_CTRL_MASK; -- val |= QCA8K_SGMII_MODE_CTRL_MAC; -- } else if (state->interface == PHY_INTERFACE_MODE_1000BASEX) { -- val &= ~QCA8K_SGMII_MODE_CTRL_MASK; -- val |= QCA8K_SGMII_MODE_CTRL_BASEX; -- } -- -- qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val); -- -- /* From original code is reported port instability as SGMII also -- * require delay set. Apply advised values here or take them from DT. -- */ -- if (state->interface == PHY_INTERFACE_MODE_SGMII) -- qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg); -- -- /* For qca8327/qca8328/qca8334/qca8338 sgmii is unique and -- * falling edge is set writing in the PORT0 PAD reg -- */ -- if (priv->switch_id == QCA8K_ID_QCA8327 || -- priv->switch_id == QCA8K_ID_QCA8337) -- reg = QCA8K_REG_PORT0_PAD_CTRL; -- -- val = 0; -- -- /* SGMII Clock phase configuration */ -- if (priv->ports_config.sgmii_rx_clk_falling_edge) -- val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE; -- -- if (priv->ports_config.sgmii_tx_clk_falling_edge) -- val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE; -- -- if (val) -- ret = qca8k_rmw(priv, reg, -- QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE | -- QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE, -- val); -- -- break; -- default: -- dev_err(ds->dev, "xMII mode %s not supported for port %d\n", -- phy_modes(state->interface), port); -- return; -- } --} -- --static void --qca8k_phylink_validate(struct dsa_switch *ds, int port, -- unsigned long *supported, -- struct phylink_link_state *state) --{ -- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; -- -- switch (port) { -- case 0: /* 1st CPU port */ -- if (state->interface != PHY_INTERFACE_MODE_NA && -- state->interface != PHY_INTERFACE_MODE_RGMII && -- state->interface != PHY_INTERFACE_MODE_RGMII_ID && -- state->interface != PHY_INTERFACE_MODE_RGMII_TXID && -- state->interface != PHY_INTERFACE_MODE_RGMII_RXID && -- state->interface != PHY_INTERFACE_MODE_SGMII) -- goto unsupported; -- break; -- case 1: -- case 2: -- case 3: -- case 4: -- case 5: -- /* Internal PHY */ -- if (state->interface != PHY_INTERFACE_MODE_NA && -- state->interface != PHY_INTERFACE_MODE_GMII && -- state->interface != PHY_INTERFACE_MODE_INTERNAL) -- goto unsupported; -- break; -- case 6: /* 2nd CPU port / external PHY */ -- if (state->interface != PHY_INTERFACE_MODE_NA && -- state->interface != PHY_INTERFACE_MODE_RGMII && -- state->interface != PHY_INTERFACE_MODE_RGMII_ID && -- state->interface != 
PHY_INTERFACE_MODE_RGMII_TXID && -- state->interface != PHY_INTERFACE_MODE_RGMII_RXID && -- state->interface != PHY_INTERFACE_MODE_SGMII && -- state->interface != PHY_INTERFACE_MODE_1000BASEX) -- goto unsupported; -- break; -- default: --unsupported: -- linkmode_zero(supported); -- return; -- } -- -- phylink_set_port_modes(mask); -- phylink_set(mask, Autoneg); -- -- phylink_set(mask, 1000baseT_Full); -- phylink_set(mask, 10baseT_Half); -- phylink_set(mask, 10baseT_Full); -- phylink_set(mask, 100baseT_Half); -- phylink_set(mask, 100baseT_Full); -- -- if (state->interface == PHY_INTERFACE_MODE_1000BASEX) -- phylink_set(mask, 1000baseX_Full); -- -- phylink_set(mask, Pause); -- phylink_set(mask, Asym_Pause); -- -- linkmode_and(supported, supported, mask); -- linkmode_and(state->advertising, state->advertising, mask); --} -- --static int --qca8k_phylink_mac_link_state(struct dsa_switch *ds, int port, -- struct phylink_link_state *state) --{ -- struct qca8k_priv *priv = ds->priv; -- u32 reg; -- int ret; -- -- ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), ®); -- if (ret < 0) -- return ret; -- -- state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP); -- state->an_complete = state->link; -- state->an_enabled = !!(reg & QCA8K_PORT_STATUS_LINK_AUTO); -- state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL : -- DUPLEX_HALF; -- -- switch (reg & QCA8K_PORT_STATUS_SPEED) { -- case QCA8K_PORT_STATUS_SPEED_10: -- state->speed = SPEED_10; -- break; -- case QCA8K_PORT_STATUS_SPEED_100: -- state->speed = SPEED_100; -- break; -- case QCA8K_PORT_STATUS_SPEED_1000: -- state->speed = SPEED_1000; -- break; -- default: -- state->speed = SPEED_UNKNOWN; -- break; -- } -- -- state->pause = MLO_PAUSE_NONE; -- if (reg & QCA8K_PORT_STATUS_RXFLOW) -- state->pause |= MLO_PAUSE_RX; -- if (reg & QCA8K_PORT_STATUS_TXFLOW) -- state->pause |= MLO_PAUSE_TX; -- -- return 1; --} -- --static void --qca8k_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode, -- phy_interface_t interface) --{ -- struct qca8k_priv *priv = ds->priv; -- -- qca8k_port_set_status(priv, port, 0); --} -- --static void --qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode, -- phy_interface_t interface, struct phy_device *phydev, -- int speed, int duplex, bool tx_pause, bool rx_pause) --{ -- struct qca8k_priv *priv = ds->priv; -- u32 reg; -- -- if (phylink_autoneg_inband(mode)) { -- reg = QCA8K_PORT_STATUS_LINK_AUTO; -- } else { -- switch (speed) { -- case SPEED_10: -- reg = QCA8K_PORT_STATUS_SPEED_10; -- break; -- case SPEED_100: -- reg = QCA8K_PORT_STATUS_SPEED_100; -- break; -- case SPEED_1000: -- reg = QCA8K_PORT_STATUS_SPEED_1000; -- break; -- default: -- reg = QCA8K_PORT_STATUS_LINK_AUTO; -- break; -- } -- -- if (duplex == DUPLEX_FULL) -- reg |= QCA8K_PORT_STATUS_DUPLEX; -- -- if (rx_pause || dsa_is_cpu_port(ds, port)) -- reg |= QCA8K_PORT_STATUS_RXFLOW; -- -- if (tx_pause || dsa_is_cpu_port(ds, port)) -- reg |= QCA8K_PORT_STATUS_TXFLOW; -- } -- -- reg |= QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC; -- -- qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg); --} -- --static void --qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data) --{ -- struct qca8k_priv *priv = ds->priv; -- int i; -- -- if (stringset != ETH_SS_STATS) -- return; -- -- for (i = 0; i < priv->info->mib_count; i++) -- strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name, -- ETH_GSTRING_LEN); --} -- --static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *skb) --{ -- struct 
qca8k_mib_eth_data *mib_eth_data; -- struct qca8k_priv *priv = ds->priv; -- const struct qca8k_mib_desc *mib; -- struct mib_ethhdr *mib_ethhdr; -- int i, mib_len, offset = 0; -- u64 *data; -- u8 port; -- -- mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb); -- mib_eth_data = &priv->mib_eth_data; -- -- /* The switch autocast every port. Ignore other packet and -- * parse only the requested one. -- */ -- port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, ntohs(mib_ethhdr->hdr)); -- if (port != mib_eth_data->req_port) -- goto exit; -- -- data = mib_eth_data->data; -- -- for (i = 0; i < priv->info->mib_count; i++) { -- mib = &ar8327_mib[i]; -- -- /* First 3 mib are present in the skb head */ -- if (i < 3) { -- data[i] = mib_ethhdr->data[i]; -- continue; -- } -- -- mib_len = sizeof(uint32_t); -- -- /* Some mib are 64 bit wide */ -- if (mib->size == 2) -- mib_len = sizeof(uint64_t); -- -- /* Copy the mib value from packet to the */ -- memcpy(data + i, skb->data + offset, mib_len); -- -- /* Set the offset for the next mib */ -- offset += mib_len; -- } -- --exit: -- /* Complete on receiving all the mib packet */ -- if (refcount_dec_and_test(&mib_eth_data->port_parsed)) -- complete(&mib_eth_data->rw_done); --} -- --static int --qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data) --{ -- struct dsa_port *dp = dsa_to_port(ds, port); -- struct qca8k_mib_eth_data *mib_eth_data; -- struct qca8k_priv *priv = ds->priv; -- int ret; -- -- mib_eth_data = &priv->mib_eth_data; -- -- mutex_lock(&mib_eth_data->mutex); -- -- reinit_completion(&mib_eth_data->rw_done); -- -- mib_eth_data->req_port = dp->index; -- mib_eth_data->data = data; -- refcount_set(&mib_eth_data->port_parsed, QCA8K_NUM_PORTS); -- -- mutex_lock(&priv->reg_mutex); -- -- /* Send mib autocast request */ -- ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB, -- QCA8K_MIB_FUNC | QCA8K_MIB_BUSY, -- FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_CAST) | -- QCA8K_MIB_BUSY); -- -- mutex_unlock(&priv->reg_mutex); -- -- if (ret) -- goto exit; -- -- ret = wait_for_completion_timeout(&mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT); -- --exit: -- mutex_unlock(&mib_eth_data->mutex); -- -- return ret; --} -- --static void --qca8k_get_ethtool_stats(struct dsa_switch *ds, int port, -- uint64_t *data) --{ -- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -- const struct qca8k_mib_desc *mib; -- u32 reg, i, val; -- u32 hi = 0; -- int ret; -- -- if (priv->mgmt_master && priv->info->ops->autocast_mib && -- priv->info->ops->autocast_mib(ds, port, data) > 0) -- return; -- -- for (i = 0; i < priv->info->mib_count; i++) { -- mib = &ar8327_mib[i]; -- reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset; -- -- ret = qca8k_read(priv, reg, &val); -- if (ret < 0) -- continue; -- -- if (mib->size == 2) { -- ret = qca8k_read(priv, reg + 4, &hi); -- if (ret < 0) -- continue; -- } -- -- data[i] = val; -- if (mib->size == 2) -- data[i] |= (u64)hi << 32; -- } --} -- --static int --qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset) --{ -- struct qca8k_priv *priv = ds->priv; -- -- if (sset != ETH_SS_STATS) -- return 0; -- -- return priv->info->mib_count; --} -- --static int --qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee) --{ -- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -- u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port); -- u32 reg; -- int ret; -- -- mutex_lock(&priv->reg_mutex); -- ret = qca8k_read(priv, QCA8K_REG_EEE_CTRL, ®); -- if (ret < 0) -- goto exit; -- -- if (eee->eee_enabled) -- reg |= lpi_en; -- else -- 
reg &= ~lpi_en; -- ret = qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg); -- --exit: -- mutex_unlock(&priv->reg_mutex); -- return ret; --} -- --static int --qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e) --{ -- /* Nothing to do on the port's MAC */ -- return 0; --} -- --static void --qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) --{ -- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -- u32 stp_state; -- -- switch (state) { -- case BR_STATE_DISABLED: -- stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED; -- break; -- case BR_STATE_BLOCKING: -- stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING; -- break; -- case BR_STATE_LISTENING: -- stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING; -- break; -- case BR_STATE_LEARNING: -- stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING; -- break; -- case BR_STATE_FORWARDING: -- default: -- stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD; -- break; -- } -- -- qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), -- QCA8K_PORT_LOOKUP_STATE_MASK, stp_state); --} -- --static int --qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br) --{ -- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -- int port_mask, cpu_port; -- int i, ret; -- -- cpu_port = dsa_to_port(ds, port)->cpu_dp->index; -- port_mask = BIT(cpu_port); -- -- for (i = 0; i < QCA8K_NUM_PORTS; i++) { -- if (dsa_is_cpu_port(ds, i)) -- continue; -- if (dsa_to_port(ds, i)->bridge_dev != br) -- continue; -- /* Add this port to the portvlan mask of the other ports -- * in the bridge -- */ -- ret = regmap_set_bits(priv->regmap, -- QCA8K_PORT_LOOKUP_CTRL(i), -- BIT(port)); -- if (ret) -- return ret; -- if (i != port) -- port_mask |= BIT(i); -- } -- -- /* Add all other ports to this ports portvlan mask */ -- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), -- QCA8K_PORT_LOOKUP_MEMBER, port_mask); -- -- return ret; --} -- --static void --qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br) --{ -- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -- int cpu_port, i; -- -- cpu_port = dsa_to_port(ds, port)->cpu_dp->index; -- -- for (i = 0; i < QCA8K_NUM_PORTS; i++) { -- if (dsa_is_cpu_port(ds, i)) -- continue; -- if (dsa_to_port(ds, i)->bridge_dev != br) -- continue; -- /* Remove this port to the portvlan mask of the other ports -- * in the bridge -- */ -- regmap_clear_bits(priv->regmap, -- QCA8K_PORT_LOOKUP_CTRL(i), -- BIT(port)); -- } -- -- /* Set the cpu port to be the only one in the portvlan mask of -- * this port -- */ -- qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), -- QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port)); --} -- --static void --qca8k_port_fast_age(struct dsa_switch *ds, int port) --{ -- struct qca8k_priv *priv = ds->priv; -- -- mutex_lock(&priv->reg_mutex); -- qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port); -- mutex_unlock(&priv->reg_mutex); --} -- --static int --qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs) --{ -- struct qca8k_priv *priv = ds->priv; -- unsigned int secs = msecs / 1000; -- u32 val; -- -- /* AGE_TIME reg is set in 7s step */ -- val = secs / 7; -- -- /* Handle case with 0 as val to NOT disable -- * learning -- */ -- if (!val) -- val = 1; -- -- return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL, QCA8K_ATU_AGE_TIME_MASK, -- QCA8K_ATU_AGE_TIME(val)); --} -- --static int --qca8k_port_enable(struct dsa_switch *ds, int port, -- struct phy_device *phy) --{ -- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -- -- qca8k_port_set_status(priv, port, 1); -- 
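-- /* Track enabled ports: qca8k_port_change_mtu() uses this map to briefly
-- * stop the CPU port MACs while MAX_FRAME_SIZE is rewritten.
-- */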
priv->port_enabled_map |= BIT(port); -- -- if (dsa_is_user_port(ds, port)) -- phy_support_asym_pause(phy); -- -- return 0; --} -- --static void --qca8k_port_disable(struct dsa_switch *ds, int port) --{ -- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -- -- qca8k_port_set_status(priv, port, 0); -- priv->port_enabled_map &= ~BIT(port); --} -- --static int --qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu) --{ -- struct qca8k_priv *priv = ds->priv; -- int ret; -- -- /* We have only have a general MTU setting. -- * DSA always set the CPU port's MTU to the largest MTU of the slave -- * ports. -- * Setting MTU just for the CPU port is sufficient to correctly set a -- * value for every port. -- */ -- if (!dsa_is_cpu_port(ds, port)) -- return 0; -- -- /* To change the MAX_FRAME_SIZE the cpu ports must be off or -- * the switch panics. -- * Turn off both cpu ports before applying the new value to prevent -- * this. -- */ -- if (priv->port_enabled_map & BIT(0)) -- qca8k_port_set_status(priv, 0, 0); -- -- if (priv->port_enabled_map & BIT(6)) -- qca8k_port_set_status(priv, 6, 0); -- -- /* Include L2 header / FCS length */ -- ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu + ETH_HLEN + ETH_FCS_LEN); -- -- if (priv->port_enabled_map & BIT(0)) -- qca8k_port_set_status(priv, 0, 1); -- -- if (priv->port_enabled_map & BIT(6)) -- qca8k_port_set_status(priv, 6, 1); -- -- return ret; --} -- --static int --qca8k_port_max_mtu(struct dsa_switch *ds, int port) --{ -- return QCA8K_MAX_MTU; --} -- --static int --qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr, -- u16 port_mask, u16 vid) --{ -- /* Set the vid to the port vlan id if no vid is set */ -- if (!vid) -- vid = QCA8K_PORT_VID_DEF; -- -- return qca8k_fdb_add(priv, addr, port_mask, vid, -- QCA8K_ATU_STATUS_STATIC); --} -- --static int --qca8k_port_fdb_add(struct dsa_switch *ds, int port, -- const unsigned char *addr, u16 vid) --{ -- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -- u16 port_mask = BIT(port); -- -- return qca8k_port_fdb_insert(priv, addr, port_mask, vid); --} -- --static int --qca8k_port_fdb_del(struct dsa_switch *ds, int port, -- const unsigned char *addr, u16 vid) --{ -- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -- u16 port_mask = BIT(port); -- -- if (!vid) -- vid = QCA8K_PORT_VID_DEF; -- -- return qca8k_fdb_del(priv, addr, port_mask, vid); --} -- --static int --qca8k_port_fdb_dump(struct dsa_switch *ds, int port, -- dsa_fdb_dump_cb_t *cb, void *data) --{ -- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -- struct qca8k_fdb _fdb = { 0 }; -- int cnt = QCA8K_NUM_FDB_RECORDS; -- bool is_static; -- int ret = 0; -- -- mutex_lock(&priv->reg_mutex); -- while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) { -- if (!_fdb.aging) -- break; -- is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC); -- ret = cb(_fdb.mac, _fdb.vid, is_static, data); -- if (ret) -- break; -- } -- mutex_unlock(&priv->reg_mutex); -- -- return 0; --} -- --static int --qca8k_port_mdb_add(struct dsa_switch *ds, int port, -- const struct switchdev_obj_port_mdb *mdb) --{ -- struct qca8k_priv *priv = ds->priv; -- const u8 *addr = mdb->addr; -- u16 vid = mdb->vid; -- -- return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid); --} -- --static int --qca8k_port_mdb_del(struct dsa_switch *ds, int port, -- const struct switchdev_obj_port_mdb *mdb) --{ -- struct qca8k_priv *priv = ds->priv; -- const u8 *addr = mdb->addr; -- u16 vid = mdb->vid; -- -- return qca8k_fdb_search_and_del(priv, BIT(port), 
addr, vid); --} -- --static int --qca8k_port_mirror_add(struct dsa_switch *ds, int port, -- struct dsa_mall_mirror_tc_entry *mirror, -- bool ingress) --{ -- struct qca8k_priv *priv = ds->priv; -- int monitor_port, ret; -- u32 reg, val; -- -- /* Check for existent entry */ -- if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port)) -- return -EEXIST; -- -- ret = regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val); -- if (ret) -- return ret; -- -- /* QCA83xx can have only one port set to mirror mode. -- * Check that the correct port is requested and return error otherwise. -- * When no mirror port is set, the values is set to 0xF -- */ -- monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val); -- if (monitor_port != 0xF && monitor_port != mirror->to_local_port) -- return -EEXIST; -- -- /* Set the monitor port */ -- val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, -- mirror->to_local_port); -- ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, -- QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val); -- if (ret) -- return ret; -- -- if (ingress) { -- reg = QCA8K_PORT_LOOKUP_CTRL(port); -- val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN; -- } else { -- reg = QCA8K_REG_PORT_HOL_CTRL1(port); -- val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN; -- } -- -- ret = regmap_update_bits(priv->regmap, reg, val, val); -- if (ret) -- return ret; -- -- /* Track mirror port for tx and rx to decide when the -- * mirror port has to be disabled. -- */ -- if (ingress) -- priv->mirror_rx |= BIT(port); -- else -- priv->mirror_tx |= BIT(port); -- -- return 0; --} -- --static void --qca8k_port_mirror_del(struct dsa_switch *ds, int port, -- struct dsa_mall_mirror_tc_entry *mirror) --{ -- struct qca8k_priv *priv = ds->priv; -- u32 reg, val; -- int ret; -- -- if (mirror->ingress) { -- reg = QCA8K_PORT_LOOKUP_CTRL(port); -- val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN; -- } else { -- reg = QCA8K_REG_PORT_HOL_CTRL1(port); -- val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN; -- } -- -- ret = regmap_clear_bits(priv->regmap, reg, val); -- if (ret) -- goto err; -- -- if (mirror->ingress) -- priv->mirror_rx &= ~BIT(port); -- else -- priv->mirror_tx &= ~BIT(port); -- -- /* No port set to send packet to mirror port. 
Disable mirror port */ -- if (!priv->mirror_rx && !priv->mirror_tx) { -- val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, 0xF); -- ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, -- QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val); -- if (ret) -- goto err; -- } --err: -- dev_err(priv->dev, "Failed to del mirror port from %d", port); --} -- --static int --qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, -- struct netlink_ext_ack *extack) --{ -- struct qca8k_priv *priv = ds->priv; -- int ret; -- -- if (vlan_filtering) { -- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), -- QCA8K_PORT_LOOKUP_VLAN_MODE_MASK, -- QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE); -- } else { -- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), -- QCA8K_PORT_LOOKUP_VLAN_MODE_MASK, -- QCA8K_PORT_LOOKUP_VLAN_MODE_NONE); -- } -- -- return ret; --} -- --static int --qca8k_port_vlan_add(struct dsa_switch *ds, int port, -- const struct switchdev_obj_port_vlan *vlan, -- struct netlink_ext_ack *extack) --{ -- bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; -- bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; -- struct qca8k_priv *priv = ds->priv; -- int ret; -- -- ret = qca8k_vlan_add(priv, port, vlan->vid, untagged); -- if (ret) { -- dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret); -- return ret; -- } -- -- if (pvid) { -- ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port), -- QCA8K_EGREES_VLAN_PORT_MASK(port), -- QCA8K_EGREES_VLAN_PORT(port, vlan->vid)); -- if (ret) -- return ret; -- -- ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port), -- QCA8K_PORT_VLAN_CVID(vlan->vid) | -- QCA8K_PORT_VLAN_SVID(vlan->vid)); -- } -- -- return ret; --} -- --static int --qca8k_port_vlan_del(struct dsa_switch *ds, int port, -- const struct switchdev_obj_port_vlan *vlan) --{ -- struct qca8k_priv *priv = ds->priv; -- int ret; -- -- ret = qca8k_vlan_del(priv, port, vlan->vid); -- if (ret) -- dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret); -- -- return ret; --} -- --static u32 qca8k_get_phy_flags(struct dsa_switch *ds, int port) --{ -- struct qca8k_priv *priv = ds->priv; -- -- /* Communicate to the phy internal driver the switch revision. -- * Based on the switch revision different values needs to be -- * set to the dbg and mmd reg on the phy. -- * The first 2 bit are used to communicate the switch revision -- * to the phy driver. 
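-- * Ports without an internal PHY (0 and 6) get no flags.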
-- */ -- if (port > 0 && port < 6) -- return priv->switch_revision; -- -- return 0; --} -- --static enum dsa_tag_protocol --qca8k_get_tag_protocol(struct dsa_switch *ds, int port, -- enum dsa_tag_protocol mp) --{ -- return DSA_TAG_PROTO_QCA; --} -- --static bool --qca8k_lag_can_offload(struct dsa_switch *ds, -- struct net_device *lag, -- struct netdev_lag_upper_info *info) --{ -- struct dsa_port *dp; -- int id, members = 0; -- -- id = dsa_lag_id(ds->dst, lag); -- if (id < 0 || id >= ds->num_lag_ids) -- return false; -- -- dsa_lag_foreach_port(dp, ds->dst, lag) -- /* Includes the port joining the LAG */ -- members++; -- -- if (members > QCA8K_NUM_PORTS_FOR_LAG) -- return false; -- -- if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) -- return false; -- -- if (info->hash_type != NETDEV_LAG_HASH_L2 && -- info->hash_type != NETDEV_LAG_HASH_L23) -- return false; -- -- return true; --} -- --static int --qca8k_lag_setup_hash(struct dsa_switch *ds, -- struct net_device *lag, -- struct netdev_lag_upper_info *info) --{ -- struct qca8k_priv *priv = ds->priv; -- bool unique_lag = true; -- u32 hash = 0; -- int i, id; -- -- id = dsa_lag_id(ds->dst, lag); -- -- switch (info->hash_type) { -- case NETDEV_LAG_HASH_L23: -- hash |= QCA8K_TRUNK_HASH_SIP_EN; -- hash |= QCA8K_TRUNK_HASH_DIP_EN; -- fallthrough; -- case NETDEV_LAG_HASH_L2: -- hash |= QCA8K_TRUNK_HASH_SA_EN; -- hash |= QCA8K_TRUNK_HASH_DA_EN; -- break; -- default: /* We should NEVER reach this */ -- return -EOPNOTSUPP; -- } -- -- /* Check if we are the unique configured LAG */ -- dsa_lags_foreach_id(i, ds->dst) -- if (i != id && dsa_lag_dev(ds->dst, i)) { -- unique_lag = false; -- break; -- } -- -- /* Hash Mode is global. Make sure the same Hash Mode -- * is set to all the 4 possible lag. -- * If we are the unique LAG we can set whatever hash -- * mode we want. -- * To change hash mode it's needed to remove all LAG -- * and change the mode with the latest. -- */ -- if (unique_lag) { -- priv->lag_hash_mode = hash; -- } else if (priv->lag_hash_mode != hash) { -- netdev_err(lag, "Error: Mismateched Hash Mode across different lag is not supported\n"); -- return -EOPNOTSUPP; -- } -- -- return regmap_update_bits(priv->regmap, QCA8K_TRUNK_HASH_EN_CTRL, -- QCA8K_TRUNK_HASH_MASK, hash); --} -- --static int --qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port, -- struct net_device *lag, bool delete) --{ -- struct qca8k_priv *priv = ds->priv; -- int ret, id, i; -- u32 val; -- -- id = dsa_lag_id(ds->dst, lag); -- -- /* Read current port member */ -- ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val); -- if (ret) -- return ret; -- -- /* Shift val to the correct trunk */ -- val >>= QCA8K_REG_GOL_TRUNK_SHIFT(id); -- val &= QCA8K_REG_GOL_TRUNK_MEMBER_MASK; -- if (delete) -- val &= ~BIT(port); -- else -- val |= BIT(port); -- -- /* Update port member. 
With empty portmap disable trunk */ -- ret = regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, -- QCA8K_REG_GOL_TRUNK_MEMBER(id) | -- QCA8K_REG_GOL_TRUNK_EN(id), -- !val << QCA8K_REG_GOL_TRUNK_SHIFT(id) | -- val << QCA8K_REG_GOL_TRUNK_SHIFT(id)); -- -- /* Search empty member if adding or port on deleting */ -- for (i = 0; i < QCA8K_NUM_PORTS_FOR_LAG; i++) { -- ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), &val); -- if (ret) -- return ret; -- -- val >>= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i); -- val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK; -- -- if (delete) { -- /* If port flagged to be disabled assume this member is -- * empty -- */ -- if (val != QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK) -- continue; -- -- val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK; -- if (val != port) -- continue; -- } else { -- /* If port flagged to be enabled assume this member is -- * already set -- */ -- if (val == QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK) -- continue; -- } -- -- /* We have found the member to add/remove */ -- break; -- } -- -- /* Set port in the correct port mask or disable port if in delete mode */ -- return regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), -- QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(id, i) | -- QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(id, i), -- !delete << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i) | -- port << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i)); --} -- --static int --qca8k_port_lag_join(struct dsa_switch *ds, int port, -- struct net_device *lag, -- struct netdev_lag_upper_info *info) --{ -- int ret; -- -- if (!qca8k_lag_can_offload(ds, lag, info)) -- return -EOPNOTSUPP; -- -- ret = qca8k_lag_setup_hash(ds, lag, info); -- if (ret) -- return ret; -- -- return qca8k_lag_refresh_portmap(ds, port, lag, false); --} -- --static int --qca8k_port_lag_leave(struct dsa_switch *ds, int port, -- struct net_device *lag) --{ -- return qca8k_lag_refresh_portmap(ds, port, lag, true); --} -- --static void --qca8k_master_change(struct dsa_switch *ds, const struct net_device *master, -- bool operational) --{ -- struct dsa_port *dp = master->dsa_ptr; -- struct qca8k_priv *priv = ds->priv; -- -- /* Ethernet MIB/MDIO is only supported for CPU port 0 */ -- if (dp->index != 0) -- return; -- -- mutex_lock(&priv->mgmt_eth_data.mutex); -- mutex_lock(&priv->mib_eth_data.mutex); -- -- priv->mgmt_master = operational ? 
(struct net_device *)master : NULL; -- -- mutex_unlock(&priv->mib_eth_data.mutex); -- mutex_unlock(&priv->mgmt_eth_data.mutex); --} -- --static int qca8k_connect_tag_protocol(struct dsa_switch *ds, -- enum dsa_tag_protocol proto) --{ -- struct qca_tagger_data *tagger_data; -- -- switch (proto) { -- case DSA_TAG_PROTO_QCA: -- tagger_data = ds->tagger_data; -- -- tagger_data->rw_reg_ack_handler = qca8k_rw_reg_ack_handler; -- tagger_data->mib_autocast_handler = qca8k_mib_autocast_handler; -- -- break; -- default: -- return -EOPNOTSUPP; -- } -- -- return 0; --} -- --static const struct dsa_switch_ops qca8k_switch_ops = { -- .get_tag_protocol = qca8k_get_tag_protocol, -- .setup = qca8k_setup, -- .get_strings = qca8k_get_strings, -- .get_ethtool_stats = qca8k_get_ethtool_stats, -- .get_sset_count = qca8k_get_sset_count, -- .set_ageing_time = qca8k_set_ageing_time, -- .get_mac_eee = qca8k_get_mac_eee, -- .set_mac_eee = qca8k_set_mac_eee, -- .port_enable = qca8k_port_enable, -- .port_disable = qca8k_port_disable, -- .port_change_mtu = qca8k_port_change_mtu, -- .port_max_mtu = qca8k_port_max_mtu, -- .port_stp_state_set = qca8k_port_stp_state_set, -- .port_bridge_join = qca8k_port_bridge_join, -- .port_bridge_leave = qca8k_port_bridge_leave, -- .port_fast_age = qca8k_port_fast_age, -- .port_fdb_add = qca8k_port_fdb_add, -- .port_fdb_del = qca8k_port_fdb_del, -- .port_fdb_dump = qca8k_port_fdb_dump, -- .port_mdb_add = qca8k_port_mdb_add, -- .port_mdb_del = qca8k_port_mdb_del, -- .port_mirror_add = qca8k_port_mirror_add, -- .port_mirror_del = qca8k_port_mirror_del, -- .port_vlan_filtering = qca8k_port_vlan_filtering, -- .port_vlan_add = qca8k_port_vlan_add, -- .port_vlan_del = qca8k_port_vlan_del, -- .phylink_validate = qca8k_phylink_validate, -- .phylink_mac_link_state = qca8k_phylink_mac_link_state, -- .phylink_mac_config = qca8k_phylink_mac_config, -- .phylink_mac_link_down = qca8k_phylink_mac_link_down, -- .phylink_mac_link_up = qca8k_phylink_mac_link_up, -- .get_phy_flags = qca8k_get_phy_flags, -- .port_lag_join = qca8k_port_lag_join, -- .port_lag_leave = qca8k_port_lag_leave, -- .master_state_change = qca8k_master_change, -- .connect_tag_protocol = qca8k_connect_tag_protocol, --}; -- --static int qca8k_read_switch_id(struct qca8k_priv *priv) --{ -- u32 val; -- u8 id; -- int ret; -- -- if (!priv->info) -- return -ENODEV; -- -- ret = qca8k_read(priv, QCA8K_REG_MASK_CTRL, &val); -- if (ret < 0) -- return -ENODEV; -- -- id = QCA8K_MASK_CTRL_DEVICE_ID(val); -- if (id != priv->info->id) { -- dev_err(priv->dev, -- "Switch id detected %x but expected %x", -- id, priv->info->id); -- return -ENODEV; -- } -- -- priv->switch_id = id; -- -- /* Save revision to communicate to the internal PHY driver */ -- priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val); -- -- return 0; --} -- --static int --qca8k_sw_probe(struct mdio_device *mdiodev) --{ -- struct qca8k_priv *priv; -- int ret; -- -- /* allocate the private data struct so that we can probe the switches -- * ID register -- */ -- priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL); -- if (!priv) -- return -ENOMEM; -- -- priv->info = of_device_get_match_data(priv->dev); -- priv->bus = mdiodev->bus; -- priv->dev = &mdiodev->dev; -- -- priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset", -- GPIOD_ASIS); -- if (IS_ERR(priv->reset_gpio)) -- return PTR_ERR(priv->reset_gpio); -- -- if (priv->reset_gpio) { -- gpiod_set_value_cansleep(priv->reset_gpio, 1); -- /* The active low duration must be greater than 10 ms -- * and checkpatch.pl wants 20 ms. 
-- */ -- msleep(20); -- gpiod_set_value_cansleep(priv->reset_gpio, 0); -- } -- -- /* Start by setting up the register mapping */ -- priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, priv, -- &qca8k_regmap_config); -- if (IS_ERR(priv->regmap)) { -- dev_err(priv->dev, "regmap initialization failed"); -- return PTR_ERR(priv->regmap); -- } -- -- priv->mdio_cache.page = 0xffff; -- priv->mdio_cache.lo = 0xffff; -- priv->mdio_cache.hi = 0xffff; -- -- /* Check the detected switch id */ -- ret = qca8k_read_switch_id(priv); -- if (ret) -- return ret; -- -- priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL); -- if (!priv->ds) -- return -ENOMEM; -- -- mutex_init(&priv->mgmt_eth_data.mutex); -- init_completion(&priv->mgmt_eth_data.rw_done); -- -- mutex_init(&priv->mib_eth_data.mutex); -- init_completion(&priv->mib_eth_data.rw_done); -- -- priv->ds->dev = &mdiodev->dev; -- priv->ds->num_ports = QCA8K_NUM_PORTS; -- priv->ds->priv = priv; -- priv->ds->ops = &qca8k_switch_ops; -- mutex_init(&priv->reg_mutex); -- dev_set_drvdata(&mdiodev->dev, priv); -- -- return dsa_register_switch(priv->ds); --} -- --static void --qca8k_sw_remove(struct mdio_device *mdiodev) --{ -- struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev); -- int i; -- -- if (!priv) -- return; -- -- for (i = 0; i < QCA8K_NUM_PORTS; i++) -- qca8k_port_set_status(priv, i, 0); -- -- dsa_unregister_switch(priv->ds); -- -- dev_set_drvdata(&mdiodev->dev, NULL); --} -- --static void qca8k_sw_shutdown(struct mdio_device *mdiodev) --{ -- struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev); -- -- if (!priv) -- return; -- -- dsa_switch_shutdown(priv->ds); -- -- dev_set_drvdata(&mdiodev->dev, NULL); --} -- --#ifdef CONFIG_PM_SLEEP --static void --qca8k_set_pm(struct qca8k_priv *priv, int enable) --{ -- int port; -- -- for (port = 0; port < QCA8K_NUM_PORTS; port++) { -- /* Do not enable on resume if the port was -- * disabled before. 
-- */ -- if (!(priv->port_enabled_map & BIT(port))) -- continue; -- -- qca8k_port_set_status(priv, port, enable); -- } --} -- --static int qca8k_suspend(struct device *dev) --{ -- struct qca8k_priv *priv = dev_get_drvdata(dev); -- -- qca8k_set_pm(priv, 0); -- -- return dsa_switch_suspend(priv->ds); --} -- --static int qca8k_resume(struct device *dev) --{ -- struct qca8k_priv *priv = dev_get_drvdata(dev); -- -- qca8k_set_pm(priv, 1); -- -- return dsa_switch_resume(priv->ds); --} --#endif /* CONFIG_PM_SLEEP */ -- --static SIMPLE_DEV_PM_OPS(qca8k_pm_ops, -- qca8k_suspend, qca8k_resume); -- --static const struct qca8k_info_ops qca8xxx_ops = { -- .autocast_mib = qca8k_get_ethtool_stats_eth, --}; -- --static const struct qca8k_match_data qca8327 = { -- .id = QCA8K_ID_QCA8327, -- .reduced_package = true, -- .mib_count = QCA8K_QCA832X_MIB_COUNT, -- .ops = &qca8xxx_ops, --}; -- --static const struct qca8k_match_data qca8328 = { -- .id = QCA8K_ID_QCA8327, -- .mib_count = QCA8K_QCA832X_MIB_COUNT, -- .ops = &qca8xxx_ops, --}; -- --static const struct qca8k_match_data qca833x = { -- .id = QCA8K_ID_QCA8337, -- .mib_count = QCA8K_QCA833X_MIB_COUNT, -- .ops = &qca8xxx_ops, --}; -- --static const struct of_device_id qca8k_of_match[] = { -- { .compatible = "qca,qca8327", .data = &qca8327 }, -- { .compatible = "qca,qca8328", .data = &qca8328 }, -- { .compatible = "qca,qca8334", .data = &qca833x }, -- { .compatible = "qca,qca8337", .data = &qca833x }, -- { /* sentinel */ }, --}; -- --static struct mdio_driver qca8kmdio_driver = { -- .probe = qca8k_sw_probe, -- .remove = qca8k_sw_remove, -- .shutdown = qca8k_sw_shutdown, -- .mdiodrv.driver = { -- .name = "qca8k", -- .of_match_table = qca8k_of_match, -- .pm = &qca8k_pm_ops, -- }, --}; -- --mdio_module_driver(qca8kmdio_driver); -- --MODULE_AUTHOR("Mathieu Olivari, John Crispin "); --MODULE_DESCRIPTION("Driver for QCA8K ethernet switch family"); --MODULE_LICENSE("GPL v2"); --MODULE_ALIAS("platform:qca8k"); ---- /dev/null -+++ b/drivers/net/dsa/qca/qca8k-8xxx.c -@@ -0,0 +1,3186 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (C) 2009 Felix Fietkau -+ * Copyright (C) 2011-2012 Gabor Juhos -+ * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved. 
-+ * Copyright (c) 2016 John Crispin -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "qca8k.h" -+ -+static void -+qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page) -+{ -+ regaddr >>= 1; -+ *r1 = regaddr & 0x1e; -+ -+ regaddr >>= 5; -+ *r2 = regaddr & 0x7; -+ -+ regaddr >>= 3; -+ *page = regaddr & 0x3ff; -+} -+ -+static int -+qca8k_set_lo(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 lo) -+{ -+ u16 *cached_lo = &priv->mdio_cache.lo; -+ struct mii_bus *bus = priv->bus; -+ int ret; -+ -+ if (lo == *cached_lo) -+ return 0; -+ -+ ret = bus->write(bus, phy_id, regnum, lo); -+ if (ret < 0) -+ dev_err_ratelimited(&bus->dev, -+ "failed to write qca8k 32bit lo register\n"); -+ -+ *cached_lo = lo; -+ return 0; -+} -+ -+static int -+qca8k_set_hi(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 hi) -+{ -+ u16 *cached_hi = &priv->mdio_cache.hi; -+ struct mii_bus *bus = priv->bus; -+ int ret; -+ -+ if (hi == *cached_hi) -+ return 0; -+ -+ ret = bus->write(bus, phy_id, regnum, hi); -+ if (ret < 0) -+ dev_err_ratelimited(&bus->dev, -+ "failed to write qca8k 32bit hi register\n"); -+ -+ *cached_hi = hi; -+ return 0; -+} -+ -+static int -+qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val) -+{ -+ int ret; -+ -+ ret = bus->read(bus, phy_id, regnum); -+ if (ret >= 0) { -+ *val = ret; -+ ret = bus->read(bus, phy_id, regnum + 1); -+ *val |= ret << 16; -+ } -+ -+ if (ret < 0) { -+ dev_err_ratelimited(&bus->dev, -+ "failed to read qca8k 32bit register\n"); -+ *val = 0; -+ return ret; -+ } -+ -+ return 0; -+} -+ -+static void -+qca8k_mii_write32(struct qca8k_priv *priv, int phy_id, u32 regnum, u32 val) -+{ -+ u16 lo, hi; -+ int ret; -+ -+ lo = val & 0xffff; -+ hi = (u16)(val >> 16); -+ -+ ret = qca8k_set_lo(priv, phy_id, regnum, lo); -+ if (ret >= 0) -+ ret = qca8k_set_hi(priv, phy_id, regnum + 1, hi); -+} -+ -+static int -+qca8k_set_page(struct qca8k_priv *priv, u16 page) -+{ -+ u16 *cached_page = &priv->mdio_cache.page; -+ struct mii_bus *bus = priv->bus; -+ int ret; -+ -+ if (page == *cached_page) -+ return 0; -+ -+ ret = bus->write(bus, 0x18, 0, page); -+ if (ret < 0) { -+ dev_err_ratelimited(&bus->dev, -+ "failed to set qca8k page\n"); -+ return ret; -+ } -+ -+ *cached_page = page; -+ usleep_range(1000, 2000); -+ return 0; -+} -+ -+static int -+qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val) -+{ -+ return regmap_read(priv->regmap, reg, val); -+} -+ -+static int -+qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val) -+{ -+ return regmap_write(priv->regmap, reg, val); -+} -+ -+static int -+qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val) -+{ -+ return regmap_update_bits(priv->regmap, reg, mask, write_val); -+} -+ -+static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb) -+{ -+ struct qca8k_mgmt_eth_data *mgmt_eth_data; -+ struct qca8k_priv *priv = ds->priv; -+ struct qca_mgmt_ethhdr *mgmt_ethhdr; -+ u8 len, cmd; -+ -+ mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb_mac_header(skb); -+ mgmt_eth_data = &priv->mgmt_eth_data; -+ -+ cmd = FIELD_GET(QCA_HDR_MGMT_CMD, mgmt_ethhdr->command); -+ len = FIELD_GET(QCA_HDR_MGMT_LENGTH, mgmt_ethhdr->command); -+ -+ /* Make sure the seq match the requested packet */ -+ if (mgmt_ethhdr->seq == mgmt_eth_data->seq) -+ mgmt_eth_data->ack = true; -+ -+ if (cmd == MDIO_READ) { -+ mgmt_eth_data->data[0] = mgmt_ethhdr->mdio_data; -+ -+ /* Get the rest of the 
12 byte of data. -+ * The read/write function will extract the requested data. -+ */ -+ if (len > QCA_HDR_MGMT_DATA1_LEN) -+ memcpy(mgmt_eth_data->data + 1, skb->data, -+ QCA_HDR_MGMT_DATA2_LEN); -+ } -+ -+ complete(&mgmt_eth_data->rw_done); -+} -+ -+static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *val, -+ int priority, unsigned int len) -+{ -+ struct qca_mgmt_ethhdr *mgmt_ethhdr; -+ unsigned int real_len; -+ struct sk_buff *skb; -+ u32 *data2; -+ u16 hdr; -+ -+ skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN); -+ if (!skb) -+ return NULL; -+ -+ /* Max value for len reg is 15 (0xf) but the switch actually return 16 byte -+ * Actually for some reason the steps are: -+ * 0: nothing -+ * 1-4: first 4 byte -+ * 5-6: first 12 byte -+ * 7-15: all 16 byte -+ */ -+ if (len == 16) -+ real_len = 15; -+ else -+ real_len = len; -+ -+ skb_reset_mac_header(skb); -+ skb_set_network_header(skb, skb->len); -+ -+ mgmt_ethhdr = skb_push(skb, QCA_HDR_MGMT_HEADER_LEN + QCA_HDR_LEN); -+ -+ hdr = FIELD_PREP(QCA_HDR_XMIT_VERSION, QCA_HDR_VERSION); -+ hdr |= FIELD_PREP(QCA_HDR_XMIT_PRIORITY, priority); -+ hdr |= QCA_HDR_XMIT_FROM_CPU; -+ hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(0)); -+ hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG); -+ -+ mgmt_ethhdr->command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg); -+ mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len); -+ mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd); -+ mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE, -+ QCA_HDR_MGMT_CHECK_CODE_VAL); -+ -+ if (cmd == MDIO_WRITE) -+ mgmt_ethhdr->mdio_data = *val; -+ -+ mgmt_ethhdr->hdr = htons(hdr); -+ -+ data2 = skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN); -+ if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN) -+ memcpy(data2, val + 1, len - QCA_HDR_MGMT_DATA1_LEN); -+ -+ return skb; -+} -+ -+static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num) -+{ -+ struct qca_mgmt_ethhdr *mgmt_ethhdr; -+ -+ mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb->data; -+ mgmt_ethhdr->seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num); -+} -+ -+static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len) -+{ -+ struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data; -+ struct sk_buff *skb; -+ bool ack; -+ int ret; -+ -+ skb = qca8k_alloc_mdio_header(MDIO_READ, reg, NULL, -+ QCA8K_ETHERNET_MDIO_PRIORITY, len); -+ if (!skb) -+ return -ENOMEM; -+ -+ mutex_lock(&mgmt_eth_data->mutex); -+ -+ /* Check mgmt_master if is operational */ -+ if (!priv->mgmt_master) { -+ kfree_skb(skb); -+ mutex_unlock(&mgmt_eth_data->mutex); -+ return -EINVAL; -+ } -+ -+ skb->dev = priv->mgmt_master; -+ -+ reinit_completion(&mgmt_eth_data->rw_done); -+ -+ /* Increment seq_num and set it in the mdio pkt */ -+ mgmt_eth_data->seq++; -+ qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq); -+ mgmt_eth_data->ack = false; -+ -+ dev_queue_xmit(skb); -+ -+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done, -+ msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT)); -+ -+ *val = mgmt_eth_data->data[0]; -+ if (len > QCA_HDR_MGMT_DATA1_LEN) -+ memcpy(val + 1, mgmt_eth_data->data + 1, len - QCA_HDR_MGMT_DATA1_LEN); -+ -+ ack = mgmt_eth_data->ack; -+ -+ mutex_unlock(&mgmt_eth_data->mutex); -+ -+ if (ret <= 0) -+ return -ETIMEDOUT; -+ -+ if (!ack) -+ return -EINVAL; -+ -+ return 0; -+} -+ -+static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len) -+{ -+ struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data; -+ 
struct sk_buff *skb; -+ bool ack; -+ int ret; -+ -+ skb = qca8k_alloc_mdio_header(MDIO_WRITE, reg, val, -+ QCA8K_ETHERNET_MDIO_PRIORITY, len); -+ if (!skb) -+ return -ENOMEM; -+ -+ mutex_lock(&mgmt_eth_data->mutex); -+ -+ /* Check mgmt_master if is operational */ -+ if (!priv->mgmt_master) { -+ kfree_skb(skb); -+ mutex_unlock(&mgmt_eth_data->mutex); -+ return -EINVAL; -+ } -+ -+ skb->dev = priv->mgmt_master; -+ -+ reinit_completion(&mgmt_eth_data->rw_done); -+ -+ /* Increment seq_num and set it in the mdio pkt */ -+ mgmt_eth_data->seq++; -+ qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq); -+ mgmt_eth_data->ack = false; -+ -+ dev_queue_xmit(skb); -+ -+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done, -+ msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT)); -+ -+ ack = mgmt_eth_data->ack; -+ -+ mutex_unlock(&mgmt_eth_data->mutex); -+ -+ if (ret <= 0) -+ return -ETIMEDOUT; -+ -+ if (!ack) -+ return -EINVAL; -+ -+ return 0; -+} -+ -+static int -+qca8k_regmap_update_bits_eth(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val) -+{ -+ u32 val = 0; -+ int ret; -+ -+ ret = qca8k_read_eth(priv, reg, &val, sizeof(val)); -+ if (ret) -+ return ret; -+ -+ val &= ~mask; -+ val |= write_val; -+ -+ return qca8k_write_eth(priv, reg, &val, sizeof(val)); -+} -+ -+static int -+qca8k_bulk_read(struct qca8k_priv *priv, u32 reg, u32 *val, int len) -+{ -+ int i, count = len / sizeof(u32), ret; -+ -+ if (priv->mgmt_master && !qca8k_read_eth(priv, reg, val, len)) -+ return 0; -+ -+ for (i = 0; i < count; i++) { -+ ret = regmap_read(priv->regmap, reg + (i * 4), val + i); -+ if (ret < 0) -+ return ret; -+ } -+ -+ return 0; -+} -+ -+static int -+qca8k_bulk_write(struct qca8k_priv *priv, u32 reg, u32 *val, int len) -+{ -+ int i, count = len / sizeof(u32), ret; -+ u32 tmp; -+ -+ if (priv->mgmt_master && !qca8k_write_eth(priv, reg, val, len)) -+ return 0; -+ -+ for (i = 0; i < count; i++) { -+ tmp = val[i]; -+ -+ ret = regmap_write(priv->regmap, reg + (i * 4), tmp); -+ if (ret < 0) -+ return ret; -+ } -+ -+ return 0; -+} -+ -+static int -+qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val) -+{ -+ struct qca8k_priv *priv = (struct qca8k_priv *)ctx; -+ struct mii_bus *bus = priv->bus; -+ u16 r1, r2, page; -+ int ret; -+ -+ if (!qca8k_read_eth(priv, reg, val, sizeof(*val))) -+ return 0; -+ -+ qca8k_split_addr(reg, &r1, &r2, &page); -+ -+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); -+ -+ ret = qca8k_set_page(priv, page); -+ if (ret < 0) -+ goto exit; -+ -+ ret = qca8k_mii_read32(bus, 0x10 | r2, r1, val); -+ -+exit: -+ mutex_unlock(&bus->mdio_lock); -+ return ret; -+} -+ -+static int -+qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val) -+{ -+ struct qca8k_priv *priv = (struct qca8k_priv *)ctx; -+ struct mii_bus *bus = priv->bus; -+ u16 r1, r2, page; -+ int ret; -+ -+ if (!qca8k_write_eth(priv, reg, &val, sizeof(val))) -+ return 0; -+ -+ qca8k_split_addr(reg, &r1, &r2, &page); -+ -+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); -+ -+ ret = qca8k_set_page(priv, page); -+ if (ret < 0) -+ goto exit; -+ -+ qca8k_mii_write32(priv, 0x10 | r2, r1, val); -+ -+exit: -+ mutex_unlock(&bus->mdio_lock); -+ return ret; -+} -+ -+static int -+qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val) -+{ -+ struct qca8k_priv *priv = (struct qca8k_priv *)ctx; -+ struct mii_bus *bus = priv->bus; -+ u16 r1, r2, page; -+ u32 val; -+ int ret; -+ -+ if (!qca8k_regmap_update_bits_eth(priv, reg, mask, write_val)) -+ return 0; -+ -+ qca8k_split_addr(reg, &r1, &r2, &page); -+ -+ 
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); -+ -+ ret = qca8k_set_page(priv, page); -+ if (ret < 0) -+ goto exit; -+ -+ ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val); -+ if (ret < 0) -+ goto exit; -+ -+ val &= ~mask; -+ val |= write_val; -+ qca8k_mii_write32(priv, 0x10 | r2, r1, val); -+ -+exit: -+ mutex_unlock(&bus->mdio_lock); -+ -+ return ret; -+} -+ -+static const struct regmap_range qca8k_readable_ranges[] = { -+ regmap_reg_range(0x0000, 0x00e4), /* Global control */ -+ regmap_reg_range(0x0100, 0x0168), /* EEE control */ -+ regmap_reg_range(0x0200, 0x0270), /* Parser control */ -+ regmap_reg_range(0x0400, 0x0454), /* ACL */ -+ regmap_reg_range(0x0600, 0x0718), /* Lookup */ -+ regmap_reg_range(0x0800, 0x0b70), /* QM */ -+ regmap_reg_range(0x0c00, 0x0c80), /* PKT */ -+ regmap_reg_range(0x0e00, 0x0e98), /* L3 */ -+ regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */ -+ regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */ -+ regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */ -+ regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */ -+ regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */ -+ regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */ -+ regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */ -+ -+}; -+ -+static const struct regmap_access_table qca8k_readable_table = { -+ .yes_ranges = qca8k_readable_ranges, -+ .n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges), -+}; -+ -+static struct regmap_config qca8k_regmap_config = { -+ .reg_bits = 16, -+ .val_bits = 32, -+ .reg_stride = 4, -+ .max_register = 0x16ac, /* end MIB - Port6 range */ -+ .reg_read = qca8k_regmap_read, -+ .reg_write = qca8k_regmap_write, -+ .reg_update_bits = qca8k_regmap_update_bits, -+ .rd_table = &qca8k_readable_table, -+ .disable_locking = true, /* Locking is handled by qca8k read/write */ -+ .cache_type = REGCACHE_NONE, /* Explicitly disable CACHE */ -+}; -+ -+static int -+qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask) -+{ -+ u32 val; -+ -+ return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0, -+ QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC); -+} -+ -+static int -+qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb) -+{ -+ u32 reg[3]; -+ int ret; -+ -+ /* load the ARL table into an array */ -+ ret = qca8k_bulk_read(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg)); -+ if (ret) -+ return ret; -+ -+ /* vid - 83:72 */ -+ fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]); -+ /* aging - 67:64 */ -+ fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]); -+ /* portmask - 54:48 */ -+ fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]); -+ /* mac - 47:0 */ -+ fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]); -+ fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]); -+ fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]); -+ fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]); -+ fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]); -+ fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]); -+ -+ return 0; -+} -+ -+static void -+qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask, const u8 *mac, -+ u8 aging) -+{ -+ u32 reg[3] = { 0 }; -+ -+ /* vid - 83:72 */ -+ reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid); -+ /* aging - 67:64 */ -+ reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging); -+ /* portmask - 54:48 */ -+ reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask); -+ /* mac - 47:0 */ -+ reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]); -+ reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]); -+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]); -+ reg[0] |= 
FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]); -+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]); -+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]); -+ -+ /* load the array into the ARL table */ -+ qca8k_bulk_write(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg)); -+} -+ -+static int -+qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd, int port) -+{ -+ u32 reg; -+ int ret; -+ -+ /* Set the command and FDB index */ -+ reg = QCA8K_ATU_FUNC_BUSY; -+ reg |= cmd; -+ if (port >= 0) { -+ reg |= QCA8K_ATU_FUNC_PORT_EN; -+ reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port); -+ } -+ -+ /* Write the function register triggering the table access */ -+ ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg); -+ if (ret) -+ return ret; -+ -+ /* wait for completion */ -+ ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY); -+ if (ret) -+ return ret; -+ -+ /* Check for table full violation when adding an entry */ -+ if (cmd == QCA8K_FDB_LOAD) { -+ ret = qca8k_read(priv, QCA8K_REG_ATU_FUNC, ®); -+ if (ret < 0) -+ return ret; -+ if (reg & QCA8K_ATU_FUNC_FULL) -+ return -1; -+ } -+ -+ return 0; -+} -+ -+static int -+qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb, int port) -+{ -+ int ret; -+ -+ qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging); -+ ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port); -+ if (ret < 0) -+ return ret; -+ -+ return qca8k_fdb_read(priv, fdb); -+} -+ -+static int -+qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac, u16 port_mask, -+ u16 vid, u8 aging) -+{ -+ int ret; -+ -+ mutex_lock(&priv->reg_mutex); -+ qca8k_fdb_write(priv, vid, port_mask, mac, aging); -+ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1); -+ mutex_unlock(&priv->reg_mutex); -+ -+ return ret; -+} -+ -+static int -+qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac, u16 port_mask, u16 vid) -+{ -+ int ret; -+ -+ mutex_lock(&priv->reg_mutex); -+ qca8k_fdb_write(priv, vid, port_mask, mac, 0); -+ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1); -+ mutex_unlock(&priv->reg_mutex); -+ -+ return ret; -+} -+ -+static void -+qca8k_fdb_flush(struct qca8k_priv *priv) -+{ -+ mutex_lock(&priv->reg_mutex); -+ qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1); -+ mutex_unlock(&priv->reg_mutex); -+} -+ -+static int -+qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask, -+ const u8 *mac, u16 vid) -+{ -+ struct qca8k_fdb fdb = { 0 }; -+ int ret; -+ -+ mutex_lock(&priv->reg_mutex); -+ -+ qca8k_fdb_write(priv, vid, 0, mac, 0); -+ ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1); -+ if (ret < 0) -+ goto exit; -+ -+ ret = qca8k_fdb_read(priv, &fdb); -+ if (ret < 0) -+ goto exit; -+ -+ /* Rule exist. Delete first */ -+ if (!fdb.aging) { -+ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1); -+ if (ret) -+ goto exit; -+ } -+ -+ /* Add port to fdb portmask */ -+ fdb.port_mask |= port_mask; -+ -+ qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging); -+ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1); -+ -+exit: -+ mutex_unlock(&priv->reg_mutex); -+ return ret; -+} -+ -+static int -+qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask, -+ const u8 *mac, u16 vid) -+{ -+ struct qca8k_fdb fdb = { 0 }; -+ int ret; -+ -+ mutex_lock(&priv->reg_mutex); -+ -+ qca8k_fdb_write(priv, vid, 0, mac, 0); -+ ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1); -+ if (ret < 0) -+ goto exit; -+ -+ /* Rule doesn't exist. Why delete? 
*/ -+ if (!fdb.aging) { -+ ret = -EINVAL; -+ goto exit; -+ } -+ -+ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1); -+ if (ret) -+ goto exit; -+ -+ /* Only port in the rule is this port. Don't re insert */ -+ if (fdb.port_mask == port_mask) -+ goto exit; -+ -+ /* Remove port from port mask */ -+ fdb.port_mask &= ~port_mask; -+ -+ qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging); -+ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1); -+ -+exit: -+ mutex_unlock(&priv->reg_mutex); -+ return ret; -+} -+ -+static int -+qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid) -+{ -+ u32 reg; -+ int ret; -+ -+ /* Set the command and VLAN index */ -+ reg = QCA8K_VTU_FUNC1_BUSY; -+ reg |= cmd; -+ reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid); -+ -+ /* Write the function register triggering the table access */ -+ ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg); -+ if (ret) -+ return ret; -+ -+ /* wait for completion */ -+ ret = qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY); -+ if (ret) -+ return ret; -+ -+ /* Check for table full violation when adding an entry */ -+ if (cmd == QCA8K_VLAN_LOAD) { -+ ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC1, ®); -+ if (ret < 0) -+ return ret; -+ if (reg & QCA8K_VTU_FUNC1_FULL) -+ return -ENOMEM; -+ } -+ -+ return 0; -+} -+ -+static int -+qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid, bool untagged) -+{ -+ u32 reg; -+ int ret; -+ -+ /* -+ We do the right thing with VLAN 0 and treat it as untagged while -+ preserving the tag on egress. -+ */ -+ if (vid == 0) -+ return 0; -+ -+ mutex_lock(&priv->reg_mutex); -+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid); -+ if (ret < 0) -+ goto out; -+ -+ ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, ®); -+ if (ret < 0) -+ goto out; -+ reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN; -+ reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port); -+ if (untagged) -+ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port); -+ else -+ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port); -+ -+ ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg); -+ if (ret) -+ goto out; -+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid); -+ -+out: -+ mutex_unlock(&priv->reg_mutex); -+ -+ return ret; -+} -+ -+static int -+qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid) -+{ -+ u32 reg, mask; -+ int ret, i; -+ bool del; -+ -+ mutex_lock(&priv->reg_mutex); -+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid); -+ if (ret < 0) -+ goto out; -+ -+ ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, ®); -+ if (ret < 0) -+ goto out; -+ reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port); -+ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port); -+ -+ /* Check if we're the last member to be removed */ -+ del = true; -+ for (i = 0; i < QCA8K_NUM_PORTS; i++) { -+ mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i); -+ -+ if ((reg & mask) != mask) { -+ del = false; -+ break; -+ } -+ } -+ -+ if (del) { -+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid); -+ } else { -+ ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg); -+ if (ret) -+ goto out; -+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid); -+ } -+ -+out: -+ mutex_unlock(&priv->reg_mutex); -+ -+ return ret; -+} -+ -+static int -+qca8k_mib_init(struct qca8k_priv *priv) -+{ -+ int ret; -+ -+ mutex_lock(&priv->reg_mutex); -+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB, -+ QCA8K_MIB_FUNC | QCA8K_MIB_BUSY, -+ FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_FLUSH) | -+ QCA8K_MIB_BUSY); -+ if (ret) -+ goto exit; -+ -+ ret = qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY); -+ if 
(ret) -+ goto exit; -+ -+ ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP); -+ if (ret) -+ goto exit; -+ -+ ret = qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB); -+ -+exit: -+ mutex_unlock(&priv->reg_mutex); -+ return ret; -+} -+ -+static void -+qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable) -+{ -+ u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC; -+ -+ /* Port 0 and 6 have no internal PHY */ -+ if (port > 0 && port < 6) -+ mask |= QCA8K_PORT_STATUS_LINK_AUTO; -+ -+ if (enable) -+ regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask); -+ else -+ regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask); -+} -+ -+static int -+qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data, -+ struct sk_buff *read_skb, u32 *val) -+{ -+ struct sk_buff *skb = skb_copy(read_skb, GFP_KERNEL); -+ bool ack; -+ int ret; -+ -+ reinit_completion(&mgmt_eth_data->rw_done); -+ -+ /* Increment seq_num and set it in the copy pkt */ -+ mgmt_eth_data->seq++; -+ qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq); -+ mgmt_eth_data->ack = false; -+ -+ dev_queue_xmit(skb); -+ -+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done, -+ QCA8K_ETHERNET_TIMEOUT); -+ -+ ack = mgmt_eth_data->ack; -+ -+ if (ret <= 0) -+ return -ETIMEDOUT; -+ -+ if (!ack) -+ return -EINVAL; -+ -+ *val = mgmt_eth_data->data[0]; -+ -+ return 0; -+} -+ -+static int -+qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy, -+ int regnum, u16 data) -+{ -+ struct sk_buff *write_skb, *clear_skb, *read_skb; -+ struct qca8k_mgmt_eth_data *mgmt_eth_data; -+ u32 write_val, clear_val = 0, val; -+ struct net_device *mgmt_master; -+ int ret, ret1; -+ bool ack; -+ -+ if (regnum >= QCA8K_MDIO_MASTER_MAX_REG) -+ return -EINVAL; -+ -+ mgmt_eth_data = &priv->mgmt_eth_data; -+ -+ write_val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN | -+ QCA8K_MDIO_MASTER_PHY_ADDR(phy) | -+ QCA8K_MDIO_MASTER_REG_ADDR(regnum); -+ -+ if (read) { -+ write_val |= QCA8K_MDIO_MASTER_READ; -+ } else { -+ write_val |= QCA8K_MDIO_MASTER_WRITE; -+ write_val |= QCA8K_MDIO_MASTER_DATA(data); -+ } -+ -+ /* Prealloc all the needed skb before the lock */ -+ write_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &write_val, -+ QCA8K_ETHERNET_PHY_PRIORITY, sizeof(write_val)); -+ if (!write_skb) -+ return -ENOMEM; -+ -+ clear_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &clear_val, -+ QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val)); -+ if (!clear_skb) { -+ ret = -ENOMEM; -+ goto err_clear_skb; -+ } -+ -+ read_skb = qca8k_alloc_mdio_header(MDIO_READ, QCA8K_MDIO_MASTER_CTRL, &clear_val, -+ QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val)); -+ if (!read_skb) { -+ ret = -ENOMEM; -+ goto err_read_skb; -+ } -+ -+ /* Actually start the request: -+ * 1. Send mdio master packet -+ * 2. Busy Wait for mdio master command -+ * 3. Get the data if we are reading -+ * 4. 
Reset the mdio master (even with error) -+ */ -+ mutex_lock(&mgmt_eth_data->mutex); -+ -+ /* Check if mgmt_master is operational */ -+ mgmt_master = priv->mgmt_master; -+ if (!mgmt_master) { -+ mutex_unlock(&mgmt_eth_data->mutex); -+ ret = -EINVAL; -+ goto err_mgmt_master; -+ } -+ -+ read_skb->dev = mgmt_master; -+ clear_skb->dev = mgmt_master; -+ write_skb->dev = mgmt_master; -+ -+ reinit_completion(&mgmt_eth_data->rw_done); -+ -+ /* Increment seq_num and set it in the write pkt */ -+ mgmt_eth_data->seq++; -+ qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq); -+ mgmt_eth_data->ack = false; -+ -+ dev_queue_xmit(write_skb); -+ -+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done, -+ QCA8K_ETHERNET_TIMEOUT); -+ -+ ack = mgmt_eth_data->ack; -+ -+ if (ret <= 0) { -+ ret = -ETIMEDOUT; -+ kfree_skb(read_skb); -+ goto exit; -+ } -+ -+ if (!ack) { -+ ret = -EINVAL; -+ kfree_skb(read_skb); -+ goto exit; -+ } -+ -+ ret = read_poll_timeout(qca8k_phy_eth_busy_wait, ret1, -+ !(val & QCA8K_MDIO_MASTER_BUSY), 0, -+ QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false, -+ mgmt_eth_data, read_skb, &val); -+ -+ if (ret < 0 && ret1 < 0) { -+ ret = ret1; -+ goto exit; -+ } -+ -+ if (read) { -+ reinit_completion(&mgmt_eth_data->rw_done); -+ -+ /* Increment seq_num and set it in the read pkt */ -+ mgmt_eth_data->seq++; -+ qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq); -+ mgmt_eth_data->ack = false; -+ -+ dev_queue_xmit(read_skb); -+ -+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done, -+ QCA8K_ETHERNET_TIMEOUT); -+ -+ ack = mgmt_eth_data->ack; -+ -+ if (ret <= 0) { -+ ret = -ETIMEDOUT; -+ goto exit; -+ } -+ -+ if (!ack) { -+ ret = -EINVAL; -+ goto exit; -+ } -+ -+ ret = mgmt_eth_data->data[0] & QCA8K_MDIO_MASTER_DATA_MASK; -+ } else { -+ kfree_skb(read_skb); -+ } -+exit: -+ reinit_completion(&mgmt_eth_data->rw_done); -+ -+ /* Increment seq_num and set it in the clear pkt */ -+ mgmt_eth_data->seq++; -+ qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq); -+ mgmt_eth_data->ack = false; -+ -+ dev_queue_xmit(clear_skb); -+ -+ wait_for_completion_timeout(&mgmt_eth_data->rw_done, -+ QCA8K_ETHERNET_TIMEOUT); -+ -+ mutex_unlock(&mgmt_eth_data->mutex); -+ -+ return ret; -+ -+ /* Error handling before lock */ -+err_mgmt_master: -+ kfree_skb(read_skb); -+err_read_skb: -+ kfree_skb(clear_skb); -+err_clear_skb: -+ kfree_skb(write_skb); -+ -+ return ret; -+} -+ -+static u32 -+qca8k_port_to_phy(int port) -+{ -+ /* From Andrew Lunn: -+ * Port 0 has no internal phy. -+ * Port 1 has an internal PHY at MDIO address 0. -+ * Port 2 has an internal PHY at MDIO address 1. -+ * ... -+ * Port 5 has an internal PHY at MDIO address 4. -+ * Port 6 has no internal PHY. 
-+ */ -+ -+ return port - 1; -+} -+ -+static int -+qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask) -+{ -+ u16 r1, r2, page; -+ u32 val; -+ int ret, ret1; -+ -+ qca8k_split_addr(reg, &r1, &r2, &page); -+ -+ ret = read_poll_timeout(qca8k_mii_read32, ret1, !(val & mask), 0, -+ QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false, -+ bus, 0x10 | r2, r1, &val); -+ -+ /* Check if qca8k_read has failed for a different reason -+ * before returnting -ETIMEDOUT -+ */ -+ if (ret < 0 && ret1 < 0) -+ return ret1; -+ -+ return ret; -+} -+ -+static int -+qca8k_mdio_write(struct qca8k_priv *priv, int phy, int regnum, u16 data) -+{ -+ struct mii_bus *bus = priv->bus; -+ u16 r1, r2, page; -+ u32 val; -+ int ret; -+ -+ if (regnum >= QCA8K_MDIO_MASTER_MAX_REG) -+ return -EINVAL; -+ -+ val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN | -+ QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) | -+ QCA8K_MDIO_MASTER_REG_ADDR(regnum) | -+ QCA8K_MDIO_MASTER_DATA(data); -+ -+ qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page); -+ -+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); -+ -+ ret = qca8k_set_page(priv, page); -+ if (ret) -+ goto exit; -+ -+ qca8k_mii_write32(priv, 0x10 | r2, r1, val); -+ -+ ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL, -+ QCA8K_MDIO_MASTER_BUSY); -+ -+exit: -+ /* even if the busy_wait timeouts try to clear the MASTER_EN */ -+ qca8k_mii_write32(priv, 0x10 | r2, r1, 0); -+ -+ mutex_unlock(&bus->mdio_lock); -+ -+ return ret; -+} -+ -+static int -+qca8k_mdio_read(struct qca8k_priv *priv, int phy, int regnum) -+{ -+ struct mii_bus *bus = priv->bus; -+ u16 r1, r2, page; -+ u32 val; -+ int ret; -+ -+ if (regnum >= QCA8K_MDIO_MASTER_MAX_REG) -+ return -EINVAL; -+ -+ val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN | -+ QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) | -+ QCA8K_MDIO_MASTER_REG_ADDR(regnum); -+ -+ qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page); -+ -+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); -+ -+ ret = qca8k_set_page(priv, page); -+ if (ret) -+ goto exit; -+ -+ qca8k_mii_write32(priv, 0x10 | r2, r1, val); -+ -+ ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL, -+ QCA8K_MDIO_MASTER_BUSY); -+ if (ret) -+ goto exit; -+ -+ ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val); -+ -+exit: -+ /* even if the busy_wait timeouts try to clear the MASTER_EN */ -+ qca8k_mii_write32(priv, 0x10 | r2, r1, 0); -+ -+ mutex_unlock(&bus->mdio_lock); -+ -+ if (ret >= 0) -+ ret = val & QCA8K_MDIO_MASTER_DATA_MASK; -+ -+ return ret; -+} -+ -+static int -+qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data) -+{ -+ struct qca8k_priv *priv = slave_bus->priv; -+ int ret; -+ -+ /* Use mdio Ethernet when available, fallback to legacy one on error */ -+ ret = qca8k_phy_eth_command(priv, false, phy, regnum, data); -+ if (!ret) -+ return 0; -+ -+ return qca8k_mdio_write(priv, phy, regnum, data); -+} -+ -+static int -+qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum) -+{ -+ struct qca8k_priv *priv = slave_bus->priv; -+ int ret; -+ -+ /* Use mdio Ethernet when available, fallback to legacy one on error */ -+ ret = qca8k_phy_eth_command(priv, true, phy, regnum, 0); -+ if (ret >= 0) -+ return ret; -+ -+ ret = qca8k_mdio_read(priv, phy, regnum); -+ -+ if (ret < 0) -+ return 0xffff; -+ -+ return ret; -+} -+ -+static int -+qca8k_legacy_mdio_write(struct mii_bus *slave_bus, int port, int regnum, u16 data) -+{ -+ port = qca8k_port_to_phy(port) % PHY_MAX_ADDR; -+ -+ return 
qca8k_internal_mdio_write(slave_bus, port, regnum, data); -+} -+ -+static int -+qca8k_legacy_mdio_read(struct mii_bus *slave_bus, int port, int regnum) -+{ -+ port = qca8k_port_to_phy(port) % PHY_MAX_ADDR; -+ -+ return qca8k_internal_mdio_read(slave_bus, port, regnum); -+} -+ -+static int -+qca8k_mdio_register(struct qca8k_priv *priv) -+{ -+ struct dsa_switch *ds = priv->ds; -+ struct device_node *mdio; -+ struct mii_bus *bus; -+ -+ bus = devm_mdiobus_alloc(ds->dev); -+ if (!bus) -+ return -ENOMEM; -+ -+ bus->priv = (void *)priv; -+ snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d.%d", -+ ds->dst->index, ds->index); -+ bus->parent = ds->dev; -+ bus->phy_mask = ~ds->phys_mii_mask; -+ ds->slave_mii_bus = bus; -+ -+ /* Check if the devicetree declare the port:phy mapping */ -+ mdio = of_get_child_by_name(priv->dev->of_node, "mdio"); -+ if (of_device_is_available(mdio)) { -+ bus->name = "qca8k slave mii"; -+ bus->read = qca8k_internal_mdio_read; -+ bus->write = qca8k_internal_mdio_write; -+ return devm_of_mdiobus_register(priv->dev, bus, mdio); -+ } -+ -+ /* If a mapping can't be found the legacy mapping is used, -+ * using the qca8k_port_to_phy function -+ */ -+ bus->name = "qca8k-legacy slave mii"; -+ bus->read = qca8k_legacy_mdio_read; -+ bus->write = qca8k_legacy_mdio_write; -+ return devm_mdiobus_register(priv->dev, bus); -+} -+ -+static int -+qca8k_setup_mdio_bus(struct qca8k_priv *priv) -+{ -+ u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg; -+ struct device_node *ports, *port; -+ phy_interface_t mode; -+ int err; -+ -+ ports = of_get_child_by_name(priv->dev->of_node, "ports"); -+ if (!ports) -+ ports = of_get_child_by_name(priv->dev->of_node, "ethernet-ports"); -+ -+ if (!ports) -+ return -EINVAL; -+ -+ for_each_available_child_of_node(ports, port) { -+ err = of_property_read_u32(port, "reg", ®); -+ if (err) { -+ of_node_put(port); -+ of_node_put(ports); -+ return err; -+ } -+ -+ if (!dsa_is_user_port(priv->ds, reg)) -+ continue; -+ -+ of_get_phy_mode(port, &mode); -+ -+ if (of_property_read_bool(port, "phy-handle") && -+ mode != PHY_INTERFACE_MODE_INTERNAL) -+ external_mdio_mask |= BIT(reg); -+ else -+ internal_mdio_mask |= BIT(reg); -+ } -+ -+ of_node_put(ports); -+ if (!external_mdio_mask && !internal_mdio_mask) { -+ dev_err(priv->dev, "no PHYs are defined.\n"); -+ return -EINVAL; -+ } -+ -+ /* The QCA8K_MDIO_MASTER_EN Bit, which grants access to PHYs through -+ * the MDIO_MASTER register also _disconnects_ the external MDC -+ * passthrough to the internal PHYs. It's not possible to use both -+ * configurations at the same time! -+ * -+ * Because this came up during the review process: -+ * If the external mdio-bus driver is capable magically disabling -+ * the QCA8K_MDIO_MASTER_EN and mutex/spin-locking out the qca8k's -+ * accessors for the time being, it would be possible to pull this -+ * off. -+ */ -+ if (!!external_mdio_mask && !!internal_mdio_mask) { -+ dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n"); -+ return -EINVAL; -+ } -+ -+ if (external_mdio_mask) { -+ /* Make sure to disable the internal mdio bus in cases -+ * a dt-overlay and driver reload changed the configuration -+ */ -+ -+ return regmap_clear_bits(priv->regmap, QCA8K_MDIO_MASTER_CTRL, -+ QCA8K_MDIO_MASTER_EN); -+ } -+ -+ return qca8k_mdio_register(priv); -+} -+ -+static int -+qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv) -+{ -+ u32 mask = 0; -+ int ret = 0; -+ -+ /* SoC specific settings for ipq8064. 
-+ * If more device require this consider adding -+ * a dedicated binding. -+ */ -+ if (of_machine_is_compatible("qcom,ipq8064")) -+ mask |= QCA8K_MAC_PWR_RGMII0_1_8V; -+ -+ /* SoC specific settings for ipq8065 */ -+ if (of_machine_is_compatible("qcom,ipq8065")) -+ mask |= QCA8K_MAC_PWR_RGMII1_1_8V; -+ -+ if (mask) { -+ ret = qca8k_rmw(priv, QCA8K_REG_MAC_PWR_SEL, -+ QCA8K_MAC_PWR_RGMII0_1_8V | -+ QCA8K_MAC_PWR_RGMII1_1_8V, -+ mask); -+ } -+ -+ return ret; -+} -+ -+static int qca8k_find_cpu_port(struct dsa_switch *ds) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ -+ /* Find the connected cpu port. Valid port are 0 or 6 */ -+ if (dsa_is_cpu_port(ds, 0)) -+ return 0; -+ -+ dev_dbg(priv->dev, "port 0 is not the CPU port. Checking port 6"); -+ -+ if (dsa_is_cpu_port(ds, 6)) -+ return 6; -+ -+ return -EINVAL; -+} -+ -+static int -+qca8k_setup_of_pws_reg(struct qca8k_priv *priv) -+{ -+ const struct qca8k_match_data *data = priv->info; -+ struct device_node *node = priv->dev->of_node; -+ u32 val = 0; -+ int ret; -+ -+ /* QCA8327 require to set to the correct mode. -+ * His bigger brother QCA8328 have the 172 pin layout. -+ * Should be applied by default but we set this just to make sure. -+ */ -+ if (priv->switch_id == QCA8K_ID_QCA8327) { -+ /* Set the correct package of 148 pin for QCA8327 */ -+ if (data->reduced_package) -+ val |= QCA8327_PWS_PACKAGE148_EN; -+ -+ ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN, -+ val); -+ if (ret) -+ return ret; -+ } -+ -+ if (of_property_read_bool(node, "qca,ignore-power-on-sel")) -+ val |= QCA8K_PWS_POWER_ON_SEL; -+ -+ if (of_property_read_bool(node, "qca,led-open-drain")) { -+ if (!(val & QCA8K_PWS_POWER_ON_SEL)) { -+ dev_err(priv->dev, "qca,led-open-drain require qca,ignore-power-on-sel to be set."); -+ return -EINVAL; -+ } -+ -+ val |= QCA8K_PWS_LED_OPEN_EN_CSR; -+ } -+ -+ return qca8k_rmw(priv, QCA8K_REG_PWS, -+ QCA8K_PWS_LED_OPEN_EN_CSR | QCA8K_PWS_POWER_ON_SEL, -+ val); -+} -+ -+static int -+qca8k_parse_port_config(struct qca8k_priv *priv) -+{ -+ int port, cpu_port_index = -1, ret; -+ struct device_node *port_dn; -+ phy_interface_t mode; -+ struct dsa_port *dp; -+ u32 delay; -+ -+ /* We have 2 CPU port. 
Check them */ -+ for (port = 0; port < QCA8K_NUM_PORTS; port++) { -+ /* Skip every other port */ -+ if (port != 0 && port != 6) -+ continue; -+ -+ dp = dsa_to_port(priv->ds, port); -+ port_dn = dp->dn; -+ cpu_port_index++; -+ -+ if (!of_device_is_available(port_dn)) -+ continue; -+ -+ ret = of_get_phy_mode(port_dn, &mode); -+ if (ret) -+ continue; -+ -+ switch (mode) { -+ case PHY_INTERFACE_MODE_RGMII: -+ case PHY_INTERFACE_MODE_RGMII_ID: -+ case PHY_INTERFACE_MODE_RGMII_TXID: -+ case PHY_INTERFACE_MODE_RGMII_RXID: -+ case PHY_INTERFACE_MODE_SGMII: -+ delay = 0; -+ -+ if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay)) -+ /* Switch regs accept value in ns, convert ps to ns */ -+ delay = delay / 1000; -+ else if (mode == PHY_INTERFACE_MODE_RGMII_ID || -+ mode == PHY_INTERFACE_MODE_RGMII_TXID) -+ delay = 1; -+ -+ if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, delay)) { -+ dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value"); -+ delay = 3; -+ } -+ -+ priv->ports_config.rgmii_tx_delay[cpu_port_index] = delay; -+ -+ delay = 0; -+ -+ if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay)) -+ /* Switch regs accept value in ns, convert ps to ns */ -+ delay = delay / 1000; -+ else if (mode == PHY_INTERFACE_MODE_RGMII_ID || -+ mode == PHY_INTERFACE_MODE_RGMII_RXID) -+ delay = 2; -+ -+ if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, delay)) { -+ dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value"); -+ delay = 3; -+ } -+ -+ priv->ports_config.rgmii_rx_delay[cpu_port_index] = delay; -+ -+ /* Skip sgmii parsing for rgmii* mode */ -+ if (mode == PHY_INTERFACE_MODE_RGMII || -+ mode == PHY_INTERFACE_MODE_RGMII_ID || -+ mode == PHY_INTERFACE_MODE_RGMII_TXID || -+ mode == PHY_INTERFACE_MODE_RGMII_RXID) -+ break; -+ -+ if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge")) -+ priv->ports_config.sgmii_tx_clk_falling_edge = true; -+ -+ if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge")) -+ priv->ports_config.sgmii_rx_clk_falling_edge = true; -+ -+ if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) { -+ priv->ports_config.sgmii_enable_pll = true; -+ -+ if (priv->switch_id == QCA8K_ID_QCA8327) { -+ dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. 
Aborting enabling"); -+ priv->ports_config.sgmii_enable_pll = false; -+ } -+ -+ if (priv->switch_revision < 2) -+ dev_warn(priv->dev, "SGMII PLL should NOT be enabled for qca8337 with revision 2 or more."); -+ } -+ -+ break; -+ default: -+ continue; -+ } -+ } -+ -+ return 0; -+} -+ -+static int -+qca8k_setup(struct dsa_switch *ds) -+{ -+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -+ int cpu_port, ret, i; -+ u32 mask; -+ -+ cpu_port = qca8k_find_cpu_port(ds); -+ if (cpu_port < 0) { -+ dev_err(priv->dev, "No cpu port configured in both cpu port0 and port6"); -+ return cpu_port; -+ } -+ -+ /* Parse CPU port config to be later used in phy_link mac_config */ -+ ret = qca8k_parse_port_config(priv); -+ if (ret) -+ return ret; -+ -+ ret = qca8k_setup_mdio_bus(priv); -+ if (ret) -+ return ret; -+ -+ ret = qca8k_setup_of_pws_reg(priv); -+ if (ret) -+ return ret; -+ -+ ret = qca8k_setup_mac_pwr_sel(priv); -+ if (ret) -+ return ret; -+ -+ /* Make sure MAC06 is disabled */ -+ ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL, -+ QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN); -+ if (ret) { -+ dev_err(priv->dev, "failed disabling MAC06 exchange"); -+ return ret; -+ } -+ -+ /* Enable CPU Port */ -+ ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, -+ QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN); -+ if (ret) { -+ dev_err(priv->dev, "failed enabling CPU port"); -+ return ret; -+ } -+ -+ /* Enable MIB counters */ -+ ret = qca8k_mib_init(priv); -+ if (ret) -+ dev_warn(priv->dev, "mib init failed"); -+ -+ /* Initial setup of all ports */ -+ for (i = 0; i < QCA8K_NUM_PORTS; i++) { -+ /* Disable forwarding by default on all ports */ -+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i), -+ QCA8K_PORT_LOOKUP_MEMBER, 0); -+ if (ret) -+ return ret; -+ -+ /* Enable QCA header mode on all cpu ports */ -+ if (dsa_is_cpu_port(ds, i)) { -+ ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i), -+ FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) | -+ FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL)); -+ if (ret) { -+ dev_err(priv->dev, "failed enabling QCA header mode"); -+ return ret; -+ } -+ } -+ -+ /* Disable MAC by default on all user ports */ -+ if (dsa_is_user_port(ds, i)) -+ qca8k_port_set_status(priv, i, 0); -+ } -+ -+ /* Forward all unknown frames to CPU port for Linux processing -+ * Notice that in multi-cpu config only one port should be set -+ * for igmp, unknown, multicast and broadcast packet -+ */ -+ ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1, -+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) | -+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) | -+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) | -+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port))); -+ if (ret) -+ return ret; -+ -+ /* Setup connection between CPU port & user ports -+ * Configure specific switch configuration for ports -+ */ -+ for (i = 0; i < QCA8K_NUM_PORTS; i++) { -+ /* CPU port gets connected to all user ports of the switch */ -+ if (dsa_is_cpu_port(ds, i)) { -+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i), -+ QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds)); -+ if (ret) -+ return ret; -+ } -+ -+ /* Individual user ports get connected to CPU port only */ -+ if (dsa_is_user_port(ds, i)) { -+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i), -+ QCA8K_PORT_LOOKUP_MEMBER, -+ BIT(cpu_port)); -+ if (ret) -+ return ret; -+ -+ /* Enable ARP Auto-learning by default */ -+ ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(i), -+ 
QCA8K_PORT_LOOKUP_LEARN); -+ if (ret) -+ return ret; -+ -+ /* For port based vlans to work we need to set the -+ * default egress vid -+ */ -+ ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i), -+ QCA8K_EGREES_VLAN_PORT_MASK(i), -+ QCA8K_EGREES_VLAN_PORT(i, QCA8K_PORT_VID_DEF)); -+ if (ret) -+ return ret; -+ -+ ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i), -+ QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) | -+ QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF)); -+ if (ret) -+ return ret; -+ } -+ -+ /* The port 5 of the qca8337 have some problem in flood condition. The -+ * original legacy driver had some specific buffer and priority settings -+ * for the different port suggested by the QCA switch team. Add this -+ * missing settings to improve switch stability under load condition. -+ * This problem is limited to qca8337 and other qca8k switch are not affected. -+ */ -+ if (priv->switch_id == QCA8K_ID_QCA8337) { -+ switch (i) { -+ /* The 2 CPU port and port 5 requires some different -+ * priority than any other ports. -+ */ -+ case 0: -+ case 5: -+ case 6: -+ mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) | -+ QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) | -+ QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) | -+ QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) | -+ QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) | -+ QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) | -+ QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e); -+ break; -+ default: -+ mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) | -+ QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) | -+ QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) | -+ QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) | -+ QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19); -+ } -+ qca8k_write(priv, QCA8K_REG_PORT_HOL_CTRL0(i), mask); -+ -+ mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) | -+ QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN | -+ QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN | -+ QCA8K_PORT_HOL_CTRL1_WRED_EN; -+ qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i), -+ QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK | -+ QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN | -+ QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN | -+ QCA8K_PORT_HOL_CTRL1_WRED_EN, -+ mask); -+ } -+ } -+ -+ /* Special GLOBAL_FC_THRESH value are needed for ar8327 switch */ -+ if (priv->switch_id == QCA8K_ID_QCA8327) { -+ mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) | -+ QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496); -+ qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH, -+ QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK | -+ QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK, -+ mask); -+ } -+ -+ /* Setup our port MTUs to match power on defaults */ -+ ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN); -+ if (ret) -+ dev_warn(priv->dev, "failed setting MTU settings"); -+ -+ /* Flush the FDB table */ -+ qca8k_fdb_flush(priv); -+ -+ /* We don't have interrupts for link changes, so we need to poll */ -+ ds->pcs_poll = true; -+ -+ /* Set min a max ageing value supported */ -+ ds->ageing_time_min = 7000; -+ ds->ageing_time_max = 458745000; -+ -+ /* Set max number of LAGs supported */ -+ ds->num_lag_ids = QCA8K_NUM_LAGS; -+ -+ return 0; -+} -+ -+static void -+qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index, -+ u32 reg) -+{ -+ u32 delay, val = 0; -+ int ret; -+ -+ /* Delay can be declared in 3 different way. -+ * Mode to rgmii and internal-delay standard binding defined -+ * rgmii-id or rgmii-tx/rx phy mode set. -+ * The parse logic set a delay different than 0 only when one -+ * of the 3 different way is used. In all other case delay is -+ * not enabled. With ID or TX/RXID delay is enabled and set -+ * to the default and recommended value. 
-+ */ -+ if (priv->ports_config.rgmii_tx_delay[cpu_port_index]) { -+ delay = priv->ports_config.rgmii_tx_delay[cpu_port_index]; -+ -+ val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) | -+ QCA8K_PORT_PAD_RGMII_TX_DELAY_EN; -+ } -+ -+ if (priv->ports_config.rgmii_rx_delay[cpu_port_index]) { -+ delay = priv->ports_config.rgmii_rx_delay[cpu_port_index]; -+ -+ val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) | -+ QCA8K_PORT_PAD_RGMII_RX_DELAY_EN; -+ } -+ -+ /* Set RGMII delay based on the selected values */ -+ ret = qca8k_rmw(priv, reg, -+ QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK | -+ QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK | -+ QCA8K_PORT_PAD_RGMII_TX_DELAY_EN | -+ QCA8K_PORT_PAD_RGMII_RX_DELAY_EN, -+ val); -+ if (ret) -+ dev_err(priv->dev, "Failed to set internal delay for CPU port%d", -+ cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6); -+} -+ -+static void -+qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode, -+ const struct phylink_link_state *state) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ int cpu_port_index, ret; -+ u32 reg, val; -+ -+ switch (port) { -+ case 0: /* 1st CPU port */ -+ if (state->interface != PHY_INTERFACE_MODE_RGMII && -+ state->interface != PHY_INTERFACE_MODE_RGMII_ID && -+ state->interface != PHY_INTERFACE_MODE_RGMII_TXID && -+ state->interface != PHY_INTERFACE_MODE_RGMII_RXID && -+ state->interface != PHY_INTERFACE_MODE_SGMII) -+ return; -+ -+ reg = QCA8K_REG_PORT0_PAD_CTRL; -+ cpu_port_index = QCA8K_CPU_PORT0; -+ break; -+ case 1: -+ case 2: -+ case 3: -+ case 4: -+ case 5: -+ /* Internal PHY, nothing to do */ -+ return; -+ case 6: /* 2nd CPU port / external PHY */ -+ if (state->interface != PHY_INTERFACE_MODE_RGMII && -+ state->interface != PHY_INTERFACE_MODE_RGMII_ID && -+ state->interface != PHY_INTERFACE_MODE_RGMII_TXID && -+ state->interface != PHY_INTERFACE_MODE_RGMII_RXID && -+ state->interface != PHY_INTERFACE_MODE_SGMII && -+ state->interface != PHY_INTERFACE_MODE_1000BASEX) -+ return; -+ -+ reg = QCA8K_REG_PORT6_PAD_CTRL; -+ cpu_port_index = QCA8K_CPU_PORT6; -+ break; -+ default: -+ dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port); -+ return; -+ } -+ -+ if (port != 6 && phylink_autoneg_inband(mode)) { -+ dev_err(ds->dev, "%s: in-band negotiation unsupported\n", -+ __func__); -+ return; -+ } -+ -+ switch (state->interface) { -+ case PHY_INTERFACE_MODE_RGMII: -+ case PHY_INTERFACE_MODE_RGMII_ID: -+ case PHY_INTERFACE_MODE_RGMII_TXID: -+ case PHY_INTERFACE_MODE_RGMII_RXID: -+ qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN); -+ -+ /* Configure rgmii delay */ -+ qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg); -+ -+ /* QCA8337 requires to set rgmii rx delay for all ports. -+ * This is enabled through PORT5_PAD_CTRL for all ports, -+ * rather than individual port registers. 
-+ */ -+ if (priv->switch_id == QCA8K_ID_QCA8337) -+ qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL, -+ QCA8K_PORT_PAD_RGMII_RX_DELAY_EN); -+ break; -+ case PHY_INTERFACE_MODE_SGMII: -+ case PHY_INTERFACE_MODE_1000BASEX: -+ /* Enable SGMII on the port */ -+ qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN); -+ -+ /* Enable/disable SerDes auto-negotiation as necessary */ -+ ret = qca8k_read(priv, QCA8K_REG_PWS, &val); -+ if (ret) -+ return; -+ if (phylink_autoneg_inband(mode)) -+ val &= ~QCA8K_PWS_SERDES_AEN_DIS; -+ else -+ val |= QCA8K_PWS_SERDES_AEN_DIS; -+ qca8k_write(priv, QCA8K_REG_PWS, val); -+ -+ /* Configure the SGMII parameters */ -+ ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val); -+ if (ret) -+ return; -+ -+ val |= QCA8K_SGMII_EN_SD; -+ -+ if (priv->ports_config.sgmii_enable_pll) -+ val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX | -+ QCA8K_SGMII_EN_TX; -+ -+ if (dsa_is_cpu_port(ds, port)) { -+ /* CPU port, we're talking to the CPU MAC, be a PHY */ -+ val &= ~QCA8K_SGMII_MODE_CTRL_MASK; -+ val |= QCA8K_SGMII_MODE_CTRL_PHY; -+ } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { -+ val &= ~QCA8K_SGMII_MODE_CTRL_MASK; -+ val |= QCA8K_SGMII_MODE_CTRL_MAC; -+ } else if (state->interface == PHY_INTERFACE_MODE_1000BASEX) { -+ val &= ~QCA8K_SGMII_MODE_CTRL_MASK; -+ val |= QCA8K_SGMII_MODE_CTRL_BASEX; -+ } -+ -+ qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val); -+ -+ /* From original code is reported port instability as SGMII also -+ * require delay set. Apply advised values here or take them from DT. -+ */ -+ if (state->interface == PHY_INTERFACE_MODE_SGMII) -+ qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg); -+ -+ /* For qca8327/qca8328/qca8334/qca8338 sgmii is unique and -+ * falling edge is set writing in the PORT0 PAD reg -+ */ -+ if (priv->switch_id == QCA8K_ID_QCA8327 || -+ priv->switch_id == QCA8K_ID_QCA8337) -+ reg = QCA8K_REG_PORT0_PAD_CTRL; -+ -+ val = 0; -+ -+ /* SGMII Clock phase configuration */ -+ if (priv->ports_config.sgmii_rx_clk_falling_edge) -+ val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE; -+ -+ if (priv->ports_config.sgmii_tx_clk_falling_edge) -+ val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE; -+ -+ if (val) -+ ret = qca8k_rmw(priv, reg, -+ QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE | -+ QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE, -+ val); -+ -+ break; -+ default: -+ dev_err(ds->dev, "xMII mode %s not supported for port %d\n", -+ phy_modes(state->interface), port); -+ return; -+ } -+} -+ -+static void -+qca8k_phylink_validate(struct dsa_switch *ds, int port, -+ unsigned long *supported, -+ struct phylink_link_state *state) -+{ -+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; -+ -+ switch (port) { -+ case 0: /* 1st CPU port */ -+ if (state->interface != PHY_INTERFACE_MODE_NA && -+ state->interface != PHY_INTERFACE_MODE_RGMII && -+ state->interface != PHY_INTERFACE_MODE_RGMII_ID && -+ state->interface != PHY_INTERFACE_MODE_RGMII_TXID && -+ state->interface != PHY_INTERFACE_MODE_RGMII_RXID && -+ state->interface != PHY_INTERFACE_MODE_SGMII) -+ goto unsupported; -+ break; -+ case 1: -+ case 2: -+ case 3: -+ case 4: -+ case 5: -+ /* Internal PHY */ -+ if (state->interface != PHY_INTERFACE_MODE_NA && -+ state->interface != PHY_INTERFACE_MODE_GMII && -+ state->interface != PHY_INTERFACE_MODE_INTERNAL) -+ goto unsupported; -+ break; -+ case 6: /* 2nd CPU port / external PHY */ -+ if (state->interface != PHY_INTERFACE_MODE_NA && -+ state->interface != PHY_INTERFACE_MODE_RGMII && -+ state->interface != PHY_INTERFACE_MODE_RGMII_ID && -+ state->interface != 
PHY_INTERFACE_MODE_RGMII_TXID && -+ state->interface != PHY_INTERFACE_MODE_RGMII_RXID && -+ state->interface != PHY_INTERFACE_MODE_SGMII && -+ state->interface != PHY_INTERFACE_MODE_1000BASEX) -+ goto unsupported; -+ break; -+ default: -+unsupported: -+ linkmode_zero(supported); -+ return; -+ } -+ -+ phylink_set_port_modes(mask); -+ phylink_set(mask, Autoneg); -+ -+ phylink_set(mask, 1000baseT_Full); -+ phylink_set(mask, 10baseT_Half); -+ phylink_set(mask, 10baseT_Full); -+ phylink_set(mask, 100baseT_Half); -+ phylink_set(mask, 100baseT_Full); -+ -+ if (state->interface == PHY_INTERFACE_MODE_1000BASEX) -+ phylink_set(mask, 1000baseX_Full); -+ -+ phylink_set(mask, Pause); -+ phylink_set(mask, Asym_Pause); -+ -+ linkmode_and(supported, supported, mask); -+ linkmode_and(state->advertising, state->advertising, mask); -+} -+ -+static int -+qca8k_phylink_mac_link_state(struct dsa_switch *ds, int port, -+ struct phylink_link_state *state) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ u32 reg; -+ int ret; -+ -+ ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), ®); -+ if (ret < 0) -+ return ret; -+ -+ state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP); -+ state->an_complete = state->link; -+ state->an_enabled = !!(reg & QCA8K_PORT_STATUS_LINK_AUTO); -+ state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL : -+ DUPLEX_HALF; -+ -+ switch (reg & QCA8K_PORT_STATUS_SPEED) { -+ case QCA8K_PORT_STATUS_SPEED_10: -+ state->speed = SPEED_10; -+ break; -+ case QCA8K_PORT_STATUS_SPEED_100: -+ state->speed = SPEED_100; -+ break; -+ case QCA8K_PORT_STATUS_SPEED_1000: -+ state->speed = SPEED_1000; -+ break; -+ default: -+ state->speed = SPEED_UNKNOWN; -+ break; -+ } -+ -+ state->pause = MLO_PAUSE_NONE; -+ if (reg & QCA8K_PORT_STATUS_RXFLOW) -+ state->pause |= MLO_PAUSE_RX; -+ if (reg & QCA8K_PORT_STATUS_TXFLOW) -+ state->pause |= MLO_PAUSE_TX; -+ -+ return 1; -+} -+ -+static void -+qca8k_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode, -+ phy_interface_t interface) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ -+ qca8k_port_set_status(priv, port, 0); -+} -+ -+static void -+qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode, -+ phy_interface_t interface, struct phy_device *phydev, -+ int speed, int duplex, bool tx_pause, bool rx_pause) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ u32 reg; -+ -+ if (phylink_autoneg_inband(mode)) { -+ reg = QCA8K_PORT_STATUS_LINK_AUTO; -+ } else { -+ switch (speed) { -+ case SPEED_10: -+ reg = QCA8K_PORT_STATUS_SPEED_10; -+ break; -+ case SPEED_100: -+ reg = QCA8K_PORT_STATUS_SPEED_100; -+ break; -+ case SPEED_1000: -+ reg = QCA8K_PORT_STATUS_SPEED_1000; -+ break; -+ default: -+ reg = QCA8K_PORT_STATUS_LINK_AUTO; -+ break; -+ } -+ -+ if (duplex == DUPLEX_FULL) -+ reg |= QCA8K_PORT_STATUS_DUPLEX; -+ -+ if (rx_pause || dsa_is_cpu_port(ds, port)) -+ reg |= QCA8K_PORT_STATUS_RXFLOW; -+ -+ if (tx_pause || dsa_is_cpu_port(ds, port)) -+ reg |= QCA8K_PORT_STATUS_TXFLOW; -+ } -+ -+ reg |= QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC; -+ -+ qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg); -+} -+ -+static void -+qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ int i; -+ -+ if (stringset != ETH_SS_STATS) -+ return; -+ -+ for (i = 0; i < priv->info->mib_count; i++) -+ strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name, -+ ETH_GSTRING_LEN); -+} -+ -+static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *skb) -+{ -+ struct 
qca8k_mib_eth_data *mib_eth_data; -+ struct qca8k_priv *priv = ds->priv; -+ const struct qca8k_mib_desc *mib; -+ struct mib_ethhdr *mib_ethhdr; -+ int i, mib_len, offset = 0; -+ u64 *data; -+ u8 port; -+ -+ mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb); -+ mib_eth_data = &priv->mib_eth_data; -+ -+ /* The switch autocast every port. Ignore other packet and -+ * parse only the requested one. -+ */ -+ port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, ntohs(mib_ethhdr->hdr)); -+ if (port != mib_eth_data->req_port) -+ goto exit; -+ -+ data = mib_eth_data->data; -+ -+ for (i = 0; i < priv->info->mib_count; i++) { -+ mib = &ar8327_mib[i]; -+ -+ /* First 3 mib are present in the skb head */ -+ if (i < 3) { -+ data[i] = mib_ethhdr->data[i]; -+ continue; -+ } -+ -+ mib_len = sizeof(uint32_t); -+ -+ /* Some mib are 64 bit wide */ -+ if (mib->size == 2) -+ mib_len = sizeof(uint64_t); -+ -+ /* Copy the mib value from packet to the */ -+ memcpy(data + i, skb->data + offset, mib_len); -+ -+ /* Set the offset for the next mib */ -+ offset += mib_len; -+ } -+ -+exit: -+ /* Complete on receiving all the mib packet */ -+ if (refcount_dec_and_test(&mib_eth_data->port_parsed)) -+ complete(&mib_eth_data->rw_done); -+} -+ -+static int -+qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data) -+{ -+ struct dsa_port *dp = dsa_to_port(ds, port); -+ struct qca8k_mib_eth_data *mib_eth_data; -+ struct qca8k_priv *priv = ds->priv; -+ int ret; -+ -+ mib_eth_data = &priv->mib_eth_data; -+ -+ mutex_lock(&mib_eth_data->mutex); -+ -+ reinit_completion(&mib_eth_data->rw_done); -+ -+ mib_eth_data->req_port = dp->index; -+ mib_eth_data->data = data; -+ refcount_set(&mib_eth_data->port_parsed, QCA8K_NUM_PORTS); -+ -+ mutex_lock(&priv->reg_mutex); -+ -+ /* Send mib autocast request */ -+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB, -+ QCA8K_MIB_FUNC | QCA8K_MIB_BUSY, -+ FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_CAST) | -+ QCA8K_MIB_BUSY); -+ -+ mutex_unlock(&priv->reg_mutex); -+ -+ if (ret) -+ goto exit; -+ -+ ret = wait_for_completion_timeout(&mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT); -+ -+exit: -+ mutex_unlock(&mib_eth_data->mutex); -+ -+ return ret; -+} -+ -+static void -+qca8k_get_ethtool_stats(struct dsa_switch *ds, int port, -+ uint64_t *data) -+{ -+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -+ const struct qca8k_mib_desc *mib; -+ u32 reg, i, val; -+ u32 hi = 0; -+ int ret; -+ -+ if (priv->mgmt_master && priv->info->ops->autocast_mib && -+ priv->info->ops->autocast_mib(ds, port, data) > 0) -+ return; -+ -+ for (i = 0; i < priv->info->mib_count; i++) { -+ mib = &ar8327_mib[i]; -+ reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset; -+ -+ ret = qca8k_read(priv, reg, &val); -+ if (ret < 0) -+ continue; -+ -+ if (mib->size == 2) { -+ ret = qca8k_read(priv, reg + 4, &hi); -+ if (ret < 0) -+ continue; -+ } -+ -+ data[i] = val; -+ if (mib->size == 2) -+ data[i] |= (u64)hi << 32; -+ } -+} -+ -+static int -+qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ -+ if (sset != ETH_SS_STATS) -+ return 0; -+ -+ return priv->info->mib_count; -+} -+ -+static int -+qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee) -+{ -+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -+ u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port); -+ u32 reg; -+ int ret; -+ -+ mutex_lock(&priv->reg_mutex); -+ ret = qca8k_read(priv, QCA8K_REG_EEE_CTRL, ®); -+ if (ret < 0) -+ goto exit; -+ -+ if (eee->eee_enabled) -+ reg |= lpi_en; -+ else -+ 
reg &= ~lpi_en; -+ ret = qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg); -+ -+exit: -+ mutex_unlock(&priv->reg_mutex); -+ return ret; -+} -+ -+static int -+qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e) -+{ -+ /* Nothing to do on the port's MAC */ -+ return 0; -+} -+ -+static void -+qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) -+{ -+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -+ u32 stp_state; -+ -+ switch (state) { -+ case BR_STATE_DISABLED: -+ stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED; -+ break; -+ case BR_STATE_BLOCKING: -+ stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING; -+ break; -+ case BR_STATE_LISTENING: -+ stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING; -+ break; -+ case BR_STATE_LEARNING: -+ stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING; -+ break; -+ case BR_STATE_FORWARDING: -+ default: -+ stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD; -+ break; -+ } -+ -+ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), -+ QCA8K_PORT_LOOKUP_STATE_MASK, stp_state); -+} -+ -+static int -+qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br) -+{ -+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -+ int port_mask, cpu_port; -+ int i, ret; -+ -+ cpu_port = dsa_to_port(ds, port)->cpu_dp->index; -+ port_mask = BIT(cpu_port); -+ -+ for (i = 0; i < QCA8K_NUM_PORTS; i++) { -+ if (dsa_is_cpu_port(ds, i)) -+ continue; -+ if (dsa_to_port(ds, i)->bridge_dev != br) -+ continue; -+ /* Add this port to the portvlan mask of the other ports -+ * in the bridge -+ */ -+ ret = regmap_set_bits(priv->regmap, -+ QCA8K_PORT_LOOKUP_CTRL(i), -+ BIT(port)); -+ if (ret) -+ return ret; -+ if (i != port) -+ port_mask |= BIT(i); -+ } -+ -+ /* Add all other ports to this ports portvlan mask */ -+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), -+ QCA8K_PORT_LOOKUP_MEMBER, port_mask); -+ -+ return ret; -+} -+ -+static void -+qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br) -+{ -+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -+ int cpu_port, i; -+ -+ cpu_port = dsa_to_port(ds, port)->cpu_dp->index; -+ -+ for (i = 0; i < QCA8K_NUM_PORTS; i++) { -+ if (dsa_is_cpu_port(ds, i)) -+ continue; -+ if (dsa_to_port(ds, i)->bridge_dev != br) -+ continue; -+ /* Remove this port to the portvlan mask of the other ports -+ * in the bridge -+ */ -+ regmap_clear_bits(priv->regmap, -+ QCA8K_PORT_LOOKUP_CTRL(i), -+ BIT(port)); -+ } -+ -+ /* Set the cpu port to be the only one in the portvlan mask of -+ * this port -+ */ -+ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), -+ QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port)); -+} -+ -+static void -+qca8k_port_fast_age(struct dsa_switch *ds, int port) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ -+ mutex_lock(&priv->reg_mutex); -+ qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port); -+ mutex_unlock(&priv->reg_mutex); -+} -+ -+static int -+qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ unsigned int secs = msecs / 1000; -+ u32 val; -+ -+ /* AGE_TIME reg is set in 7s step */ -+ val = secs / 7; -+ -+ /* Handle case with 0 as val to NOT disable -+ * learning -+ */ -+ if (!val) -+ val = 1; -+ -+ return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL, QCA8K_ATU_AGE_TIME_MASK, -+ QCA8K_ATU_AGE_TIME(val)); -+} -+ -+static int -+qca8k_port_enable(struct dsa_switch *ds, int port, -+ struct phy_device *phy) -+{ -+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -+ -+ qca8k_port_set_status(priv, port, 1); -+ 
priv->port_enabled_map |= BIT(port); -+ -+ if (dsa_is_user_port(ds, port)) -+ phy_support_asym_pause(phy); -+ -+ return 0; -+} -+ -+static void -+qca8k_port_disable(struct dsa_switch *ds, int port) -+{ -+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -+ -+ qca8k_port_set_status(priv, port, 0); -+ priv->port_enabled_map &= ~BIT(port); -+} -+ -+static int -+qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ int ret; -+ -+ /* We have only have a general MTU setting. -+ * DSA always set the CPU port's MTU to the largest MTU of the slave -+ * ports. -+ * Setting MTU just for the CPU port is sufficient to correctly set a -+ * value for every port. -+ */ -+ if (!dsa_is_cpu_port(ds, port)) -+ return 0; -+ -+ /* To change the MAX_FRAME_SIZE the cpu ports must be off or -+ * the switch panics. -+ * Turn off both cpu ports before applying the new value to prevent -+ * this. -+ */ -+ if (priv->port_enabled_map & BIT(0)) -+ qca8k_port_set_status(priv, 0, 0); -+ -+ if (priv->port_enabled_map & BIT(6)) -+ qca8k_port_set_status(priv, 6, 0); -+ -+ /* Include L2 header / FCS length */ -+ ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu + ETH_HLEN + ETH_FCS_LEN); -+ -+ if (priv->port_enabled_map & BIT(0)) -+ qca8k_port_set_status(priv, 0, 1); -+ -+ if (priv->port_enabled_map & BIT(6)) -+ qca8k_port_set_status(priv, 6, 1); -+ -+ return ret; -+} -+ -+static int -+qca8k_port_max_mtu(struct dsa_switch *ds, int port) -+{ -+ return QCA8K_MAX_MTU; -+} -+ -+static int -+qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr, -+ u16 port_mask, u16 vid) -+{ -+ /* Set the vid to the port vlan id if no vid is set */ -+ if (!vid) -+ vid = QCA8K_PORT_VID_DEF; -+ -+ return qca8k_fdb_add(priv, addr, port_mask, vid, -+ QCA8K_ATU_STATUS_STATIC); -+} -+ -+static int -+qca8k_port_fdb_add(struct dsa_switch *ds, int port, -+ const unsigned char *addr, u16 vid) -+{ -+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -+ u16 port_mask = BIT(port); -+ -+ return qca8k_port_fdb_insert(priv, addr, port_mask, vid); -+} -+ -+static int -+qca8k_port_fdb_del(struct dsa_switch *ds, int port, -+ const unsigned char *addr, u16 vid) -+{ -+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -+ u16 port_mask = BIT(port); -+ -+ if (!vid) -+ vid = QCA8K_PORT_VID_DEF; -+ -+ return qca8k_fdb_del(priv, addr, port_mask, vid); -+} -+ -+static int -+qca8k_port_fdb_dump(struct dsa_switch *ds, int port, -+ dsa_fdb_dump_cb_t *cb, void *data) -+{ -+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -+ struct qca8k_fdb _fdb = { 0 }; -+ int cnt = QCA8K_NUM_FDB_RECORDS; -+ bool is_static; -+ int ret = 0; -+ -+ mutex_lock(&priv->reg_mutex); -+ while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) { -+ if (!_fdb.aging) -+ break; -+ is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC); -+ ret = cb(_fdb.mac, _fdb.vid, is_static, data); -+ if (ret) -+ break; -+ } -+ mutex_unlock(&priv->reg_mutex); -+ -+ return 0; -+} -+ -+static int -+qca8k_port_mdb_add(struct dsa_switch *ds, int port, -+ const struct switchdev_obj_port_mdb *mdb) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ const u8 *addr = mdb->addr; -+ u16 vid = mdb->vid; -+ -+ return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid); -+} -+ -+static int -+qca8k_port_mdb_del(struct dsa_switch *ds, int port, -+ const struct switchdev_obj_port_mdb *mdb) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ const u8 *addr = mdb->addr; -+ u16 vid = mdb->vid; -+ -+ return qca8k_fdb_search_and_del(priv, BIT(port), 
addr, vid); -+} -+ -+static int -+qca8k_port_mirror_add(struct dsa_switch *ds, int port, -+ struct dsa_mall_mirror_tc_entry *mirror, -+ bool ingress) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ int monitor_port, ret; -+ u32 reg, val; -+ -+ /* Check for existent entry */ -+ if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port)) -+ return -EEXIST; -+ -+ ret = regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val); -+ if (ret) -+ return ret; -+ -+ /* QCA83xx can have only one port set to mirror mode. -+ * Check that the correct port is requested and return error otherwise. -+ * When no mirror port is set, the values is set to 0xF -+ */ -+ monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val); -+ if (monitor_port != 0xF && monitor_port != mirror->to_local_port) -+ return -EEXIST; -+ -+ /* Set the monitor port */ -+ val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, -+ mirror->to_local_port); -+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, -+ QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val); -+ if (ret) -+ return ret; -+ -+ if (ingress) { -+ reg = QCA8K_PORT_LOOKUP_CTRL(port); -+ val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN; -+ } else { -+ reg = QCA8K_REG_PORT_HOL_CTRL1(port); -+ val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN; -+ } -+ -+ ret = regmap_update_bits(priv->regmap, reg, val, val); -+ if (ret) -+ return ret; -+ -+ /* Track mirror port for tx and rx to decide when the -+ * mirror port has to be disabled. -+ */ -+ if (ingress) -+ priv->mirror_rx |= BIT(port); -+ else -+ priv->mirror_tx |= BIT(port); -+ -+ return 0; -+} -+ -+static void -+qca8k_port_mirror_del(struct dsa_switch *ds, int port, -+ struct dsa_mall_mirror_tc_entry *mirror) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ u32 reg, val; -+ int ret; -+ -+ if (mirror->ingress) { -+ reg = QCA8K_PORT_LOOKUP_CTRL(port); -+ val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN; -+ } else { -+ reg = QCA8K_REG_PORT_HOL_CTRL1(port); -+ val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN; -+ } -+ -+ ret = regmap_clear_bits(priv->regmap, reg, val); -+ if (ret) -+ goto err; -+ -+ if (mirror->ingress) -+ priv->mirror_rx &= ~BIT(port); -+ else -+ priv->mirror_tx &= ~BIT(port); -+ -+ /* No port set to send packet to mirror port. 
Disable mirror port */ -+ if (!priv->mirror_rx && !priv->mirror_tx) { -+ val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, 0xF); -+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, -+ QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val); -+ if (ret) -+ goto err; -+ } -+err: -+ dev_err(priv->dev, "Failed to del mirror port from %d", port); -+} -+ -+static int -+qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, -+ struct netlink_ext_ack *extack) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ int ret; -+ -+ if (vlan_filtering) { -+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), -+ QCA8K_PORT_LOOKUP_VLAN_MODE_MASK, -+ QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE); -+ } else { -+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), -+ QCA8K_PORT_LOOKUP_VLAN_MODE_MASK, -+ QCA8K_PORT_LOOKUP_VLAN_MODE_NONE); -+ } -+ -+ return ret; -+} -+ -+static int -+qca8k_port_vlan_add(struct dsa_switch *ds, int port, -+ const struct switchdev_obj_port_vlan *vlan, -+ struct netlink_ext_ack *extack) -+{ -+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; -+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; -+ struct qca8k_priv *priv = ds->priv; -+ int ret; -+ -+ ret = qca8k_vlan_add(priv, port, vlan->vid, untagged); -+ if (ret) { -+ dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret); -+ return ret; -+ } -+ -+ if (pvid) { -+ ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port), -+ QCA8K_EGREES_VLAN_PORT_MASK(port), -+ QCA8K_EGREES_VLAN_PORT(port, vlan->vid)); -+ if (ret) -+ return ret; -+ -+ ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port), -+ QCA8K_PORT_VLAN_CVID(vlan->vid) | -+ QCA8K_PORT_VLAN_SVID(vlan->vid)); -+ } -+ -+ return ret; -+} -+ -+static int -+qca8k_port_vlan_del(struct dsa_switch *ds, int port, -+ const struct switchdev_obj_port_vlan *vlan) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ int ret; -+ -+ ret = qca8k_vlan_del(priv, port, vlan->vid); -+ if (ret) -+ dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret); -+ -+ return ret; -+} -+ -+static u32 qca8k_get_phy_flags(struct dsa_switch *ds, int port) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ -+ /* Communicate to the phy internal driver the switch revision. -+ * Based on the switch revision different values needs to be -+ * set to the dbg and mmd reg on the phy. -+ * The first 2 bit are used to communicate the switch revision -+ * to the phy driver. 
-+ */ -+ if (port > 0 && port < 6) -+ return priv->switch_revision; -+ -+ return 0; -+} -+ -+static enum dsa_tag_protocol -+qca8k_get_tag_protocol(struct dsa_switch *ds, int port, -+ enum dsa_tag_protocol mp) -+{ -+ return DSA_TAG_PROTO_QCA; -+} -+ -+static bool -+qca8k_lag_can_offload(struct dsa_switch *ds, -+ struct net_device *lag, -+ struct netdev_lag_upper_info *info) -+{ -+ struct dsa_port *dp; -+ int id, members = 0; -+ -+ id = dsa_lag_id(ds->dst, lag); -+ if (id < 0 || id >= ds->num_lag_ids) -+ return false; -+ -+ dsa_lag_foreach_port(dp, ds->dst, lag) -+ /* Includes the port joining the LAG */ -+ members++; -+ -+ if (members > QCA8K_NUM_PORTS_FOR_LAG) -+ return false; -+ -+ if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) -+ return false; -+ -+ if (info->hash_type != NETDEV_LAG_HASH_L2 && -+ info->hash_type != NETDEV_LAG_HASH_L23) -+ return false; -+ -+ return true; -+} -+ -+static int -+qca8k_lag_setup_hash(struct dsa_switch *ds, -+ struct net_device *lag, -+ struct netdev_lag_upper_info *info) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ bool unique_lag = true; -+ u32 hash = 0; -+ int i, id; -+ -+ id = dsa_lag_id(ds->dst, lag); -+ -+ switch (info->hash_type) { -+ case NETDEV_LAG_HASH_L23: -+ hash |= QCA8K_TRUNK_HASH_SIP_EN; -+ hash |= QCA8K_TRUNK_HASH_DIP_EN; -+ fallthrough; -+ case NETDEV_LAG_HASH_L2: -+ hash |= QCA8K_TRUNK_HASH_SA_EN; -+ hash |= QCA8K_TRUNK_HASH_DA_EN; -+ break; -+ default: /* We should NEVER reach this */ -+ return -EOPNOTSUPP; -+ } -+ -+ /* Check if we are the unique configured LAG */ -+ dsa_lags_foreach_id(i, ds->dst) -+ if (i != id && dsa_lag_dev(ds->dst, i)) { -+ unique_lag = false; -+ break; -+ } -+ -+ /* Hash Mode is global. Make sure the same Hash Mode -+ * is set to all the 4 possible lag. -+ * If we are the unique LAG we can set whatever hash -+ * mode we want. -+ * To change hash mode it's needed to remove all LAG -+ * and change the mode with the latest. -+ */ -+ if (unique_lag) { -+ priv->lag_hash_mode = hash; -+ } else if (priv->lag_hash_mode != hash) { -+ netdev_err(lag, "Error: Mismateched Hash Mode across different lag is not supported\n"); -+ return -EOPNOTSUPP; -+ } -+ -+ return regmap_update_bits(priv->regmap, QCA8K_TRUNK_HASH_EN_CTRL, -+ QCA8K_TRUNK_HASH_MASK, hash); -+} -+ -+static int -+qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port, -+ struct net_device *lag, bool delete) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ int ret, id, i; -+ u32 val; -+ -+ id = dsa_lag_id(ds->dst, lag); -+ -+ /* Read current port member */ -+ ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val); -+ if (ret) -+ return ret; -+ -+ /* Shift val to the correct trunk */ -+ val >>= QCA8K_REG_GOL_TRUNK_SHIFT(id); -+ val &= QCA8K_REG_GOL_TRUNK_MEMBER_MASK; -+ if (delete) -+ val &= ~BIT(port); -+ else -+ val |= BIT(port); -+ -+ /* Update port member. 
With empty portmap disable trunk */ -+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, -+ QCA8K_REG_GOL_TRUNK_MEMBER(id) | -+ QCA8K_REG_GOL_TRUNK_EN(id), -+ !val << QCA8K_REG_GOL_TRUNK_SHIFT(id) | -+ val << QCA8K_REG_GOL_TRUNK_SHIFT(id)); -+ -+ /* Search empty member if adding or port on deleting */ -+ for (i = 0; i < QCA8K_NUM_PORTS_FOR_LAG; i++) { -+ ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), &val); -+ if (ret) -+ return ret; -+ -+ val >>= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i); -+ val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK; -+ -+ if (delete) { -+ /* If port flagged to be disabled assume this member is -+ * empty -+ */ -+ if (val != QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK) -+ continue; -+ -+ val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK; -+ if (val != port) -+ continue; -+ } else { -+ /* If port flagged to be enabled assume this member is -+ * already set -+ */ -+ if (val == QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK) -+ continue; -+ } -+ -+ /* We have found the member to add/remove */ -+ break; -+ } -+ -+ /* Set port in the correct port mask or disable port if in delete mode */ -+ return regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), -+ QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(id, i) | -+ QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(id, i), -+ !delete << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i) | -+ port << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i)); -+} -+ -+static int -+qca8k_port_lag_join(struct dsa_switch *ds, int port, -+ struct net_device *lag, -+ struct netdev_lag_upper_info *info) -+{ -+ int ret; -+ -+ if (!qca8k_lag_can_offload(ds, lag, info)) -+ return -EOPNOTSUPP; -+ -+ ret = qca8k_lag_setup_hash(ds, lag, info); -+ if (ret) -+ return ret; -+ -+ return qca8k_lag_refresh_portmap(ds, port, lag, false); -+} -+ -+static int -+qca8k_port_lag_leave(struct dsa_switch *ds, int port, -+ struct net_device *lag) -+{ -+ return qca8k_lag_refresh_portmap(ds, port, lag, true); -+} -+ -+static void -+qca8k_master_change(struct dsa_switch *ds, const struct net_device *master, -+ bool operational) -+{ -+ struct dsa_port *dp = master->dsa_ptr; -+ struct qca8k_priv *priv = ds->priv; -+ -+ /* Ethernet MIB/MDIO is only supported for CPU port 0 */ -+ if (dp->index != 0) -+ return; -+ -+ mutex_lock(&priv->mgmt_eth_data.mutex); -+ mutex_lock(&priv->mib_eth_data.mutex); -+ -+ priv->mgmt_master = operational ? 
(struct net_device *)master : NULL; -+ -+ mutex_unlock(&priv->mib_eth_data.mutex); -+ mutex_unlock(&priv->mgmt_eth_data.mutex); -+} -+ -+static int qca8k_connect_tag_protocol(struct dsa_switch *ds, -+ enum dsa_tag_protocol proto) -+{ -+ struct qca_tagger_data *tagger_data; -+ -+ switch (proto) { -+ case DSA_TAG_PROTO_QCA: -+ tagger_data = ds->tagger_data; -+ -+ tagger_data->rw_reg_ack_handler = qca8k_rw_reg_ack_handler; -+ tagger_data->mib_autocast_handler = qca8k_mib_autocast_handler; -+ -+ break; -+ default: -+ return -EOPNOTSUPP; -+ } -+ -+ return 0; -+} -+ -+static const struct dsa_switch_ops qca8k_switch_ops = { -+ .get_tag_protocol = qca8k_get_tag_protocol, -+ .setup = qca8k_setup, -+ .get_strings = qca8k_get_strings, -+ .get_ethtool_stats = qca8k_get_ethtool_stats, -+ .get_sset_count = qca8k_get_sset_count, -+ .set_ageing_time = qca8k_set_ageing_time, -+ .get_mac_eee = qca8k_get_mac_eee, -+ .set_mac_eee = qca8k_set_mac_eee, -+ .port_enable = qca8k_port_enable, -+ .port_disable = qca8k_port_disable, -+ .port_change_mtu = qca8k_port_change_mtu, -+ .port_max_mtu = qca8k_port_max_mtu, -+ .port_stp_state_set = qca8k_port_stp_state_set, -+ .port_bridge_join = qca8k_port_bridge_join, -+ .port_bridge_leave = qca8k_port_bridge_leave, -+ .port_fast_age = qca8k_port_fast_age, -+ .port_fdb_add = qca8k_port_fdb_add, -+ .port_fdb_del = qca8k_port_fdb_del, -+ .port_fdb_dump = qca8k_port_fdb_dump, -+ .port_mdb_add = qca8k_port_mdb_add, -+ .port_mdb_del = qca8k_port_mdb_del, -+ .port_mirror_add = qca8k_port_mirror_add, -+ .port_mirror_del = qca8k_port_mirror_del, -+ .port_vlan_filtering = qca8k_port_vlan_filtering, -+ .port_vlan_add = qca8k_port_vlan_add, -+ .port_vlan_del = qca8k_port_vlan_del, -+ .phylink_validate = qca8k_phylink_validate, -+ .phylink_mac_link_state = qca8k_phylink_mac_link_state, -+ .phylink_mac_config = qca8k_phylink_mac_config, -+ .phylink_mac_link_down = qca8k_phylink_mac_link_down, -+ .phylink_mac_link_up = qca8k_phylink_mac_link_up, -+ .get_phy_flags = qca8k_get_phy_flags, -+ .port_lag_join = qca8k_port_lag_join, -+ .port_lag_leave = qca8k_port_lag_leave, -+ .master_state_change = qca8k_master_change, -+ .connect_tag_protocol = qca8k_connect_tag_protocol, -+}; -+ -+static int qca8k_read_switch_id(struct qca8k_priv *priv) -+{ -+ u32 val; -+ u8 id; -+ int ret; -+ -+ if (!priv->info) -+ return -ENODEV; -+ -+ ret = qca8k_read(priv, QCA8K_REG_MASK_CTRL, &val); -+ if (ret < 0) -+ return -ENODEV; -+ -+ id = QCA8K_MASK_CTRL_DEVICE_ID(val); -+ if (id != priv->info->id) { -+ dev_err(priv->dev, -+ "Switch id detected %x but expected %x", -+ id, priv->info->id); -+ return -ENODEV; -+ } -+ -+ priv->switch_id = id; -+ -+ /* Save revision to communicate to the internal PHY driver */ -+ priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val); -+ -+ return 0; -+} -+ -+static int -+qca8k_sw_probe(struct mdio_device *mdiodev) -+{ -+ struct qca8k_priv *priv; -+ int ret; -+ -+ /* allocate the private data struct so that we can probe the switches -+ * ID register -+ */ -+ priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL); -+ if (!priv) -+ return -ENOMEM; -+ -+ priv->info = of_device_get_match_data(priv->dev); -+ priv->bus = mdiodev->bus; -+ priv->dev = &mdiodev->dev; -+ -+ priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset", -+ GPIOD_ASIS); -+ if (IS_ERR(priv->reset_gpio)) -+ return PTR_ERR(priv->reset_gpio); -+ -+ if (priv->reset_gpio) { -+ gpiod_set_value_cansleep(priv->reset_gpio, 1); -+ /* The active low duration must be greater than 10 ms -+ * and checkpatch.pl wants 20 ms. 
-+ */ -+ msleep(20); -+ gpiod_set_value_cansleep(priv->reset_gpio, 0); -+ } -+ -+ /* Start by setting up the register mapping */ -+ priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, priv, -+ &qca8k_regmap_config); -+ if (IS_ERR(priv->regmap)) { -+ dev_err(priv->dev, "regmap initialization failed"); -+ return PTR_ERR(priv->regmap); -+ } -+ -+ priv->mdio_cache.page = 0xffff; -+ priv->mdio_cache.lo = 0xffff; -+ priv->mdio_cache.hi = 0xffff; -+ -+ /* Check the detected switch id */ -+ ret = qca8k_read_switch_id(priv); -+ if (ret) -+ return ret; -+ -+ priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL); -+ if (!priv->ds) -+ return -ENOMEM; -+ -+ mutex_init(&priv->mgmt_eth_data.mutex); -+ init_completion(&priv->mgmt_eth_data.rw_done); -+ -+ mutex_init(&priv->mib_eth_data.mutex); -+ init_completion(&priv->mib_eth_data.rw_done); -+ -+ priv->ds->dev = &mdiodev->dev; -+ priv->ds->num_ports = QCA8K_NUM_PORTS; -+ priv->ds->priv = priv; -+ priv->ds->ops = &qca8k_switch_ops; -+ mutex_init(&priv->reg_mutex); -+ dev_set_drvdata(&mdiodev->dev, priv); -+ -+ return dsa_register_switch(priv->ds); -+} -+ -+static void -+qca8k_sw_remove(struct mdio_device *mdiodev) -+{ -+ struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev); -+ int i; -+ -+ if (!priv) -+ return; -+ -+ for (i = 0; i < QCA8K_NUM_PORTS; i++) -+ qca8k_port_set_status(priv, i, 0); -+ -+ dsa_unregister_switch(priv->ds); -+ -+ dev_set_drvdata(&mdiodev->dev, NULL); -+} -+ -+static void qca8k_sw_shutdown(struct mdio_device *mdiodev) -+{ -+ struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev); -+ -+ if (!priv) -+ return; -+ -+ dsa_switch_shutdown(priv->ds); -+ -+ dev_set_drvdata(&mdiodev->dev, NULL); -+} -+ -+#ifdef CONFIG_PM_SLEEP -+static void -+qca8k_set_pm(struct qca8k_priv *priv, int enable) -+{ -+ int port; -+ -+ for (port = 0; port < QCA8K_NUM_PORTS; port++) { -+ /* Do not enable on resume if the port was -+ * disabled before. 
-+ */ -+ if (!(priv->port_enabled_map & BIT(port))) -+ continue; -+ -+ qca8k_port_set_status(priv, port, enable); -+ } -+} -+ -+static int qca8k_suspend(struct device *dev) -+{ -+ struct qca8k_priv *priv = dev_get_drvdata(dev); -+ -+ qca8k_set_pm(priv, 0); -+ -+ return dsa_switch_suspend(priv->ds); -+} -+ -+static int qca8k_resume(struct device *dev) -+{ -+ struct qca8k_priv *priv = dev_get_drvdata(dev); -+ -+ qca8k_set_pm(priv, 1); -+ -+ return dsa_switch_resume(priv->ds); -+} -+#endif /* CONFIG_PM_SLEEP */ -+ -+static SIMPLE_DEV_PM_OPS(qca8k_pm_ops, -+ qca8k_suspend, qca8k_resume); -+ -+static const struct qca8k_info_ops qca8xxx_ops = { -+ .autocast_mib = qca8k_get_ethtool_stats_eth, -+}; -+ -+static const struct qca8k_match_data qca8327 = { -+ .id = QCA8K_ID_QCA8327, -+ .reduced_package = true, -+ .mib_count = QCA8K_QCA832X_MIB_COUNT, -+ .ops = &qca8xxx_ops, -+}; -+ -+static const struct qca8k_match_data qca8328 = { -+ .id = QCA8K_ID_QCA8327, -+ .mib_count = QCA8K_QCA832X_MIB_COUNT, -+ .ops = &qca8xxx_ops, -+}; -+ -+static const struct qca8k_match_data qca833x = { -+ .id = QCA8K_ID_QCA8337, -+ .mib_count = QCA8K_QCA833X_MIB_COUNT, -+ .ops = &qca8xxx_ops, -+}; -+ -+static const struct of_device_id qca8k_of_match[] = { -+ { .compatible = "qca,qca8327", .data = &qca8327 }, -+ { .compatible = "qca,qca8328", .data = &qca8328 }, -+ { .compatible = "qca,qca8334", .data = &qca833x }, -+ { .compatible = "qca,qca8337", .data = &qca833x }, -+ { /* sentinel */ }, -+}; -+ -+static struct mdio_driver qca8kmdio_driver = { -+ .probe = qca8k_sw_probe, -+ .remove = qca8k_sw_remove, -+ .shutdown = qca8k_sw_shutdown, -+ .mdiodrv.driver = { -+ .name = "qca8k", -+ .of_match_table = qca8k_of_match, -+ .pm = &qca8k_pm_ops, -+ }, -+}; -+ -+mdio_module_driver(qca8kmdio_driver); -+ -+MODULE_AUTHOR("Mathieu Olivari, John Crispin "); -+MODULE_DESCRIPTION("Driver for QCA8K ethernet switch family"); -+MODULE_LICENSE("GPL v2"); -+MODULE_ALIAS("platform:qca8k"); ---- /dev/null -+++ b/drivers/net/dsa/qca/qca8k-common.c -@@ -0,0 +1,63 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (C) 2009 Felix Fietkau -+ * Copyright (C) 2011-2012 Gabor Juhos -+ * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved. 
-+ * Copyright (c) 2016 John Crispin -+ */ -+ -+#include -+#include -+ -+#include "qca8k.h" -+ -+#define MIB_DESC(_s, _o, _n) \ -+ { \ -+ .size = (_s), \ -+ .offset = (_o), \ -+ .name = (_n), \ -+ } -+ -+const struct qca8k_mib_desc ar8327_mib[] = { -+ MIB_DESC(1, 0x00, "RxBroad"), -+ MIB_DESC(1, 0x04, "RxPause"), -+ MIB_DESC(1, 0x08, "RxMulti"), -+ MIB_DESC(1, 0x0c, "RxFcsErr"), -+ MIB_DESC(1, 0x10, "RxAlignErr"), -+ MIB_DESC(1, 0x14, "RxRunt"), -+ MIB_DESC(1, 0x18, "RxFragment"), -+ MIB_DESC(1, 0x1c, "Rx64Byte"), -+ MIB_DESC(1, 0x20, "Rx128Byte"), -+ MIB_DESC(1, 0x24, "Rx256Byte"), -+ MIB_DESC(1, 0x28, "Rx512Byte"), -+ MIB_DESC(1, 0x2c, "Rx1024Byte"), -+ MIB_DESC(1, 0x30, "Rx1518Byte"), -+ MIB_DESC(1, 0x34, "RxMaxByte"), -+ MIB_DESC(1, 0x38, "RxTooLong"), -+ MIB_DESC(2, 0x3c, "RxGoodByte"), -+ MIB_DESC(2, 0x44, "RxBadByte"), -+ MIB_DESC(1, 0x4c, "RxOverFlow"), -+ MIB_DESC(1, 0x50, "Filtered"), -+ MIB_DESC(1, 0x54, "TxBroad"), -+ MIB_DESC(1, 0x58, "TxPause"), -+ MIB_DESC(1, 0x5c, "TxMulti"), -+ MIB_DESC(1, 0x60, "TxUnderRun"), -+ MIB_DESC(1, 0x64, "Tx64Byte"), -+ MIB_DESC(1, 0x68, "Tx128Byte"), -+ MIB_DESC(1, 0x6c, "Tx256Byte"), -+ MIB_DESC(1, 0x70, "Tx512Byte"), -+ MIB_DESC(1, 0x74, "Tx1024Byte"), -+ MIB_DESC(1, 0x78, "Tx1518Byte"), -+ MIB_DESC(1, 0x7c, "TxMaxByte"), -+ MIB_DESC(1, 0x80, "TxOverSize"), -+ MIB_DESC(2, 0x84, "TxByte"), -+ MIB_DESC(1, 0x8c, "TxCollision"), -+ MIB_DESC(1, 0x90, "TxAbortCol"), -+ MIB_DESC(1, 0x94, "TxMultiCol"), -+ MIB_DESC(1, 0x98, "TxSingleCol"), -+ MIB_DESC(1, 0x9c, "TxExcDefer"), -+ MIB_DESC(1, 0xa0, "TxDefer"), -+ MIB_DESC(1, 0xa4, "TxLateCol"), -+ MIB_DESC(1, 0xa8, "RXUnicast"), -+ MIB_DESC(1, 0xac, "TXUnicast"), -+}; ---- a/drivers/net/dsa/qca/qca8k.h -+++ b/drivers/net/dsa/qca/qca8k.h -@@ -414,4 +414,7 @@ struct qca8k_fdb { - u8 mac[6]; - }; - -+/* Common setup function */ -+extern const struct qca8k_mib_desc ar8327_mib[]; -+ - #endif /* __QCA8K_H */ diff --git a/target/linux/generic/backport-6.1/771-v6.0-04-net-dsa-qca8k-move-qca8k-read-write-rmw-and-reg-tabl.patch b/target/linux/generic/backport-6.1/771-v6.0-04-net-dsa-qca8k-move-qca8k-read-write-rmw-and-reg-tabl.patch deleted file mode 100644 index 012ab854742a..000000000000 --- a/target/linux/generic/backport-6.1/771-v6.0-04-net-dsa-qca8k-move-qca8k-read-write-rmw-and-reg-tabl.patch +++ /dev/null @@ -1,135 +0,0 @@ -From d5f901eab2e9dfed1095995dfc98f231f4fd2971 Mon Sep 17 00:00:00 2001 -From: Christian Marangi -Date: Wed, 27 Jul 2022 13:35:13 +0200 -Subject: [PATCH 04/14] net: dsa: qca8k: move qca8k read/write/rmw and reg - table to common code - -The same reg table and read/write/rmw function are used by drivers -based on qca8k family switch. -Move them to common code to make it accessible also by other drivers. 
- -Signed-off-by: Christian Marangi -Reviewed-by: Vladimir Oltean -Signed-off-by: Jakub Kicinski ---- - drivers/net/dsa/qca/qca8k-8xxx.c | 42 ------------------------------ - drivers/net/dsa/qca/qca8k-common.c | 38 +++++++++++++++++++++++++++ - drivers/net/dsa/qca/qca8k.h | 6 +++++ - 3 files changed, 44 insertions(+), 42 deletions(-) - ---- a/drivers/net/dsa/qca/qca8k-8xxx.c -+++ b/drivers/net/dsa/qca/qca8k-8xxx.c -@@ -133,24 +133,6 @@ qca8k_set_page(struct qca8k_priv *priv, - return 0; - } - --static int --qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val) --{ -- return regmap_read(priv->regmap, reg, val); --} -- --static int --qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val) --{ -- return regmap_write(priv->regmap, reg, val); --} -- --static int --qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val) --{ -- return regmap_update_bits(priv->regmap, reg, mask, write_val); --} -- - static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb) - { - struct qca8k_mgmt_eth_data *mgmt_eth_data; -@@ -483,30 +465,6 @@ exit: - return ret; - } - --static const struct regmap_range qca8k_readable_ranges[] = { -- regmap_reg_range(0x0000, 0x00e4), /* Global control */ -- regmap_reg_range(0x0100, 0x0168), /* EEE control */ -- regmap_reg_range(0x0200, 0x0270), /* Parser control */ -- regmap_reg_range(0x0400, 0x0454), /* ACL */ -- regmap_reg_range(0x0600, 0x0718), /* Lookup */ -- regmap_reg_range(0x0800, 0x0b70), /* QM */ -- regmap_reg_range(0x0c00, 0x0c80), /* PKT */ -- regmap_reg_range(0x0e00, 0x0e98), /* L3 */ -- regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */ -- regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */ -- regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */ -- regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */ -- regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */ -- regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */ -- regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */ -- --}; -- --static const struct regmap_access_table qca8k_readable_table = { -- .yes_ranges = qca8k_readable_ranges, -- .n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges), --}; -- - static struct regmap_config qca8k_regmap_config = { - .reg_bits = 16, - .val_bits = 32, ---- a/drivers/net/dsa/qca/qca8k-common.c -+++ b/drivers/net/dsa/qca/qca8k-common.c -@@ -61,3 +61,41 @@ const struct qca8k_mib_desc ar8327_mib[] - MIB_DESC(1, 0xa8, "RXUnicast"), - MIB_DESC(1, 0xac, "TXUnicast"), - }; -+ -+int qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val) -+{ -+ return regmap_read(priv->regmap, reg, val); -+} -+ -+int qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val) -+{ -+ return regmap_write(priv->regmap, reg, val); -+} -+ -+int qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val) -+{ -+ return regmap_update_bits(priv->regmap, reg, mask, write_val); -+} -+ -+static const struct regmap_range qca8k_readable_ranges[] = { -+ regmap_reg_range(0x0000, 0x00e4), /* Global control */ -+ regmap_reg_range(0x0100, 0x0168), /* EEE control */ -+ regmap_reg_range(0x0200, 0x0270), /* Parser control */ -+ regmap_reg_range(0x0400, 0x0454), /* ACL */ -+ regmap_reg_range(0x0600, 0x0718), /* Lookup */ -+ regmap_reg_range(0x0800, 0x0b70), /* QM */ -+ regmap_reg_range(0x0c00, 0x0c80), /* PKT */ -+ regmap_reg_range(0x0e00, 0x0e98), /* L3 */ -+ regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */ -+ regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */ -+ regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */ -+ regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */ -+ regmap_reg_range(0x1400, 
0x14ac), /* MIB - Port4 */ -+ regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */ -+ regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */ -+}; -+ -+const struct regmap_access_table qca8k_readable_table = { -+ .yes_ranges = qca8k_readable_ranges, -+ .n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges), -+}; ---- a/drivers/net/dsa/qca/qca8k.h -+++ b/drivers/net/dsa/qca/qca8k.h -@@ -416,5 +416,11 @@ struct qca8k_fdb { - - /* Common setup function */ - extern const struct qca8k_mib_desc ar8327_mib[]; -+extern const struct regmap_access_table qca8k_readable_table; -+ -+/* Common read/write/rmw function */ -+int qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val); -+int qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val); -+int qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val); - - #endif /* __QCA8K_H */ diff --git a/target/linux/generic/backport-6.1/771-v6.0-05-net-dsa-qca8k-move-qca8k-bulk-read-write-helper-to-c.patch b/target/linux/generic/backport-6.1/771-v6.0-05-net-dsa-qca8k-move-qca8k-bulk-read-write-helper-to-c.patch deleted file mode 100644 index 0ed7ed41fbe6..000000000000 --- a/target/linux/generic/backport-6.1/771-v6.0-05-net-dsa-qca8k-move-qca8k-bulk-read-write-helper-to-c.patch +++ /dev/null @@ -1,145 +0,0 @@ -From 910746444313dc463396cd63024cdf54ef04ef39 Mon Sep 17 00:00:00 2001 -From: Christian Marangi -Date: Wed, 27 Jul 2022 13:35:14 +0200 -Subject: [PATCH 05/14] net: dsa: qca8k: move qca8k bulk read/write helper to - common code - -The same ATU function are used by drivers based on qca8k family switch. -Move the bulk read/write helper to common code to declare these shared -ATU functions in common code. -These helper will be dropped when regmap correctly support bulk -read/write. - -Signed-off-by: Christian Marangi -Reviewed-by: Vladimir Oltean -Signed-off-by: Jakub Kicinski ---- - drivers/net/dsa/qca/qca8k-8xxx.c | 39 ++---------------------------- - drivers/net/dsa/qca/qca8k-common.c | 39 ++++++++++++++++++++++++++++++ - drivers/net/dsa/qca/qca8k.h | 8 ++++++ - 3 files changed, 49 insertions(+), 37 deletions(-) - ---- a/drivers/net/dsa/qca/qca8k-8xxx.c -+++ b/drivers/net/dsa/qca/qca8k-8xxx.c -@@ -343,43 +343,6 @@ qca8k_regmap_update_bits_eth(struct qca8 - } - - static int --qca8k_bulk_read(struct qca8k_priv *priv, u32 reg, u32 *val, int len) --{ -- int i, count = len / sizeof(u32), ret; -- -- if (priv->mgmt_master && !qca8k_read_eth(priv, reg, val, len)) -- return 0; -- -- for (i = 0; i < count; i++) { -- ret = regmap_read(priv->regmap, reg + (i * 4), val + i); -- if (ret < 0) -- return ret; -- } -- -- return 0; --} -- --static int --qca8k_bulk_write(struct qca8k_priv *priv, u32 reg, u32 *val, int len) --{ -- int i, count = len / sizeof(u32), ret; -- u32 tmp; -- -- if (priv->mgmt_master && !qca8k_write_eth(priv, reg, val, len)) -- return 0; -- -- for (i = 0; i < count; i++) { -- tmp = val[i]; -- -- ret = regmap_write(priv->regmap, reg + (i * 4), tmp); -- if (ret < 0) -- return ret; -- } -- -- return 0; --} -- --static int - qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val) - { - struct qca8k_priv *priv = (struct qca8k_priv *)ctx; -@@ -3096,6 +3059,8 @@ static SIMPLE_DEV_PM_OPS(qca8k_pm_ops, - - static const struct qca8k_info_ops qca8xxx_ops = { - .autocast_mib = qca8k_get_ethtool_stats_eth, -+ .read_eth = qca8k_read_eth, -+ .write_eth = qca8k_write_eth, - }; - - static const struct qca8k_match_data qca8327 = { ---- a/drivers/net/dsa/qca/qca8k-common.c -+++ b/drivers/net/dsa/qca/qca8k-common.c -@@ -99,3 +99,42 @@ const struct regmap_access_table 
qca8k_r - .yes_ranges = qca8k_readable_ranges, - .n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges), - }; -+ -+/* TODO: remove these extra ops when we can support regmap bulk read/write */ -+int qca8k_bulk_read(struct qca8k_priv *priv, u32 reg, u32 *val, int len) -+{ -+ int i, count = len / sizeof(u32), ret; -+ -+ if (priv->mgmt_master && priv->info->ops->read_eth && -+ !priv->info->ops->read_eth(priv, reg, val, len)) -+ return 0; -+ -+ for (i = 0; i < count; i++) { -+ ret = regmap_read(priv->regmap, reg + (i * 4), val + i); -+ if (ret < 0) -+ return ret; -+ } -+ -+ return 0; -+} -+ -+/* TODO: remove these extra ops when we can support regmap bulk read/write */ -+int qca8k_bulk_write(struct qca8k_priv *priv, u32 reg, u32 *val, int len) -+{ -+ int i, count = len / sizeof(u32), ret; -+ u32 tmp; -+ -+ if (priv->mgmt_master && priv->info->ops->write_eth && -+ !priv->info->ops->write_eth(priv, reg, val, len)) -+ return 0; -+ -+ for (i = 0; i < count; i++) { -+ tmp = val[i]; -+ -+ ret = regmap_write(priv->regmap, reg + (i * 4), tmp); -+ if (ret < 0) -+ return ret; -+ } -+ -+ return 0; -+} ---- a/drivers/net/dsa/qca/qca8k.h -+++ b/drivers/net/dsa/qca/qca8k.h -@@ -324,8 +324,13 @@ enum qca8k_mid_cmd { - QCA8K_MIB_CAST = 3, - }; - -+struct qca8k_priv; -+ - struct qca8k_info_ops { - int (*autocast_mib)(struct dsa_switch *ds, int port, u64 *data); -+ /* TODO: remove these extra ops when we can support regmap bulk read/write */ -+ int (*read_eth)(struct qca8k_priv *priv, u32 reg, u32 *val, int len); -+ int (*write_eth)(struct qca8k_priv *priv, u32 reg, u32 *val, int len); - }; - - struct qca8k_match_data { -@@ -423,4 +428,7 @@ int qca8k_read(struct qca8k_priv *priv, - int qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val); - int qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val); - -+int qca8k_bulk_read(struct qca8k_priv *priv, u32 reg, u32 *val, int len); -+int qca8k_bulk_write(struct qca8k_priv *priv, u32 reg, u32 *val, int len); -+ - #endif /* __QCA8K_H */ diff --git a/target/linux/generic/backport-6.1/771-v6.0-06-net-dsa-qca8k-move-mib-init-function-to-common-code.patch b/target/linux/generic/backport-6.1/771-v6.0-06-net-dsa-qca8k-move-mib-init-function-to-common-code.patch deleted file mode 100644 index a39a55b89b4b..000000000000 --- a/target/linux/generic/backport-6.1/771-v6.0-06-net-dsa-qca8k-move-mib-init-function-to-common-code.patch +++ /dev/null @@ -1,137 +0,0 @@ -From fce1ec0c4e2d03d9c62ffc615a42bdba78eb4c14 Mon Sep 17 00:00:00 2001 -From: Christian Marangi -Date: Wed, 27 Jul 2022 13:35:15 +0200 -Subject: [PATCH 06/14] net: dsa: qca8k: move mib init function to common code - -The same mib function is used by drivers based on qca8k family switch. -Move it to common code to make it accessible also by other drivers. 
- -Signed-off-by: Christian Marangi -Reviewed-by: Vladimir Oltean -Signed-off-by: Jakub Kicinski ---- - drivers/net/dsa/qca/qca8k-8xxx.c | 37 ------------------------------ - drivers/net/dsa/qca/qca8k-common.c | 35 ++++++++++++++++++++++++++++ - drivers/net/dsa/qca/qca8k.h | 4 ++++ - 3 files changed, 39 insertions(+), 37 deletions(-) - ---- a/drivers/net/dsa/qca/qca8k-8xxx.c -+++ b/drivers/net/dsa/qca/qca8k-8xxx.c -@@ -442,15 +442,6 @@ static struct regmap_config qca8k_regmap - }; - - static int --qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask) --{ -- u32 val; -- -- return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0, -- QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC); --} -- --static int - qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb) - { - u32 reg[3]; -@@ -777,34 +768,6 @@ out: - return ret; - } - --static int --qca8k_mib_init(struct qca8k_priv *priv) --{ -- int ret; -- -- mutex_lock(&priv->reg_mutex); -- ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB, -- QCA8K_MIB_FUNC | QCA8K_MIB_BUSY, -- FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_FLUSH) | -- QCA8K_MIB_BUSY); -- if (ret) -- goto exit; -- -- ret = qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY); -- if (ret) -- goto exit; -- -- ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP); -- if (ret) -- goto exit; -- -- ret = qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB); -- --exit: -- mutex_unlock(&priv->reg_mutex); -- return ret; --} -- - static void - qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable) - { ---- a/drivers/net/dsa/qca/qca8k-common.c -+++ b/drivers/net/dsa/qca/qca8k-common.c -@@ -7,6 +7,7 @@ - */ - - #include -+#include - #include - - #include "qca8k.h" -@@ -138,3 +139,38 @@ int qca8k_bulk_write(struct qca8k_priv * - - return 0; - } -+ -+int qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask) -+{ -+ u32 val; -+ -+ return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0, -+ QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC); -+} -+ -+int qca8k_mib_init(struct qca8k_priv *priv) -+{ -+ int ret; -+ -+ mutex_lock(&priv->reg_mutex); -+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB, -+ QCA8K_MIB_FUNC | QCA8K_MIB_BUSY, -+ FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_FLUSH) | -+ QCA8K_MIB_BUSY); -+ if (ret) -+ goto exit; -+ -+ ret = qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY); -+ if (ret) -+ goto exit; -+ -+ ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP); -+ if (ret) -+ goto exit; -+ -+ ret = qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB); -+ -+exit: -+ mutex_unlock(&priv->reg_mutex); -+ return ret; -+} ---- a/drivers/net/dsa/qca/qca8k.h -+++ b/drivers/net/dsa/qca/qca8k.h -@@ -422,6 +422,7 @@ struct qca8k_fdb { - /* Common setup function */ - extern const struct qca8k_mib_desc ar8327_mib[]; - extern const struct regmap_access_table qca8k_readable_table; -+int qca8k_mib_init(struct qca8k_priv *priv); - - /* Common read/write/rmw function */ - int qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val); -@@ -431,4 +432,7 @@ int qca8k_rmw(struct qca8k_priv *priv, u - int qca8k_bulk_read(struct qca8k_priv *priv, u32 reg, u32 *val, int len); - int qca8k_bulk_write(struct qca8k_priv *priv, u32 reg, u32 *val, int len); - -+/* Common ops function */ -+int qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask); -+ - #endif /* __QCA8K_H */ diff --git a/target/linux/generic/backport-6.1/771-v6.0-07-net-dsa-qca8k-move-port-set-status-eee-ethtool-stats.patch 
b/target/linux/generic/backport-6.1/771-v6.0-07-net-dsa-qca8k-move-port-set-status-eee-ethtool-stats.patch deleted file mode 100644 index 6fd1c66b0ae1..000000000000 --- a/target/linux/generic/backport-6.1/771-v6.0-07-net-dsa-qca8k-move-port-set-status-eee-ethtool-stats.patch +++ /dev/null @@ -1,281 +0,0 @@ -From 472fcea160f27a5d9b7526093d9d8d89ba0b6137 Mon Sep 17 00:00:00 2001 -From: Christian Marangi -Date: Wed, 27 Jul 2022 13:35:16 +0200 -Subject: [PATCH 07/14] net: dsa: qca8k: move port set status/eee/ethtool stats - function to common code - -The same logic to disable/enable port, set eee and get ethtool stats is -used by drivers based on qca8k family switch. -Move it to common code to make it accessible also by other drivers. -While at it also drop unnecessary qca8k_priv cast for void pointers. - -Signed-off-by: Christian Marangi -Reviewed-by: Vladimir Oltean -Signed-off-by: Jakub Kicinski ---- - drivers/net/dsa/qca/qca8k-8xxx.c | 105 ----------------------------- - drivers/net/dsa/qca/qca8k-common.c | 102 ++++++++++++++++++++++++++++ - drivers/net/dsa/qca/qca8k.h | 11 +++ - 3 files changed, 113 insertions(+), 105 deletions(-) - ---- a/drivers/net/dsa/qca/qca8k-8xxx.c -+++ b/drivers/net/dsa/qca/qca8k-8xxx.c -@@ -768,21 +768,6 @@ out: - return ret; - } - --static void --qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable) --{ -- u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC; -- -- /* Port 0 and 6 have no internal PHY */ -- if (port > 0 && port < 6) -- mask |= QCA8K_PORT_STATUS_LINK_AUTO; -- -- if (enable) -- regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask); -- else -- regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask); --} -- - static int - qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data, - struct sk_buff *read_skb, u32 *val) -@@ -1974,20 +1959,6 @@ qca8k_phylink_mac_link_up(struct dsa_swi - qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg); - } - --static void --qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data) --{ -- struct qca8k_priv *priv = ds->priv; -- int i; -- -- if (stringset != ETH_SS_STATS) -- return; -- -- for (i = 0; i < priv->info->mib_count; i++) -- strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name, -- ETH_GSTRING_LEN); --} -- - static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *skb) - { - struct qca8k_mib_eth_data *mib_eth_data; -@@ -2078,82 +2049,6 @@ exit: - } - - static void --qca8k_get_ethtool_stats(struct dsa_switch *ds, int port, -- uint64_t *data) --{ -- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -- const struct qca8k_mib_desc *mib; -- u32 reg, i, val; -- u32 hi = 0; -- int ret; -- -- if (priv->mgmt_master && priv->info->ops->autocast_mib && -- priv->info->ops->autocast_mib(ds, port, data) > 0) -- return; -- -- for (i = 0; i < priv->info->mib_count; i++) { -- mib = &ar8327_mib[i]; -- reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset; -- -- ret = qca8k_read(priv, reg, &val); -- if (ret < 0) -- continue; -- -- if (mib->size == 2) { -- ret = qca8k_read(priv, reg + 4, &hi); -- if (ret < 0) -- continue; -- } -- -- data[i] = val; -- if (mib->size == 2) -- data[i] |= (u64)hi << 32; -- } --} -- --static int --qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset) --{ -- struct qca8k_priv *priv = ds->priv; -- -- if (sset != ETH_SS_STATS) -- return 0; -- -- return priv->info->mib_count; --} -- --static int --qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee) --{ -- struct qca8k_priv 
*priv = (struct qca8k_priv *)ds->priv; -- u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port); -- u32 reg; -- int ret; -- -- mutex_lock(&priv->reg_mutex); -- ret = qca8k_read(priv, QCA8K_REG_EEE_CTRL, ®); -- if (ret < 0) -- goto exit; -- -- if (eee->eee_enabled) -- reg |= lpi_en; -- else -- reg &= ~lpi_en; -- ret = qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg); -- --exit: -- mutex_unlock(&priv->reg_mutex); -- return ret; --} -- --static int --qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e) --{ -- /* Nothing to do on the port's MAC */ -- return 0; --} -- --static void - qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) - { - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; ---- a/drivers/net/dsa/qca/qca8k-common.c -+++ b/drivers/net/dsa/qca/qca8k-common.c -@@ -174,3 +174,105 @@ exit: - mutex_unlock(&priv->reg_mutex); - return ret; - } -+ -+void qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable) -+{ -+ u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC; -+ -+ /* Port 0 and 6 have no internal PHY */ -+ if (port > 0 && port < 6) -+ mask |= QCA8K_PORT_STATUS_LINK_AUTO; -+ -+ if (enable) -+ regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask); -+ else -+ regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask); -+} -+ -+void qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, -+ uint8_t *data) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ int i; -+ -+ if (stringset != ETH_SS_STATS) -+ return; -+ -+ for (i = 0; i < priv->info->mib_count; i++) -+ strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name, -+ ETH_GSTRING_LEN); -+} -+ -+void qca8k_get_ethtool_stats(struct dsa_switch *ds, int port, -+ uint64_t *data) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ const struct qca8k_mib_desc *mib; -+ u32 reg, i, val; -+ u32 hi = 0; -+ int ret; -+ -+ if (priv->mgmt_master && priv->info->ops->autocast_mib && -+ priv->info->ops->autocast_mib(ds, port, data) > 0) -+ return; -+ -+ for (i = 0; i < priv->info->mib_count; i++) { -+ mib = &ar8327_mib[i]; -+ reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset; -+ -+ ret = qca8k_read(priv, reg, &val); -+ if (ret < 0) -+ continue; -+ -+ if (mib->size == 2) { -+ ret = qca8k_read(priv, reg + 4, &hi); -+ if (ret < 0) -+ continue; -+ } -+ -+ data[i] = val; -+ if (mib->size == 2) -+ data[i] |= (u64)hi << 32; -+ } -+} -+ -+int qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ -+ if (sset != ETH_SS_STATS) -+ return 0; -+ -+ return priv->info->mib_count; -+} -+ -+int qca8k_set_mac_eee(struct dsa_switch *ds, int port, -+ struct ethtool_eee *eee) -+{ -+ u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port); -+ struct qca8k_priv *priv = ds->priv; -+ u32 reg; -+ int ret; -+ -+ mutex_lock(&priv->reg_mutex); -+ ret = qca8k_read(priv, QCA8K_REG_EEE_CTRL, ®); -+ if (ret < 0) -+ goto exit; -+ -+ if (eee->eee_enabled) -+ reg |= lpi_en; -+ else -+ reg &= ~lpi_en; -+ ret = qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg); -+ -+exit: -+ mutex_unlock(&priv->reg_mutex); -+ return ret; -+} -+ -+int qca8k_get_mac_eee(struct dsa_switch *ds, int port, -+ struct ethtool_eee *e) -+{ -+ /* Nothing to do on the port's MAC */ -+ return 0; -+} ---- a/drivers/net/dsa/qca/qca8k.h -+++ b/drivers/net/dsa/qca/qca8k.h -@@ -423,6 +423,7 @@ struct qca8k_fdb { - extern const struct qca8k_mib_desc ar8327_mib[]; - extern const struct regmap_access_table qca8k_readable_table; - int qca8k_mib_init(struct qca8k_priv *priv); -+void qca8k_port_set_status(struct 
qca8k_priv *priv, int port, int enable); - - /* Common read/write/rmw function */ - int qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val); -@@ -435,4 +436,14 @@ int qca8k_bulk_write(struct qca8k_priv * - /* Common ops function */ - int qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask); - -+/* Common ethtool stats function */ -+void qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data); -+void qca8k_get_ethtool_stats(struct dsa_switch *ds, int port, -+ uint64_t *data); -+int qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset); -+ -+/* Common eee function */ -+int qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee); -+int qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e); -+ - #endif /* __QCA8K_H */ diff --git a/target/linux/generic/backport-6.1/771-v6.0-08-net-dsa-qca8k-move-bridge-functions-to-common-code.patch b/target/linux/generic/backport-6.1/771-v6.0-08-net-dsa-qca8k-move-bridge-functions-to-common-code.patch deleted file mode 100644 index 3ca682d72ccc..000000000000 --- a/target/linux/generic/backport-6.1/771-v6.0-08-net-dsa-qca8k-move-bridge-functions-to-common-code.patch +++ /dev/null @@ -1,237 +0,0 @@ -From fd3cae2f3ac190d06e48f43739237e02f9dc51ff Mon Sep 17 00:00:00 2001 -From: Christian Marangi -Date: Wed, 27 Jul 2022 13:35:17 +0200 -Subject: [PATCH 08/14] net: dsa: qca8k: move bridge functions to common code - -The same bridge functions are used by drivers based on qca8k family -switch. Move them to common code to make them accessible also by other -drivers. -While at it also drop unnecessary qca8k_priv cast for void pointers. - -Signed-off-by: Christian Marangi -Reviewed-by: Vladimir Oltean -Signed-off-by: Jakub Kicinski ---- - drivers/net/dsa/qca/qca8k-8xxx.c | 93 ------------------------------ - drivers/net/dsa/qca/qca8k-common.c | 93 ++++++++++++++++++++++++++++++ - drivers/net/dsa/qca/qca8k.h | 9 +++ - 3 files changed, 102 insertions(+), 93 deletions(-) - ---- a/drivers/net/dsa/qca/qca8k-8xxx.c -+++ b/drivers/net/dsa/qca/qca8k-8xxx.c -@@ -2049,97 +2049,6 @@ exit: - } - - static void --qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) --{ -- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -- u32 stp_state; -- -- switch (state) { -- case BR_STATE_DISABLED: -- stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED; -- break; -- case BR_STATE_BLOCKING: -- stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING; -- break; -- case BR_STATE_LISTENING: -- stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING; -- break; -- case BR_STATE_LEARNING: -- stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING; -- break; -- case BR_STATE_FORWARDING: -- default: -- stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD; -- break; -- } -- -- qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), -- QCA8K_PORT_LOOKUP_STATE_MASK, stp_state); --} -- --static int --qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br) --{ -- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -- int port_mask, cpu_port; -- int i, ret; -- -- cpu_port = dsa_to_port(ds, port)->cpu_dp->index; -- port_mask = BIT(cpu_port); -- -- for (i = 0; i < QCA8K_NUM_PORTS; i++) { -- if (dsa_is_cpu_port(ds, i)) -- continue; -- if (dsa_to_port(ds, i)->bridge_dev != br) -- continue; -- /* Add this port to the portvlan mask of the other ports -- * in the bridge -- */ -- ret = regmap_set_bits(priv->regmap, -- QCA8K_PORT_LOOKUP_CTRL(i), -- BIT(port)); -- if (ret) -- return ret; -- if (i != port) -- port_mask |= BIT(i); -- } -- -- /* Add 
all other ports to this ports portvlan mask */ -- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), -- QCA8K_PORT_LOOKUP_MEMBER, port_mask); -- -- return ret; --} -- --static void --qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br) --{ -- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -- int cpu_port, i; -- -- cpu_port = dsa_to_port(ds, port)->cpu_dp->index; -- -- for (i = 0; i < QCA8K_NUM_PORTS; i++) { -- if (dsa_is_cpu_port(ds, i)) -- continue; -- if (dsa_to_port(ds, i)->bridge_dev != br) -- continue; -- /* Remove this port to the portvlan mask of the other ports -- * in the bridge -- */ -- regmap_clear_bits(priv->regmap, -- QCA8K_PORT_LOOKUP_CTRL(i), -- BIT(port)); -- } -- -- /* Set the cpu port to be the only one in the portvlan mask of -- * this port -- */ -- qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), -- QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port)); --} -- --static void - qca8k_port_fast_age(struct dsa_switch *ds, int port) - { - struct qca8k_priv *priv = ds->priv; ---- a/drivers/net/dsa/qca/qca8k-common.c -+++ b/drivers/net/dsa/qca/qca8k-common.c -@@ -9,6 +9,7 @@ - #include - #include - #include -+#include - - #include "qca8k.h" - -@@ -276,3 +277,93 @@ int qca8k_get_mac_eee(struct dsa_switch - /* Nothing to do on the port's MAC */ - return 0; - } -+ -+void qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ u32 stp_state; -+ -+ switch (state) { -+ case BR_STATE_DISABLED: -+ stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED; -+ break; -+ case BR_STATE_BLOCKING: -+ stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING; -+ break; -+ case BR_STATE_LISTENING: -+ stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING; -+ break; -+ case BR_STATE_LEARNING: -+ stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING; -+ break; -+ case BR_STATE_FORWARDING: -+ default: -+ stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD; -+ break; -+ } -+ -+ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), -+ QCA8K_PORT_LOOKUP_STATE_MASK, stp_state); -+} -+ -+int qca8k_port_bridge_join(struct dsa_switch *ds, int port, -+ struct net_device *br) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ int port_mask, cpu_port; -+ int i, ret; -+ -+ cpu_port = dsa_to_port(ds, port)->cpu_dp->index; -+ port_mask = BIT(cpu_port); -+ -+ for (i = 0; i < QCA8K_NUM_PORTS; i++) { -+ if (dsa_is_cpu_port(ds, i)) -+ continue; -+ if (dsa_to_port(ds, i)->bridge_dev != br) -+ continue; -+ /* Add this port to the portvlan mask of the other ports -+ * in the bridge -+ */ -+ ret = regmap_set_bits(priv->regmap, -+ QCA8K_PORT_LOOKUP_CTRL(i), -+ BIT(port)); -+ if (ret) -+ return ret; -+ if (i != port) -+ port_mask |= BIT(i); -+ } -+ -+ /* Add all other ports to this ports portvlan mask */ -+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), -+ QCA8K_PORT_LOOKUP_MEMBER, port_mask); -+ -+ return ret; -+} -+ -+void qca8k_port_bridge_leave(struct dsa_switch *ds, int port, -+ struct net_device *br) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ int cpu_port, i; -+ -+ cpu_port = dsa_to_port(ds, port)->cpu_dp->index; -+ -+ for (i = 0; i < QCA8K_NUM_PORTS; i++) { -+ if (dsa_is_cpu_port(ds, i)) -+ continue; -+ if (dsa_to_port(ds, i)->bridge_dev != br) -+ continue; -+ /* Remove this port to the portvlan mask of the other ports -+ * in the bridge -+ */ -+ regmap_clear_bits(priv->regmap, -+ QCA8K_PORT_LOOKUP_CTRL(i), -+ BIT(port)); -+ } -+ -+ /* Set the cpu port to be the only one in the portvlan mask of -+ * this port -+ */ -+ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), -+ 
QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port)); -+} ---- a/drivers/net/dsa/qca/qca8k.h -+++ b/drivers/net/dsa/qca/qca8k.h -@@ -446,4 +446,11 @@ int qca8k_get_sset_count(struct dsa_swit - int qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee); - int qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e); - -+/* Common bridge function */ -+void qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state); -+int qca8k_port_bridge_join(struct dsa_switch *ds, int port, -+ struct net_device *br); -+void qca8k_port_bridge_leave(struct dsa_switch *ds, int port, -+ struct net_device *br); -+ - #endif /* __QCA8K_H */ diff --git a/target/linux/generic/backport-6.1/771-v6.0-09-net-dsa-qca8k-move-set-age-MTU-port-enable-disable-f.patch b/target/linux/generic/backport-6.1/771-v6.0-09-net-dsa-qca8k-move-set-age-MTU-port-enable-disable-f.patch deleted file mode 100644 index e3414408d648..000000000000 --- a/target/linux/generic/backport-6.1/771-v6.0-09-net-dsa-qca8k-move-set-age-MTU-port-enable-disable-f.patch +++ /dev/null @@ -1,227 +0,0 @@ -From b3a302b171f73425b41de8d3357fae3fa7057322 Mon Sep 17 00:00:00 2001 -From: Christian Marangi -Date: Wed, 27 Jul 2022 13:35:18 +0200 -Subject: [PATCH 09/14] net: dsa: qca8k: move set age/MTU/port enable/disable - functions to common code - -The same set age, MTU and port enable/disable function are used by -driver based on qca8k family switch. -Move them to common code to make them accessible also by other drivers. -While at it also drop unnecessary qca8k_priv cast for void pointers. - -Signed-off-by: Christian Marangi -Reviewed-by: Vladimir Oltean -Signed-off-by: Jakub Kicinski ---- - drivers/net/dsa/qca/qca8k-8xxx.c | 88 ------------------------------ - drivers/net/dsa/qca/qca8k-common.c | 85 +++++++++++++++++++++++++++++ - drivers/net/dsa/qca/qca8k.h | 12 ++++ - 3 files changed, 97 insertions(+), 88 deletions(-) - ---- a/drivers/net/dsa/qca/qca8k-8xxx.c -+++ b/drivers/net/dsa/qca/qca8k-8xxx.c -@@ -2059,94 +2059,6 @@ qca8k_port_fast_age(struct dsa_switch *d - } - - static int --qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs) --{ -- struct qca8k_priv *priv = ds->priv; -- unsigned int secs = msecs / 1000; -- u32 val; -- -- /* AGE_TIME reg is set in 7s step */ -- val = secs / 7; -- -- /* Handle case with 0 as val to NOT disable -- * learning -- */ -- if (!val) -- val = 1; -- -- return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL, QCA8K_ATU_AGE_TIME_MASK, -- QCA8K_ATU_AGE_TIME(val)); --} -- --static int --qca8k_port_enable(struct dsa_switch *ds, int port, -- struct phy_device *phy) --{ -- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -- -- qca8k_port_set_status(priv, port, 1); -- priv->port_enabled_map |= BIT(port); -- -- if (dsa_is_user_port(ds, port)) -- phy_support_asym_pause(phy); -- -- return 0; --} -- --static void --qca8k_port_disable(struct dsa_switch *ds, int port) --{ -- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -- -- qca8k_port_set_status(priv, port, 0); -- priv->port_enabled_map &= ~BIT(port); --} -- --static int --qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu) --{ -- struct qca8k_priv *priv = ds->priv; -- int ret; -- -- /* We have only have a general MTU setting. -- * DSA always set the CPU port's MTU to the largest MTU of the slave -- * ports. -- * Setting MTU just for the CPU port is sufficient to correctly set a -- * value for every port. 
-- */ -- if (!dsa_is_cpu_port(ds, port)) -- return 0; -- -- /* To change the MAX_FRAME_SIZE the cpu ports must be off or -- * the switch panics. -- * Turn off both cpu ports before applying the new value to prevent -- * this. -- */ -- if (priv->port_enabled_map & BIT(0)) -- qca8k_port_set_status(priv, 0, 0); -- -- if (priv->port_enabled_map & BIT(6)) -- qca8k_port_set_status(priv, 6, 0); -- -- /* Include L2 header / FCS length */ -- ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu + ETH_HLEN + ETH_FCS_LEN); -- -- if (priv->port_enabled_map & BIT(0)) -- qca8k_port_set_status(priv, 0, 1); -- -- if (priv->port_enabled_map & BIT(6)) -- qca8k_port_set_status(priv, 6, 1); -- -- return ret; --} -- --static int --qca8k_port_max_mtu(struct dsa_switch *ds, int port) --{ -- return QCA8K_MAX_MTU; --} -- --static int - qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr, - u16 port_mask, u16 vid) - { ---- a/drivers/net/dsa/qca/qca8k-common.c -+++ b/drivers/net/dsa/qca/qca8k-common.c -@@ -367,3 +367,88 @@ void qca8k_port_bridge_leave(struct dsa_ - qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), - QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port)); - } -+ -+int qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ unsigned int secs = msecs / 1000; -+ u32 val; -+ -+ /* AGE_TIME reg is set in 7s step */ -+ val = secs / 7; -+ -+ /* Handle case with 0 as val to NOT disable -+ * learning -+ */ -+ if (!val) -+ val = 1; -+ -+ return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL, -+ QCA8K_ATU_AGE_TIME_MASK, -+ QCA8K_ATU_AGE_TIME(val)); -+} -+ -+int qca8k_port_enable(struct dsa_switch *ds, int port, -+ struct phy_device *phy) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ -+ qca8k_port_set_status(priv, port, 1); -+ priv->port_enabled_map |= BIT(port); -+ -+ if (dsa_is_user_port(ds, port)) -+ phy_support_asym_pause(phy); -+ -+ return 0; -+} -+ -+void qca8k_port_disable(struct dsa_switch *ds, int port) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ -+ qca8k_port_set_status(priv, port, 0); -+ priv->port_enabled_map &= ~BIT(port); -+} -+ -+int qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ int ret; -+ -+ /* We have only have a general MTU setting. -+ * DSA always set the CPU port's MTU to the largest MTU of the slave -+ * ports. -+ * Setting MTU just for the CPU port is sufficient to correctly set a -+ * value for every port. -+ */ -+ if (!dsa_is_cpu_port(ds, port)) -+ return 0; -+ -+ /* To change the MAX_FRAME_SIZE the cpu ports must be off or -+ * the switch panics. -+ * Turn off both cpu ports before applying the new value to prevent -+ * this. 
-+ */ -+ if (priv->port_enabled_map & BIT(0)) -+ qca8k_port_set_status(priv, 0, 0); -+ -+ if (priv->port_enabled_map & BIT(6)) -+ qca8k_port_set_status(priv, 6, 0); -+ -+ /* Include L2 header / FCS length */ -+ ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu + -+ ETH_HLEN + ETH_FCS_LEN); -+ -+ if (priv->port_enabled_map & BIT(0)) -+ qca8k_port_set_status(priv, 0, 1); -+ -+ if (priv->port_enabled_map & BIT(6)) -+ qca8k_port_set_status(priv, 6, 1); -+ -+ return ret; -+} -+ -+int qca8k_port_max_mtu(struct dsa_switch *ds, int port) -+{ -+ return QCA8K_MAX_MTU; -+} ---- a/drivers/net/dsa/qca/qca8k.h -+++ b/drivers/net/dsa/qca/qca8k.h -@@ -453,4 +453,16 @@ int qca8k_port_bridge_join(struct dsa_sw - void qca8k_port_bridge_leave(struct dsa_switch *ds, int port, - struct net_device *br); - -+/* Common port enable/disable function */ -+int qca8k_port_enable(struct dsa_switch *ds, int port, -+ struct phy_device *phy); -+void qca8k_port_disable(struct dsa_switch *ds, int port); -+ -+/* Common MTU function */ -+int qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu); -+int qca8k_port_max_mtu(struct dsa_switch *ds, int port); -+ -+/* Common fast age function */ -+int qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs); -+ - #endif /* __QCA8K_H */ diff --git a/target/linux/generic/backport-6.1/771-v6.0-10-net-dsa-qca8k-move-port-FDB-MDB-function-to-common-c.patch b/target/linux/generic/backport-6.1/771-v6.0-10-net-dsa-qca8k-move-port-FDB-MDB-function-to-common-c.patch deleted file mode 100644 index 96468ae74e9a..000000000000 --- a/target/linux/generic/backport-6.1/771-v6.0-10-net-dsa-qca8k-move-port-FDB-MDB-function-to-common-c.patch +++ /dev/null @@ -1,704 +0,0 @@ -From 2e5bd96eea86a246b4de3bf756f7a11b43e6187d Mon Sep 17 00:00:00 2001 -From: Christian Marangi -Date: Wed, 27 Jul 2022 13:35:19 +0200 -Subject: [PATCH 10/14] net: dsa: qca8k: move port FDB/MDB function to common - code - -The same port FDB/MDB function are used by drivers based on qca8k family -switch. Move them to common code to make them accessible also by other -drivers. 
-Also drop bulk read/write functions and make them static - -Signed-off-by: Christian Marangi -Reviewed-by: Vladimir Oltean -Signed-off-by: Jakub Kicinski ---- - drivers/net/dsa/qca/qca8k-8xxx.c | 306 ----------------------------- - drivers/net/dsa/qca/qca8k-common.c | 297 +++++++++++++++++++++++++++- - drivers/net/dsa/qca/qca8k.h | 25 ++- - 3 files changed, 317 insertions(+), 311 deletions(-) - ---- a/drivers/net/dsa/qca/qca8k-8xxx.c -+++ b/drivers/net/dsa/qca/qca8k-8xxx.c -@@ -442,217 +442,6 @@ static struct regmap_config qca8k_regmap - }; - - static int --qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb) --{ -- u32 reg[3]; -- int ret; -- -- /* load the ARL table into an array */ -- ret = qca8k_bulk_read(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg)); -- if (ret) -- return ret; -- -- /* vid - 83:72 */ -- fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]); -- /* aging - 67:64 */ -- fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]); -- /* portmask - 54:48 */ -- fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]); -- /* mac - 47:0 */ -- fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]); -- fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]); -- fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]); -- fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]); -- fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]); -- fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]); -- -- return 0; --} -- --static void --qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask, const u8 *mac, -- u8 aging) --{ -- u32 reg[3] = { 0 }; -- -- /* vid - 83:72 */ -- reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid); -- /* aging - 67:64 */ -- reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging); -- /* portmask - 54:48 */ -- reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask); -- /* mac - 47:0 */ -- reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]); -- reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]); -- reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]); -- reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]); -- reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]); -- reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]); -- -- /* load the array into the ARL table */ -- qca8k_bulk_write(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg)); --} -- --static int --qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd, int port) --{ -- u32 reg; -- int ret; -- -- /* Set the command and FDB index */ -- reg = QCA8K_ATU_FUNC_BUSY; -- reg |= cmd; -- if (port >= 0) { -- reg |= QCA8K_ATU_FUNC_PORT_EN; -- reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port); -- } -- -- /* Write the function register triggering the table access */ -- ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg); -- if (ret) -- return ret; -- -- /* wait for completion */ -- ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY); -- if (ret) -- return ret; -- -- /* Check for table full violation when adding an entry */ -- if (cmd == QCA8K_FDB_LOAD) { -- ret = qca8k_read(priv, QCA8K_REG_ATU_FUNC, ®); -- if (ret < 0) -- return ret; -- if (reg & QCA8K_ATU_FUNC_FULL) -- return -1; -- } -- -- return 0; --} -- --static int --qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb, int port) --{ -- int ret; -- -- qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging); -- ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port); -- if (ret < 0) -- return ret; -- -- return qca8k_fdb_read(priv, fdb); --} -- --static int --qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac, u16 port_mask, -- u16 vid, u8 aging) --{ -- 
int ret; -- -- mutex_lock(&priv->reg_mutex); -- qca8k_fdb_write(priv, vid, port_mask, mac, aging); -- ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1); -- mutex_unlock(&priv->reg_mutex); -- -- return ret; --} -- --static int --qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac, u16 port_mask, u16 vid) --{ -- int ret; -- -- mutex_lock(&priv->reg_mutex); -- qca8k_fdb_write(priv, vid, port_mask, mac, 0); -- ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1); -- mutex_unlock(&priv->reg_mutex); -- -- return ret; --} -- --static void --qca8k_fdb_flush(struct qca8k_priv *priv) --{ -- mutex_lock(&priv->reg_mutex); -- qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1); -- mutex_unlock(&priv->reg_mutex); --} -- --static int --qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask, -- const u8 *mac, u16 vid) --{ -- struct qca8k_fdb fdb = { 0 }; -- int ret; -- -- mutex_lock(&priv->reg_mutex); -- -- qca8k_fdb_write(priv, vid, 0, mac, 0); -- ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1); -- if (ret < 0) -- goto exit; -- -- ret = qca8k_fdb_read(priv, &fdb); -- if (ret < 0) -- goto exit; -- -- /* Rule exist. Delete first */ -- if (!fdb.aging) { -- ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1); -- if (ret) -- goto exit; -- } -- -- /* Add port to fdb portmask */ -- fdb.port_mask |= port_mask; -- -- qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging); -- ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1); -- --exit: -- mutex_unlock(&priv->reg_mutex); -- return ret; --} -- --static int --qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask, -- const u8 *mac, u16 vid) --{ -- struct qca8k_fdb fdb = { 0 }; -- int ret; -- -- mutex_lock(&priv->reg_mutex); -- -- qca8k_fdb_write(priv, vid, 0, mac, 0); -- ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1); -- if (ret < 0) -- goto exit; -- -- /* Rule doesn't exist. Why delete? */ -- if (!fdb.aging) { -- ret = -EINVAL; -- goto exit; -- } -- -- ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1); -- if (ret) -- goto exit; -- -- /* Only port in the rule is this port. 
Don't re insert */ -- if (fdb.port_mask == port_mask) -- goto exit; -- -- /* Remove port from port mask */ -- fdb.port_mask &= ~port_mask; -- -- qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging); -- ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1); -- --exit: -- mutex_unlock(&priv->reg_mutex); -- return ret; --} -- --static int - qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid) - { - u32 reg; -@@ -2048,97 +1837,6 @@ exit: - return ret; - } - --static void --qca8k_port_fast_age(struct dsa_switch *ds, int port) --{ -- struct qca8k_priv *priv = ds->priv; -- -- mutex_lock(&priv->reg_mutex); -- qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port); -- mutex_unlock(&priv->reg_mutex); --} -- --static int --qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr, -- u16 port_mask, u16 vid) --{ -- /* Set the vid to the port vlan id if no vid is set */ -- if (!vid) -- vid = QCA8K_PORT_VID_DEF; -- -- return qca8k_fdb_add(priv, addr, port_mask, vid, -- QCA8K_ATU_STATUS_STATIC); --} -- --static int --qca8k_port_fdb_add(struct dsa_switch *ds, int port, -- const unsigned char *addr, u16 vid) --{ -- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -- u16 port_mask = BIT(port); -- -- return qca8k_port_fdb_insert(priv, addr, port_mask, vid); --} -- --static int --qca8k_port_fdb_del(struct dsa_switch *ds, int port, -- const unsigned char *addr, u16 vid) --{ -- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -- u16 port_mask = BIT(port); -- -- if (!vid) -- vid = QCA8K_PORT_VID_DEF; -- -- return qca8k_fdb_del(priv, addr, port_mask, vid); --} -- --static int --qca8k_port_fdb_dump(struct dsa_switch *ds, int port, -- dsa_fdb_dump_cb_t *cb, void *data) --{ -- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -- struct qca8k_fdb _fdb = { 0 }; -- int cnt = QCA8K_NUM_FDB_RECORDS; -- bool is_static; -- int ret = 0; -- -- mutex_lock(&priv->reg_mutex); -- while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) { -- if (!_fdb.aging) -- break; -- is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC); -- ret = cb(_fdb.mac, _fdb.vid, is_static, data); -- if (ret) -- break; -- } -- mutex_unlock(&priv->reg_mutex); -- -- return 0; --} -- --static int --qca8k_port_mdb_add(struct dsa_switch *ds, int port, -- const struct switchdev_obj_port_mdb *mdb) --{ -- struct qca8k_priv *priv = ds->priv; -- const u8 *addr = mdb->addr; -- u16 vid = mdb->vid; -- -- return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid); --} -- --static int --qca8k_port_mdb_del(struct dsa_switch *ds, int port, -- const struct switchdev_obj_port_mdb *mdb) --{ -- struct qca8k_priv *priv = ds->priv; -- const u8 *addr = mdb->addr; -- u16 vid = mdb->vid; -- -- return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid); --} -- - static int - qca8k_port_mirror_add(struct dsa_switch *ds, int port, - struct dsa_mall_mirror_tc_entry *mirror, ---- a/drivers/net/dsa/qca/qca8k-common.c -+++ b/drivers/net/dsa/qca/qca8k-common.c -@@ -103,7 +103,7 @@ const struct regmap_access_table qca8k_r - }; - - /* TODO: remove these extra ops when we can support regmap bulk read/write */ --int qca8k_bulk_read(struct qca8k_priv *priv, u32 reg, u32 *val, int len) -+static int qca8k_bulk_read(struct qca8k_priv *priv, u32 reg, u32 *val, int len) - { - int i, count = len / sizeof(u32), ret; - -@@ -121,7 +121,7 @@ int qca8k_bulk_read(struct qca8k_priv *p - } - - /* TODO: remove these extra ops when we can support regmap bulk read/write */ --int qca8k_bulk_write(struct qca8k_priv *priv, u32 reg, u32 *val, int len) -+static int 
qca8k_bulk_write(struct qca8k_priv *priv, u32 reg, u32 *val, int len) - { - int i, count = len / sizeof(u32), ret; - u32 tmp; -@@ -149,6 +149,211 @@ int qca8k_busy_wait(struct qca8k_priv *p - QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC); - } - -+static int qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb) -+{ -+ u32 reg[3]; -+ int ret; -+ -+ /* load the ARL table into an array */ -+ ret = qca8k_bulk_read(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg)); -+ if (ret) -+ return ret; -+ -+ /* vid - 83:72 */ -+ fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]); -+ /* aging - 67:64 */ -+ fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]); -+ /* portmask - 54:48 */ -+ fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]); -+ /* mac - 47:0 */ -+ fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]); -+ fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]); -+ fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]); -+ fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]); -+ fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]); -+ fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]); -+ -+ return 0; -+} -+ -+static void qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask, -+ const u8 *mac, u8 aging) -+{ -+ u32 reg[3] = { 0 }; -+ -+ /* vid - 83:72 */ -+ reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid); -+ /* aging - 67:64 */ -+ reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging); -+ /* portmask - 54:48 */ -+ reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask); -+ /* mac - 47:0 */ -+ reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]); -+ reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]); -+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]); -+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]); -+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]); -+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]); -+ -+ /* load the array into the ARL table */ -+ qca8k_bulk_write(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg)); -+} -+ -+static int qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd, -+ int port) -+{ -+ u32 reg; -+ int ret; -+ -+ /* Set the command and FDB index */ -+ reg = QCA8K_ATU_FUNC_BUSY; -+ reg |= cmd; -+ if (port >= 0) { -+ reg |= QCA8K_ATU_FUNC_PORT_EN; -+ reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port); -+ } -+ -+ /* Write the function register triggering the table access */ -+ ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg); -+ if (ret) -+ return ret; -+ -+ /* wait for completion */ -+ ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY); -+ if (ret) -+ return ret; -+ -+ /* Check for table full violation when adding an entry */ -+ if (cmd == QCA8K_FDB_LOAD) { -+ ret = qca8k_read(priv, QCA8K_REG_ATU_FUNC, ®); -+ if (ret < 0) -+ return ret; -+ if (reg & QCA8K_ATU_FUNC_FULL) -+ return -1; -+ } -+ -+ return 0; -+} -+ -+static int qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb, -+ int port) -+{ -+ int ret; -+ -+ qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging); -+ ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port); -+ if (ret < 0) -+ return ret; -+ -+ return qca8k_fdb_read(priv, fdb); -+} -+ -+static int qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac, -+ u16 port_mask, u16 vid, u8 aging) -+{ -+ int ret; -+ -+ mutex_lock(&priv->reg_mutex); -+ qca8k_fdb_write(priv, vid, port_mask, mac, aging); -+ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1); -+ mutex_unlock(&priv->reg_mutex); -+ -+ return ret; -+} -+ -+static int qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac, -+ u16 port_mask, u16 vid) -+{ -+ 
int ret; -+ -+ mutex_lock(&priv->reg_mutex); -+ qca8k_fdb_write(priv, vid, port_mask, mac, 0); -+ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1); -+ mutex_unlock(&priv->reg_mutex); -+ -+ return ret; -+} -+ -+void qca8k_fdb_flush(struct qca8k_priv *priv) -+{ -+ mutex_lock(&priv->reg_mutex); -+ qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1); -+ mutex_unlock(&priv->reg_mutex); -+} -+ -+static int qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask, -+ const u8 *mac, u16 vid) -+{ -+ struct qca8k_fdb fdb = { 0 }; -+ int ret; -+ -+ mutex_lock(&priv->reg_mutex); -+ -+ qca8k_fdb_write(priv, vid, 0, mac, 0); -+ ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1); -+ if (ret < 0) -+ goto exit; -+ -+ ret = qca8k_fdb_read(priv, &fdb); -+ if (ret < 0) -+ goto exit; -+ -+ /* Rule exist. Delete first */ -+ if (!fdb.aging) { -+ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1); -+ if (ret) -+ goto exit; -+ } -+ -+ /* Add port to fdb portmask */ -+ fdb.port_mask |= port_mask; -+ -+ qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging); -+ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1); -+ -+exit: -+ mutex_unlock(&priv->reg_mutex); -+ return ret; -+} -+ -+static int qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask, -+ const u8 *mac, u16 vid) -+{ -+ struct qca8k_fdb fdb = { 0 }; -+ int ret; -+ -+ mutex_lock(&priv->reg_mutex); -+ -+ qca8k_fdb_write(priv, vid, 0, mac, 0); -+ ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1); -+ if (ret < 0) -+ goto exit; -+ -+ /* Rule doesn't exist. Why delete? */ -+ if (!fdb.aging) { -+ ret = -EINVAL; -+ goto exit; -+ } -+ -+ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1); -+ if (ret) -+ goto exit; -+ -+ /* Only port in the rule is this port. Don't re insert */ -+ if (fdb.port_mask == port_mask) -+ goto exit; -+ -+ /* Remove port from port mask */ -+ fdb.port_mask &= ~port_mask; -+ -+ qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging); -+ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1); -+ -+exit: -+ mutex_unlock(&priv->reg_mutex); -+ return ret; -+} -+ - int qca8k_mib_init(struct qca8k_priv *priv) - { - int ret; -@@ -368,6 +573,15 @@ void qca8k_port_bridge_leave(struct dsa_ - QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port)); - } - -+void qca8k_port_fast_age(struct dsa_switch *ds, int port) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ -+ mutex_lock(&priv->reg_mutex); -+ qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port); -+ mutex_unlock(&priv->reg_mutex); -+} -+ - int qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs) - { - struct qca8k_priv *priv = ds->priv; -@@ -452,3 +666,78 @@ int qca8k_port_max_mtu(struct dsa_switch - { - return QCA8K_MAX_MTU; - } -+ -+int qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr, -+ u16 port_mask, u16 vid) -+{ -+ /* Set the vid to the port vlan id if no vid is set */ -+ if (!vid) -+ vid = QCA8K_PORT_VID_DEF; -+ -+ return qca8k_fdb_add(priv, addr, port_mask, vid, -+ QCA8K_ATU_STATUS_STATIC); -+} -+ -+int qca8k_port_fdb_add(struct dsa_switch *ds, int port, -+ const unsigned char *addr, u16 vid) -+{ -+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -+ u16 port_mask = BIT(port); -+ -+ return qca8k_port_fdb_insert(priv, addr, port_mask, vid); -+} -+ -+int qca8k_port_fdb_del(struct dsa_switch *ds, int port, -+ const unsigned char *addr, u16 vid) -+{ -+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -+ u16 port_mask = BIT(port); -+ -+ if (!vid) -+ vid = QCA8K_PORT_VID_DEF; -+ -+ return qca8k_fdb_del(priv, addr, port_mask, vid); -+} -+ -+int qca8k_port_fdb_dump(struct 
dsa_switch *ds, int port, -+ dsa_fdb_dump_cb_t *cb, void *data) -+{ -+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; -+ struct qca8k_fdb _fdb = { 0 }; -+ int cnt = QCA8K_NUM_FDB_RECORDS; -+ bool is_static; -+ int ret = 0; -+ -+ mutex_lock(&priv->reg_mutex); -+ while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) { -+ if (!_fdb.aging) -+ break; -+ is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC); -+ ret = cb(_fdb.mac, _fdb.vid, is_static, data); -+ if (ret) -+ break; -+ } -+ mutex_unlock(&priv->reg_mutex); -+ -+ return 0; -+} -+ -+int qca8k_port_mdb_add(struct dsa_switch *ds, int port, -+ const struct switchdev_obj_port_mdb *mdb) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ const u8 *addr = mdb->addr; -+ u16 vid = mdb->vid; -+ -+ return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid); -+} -+ -+int qca8k_port_mdb_del(struct dsa_switch *ds, int port, -+ const struct switchdev_obj_port_mdb *mdb) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ const u8 *addr = mdb->addr; -+ u16 vid = mdb->vid; -+ -+ return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid); -+} ---- a/drivers/net/dsa/qca/qca8k.h -+++ b/drivers/net/dsa/qca/qca8k.h -@@ -430,11 +430,9 @@ int qca8k_read(struct qca8k_priv *priv, - int qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val); - int qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val); - --int qca8k_bulk_read(struct qca8k_priv *priv, u32 reg, u32 *val, int len); --int qca8k_bulk_write(struct qca8k_priv *priv, u32 reg, u32 *val, int len); -- - /* Common ops function */ - int qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask); -+void qca8k_fdb_flush(struct qca8k_priv *priv); - - /* Common ethtool stats function */ - void qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data); -@@ -463,6 +461,23 @@ int qca8k_port_change_mtu(struct dsa_swi - int qca8k_port_max_mtu(struct dsa_switch *ds, int port); - - /* Common fast age function */ -+void qca8k_port_fast_age(struct dsa_switch *ds, int port); - int qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs); - -+/* Common FDB function */ -+int qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr, -+ u16 port_mask, u16 vid); -+int qca8k_port_fdb_add(struct dsa_switch *ds, int port, -+ const unsigned char *addr, u16 vid); -+int qca8k_port_fdb_del(struct dsa_switch *ds, int port, -+ const unsigned char *addr, u16 vid); -+int qca8k_port_fdb_dump(struct dsa_switch *ds, int port, -+ dsa_fdb_dump_cb_t *cb, void *data); -+ -+/* Common MDB function */ -+int qca8k_port_mdb_add(struct dsa_switch *ds, int port, -+ const struct switchdev_obj_port_mdb *mdb); -+int qca8k_port_mdb_del(struct dsa_switch *ds, int port, -+ const struct switchdev_obj_port_mdb *mdb); -+ - #endif /* __QCA8K_H */ diff --git a/target/linux/generic/backport-6.1/771-v6.0-11-net-dsa-qca8k-move-port-mirror-functions-to-common-c.patch b/target/linux/generic/backport-6.1/771-v6.0-11-net-dsa-qca8k-move-port-mirror-functions-to-common-c.patch deleted file mode 100644 index c1336d4a9216..000000000000 --- a/target/linux/generic/backport-6.1/771-v6.0-11-net-dsa-qca8k-move-port-mirror-functions-to-common-c.patch +++ /dev/null @@ -1,232 +0,0 @@ -From 742d37a84d3f7bb60d9b2d9ada9ad4e599f65ebf Mon Sep 17 00:00:00 2001 -From: Christian Marangi -Date: Wed, 27 Jul 2022 13:35:20 +0200 -Subject: [PATCH 11/14] net: dsa: qca8k: move port mirror functions to common - code - -The same port mirror functions are used by drivers based on qca8k family -switch. 
Move them to common code to make them accessible also by other -drivers. - -Signed-off-by: Christian Marangi -Reviewed-by: Vladimir Oltean -Signed-off-by: Jakub Kicinski ---- - drivers/net/dsa/qca/qca8k-8xxx.c | 93 ------------------------------ - drivers/net/dsa/qca/qca8k-common.c | 91 +++++++++++++++++++++++++++++ - drivers/net/dsa/qca/qca8k.h | 7 +++ - 3 files changed, 98 insertions(+), 93 deletions(-) - ---- a/drivers/net/dsa/qca/qca8k-8xxx.c -+++ b/drivers/net/dsa/qca/qca8k-8xxx.c -@@ -1838,99 +1838,6 @@ exit: - } - - static int --qca8k_port_mirror_add(struct dsa_switch *ds, int port, -- struct dsa_mall_mirror_tc_entry *mirror, -- bool ingress) --{ -- struct qca8k_priv *priv = ds->priv; -- int monitor_port, ret; -- u32 reg, val; -- -- /* Check for existent entry */ -- if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port)) -- return -EEXIST; -- -- ret = regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val); -- if (ret) -- return ret; -- -- /* QCA83xx can have only one port set to mirror mode. -- * Check that the correct port is requested and return error otherwise. -- * When no mirror port is set, the values is set to 0xF -- */ -- monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val); -- if (monitor_port != 0xF && monitor_port != mirror->to_local_port) -- return -EEXIST; -- -- /* Set the monitor port */ -- val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, -- mirror->to_local_port); -- ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, -- QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val); -- if (ret) -- return ret; -- -- if (ingress) { -- reg = QCA8K_PORT_LOOKUP_CTRL(port); -- val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN; -- } else { -- reg = QCA8K_REG_PORT_HOL_CTRL1(port); -- val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN; -- } -- -- ret = regmap_update_bits(priv->regmap, reg, val, val); -- if (ret) -- return ret; -- -- /* Track mirror port for tx and rx to decide when the -- * mirror port has to be disabled. -- */ -- if (ingress) -- priv->mirror_rx |= BIT(port); -- else -- priv->mirror_tx |= BIT(port); -- -- return 0; --} -- --static void --qca8k_port_mirror_del(struct dsa_switch *ds, int port, -- struct dsa_mall_mirror_tc_entry *mirror) --{ -- struct qca8k_priv *priv = ds->priv; -- u32 reg, val; -- int ret; -- -- if (mirror->ingress) { -- reg = QCA8K_PORT_LOOKUP_CTRL(port); -- val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN; -- } else { -- reg = QCA8K_REG_PORT_HOL_CTRL1(port); -- val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN; -- } -- -- ret = regmap_clear_bits(priv->regmap, reg, val); -- if (ret) -- goto err; -- -- if (mirror->ingress) -- priv->mirror_rx &= ~BIT(port); -- else -- priv->mirror_tx &= ~BIT(port); -- -- /* No port set to send packet to mirror port. 
Disable mirror port */ -- if (!priv->mirror_rx && !priv->mirror_tx) { -- val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, 0xF); -- ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, -- QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val); -- if (ret) -- goto err; -- } --err: -- dev_err(priv->dev, "Failed to del mirror port from %d", port); --} -- --static int - qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, - struct netlink_ext_ack *extack) - { ---- a/drivers/net/dsa/qca/qca8k-common.c -+++ b/drivers/net/dsa/qca/qca8k-common.c -@@ -741,3 +741,94 @@ int qca8k_port_mdb_del(struct dsa_switch - - return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid); - } -+ -+int qca8k_port_mirror_add(struct dsa_switch *ds, int port, -+ struct dsa_mall_mirror_tc_entry *mirror, -+ bool ingress) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ int monitor_port, ret; -+ u32 reg, val; -+ -+ /* Check for existent entry */ -+ if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port)) -+ return -EEXIST; -+ -+ ret = regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val); -+ if (ret) -+ return ret; -+ -+ /* QCA83xx can have only one port set to mirror mode. -+ * Check that the correct port is requested and return error otherwise. -+ * When no mirror port is set, the values is set to 0xF -+ */ -+ monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val); -+ if (monitor_port != 0xF && monitor_port != mirror->to_local_port) -+ return -EEXIST; -+ -+ /* Set the monitor port */ -+ val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, -+ mirror->to_local_port); -+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, -+ QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val); -+ if (ret) -+ return ret; -+ -+ if (ingress) { -+ reg = QCA8K_PORT_LOOKUP_CTRL(port); -+ val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN; -+ } else { -+ reg = QCA8K_REG_PORT_HOL_CTRL1(port); -+ val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN; -+ } -+ -+ ret = regmap_update_bits(priv->regmap, reg, val, val); -+ if (ret) -+ return ret; -+ -+ /* Track mirror port for tx and rx to decide when the -+ * mirror port has to be disabled. -+ */ -+ if (ingress) -+ priv->mirror_rx |= BIT(port); -+ else -+ priv->mirror_tx |= BIT(port); -+ -+ return 0; -+} -+ -+void qca8k_port_mirror_del(struct dsa_switch *ds, int port, -+ struct dsa_mall_mirror_tc_entry *mirror) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ u32 reg, val; -+ int ret; -+ -+ if (mirror->ingress) { -+ reg = QCA8K_PORT_LOOKUP_CTRL(port); -+ val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN; -+ } else { -+ reg = QCA8K_REG_PORT_HOL_CTRL1(port); -+ val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN; -+ } -+ -+ ret = regmap_clear_bits(priv->regmap, reg, val); -+ if (ret) -+ goto err; -+ -+ if (mirror->ingress) -+ priv->mirror_rx &= ~BIT(port); -+ else -+ priv->mirror_tx &= ~BIT(port); -+ -+ /* No port set to send packet to mirror port. 
Disable mirror port */ -+ if (!priv->mirror_rx && !priv->mirror_tx) { -+ val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, 0xF); -+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, -+ QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val); -+ if (ret) -+ goto err; -+ } -+err: -+ dev_err(priv->dev, "Failed to del mirror port from %d", port); -+} ---- a/drivers/net/dsa/qca/qca8k.h -+++ b/drivers/net/dsa/qca/qca8k.h -@@ -480,4 +480,11 @@ int qca8k_port_mdb_add(struct dsa_switch - int qca8k_port_mdb_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb); - -+/* Common port mirror function */ -+int qca8k_port_mirror_add(struct dsa_switch *ds, int port, -+ struct dsa_mall_mirror_tc_entry *mirror, -+ bool ingress); -+void qca8k_port_mirror_del(struct dsa_switch *ds, int port, -+ struct dsa_mall_mirror_tc_entry *mirror); -+ - #endif /* __QCA8K_H */ diff --git a/target/linux/generic/backport-6.1/771-v6.0-12-net-dsa-qca8k-move-port-VLAN-functions-to-common-cod.patch b/target/linux/generic/backport-6.1/771-v6.0-12-net-dsa-qca8k-move-port-VLAN-functions-to-common-cod.patch deleted file mode 100644 index 898010f95096..000000000000 --- a/target/linux/generic/backport-6.1/771-v6.0-12-net-dsa-qca8k-move-port-VLAN-functions-to-common-cod.patch +++ /dev/null @@ -1,448 +0,0 @@ -From c5290f636624b98e76a82bd63ffec0a8a9daa620 Mon Sep 17 00:00:00 2001 -From: Christian Marangi -Date: Wed, 27 Jul 2022 13:35:21 +0200 -Subject: [PATCH 12/14] net: dsa: qca8k: move port VLAN functions to common - code - -The same port VLAN functions are used by drivers based on qca8k family -switch. Move them to common code to make them accessible also by other -drivers. -Also drop exposing busy_wait and make it static. - -Signed-off-by: Christian Marangi -Reviewed-by: Vladimir Oltean -Signed-off-by: Jakub Kicinski ---- - drivers/net/dsa/qca/qca8k-8xxx.c | 182 ----------------------------- - drivers/net/dsa/qca/qca8k-common.c | 179 +++++++++++++++++++++++++++- - drivers/net/dsa/qca/qca8k.h | 10 +- - 3 files changed, 187 insertions(+), 184 deletions(-) - ---- a/drivers/net/dsa/qca/qca8k-8xxx.c -+++ b/drivers/net/dsa/qca/qca8k-8xxx.c -@@ -15,7 +15,6 @@ - #include - #include - #include --#include - #include - #include - #include -@@ -442,122 +441,6 @@ static struct regmap_config qca8k_regmap - }; - - static int --qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid) --{ -- u32 reg; -- int ret; -- -- /* Set the command and VLAN index */ -- reg = QCA8K_VTU_FUNC1_BUSY; -- reg |= cmd; -- reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid); -- -- /* Write the function register triggering the table access */ -- ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg); -- if (ret) -- return ret; -- -- /* wait for completion */ -- ret = qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY); -- if (ret) -- return ret; -- -- /* Check for table full violation when adding an entry */ -- if (cmd == QCA8K_VLAN_LOAD) { -- ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC1, ®); -- if (ret < 0) -- return ret; -- if (reg & QCA8K_VTU_FUNC1_FULL) -- return -ENOMEM; -- } -- -- return 0; --} -- --static int --qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid, bool untagged) --{ -- u32 reg; -- int ret; -- -- /* -- We do the right thing with VLAN 0 and treat it as untagged while -- preserving the tag on egress. 
-- */ -- if (vid == 0) -- return 0; -- -- mutex_lock(&priv->reg_mutex); -- ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid); -- if (ret < 0) -- goto out; -- -- ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, ®); -- if (ret < 0) -- goto out; -- reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN; -- reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port); -- if (untagged) -- reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port); -- else -- reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port); -- -- ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg); -- if (ret) -- goto out; -- ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid); -- --out: -- mutex_unlock(&priv->reg_mutex); -- -- return ret; --} -- --static int --qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid) --{ -- u32 reg, mask; -- int ret, i; -- bool del; -- -- mutex_lock(&priv->reg_mutex); -- ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid); -- if (ret < 0) -- goto out; -- -- ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, ®); -- if (ret < 0) -- goto out; -- reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port); -- reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port); -- -- /* Check if we're the last member to be removed */ -- del = true; -- for (i = 0; i < QCA8K_NUM_PORTS; i++) { -- mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i); -- -- if ((reg & mask) != mask) { -- del = false; -- break; -- } -- } -- -- if (del) { -- ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid); -- } else { -- ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg); -- if (ret) -- goto out; -- ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid); -- } -- --out: -- mutex_unlock(&priv->reg_mutex); -- -- return ret; --} -- --static int - qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data, - struct sk_buff *read_skb, u32 *val) - { -@@ -1836,71 +1719,6 @@ exit: - - return ret; - } -- --static int --qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, -- struct netlink_ext_ack *extack) --{ -- struct qca8k_priv *priv = ds->priv; -- int ret; -- -- if (vlan_filtering) { -- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), -- QCA8K_PORT_LOOKUP_VLAN_MODE_MASK, -- QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE); -- } else { -- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), -- QCA8K_PORT_LOOKUP_VLAN_MODE_MASK, -- QCA8K_PORT_LOOKUP_VLAN_MODE_NONE); -- } -- -- return ret; --} -- --static int --qca8k_port_vlan_add(struct dsa_switch *ds, int port, -- const struct switchdev_obj_port_vlan *vlan, -- struct netlink_ext_ack *extack) --{ -- bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; -- bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; -- struct qca8k_priv *priv = ds->priv; -- int ret; -- -- ret = qca8k_vlan_add(priv, port, vlan->vid, untagged); -- if (ret) { -- dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret); -- return ret; -- } -- -- if (pvid) { -- ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port), -- QCA8K_EGREES_VLAN_PORT_MASK(port), -- QCA8K_EGREES_VLAN_PORT(port, vlan->vid)); -- if (ret) -- return ret; -- -- ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port), -- QCA8K_PORT_VLAN_CVID(vlan->vid) | -- QCA8K_PORT_VLAN_SVID(vlan->vid)); -- } -- -- return ret; --} -- --static int --qca8k_port_vlan_del(struct dsa_switch *ds, int port, -- const struct switchdev_obj_port_vlan *vlan) --{ -- struct qca8k_priv *priv = ds->priv; -- int ret; -- -- ret = qca8k_vlan_del(priv, port, vlan->vid); -- if (ret) -- dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret); -- -- return ret; --} - - static u32 qca8k_get_phy_flags(struct dsa_switch *ds, 
int port) - { ---- a/drivers/net/dsa/qca/qca8k-common.c -+++ b/drivers/net/dsa/qca/qca8k-common.c -@@ -141,7 +141,7 @@ static int qca8k_bulk_write(struct qca8k - return 0; - } - --int qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask) -+static int qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask) - { - u32 val; - -@@ -354,6 +354,120 @@ exit: - return ret; - } - -+static int qca8k_vlan_access(struct qca8k_priv *priv, -+ enum qca8k_vlan_cmd cmd, u16 vid) -+{ -+ u32 reg; -+ int ret; -+ -+ /* Set the command and VLAN index */ -+ reg = QCA8K_VTU_FUNC1_BUSY; -+ reg |= cmd; -+ reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid); -+ -+ /* Write the function register triggering the table access */ -+ ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg); -+ if (ret) -+ return ret; -+ -+ /* wait for completion */ -+ ret = qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY); -+ if (ret) -+ return ret; -+ -+ /* Check for table full violation when adding an entry */ -+ if (cmd == QCA8K_VLAN_LOAD) { -+ ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC1, ®); -+ if (ret < 0) -+ return ret; -+ if (reg & QCA8K_VTU_FUNC1_FULL) -+ return -ENOMEM; -+ } -+ -+ return 0; -+} -+ -+static int qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid, -+ bool untagged) -+{ -+ u32 reg; -+ int ret; -+ -+ /* We do the right thing with VLAN 0 and treat it as untagged while -+ * preserving the tag on egress. -+ */ -+ if (vid == 0) -+ return 0; -+ -+ mutex_lock(&priv->reg_mutex); -+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid); -+ if (ret < 0) -+ goto out; -+ -+ ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, ®); -+ if (ret < 0) -+ goto out; -+ reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN; -+ reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port); -+ if (untagged) -+ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port); -+ else -+ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port); -+ -+ ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg); -+ if (ret) -+ goto out; -+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid); -+ -+out: -+ mutex_unlock(&priv->reg_mutex); -+ -+ return ret; -+} -+ -+static int qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid) -+{ -+ u32 reg, mask; -+ int ret, i; -+ bool del; -+ -+ mutex_lock(&priv->reg_mutex); -+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid); -+ if (ret < 0) -+ goto out; -+ -+ ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, ®); -+ if (ret < 0) -+ goto out; -+ reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port); -+ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port); -+ -+ /* Check if we're the last member to be removed */ -+ del = true; -+ for (i = 0; i < QCA8K_NUM_PORTS; i++) { -+ mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i); -+ -+ if ((reg & mask) != mask) { -+ del = false; -+ break; -+ } -+ } -+ -+ if (del) { -+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid); -+ } else { -+ ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg); -+ if (ret) -+ goto out; -+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid); -+ } -+ -+out: -+ mutex_unlock(&priv->reg_mutex); -+ -+ return ret; -+} -+ - int qca8k_mib_init(struct qca8k_priv *priv) - { - int ret; -@@ -832,3 +946,66 @@ void qca8k_port_mirror_del(struct dsa_sw - err: - dev_err(priv->dev, "Failed to del mirror port from %d", port); - } -+ -+int qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, -+ bool vlan_filtering, -+ struct netlink_ext_ack *extack) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ int ret; -+ -+ if (vlan_filtering) { -+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), -+ 
QCA8K_PORT_LOOKUP_VLAN_MODE_MASK, -+ QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE); -+ } else { -+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), -+ QCA8K_PORT_LOOKUP_VLAN_MODE_MASK, -+ QCA8K_PORT_LOOKUP_VLAN_MODE_NONE); -+ } -+ -+ return ret; -+} -+ -+int qca8k_port_vlan_add(struct dsa_switch *ds, int port, -+ const struct switchdev_obj_port_vlan *vlan, -+ struct netlink_ext_ack *extack) -+{ -+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; -+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; -+ struct qca8k_priv *priv = ds->priv; -+ int ret; -+ -+ ret = qca8k_vlan_add(priv, port, vlan->vid, untagged); -+ if (ret) { -+ dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret); -+ return ret; -+ } -+ -+ if (pvid) { -+ ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port), -+ QCA8K_EGREES_VLAN_PORT_MASK(port), -+ QCA8K_EGREES_VLAN_PORT(port, vlan->vid)); -+ if (ret) -+ return ret; -+ -+ ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port), -+ QCA8K_PORT_VLAN_CVID(vlan->vid) | -+ QCA8K_PORT_VLAN_SVID(vlan->vid)); -+ } -+ -+ return ret; -+} -+ -+int qca8k_port_vlan_del(struct dsa_switch *ds, int port, -+ const struct switchdev_obj_port_vlan *vlan) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ int ret; -+ -+ ret = qca8k_vlan_del(priv, port, vlan->vid); -+ if (ret) -+ dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret); -+ -+ return ret; -+} ---- a/drivers/net/dsa/qca/qca8k.h -+++ b/drivers/net/dsa/qca/qca8k.h -@@ -431,7 +431,6 @@ int qca8k_write(struct qca8k_priv *priv, - int qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val); - - /* Common ops function */ --int qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask); - void qca8k_fdb_flush(struct qca8k_priv *priv); - - /* Common ethtool stats function */ -@@ -487,4 +486,13 @@ int qca8k_port_mirror_add(struct dsa_swi - void qca8k_port_mirror_del(struct dsa_switch *ds, int port, - struct dsa_mall_mirror_tc_entry *mirror); - -+/* Common port VLAN function */ -+int qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, -+ struct netlink_ext_ack *extack); -+int qca8k_port_vlan_add(struct dsa_switch *ds, int port, -+ const struct switchdev_obj_port_vlan *vlan, -+ struct netlink_ext_ack *extack); -+int qca8k_port_vlan_del(struct dsa_switch *ds, int port, -+ const struct switchdev_obj_port_vlan *vlan); -+ - #endif /* __QCA8K_H */ diff --git a/target/linux/generic/backport-6.1/771-v6.0-13-net-dsa-qca8k-move-port-LAG-functions-to-common-code.patch b/target/linux/generic/backport-6.1/771-v6.0-13-net-dsa-qca8k-move-port-LAG-functions-to-common-code.patch deleted file mode 100644 index 1802b17eaad7..000000000000 --- a/target/linux/generic/backport-6.1/771-v6.0-13-net-dsa-qca8k-move-port-LAG-functions-to-common-code.patch +++ /dev/null @@ -1,384 +0,0 @@ -From e9bbf019af44b204b71ef8edf224002550aab641 Mon Sep 17 00:00:00 2001 -From: Christian Marangi -Date: Wed, 27 Jul 2022 13:35:22 +0200 -Subject: [PATCH 13/14] net: dsa: qca8k: move port LAG functions to common code - -The same port LAG functions are used by drivers based on qca8k family -switch. Move them to common code to make them accessible also by other -drivers. 
- -Signed-off-by: Christian Marangi -Reviewed-by: Vladimir Oltean -Signed-off-by: Jakub Kicinski ---- - drivers/net/dsa/qca/qca8k-8xxx.c | 168 ----------------------------- - drivers/net/dsa/qca/qca8k-common.c | 165 ++++++++++++++++++++++++++++ - drivers/net/dsa/qca/qca8k.h | 6 ++ - 3 files changed, 171 insertions(+), 168 deletions(-) - ---- a/drivers/net/dsa/qca/qca8k-8xxx.c -+++ b/drivers/net/dsa/qca/qca8k-8xxx.c -@@ -1743,178 +1743,6 @@ qca8k_get_tag_protocol(struct dsa_switch - return DSA_TAG_PROTO_QCA; - } - --static bool --qca8k_lag_can_offload(struct dsa_switch *ds, -- struct net_device *lag, -- struct netdev_lag_upper_info *info) --{ -- struct dsa_port *dp; -- int id, members = 0; -- -- id = dsa_lag_id(ds->dst, lag); -- if (id < 0 || id >= ds->num_lag_ids) -- return false; -- -- dsa_lag_foreach_port(dp, ds->dst, lag) -- /* Includes the port joining the LAG */ -- members++; -- -- if (members > QCA8K_NUM_PORTS_FOR_LAG) -- return false; -- -- if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) -- return false; -- -- if (info->hash_type != NETDEV_LAG_HASH_L2 && -- info->hash_type != NETDEV_LAG_HASH_L23) -- return false; -- -- return true; --} -- --static int --qca8k_lag_setup_hash(struct dsa_switch *ds, -- struct net_device *lag, -- struct netdev_lag_upper_info *info) --{ -- struct qca8k_priv *priv = ds->priv; -- bool unique_lag = true; -- u32 hash = 0; -- int i, id; -- -- id = dsa_lag_id(ds->dst, lag); -- -- switch (info->hash_type) { -- case NETDEV_LAG_HASH_L23: -- hash |= QCA8K_TRUNK_HASH_SIP_EN; -- hash |= QCA8K_TRUNK_HASH_DIP_EN; -- fallthrough; -- case NETDEV_LAG_HASH_L2: -- hash |= QCA8K_TRUNK_HASH_SA_EN; -- hash |= QCA8K_TRUNK_HASH_DA_EN; -- break; -- default: /* We should NEVER reach this */ -- return -EOPNOTSUPP; -- } -- -- /* Check if we are the unique configured LAG */ -- dsa_lags_foreach_id(i, ds->dst) -- if (i != id && dsa_lag_dev(ds->dst, i)) { -- unique_lag = false; -- break; -- } -- -- /* Hash Mode is global. Make sure the same Hash Mode -- * is set to all the 4 possible lag. -- * If we are the unique LAG we can set whatever hash -- * mode we want. -- * To change hash mode it's needed to remove all LAG -- * and change the mode with the latest. -- */ -- if (unique_lag) { -- priv->lag_hash_mode = hash; -- } else if (priv->lag_hash_mode != hash) { -- netdev_err(lag, "Error: Mismateched Hash Mode across different lag is not supported\n"); -- return -EOPNOTSUPP; -- } -- -- return regmap_update_bits(priv->regmap, QCA8K_TRUNK_HASH_EN_CTRL, -- QCA8K_TRUNK_HASH_MASK, hash); --} -- --static int --qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port, -- struct net_device *lag, bool delete) --{ -- struct qca8k_priv *priv = ds->priv; -- int ret, id, i; -- u32 val; -- -- id = dsa_lag_id(ds->dst, lag); -- -- /* Read current port member */ -- ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val); -- if (ret) -- return ret; -- -- /* Shift val to the correct trunk */ -- val >>= QCA8K_REG_GOL_TRUNK_SHIFT(id); -- val &= QCA8K_REG_GOL_TRUNK_MEMBER_MASK; -- if (delete) -- val &= ~BIT(port); -- else -- val |= BIT(port); -- -- /* Update port member. 
With empty portmap disable trunk */ -- ret = regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, -- QCA8K_REG_GOL_TRUNK_MEMBER(id) | -- QCA8K_REG_GOL_TRUNK_EN(id), -- !val << QCA8K_REG_GOL_TRUNK_SHIFT(id) | -- val << QCA8K_REG_GOL_TRUNK_SHIFT(id)); -- -- /* Search empty member if adding or port on deleting */ -- for (i = 0; i < QCA8K_NUM_PORTS_FOR_LAG; i++) { -- ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), &val); -- if (ret) -- return ret; -- -- val >>= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i); -- val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK; -- -- if (delete) { -- /* If port flagged to be disabled assume this member is -- * empty -- */ -- if (val != QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK) -- continue; -- -- val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK; -- if (val != port) -- continue; -- } else { -- /* If port flagged to be enabled assume this member is -- * already set -- */ -- if (val == QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK) -- continue; -- } -- -- /* We have found the member to add/remove */ -- break; -- } -- -- /* Set port in the correct port mask or disable port if in delete mode */ -- return regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), -- QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(id, i) | -- QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(id, i), -- !delete << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i) | -- port << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i)); --} -- --static int --qca8k_port_lag_join(struct dsa_switch *ds, int port, -- struct net_device *lag, -- struct netdev_lag_upper_info *info) --{ -- int ret; -- -- if (!qca8k_lag_can_offload(ds, lag, info)) -- return -EOPNOTSUPP; -- -- ret = qca8k_lag_setup_hash(ds, lag, info); -- if (ret) -- return ret; -- -- return qca8k_lag_refresh_portmap(ds, port, lag, false); --} -- --static int --qca8k_port_lag_leave(struct dsa_switch *ds, int port, -- struct net_device *lag) --{ -- return qca8k_lag_refresh_portmap(ds, port, lag, true); --} -- - static void - qca8k_master_change(struct dsa_switch *ds, const struct net_device *master, - bool operational) ---- a/drivers/net/dsa/qca/qca8k-common.c -+++ b/drivers/net/dsa/qca/qca8k-common.c -@@ -1009,3 +1009,169 @@ int qca8k_port_vlan_del(struct dsa_switc - - return ret; - } -+ -+static bool qca8k_lag_can_offload(struct dsa_switch *ds, -+ struct net_device *lag, -+ struct netdev_lag_upper_info *info) -+{ -+ struct dsa_port *dp; -+ int id, members = 0; -+ -+ id = dsa_lag_id(ds->dst, lag); -+ if (id < 0 || id >= ds->num_lag_ids) -+ return false; -+ -+ dsa_lag_foreach_port(dp, ds->dst, lag) -+ /* Includes the port joining the LAG */ -+ members++; -+ -+ if (members > QCA8K_NUM_PORTS_FOR_LAG) -+ return false; -+ -+ if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) -+ return false; -+ -+ if (info->hash_type != NETDEV_LAG_HASH_L2 && -+ info->hash_type != NETDEV_LAG_HASH_L23) -+ return false; -+ -+ return true; -+} -+ -+static int qca8k_lag_setup_hash(struct dsa_switch *ds, -+ struct net_device *lag, -+ struct netdev_lag_upper_info *info) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ bool unique_lag = true; -+ u32 hash = 0; -+ int i, id; -+ -+ id = dsa_lag_id(ds->dst, lag); -+ -+ switch (info->hash_type) { -+ case NETDEV_LAG_HASH_L23: -+ hash |= QCA8K_TRUNK_HASH_SIP_EN; -+ hash |= QCA8K_TRUNK_HASH_DIP_EN; -+ fallthrough; -+ case NETDEV_LAG_HASH_L2: -+ hash |= QCA8K_TRUNK_HASH_SA_EN; -+ hash |= QCA8K_TRUNK_HASH_DA_EN; -+ break; -+ default: /* We should NEVER reach this */ -+ return -EOPNOTSUPP; -+ } -+ -+ /* Check if we are the unique configured LAG */ -+ dsa_lags_foreach_id(i, 
ds->dst) -+ if (i != id && dsa_lag_dev(ds->dst, i)) { -+ unique_lag = false; -+ break; -+ } -+ -+ /* Hash Mode is global. Make sure the same Hash Mode -+ * is set to all the 4 possible lag. -+ * If we are the unique LAG we can set whatever hash -+ * mode we want. -+ * To change hash mode it's needed to remove all LAG -+ * and change the mode with the latest. -+ */ -+ if (unique_lag) { -+ priv->lag_hash_mode = hash; -+ } else if (priv->lag_hash_mode != hash) { -+ netdev_err(lag, "Error: Mismatched Hash Mode across different lag is not supported\n"); -+ return -EOPNOTSUPP; -+ } -+ -+ return regmap_update_bits(priv->regmap, QCA8K_TRUNK_HASH_EN_CTRL, -+ QCA8K_TRUNK_HASH_MASK, hash); -+} -+ -+static int qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port, -+ struct net_device *lag, bool delete) -+{ -+ struct qca8k_priv *priv = ds->priv; -+ int ret, id, i; -+ u32 val; -+ -+ id = dsa_lag_id(ds->dst, lag); -+ -+ /* Read current port member */ -+ ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val); -+ if (ret) -+ return ret; -+ -+ /* Shift val to the correct trunk */ -+ val >>= QCA8K_REG_GOL_TRUNK_SHIFT(id); -+ val &= QCA8K_REG_GOL_TRUNK_MEMBER_MASK; -+ if (delete) -+ val &= ~BIT(port); -+ else -+ val |= BIT(port); -+ -+ /* Update port member. With empty portmap disable trunk */ -+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, -+ QCA8K_REG_GOL_TRUNK_MEMBER(id) | -+ QCA8K_REG_GOL_TRUNK_EN(id), -+ !val << QCA8K_REG_GOL_TRUNK_SHIFT(id) | -+ val << QCA8K_REG_GOL_TRUNK_SHIFT(id)); -+ -+ /* Search empty member if adding or port on deleting */ -+ for (i = 0; i < QCA8K_NUM_PORTS_FOR_LAG; i++) { -+ ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), &val); -+ if (ret) -+ return ret; -+ -+ val >>= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i); -+ val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK; -+ -+ if (delete) { -+ /* If port flagged to be disabled assume this member is -+ * empty -+ */ -+ if (val != QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK) -+ continue; -+ -+ val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK; -+ if (val != port) -+ continue; -+ } else { -+ /* If port flagged to be enabled assume this member is -+ * already set -+ */ -+ if (val == QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK) -+ continue; -+ } -+ -+ /* We have found the member to add/remove */ -+ break; -+ } -+ -+ /* Set port in the correct port mask or disable port if in delete mode */ -+ return regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), -+ QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(id, i) | -+ QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(id, i), -+ !delete << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i) | -+ port << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i)); -+} -+ -+int qca8k_port_lag_join(struct dsa_switch *ds, int port, struct net_device *lag, -+ struct netdev_lag_upper_info *info) -+{ -+ int ret; -+ -+ if (!qca8k_lag_can_offload(ds, lag, info)) -+ return -EOPNOTSUPP; -+ -+ ret = qca8k_lag_setup_hash(ds, lag, info); -+ if (ret) -+ return ret; -+ -+ return qca8k_lag_refresh_portmap(ds, port, lag, false); -+} -+ -+int qca8k_port_lag_leave(struct dsa_switch *ds, int port, -+ struct net_device *lag) -+{ -+ return qca8k_lag_refresh_portmap(ds, port, lag, true); -+} ---- a/drivers/net/dsa/qca/qca8k.h -+++ b/drivers/net/dsa/qca/qca8k.h -@@ -495,4 +495,10 @@ int qca8k_port_vlan_add(struct dsa_switc - int qca8k_port_vlan_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan); - -+/* Common port LAG function */ -+int qca8k_port_lag_join(struct dsa_switch *ds, int port, struct net_device 
*lag, -+ struct netdev_lag_upper_info *info); -+int qca8k_port_lag_leave(struct dsa_switch *ds, int port, -+ struct net_device *lag); -+ - #endif /* __QCA8K_H */ diff --git a/target/linux/generic/backport-6.1/771-v6.0-14-net-dsa-qca8k-move-read_switch_id-function-to-common.patch b/target/linux/generic/backport-6.1/771-v6.0-14-net-dsa-qca8k-move-read_switch_id-function-to-common.patch deleted file mode 100644 index d6ec8b77e0ce..000000000000 --- a/target/linux/generic/backport-6.1/771-v6.0-14-net-dsa-qca8k-move-read_switch_id-function-to-common.patch +++ /dev/null @@ -1,102 +0,0 @@ -From 9d1bcb1f293f1391302a109c9819c3705c804700 Mon Sep 17 00:00:00 2001 -From: Christian Marangi -Date: Wed, 27 Jul 2022 13:35:23 +0200 -Subject: [PATCH 14/14] net: dsa: qca8k: move read_switch_id function to common - code - -The same function to read the switch id is used by drivers based on -qca8k family switch. Move them to common code to make them accessible -also by other drivers. - -Signed-off-by: Christian Marangi -Reviewed-by: Vladimir Oltean -Signed-off-by: Jakub Kicinski ---- - drivers/net/dsa/qca/qca8k-8xxx.c | 29 ----------------------------- - drivers/net/dsa/qca/qca8k-common.c | 29 +++++++++++++++++++++++++++++ - drivers/net/dsa/qca/qca8k.h | 1 + - 3 files changed, 30 insertions(+), 29 deletions(-) - ---- a/drivers/net/dsa/qca/qca8k-8xxx.c -+++ b/drivers/net/dsa/qca/qca8k-8xxx.c -@@ -1822,35 +1822,6 @@ static const struct dsa_switch_ops qca8k - .connect_tag_protocol = qca8k_connect_tag_protocol, - }; - --static int qca8k_read_switch_id(struct qca8k_priv *priv) --{ -- u32 val; -- u8 id; -- int ret; -- -- if (!priv->info) -- return -ENODEV; -- -- ret = qca8k_read(priv, QCA8K_REG_MASK_CTRL, &val); -- if (ret < 0) -- return -ENODEV; -- -- id = QCA8K_MASK_CTRL_DEVICE_ID(val); -- if (id != priv->info->id) { -- dev_err(priv->dev, -- "Switch id detected %x but expected %x", -- id, priv->info->id); -- return -ENODEV; -- } -- -- priv->switch_id = id; -- -- /* Save revision to communicate to the internal PHY driver */ -- priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val); -- -- return 0; --} -- - static int - qca8k_sw_probe(struct mdio_device *mdiodev) - { ---- a/drivers/net/dsa/qca/qca8k-common.c -+++ b/drivers/net/dsa/qca/qca8k-common.c -@@ -1175,3 +1175,32 @@ int qca8k_port_lag_leave(struct dsa_swit - { - return qca8k_lag_refresh_portmap(ds, port, lag, true); - } -+ -+int qca8k_read_switch_id(struct qca8k_priv *priv) -+{ -+ u32 val; -+ u8 id; -+ int ret; -+ -+ if (!priv->info) -+ return -ENODEV; -+ -+ ret = qca8k_read(priv, QCA8K_REG_MASK_CTRL, &val); -+ if (ret < 0) -+ return -ENODEV; -+ -+ id = QCA8K_MASK_CTRL_DEVICE_ID(val); -+ if (id != priv->info->id) { -+ dev_err(priv->dev, -+ "Switch id detected %x but expected %x", -+ id, priv->info->id); -+ return -ENODEV; -+ } -+ -+ priv->switch_id = id; -+ -+ /* Save revision to communicate to the internal PHY driver */ -+ priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val); -+ -+ return 0; -+} ---- a/drivers/net/dsa/qca/qca8k.h -+++ b/drivers/net/dsa/qca/qca8k.h -@@ -424,6 +424,7 @@ extern const struct qca8k_mib_desc ar832 - extern const struct regmap_access_table qca8k_readable_table; - int qca8k_mib_init(struct qca8k_priv *priv); - void qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable); -+int qca8k_read_switch_id(struct qca8k_priv *priv); - - /* Common read/write/rmw function */ - int qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val); diff --git 
a/target/linux/generic/backport-6.1/772-v6.0-net-dsa-qca8k-fix-NULL-pointer-dereference-for-of_de.patch b/target/linux/generic/backport-6.1/772-v6.0-net-dsa-qca8k-fix-NULL-pointer-dereference-for-of_de.patch deleted file mode 100644 index 0cca2788f6d3..000000000000 --- a/target/linux/generic/backport-6.1/772-v6.0-net-dsa-qca8k-fix-NULL-pointer-dereference-for-of_de.patch +++ /dev/null @@ -1,29 +0,0 @@ -From 057bcf15db8e625276ddf02b2b7c668a3cb43f81 Mon Sep 17 00:00:00 2001 -From: Christian Marangi -Date: Sun, 4 Sep 2022 23:46:24 +0200 -Subject: [net PATCH] net: dsa: qca8k: fix NULL pointer dereference for - of_device_get_match_data - -of_device_get_match_data is called on priv->dev before priv->dev is -actually set. Move of_device_get_match_data after priv->dev is correctly -set to fix this kernel panic. - -Fixes: 3bb0844e7bcd ("net: dsa: qca8k: cache match data to speed up access") -Signed-off-by: Christian Marangi ---- - drivers/net/dsa/qca/qca8k-8xxx.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/net/dsa/qca/qca8k-8xxx.c -+++ b/drivers/net/dsa/qca/qca8k-8xxx.c -@@ -1835,9 +1835,9 @@ qca8k_sw_probe(struct mdio_device *mdiod - if (!priv) - return -ENOMEM; - -- priv->info = of_device_get_match_data(priv->dev); - priv->bus = mdiodev->bus; - priv->dev = &mdiodev->dev; -+ priv->info = of_device_get_match_data(priv->dev); - - priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset", - GPIOD_ASIS); diff --git a/target/linux/generic/backport-6.1/773-v5.18-1-net-dsa-Move-VLAN-filtering-syncing-out-of-dsa_switc.patch b/target/linux/generic/backport-6.1/773-v5.18-1-net-dsa-Move-VLAN-filtering-syncing-out-of-dsa_switc.patch deleted file mode 100644 index 44093eab95b3..000000000000 --- a/target/linux/generic/backport-6.1/773-v5.18-1-net-dsa-Move-VLAN-filtering-syncing-out-of-dsa_switc.patch +++ /dev/null @@ -1,77 +0,0 @@ -From 381a730182f1d174e1950cd4e63e885b1c302051 Mon Sep 17 00:00:00 2001 -From: Tobias Waldekranz -Date: Mon, 24 Jan 2022 22:09:43 +0100 -Subject: net: dsa: Move VLAN filtering syncing out of dsa_switch_bridge_leave - -Most of dsa_switch_bridge_leave was, in fact, dealing with the syncing -of VLAN filtering for switches on which that is a global -setting. Separate the two phases to prepare for the cross-chip related -bugfix in the following commit. - -Signed-off-by: Tobias Waldekranz -Reviewed-by: Vladimir Oltean -Signed-off-by: David S. 
Miller ---- - net/dsa/switch.c | 38 +++++++++++++++++++++++++------------- - 1 file changed, 25 insertions(+), 13 deletions(-) - ---- a/net/dsa/switch.c -+++ b/net/dsa/switch.c -@@ -113,25 +113,14 @@ static int dsa_switch_bridge_join(struct - return dsa_tag_8021q_bridge_join(ds, info); - } - --static int dsa_switch_bridge_leave(struct dsa_switch *ds, -- struct dsa_notifier_bridge_info *info) -+static int dsa_switch_sync_vlan_filtering(struct dsa_switch *ds, -+ struct dsa_notifier_bridge_info *info) - { -- struct dsa_switch_tree *dst = ds->dst; - struct netlink_ext_ack extack = {0}; - bool change_vlan_filtering = false; - bool vlan_filtering; - int err, port; - -- if (dst->index == info->tree_index && ds->index == info->sw_index && -- ds->ops->port_bridge_leave) -- ds->ops->port_bridge_leave(ds, info->port, info->br); -- -- if ((dst->index != info->tree_index || ds->index != info->sw_index) && -- ds->ops->crosschip_bridge_leave) -- ds->ops->crosschip_bridge_leave(ds, info->tree_index, -- info->sw_index, info->port, -- info->br); -- - if (ds->needs_standalone_vlan_filtering && !br_vlan_enabled(info->br)) { - change_vlan_filtering = true; - vlan_filtering = true; -@@ -172,6 +161,29 @@ static int dsa_switch_bridge_leave(struc - return err; - } - -+ return 0; -+} -+ -+static int dsa_switch_bridge_leave(struct dsa_switch *ds, -+ struct dsa_notifier_bridge_info *info) -+{ -+ struct dsa_switch_tree *dst = ds->dst; -+ int err; -+ -+ if (dst->index == info->tree_index && ds->index == info->sw_index && -+ ds->ops->port_bridge_leave) -+ ds->ops->port_bridge_leave(ds, info->port, info->br); -+ -+ if ((dst->index != info->tree_index || ds->index != info->sw_index) && -+ ds->ops->crosschip_bridge_leave) -+ ds->ops->crosschip_bridge_leave(ds, info->tree_index, -+ info->sw_index, info->port, -+ info->br); -+ -+ err = dsa_switch_sync_vlan_filtering(ds, info); -+ if (err) -+ return err; -+ - return dsa_tag_8021q_bridge_leave(ds, info); - } - diff --git a/target/linux/generic/backport-6.1/773-v5.18-2-net-dsa-Avoid-cross-chip-syncing-of-VLAN-filtering.patch b/target/linux/generic/backport-6.1/773-v5.18-2-net-dsa-Avoid-cross-chip-syncing-of-VLAN-filtering.patch deleted file mode 100644 index cdddbcf14ee9..000000000000 --- a/target/linux/generic/backport-6.1/773-v5.18-2-net-dsa-Avoid-cross-chip-syncing-of-VLAN-filtering.patch +++ /dev/null @@ -1,52 +0,0 @@ -From 108dc8741c203e9d6ce4e973367f1bac20c7192b Mon Sep 17 00:00:00 2001 -From: Tobias Waldekranz -Date: Mon, 24 Jan 2022 22:09:44 +0100 -Subject: net: dsa: Avoid cross-chip syncing of VLAN filtering - -Changes to VLAN filtering are not applicable to cross-chip -notifications. - -On a system like this: - -.-----. .-----. .-----. -| sw1 +---+ sw2 +---+ sw3 | -'-1-2-' '-1-2-' '-1-2-' - -Before this change, upon sw1p1 leaving a bridge, a call to -dsa_port_vlan_filtering would also be made to sw2p1 and sw3p1. - -In this scenario: - -.---------. .-----. .-----. -| sw1 +---+ sw2 +---+ sw3 | -'-1-2-3-4-' '-1-2-' '-1-2-' - -When sw1p4 would leave a bridge, dsa_port_vlan_filtering would be -called for sw2 and sw3 with a non-existing port - leading to array -out-of-bounds accesses and crashes on mv88e6xxx. - -Fixes: d371b7c92d19 ("net: dsa: Unset vlan_filtering when ports leave the bridge") -Signed-off-by: Tobias Waldekranz -Reviewed-by: Vladimir Oltean -Signed-off-by: David S. 
Miller ---- - net/dsa/switch.c | 8 +++++--- - 1 file changed, 5 insertions(+), 3 deletions(-) - ---- a/net/dsa/switch.c -+++ b/net/dsa/switch.c -@@ -180,9 +180,11 @@ static int dsa_switch_bridge_leave(struc - info->sw_index, info->port, - info->br); - -- err = dsa_switch_sync_vlan_filtering(ds, info); -- if (err) -- return err; -+ if (ds->dst->index == info->tree_index && ds->index == info->sw_index) { -+ err = dsa_switch_sync_vlan_filtering(ds, info); -+ if (err) -+ return err; -+ } - - return dsa_tag_8021q_bridge_leave(ds, info); - } diff --git a/target/linux/generic/backport-6.1/774-v5.16-01-net-dsa-rtl8366rb-Support-bridge-offloading.patch b/target/linux/generic/backport-6.1/774-v5.16-01-net-dsa-rtl8366rb-Support-bridge-offloading.patch deleted file mode 100644 index 78570c5e6e3e..000000000000 --- a/target/linux/generic/backport-6.1/774-v5.16-01-net-dsa-rtl8366rb-Support-bridge-offloading.patch +++ /dev/null @@ -1,141 +0,0 @@ -From c9111895fd38dadf125e07be627778a9950d8d77 Mon Sep 17 00:00:00 2001 -From: DENG Qingfang -Date: Sun, 26 Sep 2021 00:59:24 +0200 -Subject: [PATCH 01/11] net: dsa: rtl8366rb: Support bridge offloading -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Use port isolation registers to configure bridge offloading. - -Tested on the D-Link DIR-685, switching between ports and -sniffing ports to make sure no packets leak. - -Cc: Vladimir Oltean -Cc: Mauri Sandberg -Reviewed-by: Vladimir Oltean -Reviewed-by: Alvin Å ipraga -Reviewed-by: Florian Fainelli -Signed-off-by: DENG Qingfang -Signed-off-by: Linus Walleij -Signed-off-by: David S. Miller ---- - drivers/net/dsa/rtl8366rb.c | 86 +++++++++++++++++++++++++++++++++++++ - 1 file changed, 86 insertions(+) - ---- a/drivers/net/dsa/rtl8366rb.c -+++ b/drivers/net/dsa/rtl8366rb.c -@@ -300,6 +300,13 @@ - #define RTL8366RB_INTERRUPT_STATUS_REG 0x0442 - #define RTL8366RB_NUM_INTERRUPT 14 /* 0..13 */ - -+/* Port isolation registers */ -+#define RTL8366RB_PORT_ISO_BASE 0x0F08 -+#define RTL8366RB_PORT_ISO(pnum) (RTL8366RB_PORT_ISO_BASE + (pnum)) -+#define RTL8366RB_PORT_ISO_EN BIT(0) -+#define RTL8366RB_PORT_ISO_PORTS_MASK GENMASK(7, 1) -+#define RTL8366RB_PORT_ISO_PORTS(pmask) ((pmask) << 1) -+ - /* bits 0..5 enable force when cleared */ - #define RTL8366RB_MAC_FORCE_CTRL_REG 0x0F11 - -@@ -835,6 +842,21 @@ static int rtl8366rb_setup(struct dsa_sw - if (ret) - return ret; - -+ /* Isolate all user ports so they can only send packets to itself and the CPU port */ -+ for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) { -+ ret = regmap_write(smi->map, RTL8366RB_PORT_ISO(i), -+ RTL8366RB_PORT_ISO_PORTS(BIT(RTL8366RB_PORT_NUM_CPU)) | -+ RTL8366RB_PORT_ISO_EN); -+ if (ret) -+ return ret; -+ } -+ /* CPU port can send packets to all ports */ -+ ret = regmap_write(smi->map, RTL8366RB_PORT_ISO(RTL8366RB_PORT_NUM_CPU), -+ RTL8366RB_PORT_ISO_PORTS(dsa_user_ports(ds)) | -+ RTL8366RB_PORT_ISO_EN); -+ if (ret) -+ return ret; -+ - /* Set up the "green ethernet" feature */ - ret = rtl8366rb_jam_table(rtl8366rb_green_jam, - ARRAY_SIZE(rtl8366rb_green_jam), smi, false); -@@ -1127,6 +1149,68 @@ rtl8366rb_port_disable(struct dsa_switch - rb8366rb_set_port_led(smi, port, false); - } - -+static int -+rtl8366rb_port_bridge_join(struct dsa_switch *ds, int port, -+ struct net_device *bridge) -+{ -+ struct realtek_smi *smi = ds->priv; -+ unsigned int port_bitmap = 0; -+ int ret, i; -+ -+ /* Loop over all other ports than the current one */ -+ for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) { -+ /* Current port handled 
last */ -+ if (i == port) -+ continue; -+ /* Not on this bridge */ -+ if (dsa_to_port(ds, i)->bridge_dev != bridge) -+ continue; -+ /* Join this port to each other port on the bridge */ -+ ret = regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(i), -+ RTL8366RB_PORT_ISO_PORTS(BIT(port)), -+ RTL8366RB_PORT_ISO_PORTS(BIT(port))); -+ if (ret) -+ dev_err(smi->dev, "failed to join port %d\n", port); -+ -+ port_bitmap |= BIT(i); -+ } -+ -+ /* Set the bits for the ports we can access */ -+ return regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(port), -+ RTL8366RB_PORT_ISO_PORTS(port_bitmap), -+ RTL8366RB_PORT_ISO_PORTS(port_bitmap)); -+} -+ -+static void -+rtl8366rb_port_bridge_leave(struct dsa_switch *ds, int port, -+ struct net_device *bridge) -+{ -+ struct realtek_smi *smi = ds->priv; -+ unsigned int port_bitmap = 0; -+ int ret, i; -+ -+ /* Loop over all other ports than this one */ -+ for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) { -+ /* Current port handled last */ -+ if (i == port) -+ continue; -+ /* Not on this bridge */ -+ if (dsa_to_port(ds, i)->bridge_dev != bridge) -+ continue; -+ /* Remove this port from any other port on the bridge */ -+ ret = regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(i), -+ RTL8366RB_PORT_ISO_PORTS(BIT(port)), 0); -+ if (ret) -+ dev_err(smi->dev, "failed to leave port %d\n", port); -+ -+ port_bitmap |= BIT(i); -+ } -+ -+ /* Clear the bits for the ports we can not access, leave ourselves */ -+ regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(port), -+ RTL8366RB_PORT_ISO_PORTS(port_bitmap), 0); -+} -+ - static int rtl8366rb_change_mtu(struct dsa_switch *ds, int port, int new_mtu) - { - struct realtek_smi *smi = ds->priv; -@@ -1510,6 +1594,8 @@ static const struct dsa_switch_ops rtl83 - .get_strings = rtl8366_get_strings, - .get_ethtool_stats = rtl8366_get_ethtool_stats, - .get_sset_count = rtl8366_get_sset_count, -+ .port_bridge_join = rtl8366rb_port_bridge_join, -+ .port_bridge_leave = rtl8366rb_port_bridge_leave, - .port_vlan_filtering = rtl8366_vlan_filtering, - .port_vlan_add = rtl8366_vlan_add, - .port_vlan_del = rtl8366_vlan_del, diff --git a/target/linux/generic/backport-6.1/774-v5.16-02-net-dsa-rtl8366-Drop-custom-VLAN-set-up.patch b/target/linux/generic/backport-6.1/774-v5.16-02-net-dsa-rtl8366-Drop-custom-VLAN-set-up.patch deleted file mode 100644 index e61349a32cab..000000000000 --- a/target/linux/generic/backport-6.1/774-v5.16-02-net-dsa-rtl8366-Drop-custom-VLAN-set-up.patch +++ /dev/null @@ -1,118 +0,0 @@ -From 96cf10a8e7297065459473c081a6fb6432a22312 Mon Sep 17 00:00:00 2001 -From: Linus Walleij -Date: Sun, 26 Sep 2021 00:59:25 +0200 -Subject: [PATCH 02/11] net: dsa: rtl8366: Drop custom VLAN set-up -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -This hacky default VLAN setup was done in order to direct -packets to the right ports and provide port isolation, both -which we now support properly using custom tags and proper -bridge port isolation. - -We can drop the custom VLAN code and leave all VLAN handling -alone, as users expect things to be. We can also drop -ds->configure_vlan_while_not_filtering = false; and let -the core deal with any VLANs it wants. - -Cc: Mauri Sandberg -Cc: DENG Qingfang -Reviewed-by: Vladimir Oltean -Reviewed-by: Alvin Å ipraga -Reviewed-by: Florian Fainelli -Signed-off-by: Linus Walleij -Signed-off-by: David S. 
Miller ---- - drivers/net/dsa/realtek-smi-core.h | 1 - - drivers/net/dsa/rtl8366.c | 48 ------------------------------ - drivers/net/dsa/rtl8366rb.c | 4 +-- - 3 files changed, 1 insertion(+), 52 deletions(-) - ---- a/drivers/net/dsa/realtek-smi-core.h -+++ b/drivers/net/dsa/realtek-smi-core.h -@@ -129,7 +129,6 @@ int rtl8366_set_pvid(struct realtek_smi - int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable); - int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable); - int rtl8366_reset_vlan(struct realtek_smi *smi); --int rtl8366_init_vlan(struct realtek_smi *smi); - int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, - struct netlink_ext_ack *extack); - int rtl8366_vlan_add(struct dsa_switch *ds, int port, ---- a/drivers/net/dsa/rtl8366.c -+++ b/drivers/net/dsa/rtl8366.c -@@ -292,54 +292,6 @@ int rtl8366_reset_vlan(struct realtek_sm - } - EXPORT_SYMBOL_GPL(rtl8366_reset_vlan); - --int rtl8366_init_vlan(struct realtek_smi *smi) --{ -- int port; -- int ret; -- -- ret = rtl8366_reset_vlan(smi); -- if (ret) -- return ret; -- -- /* Loop over the available ports, for each port, associate -- * it with the VLAN (port+1) -- */ -- for (port = 0; port < smi->num_ports; port++) { -- u32 mask; -- -- if (port == smi->cpu_port) -- /* For the CPU port, make all ports members of this -- * VLAN. -- */ -- mask = GENMASK((int)smi->num_ports - 1, 0); -- else -- /* For all other ports, enable itself plus the -- * CPU port. -- */ -- mask = BIT(port) | BIT(smi->cpu_port); -- -- /* For each port, set the port as member of VLAN (port+1) -- * and untagged, except for the CPU port: the CPU port (5) is -- * member of VLAN 6 and so are ALL the other ports as well. -- * Use filter 0 (no filter). -- */ -- dev_info(smi->dev, "VLAN%d port mask for port %d, %08x\n", -- (port + 1), port, mask); -- ret = rtl8366_set_vlan(smi, (port + 1), mask, mask, 0); -- if (ret) -- return ret; -- -- dev_info(smi->dev, "VLAN%d port %d, PVID set to %d\n", -- (port + 1), port, (port + 1)); -- ret = rtl8366_set_pvid(smi, port, (port + 1)); -- if (ret) -- return ret; -- } -- -- return rtl8366_enable_vlan(smi, true); --} --EXPORT_SYMBOL_GPL(rtl8366_init_vlan); -- - int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, - struct netlink_ext_ack *extack) - { ---- a/drivers/net/dsa/rtl8366rb.c -+++ b/drivers/net/dsa/rtl8366rb.c -@@ -985,7 +985,7 @@ static int rtl8366rb_setup(struct dsa_sw - return ret; - } - -- ret = rtl8366_init_vlan(smi); -+ ret = rtl8366_reset_vlan(smi); - if (ret) - return ret; - -@@ -999,8 +999,6 @@ static int rtl8366rb_setup(struct dsa_sw - return -ENODEV; - } - -- ds->configure_vlan_while_not_filtering = false; -- - return 0; - } - diff --git a/target/linux/generic/backport-6.1/774-v5.16-03-net-dsa-rtl8366rb-Rewrite-weird-VLAN-filering-enable.patch b/target/linux/generic/backport-6.1/774-v5.16-03-net-dsa-rtl8366rb-Rewrite-weird-VLAN-filering-enable.patch deleted file mode 100644 index 2b1975239945..000000000000 --- a/target/linux/generic/backport-6.1/774-v5.16-03-net-dsa-rtl8366rb-Rewrite-weird-VLAN-filering-enable.patch +++ /dev/null @@ -1,270 +0,0 @@ -From 7028f54b620f8df344b18e46e4a78e266091ab45 Mon Sep 17 00:00:00 2001 -From: Linus Walleij -Date: Sun, 26 Sep 2021 00:59:26 +0200 -Subject: [PATCH 03/11] net: dsa: rtl8366rb: Rewrite weird VLAN filering - enablement -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -While we were defining one VLAN per port for isolating the ports -the port_vlan_filtering() 
callback was implemented to enable a -VLAN on the port + 1. This function makes no sense, not only is -it incomplete as it only enables the VLAN, it doesn't do what -the callback is supposed to do, which is to selectively enable -and disable filtering on a certain port. - -Implement the correct callback: we have two registers dealing -with filtering on the RTL9366RB, so we implement an ASIC-specific -callback and implement filering using the register bit that makes -the switch drop frames if the port is not in the VLAN member set. - -The DSA documentation Documentation/networking/switchdev.rst states: - - When the bridge has VLAN filtering enabled and a PVID is not - configured on the ingress port, untagged and 802.1p tagged - packets must be dropped. When the bridge has VLAN filtering - enabled and a PVID exists on the ingress port, untagged and - priority-tagged packets must be accepted and forwarded according - to the bridge's port membership of the PVID VLAN. When the - bridge has VLAN filtering disabled, the presence/lack of a - PVID should not influence the packet forwarding decision. - -To comply with this, we add two arrays of bool in the RTL8366RB -state that keeps track of if filtering and PVID is enabled or -not for each port. We then add code such that whenever filtering -or PVID changes, we update the filter according to the -specification. - -Cc: Vladimir Oltean -Cc: Mauri Sandberg -Cc: Alvin Å ipraga -Cc: Florian Fainelli -Cc: DENG Qingfang -Signed-off-by: Linus Walleij -Signed-off-by: David S. Miller ---- - drivers/net/dsa/realtek-smi-core.h | 2 - - drivers/net/dsa/rtl8366.c | 35 ---------- - drivers/net/dsa/rtl8366rb.c | 102 +++++++++++++++++++++++++++-- - 3 files changed, 95 insertions(+), 44 deletions(-) - ---- a/drivers/net/dsa/realtek-smi-core.h -+++ b/drivers/net/dsa/realtek-smi-core.h -@@ -129,8 +129,6 @@ int rtl8366_set_pvid(struct realtek_smi - int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable); - int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable); - int rtl8366_reset_vlan(struct realtek_smi *smi); --int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, -- struct netlink_ext_ack *extack); - int rtl8366_vlan_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan, - struct netlink_ext_ack *extack); ---- a/drivers/net/dsa/rtl8366.c -+++ b/drivers/net/dsa/rtl8366.c -@@ -292,41 +292,6 @@ int rtl8366_reset_vlan(struct realtek_sm - } - EXPORT_SYMBOL_GPL(rtl8366_reset_vlan); - --int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, -- struct netlink_ext_ack *extack) --{ -- struct realtek_smi *smi = ds->priv; -- struct rtl8366_vlan_4k vlan4k; -- int ret; -- -- /* Use VLAN nr port + 1 since VLAN0 is not valid */ -- if (!smi->ops->is_vlan_valid(smi, port + 1)) -- return -EINVAL; -- -- dev_info(smi->dev, "%s filtering on port %d\n", -- vlan_filtering ? "enable" : "disable", -- port); -- -- /* TODO: -- * The hardware support filter ID (FID) 0..7, I have no clue how to -- * support this in the driver when the callback only says on/off. 
-- */ -- ret = smi->ops->get_vlan_4k(smi, port + 1, &vlan4k); -- if (ret) -- return ret; -- -- /* Just set the filter to FID 1 for now then */ -- ret = rtl8366_set_vlan(smi, port + 1, -- vlan4k.member, -- vlan4k.untag, -- 1); -- if (ret) -- return ret; -- -- return 0; --} --EXPORT_SYMBOL_GPL(rtl8366_vlan_filtering); -- - int rtl8366_vlan_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan, - struct netlink_ext_ack *extack) ---- a/drivers/net/dsa/rtl8366rb.c -+++ b/drivers/net/dsa/rtl8366rb.c -@@ -143,6 +143,21 @@ - #define RTL8366RB_PHY_NO_OFFSET 9 - #define RTL8366RB_PHY_NO_MASK (0x1f << 9) - -+/* VLAN Ingress Control Register 1, one bit per port. -+ * bit 0 .. 5 will make the switch drop ingress frames without -+ * VID such as untagged or priority-tagged frames for respective -+ * port. -+ * bit 6 .. 11 will make the switch drop ingress frames carrying -+ * a C-tag with VID != 0 for respective port. -+ */ -+#define RTL8366RB_VLAN_INGRESS_CTRL1_REG 0x037E -+#define RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port) (BIT((port)) | BIT((port) + 6)) -+ -+/* VLAN Ingress Control Register 2, one bit per port. -+ * bit0 .. bit5 will make the switch drop all ingress frames with -+ * a VLAN classification that does not include the port is in its -+ * member set. -+ */ - #define RTL8366RB_VLAN_INGRESS_CTRL2_REG 0x037f - - /* LED control registers */ -@@ -321,9 +336,13 @@ - /** - * struct rtl8366rb - RTL8366RB-specific data - * @max_mtu: per-port max MTU setting -+ * @pvid_enabled: if PVID is set for respective port -+ * @vlan_filtering: if VLAN filtering is enabled for respective port - */ - struct rtl8366rb { - unsigned int max_mtu[RTL8366RB_NUM_PORTS]; -+ bool pvid_enabled[RTL8366RB_NUM_PORTS]; -+ bool vlan_filtering[RTL8366RB_NUM_PORTS]; - }; - - static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = { -@@ -933,11 +952,13 @@ static int rtl8366rb_setup(struct dsa_sw - if (ret) - return ret; - -- /* Discard VLAN tagged packets if the port is not a member of -- * the VLAN with which the packets is associated. -- */ -+ /* Accept all packets by default, we enable filtering on-demand */ -+ ret = regmap_write(smi->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG, -+ 0); -+ if (ret) -+ return ret; - ret = regmap_write(smi->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG, -- RTL8366RB_PORT_ALL); -+ 0); - if (ret) - return ret; - -@@ -1209,6 +1230,53 @@ rtl8366rb_port_bridge_leave(struct dsa_s - RTL8366RB_PORT_ISO_PORTS(port_bitmap), 0); - } - -+/** -+ * rtl8366rb_drop_untagged() - make the switch drop untagged and C-tagged frames -+ * @smi: SMI state container -+ * @port: the port to drop untagged and C-tagged frames on -+ * @drop: whether to drop or pass untagged and C-tagged frames -+ */ -+static int rtl8366rb_drop_untagged(struct realtek_smi *smi, int port, bool drop) -+{ -+ return regmap_update_bits(smi->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG, -+ RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port), -+ drop ? RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port) : 0); -+} -+ -+static int rtl8366rb_vlan_filtering(struct dsa_switch *ds, int port, -+ bool vlan_filtering, -+ struct netlink_ext_ack *extack) -+{ -+ struct realtek_smi *smi = ds->priv; -+ struct rtl8366rb *rb; -+ int ret; -+ -+ rb = smi->chip_data; -+ -+ dev_dbg(smi->dev, "port %d: %s VLAN filtering\n", port, -+ vlan_filtering ? "enable" : "disable"); -+ -+ /* If the port is not in the member set, the frame will be dropped */ -+ ret = regmap_update_bits(smi->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG, -+ BIT(port), vlan_filtering ? 
BIT(port) : 0); -+ if (ret) -+ return ret; -+ -+ /* Keep track if filtering is enabled on each port */ -+ rb->vlan_filtering[port] = vlan_filtering; -+ -+ /* If VLAN filtering is enabled and PVID is also enabled, we must -+ * not drop any untagged or C-tagged frames. If we turn off VLAN -+ * filtering on a port, we need ti accept any frames. -+ */ -+ if (vlan_filtering) -+ ret = rtl8366rb_drop_untagged(smi, port, !rb->pvid_enabled[port]); -+ else -+ ret = rtl8366rb_drop_untagged(smi, port, false); -+ -+ return ret; -+} -+ - static int rtl8366rb_change_mtu(struct dsa_switch *ds, int port, int new_mtu) - { - struct realtek_smi *smi = ds->priv; -@@ -1420,14 +1488,34 @@ static int rtl8366rb_get_mc_index(struct - - static int rtl8366rb_set_mc_index(struct realtek_smi *smi, int port, int index) - { -+ struct rtl8366rb *rb; -+ bool pvid_enabled; -+ int ret; -+ -+ rb = smi->chip_data; -+ pvid_enabled = !!index; -+ - if (port >= smi->num_ports || index >= RTL8366RB_NUM_VLANS) - return -EINVAL; - -- return regmap_update_bits(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port), -+ ret = regmap_update_bits(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port), - RTL8366RB_PORT_VLAN_CTRL_MASK << - RTL8366RB_PORT_VLAN_CTRL_SHIFT(port), - (index & RTL8366RB_PORT_VLAN_CTRL_MASK) << - RTL8366RB_PORT_VLAN_CTRL_SHIFT(port)); -+ if (ret) -+ return ret; -+ -+ rb->pvid_enabled[port] = pvid_enabled; -+ -+ /* If VLAN filtering is enabled and PVID is also enabled, we must -+ * not drop any untagged or C-tagged frames. Make sure to update the -+ * filtering setting. -+ */ -+ if (rb->vlan_filtering[port]) -+ ret = rtl8366rb_drop_untagged(smi, port, !pvid_enabled); -+ -+ return ret; - } - - static bool rtl8366rb_is_vlan_valid(struct realtek_smi *smi, unsigned int vlan) -@@ -1437,7 +1525,7 @@ static bool rtl8366rb_is_vlan_valid(stru - if (smi->vlan4k_enabled) - max = RTL8366RB_NUM_VIDS - 1; - -- if (vlan == 0 || vlan > max) -+ if (vlan > max) - return false; - - return true; -@@ -1594,7 +1682,7 @@ static const struct dsa_switch_ops rtl83 - .get_sset_count = rtl8366_get_sset_count, - .port_bridge_join = rtl8366rb_port_bridge_join, - .port_bridge_leave = rtl8366rb_port_bridge_leave, -- .port_vlan_filtering = rtl8366_vlan_filtering, -+ .port_vlan_filtering = rtl8366rb_vlan_filtering, - .port_vlan_add = rtl8366_vlan_add, - .port_vlan_del = rtl8366_vlan_del, - .port_enable = rtl8366rb_port_enable, diff --git a/target/linux/generic/backport-6.1/774-v5.16-06-net-dsa-rtl8366-Drop-and-depromote-pointless-prints.patch b/target/linux/generic/backport-6.1/774-v5.16-06-net-dsa-rtl8366-Drop-and-depromote-pointless-prints.patch deleted file mode 100644 index b56032c366b0..000000000000 --- a/target/linux/generic/backport-6.1/774-v5.16-06-net-dsa-rtl8366-Drop-and-depromote-pointless-prints.patch +++ /dev/null @@ -1,51 +0,0 @@ -From ddb59a5dc42714999c335dab4bf256125ba3120c Mon Sep 17 00:00:00 2001 -From: Linus Walleij -Date: Sun, 26 Sep 2021 00:59:29 +0200 -Subject: [PATCH 06/11] net: dsa: rtl8366: Drop and depromote pointless prints -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -We don't need a message for every VLAN association, dbg -is fine. The message about adding the DSA or CPU -port to a VLAN is directly misleading, this is perfectly -fine. - -Cc: Vladimir Oltean -Cc: Mauri Sandberg -Cc: DENG Qingfang -Reviewed-by: Alvin Å ipraga -Reviewed-by: Florian Fainelli -Signed-off-by: Linus Walleij -Signed-off-by: David S. 
Miller ---- - drivers/net/dsa/rtl8366.c | 11 ++++------- - 1 file changed, 4 insertions(+), 7 deletions(-) - ---- a/drivers/net/dsa/rtl8366.c -+++ b/drivers/net/dsa/rtl8366.c -@@ -318,12 +318,9 @@ int rtl8366_vlan_add(struct dsa_switch * - return ret; - } - -- dev_info(smi->dev, "add VLAN %d on port %d, %s, %s\n", -- vlan->vid, port, untagged ? "untagged" : "tagged", -- pvid ? " PVID" : "no PVID"); -- -- if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port)) -- dev_err(smi->dev, "port is DSA or CPU port\n"); -+ dev_dbg(smi->dev, "add VLAN %d on port %d, %s, %s\n", -+ vlan->vid, port, untagged ? "untagged" : "tagged", -+ pvid ? "PVID" : "no PVID"); - - member |= BIT(port); - -@@ -356,7 +353,7 @@ int rtl8366_vlan_del(struct dsa_switch * - struct realtek_smi *smi = ds->priv; - int ret, i; - -- dev_info(smi->dev, "del VLAN %04x on port %d\n", vlan->vid, port); -+ dev_dbg(smi->dev, "del VLAN %d on port %d\n", vlan->vid, port); - - for (i = 0; i < smi->num_vlan_mc; i++) { - struct rtl8366_vlan_mc vlanmc; diff --git a/target/linux/generic/backport-6.1/774-v5.16-07-net-dsa-rtl8366rb-Use-core-filtering-tracking.patch b/target/linux/generic/backport-6.1/774-v5.16-07-net-dsa-rtl8366rb-Use-core-filtering-tracking.patch deleted file mode 100644 index 8cd1df97f24e..000000000000 --- a/target/linux/generic/backport-6.1/774-v5.16-07-net-dsa-rtl8366rb-Use-core-filtering-tracking.patch +++ /dev/null @@ -1,61 +0,0 @@ -From 5c9b66f3c8a3f72fa2a58e89a57c6d7afd550bf0 Mon Sep 17 00:00:00 2001 -From: Linus Walleij -Date: Wed, 29 Sep 2021 13:23:22 +0200 -Subject: [PATCH 07/11] net: dsa: rtl8366rb: Use core filtering tracking -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -We added a state variable to track whether a certain port -was VLAN filtering or not, but we can just inquire the DSA -core about this. - -Cc: Vladimir Oltean -Cc: Mauri Sandberg -Cc: DENG Qingfang -Cc: Alvin Å ipraga -Cc: Reviewed-by: Florian Fainelli -Signed-off-by: Linus Walleij -Signed-off-by: David S. Miller ---- - drivers/net/dsa/rtl8366rb.c | 9 ++------- - 1 file changed, 2 insertions(+), 7 deletions(-) - ---- a/drivers/net/dsa/rtl8366rb.c -+++ b/drivers/net/dsa/rtl8366rb.c -@@ -337,12 +337,10 @@ - * struct rtl8366rb - RTL8366RB-specific data - * @max_mtu: per-port max MTU setting - * @pvid_enabled: if PVID is set for respective port -- * @vlan_filtering: if VLAN filtering is enabled for respective port - */ - struct rtl8366rb { - unsigned int max_mtu[RTL8366RB_NUM_PORTS]; - bool pvid_enabled[RTL8366RB_NUM_PORTS]; -- bool vlan_filtering[RTL8366RB_NUM_PORTS]; - }; - - static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = { -@@ -1262,12 +1260,9 @@ static int rtl8366rb_vlan_filtering(stru - if (ret) - return ret; - -- /* Keep track if filtering is enabled on each port */ -- rb->vlan_filtering[port] = vlan_filtering; -- - /* If VLAN filtering is enabled and PVID is also enabled, we must - * not drop any untagged or C-tagged frames. If we turn off VLAN -- * filtering on a port, we need ti accept any frames. -+ * filtering on a port, we need to accept any frames. - */ - if (vlan_filtering) - ret = rtl8366rb_drop_untagged(smi, port, !rb->pvid_enabled[port]); -@@ -1512,7 +1507,7 @@ static int rtl8366rb_set_mc_index(struct - * not drop any untagged or C-tagged frames. Make sure to update the - * filtering setting. 
- */ -- if (rb->vlan_filtering[port]) -+ if (dsa_port_is_vlan_filtering(dsa_to_port(smi->ds, port))) - ret = rtl8366rb_drop_untagged(smi, port, !pvid_enabled); - - return ret; diff --git a/target/linux/generic/backport-6.1/774-v5.16-08-net-dsa-rtl8366rb-Support-disabling-learning.patch b/target/linux/generic/backport-6.1/774-v5.16-08-net-dsa-rtl8366rb-Support-disabling-learning.patch deleted file mode 100644 index 8306eb5aefa1..000000000000 --- a/target/linux/generic/backport-6.1/774-v5.16-08-net-dsa-rtl8366rb-Support-disabling-learning.patch +++ /dev/null @@ -1,115 +0,0 @@ -From 831a3d26bea0d14f8563eecf96def660a74a3000 Mon Sep 17 00:00:00 2001 -From: Linus Walleij -Date: Tue, 5 Oct 2021 21:47:02 +0200 -Subject: [PATCH 08/11] net: dsa: rtl8366rb: Support disabling learning -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -The RTL8366RB hardware supports disabling learning per-port -so let's make use of this feature. Rename some unfortunately -named registers in the process. - -Suggested-by: Vladimir Oltean -Cc: Alvin Å ipraga -Cc: Mauri Sandberg -Cc: Florian Fainelli -Cc: DENG Qingfang -Reviewed-by: Vladimir Oltean -Signed-off-by: Linus Walleij -Signed-off-by: David S. Miller ---- - drivers/net/dsa/rtl8366rb.c | 50 ++++++++++++++++++++++++++++++++----- - 1 file changed, 44 insertions(+), 6 deletions(-) - ---- a/drivers/net/dsa/rtl8366rb.c -+++ b/drivers/net/dsa/rtl8366rb.c -@@ -14,6 +14,7 @@ - - #include - #include -+#include - #include - #include - #include -@@ -42,9 +43,12 @@ - /* Port Enable Control register */ - #define RTL8366RB_PECR 0x0001 - --/* Switch Security Control registers */ --#define RTL8366RB_SSCR0 0x0002 --#define RTL8366RB_SSCR1 0x0003 -+/* Switch per-port learning disablement register */ -+#define RTL8366RB_PORT_LEARNDIS_CTRL 0x0002 -+ -+/* Security control, actually aging register */ -+#define RTL8366RB_SECURITY_CTRL 0x0003 -+ - #define RTL8366RB_SSCR2 0x0004 - #define RTL8366RB_SSCR2_DROP_UNKNOWN_DA BIT(0) - -@@ -927,13 +931,14 @@ static int rtl8366rb_setup(struct dsa_sw - /* layer 2 size, see rtl8366rb_change_mtu() */ - rb->max_mtu[i] = 1532; - -- /* Enable learning for all ports */ -- ret = regmap_write(smi->map, RTL8366RB_SSCR0, 0); -+ /* Disable learning for all ports */ -+ ret = regmap_write(smi->map, RTL8366RB_PORT_LEARNDIS_CTRL, -+ RTL8366RB_PORT_ALL); - if (ret) - return ret; - - /* Enable auto ageing for all ports */ -- ret = regmap_write(smi->map, RTL8366RB_SSCR1, 0); -+ ret = regmap_write(smi->map, RTL8366RB_SECURITY_CTRL, 0); - if (ret) - return ret; - -@@ -1272,6 +1277,37 @@ static int rtl8366rb_vlan_filtering(stru - return ret; - } - -+static int -+rtl8366rb_port_pre_bridge_flags(struct dsa_switch *ds, int port, -+ struct switchdev_brport_flags flags, -+ struct netlink_ext_ack *extack) -+{ -+ /* We support enabling/disabling learning */ -+ if (flags.mask & ~(BR_LEARNING)) -+ return -EINVAL; -+ -+ return 0; -+} -+ -+static int -+rtl8366rb_port_bridge_flags(struct dsa_switch *ds, int port, -+ struct switchdev_brport_flags flags, -+ struct netlink_ext_ack *extack) -+{ -+ struct realtek_smi *smi = ds->priv; -+ int ret; -+ -+ if (flags.mask & BR_LEARNING) { -+ ret = regmap_update_bits(smi->map, RTL8366RB_PORT_LEARNDIS_CTRL, -+ BIT(port), -+ (flags.val & BR_LEARNING) ? 
0 : BIT(port)); -+ if (ret) -+ return ret; -+ } -+ -+ return 0; -+} -+ - static int rtl8366rb_change_mtu(struct dsa_switch *ds, int port, int new_mtu) - { - struct realtek_smi *smi = ds->priv; -@@ -1682,6 +1718,8 @@ static const struct dsa_switch_ops rtl83 - .port_vlan_del = rtl8366_vlan_del, - .port_enable = rtl8366rb_port_enable, - .port_disable = rtl8366rb_port_disable, -+ .port_pre_bridge_flags = rtl8366rb_port_pre_bridge_flags, -+ .port_bridge_flags = rtl8366rb_port_bridge_flags, - .port_change_mtu = rtl8366rb_change_mtu, - .port_max_mtu = rtl8366rb_max_mtu, - }; diff --git a/target/linux/generic/backport-6.1/774-v5.16-09-net-dsa-rtl8366rb-Support-fast-aging.patch b/target/linux/generic/backport-6.1/774-v5.16-09-net-dsa-rtl8366rb-Support-fast-aging.patch deleted file mode 100644 index a74108e23dbd..000000000000 --- a/target/linux/generic/backport-6.1/774-v5.16-09-net-dsa-rtl8366rb-Support-fast-aging.patch +++ /dev/null @@ -1,57 +0,0 @@ -From 8eb13420eb9ab4a4e2ebd612bf5dc9dba0039236 Mon Sep 17 00:00:00 2001 -From: Linus Walleij -Date: Tue, 5 Oct 2021 21:47:03 +0200 -Subject: [PATCH 09/11] net: dsa: rtl8366rb: Support fast aging -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -This implements fast aging per-port using the special "security" -register, which will flush any learned L2 LUT entries on a port. - -The vendor API just enabled setting and clearing this bit, so -we set it to age out any entries on the port and then we clear -it again. - -Suggested-by: Vladimir Oltean -Cc: Mauri Sandberg -Cc: DENG Qingfang -Cc: Florian Fainelli -Reviewed-by: Alvin Å ipraga -Signed-off-by: Linus Walleij -Reviewed-by: Vladimir Oltean -Signed-off-by: David S. Miller ---- - drivers/net/dsa/rtl8366rb.c | 14 ++++++++++++++ - 1 file changed, 14 insertions(+) - ---- a/drivers/net/dsa/rtl8366rb.c -+++ b/drivers/net/dsa/rtl8366rb.c -@@ -1308,6 +1308,19 @@ rtl8366rb_port_bridge_flags(struct dsa_s - return 0; - } - -+static void -+rtl8366rb_port_fast_age(struct dsa_switch *ds, int port) -+{ -+ struct realtek_smi *smi = ds->priv; -+ -+ /* This will age out any learned L2 entries */ -+ regmap_update_bits(smi->map, RTL8366RB_SECURITY_CTRL, -+ BIT(port), BIT(port)); -+ /* Restore the normal state of things */ -+ regmap_update_bits(smi->map, RTL8366RB_SECURITY_CTRL, -+ BIT(port), 0); -+} -+ - static int rtl8366rb_change_mtu(struct dsa_switch *ds, int port, int new_mtu) - { - struct realtek_smi *smi = ds->priv; -@@ -1720,6 +1733,7 @@ static const struct dsa_switch_ops rtl83 - .port_disable = rtl8366rb_port_disable, - .port_pre_bridge_flags = rtl8366rb_port_pre_bridge_flags, - .port_bridge_flags = rtl8366rb_port_bridge_flags, -+ .port_fast_age = rtl8366rb_port_fast_age, - .port_change_mtu = rtl8366rb_change_mtu, - .port_max_mtu = rtl8366rb_max_mtu, - }; diff --git a/target/linux/generic/backport-6.1/774-v5.16-10-net-dsa-rtl8366rb-Support-setting-STP-state.patch b/target/linux/generic/backport-6.1/774-v5.16-10-net-dsa-rtl8366rb-Support-setting-STP-state.patch deleted file mode 100644 index e787ce948119..000000000000 --- a/target/linux/generic/backport-6.1/774-v5.16-10-net-dsa-rtl8366rb-Support-setting-STP-state.patch +++ /dev/null @@ -1,107 +0,0 @@ -From 90c855471a89d3e05ecf5b6464bd04abf2c83b70 Mon Sep 17 00:00:00 2001 -From: Linus Walleij -Date: Tue, 5 Oct 2021 21:47:04 +0200 -Subject: [PATCH 10/11] net: dsa: rtl8366rb: Support setting STP state -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -This adds support for setting 
the STP state to the RTL8366RB -DSA switch. This rids the following message from the kernel on -e.g. OpenWrt: - -DSA: failed to set STP state 3 (-95) - -Since the RTL8366RB has one STP state register per FID with -two bit per port in each, we simply loop over all the FIDs -and set the state on all of them. - -Cc: Vladimir Oltean -Cc: Alvin Å ipraga -Cc: Mauri Sandberg -Cc: DENG Qingfang -Signed-off-by: Linus Walleij -Reviewed-by: Vladimir Oltean -Signed-off-by: David S. Miller ---- - drivers/net/dsa/rtl8366rb.c | 48 +++++++++++++++++++++++++++++++++++++ - 1 file changed, 48 insertions(+) - ---- a/drivers/net/dsa/rtl8366rb.c -+++ b/drivers/net/dsa/rtl8366rb.c -@@ -110,6 +110,18 @@ - - #define RTL8366RB_POWER_SAVING_REG 0x0021 - -+/* Spanning tree status (STP) control, two bits per port per FID */ -+#define RTL8366RB_STP_STATE_BASE 0x0050 /* 0x0050..0x0057 */ -+#define RTL8366RB_STP_STATE_DISABLED 0x0 -+#define RTL8366RB_STP_STATE_BLOCKING 0x1 -+#define RTL8366RB_STP_STATE_LEARNING 0x2 -+#define RTL8366RB_STP_STATE_FORWARDING 0x3 -+#define RTL8366RB_STP_MASK GENMASK(1, 0) -+#define RTL8366RB_STP_STATE(port, state) \ -+ ((state) << ((port) * 2)) -+#define RTL8366RB_STP_STATE_MASK(port) \ -+ RTL8366RB_STP_STATE((port), RTL8366RB_STP_MASK) -+ - /* CPU port control reg */ - #define RTL8368RB_CPU_CTRL_REG 0x0061 - #define RTL8368RB_CPU_PORTS_MSK 0x00FF -@@ -234,6 +246,7 @@ - #define RTL8366RB_NUM_LEDGROUPS 4 - #define RTL8366RB_NUM_VIDS 4096 - #define RTL8366RB_PRIORITYMAX 7 -+#define RTL8366RB_NUM_FIDS 8 - #define RTL8366RB_FIDMAX 7 - - #define RTL8366RB_PORT_1 BIT(0) /* In userspace port 0 */ -@@ -1309,6 +1322,40 @@ rtl8366rb_port_bridge_flags(struct dsa_s - } - - static void -+rtl8366rb_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) -+{ -+ struct realtek_smi *smi = ds->priv; -+ u32 val; -+ int i; -+ -+ switch (state) { -+ case BR_STATE_DISABLED: -+ val = RTL8366RB_STP_STATE_DISABLED; -+ break; -+ case BR_STATE_BLOCKING: -+ case BR_STATE_LISTENING: -+ val = RTL8366RB_STP_STATE_BLOCKING; -+ break; -+ case BR_STATE_LEARNING: -+ val = RTL8366RB_STP_STATE_LEARNING; -+ break; -+ case BR_STATE_FORWARDING: -+ val = RTL8366RB_STP_STATE_FORWARDING; -+ break; -+ default: -+ dev_err(smi->dev, "unknown bridge state requested\n"); -+ return; -+ }; -+ -+ /* Set the same status for the port on all the FIDs */ -+ for (i = 0; i < RTL8366RB_NUM_FIDS; i++) { -+ regmap_update_bits(smi->map, RTL8366RB_STP_STATE_BASE + i, -+ RTL8366RB_STP_STATE_MASK(port), -+ RTL8366RB_STP_STATE(port, val)); -+ } -+} -+ -+static void - rtl8366rb_port_fast_age(struct dsa_switch *ds, int port) - { - struct realtek_smi *smi = ds->priv; -@@ -1733,6 +1780,7 @@ static const struct dsa_switch_ops rtl83 - .port_disable = rtl8366rb_port_disable, - .port_pre_bridge_flags = rtl8366rb_port_pre_bridge_flags, - .port_bridge_flags = rtl8366rb_port_bridge_flags, -+ .port_stp_state_set = rtl8366rb_port_stp_state_set, - .port_fast_age = rtl8366rb_port_fast_age, - .port_change_mtu = rtl8366rb_change_mtu, - .port_max_mtu = rtl8366rb_max_mtu, diff --git a/target/linux/generic/backport-6.1/775-v6.0-01-net-ethernet-stmicro-stmmac-move-queue-reset-to-dedi.patch b/target/linux/generic/backport-6.1/775-v6.0-01-net-ethernet-stmicro-stmmac-move-queue-reset-to-dedi.patch deleted file mode 100644 index 9b9ab04af6cb..000000000000 --- a/target/linux/generic/backport-6.1/775-v6.0-01-net-ethernet-stmicro-stmmac-move-queue-reset-to-dedi.patch +++ /dev/null @@ -1,142 +0,0 @@ -From f9ec5723c3dbfcede9c7b0dcdf85e401ce16316c Mon Sep 17 00:00:00 2001 -From: 
Christian Marangi -Date: Sat, 23 Jul 2022 16:29:29 +0200 -Subject: [PATCH 1/5] net: ethernet: stmicro: stmmac: move queue reset to - dedicated functions - -Move queue reset to dedicated functions. This aside from a simple -cleanup is also required to allocate a dma conf without resetting the tx -queue while the device is temporarily detached as now the reset is not -part of the dma init function and can be done later in the code flow. - -Signed-off-by: Christian Marangi -Signed-off-by: Jakub Kicinski ---- - .../net/ethernet/stmicro/stmmac/stmmac_main.c | 59 ++++++++++--------- - 1 file changed, 31 insertions(+), 28 deletions(-) - ---- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c -+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c -@@ -130,6 +130,9 @@ static irqreturn_t stmmac_mac_interrupt( - static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id); - static irqreturn_t stmmac_msi_intr_tx(int irq, void *data); - static irqreturn_t stmmac_msi_intr_rx(int irq, void *data); -+static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue); -+static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue); -+static void stmmac_reset_queues_param(struct stmmac_priv *priv); - static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue); - static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue); - -@@ -1712,9 +1715,6 @@ static int __init_dma_rx_desc_rings(stru - return -ENOMEM; - } - -- rx_q->cur_rx = 0; -- rx_q->dirty_rx = 0; -- - /* Setup the chained descriptor addresses */ - if (priv->mode == STMMAC_CHAIN_MODE) { - if (priv->extend_desc) -@@ -1820,12 +1820,6 @@ static int __init_dma_tx_desc_rings(stru - tx_q->tx_skbuff[i] = NULL; - } - -- tx_q->dirty_tx = 0; -- tx_q->cur_tx = 0; -- tx_q->mss = 0; -- -- netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue)); -- - return 0; - } - -@@ -2694,10 +2688,7 @@ static void stmmac_tx_err(struct stmmac_ - stmmac_stop_tx_dma(priv, chan); - dma_free_tx_skbufs(priv, chan); - stmmac_clear_tx_descriptors(priv, chan); -- tx_q->dirty_tx = 0; -- tx_q->cur_tx = 0; -- tx_q->mss = 0; -- netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan)); -+ stmmac_reset_tx_queue(priv, chan); - stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, - tx_q->dma_tx_phy, chan); - stmmac_start_tx_dma(priv, chan); -@@ -3781,6 +3772,8 @@ static int stmmac_open(struct net_device - } - } - -+ stmmac_reset_queues_param(priv); -+ - ret = stmmac_hw_setup(dev, true); - if (ret < 0) { - netdev_err(priv->dev, "%s: Hw setup failed\n", __func__); -@@ -6430,6 +6423,7 @@ void stmmac_enable_rx_queue(struct stmma - return; - } - -+ stmmac_reset_rx_queue(priv, queue); - stmmac_clear_rx_descriptors(priv, queue); - - stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, -@@ -6491,6 +6485,7 @@ void stmmac_enable_tx_queue(struct stmma - return; - } - -+ stmmac_reset_tx_queue(priv, queue); - stmmac_clear_tx_descriptors(priv, queue); - - stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, -@@ -7417,6 +7412,25 @@ int stmmac_suspend(struct device *dev) - } - EXPORT_SYMBOL_GPL(stmmac_suspend); - -+static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue) -+{ -+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; -+ -+ rx_q->cur_rx = 0; -+ rx_q->dirty_rx = 0; -+} -+ -+static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue) -+{ -+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; -+ -+ tx_q->cur_tx = 0; -+ tx_q->dirty_tx = 0; -+ tx_q->mss = 0; -+ -+ 
netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue)); -+} -+ - /** - * stmmac_reset_queues_param - reset queue parameters - * @priv: device pointer -@@ -7427,22 +7441,11 @@ static void stmmac_reset_queues_param(st - u32 tx_cnt = priv->plat->tx_queues_to_use; - u32 queue; - -- for (queue = 0; queue < rx_cnt; queue++) { -- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; -+ for (queue = 0; queue < rx_cnt; queue++) -+ stmmac_reset_rx_queue(priv, queue); - -- rx_q->cur_rx = 0; -- rx_q->dirty_rx = 0; -- } -- -- for (queue = 0; queue < tx_cnt; queue++) { -- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; -- -- tx_q->cur_tx = 0; -- tx_q->dirty_tx = 0; -- tx_q->mss = 0; -- -- netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue)); -- } -+ for (queue = 0; queue < tx_cnt; queue++) -+ stmmac_reset_tx_queue(priv, queue); - } - - /** diff --git a/target/linux/generic/backport-6.1/775-v6.0-02-net-ethernet-stmicro-stmmac-first-disable-all-queues.patch b/target/linux/generic/backport-6.1/775-v6.0-02-net-ethernet-stmicro-stmmac-first-disable-all-queues.patch deleted file mode 100644 index 8eca92a5c54d..000000000000 --- a/target/linux/generic/backport-6.1/775-v6.0-02-net-ethernet-stmicro-stmmac-first-disable-all-queues.patch +++ /dev/null @@ -1,37 +0,0 @@ -From 7028471edb646bfc532fec0973e50e784cdcb7c6 Mon Sep 17 00:00:00 2001 -From: Christian Marangi -Date: Sat, 23 Jul 2022 16:29:30 +0200 -Subject: [PATCH 2/5] net: ethernet: stmicro: stmmac: first disable all queues - and disconnect in release - -Disable all queues and disconnect before tx_disable in stmmac_release to -prevent a corner case where packet may be still queued at the same time -tx_disable is called resulting in kernel panic if some packet still has -to be processed. - -Signed-off-by: Christian Marangi -Signed-off-by: Jakub Kicinski ---- - drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c -+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c -@@ -3833,8 +3833,6 @@ static int stmmac_release(struct net_dev - struct stmmac_priv *priv = netdev_priv(dev); - u32 chan; - -- netif_tx_disable(dev); -- - if (device_may_wakeup(priv->device)) - phylink_speed_down(priv->phylink, false); - /* Stop and disconnect the PHY */ -@@ -3846,6 +3844,8 @@ static int stmmac_release(struct net_dev - for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) - hrtimer_cancel(&priv->tx_queue[chan].txtimer); - -+ netif_tx_disable(dev); -+ - /* Free the IRQ lines */ - stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0); - diff --git a/target/linux/generic/backport-6.1/775-v6.0-03-net-ethernet-stmicro-stmmac-move-dma-conf-to-dedicat.patch b/target/linux/generic/backport-6.1/775-v6.0-03-net-ethernet-stmicro-stmmac-move-dma-conf-to-dedicat.patch deleted file mode 100644 index d4c2567a1fdf..000000000000 --- a/target/linux/generic/backport-6.1/775-v6.0-03-net-ethernet-stmicro-stmmac-move-dma-conf-to-dedicat.patch +++ /dev/null @@ -1,1289 +0,0 @@ -From 8531c80800c10e8ef7952022326c2f983e1314bf Mon Sep 17 00:00:00 2001 -From: Christian Marangi -Date: Sat, 23 Jul 2022 16:29:31 +0200 -Subject: [PATCH 3/5] net: ethernet: stmicro: stmmac: move dma conf to - dedicated struct - -Move dma buf conf to dedicated struct. This in preparation for code -rework that will permit to allocate separate dma_conf without affecting -the priv struct. 
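Two things are easy to miss while scanning the large mechanical diff of this patch: every former priv->dma_rx_size / priv->tx_queue[...] access becomes priv->dma_conf.*, and the per-queue MSI handlers can no longer recover priv with a single container_of() because the queues now live one level deeper. The short standalone sketch below illustrates that second point with two chained container_of() steps; the struct and field names are trimmed stand-ins rather than the real driver definitions, and a fixed channel index is used for portability where the driver resolves it at run time.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define CHAN 2	/* fixed here for portability; the driver resolves it at run time */

struct tx_queue { int queue_index; };

struct dma_conf {			/* stand-in for struct stmmac_dma_conf */
	unsigned int dma_tx_size;
	struct tx_queue tx_queue[4];
};

struct priv {				/* stand-in for struct stmmac_priv */
	int wol_irq;
	struct dma_conf dma_conf;
};

int main(void)
{
	static struct priv p = { .wol_irq = 42 };
	struct tx_queue *tx_q = &p.dma_conf.tx_queue[CHAN];	/* what the ISR is handed */

	/* First hop: queue -> dma_conf; second hop: dma_conf -> priv. */
	struct dma_conf *conf = container_of(tx_q, struct dma_conf, tx_queue[CHAN]);
	struct priv *priv = container_of(conf, struct priv, dma_conf);

	printf("recovered priv, wol_irq=%d\n", priv->wol_irq);	/* prints 42 */
	return 0;
}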
- -Signed-off-by: Christian Marangi -Signed-off-by: Jakub Kicinski ---- - .../net/ethernet/stmicro/stmmac/chain_mode.c | 6 +- - .../net/ethernet/stmicro/stmmac/ring_mode.c | 4 +- - drivers/net/ethernet/stmicro/stmmac/stmmac.h | 21 +- - .../ethernet/stmicro/stmmac/stmmac_ethtool.c | 4 +- - .../net/ethernet/stmicro/stmmac/stmmac_main.c | 286 +++++++++--------- - .../stmicro/stmmac/stmmac_selftests.c | 8 +- - .../net/ethernet/stmicro/stmmac/stmmac_tc.c | 6 +- - 7 files changed, 172 insertions(+), 163 deletions(-) - ---- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c -+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c -@@ -46,7 +46,7 @@ static int jumbo_frm(void *p, struct sk_ - - while (len != 0) { - tx_q->tx_skbuff[entry] = NULL; -- entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); -+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); - desc = tx_q->dma_tx + entry; - - if (len > bmax) { -@@ -137,7 +137,7 @@ static void refill_desc3(void *priv_ptr, - */ - p->des3 = cpu_to_le32((unsigned int)(rx_q->dma_rx_phy + - (((rx_q->dirty_rx) + 1) % -- priv->dma_rx_size) * -+ priv->dma_conf.dma_rx_size) * - sizeof(struct dma_desc))); - } - -@@ -155,7 +155,7 @@ static void clean_desc3(void *priv_ptr, - */ - p->des3 = cpu_to_le32((unsigned int)((tx_q->dma_tx_phy + - ((tx_q->dirty_tx + 1) % -- priv->dma_tx_size)) -+ priv->dma_conf.dma_tx_size)) - * sizeof(struct dma_desc))); - } - ---- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c -+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c -@@ -51,7 +51,7 @@ static int jumbo_frm(void *p, struct sk_ - stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum, - STMMAC_RING_MODE, 0, false, skb->len); - tx_q->tx_skbuff[entry] = NULL; -- entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); -+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); - - if (priv->extend_desc) - desc = (struct dma_desc *)(tx_q->dma_etx + entry); -@@ -107,7 +107,7 @@ static void refill_desc3(void *priv_ptr, - struct stmmac_priv *priv = rx_q->priv_data; - - /* Fill DES3 in case of RING mode */ -- if (priv->dma_buf_sz == BUF_SIZE_16KiB) -+ if (priv->dma_conf.dma_buf_sz == BUF_SIZE_16KiB) - p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB); - } - ---- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h -+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h -@@ -185,6 +185,18 @@ struct stmmac_rfs_entry { - int tc; - }; - -+struct stmmac_dma_conf { -+ unsigned int dma_buf_sz; -+ -+ /* RX Queue */ -+ struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES]; -+ unsigned int dma_rx_size; -+ -+ /* TX Queue */ -+ struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES]; -+ unsigned int dma_tx_size; -+}; -+ - struct stmmac_priv { - /* Frequently used values are kept adjacent for cache effect */ - u32 tx_coal_frames[MTL_MAX_TX_QUEUES]; -@@ -199,7 +211,6 @@ struct stmmac_priv { - int sph_cap; - u32 sarc_type; - -- unsigned int dma_buf_sz; - unsigned int rx_copybreak; - u32 rx_riwt[MTL_MAX_TX_QUEUES]; - int hwts_rx_en; -@@ -211,13 +222,7 @@ struct stmmac_priv { - int (*hwif_quirks)(struct stmmac_priv *priv); - struct mutex lock; - -- /* RX Queue */ -- struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES]; -- unsigned int dma_rx_size; -- -- /* TX Queue */ -- struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES]; -- unsigned int dma_tx_size; -+ struct stmmac_dma_conf dma_conf; - - /* Generic channel for NAPI */ - struct stmmac_channel channel[STMMAC_CH_MAX]; ---- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c -+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c -@@ -484,8 +484,8 @@ 
static void stmmac_get_ringparam(struct - - ring->rx_max_pending = DMA_MAX_RX_SIZE; - ring->tx_max_pending = DMA_MAX_TX_SIZE; -- ring->rx_pending = priv->dma_rx_size; -- ring->tx_pending = priv->dma_tx_size; -+ ring->rx_pending = priv->dma_conf.dma_rx_size; -+ ring->tx_pending = priv->dma_conf.dma_tx_size; - } - - static int stmmac_set_ringparam(struct net_device *netdev, ---- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c -+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c -@@ -74,8 +74,8 @@ static int phyaddr = -1; - module_param(phyaddr, int, 0444); - MODULE_PARM_DESC(phyaddr, "Physical device address"); - --#define STMMAC_TX_THRESH(x) ((x)->dma_tx_size / 4) --#define STMMAC_RX_THRESH(x) ((x)->dma_rx_size / 4) -+#define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4) -+#define STMMAC_RX_THRESH(x) ((x)->dma_conf.dma_rx_size / 4) - - /* Limit to make sure XDP TX and slow path can coexist */ - #define STMMAC_XSK_TX_BUDGET_MAX 256 -@@ -232,7 +232,7 @@ static void stmmac_disable_all_queues(st - - /* synchronize_rcu() needed for pending XDP buffers to drain */ - for (queue = 0; queue < rx_queues_cnt; queue++) { -- rx_q = &priv->rx_queue[queue]; -+ rx_q = &priv->dma_conf.rx_queue[queue]; - if (rx_q->xsk_pool) { - synchronize_rcu(); - break; -@@ -358,13 +358,13 @@ static void print_pkt(unsigned char *buf - - static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue) - { -- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; -+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; - u32 avail; - - if (tx_q->dirty_tx > tx_q->cur_tx) - avail = tx_q->dirty_tx - tx_q->cur_tx - 1; - else -- avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1; -+ avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1; - - return avail; - } -@@ -376,13 +376,13 @@ static inline u32 stmmac_tx_avail(struct - */ - static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue) - { -- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; -+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; - u32 dirty; - - if (rx_q->dirty_rx <= rx_q->cur_rx) - dirty = rx_q->cur_rx - rx_q->dirty_rx; - else -- dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx; -+ dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx; - - return dirty; - } -@@ -410,7 +410,7 @@ static int stmmac_enable_eee_mode(struct - - /* check if all TX queues have the work finished */ - for (queue = 0; queue < tx_cnt; queue++) { -- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; -+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; - - if (tx_q->dirty_tx != tx_q->cur_tx) - return -EBUSY; /* still unfinished work */ -@@ -1309,7 +1309,7 @@ static void stmmac_display_rx_rings(stru - - /* Display RX rings */ - for (queue = 0; queue < rx_cnt; queue++) { -- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; -+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; - - pr_info("\tRX Queue %u rings\n", queue); - -@@ -1322,7 +1322,7 @@ static void stmmac_display_rx_rings(stru - } - - /* Display RX ring */ -- stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true, -+ stmmac_display_ring(priv, head_rx, priv->dma_conf.dma_rx_size, true, - rx_q->dma_rx_phy, desc_size); - } - } -@@ -1336,7 +1336,7 @@ static void stmmac_display_tx_rings(stru - - /* Display TX rings */ - for (queue = 0; queue < tx_cnt; queue++) { -- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; -+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; - - 
pr_info("\tTX Queue %d rings\n", queue); - -@@ -1351,7 +1351,7 @@ static void stmmac_display_tx_rings(stru - desc_size = sizeof(struct dma_desc); - } - -- stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false, -+ stmmac_display_ring(priv, head_tx, priv->dma_conf.dma_tx_size, false, - tx_q->dma_tx_phy, desc_size); - } - } -@@ -1392,21 +1392,21 @@ static int stmmac_set_bfsize(int mtu, in - */ - static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue) - { -- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; -+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; - int i; - - /* Clear the RX descriptors */ -- for (i = 0; i < priv->dma_rx_size; i++) -+ for (i = 0; i < priv->dma_conf.dma_rx_size; i++) - if (priv->extend_desc) - stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic, - priv->use_riwt, priv->mode, -- (i == priv->dma_rx_size - 1), -- priv->dma_buf_sz); -+ (i == priv->dma_conf.dma_rx_size - 1), -+ priv->dma_conf.dma_buf_sz); - else - stmmac_init_rx_desc(priv, &rx_q->dma_rx[i], - priv->use_riwt, priv->mode, -- (i == priv->dma_rx_size - 1), -- priv->dma_buf_sz); -+ (i == priv->dma_conf.dma_rx_size - 1), -+ priv->dma_conf.dma_buf_sz); - } - - /** -@@ -1418,12 +1418,12 @@ static void stmmac_clear_rx_descriptors( - */ - static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue) - { -- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; -+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; - int i; - - /* Clear the TX descriptors */ -- for (i = 0; i < priv->dma_tx_size; i++) { -- int last = (i == (priv->dma_tx_size - 1)); -+ for (i = 0; i < priv->dma_conf.dma_tx_size; i++) { -+ int last = (i == (priv->dma_conf.dma_tx_size - 1)); - struct dma_desc *p; - - if (priv->extend_desc) -@@ -1471,7 +1471,7 @@ static void stmmac_clear_descriptors(str - static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, - int i, gfp_t flags, u32 queue) - { -- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; -+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; - struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; - - if (!buf->page) { -@@ -1496,7 +1496,7 @@ static int stmmac_init_rx_buffers(struct - buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset; - - stmmac_set_desc_addr(priv, p, buf->addr); -- if (priv->dma_buf_sz == BUF_SIZE_16KiB) -+ if (priv->dma_conf.dma_buf_sz == BUF_SIZE_16KiB) - stmmac_init_desc3(priv, p); - - return 0; -@@ -1510,7 +1510,7 @@ static int stmmac_init_rx_buffers(struct - */ - static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i) - { -- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; -+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; - struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; - - if (buf->page) -@@ -1530,7 +1530,7 @@ static void stmmac_free_rx_buffer(struct - */ - static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i) - { -- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; -+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; - - if (tx_q->tx_skbuff_dma[i].buf && - tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) { -@@ -1575,17 +1575,17 @@ static void dma_free_rx_skbufs(struct st - { - int i; - -- for (i = 0; i < priv->dma_rx_size; i++) -+ for (i = 0; i < priv->dma_conf.dma_rx_size; i++) - stmmac_free_rx_buffer(priv, queue, i); - } - - static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue, - gfp_t flags) - { -- struct stmmac_rx_queue *rx_q 
= &priv->rx_queue[queue]; -+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; - int i; - -- for (i = 0; i < priv->dma_rx_size; i++) { -+ for (i = 0; i < priv->dma_conf.dma_rx_size; i++) { - struct dma_desc *p; - int ret; - -@@ -1612,10 +1612,10 @@ static int stmmac_alloc_rx_buffers(struc - */ - static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue) - { -- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; -+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; - int i; - -- for (i = 0; i < priv->dma_rx_size; i++) { -+ for (i = 0; i < priv->dma_conf.dma_rx_size; i++) { - struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; - - if (!buf->xdp) -@@ -1628,10 +1628,10 @@ static void dma_free_rx_xskbufs(struct s - - static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, u32 queue) - { -- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; -+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; - int i; - -- for (i = 0; i < priv->dma_rx_size; i++) { -+ for (i = 0; i < priv->dma_conf.dma_rx_size; i++) { - struct stmmac_rx_buffer *buf; - dma_addr_t dma_addr; - struct dma_desc *p; -@@ -1674,7 +1674,7 @@ static struct xsk_buff_pool *stmmac_get_ - */ - static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags) - { -- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; -+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; - int ret; - - netif_dbg(priv, probe, priv->dev, -@@ -1720,11 +1720,11 @@ static int __init_dma_rx_desc_rings(stru - if (priv->extend_desc) - stmmac_mode_init(priv, rx_q->dma_erx, - rx_q->dma_rx_phy, -- priv->dma_rx_size, 1); -+ priv->dma_conf.dma_rx_size, 1); - else - stmmac_mode_init(priv, rx_q->dma_rx, - rx_q->dma_rx_phy, -- priv->dma_rx_size, 0); -+ priv->dma_conf.dma_rx_size, 0); - } - - return 0; -@@ -1751,7 +1751,7 @@ static int init_dma_rx_desc_rings(struct - - err_init_rx_buffers: - while (queue >= 0) { -- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; -+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; - - if (rx_q->xsk_pool) - dma_free_rx_xskbufs(priv, queue); -@@ -1780,7 +1780,7 @@ err_init_rx_buffers: - */ - static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue) - { -- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; -+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; - int i; - - netif_dbg(priv, probe, priv->dev, -@@ -1792,16 +1792,16 @@ static int __init_dma_tx_desc_rings(stru - if (priv->extend_desc) - stmmac_mode_init(priv, tx_q->dma_etx, - tx_q->dma_tx_phy, -- priv->dma_tx_size, 1); -+ priv->dma_conf.dma_tx_size, 1); - else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) - stmmac_mode_init(priv, tx_q->dma_tx, - tx_q->dma_tx_phy, -- priv->dma_tx_size, 0); -+ priv->dma_conf.dma_tx_size, 0); - } - - tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue); - -- for (i = 0; i < priv->dma_tx_size; i++) { -+ for (i = 0; i < priv->dma_conf.dma_tx_size; i++) { - struct dma_desc *p; - - if (priv->extend_desc) -@@ -1871,12 +1871,12 @@ static int init_dma_desc_rings(struct ne - */ - static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue) - { -- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; -+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; - int i; - - tx_q->xsk_frames_done = 0; - -- for (i = 0; i < priv->dma_tx_size; i++) -+ for (i = 0; i < priv->dma_conf.dma_tx_size; i++) - stmmac_free_tx_buffer(priv, queue, i); - - if (tx_q->xsk_pool && tx_q->xsk_frames_done) { -@@ -1906,7 +1906,7 @@ static 
void stmmac_free_tx_skbufs(struct - */ - static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue) - { -- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; -+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; - - /* Release the DMA RX socket buffers */ - if (rx_q->xsk_pool) -@@ -1919,11 +1919,11 @@ static void __free_dma_rx_desc_resources - - /* Free DMA regions of consistent memory previously allocated */ - if (!priv->extend_desc) -- dma_free_coherent(priv->device, priv->dma_rx_size * -+ dma_free_coherent(priv->device, priv->dma_conf.dma_rx_size * - sizeof(struct dma_desc), - rx_q->dma_rx, rx_q->dma_rx_phy); - else -- dma_free_coherent(priv->device, priv->dma_rx_size * -+ dma_free_coherent(priv->device, priv->dma_conf.dma_rx_size * - sizeof(struct dma_extended_desc), - rx_q->dma_erx, rx_q->dma_rx_phy); - -@@ -1952,7 +1952,7 @@ static void free_dma_rx_desc_resources(s - */ - static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue) - { -- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; -+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; - size_t size; - void *addr; - -@@ -1970,7 +1970,7 @@ static void __free_dma_tx_desc_resources - addr = tx_q->dma_tx; - } - -- size *= priv->dma_tx_size; -+ size *= priv->dma_conf.dma_tx_size; - - dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy); - -@@ -1999,7 +1999,7 @@ static void free_dma_tx_desc_resources(s - */ - static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue) - { -- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; -+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; - struct stmmac_channel *ch = &priv->channel[queue]; - bool xdp_prog = stmmac_xdp_is_enabled(priv); - struct page_pool_params pp_params = { 0 }; -@@ -2011,8 +2011,8 @@ static int __alloc_dma_rx_desc_resources - rx_q->priv_data = priv; - - pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; -- pp_params.pool_size = priv->dma_rx_size; -- num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE); -+ pp_params.pool_size = priv->dma_conf.dma_rx_size; -+ num_pages = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE); - pp_params.order = ilog2(num_pages); - pp_params.nid = dev_to_node(priv->device); - pp_params.dev = priv->device; -@@ -2027,7 +2027,7 @@ static int __alloc_dma_rx_desc_resources - return ret; - } - -- rx_q->buf_pool = kcalloc(priv->dma_rx_size, -+ rx_q->buf_pool = kcalloc(priv->dma_conf.dma_rx_size, - sizeof(*rx_q->buf_pool), - GFP_KERNEL); - if (!rx_q->buf_pool) -@@ -2035,7 +2035,7 @@ static int __alloc_dma_rx_desc_resources - - if (priv->extend_desc) { - rx_q->dma_erx = dma_alloc_coherent(priv->device, -- priv->dma_rx_size * -+ priv->dma_conf.dma_rx_size * - sizeof(struct dma_extended_desc), - &rx_q->dma_rx_phy, - GFP_KERNEL); -@@ -2044,7 +2044,7 @@ static int __alloc_dma_rx_desc_resources - - } else { - rx_q->dma_rx = dma_alloc_coherent(priv->device, -- priv->dma_rx_size * -+ priv->dma_conf.dma_rx_size * - sizeof(struct dma_desc), - &rx_q->dma_rx_phy, - GFP_KERNEL); -@@ -2101,20 +2101,20 @@ err_dma: - */ - static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue) - { -- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; -+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; - size_t size; - void *addr; - - tx_q->queue_index = queue; - tx_q->priv_data = priv; - -- tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size, -+ tx_q->tx_skbuff_dma = kcalloc(priv->dma_conf.dma_tx_size, - 
sizeof(*tx_q->tx_skbuff_dma), - GFP_KERNEL); - if (!tx_q->tx_skbuff_dma) - return -ENOMEM; - -- tx_q->tx_skbuff = kcalloc(priv->dma_tx_size, -+ tx_q->tx_skbuff = kcalloc(priv->dma_conf.dma_tx_size, - sizeof(struct sk_buff *), - GFP_KERNEL); - if (!tx_q->tx_skbuff) -@@ -2127,7 +2127,7 @@ static int __alloc_dma_tx_desc_resources - else - size = sizeof(struct dma_desc); - -- size *= priv->dma_tx_size; -+ size *= priv->dma_conf.dma_tx_size; - - addr = dma_alloc_coherent(priv->device, size, - &tx_q->dma_tx_phy, GFP_KERNEL); -@@ -2371,7 +2371,7 @@ static void stmmac_dma_operation_mode(st - - /* configure all channels */ - for (chan = 0; chan < rx_channels_count; chan++) { -- struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan]; -+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan]; - u32 buf_size; - - qmode = priv->plat->rx_queues_cfg[chan].mode_to_use; -@@ -2386,7 +2386,7 @@ static void stmmac_dma_operation_mode(st - chan); - } else { - stmmac_set_dma_bfsize(priv, priv->ioaddr, -- priv->dma_buf_sz, -+ priv->dma_conf.dma_buf_sz, - chan); - } - } -@@ -2402,7 +2402,7 @@ static void stmmac_dma_operation_mode(st - static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) - { - struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue); -- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; -+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; - struct xsk_buff_pool *pool = tx_q->xsk_pool; - unsigned int entry = tx_q->cur_tx; - struct dma_desc *tx_desc = NULL; -@@ -2477,7 +2477,7 @@ static bool stmmac_xdp_xmit_zc(struct st - - stmmac_enable_dma_transmission(priv, priv->ioaddr); - -- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size); -+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); - entry = tx_q->cur_tx; - } - -@@ -2503,7 +2503,7 @@ static bool stmmac_xdp_xmit_zc(struct st - */ - static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) - { -- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; -+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; - unsigned int bytes_compl = 0, pkts_compl = 0; - unsigned int entry, xmits = 0, count = 0; - -@@ -2516,7 +2516,7 @@ static int stmmac_tx_clean(struct stmmac - entry = tx_q->dirty_tx; - - /* Try to clean all TX complete frame in 1 shot */ -- while ((entry != tx_q->cur_tx) && count < priv->dma_tx_size) { -+ while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) { - struct xdp_frame *xdpf; - struct sk_buff *skb; - struct dma_desc *p; -@@ -2616,7 +2616,7 @@ static int stmmac_tx_clean(struct stmmac - - stmmac_release_tx_desc(priv, p, priv->mode); - -- entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); -+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); - } - tx_q->dirty_tx = entry; - -@@ -2681,7 +2681,7 @@ static int stmmac_tx_clean(struct stmmac - */ - static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan) - { -- struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; -+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; - - netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan)); - -@@ -2748,8 +2748,8 @@ static int stmmac_napi_check(struct stmm - { - int status = stmmac_dma_interrupt_status(priv, priv->ioaddr, - &priv->xstats, chan, dir); -- struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan]; -- struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; -+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan]; -+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; - 
struct stmmac_channel *ch = &priv->channel[chan]; - struct napi_struct *rx_napi; - struct napi_struct *tx_napi; -@@ -2925,7 +2925,7 @@ static int stmmac_init_dma_engine(struct - - /* DMA RX Channel Configuration */ - for (chan = 0; chan < rx_channels_count; chan++) { -- rx_q = &priv->rx_queue[chan]; -+ rx_q = &priv->dma_conf.rx_queue[chan]; - - stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, - rx_q->dma_rx_phy, chan); -@@ -2939,7 +2939,7 @@ static int stmmac_init_dma_engine(struct - - /* DMA TX Channel Configuration */ - for (chan = 0; chan < tx_channels_count; chan++) { -- tx_q = &priv->tx_queue[chan]; -+ tx_q = &priv->dma_conf.tx_queue[chan]; - - stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, - tx_q->dma_tx_phy, chan); -@@ -2954,7 +2954,7 @@ static int stmmac_init_dma_engine(struct - - static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue) - { -- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; -+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; - - hrtimer_start(&tx_q->txtimer, - STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]), -@@ -3004,7 +3004,7 @@ static void stmmac_init_coalesce(struct - u32 chan; - - for (chan = 0; chan < tx_channel_count; chan++) { -- struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; -+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; - - priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES; - priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER; -@@ -3026,12 +3026,12 @@ static void stmmac_set_rings_length(stru - /* set TX ring length */ - for (chan = 0; chan < tx_channels_count; chan++) - stmmac_set_tx_ring_len(priv, priv->ioaddr, -- (priv->dma_tx_size - 1), chan); -+ (priv->dma_conf.dma_tx_size - 1), chan); - - /* set RX ring length */ - for (chan = 0; chan < rx_channels_count; chan++) - stmmac_set_rx_ring_len(priv, priv->ioaddr, -- (priv->dma_rx_size - 1), chan); -+ (priv->dma_conf.dma_rx_size - 1), chan); - } - - /** -@@ -3366,7 +3366,7 @@ static int stmmac_hw_setup(struct net_de - /* Enable TSO */ - if (priv->tso) { - for (chan = 0; chan < tx_cnt; chan++) { -- struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; -+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; - - /* TSO and TBS cannot co-exist */ - if (tx_q->tbs & STMMAC_TBS_AVAIL) -@@ -3388,7 +3388,7 @@ static int stmmac_hw_setup(struct net_de - - /* TBS */ - for (chan = 0; chan < tx_cnt; chan++) { -- struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; -+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; - int enable = tx_q->tbs & STMMAC_TBS_AVAIL; - - stmmac_enable_tbs(priv, priv->ioaddr, enable, chan); -@@ -3432,7 +3432,7 @@ static void stmmac_free_irq(struct net_d - for (j = irq_idx - 1; j >= 0; j--) { - if (priv->tx_irq[j] > 0) { - irq_set_affinity_hint(priv->tx_irq[j], NULL); -- free_irq(priv->tx_irq[j], &priv->tx_queue[j]); -+ free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]); - } - } - irq_idx = priv->plat->rx_queues_to_use; -@@ -3441,7 +3441,7 @@ static void stmmac_free_irq(struct net_d - for (j = irq_idx - 1; j >= 0; j--) { - if (priv->rx_irq[j] > 0) { - irq_set_affinity_hint(priv->rx_irq[j], NULL); -- free_irq(priv->rx_irq[j], &priv->rx_queue[j]); -+ free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]); - } - } - -@@ -3574,7 +3574,7 @@ static int stmmac_request_irq_multi_msi( - sprintf(int_name, "%s:%s-%d", dev->name, "rx", i); - ret = request_irq(priv->rx_irq[i], - stmmac_msi_intr_rx, -- 0, int_name, &priv->rx_queue[i]); -+ 0, int_name, &priv->dma_conf.rx_queue[i]); - if (unlikely(ret < 0)) 
{ - netdev_err(priv->dev, - "%s: alloc rx-%d MSI %d (error: %d)\n", -@@ -3597,7 +3597,7 @@ static int stmmac_request_irq_multi_msi( - sprintf(int_name, "%s:%s-%d", dev->name, "tx", i); - ret = request_irq(priv->tx_irq[i], - stmmac_msi_intr_tx, -- 0, int_name, &priv->tx_queue[i]); -+ 0, int_name, &priv->dma_conf.tx_queue[i]); - if (unlikely(ret < 0)) { - netdev_err(priv->dev, - "%s: alloc tx-%d MSI %d (error: %d)\n", -@@ -3728,21 +3728,21 @@ static int stmmac_open(struct net_device - bfsize = 0; - - if (bfsize < BUF_SIZE_16KiB) -- bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz); -+ bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_conf.dma_buf_sz); - -- priv->dma_buf_sz = bfsize; -+ priv->dma_conf.dma_buf_sz = bfsize; - buf_sz = bfsize; - - priv->rx_copybreak = STMMAC_RX_COPYBREAK; - -- if (!priv->dma_tx_size) -- priv->dma_tx_size = DMA_DEFAULT_TX_SIZE; -- if (!priv->dma_rx_size) -- priv->dma_rx_size = DMA_DEFAULT_RX_SIZE; -+ if (!priv->dma_conf.dma_tx_size) -+ priv->dma_conf.dma_tx_size = DMA_DEFAULT_TX_SIZE; -+ if (!priv->dma_conf.dma_rx_size) -+ priv->dma_conf.dma_rx_size = DMA_DEFAULT_RX_SIZE; - - /* Earlier check for TBS */ - for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) { -- struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; -+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; - int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en; - - /* Setup per-TXQ tbs flag before TX descriptor alloc */ -@@ -3800,7 +3800,7 @@ irq_error: - phylink_stop(priv->phylink); - - for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) -- hrtimer_cancel(&priv->tx_queue[chan].txtimer); -+ hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); - - stmmac_hw_teardown(dev); - init_error: -@@ -3842,7 +3842,7 @@ static int stmmac_release(struct net_dev - stmmac_disable_all_queues(priv); - - for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) -- hrtimer_cancel(&priv->tx_queue[chan].txtimer); -+ hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); - - netif_tx_disable(dev); - -@@ -3906,7 +3906,7 @@ static bool stmmac_vlan_insert(struct st - return false; - - stmmac_set_tx_owner(priv, p); -- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size); -+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); - return true; - } - -@@ -3924,7 +3924,7 @@ static bool stmmac_vlan_insert(struct st - static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des, - int total_len, bool last_segment, u32 queue) - { -- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; -+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; - struct dma_desc *desc; - u32 buff_size; - int tmp_len; -@@ -3935,7 +3935,7 @@ static void stmmac_tso_allocator(struct - dma_addr_t curr_addr; - - tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, -- priv->dma_tx_size); -+ priv->dma_conf.dma_tx_size); - WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); - - if (tx_q->tbs & STMMAC_TBS_AVAIL) -@@ -3963,7 +3963,7 @@ static void stmmac_tso_allocator(struct - - static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue) - { -- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; -+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; - int desc_size; - - if (likely(priv->extend_desc)) -@@ -4025,7 +4025,7 @@ static netdev_tx_t stmmac_tso_xmit(struc - dma_addr_t des; - int i; - -- tx_q = &priv->tx_queue[queue]; -+ tx_q = &priv->dma_conf.tx_queue[queue]; - first_tx = tx_q->cur_tx; - - /* Compute header lengths */ -@@ -4065,7 +4065,7 @@ static 
netdev_tx_t stmmac_tso_xmit(struc - stmmac_set_mss(priv, mss_desc, mss); - tx_q->mss = mss; - tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, -- priv->dma_tx_size); -+ priv->dma_conf.dma_tx_size); - WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); - } - -@@ -4177,7 +4177,7 @@ static netdev_tx_t stmmac_tso_xmit(struc - * ndo_start_xmit will fill this descriptor the next time it's - * called and stmmac_tx_clean may clean up to this descriptor. - */ -- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size); -+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); - - if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { - netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", -@@ -4265,7 +4265,7 @@ static netdev_tx_t stmmac_xmit(struct sk - int entry, first_tx; - dma_addr_t des; - -- tx_q = &priv->tx_queue[queue]; -+ tx_q = &priv->dma_conf.tx_queue[queue]; - first_tx = tx_q->cur_tx; - - if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en) -@@ -4328,7 +4328,7 @@ static netdev_tx_t stmmac_xmit(struct sk - int len = skb_frag_size(frag); - bool last_segment = (i == (nfrags - 1)); - -- entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); -+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); - WARN_ON(tx_q->tx_skbuff[entry]); - - if (likely(priv->extend_desc)) -@@ -4399,7 +4399,7 @@ static netdev_tx_t stmmac_xmit(struct sk - * ndo_start_xmit will fill this descriptor the next time it's - * called and stmmac_tx_clean may clean up to this descriptor. - */ -- entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); -+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); - tx_q->cur_tx = entry; - - if (netif_msg_pktdata(priv)) { -@@ -4514,7 +4514,7 @@ static void stmmac_rx_vlan(struct net_de - */ - static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) - { -- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; -+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; - int dirty = stmmac_rx_dirty(priv, queue); - unsigned int entry = rx_q->dirty_rx; - -@@ -4564,7 +4564,7 @@ static inline void stmmac_rx_refill(stru - dma_wmb(); - stmmac_set_rx_owner(priv, p, use_rx_wd); - -- entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size); -+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size); - } - rx_q->dirty_rx = entry; - rx_q->rx_tail_addr = rx_q->dma_rx_phy + -@@ -4592,12 +4592,12 @@ static unsigned int stmmac_rx_buf1_len(s - - /* First descriptor, not last descriptor and not split header */ - if (status & rx_not_ls) -- return priv->dma_buf_sz; -+ return priv->dma_conf.dma_buf_sz; - - plen = stmmac_get_rx_frame_len(priv, p, coe); - - /* First descriptor and last descriptor and not split header */ -- return min_t(unsigned int, priv->dma_buf_sz, plen); -+ return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen); - } - - static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv, -@@ -4613,7 +4613,7 @@ static unsigned int stmmac_rx_buf2_len(s - - /* Not last descriptor */ - if (status & rx_not_ls) -- return priv->dma_buf_sz; -+ return priv->dma_conf.dma_buf_sz; - - plen = stmmac_get_rx_frame_len(priv, p, coe); - -@@ -4624,7 +4624,7 @@ static unsigned int stmmac_rx_buf2_len(s - static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue, - struct xdp_frame *xdpf, bool dma_map) - { -- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; -+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; - unsigned int entry = tx_q->cur_tx; - struct dma_desc *tx_desc; - dma_addr_t dma_addr; -@@ -4687,7 
+4687,7 @@ static int stmmac_xdp_xmit_xdpf(struct s - - stmmac_enable_dma_transmission(priv, priv->ioaddr); - -- entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); -+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); - tx_q->cur_tx = entry; - - return STMMAC_XDP_TX; -@@ -4861,7 +4861,7 @@ static void stmmac_dispatch_skb_zc(struc - - static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget) - { -- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; -+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; - unsigned int entry = rx_q->dirty_rx; - struct dma_desc *rx_desc = NULL; - bool ret = true; -@@ -4904,7 +4904,7 @@ static bool stmmac_rx_refill_zc(struct s - dma_wmb(); - stmmac_set_rx_owner(priv, rx_desc, use_rx_wd); - -- entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size); -+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size); - } - - if (rx_desc) { -@@ -4919,7 +4919,7 @@ static bool stmmac_rx_refill_zc(struct s - - static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue) - { -- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; -+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; - unsigned int count = 0, error = 0, len = 0; - int dirty = stmmac_rx_dirty(priv, queue); - unsigned int next_entry = rx_q->cur_rx; -@@ -4941,7 +4941,7 @@ static int stmmac_rx_zc(struct stmmac_pr - desc_size = sizeof(struct dma_desc); - } - -- stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true, -+ stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true, - rx_q->dma_rx_phy, desc_size); - } - while (count < limit) { -@@ -4988,7 +4988,7 @@ read_again: - - /* Prefetch the next RX descriptor */ - rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, -- priv->dma_rx_size); -+ priv->dma_conf.dma_rx_size); - next_entry = rx_q->cur_rx; - - if (priv->extend_desc) -@@ -5109,7 +5109,7 @@ read_again: - */ - static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) - { -- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; -+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; - struct stmmac_channel *ch = &priv->channel[queue]; - unsigned int count = 0, error = 0, len = 0; - int status = 0, coe = priv->hw->rx_csum; -@@ -5122,7 +5122,7 @@ static int stmmac_rx(struct stmmac_priv - int buf_sz; - - dma_dir = page_pool_get_dma_dir(rx_q->page_pool); -- buf_sz = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE; -+ buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE; - - if (netif_msg_rx_status(priv)) { - void *rx_head; -@@ -5136,7 +5136,7 @@ static int stmmac_rx(struct stmmac_priv - desc_size = sizeof(struct dma_desc); - } - -- stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true, -+ stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true, - rx_q->dma_rx_phy, desc_size); - } - while (count < limit) { -@@ -5180,7 +5180,7 @@ read_again: - break; - - rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, -- priv->dma_rx_size); -+ priv->dma_conf.dma_rx_size); - next_entry = rx_q->cur_rx; - - if (priv->extend_desc) -@@ -5314,7 +5314,7 @@ read_again: - buf1_len, dma_dir); - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, - buf->page, buf->page_offset, buf1_len, -- priv->dma_buf_sz); -+ priv->dma_conf.dma_buf_sz); - - /* Data payload appended into SKB */ - page_pool_release_page(rx_q->page_pool, buf->page); -@@ -5326,7 +5326,7 @@ read_again: - buf2_len, dma_dir); - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, - buf->sec_page, 0, buf2_len, -- priv->dma_buf_sz); -+ 
priv->dma_conf.dma_buf_sz); - - /* Data payload appended into SKB */ - page_pool_release_page(rx_q->page_pool, buf->sec_page); -@@ -5768,11 +5768,13 @@ static irqreturn_t stmmac_safety_interru - static irqreturn_t stmmac_msi_intr_tx(int irq, void *data) - { - struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data; -+ struct stmmac_dma_conf *dma_conf; - int chan = tx_q->queue_index; - struct stmmac_priv *priv; - int status; - -- priv = container_of(tx_q, struct stmmac_priv, tx_queue[chan]); -+ dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]); -+ priv = container_of(dma_conf, struct stmmac_priv, dma_conf); - - if (unlikely(!data)) { - netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); -@@ -5812,10 +5814,12 @@ static irqreturn_t stmmac_msi_intr_tx(in - static irqreturn_t stmmac_msi_intr_rx(int irq, void *data) - { - struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data; -+ struct stmmac_dma_conf *dma_conf; - int chan = rx_q->queue_index; - struct stmmac_priv *priv; - -- priv = container_of(rx_q, struct stmmac_priv, rx_queue[chan]); -+ dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]); -+ priv = container_of(dma_conf, struct stmmac_priv, dma_conf); - - if (unlikely(!data)) { - netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); -@@ -5846,10 +5850,10 @@ static void stmmac_poll_controller(struc - - if (priv->plat->multi_msi_en) { - for (i = 0; i < priv->plat->rx_queues_to_use; i++) -- stmmac_msi_intr_rx(0, &priv->rx_queue[i]); -+ stmmac_msi_intr_rx(0, &priv->dma_conf.rx_queue[i]); - - for (i = 0; i < priv->plat->tx_queues_to_use; i++) -- stmmac_msi_intr_tx(0, &priv->tx_queue[i]); -+ stmmac_msi_intr_tx(0, &priv->dma_conf.tx_queue[i]); - } else { - disable_irq(dev->irq); - stmmac_interrupt(dev->irq, dev); -@@ -6030,34 +6034,34 @@ static int stmmac_rings_status_show(stru - return 0; - - for (queue = 0; queue < rx_count; queue++) { -- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; -+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; - - seq_printf(seq, "RX Queue %d:\n", queue); - - if (priv->extend_desc) { - seq_printf(seq, "Extended descriptor ring:\n"); - sysfs_display_ring((void *)rx_q->dma_erx, -- priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy); -+ priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy); - } else { - seq_printf(seq, "Descriptor ring:\n"); - sysfs_display_ring((void *)rx_q->dma_rx, -- priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy); -+ priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy); - } - } - - for (queue = 0; queue < tx_count; queue++) { -- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; -+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; - - seq_printf(seq, "TX Queue %d:\n", queue); - - if (priv->extend_desc) { - seq_printf(seq, "Extended descriptor ring:\n"); - sysfs_display_ring((void *)tx_q->dma_etx, -- priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy); -+ priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy); - } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) { - seq_printf(seq, "Descriptor ring:\n"); - sysfs_display_ring((void *)tx_q->dma_tx, -- priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy); -+ priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy); - } - } - -@@ -6404,7 +6408,7 @@ void stmmac_disable_rx_queue(struct stmm - - void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue) - { -- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; -+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; - struct stmmac_channel *ch = 
&priv->channel[queue]; - unsigned long flags; - u32 buf_size; -@@ -6441,7 +6445,7 @@ void stmmac_enable_rx_queue(struct stmma - rx_q->queue_index); - } else { - stmmac_set_dma_bfsize(priv, priv->ioaddr, -- priv->dma_buf_sz, -+ priv->dma_conf.dma_buf_sz, - rx_q->queue_index); - } - -@@ -6467,7 +6471,7 @@ void stmmac_disable_tx_queue(struct stmm - - void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue) - { -- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; -+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; - struct stmmac_channel *ch = &priv->channel[queue]; - unsigned long flags; - int ret; -@@ -6517,7 +6521,7 @@ void stmmac_xdp_release(struct net_devic - stmmac_disable_all_queues(priv); - - for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) -- hrtimer_cancel(&priv->tx_queue[chan].txtimer); -+ hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); - - /* Free the IRQ lines */ - stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0); -@@ -6576,7 +6580,7 @@ int stmmac_xdp_open(struct net_device *d - - /* DMA RX Channel Configuration */ - for (chan = 0; chan < rx_cnt; chan++) { -- rx_q = &priv->rx_queue[chan]; -+ rx_q = &priv->dma_conf.rx_queue[chan]; - - stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, - rx_q->dma_rx_phy, chan); -@@ -6594,7 +6598,7 @@ int stmmac_xdp_open(struct net_device *d - rx_q->queue_index); - } else { - stmmac_set_dma_bfsize(priv, priv->ioaddr, -- priv->dma_buf_sz, -+ priv->dma_conf.dma_buf_sz, - rx_q->queue_index); - } - -@@ -6603,7 +6607,7 @@ int stmmac_xdp_open(struct net_device *d - - /* DMA TX Channel Configuration */ - for (chan = 0; chan < tx_cnt; chan++) { -- tx_q = &priv->tx_queue[chan]; -+ tx_q = &priv->dma_conf.tx_queue[chan]; - - stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, - tx_q->dma_tx_phy, chan); -@@ -6636,7 +6640,7 @@ int stmmac_xdp_open(struct net_device *d - - irq_error: - for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) -- hrtimer_cancel(&priv->tx_queue[chan].txtimer); -+ hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); - - stmmac_hw_teardown(dev); - init_error: -@@ -6663,8 +6667,8 @@ int stmmac_xsk_wakeup(struct net_device - queue >= priv->plat->tx_queues_to_use) - return -EINVAL; - -- rx_q = &priv->rx_queue[queue]; -- tx_q = &priv->tx_queue[queue]; -+ rx_q = &priv->dma_conf.rx_queue[queue]; -+ tx_q = &priv->dma_conf.tx_queue[queue]; - ch = &priv->channel[queue]; - - if (!rx_q->xsk_pool && !tx_q->xsk_pool) -@@ -6924,8 +6928,8 @@ int stmmac_reinit_ringparam(struct net_d - if (netif_running(dev)) - stmmac_release(dev); - -- priv->dma_rx_size = rx_size; -- priv->dma_tx_size = tx_size; -+ priv->dma_conf.dma_rx_size = rx_size; -+ priv->dma_conf.dma_tx_size = tx_size; - - if (netif_running(dev)) - ret = stmmac_open(dev); -@@ -7363,7 +7367,7 @@ int stmmac_suspend(struct device *dev) - stmmac_disable_all_queues(priv); - - for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) -- hrtimer_cancel(&priv->tx_queue[chan].txtimer); -+ hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); - - if (priv->eee_enabled) { - priv->tx_path_in_lpi_mode = false; -@@ -7414,7 +7418,7 @@ EXPORT_SYMBOL_GPL(stmmac_suspend); - - static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue) - { -- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; -+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; - - rx_q->cur_rx = 0; - rx_q->dirty_rx = 0; -@@ -7422,7 +7426,7 @@ static void stmmac_reset_rx_queue(struct - - static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 
queue) - { -- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; -+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; - - tx_q->cur_tx = 0; - tx_q->dirty_tx = 0; ---- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c -+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c -@@ -795,8 +795,8 @@ static int stmmac_test_flowctrl(struct s - struct stmmac_channel *ch = &priv->channel[i]; - u32 tail; - -- tail = priv->rx_queue[i].dma_rx_phy + -- (priv->dma_rx_size * sizeof(struct dma_desc)); -+ tail = priv->dma_conf.rx_queue[i].dma_rx_phy + -+ (priv->dma_conf.dma_rx_size * sizeof(struct dma_desc)); - - stmmac_set_rx_tail_ptr(priv, priv->ioaddr, tail, i); - stmmac_start_rx(priv, priv->ioaddr, i); -@@ -1684,7 +1684,7 @@ cleanup: - static int __stmmac_test_jumbo(struct stmmac_priv *priv, u16 queue) - { - struct stmmac_packet_attrs attr = { }; -- int size = priv->dma_buf_sz; -+ int size = priv->dma_conf.dma_buf_sz; - - attr.dst = priv->dev->dev_addr; - attr.max_size = size - ETH_FCS_LEN; -@@ -1767,7 +1767,7 @@ static int stmmac_test_tbs(struct stmmac - - /* Find first TBS enabled Queue, if any */ - for (i = 0; i < priv->plat->tx_queues_to_use; i++) -- if (priv->tx_queue[i].tbs & STMMAC_TBS_AVAIL) -+ if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_AVAIL) - break; - - if (i >= priv->plat->tx_queues_to_use) ---- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c -+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c -@@ -970,13 +970,13 @@ static int tc_setup_etf(struct stmmac_pr - return -EOPNOTSUPP; - if (qopt->queue >= priv->plat->tx_queues_to_use) - return -EINVAL; -- if (!(priv->tx_queue[qopt->queue].tbs & STMMAC_TBS_AVAIL)) -+ if (!(priv->dma_conf.tx_queue[qopt->queue].tbs & STMMAC_TBS_AVAIL)) - return -EINVAL; - - if (qopt->enable) -- priv->tx_queue[qopt->queue].tbs |= STMMAC_TBS_EN; -+ priv->dma_conf.tx_queue[qopt->queue].tbs |= STMMAC_TBS_EN; - else -- priv->tx_queue[qopt->queue].tbs &= ~STMMAC_TBS_EN; -+ priv->dma_conf.tx_queue[qopt->queue].tbs &= ~STMMAC_TBS_EN; - - netdev_info(priv->dev, "%s ETF for Queue %d\n", - qopt->enable ? "enabled" : "disabled", qopt->queue); diff --git a/target/linux/generic/backport-6.1/775-v6.0-04-net-ethernet-stmicro-stmmac-generate-stmmac-dma-conf.patch b/target/linux/generic/backport-6.1/775-v6.0-04-net-ethernet-stmicro-stmmac-generate-stmmac-dma-conf.patch deleted file mode 100644 index e84373b47180..000000000000 --- a/target/linux/generic/backport-6.1/775-v6.0-04-net-ethernet-stmicro-stmmac-generate-stmmac-dma-conf.patch +++ /dev/null @@ -1,1161 +0,0 @@ -From ba39b344e9240a4a5fd4ab8178200b85cd1809da Mon Sep 17 00:00:00 2001 -From: Christian Marangi -Date: Sat, 23 Jul 2022 16:29:32 +0200 -Subject: [PATCH 4/5] net: ethernet: stmicro: stmmac: generate stmmac dma conf - before open - -Rework the driver to generate the stmmac dma_conf before stmmac_open. -This permits a function to first check if it's possible to allocate a -new dma_config and then pass it directly to __stmmac_open and "open" the -interface with the new configuration. 
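The ordering is the whole point of this rework: the replacement DMA layout is allocated and validated while the old one is still serving traffic, so an allocation failure leaves the interface untouched. The standalone sketch below shows that allocate-then-swap ordering in miniature; alloc_dma_conf(), change_ring_size() and the structs are illustrative stand-ins, not the driver's API.

#include <stdio.h>
#include <stdlib.h>

struct dma_conf {
	size_t rx_size;
	size_t tx_size;
	size_t buf_sz;
};

struct device {
	struct dma_conf *active;	/* configuration currently in use */
};

static struct dma_conf *alloc_dma_conf(size_t rx, size_t tx, size_t buf)
{
	struct dma_conf *c = malloc(sizeof(*c));

	if (!c)
		return NULL;	/* allocation failed: caller keeps the old conf */
	c->rx_size = rx;
	c->tx_size = tx;
	c->buf_sz = buf;
	return c;
}

static int change_ring_size(struct device *dev, size_t rx, size_t tx)
{
	/* 1. Build the replacement while the old configuration keeps running. */
	struct dma_conf *new_conf = alloc_dma_conf(rx, tx, dev->active->buf_sz);

	if (!new_conf)
		return -1;	/* nothing was torn down, the device still works */

	/* 2. Only now retire the old configuration and switch over. */
	free(dev->active);
	dev->active = new_conf;
	return 0;
}

int main(void)
{
	struct device dev = { .active = alloc_dma_conf(512, 512, 1536) };

	if (!dev.active)
		return 1;
	if (change_ring_size(&dev, 1024, 1024))
		fprintf(stderr, "resize failed, old rings still active\n");
	else
		printf("rx=%zu tx=%zu\n", dev.active->rx_size, dev.active->tx_size);
	free(dev.active);
	return 0;
}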
- -Signed-off-by: Christian Marangi -Signed-off-by: Jakub Kicinski ---- - .../net/ethernet/stmicro/stmmac/stmmac_main.c | 462 +++++++++++------- - 1 file changed, 289 insertions(+), 173 deletions(-) - ---- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c -+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c -@@ -1300,7 +1300,8 @@ static int stmmac_phy_setup(struct stmma - return 0; - } - --static void stmmac_display_rx_rings(struct stmmac_priv *priv) -+static void stmmac_display_rx_rings(struct stmmac_priv *priv, -+ struct stmmac_dma_conf *dma_conf) - { - u32 rx_cnt = priv->plat->rx_queues_to_use; - unsigned int desc_size; -@@ -1309,7 +1310,7 @@ static void stmmac_display_rx_rings(stru - - /* Display RX rings */ - for (queue = 0; queue < rx_cnt; queue++) { -- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; -+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; - - pr_info("\tRX Queue %u rings\n", queue); - -@@ -1322,12 +1323,13 @@ static void stmmac_display_rx_rings(stru - } - - /* Display RX ring */ -- stmmac_display_ring(priv, head_rx, priv->dma_conf.dma_rx_size, true, -+ stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true, - rx_q->dma_rx_phy, desc_size); - } - } - --static void stmmac_display_tx_rings(struct stmmac_priv *priv) -+static void stmmac_display_tx_rings(struct stmmac_priv *priv, -+ struct stmmac_dma_conf *dma_conf) - { - u32 tx_cnt = priv->plat->tx_queues_to_use; - unsigned int desc_size; -@@ -1336,7 +1338,7 @@ static void stmmac_display_tx_rings(stru - - /* Display TX rings */ - for (queue = 0; queue < tx_cnt; queue++) { -- struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; -+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; - - pr_info("\tTX Queue %d rings\n", queue); - -@@ -1351,18 +1353,19 @@ static void stmmac_display_tx_rings(stru - desc_size = sizeof(struct dma_desc); - } - -- stmmac_display_ring(priv, head_tx, priv->dma_conf.dma_tx_size, false, -+ stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false, - tx_q->dma_tx_phy, desc_size); - } - } - --static void stmmac_display_rings(struct stmmac_priv *priv) -+static void stmmac_display_rings(struct stmmac_priv *priv, -+ struct stmmac_dma_conf *dma_conf) - { - /* Display RX ring */ -- stmmac_display_rx_rings(priv); -+ stmmac_display_rx_rings(priv, dma_conf); - - /* Display TX ring */ -- stmmac_display_tx_rings(priv); -+ stmmac_display_tx_rings(priv, dma_conf); - } - - static int stmmac_set_bfsize(int mtu, int bufsize) -@@ -1386,44 +1389,50 @@ static int stmmac_set_bfsize(int mtu, in - /** - * stmmac_clear_rx_descriptors - clear RX descriptors - * @priv: driver private structure -+ * @dma_conf: structure to take the dma data - * @queue: RX queue index - * Description: this function is called to clear the RX descriptors - * in case of both basic and extended descriptors are used. 
- */ --static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue) -+static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, -+ struct stmmac_dma_conf *dma_conf, -+ u32 queue) - { -- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; -+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; - int i; - - /* Clear the RX descriptors */ -- for (i = 0; i < priv->dma_conf.dma_rx_size; i++) -+ for (i = 0; i < dma_conf->dma_rx_size; i++) - if (priv->extend_desc) - stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic, - priv->use_riwt, priv->mode, -- (i == priv->dma_conf.dma_rx_size - 1), -- priv->dma_conf.dma_buf_sz); -+ (i == dma_conf->dma_rx_size - 1), -+ dma_conf->dma_buf_sz); - else - stmmac_init_rx_desc(priv, &rx_q->dma_rx[i], - priv->use_riwt, priv->mode, -- (i == priv->dma_conf.dma_rx_size - 1), -- priv->dma_conf.dma_buf_sz); -+ (i == dma_conf->dma_rx_size - 1), -+ dma_conf->dma_buf_sz); - } - - /** - * stmmac_clear_tx_descriptors - clear tx descriptors - * @priv: driver private structure -+ * @dma_conf: structure to take the dma data - * @queue: TX queue index. - * Description: this function is called to clear the TX descriptors - * in case of both basic and extended descriptors are used. - */ --static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue) -+static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, -+ struct stmmac_dma_conf *dma_conf, -+ u32 queue) - { -- struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; -+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; - int i; - - /* Clear the TX descriptors */ -- for (i = 0; i < priv->dma_conf.dma_tx_size; i++) { -- int last = (i == (priv->dma_conf.dma_tx_size - 1)); -+ for (i = 0; i < dma_conf->dma_tx_size; i++) { -+ int last = (i == (dma_conf->dma_tx_size - 1)); - struct dma_desc *p; - - if (priv->extend_desc) -@@ -1440,10 +1449,12 @@ static void stmmac_clear_tx_descriptors( - /** - * stmmac_clear_descriptors - clear descriptors - * @priv: driver private structure -+ * @dma_conf: structure to take the dma data - * Description: this function is called to clear the TX and RX descriptors - * in case of both basic and extended descriptors are used. - */ --static void stmmac_clear_descriptors(struct stmmac_priv *priv) -+static void stmmac_clear_descriptors(struct stmmac_priv *priv, -+ struct stmmac_dma_conf *dma_conf) - { - u32 rx_queue_cnt = priv->plat->rx_queues_to_use; - u32 tx_queue_cnt = priv->plat->tx_queues_to_use; -@@ -1451,16 +1462,17 @@ static void stmmac_clear_descriptors(str - - /* Clear the RX descriptors */ - for (queue = 0; queue < rx_queue_cnt; queue++) -- stmmac_clear_rx_descriptors(priv, queue); -+ stmmac_clear_rx_descriptors(priv, dma_conf, queue); - - /* Clear the TX descriptors */ - for (queue = 0; queue < tx_queue_cnt; queue++) -- stmmac_clear_tx_descriptors(priv, queue); -+ stmmac_clear_tx_descriptors(priv, dma_conf, queue); - } - - /** - * stmmac_init_rx_buffers - init the RX descriptor buffer. - * @priv: driver private structure -+ * @dma_conf: structure to take the dma data - * @p: descriptor pointer - * @i: descriptor index - * @flags: gfp flag -@@ -1468,10 +1480,12 @@ static void stmmac_clear_descriptors(str - * Description: this function is called to allocate a receive buffer, perform - * the DMA mapping and init the descriptor. 
- */ --static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, -+static int stmmac_init_rx_buffers(struct stmmac_priv *priv, -+ struct stmmac_dma_conf *dma_conf, -+ struct dma_desc *p, - int i, gfp_t flags, u32 queue) - { -- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; -+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; - struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; - - if (!buf->page) { -@@ -1496,7 +1510,7 @@ static int stmmac_init_rx_buffers(struct - buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset; - - stmmac_set_desc_addr(priv, p, buf->addr); -- if (priv->dma_conf.dma_buf_sz == BUF_SIZE_16KiB) -+ if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB) - stmmac_init_desc3(priv, p); - - return 0; -@@ -1505,12 +1519,13 @@ static int stmmac_init_rx_buffers(struct - /** - * stmmac_free_rx_buffer - free RX dma buffers - * @priv: private structure -- * @queue: RX queue index -+ * @rx_q: RX queue - * @i: buffer index. - */ --static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i) -+static void stmmac_free_rx_buffer(struct stmmac_priv *priv, -+ struct stmmac_rx_queue *rx_q, -+ int i) - { -- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; - struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; - - if (buf->page) -@@ -1525,12 +1540,15 @@ static void stmmac_free_rx_buffer(struct - /** - * stmmac_free_tx_buffer - free RX dma buffers - * @priv: private structure -+ * @dma_conf: structure to take the dma data - * @queue: RX queue index - * @i: buffer index. - */ --static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i) -+static void stmmac_free_tx_buffer(struct stmmac_priv *priv, -+ struct stmmac_dma_conf *dma_conf, -+ u32 queue, int i) - { -- struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; -+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; - - if (tx_q->tx_skbuff_dma[i].buf && - tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) { -@@ -1569,23 +1587,28 @@ static void stmmac_free_tx_buffer(struct - /** - * dma_free_rx_skbufs - free RX dma buffers - * @priv: private structure -+ * @dma_conf: structure to take the dma data - * @queue: RX queue index - */ --static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue) -+static void dma_free_rx_skbufs(struct stmmac_priv *priv, -+ struct stmmac_dma_conf *dma_conf, -+ u32 queue) - { -+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; - int i; - -- for (i = 0; i < priv->dma_conf.dma_rx_size; i++) -- stmmac_free_rx_buffer(priv, queue, i); -+ for (i = 0; i < dma_conf->dma_rx_size; i++) -+ stmmac_free_rx_buffer(priv, rx_q, i); - } - --static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue, -- gfp_t flags) -+static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, -+ struct stmmac_dma_conf *dma_conf, -+ u32 queue, gfp_t flags) - { -- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; -+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; - int i; - -- for (i = 0; i < priv->dma_conf.dma_rx_size; i++) { -+ for (i = 0; i < dma_conf->dma_rx_size; i++) { - struct dma_desc *p; - int ret; - -@@ -1594,7 +1617,7 @@ static int stmmac_alloc_rx_buffers(struc - else - p = rx_q->dma_rx + i; - -- ret = stmmac_init_rx_buffers(priv, p, i, flags, -+ ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags, - queue); - if (ret) - return ret; -@@ -1608,14 +1631,17 @@ static int stmmac_alloc_rx_buffers(struc - /** - * dma_free_rx_xskbufs - free RX dma buffers from XSK pool - 
* @priv: private structure -+ * @dma_conf: structure to take the dma data - * @queue: RX queue index - */ --static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue) -+static void dma_free_rx_xskbufs(struct stmmac_priv *priv, -+ struct stmmac_dma_conf *dma_conf, -+ u32 queue) - { -- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; -+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; - int i; - -- for (i = 0; i < priv->dma_conf.dma_rx_size; i++) { -+ for (i = 0; i < dma_conf->dma_rx_size; i++) { - struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; - - if (!buf->xdp) -@@ -1626,12 +1652,14 @@ static void dma_free_rx_xskbufs(struct s - } - } - --static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, u32 queue) -+static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, -+ struct stmmac_dma_conf *dma_conf, -+ u32 queue) - { -- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; -+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; - int i; - -- for (i = 0; i < priv->dma_conf.dma_rx_size; i++) { -+ for (i = 0; i < dma_conf->dma_rx_size; i++) { - struct stmmac_rx_buffer *buf; - dma_addr_t dma_addr; - struct dma_desc *p; -@@ -1666,22 +1694,25 @@ static struct xsk_buff_pool *stmmac_get_ - /** - * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue) - * @priv: driver private structure -+ * @dma_conf: structure to take the dma data - * @queue: RX queue index - * @flags: gfp flag. - * Description: this function initializes the DMA RX descriptors - * and allocates the socket buffers. It supports the chained and ring - * modes. - */ --static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags) -+static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, -+ struct stmmac_dma_conf *dma_conf, -+ u32 queue, gfp_t flags) - { -- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; -+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; - int ret; - - netif_dbg(priv, probe, priv->dev, - "(%s) dma_rx_phy=0x%08x\n", __func__, - (u32)rx_q->dma_rx_phy); - -- stmmac_clear_rx_descriptors(priv, queue); -+ stmmac_clear_rx_descriptors(priv, dma_conf, queue); - - xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq); - -@@ -1708,9 +1739,9 @@ static int __init_dma_rx_desc_rings(stru - /* RX XDP ZC buffer pool may not be populated, e.g. - * xdpsock TX-only. 
- */ -- stmmac_alloc_rx_buffers_zc(priv, queue); -+ stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue); - } else { -- ret = stmmac_alloc_rx_buffers(priv, queue, flags); -+ ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags); - if (ret < 0) - return -ENOMEM; - } -@@ -1720,17 +1751,19 @@ static int __init_dma_rx_desc_rings(stru - if (priv->extend_desc) - stmmac_mode_init(priv, rx_q->dma_erx, - rx_q->dma_rx_phy, -- priv->dma_conf.dma_rx_size, 1); -+ dma_conf->dma_rx_size, 1); - else - stmmac_mode_init(priv, rx_q->dma_rx, - rx_q->dma_rx_phy, -- priv->dma_conf.dma_rx_size, 0); -+ dma_conf->dma_rx_size, 0); - } - - return 0; - } - --static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags) -+static int init_dma_rx_desc_rings(struct net_device *dev, -+ struct stmmac_dma_conf *dma_conf, -+ gfp_t flags) - { - struct stmmac_priv *priv = netdev_priv(dev); - u32 rx_count = priv->plat->rx_queues_to_use; -@@ -1742,7 +1775,7 @@ static int init_dma_rx_desc_rings(struct - "SKB addresses:\nskb\t\tskb data\tdma data\n"); - - for (queue = 0; queue < rx_count; queue++) { -- ret = __init_dma_rx_desc_rings(priv, queue, flags); -+ ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags); - if (ret) - goto err_init_rx_buffers; - } -@@ -1751,12 +1784,12 @@ static int init_dma_rx_desc_rings(struct - - err_init_rx_buffers: - while (queue >= 0) { -- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; -+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; - - if (rx_q->xsk_pool) -- dma_free_rx_xskbufs(priv, queue); -+ dma_free_rx_xskbufs(priv, dma_conf, queue); - else -- dma_free_rx_skbufs(priv, queue); -+ dma_free_rx_skbufs(priv, dma_conf, queue); - - rx_q->buf_alloc_num = 0; - rx_q->xsk_pool = NULL; -@@ -1773,14 +1806,17 @@ err_init_rx_buffers: - /** - * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue) - * @priv: driver private structure -- * @queue : TX queue index -+ * @dma_conf: structure to take the dma data -+ * @queue: TX queue index - * Description: this function initializes the DMA TX descriptors - * and allocates the socket buffers. It supports the chained and ring - * modes. 
- */ --static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue) -+static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, -+ struct stmmac_dma_conf *dma_conf, -+ u32 queue) - { -- struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; -+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; - int i; - - netif_dbg(priv, probe, priv->dev, -@@ -1792,16 +1828,16 @@ static int __init_dma_tx_desc_rings(stru - if (priv->extend_desc) - stmmac_mode_init(priv, tx_q->dma_etx, - tx_q->dma_tx_phy, -- priv->dma_conf.dma_tx_size, 1); -+ dma_conf->dma_tx_size, 1); - else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) - stmmac_mode_init(priv, tx_q->dma_tx, - tx_q->dma_tx_phy, -- priv->dma_conf.dma_tx_size, 0); -+ dma_conf->dma_tx_size, 0); - } - - tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue); - -- for (i = 0; i < priv->dma_conf.dma_tx_size; i++) { -+ for (i = 0; i < dma_conf->dma_tx_size; i++) { - struct dma_desc *p; - - if (priv->extend_desc) -@@ -1823,7 +1859,8 @@ static int __init_dma_tx_desc_rings(stru - return 0; - } - --static int init_dma_tx_desc_rings(struct net_device *dev) -+static int init_dma_tx_desc_rings(struct net_device *dev, -+ struct stmmac_dma_conf *dma_conf) - { - struct stmmac_priv *priv = netdev_priv(dev); - u32 tx_queue_cnt; -@@ -1832,7 +1869,7 @@ static int init_dma_tx_desc_rings(struct - tx_queue_cnt = priv->plat->tx_queues_to_use; - - for (queue = 0; queue < tx_queue_cnt; queue++) -- __init_dma_tx_desc_rings(priv, queue); -+ __init_dma_tx_desc_rings(priv, dma_conf, queue); - - return 0; - } -@@ -1840,26 +1877,29 @@ static int init_dma_tx_desc_rings(struct - /** - * init_dma_desc_rings - init the RX/TX descriptor rings - * @dev: net device structure -+ * @dma_conf: structure to take the dma data - * @flags: gfp flag. - * Description: this function initializes the DMA RX/TX descriptors - * and allocates the socket buffers. It supports the chained and ring - * modes. 
- */ --static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) -+static int init_dma_desc_rings(struct net_device *dev, -+ struct stmmac_dma_conf *dma_conf, -+ gfp_t flags) - { - struct stmmac_priv *priv = netdev_priv(dev); - int ret; - -- ret = init_dma_rx_desc_rings(dev, flags); -+ ret = init_dma_rx_desc_rings(dev, dma_conf, flags); - if (ret) - return ret; - -- ret = init_dma_tx_desc_rings(dev); -+ ret = init_dma_tx_desc_rings(dev, dma_conf); - -- stmmac_clear_descriptors(priv); -+ stmmac_clear_descriptors(priv, dma_conf); - - if (netif_msg_hw(priv)) -- stmmac_display_rings(priv); -+ stmmac_display_rings(priv, dma_conf); - - return ret; - } -@@ -1867,17 +1907,20 @@ static int init_dma_desc_rings(struct ne - /** - * dma_free_tx_skbufs - free TX dma buffers - * @priv: private structure -+ * @dma_conf: structure to take the dma data - * @queue: TX queue index - */ --static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue) -+static void dma_free_tx_skbufs(struct stmmac_priv *priv, -+ struct stmmac_dma_conf *dma_conf, -+ u32 queue) - { -- struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; -+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; - int i; - - tx_q->xsk_frames_done = 0; - -- for (i = 0; i < priv->dma_conf.dma_tx_size; i++) -- stmmac_free_tx_buffer(priv, queue, i); -+ for (i = 0; i < dma_conf->dma_tx_size; i++) -+ stmmac_free_tx_buffer(priv, dma_conf, queue, i); - - if (tx_q->xsk_pool && tx_q->xsk_frames_done) { - xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done); -@@ -1896,34 +1939,37 @@ static void stmmac_free_tx_skbufs(struct - u32 queue; - - for (queue = 0; queue < tx_queue_cnt; queue++) -- dma_free_tx_skbufs(priv, queue); -+ dma_free_tx_skbufs(priv, &priv->dma_conf, queue); - } - - /** - * __free_dma_rx_desc_resources - free RX dma desc resources (per queue) - * @priv: private structure -+ * @dma_conf: structure to take the dma data - * @queue: RX queue index - */ --static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue) -+static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, -+ struct stmmac_dma_conf *dma_conf, -+ u32 queue) - { -- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; -+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; - - /* Release the DMA RX socket buffers */ - if (rx_q->xsk_pool) -- dma_free_rx_xskbufs(priv, queue); -+ dma_free_rx_xskbufs(priv, dma_conf, queue); - else -- dma_free_rx_skbufs(priv, queue); -+ dma_free_rx_skbufs(priv, dma_conf, queue); - - rx_q->buf_alloc_num = 0; - rx_q->xsk_pool = NULL; - - /* Free DMA regions of consistent memory previously allocated */ - if (!priv->extend_desc) -- dma_free_coherent(priv->device, priv->dma_conf.dma_rx_size * -+ dma_free_coherent(priv->device, dma_conf->dma_rx_size * - sizeof(struct dma_desc), - rx_q->dma_rx, rx_q->dma_rx_phy); - else -- dma_free_coherent(priv->device, priv->dma_conf.dma_rx_size * -+ dma_free_coherent(priv->device, dma_conf->dma_rx_size * - sizeof(struct dma_extended_desc), - rx_q->dma_erx, rx_q->dma_rx_phy); - -@@ -1935,29 +1981,33 @@ static void __free_dma_rx_desc_resources - page_pool_destroy(rx_q->page_pool); - } - --static void free_dma_rx_desc_resources(struct stmmac_priv *priv) -+static void free_dma_rx_desc_resources(struct stmmac_priv *priv, -+ struct stmmac_dma_conf *dma_conf) - { - u32 rx_count = priv->plat->rx_queues_to_use; - u32 queue; - - /* Free RX queue resources */ - for (queue = 0; queue < rx_count; queue++) -- __free_dma_rx_desc_resources(priv, queue); -+ 
__free_dma_rx_desc_resources(priv, dma_conf, queue); - } - - /** - * __free_dma_tx_desc_resources - free TX dma desc resources (per queue) - * @priv: private structure -+ * @dma_conf: structure to take the dma data - * @queue: TX queue index - */ --static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue) -+static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, -+ struct stmmac_dma_conf *dma_conf, -+ u32 queue) - { -- struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; -+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; - size_t size; - void *addr; - - /* Release the DMA TX socket buffers */ -- dma_free_tx_skbufs(priv, queue); -+ dma_free_tx_skbufs(priv, dma_conf, queue); - - if (priv->extend_desc) { - size = sizeof(struct dma_extended_desc); -@@ -1970,7 +2020,7 @@ static void __free_dma_tx_desc_resources - addr = tx_q->dma_tx; - } - -- size *= priv->dma_conf.dma_tx_size; -+ size *= dma_conf->dma_tx_size; - - dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy); - -@@ -1978,28 +2028,32 @@ static void __free_dma_tx_desc_resources - kfree(tx_q->tx_skbuff); - } - --static void free_dma_tx_desc_resources(struct stmmac_priv *priv) -+static void free_dma_tx_desc_resources(struct stmmac_priv *priv, -+ struct stmmac_dma_conf *dma_conf) - { - u32 tx_count = priv->plat->tx_queues_to_use; - u32 queue; - - /* Free TX queue resources */ - for (queue = 0; queue < tx_count; queue++) -- __free_dma_tx_desc_resources(priv, queue); -+ __free_dma_tx_desc_resources(priv, dma_conf, queue); - } - - /** - * __alloc_dma_rx_desc_resources - alloc RX resources (per queue). - * @priv: private structure -+ * @dma_conf: structure to take the dma data - * @queue: RX queue index - * Description: according to which descriptor can be used (extend or basic) - * this function allocates the resources for TX and RX paths. In case of - * reception, for example, it pre-allocated the RX socket buffer in order to - * allow zero-copy mechanism. 
- */ --static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue) -+static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, -+ struct stmmac_dma_conf *dma_conf, -+ u32 queue) - { -- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; -+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; - struct stmmac_channel *ch = &priv->channel[queue]; - bool xdp_prog = stmmac_xdp_is_enabled(priv); - struct page_pool_params pp_params = { 0 }; -@@ -2011,8 +2065,8 @@ static int __alloc_dma_rx_desc_resources - rx_q->priv_data = priv; - - pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; -- pp_params.pool_size = priv->dma_conf.dma_rx_size; -- num_pages = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE); -+ pp_params.pool_size = dma_conf->dma_rx_size; -+ num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE); - pp_params.order = ilog2(num_pages); - pp_params.nid = dev_to_node(priv->device); - pp_params.dev = priv->device; -@@ -2027,7 +2081,7 @@ static int __alloc_dma_rx_desc_resources - return ret; - } - -- rx_q->buf_pool = kcalloc(priv->dma_conf.dma_rx_size, -+ rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size, - sizeof(*rx_q->buf_pool), - GFP_KERNEL); - if (!rx_q->buf_pool) -@@ -2035,7 +2089,7 @@ static int __alloc_dma_rx_desc_resources - - if (priv->extend_desc) { - rx_q->dma_erx = dma_alloc_coherent(priv->device, -- priv->dma_conf.dma_rx_size * -+ dma_conf->dma_rx_size * - sizeof(struct dma_extended_desc), - &rx_q->dma_rx_phy, - GFP_KERNEL); -@@ -2044,7 +2098,7 @@ static int __alloc_dma_rx_desc_resources - - } else { - rx_q->dma_rx = dma_alloc_coherent(priv->device, -- priv->dma_conf.dma_rx_size * -+ dma_conf->dma_rx_size * - sizeof(struct dma_desc), - &rx_q->dma_rx_phy, - GFP_KERNEL); -@@ -2069,7 +2123,8 @@ static int __alloc_dma_rx_desc_resources - return 0; - } - --static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv) -+static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv, -+ struct stmmac_dma_conf *dma_conf) - { - u32 rx_count = priv->plat->rx_queues_to_use; - u32 queue; -@@ -2077,7 +2132,7 @@ static int alloc_dma_rx_desc_resources(s - - /* RX queues buffers and DMA */ - for (queue = 0; queue < rx_count; queue++) { -- ret = __alloc_dma_rx_desc_resources(priv, queue); -+ ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue); - if (ret) - goto err_dma; - } -@@ -2085,7 +2140,7 @@ static int alloc_dma_rx_desc_resources(s - return 0; - - err_dma: -- free_dma_rx_desc_resources(priv); -+ free_dma_rx_desc_resources(priv, dma_conf); - - return ret; - } -@@ -2093,28 +2148,31 @@ err_dma: - /** - * __alloc_dma_tx_desc_resources - alloc TX resources (per queue). - * @priv: private structure -+ * @dma_conf: structure to take the dma data - * @queue: TX queue index - * Description: according to which descriptor can be used (extend or basic) - * this function allocates the resources for TX and RX paths. In case of - * reception, for example, it pre-allocated the RX socket buffer in order to - * allow zero-copy mechanism. 
- */ --static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue) -+static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, -+ struct stmmac_dma_conf *dma_conf, -+ u32 queue) - { -- struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; -+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; - size_t size; - void *addr; - - tx_q->queue_index = queue; - tx_q->priv_data = priv; - -- tx_q->tx_skbuff_dma = kcalloc(priv->dma_conf.dma_tx_size, -+ tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size, - sizeof(*tx_q->tx_skbuff_dma), - GFP_KERNEL); - if (!tx_q->tx_skbuff_dma) - return -ENOMEM; - -- tx_q->tx_skbuff = kcalloc(priv->dma_conf.dma_tx_size, -+ tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size, - sizeof(struct sk_buff *), - GFP_KERNEL); - if (!tx_q->tx_skbuff) -@@ -2127,7 +2185,7 @@ static int __alloc_dma_tx_desc_resources - else - size = sizeof(struct dma_desc); - -- size *= priv->dma_conf.dma_tx_size; -+ size *= dma_conf->dma_tx_size; - - addr = dma_alloc_coherent(priv->device, size, - &tx_q->dma_tx_phy, GFP_KERNEL); -@@ -2144,7 +2202,8 @@ static int __alloc_dma_tx_desc_resources - return 0; - } - --static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv) -+static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv, -+ struct stmmac_dma_conf *dma_conf) - { - u32 tx_count = priv->plat->tx_queues_to_use; - u32 queue; -@@ -2152,7 +2211,7 @@ static int alloc_dma_tx_desc_resources(s - - /* TX queues buffers and DMA */ - for (queue = 0; queue < tx_count; queue++) { -- ret = __alloc_dma_tx_desc_resources(priv, queue); -+ ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue); - if (ret) - goto err_dma; - } -@@ -2160,27 +2219,29 @@ static int alloc_dma_tx_desc_resources(s - return 0; - - err_dma: -- free_dma_tx_desc_resources(priv); -+ free_dma_tx_desc_resources(priv, dma_conf); - return ret; - } - - /** - * alloc_dma_desc_resources - alloc TX/RX resources. - * @priv: private structure -+ * @dma_conf: structure to take the dma data - * Description: according to which descriptor can be used (extend or basic) - * this function allocates the resources for TX and RX paths. In case of - * reception, for example, it pre-allocated the RX socket buffer in order to - * allow zero-copy mechanism. - */ --static int alloc_dma_desc_resources(struct stmmac_priv *priv) -+static int alloc_dma_desc_resources(struct stmmac_priv *priv, -+ struct stmmac_dma_conf *dma_conf) - { - /* RX Allocation */ -- int ret = alloc_dma_rx_desc_resources(priv); -+ int ret = alloc_dma_rx_desc_resources(priv, dma_conf); - - if (ret) - return ret; - -- ret = alloc_dma_tx_desc_resources(priv); -+ ret = alloc_dma_tx_desc_resources(priv, dma_conf); - - return ret; - } -@@ -2188,16 +2249,18 @@ static int alloc_dma_desc_resources(stru - /** - * free_dma_desc_resources - free dma desc resources - * @priv: private structure -+ * @dma_conf: structure to take the dma data - */ --static void free_dma_desc_resources(struct stmmac_priv *priv) -+static void free_dma_desc_resources(struct stmmac_priv *priv, -+ struct stmmac_dma_conf *dma_conf) - { - /* Release the DMA TX socket buffers */ -- free_dma_tx_desc_resources(priv); -+ free_dma_tx_desc_resources(priv, dma_conf); - - /* Release the DMA RX socket buffers later - * to ensure all pending XDP_TX buffers are returned. 
- */ -- free_dma_rx_desc_resources(priv); -+ free_dma_rx_desc_resources(priv, dma_conf); - } - - /** -@@ -2686,8 +2749,8 @@ static void stmmac_tx_err(struct stmmac_ - netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan)); - - stmmac_stop_tx_dma(priv, chan); -- dma_free_tx_skbufs(priv, chan); -- stmmac_clear_tx_descriptors(priv, chan); -+ dma_free_tx_skbufs(priv, &priv->dma_conf, chan); -+ stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan); - stmmac_reset_tx_queue(priv, chan); - stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, - tx_q->dma_tx_phy, chan); -@@ -3684,19 +3747,93 @@ static int stmmac_request_irq(struct net - } - - /** -- * stmmac_open - open entry point of the driver -+ * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue -+ * @priv: driver private structure -+ * @mtu: MTU to setup the dma queue and buf with -+ * Description: Allocate and generate a dma_conf based on the provided MTU. -+ * Allocate the Tx/Rx DMA queue and init them. -+ * Return value: -+ * the dma_conf allocated struct on success and an appropriate ERR_PTR on failure. -+ */ -+static struct stmmac_dma_conf * -+stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu) -+{ -+ struct stmmac_dma_conf *dma_conf; -+ int chan, bfsize, ret; -+ -+ dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL); -+ if (!dma_conf) { -+ netdev_err(priv->dev, "%s: DMA conf allocation failed\n", -+ __func__); -+ return ERR_PTR(-ENOMEM); -+ } -+ -+ bfsize = stmmac_set_16kib_bfsize(priv, mtu); -+ if (bfsize < 0) -+ bfsize = 0; -+ -+ if (bfsize < BUF_SIZE_16KiB) -+ bfsize = stmmac_set_bfsize(mtu, 0); -+ -+ dma_conf->dma_buf_sz = bfsize; -+ /* Chose the tx/rx size from the already defined one in the -+ * priv struct. (if defined) -+ */ -+ dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size; -+ dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size; -+ -+ if (!dma_conf->dma_tx_size) -+ dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE; -+ if (!dma_conf->dma_rx_size) -+ dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE; -+ -+ /* Earlier check for TBS */ -+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) { -+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan]; -+ int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en; -+ -+ /* Setup per-TXQ tbs flag before TX descriptor alloc */ -+ tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0; -+ } -+ -+ ret = alloc_dma_desc_resources(priv, dma_conf); -+ if (ret < 0) { -+ netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n", -+ __func__); -+ goto alloc_error; -+ } -+ -+ ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL); -+ if (ret < 0) { -+ netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n", -+ __func__); -+ goto init_error; -+ } -+ -+ return dma_conf; -+ -+init_error: -+ free_dma_desc_resources(priv, dma_conf); -+alloc_error: -+ kfree(dma_conf); -+ return ERR_PTR(ret); -+} -+ -+/** -+ * __stmmac_open - open entry point of the driver - * @dev : pointer to the device structure. -+ * @dma_conf : structure to take the dma data - * Description: - * This function is the open entry point of the driver. - * Return value: - * 0 on success and an appropriate (-)ve integer as defined in errno.h - * file on failure. 
- */ --static int stmmac_open(struct net_device *dev) -+static int __stmmac_open(struct net_device *dev, -+ struct stmmac_dma_conf *dma_conf) - { - struct stmmac_priv *priv = netdev_priv(dev); - int mode = priv->plat->phy_interface; -- int bfsize = 0; - u32 chan; - int ret; - -@@ -3723,45 +3860,10 @@ static int stmmac_open(struct net_device - memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats)); - priv->xstats.threshold = tc; - -- bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu); -- if (bfsize < 0) -- bfsize = 0; -- -- if (bfsize < BUF_SIZE_16KiB) -- bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_conf.dma_buf_sz); -- -- priv->dma_conf.dma_buf_sz = bfsize; -- buf_sz = bfsize; -- - priv->rx_copybreak = STMMAC_RX_COPYBREAK; - -- if (!priv->dma_conf.dma_tx_size) -- priv->dma_conf.dma_tx_size = DMA_DEFAULT_TX_SIZE; -- if (!priv->dma_conf.dma_rx_size) -- priv->dma_conf.dma_rx_size = DMA_DEFAULT_RX_SIZE; -- -- /* Earlier check for TBS */ -- for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) { -- struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; -- int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en; -- -- /* Setup per-TXQ tbs flag before TX descriptor alloc */ -- tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0; -- } -- -- ret = alloc_dma_desc_resources(priv); -- if (ret < 0) { -- netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n", -- __func__); -- goto dma_desc_error; -- } -- -- ret = init_dma_desc_rings(dev, GFP_KERNEL); -- if (ret < 0) { -- netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n", -- __func__); -- goto init_error; -- } -+ buf_sz = dma_conf->dma_buf_sz; -+ memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf)); - - if (priv->plat->serdes_powerup) { - ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv); -@@ -3804,14 +3906,28 @@ irq_error: - - stmmac_hw_teardown(dev); - init_error: -- free_dma_desc_resources(priv); --dma_desc_error: -+ free_dma_desc_resources(priv, &priv->dma_conf); - phylink_disconnect_phy(priv->phylink); - init_phy_error: - pm_runtime_put(priv->device); - return ret; - } - -+static int stmmac_open(struct net_device *dev) -+{ -+ struct stmmac_priv *priv = netdev_priv(dev); -+ struct stmmac_dma_conf *dma_conf; -+ int ret; -+ -+ dma_conf = stmmac_setup_dma_desc(priv, dev->mtu); -+ if (IS_ERR(dma_conf)) -+ return PTR_ERR(dma_conf); -+ -+ ret = __stmmac_open(dev, dma_conf); -+ kfree(dma_conf); -+ return ret; -+} -+ - static void stmmac_fpe_stop_wq(struct stmmac_priv *priv) - { - set_bit(__FPE_REMOVING, &priv->fpe_task_state); -@@ -3858,7 +3974,7 @@ static int stmmac_release(struct net_dev - stmmac_stop_all_dma(priv); - - /* Release and free the Rx/Tx resources */ -- free_dma_desc_resources(priv); -+ free_dma_desc_resources(priv, &priv->dma_conf); - - /* Disable the MAC Rx/Tx */ - stmmac_mac_set(priv, priv->ioaddr, false); -@@ -6403,7 +6519,7 @@ void stmmac_disable_rx_queue(struct stmm - spin_unlock_irqrestore(&ch->lock, flags); - - stmmac_stop_rx_dma(priv, queue); -- __free_dma_rx_desc_resources(priv, queue); -+ __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue); - } - - void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue) -@@ -6414,21 +6530,21 @@ void stmmac_enable_rx_queue(struct stmma - u32 buf_size; - int ret; - -- ret = __alloc_dma_rx_desc_resources(priv, queue); -+ ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue); - if (ret) { - netdev_err(priv->dev, "Failed to alloc RX desc.\n"); - return; - } - -- ret = __init_dma_rx_desc_rings(priv, queue, GFP_KERNEL); -+ ret = 
__init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL); - if (ret) { -- __free_dma_rx_desc_resources(priv, queue); -+ __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue); - netdev_err(priv->dev, "Failed to init RX desc.\n"); - return; - } - - stmmac_reset_rx_queue(priv, queue); -- stmmac_clear_rx_descriptors(priv, queue); -+ stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue); - - stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, - rx_q->dma_rx_phy, rx_q->queue_index); -@@ -6466,7 +6582,7 @@ void stmmac_disable_tx_queue(struct stmm - spin_unlock_irqrestore(&ch->lock, flags); - - stmmac_stop_tx_dma(priv, queue); -- __free_dma_tx_desc_resources(priv, queue); -+ __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue); - } - - void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue) -@@ -6476,21 +6592,21 @@ void stmmac_enable_tx_queue(struct stmma - unsigned long flags; - int ret; - -- ret = __alloc_dma_tx_desc_resources(priv, queue); -+ ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue); - if (ret) { - netdev_err(priv->dev, "Failed to alloc TX desc.\n"); - return; - } - -- ret = __init_dma_tx_desc_rings(priv, queue); -+ ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue); - if (ret) { -- __free_dma_tx_desc_resources(priv, queue); -+ __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue); - netdev_err(priv->dev, "Failed to init TX desc.\n"); - return; - } - - stmmac_reset_tx_queue(priv, queue); -- stmmac_clear_tx_descriptors(priv, queue); -+ stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue); - - stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, - tx_q->dma_tx_phy, tx_q->queue_index); -@@ -6530,7 +6646,7 @@ void stmmac_xdp_release(struct net_devic - stmmac_stop_all_dma(priv); - - /* Release and free the Rx/Tx resources */ -- free_dma_desc_resources(priv); -+ free_dma_desc_resources(priv, &priv->dma_conf); - - /* Disable the MAC Rx/Tx */ - stmmac_mac_set(priv, priv->ioaddr, false); -@@ -6555,14 +6671,14 @@ int stmmac_xdp_open(struct net_device *d - u32 chan; - int ret; - -- ret = alloc_dma_desc_resources(priv); -+ ret = alloc_dma_desc_resources(priv, &priv->dma_conf); - if (ret < 0) { - netdev_err(dev, "%s: DMA descriptors allocation failed\n", - __func__); - goto dma_desc_error; - } - -- ret = init_dma_desc_rings(dev, GFP_KERNEL); -+ ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL); - if (ret < 0) { - netdev_err(dev, "%s: DMA descriptors initialization failed\n", - __func__); -@@ -6644,7 +6760,7 @@ irq_error: - - stmmac_hw_teardown(dev); - init_error: -- free_dma_desc_resources(priv); -+ free_dma_desc_resources(priv, &priv->dma_conf); - dma_desc_error: - return ret; - } -@@ -7509,7 +7625,7 @@ int stmmac_resume(struct device *dev) - stmmac_reset_queues_param(priv); - - stmmac_free_tx_skbufs(priv); -- stmmac_clear_descriptors(priv); -+ stmmac_clear_descriptors(priv, &priv->dma_conf); - - stmmac_hw_setup(ndev, false); - stmmac_init_coalesce(priv); diff --git a/target/linux/generic/backport-6.1/775-v6.0-05-net-ethernet-stmicro-stmmac-permit-MTU-change-with-i.patch b/target/linux/generic/backport-6.1/775-v6.0-05-net-ethernet-stmicro-stmmac-permit-MTU-change-with-i.patch deleted file mode 100644 index 8fccc716597c..000000000000 --- a/target/linux/generic/backport-6.1/775-v6.0-05-net-ethernet-stmicro-stmmac-permit-MTU-change-with-i.patch +++ /dev/null @@ -1,73 +0,0 @@ -From 3470079687448abac42deb62774253be1d6bdef3 Mon Sep 17 00:00:00 2001 -From: Christian Marangi -Date: Sat, 23 Jul 2022 16:29:33 
+0200 -Subject: [PATCH 5/5] net: ethernet: stmicro: stmmac: permit MTU change with - interface up - -Remove the limitation where the interface needs to be down to change -MTU by releasing and opening the stmmac driver to set the new MTU. -Also call the set_filter function to correctly init the port. -This permits to remove the EBUSY error while the ethernet port is -running permitting a correct MTU change if for example a DSA request -a MTU change for a switch CPU port. - -Signed-off-by: Christian Marangi -Signed-off-by: Jakub Kicinski ---- - .../net/ethernet/stmicro/stmmac/stmmac_main.c | 30 +++++++++++++++---- - 1 file changed, 24 insertions(+), 6 deletions(-) - ---- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c -+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c -@@ -5626,18 +5626,15 @@ static int stmmac_change_mtu(struct net_ - { - struct stmmac_priv *priv = netdev_priv(dev); - int txfifosz = priv->plat->tx_fifo_size; -+ struct stmmac_dma_conf *dma_conf; - const int mtu = new_mtu; -+ int ret; - - if (txfifosz == 0) - txfifosz = priv->dma_cap.tx_fifo_size; - - txfifosz /= priv->plat->tx_queues_to_use; - -- if (netif_running(dev)) { -- netdev_err(priv->dev, "must be stopped to change its MTU\n"); -- return -EBUSY; -- } -- - if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) { - netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n"); - return -EINVAL; -@@ -5649,8 +5646,29 @@ static int stmmac_change_mtu(struct net_ - if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB)) - return -EINVAL; - -- dev->mtu = mtu; -+ if (netif_running(dev)) { -+ netdev_dbg(priv->dev, "restarting interface to change its MTU\n"); -+ /* Try to allocate the new DMA conf with the new mtu */ -+ dma_conf = stmmac_setup_dma_desc(priv, mtu); -+ if (IS_ERR(dma_conf)) { -+ netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n", -+ mtu); -+ return PTR_ERR(dma_conf); -+ } -+ -+ stmmac_release(dev); -+ -+ ret = __stmmac_open(dev, dma_conf); -+ kfree(dma_conf); -+ if (ret) { -+ netdev_err(priv->dev, "failed reopening the interface after MTU change\n"); -+ return ret; -+ } -+ -+ stmmac_set_rx_mode(dev); -+ } - -+ dev->mtu = mtu; - netdev_update_features(dev); - - return 0; diff --git a/target/linux/generic/backport-6.1/776-v6.1-01-net-dsa-qca8k-fix-inband-mgmt-for-big-endian-systems.patch b/target/linux/generic/backport-6.1/776-v6.1-01-net-dsa-qca8k-fix-inband-mgmt-for-big-endian-systems.patch deleted file mode 100644 index 4f0d95f45610..000000000000 --- a/target/linux/generic/backport-6.1/776-v6.1-01-net-dsa-qca8k-fix-inband-mgmt-for-big-endian-systems.patch +++ /dev/null @@ -1,156 +0,0 @@ -From a2550d3ce53c68f54042bc5e468c4d07491ffe0e Mon Sep 17 00:00:00 2001 -From: Christian Marangi -Date: Wed, 12 Oct 2022 19:18:36 +0200 -Subject: [PATCH 1/2] net: dsa: qca8k: fix inband mgmt for big-endian systems - -The header and the data of the skb for the inband mgmt requires -to be in little-endian. This is problematic for big-endian system -as the mgmt header is written in the cpu byte order. - -Fix this by converting each value for the mgmt header and data to -little-endian, and convert to cpu byte order the mgmt header and -data sent by the switch. - -Fixes: 5950c7c0a68c ("net: dsa: qca8k: add support for mgmt read/write in Ethernet packet") -Tested-by: Pawel Dembicki -Tested-by: Lech Perczak -Signed-off-by: Christian Marangi -Reviewed-by: Lech Perczak -Signed-off-by: David S. 
Miller ---- - drivers/net/dsa/qca/qca8k-8xxx.c | 63 ++++++++++++++++++++++++-------- - include/linux/dsa/tag_qca.h | 6 +-- - 2 files changed, 50 insertions(+), 19 deletions(-) - ---- a/drivers/net/dsa/qca/qca8k-8xxx.c -+++ b/drivers/net/dsa/qca/qca8k-8xxx.c -@@ -137,27 +137,42 @@ static void qca8k_rw_reg_ack_handler(str - struct qca8k_mgmt_eth_data *mgmt_eth_data; - struct qca8k_priv *priv = ds->priv; - struct qca_mgmt_ethhdr *mgmt_ethhdr; -+ u32 command; - u8 len, cmd; -+ int i; - - mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb_mac_header(skb); - mgmt_eth_data = &priv->mgmt_eth_data; - -- cmd = FIELD_GET(QCA_HDR_MGMT_CMD, mgmt_ethhdr->command); -- len = FIELD_GET(QCA_HDR_MGMT_LENGTH, mgmt_ethhdr->command); -+ command = get_unaligned_le32(&mgmt_ethhdr->command); -+ cmd = FIELD_GET(QCA_HDR_MGMT_CMD, command); -+ len = FIELD_GET(QCA_HDR_MGMT_LENGTH, command); - - /* Make sure the seq match the requested packet */ -- if (mgmt_ethhdr->seq == mgmt_eth_data->seq) -+ if (get_unaligned_le32(&mgmt_ethhdr->seq) == mgmt_eth_data->seq) - mgmt_eth_data->ack = true; - - if (cmd == MDIO_READ) { -- mgmt_eth_data->data[0] = mgmt_ethhdr->mdio_data; -+ u32 *val = mgmt_eth_data->data; -+ -+ *val = get_unaligned_le32(&mgmt_ethhdr->mdio_data); - - /* Get the rest of the 12 byte of data. - * The read/write function will extract the requested data. - */ -- if (len > QCA_HDR_MGMT_DATA1_LEN) -- memcpy(mgmt_eth_data->data + 1, skb->data, -- QCA_HDR_MGMT_DATA2_LEN); -+ if (len > QCA_HDR_MGMT_DATA1_LEN) { -+ __le32 *data2 = (__le32 *)skb->data; -+ int data_len = min_t(int, QCA_HDR_MGMT_DATA2_LEN, -+ len - QCA_HDR_MGMT_DATA1_LEN); -+ -+ val++; -+ -+ for (i = sizeof(u32); i <= data_len; i += sizeof(u32)) { -+ *val = get_unaligned_le32(data2); -+ val++; -+ data2++; -+ } -+ } - } - - complete(&mgmt_eth_data->rw_done); -@@ -169,8 +184,10 @@ static struct sk_buff *qca8k_alloc_mdio_ - struct qca_mgmt_ethhdr *mgmt_ethhdr; - unsigned int real_len; - struct sk_buff *skb; -- u32 *data2; -+ __le32 *data2; -+ u32 command; - u16 hdr; -+ int i; - - skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN); - if (!skb) -@@ -199,20 +216,32 @@ static struct sk_buff *qca8k_alloc_mdio_ - hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(0)); - hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG); - -- mgmt_ethhdr->command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg); -- mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len); -- mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd); -- mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE, -+ command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg); -+ command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len); -+ command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd); -+ command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE, - QCA_HDR_MGMT_CHECK_CODE_VAL); - -+ put_unaligned_le32(command, &mgmt_ethhdr->command); -+ - if (cmd == MDIO_WRITE) -- mgmt_ethhdr->mdio_data = *val; -+ put_unaligned_le32(*val, &mgmt_ethhdr->mdio_data); - - mgmt_ethhdr->hdr = htons(hdr); - - data2 = skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN); -- if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN) -- memcpy(data2, val + 1, len - QCA_HDR_MGMT_DATA1_LEN); -+ if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN) { -+ int data_len = min_t(int, QCA_HDR_MGMT_DATA2_LEN, -+ len - QCA_HDR_MGMT_DATA1_LEN); -+ -+ val++; -+ -+ for (i = sizeof(u32); i <= data_len; i += sizeof(u32)) { -+ put_unaligned_le32(*val, data2); -+ data2++; -+ val++; -+ } -+ } - - return skb; - } -@@ -220,9 +249,11 @@ static struct sk_buff *qca8k_alloc_mdio_ - 
static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num) - { - struct qca_mgmt_ethhdr *mgmt_ethhdr; -+ u32 seq; - -+ seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num); - mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb->data; -- mgmt_ethhdr->seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num); -+ put_unaligned_le32(seq, &mgmt_ethhdr->seq); - } - - static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len) ---- a/include/linux/dsa/tag_qca.h -+++ b/include/linux/dsa/tag_qca.h -@@ -56,9 +56,9 @@ - - /* Special struct emulating a Ethernet header */ - struct qca_mgmt_ethhdr { -- u32 command; /* command bit 31:0 */ -- u32 seq; /* seq 63:32 */ -- u32 mdio_data; /* first 4byte mdio */ -+ __le32 command; /* command bit 31:0 */ -+ __le32 seq; /* seq 63:32 */ -+ __le32 mdio_data; /* first 4byte mdio */ - __be16 hdr; /* qca hdr */ - } __packed; - diff --git a/target/linux/generic/backport-6.1/776-v6.1-02-net-dsa-qca8k-fix-ethtool-autocast-mib-for-big-endia.patch b/target/linux/generic/backport-6.1/776-v6.1-02-net-dsa-qca8k-fix-ethtool-autocast-mib-for-big-endia.patch deleted file mode 100644 index d13014bf93bd..000000000000 --- a/target/linux/generic/backport-6.1/776-v6.1-02-net-dsa-qca8k-fix-ethtool-autocast-mib-for-big-endia.patch +++ /dev/null @@ -1,81 +0,0 @@ -From 0d4636f7d72df3179b20a2d32b647881917a5e2a Mon Sep 17 00:00:00 2001 -From: Christian Marangi -Date: Wed, 12 Oct 2022 19:18:37 +0200 -Subject: [PATCH 2/2] net: dsa: qca8k: fix ethtool autocast mib for big-endian - systems - -The switch sends autocast mib in little-endian. This is problematic for -big-endian system as the values needs to be converted. - -Fix this by converting each mib value to cpu byte order. - -Fixes: 5c957c7ca78c ("net: dsa: qca8k: add support for mib autocast in Ethernet packet") -Tested-by: Pawel Dembicki -Tested-by: Lech Perczak -Signed-off-by: Christian Marangi -Signed-off-by: David S. 
Miller ---- - drivers/net/dsa/qca/qca8k-8xxx.c | 20 ++++++++------------ - include/linux/dsa/tag_qca.h | 2 +- - 2 files changed, 9 insertions(+), 13 deletions(-) - ---- a/drivers/net/dsa/qca/qca8k-8xxx.c -+++ b/drivers/net/dsa/qca/qca8k-8xxx.c -@@ -1668,9 +1668,9 @@ static void qca8k_mib_autocast_handler(s - struct qca8k_priv *priv = ds->priv; - const struct qca8k_mib_desc *mib; - struct mib_ethhdr *mib_ethhdr; -- int i, mib_len, offset = 0; -- u64 *data; -+ __le32 *data2; - u8 port; -+ int i; - - mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb); - mib_eth_data = &priv->mib_eth_data; -@@ -1682,28 +1682,24 @@ static void qca8k_mib_autocast_handler(s - if (port != mib_eth_data->req_port) - goto exit; - -- data = mib_eth_data->data; -+ data2 = (__le32 *)skb->data; - - for (i = 0; i < priv->info->mib_count; i++) { - mib = &ar8327_mib[i]; - - /* First 3 mib are present in the skb head */ - if (i < 3) { -- data[i] = mib_ethhdr->data[i]; -+ mib_eth_data->data[i] = get_unaligned_le32(mib_ethhdr->data + i); - continue; - } - -- mib_len = sizeof(uint32_t); -- - /* Some mib are 64 bit wide */ - if (mib->size == 2) -- mib_len = sizeof(uint64_t); -- -- /* Copy the mib value from packet to the */ -- memcpy(data + i, skb->data + offset, mib_len); -+ mib_eth_data->data[i] = get_unaligned_le64((__le64 *)data2); -+ else -+ mib_eth_data->data[i] = get_unaligned_le32(data2); - -- /* Set the offset for the next mib */ -- offset += mib_len; -+ data2 += mib->size; - } - - exit: ---- a/include/linux/dsa/tag_qca.h -+++ b/include/linux/dsa/tag_qca.h -@@ -68,7 +68,7 @@ enum mdio_cmd { - }; - - struct mib_ethhdr { -- u32 data[3]; /* first 3 mib counter */ -+ __le32 data[3]; /* first 3 mib counter */ - __be16 hdr; /* qca hdr */ - } __packed; - diff --git a/target/linux/generic/backport-6.1/777-v6.2-01-net-dsa-qca8k-fix-wrong-length-value-for-mgmt-eth-pa.patch b/target/linux/generic/backport-6.1/777-v6.2-01-net-dsa-qca8k-fix-wrong-length-value-for-mgmt-eth-pa.patch deleted file mode 100644 index b61e7ede4946..000000000000 --- a/target/linux/generic/backport-6.1/777-v6.2-01-net-dsa-qca8k-fix-wrong-length-value-for-mgmt-eth-pa.patch +++ /dev/null @@ -1,102 +0,0 @@ -From 9807ae69746196ee4bbffe7d22d22ab2b61c6ed0 Mon Sep 17 00:00:00 2001 -From: Christian Marangi -Date: Thu, 29 Dec 2022 17:33:32 +0100 -Subject: [PATCH 1/5] net: dsa: qca8k: fix wrong length value for mgmt eth - packet - -The assumption that Documentation was right about how this value work was -wrong. It was discovered that the length value of the mgmt header is in -step of word size. - -As an example to process 4 byte of data the correct length to set is 2. -To process 8 byte 4, 12 byte 6, 16 byte 8... - -Odd values will always return the next size on the ack packet. -(length of 3 (6 byte) will always return 8 bytes of data) - -This means that a value of 15 (0xf) actually means reading/writing 32 bytes -of data instead of 16 bytes. This behaviour is totally absent and not -documented in the switch Documentation. - -In fact from Documentation the max value that mgmt eth can process is -16 byte of data while in reality it can process 32 bytes at once. - -To handle this we always round up the length after deviding it for word -size. We check if the result is odd and we round another time to align -to what the switch will provide in the ack packet. 
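A worked sketch of the byte-count to length-field mapping described above (illustration only, not part of the patch; the helper name is hypothetical, but it mirrors the real_len computation in the hunk below):

/* requested bytes -> length field -> bytes the switch returns
 *        4        ->      2       ->        4
 *        6        ->      4       ->        8   (odd 3 rounds up to 4)
 *       16        ->      8       ->       16
 *       32        ->     15 (0xf) ->       32   (0xf is the register maximum)
 */
static u32 mgmt_len_field(int len)      /* hypothetical helper */
{
        u32 words = DIV_ROUND_UP(len, sizeof(u16));

        if (words % 2)          /* odd values make the switch return the next size */
                words++;
        if (words == 16)        /* cap to 0xf, which already covers 32 bytes */
                words--;
        return words;
}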
-The workaround for the length limit of 15 is still needed as the length -reg max value is 0xf(15) - -Reported-by: Ronald Wahl -Tested-by: Ronald Wahl -Fixes: 90386223f44e ("net: dsa: qca8k: add support for larger read/write size with mgmt Ethernet") -Signed-off-by: Christian Marangi -Cc: stable@vger.kernel.org # v5.18+ -Signed-off-by: David S. Miller ---- - drivers/net/dsa/qca/qca8k-8xxx.c | 45 +++++++++++++++++++++++++------- - 1 file changed, 35 insertions(+), 10 deletions(-) - ---- a/drivers/net/dsa/qca/qca8k-8xxx.c -+++ b/drivers/net/dsa/qca/qca8k-8xxx.c -@@ -146,7 +146,16 @@ static void qca8k_rw_reg_ack_handler(str - - command = get_unaligned_le32(&mgmt_ethhdr->command); - cmd = FIELD_GET(QCA_HDR_MGMT_CMD, command); -+ - len = FIELD_GET(QCA_HDR_MGMT_LENGTH, command); -+ /* Special case for len of 15 as this is the max value for len and needs to -+ * be increased before converting it from word to dword. -+ */ -+ if (len == 15) -+ len++; -+ -+ /* We can ignore odd value, we always round up them in the alloc function. */ -+ len *= sizeof(u16); - - /* Make sure the seq match the requested packet */ - if (get_unaligned_le32(&mgmt_ethhdr->seq) == mgmt_eth_data->seq) -@@ -193,17 +202,33 @@ static struct sk_buff *qca8k_alloc_mdio_ - if (!skb) - return NULL; - -- /* Max value for len reg is 15 (0xf) but the switch actually return 16 byte -- * Actually for some reason the steps are: -- * 0: nothing -- * 1-4: first 4 byte -- * 5-6: first 12 byte -- * 7-15: all 16 byte -+ /* Hdr mgmt length value is in step of word size. -+ * As an example to process 4 byte of data the correct length to set is 2. -+ * To process 8 byte 4, 12 byte 6, 16 byte 8... -+ * -+ * Odd values will always return the next size on the ack packet. -+ * (length of 3 (6 byte) will always return 8 bytes of data) -+ * -+ * This means that a value of 15 (0xf) actually means reading/writing 32 bytes -+ * of data. -+ * -+ * To correctly calculate the length we devide the requested len by word and -+ * round up. -+ * On the ack function we can skip the odd check as we already handle the -+ * case here. - */ -- if (len == 16) -- real_len = 15; -- else -- real_len = len; -+ real_len = DIV_ROUND_UP(len, sizeof(u16)); -+ -+ /* We check if the result len is odd and we round up another time to -+ * the next size. (length of 3 will be increased to 4 as switch will always -+ * return 8 bytes) -+ */ -+ if (real_len % sizeof(u16) != 0) -+ real_len++; -+ -+ /* Max reg value is 0xf(15) but switch will always return the next size (32 byte) */ -+ if (real_len == 16) -+ real_len--; - - skb_reset_mac_header(skb); - skb_set_network_header(skb, skb->len); diff --git a/target/linux/generic/backport-6.1/777-v6.2-02-net-dsa-tag_qca-fix-wrong-MGMT_DATA2-size.patch b/target/linux/generic/backport-6.1/777-v6.2-02-net-dsa-tag_qca-fix-wrong-MGMT_DATA2-size.patch deleted file mode 100644 index 55ecb1eb42e7..000000000000 --- a/target/linux/generic/backport-6.1/777-v6.2-02-net-dsa-tag_qca-fix-wrong-MGMT_DATA2-size.patch +++ /dev/null @@ -1,34 +0,0 @@ -From d9dba91be71f03cc75bcf39fc0d5d99ff33f1ae0 Mon Sep 17 00:00:00 2001 -From: Christian Marangi -Date: Thu, 29 Dec 2022 17:33:33 +0100 -Subject: [PATCH 2/5] net: dsa: tag_qca: fix wrong MGMT_DATA2 size - -It was discovered that MGMT_DATA2 can contain up to 28 bytes of data -instead of the 12 bytes written in the Documentation by accounting the -limit of 16 bytes declared in Documentation subtracting the first 4 byte -in the packet header. - -Update the define with the real world value. 
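The arithmetic behind the new defines, spelled out (an inference from the message above and the hunk below, not text from the patch):

/* documented limit: 16 byte payload - 4 byte mdio_data in the header = 12 byte DATA2
 * observed limit:   32 byte payload - 4 byte mdio_data in the header = 28 byte DATA2
 * The padding shrinks from 34 to 18 so DATA2 + padding stays 46 bytes, and the
 * overall QCA_HDR_MGMT_PKT_LEN is presumably unchanged.
 */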
- -Tested-by: Ronald Wahl -Fixes: c2ee8181fddb ("net: dsa: tag_qca: add define for handling mgmt Ethernet packet") -Signed-off-by: Christian Marangi -Cc: stable@vger.kernel.org # v5.18+ -Signed-off-by: David S. Miller ---- - include/linux/dsa/tag_qca.h | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/include/linux/dsa/tag_qca.h -+++ b/include/linux/dsa/tag_qca.h -@@ -40,8 +40,8 @@ - QCA_HDR_MGMT_COMMAND_LEN + \ - QCA_HDR_MGMT_DATA1_LEN) - --#define QCA_HDR_MGMT_DATA2_LEN 12 /* Other 12 byte for the mdio data */ --#define QCA_HDR_MGMT_PADDING_LEN 34 /* Padding to reach the min Ethernet packet */ -+#define QCA_HDR_MGMT_DATA2_LEN 28 /* Other 28 byte for the mdio data */ -+#define QCA_HDR_MGMT_PADDING_LEN 18 /* Padding to reach the min Ethernet packet */ - - #define QCA_HDR_MGMT_PKT_LEN (QCA_HDR_MGMT_HEADER_LEN + \ - QCA_HDR_LEN + \ diff --git a/target/linux/generic/backport-6.1/777-v6.2-03-Revert-net-dsa-qca8k-cache-lo-and-hi-for-mdio-write.patch b/target/linux/generic/backport-6.1/777-v6.2-03-Revert-net-dsa-qca8k-cache-lo-and-hi-for-mdio-write.patch deleted file mode 100644 index c8e22fd1b815..000000000000 --- a/target/linux/generic/backport-6.1/777-v6.2-03-Revert-net-dsa-qca8k-cache-lo-and-hi-for-mdio-write.patch +++ /dev/null @@ -1,172 +0,0 @@ -From 03cb9e6d0b32b768e3d9d473c5c4ca1100877664 Mon Sep 17 00:00:00 2001 -From: Christian Marangi -Date: Thu, 29 Dec 2022 17:33:34 +0100 -Subject: [PATCH 3/5] Revert "net: dsa: qca8k: cache lo and hi for mdio write" - -This reverts commit 2481d206fae7884cd07014fd1318e63af35e99eb. - -The Documentation is very confusing about the topic. -The cache logic for hi and lo is wrong and actually miss some regs to be -actually written. - -What the Documentation actually intended was that it's possible to skip -writing hi OR lo if half of the reg is not needed to be written or read. - -Revert the change in favor of a better and correct implementation. - -Reported-by: Ronald Wahl -Signed-off-by: Christian Marangi -Cc: stable@vger.kernel.org # v5.18+ -Signed-off-by: David S. 
Miller ---- - drivers/net/dsa/qca/qca8k-8xxx.c | 61 +++++++------------------------- - drivers/net/dsa/qca/qca8k.h | 5 --- - 2 files changed, 12 insertions(+), 54 deletions(-) - ---- a/drivers/net/dsa/qca/qca8k-8xxx.c -+++ b/drivers/net/dsa/qca/qca8k-8xxx.c -@@ -37,44 +37,6 @@ qca8k_split_addr(u32 regaddr, u16 *r1, u - } - - static int --qca8k_set_lo(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 lo) --{ -- u16 *cached_lo = &priv->mdio_cache.lo; -- struct mii_bus *bus = priv->bus; -- int ret; -- -- if (lo == *cached_lo) -- return 0; -- -- ret = bus->write(bus, phy_id, regnum, lo); -- if (ret < 0) -- dev_err_ratelimited(&bus->dev, -- "failed to write qca8k 32bit lo register\n"); -- -- *cached_lo = lo; -- return 0; --} -- --static int --qca8k_set_hi(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 hi) --{ -- u16 *cached_hi = &priv->mdio_cache.hi; -- struct mii_bus *bus = priv->bus; -- int ret; -- -- if (hi == *cached_hi) -- return 0; -- -- ret = bus->write(bus, phy_id, regnum, hi); -- if (ret < 0) -- dev_err_ratelimited(&bus->dev, -- "failed to write qca8k 32bit hi register\n"); -- -- *cached_hi = hi; -- return 0; --} -- --static int - qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val) - { - int ret; -@@ -97,7 +59,7 @@ qca8k_mii_read32(struct mii_bus *bus, in - } - - static void --qca8k_mii_write32(struct qca8k_priv *priv, int phy_id, u32 regnum, u32 val) -+qca8k_mii_write32(struct mii_bus *bus, int phy_id, u32 regnum, u32 val) - { - u16 lo, hi; - int ret; -@@ -105,9 +67,12 @@ qca8k_mii_write32(struct qca8k_priv *pri - lo = val & 0xffff; - hi = (u16)(val >> 16); - -- ret = qca8k_set_lo(priv, phy_id, regnum, lo); -+ ret = bus->write(bus, phy_id, regnum, lo); - if (ret >= 0) -- ret = qca8k_set_hi(priv, phy_id, regnum + 1, hi); -+ ret = bus->write(bus, phy_id, regnum + 1, hi); -+ if (ret < 0) -+ dev_err_ratelimited(&bus->dev, -+ "failed to write qca8k 32bit register\n"); - } - - static int -@@ -442,7 +407,7 @@ qca8k_regmap_write(void *ctx, uint32_t r - if (ret < 0) - goto exit; - -- qca8k_mii_write32(priv, 0x10 | r2, r1, val); -+ qca8k_mii_write32(bus, 0x10 | r2, r1, val); - - exit: - mutex_unlock(&bus->mdio_lock); -@@ -475,7 +440,7 @@ qca8k_regmap_update_bits(void *ctx, uint - - val &= ~mask; - val |= write_val; -- qca8k_mii_write32(priv, 0x10 | r2, r1, val); -+ qca8k_mii_write32(bus, 0x10 | r2, r1, val); - - exit: - mutex_unlock(&bus->mdio_lock); -@@ -750,14 +715,14 @@ qca8k_mdio_write(struct qca8k_priv *priv - if (ret) - goto exit; - -- qca8k_mii_write32(priv, 0x10 | r2, r1, val); -+ qca8k_mii_write32(bus, 0x10 | r2, r1, val); - - ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL, - QCA8K_MDIO_MASTER_BUSY); - - exit: - /* even if the busy_wait timeouts try to clear the MASTER_EN */ -- qca8k_mii_write32(priv, 0x10 | r2, r1, 0); -+ qca8k_mii_write32(bus, 0x10 | r2, r1, 0); - - mutex_unlock(&bus->mdio_lock); - -@@ -787,7 +752,7 @@ qca8k_mdio_read(struct qca8k_priv *priv, - if (ret) - goto exit; - -- qca8k_mii_write32(priv, 0x10 | r2, r1, val); -+ qca8k_mii_write32(bus, 0x10 | r2, r1, val); - - ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL, - QCA8K_MDIO_MASTER_BUSY); -@@ -798,7 +763,7 @@ qca8k_mdio_read(struct qca8k_priv *priv, - - exit: - /* even if the busy_wait timeouts try to clear the MASTER_EN */ -- qca8k_mii_write32(priv, 0x10 | r2, r1, 0); -+ qca8k_mii_write32(bus, 0x10 | r2, r1, 0); - - mutex_unlock(&bus->mdio_lock); - -@@ -1914,8 +1879,6 @@ qca8k_sw_probe(struct mdio_device *mdiod - } - - priv->mdio_cache.page = 0xffff; -- 
priv->mdio_cache.lo = 0xffff; -- priv->mdio_cache.hi = 0xffff; - - /* Check the detected switch id */ - ret = qca8k_read_switch_id(priv); ---- a/drivers/net/dsa/qca/qca8k.h -+++ b/drivers/net/dsa/qca/qca8k.h -@@ -375,11 +375,6 @@ struct qca8k_mdio_cache { - * mdio writes - */ - u16 page; --/* lo and hi can also be cached and from Documentation we can skip one -- * extra mdio write if lo or hi is didn't change. -- */ -- u16 lo; -- u16 hi; - }; - - struct qca8k_priv { diff --git a/target/linux/generic/backport-6.1/778-v5.18-01-net-phy-at803x-add-fiber-support.patch b/target/linux/generic/backport-6.1/778-v5.18-01-net-phy-at803x-add-fiber-support.patch deleted file mode 100644 index 7cb21ed00d60..000000000000 --- a/target/linux/generic/backport-6.1/778-v5.18-01-net-phy-at803x-add-fiber-support.patch +++ /dev/null @@ -1,193 +0,0 @@ -From 3265f421887847db9ae2c01a00645e33608556d8 Mon Sep 17 00:00:00 2001 -From: Robert Hancock -Date: Tue, 25 Jan 2022 10:54:09 -0600 -Subject: [PATCH] net: phy: at803x: add fiber support - -Previously this driver always forced the copper page to be selected, -however for AR8031 in 100Base-FX or 1000Base-X modes, the fiber page -needs to be selected. Set the appropriate mode based on the hardware -mode_cfg strap selection. - -Enable the appropriate interrupt bits to detect fiber-side link up -or down events. - -Update config_aneg and read_status methods to use the appropriate -Clause 37 calls when fiber mode is in use. - -Signed-off-by: Robert Hancock -Signed-off-by: David S. Miller ---- - drivers/net/phy/at803x.c | 76 +++++++++++++++++++++++++++++++++++----- - 1 file changed, 67 insertions(+), 9 deletions(-) - ---- a/drivers/net/phy/at803x.c -+++ b/drivers/net/phy/at803x.c -@@ -48,6 +48,8 @@ - #define AT803X_INTR_ENABLE_PAGE_RECEIVED BIT(12) - #define AT803X_INTR_ENABLE_LINK_FAIL BIT(11) - #define AT803X_INTR_ENABLE_LINK_SUCCESS BIT(10) -+#define AT803X_INTR_ENABLE_LINK_FAIL_BX BIT(8) -+#define AT803X_INTR_ENABLE_LINK_SUCCESS_BX BIT(7) - #define AT803X_INTR_ENABLE_WIRESPEED_DOWNGRADE BIT(5) - #define AT803X_INTR_ENABLE_POLARITY_CHANGED BIT(1) - #define AT803X_INTR_ENABLE_WOL BIT(0) -@@ -82,6 +84,17 @@ - - #define AT803X_MODE_CFG_MASK 0x0F - #define AT803X_MODE_CFG_SGMII 0x01 -+#define AT803X_MODE_CFG_BASET_RGMII 0x00 -+#define AT803X_MODE_CFG_BASET_SGMII 0x01 -+#define AT803X_MODE_CFG_BX1000_RGMII_50OHM 0x02 -+#define AT803X_MODE_CFG_BX1000_RGMII_75OHM 0x03 -+#define AT803X_MODE_CFG_BX1000_CONV_50OHM 0x04 -+#define AT803X_MODE_CFG_BX1000_CONV_75OHM 0x05 -+#define AT803X_MODE_CFG_FX100_RGMII_50OHM 0x06 -+#define AT803X_MODE_CFG_FX100_CONV_50OHM 0x07 -+#define AT803X_MODE_CFG_RGMII_AUTO_MDET 0x0B -+#define AT803X_MODE_CFG_FX100_RGMII_75OHM 0x0E -+#define AT803X_MODE_CFG_FX100_CONV_75OHM 0x0F - - #define AT803X_PSSR 0x11 /*PHY-Specific Status Register*/ - #define AT803X_PSSR_MR_AN_COMPLETE 0x0200 -@@ -199,6 +212,8 @@ struct at803x_priv { - u16 clk_25m_mask; - u8 smarteee_lpi_tw_1g; - u8 smarteee_lpi_tw_100m; -+ bool is_fiber; -+ bool is_1000basex; - struct regulator_dev *vddio_rdev; - struct regulator_dev *vddh_rdev; - struct regulator *vddio; -@@ -674,7 +689,33 @@ static int at803x_probe(struct phy_devic - return ret; - } - -+ if (phydev->drv->phy_id == ATH8031_PHY_ID) { -+ int ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG); -+ int mode_cfg; -+ -+ if (ccr < 0) -+ goto err; -+ mode_cfg = ccr & AT803X_MODE_CFG_MASK; -+ -+ switch (mode_cfg) { -+ case AT803X_MODE_CFG_BX1000_RGMII_50OHM: -+ case AT803X_MODE_CFG_BX1000_RGMII_75OHM: -+ priv->is_1000basex = true; -+ 
fallthrough; -+ case AT803X_MODE_CFG_FX100_RGMII_50OHM: -+ case AT803X_MODE_CFG_FX100_RGMII_75OHM: -+ priv->is_fiber = true; -+ break; -+ } -+ } -+ - return 0; -+ -+err: -+ if (priv->vddio) -+ regulator_disable(priv->vddio); -+ -+ return ret; - } - - static void at803x_remove(struct phy_device *phydev) -@@ -687,6 +728,7 @@ static void at803x_remove(struct phy_dev - - static int at803x_get_features(struct phy_device *phydev) - { -+ struct at803x_priv *priv = phydev->priv; - int err; - - err = genphy_read_abilities(phydev); -@@ -704,12 +746,13 @@ static int at803x_get_features(struct ph - * As a result of that, ESTATUS_1000_XFULL is set - * to 1 even when operating in copper TP mode. - * -- * Remove this mode from the supported link modes, -- * as this driver currently only supports copper -- * operation. -+ * Remove this mode from the supported link modes -+ * when not operating in 1000BaseX mode. - */ -- linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, -- phydev->supported); -+ if (!priv->is_1000basex) -+ linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, -+ phydev->supported); -+ - return 0; - } - -@@ -773,15 +816,18 @@ static int at8031_pll_config(struct phy_ - - static int at803x_config_init(struct phy_device *phydev) - { -+ struct at803x_priv *priv = phydev->priv; - int ret; - - if (phydev->drv->phy_id == ATH8031_PHY_ID) { - /* Some bootloaders leave the fiber page selected. -- * Switch to the copper page, as otherwise we read -- * the PHY capabilities from the fiber side. -+ * Switch to the appropriate page (fiber or copper), as otherwise we -+ * read the PHY capabilities from the wrong page. - */ - phy_lock_mdio_bus(phydev); -- ret = at803x_write_page(phydev, AT803X_PAGE_COPPER); -+ ret = at803x_write_page(phydev, -+ priv->is_fiber ? 
AT803X_PAGE_FIBER : -+ AT803X_PAGE_COPPER); - phy_unlock_mdio_bus(phydev); - if (ret) - return ret; -@@ -840,6 +886,7 @@ static int at803x_ack_interrupt(struct p - - static int at803x_config_intr(struct phy_device *phydev) - { -+ struct at803x_priv *priv = phydev->priv; - int err; - int value; - -@@ -856,6 +903,10 @@ static int at803x_config_intr(struct phy - value |= AT803X_INTR_ENABLE_DUPLEX_CHANGED; - value |= AT803X_INTR_ENABLE_LINK_FAIL; - value |= AT803X_INTR_ENABLE_LINK_SUCCESS; -+ if (priv->is_fiber) { -+ value |= AT803X_INTR_ENABLE_LINK_FAIL_BX; -+ value |= AT803X_INTR_ENABLE_LINK_SUCCESS_BX; -+ } - - err = phy_write(phydev, AT803X_INTR_ENABLE, value); - } else { -@@ -923,8 +974,12 @@ static void at803x_link_change_notify(st - - static int at803x_read_status(struct phy_device *phydev) - { -+ struct at803x_priv *priv = phydev->priv; - int ss, err, old_link = phydev->link; - -+ if (priv->is_1000basex) -+ return genphy_c37_read_status(phydev); -+ - /* Update the link, but return if there was an error */ - err = genphy_update_link(phydev); - if (err) -@@ -1023,6 +1078,7 @@ static int at803x_config_mdix(struct phy - - static int at803x_config_aneg(struct phy_device *phydev) - { -+ struct at803x_priv *priv = phydev->priv; - int ret; - - ret = at803x_config_mdix(phydev, phydev->mdix_ctrl); -@@ -1039,6 +1095,9 @@ static int at803x_config_aneg(struct phy - return ret; - } - -+ if (priv->is_1000basex) -+ return genphy_c37_config_aneg(phydev); -+ - return genphy_config_aneg(phydev); - } - diff --git a/target/linux/generic/backport-6.1/778-v5.18-02-net-phy-at803x-support-downstream-SFP-cage.patch b/target/linux/generic/backport-6.1/778-v5.18-02-net-phy-at803x-support-downstream-SFP-cage.patch deleted file mode 100644 index 8393cb32e896..000000000000 --- a/target/linux/generic/backport-6.1/778-v5.18-02-net-phy-at803x-support-downstream-SFP-cage.patch +++ /dev/null @@ -1,95 +0,0 @@ -From dc4d5fcc5d365c9f70ea3f5c09bdf70e988fad50 Mon Sep 17 00:00:00 2001 -From: Robert Hancock -Date: Tue, 25 Jan 2022 10:54:10 -0600 -Subject: [PATCH] net: phy: at803x: Support downstream SFP cage - -Add support for downstream SFP cages for AR8031 and AR8033. This is -primarily intended for fiber modules or direct-attach cables, however -copper modules which work in 1000Base-X mode may also function. Such -modules are allowed with a warning. - -Signed-off-by: Robert Hancock -Signed-off-by: David S. Miller ---- - drivers/net/phy/at803x.c | 56 ++++++++++++++++++++++++++++++++++++++++ - 1 file changed, 56 insertions(+) - ---- a/drivers/net/phy/at803x.c -+++ b/drivers/net/phy/at803x.c -@@ -19,6 +19,8 @@ - #include - #include - #include -+#include -+#include - #include - - #define AT803X_SPECIFIC_FUNCTION_CONTROL 0x10 -@@ -555,6 +557,55 @@ static int at8031_register_regulators(st - return 0; - } - -+static int at803x_sfp_insert(void *upstream, const struct sfp_eeprom_id *id) -+{ -+ struct phy_device *phydev = upstream; -+ __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_support); -+ __ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support); -+ phy_interface_t iface; -+ -+ linkmode_zero(phy_support); -+ phylink_set(phy_support, 1000baseX_Full); -+ phylink_set(phy_support, 1000baseT_Full); -+ phylink_set(phy_support, Autoneg); -+ phylink_set(phy_support, Pause); -+ phylink_set(phy_support, Asym_Pause); -+ -+ linkmode_zero(sfp_support); -+ sfp_parse_support(phydev->sfp_bus, id, sfp_support); -+ /* Some modules support 10G modes as well as others we support. -+ * Mask out non-supported modes so the correct interface is picked. 
-+ */ -+ linkmode_and(sfp_support, phy_support, sfp_support); -+ -+ if (linkmode_empty(sfp_support)) { -+ dev_err(&phydev->mdio.dev, "incompatible SFP module inserted\n"); -+ return -EINVAL; -+ } -+ -+ iface = sfp_select_interface(phydev->sfp_bus, sfp_support); -+ -+ /* Only 1000Base-X is supported by AR8031/8033 as the downstream SerDes -+ * interface for use with SFP modules. -+ * However, some copper modules detected as having a preferred SGMII -+ * interface do default to and function in 1000Base-X mode, so just -+ * print a warning and allow such modules, as they may have some chance -+ * of working. -+ */ -+ if (iface == PHY_INTERFACE_MODE_SGMII) -+ dev_warn(&phydev->mdio.dev, "module may not function if 1000Base-X not supported\n"); -+ else if (iface != PHY_INTERFACE_MODE_1000BASEX) -+ return -EINVAL; -+ -+ return 0; -+} -+ -+static const struct sfp_upstream_ops at803x_sfp_ops = { -+ .attach = phy_sfp_attach, -+ .detach = phy_sfp_detach, -+ .module_insert = at803x_sfp_insert, -+}; -+ - static int at803x_parse_dt(struct phy_device *phydev) - { - struct device_node *node = phydev->mdio.dev.of_node; -@@ -662,6 +713,11 @@ static int at803x_parse_dt(struct phy_de - phydev_err(phydev, "failed to get VDDIO regulator\n"); - return PTR_ERR(priv->vddio); - } -+ -+ /* Only AR8031/8033 support 1000Base-X for SFP modules */ -+ ret = phy_sfp_probe(phydev, &at803x_sfp_ops); -+ if (ret < 0) -+ return ret; - } - - return 0; diff --git a/target/linux/generic/backport-6.1/778-v5.18-03-net-phy-at803x-fix-NULL-pointer-dereference-on-AR9331-PHY.patch b/target/linux/generic/backport-6.1/778-v5.18-03-net-phy-at803x-fix-NULL-pointer-dereference-on-AR9331-PHY.patch deleted file mode 100644 index de8951db10cf..000000000000 --- a/target/linux/generic/backport-6.1/778-v5.18-03-net-phy-at803x-fix-NULL-pointer-dereference-on-AR9331-PHY.patch +++ /dev/null @@ -1,56 +0,0 @@ -From 9926de7315be3d606cc011a305ad9adb9e8e14c9 Mon Sep 17 00:00:00 2001 -From: Oleksij Rempel -Date: Sat, 18 Jun 2022 14:23:33 +0200 -Subject: [PATCH] net: phy: at803x: fix NULL pointer dereference on AR9331 PHY - -Latest kernel will explode on the PHY interrupt config, since it depends -now on allocated priv. So, run probe to allocate priv to fix it. - - ar9331_switch ethernet.1:10 lan0 (uninitialized): PHY [!ahb!ethernet@1a000000!mdio!switch@10:00] driver [Qualcomm Atheros AR9331 built-in PHY] (irq=13) - CPU 0 Unable to handle kernel paging request at virtual address 0000000a, epc == 8050e8a8, ra == 80504b34 - ... - Call Trace: - [<8050e8a8>] at803x_config_intr+0x5c/0xd0 - [<80504b34>] phy_request_interrupt+0xa8/0xd0 - [<8050289c>] phylink_bringup_phy+0x2d8/0x3ac - [<80502b68>] phylink_fwnode_phy_connect+0x118/0x130 - [<8074d8ec>] dsa_slave_create+0x270/0x420 - [<80743b04>] dsa_port_setup+0x12c/0x148 - [<8074580c>] dsa_register_switch+0xaf0/0xcc0 - [<80511344>] ar9331_sw_probe+0x370/0x388 - [<8050cb78>] mdio_probe+0x44/0x70 - [<804df300>] really_probe+0x200/0x424 - [<804df7b4>] __driver_probe_device+0x290/0x298 - [<804df810>] driver_probe_device+0x54/0xe4 - [<804dfd50>] __device_attach_driver+0xe4/0x130 - [<804dcb00>] bus_for_each_drv+0xb4/0xd8 - [<804dfac4>] __device_attach+0x104/0x1a4 - [<804ddd24>] bus_probe_device+0x48/0xc4 - [<804deb44>] deferred_probe_work_func+0xf0/0x10c - [<800a0ffc>] process_one_work+0x314/0x4d4 - [<800a17fc>] worker_thread+0x2a4/0x354 - [<800a9a54>] kthread+0x134/0x13c - [<8006306c>] ret_from_kernel_thread+0x14/0x1c - -Same Issue would affect some other PHYs (QCA8081, QCA9561), so fix it -too. 
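
For reference, the crash above comes down to the private-data dereference that the fiber-support patch (778-v5.18-01, earlier in this commit) introduced into the shared interrupt handler. A rough sketch of the post-change function, abridged here purely for illustration and not itself part of any patch being dropped:

    /* Abridged from at803x_config_intr() after the fiber-support change.
     * at803x_probe() is what allocates priv and stores it in phydev->priv;
     * PHY entries that reuse this handler without wiring up .probe
     * (AR9331, QCA8081, QCA9561) therefore see phydev->priv == NULL.
     */
    static int at803x_config_intr(struct phy_device *phydev)
    {
        struct at803x_priv *priv = phydev->priv; /* NULL when .probe never ran */
        int value = 0;

        value |= AT803X_INTR_ENABLE_LINK_FAIL;
        value |= AT803X_INTR_ENABLE_LINK_SUCCESS;
        if (priv->is_fiber) {           /* <-- the NULL pointer dereference */
            value |= AT803X_INTR_ENABLE_LINK_FAIL_BX;
            value |= AT803X_INTR_ENABLE_LINK_SUCCESS_BX;
        }

        return phy_write(phydev, AT803X_INTR_ENABLE, value);
    }

The hunk below wires up .probe and .remove for the AR9331 phy_driver entry accordingly.
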
- -Fixes: 3265f4218878 ("net: phy: at803x: add fiber support") -Signed-off-by: Oleksij Rempel -Reviewed-by: Andrew Lunn -Signed-off-by: David S. Miller ---- - drivers/net/phy/at803x.c | 6 ++++++ - 1 file changed, 6 insertions(+) - ---- a/drivers/net/phy/at803x.c -+++ b/drivers/net/phy/at803x.c -@@ -1594,6 +1594,8 @@ static struct phy_driver at803x_driver[] - /* ATHEROS AR9331 */ - PHY_ID_MATCH_EXACT(ATH9331_PHY_ID), - .name = "Qualcomm Atheros AR9331 built-in PHY", -+ .probe = at803x_probe, -+ .remove = at803x_remove, - .suspend = at803x_suspend, - .resume = at803x_resume, - .flags = PHY_POLL_CABLE_TEST, diff --git a/target/linux/generic/backport-6.1/778-v5.18-04-net-phy-at803x-fix-error-return-code-in-at803x_probe.patch b/target/linux/generic/backport-6.1/778-v5.18-04-net-phy-at803x-fix-error-return-code-in-at803x_probe.patch deleted file mode 100644 index cdae5b4ca423..000000000000 --- a/target/linux/generic/backport-6.1/778-v5.18-04-net-phy-at803x-fix-error-return-code-in-at803x_probe.patch +++ /dev/null @@ -1,31 +0,0 @@ -From 1f0dd412e34e177621769866bef347f0b22364df Mon Sep 17 00:00:00 2001 -From: Wei Yongjun -Date: Fri, 18 Nov 2022 10:36:35 +0000 -Subject: [PATCH] net: phy: at803x: fix error return code in at803x_probe() - -Fix to return a negative error code from the ccr read error handling -case instead of 0, as done elsewhere in this function. - -Fixes: 3265f4218878 ("net: phy: at803x: add fiber support") -Signed-off-by: Wei Yongjun -Reviewed-by: Andrew Lunn -Link: https://lore.kernel.org/r/20221118103635.254256-1-weiyongjun@huaweicloud.com -Signed-off-by: Jakub Kicinski ---- - drivers/net/phy/at803x.c | 4 +++- - 1 file changed, 3 insertions(+), 1 deletion(-) - ---- a/drivers/net/phy/at803x.c -+++ b/drivers/net/phy/at803x.c -@@ -749,8 +749,10 @@ static int at803x_probe(struct phy_devic - int ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG); - int mode_cfg; - -- if (ccr < 0) -+ if (ccr < 0) { -+ ret = ccr; - goto err; -+ } - mode_cfg = ccr & AT803X_MODE_CFG_MASK; - - switch (mode_cfg) { diff --git a/target/linux/generic/backport-6.1/780-v5.16-bus-mhi-pci_generic-Introduce-Sierra-EM919X-support.patch b/target/linux/generic/backport-6.1/780-v5.16-bus-mhi-pci_generic-Introduce-Sierra-EM919X-support.patch deleted file mode 100644 index 44f0864e9a83..000000000000 --- a/target/linux/generic/backport-6.1/780-v5.16-bus-mhi-pci_generic-Introduce-Sierra-EM919X-support.patch +++ /dev/null @@ -1,80 +0,0 @@ -From 789d3eeb2367f92193a0882f7cdab03f0f9d6930 Mon Sep 17 00:00:00 2001 -From: Thomas Perrot -Date: Thu, 16 Dec 2021 13:42:27 +0530 -Subject: [PATCH] bus: mhi: pci_generic: Introduce Sierra EM919X support - -Add support for EM919X modems, this modem series is based on SDX55 -qcom chip. - -It is mandatory to use the same ring for control+data and diag events. 
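
In the tables that follow, that requirement is visible in how the DIAG channels are wired: their event-ring index (the trailing 0 in the channel macros, as these helpers are laid out) points at ring 0, which MHI_EVENT_CONFIG_CTRL() also declares as the control+data ring. A trimmed excerpt, for illustration only:

    static const struct mhi_channel_config mhi_sierra_em919x_channels[] = {
        /* last field selects the event ring: DIAG shares ring 0 */
        MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 0),
        MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 0),
    };

    static struct mhi_event_config modem_sierra_em919x_mhi_events[] = {
        /* first ring is control+data and DIAG ring */
        MHI_EVENT_CONFIG_CTRL(0, 2048),
    };
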
- -Link: https://lore.kernel.org/r/20211123081541.648426-1-thomas.perrot@bootlin.com -Tested-by: Aleksander Morgado -Reviewed-by: Manivannan Sadhasivam -Signed-off-by: Thomas Perrot -Signed-off-by: Manivannan Sadhasivam -Link: https://lore.kernel.org/r/20211216081227.237749-11-manivannan.sadhasivam@linaro.org -Signed-off-by: Greg Kroah-Hartman ---- - drivers/bus/mhi/host/pci_generic.c | 43 +++++++++++++++++++++++++++++++++++ - 1 file changed, 43 insertions(+) - ---- a/drivers/bus/mhi/host/pci_generic.c -+++ b/drivers/bus/mhi/host/pci_generic.c -@@ -406,6 +406,46 @@ static const struct mhi_pci_dev_info mhi - .mru_default = 32768, - }; - -+static const struct mhi_channel_config mhi_sierra_em919x_channels[] = { -+ MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0), -+ MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 256, 0), -+ MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 0), -+ MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 0), -+ MHI_CHANNEL_CONFIG_UL(12, "MBIM", 128, 0), -+ MHI_CHANNEL_CONFIG_DL(13, "MBIM", 128, 0), -+ MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0), -+ MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0), -+ MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0), -+ MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0), -+ MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 512, 1), -+ MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 512, 2), -+}; -+ -+static struct mhi_event_config modem_sierra_em919x_mhi_events[] = { -+ /* first ring is control+data and DIAG ring */ -+ MHI_EVENT_CONFIG_CTRL(0, 2048), -+ /* Hardware channels request dedicated hardware event rings */ -+ MHI_EVENT_CONFIG_HW_DATA(1, 2048, 100), -+ MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101) -+}; -+ -+static const struct mhi_controller_config modem_sierra_em919x_config = { -+ .max_channels = 128, -+ .timeout_ms = 24000, -+ .num_channels = ARRAY_SIZE(mhi_sierra_em919x_channels), -+ .ch_cfg = mhi_sierra_em919x_channels, -+ .num_events = ARRAY_SIZE(modem_sierra_em919x_mhi_events), -+ .event_cfg = modem_sierra_em919x_mhi_events, -+}; -+ -+static const struct mhi_pci_dev_info mhi_sierra_em919x_info = { -+ .name = "sierra-em919x", -+ .config = &modem_sierra_em919x_config, -+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM, -+ .dma_data_width = 32, -+ .sideband_wake = false, -+}; -+ - static const struct mhi_channel_config mhi_telit_fn980_hw_v1_channels[] = { - MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0), - MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0), -@@ -480,6 +520,9 @@ static const struct mhi_pci_dev_info mhi - }; - - static const struct pci_device_id mhi_pci_id_table[] = { -+ /* EM919x (sdx55), use the same vid:pid as qcom-sdx55m */ -+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x18d7, 0x0200), -+ .driver_data = (kernel_ulong_t) &mhi_sierra_em919x_info }, - /* Telit FN980 hardware revision v1 */ - { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x1C5D, 0x2000), - .driver_data = (kernel_ulong_t) &mhi_telit_fn980_hw_v1_info }, diff --git a/target/linux/generic/backport-6.1/781-v6.1-bus-mhi-host-always-print-detected-modem-name.patch b/target/linux/generic/backport-6.1/781-v6.1-bus-mhi-host-always-print-detected-modem-name.patch deleted file mode 100644 index 3c71a27b7987..000000000000 --- a/target/linux/generic/backport-6.1/781-v6.1-bus-mhi-host-always-print-detected-modem-name.patch +++ /dev/null @@ -1,37 +0,0 @@ -From f369e9ad52ec9361827e21a631b7198c9fca438e Mon Sep 17 00:00:00 2001 -From: Koen Vandeputte -Date: Wed, 31 Aug 2022 12:03:49 +0200 -Subject: [PATCH] bus: mhi: host: always print detected modem name - -This harmless print provides a very easy way of knowing -if the modem is detected properly during probing. 
- -Promote it to an informational print so no hassle is required -enabling kernel debugging info to obtain it. - -The rationale here is that: -On a lot of low-storage embedded devices, extensive kernel -debugging info is not always present as this would -increase it's size to much causing partition size issues. - -Signed-off-by: Koen Vandeputte -Reviewed-by: Manivannan Sadhasivam -Reviewed-by: Loic Poulain -Link: https://lore.kernel.org/r/20220831100349.1488762-1-koen.vandeputte@citymesh.com -[mani: added missing review tags] -Signed-off-by: Manivannan Sadhasivam ---- - drivers/bus/mhi/host/pci_generic.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/bus/mhi/host/pci_generic.c -+++ b/drivers/bus/mhi/host/pci_generic.c -@@ -806,7 +806,7 @@ static int mhi_pci_probe(struct pci_dev - struct mhi_controller *mhi_cntrl; - int err; - -- dev_dbg(&pdev->dev, "MHI PCI device found: %s\n", info->name); -+ dev_info(&pdev->dev, "MHI PCI device found: %s\n", info->name); - - /* mhi_pdev.mhi_cntrl must be zero-initialized */ - mhi_pdev = devm_kzalloc(&pdev->dev, sizeof(*mhi_pdev), GFP_KERNEL); diff --git a/target/linux/generic/backport-6.1/782-v6.1-net-dsa-mt7530-add-support-for-in-band-link-status.patch b/target/linux/generic/backport-6.1/782-v6.1-net-dsa-mt7530-add-support-for-in-band-link-status.patch deleted file mode 100644 index ebfbaccc9d76..000000000000 --- a/target/linux/generic/backport-6.1/782-v6.1-net-dsa-mt7530-add-support-for-in-band-link-status.patch +++ /dev/null @@ -1,130 +0,0 @@ -From e19de30d20809af3221ef8a2648b8a8a52e02d90 Mon Sep 17 00:00:00 2001 -From: Daniel Golle -Date: Wed, 21 Sep 2022 01:23:14 +0100 -Subject: [PATCH 1/1] net: dsa: mt7530: add support for in-band link status - -Read link status from SGMII PCS for in-band managed 2500Base-X and -1000Base-X connection on a MAC port of the MT7531. This is needed to -get the SFP cage working which is connected to SGMII interface of -port 5 of the MT7531 switch IC on the Bananapi BPi-R3 board. -While at it also handle an_complete for both the autoneg and the -non-autoneg codepath. - -Signed-off-by: Daniel Golle -Signed-off-by: David S. Miller ---- - drivers/net/dsa/mt7530.c | 50 +++++++++++++++++++++++++++++----------- - drivers/net/dsa/mt7530.h | 1 + - 2 files changed, 38 insertions(+), 13 deletions(-) - ---- a/drivers/net/dsa/mt7530.c -+++ b/drivers/net/dsa/mt7530.c -@@ -2721,9 +2721,6 @@ mt7531_mac_config(struct dsa_switch *ds, - case PHY_INTERFACE_MODE_NA: - case PHY_INTERFACE_MODE_1000BASEX: - case PHY_INTERFACE_MODE_2500BASEX: -- if (phylink_autoneg_inband(mode)) -- return -EINVAL; -- - return mt7531_sgmii_setup_mode_force(priv, port, interface); - default: - return -EINVAL; -@@ -2799,13 +2796,6 @@ unsupported: - return; - } - -- if (phylink_autoneg_inband(mode) && -- state->interface != PHY_INTERFACE_MODE_SGMII) { -- dev_err(ds->dev, "%s: in-band negotiation unsupported\n", -- __func__); -- return; -- } -- - mcr_cur = mt7530_read(priv, MT7530_PMCR_P(port)); - mcr_new = mcr_cur; - mcr_new &= ~PMCR_LINK_SETTINGS_MASK; -@@ -2942,6 +2932,9 @@ static void mt753x_phylink_get_caps(stru - config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | - MAC_10 | MAC_100 | MAC_1000FD; - -+ if ((priv->id == ID_MT7531) && mt753x_is_mac_port(port)) -+ config->mac_capabilities |= MAC_2500FD; -+ - /* This driver does not make use of the speed, duplex, pause or the - * advertisement in its mac_config, so it is safe to mark this driver - * as non-legacy. 
-@@ -3007,6 +3000,7 @@ mt7531_sgmii_pcs_get_state_an(struct mt7 - - status = mt7530_read(priv, MT7531_PCS_CONTROL_1(port)); - state->link = !!(status & MT7531_SGMII_LINK_STATUS); -+ state->an_complete = !!(status & MT7531_SGMII_AN_COMPLETE); - if (state->interface == PHY_INTERFACE_MODE_SGMII && - (status & MT7531_SGMII_AN_ENABLE)) { - val = mt7530_read(priv, MT7531_PCS_SPEED_ABILITY(port)); -@@ -3037,16 +3031,44 @@ mt7531_sgmii_pcs_get_state_an(struct mt7 - return 0; - } - -+static void -+mt7531_sgmii_pcs_get_state_inband(struct mt7530_priv *priv, int port, -+ struct phylink_link_state *state) -+{ -+ unsigned int val; -+ -+ val = mt7530_read(priv, MT7531_PCS_CONTROL_1(port)); -+ state->link = !!(val & MT7531_SGMII_LINK_STATUS); -+ if (!state->link) -+ return; -+ -+ state->an_complete = state->link; -+ -+ if (state->interface == PHY_INTERFACE_MODE_2500BASEX) -+ state->speed = SPEED_2500; -+ else -+ state->speed = SPEED_1000; -+ -+ state->duplex = DUPLEX_FULL; -+ state->pause = MLO_PAUSE_NONE; -+} -+ - static void mt7531_pcs_get_state(struct phylink_pcs *pcs, - struct phylink_link_state *state) - { - struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv; - int port = pcs_to_mt753x_pcs(pcs)->port; - -- if (state->interface == PHY_INTERFACE_MODE_SGMII) -+ if (state->interface == PHY_INTERFACE_MODE_SGMII) { - mt7531_sgmii_pcs_get_state_an(priv, port, state); -- else -- state->link = false; -+ return; -+ } else if ((state->interface == PHY_INTERFACE_MODE_1000BASEX) || -+ (state->interface == PHY_INTERFACE_MODE_2500BASEX)) { -+ mt7531_sgmii_pcs_get_state_inband(priv, port, state); -+ return; -+ } -+ -+ state->link = false; - } - - static int mt753x_pcs_config(struct phylink_pcs *pcs, unsigned int mode, -@@ -3087,6 +3109,8 @@ mt753x_setup(struct dsa_switch *ds) - priv->pcs[i].pcs.ops = priv->info->pcs_ops; - priv->pcs[i].priv = priv; - priv->pcs[i].port = i; -+ if (mt753x_is_mac_port(i)) -+ priv->pcs[i].pcs.poll = 1; - } - - ret = priv->info->sw_setup(ds); ---- a/drivers/net/dsa/mt7530.h -+++ b/drivers/net/dsa/mt7530.h -@@ -373,6 +373,7 @@ enum mt7530_vlan_port_acc_frm { - #define MT7531_SGMII_LINK_STATUS BIT(18) - #define MT7531_SGMII_AN_ENABLE BIT(12) - #define MT7531_SGMII_AN_RESTART BIT(9) -+#define MT7531_SGMII_AN_COMPLETE BIT(21) - - /* Register for SGMII PCS_SPPED_ABILITY */ - #define MT7531_PCS_SPEED_ABILITY(p) MT7531_SGMII_REG(p, 0x08) diff --git a/target/linux/generic/backport-6.1/783-v6.1-net-sfp-re-implement-soft-state-polling-setup.patch b/target/linux/generic/backport-6.1/783-v6.1-net-sfp-re-implement-soft-state-polling-setup.patch deleted file mode 100644 index 77cd336d364c..000000000000 --- a/target/linux/generic/backport-6.1/783-v6.1-net-sfp-re-implement-soft-state-polling-setup.patch +++ /dev/null @@ -1,98 +0,0 @@ -From 8475c4b70b040f9d8cbc308100f2c4d865f810b3 Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Tue, 13 Sep 2022 20:06:27 +0100 -Subject: [PATCH 1/1] net: sfp: re-implement soft state polling setup - -Re-implement the decision making for soft state polling. Instead of -generating the soft state mask in sfp_soft_start_poll() by looking at -which GPIOs are available, record their availability in -sfp_sm_mod_probe() in sfp->state_hw_mask. - -This will then allow us to clear bits in sfp->state_hw_mask in module -specific quirks when the hardware signals should not be used, thereby -allowing us to switch to using the software state polling. 
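
Taken together, the two masks end up interacting as in this sketch of the resulting sfp_get_state() logic (reassembled from the hunks below for readability; not itself part of any dropped patch):

    /* sfp_sm_mod_probe() records which status GPIOs exist in state_hw_mask;
     * module fixups may later clear bits for signals that must not be trusted.
     */
    static unsigned int sfp_get_state(struct sfp *sfp)
    {
        unsigned int soft = sfp->state_soft_mask & (SFP_F_LOS | SFP_F_TX_FAULT);
        unsigned int state;

        /* only honour the hardware pins we decided to keep */
        state = sfp->get_state(sfp) & sfp->state_hw_mask;
        if (state & SFP_F_PRESENT && soft)
            state |= sfp_soft_get_state(sfp);   /* poll the module's soft state instead */

        return state;
    }

while sfp_soft_start_poll() now computes state_soft_mask as ~state_hw_mask masked by the soft-status bits the module's EEPROM advertises, so any signal removed from the hardware mask is automatically picked up by software polling.
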
- -Signed-off-by: Russell King (Oracle) -Signed-off-by: Jakub Kicinski ---- - drivers/net/phy/sfp.c | 38 ++++++++++++++++++++++++++------------ - 1 file changed, 26 insertions(+), 12 deletions(-) - ---- a/drivers/net/phy/sfp.c -+++ b/drivers/net/phy/sfp.c -@@ -240,6 +240,7 @@ struct sfp { - bool need_poll; - - struct mutex st_mutex; /* Protects state */ -+ unsigned int state_hw_mask; - unsigned int state_soft_mask; - unsigned int state; - struct delayed_work poll; -@@ -505,17 +506,18 @@ static void sfp_soft_set_state(struct sf - static void sfp_soft_start_poll(struct sfp *sfp) - { - const struct sfp_eeprom_id *id = &sfp->id; -+ unsigned int mask = 0; - - sfp->state_soft_mask = 0; -- if (id->ext.enhopts & SFP_ENHOPTS_SOFT_TX_DISABLE && -- !sfp->gpio[GPIO_TX_DISABLE]) -- sfp->state_soft_mask |= SFP_F_TX_DISABLE; -- if (id->ext.enhopts & SFP_ENHOPTS_SOFT_TX_FAULT && -- !sfp->gpio[GPIO_TX_FAULT]) -- sfp->state_soft_mask |= SFP_F_TX_FAULT; -- if (id->ext.enhopts & SFP_ENHOPTS_SOFT_RX_LOS && -- !sfp->gpio[GPIO_LOS]) -- sfp->state_soft_mask |= SFP_F_LOS; -+ if (id->ext.enhopts & SFP_ENHOPTS_SOFT_TX_DISABLE) -+ mask |= SFP_F_TX_DISABLE; -+ if (id->ext.enhopts & SFP_ENHOPTS_SOFT_TX_FAULT) -+ mask |= SFP_F_TX_FAULT; -+ if (id->ext.enhopts & SFP_ENHOPTS_SOFT_RX_LOS) -+ mask |= SFP_F_LOS; -+ -+ // Poll the soft state for hardware pins we want to ignore -+ sfp->state_soft_mask = ~sfp->state_hw_mask & mask; - - if (sfp->state_soft_mask & (SFP_F_LOS | SFP_F_TX_FAULT) && - !sfp->need_poll) -@@ -529,10 +531,11 @@ static void sfp_soft_stop_poll(struct sf - - static unsigned int sfp_get_state(struct sfp *sfp) - { -- unsigned int state = sfp->get_state(sfp); -+ unsigned int soft = sfp->state_soft_mask & (SFP_F_LOS | SFP_F_TX_FAULT); -+ unsigned int state; - -- if (state & SFP_F_PRESENT && -- sfp->state_soft_mask & (SFP_F_LOS | SFP_F_TX_FAULT)) -+ state = sfp->get_state(sfp) & sfp->state_hw_mask; -+ if (state & SFP_F_PRESENT && soft) - state |= sfp_soft_get_state(sfp); - - return state; -@@ -1942,6 +1945,15 @@ static int sfp_sm_mod_probe(struct sfp * - if (ret < 0) - return ret; - -+ /* Initialise state bits to use from hardware */ -+ sfp->state_hw_mask = SFP_F_PRESENT; -+ if (sfp->gpio[GPIO_TX_DISABLE]) -+ sfp->state_hw_mask |= SFP_F_TX_DISABLE; -+ if (sfp->gpio[GPIO_TX_FAULT]) -+ sfp->state_hw_mask |= SFP_F_TX_FAULT; -+ if (sfp->gpio[GPIO_LOS]) -+ sfp->state_hw_mask |= SFP_F_LOS; -+ - if (!memcmp(id.base.vendor_name, "ALCATELLUCENT ", 16) && - !memcmp(id.base.vendor_pn, "3FE46541AA ", 16)) - sfp->module_t_start_up = T_START_UP_BAD_GPON; -@@ -2568,6 +2580,8 @@ static int sfp_probe(struct platform_dev - return PTR_ERR(sfp->gpio[i]); - } - -+ sfp->state_hw_mask = SFP_F_PRESENT; -+ - sfp->get_state = sfp_gpio_get_state; - sfp->set_state = sfp_gpio_set_state; - diff --git a/target/linux/generic/backport-6.1/784-v6.1-net-sfp-move-quirk-handling-into-sfp.c.patch b/target/linux/generic/backport-6.1/784-v6.1-net-sfp-move-quirk-handling-into-sfp.c.patch deleted file mode 100644 index 02fa28c5af29..000000000000 --- a/target/linux/generic/backport-6.1/784-v6.1-net-sfp-move-quirk-handling-into-sfp.c.patch +++ /dev/null @@ -1,291 +0,0 @@ -From 23571c7b96437483d28a990c906cc81f5f66374e Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Tue, 13 Sep 2022 20:06:32 +0100 -Subject: [PATCH 1/1] net: sfp: move quirk handling into sfp.c - -We need to handle more quirks than just those which affect the link -modes of the module. 
Move the quirk lookup into sfp.c, and pass the -quirk to sfp-bus.c - -Signed-off-by: Russell King (Oracle) -Signed-off-by: Jakub Kicinski ---- - drivers/net/phy/sfp-bus.c | 98 ++------------------------------------- - drivers/net/phy/sfp.c | 94 ++++++++++++++++++++++++++++++++++++- - drivers/net/phy/sfp.h | 9 +++- - 3 files changed, 104 insertions(+), 97 deletions(-) - ---- a/drivers/net/phy/sfp-bus.c -+++ b/drivers/net/phy/sfp-bus.c -@@ -10,12 +10,6 @@ - - #include "sfp.h" - --struct sfp_quirk { -- const char *vendor; -- const char *part; -- void (*modes)(const struct sfp_eeprom_id *id, unsigned long *modes); --}; -- - /** - * struct sfp_bus - internal representation of a sfp bus - */ -@@ -38,93 +32,6 @@ struct sfp_bus { - bool started; - }; - --static void sfp_quirk_2500basex(const struct sfp_eeprom_id *id, -- unsigned long *modes) --{ -- phylink_set(modes, 2500baseX_Full); --} -- --static void sfp_quirk_ubnt_uf_instant(const struct sfp_eeprom_id *id, -- unsigned long *modes) --{ -- /* Ubiquiti U-Fiber Instant module claims that support all transceiver -- * types including 10G Ethernet which is not truth. So clear all claimed -- * modes and set only one mode which module supports: 1000baseX_Full. -- */ -- phylink_zero(modes); -- phylink_set(modes, 1000baseX_Full); --} -- --static const struct sfp_quirk sfp_quirks[] = { -- { -- // Alcatel Lucent G-010S-P can operate at 2500base-X, but -- // incorrectly report 2500MBd NRZ in their EEPROM -- .vendor = "ALCATELLUCENT", -- .part = "G010SP", -- .modes = sfp_quirk_2500basex, -- }, { -- // Alcatel Lucent G-010S-A can operate at 2500base-X, but -- // report 3.2GBd NRZ in their EEPROM -- .vendor = "ALCATELLUCENT", -- .part = "3FE46541AA", -- .modes = sfp_quirk_2500basex, -- }, { -- // Huawei MA5671A can operate at 2500base-X, but report 1.2GBd -- // NRZ in their EEPROM -- .vendor = "HUAWEI", -- .part = "MA5671A", -- .modes = sfp_quirk_2500basex, -- }, { -- // Lantech 8330-262D-E can operate at 2500base-X, but -- // incorrectly report 2500MBd NRZ in their EEPROM -- .vendor = "Lantech", -- .part = "8330-262D-E", -- .modes = sfp_quirk_2500basex, -- }, { -- .vendor = "UBNT", -- .part = "UF-INSTANT", -- .modes = sfp_quirk_ubnt_uf_instant, -- }, --}; -- --static size_t sfp_strlen(const char *str, size_t maxlen) --{ -- size_t size, i; -- -- /* Trailing characters should be filled with space chars */ -- for (i = 0, size = 0; i < maxlen; i++) -- if (str[i] != ' ') -- size = i + 1; -- -- return size; --} -- --static bool sfp_match(const char *qs, const char *str, size_t len) --{ -- if (!qs) -- return true; -- if (strlen(qs) != len) -- return false; -- return !strncmp(qs, str, len); --} -- --static const struct sfp_quirk *sfp_lookup_quirk(const struct sfp_eeprom_id *id) --{ -- const struct sfp_quirk *q; -- unsigned int i; -- size_t vs, ps; -- -- vs = sfp_strlen(id->base.vendor_name, ARRAY_SIZE(id->base.vendor_name)); -- ps = sfp_strlen(id->base.vendor_pn, ARRAY_SIZE(id->base.vendor_pn)); -- -- for (i = 0, q = sfp_quirks; i < ARRAY_SIZE(sfp_quirks); i++, q++) -- if (sfp_match(q->vendor, id->base.vendor_name, vs) && -- sfp_match(q->part, id->base.vendor_pn, ps)) -- return q; -- -- return NULL; --} -- - /** - * sfp_parse_port() - Parse the EEPROM base ID, setting the port type - * @bus: a pointer to the &struct sfp_bus structure for the sfp module -@@ -786,12 +693,13 @@ void sfp_link_down(struct sfp_bus *bus) - } - EXPORT_SYMBOL_GPL(sfp_link_down); - --int sfp_module_insert(struct sfp_bus *bus, const struct sfp_eeprom_id *id) -+int sfp_module_insert(struct 
sfp_bus *bus, const struct sfp_eeprom_id *id, -+ const struct sfp_quirk *quirk) - { - const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus); - int ret = 0; - -- bus->sfp_quirk = sfp_lookup_quirk(id); -+ bus->sfp_quirk = quirk; - - if (ops && ops->module_insert) - ret = ops->module_insert(bus->upstream, id); ---- a/drivers/net/phy/sfp.c -+++ b/drivers/net/phy/sfp.c -@@ -259,6 +259,8 @@ struct sfp { - unsigned int module_t_start_up; - bool tx_fault_ignore; - -+ const struct sfp_quirk *quirk; -+ - #if IS_ENABLED(CONFIG_HWMON) - struct sfp_diag diag; - struct delayed_work hwmon_probe; -@@ -315,6 +317,93 @@ static const struct of_device_id sfp_of_ - }; - MODULE_DEVICE_TABLE(of, sfp_of_match); - -+static void sfp_quirk_2500basex(const struct sfp_eeprom_id *id, -+ unsigned long *modes) -+{ -+ linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT, modes); -+} -+ -+static void sfp_quirk_ubnt_uf_instant(const struct sfp_eeprom_id *id, -+ unsigned long *modes) -+{ -+ /* Ubiquiti U-Fiber Instant module claims that support all transceiver -+ * types including 10G Ethernet which is not truth. So clear all claimed -+ * modes and set only one mode which module supports: 1000baseX_Full. -+ */ -+ linkmode_zero(modes); -+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, modes); -+} -+ -+static const struct sfp_quirk sfp_quirks[] = { -+ { -+ // Alcatel Lucent G-010S-P can operate at 2500base-X, but -+ // incorrectly report 2500MBd NRZ in their EEPROM -+ .vendor = "ALCATELLUCENT", -+ .part = "G010SP", -+ .modes = sfp_quirk_2500basex, -+ }, { -+ // Alcatel Lucent G-010S-A can operate at 2500base-X, but -+ // report 3.2GBd NRZ in their EEPROM -+ .vendor = "ALCATELLUCENT", -+ .part = "3FE46541AA", -+ .modes = sfp_quirk_2500basex, -+ }, { -+ // Huawei MA5671A can operate at 2500base-X, but report 1.2GBd -+ // NRZ in their EEPROM -+ .vendor = "HUAWEI", -+ .part = "MA5671A", -+ .modes = sfp_quirk_2500basex, -+ }, { -+ // Lantech 8330-262D-E can operate at 2500base-X, but -+ // incorrectly report 2500MBd NRZ in their EEPROM -+ .vendor = "Lantech", -+ .part = "8330-262D-E", -+ .modes = sfp_quirk_2500basex, -+ }, { -+ .vendor = "UBNT", -+ .part = "UF-INSTANT", -+ .modes = sfp_quirk_ubnt_uf_instant, -+ }, -+}; -+ -+static size_t sfp_strlen(const char *str, size_t maxlen) -+{ -+ size_t size, i; -+ -+ /* Trailing characters should be filled with space chars */ -+ for (i = 0, size = 0; i < maxlen; i++) -+ if (str[i] != ' ') -+ size = i + 1; -+ -+ return size; -+} -+ -+static bool sfp_match(const char *qs, const char *str, size_t len) -+{ -+ if (!qs) -+ return true; -+ if (strlen(qs) != len) -+ return false; -+ return !strncmp(qs, str, len); -+} -+ -+static const struct sfp_quirk *sfp_lookup_quirk(const struct sfp_eeprom_id *id) -+{ -+ const struct sfp_quirk *q; -+ unsigned int i; -+ size_t vs, ps; -+ -+ vs = sfp_strlen(id->base.vendor_name, ARRAY_SIZE(id->base.vendor_name)); -+ ps = sfp_strlen(id->base.vendor_pn, ARRAY_SIZE(id->base.vendor_pn)); -+ -+ for (i = 0, q = sfp_quirks; i < ARRAY_SIZE(sfp_quirks); i++, q++) -+ if (sfp_match(q->vendor, id->base.vendor_name, vs) && -+ sfp_match(q->part, id->base.vendor_pn, ps)) -+ return q; -+ -+ return NULL; -+} -+ - static unsigned long poll_jiffies; - - static unsigned int sfp_gpio_get_state(struct sfp *sfp) -@@ -1966,6 +2055,8 @@ static int sfp_sm_mod_probe(struct sfp * - else - sfp->tx_fault_ignore = false; - -+ sfp->quirk = sfp_lookup_quirk(&id); -+ - return 0; - } - -@@ -2077,7 +2168,8 @@ static void sfp_sm_module(struct sfp *sf - break; - - /* Report the 
module insertion to the upstream device */ -- err = sfp_module_insert(sfp->sfp_bus, &sfp->id); -+ err = sfp_module_insert(sfp->sfp_bus, &sfp->id, -+ sfp->quirk); - if (err < 0) { - sfp_sm_mod_next(sfp, SFP_MOD_ERROR, 0); - break; ---- a/drivers/net/phy/sfp.h -+++ b/drivers/net/phy/sfp.h -@@ -6,6 +6,12 @@ - - struct sfp; - -+struct sfp_quirk { -+ const char *vendor; -+ const char *part; -+ void (*modes)(const struct sfp_eeprom_id *id, unsigned long *modes); -+}; -+ - struct sfp_socket_ops { - void (*attach)(struct sfp *sfp); - void (*detach)(struct sfp *sfp); -@@ -23,7 +29,8 @@ int sfp_add_phy(struct sfp_bus *bus, str - void sfp_remove_phy(struct sfp_bus *bus); - void sfp_link_up(struct sfp_bus *bus); - void sfp_link_down(struct sfp_bus *bus); --int sfp_module_insert(struct sfp_bus *bus, const struct sfp_eeprom_id *id); -+int sfp_module_insert(struct sfp_bus *bus, const struct sfp_eeprom_id *id, -+ const struct sfp_quirk *quirk); - void sfp_module_remove(struct sfp_bus *bus); - int sfp_module_start(struct sfp_bus *bus); - void sfp_module_stop(struct sfp_bus *bus); diff --git a/target/linux/generic/backport-6.1/785-v6.1-net-sfp-move-Alcatel-Lucent-3FE46541AA-fixup.patch b/target/linux/generic/backport-6.1/785-v6.1-net-sfp-move-Alcatel-Lucent-3FE46541AA-fixup.patch deleted file mode 100644 index b076676cff6d..000000000000 --- a/target/linux/generic/backport-6.1/785-v6.1-net-sfp-move-Alcatel-Lucent-3FE46541AA-fixup.patch +++ /dev/null @@ -1,69 +0,0 @@ -From 275416754e9a262c97a1ad6f806a4bc6e0464aa2 Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Tue, 13 Sep 2022 20:06:37 +0100 -Subject: [PATCH 1/1] net: sfp: move Alcatel Lucent 3FE46541AA fixup - -Add a new fixup mechanism to the SFP quirks, and use it for this -module. - -Signed-off-by: Russell King (Oracle) -Signed-off-by: Jakub Kicinski ---- - drivers/net/phy/sfp.c | 14 +++++++++----- - drivers/net/phy/sfp.h | 1 + - 2 files changed, 10 insertions(+), 5 deletions(-) - ---- a/drivers/net/phy/sfp.c -+++ b/drivers/net/phy/sfp.c -@@ -317,6 +317,11 @@ static const struct of_device_id sfp_of_ - }; - MODULE_DEVICE_TABLE(of, sfp_of_match); - -+static void sfp_fixup_long_startup(struct sfp *sfp) -+{ -+ sfp->module_t_start_up = T_START_UP_BAD_GPON; -+} -+ - static void sfp_quirk_2500basex(const struct sfp_eeprom_id *id, - unsigned long *modes) - { -@@ -347,6 +352,7 @@ static const struct sfp_quirk sfp_quirks - .vendor = "ALCATELLUCENT", - .part = "3FE46541AA", - .modes = sfp_quirk_2500basex, -+ .fixup = sfp_fixup_long_startup, - }, { - // Huawei MA5671A can operate at 2500base-X, but report 1.2GBd - // NRZ in their EEPROM -@@ -2043,11 +2049,7 @@ static int sfp_sm_mod_probe(struct sfp * - if (sfp->gpio[GPIO_LOS]) - sfp->state_hw_mask |= SFP_F_LOS; - -- if (!memcmp(id.base.vendor_name, "ALCATELLUCENT ", 16) && -- !memcmp(id.base.vendor_pn, "3FE46541AA ", 16)) -- sfp->module_t_start_up = T_START_UP_BAD_GPON; -- else -- sfp->module_t_start_up = T_START_UP; -+ sfp->module_t_start_up = T_START_UP; - - if (!memcmp(id.base.vendor_name, "HUAWEI ", 16) && - !memcmp(id.base.vendor_pn, "MA5671A ", 16)) -@@ -2056,6 +2058,8 @@ static int sfp_sm_mod_probe(struct sfp * - sfp->tx_fault_ignore = false; - - sfp->quirk = sfp_lookup_quirk(&id); -+ if (sfp->quirk && sfp->quirk->fixup) -+ sfp->quirk->fixup(sfp); - - return 0; - } ---- a/drivers/net/phy/sfp.h -+++ b/drivers/net/phy/sfp.h -@@ -10,6 +10,7 @@ struct sfp_quirk { - const char *vendor; - const char *part; - void (*modes)(const struct sfp_eeprom_id *id, unsigned long *modes); -+ void (*fixup)(struct 
sfp *sfp); - }; - - struct sfp_socket_ops { diff --git a/target/linux/generic/backport-6.1/786-v6.1-net-sfp-move-Huawei-MA5671A-fixup.patch b/target/linux/generic/backport-6.1/786-v6.1-net-sfp-move-Huawei-MA5671A-fixup.patch deleted file mode 100644 index 7f856e5b5bee..000000000000 --- a/target/linux/generic/backport-6.1/786-v6.1-net-sfp-move-Huawei-MA5671A-fixup.patch +++ /dev/null @@ -1,48 +0,0 @@ -From 5029be761161374a3624aa7b4670174c35449bf5 Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Tue, 13 Sep 2022 20:06:42 +0100 -Subject: [PATCH 1/1] net: sfp: move Huawei MA5671A fixup - -Move this module over to the new fixup mechanism. - -Signed-off-by: Russell King (Oracle) -Signed-off-by: Jakub Kicinski ---- - drivers/net/phy/sfp.c | 12 +++++++----- - 1 file changed, 7 insertions(+), 5 deletions(-) - ---- a/drivers/net/phy/sfp.c -+++ b/drivers/net/phy/sfp.c -@@ -322,6 +322,11 @@ static void sfp_fixup_long_startup(struc - sfp->module_t_start_up = T_START_UP_BAD_GPON; - } - -+static void sfp_fixup_ignore_tx_fault(struct sfp *sfp) -+{ -+ sfp->tx_fault_ignore = true; -+} -+ - static void sfp_quirk_2500basex(const struct sfp_eeprom_id *id, - unsigned long *modes) - { -@@ -359,6 +364,7 @@ static const struct sfp_quirk sfp_quirks - .vendor = "HUAWEI", - .part = "MA5671A", - .modes = sfp_quirk_2500basex, -+ .fixup = sfp_fixup_ignore_tx_fault, - }, { - // Lantech 8330-262D-E can operate at 2500base-X, but - // incorrectly report 2500MBd NRZ in their EEPROM -@@ -2051,11 +2057,7 @@ static int sfp_sm_mod_probe(struct sfp * - - sfp->module_t_start_up = T_START_UP; - -- if (!memcmp(id.base.vendor_name, "HUAWEI ", 16) && -- !memcmp(id.base.vendor_pn, "MA5671A ", 16)) -- sfp->tx_fault_ignore = true; -- else -- sfp->tx_fault_ignore = false; -+ sfp->tx_fault_ignore = false; - - sfp->quirk = sfp_lookup_quirk(&id); - if (sfp->quirk && sfp->quirk->fixup) diff --git a/target/linux/generic/backport-6.1/787-v6.1-net-sfp-add-support-for-HALNy-GPON-SFP.patch b/target/linux/generic/backport-6.1/787-v6.1-net-sfp-add-support-for-HALNy-GPON-SFP.patch deleted file mode 100644 index 81108e19c167..000000000000 --- a/target/linux/generic/backport-6.1/787-v6.1-net-sfp-add-support-for-HALNy-GPON-SFP.patch +++ /dev/null @@ -1,79 +0,0 @@ -From 73472c830eae5fce2107f7f086f1e6827d215caf Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Tue, 13 Sep 2022 20:06:48 +0100 -Subject: [PATCH 1/1] net: sfp: add support for HALNy GPON SFP - -Add a quirk for the HALNy HL-GSFP module, which appears to have an -inverted RX_LOS signal, and maybe uses TX_FAULT as a serial port -transmit pin. Rather than use these hardware signals, switch to -using software polling for these status signals. - -Signed-off-by: Russell King (Oracle) -Signed-off-by: Jakub Kicinski ---- - drivers/net/phy/sfp-bus.c | 2 +- - drivers/net/phy/sfp.c | 21 ++++++++++++++++++--- - 2 files changed, 19 insertions(+), 4 deletions(-) - ---- a/drivers/net/phy/sfp-bus.c -+++ b/drivers/net/phy/sfp-bus.c -@@ -283,7 +283,7 @@ void sfp_parse_support(struct sfp_bus *b - phylink_set(modes, 2500baseX_Full); - } - -- if (bus->sfp_quirk) -+ if (bus->sfp_quirk && bus->sfp_quirk->modes) - bus->sfp_quirk->modes(id, modes); - - linkmode_or(support, support, modes); ---- a/drivers/net/phy/sfp.c -+++ b/drivers/net/phy/sfp.c -@@ -327,6 +327,15 @@ static void sfp_fixup_ignore_tx_fault(st - sfp->tx_fault_ignore = true; - } - -+static void sfp_fixup_halny_gsfp(struct sfp *sfp) -+{ -+ /* Ignore the TX_FAULT and LOS signals on this module. 
-+ * these are possibly used for other purposes on this -+ * module, e.g. a serial port. -+ */ -+ sfp->state_hw_mask &= ~(SFP_F_TX_FAULT | SFP_F_LOS); -+} -+ - static void sfp_quirk_2500basex(const struct sfp_eeprom_id *id, - unsigned long *modes) - { -@@ -359,6 +368,10 @@ static const struct sfp_quirk sfp_quirks - .modes = sfp_quirk_2500basex, - .fixup = sfp_fixup_long_startup, - }, { -+ .vendor = "HALNy", -+ .part = "HL-GSFP", -+ .fixup = sfp_fixup_halny_gsfp, -+ }, { - // Huawei MA5671A can operate at 2500base-X, but report 1.2GBd - // NRZ in their EEPROM - .vendor = "HUAWEI", -@@ -375,16 +388,18 @@ static const struct sfp_quirk sfp_quirks - .vendor = "UBNT", - .part = "UF-INSTANT", - .modes = sfp_quirk_ubnt_uf_instant, -- }, -+ } - }; - - static size_t sfp_strlen(const char *str, size_t maxlen) - { - size_t size, i; - -- /* Trailing characters should be filled with space chars */ -+ /* Trailing characters should be filled with space chars, but -+ * some manufacturers can't read SFF-8472 and use NUL. -+ */ - for (i = 0, size = 0; i < maxlen; i++) -- if (str[i] != ' ') -+ if (str[i] != ' ' && str[i] != '\0') - size = i + 1; - - return size; diff --git a/target/linux/generic/backport-6.1/789-v6.3-net-sfp-add-quirk-enabling-2500Base-x-for-HG-MXPD-48.patch b/target/linux/generic/backport-6.1/789-v6.3-net-sfp-add-quirk-enabling-2500Base-x-for-HG-MXPD-48.patch deleted file mode 100644 index 413c73f54786..000000000000 --- a/target/linux/generic/backport-6.1/789-v6.3-net-sfp-add-quirk-enabling-2500Base-x-for-HG-MXPD-48.patch +++ /dev/null @@ -1,37 +0,0 @@ -From ad651d68cee75e9ac20002254c4e5d09ee67a84b Mon Sep 17 00:00:00 2001 -From: Daniel Golle -Date: Sun, 2 Apr 2023 12:44:37 +0100 -Subject: [PATCH] net: sfp: add quirk enabling 2500Base-x for HG MXPD-483II - -The HG MXPD-483II 1310nm SFP module is meant to operate with 2500Base-X, -however, in their EEPROM they incorrectly specify: - Transceiver type : Ethernet: 1000BASE-LX - ... - BR, Nominal : 2600MBd - -Use sfp_quirk_2500basex for this module to allow 2500Base-X mode anyway. - -https://forum.banana-pi.org/t/bpi-r3-sfp-module-compatibility/14573/60 - -Reported-by: chowtom -Tested-by: chowtom -Signed-off-by: Daniel Golle -Reviewed-by: Russell King (Oracle) -Signed-off-by: David S. Miller ---- - drivers/net/phy/sfp.c | 4 ++++ - 1 file changed, 4 insertions(+) - ---- a/drivers/net/phy/sfp.c -+++ b/drivers/net/phy/sfp.c -@@ -372,6 +372,10 @@ static const struct sfp_quirk sfp_quirks - .part = "HL-GSFP", - .fixup = sfp_fixup_halny_gsfp, - }, { -+ .vendor = "HG GENUINE", -+ .part = "MXPD-483II", -+ .modes = sfp_quirk_2500basex, -+ }, { - // Huawei MA5671A can operate at 2500base-X, but report 1.2GBd - // NRZ in their EEPROM - .vendor = "HUAWEI", diff --git a/target/linux/generic/backport-6.1/790-v6.0-net-mii-add-mii_bmcr_encode_fixed.patch b/target/linux/generic/backport-6.1/790-v6.0-net-mii-add-mii_bmcr_encode_fixed.patch deleted file mode 100644 index 87d7ff6154d1..000000000000 --- a/target/linux/generic/backport-6.1/790-v6.0-net-mii-add-mii_bmcr_encode_fixed.patch +++ /dev/null @@ -1,55 +0,0 @@ -From bdb6cfe7512f7a214815a3092f0be50963dcacbc Mon Sep 17 00:00:00 2001 -From: "Russell King (Oracle)" -Date: Sat, 18 Jun 2022 11:28:32 +0100 -Subject: [PATCH] net: mii: add mii_bmcr_encode_fixed() - -Add a function to encode a fixed speed/duplex to a BMCR value. - -Signed-off-by: Russell King (Oracle) -Signed-off-by: David S. 
Miller ---- - include/linux/mii.h | 35 +++++++++++++++++++++++++++++++++++ - 1 file changed, 35 insertions(+) - ---- a/include/linux/mii.h -+++ b/include/linux/mii.h -@@ -595,4 +595,39 @@ static inline u8 mii_resolve_flowctrl_fd - return cap; - } - -+/** -+ * mii_bmcr_encode_fixed - encode fixed speed/duplex settings to a BMCR value -+ * @speed: a SPEED_* value -+ * @duplex: a DUPLEX_* value -+ * -+ * Encode the speed and duplex to a BMCR value. 2500, 1000, 100 and 10 Mbps are -+ * supported. 2500Mbps is encoded to 1000Mbps. Other speeds are encoded as 10 -+ * Mbps. Unknown duplex values are encoded to half-duplex. -+ */ -+static inline u16 mii_bmcr_encode_fixed(int speed, int duplex) -+{ -+ u16 bmcr; -+ -+ switch (speed) { -+ case SPEED_2500: -+ case SPEED_1000: -+ bmcr = BMCR_SPEED1000; -+ break; -+ -+ case SPEED_100: -+ bmcr = BMCR_SPEED100; -+ break; -+ -+ case SPEED_10: -+ default: -+ bmcr = BMCR_SPEED10; -+ break; -+ } -+ -+ if (duplex == DUPLEX_FULL) -+ bmcr |= BMCR_FULLDPLX; -+ -+ return bmcr; -+} -+ - #endif /* __LINUX_MII_H__ */ diff --git a/target/linux/generic/backport-6.1/797-v5.17-net-usb-ax88179_178a-add-TSO-feature.patch b/target/linux/generic/backport-6.1/797-v5.17-net-usb-ax88179_178a-add-TSO-feature.patch deleted file mode 100644 index a2168aaba5f6..000000000000 --- a/target/linux/generic/backport-6.1/797-v5.17-net-usb-ax88179_178a-add-TSO-feature.patch +++ /dev/null @@ -1,68 +0,0 @@ -From 16b1c4e01c89ba07367461e0bc4cb84993c2d027 Mon Sep 17 00:00:00 2001 -From: Jacky Chou -Date: Mon, 15 Nov 2021 11:49:41 +0800 -Subject: [PATCH] net: usb: ax88179_178a: add TSO feature - -On low-effciency embedded platforms, transmission performance is poor -due to on Bulk-out with single packet. -Adding TSO feature improves the transmission performance and reduces -the number of interrupt caused by Bulk-out complete. - -Reference to module, net: usb: aqc111. - -Signed-off-by: Jacky Chou -Signed-off-by: David S. Miller ---- - drivers/net/usb/ax88179_178a.c | 17 +++++++++++------ - 1 file changed, 11 insertions(+), 6 deletions(-) - ---- a/drivers/net/usb/ax88179_178a.c -+++ b/drivers/net/usb/ax88179_178a.c -@@ -1377,11 +1377,12 @@ static int ax88179_bind(struct usbnet *d - dev->mii.phy_id = 0x03; - dev->mii.supports_gmii = 1; - -- dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | -- NETIF_F_RXCSUM; -+ dev->net->features |= NETIF_F_SG | NETIF_F_IP_CSUM | -+ NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | NETIF_F_TSO; - -- dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | -- NETIF_F_RXCSUM; -+ dev->net->hw_features |= dev->net->features; -+ -+ netif_set_gso_max_size(dev->net, 16384); - - /* Enable checksum offload */ - *tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP | -@@ -1587,17 +1588,19 @@ ax88179_tx_fixup(struct usbnet *dev, str - { - u32 tx_hdr1, tx_hdr2; - int frame_size = dev->maxpacket; -- int mss = skb_shinfo(skb)->gso_size; - int headroom; - void *ptr; - - tx_hdr1 = skb->len; -- tx_hdr2 = mss; -+ tx_hdr2 = skb_shinfo(skb)->gso_size; /* Set TSO mss */ - if (((skb->len + 8) % frame_size) == 0) - tx_hdr2 |= 0x80008000; /* Enable padding */ - - headroom = skb_headroom(skb) - 8; - -+ if ((dev->net->features & NETIF_F_SG) && skb_linearize(skb)) -+ return NULL; -+ - if ((skb_header_cloned(skb) || headroom < 0) && - pskb_expand_head(skb, headroom < 0 ? 
8 : 0, 0, GFP_ATOMIC)) { - dev_kfree_skb_any(skb); -@@ -1608,6 +1611,8 @@ ax88179_tx_fixup(struct usbnet *dev, str - put_unaligned_le32(tx_hdr1, ptr); - put_unaligned_le32(tx_hdr2, ptr + 4); - -+ usbnet_set_skb_tx_stats(skb, (skb_shinfo(skb)->gso_segs ?: 1), 0); -+ - return skb; - } - diff --git a/target/linux/generic/backport-6.1/800-v6.0-0001-dt-bindings-leds-add-Broadcom-s-BCM63138-controller.patch b/target/linux/generic/backport-6.1/800-v6.0-0001-dt-bindings-leds-add-Broadcom-s-BCM63138-controller.patch deleted file mode 100644 index b1072ce6408d..000000000000 --- a/target/linux/generic/backport-6.1/800-v6.0-0001-dt-bindings-leds-add-Broadcom-s-BCM63138-controller.patch +++ /dev/null @@ -1,125 +0,0 @@ -From 13344f8ce8a0d98aa7f5d69ce3b47393c73a343b Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= -Date: Mon, 27 Dec 2021 15:59:04 +0100 -Subject: [PATCH] dt-bindings: leds: add Broadcom's BCM63138 controller -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Broadcom used 2 LEDs hardware blocks for their BCM63xx SoCs: -1. Older one (BCM6318, BCM6328, BCM6362, BCM63268, BCM6838) -2. Newer one (BCM6848, BCM6858, BCM63138, BCM63148, BCM63381, BCM68360) - -The newer one was also later also used on BCM4908 SoC. - -Old block is already documented in the leds-bcm6328.yaml. This binding -documents the new one which uses different registers & programming. It's -first used in BCM63138 thus the binding name. - -Signed-off-by: Rafał Miłecki -Reviewed-by: Rob Herring -Reviewed-by: Florian Fainelli -Signed-off-by: Pavel Machek ---- - .../bindings/leds/leds-bcm63138.yaml | 95 +++++++++++++++++++ - 1 file changed, 95 insertions(+) - create mode 100644 Documentation/devicetree/bindings/leds/leds-bcm63138.yaml - ---- /dev/null -+++ b/Documentation/devicetree/bindings/leds/leds-bcm63138.yaml -@@ -0,0 +1,95 @@ -+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause -+%YAML 1.2 -+--- -+$id: http://devicetree.org/schemas/leds/leds-bcm63138.yaml# -+$schema: http://devicetree.org/meta-schemas/core.yaml# -+ -+title: Broadcom's BCM63138 LEDs controller -+ -+maintainers: -+ - Rafał Miłecki -+ -+description: | -+ This LEDs controller was first used on BCM63138 and later reused on BCM4908, -+ BCM6848, BCM6858, BCM63138, BCM63148, BCM63381 and BCM68360 SoCs. -+ -+ It supports up to 32 LEDs that can be connected parallelly or serially. It -+ also includes limited support for hardware blinking. -+ -+ Binding serially connected LEDs isn't documented yet. -+ -+properties: -+ compatible: -+ oneOf: -+ - items: -+ - enum: -+ - brcm,bcm4908-leds -+ - brcm,bcm6848-leds -+ - brcm,bcm6858-leds -+ - brcm,bcm63148-leds -+ - brcm,bcm63381-leds -+ - brcm,bcm68360-leds -+ - const: brcm,bcm63138-leds -+ - const: brcm,bcm63138-leds -+ -+ reg: -+ maxItems: 1 -+ -+ "#address-cells": -+ const: 1 -+ -+ "#size-cells": -+ const: 0 -+ -+patternProperties: -+ "^led@[a-f0-9]+$": -+ type: object -+ -+ $ref: common.yaml# -+ -+ properties: -+ reg: -+ maxItems: 1 -+ description: LED pin number -+ -+ active-low: -+ type: boolean -+ description: Makes LED active low. 
-+ -+ required: -+ - reg -+ -+ unevaluatedProperties: false -+ -+required: -+ - reg -+ - "#address-cells" -+ - "#size-cells" -+ -+additionalProperties: false -+ -+examples: -+ - | -+ #include -+ -+ leds@ff800800 { -+ compatible = "brcm,bcm4908-leds", "brcm,bcm63138-leds"; -+ reg = <0xff800800 0xdc>; -+ -+ #address-cells = <1>; -+ #size-cells = <0>; -+ -+ led@0 { -+ reg = <0x0>; -+ function = LED_FUNCTION_POWER; -+ color = ; -+ default-state = "on"; -+ }; -+ -+ led@3 { -+ reg = <0x3>; -+ function = LED_FUNCTION_STATUS; -+ color = ; -+ active-low; -+ }; -+ }; diff --git a/target/linux/generic/backport-6.1/800-v6.0-0002-leds-bcm63138-add-support-for-BCM63138-controller.patch b/target/linux/generic/backport-6.1/800-v6.0-0002-leds-bcm63138-add-support-for-BCM63138-controller.patch deleted file mode 100644 index 376584574fcd..000000000000 --- a/target/linux/generic/backport-6.1/800-v6.0-0002-leds-bcm63138-add-support-for-BCM63138-controller.patch +++ /dev/null @@ -1,356 +0,0 @@ -From a0ba692072d89075d0a75c7ad9df31f2c1ee9a1c Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= -Date: Mon, 27 Dec 2021 15:59:05 +0100 -Subject: [PATCH] leds: bcm63138: add support for BCM63138 controller -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -It's a new controller first introduced in BCM63138 SoC. Later it was -also used in BCM4908, some BCM68xx and some BCM63xxx SoCs. - -Signed-off-by: Rafał Miłecki -Reviewed-by: Florian Fainelli -Signed-off-by: Pavel Machek ---- - drivers/leds/blink/Kconfig | 12 ++ - drivers/leds/blink/Makefile | 1 + - drivers/leds/blink/leds-bcm63138.c | 308 +++++++++++++++++++++++++++++ - 3 files changed, 321 insertions(+) - create mode 100644 drivers/leds/blink/leds-bcm63138.c - ---- a/drivers/leds/blink/Kconfig -+++ b/drivers/leds/blink/Kconfig -@@ -1,3 +1,15 @@ -+config LEDS_BCM63138 -+ tristate "LED Support for Broadcom BCM63138 SoC" -+ depends on LEDS_CLASS -+ depends on ARCH_BCM4908 || ARCH_BCM_5301X || BCM63XX || COMPILE_TEST -+ depends on HAS_IOMEM -+ depends on OF -+ default ARCH_BCM4908 -+ help -+ This option enables support for LED controller that is part of -+ BCM63138 SoC. The same hardware block is known to be also used -+ in BCM4908, BCM6848, BCM6858, BCM63148, BCM63381 and BCM68360. 
-+ - config LEDS_LGM - tristate "LED support for LGM SoC series" - depends on X86 || COMPILE_TEST ---- a/drivers/leds/blink/Makefile -+++ b/drivers/leds/blink/Makefile -@@ -1,2 +1,3 @@ - # SPDX-License-Identifier: GPL-2.0 -+obj-$(CONFIG_LEDS_BCM63138) += leds-bcm63138.o - obj-$(CONFIG_LEDS_LGM) += leds-lgm-sso.o ---- /dev/null -+++ b/drivers/leds/blink/leds-bcm63138.c -@@ -0,0 +1,308 @@ -+// SPDX-License-Identifier: GPL-2.0-only -+/* -+ * Copyright (C) 2021 Rafał Miłecki -+ */ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define BCM63138_MAX_LEDS 32 -+#define BCM63138_MAX_BRIGHTNESS 9 -+ -+#define BCM63138_LED_BITS 4 /* how many bits control a single LED */ -+#define BCM63138_LED_MASK ((1 << BCM63138_LED_BITS) - 1) /* 0xf */ -+#define BCM63138_LEDS_PER_REG (32 / BCM63138_LED_BITS) /* 8 */ -+ -+#define BCM63138_GLB_CTRL 0x00 -+#define BCM63138_GLB_CTRL_SERIAL_LED_DATA_PPOL 0x00000002 -+#define BCM63138_GLB_CTRL_SERIAL_LED_EN_POL 0x00000008 -+#define BCM63138_MASK 0x04 -+#define BCM63138_HW_LED_EN 0x08 -+#define BCM63138_SERIAL_LED_SHIFT_SEL 0x0c -+#define BCM63138_FLASH_RATE_CTRL1 0x10 -+#define BCM63138_FLASH_RATE_CTRL2 0x14 -+#define BCM63138_FLASH_RATE_CTRL3 0x18 -+#define BCM63138_FLASH_RATE_CTRL4 0x1c -+#define BCM63138_BRIGHT_CTRL1 0x20 -+#define BCM63138_BRIGHT_CTRL2 0x24 -+#define BCM63138_BRIGHT_CTRL3 0x28 -+#define BCM63138_BRIGHT_CTRL4 0x2c -+#define BCM63138_POWER_LED_CFG 0x30 -+#define BCM63138_HW_POLARITY 0xb4 -+#define BCM63138_SW_DATA 0xb8 -+#define BCM63138_SW_POLARITY 0xbc -+#define BCM63138_PARALLEL_LED_POLARITY 0xc0 -+#define BCM63138_SERIAL_LED_POLARITY 0xc4 -+#define BCM63138_HW_LED_STATUS 0xc8 -+#define BCM63138_FLASH_CTRL_STATUS 0xcc -+#define BCM63138_FLASH_BRT_CTRL 0xd0 -+#define BCM63138_FLASH_P_LED_OUT_STATUS 0xd4 -+#define BCM63138_FLASH_S_LED_OUT_STATUS 0xd8 -+ -+struct bcm63138_leds { -+ struct device *dev; -+ void __iomem *base; -+ spinlock_t lock; -+}; -+ -+struct bcm63138_led { -+ struct bcm63138_leds *leds; -+ struct led_classdev cdev; -+ u32 pin; -+ bool active_low; -+}; -+ -+/* -+ * I/O access -+ */ -+ -+static void bcm63138_leds_write(struct bcm63138_leds *leds, unsigned int reg, -+ u32 data) -+{ -+ writel(data, leds->base + reg); -+} -+ -+static unsigned long bcm63138_leds_read(struct bcm63138_leds *leds, -+ unsigned int reg) -+{ -+ return readl(leds->base + reg); -+} -+ -+static void bcm63138_leds_update_bits(struct bcm63138_leds *leds, -+ unsigned int reg, u32 mask, u32 val) -+{ -+ WARN_ON(val & ~mask); -+ -+ bcm63138_leds_write(leds, reg, (bcm63138_leds_read(leds, reg) & ~mask) | (val & mask)); -+} -+ -+/* -+ * Helpers -+ */ -+ -+static void bcm63138_leds_set_flash_rate(struct bcm63138_leds *leds, -+ struct bcm63138_led *led, -+ u8 value) -+{ -+ int reg_offset = (led->pin >> fls((BCM63138_LEDS_PER_REG - 1))) * 4; -+ int shift = (led->pin & (BCM63138_LEDS_PER_REG - 1)) * BCM63138_LED_BITS; -+ -+ bcm63138_leds_update_bits(leds, BCM63138_FLASH_RATE_CTRL1 + reg_offset, -+ BCM63138_LED_MASK << shift, value << shift); -+} -+ -+static void bcm63138_leds_set_bright(struct bcm63138_leds *leds, -+ struct bcm63138_led *led, -+ u8 value) -+{ -+ int reg_offset = (led->pin >> fls((BCM63138_LEDS_PER_REG - 1))) * 4; -+ int shift = (led->pin & (BCM63138_LEDS_PER_REG - 1)) * BCM63138_LED_BITS; -+ -+ bcm63138_leds_update_bits(leds, BCM63138_BRIGHT_CTRL1 + reg_offset, -+ BCM63138_LED_MASK << shift, value << shift); -+} -+ -+static void bcm63138_leds_enable_led(struct bcm63138_leds *leds, -+ struct bcm63138_led *led, -+ 
enum led_brightness value) -+{ -+ u32 bit = BIT(led->pin); -+ -+ bcm63138_leds_update_bits(leds, BCM63138_SW_DATA, bit, -+ value == LED_OFF ? 0 : bit); -+} -+ -+/* -+ * API callbacks -+ */ -+ -+static void bcm63138_leds_brightness_set(struct led_classdev *led_cdev, -+ enum led_brightness value) -+{ -+ struct bcm63138_led *led = container_of(led_cdev, struct bcm63138_led, cdev); -+ struct bcm63138_leds *leds = led->leds; -+ unsigned long flags; -+ -+ spin_lock_irqsave(&leds->lock, flags); -+ -+ bcm63138_leds_enable_led(leds, led, value); -+ if (!value) -+ bcm63138_leds_set_flash_rate(leds, led, 0); -+ else -+ bcm63138_leds_set_bright(leds, led, value); -+ -+ spin_unlock_irqrestore(&leds->lock, flags); -+} -+ -+static int bcm63138_leds_blink_set(struct led_classdev *led_cdev, -+ unsigned long *delay_on, -+ unsigned long *delay_off) -+{ -+ struct bcm63138_led *led = container_of(led_cdev, struct bcm63138_led, cdev); -+ struct bcm63138_leds *leds = led->leds; -+ unsigned long flags; -+ u8 value; -+ -+ if (!*delay_on && !*delay_off) { -+ *delay_on = 640; -+ *delay_off = 640; -+ } -+ -+ if (*delay_on != *delay_off) { -+ dev_dbg(led_cdev->dev, "Blinking at unequal delays is not supported\n"); -+ return -EINVAL; -+ } -+ -+ switch (*delay_on) { -+ case 1152 ... 1408: /* 1280 ms ± 10% */ -+ value = 0x7; -+ break; -+ case 576 ... 704: /* 640 ms ± 10% */ -+ value = 0x6; -+ break; -+ case 288 ... 352: /* 320 ms ± 10% */ -+ value = 0x5; -+ break; -+ case 126 ... 154: /* 140 ms ± 10% */ -+ value = 0x4; -+ break; -+ case 59 ... 72: /* 65 ms ± 10% */ -+ value = 0x3; -+ break; -+ default: -+ dev_dbg(led_cdev->dev, "Blinking delay value %lu is unsupported\n", -+ *delay_on); -+ return -EINVAL; -+ } -+ -+ spin_lock_irqsave(&leds->lock, flags); -+ -+ bcm63138_leds_enable_led(leds, led, BCM63138_MAX_BRIGHTNESS); -+ bcm63138_leds_set_flash_rate(leds, led, value); -+ -+ spin_unlock_irqrestore(&leds->lock, flags); -+ -+ return 0; -+} -+ -+/* -+ * LED driver -+ */ -+ -+static void bcm63138_leds_create_led(struct bcm63138_leds *leds, -+ struct device_node *np) -+{ -+ struct led_init_data init_data = { -+ .fwnode = of_fwnode_handle(np), -+ }; -+ struct device *dev = leds->dev; -+ struct bcm63138_led *led; -+ struct pinctrl *pinctrl; -+ u32 bit; -+ int err; -+ -+ led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL); -+ if (!led) { -+ dev_err(dev, "Failed to alloc LED\n"); -+ return; -+ } -+ -+ led->leds = leds; -+ -+ if (of_property_read_u32(np, "reg", &led->pin)) { -+ dev_err(dev, "Missing \"reg\" property in %pOF\n", np); -+ goto err_free; -+ } -+ -+ if (led->pin >= BCM63138_MAX_LEDS) { -+ dev_err(dev, "Invalid \"reg\" value %d\n", led->pin); -+ goto err_free; -+ } -+ -+ led->active_low = of_property_read_bool(np, "active-low"); -+ -+ led->cdev.max_brightness = BCM63138_MAX_BRIGHTNESS; -+ led->cdev.brightness_set = bcm63138_leds_brightness_set; -+ led->cdev.blink_set = bcm63138_leds_blink_set; -+ -+ err = devm_led_classdev_register_ext(dev, &led->cdev, &init_data); -+ if (err) { -+ dev_err(dev, "Failed to register LED %pOF: %d\n", np, err); -+ goto err_free; -+ } -+ -+ pinctrl = devm_pinctrl_get_select_default(led->cdev.dev); -+ if (IS_ERR(pinctrl) && PTR_ERR(pinctrl) != -ENODEV) { -+ dev_warn(led->cdev.dev, "Failed to select %pOF pinctrl: %ld\n", -+ np, PTR_ERR(pinctrl)); -+ } -+ -+ bit = BIT(led->pin); -+ bcm63138_leds_update_bits(leds, BCM63138_PARALLEL_LED_POLARITY, bit, -+ led->active_low ? 
0 : bit); -+ bcm63138_leds_update_bits(leds, BCM63138_HW_LED_EN, bit, 0); -+ bcm63138_leds_set_flash_rate(leds, led, 0); -+ bcm63138_leds_enable_led(leds, led, led->cdev.brightness); -+ -+ return; -+ -+err_free: -+ devm_kfree(dev, led); -+} -+ -+static int bcm63138_leds_probe(struct platform_device *pdev) -+{ -+ struct device_node *np = dev_of_node(&pdev->dev); -+ struct device *dev = &pdev->dev; -+ struct bcm63138_leds *leds; -+ struct device_node *child; -+ -+ leds = devm_kzalloc(dev, sizeof(*leds), GFP_KERNEL); -+ if (!leds) -+ return -ENOMEM; -+ -+ leds->dev = dev; -+ -+ leds->base = devm_platform_ioremap_resource(pdev, 0); -+ if (IS_ERR(leds->base)) -+ return PTR_ERR(leds->base); -+ -+ spin_lock_init(&leds->lock); -+ -+ bcm63138_leds_write(leds, BCM63138_GLB_CTRL, -+ BCM63138_GLB_CTRL_SERIAL_LED_DATA_PPOL | -+ BCM63138_GLB_CTRL_SERIAL_LED_EN_POL); -+ bcm63138_leds_write(leds, BCM63138_HW_LED_EN, 0); -+ bcm63138_leds_write(leds, BCM63138_SERIAL_LED_POLARITY, 0); -+ bcm63138_leds_write(leds, BCM63138_PARALLEL_LED_POLARITY, 0); -+ -+ for_each_available_child_of_node(np, child) { -+ bcm63138_leds_create_led(leds, child); -+ } -+ -+ return 0; -+} -+ -+static const struct of_device_id bcm63138_leds_of_match_table[] = { -+ { .compatible = "brcm,bcm63138-leds", }, -+ { }, -+}; -+ -+static struct platform_driver bcm63138_leds_driver = { -+ .probe = bcm63138_leds_probe, -+ .driver = { -+ .name = "leds-bcm63xxx", -+ .of_match_table = bcm63138_leds_of_match_table, -+ }, -+}; -+ -+module_platform_driver(bcm63138_leds_driver); -+ -+MODULE_AUTHOR("Rafał Miłecki"); -+MODULE_LICENSE("GPL"); -+MODULE_DEVICE_TABLE(of, bcm63138_leds_of_match_table); diff --git a/target/linux/generic/backport-6.1/801-v6.0-0001-dt-bindings-leds-leds-bcm63138-unify-full-stops-in-d.patch b/target/linux/generic/backport-6.1/801-v6.0-0001-dt-bindings-leds-leds-bcm63138-unify-full-stops-in-d.patch deleted file mode 100644 index 483826abed5f..000000000000 --- a/target/linux/generic/backport-6.1/801-v6.0-0001-dt-bindings-leds-leds-bcm63138-unify-full-stops-in-d.patch +++ /dev/null @@ -1,30 +0,0 @@ -From 13b64a0c19059b38150c79d65d350ae44034c5df Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= -Date: Sun, 17 Jul 2022 14:42:46 +0200 -Subject: [PATCH] dt-bindings: leds: leds-bcm63138: unify full stops in - descriptions -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Description of "reg" doesn't have full stop at the end. It makes sense -as it's a one-sentence only. Use the same style for "active-low". - -Reported-by: Pavel Machek -Signed-off-by: Rafał Miłecki -Signed-off-by: Pavel Machek ---- - Documentation/devicetree/bindings/leds/leds-bcm63138.yaml | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/Documentation/devicetree/bindings/leds/leds-bcm63138.yaml -+++ b/Documentation/devicetree/bindings/leds/leds-bcm63138.yaml -@@ -54,7 +54,7 @@ patternProperties: - - active-low: - type: boolean -- description: Makes LED active low. 
-+ description: Makes LED active low - - required: - - reg diff --git a/target/linux/generic/backport-6.1/801-v6.0-0002-leds-add-help-info-about-BCM63138-module-name.patch b/target/linux/generic/backport-6.1/801-v6.0-0002-leds-add-help-info-about-BCM63138-module-name.patch deleted file mode 100644 index 5430b1f08448..000000000000 --- a/target/linux/generic/backport-6.1/801-v6.0-0002-leds-add-help-info-about-BCM63138-module-name.patch +++ /dev/null @@ -1,28 +0,0 @@ -From bcc607cdbb1f931111196699426f0cb83bfb296a Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= -Date: Sun, 17 Jul 2022 14:42:47 +0200 -Subject: [PATCH] leds: add help info about BCM63138 module name -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -It's what we do for all other LEDs drivers. - -Reported-by: Pavel Machek -Signed-off-by: Rafał Miłecki -Signed-off-by: Pavel Machek ---- - drivers/leds/blink/Kconfig | 2 ++ - 1 file changed, 2 insertions(+) - ---- a/drivers/leds/blink/Kconfig -+++ b/drivers/leds/blink/Kconfig -@@ -10,6 +10,8 @@ config LEDS_BCM63138 - BCM63138 SoC. The same hardware block is known to be also used - in BCM4908, BCM6848, BCM6858, BCM63148, BCM63381 and BCM68360. - -+ If compiled as module it will be called leds-bcm63138. -+ - config LEDS_LGM - tristate "LED support for LGM SoC series" - depends on X86 || COMPILE_TEST diff --git a/target/linux/generic/backport-6.1/801-v6.0-0003-leds-leds-bcm63138-get-rid-of-LED_OFF.patch b/target/linux/generic/backport-6.1/801-v6.0-0003-leds-leds-bcm63138-get-rid-of-LED_OFF.patch deleted file mode 100644 index e125a54613b2..000000000000 --- a/target/linux/generic/backport-6.1/801-v6.0-0003-leds-leds-bcm63138-get-rid-of-LED_OFF.patch +++ /dev/null @@ -1,30 +0,0 @@ -From 92cfc71ee2ddfb499ed53e21b28bdf8739bc70bc Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= -Date: Sun, 17 Jul 2022 14:42:48 +0200 -Subject: [PATCH] leds: leds-bcm63138: get rid of LED_OFF -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -The whole "enum led_brightness" is marked as obsolete. Replace it with a -(non-)zero check. - -Reported-by: Pavel Machek -Signed-off-by: Rafał Miłecki -Signed-off-by: Pavel Machek ---- - drivers/leds/blink/leds-bcm63138.c | 3 +-- - 1 file changed, 1 insertion(+), 2 deletions(-) - ---- a/drivers/leds/blink/leds-bcm63138.c -+++ b/drivers/leds/blink/leds-bcm63138.c -@@ -113,8 +113,7 @@ static void bcm63138_leds_enable_led(str - { - u32 bit = BIT(led->pin); - -- bcm63138_leds_update_bits(leds, BCM63138_SW_DATA, bit, -- value == LED_OFF ? 0 : bit); -+ bcm63138_leds_update_bits(leds, BCM63138_SW_DATA, bit, value ? bit : 0); - } - - /* diff --git a/target/linux/generic/backport-6.1/802-v5.16-0001-nvmem-core-rework-nvmem-cell-instance-creation.patch b/target/linux/generic/backport-6.1/802-v5.16-0001-nvmem-core-rework-nvmem-cell-instance-creation.patch deleted file mode 100644 index ebab8c7c736d..000000000000 --- a/target/linux/generic/backport-6.1/802-v5.16-0001-nvmem-core-rework-nvmem-cell-instance-creation.patch +++ /dev/null @@ -1,456 +0,0 @@ -From 7ae6478b304bc004c3139b422665b0e23b57f05c Mon Sep 17 00:00:00 2001 -From: Srinivas Kandagatla -Date: Wed, 13 Oct 2021 14:19:55 +0100 -Subject: [PATCH] nvmem: core: rework nvmem cell instance creation - -In the existing design, we do not create a instance per nvmem cell consumer -but we directly refer cell from nvmem cell list that are added to provider. 
- -However this design has some limitations when consumers want to assign name -or connection id the nvmem cell instance, ex: via "nvmem-cell-names" or -id in nvmem_cell_get(id). - -Having a name associated with nvmem cell consumer instance will help -provider drivers in performing post processing of nvmem cell data if required -before data is seen by the consumers. This is pretty normal with some vendors -storing nvmem cells like mac-address in a vendor specific data layouts that -are not directly usable by the consumer drivers. - -With this patch nvmem cell will be created dynamically during nvmem_cell_get -and destroyed in nvmem_cell_put, allowing consumers to associate name with -nvmem cell consumer instance. - -With this patch a new struct nvmem_cell_entry replaces struct nvmem_cell -for storing nvmem cell information within the core. -This patch does not change nvmem-consumer interface based on nvmem_cell. - -Tested-by: Joakim Zhang -Signed-off-by: Srinivas Kandagatla -Link: https://lore.kernel.org/r/20211013131957.30271-2-srinivas.kandagatla@linaro.org -Signed-off-by: Greg Kroah-Hartman ---- - drivers/nvmem/core.c | 165 +++++++++++++++++++++++++++---------------- - 1 file changed, 105 insertions(+), 60 deletions(-) - ---- a/drivers/nvmem/core.c -+++ b/drivers/nvmem/core.c -@@ -45,8 +45,7 @@ struct nvmem_device { - #define to_nvmem_device(d) container_of(d, struct nvmem_device, dev) - - #define FLAG_COMPAT BIT(0) -- --struct nvmem_cell { -+struct nvmem_cell_entry { - const char *name; - int offset; - int bytes; -@@ -57,6 +56,11 @@ struct nvmem_cell { - struct list_head node; - }; - -+struct nvmem_cell { -+ struct nvmem_cell_entry *entry; -+ const char *id; -+}; -+ - static DEFINE_MUTEX(nvmem_mutex); - static DEFINE_IDA(nvmem_ida); - -@@ -424,7 +428,7 @@ static struct bus_type nvmem_bus_type = - .name = "nvmem", - }; - --static void nvmem_cell_drop(struct nvmem_cell *cell) -+static void nvmem_cell_entry_drop(struct nvmem_cell_entry *cell) - { - blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell); - mutex_lock(&nvmem_mutex); -@@ -437,13 +441,13 @@ static void nvmem_cell_drop(struct nvmem - - static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem) - { -- struct nvmem_cell *cell, *p; -+ struct nvmem_cell_entry *cell, *p; - - list_for_each_entry_safe(cell, p, &nvmem->cells, node) -- nvmem_cell_drop(cell); -+ nvmem_cell_entry_drop(cell); - } - --static void nvmem_cell_add(struct nvmem_cell *cell) -+static void nvmem_cell_entry_add(struct nvmem_cell_entry *cell) - { - mutex_lock(&nvmem_mutex); - list_add_tail(&cell->node, &cell->nvmem->cells); -@@ -451,9 +455,9 @@ static void nvmem_cell_add(struct nvmem_ - blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell); - } - --static int nvmem_cell_info_to_nvmem_cell_nodup(struct nvmem_device *nvmem, -- const struct nvmem_cell_info *info, -- struct nvmem_cell *cell) -+static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem, -+ const struct nvmem_cell_info *info, -+ struct nvmem_cell_entry *cell) - { - cell->nvmem = nvmem; - cell->offset = info->offset; -@@ -477,13 +481,13 @@ static int nvmem_cell_info_to_nvmem_cell - return 0; - } - --static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem, -- const struct nvmem_cell_info *info, -- struct nvmem_cell *cell) -+static int nvmem_cell_info_to_nvmem_cell_entry(struct nvmem_device *nvmem, -+ const struct nvmem_cell_info *info, -+ struct nvmem_cell_entry *cell) - { - int err; - -- err = 
nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, cell); -+ err = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, cell); - if (err) - return err; - -@@ -507,7 +511,7 @@ static int nvmem_add_cells(struct nvmem_ - const struct nvmem_cell_info *info, - int ncells) - { -- struct nvmem_cell **cells; -+ struct nvmem_cell_entry **cells; - int i, rval; - - cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL); -@@ -521,13 +525,13 @@ static int nvmem_add_cells(struct nvmem_ - goto err; - } - -- rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]); -+ rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, &info[i], cells[i]); - if (rval) { - kfree(cells[i]); - goto err; - } - -- nvmem_cell_add(cells[i]); -+ nvmem_cell_entry_add(cells[i]); - } - - /* remove tmp array */ -@@ -536,7 +540,7 @@ static int nvmem_add_cells(struct nvmem_ - return 0; - err: - while (i--) -- nvmem_cell_drop(cells[i]); -+ nvmem_cell_entry_drop(cells[i]); - - kfree(cells); - -@@ -573,7 +577,7 @@ static int nvmem_add_cells_from_table(st - { - const struct nvmem_cell_info *info; - struct nvmem_cell_table *table; -- struct nvmem_cell *cell; -+ struct nvmem_cell_entry *cell; - int rval = 0, i; - - mutex_lock(&nvmem_cell_mutex); -@@ -588,15 +592,13 @@ static int nvmem_add_cells_from_table(st - goto out; - } - -- rval = nvmem_cell_info_to_nvmem_cell(nvmem, -- info, -- cell); -+ rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell); - if (rval) { - kfree(cell); - goto out; - } - -- nvmem_cell_add(cell); -+ nvmem_cell_entry_add(cell); - } - } - } -@@ -606,10 +608,10 @@ out: - return rval; - } - --static struct nvmem_cell * --nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id) -+static struct nvmem_cell_entry * -+nvmem_find_cell_entry_by_name(struct nvmem_device *nvmem, const char *cell_id) - { -- struct nvmem_cell *iter, *cell = NULL; -+ struct nvmem_cell_entry *iter, *cell = NULL; - - mutex_lock(&nvmem_mutex); - list_for_each_entry(iter, &nvmem->cells, node) { -@@ -680,7 +682,7 @@ static int nvmem_add_cells_from_of(struc - { - struct device_node *parent, *child; - struct device *dev = &nvmem->dev; -- struct nvmem_cell *cell; -+ struct nvmem_cell_entry *cell; - const __be32 *addr; - int len; - -@@ -729,7 +731,7 @@ static int nvmem_add_cells_from_of(struc - } - - cell->np = of_node_get(child); -- nvmem_cell_add(cell); -+ nvmem_cell_entry_add(cell); - } - - return 0; -@@ -1142,9 +1144,33 @@ struct nvmem_device *devm_nvmem_device_g - } - EXPORT_SYMBOL_GPL(devm_nvmem_device_get); - -+static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry, const char *id) -+{ -+ struct nvmem_cell *cell; -+ const char *name = NULL; -+ -+ cell = kzalloc(sizeof(*cell), GFP_KERNEL); -+ if (!cell) -+ return ERR_PTR(-ENOMEM); -+ -+ if (id) { -+ name = kstrdup_const(id, GFP_KERNEL); -+ if (!name) { -+ kfree(cell); -+ return ERR_PTR(-ENOMEM); -+ } -+ } -+ -+ cell->id = name; -+ cell->entry = entry; -+ -+ return cell; -+} -+ - static struct nvmem_cell * - nvmem_cell_get_from_lookup(struct device *dev, const char *con_id) - { -+ struct nvmem_cell_entry *cell_entry; - struct nvmem_cell *cell = ERR_PTR(-ENOENT); - struct nvmem_cell_lookup *lookup; - struct nvmem_device *nvmem; -@@ -1169,11 +1195,15 @@ nvmem_cell_get_from_lookup(struct device - break; - } - -- cell = nvmem_find_cell_by_name(nvmem, -- lookup->cell_name); -- if (!cell) { -+ cell_entry = nvmem_find_cell_entry_by_name(nvmem, -+ lookup->cell_name); -+ if (!cell_entry) { - __nvmem_device_put(nvmem); - cell = ERR_PTR(-ENOENT); -+ } else { -+ cell = 
nvmem_create_cell(cell_entry, con_id); -+ if (IS_ERR(cell)) -+ __nvmem_device_put(nvmem); - } - break; - } -@@ -1184,10 +1214,10 @@ nvmem_cell_get_from_lookup(struct device - } - - #if IS_ENABLED(CONFIG_OF) --static struct nvmem_cell * --nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np) -+static struct nvmem_cell_entry * -+nvmem_find_cell_entry_by_node(struct nvmem_device *nvmem, struct device_node *np) - { -- struct nvmem_cell *iter, *cell = NULL; -+ struct nvmem_cell_entry *iter, *cell = NULL; - - mutex_lock(&nvmem_mutex); - list_for_each_entry(iter, &nvmem->cells, node) { -@@ -1217,6 +1247,7 @@ struct nvmem_cell *of_nvmem_cell_get(str - { - struct device_node *cell_np, *nvmem_np; - struct nvmem_device *nvmem; -+ struct nvmem_cell_entry *cell_entry; - struct nvmem_cell *cell; - int index = 0; - -@@ -1237,12 +1268,16 @@ struct nvmem_cell *of_nvmem_cell_get(str - if (IS_ERR(nvmem)) - return ERR_CAST(nvmem); - -- cell = nvmem_find_cell_by_node(nvmem, cell_np); -- if (!cell) { -+ cell_entry = nvmem_find_cell_entry_by_node(nvmem, cell_np); -+ if (!cell_entry) { - __nvmem_device_put(nvmem); - return ERR_PTR(-ENOENT); - } - -+ cell = nvmem_create_cell(cell_entry, id); -+ if (IS_ERR(cell)) -+ __nvmem_device_put(nvmem); -+ - return cell; - } - EXPORT_SYMBOL_GPL(of_nvmem_cell_get); -@@ -1348,13 +1383,17 @@ EXPORT_SYMBOL(devm_nvmem_cell_put); - */ - void nvmem_cell_put(struct nvmem_cell *cell) - { -- struct nvmem_device *nvmem = cell->nvmem; -+ struct nvmem_device *nvmem = cell->entry->nvmem; -+ -+ if (cell->id) -+ kfree_const(cell->id); - -+ kfree(cell); - __nvmem_device_put(nvmem); - } - EXPORT_SYMBOL_GPL(nvmem_cell_put); - --static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf) -+static void nvmem_shift_read_buffer_in_place(struct nvmem_cell_entry *cell, void *buf) - { - u8 *p, *b; - int i, extra, bit_offset = cell->bit_offset; -@@ -1388,8 +1427,8 @@ static void nvmem_shift_read_buffer_in_p - } - - static int __nvmem_cell_read(struct nvmem_device *nvmem, -- struct nvmem_cell *cell, -- void *buf, size_t *len) -+ struct nvmem_cell_entry *cell, -+ void *buf, size_t *len, const char *id) - { - int rc; - -@@ -1420,18 +1459,18 @@ static int __nvmem_cell_read(struct nvme - */ - void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len) - { -- struct nvmem_device *nvmem = cell->nvmem; -+ struct nvmem_device *nvmem = cell->entry->nvmem; - u8 *buf; - int rc; - - if (!nvmem) - return ERR_PTR(-EINVAL); - -- buf = kzalloc(cell->bytes, GFP_KERNEL); -+ buf = kzalloc(cell->entry->bytes, GFP_KERNEL); - if (!buf) - return ERR_PTR(-ENOMEM); - -- rc = __nvmem_cell_read(nvmem, cell, buf, len); -+ rc = __nvmem_cell_read(nvmem, cell->entry, buf, len, cell->id); - if (rc) { - kfree(buf); - return ERR_PTR(rc); -@@ -1441,7 +1480,7 @@ void *nvmem_cell_read(struct nvmem_cell - } - EXPORT_SYMBOL_GPL(nvmem_cell_read); - --static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell, -+static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell_entry *cell, - u8 *_buf, int len) - { - struct nvmem_device *nvmem = cell->nvmem; -@@ -1494,16 +1533,7 @@ err: - return ERR_PTR(rc); - } - --/** -- * nvmem_cell_write() - Write to a given nvmem cell -- * -- * @cell: nvmem cell to be written. -- * @buf: Buffer to be written. -- * @len: length of buffer to be written to nvmem cell. -- * -- * Return: length of bytes written or negative on failure. 
-- */ --int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len) -+static int __nvmem_cell_entry_write(struct nvmem_cell_entry *cell, void *buf, size_t len) - { - struct nvmem_device *nvmem = cell->nvmem; - int rc; -@@ -1529,6 +1559,21 @@ int nvmem_cell_write(struct nvmem_cell * - - return len; - } -+ -+/** -+ * nvmem_cell_write() - Write to a given nvmem cell -+ * -+ * @cell: nvmem cell to be written. -+ * @buf: Buffer to be written. -+ * @len: length of buffer to be written to nvmem cell. -+ * -+ * Return: length of bytes written or negative on failure. -+ */ -+int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len) -+{ -+ return __nvmem_cell_entry_write(cell->entry, buf, len); -+} -+ - EXPORT_SYMBOL_GPL(nvmem_cell_write); - - static int nvmem_cell_read_common(struct device *dev, const char *cell_id, -@@ -1631,7 +1676,7 @@ static const void *nvmem_cell_read_varia - if (IS_ERR(cell)) - return cell; - -- nbits = cell->nbits; -+ nbits = cell->entry->nbits; - buf = nvmem_cell_read(cell, len); - nvmem_cell_put(cell); - if (IS_ERR(buf)) -@@ -1727,18 +1772,18 @@ EXPORT_SYMBOL_GPL(nvmem_cell_read_variab - ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem, - struct nvmem_cell_info *info, void *buf) - { -- struct nvmem_cell cell; -+ struct nvmem_cell_entry cell; - int rc; - ssize_t len; - - if (!nvmem) - return -EINVAL; - -- rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell); -+ rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell); - if (rc) - return rc; - -- rc = __nvmem_cell_read(nvmem, &cell, buf, &len); -+ rc = __nvmem_cell_read(nvmem, &cell, buf, &len, NULL); - if (rc) - return rc; - -@@ -1758,17 +1803,17 @@ EXPORT_SYMBOL_GPL(nvmem_device_cell_read - int nvmem_device_cell_write(struct nvmem_device *nvmem, - struct nvmem_cell_info *info, void *buf) - { -- struct nvmem_cell cell; -+ struct nvmem_cell_entry cell; - int rc; - - if (!nvmem) - return -EINVAL; - -- rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell); -+ rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell); - if (rc) - return rc; - -- return nvmem_cell_write(&cell, buf, cell.bytes); -+ return __nvmem_cell_entry_write(&cell, buf, cell.bytes); - } - EXPORT_SYMBOL_GPL(nvmem_device_cell_write); - diff --git a/target/linux/generic/backport-6.1/802-v5.16-0002-nvmem-core-add-nvmem-cell-post-processing-callback.patch b/target/linux/generic/backport-6.1/802-v5.16-0002-nvmem-core-add-nvmem-cell-post-processing-callback.patch deleted file mode 100644 index df264add2452..000000000000 --- a/target/linux/generic/backport-6.1/802-v5.16-0002-nvmem-core-add-nvmem-cell-post-processing-callback.patch +++ /dev/null @@ -1,82 +0,0 @@ -From 5008062f1c3f5af3acf86164aa6fcc77b0c7bdce Mon Sep 17 00:00:00 2001 -From: Srinivas Kandagatla -Date: Wed, 13 Oct 2021 14:19:56 +0100 -Subject: [PATCH] nvmem: core: add nvmem cell post processing callback - -Some NVMEM providers have certain nvmem cells encoded, which requires -post processing before actually using it. - -For example mac-address is stored in either in ascii or delimited or reverse-order. - -Having a post-process callback hook to provider drivers would enable them to -do this vendor specific post processing before nvmem consumers see it. 
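As a minimal illustration of the hook described above, a provider callback matching the nvmem_cell_post_process_t signature added by this patch could look like the sketch below. The foo_* names are hypothetical; the byte-reversal fixup mirrors the i.MX OCOTP example that follows further below.

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

static int foo_otp_cell_post_process(void *priv, const char *id,
				     unsigned int offset, void *buf,
				     size_t bytes)
{
	u8 *p = buf;
	size_t i;

	/* Only adjust the cell the consumer requested by name. */
	if (!id || strcmp(id, "mac-address"))
		return 0;

	/* Example fixup: undo the reversed byte order used in the OTP layout. */
	for (i = 0; i < bytes / 2; i++)
		swap(p[i], p[bytes - i - 1]);

	return 0;
}

A provider would wire this up by setting .cell_post_process = foo_otp_cell_post_process in its struct nvmem_config before registering the device.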
- -Tested-by: Joakim Zhang -Signed-off-by: Srinivas Kandagatla -Link: https://lore.kernel.org/r/20211013131957.30271-3-srinivas.kandagatla@linaro.org -Signed-off-by: Greg Kroah-Hartman ---- - drivers/nvmem/core.c | 9 +++++++++ - include/linux/nvmem-provider.h | 5 +++++ - 2 files changed, 14 insertions(+) - ---- a/drivers/nvmem/core.c -+++ b/drivers/nvmem/core.c -@@ -38,6 +38,7 @@ struct nvmem_device { - unsigned int nkeepout; - nvmem_reg_read_t reg_read; - nvmem_reg_write_t reg_write; -+ nvmem_cell_post_process_t cell_post_process; - struct gpio_desc *wp_gpio; - void *priv; - }; -@@ -799,6 +800,7 @@ struct nvmem_device *nvmem_register(cons - nvmem->type = config->type; - nvmem->reg_read = config->reg_read; - nvmem->reg_write = config->reg_write; -+ nvmem->cell_post_process = config->cell_post_process; - nvmem->keepout = config->keepout; - nvmem->nkeepout = config->nkeepout; - if (config->of_node) -@@ -1441,6 +1443,13 @@ static int __nvmem_cell_read(struct nvme - if (cell->bit_offset || cell->nbits) - nvmem_shift_read_buffer_in_place(cell, buf); - -+ if (nvmem->cell_post_process) { -+ rc = nvmem->cell_post_process(nvmem->priv, id, -+ cell->offset, buf, cell->bytes); -+ if (rc) -+ return rc; -+ } -+ - if (len) - *len = cell->bytes; - ---- a/include/linux/nvmem-provider.h -+++ b/include/linux/nvmem-provider.h -@@ -19,6 +19,9 @@ typedef int (*nvmem_reg_read_t)(void *pr - void *val, size_t bytes); - typedef int (*nvmem_reg_write_t)(void *priv, unsigned int offset, - void *val, size_t bytes); -+/* used for vendor specific post processing of cell data */ -+typedef int (*nvmem_cell_post_process_t)(void *priv, const char *id, unsigned int offset, -+ void *buf, size_t bytes); - - enum nvmem_type { - NVMEM_TYPE_UNKNOWN = 0, -@@ -62,6 +65,7 @@ struct nvmem_keepout { - * @no_of_node: Device should not use the parent's of_node even if it's !NULL. - * @reg_read: Callback to read data. - * @reg_write: Callback to write data. -+ * @cell_post_process: Callback for vendor specific post processing of cell data - * @size: Device size. - * @word_size: Minimum read/write access granularity. - * @stride: Minimum read/write access stride. -@@ -92,6 +96,7 @@ struct nvmem_config { - bool no_of_node; - nvmem_reg_read_t reg_read; - nvmem_reg_write_t reg_write; -+ nvmem_cell_post_process_t cell_post_process; - int size; - int word_size; - int stride; diff --git a/target/linux/generic/backport-6.1/802-v5.16-0003-nvmem-imx-ocotp-add-support-for-post-processing.patch b/target/linux/generic/backport-6.1/802-v5.16-0003-nvmem-imx-ocotp-add-support-for-post-processing.patch deleted file mode 100644 index ee19228270d1..000000000000 --- a/target/linux/generic/backport-6.1/802-v5.16-0003-nvmem-imx-ocotp-add-support-for-post-processing.patch +++ /dev/null @@ -1,92 +0,0 @@ -From d0221a780cbc99fec6c27a98dba2828dc5735c00 Mon Sep 17 00:00:00 2001 -From: Srinivas Kandagatla -Date: Wed, 13 Oct 2021 14:19:57 +0100 -Subject: [PATCH] nvmem: imx-ocotp: add support for post processing - -Add .cell_post_process callback for imx-ocotp to deal with MAC address, -since MAC address need to be reversed byte for some i.MX SoCs. 
- -Tested-by: Joakim Zhang -Signed-off-by: Srinivas Kandagatla -Link: https://lore.kernel.org/r/20211013131957.30271-4-srinivas.kandagatla@linaro.org -Signed-off-by: Greg Kroah-Hartman ---- - drivers/nvmem/imx-ocotp.c | 25 +++++++++++++++++++++++++ - 1 file changed, 25 insertions(+) - ---- a/drivers/nvmem/imx-ocotp.c -+++ b/drivers/nvmem/imx-ocotp.c -@@ -97,6 +97,7 @@ struct ocotp_params { - unsigned int bank_address_words; - void (*set_timing)(struct ocotp_priv *priv); - struct ocotp_ctrl_reg ctrl; -+ bool reverse_mac_address; - }; - - static int imx_ocotp_wait_for_busy(struct ocotp_priv *priv, u32 flags) -@@ -221,6 +222,25 @@ read_end: - return ret; - } - -+static int imx_ocotp_cell_pp(void *context, const char *id, unsigned int offset, -+ void *data, size_t bytes) -+{ -+ struct ocotp_priv *priv = context; -+ -+ /* Deal with some post processing of nvmem cell data */ -+ if (id && !strcmp(id, "mac-address")) { -+ if (priv->params->reverse_mac_address) { -+ u8 *buf = data; -+ int i; -+ -+ for (i = 0; i < bytes/2; i++) -+ swap(buf[i], buf[bytes - i - 1]); -+ } -+ } -+ -+ return 0; -+} -+ - static void imx_ocotp_set_imx6_timing(struct ocotp_priv *priv) - { - unsigned long clk_rate; -@@ -468,6 +488,7 @@ static struct nvmem_config imx_ocotp_nvm - .stride = 1, - .reg_read = imx_ocotp_read, - .reg_write = imx_ocotp_write, -+ .cell_post_process = imx_ocotp_cell_pp, - }; - - static const struct ocotp_params imx6q_params = { -@@ -530,6 +551,7 @@ static const struct ocotp_params imx8mq_ - .bank_address_words = 0, - .set_timing = imx_ocotp_set_imx6_timing, - .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT, -+ .reverse_mac_address = true, - }; - - static const struct ocotp_params imx8mm_params = { -@@ -537,6 +559,7 @@ static const struct ocotp_params imx8mm_ - .bank_address_words = 0, - .set_timing = imx_ocotp_set_imx6_timing, - .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT, -+ .reverse_mac_address = true, - }; - - static const struct ocotp_params imx8mn_params = { -@@ -544,6 +567,7 @@ static const struct ocotp_params imx8mn_ - .bank_address_words = 0, - .set_timing = imx_ocotp_set_imx6_timing, - .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT, -+ .reverse_mac_address = true, - }; - - static const struct ocotp_params imx8mp_params = { -@@ -551,6 +575,7 @@ static const struct ocotp_params imx8mp_ - .bank_address_words = 0, - .set_timing = imx_ocotp_set_imx6_timing, - .ctrl = IMX_OCOTP_BM_CTRL_8MP, -+ .reverse_mac_address = true, - }; - - static const struct of_device_id imx_ocotp_dt_ids[] = { diff --git a/target/linux/generic/backport-6.1/803-v5.17-0002-nvmem-mtk-efuse-support-minimum-one-byte-access-stri.patch b/target/linux/generic/backport-6.1/803-v5.17-0002-nvmem-mtk-efuse-support-minimum-one-byte-access-stri.patch deleted file mode 100644 index 785bfe53f547..000000000000 --- a/target/linux/generic/backport-6.1/803-v5.17-0002-nvmem-mtk-efuse-support-minimum-one-byte-access-stri.patch +++ /dev/null @@ -1,47 +0,0 @@ -From 98e2c4efae214fb7086cac9117616eb6ea11475d Mon Sep 17 00:00:00 2001 -From: Chunfeng Yun -Date: Thu, 9 Dec 2021 17:42:34 +0000 -Subject: [PATCH] nvmem: mtk-efuse: support minimum one byte access stride and - granularity - -In order to support nvmem bits property, should support minimum 1 byte -read stride and minimum 1 byte read granularity at the same time. 
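A sketch of what this requirement means for a provider, with hypothetical foo_* names: once cells may carry the "bits" property they can start at any byte, so the nvmem_config must advertise single-byte stride and word size, and reg_read must cope with byte-granular offsets, much like the readb() loop in the hunk below.

#include <linux/io.h>
#include <linux/nvmem-provider.h>

struct foo_efuse_priv {
	void __iomem *base;
};

static int foo_efuse_reg_read(void *context, unsigned int reg,
			      void *val, size_t bytes)
{
	struct foo_efuse_priv *priv = context;
	u8 *buf = val;
	size_t i;

	/* Read one byte at a time so any offset/length is acceptable. */
	for (i = 0; i < bytes; i++)
		buf[i] = readb(priv->base + reg + i);

	return 0;
}

static struct nvmem_config foo_econfig = {
	.name      = "foo-efuse",
	.stride    = 1,		/* minimum access stride */
	.word_size = 1,		/* minimum access granularity */
	.reg_read  = foo_efuse_reg_read,
};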
- -Signed-off-by: Chunfeng Yun -Signed-off-by: Srinivas Kandagatla -Link: https://lore.kernel.org/r/20211209174235.14049-4-srinivas.kandagatla@linaro.org -Signed-off-by: Greg Kroah-Hartman ---- - drivers/nvmem/mtk-efuse.c | 13 +++++++------ - 1 file changed, 7 insertions(+), 6 deletions(-) - ---- a/drivers/nvmem/mtk-efuse.c -+++ b/drivers/nvmem/mtk-efuse.c -@@ -19,11 +19,12 @@ static int mtk_reg_read(void *context, - unsigned int reg, void *_val, size_t bytes) - { - struct mtk_efuse_priv *priv = context; -- u32 *val = _val; -- int i = 0, words = bytes / 4; -+ void __iomem *addr = priv->base + reg; -+ u8 *val = _val; -+ int i; - -- while (words--) -- *val++ = readl(priv->base + reg + (i++ * 4)); -+ for (i = 0; i < bytes; i++, val++) -+ *val = readb(addr + i); - - return 0; - } -@@ -45,8 +46,8 @@ static int mtk_efuse_probe(struct platfo - if (IS_ERR(priv->base)) - return PTR_ERR(priv->base); - -- econfig.stride = 4; -- econfig.word_size = 4; -+ econfig.stride = 1; -+ econfig.word_size = 1; - econfig.reg_read = mtk_reg_read; - econfig.size = resource_size(res); - econfig.priv = priv; diff --git a/target/linux/generic/backport-6.1/804-v5.18-0001-nvmem-core-Remove-unused-devm_nvmem_unregister.patch b/target/linux/generic/backport-6.1/804-v5.18-0001-nvmem-core-Remove-unused-devm_nvmem_unregister.patch deleted file mode 100644 index ca5357c8d988..000000000000 --- a/target/linux/generic/backport-6.1/804-v5.18-0001-nvmem-core-Remove-unused-devm_nvmem_unregister.patch +++ /dev/null @@ -1,72 +0,0 @@ -From 190fae468592bc2f0efc8b928920f8f712b5831e Mon Sep 17 00:00:00 2001 -From: Andy Shevchenko -Date: Sun, 20 Feb 2022 15:15:15 +0000 -Subject: [PATCH] nvmem: core: Remove unused devm_nvmem_unregister() - -There are no users and seems no will come of the devm_nvmem_unregister(). -Remove the function and remove the unused devm_nvmem_match() along with it. - -Signed-off-by: Andy Shevchenko -Signed-off-by: Srinivas Kandagatla -Link: https://lore.kernel.org/r/20220220151527.17216-2-srinivas.kandagatla@linaro.org -Signed-off-by: Greg Kroah-Hartman ---- - drivers/nvmem/core.c | 22 ---------------------- - include/linux/nvmem-provider.h | 8 -------- - 2 files changed, 30 deletions(-) - ---- a/drivers/nvmem/core.c -+++ b/drivers/nvmem/core.c -@@ -943,28 +943,6 @@ struct nvmem_device *devm_nvmem_register - } - EXPORT_SYMBOL_GPL(devm_nvmem_register); - --static int devm_nvmem_match(struct device *dev, void *res, void *data) --{ -- struct nvmem_device **r = res; -- -- return *r == data; --} -- --/** -- * devm_nvmem_unregister() - Unregister previously registered managed nvmem -- * device. -- * -- * @dev: Device that uses the nvmem device. -- * @nvmem: Pointer to previously registered nvmem device. -- * -- * Return: Will be negative on error or zero on success. 
-- */ --int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem) --{ -- return devres_release(dev, devm_nvmem_release, devm_nvmem_match, nvmem); --} --EXPORT_SYMBOL(devm_nvmem_unregister); -- - static struct nvmem_device *__nvmem_device_get(void *data, - int (*match)(struct device *dev, const void *data)) - { ---- a/include/linux/nvmem-provider.h -+++ b/include/linux/nvmem-provider.h -@@ -133,8 +133,6 @@ void nvmem_unregister(struct nvmem_devic - struct nvmem_device *devm_nvmem_register(struct device *dev, - const struct nvmem_config *cfg); - --int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem); -- - void nvmem_add_cell_table(struct nvmem_cell_table *table); - void nvmem_del_cell_table(struct nvmem_cell_table *table); - -@@ -153,12 +151,6 @@ devm_nvmem_register(struct device *dev, - return nvmem_register(c); - } - --static inline int --devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem) --{ -- return -EOPNOTSUPP; --} -- - static inline void nvmem_add_cell_table(struct nvmem_cell_table *table) {} - static inline void nvmem_del_cell_table(struct nvmem_cell_table *table) {} - diff --git a/target/linux/generic/backport-6.1/804-v5.18-0002-nvmem-core-Use-devm_add_action_or_reset.patch b/target/linux/generic/backport-6.1/804-v5.18-0002-nvmem-core-Use-devm_add_action_or_reset.patch deleted file mode 100644 index b71a0a365b21..000000000000 --- a/target/linux/generic/backport-6.1/804-v5.18-0002-nvmem-core-Use-devm_add_action_or_reset.patch +++ /dev/null @@ -1,58 +0,0 @@ -From 5825b2c6762611e67ccaf3ccf64485365a120f0b Mon Sep 17 00:00:00 2001 -From: Andy Shevchenko -Date: Sun, 20 Feb 2022 15:15:16 +0000 -Subject: [PATCH] nvmem: core: Use devm_add_action_or_reset() - -Slightly simplify the devm_nvmem_register() by using the -devm_add_action_or_reset(). 
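For context, the devm_add_action_or_reset() idiom applied here, as a hedged generic sketch (all foo_* names are hypothetical): a cleanup callback is registered with devres so it runs automatically on probe failure or device removal, replacing an open-coded devres_alloc()/devres_add() pair.

#include <linux/device.h>

struct foo_hw;
int foo_hw_enable(struct foo_hw *hw);		/* hypothetical setup */
void foo_hw_disable(struct foo_hw *hw);		/* hypothetical cleanup */

static void foo_teardown(void *data)
{
	foo_hw_disable(data);
}

static int foo_setup(struct device *dev, struct foo_hw *hw)
{
	int ret;

	ret = foo_hw_enable(hw);
	if (ret)
		return ret;

	/* If registration fails, foo_teardown(hw) is called immediately. */
	return devm_add_action_or_reset(dev, foo_teardown, hw);
}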
- -Signed-off-by: Andy Shevchenko -Signed-off-by: Srinivas Kandagatla -Link: https://lore.kernel.org/r/20220220151527.17216-3-srinivas.kandagatla@linaro.org -Signed-off-by: Greg Kroah-Hartman ---- - drivers/nvmem/core.c | 22 +++++++++------------- - 1 file changed, 9 insertions(+), 13 deletions(-) - ---- a/drivers/nvmem/core.c -+++ b/drivers/nvmem/core.c -@@ -905,9 +905,9 @@ void nvmem_unregister(struct nvmem_devic - } - EXPORT_SYMBOL_GPL(nvmem_unregister); - --static void devm_nvmem_release(struct device *dev, void *res) -+static void devm_nvmem_unregister(void *nvmem) - { -- nvmem_unregister(*(struct nvmem_device **)res); -+ nvmem_unregister(nvmem); - } - - /** -@@ -924,20 +924,16 @@ static void devm_nvmem_release(struct de - struct nvmem_device *devm_nvmem_register(struct device *dev, - const struct nvmem_config *config) - { -- struct nvmem_device **ptr, *nvmem; -- -- ptr = devres_alloc(devm_nvmem_release, sizeof(*ptr), GFP_KERNEL); -- if (!ptr) -- return ERR_PTR(-ENOMEM); -+ struct nvmem_device *nvmem; -+ int ret; - - nvmem = nvmem_register(config); -+ if (IS_ERR(nvmem)) -+ return nvmem; - -- if (!IS_ERR(nvmem)) { -- *ptr = nvmem; -- devres_add(dev, ptr); -- } else { -- devres_free(ptr); -- } -+ ret = devm_add_action_or_reset(dev, devm_nvmem_unregister, nvmem); -+ if (ret) -+ return ERR_PTR(ret); - - return nvmem; - } diff --git a/target/linux/generic/backport-6.1/804-v5.18-0003-nvmem-core-Check-input-parameter-for-NULL-in-nvmem_u.patch b/target/linux/generic/backport-6.1/804-v5.18-0003-nvmem-core-Check-input-parameter-for-NULL-in-nvmem_u.patch deleted file mode 100644 index 4f471f266738..000000000000 --- a/target/linux/generic/backport-6.1/804-v5.18-0003-nvmem-core-Check-input-parameter-for-NULL-in-nvmem_u.patch +++ /dev/null @@ -1,30 +0,0 @@ -From 8c751e0d9a5264376935a84429a2d468c8877d99 Mon Sep 17 00:00:00 2001 -From: Andy Shevchenko -Date: Sun, 20 Feb 2022 15:15:17 +0000 -Subject: [PATCH] nvmem: core: Check input parameter for NULL in - nvmem_unregister() - -nvmem_unregister() frees resources and standard pattern is to allow -caller to not care if it's NULL or not. This will reduce burden on -the callers to perform this check. - -Signed-off-by: Andy Shevchenko -Signed-off-by: Srinivas Kandagatla -Link: https://lore.kernel.org/r/20220220151527.17216-4-srinivas.kandagatla@linaro.org -Signed-off-by: Greg Kroah-Hartman ---- - drivers/nvmem/core.c | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - ---- a/drivers/nvmem/core.c -+++ b/drivers/nvmem/core.c -@@ -901,7 +901,8 @@ static void nvmem_device_release(struct - */ - void nvmem_unregister(struct nvmem_device *nvmem) - { -- kref_put(&nvmem->refcnt, nvmem_device_release); -+ if (nvmem) -+ kref_put(&nvmem->refcnt, nvmem_device_release); - } - EXPORT_SYMBOL_GPL(nvmem_unregister); - diff --git a/target/linux/generic/backport-6.1/804-v5.18-0004-nvmem-qfprom-fix-kerneldoc-warning.patch b/target/linux/generic/backport-6.1/804-v5.18-0004-nvmem-qfprom-fix-kerneldoc-warning.patch deleted file mode 100644 index c98f8e9d545e..000000000000 --- a/target/linux/generic/backport-6.1/804-v5.18-0004-nvmem-qfprom-fix-kerneldoc-warning.patch +++ /dev/null @@ -1,29 +0,0 @@ -From 05196facc052385960028ac634447ecf6c764ec3 Mon Sep 17 00:00:00 2001 -From: Srinivas Kandagatla -Date: Sun, 20 Feb 2022 15:15:18 +0000 -Subject: [PATCH] nvmem: qfprom: fix kerneldoc warning - -This patch fixes below kernel doc warning, -warning: expecting prototype for qfprom_efuse_reg_write(). -Prototype was for qfprom_reg_write() instead - -No code changes. 
- -Signed-off-by: Srinivas Kandagatla -Link: https://lore.kernel.org/r/20220220151527.17216-5-srinivas.kandagatla@linaro.org -Signed-off-by: Greg Kroah-Hartman ---- - drivers/nvmem/qfprom.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/nvmem/qfprom.c -+++ b/drivers/nvmem/qfprom.c -@@ -244,7 +244,7 @@ err_clk_prepared: - } - - /** -- * qfprom_efuse_reg_write() - Write to fuses. -+ * qfprom_reg_write() - Write to fuses. - * @context: Our driver data. - * @reg: The offset to write at. - * @_val: Pointer to data to write. diff --git a/target/linux/generic/backport-6.1/804-v5.18-0005-nvmem-sunxi_sid-Add-support-for-D1-variant.patch b/target/linux/generic/backport-6.1/804-v5.18-0005-nvmem-sunxi_sid-Add-support-for-D1-variant.patch deleted file mode 100644 index 6aad6af0800c..000000000000 --- a/target/linux/generic/backport-6.1/804-v5.18-0005-nvmem-sunxi_sid-Add-support-for-D1-variant.patch +++ /dev/null @@ -1,38 +0,0 @@ -From 07ae4fde9efada7878e1383d6ccc7da70315ca23 Mon Sep 17 00:00:00 2001 -From: Samuel Holland -Date: Sun, 20 Feb 2022 15:15:20 +0000 -Subject: [PATCH] nvmem: sunxi_sid: Add support for D1 variant - -D1 has a smaller eFuse block than some other recent SoCs, and it no -longer requires a workaround to read the eFuse data. - -Signed-off-by: Samuel Holland -Signed-off-by: Srinivas Kandagatla -Link: https://lore.kernel.org/r/20220220151527.17216-7-srinivas.kandagatla@linaro.org -Signed-off-by: Greg Kroah-Hartman ---- - drivers/nvmem/sunxi_sid.c | 6 ++++++ - 1 file changed, 6 insertions(+) - ---- a/drivers/nvmem/sunxi_sid.c -+++ b/drivers/nvmem/sunxi_sid.c -@@ -184,6 +184,11 @@ static const struct sunxi_sid_cfg sun8i_ - .need_register_readout = true, - }; - -+static const struct sunxi_sid_cfg sun20i_d1_cfg = { -+ .value_offset = 0x200, -+ .size = 0x100, -+}; -+ - static const struct sunxi_sid_cfg sun50i_a64_cfg = { - .value_offset = 0x200, - .size = 0x100, -@@ -200,6 +205,7 @@ static const struct of_device_id sunxi_s - { .compatible = "allwinner,sun7i-a20-sid", .data = &sun7i_a20_cfg }, - { .compatible = "allwinner,sun8i-a83t-sid", .data = &sun50i_a64_cfg }, - { .compatible = "allwinner,sun8i-h3-sid", .data = &sun8i_h3_cfg }, -+ { .compatible = "allwinner,sun20i-d1-sid", .data = &sun20i_d1_cfg }, - { .compatible = "allwinner,sun50i-a64-sid", .data = &sun50i_a64_cfg }, - { .compatible = "allwinner,sun50i-h5-sid", .data = &sun50i_a64_cfg }, - { .compatible = "allwinner,sun50i-h6-sid", .data = &sun50i_h6_cfg }, diff --git a/target/linux/generic/backport-6.1/804-v5.18-0006-nvmem-meson-mx-efuse-replace-unnecessary-devm_kstrdu.patch b/target/linux/generic/backport-6.1/804-v5.18-0006-nvmem-meson-mx-efuse-replace-unnecessary-devm_kstrdu.patch deleted file mode 100644 index a73b42c5de3c..000000000000 --- a/target/linux/generic/backport-6.1/804-v5.18-0006-nvmem-meson-mx-efuse-replace-unnecessary-devm_kstrdu.patch +++ /dev/null @@ -1,28 +0,0 @@ -From 4dc8d89faed9bb05f116fa1794fc955b14910386 Mon Sep 17 00:00:00 2001 -From: Xiaoke Wang -Date: Sun, 20 Feb 2022 15:15:21 +0000 -Subject: [PATCH] nvmem: meson-mx-efuse: replace unnecessary devm_kstrdup() - -Replace unnecessary devm_kstrdup() so to avoid redundant memory allocation. 
- -Suggested-by: Martin Blumenstingl -Signed-off-by: Xiaoke Wang -Signed-off-by: Srinivas Kandagatla -Link: https://lore.kernel.org/r/20220220151527.17216-8-srinivas.kandagatla@linaro.org -Signed-off-by: Greg Kroah-Hartman ---- - drivers/nvmem/meson-mx-efuse.c | 3 +-- - 1 file changed, 1 insertion(+), 2 deletions(-) - ---- a/drivers/nvmem/meson-mx-efuse.c -+++ b/drivers/nvmem/meson-mx-efuse.c -@@ -209,8 +209,7 @@ static int meson_mx_efuse_probe(struct p - if (IS_ERR(efuse->base)) - return PTR_ERR(efuse->base); - -- efuse->config.name = devm_kstrdup(&pdev->dev, drvdata->name, -- GFP_KERNEL); -+ efuse->config.name = drvdata->name; - efuse->config.owner = THIS_MODULE; - efuse->config.dev = &pdev->dev; - efuse->config.priv = efuse; diff --git a/target/linux/generic/backport-6.1/804-v5.18-0007-nvmem-add-driver-for-Layerscape-SFP-Security-Fuse-Pr.patch b/target/linux/generic/backport-6.1/804-v5.18-0007-nvmem-add-driver-for-Layerscape-SFP-Security-Fuse-Pr.patch deleted file mode 100644 index 6afb68b3f91b..000000000000 --- a/target/linux/generic/backport-6.1/804-v5.18-0007-nvmem-add-driver-for-Layerscape-SFP-Security-Fuse-Pr.patch +++ /dev/null @@ -1,139 +0,0 @@ -From f78451012b9e159afdba31c3eb69f223a9f42adc Mon Sep 17 00:00:00 2001 -From: Michael Walle -Date: Sun, 20 Feb 2022 15:15:23 +0000 -Subject: [PATCH] nvmem: add driver for Layerscape SFP (Security Fuse - Processor) - -Add support for the Security Fuse Processor found on Layerscape SoCs. -This driver implements basic read access. - -Signed-off-by: Michael Walle -Signed-off-by: Srinivas Kandagatla -Link: https://lore.kernel.org/r/20220220151527.17216-10-srinivas.kandagatla@linaro.org -Signed-off-by: Greg Kroah-Hartman ---- - drivers/nvmem/Kconfig | 12 +++++ - drivers/nvmem/Makefile | 2 + - drivers/nvmem/layerscape-sfp.c | 89 ++++++++++++++++++++++++++++++++++ - 3 files changed, 103 insertions(+) - create mode 100644 drivers/nvmem/layerscape-sfp.c - ---- a/drivers/nvmem/Kconfig -+++ b/drivers/nvmem/Kconfig -@@ -300,4 +300,16 @@ config NVMEM_BRCM_NVRAM - This driver provides support for Broadcom's NVRAM that can be accessed - using I/O mapping. - -+config NVMEM_LAYERSCAPE_SFP -+ tristate "Layerscape SFP (Security Fuse Processor) support" -+ depends on ARCH_LAYERSCAPE || COMPILE_TEST -+ depends on HAS_IOMEM -+ help -+ This driver provides support to read the eFuses on Freescale -+ Layerscape SoC's. For example, the vendor provides a per part -+ unique ID there. -+ -+ This driver can also be built as a module. If so, the module -+ will be called layerscape-sfp. -+ - endif ---- a/drivers/nvmem/Makefile -+++ b/drivers/nvmem/Makefile -@@ -61,3 +61,5 @@ obj-$(CONFIG_NVMEM_RMEM) += nvmem-rmem. 
- nvmem-rmem-y := rmem.o - obj-$(CONFIG_NVMEM_BRCM_NVRAM) += nvmem_brcm_nvram.o - nvmem_brcm_nvram-y := brcm_nvram.o -+obj-$(CONFIG_NVMEM_LAYERSCAPE_SFP) += nvmem-layerscape-sfp.o -+nvmem-layerscape-sfp-y := layerscape-sfp.o ---- /dev/null -+++ b/drivers/nvmem/layerscape-sfp.c -@@ -0,0 +1,89 @@ -+// SPDX-License-Identifier: GPL-2.0-only -+/* -+ * Layerscape SFP driver -+ * -+ * Copyright (c) 2022 Michael Walle -+ * -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define LAYERSCAPE_SFP_OTP_OFFSET 0x0200 -+ -+struct layerscape_sfp_priv { -+ void __iomem *base; -+}; -+ -+struct layerscape_sfp_data { -+ int size; -+}; -+ -+static int layerscape_sfp_read(void *context, unsigned int offset, void *val, -+ size_t bytes) -+{ -+ struct layerscape_sfp_priv *priv = context; -+ -+ memcpy_fromio(val, priv->base + LAYERSCAPE_SFP_OTP_OFFSET + offset, -+ bytes); -+ -+ return 0; -+} -+ -+static struct nvmem_config layerscape_sfp_nvmem_config = { -+ .name = "fsl-sfp", -+ .reg_read = layerscape_sfp_read, -+}; -+ -+static int layerscape_sfp_probe(struct platform_device *pdev) -+{ -+ const struct layerscape_sfp_data *data; -+ struct layerscape_sfp_priv *priv; -+ struct nvmem_device *nvmem; -+ -+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); -+ if (!priv) -+ return -ENOMEM; -+ -+ priv->base = devm_platform_ioremap_resource(pdev, 0); -+ if (IS_ERR(priv->base)) -+ return PTR_ERR(priv->base); -+ -+ data = device_get_match_data(&pdev->dev); -+ -+ layerscape_sfp_nvmem_config.size = data->size; -+ layerscape_sfp_nvmem_config.dev = &pdev->dev; -+ layerscape_sfp_nvmem_config.priv = priv; -+ -+ nvmem = devm_nvmem_register(&pdev->dev, &layerscape_sfp_nvmem_config); -+ -+ return PTR_ERR_OR_ZERO(nvmem); -+} -+ -+static const struct layerscape_sfp_data ls1028a_data = { -+ .size = 0x88, -+}; -+ -+static const struct of_device_id layerscape_sfp_dt_ids[] = { -+ { .compatible = "fsl,ls1028a-sfp", .data = &ls1028a_data }, -+ {}, -+}; -+MODULE_DEVICE_TABLE(of, layerscape_sfp_dt_ids); -+ -+static struct platform_driver layerscape_sfp_driver = { -+ .probe = layerscape_sfp_probe, -+ .driver = { -+ .name = "layerscape_sfp", -+ .of_match_table = layerscape_sfp_dt_ids, -+ }, -+}; -+module_platform_driver(layerscape_sfp_driver); -+ -+MODULE_AUTHOR("Michael Walle "); -+MODULE_DESCRIPTION("Layerscape Security Fuse Processor driver"); -+MODULE_LICENSE("GPL"); diff --git a/target/linux/generic/backport-6.1/804-v5.18-0008-nvmem-qfprom-Increase-fuse-blow-timeout-to-prevent-w.patch b/target/linux/generic/backport-6.1/804-v5.18-0008-nvmem-qfprom-Increase-fuse-blow-timeout-to-prevent-w.patch deleted file mode 100644 index 74bd4a7eb676..000000000000 --- a/target/linux/generic/backport-6.1/804-v5.18-0008-nvmem-qfprom-Increase-fuse-blow-timeout-to-prevent-w.patch +++ /dev/null @@ -1,32 +0,0 @@ -From bc5c75e0a5a9400f81a987cc720100ac475fa4d8 Mon Sep 17 00:00:00 2001 -From: Knox Chiou -Date: Wed, 23 Feb 2022 22:35:00 +0000 -Subject: [PATCH] nvmem: qfprom: Increase fuse blow timeout to prevent write - fail - -sc7180 blow fuses got slightly chances to hit qfprom_reg_write timeout. -Current timeout is simply too low. Since blowing fuses is a -very rare operation, so the risk associated with overestimating this -number is low. -Increase fuse blow timeout from 1ms to 10ms. 
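For readers unfamiliar with how such constants are consumed, a hedged sketch of the usual poll loop; the register and bit names here are illustrative, not the exact qfprom.c code. The timeout raised by this patch is the last argument to readl_poll_timeout().

#include <linux/bits.h>
#include <linux/iopoll.h>

#define FOO_BLOW_POLL_US	100	/* poll interval */
#define FOO_BLOW_TIMEOUT_US	10000	/* raised from 1000 us to 10 ms */

static int foo_wait_blow_done(void __iomem *status_reg)
{
	u32 status;

	/* Succeeds when the (illustrative) busy bit clears, else -ETIMEDOUT. */
	return readl_poll_timeout(status_reg, status, !(status & BIT(0)),
				  FOO_BLOW_POLL_US, FOO_BLOW_TIMEOUT_US);
}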
- -Reviewed-by: Douglas Anderson -Signed-off-by: Knox Chiou -Signed-off-by: Srinivas Kandagatla -Link: https://lore.kernel.org/r/20220223223502.29454-2-srinivas.kandagatla@linaro.org -Signed-off-by: Greg Kroah-Hartman ---- - drivers/nvmem/qfprom.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/nvmem/qfprom.c -+++ b/drivers/nvmem/qfprom.c -@@ -22,7 +22,7 @@ - - /* Amount of time required to hold charge to blow fuse in micro-seconds */ - #define QFPROM_FUSE_BLOW_POLL_US 100 --#define QFPROM_FUSE_BLOW_TIMEOUT_US 1000 -+#define QFPROM_FUSE_BLOW_TIMEOUT_US 10000 - - #define QFPROM_BLOW_STATUS_OFFSET 0x048 - #define QFPROM_BLOW_STATUS_BUSY 0x1 diff --git a/target/linux/generic/backport-6.1/804-v5.18-0009-nvmem-Add-driver-for-OCOTP-in-Sunplus-SP7021.patch b/target/linux/generic/backport-6.1/804-v5.18-0009-nvmem-Add-driver-for-OCOTP-in-Sunplus-SP7021.patch deleted file mode 100644 index 2a9dc7494787..000000000000 --- a/target/linux/generic/backport-6.1/804-v5.18-0009-nvmem-Add-driver-for-OCOTP-in-Sunplus-SP7021.patch +++ /dev/null @@ -1,291 +0,0 @@ -From 8747ec2e9762ed9ae53b3a590938f454b6a1abdf Mon Sep 17 00:00:00 2001 -From: Vincent Shih -Date: Wed, 23 Feb 2022 22:35:01 +0000 -Subject: [PATCH] nvmem: Add driver for OCOTP in Sunplus SP7021 - -Add driver for OCOTP in Sunplus SP7021 - -Signed-off-by: Vincent Shih -Signed-off-by: Srinivas Kandagatla -Link: https://lore.kernel.org/r/20220223223502.29454-3-srinivas.kandagatla@linaro.org -Signed-off-by: Greg Kroah-Hartman ---- - MAINTAINERS | 5 + - drivers/nvmem/Kconfig | 12 ++ - drivers/nvmem/Makefile | 2 + - drivers/nvmem/sunplus-ocotp.c | 228 ++++++++++++++++++++++++++++++++++ - 4 files changed, 247 insertions(+) - create mode 100644 drivers/nvmem/sunplus-ocotp.c - ---- a/MAINTAINERS -+++ b/MAINTAINERS -@@ -17962,6 +17962,11 @@ L: netdev@vger.kernel.org - S: Maintained - F: drivers/net/ethernet/dlink/sundance.c - -+SUNPLUS OCOTP DRIVER -+M: Vincent Shih -+S: Maintained -+F: drivers/nvmem/sunplus-ocotp.c -+ - SUPERH - M: Yoshinori Sato - M: Rich Felker ---- a/drivers/nvmem/Kconfig -+++ b/drivers/nvmem/Kconfig -@@ -312,4 +312,16 @@ config NVMEM_LAYERSCAPE_SFP - This driver can also be built as a module. If so, the module - will be called layerscape-sfp. - -+config NVMEM_SUNPLUS_OCOTP -+ tristate "Sunplus SoC OTP support" -+ depends on SOC_SP7021 || COMPILE_TEST -+ depends on HAS_IOMEM -+ help -+ This is a driver for the On-chip OTP controller (OCOTP) available -+ on Sunplus SoCs. It provides access to 128 bytes of one-time -+ programmable eFuse. -+ -+ This driver can also be built as a module. If so, the module -+ will be called nvmem-sunplus-ocotp. -+ - endif ---- a/drivers/nvmem/Makefile -+++ b/drivers/nvmem/Makefile -@@ -63,3 +63,5 @@ obj-$(CONFIG_NVMEM_BRCM_NVRAM) += nvmem_ - nvmem_brcm_nvram-y := brcm_nvram.o - obj-$(CONFIG_NVMEM_LAYERSCAPE_SFP) += nvmem-layerscape-sfp.o - nvmem-layerscape-sfp-y := layerscape-sfp.o -+obj-$(CONFIG_NVMEM_SUNPLUS_OCOTP) += nvmem_sunplus_ocotp.o -+nvmem_sunplus_ocotp-y := sunplus-ocotp.o ---- /dev/null -+++ b/drivers/nvmem/sunplus-ocotp.c -@@ -0,0 +1,228 @@ -+// SPDX-License-Identifier: GPL-2.0 -+ -+/* -+ * The OCOTP driver for Sunplus SP7021 -+ * -+ * Copyright (C) 2019 Sunplus Technology Inc., All rights reserved. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/* -+ * OTP memory -+ * Each bank contains 4 words (32 bits). -+ * Bank 0 starts at offset 0 from the base. 
-+ */ -+ -+#define OTP_WORDS_PER_BANK 4 -+#define OTP_WORD_SIZE sizeof(u32) -+#define OTP_BIT_ADDR_OF_BANK (8 * OTP_WORD_SIZE * OTP_WORDS_PER_BANK) -+#define QAC628_OTP_NUM_BANKS 8 -+#define QAC628_OTP_SIZE (QAC628_OTP_NUM_BANKS * OTP_WORDS_PER_BANK * OTP_WORD_SIZE) -+#define OTP_READ_TIMEOUT_US 200000 -+ -+/* HB_GPIO */ -+#define ADDRESS_8_DATA 0x20 -+ -+/* OTP_RX */ -+#define OTP_CONTROL_2 0x48 -+#define OTP_RD_PERIOD GENMASK(15, 8) -+#define OTP_RD_PERIOD_MASK ~GENMASK(15, 8) -+#define CPU_CLOCK FIELD_PREP(OTP_RD_PERIOD, 30) -+#define SEL_BAK_KEY2 BIT(5) -+#define SEL_BAK_KEY2_MASK ~BIT(5) -+#define SW_TRIM_EN BIT(4) -+#define SW_TRIM_EN_MASK ~BIT(4) -+#define SEL_BAK_KEY BIT(3) -+#define SEL_BAK_KEY_MASK ~BIT(3) -+#define OTP_READ BIT(2) -+#define OTP_LOAD_SECURE_DATA BIT(1) -+#define OTP_LOAD_SECURE_DATA_MASK ~BIT(1) -+#define OTP_DO_CRC BIT(0) -+#define OTP_DO_CRC_MASK ~BIT(0) -+#define OTP_STATUS 0x4c -+#define OTP_READ_DONE BIT(4) -+#define OTP_READ_DONE_MASK ~BIT(4) -+#define OTP_LOAD_SECURE_DONE_MASK ~BIT(2) -+#define OTP_READ_ADDRESS 0x50 -+ -+enum base_type { -+ HB_GPIO, -+ OTPRX, -+ BASEMAX, -+}; -+ -+struct sp_ocotp_priv { -+ struct device *dev; -+ void __iomem *base[BASEMAX]; -+ struct clk *clk; -+}; -+ -+struct sp_ocotp_data { -+ int size; -+}; -+ -+const struct sp_ocotp_data sp_otp_v0 = { -+ .size = QAC628_OTP_SIZE, -+}; -+ -+static int sp_otp_read_real(struct sp_ocotp_priv *otp, int addr, char *value) -+{ -+ unsigned int addr_data; -+ unsigned int byte_shift; -+ unsigned int status; -+ int ret; -+ -+ addr_data = addr % (OTP_WORD_SIZE * OTP_WORDS_PER_BANK); -+ addr_data = addr_data / OTP_WORD_SIZE; -+ -+ byte_shift = addr % (OTP_WORD_SIZE * OTP_WORDS_PER_BANK); -+ byte_shift = byte_shift % OTP_WORD_SIZE; -+ -+ addr = addr / (OTP_WORD_SIZE * OTP_WORDS_PER_BANK); -+ addr = addr * OTP_BIT_ADDR_OF_BANK; -+ -+ writel(readl(otp->base[OTPRX] + OTP_STATUS) & OTP_READ_DONE_MASK & -+ OTP_LOAD_SECURE_DONE_MASK, otp->base[OTPRX] + OTP_STATUS); -+ writel(addr, otp->base[OTPRX] + OTP_READ_ADDRESS); -+ writel(readl(otp->base[OTPRX] + OTP_CONTROL_2) | OTP_READ, -+ otp->base[OTPRX] + OTP_CONTROL_2); -+ writel(readl(otp->base[OTPRX] + OTP_CONTROL_2) & SEL_BAK_KEY2_MASK & SW_TRIM_EN_MASK -+ & SEL_BAK_KEY_MASK & OTP_LOAD_SECURE_DATA_MASK & OTP_DO_CRC_MASK, -+ otp->base[OTPRX] + OTP_CONTROL_2); -+ writel((readl(otp->base[OTPRX] + OTP_CONTROL_2) & OTP_RD_PERIOD_MASK) | CPU_CLOCK, -+ otp->base[OTPRX] + OTP_CONTROL_2); -+ -+ ret = readl_poll_timeout(otp->base[OTPRX] + OTP_STATUS, status, -+ status & OTP_READ_DONE, 10, OTP_READ_TIMEOUT_US); -+ -+ if (ret < 0) -+ return ret; -+ -+ *value = (readl(otp->base[HB_GPIO] + ADDRESS_8_DATA + addr_data * OTP_WORD_SIZE) -+ >> (8 * byte_shift)) & 0xff; -+ -+ return ret; -+} -+ -+static int sp_ocotp_read(void *priv, unsigned int offset, void *value, size_t bytes) -+{ -+ struct sp_ocotp_priv *otp = priv; -+ unsigned int addr; -+ char *buf = value; -+ char val[4]; -+ int ret; -+ -+ ret = clk_enable(otp->clk); -+ if (ret) -+ return ret; -+ -+ *buf = 0; -+ for (addr = offset; addr < (offset + bytes); addr++) { -+ ret = sp_otp_read_real(otp, addr, val); -+ if (ret < 0) { -+ dev_err(otp->dev, "OTP read fail:%d at %d", ret, addr); -+ goto disable_clk; -+ } -+ -+ *buf++ = *val; -+ } -+ -+disable_clk: -+ clk_disable(otp->clk); -+ -+ return ret; -+} -+ -+static struct nvmem_config sp_ocotp_nvmem_config = { -+ .name = "sp-ocotp", -+ .read_only = true, -+ .word_size = 1, -+ .size = QAC628_OTP_SIZE, -+ .stride = 1, -+ .reg_read = sp_ocotp_read, -+ .owner = THIS_MODULE, 
-+}; -+ -+static int sp_ocotp_probe(struct platform_device *pdev) -+{ -+ struct device *dev = &pdev->dev; -+ struct nvmem_device *nvmem; -+ struct sp_ocotp_priv *otp; -+ struct resource *res; -+ int ret; -+ -+ otp = devm_kzalloc(dev, sizeof(*otp), GFP_KERNEL); -+ if (!otp) -+ return -ENOMEM; -+ -+ otp->dev = dev; -+ -+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hb_gpio"); -+ otp->base[HB_GPIO] = devm_ioremap_resource(dev, res); -+ if (IS_ERR(otp->base[HB_GPIO])) -+ return PTR_ERR(otp->base[HB_GPIO]); -+ -+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "otprx"); -+ otp->base[OTPRX] = devm_ioremap_resource(dev, res); -+ if (IS_ERR(otp->base[OTPRX])) -+ return PTR_ERR(otp->base[OTPRX]); -+ -+ otp->clk = devm_clk_get(&pdev->dev, NULL); -+ if (IS_ERR(otp->clk)) -+ return dev_err_probe(&pdev->dev, PTR_ERR(otp->clk), -+ "devm_clk_get fail\n"); -+ -+ ret = clk_prepare(otp->clk); -+ if (ret < 0) { -+ dev_err(dev, "failed to prepare clk: %d\n", ret); -+ return ret; -+ } -+ -+ sp_ocotp_nvmem_config.priv = otp; -+ sp_ocotp_nvmem_config.dev = dev; -+ -+ nvmem = devm_nvmem_register(dev, &sp_ocotp_nvmem_config); -+ if (IS_ERR(nvmem)) -+ return dev_err_probe(&pdev->dev, PTR_ERR(nvmem), -+ "register nvmem device fail\n"); -+ -+ platform_set_drvdata(pdev, nvmem); -+ -+ dev_dbg(dev, "banks:%d x wpb:%d x wsize:%d = %d", -+ (int)QAC628_OTP_NUM_BANKS, (int)OTP_WORDS_PER_BANK, -+ (int)OTP_WORD_SIZE, (int)QAC628_OTP_SIZE); -+ -+ dev_info(dev, "by Sunplus (C) 2020"); -+ -+ return 0; -+} -+ -+static const struct of_device_id sp_ocotp_dt_ids[] = { -+ { .compatible = "sunplus,sp7021-ocotp", .data = &sp_otp_v0 }, -+ { } -+}; -+MODULE_DEVICE_TABLE(of, sp_ocotp_dt_ids); -+ -+static struct platform_driver sp_otp_driver = { -+ .probe = sp_ocotp_probe, -+ .driver = { -+ .name = "sunplus,sp7021-ocotp", -+ .of_match_table = sp_ocotp_dt_ids, -+ } -+}; -+module_platform_driver(sp_otp_driver); -+ -+MODULE_AUTHOR("Vincent Shih "); -+MODULE_DESCRIPTION("Sunplus On-Chip OTP driver"); -+MODULE_LICENSE("GPL"); -+ diff --git a/target/linux/generic/backport-6.1/804-v5.18-0010-nvmem-brcm_nvram-parse-NVRAM-content-into-NVMEM-cell.patch b/target/linux/generic/backport-6.1/804-v5.18-0010-nvmem-brcm_nvram-parse-NVRAM-content-into-NVMEM-cell.patch deleted file mode 100644 index 99781b3a7b3a..000000000000 --- a/target/linux/generic/backport-6.1/804-v5.18-0010-nvmem-brcm_nvram-parse-NVRAM-content-into-NVMEM-cell.patch +++ /dev/null @@ -1,146 +0,0 @@ -From 6e977eaa8280e957b87904b536661550f2a6b3e8 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= -Date: Fri, 25 Feb 2022 17:58:20 +0000 -Subject: [PATCH] nvmem: brcm_nvram: parse NVRAM content into NVMEM cells -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -NVRAM consist of header and NUL separated key-value pairs. Parse it and -create NVMEM cell for every key-value entry. 
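To make the layout concrete, a small hedged sketch of walking such a blob of NUL-separated "key=value" pairs (e.g. "boardnum=1\0et0macaddr=...\0"); the real parser added below additionally records each value's offset and length as an NVMEM cell.

#include <linux/printk.h>
#include <linux/string.h>

static void foo_walk_nvram_pairs(char *data, size_t len)
{
	char *var, *eq;

	/* Entries are "name=value" strings; an empty string ends the list. */
	for (var = data; var < data + len && *var; var += strlen(var) + 1) {
		eq = strchr(var, '=');
		if (!eq)
			break;
		pr_info("name=%.*s value=%s\n", (int)(eq - var), var, eq + 1);
	}
}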
- -Signed-off-by: Rafał Miłecki -Signed-off-by: Srinivas Kandagatla -Link: https://lore.kernel.org/r/20220225175822.8293-3-srinivas.kandagatla@linaro.org -Signed-off-by: Greg Kroah-Hartman ---- - drivers/nvmem/brcm_nvram.c | 90 ++++++++++++++++++++++++++++++++++++++ - 1 file changed, 90 insertions(+) - ---- a/drivers/nvmem/brcm_nvram.c -+++ b/drivers/nvmem/brcm_nvram.c -@@ -6,12 +6,26 @@ - #include - #include - #include -+#include - #include - #include -+#include -+ -+#define NVRAM_MAGIC "FLSH" - - struct brcm_nvram { - struct device *dev; - void __iomem *base; -+ struct nvmem_cell_info *cells; -+ int ncells; -+}; -+ -+struct brcm_nvram_header { -+ char magic[4]; -+ __le32 len; -+ __le32 crc_ver_init; /* 0:7 crc, 8:15 ver, 16:31 sdram_init */ -+ __le32 config_refresh; /* 0:15 sdram_config, 16:31 sdram_refresh */ -+ __le32 config_ncdl; /* ncdl values for memc */ - }; - - static int brcm_nvram_read(void *context, unsigned int offset, void *val, -@@ -26,6 +40,75 @@ static int brcm_nvram_read(void *context - return 0; - } - -+static int brcm_nvram_add_cells(struct brcm_nvram *priv, uint8_t *data, -+ size_t len) -+{ -+ struct device *dev = priv->dev; -+ char *var, *value, *eq; -+ int idx; -+ -+ priv->ncells = 0; -+ for (var = data + sizeof(struct brcm_nvram_header); -+ var < (char *)data + len && *var; -+ var += strlen(var) + 1) { -+ priv->ncells++; -+ } -+ -+ priv->cells = devm_kcalloc(dev, priv->ncells, sizeof(*priv->cells), GFP_KERNEL); -+ if (!priv->cells) -+ return -ENOMEM; -+ -+ for (var = data + sizeof(struct brcm_nvram_header), idx = 0; -+ var < (char *)data + len && *var; -+ var = value + strlen(value) + 1, idx++) { -+ eq = strchr(var, '='); -+ if (!eq) -+ break; -+ *eq = '\0'; -+ value = eq + 1; -+ -+ priv->cells[idx].name = devm_kstrdup(dev, var, GFP_KERNEL); -+ if (!priv->cells[idx].name) -+ return -ENOMEM; -+ priv->cells[idx].offset = value - (char *)data; -+ priv->cells[idx].bytes = strlen(value); -+ } -+ -+ return 0; -+} -+ -+static int brcm_nvram_parse(struct brcm_nvram *priv) -+{ -+ struct device *dev = priv->dev; -+ struct brcm_nvram_header header; -+ uint8_t *data; -+ size_t len; -+ int err; -+ -+ memcpy_fromio(&header, priv->base, sizeof(header)); -+ -+ if (memcmp(header.magic, NVRAM_MAGIC, 4)) { -+ dev_err(dev, "Invalid NVRAM magic\n"); -+ return -EINVAL; -+ } -+ -+ len = le32_to_cpu(header.len); -+ -+ data = kcalloc(1, len, GFP_KERNEL); -+ memcpy_fromio(data, priv->base, len); -+ data[len - 1] = '\0'; -+ -+ err = brcm_nvram_add_cells(priv, data, len); -+ if (err) { -+ dev_err(dev, "Failed to add cells: %d\n", err); -+ return err; -+ } -+ -+ kfree(data); -+ -+ return 0; -+} -+ - static int brcm_nvram_probe(struct platform_device *pdev) - { - struct nvmem_config config = { -@@ -35,6 +118,7 @@ static int brcm_nvram_probe(struct platf - struct device *dev = &pdev->dev; - struct resource *res; - struct brcm_nvram *priv; -+ int err; - - priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); - if (!priv) -@@ -46,7 +130,13 @@ static int brcm_nvram_probe(struct platf - if (IS_ERR(priv->base)) - return PTR_ERR(priv->base); - -+ err = brcm_nvram_parse(priv); -+ if (err) -+ return err; -+ - config.dev = dev; -+ config.cells = priv->cells; -+ config.ncells = priv->ncells; - config.priv = priv; - config.size = resource_size(res); - diff --git a/target/linux/generic/backport-6.1/805-v5.19-0001-nvmem-bcm-ocotp-mark-ACPI-device-ID-table-as-maybe-u.patch b/target/linux/generic/backport-6.1/805-v5.19-0001-nvmem-bcm-ocotp-mark-ACPI-device-ID-table-as-maybe-u.patch deleted file mode 100644 index 
ef3107db946c..000000000000 --- a/target/linux/generic/backport-6.1/805-v5.19-0001-nvmem-bcm-ocotp-mark-ACPI-device-ID-table-as-maybe-u.patch +++ /dev/null @@ -1,32 +0,0 @@ -From 6bd0ffeaa389866089e9573b2298ae58d6359b75 Mon Sep 17 00:00:00 2001 -From: Krzysztof Kozlowski -Date: Mon, 21 Mar 2022 12:03:24 +0100 -Subject: [PATCH] nvmem: bcm-ocotp: mark ACPI device ID table as maybe unused -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -"bcm_otpc_acpi_ids" is used with ACPI_PTR, so a build with !CONFIG_ACPI -has a warning: - - drivers/nvmem/bcm-ocotp.c:247:36: error: - ‘bcm_otpc_acpi_ids’ defined but not used [-Werror=unused-const-variable=] - -Signed-off-by: Krzysztof Kozlowski -Link: https://lore.kernel.org/r/20220321110326.44652-1-krzk@kernel.org -Signed-off-by: Greg Kroah-Hartman ---- - drivers/nvmem/bcm-ocotp.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/nvmem/bcm-ocotp.c -+++ b/drivers/nvmem/bcm-ocotp.c -@@ -244,7 +244,7 @@ static const struct of_device_id bcm_otp - }; - MODULE_DEVICE_TABLE(of, bcm_otpc_dt_ids); - --static const struct acpi_device_id bcm_otpc_acpi_ids[] = { -+static const struct acpi_device_id bcm_otpc_acpi_ids[] __maybe_unused = { - { .id = "BRCM0700", .driver_data = (kernel_ulong_t)&otp_map }, - { .id = "BRCM0701", .driver_data = (kernel_ulong_t)&otp_map_v2 }, - { /* sentinel */ } diff --git a/target/linux/generic/backport-6.1/805-v5.19-0002-nvmem-sunplus-ocotp-staticize-sp_otp_v0.patch b/target/linux/generic/backport-6.1/805-v5.19-0002-nvmem-sunplus-ocotp-staticize-sp_otp_v0.patch deleted file mode 100644 index a84d2316f0cc..000000000000 --- a/target/linux/generic/backport-6.1/805-v5.19-0002-nvmem-sunplus-ocotp-staticize-sp_otp_v0.patch +++ /dev/null @@ -1,30 +0,0 @@ -From 1066f8156351fcd997125257cea47cf805ba4f6d Mon Sep 17 00:00:00 2001 -From: Krzysztof Kozlowski -Date: Mon, 21 Mar 2022 12:03:25 +0100 -Subject: [PATCH] nvmem: sunplus-ocotp: staticize sp_otp_v0 - -The "sp_otp_v0" file scope variable is not used outside, so make it -static to fix warning: - - drivers/nvmem/sunplus-ocotp.c:74:29: sparse: - sparse: symbol 'sp_otp_v0' was not declared. Should it be static? - -Reported-by: kernel test robot -Signed-off-by: Krzysztof Kozlowski -Link: https://lore.kernel.org/r/20220321110326.44652-2-krzk@kernel.org -Signed-off-by: Greg Kroah-Hartman ---- - drivers/nvmem/sunplus-ocotp.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/nvmem/sunplus-ocotp.c -+++ b/drivers/nvmem/sunplus-ocotp.c -@@ -71,7 +71,7 @@ struct sp_ocotp_data { - int size; - }; - --const struct sp_ocotp_data sp_otp_v0 = { -+static const struct sp_ocotp_data sp_otp_v0 = { - .size = QAC628_OTP_SIZE, - }; - diff --git a/target/linux/generic/backport-6.1/805-v5.19-0003-nvmem-sunplus-ocotp-drop-useless-probe-confirmation.patch b/target/linux/generic/backport-6.1/805-v5.19-0003-nvmem-sunplus-ocotp-drop-useless-probe-confirmation.patch deleted file mode 100644 index 886ebc12a9c9..000000000000 --- a/target/linux/generic/backport-6.1/805-v5.19-0003-nvmem-sunplus-ocotp-drop-useless-probe-confirmation.patch +++ /dev/null @@ -1,27 +0,0 @@ -From 874dfbcf219ccc42a2cbd187d087c7db82c3024b Mon Sep 17 00:00:00 2001 -From: Krzysztof Kozlowski -Date: Mon, 21 Mar 2022 12:03:26 +0100 -Subject: [PATCH] nvmem: sunplus-ocotp: drop useless probe confirmation - -Printing probe success is discouraged, because we can use tracing for -this purpose. Remove useless print message after Sunplus OCOTP driver -probe. 
- -Signed-off-by: Krzysztof Kozlowski -Link: https://lore.kernel.org/r/20220321110326.44652-3-krzk@kernel.org -Signed-off-by: Greg Kroah-Hartman ---- - drivers/nvmem/sunplus-ocotp.c | 2 -- - 1 file changed, 2 deletions(-) - ---- a/drivers/nvmem/sunplus-ocotp.c -+++ b/drivers/nvmem/sunplus-ocotp.c -@@ -202,8 +202,6 @@ static int sp_ocotp_probe(struct platfor - (int)QAC628_OTP_NUM_BANKS, (int)OTP_WORDS_PER_BANK, - (int)OTP_WORD_SIZE, (int)QAC628_OTP_SIZE); - -- dev_info(dev, "by Sunplus (C) 2020"); -- - return 0; - } - diff --git a/target/linux/generic/backport-6.1/805-v5.19-0004-nvmem-core-support-passing-DT-node-in-cell-info.patch b/target/linux/generic/backport-6.1/805-v5.19-0004-nvmem-core-support-passing-DT-node-in-cell-info.patch deleted file mode 100644 index 3b1e76147a8b..000000000000 --- a/target/linux/generic/backport-6.1/805-v5.19-0004-nvmem-core-support-passing-DT-node-in-cell-info.patch +++ /dev/null @@ -1,41 +0,0 @@ -From dbc2f62061c6bfba0aee93161ee3194dcee84bd0 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= -Date: Fri, 29 Apr 2022 17:26:46 +0100 -Subject: [PATCH] nvmem: core: support passing DT node in cell info -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Some hardware may have NVMEM cells described in Device Tree using -individual nodes. Let drivers pass such nodes to the NVMEM subsystem so -they can be later used by NVMEM consumers. - -Signed-off-by: Rafał Miłecki -Signed-off-by: Srinivas Kandagatla -Link: https://lore.kernel.org/r/20220429162701.2222-2-srinivas.kandagatla@linaro.org -Signed-off-by: Greg Kroah-Hartman ---- - drivers/nvmem/core.c | 1 + - include/linux/nvmem-consumer.h | 1 + - 2 files changed, 2 insertions(+) - ---- a/drivers/nvmem/core.c -+++ b/drivers/nvmem/core.c -@@ -467,6 +467,7 @@ static int nvmem_cell_info_to_nvmem_cell - - cell->bit_offset = info->bit_offset; - cell->nbits = info->nbits; -+ cell->np = info->np; - - if (cell->nbits) - cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset, ---- a/include/linux/nvmem-consumer.h -+++ b/include/linux/nvmem-consumer.h -@@ -25,6 +25,7 @@ struct nvmem_cell_info { - unsigned int bytes; - unsigned int bit_offset; - unsigned int nbits; -+ struct device_node *np; - }; - - /** diff --git a/target/linux/generic/backport-6.1/805-v5.19-0005-nvmem-brcm_nvram-find-Device-Tree-nodes-for-NVMEM-ce.patch b/target/linux/generic/backport-6.1/805-v5.19-0005-nvmem-brcm_nvram-find-Device-Tree-nodes-for-NVMEM-ce.patch deleted file mode 100644 index a9eacd9419cb..000000000000 --- a/target/linux/generic/backport-6.1/805-v5.19-0005-nvmem-brcm_nvram-find-Device-Tree-nodes-for-NVMEM-ce.patch +++ /dev/null @@ -1,38 +0,0 @@ -From 207775f7e17b8fd0426a2ac4a5b81e4e1d71849e Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= -Date: Fri, 29 Apr 2022 17:26:47 +0100 -Subject: [PATCH] nvmem: brcm_nvram: find Device Tree nodes for NVMEM cells -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -DT binding for Broadcom's NVRAM supports specifying NVMEM cells as NVMEM -device (provider) subnodes. Look for such subnodes when collecing NVMEM -cells. This allows NVMEM consumers to use NVRAM variables. 
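A short consumer-side sketch of what this enables, with a hypothetical function and cell name ("et0macaddr" is an example only): once an NVRAM variable is exposed as a cell referenced from the consumer's DT node, it can be read through the standard NVMEM consumer API.

#include <linux/err.h>
#include <linux/if_ether.h>
#include <linux/nvmem-consumer.h>
#include <linux/slab.h>
#include <linux/string.h>

static int foo_read_mac(struct device *dev, u8 *mac)
{
	struct nvmem_cell *cell;
	size_t len;
	void *buf;

	cell = nvmem_cell_get(dev, "et0macaddr");	/* example cell name */
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);		/* kmalloc'ed copy */
	nvmem_cell_put(cell);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	if (len >= ETH_ALEN)
		memcpy(mac, buf, ETH_ALEN);
	kfree(buf);

	return len >= ETH_ALEN ? 0 : -EINVAL;
}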
- -Signed-off-by: Rafał Miłecki -Signed-off-by: Srinivas Kandagatla -Link: https://lore.kernel.org/r/20220429162701.2222-3-srinivas.kandagatla@linaro.org -Signed-off-by: Greg Kroah-Hartman ---- - drivers/nvmem/brcm_nvram.c | 2 ++ - 1 file changed, 2 insertions(+) - ---- a/drivers/nvmem/brcm_nvram.c -+++ b/drivers/nvmem/brcm_nvram.c -@@ -8,6 +8,7 @@ - #include - #include - #include -+#include - #include - #include - -@@ -72,6 +73,7 @@ static int brcm_nvram_add_cells(struct b - return -ENOMEM; - priv->cells[idx].offset = value - (char *)data; - priv->cells[idx].bytes = strlen(value); -+ priv->cells[idx].np = of_get_child_by_name(dev->of_node, priv->cells[idx].name); - } - - return 0; diff --git a/target/linux/generic/backport-6.1/805-v5.19-0006-nvmem-Add-Apple-eFuse-driver.patch b/target/linux/generic/backport-6.1/805-v5.19-0006-nvmem-Add-Apple-eFuse-driver.patch deleted file mode 100644 index ebeb6f5ad394..000000000000 --- a/target/linux/generic/backport-6.1/805-v5.19-0006-nvmem-Add-Apple-eFuse-driver.patch +++ /dev/null @@ -1,130 +0,0 @@ -From b6b7ef932ae838209254f016ecf8862d716a5ced Mon Sep 17 00:00:00 2001 -From: Sven Peter -Date: Fri, 29 Apr 2022 17:26:50 +0100 -Subject: [PATCH] nvmem: Add Apple eFuse driver - -Apple SoCs contain eFuses used to store factory-programmed data such -as calibration values for the PCIe or the Type-C PHY. They are organized -as 32bit values exposed as MMIO. - -Signed-off-by: Sven Peter -Signed-off-by: Srinivas Kandagatla -Link: https://lore.kernel.org/r/20220429162701.2222-6-srinivas.kandagatla@linaro.org -Signed-off-by: Greg Kroah-Hartman ---- - drivers/nvmem/Kconfig | 12 ++++++ - drivers/nvmem/Makefile | 2 + - drivers/nvmem/apple-efuses.c | 80 ++++++++++++++++++++++++++++++++++++ - 3 files changed, 94 insertions(+) - create mode 100644 drivers/nvmem/apple-efuses.c - ---- a/drivers/nvmem/Kconfig -+++ b/drivers/nvmem/Kconfig -@@ -324,4 +324,16 @@ config NVMEM_SUNPLUS_OCOTP - This driver can also be built as a module. If so, the module - will be called nvmem-sunplus-ocotp. - -+config NVMEM_APPLE_EFUSES -+ tristate "Apple eFuse support" -+ depends on ARCH_APPLE || COMPILE_TEST -+ default ARCH_APPLE -+ help -+ Say y here to enable support for reading eFuses on Apple SoCs -+ such as the M1. These are e.g. used to store factory programmed -+ calibration data required for the PCIe or the USB-C PHY. -+ -+ This driver can also be built as a module. If so, the module will -+ be called nvmem-apple-efuses. 
-+ - endif ---- a/drivers/nvmem/Makefile -+++ b/drivers/nvmem/Makefile -@@ -65,3 +65,5 @@ obj-$(CONFIG_NVMEM_LAYERSCAPE_SFP) += nv - nvmem-layerscape-sfp-y := layerscape-sfp.o - obj-$(CONFIG_NVMEM_SUNPLUS_OCOTP) += nvmem_sunplus_ocotp.o - nvmem_sunplus_ocotp-y := sunplus-ocotp.o -+obj-$(CONFIG_NVMEM_APPLE_EFUSES) += nvmem-apple-efuses.o -+nvmem-apple-efuses-y := apple-efuses.o ---- /dev/null -+++ b/drivers/nvmem/apple-efuses.c -@@ -0,0 +1,80 @@ -+// SPDX-License-Identifier: GPL-2.0-only -+/* -+ * Apple SoC eFuse driver -+ * -+ * Copyright (C) The Asahi Linux Contributors -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+ -+struct apple_efuses_priv { -+ void __iomem *fuses; -+}; -+ -+static int apple_efuses_read(void *context, unsigned int offset, void *val, -+ size_t bytes) -+{ -+ struct apple_efuses_priv *priv = context; -+ u32 *dst = val; -+ -+ while (bytes >= sizeof(u32)) { -+ *dst++ = readl_relaxed(priv->fuses + offset); -+ bytes -= sizeof(u32); -+ offset += sizeof(u32); -+ } -+ -+ return 0; -+} -+ -+static int apple_efuses_probe(struct platform_device *pdev) -+{ -+ struct apple_efuses_priv *priv; -+ struct resource *res; -+ struct nvmem_config config = { -+ .dev = &pdev->dev, -+ .read_only = true, -+ .reg_read = apple_efuses_read, -+ .stride = sizeof(u32), -+ .word_size = sizeof(u32), -+ .name = "apple_efuses_nvmem", -+ .id = NVMEM_DEVID_AUTO, -+ .root_only = true, -+ }; -+ -+ priv = devm_kzalloc(config.dev, sizeof(*priv), GFP_KERNEL); -+ if (!priv) -+ return -ENOMEM; -+ -+ priv->fuses = devm_platform_get_and_ioremap_resource(pdev, 0, &res); -+ if (IS_ERR(priv->fuses)) -+ return PTR_ERR(priv->fuses); -+ -+ config.priv = priv; -+ config.size = resource_size(res); -+ -+ return PTR_ERR_OR_ZERO(devm_nvmem_register(config.dev, &config)); -+} -+ -+static const struct of_device_id apple_efuses_of_match[] = { -+ { .compatible = "apple,efuses", }, -+ {} -+}; -+ -+MODULE_DEVICE_TABLE(of, apple_efuses_of_match); -+ -+static struct platform_driver apple_efuses_driver = { -+ .driver = { -+ .name = "apple_efuses", -+ .of_match_table = apple_efuses_of_match, -+ }, -+ .probe = apple_efuses_probe, -+}; -+ -+module_platform_driver(apple_efuses_driver); -+ -+MODULE_AUTHOR("Sven Peter "); -+MODULE_LICENSE("GPL"); diff --git a/target/linux/generic/backport-6.1/805-v5.19-0007-nvmem-qfprom-using-pm_runtime_resume_and_get-instead.patch b/target/linux/generic/backport-6.1/805-v5.19-0007-nvmem-qfprom-using-pm_runtime_resume_and_get-instead.patch deleted file mode 100644 index cd51d9700691..000000000000 --- a/target/linux/generic/backport-6.1/805-v5.19-0007-nvmem-qfprom-using-pm_runtime_resume_and_get-instead.patch +++ /dev/null @@ -1,31 +0,0 @@ -From 517f6e2641a2802dce5a5aa0d18c7d37a35678d2 Mon Sep 17 00:00:00 2001 -From: Minghao Chi -Date: Fri, 29 Apr 2022 17:26:54 +0100 -Subject: [PATCH] nvmem: qfprom: using pm_runtime_resume_and_get instead of - pm_runtime_get_sync - -Using pm_runtime_resume_and_get is more appropriate -for simplifing code - -Reported-by: Zeal Robot -Signed-off-by: Minghao Chi -Signed-off-by: Srinivas Kandagatla -Link: https://lore.kernel.org/r/20220429162701.2222-10-srinivas.kandagatla@linaro.org -Signed-off-by: Greg Kroah-Hartman ---- - drivers/nvmem/qfprom.c | 3 +-- - 1 file changed, 1 insertion(+), 2 deletions(-) - ---- a/drivers/nvmem/qfprom.c -+++ b/drivers/nvmem/qfprom.c -@@ -217,9 +217,8 @@ static int qfprom_enable_fuse_blowing(co - goto err_clk_rate_set; - } - -- ret = pm_runtime_get_sync(priv->dev); -+ ret = pm_runtime_resume_and_get(priv->dev); - if (ret < 0) { -- 
pm_runtime_put_noidle(priv->dev); - dev_err(priv->dev, "Failed to enable power-domain\n"); - goto err_reg_enable; - } diff --git a/target/linux/generic/backport-6.1/805-v5.19-0008-nvmem-sfp-Use-regmap.patch b/target/linux/generic/backport-6.1/805-v5.19-0008-nvmem-sfp-Use-regmap.patch deleted file mode 100644 index e187238ca38f..000000000000 --- a/target/linux/generic/backport-6.1/805-v5.19-0008-nvmem-sfp-Use-regmap.patch +++ /dev/null @@ -1,109 +0,0 @@ -From 943eadbdb11314b41eacbcc484dfb7f93e271ff4 Mon Sep 17 00:00:00 2001 -From: Sean Anderson -Date: Fri, 29 Apr 2022 17:27:00 +0100 -Subject: [PATCH] nvmem: sfp: Use regmap - -This converts the SFP driver to use regmap. This will allow easily -supporting devices with different endians. We disallow byte-level -access, as regmap_bulk_read doesn't support it (and it's unclear what -the correct result would be when we have an endianness difference). - -Signed-off-by: Sean Anderson -Signed-off-by: Srinivas Kandagatla -Link: https://lore.kernel.org/r/20220429162701.2222-16-srinivas.kandagatla@linaro.org -Signed-off-by: Greg Kroah-Hartman ---- - drivers/nvmem/Kconfig | 1 + - drivers/nvmem/layerscape-sfp.c | 30 ++++++++++++++++++++++-------- - 2 files changed, 23 insertions(+), 8 deletions(-) - ---- a/drivers/nvmem/Kconfig -+++ b/drivers/nvmem/Kconfig -@@ -304,6 +304,7 @@ config NVMEM_LAYERSCAPE_SFP - tristate "Layerscape SFP (Security Fuse Processor) support" - depends on ARCH_LAYERSCAPE || COMPILE_TEST - depends on HAS_IOMEM -+ select REGMAP_MMIO - help - This driver provides support to read the eFuses on Freescale - Layerscape SoC's. For example, the vendor provides a per part ---- a/drivers/nvmem/layerscape-sfp.c -+++ b/drivers/nvmem/layerscape-sfp.c -@@ -13,15 +13,17 @@ - #include - #include - #include -+#include - - #define LAYERSCAPE_SFP_OTP_OFFSET 0x0200 - - struct layerscape_sfp_priv { -- void __iomem *base; -+ struct regmap *regmap; - }; - - struct layerscape_sfp_data { - int size; -+ enum regmap_endian endian; - }; - - static int layerscape_sfp_read(void *context, unsigned int offset, void *val, -@@ -29,15 +31,16 @@ static int layerscape_sfp_read(void *con - { - struct layerscape_sfp_priv *priv = context; - -- memcpy_fromio(val, priv->base + LAYERSCAPE_SFP_OTP_OFFSET + offset, -- bytes); -- -- return 0; -+ return regmap_bulk_read(priv->regmap, -+ LAYERSCAPE_SFP_OTP_OFFSET + offset, val, -+ bytes / 4); - } - - static struct nvmem_config layerscape_sfp_nvmem_config = { - .name = "fsl-sfp", - .reg_read = layerscape_sfp_read, -+ .word_size = 4, -+ .stride = 4, - }; - - static int layerscape_sfp_probe(struct platform_device *pdev) -@@ -45,16 +48,26 @@ static int layerscape_sfp_probe(struct p - const struct layerscape_sfp_data *data; - struct layerscape_sfp_priv *priv; - struct nvmem_device *nvmem; -+ struct regmap_config config = { 0 }; -+ void __iomem *base; - - priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; - -- priv->base = devm_platform_ioremap_resource(pdev, 0); -- if (IS_ERR(priv->base)) -- return PTR_ERR(priv->base); -+ base = devm_platform_ioremap_resource(pdev, 0); -+ if (IS_ERR(base)) -+ return PTR_ERR(base); - - data = device_get_match_data(&pdev->dev); -+ config.reg_bits = 32; -+ config.reg_stride = 4; -+ config.val_bits = 32; -+ config.val_format_endian = data->endian; -+ config.max_register = LAYERSCAPE_SFP_OTP_OFFSET + data->size - 4; -+ priv->regmap = devm_regmap_init_mmio(&pdev->dev, base, &config); -+ if (IS_ERR(priv->regmap)) -+ return PTR_ERR(priv->regmap); - - 
layerscape_sfp_nvmem_config.size = data->size; - layerscape_sfp_nvmem_config.dev = &pdev->dev; -@@ -67,6 +80,7 @@ static int layerscape_sfp_probe(struct p - - static const struct layerscape_sfp_data ls1028a_data = { - .size = 0x88, -+ .endian = REGMAP_ENDIAN_LITTLE, - }; - - static const struct of_device_id layerscape_sfp_dt_ids[] = { diff --git a/target/linux/generic/backport-6.1/805-v5.19-0009-nvmem-sfp-Add-support-for-TA-2.1-devices.patch b/target/linux/generic/backport-6.1/805-v5.19-0009-nvmem-sfp-Add-support-for-TA-2.1-devices.patch deleted file mode 100644 index ee000986181a..000000000000 --- a/target/linux/generic/backport-6.1/805-v5.19-0009-nvmem-sfp-Add-support-for-TA-2.1-devices.patch +++ /dev/null @@ -1,38 +0,0 @@ -From 33a1c6618677fe33f8e84cb7bedc45abbce89a50 Mon Sep 17 00:00:00 2001 -From: Sean Anderson -Date: Fri, 29 Apr 2022 17:27:01 +0100 -Subject: [PATCH] nvmem: sfp: Add support for TA 2.1 devices - -This adds support for Trust Architecture (TA) 2.1 devices to the SFP driver. -There are few differences between TA 2.1 and TA 3.0, especially for -read-only support, so just re-use the existing data. - -Signed-off-by: Sean Anderson -Signed-off-by: Srinivas Kandagatla -Link: https://lore.kernel.org/r/20220429162701.2222-17-srinivas.kandagatla@linaro.org -Signed-off-by: Greg Kroah-Hartman ---- - drivers/nvmem/layerscape-sfp.c | 6 ++++++ - 1 file changed, 6 insertions(+) - ---- a/drivers/nvmem/layerscape-sfp.c -+++ b/drivers/nvmem/layerscape-sfp.c -@@ -78,12 +78,18 @@ static int layerscape_sfp_probe(struct p - return PTR_ERR_OR_ZERO(nvmem); - } - -+static const struct layerscape_sfp_data ls1021a_data = { -+ .size = 0x88, -+ .endian = REGMAP_ENDIAN_BIG, -+}; -+ - static const struct layerscape_sfp_data ls1028a_data = { - .size = 0x88, - .endian = REGMAP_ENDIAN_LITTLE, - }; - - static const struct of_device_id layerscape_sfp_dt_ids[] = { -+ { .compatible = "fsl,ls1021a-sfp", .data = &ls1021a_data }, - { .compatible = "fsl,ls1028a-sfp", .data = &ls1028a_data }, - {}, - }; diff --git a/target/linux/generic/backport-6.1/806-v6.0-0001-nvmem-microchip-otpc-add-support.patch b/target/linux/generic/backport-6.1/806-v6.0-0001-nvmem-microchip-otpc-add-support.patch deleted file mode 100644 index b1855d1f2f2a..000000000000 --- a/target/linux/generic/backport-6.1/806-v6.0-0001-nvmem-microchip-otpc-add-support.patch +++ /dev/null @@ -1,389 +0,0 @@ -From 98830350d3fc824c1ff5c338140fe20f041a5916 Mon Sep 17 00:00:00 2001 -From: Claudiu Beznea -Date: Wed, 6 Jul 2022 11:06:22 +0100 -Subject: [PATCH] nvmem: microchip-otpc: add support - -Add support for Microchip OTP controller available on SAMA7G5. The OTPC -controls the access to a non-volatile memory. The memory behind OTPC is -organized into packets, packets are composed by a fixed length header -(4 bytes long) and a variable length payload (payload length is available -in the header). When software request the data at an offset in memory -the OTPC will return (via header + data registers) the whole packet that -has a word at that offset. For the OTP memory layout like below: - -offset OTP Memory layout - - . . - . ... . - . . -0x0E +-----------+ <--- packet X - | header X | -0x12 +-----------+ - | payload X | -0x16 | | - | | -0x1A | | - +-----------+ - . . - . ... . - . . - -if user requests data at address 0x16 the data started at 0x0E will be -returned by controller. User will be able to fetch the whole packet -starting at 0x0E (or parts of the packet) via proper registers. 
The same -packet will be returned if software request the data at offset 0x0E or -0x12 or 0x1A. - -The OTP will be populated by Microchip with at least 2 packets first one -being boot configuration packet and the 2nd one being temperature -calibration packet. The packet order will be preserved b/w different chip -revisions but the packet sizes may change. - -For the above reasons and to keep the same software able to work on all -chip variants the read function of the driver is working with a packet -id instead of an offset in OTP memory. - -Signed-off-by: Claudiu Beznea -Signed-off-by: Srinivas Kandagatla -Link: https://lore.kernel.org/r/20220706100627.6534-3-srinivas.kandagatla@linaro.org -Signed-off-by: Greg Kroah-Hartman ---- - MAINTAINERS | 8 + - drivers/nvmem/Kconfig | 7 + - drivers/nvmem/Makefile | 2 + - drivers/nvmem/microchip-otpc.c | 288 +++++++++++++++++++++++++++++++++ - 4 files changed, 305 insertions(+) - create mode 100644 drivers/nvmem/microchip-otpc.c - ---- a/MAINTAINERS -+++ b/MAINTAINERS -@@ -12361,6 +12361,14 @@ S: Supported - F: Documentation/devicetree/bindings/mtd/atmel-nand.txt - F: drivers/mtd/nand/raw/atmel/* - -+MICROCHIP OTPC DRIVER -+M: Claudiu Beznea -+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -+S: Supported -+F: Documentation/devicetree/bindings/nvmem/microchip,sama7g5-otpc.yaml -+F: drivers/nvmem/microchip-otpc.c -+F: dt-bindings/nvmem/microchip,sama7g5-otpc.h -+ - MICROCHIP PWM DRIVER - M: Claudiu Beznea - L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) ---- a/drivers/nvmem/Kconfig -+++ b/drivers/nvmem/Kconfig -@@ -107,6 +107,13 @@ config MTK_EFUSE - This driver can also be built as a module. If so, the module - will be called efuse-mtk. - -+config MICROCHIP_OTPC -+ tristate "Microchip OTPC support" -+ depends on ARCH_AT91 || COMPILE_TEST -+ help -+ This driver enable the OTP controller available on Microchip SAMA7G5 -+ SoCs. It controlls the access to the OTP memory connected to it. -+ - config NVMEM_NINTENDO_OTP - tristate "Nintendo Wii and Wii U OTP Support" - depends on WII || COMPILE_TEST ---- a/drivers/nvmem/Makefile -+++ b/drivers/nvmem/Makefile -@@ -67,3 +67,5 @@ obj-$(CONFIG_NVMEM_SUNPLUS_OCOTP) += nvm - nvmem_sunplus_ocotp-y := sunplus-ocotp.o - obj-$(CONFIG_NVMEM_APPLE_EFUSES) += nvmem-apple-efuses.o - nvmem-apple-efuses-y := apple-efuses.o -+obj-$(CONFIG_MICROCHIP_OTPC) += nvmem-microchip-otpc.o -+nvmem-microchip-otpc-y := microchip-otpc.o ---- /dev/null -+++ b/drivers/nvmem/microchip-otpc.c -@@ -0,0 +1,288 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * OTP Memory controller -+ * -+ * Copyright (C) 2022 Microchip Technology Inc. 
and its subsidiaries -+ * -+ * Author: Claudiu Beznea -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define MCHP_OTPC_CR (0x0) -+#define MCHP_OTPC_CR_READ BIT(6) -+#define MCHP_OTPC_MR (0x4) -+#define MCHP_OTPC_MR_ADDR GENMASK(31, 16) -+#define MCHP_OTPC_AR (0x8) -+#define MCHP_OTPC_SR (0xc) -+#define MCHP_OTPC_SR_READ BIT(6) -+#define MCHP_OTPC_HR (0x20) -+#define MCHP_OTPC_HR_SIZE GENMASK(15, 8) -+#define MCHP_OTPC_DR (0x24) -+ -+#define MCHP_OTPC_NAME "mchp-otpc" -+#define MCHP_OTPC_SIZE (11 * 1024) -+ -+/** -+ * struct mchp_otpc - OTPC private data structure -+ * @base: base address -+ * @dev: struct device pointer -+ * @packets: list of packets in OTP memory -+ * @npackets: number of packets in OTP memory -+ */ -+struct mchp_otpc { -+ void __iomem *base; -+ struct device *dev; -+ struct list_head packets; -+ u32 npackets; -+}; -+ -+/** -+ * struct mchp_otpc_packet - OTPC packet data structure -+ * @list: list head -+ * @id: packet ID -+ * @offset: packet offset (in words) in OTP memory -+ */ -+struct mchp_otpc_packet { -+ struct list_head list; -+ u32 id; -+ u32 offset; -+}; -+ -+static struct mchp_otpc_packet *mchp_otpc_id_to_packet(struct mchp_otpc *otpc, -+ u32 id) -+{ -+ struct mchp_otpc_packet *packet; -+ -+ if (id >= otpc->npackets) -+ return NULL; -+ -+ list_for_each_entry(packet, &otpc->packets, list) { -+ if (packet->id == id) -+ return packet; -+ } -+ -+ return NULL; -+} -+ -+static int mchp_otpc_prepare_read(struct mchp_otpc *otpc, -+ unsigned int offset) -+{ -+ u32 tmp; -+ -+ /* Set address. */ -+ tmp = readl_relaxed(otpc->base + MCHP_OTPC_MR); -+ tmp &= ~MCHP_OTPC_MR_ADDR; -+ tmp |= FIELD_PREP(MCHP_OTPC_MR_ADDR, offset); -+ writel_relaxed(tmp, otpc->base + MCHP_OTPC_MR); -+ -+ /* Set read. */ -+ tmp = readl_relaxed(otpc->base + MCHP_OTPC_CR); -+ tmp |= MCHP_OTPC_CR_READ; -+ writel_relaxed(tmp, otpc->base + MCHP_OTPC_CR); -+ -+ /* Wait for packet to be transferred into temporary buffers. */ -+ return read_poll_timeout(readl_relaxed, tmp, !(tmp & MCHP_OTPC_SR_READ), -+ 10000, 2000, false, otpc->base + MCHP_OTPC_SR); -+} -+ -+/* -+ * OTPC memory is organized into packets. Each packets contains a header and -+ * a payload. Header is 4 bytes long and contains the size of the payload. -+ * Payload size varies. The memory footprint is something as follows: -+ * -+ * Memory offset Memory footprint Packet ID -+ * ------------- ---------------- --------- -+ * -+ * 0x0 +------------+ <-- packet 0 -+ * | header 0 | -+ * 0x4 +------------+ -+ * | payload 0 | -+ * . . -+ * . ... . -+ * . . -+ * offset1 +------------+ <-- packet 1 -+ * | header 1 | -+ * offset1 + 0x4 +------------+ -+ * | payload 1 | -+ * . . -+ * . ... . -+ * . . -+ * offset2 +------------+ <-- packet 2 -+ * . . -+ * . ... . -+ * . . -+ * offsetN +------------+ <-- packet N -+ * | header N | -+ * offsetN + 0x4 +------------+ -+ * | payload N | -+ * . . -+ * . ... . -+ * . . -+ * +------------+ -+ * -+ * where offset1, offset2, offsetN depends on the size of payload 0, payload 1, -+ * payload N-1. -+ * -+ * The access to memory is done on a per packet basis: the control registers -+ * need to be updated with an offset address (within a packet range) and the -+ * data registers will be update by controller with information contained by -+ * that packet. E.g. if control registers are updated with any address within -+ * the range [offset1, offset2) the data registers are updated by controller -+ * with packet 1. Header data is accessible though MCHP_OTPC_HR register. 
-+ * Payload data is accessible though MCHP_OTPC_DR and MCHP_OTPC_AR registers. -+ * There is no direct mapping b/w the offset requested by software and the -+ * offset returned by hardware. -+ * -+ * For this, the read function will return the first requested bytes in the -+ * packet. The user will have to be aware of the memory footprint before doing -+ * the read request. -+ */ -+static int mchp_otpc_read(void *priv, unsigned int off, void *val, -+ size_t bytes) -+{ -+ struct mchp_otpc *otpc = priv; -+ struct mchp_otpc_packet *packet; -+ u32 *buf = val; -+ u32 offset; -+ size_t len = 0; -+ int ret, payload_size; -+ -+ /* -+ * We reach this point with off being multiple of stride = 4 to -+ * be able to cross the subsystem. Inside the driver we use continuous -+ * unsigned integer numbers for packet id, thus devide off by 4 -+ * before passing it to mchp_otpc_id_to_packet(). -+ */ -+ packet = mchp_otpc_id_to_packet(otpc, off / 4); -+ if (!packet) -+ return -EINVAL; -+ offset = packet->offset; -+ -+ while (len < bytes) { -+ ret = mchp_otpc_prepare_read(otpc, offset); -+ if (ret) -+ return ret; -+ -+ /* Read and save header content. */ -+ *buf++ = readl_relaxed(otpc->base + MCHP_OTPC_HR); -+ len += sizeof(*buf); -+ offset++; -+ if (len >= bytes) -+ break; -+ -+ /* Read and save payload content. */ -+ payload_size = FIELD_GET(MCHP_OTPC_HR_SIZE, *(buf - 1)); -+ writel_relaxed(0UL, otpc->base + MCHP_OTPC_AR); -+ do { -+ *buf++ = readl_relaxed(otpc->base + MCHP_OTPC_DR); -+ len += sizeof(*buf); -+ offset++; -+ payload_size--; -+ } while (payload_size >= 0 && len < bytes); -+ } -+ -+ return 0; -+} -+ -+static int mchp_otpc_init_packets_list(struct mchp_otpc *otpc, u32 *size) -+{ -+ struct mchp_otpc_packet *packet; -+ u32 word, word_pos = 0, id = 0, npackets = 0, payload_size; -+ int ret; -+ -+ INIT_LIST_HEAD(&otpc->packets); -+ *size = 0; -+ -+ while (*size < MCHP_OTPC_SIZE) { -+ ret = mchp_otpc_prepare_read(otpc, word_pos); -+ if (ret) -+ return ret; -+ -+ word = readl_relaxed(otpc->base + MCHP_OTPC_HR); -+ payload_size = FIELD_GET(MCHP_OTPC_HR_SIZE, word); -+ if (!payload_size) -+ break; -+ -+ packet = devm_kzalloc(otpc->dev, sizeof(*packet), GFP_KERNEL); -+ if (!packet) -+ return -ENOMEM; -+ -+ packet->id = id++; -+ packet->offset = word_pos; -+ INIT_LIST_HEAD(&packet->list); -+ list_add_tail(&packet->list, &otpc->packets); -+ -+ /* Count size by adding header and paload sizes. */ -+ *size += 4 * (payload_size + 1); -+ /* Next word: this packet (header, payload) position + 1. 
*/ -+ word_pos += payload_size + 2; -+ -+ npackets++; -+ } -+ -+ otpc->npackets = npackets; -+ -+ return 0; -+} -+ -+static struct nvmem_config mchp_nvmem_config = { -+ .name = MCHP_OTPC_NAME, -+ .type = NVMEM_TYPE_OTP, -+ .read_only = true, -+ .word_size = 4, -+ .stride = 4, -+ .reg_read = mchp_otpc_read, -+}; -+ -+static int mchp_otpc_probe(struct platform_device *pdev) -+{ -+ struct nvmem_device *nvmem; -+ struct mchp_otpc *otpc; -+ u32 size; -+ int ret; -+ -+ otpc = devm_kzalloc(&pdev->dev, sizeof(*otpc), GFP_KERNEL); -+ if (!otpc) -+ return -ENOMEM; -+ -+ otpc->base = devm_platform_ioremap_resource(pdev, 0); -+ if (IS_ERR(otpc->base)) -+ return PTR_ERR(otpc->base); -+ -+ otpc->dev = &pdev->dev; -+ ret = mchp_otpc_init_packets_list(otpc, &size); -+ if (ret) -+ return ret; -+ -+ mchp_nvmem_config.dev = otpc->dev; -+ mchp_nvmem_config.size = size; -+ mchp_nvmem_config.priv = otpc; -+ nvmem = devm_nvmem_register(&pdev->dev, &mchp_nvmem_config); -+ -+ return PTR_ERR_OR_ZERO(nvmem); -+} -+ -+static const struct of_device_id __maybe_unused mchp_otpc_ids[] = { -+ { .compatible = "microchip,sama7g5-otpc", }, -+ { }, -+}; -+MODULE_DEVICE_TABLE(of, mchp_otpc_ids); -+ -+static struct platform_driver mchp_otpc_driver = { -+ .probe = mchp_otpc_probe, -+ .driver = { -+ .name = MCHP_OTPC_NAME, -+ .of_match_table = of_match_ptr(mchp_otpc_ids), -+ }, -+}; -+module_platform_driver(mchp_otpc_driver); -+ -+MODULE_AUTHOR("Claudiu Beznea "); -+MODULE_DESCRIPTION("Microchip SAMA7G5 OTPC driver"); -+MODULE_LICENSE("GPL"); diff --git a/target/linux/generic/backport-6.1/806-v6.0-0002-nvmem-mtk-efuse-Simplify-with-devm_platform_get_and_.patch b/target/linux/generic/backport-6.1/806-v6.0-0002-nvmem-mtk-efuse-Simplify-with-devm_platform_get_and_.patch deleted file mode 100644 index 6a4126b9deda..000000000000 --- a/target/linux/generic/backport-6.1/806-v6.0-0002-nvmem-mtk-efuse-Simplify-with-devm_platform_get_and_.patch +++ /dev/null @@ -1,32 +0,0 @@ -From f5c97da8037b18d1256a58459fa96ed68e50fb41 Mon Sep 17 00:00:00 2001 -From: AngeloGioacchino Del Regno -Date: Wed, 6 Jul 2022 11:06:27 +0100 -Subject: [PATCH] nvmem: mtk-efuse: Simplify with - devm_platform_get_and_ioremap_resource() - -Convert platform_get_resource(), devm_ioremap_resource() to a single -call to devm_platform_get_and_ioremap_resource(), as this is exactly -what this function does. - -No functional changes. 
- -Signed-off-by: AngeloGioacchino Del Regno -Signed-off-by: Srinivas Kandagatla -Link: https://lore.kernel.org/r/20220706100627.6534-8-srinivas.kandagatla@linaro.org -Signed-off-by: Greg Kroah-Hartman ---- - drivers/nvmem/mtk-efuse.c | 3 +-- - 1 file changed, 1 insertion(+), 2 deletions(-) - ---- a/drivers/nvmem/mtk-efuse.c -+++ b/drivers/nvmem/mtk-efuse.c -@@ -41,8 +41,7 @@ static int mtk_efuse_probe(struct platfo - if (!priv) - return -ENOMEM; - -- res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -- priv->base = devm_ioremap_resource(dev, res); -+ priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); - if (IS_ERR(priv->base)) - return PTR_ERR(priv->base); - diff --git a/target/linux/generic/backport-6.1/807-v6.1-0002-nvmem-add-driver-handling-U-Boot-environment-variabl.patch b/target/linux/generic/backport-6.1/807-v6.1-0002-nvmem-add-driver-handling-U-Boot-environment-variabl.patch deleted file mode 100644 index 9138807bc91c..000000000000 --- a/target/linux/generic/backport-6.1/807-v6.1-0002-nvmem-add-driver-handling-U-Boot-environment-variabl.patch +++ /dev/null @@ -1,286 +0,0 @@ -From d5542923f200f95bddf524f36fd495f78aa28e3c Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= -Date: Fri, 16 Sep 2022 13:20:48 +0100 -Subject: [PATCH] nvmem: add driver handling U-Boot environment variables -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -U-Boot stores its setup as environment variables. It's a list of -key-value pairs stored on flash device with a custom header. - -This commit adds an NVMEM driver that: -1. Provides NVMEM access to environment vars binary data -2. Extracts variables as NVMEM cells - -Current Linux's NVMEM sysfs API allows reading whole NVMEM data block. -It can be used by user-space tools for reading U-Boot env vars block -without the hassle of finding its location. Parsing will still need to -be re-done there. - -Kernel-parsed NVMEM cells can be read however by Linux drivers. This may -be useful for Ethernet drivers for reading device MAC address which is -often stored as U-Boot env variable. - -Reviewed-by: Ahmad Fatoum -Signed-off-by: Rafał Miłecki -Signed-off-by: Srinivas Kandagatla -Link: https://lore.kernel.org/r/20220916122100.170016-2-srinivas.kandagatla@linaro.org -Signed-off-by: Greg Kroah-Hartman ---- - MAINTAINERS | 1 + - drivers/nvmem/Kconfig | 13 +++ - drivers/nvmem/Makefile | 2 + - drivers/nvmem/u-boot-env.c | 218 +++++++++++++++++++++++++++++++++++++ - 4 files changed, 234 insertions(+) - create mode 100644 drivers/nvmem/u-boot-env.c - ---- a/drivers/nvmem/Kconfig -+++ b/drivers/nvmem/Kconfig -@@ -344,4 +344,17 @@ config NVMEM_APPLE_EFUSES - This driver can also be built as a module. If so, the module will - be called nvmem-apple-efuses. - -+config NVMEM_U_BOOT_ENV -+ tristate "U-Boot environment variables support" -+ depends on OF && MTD -+ select CRC32 -+ help -+ U-Boot stores its setup as environment variables. This driver adds -+ support for verifying & exporting such data. It also exposes variables -+ as NVMEM cells so they can be referenced by other drivers. -+ -+ Currently this drivers works only with env variables on top of MTD. -+ -+ If compiled as module it will be called nvmem_u-boot-env. 
-+ - endif ---- a/drivers/nvmem/Makefile -+++ b/drivers/nvmem/Makefile -@@ -69,3 +69,5 @@ obj-$(CONFIG_NVMEM_APPLE_EFUSES) += nvme - nvmem-apple-efuses-y := apple-efuses.o - obj-$(CONFIG_MICROCHIP_OTPC) += nvmem-microchip-otpc.o - nvmem-microchip-otpc-y := microchip-otpc.o -+obj-$(CONFIG_NVMEM_U_BOOT_ENV) += nvmem_u-boot-env.o -+nvmem_u-boot-env-y := u-boot-env.o ---- /dev/null -+++ b/drivers/nvmem/u-boot-env.c -@@ -0,0 +1,218 @@ -+// SPDX-License-Identifier: GPL-2.0-only -+/* -+ * Copyright (C) 2022 Rafał Miłecki -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+enum u_boot_env_format { -+ U_BOOT_FORMAT_SINGLE, -+ U_BOOT_FORMAT_REDUNDANT, -+}; -+ -+struct u_boot_env { -+ struct device *dev; -+ enum u_boot_env_format format; -+ -+ struct mtd_info *mtd; -+ -+ /* Cells */ -+ struct nvmem_cell_info *cells; -+ int ncells; -+}; -+ -+struct u_boot_env_image_single { -+ __le32 crc32; -+ uint8_t data[]; -+} __packed; -+ -+struct u_boot_env_image_redundant { -+ __le32 crc32; -+ u8 mark; -+ uint8_t data[]; -+} __packed; -+ -+static int u_boot_env_read(void *context, unsigned int offset, void *val, -+ size_t bytes) -+{ -+ struct u_boot_env *priv = context; -+ struct device *dev = priv->dev; -+ size_t bytes_read; -+ int err; -+ -+ err = mtd_read(priv->mtd, offset, bytes, &bytes_read, val); -+ if (err && !mtd_is_bitflip(err)) { -+ dev_err(dev, "Failed to read from mtd: %d\n", err); -+ return err; -+ } -+ -+ if (bytes_read != bytes) { -+ dev_err(dev, "Failed to read %zu bytes\n", bytes); -+ return -EIO; -+ } -+ -+ return 0; -+} -+ -+static int u_boot_env_add_cells(struct u_boot_env *priv, uint8_t *buf, -+ size_t data_offset, size_t data_len) -+{ -+ struct device *dev = priv->dev; -+ char *data = buf + data_offset; -+ char *var, *value, *eq; -+ int idx; -+ -+ priv->ncells = 0; -+ for (var = data; var < data + data_len && *var; var += strlen(var) + 1) -+ priv->ncells++; -+ -+ priv->cells = devm_kcalloc(dev, priv->ncells, sizeof(*priv->cells), GFP_KERNEL); -+ if (!priv->cells) -+ return -ENOMEM; -+ -+ for (var = data, idx = 0; -+ var < data + data_len && *var; -+ var = value + strlen(value) + 1, idx++) { -+ eq = strchr(var, '='); -+ if (!eq) -+ break; -+ *eq = '\0'; -+ value = eq + 1; -+ -+ priv->cells[idx].name = devm_kstrdup(dev, var, GFP_KERNEL); -+ if (!priv->cells[idx].name) -+ return -ENOMEM; -+ priv->cells[idx].offset = data_offset + value - data; -+ priv->cells[idx].bytes = strlen(value); -+ } -+ -+ if (WARN_ON(idx != priv->ncells)) -+ priv->ncells = idx; -+ -+ return 0; -+} -+ -+static int u_boot_env_parse(struct u_boot_env *priv) -+{ -+ struct device *dev = priv->dev; -+ size_t crc32_data_offset; -+ size_t crc32_data_len; -+ size_t crc32_offset; -+ size_t data_offset; -+ size_t data_len; -+ uint32_t crc32; -+ uint32_t calc; -+ size_t bytes; -+ uint8_t *buf; -+ int err; -+ -+ buf = kcalloc(1, priv->mtd->size, GFP_KERNEL); -+ if (!buf) { -+ err = -ENOMEM; -+ goto err_out; -+ } -+ -+ err = mtd_read(priv->mtd, 0, priv->mtd->size, &bytes, buf); -+ if ((err && !mtd_is_bitflip(err)) || bytes != priv->mtd->size) { -+ dev_err(dev, "Failed to read from mtd: %d\n", err); -+ goto err_kfree; -+ } -+ -+ switch (priv->format) { -+ case U_BOOT_FORMAT_SINGLE: -+ crc32_offset = offsetof(struct u_boot_env_image_single, crc32); -+ crc32_data_offset = offsetof(struct u_boot_env_image_single, data); -+ data_offset = offsetof(struct u_boot_env_image_single, data); -+ break; -+ case U_BOOT_FORMAT_REDUNDANT: -+ crc32_offset = offsetof(struct 
u_boot_env_image_redundant, crc32); -+ crc32_data_offset = offsetof(struct u_boot_env_image_redundant, mark); -+ data_offset = offsetof(struct u_boot_env_image_redundant, data); -+ break; -+ } -+ crc32 = le32_to_cpu(*(uint32_t *)(buf + crc32_offset)); -+ crc32_data_len = priv->mtd->size - crc32_data_offset; -+ data_len = priv->mtd->size - data_offset; -+ -+ calc = crc32(~0, buf + crc32_data_offset, crc32_data_len) ^ ~0L; -+ if (calc != crc32) { -+ dev_err(dev, "Invalid calculated CRC32: 0x%08x (expected: 0x%08x)\n", calc, crc32); -+ err = -EINVAL; -+ goto err_kfree; -+ } -+ -+ buf[priv->mtd->size - 1] = '\0'; -+ err = u_boot_env_add_cells(priv, buf, data_offset, data_len); -+ if (err) -+ dev_err(dev, "Failed to add cells: %d\n", err); -+ -+err_kfree: -+ kfree(buf); -+err_out: -+ return err; -+} -+ -+static int u_boot_env_probe(struct platform_device *pdev) -+{ -+ struct nvmem_config config = { -+ .name = "u-boot-env", -+ .reg_read = u_boot_env_read, -+ }; -+ struct device *dev = &pdev->dev; -+ struct device_node *np = dev->of_node; -+ struct u_boot_env *priv; -+ int err; -+ -+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); -+ if (!priv) -+ return -ENOMEM; -+ priv->dev = dev; -+ -+ priv->format = (uintptr_t)of_device_get_match_data(dev); -+ -+ priv->mtd = of_get_mtd_device_by_node(np); -+ if (IS_ERR(priv->mtd)) { -+ dev_err_probe(dev, PTR_ERR(priv->mtd), "Failed to get %pOF MTD\n", np); -+ return PTR_ERR(priv->mtd); -+ } -+ -+ err = u_boot_env_parse(priv); -+ if (err) -+ return err; -+ -+ config.dev = dev; -+ config.cells = priv->cells; -+ config.ncells = priv->ncells; -+ config.priv = priv; -+ config.size = priv->mtd->size; -+ -+ return PTR_ERR_OR_ZERO(devm_nvmem_register(dev, &config)); -+} -+ -+static const struct of_device_id u_boot_env_of_match_table[] = { -+ { .compatible = "u-boot,env", .data = (void *)U_BOOT_FORMAT_SINGLE, }, -+ { .compatible = "u-boot,env-redundant-bool", .data = (void *)U_BOOT_FORMAT_REDUNDANT, }, -+ { .compatible = "u-boot,env-redundant-count", .data = (void *)U_BOOT_FORMAT_REDUNDANT, }, -+ {}, -+}; -+ -+static struct platform_driver u_boot_env_driver = { -+ .probe = u_boot_env_probe, -+ .driver = { -+ .name = "u_boot_env", -+ .of_match_table = u_boot_env_of_match_table, -+ }, -+}; -+module_platform_driver(u_boot_env_driver); -+ -+MODULE_AUTHOR("Rafał Miłecki"); -+MODULE_LICENSE("GPL"); -+MODULE_DEVICE_TABLE(of, u_boot_env_of_match_table); diff --git a/target/linux/generic/backport-6.1/807-v6.1-0004-nvmem-brcm_nvram-Use-kzalloc-for-allocating-only-one.patch b/target/linux/generic/backport-6.1/807-v6.1-0004-nvmem-brcm_nvram-Use-kzalloc-for-allocating-only-one.patch deleted file mode 100644 index 48ad63fab5f9..000000000000 --- a/target/linux/generic/backport-6.1/807-v6.1-0004-nvmem-brcm_nvram-Use-kzalloc-for-allocating-only-one.patch +++ /dev/null @@ -1,29 +0,0 @@ -From d3524bb5b9a0c567b853a0024526afe87dde01ed Mon Sep 17 00:00:00 2001 -From: Kenneth Lee -Date: Fri, 16 Sep 2022 13:20:52 +0100 -Subject: [PATCH] nvmem: brcm_nvram: Use kzalloc for allocating only one - element - -Use kzalloc(...) rather than kcalloc(1, ...) because the number of -elements we are specifying in this case is 1, so kzalloc would -accomplish the same thing and we can simplify. 
- -Signed-off-by: Kenneth Lee -Signed-off-by: Srinivas Kandagatla -Link: https://lore.kernel.org/r/20220916122100.170016-6-srinivas.kandagatla@linaro.org -Signed-off-by: Greg Kroah-Hartman ---- - drivers/nvmem/brcm_nvram.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/nvmem/brcm_nvram.c -+++ b/drivers/nvmem/brcm_nvram.c -@@ -96,7 +96,7 @@ static int brcm_nvram_parse(struct brcm_ - - len = le32_to_cpu(header.len); - -- data = kcalloc(1, len, GFP_KERNEL); -+ data = kzalloc(len, GFP_KERNEL); - memcpy_fromio(data, priv->base, len); - data[len - 1] = '\0'; - diff --git a/target/linux/generic/backport-6.1/807-v6.1-0005-nvmem-prefix-all-symbols-with-NVMEM_.patch b/target/linux/generic/backport-6.1/807-v6.1-0005-nvmem-prefix-all-symbols-with-NVMEM_.patch deleted file mode 100644 index ae02439bd734..000000000000 --- a/target/linux/generic/backport-6.1/807-v6.1-0005-nvmem-prefix-all-symbols-with-NVMEM_.patch +++ /dev/null @@ -1,281 +0,0 @@ -From 28fc7c986f01fdcfd28af648be2597624cac0e27 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= -Date: Fri, 16 Sep 2022 13:20:54 +0100 -Subject: [PATCH] nvmem: prefix all symbols with NVMEM_ -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -This unifies all NVMEM symbols. They follow one style now. - -Reviewed-by: Matthias Brugger -Acked-by: Arnd Bergmann -Signed-off-by: Rafał Miłecki -Signed-off-by: Srinivas Kandagatla -Link: https://lore.kernel.org/r/20220916122100.170016-8-srinivas.kandagatla@linaro.org -Signed-off-by: Greg Kroah-Hartman ---- - arch/arm/configs/multi_v7_defconfig | 6 +++--- - arch/arm/configs/qcom_defconfig | 2 +- - arch/arm64/configs/defconfig | 10 +++++----- - arch/mips/configs/ci20_defconfig | 2 +- - drivers/cpufreq/Kconfig.arm | 2 +- - drivers/nvmem/Kconfig | 24 ++++++++++++------------ - drivers/nvmem/Makefile | 24 ++++++++++++------------ - drivers/soc/mediatek/Kconfig | 2 +- - drivers/thermal/qcom/Kconfig | 2 +- - 9 files changed, 37 insertions(+), 37 deletions(-) - ---- a/arch/arm/configs/multi_v7_defconfig -+++ b/arch/arm/configs/multi_v7_defconfig -@@ -1125,10 +1125,10 @@ CONFIG_TI_PIPE3=y - CONFIG_TWL4030_USB=m - CONFIG_RAS=y - CONFIG_NVMEM_IMX_OCOTP=y --CONFIG_ROCKCHIP_EFUSE=m -+CONFIG_NVMEM_ROCKCHIP_EFUSE=m - CONFIG_NVMEM_SUNXI_SID=y - CONFIG_NVMEM_VF610_OCOTP=y --CONFIG_MESON_MX_EFUSE=m -+CONFIG_NVMEM_MESON_MX_EFUSE=m - CONFIG_NVMEM_RMEM=m - CONFIG_FSI=m - CONFIG_FSI_MASTER_GPIO=m ---- a/arch/arm/configs/qcom_defconfig -+++ b/arch/arm/configs/qcom_defconfig -@@ -274,7 +274,7 @@ CONFIG_PHY_QCOM_USB_HS=y - CONFIG_PHY_QCOM_USB_HSIC=y - CONFIG_PHY_QCOM_QMP=y - CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2=y --CONFIG_QCOM_QFPROM=y -+CONFIG_NVMEM_QCOM_QFPROM=y - CONFIG_INTERCONNECT=y - CONFIG_INTERCONNECT_QCOM=y - CONFIG_INTERCONNECT_QCOM_MSM8974=m ---- a/arch/arm64/configs/defconfig -+++ b/arch/arm64/configs/defconfig -@@ -1135,11 +1135,11 @@ CONFIG_QCOM_L3_PMU=y - CONFIG_NVMEM_IMX_OCOTP=y - CONFIG_NVMEM_IMX_OCOTP_SCU=y - CONFIG_QCOM_QFPROM=y --CONFIG_MTK_EFUSE=y --CONFIG_ROCKCHIP_EFUSE=y -+CONFIG_NVMEM_MTK_EFUSE=y -+CONFIG_NVMEM_ROCKCHIP_EFUSE=y - CONFIG_NVMEM_SUNXI_SID=y --CONFIG_UNIPHIER_EFUSE=y --CONFIG_MESON_EFUSE=m -+CONFIG_NVMEM_UNIPHIER_EFUSE=y -+CONFIG_NVMEM_MESON_EFUSE=m - CONFIG_NVMEM_RMEM=m - CONFIG_FPGA=y - CONFIG_FPGA_MGR_STRATIX10_SOC=m ---- a/arch/mips/configs/ci20_defconfig -+++ b/arch/mips/configs/ci20_defconfig -@@ -137,7 +137,7 @@ CONFIG_MEMORY=y - CONFIG_JZ4780_NEMC=y - CONFIG_PWM=y - CONFIG_PWM_JZ4740=m --CONFIG_JZ4780_EFUSE=y 
-+CONFIG_NVMEM_JZ4780_EFUSE=y - CONFIG_JZ4770_PHY=y - CONFIG_EXT4_FS=y - # CONFIG_DNOTIFY is not set ---- a/drivers/cpufreq/Kconfig.arm -+++ b/drivers/cpufreq/Kconfig.arm -@@ -153,7 +153,7 @@ config ARM_OMAP2PLUS_CPUFREQ - config ARM_QCOM_CPUFREQ_NVMEM - tristate "Qualcomm nvmem based CPUFreq" - depends on ARCH_QCOM -- depends on QCOM_QFPROM -+ depends on NVMEM_QCOM_QFPROM - depends on QCOM_SMEM - select PM_OPP - help ---- a/drivers/nvmem/Kconfig -+++ b/drivers/nvmem/Kconfig -@@ -52,7 +52,7 @@ config NVMEM_IMX_OCOTP_SCU - This is a driver for the SCU On-Chip OTP Controller (OCOTP) - available on i.MX8 SoCs. - --config JZ4780_EFUSE -+config NVMEM_JZ4780_EFUSE - tristate "JZ4780 EFUSE Memory Support" - depends on MACH_INGENIC || COMPILE_TEST - depends on HAS_IOMEM -@@ -96,7 +96,7 @@ config NVMEM_MXS_OCOTP - This driver can also be built as a module. If so, the module - will be called nvmem-mxs-ocotp. - --config MTK_EFUSE -+config NVMEM_MTK_EFUSE - tristate "Mediatek SoCs EFUSE support" - depends on ARCH_MEDIATEK || COMPILE_TEST - depends on HAS_IOMEM -@@ -107,7 +107,7 @@ config MTK_EFUSE - This driver can also be built as a module. If so, the module - will be called efuse-mtk. - --config MICROCHIP_OTPC -+config NVMEM_MICROCHIP_OTPC - tristate "Microchip OTPC support" - depends on ARCH_AT91 || COMPILE_TEST - help -@@ -126,7 +126,7 @@ config NVMEM_NINTENDO_OTP - This driver can also be built as a module. If so, the module - will be called nvmem-nintendo-otp. - --config QCOM_QFPROM -+config NVMEM_QCOM_QFPROM - tristate "QCOM QFPROM Support" - depends on ARCH_QCOM || COMPILE_TEST - depends on HAS_IOMEM -@@ -145,7 +145,7 @@ config NVMEM_SPMI_SDAM - Qualcomm Technologies, Inc. PMICs. It provides the clients - an interface to read/write to the SDAM module's shared memory. - --config ROCKCHIP_EFUSE -+config NVMEM_ROCKCHIP_EFUSE - tristate "Rockchip eFuse Support" - depends on ARCH_ROCKCHIP || COMPILE_TEST - depends on HAS_IOMEM -@@ -156,7 +156,7 @@ config ROCKCHIP_EFUSE - This driver can also be built as a module. If so, the module - will be called nvmem_rockchip_efuse. - --config ROCKCHIP_OTP -+config NVMEM_ROCKCHIP_OTP - tristate "Rockchip OTP controller support" - depends on ARCH_ROCKCHIP || COMPILE_TEST - depends on HAS_IOMEM -@@ -199,7 +199,7 @@ config NVMEM_SUNXI_SID - This driver can also be built as a module. If so, the module - will be called nvmem_sunxi_sid. - --config UNIPHIER_EFUSE -+config NVMEM_UNIPHIER_EFUSE - tristate "UniPhier SoCs eFuse support" - depends on ARCH_UNIPHIER || COMPILE_TEST - depends on HAS_IOMEM -@@ -221,7 +221,7 @@ config NVMEM_VF610_OCOTP - This driver can also be build as a module. If so, the module will - be called nvmem-vf610-ocotp. - --config MESON_EFUSE -+config NVMEM_MESON_EFUSE - tristate "Amlogic Meson GX eFuse Support" - depends on (ARCH_MESON || COMPILE_TEST) && MESON_SM - help -@@ -231,7 +231,7 @@ config MESON_EFUSE - This driver can also be built as a module. If so, the module - will be called nvmem_meson_efuse. - --config MESON_MX_EFUSE -+config NVMEM_MESON_MX_EFUSE - tristate "Amlogic Meson6/Meson8/Meson8b eFuse Support" - depends on ARCH_MESON || COMPILE_TEST - help -@@ -251,13 +251,13 @@ config NVMEM_SNVS_LPGPR - This driver can also be built as a module. If so, the module - will be called nvmem-snvs-lpgpr. - --config RAVE_SP_EEPROM -+config NVMEM_RAVE_SP_EEPROM - tristate "Rave SP EEPROM Support" - depends on RAVE_SP_CORE - help - Say y here to enable Rave SP EEPROM support. 
- --config SC27XX_EFUSE -+config NVMEM_SC27XX_EFUSE - tristate "Spreadtrum SC27XX eFuse Support" - depends on MFD_SC27XX_PMIC || COMPILE_TEST - depends on HAS_IOMEM -@@ -278,7 +278,7 @@ config NVMEM_ZYNQMP - - If sure, say yes. If unsure, say no. - --config SPRD_EFUSE -+config NVMEM_SPRD_EFUSE - tristate "Spreadtrum SoC eFuse Support" - depends on ARCH_SPRD || COMPILE_TEST - depends on HAS_IOMEM ---- a/drivers/nvmem/Makefile -+++ b/drivers/nvmem/Makefile -@@ -15,7 +15,7 @@ obj-$(CONFIG_NVMEM_IMX_OCOTP) += nvmem-i - nvmem-imx-ocotp-y := imx-ocotp.o - obj-$(CONFIG_NVMEM_IMX_OCOTP_SCU) += nvmem-imx-ocotp-scu.o - nvmem-imx-ocotp-scu-y := imx-ocotp-scu.o --obj-$(CONFIG_JZ4780_EFUSE) += nvmem_jz4780_efuse.o -+obj-$(CONFIG_NVMEM_JZ4780_EFUSE) += nvmem_jz4780_efuse.o - nvmem_jz4780_efuse-y := jz4780-efuse.o - obj-$(CONFIG_NVMEM_LPC18XX_EEPROM) += nvmem_lpc18xx_eeprom.o - nvmem_lpc18xx_eeprom-y := lpc18xx_eeprom.o -@@ -25,37 +25,37 @@ obj-$(CONFIG_NVMEM_MXS_OCOTP) += nvmem-m - nvmem-mxs-ocotp-y := mxs-ocotp.o - obj-$(CONFIG_NVMEM_NINTENDO_OTP) += nvmem-nintendo-otp.o - nvmem-nintendo-otp-y := nintendo-otp.o --obj-$(CONFIG_MTK_EFUSE) += nvmem_mtk-efuse.o -+obj-$(CONFIG_NVMEM_MTK_EFUSE) += nvmem_mtk-efuse.o - nvmem_mtk-efuse-y := mtk-efuse.o --obj-$(CONFIG_QCOM_QFPROM) += nvmem_qfprom.o -+obj-$(CONFIG_NVMEM_QCOM_QFPROM) += nvmem_qfprom.o - nvmem_qfprom-y := qfprom.o - obj-$(CONFIG_NVMEM_SPMI_SDAM) += nvmem_qcom-spmi-sdam.o - nvmem_qcom-spmi-sdam-y += qcom-spmi-sdam.o --obj-$(CONFIG_ROCKCHIP_EFUSE) += nvmem_rockchip_efuse.o -+obj-$(CONFIG_NVMEM_ROCKCHIP_EFUSE) += nvmem_rockchip_efuse.o - nvmem_rockchip_efuse-y := rockchip-efuse.o --obj-$(CONFIG_ROCKCHIP_OTP) += nvmem-rockchip-otp.o -+obj-$(CONFIG_NVMEM_ROCKCHIP_OTP) += nvmem-rockchip-otp.o - nvmem-rockchip-otp-y := rockchip-otp.o - obj-$(CONFIG_NVMEM_SUNXI_SID) += nvmem_sunxi_sid.o - nvmem_stm32_romem-y := stm32-romem.o - obj-$(CONFIG_NVMEM_STM32_ROMEM) += nvmem_stm32_romem.o - nvmem_sunxi_sid-y := sunxi_sid.o --obj-$(CONFIG_UNIPHIER_EFUSE) += nvmem-uniphier-efuse.o -+obj-$(CONFIG_NVMEM_UNIPHIER_EFUSE) += nvmem-uniphier-efuse.o - nvmem-uniphier-efuse-y := uniphier-efuse.o - obj-$(CONFIG_NVMEM_VF610_OCOTP) += nvmem-vf610-ocotp.o - nvmem-vf610-ocotp-y := vf610-ocotp.o --obj-$(CONFIG_MESON_EFUSE) += nvmem_meson_efuse.o -+obj-$(CONFIG_NVMEM_MESON_EFUSE) += nvmem_meson_efuse.o - nvmem_meson_efuse-y := meson-efuse.o --obj-$(CONFIG_MESON_MX_EFUSE) += nvmem_meson_mx_efuse.o -+obj-$(CONFIG_NVMEM_MESON_MX_EFUSE) += nvmem_meson_mx_efuse.o - nvmem_meson_mx_efuse-y := meson-mx-efuse.o - obj-$(CONFIG_NVMEM_SNVS_LPGPR) += nvmem_snvs_lpgpr.o - nvmem_snvs_lpgpr-y := snvs_lpgpr.o --obj-$(CONFIG_RAVE_SP_EEPROM) += nvmem-rave-sp-eeprom.o -+obj-$(CONFIG_NVMEM_RAVE_SP_EEPROM) += nvmem-rave-sp-eeprom.o - nvmem-rave-sp-eeprom-y := rave-sp-eeprom.o --obj-$(CONFIG_SC27XX_EFUSE) += nvmem-sc27xx-efuse.o -+obj-$(CONFIG_NVMEM_SC27XX_EFUSE) += nvmem-sc27xx-efuse.o - nvmem-sc27xx-efuse-y := sc27xx-efuse.o - obj-$(CONFIG_NVMEM_ZYNQMP) += nvmem_zynqmp_nvmem.o - nvmem_zynqmp_nvmem-y := zynqmp_nvmem.o --obj-$(CONFIG_SPRD_EFUSE) += nvmem_sprd_efuse.o -+obj-$(CONFIG_NVMEM_SPRD_EFUSE) += nvmem_sprd_efuse.o - nvmem_sprd_efuse-y := sprd-efuse.o - obj-$(CONFIG_NVMEM_RMEM) += nvmem-rmem.o - nvmem-rmem-y := rmem.o -@@ -67,7 +67,7 @@ obj-$(CONFIG_NVMEM_SUNPLUS_OCOTP) += nvm - nvmem_sunplus_ocotp-y := sunplus-ocotp.o - obj-$(CONFIG_NVMEM_APPLE_EFUSES) += nvmem-apple-efuses.o - nvmem-apple-efuses-y := apple-efuses.o --obj-$(CONFIG_MICROCHIP_OTPC) += nvmem-microchip-otpc.o 
-+obj-$(CONFIG_NVMEM_MICROCHIP_OTPC) += nvmem-microchip-otpc.o - nvmem-microchip-otpc-y := microchip-otpc.o - obj-$(CONFIG_NVMEM_U_BOOT_ENV) += nvmem_u-boot-env.o - nvmem_u-boot-env-y := u-boot-env.o ---- a/drivers/thermal/qcom/Kconfig -+++ b/drivers/thermal/qcom/Kconfig -@@ -1,7 +1,7 @@ - # SPDX-License-Identifier: GPL-2.0-only - config QCOM_TSENS - tristate "Qualcomm TSENS Temperature Alarm" -- depends on QCOM_QFPROM -+ depends on NVMEM_QCOM_QFPROM - depends on ARCH_QCOM || COMPILE_TEST - help - This enables the thermal sysfs driver for the TSENS device. It shows diff --git a/target/linux/generic/backport-6.1/807-v6.1-0006-nvmem-sort-config-symbols-alphabetically.patch b/target/linux/generic/backport-6.1/807-v6.1-0006-nvmem-sort-config-symbols-alphabetically.patch deleted file mode 100644 index 4e45524bffcd..000000000000 --- a/target/linux/generic/backport-6.1/807-v6.1-0006-nvmem-sort-config-symbols-alphabetically.patch +++ /dev/null @@ -1,535 +0,0 @@ -From a06d9e5a63b7c2f622c908cd9600ce735e70f7c6 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= -Date: Fri, 16 Sep 2022 13:20:55 +0100 -Subject: [PATCH] nvmem: sort config symbols alphabetically -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -1. Match what most subsystems do -2. Simplify maintenance a bit -3. Reduce amount of conflicts for new drivers patches - -While at it unify indent level in Makefile. - -Signed-off-by: Rafał Miłecki -Signed-off-by: Srinivas Kandagatla -Link: https://lore.kernel.org/r/20220916122100.170016-9-srinivas.kandagatla@linaro.org -Signed-off-by: Greg Kroah-Hartman ---- - drivers/nvmem/Kconfig | 300 +++++++++++++++++++++-------------------- - drivers/nvmem/Makefile | 114 ++++++++-------- - 2 files changed, 208 insertions(+), 206 deletions(-) - ---- a/drivers/nvmem/Kconfig -+++ b/drivers/nvmem/Kconfig -@@ -21,6 +21,40 @@ config NVMEM_SYSFS - This interface is mostly used by userspace applications to - read/write directly into nvmem. - -+# Devices -+ -+config NVMEM_APPLE_EFUSES -+ tristate "Apple eFuse support" -+ depends on ARCH_APPLE || COMPILE_TEST -+ default ARCH_APPLE -+ help -+ Say y here to enable support for reading eFuses on Apple SoCs -+ such as the M1. These are e.g. used to store factory programmed -+ calibration data required for the PCIe or the USB-C PHY. -+ -+ This driver can also be built as a module. If so, the module will -+ be called nvmem-apple-efuses. -+ -+config NVMEM_BCM_OCOTP -+ tristate "Broadcom On-Chip OTP Controller support" -+ depends on ARCH_BCM_IPROC || COMPILE_TEST -+ depends on HAS_IOMEM -+ default ARCH_BCM_IPROC -+ help -+ Say y here to enable read/write access to the Broadcom OTP -+ controller. -+ -+ This driver can also be built as a module. If so, the module -+ will be called nvmem-bcm-ocotp. -+ -+config NVMEM_BRCM_NVRAM -+ tristate "Broadcom's NVRAM support" -+ depends on ARCH_BCM_5301X || COMPILE_TEST -+ depends on HAS_IOMEM -+ help -+ This driver provides support for Broadcom's NVRAM that can be accessed -+ using I/O mapping. -+ - config NVMEM_IMX_IIM - tristate "i.MX IC Identification Module support" - depends on ARCH_MXC || COMPILE_TEST -@@ -64,6 +98,19 @@ config NVMEM_JZ4780_EFUSE - To compile this driver as a module, choose M here: the module - will be called nvmem_jz4780_efuse. 
- -+config NVMEM_LAYERSCAPE_SFP -+ tristate "Layerscape SFP (Security Fuse Processor) support" -+ depends on ARCH_LAYERSCAPE || COMPILE_TEST -+ depends on HAS_IOMEM -+ select REGMAP_MMIO -+ help -+ This driver provides support to read the eFuses on Freescale -+ Layerscape SoC's. For example, the vendor provides a per part -+ unique ID there. -+ -+ This driver can also be built as a module. If so, the module -+ will be called layerscape-sfp. -+ - config NVMEM_LPC18XX_EEPROM - tristate "NXP LPC18XX EEPROM Memory Support" - depends on ARCH_LPC18XX || COMPILE_TEST -@@ -84,17 +131,32 @@ config NVMEM_LPC18XX_OTP - To compile this driver as a module, choose M here: the module - will be called nvmem_lpc18xx_otp. - --config NVMEM_MXS_OCOTP -- tristate "Freescale MXS On-Chip OTP Memory Support" -- depends on ARCH_MXS || COMPILE_TEST -- depends on HAS_IOMEM -+config NVMEM_MESON_EFUSE -+ tristate "Amlogic Meson GX eFuse Support" -+ depends on (ARCH_MESON || COMPILE_TEST) && MESON_SM - help -- If you say Y here, you will get readonly access to the -- One Time Programmable memory pages that are stored -- on the Freescale i.MX23/i.MX28 processor. -+ This is a driver to retrieve specific values from the eFuse found on -+ the Amlogic Meson GX SoCs. - - This driver can also be built as a module. If so, the module -- will be called nvmem-mxs-ocotp. -+ will be called nvmem_meson_efuse. -+ -+config NVMEM_MESON_MX_EFUSE -+ tristate "Amlogic Meson6/Meson8/Meson8b eFuse Support" -+ depends on ARCH_MESON || COMPILE_TEST -+ help -+ This is a driver to retrieve specific values from the eFuse found on -+ the Amlogic Meson6, Meson8 and Meson8b SoCs. -+ -+ This driver can also be built as a module. If so, the module -+ will be called nvmem_meson_mx_efuse. -+ -+config NVMEM_MICROCHIP_OTPC -+ tristate "Microchip OTPC support" -+ depends on ARCH_AT91 || COMPILE_TEST -+ help -+ This driver enable the OTP controller available on Microchip SAMA7G5 -+ SoCs. It controlls the access to the OTP memory connected to it. - - config NVMEM_MTK_EFUSE - tristate "Mediatek SoCs EFUSE support" -@@ -107,12 +169,17 @@ config NVMEM_MTK_EFUSE - This driver can also be built as a module. If so, the module - will be called efuse-mtk. - --config NVMEM_MICROCHIP_OTPC -- tristate "Microchip OTPC support" -- depends on ARCH_AT91 || COMPILE_TEST -+config NVMEM_MXS_OCOTP -+ tristate "Freescale MXS On-Chip OTP Memory Support" -+ depends on ARCH_MXS || COMPILE_TEST -+ depends on HAS_IOMEM - help -- This driver enable the OTP controller available on Microchip SAMA7G5 -- SoCs. It controlls the access to the OTP memory connected to it. -+ If you say Y here, you will get readonly access to the -+ One Time Programmable memory pages that are stored -+ on the Freescale i.MX23/i.MX28 processor. -+ -+ This driver can also be built as a module. If so, the module -+ will be called nvmem-mxs-ocotp. - - config NVMEM_NINTENDO_OTP - tristate "Nintendo Wii and Wii U OTP Support" -@@ -137,13 +204,21 @@ config NVMEM_QCOM_QFPROM - This driver can also be built as a module. If so, the module - will be called nvmem_qfprom. - --config NVMEM_SPMI_SDAM -- tristate "SPMI SDAM Support" -- depends on SPMI -+config NVMEM_RAVE_SP_EEPROM -+ tristate "Rave SP EEPROM Support" -+ depends on RAVE_SP_CORE - help -- This driver supports the Shared Direct Access Memory Module on -- Qualcomm Technologies, Inc. PMICs. It provides the clients -- an interface to read/write to the SDAM module's shared memory. -+ Say y here to enable Rave SP EEPROM support. 
-+ -+config NVMEM_RMEM -+ tristate "Reserved Memory Based Driver Support" -+ depends on HAS_IOMEM -+ help -+ This driver maps reserved memory into an nvmem device. It might be -+ useful to expose information left by firmware in memory. -+ -+ This driver can also be built as a module. If so, the module -+ will be called nvmem-rmem. - - config NVMEM_ROCKCHIP_EFUSE - tristate "Rockchip eFuse Support" -@@ -167,79 +242,16 @@ config NVMEM_ROCKCHIP_OTP - This driver can also be built as a module. If so, the module - will be called nvmem_rockchip_otp. - --config NVMEM_BCM_OCOTP -- tristate "Broadcom On-Chip OTP Controller support" -- depends on ARCH_BCM_IPROC || COMPILE_TEST -- depends on HAS_IOMEM -- default ARCH_BCM_IPROC -- help -- Say y here to enable read/write access to the Broadcom OTP -- controller. -- -- This driver can also be built as a module. If so, the module -- will be called nvmem-bcm-ocotp. -- --config NVMEM_STM32_ROMEM -- tristate "STMicroelectronics STM32 factory-programmed memory support" -- depends on ARCH_STM32 || COMPILE_TEST -- help -- Say y here to enable read-only access for STMicroelectronics STM32 -- factory-programmed memory area. -- -- This driver can also be built as a module. If so, the module -- will be called nvmem-stm32-romem. -- --config NVMEM_SUNXI_SID -- tristate "Allwinner SoCs SID support" -- depends on ARCH_SUNXI -- help -- This is a driver for the 'security ID' available on various Allwinner -- devices. -- -- This driver can also be built as a module. If so, the module -- will be called nvmem_sunxi_sid. -- --config NVMEM_UNIPHIER_EFUSE -- tristate "UniPhier SoCs eFuse support" -- depends on ARCH_UNIPHIER || COMPILE_TEST -- depends on HAS_IOMEM -- help -- This is a simple driver to dump specified values of UniPhier SoC -- from eFuse. -- -- This driver can also be built as a module. If so, the module -- will be called nvmem-uniphier-efuse. -- --config NVMEM_VF610_OCOTP -- tristate "VF610 SoC OCOTP support" -- depends on SOC_VF610 || COMPILE_TEST -+config NVMEM_SC27XX_EFUSE -+ tristate "Spreadtrum SC27XX eFuse Support" -+ depends on MFD_SC27XX_PMIC || COMPILE_TEST - depends on HAS_IOMEM - help -- This is a driver for the 'OCOTP' peripheral available on Vybrid -- devices like VF5xx and VF6xx. -- -- This driver can also be build as a module. If so, the module will -- be called nvmem-vf610-ocotp. -- --config NVMEM_MESON_EFUSE -- tristate "Amlogic Meson GX eFuse Support" -- depends on (ARCH_MESON || COMPILE_TEST) && MESON_SM -- help -- This is a driver to retrieve specific values from the eFuse found on -- the Amlogic Meson GX SoCs. -- -- This driver can also be built as a module. If so, the module -- will be called nvmem_meson_efuse. -- --config NVMEM_MESON_MX_EFUSE -- tristate "Amlogic Meson6/Meson8/Meson8b eFuse Support" -- depends on ARCH_MESON || COMPILE_TEST -- help -- This is a driver to retrieve specific values from the eFuse found on -- the Amlogic Meson6, Meson8 and Meson8b SoCs. -+ This is a simple driver to dump specified values of Spreadtrum -+ SC27XX PMICs from eFuse. - - This driver can also be built as a module. If so, the module -- will be called nvmem_meson_mx_efuse. -+ will be called nvmem-sc27xx-efuse. - - config NVMEM_SNVS_LPGPR - tristate "Support for Low Power General Purpose Register" -@@ -251,32 +263,13 @@ config NVMEM_SNVS_LPGPR - This driver can also be built as a module. If so, the module - will be called nvmem-snvs-lpgpr. 
- --config NVMEM_RAVE_SP_EEPROM -- tristate "Rave SP EEPROM Support" -- depends on RAVE_SP_CORE -- help -- Say y here to enable Rave SP EEPROM support. -- --config NVMEM_SC27XX_EFUSE -- tristate "Spreadtrum SC27XX eFuse Support" -- depends on MFD_SC27XX_PMIC || COMPILE_TEST -- depends on HAS_IOMEM -- help -- This is a simple driver to dump specified values of Spreadtrum -- SC27XX PMICs from eFuse. -- -- This driver can also be built as a module. If so, the module -- will be called nvmem-sc27xx-efuse. -- --config NVMEM_ZYNQMP -- bool "Xilinx ZYNQMP SoC nvmem firmware support" -- depends on ARCH_ZYNQMP -+config NVMEM_SPMI_SDAM -+ tristate "SPMI SDAM Support" -+ depends on SPMI - help -- This is a driver to access hardware related data like -- soc revision, IDCODE... etc by using the firmware -- interface. -- -- If sure, say yes. If unsure, say no. -+ This driver supports the Shared Direct Access Memory Module on -+ Qualcomm Technologies, Inc. PMICs. It provides the clients -+ an interface to read/write to the SDAM module's shared memory. - - config NVMEM_SPRD_EFUSE - tristate "Spreadtrum SoC eFuse Support" -@@ -289,36 +282,15 @@ config NVMEM_SPRD_EFUSE - This driver can also be built as a module. If so, the module - will be called nvmem-sprd-efuse. - --config NVMEM_RMEM -- tristate "Reserved Memory Based Driver Support" -- depends on HAS_IOMEM -- help -- This driver maps reserved memory into an nvmem device. It might be -- useful to expose information left by firmware in memory. -- -- This driver can also be built as a module. If so, the module -- will be called nvmem-rmem. -- --config NVMEM_BRCM_NVRAM -- tristate "Broadcom's NVRAM support" -- depends on ARCH_BCM_5301X || COMPILE_TEST -- depends on HAS_IOMEM -- help -- This driver provides support for Broadcom's NVRAM that can be accessed -- using I/O mapping. -- --config NVMEM_LAYERSCAPE_SFP -- tristate "Layerscape SFP (Security Fuse Processor) support" -- depends on ARCH_LAYERSCAPE || COMPILE_TEST -- depends on HAS_IOMEM -- select REGMAP_MMIO -+config NVMEM_STM32_ROMEM -+ tristate "STMicroelectronics STM32 factory-programmed memory support" -+ depends on ARCH_STM32 || COMPILE_TEST - help -- This driver provides support to read the eFuses on Freescale -- Layerscape SoC's. For example, the vendor provides a per part -- unique ID there. -+ Say y here to enable read-only access for STMicroelectronics STM32 -+ factory-programmed memory area. - - This driver can also be built as a module. If so, the module -- will be called layerscape-sfp. -+ will be called nvmem-stm32-romem. - - config NVMEM_SUNPLUS_OCOTP - tristate "Sunplus SoC OTP support" -@@ -332,17 +304,15 @@ config NVMEM_SUNPLUS_OCOTP - This driver can also be built as a module. If so, the module - will be called nvmem-sunplus-ocotp. - --config NVMEM_APPLE_EFUSES -- tristate "Apple eFuse support" -- depends on ARCH_APPLE || COMPILE_TEST -- default ARCH_APPLE -+config NVMEM_SUNXI_SID -+ tristate "Allwinner SoCs SID support" -+ depends on ARCH_SUNXI - help -- Say y here to enable support for reading eFuses on Apple SoCs -- such as the M1. These are e.g. used to store factory programmed -- calibration data required for the PCIe or the USB-C PHY. -+ This is a driver for the 'security ID' available on various Allwinner -+ devices. - -- This driver can also be built as a module. If so, the module will -- be called nvmem-apple-efuses. -+ This driver can also be built as a module. If so, the module -+ will be called nvmem_sunxi_sid. 
- - config NVMEM_U_BOOT_ENV - tristate "U-Boot environment variables support" -@@ -357,4 +327,36 @@ config NVMEM_U_BOOT_ENV - - If compiled as module it will be called nvmem_u-boot-env. - -+config NVMEM_UNIPHIER_EFUSE -+ tristate "UniPhier SoCs eFuse support" -+ depends on ARCH_UNIPHIER || COMPILE_TEST -+ depends on HAS_IOMEM -+ help -+ This is a simple driver to dump specified values of UniPhier SoC -+ from eFuse. -+ -+ This driver can also be built as a module. If so, the module -+ will be called nvmem-uniphier-efuse. -+ -+config NVMEM_VF610_OCOTP -+ tristate "VF610 SoC OCOTP support" -+ depends on SOC_VF610 || COMPILE_TEST -+ depends on HAS_IOMEM -+ help -+ This is a driver for the 'OCOTP' peripheral available on Vybrid -+ devices like VF5xx and VF6xx. -+ -+ This driver can also be build as a module. If so, the module will -+ be called nvmem-vf610-ocotp. -+ -+config NVMEM_ZYNQMP -+ bool "Xilinx ZYNQMP SoC nvmem firmware support" -+ depends on ARCH_ZYNQMP -+ help -+ This is a driver to access hardware related data like -+ soc revision, IDCODE... etc by using the firmware -+ interface. -+ -+ If sure, say yes. If unsure, say no. -+ - endif ---- a/drivers/nvmem/Makefile -+++ b/drivers/nvmem/Makefile -@@ -7,67 +7,67 @@ obj-$(CONFIG_NVMEM) += nvmem_core.o - nvmem_core-y := core.o - - # Devices --obj-$(CONFIG_NVMEM_BCM_OCOTP) += nvmem-bcm-ocotp.o --nvmem-bcm-ocotp-y := bcm-ocotp.o --obj-$(CONFIG_NVMEM_IMX_IIM) += nvmem-imx-iim.o --nvmem-imx-iim-y := imx-iim.o --obj-$(CONFIG_NVMEM_IMX_OCOTP) += nvmem-imx-ocotp.o --nvmem-imx-ocotp-y := imx-ocotp.o -+obj-$(CONFIG_NVMEM_APPLE_EFUSES) += nvmem-apple-efuses.o -+nvmem-apple-efuses-y := apple-efuses.o -+obj-$(CONFIG_NVMEM_BCM_OCOTP) += nvmem-bcm-ocotp.o -+nvmem-bcm-ocotp-y := bcm-ocotp.o -+obj-$(CONFIG_NVMEM_BRCM_NVRAM) += nvmem_brcm_nvram.o -+nvmem_brcm_nvram-y := brcm_nvram.o -+obj-$(CONFIG_NVMEM_IMX_IIM) += nvmem-imx-iim.o -+nvmem-imx-iim-y := imx-iim.o -+obj-$(CONFIG_NVMEM_IMX_OCOTP) += nvmem-imx-ocotp.o -+nvmem-imx-ocotp-y := imx-ocotp.o - obj-$(CONFIG_NVMEM_IMX_OCOTP_SCU) += nvmem-imx-ocotp-scu.o --nvmem-imx-ocotp-scu-y := imx-ocotp-scu.o --obj-$(CONFIG_NVMEM_JZ4780_EFUSE) += nvmem_jz4780_efuse.o --nvmem_jz4780_efuse-y := jz4780-efuse.o -+nvmem-imx-ocotp-scu-y := imx-ocotp-scu.o -+obj-$(CONFIG_NVMEM_JZ4780_EFUSE) += nvmem_jz4780_efuse.o -+nvmem_jz4780_efuse-y := jz4780-efuse.o -+obj-$(CONFIG_NVMEM_LAYERSCAPE_SFP) += nvmem-layerscape-sfp.o -+nvmem-layerscape-sfp-y := layerscape-sfp.o - obj-$(CONFIG_NVMEM_LPC18XX_EEPROM) += nvmem_lpc18xx_eeprom.o --nvmem_lpc18xx_eeprom-y := lpc18xx_eeprom.o --obj-$(CONFIG_NVMEM_LPC18XX_OTP) += nvmem_lpc18xx_otp.o --nvmem_lpc18xx_otp-y := lpc18xx_otp.o --obj-$(CONFIG_NVMEM_MXS_OCOTP) += nvmem-mxs-ocotp.o --nvmem-mxs-ocotp-y := mxs-ocotp.o --obj-$(CONFIG_NVMEM_NINTENDO_OTP) += nvmem-nintendo-otp.o --nvmem-nintendo-otp-y := nintendo-otp.o -+nvmem_lpc18xx_eeprom-y := lpc18xx_eeprom.o -+obj-$(CONFIG_NVMEM_LPC18XX_OTP) += nvmem_lpc18xx_otp.o -+nvmem_lpc18xx_otp-y := lpc18xx_otp.o -+obj-$(CONFIG_NVMEM_MESON_EFUSE) += nvmem_meson_efuse.o -+nvmem_meson_efuse-y := meson-efuse.o -+obj-$(CONFIG_NVMEM_MESON_MX_EFUSE) += nvmem_meson_mx_efuse.o -+nvmem_meson_mx_efuse-y := meson-mx-efuse.o -+obj-$(CONFIG_NVMEM_MICROCHIP_OTPC) += nvmem-microchip-otpc.o -+nvmem-microchip-otpc-y := microchip-otpc.o - obj-$(CONFIG_NVMEM_MTK_EFUSE) += nvmem_mtk-efuse.o --nvmem_mtk-efuse-y := mtk-efuse.o --obj-$(CONFIG_NVMEM_QCOM_QFPROM) += nvmem_qfprom.o --nvmem_qfprom-y := qfprom.o --obj-$(CONFIG_NVMEM_SPMI_SDAM) += nvmem_qcom-spmi-sdam.o 
--nvmem_qcom-spmi-sdam-y += qcom-spmi-sdam.o -+nvmem_mtk-efuse-y := mtk-efuse.o -+obj-$(CONFIG_NVMEM_MXS_OCOTP) += nvmem-mxs-ocotp.o -+nvmem-mxs-ocotp-y := mxs-ocotp.o -+obj-$(CONFIG_NVMEM_NINTENDO_OTP) += nvmem-nintendo-otp.o -+nvmem-nintendo-otp-y := nintendo-otp.o -+obj-$(CONFIG_NVMEM_QCOM_QFPROM) += nvmem_qfprom.o -+nvmem_qfprom-y := qfprom.o -+obj-$(CONFIG_NVMEM_RAVE_SP_EEPROM) += nvmem-rave-sp-eeprom.o -+nvmem-rave-sp-eeprom-y := rave-sp-eeprom.o -+obj-$(CONFIG_NVMEM_RMEM) += nvmem-rmem.o -+nvmem-rmem-y := rmem.o - obj-$(CONFIG_NVMEM_ROCKCHIP_EFUSE) += nvmem_rockchip_efuse.o --nvmem_rockchip_efuse-y := rockchip-efuse.o -+nvmem_rockchip_efuse-y := rockchip-efuse.o - obj-$(CONFIG_NVMEM_ROCKCHIP_OTP) += nvmem-rockchip-otp.o --nvmem-rockchip-otp-y := rockchip-otp.o --obj-$(CONFIG_NVMEM_SUNXI_SID) += nvmem_sunxi_sid.o --nvmem_stm32_romem-y := stm32-romem.o --obj-$(CONFIG_NVMEM_STM32_ROMEM) += nvmem_stm32_romem.o --nvmem_sunxi_sid-y := sunxi_sid.o --obj-$(CONFIG_NVMEM_UNIPHIER_EFUSE) += nvmem-uniphier-efuse.o --nvmem-uniphier-efuse-y := uniphier-efuse.o --obj-$(CONFIG_NVMEM_VF610_OCOTP) += nvmem-vf610-ocotp.o --nvmem-vf610-ocotp-y := vf610-ocotp.o --obj-$(CONFIG_NVMEM_MESON_EFUSE) += nvmem_meson_efuse.o --nvmem_meson_efuse-y := meson-efuse.o --obj-$(CONFIG_NVMEM_MESON_MX_EFUSE) += nvmem_meson_mx_efuse.o --nvmem_meson_mx_efuse-y := meson-mx-efuse.o --obj-$(CONFIG_NVMEM_SNVS_LPGPR) += nvmem_snvs_lpgpr.o --nvmem_snvs_lpgpr-y := snvs_lpgpr.o --obj-$(CONFIG_NVMEM_RAVE_SP_EEPROM) += nvmem-rave-sp-eeprom.o --nvmem-rave-sp-eeprom-y := rave-sp-eeprom.o -+nvmem-rockchip-otp-y := rockchip-otp.o - obj-$(CONFIG_NVMEM_SC27XX_EFUSE) += nvmem-sc27xx-efuse.o --nvmem-sc27xx-efuse-y := sc27xx-efuse.o --obj-$(CONFIG_NVMEM_ZYNQMP) += nvmem_zynqmp_nvmem.o --nvmem_zynqmp_nvmem-y := zynqmp_nvmem.o --obj-$(CONFIG_NVMEM_SPRD_EFUSE) += nvmem_sprd_efuse.o --nvmem_sprd_efuse-y := sprd-efuse.o --obj-$(CONFIG_NVMEM_RMEM) += nvmem-rmem.o --nvmem-rmem-y := rmem.o --obj-$(CONFIG_NVMEM_BRCM_NVRAM) += nvmem_brcm_nvram.o --nvmem_brcm_nvram-y := brcm_nvram.o --obj-$(CONFIG_NVMEM_LAYERSCAPE_SFP) += nvmem-layerscape-sfp.o --nvmem-layerscape-sfp-y := layerscape-sfp.o -+nvmem-sc27xx-efuse-y := sc27xx-efuse.o -+obj-$(CONFIG_NVMEM_SNVS_LPGPR) += nvmem_snvs_lpgpr.o -+nvmem_snvs_lpgpr-y := snvs_lpgpr.o -+obj-$(CONFIG_NVMEM_SPMI_SDAM) += nvmem_qcom-spmi-sdam.o -+nvmem_qcom-spmi-sdam-y += qcom-spmi-sdam.o -+obj-$(CONFIG_NVMEM_SPRD_EFUSE) += nvmem_sprd_efuse.o -+nvmem_sprd_efuse-y := sprd-efuse.o -+obj-$(CONFIG_NVMEM_STM32_ROMEM) += nvmem_stm32_romem.o -+nvmem_stm32_romem-y := stm32-romem.o - obj-$(CONFIG_NVMEM_SUNPLUS_OCOTP) += nvmem_sunplus_ocotp.o --nvmem_sunplus_ocotp-y := sunplus-ocotp.o --obj-$(CONFIG_NVMEM_APPLE_EFUSES) += nvmem-apple-efuses.o --nvmem-apple-efuses-y := apple-efuses.o --obj-$(CONFIG_NVMEM_MICROCHIP_OTPC) += nvmem-microchip-otpc.o --nvmem-microchip-otpc-y := microchip-otpc.o --obj-$(CONFIG_NVMEM_U_BOOT_ENV) += nvmem_u-boot-env.o --nvmem_u-boot-env-y := u-boot-env.o -+nvmem_sunplus_ocotp-y := sunplus-ocotp.o -+obj-$(CONFIG_NVMEM_SUNXI_SID) += nvmem_sunxi_sid.o -+nvmem_sunxi_sid-y := sunxi_sid.o -+obj-$(CONFIG_NVMEM_U_BOOT_ENV) += nvmem_u-boot-env.o -+nvmem_u-boot-env-y := u-boot-env.o -+obj-$(CONFIG_NVMEM_UNIPHIER_EFUSE) += nvmem-uniphier-efuse.o -+nvmem-uniphier-efuse-y := uniphier-efuse.o -+obj-$(CONFIG_NVMEM_VF610_OCOTP) += nvmem-vf610-ocotp.o -+nvmem-vf610-ocotp-y := vf610-ocotp.o -+obj-$(CONFIG_NVMEM_ZYNQMP) += nvmem_zynqmp_nvmem.o -+nvmem_zynqmp_nvmem-y := zynqmp_nvmem.o diff --git 
a/target/linux/generic/backport-6.1/807-v6.1-0007-nvmem-u-boot-env-find-Device-Tree-nodes-for-NVMEM-ce.patch b/target/linux/generic/backport-6.1/807-v6.1-0007-nvmem-u-boot-env-find-Device-Tree-nodes-for-NVMEM-ce.patch deleted file mode 100644 index e0a082adc45e..000000000000 --- a/target/linux/generic/backport-6.1/807-v6.1-0007-nvmem-u-boot-env-find-Device-Tree-nodes-for-NVMEM-ce.patch +++ /dev/null @@ -1,31 +0,0 @@ -From d4d432670f7dee0a5432fcffcfc8699b25181ace Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= -Date: Fri, 16 Sep 2022 13:20:57 +0100 -Subject: [PATCH] nvmem: u-boot-env: find Device Tree nodes for NVMEM cells -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -DT binding allows specifying NVMEM cells as NVMEM device (provider) -subnodes. Looks for such subnodes when building NVMEM cells. - -This allows NVMEM consumers to use U-Boot environment variables. - -Signed-off-by: Rafał Miłecki -Signed-off-by: Srinivas Kandagatla -Link: https://lore.kernel.org/r/20220916122100.170016-11-srinivas.kandagatla@linaro.org -Signed-off-by: Greg Kroah-Hartman ---- - drivers/nvmem/u-boot-env.c | 1 + - 1 file changed, 1 insertion(+) - ---- a/drivers/nvmem/u-boot-env.c -+++ b/drivers/nvmem/u-boot-env.c -@@ -92,6 +92,7 @@ static int u_boot_env_add_cells(struct u - return -ENOMEM; - priv->cells[idx].offset = data_offset + value - data; - priv->cells[idx].bytes = strlen(value); -+ priv->cells[idx].np = of_get_child_by_name(dev->of_node, priv->cells[idx].name); - } - - if (WARN_ON(idx != priv->ncells)) diff --git a/target/linux/generic/backport-6.1/807-v6.1-0008-nvmem-lan9662-otp-add-support.patch b/target/linux/generic/backport-6.1/807-v6.1-0008-nvmem-lan9662-otp-add-support.patch deleted file mode 100644 index 945c6128ff49..000000000000 --- a/target/linux/generic/backport-6.1/807-v6.1-0008-nvmem-lan9662-otp-add-support.patch +++ /dev/null @@ -1,274 +0,0 @@ -From 9e8f208ad5229ddda97cd4a83ecf89c735d99592 Mon Sep 17 00:00:00 2001 -From: Horatiu Vultur -Date: Fri, 16 Sep 2022 13:20:59 +0100 -Subject: [PATCH] nvmem: lan9662-otp: add support - -Add support for OTP controller available on LAN9662. The OTPC controls -the access to a non-volatile memory. The size of the memory is 8KB. -The OTPC can access the memory based on an offset. -Implement both the read and the write functionality. - -Signed-off-by: Horatiu Vultur -Signed-off-by: Srinivas Kandagatla -Link: https://lore.kernel.org/r/20220916122100.170016-13-srinivas.kandagatla@linaro.org -Signed-off-by: Greg Kroah-Hartman ---- - drivers/nvmem/Kconfig | 8 ++ - drivers/nvmem/Makefile | 2 + - drivers/nvmem/lan9662-otpc.c | 222 +++++++++++++++++++++++++++++++++++ - 3 files changed, 232 insertions(+) - create mode 100644 drivers/nvmem/lan9662-otpc.c - ---- a/drivers/nvmem/Kconfig -+++ b/drivers/nvmem/Kconfig -@@ -98,6 +98,14 @@ config NVMEM_JZ4780_EFUSE - To compile this driver as a module, choose M here: the module - will be called nvmem_jz4780_efuse. - -+config NVMEM_LAN9662_OTPC -+ tristate "Microchip LAN9662 OTP controller support" -+ depends on SOC_LAN966 || COMPILE_TEST -+ depends on HAS_IOMEM -+ help -+ This driver enables the OTP controller available on Microchip LAN9662 -+ SoCs. It controls the access to the OTP memory connected to it. 
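Before the driver sources below, a short usage note: once probed, the 8 KiB OTP shows up as a raw nvmem file in sysfs. The userspace sketch below assumes the device ends up named "lan9662-otp0" (derived from the nvmem_config name plus an instance id); the exact name should be checked on the target.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define OTP_PATH "/sys/bus/nvmem/devices/lan9662-otp0/nvmem"
#define OTP_SIZE 8192

int main(void)
{
        static unsigned char buf[OTP_SIZE];
        ssize_t n;
        int fd;

        fd = open(OTP_PATH, O_RDONLY);
        if (fd < 0) {
                perror("open");
                return EXIT_FAILURE;
        }

        n = read(fd, buf, sizeof(buf));
        if (n < 0) {
                perror("read");
                close(fd);
                return EXIT_FAILURE;
        }

        printf("read %zd byte(s), first byte 0x%02x\n", n, buf[0]);
        close(fd);
        return EXIT_SUCCESS;
}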
-+ - config NVMEM_LAYERSCAPE_SFP - tristate "Layerscape SFP (Security Fuse Processor) support" - depends on ARCH_LAYERSCAPE || COMPILE_TEST ---- a/drivers/nvmem/Makefile -+++ b/drivers/nvmem/Makefile -@@ -21,6 +21,8 @@ obj-$(CONFIG_NVMEM_IMX_OCOTP_SCU) += nvm - nvmem-imx-ocotp-scu-y := imx-ocotp-scu.o - obj-$(CONFIG_NVMEM_JZ4780_EFUSE) += nvmem_jz4780_efuse.o - nvmem_jz4780_efuse-y := jz4780-efuse.o -+obj-$(CONFIG_NVMEM_LAN9662_OTPC) += nvmem-lan9662-otpc.o -+nvmem-lan9662-otpc-y := lan9662-otpc.o - obj-$(CONFIG_NVMEM_LAYERSCAPE_SFP) += nvmem-layerscape-sfp.o - nvmem-layerscape-sfp-y := layerscape-sfp.o - obj-$(CONFIG_NVMEM_LPC18XX_EEPROM) += nvmem_lpc18xx_eeprom.o ---- /dev/null -+++ b/drivers/nvmem/lan9662-otpc.c -@@ -0,0 +1,222 @@ -+// SPDX-License-Identifier: GPL-2.0 -+ -+#include -+#include -+#include -+#include -+#include -+ -+#define OTP_OTP_PWR_DN(t) (t + 0x00) -+#define OTP_OTP_PWR_DN_OTP_PWRDN_N BIT(0) -+#define OTP_OTP_ADDR_HI(t) (t + 0x04) -+#define OTP_OTP_ADDR_LO(t) (t + 0x08) -+#define OTP_OTP_PRGM_DATA(t) (t + 0x10) -+#define OTP_OTP_PRGM_MODE(t) (t + 0x14) -+#define OTP_OTP_PRGM_MODE_OTP_PGM_MODE_BYTE BIT(0) -+#define OTP_OTP_RD_DATA(t) (t + 0x18) -+#define OTP_OTP_FUNC_CMD(t) (t + 0x20) -+#define OTP_OTP_FUNC_CMD_OTP_PROGRAM BIT(1) -+#define OTP_OTP_FUNC_CMD_OTP_READ BIT(0) -+#define OTP_OTP_CMD_GO(t) (t + 0x28) -+#define OTP_OTP_CMD_GO_OTP_GO BIT(0) -+#define OTP_OTP_PASS_FAIL(t) (t + 0x2c) -+#define OTP_OTP_PASS_FAIL_OTP_READ_PROHIBITED BIT(3) -+#define OTP_OTP_PASS_FAIL_OTP_WRITE_PROHIBITED BIT(2) -+#define OTP_OTP_PASS_FAIL_OTP_FAIL BIT(0) -+#define OTP_OTP_STATUS(t) (t + 0x30) -+#define OTP_OTP_STATUS_OTP_CPUMPEN BIT(1) -+#define OTP_OTP_STATUS_OTP_BUSY BIT(0) -+ -+#define OTP_MEM_SIZE 8192 -+#define OTP_SLEEP_US 10 -+#define OTP_TIMEOUT_US 500000 -+ -+struct lan9662_otp { -+ struct device *dev; -+ void __iomem *base; -+}; -+ -+static bool lan9662_otp_wait_flag_clear(void __iomem *reg, u32 flag) -+{ -+ u32 val; -+ -+ return readl_poll_timeout(reg, val, !(val & flag), -+ OTP_SLEEP_US, OTP_TIMEOUT_US); -+} -+ -+static int lan9662_otp_power(struct lan9662_otp *otp, bool up) -+{ -+ void __iomem *pwrdn = OTP_OTP_PWR_DN(otp->base); -+ -+ if (up) { -+ writel(readl(pwrdn) & ~OTP_OTP_PWR_DN_OTP_PWRDN_N, pwrdn); -+ if (lan9662_otp_wait_flag_clear(OTP_OTP_STATUS(otp->base), -+ OTP_OTP_STATUS_OTP_CPUMPEN)) -+ return -ETIMEDOUT; -+ } else { -+ writel(readl(pwrdn) | OTP_OTP_PWR_DN_OTP_PWRDN_N, pwrdn); -+ } -+ -+ return 0; -+} -+ -+static int lan9662_otp_execute(struct lan9662_otp *otp) -+{ -+ if (lan9662_otp_wait_flag_clear(OTP_OTP_CMD_GO(otp->base), -+ OTP_OTP_CMD_GO_OTP_GO)) -+ return -ETIMEDOUT; -+ -+ if (lan9662_otp_wait_flag_clear(OTP_OTP_STATUS(otp->base), -+ OTP_OTP_STATUS_OTP_BUSY)) -+ return -ETIMEDOUT; -+ -+ return 0; -+} -+ -+static void lan9662_otp_set_address(struct lan9662_otp *otp, u32 offset) -+{ -+ writel(0xff & (offset >> 8), OTP_OTP_ADDR_HI(otp->base)); -+ writel(0xff & offset, OTP_OTP_ADDR_LO(otp->base)); -+} -+ -+static int lan9662_otp_read_byte(struct lan9662_otp *otp, u32 offset, u8 *dst) -+{ -+ u32 pass; -+ int rc; -+ -+ lan9662_otp_set_address(otp, offset); -+ writel(OTP_OTP_FUNC_CMD_OTP_READ, OTP_OTP_FUNC_CMD(otp->base)); -+ writel(OTP_OTP_CMD_GO_OTP_GO, OTP_OTP_CMD_GO(otp->base)); -+ rc = lan9662_otp_execute(otp); -+ if (!rc) { -+ pass = readl(OTP_OTP_PASS_FAIL(otp->base)); -+ if (pass & OTP_OTP_PASS_FAIL_OTP_READ_PROHIBITED) -+ return -EACCES; -+ *dst = (u8) readl(OTP_OTP_RD_DATA(otp->base)); -+ } -+ return rc; -+} -+ -+static int 
lan9662_otp_write_byte(struct lan9662_otp *otp, u32 offset, u8 data) -+{ -+ u32 pass; -+ int rc; -+ -+ lan9662_otp_set_address(otp, offset); -+ writel(OTP_OTP_PRGM_MODE_OTP_PGM_MODE_BYTE, OTP_OTP_PRGM_MODE(otp->base)); -+ writel(data, OTP_OTP_PRGM_DATA(otp->base)); -+ writel(OTP_OTP_FUNC_CMD_OTP_PROGRAM, OTP_OTP_FUNC_CMD(otp->base)); -+ writel(OTP_OTP_CMD_GO_OTP_GO, OTP_OTP_CMD_GO(otp->base)); -+ -+ rc = lan9662_otp_execute(otp); -+ if (!rc) { -+ pass = readl(OTP_OTP_PASS_FAIL(otp->base)); -+ if (pass & OTP_OTP_PASS_FAIL_OTP_WRITE_PROHIBITED) -+ return -EACCES; -+ if (pass & OTP_OTP_PASS_FAIL_OTP_FAIL) -+ return -EIO; -+ } -+ return rc; -+} -+ -+static int lan9662_otp_read(void *context, unsigned int offset, -+ void *_val, size_t bytes) -+{ -+ struct lan9662_otp *otp = context; -+ u8 *val = _val; -+ uint8_t data; -+ int i, rc = 0; -+ -+ lan9662_otp_power(otp, true); -+ for (i = 0; i < bytes; i++) { -+ rc = lan9662_otp_read_byte(otp, offset + i, &data); -+ if (rc < 0) -+ break; -+ *val++ = data; -+ } -+ lan9662_otp_power(otp, false); -+ -+ return rc; -+} -+ -+static int lan9662_otp_write(void *context, unsigned int offset, -+ void *_val, size_t bytes) -+{ -+ struct lan9662_otp *otp = context; -+ u8 *val = _val; -+ u8 data, newdata; -+ int i, rc = 0; -+ -+ lan9662_otp_power(otp, true); -+ for (i = 0; i < bytes; i++) { -+ /* Skip zero bytes */ -+ if (val[i]) { -+ rc = lan9662_otp_read_byte(otp, offset + i, &data); -+ if (rc < 0) -+ break; -+ -+ newdata = data | val[i]; -+ if (newdata == data) -+ continue; -+ -+ rc = lan9662_otp_write_byte(otp, offset + i, -+ newdata); -+ if (rc < 0) -+ break; -+ } -+ } -+ lan9662_otp_power(otp, false); -+ -+ return rc; -+} -+ -+static struct nvmem_config otp_config = { -+ .name = "lan9662-otp", -+ .stride = 1, -+ .word_size = 1, -+ .reg_read = lan9662_otp_read, -+ .reg_write = lan9662_otp_write, -+ .size = OTP_MEM_SIZE, -+}; -+ -+static int lan9662_otp_probe(struct platform_device *pdev) -+{ -+ struct device *dev = &pdev->dev; -+ struct nvmem_device *nvmem; -+ struct lan9662_otp *otp; -+ -+ otp = devm_kzalloc(&pdev->dev, sizeof(*otp), GFP_KERNEL); -+ if (!otp) -+ return -ENOMEM; -+ -+ otp->dev = dev; -+ otp->base = devm_platform_ioremap_resource(pdev, 0); -+ if (IS_ERR(otp->base)) -+ return PTR_ERR(otp->base); -+ -+ otp_config.priv = otp; -+ otp_config.dev = dev; -+ -+ nvmem = devm_nvmem_register(dev, &otp_config); -+ -+ return PTR_ERR_OR_ZERO(nvmem); -+} -+ -+static const struct of_device_id lan9662_otp_match[] = { -+ { .compatible = "microchip,lan9662-otp", }, -+ { }, -+}; -+MODULE_DEVICE_TABLE(of, lan9662_otp_match); -+ -+static struct platform_driver lan9662_otp_driver = { -+ .probe = lan9662_otp_probe, -+ .driver = { -+ .name = "lan9662-otp", -+ .of_match_table = lan9662_otp_match, -+ }, -+}; -+module_platform_driver(lan9662_otp_driver); -+ -+MODULE_AUTHOR("Horatiu Vultur "); -+MODULE_DESCRIPTION("lan9662 OTP driver"); -+MODULE_LICENSE("GPL"); diff --git a/target/linux/generic/backport-6.1/807-v6.1-0009-nvmem-u-boot-env-fix-crc32-casting-type.patch b/target/linux/generic/backport-6.1/807-v6.1-0009-nvmem-u-boot-env-fix-crc32-casting-type.patch deleted file mode 100644 index 633a668a962f..000000000000 --- a/target/linux/generic/backport-6.1/807-v6.1-0009-nvmem-u-boot-env-fix-crc32-casting-type.patch +++ /dev/null @@ -1,32 +0,0 @@ -From 3717ca3e0cc8683f93b41d3f06ca79631eb58715 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= -Date: Fri, 16 Sep 2022 13:21:00 +0100 -Subject: [PATCH] nvmem: u-boot-env: fix crc32 casting type -MIME-Version: 
1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -This fixes: -drivers/nvmem/u-boot-env.c:141:17: sparse: sparse: cast to restricted __le32 - -Fixes: d5542923f200 ("nvmem: add driver handling U-Boot environment variables") -Reported-by: kernel test robot -Signed-off-by: Rafał Miłecki -Signed-off-by: Srinivas Kandagatla -Link: https://lore.kernel.org/r/20220916122100.170016-14-srinivas.kandagatla@linaro.org -Signed-off-by: Greg Kroah-Hartman ---- - drivers/nvmem/u-boot-env.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/nvmem/u-boot-env.c -+++ b/drivers/nvmem/u-boot-env.c -@@ -139,7 +139,7 @@ static int u_boot_env_parse(struct u_boo - data_offset = offsetof(struct u_boot_env_image_redundant, data); - break; - } -- crc32 = le32_to_cpu(*(uint32_t *)(buf + crc32_offset)); -+ crc32 = le32_to_cpu(*(__le32 *)(buf + crc32_offset)); - crc32_data_len = priv->mtd->size - crc32_data_offset; - data_len = priv->mtd->size - data_offset; - diff --git a/target/linux/generic/backport-6.1/807-v6.1-0010-nvmem-lan9662-otp-Fix-compatible-string.patch b/target/linux/generic/backport-6.1/807-v6.1-0010-nvmem-lan9662-otp-Fix-compatible-string.patch deleted file mode 100644 index b663a1328df3..000000000000 --- a/target/linux/generic/backport-6.1/807-v6.1-0010-nvmem-lan9662-otp-Fix-compatible-string.patch +++ /dev/null @@ -1,34 +0,0 @@ -From 1aeb122d214b92474c86fde00a03d6e2d69381b5 Mon Sep 17 00:00:00 2001 -From: Horatiu Vultur -Date: Wed, 28 Sep 2022 21:51:12 +0200 -Subject: [PATCH] nvmem: lan9662-otp: Fix compatible string - -The device tree bindings for lan9662-otp expects the compatible string -to be one of following compatible strings: -microchip,lan9662-otpc -microchip,lan9668-otpc - -The problem is that the lan9662-otp driver contains the -microchip,lan9662-otp compatible string instead of -microchip,lan9662-otpc. -Fix this by updating the compatible string in the driver. - -Fixes: 9e8f208ad5229d ("nvmem: lan9662-otp: add support") -Signed-off-by: Horatiu Vultur -Link: https://lore.kernel.org/r/20220928195112.630351-1-horatiu.vultur@microchip.com -Signed-off-by: Greg Kroah-Hartman ---- - drivers/nvmem/lan9662-otpc.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/nvmem/lan9662-otpc.c -+++ b/drivers/nvmem/lan9662-otpc.c -@@ -203,7 +203,7 @@ static int lan9662_otp_probe(struct plat - } - - static const struct of_device_id lan9662_otp_match[] = { -- { .compatible = "microchip,lan9662-otp", }, -+ { .compatible = "microchip,lan9662-otpc", }, - { }, - }; - MODULE_DEVICE_TABLE(of, lan9662_otp_match); diff --git a/target/linux/generic/backport-6.1/807-v6.1-0011-nvmem-u-boot-env-fix-crc32_data_offset-on-redundant-.patch b/target/linux/generic/backport-6.1/807-v6.1-0011-nvmem-u-boot-env-fix-crc32_data_offset-on-redundant-.patch deleted file mode 100644 index 967e891dbd9c..000000000000 --- a/target/linux/generic/backport-6.1/807-v6.1-0011-nvmem-u-boot-env-fix-crc32_data_offset-on-redundant-.patch +++ /dev/null @@ -1,59 +0,0 @@ -From ee424f7d3960152f5f862bbb6943e59828dc7917 Mon Sep 17 00:00:00 2001 -From: Christian Lamparter -Date: Fri, 4 Nov 2022 17:52:03 +0100 -Subject: [PATCH] nvmem: u-boot-env: fix crc32_data_offset on redundant - u-boot-env - -The Western Digital MyBook Live (PowerPC 464/APM82181) -has a set of redundant u-boot-env. 
Loading up the driver -the following error: - -| u_boot_env: Invalid calculated CRC32: 0x4f8f2c86 (expected: 0x98b14514) -| u_boot_env: probe of partition@1e000 failed with error -22 - -Looking up the userspace libubootenv utilities source [0], -it looks like the "mark" or "flag" is not part of the -crc32 sum... which is unfortunate :( - -|static int libuboot_load(struct uboot_ctx *ctx) -|{ -|[...] -| if (ctx->redundant) { -| [...] -| offsetdata = offsetof(struct uboot_env_redund, data); -| [...] //-----^^ -| } -| usable_envsize = ctx->size - offsetdata; -| buf[0] = malloc(bufsize); -|[...] -| for (i = 0; i < copies; i++) { -| data = (uint8_t *)(buf[i] + offsetdata); -| uint32_t crc; -| -| ret = devread(ctx, i, buf[i]); -| [...] -| crc = *(uint32_t *)(buf[i] + offsetcrc); -| dev->crc = crc32(0, (uint8_t *)data, usable_envsize); -| - -[0] https://github.com/sbabic/libubootenv/blob/master/src/uboot_env.c#L951 - -Fixes: d5542923f200 ("nvmem: add driver handling U-Boot environment variables") -Signed-off-by: Christian Lamparter -Link: https://lore.kernel.org/r/70a16eae113e08db2390b76e174f4837caa135c3.1667580636.git.chunkeey@gmail.com -Signed-off-by: Greg Kroah-Hartman ---- - drivers/nvmem/u-boot-env.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/nvmem/u-boot-env.c -+++ b/drivers/nvmem/u-boot-env.c -@@ -135,7 +135,7 @@ static int u_boot_env_parse(struct u_boo - break; - case U_BOOT_FORMAT_REDUNDANT: - crc32_offset = offsetof(struct u_boot_env_image_redundant, crc32); -- crc32_data_offset = offsetof(struct u_boot_env_image_redundant, mark); -+ crc32_data_offset = offsetof(struct u_boot_env_image_redundant, data); - data_offset = offsetof(struct u_boot_env_image_redundant, data); - break; - } diff --git a/target/linux/generic/backport-6.1/807-v6.1-0013-nvmem-lan9662-otp-Change-return-type-of-lan9662_otp_.patch b/target/linux/generic/backport-6.1/807-v6.1-0013-nvmem-lan9662-otp-Change-return-type-of-lan9662_otp_.patch deleted file mode 100644 index 0c842f079321..000000000000 --- a/target/linux/generic/backport-6.1/807-v6.1-0013-nvmem-lan9662-otp-Change-return-type-of-lan9662_otp_.patch +++ /dev/null @@ -1,35 +0,0 @@ -From 022b68f271de0e53024e6d5e96fee8e76d25eb95 Mon Sep 17 00:00:00 2001 -From: Horatiu Vultur -Date: Fri, 18 Nov 2022 06:38:40 +0000 -Subject: [PATCH] nvmem: lan9662-otp: Change return type of - lan9662_otp_wait_flag_clear() - -The blamed commit introduced the following smatch warning in the -function lan9662_otp_wait_flag_clear: -drivers/nvmem/lan9662-otpc.c:43 lan9662_otp_wait_flag_clear() warn: signedness bug returning '(-110)' - -Fix this by changing the return type of the function -lan9662_otp_wait_flag_clear() to be int instead of bool. 
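A stand-alone illustration of the bug class described above (this is not the driver code): a negative errno returned from a function declared bool collapses to 'true', so the caller loses the actual error value. Changing the return type to int, as the patch below does, keeps -ETIMEDOUT intact.

#include <stdbool.h>
#include <stdio.h>

#define ETIMEDOUT 110

static bool wait_flag_clear_buggy(int flag_still_set)
{
        if (flag_still_set)
                return -ETIMEDOUT;      /* implicitly converted to true (1) */
        return false;
}

static int wait_flag_clear_fixed(int flag_still_set)
{
        if (flag_still_set)
                return -ETIMEDOUT;      /* preserved as -110 */
        return 0;
}

int main(void)
{
        /* Prints "buggy: 1, fixed: -110". */
        printf("buggy: %d, fixed: %d\n",
               wait_flag_clear_buggy(1), wait_flag_clear_fixed(1));
        return 0;
}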
- -Fixes: 9e8f208ad5229d ("nvmem: lan9662-otp: add support") -Reported-by: kernel test robot -Reported-by: Dan Carpenter -Signed-off-by: Horatiu Vultur -Signed-off-by: Srinivas Kandagatla -Link: https://lore.kernel.org/r/20221118063840.6357-5-srinivas.kandagatla@linaro.org -Signed-off-by: Greg Kroah-Hartman ---- - drivers/nvmem/lan9662-otpc.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/nvmem/lan9662-otpc.c -+++ b/drivers/nvmem/lan9662-otpc.c -@@ -36,7 +36,7 @@ struct lan9662_otp { - void __iomem *base; - }; - --static bool lan9662_otp_wait_flag_clear(void __iomem *reg, u32 flag) -+static int lan9662_otp_wait_flag_clear(void __iomem *reg, u32 flag) - { - u32 val; - diff --git a/target/linux/generic/backport-6.1/808-v6.2-0007-nvmem-brcm_nvram-Add-check-for-kzalloc.patch b/target/linux/generic/backport-6.1/808-v6.2-0007-nvmem-brcm_nvram-Add-check-for-kzalloc.patch deleted file mode 100644 index 14108b592768..000000000000 --- a/target/linux/generic/backport-6.1/808-v6.2-0007-nvmem-brcm_nvram-Add-check-for-kzalloc.patch +++ /dev/null @@ -1,30 +0,0 @@ -From b0576ade3aaf24b376ea1a4406ae138e2a22b0c0 Mon Sep 17 00:00:00 2001 -From: Jiasheng Jiang -Date: Fri, 27 Jan 2023 10:40:06 +0000 -Subject: [PATCH] nvmem: brcm_nvram: Add check for kzalloc - -Add the check for the return value of kzalloc in order to avoid -NULL pointer dereference. - -Fixes: 6e977eaa8280 ("nvmem: brcm_nvram: parse NVRAM content into NVMEM cells") -Cc: stable@vger.kernel.org -Signed-off-by: Jiasheng Jiang -Signed-off-by: Srinivas Kandagatla -Link: https://lore.kernel.org/r/20230127104015.23839-2-srinivas.kandagatla@linaro.org -Signed-off-by: Greg Kroah-Hartman ---- - drivers/nvmem/brcm_nvram.c | 3 +++ - 1 file changed, 3 insertions(+) - ---- a/drivers/nvmem/brcm_nvram.c -+++ b/drivers/nvmem/brcm_nvram.c -@@ -97,6 +97,9 @@ static int brcm_nvram_parse(struct brcm_ - len = le32_to_cpu(header.len); - - data = kzalloc(len, GFP_KERNEL); -+ if (!data) -+ return -ENOMEM; -+ - memcpy_fromio(data, priv->base, len); - data[len - 1] = '\0'; - diff --git a/target/linux/generic/backport-6.1/808-v6.2-0008-nvmem-sunxi_sid-Always-use-32-bit-MMIO-reads.patch b/target/linux/generic/backport-6.1/808-v6.2-0008-nvmem-sunxi_sid-Always-use-32-bit-MMIO-reads.patch deleted file mode 100644 index 632b01cb2ac8..000000000000 --- a/target/linux/generic/backport-6.1/808-v6.2-0008-nvmem-sunxi_sid-Always-use-32-bit-MMIO-reads.patch +++ /dev/null @@ -1,55 +0,0 @@ -From c151d5ed8e8fe0474bd61dce7f2076ca5916c683 Mon Sep 17 00:00:00 2001 -From: Samuel Holland -Date: Fri, 27 Jan 2023 10:40:07 +0000 -Subject: [PATCH] nvmem: sunxi_sid: Always use 32-bit MMIO reads - -The SID SRAM on at least some SoCs (A64 and D1) returns different values -when read with bus cycles narrower than 32 bits. This is not immediately -obvious, because memcpy_fromio() uses word-size accesses as long as -enough data is being copied. - -The vendor driver always uses 32-bit MMIO reads, so do the same here. -This is faster than the register-based method, which is currently used -as a workaround on A64. And it fixes the values returned on D1, where -the SRAM method was being used. - -The special case for the last word is needed to maintain .word_size == 1 -for sysfs ABI compatibility, as noted previously in commit de2a3eaea552 -("nvmem: sunxi_sid: Optimize register read-out method"). 
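To make the word/tail split described above easier to follow, here is a plain-memory stand-in for the pattern the diff below applies to MMIO (memcpy replaces __ioread32_copy() and a plain load replaces readl_relaxed(); this is only an illustration, not the driver code):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void copy_words_then_tail(void *dst, const uint32_t *src, size_t bytes)
{
        size_t words = bytes / 4;       /* .stride = 4 keeps offsets aligned */
        uint32_t last;

        /* Bulk of the transfer: whole 32-bit words only. */
        memcpy(dst, src, words * 4);

        bytes -= words * 4;
        if (!bytes)
                return;

        /* 1-3 trailing bytes: one more word read, then copy what is needed. */
        last = src[words];
        memcpy((uint8_t *)dst + words * 4, &last, bytes);
}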
- -Fixes: 07ae4fde9efa ("nvmem: sunxi_sid: Add support for D1 variant") -Cc: stable@vger.kernel.org -Tested-by: Heiko Stuebner -Signed-off-by: Samuel Holland -Signed-off-by: Srinivas Kandagatla -Link: https://lore.kernel.org/r/20230127104015.23839-3-srinivas.kandagatla@linaro.org -Signed-off-by: Greg Kroah-Hartman ---- - drivers/nvmem/sunxi_sid.c | 15 ++++++++++++++- - 1 file changed, 14 insertions(+), 1 deletion(-) - ---- a/drivers/nvmem/sunxi_sid.c -+++ b/drivers/nvmem/sunxi_sid.c -@@ -41,8 +41,21 @@ static int sunxi_sid_read(void *context, - void *val, size_t bytes) - { - struct sunxi_sid *sid = context; -+ u32 word; - -- memcpy_fromio(val, sid->base + sid->value_offset + offset, bytes); -+ /* .stride = 4 so offset is guaranteed to be aligned */ -+ __ioread32_copy(val, sid->base + sid->value_offset + offset, bytes / 4); -+ -+ val += round_down(bytes, 4); -+ offset += round_down(bytes, 4); -+ bytes = bytes % 4; -+ -+ if (!bytes) -+ return 0; -+ -+ /* Handle any trailing bytes */ -+ word = readl_relaxed(sid->base + sid->value_offset + offset); -+ memcpy(val, &word, bytes); - - return 0; - } diff --git a/target/linux/generic/backport-6.1/808-v6.2-0013-nvmem-core-fix-device-node-refcounting.patch b/target/linux/generic/backport-6.1/808-v6.2-0013-nvmem-core-fix-device-node-refcounting.patch deleted file mode 100644 index a229c303ad0f..000000000000 --- a/target/linux/generic/backport-6.1/808-v6.2-0013-nvmem-core-fix-device-node-refcounting.patch +++ /dev/null @@ -1,48 +0,0 @@ -From edcf2fb660526b5ed29f93bd17328a2b4835c8b2 Mon Sep 17 00:00:00 2001 -From: Michael Walle -Date: Fri, 27 Jan 2023 10:40:12 +0000 -Subject: [PATCH] nvmem: core: fix device node refcounting - -In of_nvmem_cell_get(), of_get_next_parent() is used on cell_np. This -will decrement the refcount on cell_np, but cell_np is still used later -in the code. Use of_get_parent() instead and of_node_put() in the -appropriate places. 
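The rule the fix below enforces is worth spelling out: every node obtained from an of_get_*() helper holds a reference that must be dropped with of_node_put() on every exit path, error paths included. A minimal, hypothetical sketch of the pattern:

#include <linux/errno.h>
#include <linux/of.h>

static int example_check_parent(struct device_node *cell_np)
{
        struct device_node *parent;
        int ret = 0;

        parent = of_get_parent(cell_np);        /* takes a reference */
        if (!parent)
                return -EINVAL;

        if (!of_device_is_available(parent))
                ret = -ENODEV;                  /* error path still puts below */

        of_node_put(parent);                    /* balanced exactly once */
        return ret;
}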
- -Fixes: 69aba7948cbe ("nvmem: Add a simple NVMEM framework for consumers") -Fixes: 7ae6478b304b ("nvmem: core: rework nvmem cell instance creation") -Cc: stable@vger.kernel.org -Signed-off-by: Michael Walle -Signed-off-by: Srinivas Kandagatla -Link: https://lore.kernel.org/r/20230127104015.23839-8-srinivas.kandagatla@linaro.org -Signed-off-by: Greg Kroah-Hartman ---- - drivers/nvmem/core.c | 11 ++++++++--- - 1 file changed, 8 insertions(+), 3 deletions(-) - ---- a/drivers/nvmem/core.c -+++ b/drivers/nvmem/core.c -@@ -1237,16 +1237,21 @@ struct nvmem_cell *of_nvmem_cell_get(str - if (!cell_np) - return ERR_PTR(-ENOENT); - -- nvmem_np = of_get_next_parent(cell_np); -- if (!nvmem_np) -+ nvmem_np = of_get_parent(cell_np); -+ if (!nvmem_np) { -+ of_node_put(cell_np); - return ERR_PTR(-EINVAL); -+ } - - nvmem = __nvmem_device_get(nvmem_np, device_match_of_node); - of_node_put(nvmem_np); -- if (IS_ERR(nvmem)) -+ if (IS_ERR(nvmem)) { -+ of_node_put(cell_np); - return ERR_CAST(nvmem); -+ } - - cell_entry = nvmem_find_cell_entry_by_node(nvmem, cell_np); -+ of_node_put(cell_np); - if (!cell_entry) { - __nvmem_device_put(nvmem); - return ERR_PTR(-ENOENT); diff --git a/target/linux/generic/backport-6.1/810-v5.17-net-qmi_wwan-add-ZTE-MF286D-modem-19d2-1485.patch b/target/linux/generic/backport-6.1/810-v5.17-net-qmi_wwan-add-ZTE-MF286D-modem-19d2-1485.patch deleted file mode 100644 index 25802be7e3d0..000000000000 --- a/target/linux/generic/backport-6.1/810-v5.17-net-qmi_wwan-add-ZTE-MF286D-modem-19d2-1485.patch +++ /dev/null @@ -1,59 +0,0 @@ -From 078c6a1cbd4cd7496048786beec2e312577bebbf Mon Sep 17 00:00:00 2001 -From: Pawel Dembicki -Date: Tue, 11 Jan 2022 23:11:32 +0100 -Subject: [PATCH 1/1] net: qmi_wwan: add ZTE MF286D modem 19d2:1485 -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Modem from ZTE MF286D is an Qualcomm MDM9250 based 3G/4G modem. - -T: Bus=02 Lev=01 Prnt=01 Port=00 Cnt=01 Dev#= 3 Spd=5000 MxCh= 0 -D: Ver= 3.00 Cls=00(>ifc ) Sub=00 Prot=00 MxPS= 9 #Cfgs= 1 -P: Vendor=19d2 ProdID=1485 Rev=52.87 -S: Manufacturer=ZTE,Incorporated -S: Product=ZTE Technologies MSM -S: SerialNumber=MF286DZTED000000 -C:* #Ifs= 7 Cfg#= 1 Atr=80 MxPwr=896mA -A: FirstIf#= 0 IfCount= 2 Cls=02(comm.) Sub=06 Prot=00 -I:* If#= 0 Alt= 0 #EPs= 1 Cls=02(comm.) Sub=02 Prot=ff Driver=rndis_host -E: Ad=82(I) Atr=03(Int.) MxPS= 8 Ivl=32ms -I:* If#= 1 Alt= 0 #EPs= 2 Cls=0a(data ) Sub=00 Prot=00 Driver=rndis_host -E: Ad=81(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms -E: Ad=01(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms -I:* If#= 2 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=ff Driver=option -E: Ad=83(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms -E: Ad=02(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms -I:* If#= 3 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=ff Driver=option -E: Ad=85(I) Atr=03(Int.) MxPS= 10 Ivl=32ms -E: Ad=84(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms -E: Ad=03(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms -I:* If#= 4 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=ff Driver=option -E: Ad=87(I) Atr=03(Int.) MxPS= 10 Ivl=32ms -E: Ad=86(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms -E: Ad=04(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms -I:* If#= 5 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=ff Driver=qmi_wwan -E: Ad=88(I) Atr=03(Int.) MxPS= 8 Ivl=32ms -E: Ad=8e(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms -E: Ad=0f(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms -I:* If#= 6 Alt= 0 #EPs= 2 Cls=ff(vend.) 
Sub=42 Prot=01 Driver=usbfs -E: Ad=05(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms -E: Ad=89(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms - -Signed-off-by: Pawel Dembicki -Acked-by: Bjørn Mork -Signed-off-by: David S. Miller ---- - drivers/net/usb/qmi_wwan.c | 1 + - 1 file changed, 1 insertion(+) - ---- a/drivers/net/usb/qmi_wwan.c -+++ b/drivers/net/usb/qmi_wwan.c -@@ -1314,6 +1314,7 @@ static const struct usb_device_id produc - {QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */ - {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */ - {QMI_FIXED_INTF(0x19d2, 0x1432, 3)}, /* ZTE ME3620 */ -+ {QMI_FIXED_INTF(0x19d2, 0x1485, 5)}, /* ZTE MF286D */ - {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ - {QMI_FIXED_INTF(0x2001, 0x7e16, 3)}, /* D-Link DWM-221 */ - {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */ diff --git a/target/linux/generic/backport-6.1/821-v5.16-Bluetooth-btusb-Support-public-address-configuration.patch b/target/linux/generic/backport-6.1/821-v5.16-Bluetooth-btusb-Support-public-address-configuration.patch deleted file mode 100644 index 986871264871..000000000000 --- a/target/linux/generic/backport-6.1/821-v5.16-Bluetooth-btusb-Support-public-address-configuration.patch +++ /dev/null @@ -1,51 +0,0 @@ -From 5cb03751455c299b1bf10cb48631bf359cfb11b5 Mon Sep 17 00:00:00 2001 -From: "mark-yw.chen" -Date: Wed, 1 Sep 2021 11:32:25 +0800 -Subject: [PATCH 1/5] Bluetooth: btusb: Support public address configuration - for MediaTek Chip. - -The MediaTek chip support vendor specific HCI command(0xfc1a) to -change the public address. Add hdev->set_bdaddr handler for MediaTek -Chip. After doing a power cycle or MediaTek Bluetooth reset, BD_ADDR -will bring back the original one. - -Signed-off-by: mark-yw.chen -Signed-off-by: Marcel Holtmann ---- - drivers/bluetooth/btusb.c | 18 ++++++++++++++++++ - 1 file changed, 18 insertions(+) - ---- a/drivers/bluetooth/btusb.c -+++ b/drivers/bluetooth/btusb.c -@@ -2272,6 +2272,23 @@ struct btmtk_section_map { - }; - } __packed; - -+static int btusb_set_bdaddr_mtk(struct hci_dev *hdev, const bdaddr_t *bdaddr) -+{ -+ struct sk_buff *skb; -+ long ret; -+ -+ skb = __hci_cmd_sync(hdev, 0xfc1a, sizeof(bdaddr), bdaddr, HCI_INIT_TIMEOUT); -+ if (IS_ERR(skb)) { -+ ret = PTR_ERR(skb); -+ bt_dev_err(hdev, "changing Mediatek device address failed (%ld)", -+ ret); -+ return ret; -+ } -+ kfree_skb(skb); -+ -+ return 0; -+} -+ - static void btusb_mtk_wmt_recv(struct urb *urb) - { - struct hci_dev *hdev = urb->context; -@@ -3923,6 +3940,7 @@ static int btusb_probe(struct usb_interf - hdev->shutdown = btusb_mtk_shutdown; - hdev->manufacturer = 70; - hdev->cmd_timeout = btusb_mtk_cmd_timeout; -+ hdev->set_bdaddr = btusb_set_bdaddr_mtk; - set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks); - data->recv_acl = btusb_recv_acl_mtk; - } diff --git a/target/linux/generic/backport-6.1/822-v5.17-Bluetooth-btusb-Fix-application-of-sizeof-to-pointer.patch b/target/linux/generic/backport-6.1/822-v5.17-Bluetooth-btusb-Fix-application-of-sizeof-to-pointer.patch deleted file mode 100644 index cff537a86699..000000000000 --- a/target/linux/generic/backport-6.1/822-v5.17-Bluetooth-btusb-Fix-application-of-sizeof-to-pointer.patch +++ /dev/null @@ -1,29 +0,0 @@ -From af774a731f7b4c2a90a8476cd44045ba8d1263ba Mon Sep 17 00:00:00 2001 -From: David Yang -Date: Wed, 13 Oct 2021 08:56:33 +0800 -Subject: [PATCH 2/5] Bluetooth: btusb: Fix application of sizeof to pointer - -The coccinelle check report: -"./drivers/bluetooth/btusb.c:2239:36-42: -ERROR: application of sizeof 
to pointer". -Using the real size to fix it. - -Fixes: 5a87679ffd443 ("Bluetooth: btusb: Support public address configuration for MediaTek Chip.") -Reported-by: Zeal Robot -Signed-off-by: David Yang -Signed-off-by: Marcel Holtmann ---- - drivers/bluetooth/btusb.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/bluetooth/btusb.c -+++ b/drivers/bluetooth/btusb.c -@@ -2277,7 +2277,7 @@ static int btusb_set_bdaddr_mtk(struct h - struct sk_buff *skb; - long ret; - -- skb = __hci_cmd_sync(hdev, 0xfc1a, sizeof(bdaddr), bdaddr, HCI_INIT_TIMEOUT); -+ skb = __hci_cmd_sync(hdev, 0xfc1a, 6, bdaddr, HCI_INIT_TIMEOUT); - if (IS_ERR(skb)) { - ret = PTR_ERR(skb); - bt_dev_err(hdev, "changing Mediatek device address failed (%ld)", diff --git a/target/linux/generic/backport-6.1/823-v5.18-Bluetooth-btusb-Add-a-new-PID-VID-13d3-3567-for-MT79.patch b/target/linux/generic/backport-6.1/823-v5.18-Bluetooth-btusb-Add-a-new-PID-VID-13d3-3567-for-MT79.patch deleted file mode 100644 index d670195da1f2..000000000000 --- a/target/linux/generic/backport-6.1/823-v5.18-Bluetooth-btusb-Add-a-new-PID-VID-13d3-3567-for-MT79.patch +++ /dev/null @@ -1,70 +0,0 @@ -From e57186fc02cedff191c469a26cce615371e41740 Mon Sep 17 00:00:00 2001 -From: Yake Yang -Date: Wed, 23 Feb 2022 07:55:59 +0800 -Subject: [PATCH 3/5] Bluetooth: btusb: Add a new PID/VID 13d3/3567 for MT7921 - -Add VID 13D3 & PID 3567 for MediaTek MT7921 USB Bluetooth chip. - -The information in /sys/kernel/debug/usb/devices about the Bluetooth -device is listed as the below. - -T: Bus=05 Lev=01 Prnt=01 Port=00 Cnt=01 Dev#= 2 Spd=480 MxCh= 0 -D: Ver= 2.10 Cls=ef(misc ) Sub=02 Prot=01 MxPS=64 #Cfgs= 1 -P: Vendor=13d3 ProdID=3567 Rev= 1.00 -S: Manufacturer=MediaTek Inc. -S: Product=Wireless_Device -S: SerialNumber=000000000 -C:* #Ifs= 3 Cfg#= 1 Atr=e0 MxPwr=100mA -A: FirstIf#= 0 IfCount= 3 Cls=e0(wlcon) Sub=01 Prot=01 -I:* If#= 0 Alt= 0 #EPs= 3 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb -E: Ad=81(I) Atr=03(Int.) MxPS= 16 Ivl=125us -E: Ad=82(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms -E: Ad=02(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms -I:* If#= 1 Alt= 0 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb -E: Ad=83(I) Atr=01(Isoc) MxPS= 0 Ivl=1ms -E: Ad=03(O) Atr=01(Isoc) MxPS= 0 Ivl=1ms -I: If#= 1 Alt= 1 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb -E: Ad=83(I) Atr=01(Isoc) MxPS= 9 Ivl=1ms -E: Ad=03(O) Atr=01(Isoc) MxPS= 9 Ivl=1ms -I: If#= 1 Alt= 2 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb -E: Ad=83(I) Atr=01(Isoc) MxPS= 17 Ivl=1ms -E: Ad=03(O) Atr=01(Isoc) MxPS= 17 Ivl=1ms -I: If#= 1 Alt= 3 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb -E: Ad=83(I) Atr=01(Isoc) MxPS= 25 Ivl=1ms -E: Ad=03(O) Atr=01(Isoc) MxPS= 25 Ivl=1ms -I: If#= 1 Alt= 4 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb -E: Ad=83(I) Atr=01(Isoc) MxPS= 33 Ivl=1ms -E: Ad=03(O) Atr=01(Isoc) MxPS= 33 Ivl=1ms -I: If#= 1 Alt= 5 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb -E: Ad=83(I) Atr=01(Isoc) MxPS= 49 Ivl=1ms -E: Ad=03(O) Atr=01(Isoc) MxPS= 49 Ivl=1ms -I: If#= 1 Alt= 6 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb -E: Ad=83(I) Atr=01(Isoc) MxPS= 63 Ivl=1ms -E: Ad=03(O) Atr=01(Isoc) MxPS= 63 Ivl=1ms -I:* If#= 2 Alt= 0 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=(none) -E: Ad=8a(I) Atr=03(Int.) MxPS= 64 Ivl=125us -E: Ad=0a(O) Atr=03(Int.) MxPS= 64 Ivl=125us -I: If#= 2 Alt= 1 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=(none) -E: Ad=8a(I) Atr=03(Int.) MxPS= 64 Ivl=125us -E: Ad=0a(O) Atr=03(Int.) 
MxPS= 64 Ivl=125us - -Co-developed-by: Sean Wang -Signed-off-by: Sean Wang -Signed-off-by: Yake Yang -Signed-off-by: Marcel Holtmann ---- - drivers/bluetooth/btusb.c | 3 +++ - 1 file changed, 3 insertions(+) - ---- a/drivers/bluetooth/btusb.c -+++ b/drivers/bluetooth/btusb.c -@@ -464,6 +464,9 @@ static const struct usb_device_id blackl - { USB_DEVICE(0x13d3, 0x3564), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, -+ { USB_DEVICE(0x13d3, 0x3567), .driver_info = BTUSB_MEDIATEK | -+ BTUSB_WIDEBAND_SPEECH | -+ BTUSB_VALID_LE_STATES }, - { USB_DEVICE(0x0489, 0xe0cd), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, diff --git a/target/linux/generic/backport-6.1/824-v5.19-Bluetooth-btusb-Add-a-new-PID-VID-0489-e0c8-for-MT79.patch b/target/linux/generic/backport-6.1/824-v5.19-Bluetooth-btusb-Add-a-new-PID-VID-0489-e0c8-for-MT79.patch deleted file mode 100644 index be9dc7342151..000000000000 --- a/target/linux/generic/backport-6.1/824-v5.19-Bluetooth-btusb-Add-a-new-PID-VID-0489-e0c8-for-MT79.patch +++ /dev/null @@ -1,68 +0,0 @@ -From e507366cd1e8e1d4eebe537c08fd142cf0b617fa Mon Sep 17 00:00:00 2001 -From: Sean Wang -Date: Thu, 28 Apr 2022 02:38:39 +0800 -Subject: [PATCH 4/5] Bluetooth: btusb: Add a new PID/VID 0489/e0c8 for MT7921 - -Add VID 0489 & PID e0c8 for MediaTek MT7921 USB Bluetooth chip. - -The information in /sys/kernel/debug/usb/devices about the Bluetooth -device is listed as the below. - -T: Bus=01 Lev=01 Prnt=01 Port=13 Cnt=03 Dev#= 4 Spd=480 MxCh= 0 -D: Ver= 2.10 Cls=ef(misc ) Sub=02 Prot=01 MxPS=64 #Cfgs= 1 -P: Vendor=0489 ProdID=e0c8 Rev= 1.00 -S: Manufacturer=MediaTek Inc. -S: Product=Wireless_Device -S: SerialNumber=000000000 -C:* #Ifs= 3 Cfg#= 1 Atr=e0 MxPwr=100mA -A: FirstIf#= 0 IfCount= 3 Cls=e0(wlcon) Sub=01 Prot=01 -I:* If#= 0 Alt= 0 #EPs= 3 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb -E: Ad=81(I) Atr=03(Int.) MxPS= 16 Ivl=125us -E: Ad=82(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms -E: Ad=02(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms -I:* If#= 1 Alt= 0 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb -E: Ad=83(I) Atr=01(Isoc) MxPS= 0 Ivl=1ms -E: Ad=03(O) Atr=01(Isoc) MxPS= 0 Ivl=1ms -I: If#= 1 Alt= 1 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb -E: Ad=83(I) Atr=01(Isoc) MxPS= 9 Ivl=1ms -E: Ad=03(O) Atr=01(Isoc) MxPS= 9 Ivl=1ms -I: If#= 1 Alt= 2 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb -E: Ad=83(I) Atr=01(Isoc) MxPS= 17 Ivl=1ms -E: Ad=03(O) Atr=01(Isoc) MxPS= 17 Ivl=1ms -I: If#= 1 Alt= 3 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb -E: Ad=83(I) Atr=01(Isoc) MxPS= 25 Ivl=1ms -E: Ad=03(O) Atr=01(Isoc) MxPS= 25 Ivl=1ms -I: If#= 1 Alt= 4 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb -E: Ad=83(I) Atr=01(Isoc) MxPS= 33 Ivl=1ms -E: Ad=03(O) Atr=01(Isoc) MxPS= 33 Ivl=1ms -I: If#= 1 Alt= 5 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb -E: Ad=83(I) Atr=01(Isoc) MxPS= 49 Ivl=1ms -E: Ad=03(O) Atr=01(Isoc) MxPS= 49 Ivl=1ms -I: If#= 1 Alt= 6 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb -E: Ad=83(I) Atr=01(Isoc) MxPS= 63 Ivl=1ms -E: Ad=03(O) Atr=01(Isoc) MxPS= 63 Ivl=1ms -I:* If#= 2 Alt= 0 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=(none) -E: Ad=8a(I) Atr=03(Int.) MxPS= 64 Ivl=125us -E: Ad=0a(O) Atr=03(Int.) MxPS= 64 Ivl=125us -I: If#= 2 Alt= 1 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=(none) -E: Ad=8a(I) Atr=03(Int.) MxPS= 512 Ivl=125us -E: Ad=0a(O) Atr=03(Int.) 
MxPS= 512 Ivl=125us - -Signed-off-by: Sean Wang -Signed-off-by: Marcel Holtmann ---- - drivers/bluetooth/btusb.c | 3 +++ - 1 file changed, 3 insertions(+) - ---- a/drivers/bluetooth/btusb.c -+++ b/drivers/bluetooth/btusb.c -@@ -455,6 +455,9 @@ static const struct usb_device_id blackl - BTUSB_VALID_LE_STATES }, - - /* Additional MediaTek MT7921 Bluetooth devices */ -+ { USB_DEVICE(0x0489, 0xe0c8), .driver_info = BTUSB_MEDIATEK | -+ BTUSB_WIDEBAND_SPEECH | -+ BTUSB_VALID_LE_STATES }, - { USB_DEVICE(0x04ca, 0x3802), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, diff --git a/target/linux/generic/backport-6.1/825-v6.1-Bluetooth-btusb-Add-a-new-VID-PID-0e8d-0608-for-MT79.patch b/target/linux/generic/backport-6.1/825-v6.1-Bluetooth-btusb-Add-a-new-VID-PID-0e8d-0608-for-MT79.patch deleted file mode 100644 index 24ec68a2ca50..000000000000 --- a/target/linux/generic/backport-6.1/825-v6.1-Bluetooth-btusb-Add-a-new-VID-PID-0e8d-0608-for-MT79.patch +++ /dev/null @@ -1,66 +0,0 @@ -From be55622ce673f9692cc15d26d77a050cda42a3d3 Mon Sep 17 00:00:00 2001 -From: Daniel Golle -Date: Fri, 9 Sep 2022 21:00:30 +0100 -Subject: [PATCH 1/1] Bluetooth: btusb: Add a new VID/PID 0e8d/0608 for MT7921 - -Add a new PID/VID 0e8d/0608 for MT7921K chip found on AMD RZ608 module. - -From /sys/kernel/debug/usb/devices: -T: Bus=01 Lev=02 Prnt=02 Port=01 Cnt=01 Dev#= 3 Spd=480 MxCh= 0 -D: Ver= 2.10 Cls=ef(misc ) Sub=02 Prot=01 MxPS=64 #Cfgs= 1 -P: Vendor=0e8d ProdID=0608 Rev= 1.00 -S: Manufacturer=MediaTek Inc. -S: Product=Wireless_Device -S: SerialNumber=000000000 -C:* #Ifs= 3 Cfg#= 1 Atr=e0 MxPwr=100mA -A: FirstIf#= 0 IfCount= 3 Cls=e0(wlcon) Sub=01 Prot=01 -I:* If#= 0 Alt= 0 #EPs= 3 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb -E: Ad=81(I) Atr=03(Int.) MxPS= 16 Ivl=125us -E: Ad=82(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms -E: Ad=02(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms -I:* If#= 1 Alt= 0 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb -E: Ad=83(I) Atr=01(Isoc) MxPS= 0 Ivl=1ms -E: Ad=03(O) Atr=01(Isoc) MxPS= 0 Ivl=1ms -I: If#= 1 Alt= 1 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb -E: Ad=83(I) Atr=01(Isoc) MxPS= 9 Ivl=1ms -E: Ad=03(O) Atr=01(Isoc) MxPS= 9 Ivl=1ms -I: If#= 1 Alt= 2 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb -E: Ad=83(I) Atr=01(Isoc) MxPS= 17 Ivl=1ms -E: Ad=03(O) Atr=01(Isoc) MxPS= 17 Ivl=1ms -I: If#= 1 Alt= 3 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb -E: Ad=83(I) Atr=01(Isoc) MxPS= 25 Ivl=1ms -E: Ad=03(O) Atr=01(Isoc) MxPS= 25 Ivl=1ms -I: If#= 1 Alt= 4 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb -E: Ad=83(I) Atr=01(Isoc) MxPS= 33 Ivl=1ms -E: Ad=03(O) Atr=01(Isoc) MxPS= 33 Ivl=1ms -I: If#= 1 Alt= 5 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb -E: Ad=83(I) Atr=01(Isoc) MxPS= 49 Ivl=1ms -E: Ad=03(O) Atr=01(Isoc) MxPS= 49 Ivl=1ms -I: If#= 1 Alt= 6 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb -E: Ad=83(I) Atr=01(Isoc) MxPS= 63 Ivl=1ms -E: Ad=03(O) Atr=01(Isoc) MxPS= 63 Ivl=1ms -I:* If#= 2 Alt= 0 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=(none) -E: Ad=8a(I) Atr=03(Int.) MxPS= 64 Ivl=125us -E: Ad=0a(O) Atr=03(Int.) MxPS= 64 Ivl=125us -I: If#= 2 Alt= 1 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=(none) -E: Ad=8a(I) Atr=03(Int.) MxPS= 64 Ivl=125us -E: Ad=0a(O) Atr=03(Int.) 
MxPS= 64 Ivl=125us - -Signed-off-by: Daniel Golle -Signed-off-by: Luiz Augusto von Dentz ---- - drivers/bluetooth/btusb.c | 3 +++ - 1 file changed, 3 insertions(+) - ---- a/drivers/bluetooth/btusb.c -+++ b/drivers/bluetooth/btusb.c -@@ -473,6 +473,9 @@ static const struct usb_device_id blackl - { USB_DEVICE(0x0489, 0xe0cd), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, -+ { USB_DEVICE(0x0e8d, 0x0608), .driver_info = BTUSB_MEDIATEK | -+ BTUSB_WIDEBAND_SPEECH | -+ BTUSB_VALID_LE_STATES }, - - /* MediaTek MT7922A Bluetooth devices */ - { USB_DEVICE(0x0489, 0xe0d8), .driver_info = BTUSB_MEDIATEK | diff --git a/target/linux/generic/backport-6.1/826-v5.17-of-base-make-small-of_parse_phandle-variants-static-.patch b/target/linux/generic/backport-6.1/826-v5.17-of-base-make-small-of_parse_phandle-variants-static-.patch deleted file mode 100644 index b00cf5741991..000000000000 --- a/target/linux/generic/backport-6.1/826-v5.17-of-base-make-small-of_parse_phandle-variants-static-.patch +++ /dev/null @@ -1,359 +0,0 @@ -From 66a8f7f04979f4ad739085f01d99c8caf620b4f5 Mon Sep 17 00:00:00 2001 -From: Michael Walle -Date: Tue, 18 Jan 2022 18:35:02 +0100 -Subject: [PATCH] of: base: make small of_parse_phandle() variants static - inline - -Make all the smaller variants of the of_parse_phandle() static inline. -This also let us remove the empty function stubs if CONFIG_OF is not -defined. - -Suggested-by: Rob Herring -Signed-off-by: Michael Walle -[robh: move index < 0 check into __of_parse_phandle_with_args] -Signed-off-by: Rob Herring -Link: https://lore.kernel.org/r/20220118173504.2867523-2-michael@walle.cc ---- - drivers/of/base.c | 131 +++------------------------------------ - include/linux/of.h | 148 ++++++++++++++++++++++++++++++++++++--------- - 2 files changed, 129 insertions(+), 150 deletions(-) - ---- a/drivers/of/base.c -+++ b/drivers/of/base.c -@@ -1371,15 +1371,18 @@ int of_phandle_iterator_args(struct of_p - return count; - } - --static int __of_parse_phandle_with_args(const struct device_node *np, -- const char *list_name, -- const char *cells_name, -- int cell_count, int index, -- struct of_phandle_args *out_args) -+int __of_parse_phandle_with_args(const struct device_node *np, -+ const char *list_name, -+ const char *cells_name, -+ int cell_count, int index, -+ struct of_phandle_args *out_args) - { - struct of_phandle_iterator it; - int rc, cur_index = 0; - -+ if (index < 0) -+ return -EINVAL; -+ - /* Loop over the phandles until all the requested entry is found */ - of_for_each_phandle(&it, rc, np, list_name, cells_name, cell_count) { - /* -@@ -1422,82 +1425,7 @@ static int __of_parse_phandle_with_args( - of_node_put(it.node); - return rc; - } -- --/** -- * of_parse_phandle - Resolve a phandle property to a device_node pointer -- * @np: Pointer to device node holding phandle property -- * @phandle_name: Name of property holding a phandle value -- * @index: For properties holding a table of phandles, this is the index into -- * the table -- * -- * Return: The device_node pointer with refcount incremented. Use -- * of_node_put() on it when done. 
-- */ --struct device_node *of_parse_phandle(const struct device_node *np, -- const char *phandle_name, int index) --{ -- struct of_phandle_args args; -- -- if (index < 0) -- return NULL; -- -- if (__of_parse_phandle_with_args(np, phandle_name, NULL, 0, -- index, &args)) -- return NULL; -- -- return args.np; --} --EXPORT_SYMBOL(of_parse_phandle); -- --/** -- * of_parse_phandle_with_args() - Find a node pointed by phandle in a list -- * @np: pointer to a device tree node containing a list -- * @list_name: property name that contains a list -- * @cells_name: property name that specifies phandles' arguments count -- * @index: index of a phandle to parse out -- * @out_args: optional pointer to output arguments structure (will be filled) -- * -- * This function is useful to parse lists of phandles and their arguments. -- * Returns 0 on success and fills out_args, on error returns appropriate -- * errno value. -- * -- * Caller is responsible to call of_node_put() on the returned out_args->np -- * pointer. -- * -- * Example:: -- * -- * phandle1: node1 { -- * #list-cells = <2>; -- * }; -- * -- * phandle2: node2 { -- * #list-cells = <1>; -- * }; -- * -- * node3 { -- * list = <&phandle1 1 2 &phandle2 3>; -- * }; -- * -- * To get a device_node of the ``node2`` node you may call this: -- * of_parse_phandle_with_args(node3, "list", "#list-cells", 1, &args); -- */ --int of_parse_phandle_with_args(const struct device_node *np, const char *list_name, -- const char *cells_name, int index, -- struct of_phandle_args *out_args) --{ -- int cell_count = -1; -- -- if (index < 0) -- return -EINVAL; -- -- /* If cells_name is NULL we assume a cell count of 0 */ -- if (!cells_name) -- cell_count = 0; -- -- return __of_parse_phandle_with_args(np, list_name, cells_name, -- cell_count, index, out_args); --} --EXPORT_SYMBOL(of_parse_phandle_with_args); -+EXPORT_SYMBOL(__of_parse_phandle_with_args); - - /** - * of_parse_phandle_with_args_map() - Find a node pointed by phandle in a list and remap it -@@ -1684,47 +1612,6 @@ free: - EXPORT_SYMBOL(of_parse_phandle_with_args_map); - - /** -- * of_parse_phandle_with_fixed_args() - Find a node pointed by phandle in a list -- * @np: pointer to a device tree node containing a list -- * @list_name: property name that contains a list -- * @cell_count: number of argument cells following the phandle -- * @index: index of a phandle to parse out -- * @out_args: optional pointer to output arguments structure (will be filled) -- * -- * This function is useful to parse lists of phandles and their arguments. -- * Returns 0 on success and fills out_args, on error returns appropriate -- * errno value. -- * -- * Caller is responsible to call of_node_put() on the returned out_args->np -- * pointer. 
-- * -- * Example:: -- * -- * phandle1: node1 { -- * }; -- * -- * phandle2: node2 { -- * }; -- * -- * node3 { -- * list = <&phandle1 0 2 &phandle2 2 3>; -- * }; -- * -- * To get a device_node of the ``node2`` node you may call this: -- * of_parse_phandle_with_fixed_args(node3, "list", 2, 1, &args); -- */ --int of_parse_phandle_with_fixed_args(const struct device_node *np, -- const char *list_name, int cell_count, -- int index, struct of_phandle_args *out_args) --{ -- if (index < 0) -- return -EINVAL; -- return __of_parse_phandle_with_args(np, list_name, NULL, cell_count, -- index, out_args); --} --EXPORT_SYMBOL(of_parse_phandle_with_fixed_args); -- --/** - * of_count_phandle_with_args() - Find the number of phandles references in a property - * @np: pointer to a device tree node containing a list - * @list_name: property name that contains a list ---- a/include/linux/of.h -+++ b/include/linux/of.h -@@ -363,18 +363,12 @@ extern const struct of_device_id *of_mat - const struct of_device_id *matches, const struct device_node *node); - extern int of_modalias_node(struct device_node *node, char *modalias, int len); - extern void of_print_phandle_args(const char *msg, const struct of_phandle_args *args); --extern struct device_node *of_parse_phandle(const struct device_node *np, -- const char *phandle_name, -- int index); --extern int of_parse_phandle_with_args(const struct device_node *np, -- const char *list_name, const char *cells_name, int index, -- struct of_phandle_args *out_args); -+extern int __of_parse_phandle_with_args(const struct device_node *np, -+ const char *list_name, const char *cells_name, int cell_count, -+ int index, struct of_phandle_args *out_args); - extern int of_parse_phandle_with_args_map(const struct device_node *np, - const char *list_name, const char *stem_name, int index, - struct of_phandle_args *out_args); --extern int of_parse_phandle_with_fixed_args(const struct device_node *np, -- const char *list_name, int cells_count, int index, -- struct of_phandle_args *out_args); - extern int of_count_phandle_with_args(const struct device_node *np, - const char *list_name, const char *cells_name); - -@@ -864,18 +858,12 @@ static inline int of_property_read_strin - return -ENOSYS; - } - --static inline struct device_node *of_parse_phandle(const struct device_node *np, -- const char *phandle_name, -- int index) --{ -- return NULL; --} -- --static inline int of_parse_phandle_with_args(const struct device_node *np, -- const char *list_name, -- const char *cells_name, -- int index, -- struct of_phandle_args *out_args) -+static inline int __of_parse_phandle_with_args(const struct device_node *np, -+ const char *list_name, -+ const char *cells_name, -+ int cell_count, -+ int index, -+ struct of_phandle_args *out_args) - { - return -ENOSYS; - } -@@ -889,13 +877,6 @@ static inline int of_parse_phandle_with_ - return -ENOSYS; - } - --static inline int of_parse_phandle_with_fixed_args(const struct device_node *np, -- const char *list_name, int cells_count, int index, -- struct of_phandle_args *out_args) --{ -- return -ENOSYS; --} -- - static inline int of_count_phandle_with_args(const struct device_node *np, - const char *list_name, - const char *cells_name) -@@ -1077,6 +1058,117 @@ static inline bool of_node_is_type(const - } - - /** -+ * of_parse_phandle - Resolve a phandle property to a device_node pointer -+ * @np: Pointer to device node holding phandle property -+ * @phandle_name: Name of property holding a phandle value -+ * @index: For properties holding a table of phandles, 
this is the index into -+ * the table -+ * -+ * Return: The device_node pointer with refcount incremented. Use -+ * of_node_put() on it when done. -+ */ -+static inline struct device_node *of_parse_phandle(const struct device_node *np, -+ const char *phandle_name, -+ int index) -+{ -+ struct of_phandle_args args; -+ -+ if (__of_parse_phandle_with_args(np, phandle_name, NULL, 0, -+ index, &args)) -+ return NULL; -+ -+ return args.np; -+} -+ -+/** -+ * of_parse_phandle_with_args() - Find a node pointed by phandle in a list -+ * @np: pointer to a device tree node containing a list -+ * @list_name: property name that contains a list -+ * @cells_name: property name that specifies phandles' arguments count -+ * @index: index of a phandle to parse out -+ * @out_args: optional pointer to output arguments structure (will be filled) -+ * -+ * This function is useful to parse lists of phandles and their arguments. -+ * Returns 0 on success and fills out_args, on error returns appropriate -+ * errno value. -+ * -+ * Caller is responsible to call of_node_put() on the returned out_args->np -+ * pointer. -+ * -+ * Example:: -+ * -+ * phandle1: node1 { -+ * #list-cells = <2>; -+ * }; -+ * -+ * phandle2: node2 { -+ * #list-cells = <1>; -+ * }; -+ * -+ * node3 { -+ * list = <&phandle1 1 2 &phandle2 3>; -+ * }; -+ * -+ * To get a device_node of the ``node2`` node you may call this: -+ * of_parse_phandle_with_args(node3, "list", "#list-cells", 1, &args); -+ */ -+static inline int of_parse_phandle_with_args(const struct device_node *np, -+ const char *list_name, -+ const char *cells_name, -+ int index, -+ struct of_phandle_args *out_args) -+{ -+ int cell_count = -1; -+ -+ /* If cells_name is NULL we assume a cell count of 0 */ -+ if (!cells_name) -+ cell_count = 0; -+ -+ return __of_parse_phandle_with_args(np, list_name, cells_name, -+ cell_count, index, out_args); -+} -+ -+/** -+ * of_parse_phandle_with_fixed_args() - Find a node pointed by phandle in a list -+ * @np: pointer to a device tree node containing a list -+ * @list_name: property name that contains a list -+ * @cell_count: number of argument cells following the phandle -+ * @index: index of a phandle to parse out -+ * @out_args: optional pointer to output arguments structure (will be filled) -+ * -+ * This function is useful to parse lists of phandles and their arguments. -+ * Returns 0 on success and fills out_args, on error returns appropriate -+ * errno value. -+ * -+ * Caller is responsible to call of_node_put() on the returned out_args->np -+ * pointer. -+ * -+ * Example:: -+ * -+ * phandle1: node1 { -+ * }; -+ * -+ * phandle2: node2 { -+ * }; -+ * -+ * node3 { -+ * list = <&phandle1 0 2 &phandle2 2 3>; -+ * }; -+ * -+ * To get a device_node of the ``node2`` node you may call this: -+ * of_parse_phandle_with_fixed_args(node3, "list", 2, 1, &args); -+ */ -+static inline int of_parse_phandle_with_fixed_args(const struct device_node *np, -+ const char *list_name, -+ int cell_count, -+ int index, -+ struct of_phandle_args *out_args) -+{ -+ return __of_parse_phandle_with_args(np, list_name, NULL, cell_count, -+ index, out_args); -+} -+ -+/** - * of_property_count_u8_elems - Count the number of u8 elements in a property - * - * @np: device node from which the property value is to be read. 
diff --git a/target/linux/generic/backport-6.1/830-v6.2-ata-ahci-fix-enum-constants-for-gcc-13.patch b/target/linux/generic/backport-6.1/830-v6.2-ata-ahci-fix-enum-constants-for-gcc-13.patch deleted file mode 100644 index 0d81d24e09cf..000000000000 --- a/target/linux/generic/backport-6.1/830-v6.2-ata-ahci-fix-enum-constants-for-gcc-13.patch +++ /dev/null @@ -1,348 +0,0 @@ -From f07788079f515ca4a681c5f595bdad19cfbd7b1d Mon Sep 17 00:00:00 2001 -From: Arnd Bergmann -Date: Sat, 3 Dec 2022 11:54:25 +0100 -Subject: [PATCH] ata: ahci: fix enum constants for gcc-13 - -gcc-13 slightly changes the type of constant expressions that are defined -in an enum, which triggers a compile time sanity check in libata: - -linux/drivers/ata/libahci.c: In function 'ahci_led_store': -linux/include/linux/compiler_types.h:357:45: error: call to '__compiletime_assert_302' declared with attribute error: BUILD_BUG_ON failed: sizeof(_s) > sizeof(long) -357 | _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__) - -The new behavior is that sizeof() returns the same value for the -constant as it does for the enum type, which is generally more sensible -and consistent. - -The problem in libata is that it contains a single enum definition for -lots of unrelated constants, some of which are large positive (unsigned) -integers like 0xffffffff, while others like (1<<31) are interpreted as -negative integers, and this forces the enum type to become 64 bit wide -even though most constants would still fit into a signed 32-bit 'int'. - -Fix this by changing the entire enum definition to use BIT(x) in place -of (1< -Cc: linux-ide@vger.kernel.org -Cc: Damien Le Moal -Cc: stable@vger.kernel.org -Cc: Randy Dunlap -Signed-off-by: Arnd Bergmann -Tested-by: Luis Machado -Signed-off-by: Damien Le Moal ---- - drivers/ata/ahci.h | 245 +++++++++++++++++++++++---------------------- - 1 file changed, 123 insertions(+), 122 deletions(-) - ---- a/drivers/ata/ahci.h -+++ b/drivers/ata/ahci.h -@@ -24,6 +24,7 @@ - #include - #include - #include -+#include - - /* Enclosure Management Control */ - #define EM_CTRL_MSG_TYPE 0x000f0000 -@@ -54,12 +55,12 @@ enum { - AHCI_PORT_PRIV_FBS_DMA_SZ = AHCI_CMD_SLOT_SZ + - AHCI_CMD_TBL_AR_SZ + - (AHCI_RX_FIS_SZ * 16), -- AHCI_IRQ_ON_SG = (1 << 31), -- AHCI_CMD_ATAPI = (1 << 5), -- AHCI_CMD_WRITE = (1 << 6), -- AHCI_CMD_PREFETCH = (1 << 7), -- AHCI_CMD_RESET = (1 << 8), -- AHCI_CMD_CLR_BUSY = (1 << 10), -+ AHCI_IRQ_ON_SG = BIT(31), -+ AHCI_CMD_ATAPI = BIT(5), -+ AHCI_CMD_WRITE = BIT(6), -+ AHCI_CMD_PREFETCH = BIT(7), -+ AHCI_CMD_RESET = BIT(8), -+ AHCI_CMD_CLR_BUSY = BIT(10), - - RX_FIS_PIO_SETUP = 0x20, /* offset of PIO Setup FIS data */ - RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */ -@@ -77,37 +78,37 @@ enum { - HOST_CAP2 = 0x24, /* host capabilities, extended */ - - /* HOST_CTL bits */ -- HOST_RESET = (1 << 0), /* reset controller; self-clear */ -- HOST_IRQ_EN = (1 << 1), /* global IRQ enable */ -- HOST_MRSM = (1 << 2), /* MSI Revert to Single Message */ -- HOST_AHCI_EN = (1 << 31), /* AHCI enabled */ -+ HOST_RESET = BIT(0), /* reset controller; self-clear */ -+ HOST_IRQ_EN = BIT(1), /* global IRQ enable */ -+ HOST_MRSM = BIT(2), /* MSI Revert to Single Message */ -+ HOST_AHCI_EN = BIT(31), /* AHCI enabled */ - - /* HOST_CAP bits */ -- HOST_CAP_SXS = (1 << 5), /* Supports External SATA */ -- HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */ -- HOST_CAP_CCC = (1 << 7), /* Command Completion Coalescing */ -- HOST_CAP_PART = (1 << 13), /* Partial state capable */ -- 
HOST_CAP_SSC = (1 << 14), /* Slumber state capable */ -- HOST_CAP_PIO_MULTI = (1 << 15), /* PIO multiple DRQ support */ -- HOST_CAP_FBS = (1 << 16), /* FIS-based switching support */ -- HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */ -- HOST_CAP_ONLY = (1 << 18), /* Supports AHCI mode only */ -- HOST_CAP_CLO = (1 << 24), /* Command List Override support */ -- HOST_CAP_LED = (1 << 25), /* Supports activity LED */ -- HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */ -- HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */ -- HOST_CAP_MPS = (1 << 28), /* Mechanical presence switch */ -- HOST_CAP_SNTF = (1 << 29), /* SNotification register */ -- HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */ -- HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */ -+ HOST_CAP_SXS = BIT(5), /* Supports External SATA */ -+ HOST_CAP_EMS = BIT(6), /* Enclosure Management support */ -+ HOST_CAP_CCC = BIT(7), /* Command Completion Coalescing */ -+ HOST_CAP_PART = BIT(13), /* Partial state capable */ -+ HOST_CAP_SSC = BIT(14), /* Slumber state capable */ -+ HOST_CAP_PIO_MULTI = BIT(15), /* PIO multiple DRQ support */ -+ HOST_CAP_FBS = BIT(16), /* FIS-based switching support */ -+ HOST_CAP_PMP = BIT(17), /* Port Multiplier support */ -+ HOST_CAP_ONLY = BIT(18), /* Supports AHCI mode only */ -+ HOST_CAP_CLO = BIT(24), /* Command List Override support */ -+ HOST_CAP_LED = BIT(25), /* Supports activity LED */ -+ HOST_CAP_ALPM = BIT(26), /* Aggressive Link PM support */ -+ HOST_CAP_SSS = BIT(27), /* Staggered Spin-up */ -+ HOST_CAP_MPS = BIT(28), /* Mechanical presence switch */ -+ HOST_CAP_SNTF = BIT(29), /* SNotification register */ -+ HOST_CAP_NCQ = BIT(30), /* Native Command Queueing */ -+ HOST_CAP_64 = BIT(31), /* PCI DAC (64-bit DMA) support */ - - /* HOST_CAP2 bits */ -- HOST_CAP2_BOH = (1 << 0), /* BIOS/OS handoff supported */ -- HOST_CAP2_NVMHCI = (1 << 1), /* NVMHCI supported */ -- HOST_CAP2_APST = (1 << 2), /* Automatic partial to slumber */ -- HOST_CAP2_SDS = (1 << 3), /* Support device sleep */ -- HOST_CAP2_SADM = (1 << 4), /* Support aggressive DevSlp */ -- HOST_CAP2_DESO = (1 << 5), /* DevSlp from slumber only */ -+ HOST_CAP2_BOH = BIT(0), /* BIOS/OS handoff supported */ -+ HOST_CAP2_NVMHCI = BIT(1), /* NVMHCI supported */ -+ HOST_CAP2_APST = BIT(2), /* Automatic partial to slumber */ -+ HOST_CAP2_SDS = BIT(3), /* Support device sleep */ -+ HOST_CAP2_SADM = BIT(4), /* Support aggressive DevSlp */ -+ HOST_CAP2_DESO = BIT(5), /* DevSlp from slumber only */ - - /* registers for each SATA port */ - PORT_LST_ADDR = 0x00, /* command list DMA addr */ -@@ -129,24 +130,24 @@ enum { - PORT_DEVSLP = 0x44, /* device sleep */ - - /* PORT_IRQ_{STAT,MASK} bits */ -- PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */ -- PORT_IRQ_TF_ERR = (1 << 30), /* task file error */ -- PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */ -- PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */ -- PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */ -- PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */ -- PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */ -- PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */ -- -- PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */ -- PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */ -- PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */ -- PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */ -- PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */ -- PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits 
FIS rx'd */ -- PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */ -- PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */ -- PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */ -+ PORT_IRQ_COLD_PRES = BIT(31), /* cold presence detect */ -+ PORT_IRQ_TF_ERR = BIT(30), /* task file error */ -+ PORT_IRQ_HBUS_ERR = BIT(29), /* host bus fatal error */ -+ PORT_IRQ_HBUS_DATA_ERR = BIT(28), /* host bus data error */ -+ PORT_IRQ_IF_ERR = BIT(27), /* interface fatal error */ -+ PORT_IRQ_IF_NONFATAL = BIT(26), /* interface non-fatal error */ -+ PORT_IRQ_OVERFLOW = BIT(24), /* xfer exhausted available S/G */ -+ PORT_IRQ_BAD_PMP = BIT(23), /* incorrect port multiplier */ -+ -+ PORT_IRQ_PHYRDY = BIT(22), /* PhyRdy changed */ -+ PORT_IRQ_DEV_ILCK = BIT(7), /* device interlock */ -+ PORT_IRQ_CONNECT = BIT(6), /* port connect change status */ -+ PORT_IRQ_SG_DONE = BIT(5), /* descriptor processed */ -+ PORT_IRQ_UNK_FIS = BIT(4), /* unknown FIS rx'd */ -+ PORT_IRQ_SDB_FIS = BIT(3), /* Set Device Bits FIS rx'd */ -+ PORT_IRQ_DMAS_FIS = BIT(2), /* DMA Setup FIS rx'd */ -+ PORT_IRQ_PIOS_FIS = BIT(1), /* PIO Setup FIS rx'd */ -+ PORT_IRQ_D2H_REG_FIS = BIT(0), /* D2H Register FIS rx'd */ - - PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR | - PORT_IRQ_IF_ERR | -@@ -162,34 +163,34 @@ enum { - PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS, - - /* PORT_CMD bits */ -- PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */ -- PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */ -- PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */ -- PORT_CMD_FBSCP = (1 << 22), /* FBS Capable Port */ -- PORT_CMD_ESP = (1 << 21), /* External Sata Port */ -- PORT_CMD_HPCP = (1 << 18), /* HotPlug Capable Port */ -- PORT_CMD_PMP = (1 << 17), /* PMP attached */ -- PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */ -- PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */ -- PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */ -- PORT_CMD_CLO = (1 << 3), /* Command list override */ -- PORT_CMD_POWER_ON = (1 << 2), /* Power up device */ -- PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */ -- PORT_CMD_START = (1 << 0), /* Enable port DMA engine */ -- -- PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */ -- PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */ -- PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */ -- PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */ -+ PORT_CMD_ASP = BIT(27), /* Aggressive Slumber/Partial */ -+ PORT_CMD_ALPE = BIT(26), /* Aggressive Link PM enable */ -+ PORT_CMD_ATAPI = BIT(24), /* Device is ATAPI */ -+ PORT_CMD_FBSCP = BIT(22), /* FBS Capable Port */ -+ PORT_CMD_ESP = BIT(21), /* External Sata Port */ -+ PORT_CMD_HPCP = BIT(18), /* HotPlug Capable Port */ -+ PORT_CMD_PMP = BIT(17), /* PMP attached */ -+ PORT_CMD_LIST_ON = BIT(15), /* cmd list DMA engine running */ -+ PORT_CMD_FIS_ON = BIT(14), /* FIS DMA engine running */ -+ PORT_CMD_FIS_RX = BIT(4), /* Enable FIS receive DMA engine */ -+ PORT_CMD_CLO = BIT(3), /* Command list override */ -+ PORT_CMD_POWER_ON = BIT(2), /* Power up device */ -+ PORT_CMD_SPIN_UP = BIT(1), /* Spin up device */ -+ PORT_CMD_START = BIT(0), /* Enable port DMA engine */ -+ -+ PORT_CMD_ICC_MASK = (0xfu << 28), /* i/f ICC state mask */ -+ PORT_CMD_ICC_ACTIVE = (0x1u << 28), /* Put i/f in active state */ -+ PORT_CMD_ICC_PARTIAL = (0x2u << 28), /* Put i/f in partial state */ -+ PORT_CMD_ICC_SLUMBER = (0x6u << 28), /* Put i/f in slumber state */ - - /* PORT_FBS bits */ - PORT_FBS_DWE_OFFSET = 16, /* FBS 
device with error offset */ - PORT_FBS_ADO_OFFSET = 12, /* FBS active dev optimization offset */ - PORT_FBS_DEV_OFFSET = 8, /* FBS device to issue offset */ - PORT_FBS_DEV_MASK = (0xf << PORT_FBS_DEV_OFFSET), /* FBS.DEV */ -- PORT_FBS_SDE = (1 << 2), /* FBS single device error */ -- PORT_FBS_DEC = (1 << 1), /* FBS device error clear */ -- PORT_FBS_EN = (1 << 0), /* Enable FBS */ -+ PORT_FBS_SDE = BIT(2), /* FBS single device error */ -+ PORT_FBS_DEC = BIT(1), /* FBS device error clear */ -+ PORT_FBS_EN = BIT(0), /* Enable FBS */ - - /* PORT_DEVSLP bits */ - PORT_DEVSLP_DM_OFFSET = 25, /* DITO multiplier offset */ -@@ -197,50 +198,50 @@ enum { - PORT_DEVSLP_DITO_OFFSET = 15, /* DITO offset */ - PORT_DEVSLP_MDAT_OFFSET = 10, /* Minimum assertion time */ - PORT_DEVSLP_DETO_OFFSET = 2, /* DevSlp exit timeout */ -- PORT_DEVSLP_DSP = (1 << 1), /* DevSlp present */ -- PORT_DEVSLP_ADSE = (1 << 0), /* Aggressive DevSlp enable */ -+ PORT_DEVSLP_DSP = BIT(1), /* DevSlp present */ -+ PORT_DEVSLP_ADSE = BIT(0), /* Aggressive DevSlp enable */ - - /* hpriv->flags bits */ - - #define AHCI_HFLAGS(flags) .private_data = (void *)(flags) - -- AHCI_HFLAG_NO_NCQ = (1 << 0), -- AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */ -- AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */ -- AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */ -- AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */ -- AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */ -- AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */ -- AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */ -- AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */ -- AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */ -- AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as -+ AHCI_HFLAG_NO_NCQ = BIT(0), -+ AHCI_HFLAG_IGN_IRQ_IF_ERR = BIT(1), /* ignore IRQ_IF_ERR */ -+ AHCI_HFLAG_IGN_SERR_INTERNAL = BIT(2), /* ignore SERR_INTERNAL */ -+ AHCI_HFLAG_32BIT_ONLY = BIT(3), /* force 32bit */ -+ AHCI_HFLAG_MV_PATA = BIT(4), /* PATA port */ -+ AHCI_HFLAG_NO_MSI = BIT(5), /* no PCI MSI */ -+ AHCI_HFLAG_NO_PMP = BIT(6), /* no PMP */ -+ AHCI_HFLAG_SECT255 = BIT(8), /* max 255 sectors */ -+ AHCI_HFLAG_YES_NCQ = BIT(9), /* force NCQ cap on */ -+ AHCI_HFLAG_NO_SUSPEND = BIT(10), /* don't suspend */ -+ AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = BIT(11), /* treat SRST timeout as - link offline */ -- AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */ -- AHCI_HFLAG_NO_FPDMA_AA = (1 << 13), /* no FPDMA AA */ -- AHCI_HFLAG_YES_FBS = (1 << 14), /* force FBS cap on */ -- AHCI_HFLAG_DELAY_ENGINE = (1 << 15), /* do not start engine on -+ AHCI_HFLAG_NO_SNTF = BIT(12), /* no sntf */ -+ AHCI_HFLAG_NO_FPDMA_AA = BIT(13), /* no FPDMA AA */ -+ AHCI_HFLAG_YES_FBS = BIT(14), /* force FBS cap on */ -+ AHCI_HFLAG_DELAY_ENGINE = BIT(15), /* do not start engine on - port start (wait until - error-handling stage) */ -- AHCI_HFLAG_NO_DEVSLP = (1 << 17), /* no device sleep */ -- AHCI_HFLAG_NO_FBS = (1 << 18), /* no FBS */ -+ AHCI_HFLAG_NO_DEVSLP = BIT(17), /* no device sleep */ -+ AHCI_HFLAG_NO_FBS = BIT(18), /* no FBS */ - - #ifdef CONFIG_PCI_MSI -- AHCI_HFLAG_MULTI_MSI = (1 << 20), /* per-port MSI(-X) */ -+ AHCI_HFLAG_MULTI_MSI = BIT(20), /* per-port MSI(-X) */ - #else - /* compile out MSI infrastructure */ - AHCI_HFLAG_MULTI_MSI = 0, - #endif -- AHCI_HFLAG_WAKE_BEFORE_STOP = (1 << 22), /* wake before DMA stop */ -- AHCI_HFLAG_YES_ALPM = (1 << 23), /* force ALPM cap on */ -- AHCI_HFLAG_NO_WRITE_TO_RO = (1 << 24), /* don't write to read -+ AHCI_HFLAG_WAKE_BEFORE_STOP = BIT(22), /* wake before 
DMA stop */ -+ AHCI_HFLAG_YES_ALPM = BIT(23), /* force ALPM cap on */ -+ AHCI_HFLAG_NO_WRITE_TO_RO = BIT(24), /* don't write to read - only registers */ -- AHCI_HFLAG_IS_MOBILE = (1 << 25), /* mobile chipset, use -+ AHCI_HFLAG_IS_MOBILE = BIT(25), /* mobile chipset, use - SATA_MOBILE_LPM_POLICY - as default lpm_policy */ -- AHCI_HFLAG_SUSPEND_PHYS = (1 << 26), /* handle PHYs during -+ AHCI_HFLAG_SUSPEND_PHYS = BIT(26), /* handle PHYs during - suspend/resume */ -- AHCI_HFLAG_NO_SXS = (1 << 28), /* SXS not supported */ -+ AHCI_HFLAG_NO_SXS = BIT(28), /* SXS not supported */ - - /* ap->flags bits */ - -@@ -256,22 +257,22 @@ enum { - EM_MAX_RETRY = 5, - - /* em_ctl bits */ -- EM_CTL_RST = (1 << 9), /* Reset */ -- EM_CTL_TM = (1 << 8), /* Transmit Message */ -- EM_CTL_MR = (1 << 0), /* Message Received */ -- EM_CTL_ALHD = (1 << 26), /* Activity LED */ -- EM_CTL_XMT = (1 << 25), /* Transmit Only */ -- EM_CTL_SMB = (1 << 24), /* Single Message Buffer */ -- EM_CTL_SGPIO = (1 << 19), /* SGPIO messages supported */ -- EM_CTL_SES = (1 << 18), /* SES-2 messages supported */ -- EM_CTL_SAFTE = (1 << 17), /* SAF-TE messages supported */ -- EM_CTL_LED = (1 << 16), /* LED messages supported */ -+ EM_CTL_RST = BIT(9), /* Reset */ -+ EM_CTL_TM = BIT(8), /* Transmit Message */ -+ EM_CTL_MR = BIT(0), /* Message Received */ -+ EM_CTL_ALHD = BIT(26), /* Activity LED */ -+ EM_CTL_XMT = BIT(25), /* Transmit Only */ -+ EM_CTL_SMB = BIT(24), /* Single Message Buffer */ -+ EM_CTL_SGPIO = BIT(19), /* SGPIO messages supported */ -+ EM_CTL_SES = BIT(18), /* SES-2 messages supported */ -+ EM_CTL_SAFTE = BIT(17), /* SAF-TE messages supported */ -+ EM_CTL_LED = BIT(16), /* LED messages supported */ - - /* em message type */ -- EM_MSG_TYPE_LED = (1 << 0), /* LED */ -- EM_MSG_TYPE_SAFTE = (1 << 1), /* SAF-TE */ -- EM_MSG_TYPE_SES2 = (1 << 2), /* SES-2 */ -- EM_MSG_TYPE_SGPIO = (1 << 3), /* SGPIO */ -+ EM_MSG_TYPE_LED = BIT(0), /* LED */ -+ EM_MSG_TYPE_SAFTE = BIT(1), /* SAF-TE */ -+ EM_MSG_TYPE_SES2 = BIT(2), /* SES-2 */ -+ EM_MSG_TYPE_SGPIO = BIT(3), /* SGPIO */ - }; - - struct ahci_cmd_hdr { diff --git a/target/linux/generic/backport-6.1/860-v5.17-MIPS-ath79-drop-_machine_restart-again.patch b/target/linux/generic/backport-6.1/860-v5.17-MIPS-ath79-drop-_machine_restart-again.patch deleted file mode 100644 index e9d692b651a3..000000000000 --- a/target/linux/generic/backport-6.1/860-v5.17-MIPS-ath79-drop-_machine_restart-again.patch +++ /dev/null @@ -1,49 +0,0 @@ -From d3115128bdafb62628ab41861a4f06f6d02ac320 Mon Sep 17 00:00:00 2001 -From: Lech Perczak -Date: Mon, 10 Jan 2022 23:48:44 +0100 -Subject: MIPS: ath79: drop _machine_restart again - -Commit 81424d0ad0d4 ("MIPS: ath79: Use the reset controller to restart -OF machines") removed setup of _machine_restart on OF machines to use -reset handler in reset controller driver. -While removing remnants of non-OF machines in commit 3a77e0d75eed -("MIPS: ath79: drop machfiles"), this was introduced again, making it -impossible to use additional restart handlers registered through device -tree. Drop setting _machine_restart altogether, and ath79_restart -function, which is no longer used after this. 
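For context on the restart-handler mechanism this change unblocks: drivers such as the device-tree reset controller referenced above hook into the common reboot notifier chain instead of setting _machine_restart. A minimal, hypothetical registration sketch (demo_restart_handler and demo_restart_nb are illustrative names, not code from this patch):

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

/* Hypothetical restart handler, as a reset-controller driver might register. */
static int demo_restart_handler(struct notifier_block *nb,
				unsigned long action, void *data)
{
	/* Trigger the hardware reset here, e.g. poke a chip reset register. */
	return NOTIFY_DONE;
}

static struct notifier_block demo_restart_nb = {
	.notifier_call	= demo_restart_handler,
	.priority	= 128,	/* default priority per the register_restart_handler() kerneldoc */
};

static int __init demo_restart_init(void)
{
	return register_restart_handler(&demo_restart_nb);
}

Because the old ath79_restart() looped forever and never returned, handlers registered this way could not run; dropping _machine_restart lets the notifier chain take over on reboot.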
- -Fixes: 3a77e0d75eed ("MIPS: ath79: drop machfiles") -Cc: John Crispin -Cc: Florian Fainelli -Signed-off-by: Lech Perczak -Signed-off-by: Thomas Bogendoerfer ---- - arch/mips/ath79/setup.c | 10 ---------- - 1 file changed, 10 deletions(-) - ---- a/arch/mips/ath79/setup.c -+++ b/arch/mips/ath79/setup.c -@@ -34,15 +34,6 @@ - - static char ath79_sys_type[ATH79_SYS_TYPE_LEN]; - --static void ath79_restart(char *command) --{ -- local_irq_disable(); -- ath79_device_reset_set(AR71XX_RESET_FULL_CHIP); -- for (;;) -- if (cpu_wait) -- cpu_wait(); --} -- - static void ath79_halt(void) - { - while (1) -@@ -234,7 +225,6 @@ void __init plat_mem_setup(void) - - detect_memory_region(0, ATH79_MEM_SIZE_MIN, ATH79_MEM_SIZE_MAX); - -- _machine_restart = ath79_restart; - _machine_halt = ath79_halt; - pm_power_off = ath79_halt; - } diff --git a/target/linux/generic/backport-6.1/870-v5.18-hwmon-lm70-Add-ti-tmp125-support.patch b/target/linux/generic/backport-6.1/870-v5.18-hwmon-lm70-Add-ti-tmp125-support.patch deleted file mode 100644 index fabf177628f0..000000000000 --- a/target/linux/generic/backport-6.1/870-v5.18-hwmon-lm70-Add-ti-tmp125-support.patch +++ /dev/null @@ -1,71 +0,0 @@ -From 31d8f414e1596ba54a4315418e4c0086fda9e428 Mon Sep 17 00:00:00 2001 -From: Christian Lamparter -Date: Fri, 18 Feb 2022 10:06:43 +0100 -Subject: hwmon: (lm70) Add ti,tmp125 support - -The TMP125 is a 2 degree Celsius accurate Digital -Temperature Sensor with a SPI interface. - -The temperature register is a 16-bit, read-only register. -The MSB (Bit 15) is a leading zero and never set. Bits 14 -to 5 are the 1+9 temperature data bits in a two's -complement format. Bits 4 to 0 are useless copies of -Bit 5 value and therefore ignored. - -Signed-off-by: Christian Lamparter -Link: https://lore.kernel.org/r/43b19cbd4e7f51e9509e561b02b5d8d0e7079fac.1645175187.git.chunkeey@gmail.com -Signed-off-by: Guenter Roeck ---- ---- a/drivers/hwmon/lm70.c -+++ b/drivers/hwmon/lm70.c -@@ -34,6 +34,7 @@ - #define LM70_CHIP_LM71 2 /* NS LM71 */ - #define LM70_CHIP_LM74 3 /* NS LM74 */ - #define LM70_CHIP_TMP122 4 /* TI TMP122/TMP124 */ -+#define LM70_CHIP_TMP125 5 /* TI TMP125 */ - - struct lm70 { - struct spi_device *spi; -@@ -87,6 +88,12 @@ static ssize_t temp1_input_show(struct d - * LM71: - * 14 bits of 2's complement data, discard LSB 2 bits, - * resolution 0.0312 degrees celsius. -+ * -+ * TMP125: -+ * MSB/D15 is a leading zero. D14 is the sign-bit. This is -+ * followed by 9 temperature bits (D13..D5) in 2's complement -+ * data format with a resolution of 0.25 degrees celsius per unit. -+ * LSB 5 bits (D4..D0) share the same value as D5 and get discarded. 
- */ - switch (p_lm70->chip) { - case LM70_CHIP_LM70: -@@ -102,6 +109,10 @@ static ssize_t temp1_input_show(struct d - case LM70_CHIP_LM71: - val = ((int)raw / 4) * 3125 / 100; - break; -+ -+ case LM70_CHIP_TMP125: -+ val = (sign_extend32(raw, 14) / 32) * 250; -+ break; - } - - status = sprintf(buf, "%d\n", val); /* millidegrees Celsius */ -@@ -136,6 +147,10 @@ static const struct of_device_id lm70_of - .data = (void *) LM70_CHIP_TMP122, - }, - { -+ .compatible = "ti,tmp125", -+ .data = (void *) LM70_CHIP_TMP125, -+ }, -+ { - .compatible = "ti,lm71", - .data = (void *) LM70_CHIP_LM71, - }, -@@ -184,6 +199,7 @@ static const struct spi_device_id lm70_i - { "lm70", LM70_CHIP_LM70 }, - { "tmp121", LM70_CHIP_TMP121 }, - { "tmp122", LM70_CHIP_TMP122 }, -+ { "tmp125", LM70_CHIP_TMP125 }, - { "lm71", LM70_CHIP_LM71 }, - { "lm74", LM70_CHIP_LM74 }, - { }, diff --git a/target/linux/generic/backport-6.1/880-v5.19-cdc_ether-export-usbnet_cdc_zte_rx_fixup.patch b/target/linux/generic/backport-6.1/880-v5.19-cdc_ether-export-usbnet_cdc_zte_rx_fixup.patch deleted file mode 100644 index 39fdb3277334..000000000000 --- a/target/linux/generic/backport-6.1/880-v5.19-cdc_ether-export-usbnet_cdc_zte_rx_fixup.patch +++ /dev/null @@ -1,58 +0,0 @@ -From a79a5613e1907e1bf09bb6ba6fd5ff43b66c1afe Mon Sep 17 00:00:00 2001 -From: Lech Perczak -Date: Fri, 1 Apr 2022 22:03:55 +0200 -Subject: [PATCH 1/3] cdc_ether: export usbnet_cdc_zte_rx_fixup -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Commit bfe9b9d2df66 ("cdc_ether: Improve ZTE MF823/831/910 handling") -introduces a workaround for certain ZTE modems reporting invalid MAC -addresses over CDC-ECM. -The same issue was present on their RNDIS interface,which was fixed in -commit a5a18bdf7453 ("rndis_host: Set valid random MAC on buggy devices"). - -However, internal modem of ZTE MF286R router, on its RNDIS interface, also -exhibits a second issue fixed already in CDC-ECM, of the device not -respecting configured random MAC address. In order to share the fixup for -this with rndis_host driver, export the workaround function, which will -be re-used in the following commit in rndis_host. - -Cc: Kristian Evensen -Cc: Bjørn Mork -Cc: Oliver Neukum -Signed-off-by: Lech Perczak ---- - drivers/net/usb/cdc_ether.c | 3 ++- - include/linux/usb/usbnet.h | 1 + - 2 files changed, 3 insertions(+), 1 deletion(-) - ---- a/drivers/net/usb/cdc_ether.c -+++ b/drivers/net/usb/cdc_ether.c -@@ -479,7 +479,7 @@ static int usbnet_cdc_zte_bind(struct us - * device MAC address has been updated). Always set MAC address to that of the - * device. 
- */ --static int usbnet_cdc_zte_rx_fixup(struct usbnet *dev, struct sk_buff *skb) -+int usbnet_cdc_zte_rx_fixup(struct usbnet *dev, struct sk_buff *skb) - { - if (skb->len < ETH_HLEN || !(skb->data[0] & 0x02)) - return 1; -@@ -489,6 +489,7 @@ static int usbnet_cdc_zte_rx_fixup(struc - - return 1; - } -+EXPORT_SYMBOL_GPL(usbnet_cdc_zte_rx_fixup); - - /* Ensure correct link state - * ---- a/include/linux/usb/usbnet.h -+++ b/include/linux/usb/usbnet.h -@@ -214,6 +214,7 @@ extern int usbnet_ether_cdc_bind(struct - extern int usbnet_cdc_bind(struct usbnet *, struct usb_interface *); - extern void usbnet_cdc_unbind(struct usbnet *, struct usb_interface *); - extern void usbnet_cdc_status(struct usbnet *, struct urb *); -+extern int usbnet_cdc_zte_rx_fixup(struct usbnet *dev, struct sk_buff *skb); - - /* CDC and RNDIS support the same host-chosen packet filters for IN transfers */ - #define DEFAULT_FILTER (USB_CDC_PACKET_TYPE_BROADCAST \ diff --git a/target/linux/generic/backport-6.1/881-v5.19-rndis_host-enable-the-bogus-MAC-fixup-for-ZTE-device.patch b/target/linux/generic/backport-6.1/881-v5.19-rndis_host-enable-the-bogus-MAC-fixup-for-ZTE-device.patch deleted file mode 100644 index 71a6df81020a..000000000000 --- a/target/linux/generic/backport-6.1/881-v5.19-rndis_host-enable-the-bogus-MAC-fixup-for-ZTE-device.patch +++ /dev/null @@ -1,118 +0,0 @@ -From aa8aff10e969aca0cb64f5e54ff7489355582667 Mon Sep 17 00:00:00 2001 -From: Lech Perczak -Date: Fri, 1 Apr 2022 22:04:01 +0200 -Subject: [PATCH 2/3] rndis_host: enable the bogus MAC fixup for ZTE devices - from cdc_ether -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Certain ZTE modems, namely: MF823. MF831, MF910, built-in modem from -MF286R, expose both CDC-ECM and RNDIS network interfaces. -They have a trait of ignoring the locally-administered MAC address -configured on the interface both in CDC-ECM and RNDIS part, -and this leads to dropping of incoming traffic by the host. -However, the workaround was only present in CDC-ECM, and MF286R -explicitly requires it in RNDIS mode. - -Re-use the workaround in rndis_host as well, to fix operation of MF286R -module, some versions of which expose only the RNDIS interface. Do so by -introducing new flag, RNDIS_DRIVER_DATA_DST_MAC_FIXUP, and testing for it -in rndis_rx_fixup. This is required, as RNDIS uses frame batching, and all -of the packets inside the batch need the fixup. This might introduce a -performance penalty, because test is done for every returned Ethernet -frame. - -Apply the workaround to both "flavors" of RNDIS interfaces, as older ZTE -modems, like MF823 found in the wild, report the USB_CLASS_COMM class -interfaces, while MF286R reports USB_CLASS_WIRELESS_CONTROLLER. - -Suggested-by: Bjørn Mork -Cc: Kristian Evensen -Cc: Oliver Neukum -Signed-off-by: Lech Perczak ---- - drivers/net/usb/rndis_host.c | 32 ++++++++++++++++++++++++++++++++ - include/linux/usb/rndis_host.h | 1 + - 2 files changed, 33 insertions(+) - ---- a/drivers/net/usb/rndis_host.c -+++ b/drivers/net/usb/rndis_host.c -@@ -486,10 +486,14 @@ EXPORT_SYMBOL_GPL(rndis_unbind); - */ - int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb) - { -+ bool dst_mac_fixup; -+ - /* This check is no longer done by usbnet */ - if (skb->len < dev->net->hard_header_len) - return 0; - -+ dst_mac_fixup = !!(dev->driver_info->data & RNDIS_DRIVER_DATA_DST_MAC_FIXUP); -+ - /* peripheral may have batched packets to us... 
*/ - while (likely(skb->len)) { - struct rndis_data_hdr *hdr = (void *)skb->data; -@@ -524,10 +528,17 @@ int rndis_rx_fixup(struct usbnet *dev, s - break; - skb_pull(skb, msg_len - sizeof *hdr); - skb_trim(skb2, data_len); -+ -+ if (unlikely(dst_mac_fixup)) -+ usbnet_cdc_zte_rx_fixup(dev, skb2); -+ - usbnet_skb_return(dev, skb2); - } - - /* caller will usbnet_skb_return the remaining packet */ -+ if (unlikely(dst_mac_fixup)) -+ usbnet_cdc_zte_rx_fixup(dev, skb); -+ - return 1; - } - EXPORT_SYMBOL_GPL(rndis_rx_fixup); -@@ -601,6 +612,17 @@ static const struct driver_info rndis_po - .tx_fixup = rndis_tx_fixup, - }; - -+static const struct driver_info zte_rndis_info = { -+ .description = "ZTE RNDIS device", -+ .flags = FLAG_ETHER | FLAG_POINTTOPOINT | FLAG_FRAMING_RN | FLAG_NO_SETINT, -+ .data = RNDIS_DRIVER_DATA_DST_MAC_FIXUP, -+ .bind = rndis_bind, -+ .unbind = rndis_unbind, -+ .status = rndis_status, -+ .rx_fixup = rndis_rx_fixup, -+ .tx_fixup = rndis_tx_fixup, -+}; -+ - /*-------------------------------------------------------------------------*/ - - static const struct usb_device_id products [] = { -@@ -615,6 +637,16 @@ static const struct usb_device_id produc - USB_CLASS_COMM, 2 /* ACM */, 0x0ff), - .driver_info = (unsigned long)&rndis_info, - }, { -+ /* ZTE WWAN modules */ -+ USB_VENDOR_AND_INTERFACE_INFO(0x19d2, -+ USB_CLASS_WIRELESS_CONTROLLER, 1, 3), -+ .driver_info = (unsigned long)&zte_rndis_info, -+}, { -+ /* ZTE WWAN modules, ACM flavour */ -+ USB_VENDOR_AND_INTERFACE_INFO(0x19d2, -+ USB_CLASS_COMM, 2 /* ACM */, 0x0ff), -+ .driver_info = (unsigned long)&zte_rndis_info, -+}, { - /* RNDIS is MSFT's un-official variant of CDC ACM */ - USB_INTERFACE_INFO(USB_CLASS_COMM, 2 /* ACM */, 0x0ff), - .driver_info = (unsigned long) &rndis_info, ---- a/include/linux/usb/rndis_host.h -+++ b/include/linux/usb/rndis_host.h -@@ -197,6 +197,7 @@ struct rndis_keepalive_c { /* IN (option - - /* Flags for driver_info::data */ - #define RNDIS_DRIVER_DATA_POLL_STATUS 1 /* poll status before control */ -+#define RNDIS_DRIVER_DATA_DST_MAC_FIXUP 2 /* device ignores configured MAC address */ - - extern void rndis_status(struct usbnet *dev, struct urb *urb); - extern int diff --git a/target/linux/generic/backport-6.1/882-v5.19-rndis_host-limit-scope-of-bogus-MAC-address-detectio.patch b/target/linux/generic/backport-6.1/882-v5.19-rndis_host-limit-scope-of-bogus-MAC-address-detectio.patch deleted file mode 100644 index bdd812cc906c..000000000000 --- a/target/linux/generic/backport-6.1/882-v5.19-rndis_host-limit-scope-of-bogus-MAC-address-detectio.patch +++ /dev/null @@ -1,63 +0,0 @@ -From 9bfb4bcda7ba32d73ea322ea56a8ebe32e9247f6 Mon Sep 17 00:00:00 2001 -From: Lech Perczak -Date: Sat, 2 Apr 2022 02:19:57 +0200 -Subject: [PATCH 3/3] rndis_host: limit scope of bogus MAC address detection to - ZTE devices -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Reporting of bogus MAC addresses and ignoring configuration of new -destination address wasn't observed outside of a range of ZTE devices, -among which this seems to be the common bug. Align rndis_host driver -with implementation found in cdc_ether, which also limits this workaround -to ZTE devices. 
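A note on the 0x02 test this series relies on (the bp[0] & 0x02 check removed from generic_rndis_bind() and reinstated as dev_addr[0] & 0x02 in the new zte_rndis_bind()): bit 1 of the first MAC octet marks a locally administered address, which is what the affected ZTE firmware reports. A stand-alone illustration of the check in plain userspace C, with arbitrary example addresses, not code from the series:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Locally-administered bit: bit 1 of the first octet. */
static bool mac_is_locally_administered(const uint8_t mac[6])
{
	return mac[0] & 0x02;
}

int main(void)
{
	const uint8_t bogus[6]  = { 0x02, 0x00, 0x00, 0x11, 0x22, 0x33 };
	const uint8_t normal[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	printf("bogus:  %d\n", mac_is_locally_administered(bogus));	/* prints 1 */
	printf("normal: %d\n", mac_is_locally_administered(normal));	/* prints 0 */
	return 0;
}

Only when that bit is set does zte_rndis_bind() fall back to eth_hw_addr_random(), mirroring what cdc_ether already did.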
- -Suggested-by: Bjørn Mork -Cc: Kristian Evensen -Cc: Oliver Neukum -Signed-off-by: Lech Perczak ---- - drivers/net/usb/rndis_host.c | 17 ++++++++++++----- - 1 file changed, 12 insertions(+), 5 deletions(-) - ---- a/drivers/net/usb/rndis_host.c -+++ b/drivers/net/usb/rndis_host.c -@@ -419,10 +419,7 @@ generic_rndis_bind(struct usbnet *dev, s - goto halt_fail_and_release; - } - -- if (bp[0] & 0x02) -- eth_hw_addr_random(net); -- else -- ether_addr_copy(net->dev_addr, bp); -+ ether_addr_copy(net->dev_addr, bp); - - /* set a nonzero filter to enable data transfers */ - memset(u.set, 0, sizeof *u.set); -@@ -464,6 +461,16 @@ static int rndis_bind(struct usbnet *dev - return generic_rndis_bind(dev, intf, FLAG_RNDIS_PHYM_NOT_WIRELESS); - } - -+static int zte_rndis_bind(struct usbnet *dev, struct usb_interface *intf) -+{ -+ int status = rndis_bind(dev, intf); -+ -+ if (!status && (dev->net->dev_addr[0] & 0x02)) -+ eth_hw_addr_random(dev->net); -+ -+ return status; -+} -+ - void rndis_unbind(struct usbnet *dev, struct usb_interface *intf) - { - struct rndis_halt *halt; -@@ -616,7 +623,7 @@ static const struct driver_info zte_rndi - .description = "ZTE RNDIS device", - .flags = FLAG_ETHER | FLAG_POINTTOPOINT | FLAG_FRAMING_RN | FLAG_NO_SETINT, - .data = RNDIS_DRIVER_DATA_DST_MAC_FIXUP, -- .bind = rndis_bind, -+ .bind = zte_rndis_bind, - .unbind = rndis_unbind, - .status = rndis_status, - .rx_fixup = rndis_rx_fixup, diff --git a/target/linux/generic/backport-6.1/890-v6.1-mtd-spinand-winbond-fix-flash-detection.patch b/target/linux/generic/backport-6.1/890-v6.1-mtd-spinand-winbond-fix-flash-detection.patch deleted file mode 100644 index 38fbc3a3d73f..000000000000 --- a/target/linux/generic/backport-6.1/890-v6.1-mtd-spinand-winbond-fix-flash-detection.patch +++ /dev/null @@ -1,40 +0,0 @@ -From dbf70fc204d2fbb0d8ad8f42038a60846502efda Mon Sep 17 00:00:00 2001 -From: Mikhail Kshevetskiy -Date: Mon, 10 Oct 2022 13:51:09 +0300 -Subject: [PATCH] mtd: spinand: winbond: fix flash identification - -Winbond uses 3 bytes to identify flash: vendor_id, dev_id_0, dev_id_1, -but current driver uses only first 2 bytes of it for devices -identification. As result Winbond W25N02KV flash (id_bytes: EF, AA, 22) -is identified as W25N01GV (id_bytes: EF, AA, 21). - -Fix this by adding missed identification bytes. 
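To make the ambiguity concrete: with only two ID bytes matched, W25N01GV (EF AA 21) and W25N02KV (EF AA 22) collapse onto the same entry, so the third byte is required to tell them apart. A stand-alone sketch of a three-byte match in plain C (hypothetical table and identify() helper, not the spinand core code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct flash_id {
	const char *name;
	uint8_t id[3];	/* vendor_id, dev_id_0, dev_id_1 */
};

static const struct flash_id winbond_ids[] = {
	{ "W25N01GV", { 0xef, 0xaa, 0x21 } },
	{ "W25N02KV", { 0xef, 0xaa, 0x22 } },
};

static const char *identify(const uint8_t raw[3])
{
	for (size_t i = 0; i < sizeof(winbond_ids) / sizeof(winbond_ids[0]); i++)
		if (!memcmp(winbond_ids[i].id, raw, 3))
			return winbond_ids[i].name;
	return "unknown";
}

int main(void)
{
	const uint8_t from_chip[3] = { 0xef, 0xaa, 0x22 };

	/* Matching all three bytes avoids the W25N01GV false positive. */
	printf("%s\n", identify(from_chip));	/* prints W25N02KV */
	return 0;
}

In the actual driver this is expressed by passing both device-ID bytes to SPINAND_ID(), as the hunks below show.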
- -Signed-off-by: Mikhail Kshevetskiy -Reviewed-by: Frieder Schrempf -Signed-off-by: Miquel Raynal -Link: https://lore.kernel.org/linux-mtd/20221010105110.446674-1-mikhail.kshevetskiy@iopsys.eu ---- - drivers/mtd/nand/spi/winbond.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/drivers/mtd/nand/spi/winbond.c -+++ b/drivers/mtd/nand/spi/winbond.c -@@ -76,7 +76,7 @@ static int w25m02gv_select_target(struct - - static const struct spinand_info winbond_spinand_table[] = { - SPINAND_INFO("W25M02GV", -- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xab), -+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xab, 0x21), - NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 2), - NAND_ECCREQ(1, 512), - SPINAND_INFO_OP_VARIANTS(&read_cache_variants, -@@ -86,7 +86,7 @@ static const struct spinand_info winbond - SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL), - SPINAND_SELECT_TARGET(w25m02gv_select_target)), - SPINAND_INFO("W25N01GV", -- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xaa), -+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xaa, 0x21), - NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), - NAND_ECCREQ(1, 512), - SPINAND_INFO_OP_VARIANTS(&read_cache_variants, diff --git a/target/linux/generic/backport-6.1/891-v6.1-mtd-spinand-winbond-add-W25N02KV.patch b/target/linux/generic/backport-6.1/891-v6.1-mtd-spinand-winbond-add-W25N02KV.patch deleted file mode 100644 index d75a1acc57c7..000000000000 --- a/target/linux/generic/backport-6.1/891-v6.1-mtd-spinand-winbond-add-W25N02KV.patch +++ /dev/null @@ -1,106 +0,0 @@ -From 6154c7a583483d7b69f53bea868efdc369edd563 Mon Sep 17 00:00:00 2001 -From: Mikhail Kshevetskiy -Date: Mon, 10 Oct 2022 13:51:10 +0300 -Subject: [PATCH] mtd: spinand: winbond: add Winbond W25N02KV flash support - -Add support of Winbond W25N02KV flash - -Signed-off-by: Mikhail Kshevetskiy -Reviewed-by: Frieder Schrempf -Signed-off-by: Miquel Raynal -Link: https://lore.kernel.org/linux-mtd/20221010105110.446674-2-mikhail.kshevetskiy@iopsys.eu ---- - drivers/mtd/nand/spi/winbond.c | 75 ++++++++++++++++++++++++++++++++++ - 1 file changed, 75 insertions(+) - ---- a/drivers/mtd/nand/spi/winbond.c -+++ b/drivers/mtd/nand/spi/winbond.c -@@ -74,6 +74,72 @@ static int w25m02gv_select_target(struct - return spi_mem_exec_op(spinand->spimem, &op); - } - -+static int w25n02kv_ooblayout_ecc(struct mtd_info *mtd, int section, -+ struct mtd_oob_region *region) -+{ -+ if (section > 3) -+ return -ERANGE; -+ -+ region->offset = 64 + (16 * section); -+ region->length = 13; -+ -+ return 0; -+} -+ -+static int w25n02kv_ooblayout_free(struct mtd_info *mtd, int section, -+ struct mtd_oob_region *region) -+{ -+ if (section > 3) -+ return -ERANGE; -+ -+ region->offset = (16 * section) + 2; -+ region->length = 14; -+ -+ return 0; -+} -+ -+static const struct mtd_ooblayout_ops w25n02kv_ooblayout = { -+ .ecc = w25n02kv_ooblayout_ecc, -+ .free = w25n02kv_ooblayout_free, -+}; -+ -+static int w25n02kv_ecc_get_status(struct spinand_device *spinand, -+ u8 status) -+{ -+ struct nand_device *nand = spinand_to_nand(spinand); -+ u8 mbf = 0; -+ struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, &mbf); -+ -+ switch (status & STATUS_ECC_MASK) { -+ case STATUS_ECC_NO_BITFLIPS: -+ return 0; -+ -+ case STATUS_ECC_UNCOR_ERROR: -+ return -EBADMSG; -+ -+ case STATUS_ECC_HAS_BITFLIPS: -+ /* -+ * Let's try to retrieve the real maximum number of bitflips -+ * in order to avoid forcing the wear-leveling layer to move -+ * data around if it's not necessary. 
-+ */ -+ if (spi_mem_exec_op(spinand->spimem, &op)) -+ return nanddev_get_ecc_conf(nand)->strength; -+ -+ mbf >>= 4; -+ -+ if (WARN_ON(mbf > nanddev_get_ecc_conf(nand)->strength || !mbf)) -+ return nanddev_get_ecc_conf(nand)->strength; -+ -+ return mbf; -+ -+ default: -+ break; -+ } -+ -+ return -EINVAL; -+} -+ - static const struct spinand_info winbond_spinand_table[] = { - SPINAND_INFO("W25M02GV", - SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xab, 0x21), -@@ -94,6 +160,15 @@ static const struct spinand_info winbond - &update_cache_variants), - 0, - SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL)), -+ SPINAND_INFO("W25N02KV", -+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xaa, 0x22), -+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), -+ NAND_ECCREQ(8, 512), -+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants, -+ &write_cache_variants, -+ &update_cache_variants), -+ 0, -+ SPINAND_ECCINFO(&w25n02kv_ooblayout, w25n02kv_ecc_get_status)), - }; - - static int winbond_spinand_init(struct spinand_device *spinand)
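Finally, a reviewer aid for the TMP125 conversion in the dropped hwmon patch above (870-v5.18-hwmon-lm70-Add-ti-tmp125-support.patch): the raw 16-bit word carries its sign bit at D14 and nine data bits at D13..D5, so the driver sign-extends from bit 14, divides by 32 to discard D4..D0, and scales by 250 millidegrees Celsius per step. The same arithmetic as a stand-alone sketch, with sign_extend32() re-implemented for userspace and an illustrative input value:

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's sign_extend32(value, index),
 * where index is the bit position of the sign bit. */
static int32_t sign_extend32(uint32_t value, int index)
{
	const uint32_t m = UINT32_C(1) << index;

	return (int32_t)((value ^ m) - m);
}

/* TMP125: D14 sign, D13..D5 data, D4..D0 discarded; 0.25 degC per LSB. */
static int tmp125_millicelsius(uint16_t raw)
{
	return (sign_extend32(raw, 14) / 32) * 250;
}

int main(void)
{
	/* 25.0 degC -> 100 counts -> raw = 100 << 5 = 0x0c80 */
	printf("%d millidegrees C\n", tmp125_millicelsius(0x0c80));	/* prints 25000 */
	return 0;
}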