-LINUX_VERSION-5.15 = .48
-LINUX_KERNEL_HASH-5.15.48 = 19f0075d1b94d6874a2af7127a59b6b6c423fc7d4a883a51415543e7ec1be2a6
+LINUX_VERSION-5.15 = .49
+LINUX_KERNEL_HASH-5.15.49 = 32497893ba47f4ad32a59fa4254e8c25e41bc821798e3b2f2443822fa00059dc
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/clk.h>
-@@ -828,7 +829,7 @@ static int mtk_init_fq_dma(struct mtk_et
+@@ -839,7 +840,7 @@ static int mtk_init_fq_dma(struct mtk_et
dma_addr_t dma_addr;
int i;
cnt * sizeof(struct mtk_tx_dma),
&eth->phy_scratch_ring,
GFP_ATOMIC);
-@@ -840,10 +841,10 @@ static int mtk_init_fq_dma(struct mtk_et
+@@ -851,10 +852,10 @@ static int mtk_init_fq_dma(struct mtk_et
if (unlikely(!eth->scratch_head))
return -ENOMEM;
return -ENOMEM;
phy_ring_tail = eth->phy_scratch_ring +
-@@ -897,26 +898,26 @@ static void mtk_tx_unmap(struct mtk_eth
+@@ -908,26 +909,26 @@ static void mtk_tx_unmap(struct mtk_eth
{
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
dma_unmap_addr(tx_buf, dma_addr1),
dma_unmap_len(tx_buf, dma_len1),
DMA_TO_DEVICE);
-@@ -994,9 +995,9 @@ static int mtk_tx_map(struct sk_buff *sk
+@@ -1005,9 +1006,9 @@ static int mtk_tx_map(struct sk_buff *sk
if (skb_vlan_tag_present(skb))
txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
return -ENOMEM;
WRITE_ONCE(itxd->txd1, mapped_addr);
-@@ -1035,10 +1036,10 @@ static int mtk_tx_map(struct sk_buff *sk
+@@ -1046,10 +1047,10 @@ static int mtk_tx_map(struct sk_buff *sk
frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
goto err_dma;
if (i == nr_frags - 1 &&
-@@ -1316,18 +1317,18 @@ static int mtk_poll_rx(struct napi_struc
+@@ -1330,18 +1331,18 @@ static int mtk_poll_rx(struct napi_struc
netdev->stats.rx_dropped++;
goto release_desc;
}
ring->buf_size, DMA_FROM_DEVICE);
/* receive data */
-@@ -1600,7 +1601,7 @@ static int mtk_tx_alloc(struct mtk_eth *
+@@ -1614,7 +1615,7 @@ static int mtk_tx_alloc(struct mtk_eth *
if (!ring->buf)
goto no_tx_mem;
&ring->phys, GFP_ATOMIC);
if (!ring->dma)
goto no_tx_mem;
-@@ -1618,7 +1619,7 @@ static int mtk_tx_alloc(struct mtk_eth *
+@@ -1632,7 +1633,7 @@ static int mtk_tx_alloc(struct mtk_eth *
* descriptors in ring->dma_pdma.
*/
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
&ring->phys_pdma,
GFP_ATOMIC);
if (!ring->dma_pdma)
-@@ -1677,7 +1678,7 @@ static void mtk_tx_clean(struct mtk_eth
+@@ -1691,7 +1692,7 @@ static void mtk_tx_clean(struct mtk_eth
}
if (ring->dma) {
MTK_DMA_SIZE * sizeof(*ring->dma),
ring->dma,
ring->phys);
-@@ -1685,7 +1686,7 @@ static void mtk_tx_clean(struct mtk_eth
+@@ -1699,7 +1700,7 @@ static void mtk_tx_clean(struct mtk_eth
}
if (ring->dma_pdma) {
MTK_DMA_SIZE * sizeof(*ring->dma_pdma),
ring->dma_pdma,
ring->phys_pdma);
-@@ -1730,18 +1731,18 @@ static int mtk_rx_alloc(struct mtk_eth *
+@@ -1747,18 +1748,18 @@ static int mtk_rx_alloc(struct mtk_eth *
return -ENOMEM;
}
return -ENOMEM;
ring->dma[i].rxd1 = (unsigned int)dma_addr;
-@@ -1777,7 +1778,7 @@ static void mtk_rx_clean(struct mtk_eth
+@@ -1794,7 +1795,7 @@ static void mtk_rx_clean(struct mtk_eth
continue;
if (!ring->dma[i].rxd1)
continue;
ring->dma[i].rxd1,
ring->buf_size,
DMA_FROM_DEVICE);
-@@ -1788,7 +1789,7 @@ static void mtk_rx_clean(struct mtk_eth
+@@ -1805,7 +1806,7 @@ static void mtk_rx_clean(struct mtk_eth
}
if (ring->dma) {
ring->dma_size * sizeof(*ring->dma),
ring->dma,
ring->phys);
-@@ -2144,7 +2145,7 @@ static void mtk_dma_free(struct mtk_eth
+@@ -2161,7 +2162,7 @@ static void mtk_dma_free(struct mtk_eth
if (eth->netdev[i])
netdev_reset_queue(eth->netdev[i]);
if (eth->scratch_ring) {
MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
eth->scratch_ring,
eth->phy_scratch_ring);
-@@ -2494,6 +2495,8 @@ static void mtk_dim_tx(struct work_struc
+@@ -2511,6 +2512,8 @@ static void mtk_dim_tx(struct work_struc
static int mtk_hw_init(struct mtk_eth *eth)
{
int i, val, ret;
if (test_and_set_bit(MTK_HW_INIT, &eth->state))
-@@ -2506,6 +2509,10 @@ static int mtk_hw_init(struct mtk_eth *e
+@@ -2523,6 +2526,10 @@ static int mtk_hw_init(struct mtk_eth *e
if (ret)
goto err_disable_pm;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
ret = device_reset(eth->dev);
if (ret) {
-@@ -3059,6 +3066,35 @@ free_netdev:
+@@ -3076,6 +3083,35 @@ free_netdev:
return err;
}
static int mtk_probe(struct platform_device *pdev)
{
struct device_node *mac_np;
-@@ -3072,6 +3108,7 @@ static int mtk_probe(struct platform_dev
+@@ -3089,6 +3125,7 @@ static int mtk_probe(struct platform_dev
eth->soc = of_device_get_match_data(&pdev->dev);
eth->dev = &pdev->dev;
eth->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(eth->base))
return PTR_ERR(eth->base);
-@@ -3120,6 +3157,16 @@ static int mtk_probe(struct platform_dev
+@@ -3137,6 +3174,16 @@ static int mtk_probe(struct platform_dev
}
}
static int mtk_msg_level = -1;
module_param_named(msg_level, mtk_msg_level, int, 0);
-@@ -3189,6 +3190,22 @@ static int mtk_probe(struct platform_dev
+@@ -3206,6 +3207,22 @@ static int mtk_probe(struct platform_dev
}
}
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -2315,7 +2315,7 @@ static int mtk_open(struct net_device *d
+@@ -2332,7 +2332,7 @@ static int mtk_open(struct net_device *d
if (err)
return err;
gdm_config = MTK_GDMA_TO_PPE;
mtk_gdm_config(eth, gdm_config);
-@@ -2389,7 +2389,7 @@ static int mtk_stop(struct net_device *d
+@@ -2406,7 +2406,7 @@ static int mtk_stop(struct net_device *d
mtk_dma_free(eth);
if (eth->soc->offload_version)
return 0;
}
-@@ -3281,10 +3281,11 @@ static int mtk_probe(struct platform_dev
+@@ -3298,10 +3298,11 @@ static int mtk_probe(struct platform_dev
}
if (eth->soc->offload_version) {
#include <net/dsa.h>
#include "mtk_eth_soc.h"
-@@ -1281,7 +1282,7 @@ static int mtk_poll_rx(struct napi_struc
+@@ -1292,7 +1293,7 @@ static int mtk_poll_rx(struct napi_struc
struct net_device *netdev;
unsigned int pktlen;
dma_addr_t dma_addr;
int mac;
ring = mtk_get_rx_ring(eth);
-@@ -1357,6 +1358,11 @@ static int mtk_poll_rx(struct napi_struc
+@@ -1371,6 +1372,11 @@ static int mtk_poll_rx(struct napi_struc
skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
}
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
(trxd.rxd2 & RX_DMA_VTAG))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
-@@ -3281,7 +3287,7 @@ static int mtk_probe(struct platform_dev
+@@ -3298,7 +3304,7 @@ static int mtk_probe(struct platform_dev
}
if (eth->soc->offload_version) {
mediatek,hifsys = <&hifsys>;
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -3168,7 +3168,7 @@ static int mtk_probe(struct platform_dev
+@@ -3185,7 +3185,7 @@ static int mtk_probe(struct platform_dev
struct regmap *cci;
cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -834,7 +834,7 @@ static int mtk_init_fq_dma(struct mtk_et
+@@ -845,7 +845,7 @@ static int mtk_init_fq_dma(struct mtk_et
eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
cnt * sizeof(struct mtk_tx_dma),
&eth->phy_scratch_ring,
if (unlikely(!eth->scratch_ring))
return -ENOMEM;
-@@ -1609,7 +1609,7 @@ static int mtk_tx_alloc(struct mtk_eth *
+@@ -1623,7 +1623,7 @@ static int mtk_tx_alloc(struct mtk_eth *
goto no_tx_mem;
ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
if (!ring->dma)
goto no_tx_mem;
-@@ -1627,8 +1627,7 @@ static int mtk_tx_alloc(struct mtk_eth *
+@@ -1641,8 +1641,7 @@ static int mtk_tx_alloc(struct mtk_eth *
*/
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
if (!ring->dma_pdma)
goto no_tx_mem;
-@@ -1740,7 +1739,7 @@ static int mtk_rx_alloc(struct mtk_eth *
+@@ -1757,7 +1756,7 @@ static int mtk_rx_alloc(struct mtk_eth *
ring->dma = dma_alloc_coherent(eth->dma_dev,
rx_dma_size * sizeof(*ring->dma),
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -960,18 +960,51 @@ static void setup_tx_buf(struct mtk_eth
+@@ -971,18 +971,51 @@ static void setup_tx_buf(struct mtk_eth
}
}
int k = 0;
itxd = ring->next_free;
-@@ -979,49 +1012,32 @@ static int mtk_tx_map(struct sk_buff *sk
+@@ -990,49 +1023,32 @@ static int mtk_tx_map(struct sk_buff *sk
if (itxd == ring->last_free)
return -ENOMEM;
bool new_desc = true;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA) ||
-@@ -1036,23 +1052,17 @@ static int mtk_tx_map(struct sk_buff *sk
+@@ -1047,23 +1063,17 @@ static int mtk_tx_map(struct sk_buff *sk
new_desc = false;
}
tx_buf = mtk_desc_to_tx_buf(ring, txd);
if (new_desc)
-@@ -1062,20 +1072,17 @@ static int mtk_tx_map(struct sk_buff *sk
+@@ -1073,20 +1083,17 @@ static int mtk_tx_map(struct sk_buff *sk
tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
MTK_TX_FLAGS_FPORT1;
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -826,20 +826,20 @@ static inline bool mtk_rx_get_desc(struc
+@@ -837,20 +837,20 @@ static void *mtk_max_lro_buf_alloc(gfp_t
/* the qdma core needs scratch memory to be setup */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
if (unlikely(!eth->scratch_head))
return -ENOMEM;
-@@ -849,16 +849,19 @@ static int mtk_init_fq_dma(struct mtk_et
+@@ -860,16 +860,19 @@ static int mtk_init_fq_dma(struct mtk_et
if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
return -ENOMEM;
}
mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
-@@ -2152,6 +2155,7 @@ static int mtk_dma_init(struct mtk_eth *
+@@ -2169,6 +2172,7 @@ static int mtk_dma_init(struct mtk_eth *
static void mtk_dma_free(struct mtk_eth *eth)
{
int i;
for (i = 0; i < MTK_MAC_COUNT; i++)
-@@ -2159,9 +2163,8 @@ static void mtk_dma_free(struct mtk_eth
+@@ -2176,9 +2180,8 @@ static void mtk_dma_free(struct mtk_eth
netdev_reset_queue(eth->netdev[i]);
if (eth->scratch_ring) {
dma_free_coherent(eth->dma_dev,
eth->scratch_ring = NULL;
eth->phy_scratch_ring = 0;
}
-@@ -3371,6 +3374,9 @@ static const struct mtk_soc_data mt2701_
+@@ -3388,6 +3391,9 @@ static const struct mtk_soc_data mt2701_
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
};
static const struct mtk_soc_data mt7621_data = {
-@@ -3379,6 +3385,9 @@ static const struct mtk_soc_data mt7621_
+@@ -3396,6 +3402,9 @@ static const struct mtk_soc_data mt7621_
.required_clks = MT7621_CLKS_BITMAP,
.required_pctl = false,
.offload_version = 2,
};
static const struct mtk_soc_data mt7622_data = {
-@@ -3388,6 +3397,9 @@ static const struct mtk_soc_data mt7622_
+@@ -3405,6 +3414,9 @@ static const struct mtk_soc_data mt7622_
.required_clks = MT7622_CLKS_BITMAP,
.required_pctl = false,
.offload_version = 2,
};
static const struct mtk_soc_data mt7623_data = {
-@@ -3396,6 +3408,9 @@ static const struct mtk_soc_data mt7623_
+@@ -3413,6 +3425,9 @@ static const struct mtk_soc_data mt7623_
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
.offload_version = 2,
};
static const struct mtk_soc_data mt7629_data = {
-@@ -3404,6 +3419,9 @@ static const struct mtk_soc_data mt7629_
+@@ -3421,6 +3436,9 @@ static const struct mtk_soc_data mt7629_
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7629_CLKS_BITMAP,
.required_pctl = false,
};
static const struct mtk_soc_data rt5350_data = {
-@@ -3411,6 +3429,9 @@ static const struct mtk_soc_data rt5350_
+@@ -3428,6 +3446,9 @@ static const struct mtk_soc_data rt5350_
.hw_features = MTK_HW_FEATURES_MT7628,
.required_clks = MT7628_CLKS_BITMAP,
.required_pctl = false,
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -1610,8 +1610,10 @@ static int mtk_napi_rx(struct napi_struc
+@@ -1624,8 +1624,10 @@ static int mtk_napi_rx(struct napi_struc
static int mtk_tx_alloc(struct mtk_eth *eth)
{
ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
GFP_KERNEL);
-@@ -1627,8 +1629,10 @@ static int mtk_tx_alloc(struct mtk_eth *
+@@ -1641,8 +1643,10 @@ static int mtk_tx_alloc(struct mtk_eth *
int next = (i + 1) % MTK_DMA_SIZE;
u32 next_ptr = ring->phys + next * sz;
}
/* On MT7688 (PDMA only) this driver uses the ring->dma structs
-@@ -1650,7 +1654,7 @@ static int mtk_tx_alloc(struct mtk_eth *
+@@ -1664,7 +1668,7 @@ static int mtk_tx_alloc(struct mtk_eth *
ring->dma_size = MTK_DMA_SIZE;
atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
ring->next_free = &ring->dma[0];
ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz));
ring->thresh = MAX_SKB_FRAGS;
-@@ -1683,6 +1687,7 @@ no_tx_mem:
+@@ -1697,6 +1701,7 @@ no_tx_mem:
static void mtk_tx_clean(struct mtk_eth *eth)
{
struct mtk_tx_ring *ring = &eth->tx_ring;
int i;
-@@ -1695,17 +1700,15 @@ static void mtk_tx_clean(struct mtk_eth
+@@ -1709,17 +1714,15 @@ static void mtk_tx_clean(struct mtk_eth
if (ring->dma) {
dma_free_coherent(eth->dma_dev,
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -879,10 +879,11 @@ static inline void *mtk_qdma_phys_to_vir
+@@ -890,10 +890,11 @@ static inline void *mtk_qdma_phys_to_vir
return ret + (desc - ring->phys);
}
return &ring->buf[idx];
}
-@@ -1004,6 +1005,7 @@ static int mtk_tx_map(struct sk_buff *sk
+@@ -1015,6 +1016,7 @@ static int mtk_tx_map(struct sk_buff *sk
};
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
struct mtk_tx_dma *itxd, *txd;
struct mtk_tx_dma *itxd_pdma, *txd_pdma;
struct mtk_tx_buf *itx_buf, *tx_buf;
-@@ -1015,7 +1017,7 @@ static int mtk_tx_map(struct sk_buff *sk
+@@ -1026,7 +1028,7 @@ static int mtk_tx_map(struct sk_buff *sk
if (itxd == ring->last_free)
return -ENOMEM;
memset(itx_buf, 0, sizeof(*itx_buf));
txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
-@@ -1043,7 +1045,7 @@ static int mtk_tx_map(struct sk_buff *sk
+@@ -1054,7 +1056,7 @@ static int mtk_tx_map(struct sk_buff *sk
while (frag_size) {
bool new_desc = true;
(i & 0x1)) {
txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
txd_pdma = qdma_to_pdma(ring, txd);
-@@ -1067,7 +1069,8 @@ static int mtk_tx_map(struct sk_buff *sk
+@@ -1078,7 +1080,8 @@ static int mtk_tx_map(struct sk_buff *sk
mtk_tx_set_dma_desc(dev, txd, &txd_info);
if (new_desc)
memset(tx_buf, 0, sizeof(*tx_buf));
tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
-@@ -1086,7 +1089,7 @@ static int mtk_tx_map(struct sk_buff *sk
+@@ -1097,7 +1100,7 @@ static int mtk_tx_map(struct sk_buff *sk
/* store skb to cleanup */
itx_buf->skb = skb;
if (k & 0x1)
txd_pdma->txd2 |= TX_DMA_LS0;
else
-@@ -1104,7 +1107,7 @@ static int mtk_tx_map(struct sk_buff *sk
+@@ -1115,7 +1118,7 @@ static int mtk_tx_map(struct sk_buff *sk
*/
wmb();
if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
!netdev_xmit_more())
mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
-@@ -1118,13 +1121,13 @@ static int mtk_tx_map(struct sk_buff *sk
+@@ -1129,13 +1132,13 @@ static int mtk_tx_map(struct sk_buff *sk
err_dma:
do {
itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
-@@ -1435,7 +1438,8 @@ static int mtk_poll_tx_qdma(struct mtk_e
+@@ -1449,7 +1452,8 @@ static int mtk_poll_tx_qdma(struct mtk_e
if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
break;
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -894,9 +894,10 @@ static struct mtk_tx_dma *qdma_to_pdma(s
+@@ -905,9 +905,10 @@ static struct mtk_tx_dma *qdma_to_pdma(s
return ring->dma_pdma - ring->dma + dma;
}
}
static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
-@@ -1112,8 +1113,10 @@ static int mtk_tx_map(struct sk_buff *sk
+@@ -1123,8 +1124,10 @@ static int mtk_tx_map(struct sk_buff *sk
!netdev_xmit_more())
mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
} else {
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -1758,7 +1758,7 @@ static int mtk_rx_alloc(struct mtk_eth *
+@@ -1775,7 +1775,7 @@ static int mtk_rx_alloc(struct mtk_eth *
}
ring->dma = dma_alloc_coherent(eth->dma_dev,
&ring->phys, GFP_KERNEL);
if (!ring->dma)
return -ENOMEM;
-@@ -1816,9 +1816,8 @@ static void mtk_rx_clean(struct mtk_eth
+@@ -1833,9 +1833,8 @@ static void mtk_rx_clean(struct mtk_eth
if (ring->dma) {
dma_free_coherent(eth->dma_dev,
ring->dma = NULL;
}
}
-@@ -3386,6 +3385,7 @@ static const struct mtk_soc_data mt2701_
+@@ -3403,6 +3402,7 @@ static const struct mtk_soc_data mt2701_
.required_pctl = true,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
},
};
-@@ -3397,6 +3397,7 @@ static const struct mtk_soc_data mt7621_
+@@ -3414,6 +3414,7 @@ static const struct mtk_soc_data mt7621_
.offload_version = 2,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
},
};
-@@ -3409,6 +3410,7 @@ static const struct mtk_soc_data mt7622_
+@@ -3426,6 +3427,7 @@ static const struct mtk_soc_data mt7622_
.offload_version = 2,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
},
};
-@@ -3420,6 +3422,7 @@ static const struct mtk_soc_data mt7623_
+@@ -3437,6 +3439,7 @@ static const struct mtk_soc_data mt7623_
.offload_version = 2,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
},
};
-@@ -3431,6 +3434,7 @@ static const struct mtk_soc_data mt7629_
+@@ -3448,6 +3451,7 @@ static const struct mtk_soc_data mt7629_
.required_pctl = false,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
},
};
-@@ -3441,6 +3445,7 @@ static const struct mtk_soc_data rt5350_
+@@ -3458,6 +3462,7 @@ static const struct mtk_soc_data rt5350_
.required_pctl = false,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -1253,9 +1253,12 @@ static struct mtk_rx_ring *mtk_get_rx_ri
+@@ -1264,9 +1264,12 @@ static struct mtk_rx_ring *mtk_get_rx_ri
return &eth->rx_ring[0];
for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
ring->calc_idx_update = true;
return ring;
}
-@@ -1306,7 +1309,7 @@ static int mtk_poll_rx(struct napi_struc
+@@ -1317,7 +1320,7 @@ static int mtk_poll_rx(struct napi_struc
goto rx_done;
idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
data = ring->data[idx];
if (!mtk_rx_get_desc(&trxd, rxd))
-@@ -1495,7 +1498,7 @@ static int mtk_poll_tx_pdma(struct mtk_e
+@@ -1509,7 +1512,7 @@ static int mtk_poll_tx_pdma(struct mtk_e
mtk_tx_unmap(eth, tx_buf, true);
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -1767,18 +1767,25 @@ static int mtk_rx_alloc(struct mtk_eth *
+@@ -1784,18 +1784,25 @@ static int mtk_rx_alloc(struct mtk_eth *
return -ENOMEM;
for (i = 0; i < rx_dma_size; i++) {
}
ring->dma_size = rx_dma_size;
ring->calc_idx_update = false;
-@@ -1803,14 +1810,17 @@ static void mtk_rx_clean(struct mtk_eth
+@@ -1820,14 +1827,17 @@ static void mtk_rx_clean(struct mtk_eth
if (ring->data && ring->dma) {
for (i = 0; i < ring->dma_size; i++) {
}
u64_stats_update_end(&hw_stats->syncp);
-@@ -864,10 +917,10 @@ static int mtk_init_fq_dma(struct mtk_et
+@@ -875,10 +928,10 @@ static int mtk_init_fq_dma(struct mtk_et
txd->txd4 = 0;
}
return 0;
}
-@@ -1111,7 +1164,7 @@ static int mtk_tx_map(struct sk_buff *sk
+@@ -1122,7 +1175,7 @@ static int mtk_tx_map(struct sk_buff *sk
if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
!netdev_xmit_more())
} else {
int next_idx;
-@@ -1425,6 +1478,7 @@ rx_done:
+@@ -1439,6 +1492,7 @@ rx_done:
static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
unsigned int *done, unsigned int *bytes)
{
struct mtk_tx_ring *ring = &eth->tx_ring;
struct mtk_tx_dma *desc;
struct sk_buff *skb;
-@@ -1432,7 +1486,7 @@ static int mtk_poll_tx_qdma(struct mtk_e
+@@ -1446,7 +1500,7 @@ static int mtk_poll_tx_qdma(struct mtk_e
u32 cpu, dma;
cpu = ring->last_free_ptr;
desc = mtk_qdma_phys_to_virt(ring, cpu);
-@@ -1467,7 +1521,7 @@ static int mtk_poll_tx_qdma(struct mtk_e
+@@ -1481,7 +1535,7 @@ static int mtk_poll_tx_qdma(struct mtk_e
}
ring->last_free_ptr = cpu;
return budget;
}
-@@ -1560,24 +1614,25 @@ static void mtk_handle_status_irq(struct
+@@ -1574,24 +1628,25 @@ static void mtk_handle_status_irq(struct
static int mtk_napi_tx(struct napi_struct *napi, int budget)
{
struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
return budget;
if (napi_complete_done(napi, tx_done))
-@@ -1589,6 +1644,7 @@ static int mtk_napi_tx(struct napi_struc
+@@ -1603,6 +1658,7 @@ static int mtk_napi_tx(struct napi_struc
static int mtk_napi_rx(struct napi_struct *napi, int budget)
{
struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
int rx_done_total = 0;
mtk_handle_status_irq(eth);
-@@ -1596,21 +1652,21 @@ static int mtk_napi_rx(struct napi_struc
+@@ -1610,21 +1666,21 @@ static int mtk_napi_rx(struct napi_struc
do {
int rx_done;
if (napi_complete_done(napi, rx_done_total))
mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
-@@ -1673,20 +1729,20 @@ static int mtk_tx_alloc(struct mtk_eth *
+@@ -1687,20 +1743,20 @@ static int mtk_tx_alloc(struct mtk_eth *
*/
wmb();
}
return 0;
-@@ -1725,6 +1781,7 @@ static void mtk_tx_clean(struct mtk_eth
+@@ -1739,6 +1795,7 @@ static void mtk_tx_clean(struct mtk_eth
static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
{
struct mtk_rx_ring *ring;
int rx_data_len, rx_dma_size;
int i;
-@@ -1790,16 +1847,18 @@ static int mtk_rx_alloc(struct mtk_eth *
+@@ -1807,16 +1864,18 @@ static int mtk_rx_alloc(struct mtk_eth *
ring->dma_size = rx_dma_size;
ring->calc_idx_update = false;
ring->calc_idx = rx_dma_size - 1;
return 0;
}
-@@ -2108,9 +2167,9 @@ static int mtk_dma_busy_wait(struct mtk_
+@@ -2125,9 +2184,9 @@ static int mtk_dma_busy_wait(struct mtk_
u32 val;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val,
!(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)),
-@@ -2168,8 +2227,8 @@ static int mtk_dma_init(struct mtk_eth *
+@@ -2185,8 +2244,8 @@ static int mtk_dma_init(struct mtk_eth *
* automatically
*/
mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
}
return 0;
-@@ -2243,13 +2302,14 @@ static irqreturn_t mtk_handle_irq_tx(int
+@@ -2260,13 +2319,14 @@ static irqreturn_t mtk_handle_irq_tx(int
static irqreturn_t mtk_handle_irq(int irq, void *_eth)
{
struct mtk_eth *eth = _eth;
mtk_handle_irq_tx(irq, _eth);
}
-@@ -2273,6 +2333,7 @@ static void mtk_poll_controller(struct n
+@@ -2290,6 +2350,7 @@ static void mtk_poll_controller(struct n
static int mtk_start_dma(struct mtk_eth *eth)
{
u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
int err;
err = mtk_dma_init(eth);
-@@ -2287,16 +2348,15 @@ static int mtk_start_dma(struct mtk_eth
+@@ -2304,16 +2365,15 @@ static int mtk_start_dma(struct mtk_eth
MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
MTK_RX_BT_32DWORDS,
}
return 0;
-@@ -2420,8 +2480,8 @@ static int mtk_stop(struct net_device *d
+@@ -2437,8 +2497,8 @@ static int mtk_stop(struct net_device *d
cancel_work_sync(&eth->tx_dim.work);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
mtk_dma_free(eth);
-@@ -2475,6 +2535,7 @@ static void mtk_dim_rx(struct work_struc
+@@ -2492,6 +2552,7 @@ static void mtk_dim_rx(struct work_struc
{
struct dim *dim = container_of(work, struct dim, work);
struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
struct dim_cq_moder cur_profile;
u32 val, cur;
-@@ -2482,7 +2543,7 @@ static void mtk_dim_rx(struct work_struc
+@@ -2499,7 +2560,7 @@ static void mtk_dim_rx(struct work_struc
dim->profile_ix);
spin_lock_bh(&eth->dim_lock);
val &= MTK_PDMA_DELAY_TX_MASK;
val |= MTK_PDMA_DELAY_RX_EN;
-@@ -2492,9 +2553,9 @@ static void mtk_dim_rx(struct work_struc
+@@ -2509,9 +2570,9 @@ static void mtk_dim_rx(struct work_struc
cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
spin_unlock_bh(&eth->dim_lock);
-@@ -2505,6 +2566,7 @@ static void mtk_dim_tx(struct work_struc
+@@ -2522,6 +2583,7 @@ static void mtk_dim_tx(struct work_struc
{
struct dim *dim = container_of(work, struct dim, work);
struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
struct dim_cq_moder cur_profile;
u32 val, cur;
-@@ -2512,7 +2574,7 @@ static void mtk_dim_tx(struct work_struc
+@@ -2529,7 +2591,7 @@ static void mtk_dim_tx(struct work_struc
dim->profile_ix);
spin_lock_bh(&eth->dim_lock);
val &= MTK_PDMA_DELAY_RX_MASK;
val |= MTK_PDMA_DELAY_TX_EN;
-@@ -2522,9 +2584,9 @@ static void mtk_dim_tx(struct work_struc
+@@ -2539,9 +2601,9 @@ static void mtk_dim_tx(struct work_struc
cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
spin_unlock_bh(&eth->dim_lock);
-@@ -2535,6 +2597,7 @@ static int mtk_hw_init(struct mtk_eth *e
+@@ -2552,6 +2614,7 @@ static int mtk_hw_init(struct mtk_eth *e
{
u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
ETHSYS_DMA_AG_MAP_PPE;
int i, val, ret;
if (test_and_set_bit(MTK_HW_INIT, &eth->state))
-@@ -2609,10 +2672,10 @@ static int mtk_hw_init(struct mtk_eth *e
+@@ -2626,10 +2689,10 @@ static int mtk_hw_init(struct mtk_eth *e
mtk_rx_irq_disable(eth, ~0);
/* FE int grouping */
mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
return 0;
-@@ -3151,14 +3214,6 @@ static int mtk_probe(struct platform_dev
+@@ -3168,14 +3231,6 @@ static int mtk_probe(struct platform_dev
if (IS_ERR(eth->base))
return PTR_ERR(eth->base);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA;
eth->ip_align = NET_IP_ALIGN;
-@@ -3392,6 +3447,7 @@ static int mtk_remove(struct platform_de
+@@ -3409,6 +3464,7 @@ static int mtk_remove(struct platform_de
}
static const struct mtk_soc_data mt2701_data = {
.caps = MT7623_CAPS | MTK_HWLRO,
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7623_CLKS_BITMAP,
-@@ -3403,6 +3459,7 @@ static const struct mtk_soc_data mt2701_
+@@ -3420,6 +3476,7 @@ static const struct mtk_soc_data mt2701_
};
static const struct mtk_soc_data mt7621_data = {
.caps = MT7621_CAPS,
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7621_CLKS_BITMAP,
-@@ -3415,6 +3472,7 @@ static const struct mtk_soc_data mt7621_
+@@ -3432,6 +3489,7 @@ static const struct mtk_soc_data mt7621_
};
static const struct mtk_soc_data mt7622_data = {
.ana_rgc3 = 0x2028,
.caps = MT7622_CAPS | MTK_HWLRO,
.hw_features = MTK_HW_FEATURES,
-@@ -3428,6 +3486,7 @@ static const struct mtk_soc_data mt7622_
+@@ -3445,6 +3503,7 @@ static const struct mtk_soc_data mt7622_
};
static const struct mtk_soc_data mt7623_data = {
.caps = MT7623_CAPS | MTK_HWLRO,
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7623_CLKS_BITMAP,
-@@ -3440,6 +3499,7 @@ static const struct mtk_soc_data mt7623_
+@@ -3457,6 +3516,7 @@ static const struct mtk_soc_data mt7623_
};
static const struct mtk_soc_data mt7629_data = {
.ana_rgc3 = 0x128,
.caps = MT7629_CAPS | MTK_HWLRO,
.hw_features = MTK_HW_FEATURES,
-@@ -3452,6 +3512,7 @@ static const struct mtk_soc_data mt7629_
+@@ -3469,6 +3529,7 @@ static const struct mtk_soc_data mt7629_
};
static const struct mtk_soc_data rt5350_data = {
return true;
}
-@@ -905,7 +909,7 @@ static int mtk_init_fq_dma(struct mtk_et
+@@ -916,7 +920,7 @@ static int mtk_init_fq_dma(struct mtk_et
phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
for (i = 0; i < cnt; i++) {
txd = (void *)eth->scratch_ring + i * soc->txrx.txd_size;
txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
-@@ -915,6 +919,12 @@ static int mtk_init_fq_dma(struct mtk_et
+@@ -926,6 +930,12 @@ static int mtk_init_fq_dma(struct mtk_et
txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
txd->txd4 = 0;
}
mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
-@@ -1018,10 +1028,12 @@ static void setup_tx_buf(struct mtk_eth
+@@ -1029,10 +1039,12 @@ static void setup_tx_buf(struct mtk_eth
}
}
u32 data;
WRITE_ONCE(desc->txd1, info->addr);
-@@ -1045,6 +1057,59 @@ static void mtk_tx_set_dma_desc(struct n
+@@ -1056,6 +1068,59 @@ static void mtk_tx_set_dma_desc(struct n
WRITE_ONCE(desc->txd4, data);
}
static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
int tx_num, struct mtk_tx_ring *ring, bool gso)
{
-@@ -1053,6 +1118,7 @@ static int mtk_tx_map(struct sk_buff *sk
+@@ -1064,6 +1129,7 @@ static int mtk_tx_map(struct sk_buff *sk
.gso = gso,
.csum = skb->ip_summed == CHECKSUM_PARTIAL,
.vlan = skb_vlan_tag_present(skb),
.vlan_tci = skb_vlan_tag_get(skb),
.first = true,
.last = !skb_is_nonlinear(skb),
-@@ -1112,7 +1178,9 @@ static int mtk_tx_map(struct sk_buff *sk
+@@ -1123,7 +1189,9 @@ static int mtk_tx_map(struct sk_buff *sk
}
memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
!(frag_size - txd_info.size);
txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
-@@ -1193,17 +1261,16 @@ err_dma:
+@@ -1204,17 +1272,16 @@ err_dma:
return -ENOMEM;
}
}
} else {
nfrags += skb_shinfo(skb)->nr_frags;
-@@ -1255,7 +1322,7 @@ static netdev_tx_t mtk_start_xmit(struct
+@@ -1266,7 +1333,7 @@ static netdev_tx_t mtk_start_xmit(struct
if (unlikely(test_bit(MTK_RESETTING, ð->state)))
goto drop;
if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
netif_stop_queue(dev);
netif_err(eth, tx_queued, dev,
-@@ -1347,7 +1414,7 @@ static int mtk_poll_rx(struct napi_struc
+@@ -1358,7 +1425,7 @@ static int mtk_poll_rx(struct napi_struc
int idx;
struct sk_buff *skb;
u8 *data, *new_data;
int done = 0, bytes = 0;
while (done < budget) {
-@@ -1355,7 +1422,7 @@ static int mtk_poll_rx(struct napi_struc
+@@ -1366,7 +1433,7 @@ static int mtk_poll_rx(struct napi_struc
unsigned int pktlen;
dma_addr_t dma_addr;
u32 hash, reason;
ring = mtk_get_rx_ring(eth);
if (unlikely(!ring))
-@@ -1365,16 +1432,15 @@ static int mtk_poll_rx(struct napi_struc
+@@ -1376,16 +1443,15 @@ static int mtk_poll_rx(struct napi_struc
rxd = (void *)ring->dma + idx * eth->soc->txrx.rxd_size;
data = ring->data[idx];
if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
!eth->netdev[mac]))
-@@ -1417,7 +1483,7 @@ static int mtk_poll_rx(struct napi_struc
+@@ -1431,7 +1497,7 @@ static int mtk_poll_rx(struct napi_struc
pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
skb->dev = netdev;
skb_put(skb, pktlen);
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
-@@ -1435,10 +1501,25 @@ static int mtk_poll_rx(struct napi_struc
+@@ -1449,10 +1515,25 @@ static int mtk_poll_rx(struct napi_struc
mtk_ppe_check_skb(eth->ppe, skb,
trxd.rxd4 & MTK_RXD4_FOE_ENTRY);
skb_record_rx_queue(skb, 0);
napi_gro_receive(napi, skb);
-@@ -1450,7 +1531,7 @@ release_desc:
+@@ -1464,7 +1545,7 @@ release_desc:
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
rxd->rxd2 = RX_DMA_LSO;
else
ring->calc_idx = idx;
-@@ -1652,7 +1733,8 @@ static int mtk_napi_rx(struct napi_struc
+@@ -1666,7 +1747,8 @@ static int mtk_napi_rx(struct napi_struc
do {
int rx_done;
rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
rx_done_total += rx_done;
-@@ -1666,10 +1748,11 @@ static int mtk_napi_rx(struct napi_struc
+@@ -1680,10 +1762,11 @@ static int mtk_napi_rx(struct napi_struc
if (rx_done_total == budget)
return budget;
return rx_done_total;
}
-@@ -1679,7 +1762,7 @@ static int mtk_tx_alloc(struct mtk_eth *
+@@ -1693,7 +1776,7 @@ static int mtk_tx_alloc(struct mtk_eth *
const struct mtk_soc_data *soc = eth->soc;
struct mtk_tx_ring *ring = &eth->tx_ring;
int i, sz = soc->txrx.txd_size;
ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
GFP_KERNEL);
-@@ -1699,13 +1782,19 @@ static int mtk_tx_alloc(struct mtk_eth *
+@@ -1713,13 +1796,19 @@ static int mtk_tx_alloc(struct mtk_eth *
txd->txd2 = next_ptr;
txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
txd->txd4 = 0;
ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
&ring->phys_pdma, GFP_KERNEL);
if (!ring->dma_pdma)
-@@ -1785,13 +1874,11 @@ static int mtk_rx_alloc(struct mtk_eth *
+@@ -1799,13 +1888,11 @@ static int mtk_rx_alloc(struct mtk_eth *
struct mtk_rx_ring *ring;
int rx_data_len, rx_dma_size;
int i;
} else {
ring = &eth->rx_ring[ring_no];
}
-@@ -1824,7 +1911,7 @@ static int mtk_rx_alloc(struct mtk_eth *
+@@ -1841,7 +1928,7 @@ static int mtk_rx_alloc(struct mtk_eth *
return -ENOMEM;
for (i = 0; i < rx_dma_size; i++) {
dma_addr_t dma_addr = dma_map_single(eth->dma_dev,
ring->data[i] + NET_SKB_PAD + eth->ip_align,
-@@ -1839,26 +1926,47 @@ static int mtk_rx_alloc(struct mtk_eth *
+@@ -1856,26 +1943,47 @@ static int mtk_rx_alloc(struct mtk_eth *
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
rxd->rxd2 = RX_DMA_LSO;
else
return 0;
}
-@@ -2280,7 +2388,7 @@ static irqreturn_t mtk_handle_irq_rx(int
+@@ -2297,7 +2405,7 @@ static irqreturn_t mtk_handle_irq_rx(int
eth->rx_events++;
if (likely(napi_schedule_prep(&eth->rx_napi))) {
__napi_schedule(&eth->rx_napi);
}
return IRQ_HANDLED;
-@@ -2304,8 +2412,10 @@ static irqreturn_t mtk_handle_irq(int ir
+@@ -2321,8 +2429,10 @@ static irqreturn_t mtk_handle_irq(int ir
struct mtk_eth *eth = _eth;
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
mtk_handle_irq_rx(irq, _eth);
}
if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
-@@ -2323,16 +2433,16 @@ static void mtk_poll_controller(struct n
+@@ -2340,16 +2450,16 @@ static void mtk_poll_controller(struct n
struct mtk_eth *eth = mac->hw;
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
int err;
-@@ -2343,12 +2453,19 @@ static int mtk_start_dma(struct mtk_eth
+@@ -2360,12 +2470,19 @@ static int mtk_start_dma(struct mtk_eth
}
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
mtk_w32(eth,
MTK_RX_DMA_EN | rx_2b_offset |
MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
-@@ -2420,7 +2537,7 @@ static int mtk_open(struct net_device *d
+@@ -2437,7 +2554,7 @@ static int mtk_open(struct net_device *d
napi_enable(&eth->tx_napi);
napi_enable(&eth->rx_napi);
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
refcount_set(&eth->dma_refcnt, 1);
}
else
-@@ -2472,7 +2589,7 @@ static int mtk_stop(struct net_device *d
+@@ -2489,7 +2606,7 @@ static int mtk_stop(struct net_device *d
mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
napi_disable(&eth->tx_napi);
napi_disable(&eth->rx_napi);
-@@ -2632,9 +2749,25 @@ static int mtk_hw_init(struct mtk_eth *e
+@@ -2649,9 +2766,25 @@ static int mtk_hw_init(struct mtk_eth *e
return 0;
}
if (eth->pctl) {
/* Set GE2 driving and slew rate */
-@@ -2673,11 +2806,47 @@ static int mtk_hw_init(struct mtk_eth *e
+@@ -2690,11 +2823,47 @@ static int mtk_hw_init(struct mtk_eth *e
/* FE int grouping */
mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
return 0;
err_disable_pm:
-@@ -3214,12 +3383,8 @@ static int mtk_probe(struct platform_dev
+@@ -3231,12 +3400,8 @@ static int mtk_probe(struct platform_dev
if (IS_ERR(eth->base))
return PTR_ERR(eth->base);
spin_lock_init(&eth->page_lock);
spin_lock_init(&eth->tx_irq_lock);
-@@ -3455,6 +3620,10 @@ static const struct mtk_soc_data mt2701_
+@@ -3472,6 +3637,10 @@ static const struct mtk_soc_data mt2701_
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
},
};
-@@ -3468,6 +3637,10 @@ static const struct mtk_soc_data mt7621_
+@@ -3485,6 +3654,10 @@ static const struct mtk_soc_data mt7621_
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
},
};
-@@ -3482,6 +3655,10 @@ static const struct mtk_soc_data mt7622_
+@@ -3499,6 +3672,10 @@ static const struct mtk_soc_data mt7622_
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
},
};
-@@ -3495,6 +3672,10 @@ static const struct mtk_soc_data mt7623_
+@@ -3512,6 +3689,10 @@ static const struct mtk_soc_data mt7623_
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
},
};
-@@ -3508,6 +3689,10 @@ static const struct mtk_soc_data mt7629_
+@@ -3525,6 +3706,10 @@ static const struct mtk_soc_data mt7629_
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
},
};
-@@ -3520,6 +3705,10 @@ static const struct mtk_soc_data rt5350_
+@@ -3537,6 +3722,10 @@ static const struct mtk_soc_data rt5350_
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -935,18 +935,15 @@ static int mtk_init_fq_dma(struct mtk_et
+@@ -946,18 +946,15 @@ static int mtk_init_fq_dma(struct mtk_et
return 0;
}
return &ring->buf[idx];
}
-@@ -954,13 +951,12 @@ static struct mtk_tx_buf *mtk_desc_to_tx
+@@ -965,13 +962,12 @@ static struct mtk_tx_buf *mtk_desc_to_tx
static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
struct mtk_tx_dma *dma)
{
}
static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
-@@ -1377,7 +1373,7 @@ static struct mtk_rx_ring *mtk_get_rx_ri
+@@ -1388,7 +1384,7 @@ static struct mtk_rx_ring *mtk_get_rx_ri
ring = &eth->rx_ring[i];
idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
if (rxd->rxd2 & RX_DMA_DONE) {
ring->calc_idx_update = true;
return ring;
-@@ -1429,7 +1425,7 @@ static int mtk_poll_rx(struct napi_struc
+@@ -1440,7 +1436,7 @@ static int mtk_poll_rx(struct napi_struc
goto rx_done;
idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
data = ring->data[idx];
if (!mtk_rx_get_desc(eth, &trxd, rxd))
-@@ -1633,7 +1629,7 @@ static int mtk_poll_tx_pdma(struct mtk_e
+@@ -1647,7 +1643,7 @@ static int mtk_poll_tx_pdma(struct mtk_e
mtk_tx_unmap(eth, tx_buf, true);
ring->last_free = desc;
atomic_inc(&ring->free_count);
-@@ -1778,7 +1774,7 @@ static int mtk_tx_alloc(struct mtk_eth *
+@@ -1792,7 +1788,7 @@ static int mtk_tx_alloc(struct mtk_eth *
int next = (i + 1) % MTK_DMA_SIZE;
u32 next_ptr = ring->phys + next * sz;
txd->txd2 = next_ptr;
txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
txd->txd4 = 0;
-@@ -1808,7 +1804,7 @@ static int mtk_tx_alloc(struct mtk_eth *
+@@ -1822,7 +1818,7 @@ static int mtk_tx_alloc(struct mtk_eth *
ring->dma_size = MTK_DMA_SIZE;
atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
ring->last_free = (void *)txd;
ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz));
ring->thresh = MAX_SKB_FRAGS;
-@@ -1920,7 +1916,7 @@ static int mtk_rx_alloc(struct mtk_eth *
+@@ -1937,7 +1933,7 @@ static int mtk_rx_alloc(struct mtk_eth *
if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
return -ENOMEM;
rxd->rxd1 = (unsigned int)dma_addr;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
-@@ -1982,7 +1978,7 @@ static void mtk_rx_clean(struct mtk_eth
+@@ -1999,7 +1995,7 @@ static void mtk_rx_clean(struct mtk_eth
if (!ring->data[i])
continue;
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -911,7 +911,7 @@ static int mtk_init_fq_dma(struct mtk_et
+@@ -922,7 +922,7 @@ static int mtk_init_fq_dma(struct mtk_et
for (i = 0; i < cnt; i++) {
struct mtk_tx_dma_v2 *txd;
};
void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
-@@ -3692,6 +3729,21 @@ static const struct mtk_soc_data mt7629_
+@@ -3709,6 +3746,21 @@ static const struct mtk_soc_data mt7629_
},
};
static const struct mtk_soc_data rt5350_data = {
.reg_map = &mt7628_reg_map,
.caps = MT7628_CAPS,
-@@ -3714,6 +3766,7 @@ const struct of_device_id of_mtk_match[]
+@@ -3731,6 +3783,7 @@ const struct of_device_id of_mtk_match[]
{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -1451,8 +1451,8 @@ static int mtk_poll_rx(struct napi_struc
+@@ -1462,8 +1462,8 @@ static int mtk_poll_rx(struct napi_struc
int done = 0, bytes = 0;
while (done < budget) {
dma_addr_t dma_addr;
u32 hash, reason;
int mac = 0;
-@@ -1516,7 +1516,13 @@ static int mtk_poll_rx(struct napi_struc
+@@ -1530,7 +1530,13 @@ static int mtk_poll_rx(struct napi_struc
pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
skb->dev = netdev;
skb_put(skb, pktlen);
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
-@@ -3739,6 +3745,7 @@ static const struct mtk_soc_data mt7986_
+@@ -3756,6 +3762,7 @@ static const struct mtk_soc_data mt7986_
.txd_size = sizeof(struct mtk_tx_dma_v2),
.rxd_size = sizeof(struct mtk_rx_dma_v2),
.rx_irq_done_mask = MTK_RX_DONE_INT_V2,
+++ /dev/null
-From: Chen Lin <chen45464546@163.com>
-Date: Wed, 8 Jun 2022 20:46:53 +0800
-Subject: [PATCH] net: ethernet: mtk_eth_soc: fix misuse of mem alloc interface
- netdev[napi]_alloc_frag
-
-When rx_flag == MTK_RX_FLAGS_HWLRO,
-rx_data_len = MTK_MAX_LRO_RX_LENGTH(4096 * 3) > PAGE_SIZE.
-netdev_alloc_frag is for alloction of page fragment only.
-Reference to other drivers and Documentation/vm/page_frags.rst
-
-Branch to use __get_free_pages when ring->frag_size > PAGE_SIZE.
-
-Signed-off-by: Chen Lin <chen45464546@163.com>
-Link: https://lore.kernel.org/r/1654692413-2598-1-git-send-email-chen45464546@163.com
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
-
---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -917,6 +917,17 @@ static bool mtk_rx_get_desc(struct mtk_e
- return true;
- }
-
-+static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
-+{
-+ unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
-+ unsigned long data;
-+
-+ data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
-+ get_order(size));
-+
-+ return (void *)data;
-+}
-+
- /* the qdma core needs scratch memory to be setup */
- static int mtk_init_fq_dma(struct mtk_eth *eth)
- {
-@@ -1485,7 +1496,10 @@ static int mtk_poll_rx(struct napi_struc
- goto release_desc;
-
- /* alloc new buffer */
-- new_data = napi_alloc_frag(ring->frag_size);
-+ if (ring->frag_size <= PAGE_SIZE)
-+ new_data = napi_alloc_frag(ring->frag_size);
-+ else
-+ new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
- if (unlikely(!new_data)) {
- netdev->stats.rx_dropped++;
- goto release_desc;
-@@ -1938,7 +1952,10 @@ static int mtk_rx_alloc(struct mtk_eth *
- return -ENOMEM;
-
- for (i = 0; i < rx_dma_size; i++) {
-- ring->data[i] = netdev_alloc_frag(ring->frag_size);
-+ if (ring->frag_size <= PAGE_SIZE)
-+ ring->data[i] = netdev_alloc_frag(ring->frag_size);
-+ else
-+ ring->data[i] = mtk_max_lro_buf_alloc(GFP_KERNEL);
- if (!ring->data[i])
- return -ENOMEM;
- }
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
-@@ -33,7 +33,7 @@ config CRYPTO_FIPS
+@@ -34,7 +34,7 @@ config CRYPTO_FIPS
this is.
config CRYPTO_ALGAPI
select CRYPTO_ALGAPI2
help
This option provides the API for cryptographic algorithms.
-@@ -42,7 +42,7 @@ config CRYPTO_ALGAPI2
+@@ -43,7 +43,7 @@ config CRYPTO_ALGAPI2
tristate
config CRYPTO_AEAD
select CRYPTO_AEAD2
select CRYPTO_ALGAPI
-@@ -53,7 +53,7 @@ config CRYPTO_AEAD2
+@@ -54,7 +54,7 @@ config CRYPTO_AEAD2
select CRYPTO_RNG2
config CRYPTO_SKCIPHER
select CRYPTO_SKCIPHER2
select CRYPTO_ALGAPI
-@@ -63,7 +63,7 @@ config CRYPTO_SKCIPHER2
+@@ -64,7 +64,7 @@ config CRYPTO_SKCIPHER2
select CRYPTO_RNG2
config CRYPTO_HASH
select CRYPTO_HASH2
select CRYPTO_ALGAPI
-@@ -72,7 +72,7 @@ config CRYPTO_HASH2
+@@ -73,7 +73,7 @@ config CRYPTO_HASH2
select CRYPTO_ALGAPI2
config CRYPTO_RNG
bool
--- a/lib/Kconfig
+++ b/lib/Kconfig
-@@ -440,16 +440,16 @@ config BCH_CONST_T
+@@ -443,16 +443,16 @@ config BCH_CONST_T
# Textsearch support is select'ed if needed
#
config TEXTSEARCH
that can be interpreted by the ASN.1 stream decoder and used to
--- a/lib/Kconfig
+++ b/lib/Kconfig
-@@ -611,7 +611,7 @@ config LIBFDT
+@@ -614,7 +614,7 @@ config LIBFDT
bool
config OID_REGISTRY
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
-@@ -120,13 +120,13 @@ config CRYPTO_MANAGER
+@@ -121,13 +121,13 @@ config CRYPTO_MANAGER
cbc(aes).
config CRYPTO_MANAGER2
#define QUECTEL_VENDOR_ID 0x2c7c
/* These Quectel products use Quectel's vendor ID */
-@@ -1129,6 +1131,11 @@ static const struct usb_device_id option
+@@ -1131,6 +1133,11 @@ static const struct usb_device_id option
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0, 0) },
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
.driver_info = RSVD(4) },
#define JFFS2_NODE_ACCURATE 0x2000
--- a/lib/Kconfig
+++ b/lib/Kconfig
-@@ -337,6 +337,12 @@ config ZSTD_DECOMPRESS
+@@ -340,6 +340,12 @@ config ZSTD_DECOMPRESS
source "lib/xz/Kconfig"
},
[PORT_NPCM] = {
.name = "Nuvoton 16550",
-@@ -2764,6 +2764,11 @@ serial8250_do_set_termios(struct uart_po
+@@ -2766,6 +2766,11 @@ serial8250_do_set_termios(struct uart_po
unsigned long flags;
unsigned int baud, quot, frac = 0;