Refresh patches.
Drop patches that have been upstreamed:
target/linux/ar71xx/patches-4.9/106-01-MIPS-ath79-fix-AR724X_PLL_REG_PCIE_CONFIG-offset.patch
target/linux/generic/backport-4.9/095-v4.12-ipv6-Need-to-export-ipv6_push_frag_opts-for-tunnelin.patch
target/linux/generic/pending-4.9/180-net-phy-at803x-add-support-for-AT8032.patch
target/linux/generic/pending-4.9/181-net-usb-add-lte-modem-wistron-neweb-d18q1.patch
target/linux/generic/pending-4.9/182-net-qmi_wwan-add-BroadMobi-BM806U-2020-2033.patch
Compile & run tested: ar71xx (Archer C7 v2)
Signed-off-by: Kevin Darbyshire-Bryant <ldir@darbyshire-bryant.me.uk>
LINUX_VERSION-3.18 = .71
LINUX_VERSION-4.4 = .121
-LINUX_VERSION-4.9 = .102
+LINUX_VERSION-4.9 = .105
LINUX_VERSION-4.14 = .44
LINUX_KERNEL_HASH-3.18.71 = 5abc9778ad44ce02ed6c8ab52ece8a21c6d20d21f6ed8a19287b4a38a50c1240
LINUX_KERNEL_HASH-4.4.121 = 44a88268b5088dc326b30c9b9133ac35a9a200b636b7268d08f32abeae6ca729
-LINUX_KERNEL_HASH-4.9.102 = d155a36ba52d5809805cd370902582ac373c5b23a958c6424325684447119dc5
+LINUX_KERNEL_HASH-4.9.105 = d085d228e3ac1fdbdf5a31bb8154e4e8a0943a9085f0384842601db8e9d96dc4
LINUX_KERNEL_HASH-4.14.44 = 2eb356e6af25f6ca65affe7704be8c4e0cdf224505e7441ac9d5b6e8d96ec8e4
remove_uri_prefix=$(subst git://,,$(subst http://,,$(subst https://,,$(1))))
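For context (illustration only, not part of the diff): remove_uri_prefix simply strips a leading git://, http:// or https:// scheme from a URI via the nested $(subst) calls above, so a hypothetical invocation expands like this:
# $(call remove_uri_prefix,https://git.kernel.org/pub/scm/linux.git)
# -> git.kernel.org/pub/scm/linux.git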
+++ /dev/null
-From 0f15814bcdf59f10b708a3fba636acb089e9a4f1 Mon Sep 17 00:00:00 2001
-From: Mathias Kresin <dev@kresin.me>
-Date: Thu, 30 Mar 2017 15:34:39 +0200
-Subject: [PATCH] MIPS: ath79: fix AR724X_PLL_REG_PCIE_CONFIG offset
-
-According to the QCA u-boot source the "PCIE Phase Lock Loop
-Configuration (PCIE_PLL_CONFIG)" register is for all SoCs except the
-QCA955X and QCA956X at offset 0x10.
-
-Since the PCIE PLL config register is only defined for the AR724x fix
-only this value. The value is wrong since the day it was added and isn't
-yet used by any driver.
-
-Signed-off-by: Mathias Kresin <dev@kresin.me>
----
- arch/mips/include/asm/mach-ath79/ar71xx_regs.h | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
-+++ b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
-@@ -167,7 +167,7 @@
- #define AR71XX_AHB_DIV_MASK 0x7
-
- #define AR724X_PLL_REG_CPU_CONFIG 0x00
--#define AR724X_PLL_REG_PCIE_CONFIG 0x18
-+#define AR724X_PLL_REG_PCIE_CONFIG 0x10
-
- #define AR724X_PLL_FB_SHIFT 0
- #define AR724X_PLL_FB_MASK 0x3ff
#endif /* _PHY_AT803X_PDATA_H */
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
-@@ -264,6 +264,7 @@ static int at803x_resume(struct phy_devi
+@@ -263,6 +263,7 @@ static int at803x_resume(struct phy_devi
static int at803x_probe(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
struct at803x_priv *priv;
struct gpio_desc *gpiod_reset;
-@@ -276,6 +277,12 @@ static int at803x_probe(struct phy_devic
- phydev->drv->phy_id != ATH8032_PHY_ID)
+@@ -274,6 +275,12 @@ static int at803x_probe(struct phy_devic
+ if (phydev->drv->phy_id != ATH8030_PHY_ID)
goto does_not_require_reset_workaround;
+ pdata = dev_get_platdata(dev);
gpiod_reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(gpiod_reset))
return PTR_ERR(gpiod_reset);
-@@ -407,15 +414,23 @@ static void at803x_link_change_notify(st
+@@ -405,15 +412,23 @@ static void at803x_link_change_notify(st
* cannot recover from by software.
*/
if (phydev->state == PHY_NOLINK) {
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
-@@ -4369,6 +4369,9 @@ static int _dwc2_hcd_suspend(struct usb_
+@@ -4381,6 +4381,9 @@ static int _dwc2_hcd_suspend(struct usb_
if (!HCD_HW_ACCESSIBLE(hcd))
goto unlock;
if (dc_lsize == 0)
r4k_blast_dcache = (void *)cache_noop;
else if (dc_lsize == 16)
-@@ -952,6 +964,8 @@ static void local_r4k_flush_cache_sigtra
+@@ -955,6 +967,8 @@ static void local_r4k_flush_cache_sigtra
}
R4600_HIT_CACHEOP_WAR_IMPL;
if (!cpu_has_ic_fills_f_dc) {
if (dc_lsize)
vaddr ? flush_dcache_line(addr & ~(dc_lsize - 1))
-@@ -1840,6 +1854,17 @@ static void coherency_setup(void)
+@@ -1843,6 +1857,17 @@ static void coherency_setup(void)
* silly idea of putting something else there ...
*/
switch (current_cpu_type()) {
case CPU_R4000PC:
case CPU_R4000SC:
case CPU_R4000MC:
-@@ -1886,6 +1911,15 @@ void r4k_cache_init(void)
+@@ -1889,6 +1914,15 @@ void r4k_cache_init(void)
extern void build_copy_page(void);
 struct cpuinfo_mips *c = &current_cpu_data;
probe_pcache();
probe_vcache();
setup_scache();
-@@ -1963,7 +1997,15 @@ void r4k_cache_init(void)
+@@ -1966,7 +2000,15 @@ void r4k_cache_init(void)
*/
local_r4k___flush_cache_all(NULL);
if (tcp_small_queue_check(sk, skb, 0))
break;
-@@ -3531,8 +3531,6 @@ void tcp_send_ack(struct sock *sk)
+@@ -3534,8 +3534,6 @@ void tcp_send_ack(struct sock *sk)
/* We do not want pure acks influencing TCP Small Queues or fq/pacing
* too much.
* SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
-@@ -5188,7 +5188,6 @@ error3:
+@@ -5200,7 +5200,6 @@ error3:
error2:
usb_put_hcd(hcd);
error1:
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
-@@ -1459,22 +1459,32 @@ static int bgmac_phy_connect(struct bgma
+@@ -1460,22 +1460,32 @@ static int bgmac_phy_connect(struct bgma
return 0;
}
net_dev->irq = bgmac->irq;
SET_NETDEV_DEV(net_dev, bgmac->dev);
-@@ -1501,7 +1511,7 @@ int bgmac_enet_probe(struct bgmac *info)
+@@ -1502,7 +1512,7 @@ int bgmac_enet_probe(struct bgmac *info)
err = bgmac_dma_alloc(bgmac);
if (err) {
dev_err(bgmac->dev, "Unable to alloc memory for DMA\n");
}
bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK;
-@@ -1537,8 +1547,7 @@ err_phy_disconnect:
+@@ -1538,8 +1548,7 @@ err_phy_disconnect:
phy_disconnect(net_dev->phydev);
err_dma_free:
bgmac_dma_free(bgmac);
enet_dmac_writel(priv, priv->dma_chan_int_mask,
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
-@@ -1145,7 +1145,7 @@ static int bgmac_poll(struct napi_struct
+@@ -1146,7 +1146,7 @@ static int bgmac_poll(struct napi_struct
return weight;
if (handled < weight) {
return received;
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
-@@ -3193,7 +3193,7 @@ static int gfar_poll_rx_sq(struct napi_s
+@@ -3197,7 +3197,7 @@ static int gfar_poll_rx_sq(struct napi_s
if (work_done < budget) {
u32 imask;
/* Clear the halt bit in RSTAT */
 gfar_write(&regs->rstat, gfargrp->rstat);
-@@ -3282,7 +3282,7 @@ static int gfar_poll_rx(struct napi_stru
+@@ -3286,7 +3286,7 @@ static int gfar_poll_rx(struct napi_stru
if (!num_act_queues) {
u32 imask;
* then check once more to make sure we are done.
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
-@@ -1027,7 +1027,7 @@ restart_poll:
+@@ -1028,7 +1028,7 @@ restart_poll:
if (frames_processed < budget) {
enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
-@@ -2701,11 +2701,9 @@ static int mvneta_poll(struct napi_struc
+@@ -2702,11 +2702,9 @@ static int mvneta_poll(struct napi_struc
rx_done = mvneta_rx_swbm(pp, budget, &pp->rxqs[rx_queue]);
}
dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA);
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
-@@ -2693,7 +2693,7 @@ static int stmmac_poll(struct napi_struc
+@@ -2705,7 +2705,7 @@ static int stmmac_poll(struct napi_struc
work_done = stmmac_rx(priv, budget);
if (work_done < budget) {
adapter->rx_last_jiffies = jiffies;
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
-@@ -1851,7 +1851,7 @@ vmxnet3_poll(struct napi_struct *napi, i
+@@ -1873,7 +1873,7 @@ vmxnet3_poll(struct napi_struct *napi, i
rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);
if (rxd_done < budget) {
vmxnet3_enable_all_intrs(rx_queue->adapter);
}
return rxd_done;
-@@ -1882,7 +1882,7 @@ vmxnet3_poll_rx_only(struct napi_struct
+@@ -1904,7 +1904,7 @@ vmxnet3_poll_rx_only(struct napi_struct
rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
if (rxd_done < budget) {
*/
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
-@@ -1061,7 +1061,7 @@ err:
+@@ -1064,7 +1064,7 @@ err:
if (work_done < budget) {
int more_to_do = 0;
+++ /dev/null
-From 5b8481fa42ac58484d633b558579e302aead64c1 Mon Sep 17 00:00:00 2001
-From: "David S. Miller" <davem@davemloft.net>
-Date: Mon, 1 May 2017 15:10:20 -0400
-Subject: [PATCH] ipv6: Need to export ipv6_push_frag_opts for tunneling now.
-
-Since that change also made the nfrag function not necessary
-for exports, remove it.
-
-Fixes: 89a23c8b528b ("ip6_tunnel: Fix missing tunnel encapsulation limit option")
-Signed-off-by: David S. Miller <davem@davemloft.net>
----
- net/ipv6/exthdrs.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
-index d32e2110aff2..b636f1da9aec 100644
---- a/net/ipv6/exthdrs.c
-+++ b/net/ipv6/exthdrs.c
-@@ -946,13 +946,13 @@ void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
- if (opt->hopopt)
- ipv6_push_exthdr(skb, proto, NEXTHDR_HOP, opt->hopopt);
- }
--EXPORT_SYMBOL(ipv6_push_nfrag_opts);
-
- void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, u8 *proto)
- {
- if (opt->dst1opt)
- ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst1opt);
- }
-+EXPORT_SYMBOL(ipv6_push_frag_opts);
-
- struct ipv6_txoptions *
- ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt)
---
-2.17.1
-
/*
* IEEE 802.3 Ethernet magic constants. The frame sizes omit the preamble
-@@ -138,11 +139,13 @@
+@@ -139,11 +140,13 @@
* This is an Ethernet frame header.
*/
#include <linux/bcm47xx_nvram.h>
#include "bgmac.h"
-@@ -1387,6 +1388,17 @@ static const struct ethtool_ops bgmac_et
+@@ -1388,6 +1389,17 @@ static const struct ethtool_ops bgmac_et
.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
/**************************************************
* MII
**************************************************/
-@@ -1533,6 +1545,14 @@ int bgmac_enet_probe(struct bgmac *bgmac
+@@ -1534,6 +1546,14 @@ int bgmac_enet_probe(struct bgmac *bgmac
net_dev->hw_features = net_dev->features;
net_dev->vlan_features = net_dev->features;
err = register_netdev(bgmac->net_dev);
if (err) {
dev_err(bgmac->dev, "Cannot register net device\n");
-@@ -1555,6 +1575,10 @@ EXPORT_SYMBOL_GPL(bgmac_enet_probe);
+@@ -1556,6 +1576,10 @@ EXPORT_SYMBOL_GPL(bgmac_enet_probe);
void bgmac_enet_remove(struct bgmac *bgmac)
{
static void __sk_free(struct sock *sk)
{
+#ifdef CONFIG_SOCK_DIAG
- if (unlikely(sock_diag_has_destroy_listeners(sk) && sk->sk_net_refcnt))
+ if (unlikely(sk->sk_net_refcnt && sock_diag_has_destroy_listeners(sk)))
sock_diag_broadcast_destroy(sk);
else
+#endif
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
-@@ -756,6 +756,24 @@ static int jffs2_mknod (struct inode *di
+@@ -752,6 +752,24 @@ static int jffs2_mknod (struct inode *di
return ret;
}
static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
struct inode *new_dir_i, struct dentry *new_dentry,
unsigned int flags)
-@@ -766,7 +784,7 @@ static int jffs2_rename (struct inode *o
+@@ -762,7 +780,7 @@ static int jffs2_rename (struct inode *o
uint8_t type;
uint32_t now;
return -EINVAL;
/* The VFS will check for us and prevent trying to rename a
-@@ -832,9 +850,14 @@ static int jffs2_rename (struct inode *o
+@@ -828,9 +846,14 @@ static int jffs2_rename (struct inode *o
if (d_is_dir(old_dentry) && !victim_f)
inc_nlink(new_dir_i);
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
-@@ -781,18 +781,31 @@ static int jffs2_rename (struct inode *o
+@@ -777,18 +777,31 @@ static int jffs2_rename (struct inode *o
int ret;
struct jffs2_sb_info *c = JFFS2_SB_INFO(old_dir_i->i_sb);
struct jffs2_inode_info *victim_f = NULL;
victim_f = JFFS2_INODE_INFO(d_inode(new_dentry));
if (d_is_dir(new_dentry)) {
struct jffs2_full_dirent *fd;
-@@ -827,7 +840,7 @@ static int jffs2_rename (struct inode *o
+@@ -823,7 +836,7 @@ static int jffs2_rename (struct inode *o
if (ret)
return ret;
/* There was a victim. Kill it off nicely */
if (d_is_dir(new_dentry))
clear_nlink(d_inode(new_dentry));
-@@ -853,6 +866,12 @@ static int jffs2_rename (struct inode *o
+@@ -849,6 +862,12 @@ static int jffs2_rename (struct inode *o
if (flags & RENAME_WHITEOUT)
/* Replace with whiteout */
ret = jffs2_whiteout(old_dir_i, old_dentry);
else
/* Unlink the original */
ret = jffs2_do_unlink(c, JFFS2_INODE_INFO(old_dir_i),
-@@ -884,7 +903,7 @@ static int jffs2_rename (struct inode *o
+@@ -880,7 +899,7 @@ static int jffs2_rename (struct inode *o
return ret;
}
+++ /dev/null
-From: Felix Fietkau <nbd@nbd.name>
-Subject: net: phy: at803x: add support for AT8032
-
-Like AT8030, this PHY needs the GPIO reset workaround
-
-Signed-off-by: Felix Fietkau <nbd@nbd.name>
----
-
---- a/drivers/net/phy/at803x.c
-+++ b/drivers/net/phy/at803x.c
-@@ -62,6 +62,7 @@
-
- #define ATH8030_PHY_ID 0x004dd076
- #define ATH8031_PHY_ID 0x004dd074
-+#define ATH8032_PHY_ID 0x004dd023
- #define ATH8035_PHY_ID 0x004dd072
-
- MODULE_DESCRIPTION("Atheros 803x PHY driver");
-@@ -259,7 +260,8 @@ static int at803x_probe(struct phy_devic
- if (!priv)
- return -ENOMEM;
-
-- if (phydev->drv->phy_id != ATH8030_PHY_ID)
-+ if (phydev->drv->phy_id != ATH8030_PHY_ID &&
-+ phydev->drv->phy_id != ATH8032_PHY_ID)
- goto does_not_require_reset_workaround;
-
- gpiod_reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
-@@ -335,7 +337,7 @@ static void at803x_link_change_notify(st
- struct at803x_priv *priv = phydev->priv;
-
- /*
-- * Conduct a hardware reset for AT8030 every time a link loss is
-+ * Conduct a hardware reset for AT8030/2 every time a link loss is
- * signalled. This is necessary to circumvent a hardware bug that
- * occurs when the cable is unplugged while TX packets are pending
- * in the FIFO. In such cases, the FIFO enters an error mode it
-@@ -447,6 +449,24 @@ static struct phy_driver at803x_driver[]
- .aneg_done = at803x_aneg_done,
- .ack_interrupt = &at803x_ack_interrupt,
- .config_intr = &at803x_config_intr,
-+}, {
-+ /* ATHEROS 8032 */
-+ .phy_id = ATH8032_PHY_ID,
-+ .name = "Atheros 8032 ethernet",
-+ .phy_id_mask = 0xffffffef,
-+ .probe = at803x_probe,
-+ .config_init = at803x_config_init,
-+ .link_change_notify = at803x_link_change_notify,
-+ .set_wol = at803x_set_wol,
-+ .get_wol = at803x_get_wol,
-+ .suspend = at803x_suspend,
-+ .resume = at803x_resume,
-+ .features = PHY_BASIC_FEATURES,
-+ .flags = PHY_HAS_INTERRUPT,
-+ .config_aneg = genphy_config_aneg,
-+ .read_status = genphy_read_status,
-+ .ack_interrupt = at803x_ack_interrupt,
-+ .config_intr = at803x_config_intr,
- } };
-
- module_phy_driver(at803x_driver);
-@@ -454,6 +474,7 @@ module_phy_driver(at803x_driver);
- static struct mdio_device_id __maybe_unused atheros_tbl[] = {
- { ATH8030_PHY_ID, 0xffffffef },
- { ATH8031_PHY_ID, 0xffffffef },
-+ { ATH8032_PHY_ID, 0xffffffef },
- { ATH8035_PHY_ID, 0xffffffef },
- { }
- };
+++ /dev/null
-From d4c4bc11353f3bea6754f7d21e3612c9f32d1d64 Mon Sep 17 00:00:00 2001
-From: Giuseppe Lippolis <giu.lippolis@gmail.com>
-Date: Mon, 26 Mar 2018 16:34:39 +0200
-Subject: [PATCH] net-usb: add qmi_wwan if on lte modem wistron neweb d18q1
-
-This modem is embedded on dlink dwr-921 router.
- The oem configuration states:
-
- T: Bus=02 Lev=01 Prnt=01 Port=00 Cnt=01 Dev#= 2 Spd=480 MxCh= 0
- D: Ver= 2.00 Cls=00(>ifc ) Sub=00 Prot=00 MxPS=64 #Cfgs= 1
- P: Vendor=1435 ProdID=0918 Rev= 2.32
- S: Manufacturer=Android
- S: Product=Android
- S: SerialNumber=0123456789ABCDEF
- C:* #Ifs= 7 Cfg#= 1 Atr=80 MxPwr=500mA
- I:* If#= 0 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=ff Driver=option
- E: Ad=81(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
- E: Ad=01(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
- I:* If#= 1 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=42 Prot=01 Driver=(none)
- E: Ad=82(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
- E: Ad=02(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
- I:* If#= 2 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
- E: Ad=84(I) Atr=03(Int.) MxPS= 64 Ivl=32ms
- E: Ad=83(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
- E: Ad=03(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
- I:* If#= 3 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=ff Driver=qmi_wwan
- E: Ad=86(I) Atr=03(Int.) MxPS= 64 Ivl=32ms
- E: Ad=85(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
- E: Ad=04(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
- I:* If#= 4 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=ff Driver=qmi_wwan
- E: Ad=88(I) Atr=03(Int.) MxPS= 64 Ivl=32ms
- E: Ad=87(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
- E: Ad=05(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
- I:* If#= 5 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=ff Driver=qmi_wwan
- E: Ad=8a(I) Atr=03(Int.) MxPS= 64 Ivl=32ms
- E: Ad=89(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
- E: Ad=06(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
- I:* If#= 6 Alt= 0 #EPs= 2 Cls=08(stor.) Sub=06 Prot=50 Driver=(none)
- E: Ad=8b(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
- E: Ad=07(O) Atr=02(Bulk) MxPS= 512 Ivl=125us
-
-Tested on openwrt distribution
-
-Signed-off-by: Giuseppe Lippolis <giu.lippolis@gmail.com>
-Signed-off-by: David S. Miller <davem@davemloft.net>
----
- drivers/net/usb/qmi_wwan.c | 3 +++
- 1 file changed, 3 insertions(+)
-
---- a/drivers/net/usb/qmi_wwan.c
-+++ b/drivers/net/usb/qmi_wwan.c
-@@ -810,6 +810,9 @@ static const struct usb_device_id produc
- {QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
- {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
- {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
-+ {QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */
-+ {QMI_FIXED_INTF(0x1435, 0xd181, 4)}, /* Wistron NeWeb D18Q1 */
-+ {QMI_FIXED_INTF(0x1435, 0xd181, 5)}, /* Wistron NeWeb D18Q1 */
- {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
- {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */
- {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */
+++ /dev/null
-From 743989254ea9f132517806d8893ca9b6cf9dc86b Mon Sep 17 00:00:00 2001
-From: Pawel Dembicki <paweldembicki@gmail.com>
-Date: Sat, 24 Mar 2018 22:08:14 +0100
-Subject: [PATCH] net: qmi_wwan: add BroadMobi BM806U 2020:2033
-
-BroadMobi BM806U is an Qualcomm MDM9225 based 3G/4G modem.
-Tested hardware BM806U is mounted on D-Link DWR-921-C3 router.
-The USB id is added to qmi_wwan.c to allow QMI communication with
-the BM806U.
-
-Tested on 4.14 kernel and OpenWRT.
-
-Signed-off-by: Pawel Dembicki <paweldembicki@gmail.com>
-Signed-off-by: David S. Miller <davem@davemloft.net>
----
- drivers/net/usb/qmi_wwan.c | 1 +
- 1 file changed, 1 insertion(+)
-
---- a/drivers/net/usb/qmi_wwan.c
-+++ b/drivers/net/usb/qmi_wwan.c
-@@ -889,6 +889,7 @@ static const struct usb_device_id produc
- {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
- {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
- {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
-+ {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
- {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
- {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
- {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
if (!net_eq(dev_net(dev), sock_net(sk)))
goto drop;
-@@ -3260,6 +3262,7 @@ static int packet_create(struct net *net
+@@ -3262,6 +3264,7 @@ static int packet_create(struct net *net
mutex_init(&po->pg_vec_lock);
po->rollover = NULL;
po->prot_hook.func = packet_rcv;
if (sock->type == SOCK_PACKET)
po->prot_hook.func = packet_rcv_spkt;
-@@ -3873,6 +3876,16 @@ packet_setsockopt(struct socket *sock, i
+@@ -3875,6 +3878,16 @@ packet_setsockopt(struct socket *sock, i
po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
return 0;
}
default:
return -ENOPROTOOPT;
}
-@@ -3925,6 +3938,13 @@ static int packet_getsockopt(struct sock
+@@ -3927,6 +3940,13 @@ static int packet_getsockopt(struct sock
case PACKET_VNET_HDR:
val = po->has_vnet_hdr;
break;
}
static bool ip6_tnl_netlink_encap_parms(struct nlattr *data[],
-@@ -2041,6 +2252,12 @@ static void ip6_tnl_dellink(struct net_d
+@@ -2045,6 +2256,12 @@ static void ip6_tnl_dellink(struct net_d
static size_t ip6_tnl_get_size(const struct net_device *dev)
{
return
/* IFLA_IPTUN_LINK */
nla_total_size(4) +
-@@ -2068,6 +2285,24 @@ static size_t ip6_tnl_get_size(const str
+@@ -2072,6 +2289,24 @@ static size_t ip6_tnl_get_size(const str
nla_total_size(2) +
/* IFLA_IPTUN_COLLECT_METADATA */
nla_total_size(0) +
0;
}
-@@ -2075,6 +2310,9 @@ static int ip6_tnl_fill_info(struct sk_b
+@@ -2079,6 +2314,9 @@ static int ip6_tnl_fill_info(struct sk_b
{
struct ip6_tnl *tunnel = netdev_priv(dev);
struct __ip6_tnl_parm *parm = &tunnel->parms;
if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
nla_put_in6_addr(skb, IFLA_IPTUN_LOCAL, &parm->laddr) ||
-@@ -2083,9 +2321,27 @@ static int ip6_tnl_fill_info(struct sk_b
+@@ -2087,9 +2325,27 @@ static int ip6_tnl_fill_info(struct sk_b
nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) ||
nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) ||
if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, tunnel->encap.type) ||
nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT, tunnel->encap.sport) ||
nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) ||
-@@ -2123,6 +2379,7 @@ static const struct nla_policy ip6_tnl_p
+@@ -2127,6 +2383,7 @@ static const struct nla_policy ip6_tnl_p
[IFLA_IPTUN_ENCAP_SPORT] = { .type = NLA_U16 },
[IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 },
[IFLA_IPTUN_COLLECT_METADATA] = { .type = NLA_FLAG },
#define AT803X_DEBUG_ADDR 0x1D
#define AT803X_DEBUG_DATA 0x1E
-@@ -72,6 +79,7 @@ MODULE_LICENSE("GPL");
+@@ -71,6 +78,7 @@ MODULE_LICENSE("GPL");
struct at803x_priv {
bool phy_reset:1;
struct gpio_desc *gpiod_reset;
};
struct at803x_context {
-@@ -276,8 +284,16 @@ does_not_require_reset_workaround:
+@@ -274,8 +282,16 @@ does_not_require_reset_workaround:
return 0;
}
int ret;
ret = genphy_config_init(phydev);
-@@ -298,6 +314,26 @@ static int at803x_config_init(struct phy
+@@ -296,6 +312,26 @@ static int at803x_config_init(struct phy
return ret;
}
return 0;
}
-@@ -335,6 +371,8 @@ static int at803x_config_intr(struct phy
+@@ -333,6 +369,8 @@ static int at803x_config_intr(struct phy
static void at803x_link_change_notify(struct phy_device *phydev)
{
struct at803x_priv *priv = phydev->priv;
+ pdata = dev_get_platdata(&phydev->mdio.dev);
/*
- * Conduct a hardware reset for AT8030/2 every time a link loss is
-@@ -363,6 +401,24 @@ static void at803x_link_change_notify(st
+ * Conduct a hardware reset for AT8030 every time a link loss is
+@@ -361,6 +399,24 @@ static void at803x_link_change_notify(st
} else {
priv->phy_reset = false;
}
#define AT803X_MODE_CFG_MASK 0x0F
#define AT803X_MODE_CFG_SGMII 0x01
-@@ -295,6 +299,27 @@ static int at803x_config_init(struct phy
+@@ -293,6 +297,27 @@ static int at803x_config_init(struct phy
{
struct at803x_platform_data *pdata;
int ret;
/**
* ata_build_rw_tf - Build ATA taskfile for given read/write request
* @tf: Target ATA taskfile
-@@ -4988,6 +5001,9 @@ struct ata_queued_cmd *ata_qc_new_init(s
+@@ -4994,6 +5007,9 @@ struct ata_queued_cmd *ata_qc_new_init(s
if (tag < 0)
return NULL;
}
qc = __ata_qc_from_tag(ap, tag);
qc->tag = tag;
-@@ -5889,6 +5905,9 @@ struct ata_port *ata_port_alloc(struct a
+@@ -5895,6 +5911,9 @@ struct ata_port *ata_port_alloc(struct a
ap->stats.unhandled_irq = 1;
ap->stats.idle_irq = 1;
#endif
ata_sff_port_init(ap);
return ap;
-@@ -5910,6 +5929,12 @@ static void ata_host_release(struct devi
+@@ -5916,6 +5935,12 @@ static void ata_host_release(struct devi
kfree(ap->pmp_link);
kfree(ap->slave_link);
kfree(ap);
host->ports[i] = NULL;
}
-@@ -6356,7 +6381,23 @@ int ata_host_register(struct ata_host *h
+@@ -6362,7 +6387,23 @@ int ata_host_register(struct ata_host *h
host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
host->ports[i]->local_port_no = i + 1;
}
{
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
-@@ -396,6 +396,11 @@ config MAGIC_SYSRQ_DEFAULT_ENABLE
+@@ -410,6 +410,11 @@ config MAGIC_SYSRQ_DEFAULT_ENABLE
This may be set to 1 or 0 to enable or disable them all, or
to a bitmask as described in Documentation/sysrq.txt.
case UPIO_AU:
p->serial_out(p, offset, value);
p->serial_in(p, UART_LCR); /* safe, no side-effects */
-@@ -2759,6 +2779,7 @@ static int serial8250_request_std_resour
+@@ -2760,6 +2780,7 @@ static int serial8250_request_std_resour
case UPIO_MEM32BE:
case UPIO_MEM16:
case UPIO_MEM:
if (!port->mapbase)
break;
-@@ -2797,6 +2818,7 @@ static void serial8250_release_std_resou
+@@ -2798,6 +2819,7 @@ static void serial8250_release_std_resou
case UPIO_MEM32BE:
case UPIO_MEM16:
case UPIO_MEM:
#endif /* __SOC_BUS_H */
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
-@@ -35,6 +35,7 @@
+@@ -36,6 +36,7 @@
#define ETH_DATA_LEN 1500 /* Max. octets in payload */
#define ETH_FRAME_LEN 1514 /* Max. octets in frame sans FCS */
#define ETH_FCS_LEN 4 /* Octets in the FCS */
}
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
-@@ -4654,3 +4654,11 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IN
+@@ -4659,3 +4659,11 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IN
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2031, quirk_no_aersid);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2032, quirk_no_aersid);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2033, quirk_no_aersid);
.msi_prepare = its_fsl_mc_msi_prepare,
};
-@@ -95,8 +95,8 @@ int __init its_fsl_mc_msi_init(void)
+@@ -97,8 +97,8 @@ int __init its_fsl_mc_msi_init(void)
continue;
}
};
struct dmar_atsr_unit {
-@@ -4252,27 +4253,40 @@ static inline void init_iommu_pm_ops(voi
+@@ -4251,27 +4252,40 @@ static inline void init_iommu_pm_ops(voi
int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
{
struct acpi_dmar_reserved_memory *rmrr;
}
static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
-@@ -4486,6 +4500,7 @@ static void intel_iommu_free_dmars(void)
+@@ -4485,6 +4499,7 @@ static void intel_iommu_free_dmars(void)
list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
list_del(&rmrru->list);
dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
kfree(rmrru);
}
-@@ -5221,6 +5236,45 @@ static void intel_iommu_remove_device(st
+@@ -5220,6 +5235,45 @@ static void intel_iommu_remove_device(st
iommu_device_unlink(iommu->iommu_dev, dev);
}
#ifdef CONFIG_INTEL_IOMMU_SVM
#define MAX_NR_PASID_BITS (20)
static inline unsigned long intel_iommu_get_pts(struct intel_iommu *iommu)
-@@ -5351,19 +5405,21 @@ struct intel_iommu *intel_svm_device_to_
+@@ -5350,19 +5404,21 @@ struct intel_iommu *intel_svm_device_to_
#endif /* CONFIG_INTEL_IOMMU_SVM */
static const struct iommu_ops intel_iommu_ops = {
}
/*
-@@ -1899,9 +1922,13 @@ static int lpuart_probe(struct platform_
+@@ -1899,13 +1922,13 @@ static int lpuart_probe(struct platform_
ret = of_alias_get_id(np, "serial");
if (ret < 0) {
- dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
- return ret;
+- }
+- if (ret >= ARRAY_SIZE(lpuart_ports)) {
+- dev_err(&pdev->dev, "serial%d out of range\n", ret);
+- return -EINVAL;
+ ret = find_first_zero_bit(linemap, UART_NR);
+ if (ret >= UART_NR) {
+ dev_err(&pdev->dev, "port line is full, add device failed\n");
sport->port.line = ret;
sport->lpuart32 = of_device_is_compatible(np, "fsl,ls1021a-lpuart");
-@@ -1983,6 +2010,7 @@ static int lpuart_remove(struct platform
+@@ -1987,6 +2010,7 @@ static int lpuart_remove(struct platform
struct lpuart_port *sport = platform_get_drvdata(pdev);
uart_remove_one_port(&lpuart_reg, &sport->port);
clk_disable_unprepare(sport->clk);
-@@ -2067,12 +2095,10 @@ static int lpuart_resume(struct device *
+@@ -2071,12 +2095,10 @@ static int lpuart_resume(struct device *
if (sport->lpuart_dma_rx_use) {
if (sport->port.irq_wake) {
if (IS_ENABLED(CONFIG_USB_DWC3_HOST))
mode = USB_DR_MODE_HOST;
else if (IS_ENABLED(CONFIG_USB_DWC3_GADGET))
-@@ -213,8 +232,9 @@ static void dwc3_frame_length_adjustment
+@@ -227,8 +246,9 @@ static void dwc3_frame_length_adjustment
reg = dwc3_readl(dwc->regs, DWC3_GFLADJ);
dft = reg & DWC3_GFLADJ_30MHZ_MASK;
reg &= ~DWC3_GFLADJ_30MHZ_MASK;
reg |= DWC3_GFLADJ_30MHZ_SDBND_SEL | dwc->fladj;
dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
-@@ -585,6 +605,99 @@ static int dwc3_phy_setup(struct dwc3 *d
+@@ -599,6 +619,99 @@ static int dwc3_phy_setup(struct dwc3 *d
return 0;
}
static void dwc3_core_exit(struct dwc3 *dwc)
{
dwc3_event_buffers_cleanup(dwc);
-@@ -727,6 +840,8 @@ static int dwc3_core_init(struct dwc3 *d
+@@ -741,6 +854,8 @@ static int dwc3_core_init(struct dwc3 *d
if (ret)
goto err1;
/* Adjust Frame Length */
dwc3_frame_length_adjustment(dwc);
-@@ -925,11 +1040,117 @@ static void dwc3_core_exit_mode(struct d
+@@ -939,11 +1054,117 @@ static void dwc3_core_exit_mode(struct d
}
}
struct resource *res;
struct dwc3 *dwc;
u8 lpm_nyet_threshold;
-@@ -961,6 +1182,11 @@ static int dwc3_probe(struct platform_de
+@@ -975,6 +1196,11 @@ static int dwc3_probe(struct platform_de
dwc->xhci_resources[0].flags = res->flags;
dwc->xhci_resources[0].name = res->name;
res->start += DWC3_GLOBALS_REGS_START;
/*
-@@ -1003,6 +1229,12 @@ static int dwc3_probe(struct platform_de
+@@ -1017,6 +1243,12 @@ static int dwc3_probe(struct platform_de
dwc->usb3_lpm_capable = device_property_read_bool(dev,
"snps,usb3_lpm_capable");
dwc->disable_scramble_quirk = device_property_read_bool(dev,
"snps,disable_scramble_quirk");
dwc->u2exit_lfps_quirk = device_property_read_bool(dev,
-@@ -1047,6 +1279,8 @@ static int dwc3_probe(struct platform_de
+@@ -1061,6 +1293,8 @@ static int dwc3_probe(struct platform_de
dwc->hird_threshold = hird_threshold
| (dwc->is_utmi_l1_suspend << 4);
platform_set_drvdata(pdev, dwc);
dwc3_cache_hwparams(dwc);
-@@ -1070,6 +1304,11 @@ static int dwc3_probe(struct platform_de
+@@ -1084,6 +1318,11 @@ static int dwc3_probe(struct platform_de
if (ret < 0)
goto err1;
#define DWC3_GCTL_PRTCAP(n) (((n) & (3 << 12)) >> 12)
#define DWC3_GCTL_PRTCAPDIR(n) ((n) << 12)
#define DWC3_GCTL_PRTCAP_HOST 1
-@@ -292,6 +318,10 @@
+@@ -294,6 +320,10 @@
/* Global Frame Length Adjustment Register */
#define DWC3_GFLADJ_30MHZ_SDBND_SEL (1 << 7)
#define DWC3_GFLADJ_30MHZ_MASK 0x3f
/* Global User Control Register 2 */
#define DWC3_GUCTL2_RST_ACTBITLATER (1 << 14)
-@@ -756,6 +786,7 @@ struct dwc3_scratchpad_array {
+@@ -758,6 +788,7 @@ struct dwc3_scratchpad_array {
* @regs: base address for our registers
* @regs_size: address space size
* @fladj: frame length adjustment
* @irq_gadget: peripheral controller's IRQ number
* @nr_scratch: number of scratch buffers
* @u1u2: only used on revisions <1.83a for workaround
-@@ -832,6 +863,7 @@ struct dwc3_scratchpad_array {
+@@ -834,6 +865,7 @@ struct dwc3_scratchpad_array {
* 1 - -3.5dB de-emphasis
* 2 - No de-emphasis
* 3 - Reserved
*/
struct dwc3 {
struct usb_ctrlrequest *ctrl_req;
-@@ -850,6 +882,7 @@ struct dwc3 {
+@@ -852,6 +884,7 @@ struct dwc3 {
spinlock_t lock;
struct device *dev;
struct platform_device *xhci;
struct resource xhci_resources[DWC3_XHCI_RESOURCES_NUM];
-@@ -875,6 +908,12 @@ struct dwc3 {
+@@ -877,6 +910,12 @@ struct dwc3 {
enum usb_phy_interface hsphy_mode;
u32 fladj;
u32 irq_gadget;
u32 nr_scratch;
u32 u1u2;
-@@ -951,9 +990,12 @@ struct dwc3 {
+@@ -953,9 +992,12 @@ struct dwc3 {
unsigned ep0_bounced:1;
unsigned ep0_expect_in:1;
unsigned has_hibernation:1;
unsigned pending_events:1;
unsigned pullups_connected:1;
unsigned setup_packet_pending:1;
-@@ -974,9 +1016,16 @@ struct dwc3 {
+@@ -976,9 +1018,16 @@ struct dwc3 {
unsigned dis_rxdet_inp3_quirk:1;
unsigned dis_u2_freeclk_exists_quirk:1;
unsigned dis_del_phy_power_chg_quirk:1;
curr_ep = get_ep_by_pipe(udc, i);
/* If the ep is configured */
-- if (curr_ep->name == NULL) {
+- if (!curr_ep->ep.name) {
+ if (strncmp(curr_ep->name, "ep", 2)) {
WARNING("Invalid EP?");
continue;
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
-@@ -137,6 +137,7 @@ src-plat-$(CONFIG_PPC_PSERIES) += pserie
+@@ -138,6 +138,7 @@ src-plat-$(CONFIG_PPC_PSERIES) += pserie
src-plat-$(CONFIG_PPC_POWERNV) += pseries-head.S
src-plat-$(CONFIG_PPC_IBM_CELL_BLADE) += pseries-head.S
src-plat-$(CONFIG_MVME7100) += motload-head.S mvme7100.c
src-wlib := $(sort $(src-wlib-y))
src-plat := $(sort $(src-plat-y))
-@@ -320,6 +321,7 @@ image-$(CONFIG_TQM8555) += cuImage.tqm
+@@ -321,6 +322,7 @@ image-$(CONFIG_TQM8555) += cuImage.tqm
image-$(CONFIG_TQM8560) += cuImage.tqm8560
image-$(CONFIG_SBC8548) += cuImage.sbc8548
image-$(CONFIG_KSI8560) += cuImage.ksi8560
priv->xstats.tx_clean++;
-@@ -1393,22 +1394,17 @@ static void stmmac_tx_clean(struct stmma
+@@ -1398,22 +1399,17 @@ static void stmmac_tx_clean(struct stmma
netdev_completed_queue(priv->dev, pkts_compl, bytes_compl);
if (unlikely(netif_queue_stopped(priv->dev) &&
}
static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv)
-@@ -1512,7 +1508,7 @@ static void stmmac_mmc_setup(struct stmm
+@@ -1517,7 +1513,7 @@ static void stmmac_mmc_setup(struct stmm
dwmac_mmc_ctrl(priv->mmcaddr, mode);
memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
} else
}
/**
-@@ -1525,18 +1521,18 @@ static void stmmac_mmc_setup(struct stmm
+@@ -1530,18 +1526,18 @@ static void stmmac_mmc_setup(struct stmm
static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
{
if (priv->plat->enh_desc) {
priv->hw->desc = &ndesc_ops;
}
}
-@@ -1577,8 +1573,8 @@ static void stmmac_check_ether_addr(stru
+@@ -1582,8 +1578,8 @@ static void stmmac_check_ether_addr(stru
priv->dev->dev_addr, 0);
if (!is_valid_ether_addr(priv->dev->dev_addr))
eth_hw_addr_random(priv->dev);
}
}
-@@ -1592,16 +1588,12 @@ static void stmmac_check_ether_addr(stru
+@@ -1597,16 +1593,12 @@ static void stmmac_check_ether_addr(stru
*/
static int stmmac_init_dma_engine(struct stmmac_priv *priv)
{
}
if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
-@@ -1613,8 +1605,8 @@ static int stmmac_init_dma_engine(struct
+@@ -1618,8 +1610,8 @@ static int stmmac_init_dma_engine(struct
return ret;
}
if (priv->synopsys_id >= DWMAC_CORE_4_00) {
priv->rx_tail_addr = priv->dma_rx_phy +
-@@ -1686,7 +1678,8 @@ static int stmmac_hw_setup(struct net_de
+@@ -1691,7 +1683,8 @@ static int stmmac_hw_setup(struct net_de
/* DMA initialization and SW reset */
ret = stmmac_init_dma_engine(priv);
if (ret < 0) {
return ret;
}
-@@ -1715,7 +1708,7 @@ static int stmmac_hw_setup(struct net_de
+@@ -1720,7 +1713,7 @@ static int stmmac_hw_setup(struct net_de
ret = priv->hw->mac->rx_ipc(priv->hw);
if (!ret) {
priv->plat->rx_coe = STMMAC_RX_COE_NONE;
priv->hw->rx_csum = 0;
}
-@@ -1740,10 +1733,11 @@ static int stmmac_hw_setup(struct net_de
+@@ -1745,10 +1738,11 @@ static int stmmac_hw_setup(struct net_de
#ifdef CONFIG_DEBUG_FS
ret = stmmac_init_fs(dev);
if (ret < 0)
priv->hw->dma->start_tx(priv->ioaddr);
priv->hw->dma->start_rx(priv->ioaddr);
-@@ -1798,8 +1792,9 @@ static int stmmac_open(struct net_device
+@@ -1803,8 +1797,9 @@ static int stmmac_open(struct net_device
priv->hw->pcs != STMMAC_PCS_RTBI) {
ret = stmmac_init_phy(dev);
if (ret) {
return ret;
}
}
-@@ -1814,33 +1809,36 @@ static int stmmac_open(struct net_device
+@@ -1819,33 +1814,36 @@ static int stmmac_open(struct net_device
ret = alloc_dma_desc_resources(priv);
if (ret < 0) {
goto init_error;
}
-@@ -1849,8 +1847,9 @@ static int stmmac_open(struct net_device
+@@ -1854,8 +1852,9 @@ static int stmmac_open(struct net_device
ret = request_irq(priv->wol_irq, stmmac_interrupt,
IRQF_SHARED, dev->name, dev);
if (unlikely(ret < 0)) {
goto wolirq_error;
}
}
-@@ -1860,8 +1859,9 @@ static int stmmac_open(struct net_device
+@@ -1865,8 +1864,9 @@ static int stmmac_open(struct net_device
ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
dev->name, dev);
if (unlikely(ret < 0)) {
goto lpiirq_error;
}
}
-@@ -1880,8 +1880,8 @@ wolirq_error:
+@@ -1885,8 +1885,8 @@ wolirq_error:
init_error:
free_dma_desc_resources(priv);
dma_desc_error:
return ret;
}
-@@ -1900,10 +1900,9 @@ static int stmmac_release(struct net_dev
+@@ -1905,10 +1905,9 @@ static int stmmac_release(struct net_dev
del_timer_sync(&priv->eee_ctrl_timer);
/* Stop and disconnect the PHY */
}
netif_stop_queue(dev);
-@@ -1963,13 +1962,13 @@ static void stmmac_tso_allocator(struct
+@@ -1968,13 +1967,13 @@ static void stmmac_tso_allocator(struct
priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
desc = priv->dma_tx + priv->cur_tx;
0, 0);
tmp_len -= TSO_MAX_BUFF_SIZE;
-@@ -2014,8 +2013,6 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -2019,8 +2018,6 @@ static netdev_tx_t stmmac_tso_xmit(struc
u8 proto_hdr_len;
int i;
/* Compute header lengths */
proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
-@@ -2025,9 +2022,10 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -2030,9 +2027,10 @@ static netdev_tx_t stmmac_tso_xmit(struc
if (!netif_queue_stopped(dev)) {
netif_stop_queue(dev);
/* This is a hard error, log it. */
return NETDEV_TX_BUSY;
}
-@@ -2065,11 +2063,11 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -2070,11 +2068,11 @@ static netdev_tx_t stmmac_tso_xmit(struc
priv->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
priv->tx_skbuff[first_entry] = skb;
/* If needed take extra descriptors to fill the remaining payload */
tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
-@@ -2098,8 +2096,8 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -2103,8 +2101,8 @@ static netdev_tx_t stmmac_tso_xmit(struc
priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
netif_stop_queue(dev);
}
-@@ -2143,7 +2141,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -2155,7 +2153,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
* descriptor and then barrier is needed to make sure that
* all is coherent before granting the DMA engine.
*/
if (netif_msg_pktdata(priv)) {
pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
-@@ -2162,11 +2160,9 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -2174,11 +2172,9 @@ static netdev_tx_t stmmac_tso_xmit(struc
priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
STMMAC_CHAN0);
dev_err(priv->device, "Tx dma map failed\n");
dev_kfree_skb(skb);
priv->dev->stats.tx_dropped++;
-@@ -2198,14 +2194,13 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2210,14 +2206,13 @@ static netdev_tx_t stmmac_xmit(struct sk
return stmmac_tso_xmit(skb, dev);
}
}
return NETDEV_TX_BUSY;
}
-@@ -2258,13 +2253,11 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2270,13 +2265,11 @@ static netdev_tx_t stmmac_xmit(struct sk
priv->tx_skbuff[entry] = NULL;
priv->tx_skbuff_dma[entry].map_as_page = true;
priv->tx_skbuff_dma[entry].len = len;
-@@ -2282,9 +2275,10 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2294,9 +2287,10 @@ static netdev_tx_t stmmac_xmit(struct sk
if (netif_msg_pktdata(priv)) {
void *tx_head;
if (priv->extend_desc)
tx_head = (void *)priv->dma_etx;
-@@ -2293,13 +2287,13 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2305,13 +2299,13 @@ static netdev_tx_t stmmac_xmit(struct sk
priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
netif_stop_queue(dev);
}
-@@ -2335,13 +2329,11 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2347,13 +2341,11 @@ static netdev_tx_t stmmac_xmit(struct sk
if (dma_mapping_error(priv->device, des))
goto dma_map_err;
priv->tx_skbuff_dma[first_entry].len = nopaged_len;
priv->tx_skbuff_dma[first_entry].last_segment = last_segment;
-@@ -2362,7 +2354,7 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2374,7 +2366,7 @@ static netdev_tx_t stmmac_xmit(struct sk
* descriptor and then barrier is needed to make sure that
* all is coherent before granting the DMA engine.
*/
}
netdev_sent_queue(dev, skb->len);
-@@ -2373,12 +2365,10 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2385,12 +2377,10 @@ static netdev_tx_t stmmac_xmit(struct sk
priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
STMMAC_CHAN0);
dev_kfree_skb(skb);
priv->dev->stats.tx_dropped++;
return NETDEV_TX_OK;
-@@ -2449,16 +2439,16 @@ static inline void stmmac_rx_refill(stru
+@@ -2461,16 +2451,16 @@ static inline void stmmac_rx_refill(stru
DMA_FROM_DEVICE);
if (dma_mapping_error(priv->device,
priv->rx_skbuff_dma[entry])) {
}
if (priv->hw->mode->refill_desc3)
priv->hw->mode->refill_desc3(priv, p);
-@@ -2466,17 +2456,17 @@ static inline void stmmac_rx_refill(stru
+@@ -2478,17 +2468,17 @@ static inline void stmmac_rx_refill(stru
if (priv->rx_zeroc_thresh > 0)
priv->rx_zeroc_thresh--;
entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
}
-@@ -2500,7 +2490,7 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2512,7 +2502,7 @@ static int stmmac_rx(struct stmmac_priv
if (netif_msg_rx_status(priv)) {
void *rx_head;
if (priv->extend_desc)
rx_head = (void *)priv->dma_erx;
else
-@@ -2562,9 +2552,9 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2574,9 +2564,9 @@ static int stmmac_rx(struct stmmac_priv
unsigned int des;
if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
-@@ -2573,9 +2563,9 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2585,9 +2575,9 @@ static int stmmac_rx(struct stmmac_priv
* ignored
*/
if (frame_len > priv->dma_buf_sz) {
priv->dev->stats.rx_length_errors++;
break;
}
-@@ -2587,11 +2577,11 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2599,11 +2589,11 @@ static int stmmac_rx(struct stmmac_priv
frame_len -= ETH_FCS_LEN;
if (netif_msg_rx_status(priv)) {
}
/* The zero-copy is always used for all the sizes
-@@ -2628,8 +2618,9 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2640,8 +2630,9 @@ static int stmmac_rx(struct stmmac_priv
} else {
skb = priv->rx_skbuff[entry];
if (unlikely(!skb)) {
priv->dev->stats.rx_dropped++;
break;
}
-@@ -2645,7 +2636,8 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2657,7 +2648,8 @@ static int stmmac_rx(struct stmmac_priv
}
if (netif_msg_pktdata(priv)) {
print_pkt(skb->data, frame_len);
}
-@@ -2748,7 +2740,7 @@ static int stmmac_change_mtu(struct net_
+@@ -2760,7 +2752,7 @@ static int stmmac_change_mtu(struct net_
int max_mtu;
if (netif_running(dev)) {
return -EBUSY;
}
-@@ -2840,7 +2832,7 @@ static irqreturn_t stmmac_interrupt(int
+@@ -2852,7 +2844,7 @@ static irqreturn_t stmmac_interrupt(int
pm_wakeup_event(priv->device, 0);
if (unlikely(!dev)) {
return IRQ_NONE;
}
-@@ -2898,7 +2890,6 @@ static void stmmac_poll_controller(struc
+@@ -2910,7 +2902,6 @@ static void stmmac_poll_controller(struc
*/
static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
int ret = -EOPNOTSUPP;
if (!netif_running(dev))
-@@ -2908,9 +2899,9 @@ static int stmmac_ioctl(struct net_devic
+@@ -2920,9 +2911,9 @@ static int stmmac_ioctl(struct net_devic
case SIOCGMIIPHY:
case SIOCGMIIREG:
case SIOCSMIIREG:
break;
case SIOCSHWTSTAMP:
ret = stmmac_hwtstamp_ioctl(dev, rq);
-@@ -2938,14 +2929,17 @@ static void sysfs_display_ring(void *hea
+@@ -2950,14 +2941,17 @@ static void sysfs_display_ring(void *hea
x = *(u64 *) ep;
seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
i, (unsigned int)virt_to_phys(ep),
p++;
}
seq_printf(seq, "\n");
-@@ -2977,6 +2971,8 @@ static int stmmac_sysfs_ring_open(struct
+@@ -2989,6 +2983,8 @@ static int stmmac_sysfs_ring_open(struct
return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
}
static const struct file_operations stmmac_rings_status_fops = {
.owner = THIS_MODULE,
.open = stmmac_sysfs_ring_open,
-@@ -2999,11 +2995,11 @@ static int stmmac_sysfs_dma_cap_read(str
+@@ -3011,11 +3007,11 @@ static int stmmac_sysfs_dma_cap_read(str
seq_printf(seq, "\tDMA HW features\n");
seq_printf(seq, "==============================\n");
(priv->dma_cap.half_duplex) ? "Y" : "N");
seq_printf(seq, "\tHash Filter: %s\n",
(priv->dma_cap.hash_filter) ? "Y" : "N");
-@@ -3021,9 +3017,9 @@ static int stmmac_sysfs_dma_cap_read(str
+@@ -3033,9 +3029,9 @@ static int stmmac_sysfs_dma_cap_read(str
(priv->dma_cap.rmon) ? "Y" : "N");
seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
(priv->dma_cap.time_stamp) ? "Y" : "N");
(priv->dma_cap.eee) ? "Y" : "N");
seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
seq_printf(seq, "\tChecksum Offload in TX: %s\n",
-@@ -3070,8 +3066,7 @@ static int stmmac_init_fs(struct net_dev
+@@ -3082,8 +3078,7 @@ static int stmmac_init_fs(struct net_dev
priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
return -ENOMEM;
}
-@@ -3083,7 +3078,7 @@ static int stmmac_init_fs(struct net_dev
+@@ -3095,7 +3090,7 @@ static int stmmac_init_fs(struct net_dev
&stmmac_rings_status_fops);
if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
debugfs_remove_recursive(priv->dbgfs_dir);
return -ENOMEM;
-@@ -3095,7 +3090,7 @@ static int stmmac_init_fs(struct net_dev
+@@ -3107,7 +3102,7 @@ static int stmmac_init_fs(struct net_dev
dev, &stmmac_dma_cap_fops);
if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
debugfs_remove_recursive(priv->dbgfs_dir);
return -ENOMEM;
-@@ -3167,11 +3162,11 @@ static int stmmac_hw_init(struct stmmac_
+@@ -3179,11 +3174,11 @@ static int stmmac_hw_init(struct stmmac_
} else {
if (chain_mode) {
priv->hw->mode = &chain_mode_ops;
priv->mode = STMMAC_RING_MODE;
}
}
-@@ -3179,7 +3174,7 @@ static int stmmac_hw_init(struct stmmac_
+@@ -3191,7 +3186,7 @@ static int stmmac_hw_init(struct stmmac_
/* Get the HW capability (new GMAC newer than 3.50a) */
priv->hw_cap_support = stmmac_get_hw_features(priv);
if (priv->hw_cap_support) {
/* We can override some gmac/dma configuration fields: e.g.
* enh_desc, tx_coe (e.g. that are passed through the
-@@ -3204,8 +3199,9 @@ static int stmmac_hw_init(struct stmmac_
+@@ -3216,8 +3211,9 @@ static int stmmac_hw_init(struct stmmac_
else if (priv->dma_cap.rx_coe_type1)
priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
/* To use alternate (extended), normal or GMAC4 descriptor structures */
if (priv->synopsys_id >= DWMAC_CORE_4_00)
-@@ -3215,20 +3211,20 @@ static int stmmac_hw_init(struct stmmac_
+@@ -3227,20 +3223,20 @@ static int stmmac_hw_init(struct stmmac_
if (priv->plat->rx_coe) {
priv->hw->rx_csum = priv->plat->rx_coe;
return 0;
}
-@@ -3287,8 +3283,8 @@ int stmmac_dvr_probe(struct device *devi
+@@ -3299,8 +3295,8 @@ int stmmac_dvr_probe(struct device *devi
priv->stmmac_clk = devm_clk_get(priv->device, STMMAC_RESOURCE_NAME);
if (IS_ERR(priv->stmmac_clk)) {
/* If failed to obtain stmmac_clk and specific clk_csr value
* is NOT passed from the platform, probe fail.
*/
-@@ -3337,7 +3333,7 @@ int stmmac_dvr_probe(struct device *devi
+@@ -3349,7 +3345,7 @@ int stmmac_dvr_probe(struct device *devi
if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
ndev->hw_features |= NETIF_F_TSO;
priv->tso = true;
}
ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
-@@ -3357,13 +3353,13 @@ int stmmac_dvr_probe(struct device *devi
+@@ -3369,13 +3365,13 @@ int stmmac_dvr_probe(struct device *devi
*/
if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
priv->use_riwt = 1;
/* If a specific clk_csr value is passed from the platform
* this means that the CSR Clock Range selection cannot be
-@@ -3384,15 +3380,17 @@ int stmmac_dvr_probe(struct device *devi
+@@ -3396,15 +3392,17 @@ int stmmac_dvr_probe(struct device *devi
/* MDIO bus Registration */
ret = stmmac_mdio_register(ndev);
if (ret < 0) {
goto error_netdev_register;
}
-@@ -3403,7 +3401,7 @@ error_netdev_register:
+@@ -3415,7 +3413,7 @@ error_netdev_register:
priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI)
stmmac_mdio_unregister(ndev);
netif_napi_del(&priv->napi);
error_hw_init:
clk_disable_unprepare(priv->pclk);
-@@ -3427,7 +3425,7 @@ int stmmac_dvr_remove(struct device *dev
+@@ -3439,7 +3437,7 @@ int stmmac_dvr_remove(struct device *dev
struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
priv->hw->dma->stop_rx(priv->ioaddr);
priv->hw->dma->stop_tx(priv->ioaddr);
-@@ -3465,8 +3463,8 @@ int stmmac_suspend(struct device *dev)
+@@ -3477,8 +3475,8 @@ int stmmac_suspend(struct device *dev)
if (!ndev || !netif_running(ndev))
return 0;
spin_lock_irqsave(&priv->lock, flags);
-@@ -3560,8 +3558,8 @@ int stmmac_resume(struct device *dev)
+@@ -3572,8 +3570,8 @@ int stmmac_resume(struct device *dev)
spin_unlock_irqrestore(&priv->lock, flags);
};
static void meson8b_dwmac_mask_bits(struct meson8b_dwmac *dwmac, u32 reg,
-@@ -179,11 +177,19 @@ static int meson8b_init_prg_eth(struct m
+@@ -181,11 +179,19 @@ static int meson8b_init_prg_eth(struct m
{
int ret;
unsigned long clk_rate;
case PHY_INTERFACE_MODE_RGMII_TXID:
/* Generate a 25MHz clock for the PHY */
clk_rate = 25 * 1000 * 1000;
-@@ -196,9 +202,8 @@ static int meson8b_init_prg_eth(struct m
+@@ -198,9 +204,8 @@ static int meson8b_init_prg_eth(struct m
meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
PRG_ETH0_INVERTED_RMII_CLK, 0);
break;
case PHY_INTERFACE_MODE_RMII:
-@@ -284,6 +289,11 @@ static int meson8b_dwmac_probe(struct pl
+@@ -286,6 +291,11 @@ static int meson8b_dwmac_probe(struct pl
goto err_remove_config_dt;
}
* stmmac_dma_operation_mode - HW DMA operation mode
* @priv: driver private structure
* Description: it is used for configuring the DMA operation mode register in
-@@ -1686,10 +1687,6 @@ static int stmmac_hw_setup(struct net_de
+@@ -1691,10 +1692,6 @@ static int stmmac_hw_setup(struct net_de
/* Copy the MAC addr into the HW */
priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
/* PS and related bits will be programmed according to the speed */
if (priv->hw->pcs) {
int speed = priv->plat->mac_port_sel_speed;
-@@ -1706,6 +1703,10 @@ static int stmmac_hw_setup(struct net_de
+@@ -1711,6 +1708,10 @@ static int stmmac_hw_setup(struct net_de
/* Initialize the MAC Core */
priv->hw->mac->core_init(priv->hw, dev->mtu);
ret = priv->hw->mac->rx_ipc(priv->hw);
if (!ret) {
netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
-@@ -1726,8 +1727,10 @@ static int stmmac_hw_setup(struct net_de
+@@ -1731,8 +1732,10 @@ static int stmmac_hw_setup(struct net_de
if (init_ptp) {
ret = stmmac_init_ptp(priv);
}
#ifdef CONFIG_DEBUG_FS
-@@ -1741,11 +1744,6 @@ static int stmmac_hw_setup(struct net_de
+@@ -1746,11 +1749,6 @@ static int stmmac_hw_setup(struct net_de
priv->hw->dma->start_tx(priv->ioaddr);
priv->hw->dma->start_rx(priv->ioaddr);
priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
-@@ -2535,7 +2533,7 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2547,7 +2545,7 @@ static int stmmac_rx(struct stmmac_priv
if (unlikely(status == discard_frame)) {
priv->dev->stats.rx_errors++;
if (priv->hwts_rx_en && !priv->extend_desc) {
* with timestamp value, hence reinitialize
* them in stmmac_rx_refill() function so that
* device can reuse it.
-@@ -2558,7 +2556,7 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2570,7 +2568,7 @@ static int stmmac_rx(struct stmmac_priv
frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
* (preallocated during init) then the packet is
* ignored
*/
-@@ -2778,7 +2776,7 @@ static netdev_features_t stmmac_fix_feat
+@@ -2790,7 +2788,7 @@ static netdev_features_t stmmac_fix_feat
/* Some GMAC devices have a bugged Jumbo frame support that
* needs to have the Tx COE disabled for oversized frames
* (due to limited buffer sizes). In this case we disable
*/
if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
features &= ~NETIF_F_CSUM_MASK;
-@@ -2924,9 +2922,7 @@ static void sysfs_display_ring(void *hea
+@@ -2936,9 +2934,7 @@ static void sysfs_display_ring(void *hea
struct dma_desc *p = (struct dma_desc *)head;
for (i = 0; i < size; i++) {
seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
i, (unsigned int)virt_to_phys(ep),
le32_to_cpu(ep->basic.des0),
-@@ -2935,7 +2931,6 @@ static void sysfs_display_ring(void *hea
+@@ -2947,7 +2943,6 @@ static void sysfs_display_ring(void *hea
le32_to_cpu(ep->basic.des3));
ep++;
} else {
seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
i, (unsigned int)virt_to_phys(ep),
le32_to_cpu(p->des0), le32_to_cpu(p->des1),
-@@ -3005,7 +3000,7 @@ static int stmmac_sysfs_dma_cap_read(str
+@@ -3017,7 +3012,7 @@ static int stmmac_sysfs_dma_cap_read(str
(priv->dma_cap.hash_filter) ? "Y" : "N");
seq_printf(seq, "\tMultiple MAC address registers: %s\n",
(priv->dma_cap.multi_addr) ? "Y" : "N");
(priv->dma_cap.pcs) ? "Y" : "N");
seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
(priv->dma_cap.sma_mdio) ? "Y" : "N");
-@@ -3281,44 +3276,8 @@ int stmmac_dvr_probe(struct device *devi
+@@ -3293,44 +3288,8 @@ int stmmac_dvr_probe(struct device *devi
if ((phyaddr >= 0) && (phyaddr <= 31))
priv->plat->phy_addr = phyaddr;
/* Init MAC and get the capabilities */
ret = stmmac_hw_init(priv);
-@@ -3404,10 +3363,6 @@ error_netdev_register:
+@@ -3416,10 +3375,6 @@ error_netdev_register:
error_mdio_register:
netif_napi_del(&priv->napi);
error_hw_init:
free_netdev(ndev);
return ret;
-@@ -3433,10 +3388,10 @@ int stmmac_dvr_remove(struct device *dev
+@@ -3445,10 +3400,10 @@ int stmmac_dvr_remove(struct device *dev
stmmac_set_mac(priv->ioaddr, false);
netif_carrier_off(ndev);
unregister_netdev(ndev);
if (priv->hw->pcs != STMMAC_PCS_RGMII &&
priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI)
-@@ -3485,14 +3440,14 @@ int stmmac_suspend(struct device *dev)
+@@ -3497,14 +3452,14 @@ int stmmac_suspend(struct device *dev)
stmmac_set_mac(priv->ioaddr, false);
pinctrl_pm_select_sleep_state(priv->device);
/* Disable clock in case of PWM is off */
return 0;
}
EXPORT_SYMBOL_GPL(stmmac_suspend);
-@@ -3525,9 +3480,9 @@ int stmmac_resume(struct device *dev)
+@@ -3537,9 +3492,9 @@ int stmmac_resume(struct device *dev)
priv->irq_wake = 0;
} else {
pinctrl_pm_select_default_state(priv->device);
if (priv->hw->mode->set_16kib_bfsize)
bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
-@@ -1033,257 +1234,516 @@ static int init_dma_desc_rings(struct ne
+@@ -1033,235 +1234,409 @@ static int init_dma_desc_rings(struct ne
priv->dma_buf_sz = bfsize;
- priv->tx_skbuff_dma[i].buf,
- priv->tx_skbuff_dma[i].len,
- DMA_TO_DEVICE);
+- }
+ for (i = 0; i < DMA_TX_SIZE; i++)
+ stmmac_free_tx_buffer(priv, queue, i);
+}
-+
+
+- if (priv->tx_skbuff[i]) {
+- dev_kfree_skb_any(priv->tx_skbuff[i]);
+- priv->tx_skbuff[i] = NULL;
+- priv->tx_skbuff_dma[i].buf = 0;
+- priv->tx_skbuff_dma[i].map_as_page = false;
+- }
+/**
+ * free_dma_rx_desc_resources - free RX dma desc resources
+ * @priv: private structure
+
+ kfree(rx_q->rx_skbuff_dma);
+ kfree(rx_q->rx_skbuff);
-+ }
-+}
-+
-+/**
+ }
+ }
+
+ /**
+- * alloc_dma_desc_resources - alloc TX/RX resources.
+ * free_dma_tx_desc_resources - free TX dma desc resources
+ * @priv: private structure
+ */
+
+/**
+ * alloc_dma_rx_desc_resources - alloc RX resources.
-+ * @priv: private structure
-+ * Description: according to which descriptor can be used (extend or basic)
-+ * this function allocates the resources for TX and RX paths. In case of
-+ * reception, for example, it pre-allocated the RX socket buffer in order to
-+ * allow zero-copy mechanism.
-+ */
+ * @priv: private structure
+ * Description: according to which descriptor can be used (extend or basic)
+ * this function allocates the resources for TX and RX paths. In case of
+ * reception, for example, it pre-allocated the RX socket buffer in order to
+ * allow zero-copy mechanism.
+ */
+-static int alloc_dma_desc_resources(struct stmmac_priv *priv)
+static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
-+{
+ {
+ u32 rx_count = priv->plat->rx_queues_to_use;
-+ int ret = -ENOMEM;
+ int ret = -ENOMEM;
+ u32 queue;
-+
+
+- priv->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, sizeof(dma_addr_t),
+- GFP_KERNEL);
+- if (!priv->rx_skbuff_dma)
+- return -ENOMEM;
+ /* RX queues buffers and DMA */
+ for (queue = 0; queue < rx_count; queue++) {
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
-+
+
+- priv->rx_skbuff = kmalloc_array(DMA_RX_SIZE, sizeof(struct sk_buff *),
+- GFP_KERNEL);
+- if (!priv->rx_skbuff)
+- goto err_rx_skbuff;
+-
+- priv->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
+- sizeof(*priv->tx_skbuff_dma),
+- GFP_KERNEL);
+- if (!priv->tx_skbuff_dma)
+- goto err_tx_skbuff_dma;
+-
+- priv->tx_skbuff = kmalloc_array(DMA_TX_SIZE, sizeof(struct sk_buff *),
+- GFP_KERNEL);
+- if (!priv->tx_skbuff)
+- goto err_tx_skbuff;
+-
+- if (priv->extend_desc) {
+- priv->dma_erx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
+- sizeof(struct
+- dma_extended_desc),
+- &priv->dma_rx_phy,
+- GFP_KERNEL);
+- if (!priv->dma_erx)
+- goto err_dma;
+ rx_q->queue_index = queue;
+ rx_q->priv_data = priv;
-+
+
+- priv->dma_etx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
+- sizeof(struct
+- dma_extended_desc),
+- &priv->dma_tx_phy,
+ rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
+ sizeof(dma_addr_t),
-+ GFP_KERNEL);
+ GFP_KERNEL);
+- if (!priv->dma_etx) {
+- dma_free_coherent(priv->device, DMA_RX_SIZE *
+- sizeof(struct dma_extended_desc),
+- priv->dma_erx, priv->dma_rx_phy);
+- goto err_dma;
+- }
+- } else {
+- priv->dma_rx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
+- sizeof(struct dma_desc),
+- &priv->dma_rx_phy,
+- GFP_KERNEL);
+- if (!priv->dma_rx)
+- goto err_dma;
+ if (!rx_q->rx_skbuff_dma)
+ return -ENOMEM;
-+
+
+- priv->dma_tx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
+- sizeof(struct dma_desc),
+- &priv->dma_tx_phy,
+- GFP_KERNEL);
+- if (!priv->dma_tx) {
+- dma_free_coherent(priv->device, DMA_RX_SIZE *
+- sizeof(struct dma_desc),
+- priv->dma_rx, priv->dma_rx_phy);
+ rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
+ sizeof(struct sk_buff *),
+ GFP_KERNEL);
+ if (!rx_q->rx_skbuff)
-+ goto err_dma;
+ goto err_dma;
+
+ if (priv->extend_desc) {
+ rx_q->dma_erx = dma_zalloc_coherent(priv->device,
+ GFP_KERNEL);
+ if (!rx_q->dma_rx)
+ goto err_dma;
-+ }
-+ }
-+
-+ return 0;
-+
-+err_dma:
+ }
+ }
+
+ return 0;
+
+ err_dma:
+- kfree(priv->tx_skbuff);
+-err_tx_skbuff:
+- kfree(priv->tx_skbuff_dma);
+-err_tx_skbuff_dma:
+- kfree(priv->rx_skbuff);
+-err_rx_skbuff:
+- kfree(priv->rx_skbuff_dma);
+ free_dma_rx_desc_resources(priv);
+
+ return ret;
+ GFP_KERNEL);
+ if (!tx_q->dma_tx)
+ goto err_dma_buffers;
- }
++ }
+ }
+
+ return 0;
+
+ ret = alloc_dma_tx_desc_resources(priv);
+
-+ return ret;
-+}
-+
+ return ret;
+ }
+
+/**
+ * free_dma_desc_resources - free dma desc resources
+ * @priv: private structure
+ */
-+static void free_dma_desc_resources(struct stmmac_priv *priv)
-+{
+ static void free_dma_desc_resources(struct stmmac_priv *priv)
+ {
+- /* Release the DMA TX/RX socket buffers */
+- dma_free_rx_skbufs(priv);
+- dma_free_tx_skbufs(priv);
+-
+- /* Free DMA regions of consistent memory previously allocated */
+- if (!priv->extend_desc) {
+- dma_free_coherent(priv->device,
+- DMA_TX_SIZE * sizeof(struct dma_desc),
+- priv->dma_tx, priv->dma_tx_phy);
+- dma_free_coherent(priv->device,
+- DMA_RX_SIZE * sizeof(struct dma_desc),
+- priv->dma_rx, priv->dma_rx_phy);
+- } else {
+- dma_free_coherent(priv->device, DMA_TX_SIZE *
+- sizeof(struct dma_extended_desc),
+- priv->dma_etx, priv->dma_tx_phy);
+- dma_free_coherent(priv->device, DMA_RX_SIZE *
+- sizeof(struct dma_extended_desc),
+- priv->dma_erx, priv->dma_rx_phy);
+- }
+- kfree(priv->rx_skbuff_dma);
+- kfree(priv->rx_skbuff);
+- kfree(priv->tx_skbuff_dma);
+- kfree(priv->tx_skbuff);
+ /* Release the DMA RX socket buffers */
+ free_dma_rx_desc_resources(priv);
+
+ /* Release the DMA TX socket buffers */
+ free_dma_tx_desc_resources(priv);
-+}
-+
-+/**
-+ * stmmac_mac_enable_rx_queues - Enable MAC rx queues
-+ * @priv: driver private structure
-+ * Description: It is used for enabling the rx queues in the MAC
-+ */
-+static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
-+{
+ }
+
+ /**
+@@ -1271,19 +1646,104 @@ static void free_dma_desc_resources(stru
+ */
+ static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
+ {
+- int rx_count = priv->dma_cap.number_rx_queues;
+- int queue = 0;
+ u32 rx_queues_count = priv->plat->rx_queues_to_use;
+ int queue;
+ u8 mode;
-- if (priv->tx_skbuff[i]) {
-- dev_kfree_skb_any(priv->tx_skbuff[i]);
-- priv->tx_skbuff[i] = NULL;
-- priv->tx_skbuff_dma[i].buf = 0;
-- priv->tx_skbuff_dma[i].map_as_page = false;
-- }
+- /* If GMAC does not have multiple queues, then this is not necessary*/
+- if (rx_count == 1)
+- return;
+ for (queue = 0; queue < rx_queues_count; queue++) {
+ mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
+ priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
- }
- }
++ }
++}
- /**
-- * alloc_dma_desc_resources - alloc TX/RX resources.
-- * @priv: private structure
-- * Description: according to which descriptor can be used (extend or basic)
-- * this function allocates the resources for TX and RX paths. In case of
-- * reception, for example, it pre-allocated the RX socket buffer in order to
-- * allow zero-copy mechanism.
+- /**
+- * If the core is synthesized with multiple rx queues / multiple
+- * dma channels, then rx queues will be disabled by default.
+- * For now only rx queue 0 is enabled.
+- */
+- priv->hw->mac->rx_queue_enable(priv->hw, queue);
++/**
+ * stmmac_start_rx_dma - start RX DMA channel
+ * @priv: driver private structure
+ * @chan: RX channel index
+ * Description:
+ * This starts a RX DMA channel
- */
--static int alloc_dma_desc_resources(struct stmmac_priv *priv)
++ */
+static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
- {
-- int ret = -ENOMEM;
--
-- priv->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, sizeof(dma_addr_t),
-- GFP_KERNEL);
-- if (!priv->rx_skbuff_dma)
-- return -ENOMEM;
--
-- priv->rx_skbuff = kmalloc_array(DMA_RX_SIZE, sizeof(struct sk_buff *),
-- GFP_KERNEL);
-- if (!priv->rx_skbuff)
-- goto err_rx_skbuff;
--
-- priv->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
-- sizeof(*priv->tx_skbuff_dma),
-- GFP_KERNEL);
-- if (!priv->tx_skbuff_dma)
-- goto err_tx_skbuff_dma;
--
-- priv->tx_skbuff = kmalloc_array(DMA_TX_SIZE, sizeof(struct sk_buff *),
-- GFP_KERNEL);
-- if (!priv->tx_skbuff)
-- goto err_tx_skbuff;
--
-- if (priv->extend_desc) {
-- priv->dma_erx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
-- sizeof(struct
-- dma_extended_desc),
-- &priv->dma_rx_phy,
-- GFP_KERNEL);
-- if (!priv->dma_erx)
-- goto err_dma;
--
-- priv->dma_etx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
-- sizeof(struct
-- dma_extended_desc),
-- &priv->dma_tx_phy,
-- GFP_KERNEL);
-- if (!priv->dma_etx) {
-- dma_free_coherent(priv->device, DMA_RX_SIZE *
-- sizeof(struct dma_extended_desc),
-- priv->dma_erx, priv->dma_rx_phy);
-- goto err_dma;
-- }
-- } else {
-- priv->dma_rx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
-- sizeof(struct dma_desc),
-- &priv->dma_rx_phy,
-- GFP_KERNEL);
-- if (!priv->dma_rx)
-- goto err_dma;
++{
+ netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
+ priv->hw->dma->start_rx(priv->ioaddr, chan);
+}
-
-- priv->dma_tx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
-- sizeof(struct dma_desc),
-- &priv->dma_tx_phy,
-- GFP_KERNEL);
-- if (!priv->dma_tx) {
-- dma_free_coherent(priv->device, DMA_RX_SIZE *
-- sizeof(struct dma_desc),
-- priv->dma_rx, priv->dma_rx_phy);
-- goto err_dma;
-- }
-- }
++
+/**
+ * stmmac_start_tx_dma - start TX DMA channel
+ * @priv: driver private structure
+ netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
+ priv->hw->dma->start_tx(priv->ioaddr, chan);
+}
-
-- return 0;
++
+/**
+ * stmmac_stop_rx_dma - stop RX DMA channel
+ * @priv: driver private structure
+ netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
+ priv->hw->dma->stop_rx(priv->ioaddr, chan);
+}
-
--err_dma:
-- kfree(priv->tx_skbuff);
--err_tx_skbuff:
-- kfree(priv->tx_skbuff_dma);
--err_tx_skbuff_dma:
-- kfree(priv->rx_skbuff);
--err_rx_skbuff:
-- kfree(priv->rx_skbuff_dma);
-- return ret;
++
+/**
+ * stmmac_stop_tx_dma - stop TX DMA channel
+ * @priv: driver private structure
+{
+ netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
+ priv->hw->dma->stop_tx(priv->ioaddr, chan);
- }
-
--static void free_dma_desc_resources(struct stmmac_priv *priv)
++}
++
+/**
+ * stmmac_start_all_dma - start all RX and TX DMA channels
+ * @priv: driver private structure
+ * This starts all the RX and TX DMA channels
+ */
+static void stmmac_start_all_dma(struct stmmac_priv *priv)
- {
-- /* Release the DMA TX/RX socket buffers */
-- dma_free_rx_skbufs(priv);
-- dma_free_tx_skbufs(priv);
--
-- /* Free DMA regions of consistent memory previously allocated */
-- if (!priv->extend_desc) {
-- dma_free_coherent(priv->device,
-- DMA_TX_SIZE * sizeof(struct dma_desc),
-- priv->dma_tx, priv->dma_tx_phy);
-- dma_free_coherent(priv->device,
-- DMA_RX_SIZE * sizeof(struct dma_desc),
-- priv->dma_rx, priv->dma_rx_phy);
-- } else {
-- dma_free_coherent(priv->device, DMA_TX_SIZE *
-- sizeof(struct dma_extended_desc),
-- priv->dma_etx, priv->dma_tx_phy);
-- dma_free_coherent(priv->device, DMA_RX_SIZE *
-- sizeof(struct dma_extended_desc),
-- priv->dma_erx, priv->dma_rx_phy);
-- }
-- kfree(priv->rx_skbuff_dma);
-- kfree(priv->rx_skbuff);
-- kfree(priv->tx_skbuff_dma);
-- kfree(priv->tx_skbuff);
++{
+ u32 rx_channels_count = priv->plat->rx_queues_to_use;
+ u32 tx_channels_count = priv->plat->tx_queues_to_use;
+ u32 chan = 0;
+
+ for (chan = 0; chan < tx_channels_count; chan++)
+ stmmac_start_tx_dma(priv, chan);
- }
-
- /**
-- * stmmac_mac_enable_rx_queues - Enable MAC rx queues
-- * @priv: driver private structure
-- * Description: It is used for enabling the rx queues in the MAC
++}
++
++/**
+ * stmmac_stop_all_dma - stop all RX and TX DMA channels
+ * @priv: driver private structure
+ * Description:
+ * This stops the RX and TX DMA channels
- */
--static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
++ */
+static void stmmac_stop_all_dma(struct stmmac_priv *priv)
- {
-- int rx_count = priv->dma_cap.number_rx_queues;
-- int queue = 0;
++{
+ u32 rx_channels_count = priv->plat->rx_queues_to_use;
+ u32 tx_channels_count = priv->plat->tx_queues_to_use;
+ u32 chan = 0;
-
-- /* If GMAC does not have multiple queues, then this is not necessary*/
-- if (rx_count == 1)
-- return;
++
+ for (chan = 0; chan < rx_channels_count; chan++)
+ stmmac_stop_rx_dma(priv, chan);
-
-- /**
-- * If the core is synthesized with multiple rx queues / multiple
-- * dma channels, then rx queues will be disabled by default.
-- * For now only rx queue 0 is enabled.
-- */
-- priv->hw->mac->rx_queue_enable(priv->hw, queue);
++
+ for (chan = 0; chan < tx_channels_count; chan++)
+ stmmac_stop_tx_dma(priv, chan);
}
status = priv->hw->desc->tx_status(&priv->dev->stats,
&priv->xstats, p,
-@@ -1357,48 +1842,51 @@ static void stmmac_tx_clean(struct stmma
+@@ -1362,48 +1847,51 @@ static void stmmac_tx_clean(struct stmma
stmmac_get_tx_hwtstamp(priv, p, skb);
}
}
if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
-@@ -1408,45 +1896,76 @@ static void stmmac_tx_clean(struct stmma
+@@ -1413,45 +1901,76 @@ static void stmmac_tx_clean(struct stmma
netif_tx_unlock(priv->dev);
}
}
/**
-@@ -1458,31 +1977,43 @@ static void stmmac_tx_err(struct stmmac_
+@@ -1463,31 +1982,43 @@ static void stmmac_tx_err(struct stmmac_
*/
static void stmmac_dma_interrupt(struct stmmac_priv *priv)
{
}
/**
-@@ -1589,6 +2120,13 @@ static void stmmac_check_ether_addr(stru
+@@ -1594,6 +2125,13 @@ static void stmmac_check_ether_addr(stru
*/
static int stmmac_init_dma_engine(struct stmmac_priv *priv)
{
int atds = 0;
int ret = 0;
-@@ -1606,19 +2144,49 @@ static int stmmac_init_dma_engine(struct
+@@ -1611,19 +2149,49 @@ static int stmmac_init_dma_engine(struct
return ret;
}
}
if (priv->plat->axi && priv->hw->dma->axi)
-@@ -1636,8 +2204,12 @@ static int stmmac_init_dma_engine(struct
+@@ -1641,8 +2209,12 @@ static int stmmac_init_dma_engine(struct
static void stmmac_tx_timer(unsigned long data)
{
struct stmmac_priv *priv = (struct stmmac_priv *)data;
}
/**
-@@ -1659,6 +2231,196 @@ static void stmmac_init_tx_coalesce(stru
+@@ -1664,6 +2236,196 @@ static void stmmac_init_tx_coalesce(stru
add_timer(&priv->txtimer);
}
/**
* stmmac_hw_setup - setup mac in a usable state.
* @dev : pointer to the device structure.
-@@ -1674,6 +2436,9 @@ static void stmmac_init_tx_coalesce(stru
+@@ -1679,6 +2441,9 @@ static void stmmac_init_tx_coalesce(stru
static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
{
struct stmmac_priv *priv = netdev_priv(dev);
int ret;
/* DMA initialization and SW reset */
-@@ -1703,9 +2468,9 @@ static int stmmac_hw_setup(struct net_de
+@@ -1708,9 +2473,9 @@ static int stmmac_hw_setup(struct net_de
/* Initialize the MAC Core */
priv->hw->mac->core_init(priv->hw, dev->mtu);
ret = priv->hw->mac->rx_ipc(priv->hw);
if (!ret) {
-@@ -1715,10 +2480,7 @@ static int stmmac_hw_setup(struct net_de
+@@ -1720,10 +2485,7 @@ static int stmmac_hw_setup(struct net_de
}
/* Enable the MAC Rx/Tx */
/* Set the HW DMA mode and the COE */
stmmac_dma_operation_mode(priv);
-@@ -1726,6 +2488,10 @@ static int stmmac_hw_setup(struct net_de
+@@ -1731,6 +2493,10 @@ static int stmmac_hw_setup(struct net_de
stmmac_mmc_setup(priv);
if (init_ptp) {
ret = stmmac_init_ptp(priv);
if (ret == -EOPNOTSUPP)
netdev_warn(priv->dev, "PTP not supported by HW\n");
-@@ -1740,35 +2506,37 @@ static int stmmac_hw_setup(struct net_de
+@@ -1745,35 +2511,37 @@ static int stmmac_hw_setup(struct net_de
__func__);
#endif
/* Start the ball rolling... */
/**
* stmmac_open - open entry point of the driver
* @dev : pointer to the device structure.
-@@ -1837,7 +2605,7 @@ static int stmmac_open(struct net_device
+@@ -1842,7 +2610,7 @@ static int stmmac_open(struct net_device
netdev_err(priv->dev,
"%s: ERROR: allocating the IRQ %d (error: %d)\n",
__func__, dev->irq, ret);
}
/* Request the Wake IRQ in case of another line is used for WoL */
-@@ -1864,8 +2632,8 @@ static int stmmac_open(struct net_device
+@@ -1869,8 +2637,8 @@ static int stmmac_open(struct net_device
}
}
return 0;
-@@ -1874,7 +2642,12 @@ lpiirq_error:
+@@ -1879,7 +2647,12 @@ lpiirq_error:
free_irq(priv->wol_irq, dev);
wolirq_error:
free_irq(dev->irq, dev);
init_error:
free_dma_desc_resources(priv);
dma_desc_error:
-@@ -1903,9 +2676,9 @@ static int stmmac_release(struct net_dev
+@@ -1908,9 +2681,9 @@ static int stmmac_release(struct net_dev
phy_disconnect(dev->phydev);
}
del_timer_sync(&priv->txtimer);
-@@ -1917,14 +2690,13 @@ static int stmmac_release(struct net_dev
+@@ -1922,14 +2695,13 @@ static int stmmac_release(struct net_dev
free_irq(priv->lpi_irq, dev);
/* Stop TX/RX DMA and clear the descriptors */
netif_carrier_off(dev);
-@@ -1943,22 +2715,24 @@ static int stmmac_release(struct net_dev
+@@ -1948,22 +2720,24 @@ static int stmmac_release(struct net_dev
* @des: buffer start address
* @total_len: total length to fill in descriptors
* @last_segmant: condition for the last descriptor
desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
-@@ -1966,7 +2740,7 @@ static void stmmac_tso_allocator(struct
+@@ -1971,7 +2745,7 @@ static void stmmac_tso_allocator(struct
priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
0, 1,
0, 0);
tmp_len -= TSO_MAX_BUFF_SIZE;
-@@ -2002,23 +2776,28 @@ static void stmmac_tso_allocator(struct
+@@ -2007,23 +2781,28 @@ static void stmmac_tso_allocator(struct
*/
static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
{
/* This is a hard error, log it. */
netdev_err(priv->dev,
"%s: Tx Ring full when queue awake\n",
-@@ -2033,10 +2812,10 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -2038,10 +2817,10 @@ static netdev_tx_t stmmac_tso_xmit(struc
/* set new MSS value if needed */
if (mss != priv->mss) {
}
if (netif_msg_tx_queued(priv)) {
-@@ -2046,9 +2825,9 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -2051,9 +2830,9 @@ static netdev_tx_t stmmac_tso_xmit(struc
skb->data_len);
}
first = desc;
/* first descriptor: fill Headers on Buf1 */
-@@ -2057,9 +2836,8 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -2062,9 +2841,8 @@ static netdev_tx_t stmmac_tso_xmit(struc
if (dma_mapping_error(priv->device, des))
goto dma_map_err;
first->des0 = cpu_to_le32(des);
-@@ -2070,7 +2848,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -2075,7 +2853,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
/* If needed take extra descriptors to fill the remaining payload */
tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
/* Prepare fragments */
for (i = 0; i < nfrags; i++) {
-@@ -2079,24 +2857,34 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -2084,24 +2862,34 @@ static netdev_tx_t stmmac_tso_xmit(struc
des = skb_frag_dma_map(priv->device, frag, 0,
skb_frag_size(frag),
DMA_TO_DEVICE);
}
dev->stats.tx_bytes += skb->len;
-@@ -2128,7 +2916,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -2133,7 +2921,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
priv->hw->desc->prepare_tso_tx_desc(first, 1,
proto_hdr_len,
pay_len,
tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
/* If context desc is used to change MSS */
-@@ -2143,20 +2931,20 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -2155,20 +2943,20 @@ static netdev_tx_t stmmac_tso_xmit(struc
if (netif_msg_pktdata(priv)) {
pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
return NETDEV_TX_OK;
-@@ -2180,21 +2968,27 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2192,21 +2980,27 @@ static netdev_tx_t stmmac_xmit(struct sk
struct stmmac_priv *priv = netdev_priv(dev);
unsigned int nopaged_len = skb_headlen(skb);
int i, csum_insertion = 0, is_jumbo = 0;
/* This is a hard error, log it. */
netdev_err(priv->dev,
"%s: Tx Ring full when queue awake\n",
-@@ -2206,20 +3000,18 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2218,20 +3012,18 @@ static netdev_tx_t stmmac_xmit(struct sk
if (priv->tx_path_in_lpi_mode)
stmmac_disable_eee_mode(priv);
enh_desc = priv->plat->enh_desc;
/* To program the descriptors according to the size of the frame */
if (enh_desc)
-@@ -2227,7 +3019,7 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2239,7 +3031,7 @@ static netdev_tx_t stmmac_xmit(struct sk
if (unlikely(is_jumbo) && likely(priv->synopsys_id <
DWMAC_CORE_4_00)) {
if (unlikely(entry < 0))
goto dma_map_err;
}
-@@ -2240,48 +3032,56 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2252,48 +3044,56 @@ static netdev_tx_t stmmac_xmit(struct sk
entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
if (likely(priv->extend_desc))
priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
-@@ -2289,10 +3089,10 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2301,10 +3101,10 @@ static netdev_tx_t stmmac_xmit(struct sk
print_pkt(skb->data, skb->len);
}
}
dev->stats.tx_bytes += skb->len;
-@@ -2327,14 +3127,14 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2339,14 +3139,14 @@ static netdev_tx_t stmmac_xmit(struct sk
if (dma_mapping_error(priv->device, des))
goto dma_map_err;
if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
priv->hwts_tx_en)) {
-@@ -2346,7 +3146,7 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2358,7 +3158,7 @@ static netdev_tx_t stmmac_xmit(struct sk
/* Prepare the first descriptor setting the OWN bit too */
priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
csum_insertion, priv->mode, 1,
/* The own bit must be the latest setting done when prepare the
* descriptor and then barrier is needed to make sure that
-@@ -2355,13 +3155,13 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2367,13 +3167,13 @@ static netdev_tx_t stmmac_xmit(struct sk
dma_wmb();
}
return NETDEV_TX_OK;
-@@ -2389,9 +3189,9 @@ static void stmmac_rx_vlan(struct net_de
+@@ -2401,9 +3201,9 @@ static void stmmac_rx_vlan(struct net_de
}
return 0;
return 1;
-@@ -2400,30 +3200,33 @@ static inline int stmmac_rx_threshold_co
+@@ -2412,30 +3212,33 @@ static inline int stmmac_rx_threshold_co
/**
* stmmac_rx_refill - refill used skb preallocated buffers
* @priv: driver private structure
if (unlikely(net_ratelimit()))
dev_err(priv->device,
"fail to alloc skb entry %d\n",
-@@ -2431,28 +3234,28 @@ static inline void stmmac_rx_refill(stru
+@@ -2443,28 +3246,28 @@ static inline void stmmac_rx_refill(stru
break;
}
netif_dbg(priv, rx_status, priv->dev,
"refill entry #%d\n", entry);
-@@ -2468,31 +3271,33 @@ static inline void stmmac_rx_refill(stru
+@@ -2480,31 +3283,33 @@ static inline void stmmac_rx_refill(stru
entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
}
priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
}
-@@ -2502,9 +3307,9 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2514,9 +3319,9 @@ static int stmmac_rx(struct stmmac_priv
struct dma_desc *np;
if (priv->extend_desc)
/* read the status of the incoming frame */
status = priv->hw->desc->rx_status(&priv->dev->stats,
-@@ -2515,20 +3320,20 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2527,20 +3332,20 @@ static int stmmac_rx(struct stmmac_priv
count++;
entry);
if (unlikely(status == discard_frame)) {
priv->dev->stats.rx_errors++;
-@@ -2538,9 +3343,9 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2550,9 +3355,9 @@ static int stmmac_rx(struct stmmac_priv
* them in stmmac_rx_refill() function so that
* device can reuse it.
*/
priv->dma_buf_sz,
DMA_FROM_DEVICE);
}
-@@ -2588,7 +3393,7 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2600,7 +3405,7 @@ static int stmmac_rx(struct stmmac_priv
*/
if (unlikely(!priv->plat->has_gmac4 &&
((frame_len < priv->rx_copybreak) ||
skb = netdev_alloc_skb_ip_align(priv->dev,
frame_len);
if (unlikely(!skb)) {
-@@ -2600,21 +3405,21 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2612,21 +3417,21 @@ static int stmmac_rx(struct stmmac_priv
}
dma_sync_single_for_cpu(priv->device,
if (unlikely(!skb)) {
netdev_err(priv->dev,
"%s: Inconsistent Rx chain\n",
-@@ -2623,12 +3428,12 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2635,12 +3440,12 @@ static int stmmac_rx(struct stmmac_priv
break;
}
prefetch(skb->data - NET_IP_ALIGN);
priv->dma_buf_sz,
DMA_FROM_DEVICE);
}
-@@ -2650,7 +3455,7 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2662,7 +3467,7 @@ static int stmmac_rx(struct stmmac_priv
else
skb->ip_summed = CHECKSUM_UNNECESSARY;
priv->dev->stats.rx_packets++;
priv->dev->stats.rx_bytes += frame_len;
-@@ -2658,7 +3463,7 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2670,7 +3475,7 @@ static int stmmac_rx(struct stmmac_priv
entry = next_entry;
}
priv->xstats.rx_pkt_n += count;
-@@ -2675,16 +3480,24 @@ static int stmmac_rx(struct stmmac_priv
+@@ -2687,16 +3492,24 @@ static int stmmac_rx(struct stmmac_priv
*/
static int stmmac_poll(struct napi_struct *napi, int budget)
{
}
return work_done;
}
-@@ -2700,9 +3513,12 @@ static int stmmac_poll(struct napi_struc
+@@ -2712,9 +3525,12 @@ static int stmmac_poll(struct napi_struc
static void stmmac_tx_timeout(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
}
/**
-@@ -2825,6 +3641,12 @@ static irqreturn_t stmmac_interrupt(int
+@@ -2837,6 +3653,12 @@ static irqreturn_t stmmac_interrupt(int
{
struct net_device *dev = (struct net_device *)dev_id;
struct stmmac_priv *priv = netdev_priv(dev);
if (priv->irq_wake)
pm_wakeup_event(priv->device, 0);
-@@ -2838,16 +3660,30 @@ static irqreturn_t stmmac_interrupt(int
+@@ -2850,16 +3672,30 @@ static irqreturn_t stmmac_interrupt(int
if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
int status = priv->hw->mac->host_irq_status(priv->hw,
&priv->xstats);
}
/* PCS link status */
-@@ -2932,7 +3768,7 @@ static void sysfs_display_ring(void *hea
+@@ -2944,7 +3780,7 @@ static void sysfs_display_ring(void *hea
ep++;
} else {
seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
le32_to_cpu(p->des0), le32_to_cpu(p->des1),
le32_to_cpu(p->des2), le32_to_cpu(p->des3));
p++;
-@@ -2945,17 +3781,40 @@ static int stmmac_sysfs_ring_read(struct
+@@ -2957,17 +3793,40 @@ static int stmmac_sysfs_ring_read(struct
{
struct net_device *dev = seq->private;
struct stmmac_priv *priv = netdev_priv(dev);
}
return 0;
-@@ -3238,11 +4097,14 @@ int stmmac_dvr_probe(struct device *devi
+@@ -3250,11 +4109,14 @@ int stmmac_dvr_probe(struct device *devi
struct plat_stmmacenet_data *plat_dat,
struct stmmac_resources *res)
{
if (!ndev)
return -ENOMEM;
-@@ -3284,6 +4146,10 @@ int stmmac_dvr_probe(struct device *devi
+@@ -3296,6 +4158,10 @@ int stmmac_dvr_probe(struct device *devi
if (ret)
goto error_hw_init;
ndev->netdev_ops = &stmmac_netdev_ops;
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-@@ -3316,7 +4182,12 @@ int stmmac_dvr_probe(struct device *devi
+@@ -3328,7 +4194,12 @@ int stmmac_dvr_probe(struct device *devi
"Enable RX Mitigation via HW Watchdog Timer\n");
}
spin_lock_init(&priv->lock);
-@@ -3361,7 +4232,11 @@ error_netdev_register:
+@@ -3373,7 +4244,11 @@ error_netdev_register:
priv->hw->pcs != STMMAC_PCS_RTBI)
stmmac_mdio_unregister(ndev);
error_mdio_register:
error_hw_init:
free_netdev(ndev);
-@@ -3382,10 +4257,9 @@ int stmmac_dvr_remove(struct device *dev
+@@ -3394,10 +4269,9 @@ int stmmac_dvr_remove(struct device *dev
netdev_info(priv->dev, "%s: removing driver", __func__);
netif_carrier_off(ndev);
unregister_netdev(ndev);
if (priv->plat->stmmac_rst)
-@@ -3424,20 +4298,19 @@ int stmmac_suspend(struct device *dev)
+@@ -3436,20 +4310,19 @@ int stmmac_suspend(struct device *dev)
spin_lock_irqsave(&priv->lock, flags);
netif_device_detach(ndev);
pinctrl_pm_select_sleep_state(priv->device);
/* Disable clock in case of PWM is off */
clk_disable(priv->plat->pclk);
-@@ -3453,6 +4326,31 @@ int stmmac_suspend(struct device *dev)
+@@ -3465,6 +4338,31 @@ int stmmac_suspend(struct device *dev)
EXPORT_SYMBOL_GPL(stmmac_suspend);
/**
* stmmac_resume - resume callback
* @dev: device pointer
* Description: when resume this function is invoked to setup the DMA and CORE
-@@ -3492,10 +4390,8 @@ int stmmac_resume(struct device *dev)
+@@ -3504,10 +4402,8 @@ int stmmac_resume(struct device *dev)
spin_lock_irqsave(&priv->lock, flags);
/* reset private mss value to force mss context settings at
* next tso xmit (only used for gmac4).
*/
-@@ -3507,9 +4403,9 @@ int stmmac_resume(struct device *dev)
+@@ -3519,9 +4415,9 @@ int stmmac_resume(struct device *dev)
stmmac_init_tx_coalesce(priv);
stmmac_set_rx_mode(ndev);
free_dma_tx_desc_resources(priv);
return ret;
-@@ -2902,8 +2900,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -2907,8 +2905,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
priv->xstats.tx_set_ic_bit++;
}
if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
priv->hwts_tx_en)) {
-@@ -2981,7 +2978,7 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -2993,7 +2990,7 @@ static netdev_tx_t stmmac_xmit(struct sk
/* Manage oversized TCP frames for GMAC4 device */
if (skb_is_gso(skb) && priv->tso) {
return stmmac_tso_xmit(skb, dev);
}
-@@ -3112,8 +3109,7 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -3124,8 +3121,7 @@ static netdev_tx_t stmmac_xmit(struct sk
priv->xstats.tx_set_ic_bit++;
}
/* Ready to fill the first descriptor and set the OWN bit w/o any
* problems because all the descriptors are actually ready to be
-@@ -3990,7 +3986,9 @@ static int stmmac_hw_init(struct stmmac_
+@@ -4002,7 +3998,9 @@ static int stmmac_hw_init(struct stmmac_
struct mac_device_info *mac;
/* Identify the MAC HW device */
priv->dev->priv_flags |= IFF_UNICAST_FLT;
mac = dwmac1000_setup(priv->ioaddr,
priv->plat->multicast_filter_bins,
-@@ -4010,6 +4008,10 @@ static int stmmac_hw_init(struct stmmac_
+@@ -4022,6 +4020,10 @@ static int stmmac_hw_init(struct stmmac_
priv->hw = mac;
/* To use the chained or ring mode */
if (priv->synopsys_id >= DWMAC_CORE_4_00) {
priv->hw->mode = &dwmac4_ring_mode_ops;
-@@ -4138,8 +4140,15 @@ int stmmac_dvr_probe(struct device *devi
+@@ -4150,8 +4152,15 @@ int stmmac_dvr_probe(struct device *devi
if ((phyaddr >= 0) && (phyaddr <= 31))
priv->plat->phy_addr = phyaddr;
/* Init MAC and get the capabilities */
ret = stmmac_hw_init(priv);
-@@ -4156,7 +4165,7 @@ int stmmac_dvr_probe(struct device *devi
+@@ -4168,7 +4177,7 @@ int stmmac_dvr_probe(struct device *devi
NETIF_F_RXCSUM;
if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
priv->tso = true;
dev_info(priv->device, "TSO feature enabled\n");
}
-@@ -4318,7 +4327,7 @@ int stmmac_suspend(struct device *dev)
+@@ -4330,7 +4339,7 @@ int stmmac_suspend(struct device *dev)
}
spin_unlock_irqrestore(&priv->lock, flags);