}
}
-@@ -3575,7 +3575,7 @@ static int stmmac_request_irq_multi_msi(
+@@ -3576,7 +3576,7 @@ static int stmmac_request_irq_multi_msi(
sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
ret = request_irq(priv->rx_irq[i],
stmmac_msi_intr_rx,
if (unlikely(ret < 0)) {
netdev_err(priv->dev,
"%s: alloc rx-%d MSI %d (error: %d)\n",
-@@ -3598,7 +3598,7 @@ static int stmmac_request_irq_multi_msi(
+@@ -3599,7 +3599,7 @@ static int stmmac_request_irq_multi_msi(
sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
ret = request_irq(priv->tx_irq[i],
stmmac_msi_intr_tx,
if (unlikely(ret < 0)) {
netdev_err(priv->dev,
"%s: alloc tx-%d MSI %d (error: %d)\n",
-@@ -3729,21 +3729,21 @@ static int stmmac_open(struct net_device
+@@ -3730,21 +3730,21 @@ static int stmmac_open(struct net_device
bfsize = 0;
if (bfsize < BUF_SIZE_16KiB)
int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
/* Setup per-TXQ tbs flag before TX descriptor alloc */
-@@ -3801,7 +3801,7 @@ irq_error:
+@@ -3802,7 +3802,7 @@ irq_error:
phylink_stop(priv->phylink);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
stmmac_hw_teardown(dev);
init_error:
-@@ -3843,7 +3843,7 @@ static int stmmac_release(struct net_dev
+@@ -3844,7 +3844,7 @@ static int stmmac_release(struct net_dev
stmmac_disable_all_queues(priv);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
netif_tx_disable(dev);
-@@ -3907,7 +3907,7 @@ static bool stmmac_vlan_insert(struct st
+@@ -3908,7 +3908,7 @@ static bool stmmac_vlan_insert(struct st
return false;
stmmac_set_tx_owner(priv, p);
return true;
}
-@@ -3925,7 +3925,7 @@ static bool stmmac_vlan_insert(struct st
+@@ -3926,7 +3926,7 @@ static bool stmmac_vlan_insert(struct st
static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
int total_len, bool last_segment, u32 queue)
{
struct dma_desc *desc;
u32 buff_size;
int tmp_len;
-@@ -3936,7 +3936,7 @@ static void stmmac_tso_allocator(struct
+@@ -3937,7 +3937,7 @@ static void stmmac_tso_allocator(struct
dma_addr_t curr_addr;
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
if (tx_q->tbs & STMMAC_TBS_AVAIL)
-@@ -3964,7 +3964,7 @@ static void stmmac_tso_allocator(struct
+@@ -3965,7 +3965,7 @@ static void stmmac_tso_allocator(struct
static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
{
int desc_size;
if (likely(priv->extend_desc))
-@@ -4026,7 +4026,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -4027,7 +4027,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
dma_addr_t des;
int i;
first_tx = tx_q->cur_tx;
/* Compute header lengths */
-@@ -4066,7 +4066,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -4067,7 +4067,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
stmmac_set_mss(priv, mss_desc, mss);
tx_q->mss = mss;
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
}
-@@ -4178,7 +4178,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
+@@ -4179,7 +4179,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
* ndo_start_xmit will fill this descriptor the next time it's
* called and stmmac_tx_clean may clean up to this descriptor.
*/
if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
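The comment above refers to the usual ring back-pressure check: when fewer than MAX_SKB_FRAGS + 1 descriptors remain, the per-queue netdev TX queue is stopped so the next ndo_start_xmit cannot catch up with stmmac_tx_clean(). A sketch of that check; the netif_tx_stop_queue() call is the expected continuation of the truncated context and is shown here only for illustration:

	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, priv->dev,
			  "%s: stop transmitted packets\n", __func__);
		/* stop this TX queue until stmmac_tx_clean() frees descriptors */
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
	}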
-@@ -4266,7 +4266,7 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -4267,7 +4267,7 @@ static netdev_tx_t stmmac_xmit(struct sk
int entry, first_tx;
dma_addr_t des;
first_tx = tx_q->cur_tx;
if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
-@@ -4329,7 +4329,7 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -4330,7 +4330,7 @@ static netdev_tx_t stmmac_xmit(struct sk
int len = skb_frag_size(frag);
bool last_segment = (i == (nfrags - 1));
WARN_ON(tx_q->tx_skbuff[entry]);
if (likely(priv->extend_desc))
-@@ -4400,7 +4400,7 @@ static netdev_tx_t stmmac_xmit(struct sk
+@@ -4401,7 +4401,7 @@ static netdev_tx_t stmmac_xmit(struct sk
* ndo_start_xmit will fill this descriptor the next time it's
* called and stmmac_tx_clean may clean up to this descriptor.
*/
tx_q->cur_tx = entry;
if (netif_msg_pktdata(priv)) {
-@@ -4512,7 +4512,7 @@ static void stmmac_rx_vlan(struct net_de
+@@ -4513,7 +4513,7 @@ static void stmmac_rx_vlan(struct net_de
*/
static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
{
int dirty = stmmac_rx_dirty(priv, queue);
unsigned int entry = rx_q->dirty_rx;
-@@ -4562,7 +4562,7 @@ static inline void stmmac_rx_refill(stru
+@@ -4563,7 +4563,7 @@ static inline void stmmac_rx_refill(stru
dma_wmb();
stmmac_set_rx_owner(priv, p, use_rx_wd);
}
rx_q->dirty_rx = entry;
rx_q->rx_tail_addr = rx_q->dma_rx_phy +
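The refill hunks above follow the standard descriptor-ownership handoff: every descriptor field is written first, dma_wmb() orders those writes against the OWN bit, and only then is the descriptor released to the DMA engine. Sketched with the helpers visible in the context; stmmac_set_desc_addr() and buf->addr are assumptions for illustration:

	/* publish the new buffer address while the host still owns the descriptor */
	stmmac_set_desc_addr(priv, p, buf->addr);
	/* make all descriptor writes visible before the OWN bit flips */
	dma_wmb();
	stmmac_set_rx_owner(priv, p, use_rx_wd);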
-@@ -4590,12 +4590,12 @@ static unsigned int stmmac_rx_buf1_len(s
+@@ -4591,12 +4591,12 @@ static unsigned int stmmac_rx_buf1_len(s
/* First descriptor, not last descriptor and not split header */
if (status & rx_not_ls)
}
static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
-@@ -4611,7 +4611,7 @@ static unsigned int stmmac_rx_buf2_len(s
+@@ -4612,7 +4612,7 @@ static unsigned int stmmac_rx_buf2_len(s
/* Not last descriptor */
if (status & rx_not_ls)
plen = stmmac_get_rx_frame_len(priv, p, coe);
-@@ -4622,7 +4622,7 @@ static unsigned int stmmac_rx_buf2_len(s
+@@ -4623,7 +4623,7 @@ static unsigned int stmmac_rx_buf2_len(s
static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
struct xdp_frame *xdpf, bool dma_map)
{
unsigned int entry = tx_q->cur_tx;
struct dma_desc *tx_desc;
dma_addr_t dma_addr;
-@@ -4685,7 +4685,7 @@ static int stmmac_xdp_xmit_xdpf(struct s
+@@ -4686,7 +4686,7 @@ static int stmmac_xdp_xmit_xdpf(struct s
stmmac_enable_dma_transmission(priv, priv->ioaddr);
tx_q->cur_tx = entry;
return STMMAC_XDP_TX;
-@@ -4859,7 +4859,7 @@ static void stmmac_dispatch_skb_zc(struc
+@@ -4860,7 +4860,7 @@ static void stmmac_dispatch_skb_zc(struc
static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
{
unsigned int entry = rx_q->dirty_rx;
struct dma_desc *rx_desc = NULL;
bool ret = true;
-@@ -4902,7 +4902,7 @@ static bool stmmac_rx_refill_zc(struct s
+@@ -4903,7 +4903,7 @@ static bool stmmac_rx_refill_zc(struct s
dma_wmb();
stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
}
if (rx_desc) {
-@@ -4917,7 +4917,7 @@ static bool stmmac_rx_refill_zc(struct s
+@@ -4918,7 +4918,7 @@ static bool stmmac_rx_refill_zc(struct s
static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
{
unsigned int count = 0, error = 0, len = 0;
int dirty = stmmac_rx_dirty(priv, queue);
unsigned int next_entry = rx_q->cur_rx;
-@@ -4939,7 +4939,7 @@ static int stmmac_rx_zc(struct stmmac_pr
+@@ -4940,7 +4940,7 @@ static int stmmac_rx_zc(struct stmmac_pr
desc_size = sizeof(struct dma_desc);
}
rx_q->dma_rx_phy, desc_size);
}
while (count < limit) {
-@@ -4986,7 +4986,7 @@ read_again:
+@@ -4987,7 +4987,7 @@ read_again:
/* Prefetch the next RX descriptor */
rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
next_entry = rx_q->cur_rx;
if (priv->extend_desc)
-@@ -5107,7 +5107,7 @@ read_again:
+@@ -5108,7 +5108,7 @@ read_again:
*/
static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
{
struct stmmac_channel *ch = &priv->channel[queue];
unsigned int count = 0, error = 0, len = 0;
int status = 0, coe = priv->hw->rx_csum;
-@@ -5120,7 +5120,7 @@ static int stmmac_rx(struct stmmac_priv
+@@ -5121,7 +5121,7 @@ static int stmmac_rx(struct stmmac_priv
int buf_sz;
dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
if (netif_msg_rx_status(priv)) {
void *rx_head;
-@@ -5134,7 +5134,7 @@ static int stmmac_rx(struct stmmac_priv
+@@ -5135,7 +5135,7 @@ static int stmmac_rx(struct stmmac_priv
desc_size = sizeof(struct dma_desc);
}
rx_q->dma_rx_phy, desc_size);
}
while (count < limit) {
-@@ -5178,7 +5178,7 @@ read_again:
+@@ -5179,7 +5179,7 @@ read_again:
break;
rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
next_entry = rx_q->cur_rx;
if (priv->extend_desc)
-@@ -5312,7 +5312,7 @@ read_again:
+@@ -5313,7 +5313,7 @@ read_again:
buf1_len, dma_dir);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
buf->page, buf->page_offset, buf1_len,
/* Data payload appended into SKB */
page_pool_release_page(rx_q->page_pool, buf->page);
-@@ -5324,7 +5324,7 @@ read_again:
+@@ -5325,7 +5325,7 @@ read_again:
buf2_len, dma_dir);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
buf->sec_page, 0, buf2_len,
/* Data payload appended into SKB */
page_pool_release_page(rx_q->page_pool, buf->sec_page);
-@@ -5767,11 +5767,13 @@ static irqreturn_t stmmac_safety_interru
+@@ -5768,11 +5768,13 @@ static irqreturn_t stmmac_safety_interru
static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
{
struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
if (unlikely(!data)) {
netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
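The handler above is one of the per-queue MSI entry points: request_irq() passes the TX queue itself as the cookie, so the handler recovers the queue, validates the pointer, and defers the real completion work to NAPI. A simplified sketch, assuming the driver's stmmac_napi_check() helper and a container_of() recovery; neither line is taken from the patch itself:

static irqreturn_t example_msi_intr_tx(int irq, void *data)
{
	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
	struct stmmac_priv *priv;
	int chan;

	if (unlikely(!data))
		return IRQ_NONE;

	/* the TX queue array is embedded in the private struct, so it can be recovered */
	chan = tx_q->queue_index;
	priv = container_of(tx_q, struct stmmac_priv, tx_queue[chan]);

	/* check the per-channel DMA status and schedule the TX NAPI instance */
	stmmac_napi_check(priv, chan, DMA_DIR_TX);

	return IRQ_HANDLED;
}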
-@@ -5811,10 +5813,12 @@ static irqreturn_t stmmac_msi_intr_tx(in
+@@ -5812,10 +5814,12 @@ static irqreturn_t stmmac_msi_intr_tx(in
static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
{
struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
if (unlikely(!data)) {
netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
-@@ -5845,10 +5849,10 @@ static void stmmac_poll_controller(struc
+@@ -5846,10 +5850,10 @@ static void stmmac_poll_controller(struc
if (priv->plat->multi_msi_en) {
for (i = 0; i < priv->plat->rx_queues_to_use; i++)
} else {
disable_irq(dev->irq);
stmmac_interrupt(dev->irq, dev);
-@@ -6029,34 +6033,34 @@ static int stmmac_rings_status_show(stru
+@@ -6030,34 +6034,34 @@ static int stmmac_rings_status_show(stru
return 0;
for (queue = 0; queue < rx_count; queue++) {
}
}
-@@ -6403,7 +6407,7 @@ void stmmac_disable_rx_queue(struct stmm
+@@ -6404,7 +6408,7 @@ void stmmac_disable_rx_queue(struct stmm
void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
{
struct stmmac_channel *ch = &priv->channel[queue];
unsigned long flags;
u32 buf_size;
-@@ -6440,7 +6444,7 @@ void stmmac_enable_rx_queue(struct stmma
+@@ -6441,7 +6445,7 @@ void stmmac_enable_rx_queue(struct stmma
rx_q->queue_index);
} else {
stmmac_set_dma_bfsize(priv, priv->ioaddr,
rx_q->queue_index);
}
-@@ -6466,7 +6470,7 @@ void stmmac_disable_tx_queue(struct stmm
+@@ -6467,7 +6471,7 @@ void stmmac_disable_tx_queue(struct stmm
void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
{
struct stmmac_channel *ch = &priv->channel[queue];
unsigned long flags;
int ret;
-@@ -6516,7 +6520,7 @@ void stmmac_xdp_release(struct net_devic
+@@ -6517,7 +6521,7 @@ void stmmac_xdp_release(struct net_devic
stmmac_disable_all_queues(priv);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
/* Free the IRQ lines */
stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
-@@ -6575,7 +6579,7 @@ int stmmac_xdp_open(struct net_device *d
+@@ -6576,7 +6580,7 @@ int stmmac_xdp_open(struct net_device *d
/* DMA RX Channel Configuration */
for (chan = 0; chan < rx_cnt; chan++) {
stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
rx_q->dma_rx_phy, chan);
-@@ -6593,7 +6597,7 @@ int stmmac_xdp_open(struct net_device *d
+@@ -6594,7 +6598,7 @@ int stmmac_xdp_open(struct net_device *d
rx_q->queue_index);
} else {
stmmac_set_dma_bfsize(priv, priv->ioaddr,
rx_q->queue_index);
}
-@@ -6602,7 +6606,7 @@ int stmmac_xdp_open(struct net_device *d
+@@ -6603,7 +6607,7 @@ int stmmac_xdp_open(struct net_device *d
/* DMA TX Channel Configuration */
for (chan = 0; chan < tx_cnt; chan++) {
stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
tx_q->dma_tx_phy, chan);
-@@ -6635,7 +6639,7 @@ int stmmac_xdp_open(struct net_device *d
+@@ -6636,7 +6640,7 @@ int stmmac_xdp_open(struct net_device *d
irq_error:
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
stmmac_hw_teardown(dev);
init_error:
-@@ -6662,8 +6666,8 @@ int stmmac_xsk_wakeup(struct net_device
+@@ -6663,8 +6667,8 @@ int stmmac_xsk_wakeup(struct net_device
queue >= priv->plat->tx_queues_to_use)
return -EINVAL;
ch = &priv->channel[queue];
if (!rx_q->xsk_pool && !tx_q->xsk_pool)
-@@ -6923,8 +6927,8 @@ int stmmac_reinit_ringparam(struct net_d
+@@ -6924,8 +6928,8 @@ int stmmac_reinit_ringparam(struct net_d
if (netif_running(dev))
stmmac_release(dev);
if (netif_running(dev))
ret = stmmac_open(dev);
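The hunk above touches stmmac_reinit_ringparam(), which uses the usual quiesce/update/restart shape: release the interface if it is running, record the new ring sizes, then reopen. A minimal sketch of that shape; the dma_rx_size/dma_tx_size field names are assumptions, since their location has moved between kernel versions:

static int example_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = 0;

	if (netif_running(dev))
		stmmac_release(dev);

	priv->dma_rx_size = rx_size;	/* assumed field names */
	priv->dma_tx_size = tx_size;

	if (netif_running(dev))
		ret = stmmac_open(dev);

	return ret;
}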
-@@ -7359,7 +7363,7 @@ int stmmac_suspend(struct device *dev)
+@@ -7360,7 +7364,7 @@ int stmmac_suspend(struct device *dev)
stmmac_disable_all_queues(priv);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
if (priv->eee_enabled) {
priv->tx_path_in_lpi_mode = false;
-@@ -7411,7 +7415,7 @@ EXPORT_SYMBOL_GPL(stmmac_suspend);
+@@ -7412,7 +7416,7 @@ EXPORT_SYMBOL_GPL(stmmac_suspend);
static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
{
rx_q->cur_rx = 0;
rx_q->dirty_rx = 0;
-@@ -7419,7 +7423,7 @@ static void stmmac_reset_rx_queue(struct
+@@ -7420,7 +7424,7 @@ static void stmmac_reset_rx_queue(struct
static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
{
stmmac_reset_tx_queue(priv, chan);
stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
tx_q->dma_tx_phy, chan);
-@@ -3685,19 +3748,93 @@ static int stmmac_request_irq(struct net
+@@ -3686,19 +3749,93 @@ static int stmmac_request_irq(struct net
}
/**
u32 chan;
int ret;
-@@ -3724,45 +3861,10 @@ static int stmmac_open(struct net_device
+@@ -3725,45 +3862,10 @@ static int stmmac_open(struct net_device
memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
priv->xstats.threshold = tc;
if (priv->plat->serdes_powerup) {
ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
-@@ -3805,14 +3907,28 @@ irq_error:
+@@ -3806,14 +3908,28 @@ irq_error:
stmmac_hw_teardown(dev);
init_error:
static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
{
set_bit(__FPE_REMOVING, &priv->fpe_task_state);
-@@ -3859,7 +3975,7 @@ static int stmmac_release(struct net_dev
+@@ -3860,7 +3976,7 @@ static int stmmac_release(struct net_dev
stmmac_stop_all_dma(priv);
/* Release and free the Rx/Tx resources */
/* Disable the MAC Rx/Tx */
stmmac_mac_set(priv, priv->ioaddr, false);
-@@ -6402,7 +6518,7 @@ void stmmac_disable_rx_queue(struct stmm
+@@ -6403,7 +6519,7 @@ void stmmac_disable_rx_queue(struct stmm
spin_unlock_irqrestore(&ch->lock, flags);
stmmac_stop_rx_dma(priv, queue);
}
void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
-@@ -6413,21 +6529,21 @@ void stmmac_enable_rx_queue(struct stmma
+@@ -6414,21 +6530,21 @@ void stmmac_enable_rx_queue(struct stmma
u32 buf_size;
int ret;
stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
rx_q->dma_rx_phy, rx_q->queue_index);
-@@ -6465,7 +6581,7 @@ void stmmac_disable_tx_queue(struct stmm
+@@ -6466,7 +6582,7 @@ void stmmac_disable_tx_queue(struct stmm
spin_unlock_irqrestore(&ch->lock, flags);
stmmac_stop_tx_dma(priv, queue);
}
void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
-@@ -6475,21 +6591,21 @@ void stmmac_enable_tx_queue(struct stmma
+@@ -6476,21 +6592,21 @@ void stmmac_enable_tx_queue(struct stmma
unsigned long flags;
int ret;
stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
tx_q->dma_tx_phy, tx_q->queue_index);
-@@ -6529,7 +6645,7 @@ void stmmac_xdp_release(struct net_devic
+@@ -6530,7 +6646,7 @@ void stmmac_xdp_release(struct net_devic
stmmac_stop_all_dma(priv);
/* Release and free the Rx/Tx resources */
/* Disable the MAC Rx/Tx */
stmmac_mac_set(priv, priv->ioaddr, false);
-@@ -6554,14 +6670,14 @@ int stmmac_xdp_open(struct net_device *d
+@@ -6555,14 +6671,14 @@ int stmmac_xdp_open(struct net_device *d
u32 chan;
int ret;
if (ret < 0) {
netdev_err(dev, "%s: DMA descriptors initialization failed\n",
__func__);
-@@ -6643,7 +6759,7 @@ irq_error:
+@@ -6644,7 +6760,7 @@ irq_error:
stmmac_hw_teardown(dev);
init_error:
dma_desc_error:
return ret;
}
-@@ -7506,7 +7622,7 @@ int stmmac_resume(struct device *dev)
+@@ -7507,7 +7623,7 @@ int stmmac_resume(struct device *dev)
stmmac_reset_queues_param(priv);
stmmac_free_tx_skbufs(priv);