         return txcp;
 }
-static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
+static u16 be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
 {
         struct be_queue_info *txq = &adapter->tx_obj.q;
         struct be_eth_wrb *wrb;
                 queue_tail_inc(txq);
         } while (cur_index != last_index);
-        atomic_sub(num_wrbs, &txq->used);
-
         kfree_skb(sent_skb);
+        return num_wrbs;
 }
 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
         struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
         struct be_queue_info *txq = &adapter->tx_obj.q;
         struct be_eth_tx_compl *txcp;
-        u16 end_idx, cmpl = 0, timeo = 0;
+        u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
         struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
         struct sk_buff *sent_skb;
         bool dummy_wrb;
                 while ((txcp = be_tx_compl_get(tx_cq))) {
                         end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
                                         wrb_index, txcp);
-                        be_tx_compl_process(adapter, end_idx);
+                        num_wrbs += be_tx_compl_process(adapter, end_idx);
                         cmpl++;
                 }
                 if (cmpl) {
                         be_cq_notify(adapter, tx_cq->id, false, cmpl);
+                        atomic_sub(num_wrbs, &txq->used);
                         cmpl = 0;
+                        num_wrbs = 0;
                 }
                 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
                 index_adv(&end_idx,
                         wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
                         txq->len);
-                be_tx_compl_process(adapter, end_idx);
+                num_wrbs = be_tx_compl_process(adapter, end_idx);
+                atomic_sub(num_wrbs, &txq->used);
         }
 }
         struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
         struct be_eth_tx_compl *txcp;
         int tx_compl = 0, mcc_compl, status = 0;
-        u16 end_idx;
+        u16 end_idx, num_wrbs = 0;
         while ((txcp = be_tx_compl_get(tx_cq))) {
                 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
                                 wrb_index, txcp);
-                be_tx_compl_process(adapter, end_idx);
+                num_wrbs += be_tx_compl_process(adapter, end_idx);
                 tx_compl++;
         }
         if (tx_compl) {
                 be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
+                atomic_sub(num_wrbs, &txq->used);
+
                 /* As Tx wrbs have been freed up, wake up netdev queue if
                  * it was stopped due to lack of tx wrbs.
                  */