 	ath10k_htt_rx_msdu_buff_replenish(htt);
 }
-void ath10k_htt_rx_detach(struct ath10k_htt *htt)
+static void ath10k_htt_rx_ring_clean_up(struct ath10k_htt *htt)
 {
-	int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld;
+	struct sk_buff *skb;
+	int i;
+
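+	/* Release every buffer still held in the ring; skip slots that are NULL. */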
+	for (i = 0; i < htt->rx_ring.size; i++) {
+		skb = htt->rx_ring.netbufs_ring[i];
+		if (!skb)
+			continue;
+
+		dma_unmap_single(htt->ar->dev, ATH10K_SKB_CB(skb)->paddr,
+				 skb->len + skb_tailroom(skb),
+				 DMA_FROM_DEVICE);
+		dev_kfree_skb_any(skb);
+		htt->rx_ring.netbufs_ring[i] = NULL;
+	}
+}
+
+void ath10k_htt_rx_detach(struct ath10k_htt *htt)
+{
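+	/* Stop the refill retry timer and tasklets before freeing ring state. */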
 	del_timer_sync(&htt->rx_ring.refill_retry_timer);
 	tasklet_kill(&htt->rx_replenish_task);
 	tasklet_kill(&htt->txrx_compl_task);
 	skb_queue_purge(&htt->tx_compl_q);
 	skb_queue_purge(&htt->rx_compl_q);
-	while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) {
-		struct sk_buff *skb =
-				htt->rx_ring.netbufs_ring[sw_rd_idx];
-		struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
-
-		dma_unmap_single(htt->ar->dev, cb->paddr,
-				 skb->len + skb_tailroom(skb),
-				 DMA_FROM_DEVICE);
-		dev_kfree_skb_any(htt->rx_ring.netbufs_ring[sw_rd_idx]);
-		sw_rd_idx++;
-		sw_rd_idx &= htt->rx_ring.size_mask;
-	}
+	ath10k_htt_rx_ring_clean_up(htt);
 	dma_free_coherent(htt->ar->dev,
 			  (htt->rx_ring.size *
 	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
 	msdu = htt->rx_ring.netbufs_ring[idx];
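+	/* Clear the slot so the cleanup walk does not free the popped buffer again. */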
+	htt->rx_ring.netbufs_ring[idx] = NULL;
 	idx++;
 	idx &= htt->rx_ring.size_mask;
 	htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt);
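+	/* Zeroed allocation keeps unused slots NULL for the cleanup walk. */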
 	htt->rx_ring.netbufs_ring =
-		kmalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
+		kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
 			GFP_KERNEL);
 	if (!htt->rx_ring.netbufs_ring)
 		goto err_netbuf;