	priv->cfg->ops->lib->rx_handler_setup(priv);
 }
-/*
- * this should be called while priv->lock is locked
-*/
-static void __iwl_rx_replenish(struct iwl_priv *priv)
-{
-	iwl_rx_allocate(priv);
-	iwl_rx_queue_restock(priv);
-}
-
-
/**
* iwl_rx_handle - Main entry function for receiving responses from uCode
*
			count++;
			if (count >= 8) {
				priv->rxq.read = i;
-				__iwl_rx_replenish(priv);
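+				/*
+				 * Restock only here: iwl_rx_allocate() now
+				 * uses GFP_KERNEL and may sleep, so it can
+				 * no longer be called from this path.
+				 */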
+				iwl_rx_queue_restock(priv);
				count = 0;
			}
		}
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	unsigned long flags;
-	spin_lock_irqsave(&rxq->lock, flags);
-	while (!list_empty(&rxq->rx_used)) {
+
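+	/*
+	 * Take rxq->lock only around the list manipulation; the SKB
+	 * allocation below can then sleep outside the lock.
+	 */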
+	while (1) {
+		spin_lock_irqsave(&rxq->lock, flags);
+
+		if (list_empty(&rxq->rx_used)) {
+			spin_unlock_irqrestore(&rxq->lock, flags);
+			return;
+		}
		element = rxq->rx_used.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
+		list_del(element);
+
+		spin_unlock_irqrestore(&rxq->lock, flags);
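+		/*
+		 * The buffer is now off the rx_used list and owned by this
+		 * thread, so the SKB can be allocated without the lock held.
+		 */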
		/* Alloc a new receive buffer */
		rxb->skb = alloc_skb(priv->hw_params.rx_buf_size + 256,
-				     __GFP_NOWARN | GFP_ATOMIC);
+				     GFP_KERNEL);
		if (!rxb->skb) {
-			if (net_ratelimit())
-				printk(KERN_CRIT DRV_NAME
-				       ": Can not allocate SKB buffers\n");
+			printk(KERN_CRIT DRV_NAME
+			       ": Can not allocate SKB buffers\n");
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			break;
		}
-		priv->alloc_rxb_skb++;
-		list_del(element);

		/* Get physical address of RB/SKB */
		rxb->real_dma_addr = pci_map_single(
					priv->pci_dev,
					rxb->skb->data,
					priv->hw_params.rx_buf_size + 256,
					PCI_DMA_FROMDEVICE);
		rxb->aligned_dma_addr = ALIGN(rxb->real_dma_addr, 256);
		skb_reserve(rxb->skb, rxb->aligned_dma_addr - rxb->real_dma_addr);
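+		/* Retake the lock just long enough to queue the new buffer. */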
+		spin_lock_irqsave(&rxq->lock, flags);
+
		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;
+		priv->alloc_rxb_skb++;
+
+		spin_unlock_irqrestore(&rxq->lock, flags);
	}
-	spin_unlock_irqrestore(&rxq->lock, flags);
}
-EXPORT_SYMBOL(iwl_rx_allocate);
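+/*
+ * Replenish the RX queue's pool of free buffers. Since
+ * iwl_rx_allocate() may now sleep (GFP_KERNEL), this must be
+ * called from process context.
+ */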
void iwl_rx_replenish(struct iwl_priv *priv)
{