/* Set us so that we have processed and used all buffers, but have
* not restocked the Rx queue with fresh buffers */
rxq->read = rxq->write = 0;
- rxq->processed = RX_QUEUE_SIZE - 1;
rxq->free_count = 0;
spin_unlock_irqrestore(&rxq->lock, flags);
}
* Driver allocates buffers of this size for Rx
*/
-static inline int ipw_queue_space(const struct clx2_queue *q)
+/**
+ * ipw_rx_queue_space - Return number of free slots available in queue.
+ * @q: Rx queue to check
+ */
+static int ipw_rx_queue_space(const struct ipw_rx_queue *q)
+{
+ int s = q->read - q->write;
+ if (s <= 0)
+ s += RX_QUEUE_SIZE;
+ /* keep some headroom so a full queue is never mistaken for an empty one */
+ s -= 2;
+ if (s < 0)
+ s = 0;
+ return s;
+}
+
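The new Rx helper treats read and write as indices into a circular ring and keeps two slots of headroom, so read == write can only ever mean an empty ring and never a full one. A minimal standalone sketch of the same arithmetic (RX_QUEUE_SIZE is assumed to be 32 here; the helper name and sample indices are illustrative):

#include <stdio.h>

#define RX_QUEUE_SIZE 32	/* assumed ring size for this sketch */

/* Same arithmetic as ipw_rx_queue_space(): free slots between write and
 * read, minus a two-slot guard band so a completely full ring never
 * reports read == write and gets mistaken for an empty one. */
static int rx_space(int read, int write)
{
	int s = read - write;

	if (s <= 0)
		s += RX_QUEUE_SIZE;
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}

int main(void)
{
	printf("%d\n", rx_space(0, 0));	/* nothing restocked yet: 30 */
	printf("%d\n", rx_space(5, 3));	/* write trails read by two: 0 */
	printf("%d\n", rx_space(3, 5));	/* write ahead of read: 28 */
	return 0;
}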
+static inline int ipw_tx_queue_space(const struct clx2_queue *q)
{
int s = q->last_used - q->first_empty;
if (s <= 0)
priv->tx_packets++;
}
done:
- if ((ipw_queue_space(q) > q->low_mark) &&
+ if ((ipw_tx_queue_space(q) > q->low_mark) &&
(qindex >= 0) &&
(priv->status & STATUS_ASSOCIATED) && netif_running(priv->net_dev))
netif_wake_queue(priv->net_dev);
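The renamed Tx helper also drives flow control: the transmit path further down stops the net queue when free space falls below q->high_mark, and the reclaim path here wakes it only once space climbs back above q->low_mark, giving simple hysteresis. A standalone sketch of that watermark pattern, using illustrative names and values rather than the driver's own marks:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative thresholds; the driver derives its own per-ring marks. */
#define STOP_MARK 4	/* stop the net queue below this much free space */
#define WAKE_MARK 8	/* wake it again only above this much free space */

static bool stopped;

/* Transmit side: called after a frame has been queued to the ring. */
static void tx_submit(int space)
{
	if (space < STOP_MARK)
		stopped = true;
}

/* Reclaim side: called after used descriptors have been returned. */
static void tx_reclaim_done(int space)
{
	if (stopped && space > WAKE_MARK)
		stopped = false;
}

int main(void)
{
	tx_submit(3);		/* ring nearly full: queue stops */
	printf("stopped=%d\n", stopped);
	tx_reclaim_done(6);	/* inside the hysteresis gap: stay stopped */
	printf("stopped=%d\n", stopped);
	tx_reclaim_done(12);	/* enough headroom again: wake */
	printf("stopped=%d\n", stopped);
	return 0;
}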
struct clx2_queue *q = &txq->q;
struct tfd_frame *tfd;
- if (ipw_queue_space(q) < (sync ? 1 : 2)) {
+ if (ipw_tx_queue_space(q) < (sync ? 1 : 2)) {
IPW_ERROR("No space for Tx\n");
return -EBUSY;
}
spin_lock_irqsave(&rxq->lock, flags);
write = rxq->write;
- while ((rxq->write != rxq->processed) && (rxq->free_count)) {
+ while ((ipw_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
element = rxq->rx_free.next;
rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
list_del(element);
/* Set us so that we have processed and used all buffers, but have
* not restocked the Rx queue with fresh buffers */
rxq->read = rxq->write = 0;
- rxq->processed = RX_QUEUE_SIZE - 1;
rxq->free_count = 0;
return rxq;
struct ieee80211_hdr_4addr *header;
u32 r, w, i;
u8 network_packet;
+ u8 fill_rx = 0;
DECLARE_MAC_BUF(mac);
DECLARE_MAC_BUF(mac2);
DECLARE_MAC_BUF(mac3);
r = ipw_read32(priv, IPW_RX_READ_INDEX);
w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
- i = (priv->rxq->processed + 1) % RX_QUEUE_SIZE;
+ i = priv->rxq->read;
+
+ if (ipw_rx_queue_space(priv->rxq) > (RX_QUEUE_SIZE / 2))
+ fill_rx = 1;
while (i != r) {
rxb = priv->rxq->queue[i];
list_add_tail(&rxb->list, &priv->rxq->rx_used);
i = (i + 1) % RX_QUEUE_SIZE;
+
+ /* If there are a lot of unused frames, restock the Rx queue
+ * so the ucode won't assert */
+ if (fill_rx) {
+ priv->rxq->read = i;
+ ipw_rx_queue_replenish(priv);
+ }
}
- /* Backtrack one entry */
- priv->rxq->processed = (i ? i : RX_QUEUE_SIZE) - 1;
-
+ /* Remember where the next unprocessed entry begins */
+ priv->rxq->read = i;
ipw_rx_queue_restock(priv);
}
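With the processed index gone, the interrupt handler walks the ring from the driver's read index up to the hardware's read pointer; when more than half of the ring still needs fresh buffers it replenishes from inside the loop so the firmware is never starved of receive buffers, and the trailing ipw_rx_queue_restock() covers the normal case. A rough standalone sketch of that shape (the helpers below are illustrative stand-ins, not the driver's functions):

#include <stdio.h>

#define RING_SIZE 32

/* Illustrative stand-ins for the driver's replenish/restock helpers. */
static void replenish(void) { puts("  replenish inside loop"); }
static void restock(void)   { puts("  final restock"); }
static void process(int i)  { printf("  process slot %d\n", i); }

/* Free-slot count using the same guard-band arithmetic as above. */
static int space(int read, int write)
{
	int s = read - write;

	if (s <= 0)
		s += RING_SIZE;
	s -= 2;
	return s < 0 ? 0 : s;
}

/* Drain slots [read, hw_read); refill as we go when more than half the
 * ring is waiting for fresh buffers. Returns the new read index. */
static int drain(int read, int write, int hw_read)
{
	int fill = space(read, write) > RING_SIZE / 2;
	int i = read;

	while (i != hw_read) {
		process(i);
		i = (i + 1) % RING_SIZE;
		if (fill)
			replenish();
	}
	restock();
	return i;
}

int main(void)
{
	drain(28, 8, 2);	/* wrap-around: slots 28..31, then 0 and 1 */
	return 0;
}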
q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
ipw_write32(priv, q->reg_w, q->first_empty);
- if (ipw_queue_space(q) < q->high_mark)
+ if (ipw_tx_queue_space(q) < q->high_mark)
netif_stop_queue(priv->net_dev);
return NETDEV_TX_OK;
struct clx2_tx_queue *txq = &priv->txq[0];
#endif /* CONFIG_IPW2200_QOS */
- if (ipw_queue_space(&txq->q) < txq->q.high_mark)
+ if (ipw_tx_queue_space(&txq->q) < txq->q.high_mark)
return 1;
return 0;