}
static int
-mt76u_fill_rx_sg(struct mt76_dev *dev, struct urb *urb, int nsgs,
- gfp_t gfp)
+mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
+ int nsgs, gfp_t gfp)
{
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
int sglen = SKB_WITH_OVERHEAD(q->buf_size);
-
int i;
for (i = 0; i < nsgs; i++) {
}
static int
-mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
- struct urb *urb, int nsgs, gfp_t gfp)
+mt76u_refill_rx(struct mt76_dev *dev, struct urb *urb, int nsgs, gfp_t gfp)
{
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+
if (dev->usb.sg_en) {
- return mt76u_fill_rx_sg(dev, urb, nsgs, gfp);
+ return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);
} else {
urb->transfer_buffer_length = SKB_WITH_OVERHEAD(q->buf_size);
urb->transfer_buffer = page_frag_alloc(&q->rx_page,
static int
mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e)
{
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
struct urb *urb;
urb = usb_alloc_urb(0, GFP_KERNEL);
sg_init_table(urb->sg, MT_SG_MAX_SIZE);
}
- return mt76u_refill_rx(dev, q, urb, MT_SG_MAX_SIZE, GFP_KERNEL);
+ return mt76u_refill_rx(dev, urb, MT_SG_MAX_SIZE, GFP_KERNEL);
}
static void mt76u_urb_free(struct urb *urb)
count = mt76u_process_rx_entry(dev, urb);
if (count > 0) {
- err = mt76u_refill_rx(dev, q, urb, count,
- GFP_ATOMIC);
+ err = mt76u_refill_rx(dev, urb, count, GFP_ATOMIC);
if (err < 0)
break;
}
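
For reference, here is a minimal, self-contained C sketch of the call flow these hunks leave behind. The struct definitions, the MT_RXQ_MAIN value, the buffer size and the printf bodies are stand-ins invented for illustration (gfp flags, scatterlist handling and error paths are omitted); only the signature shapes mirror the patch: mt76u_refill_rx() now resolves the main RX queue itself, while mt76u_fill_rx_sg() receives the queue explicitly from its caller instead of looking it up on its own.

/*
 * Standalone sketch of the post-patch call flow. These are stub types,
 * not the kernel's struct mt76_dev / struct mt76_queue / struct urb.
 */
#include <stdio.h>

enum { MT_RXQ_MAIN = 0, MT_RXQ_NUM = 2 };

struct mt76_queue { int buf_size; };
struct mt76_usb   { int sg_en; };
struct mt76_dev   {
	struct mt76_queue q_rx[MT_RXQ_NUM];
	struct mt76_usb usb;
};
struct urb { int transfer_buffer_length; };

/* Scatter-gather fill: the queue is now a parameter, not an internal lookup. */
static int mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q,
			    struct urb *urb, int nsgs)
{
	printf("sg fill: %d segments of up to %d bytes\n", nsgs, q->buf_size);
	return 0;
}

/* Refill: callers no longer pass a queue; the helper picks MT_RXQ_MAIN. */
static int mt76u_refill_rx(struct mt76_dev *dev, struct urb *urb, int nsgs)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];

	if (dev->usb.sg_en)
		return mt76u_fill_rx_sg(dev, q, urb, nsgs);

	urb->transfer_buffer_length = q->buf_size;
	printf("linear fill: one buffer of %d bytes\n", q->buf_size);
	return 0;
}

int main(void)
{
	struct mt76_dev dev = {
		.q_rx = { [MT_RXQ_MAIN] = { .buf_size = 2048 } },
		.usb  = { .sg_en = 1 },
	};
	struct urb urb = { 0 };

	/* Callers (e.g. mt76u_urb_alloc() in the hunks above) use the queue-less form. */
	return mt76u_refill_rx(&dev, &urb, 8);
}

The design point the sketch tries to show: mt76u_fill_rx_sg() no longer hard-codes MT_RXQ_MAIN, and the callers of mt76u_refill_rx() shown in the hunks above (mt76u_urb_alloc() and the RX completion path) drop a queue argument they previously had to supply themselves.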