 	struct mt76_txwi_cache *t;
 	struct sk_buff *iter;
 	dma_addr_t addr;
+	u8 *txwi;
 
 	t = mt76_get_txwi(dev);
 	if (!t) {
 		ieee80211_free_txskb(dev->hw, skb);
 		return -ENOMEM;
 	}
+	txwi = mt76_get_txwi_ptr(dev, t);
 
 	skb->prev = skb->next = NULL;
 	if (dev->drv->tx_aligned4_skbs)
 		mt76_insert_hdr_pad(skb);
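/*
 * Later in the same function (upstream this is mt76_dma_tx_queue_skb() in
 * dma.c), the descriptor sync and the tx_prepare_skb() driver callback
 * switch from the fixed-size t->txwi array to the driver-provided
 * dev->drv->txwi_size and the derived txwi pointer. The buffer mapping and
 * the skb_walk_frags() loop in between are unchanged and elided here:
 */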
 	}
 	tx_info.nbuf = n;
 
-	dma_sync_single_for_cpu(dev->dev, t->dma_addr, sizeof(t->txwi),
+	dma_sync_single_for_cpu(dev->dev, t->dma_addr, dev->drv->txwi_size,
 				DMA_TO_DEVICE);
-	ret = dev->drv->tx_prepare_skb(dev, &t->txwi, skb, qid, wcid, sta,
+	ret = dev->drv->tx_prepare_skb(dev, txwi, skb, qid, wcid, sta,
 				       &tx_info);
-	dma_sync_single_for_device(dev->dev, t->dma_addr, sizeof(t->txwi),
+	dma_sync_single_for_device(dev->dev, t->dma_addr, dev->drv->txwi_size,
 				   DMA_TO_DEVICE);
 	if (ret < 0)
 		goto unmap;
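/*
 * In mt76.h, struct mt76_txwi_cache loses the fixed u32 txwi[8] array; the
 * hardware descriptor is now carved out in front of the struct and its size
 * comes from mt76_driver_ops. A minimal sketch of how a chipset driver is
 * expected to supply that size (the ops name and the other fields are
 * illustrative only; the mt76x02 drivers use sizeof(struct mt76x02_txwi)):
 *
 *	static const struct mt76_driver_ops drv_ops = {
 *		.txwi_size = sizeof(struct mt76x02_txwi),
 *		...
 *	};
 */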
 };
 
 struct mt76_txwi_cache {
-	u32 txwi[8];
-	dma_addr_t dma_addr;
 	struct list_head list;
+	dma_addr_t dma_addr;
 };
 
 struct mt76_rx_tid {
 	struct rcu_head rcu_head;
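/*
 * Still in mt76.h: the new mt76_get_txwi_ptr() helper recovers the
 * descriptor address from a cache entry by stepping back txwi_size bytes,
 * mirroring the layout built in mt76_alloc_txwi() further down:
 */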
 int mt76_eeprom_init(struct mt76_dev *dev, int len);
 void mt76_eeprom_override(struct mt76_dev *dev);
 
+static inline u8 *
+mt76_get_txwi_ptr(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+{
+	return (u8 *)t - dev->drv->txwi_size;
+}
+
 /* increment with wrap-around */
 static inline int mt76_incr(int val, int size)
 {
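/*
 * The mt76x02 tx-completion path (upstream this is mt76x02_tx_complete_skb())
 * likewise fetches the descriptor through the helper instead of the removed
 * e->txwi->txwi array:
 */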
 {
 	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
 	struct mt76x02_txwi *txwi;
+	u8 *txwi_ptr;
 
 	if (!e->txwi) {
 		dev_kfree_skb_any(e->skb);
 		return;
 	}
 
 	mt76x02_mac_poll_tx_status(dev, false);
 
-	txwi = (struct mt76x02_txwi *) &e->txwi->txwi;
+	txwi_ptr = mt76_get_txwi_ptr(mdev, e->txwi);
+	txwi = (struct mt76x02_txwi *)txwi_ptr;
 	trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid);
 
 	mt76_tx_complete_skb(mdev, e->skb);
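/*
 * With the array gone, the compile-time size check against
 * struct mt76x02_txwi no longer has anything to compare and is dropped
 * (upstream this hunk is in mt76x02_dma_init()):
 */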
 	struct mt76_queue *q;
 	void *status_fifo;
 
-	BUILD_BUG_ON(sizeof(t->txwi) < sizeof(struct mt76x02_txwi));
 	BUILD_BUG_ON(sizeof(struct mt76x02_rxwi) > MT_RX_HEADROOM);
 
 	fifo_size = roundup_pow_of_two(32 * sizeof(struct mt76x02_tx_status));
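/*
 * The allocator (upstream mt76_alloc_txwi() in tx.c) now reserves txwi_size
 * bytes for the DMA-mapped descriptor followed by the bookkeeping struct,
 * instead of embedding the descriptor in the struct itself:
 */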
 {
 	struct mt76_txwi_cache *t;
 	dma_addr_t addr;
+	u8 *txwi;
 	int size;
 
-	size = (sizeof(*t) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
-	t = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
-	if (!t)
+	size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
+	txwi = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
+	if (!txwi)
 		return NULL;
 
-	addr = dma_map_single(dev->dev, &t->txwi, sizeof(t->txwi),
+	addr = dma_map_single(dev->dev, txwi, dev->drv->txwi_size,
 			      DMA_TO_DEVICE);
+	t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
 	t->dma_addr = addr;
 
 	return t;
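/*
 * Resulting layout of one L1_CACHE_ALIGN(txwi_size + sizeof(*t)) block:
 *
 *	txwi                            t
 *	v                               v
 *	+-------------------------------+--------------------------+
 *	| txwi_size bytes, DMA-mapped   | struct mt76_txwi_cache   |
 *	| (hardware tx descriptor)      | { list, dma_addr }       |
 *	+-------------------------------+--------------------------+
 *
 * mt76_get_txwi_ptr() inverts this: (u8 *)t - dev->drv->txwi_size. Teardown
 * (upstream mt76_tx_free()) must unmap the same txwi_size bytes:
 */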
 	struct mt76_txwi_cache *t;
 
 	while ((t = __mt76_get_txwi(dev)) != NULL)
-		dma_unmap_single(dev->dev, t->dma_addr, sizeof(t->txwi),
+		dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size,
 				 DMA_TO_DEVICE);
 }