 	}
 
 	skb->prev = skb->next = NULL;
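+	/* Drivers that set tx_aligned4_skbs want the 802.11 header padded out
+	 * to a 4-byte boundary; do it here, before the driver builds the txwi.
+	 */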
+	if (dev->drv->tx_aligned4_skbs)
+		mt76_insert_hdr_pad(skb);
+
 	dma_sync_single_for_cpu(dev->dev, t->dma_addr, sizeof(t->txwi),
 				DMA_TO_DEVICE);
 	ret = dev->drv->tx_prepare_skb(dev, &t->txwi, skb, qid, wcid, sta,
 {
 	static const struct mt76_driver_ops drv_ops = {
 		.txwi_size = sizeof(struct mt76x02_txwi),
+		.tx_aligned4_skbs = true,
 		.update_survey = mt76x02_update_channel,
 		.tx_prepare_skb = mt76x02_tx_prepare_skb,
 		.tx_complete_skb = mt76x02_tx_complete_skb,
 			   u32 *tx_info)
 {
 	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct mt76x02_txwi *txwi = txwi_ptr;
-	int qsel = MT_QSEL_EDCA;
-	int pid;
+	int hdrlen, len, pid, qsel = MT_QSEL_EDCA;
 
 	if (qid == MT_TXQ_PSD && wcid && wcid->idx < 128)
 		mt76x02_mac_wcid_set_drop(dev, wcid->idx, false);
 
-	mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, skb->len);
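+	/* The DMA path has already inserted the 2-byte pad for headers whose
+	 * length is not a multiple of 4, so subtract it again to report the
+	 * real frame length in the txwi.
+	 */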
+	hdrlen = ieee80211_hdrlen(hdr->frame_control);
+	len = skb->len - (hdrlen & 2);
+	mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, len);
 
 	pid = mt76_tx_status_skb_add(mdev, wcid, skb);
 	txwi->pktid = pid;
 
-	mt76_insert_hdr_pad(skb);
-
 	if (pid >= MT_PACKET_ID_FIRST)
 		qsel = MT_QSEL_MGMT;
 
 {
 	static const struct mt76_driver_ops drv_ops = {
 		.txwi_size = sizeof(struct mt76x02_txwi),
+		.tx_aligned4_skbs = true,
 		.update_survey = mt76x02_update_channel,
 		.tx_prepare_skb = mt76x02_tx_prepare_skb,
 		.tx_complete_skb = mt76x02_tx_complete_skb,
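
For reference, mt76_insert_hdr_pad() is the mt76 core helper the DMA path now calls: when the 802.11 header length is not a multiple of four it grows the skb by two bytes, slides the header to the front and zeroes the two pad bytes between header and payload. Since 802.11 header lengths are always even, (hdrlen & 2) is exactly the number of pad bytes inserted, which is why mt76x02_tx_prepare_skb() can simply subtract it from skb->len. A minimal sketch of that behaviour follows, assuming the usual skbuff and cfg80211 helpers; it is illustrative only, not the verbatim upstream body.

#include <linux/skbuff.h>
#include <linux/string.h>
#include <net/cfg80211.h>

/* Sketch only: pad an unaligned 802.11 header out to a 4-byte boundary. */
void mt76_insert_hdr_pad(struct sk_buff *skb)
{
	int len = ieee80211_get_hdrlen_from_skb(skb);

	if (len % 4 == 0)
		return;

	/* Make room at the head, slide the header down and zero the pad. */
	skb_push(skb, 2);
	memmove(skb->data, skb->data + 2, len);
	skb->data[len] = 0;
	skb->data[len + 1] = 0;
}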