return 0;
}
- static int ath5k_pci_resume(struct device *dev)
- {
- struct pci_dev *pdev = to_pci_dev(dev);
- struct ath5k_softc *sc = pci_get_drvdata(pdev);
-
- /*
- * Suspend/Resume resets the PCI configuration space, so we have to
- * re-disable the RETRY_TIMEOUT register (0x41) to keep
- * PCI Tx retries from interfering with C3 CPU state
- */
- pci_write_config_byte(pdev, 0x41, 0);
+ /*
+ * Set/change channels. We always reset the chip.
+ * To accomplish this we must first clean up any pending DMA,
+ * then restart things, a la ath5k_init.
+ *
+ * Called with sc->lock held.
+ */
+ static int
+ ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
+ {
+ ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
+ "channel set, resetting (%u -> %u MHz)\n",
+ sc->curchan->center_freq, chan->center_freq);
- ath5k_led_enable(sc);
- return 0;
+ /*
+ * To switch channels, clear any pending DMA operations;
+ * wait long enough for the RX fifo to drain, reset the
+ * hardware at the new frequency, and then re-enable
+ * the relevant bits of the h/w.
+ */
+ return ath5k_reset(sc, chan);
}
- #endif /* CONFIG_PM_SLEEP */
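+ /*
+ * Record the current PHY mode and point sc->curband at the
+ * matching band (5 GHz for 11a, 2 GHz otherwise).
+ */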
+ static void
+ ath5k_setcurmode(struct ath5k_softc *sc, unsigned int mode)
+ {
+ sc->curmode = mode;
- /***********************\
- * Driver Initialization *
- \***********************/
+ if (mode == AR5K_MODE_11A) {
+ sc->curband = &sc->sbands[IEEE80211_BAND_5GHZ];
+ } else {
+ sc->curband = &sc->sbands[IEEE80211_BAND_2GHZ];
+ }
+ }
- static int ath5k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
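+ /*
+ * Program the RX filter, BSSID mask and operating mode
+ * into the hardware.
+ */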
+ static void
+ ath5k_mode_setup(struct ath5k_softc *sc)
{
- struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
- struct ath5k_softc *sc = hw->priv;
- struct ath_regulatory *regulatory = ath5k_hw_regulatory(sc->ah);
+ struct ath5k_hw *ah = sc->ah;
+ u32 rfilt;
- return ath_reg_notifier_apply(wiphy, request, regulatory);
+ /* configure rx filter */
+ rfilt = sc->filter_flags;
+ ath5k_hw_set_rx_filter(ah, rfilt);
+
+ if (ath5k_hw_hasbssidmask(ah))
+ ath5k_hw_set_bssid_mask(ah, sc->bssidmask);
+
+ /* configure operational mode */
+ ath5k_hw_set_opmode(ah, sc->opmode);
+
+ ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "mode setup opmode %d\n", sc->opmode);
+ ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
}
- static int
- ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
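+ /*
+ * Translate a hardware rate index into an index into the current
+ * band's bitrate table; the base rate is returned on errors.
+ */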
+ static inline int
+ ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix)
{
- struct ath5k_softc *sc = hw->priv;
- struct ath5k_hw *ah = sc->ah;
- struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
- u8 mac[ETH_ALEN] = {};
- int ret;
+ int rix;
- ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "devid 0x%x\n", pdev->device);
+ /* return base rate on errors */
+ if (WARN(hw_rix < 0 || hw_rix >= AR5K_MAX_RATES,
+ "hw_rix out of bounds: %x\n", hw_rix))
+ return 0;
- /*
- * Check if the MAC has multi-rate retry support.
- * We do this by trying to setup a fake extended
- * descriptor. MACs that don't have support will
- * return false w/o doing anything. MACs that do
- * support it will return true w/o doing anything.
- */
- ret = ath5k_hw_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);
+ rix = sc->rate_idx[sc->curband->band][hw_rix];
+ if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix))
+ rix = 0;
- if (ret < 0)
- goto err;
- if (ret > 0)
- __set_bit(ATH_STAT_MRRETRY, sc->status);
+ return rix;
+ }
- /*
- * Collect the channel list. The 802.11 layer
- * is resposible for filtering this list based
- * on settings like the phy mode and regulatory
- * domain restrictions.
- */
- ret = ath5k_setup_bands(hw);
- if (ret) {
- ATH5K_ERR(sc, "can't get channels\n");
- goto err;
- }
+ /***************\
+ * Buffers setup *
+ \***************/
- /* NB: setup here so ath5k_rate_update is happy */
- if (test_bit(AR5K_MODE_11A, ah->ah_modes))
- ath5k_setcurmode(sc, AR5K_MODE_11A);
- else
- ath5k_setcurmode(sc, AR5K_MODE_11B);
+ static
+ struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_softc *sc, dma_addr_t *skb_addr)
+ {
+ struct ath_common *common = ath5k_hw_common(sc->ah);
+ struct sk_buff *skb;
/*
- * Allocate tx+rx descriptors and populate the lists.
+ * Allocate a buffer with enough headroom for the
+ * fake physical layer header at the start.
*/
- ret = ath5k_desc_alloc(sc, pdev);
- if (ret) {
- ATH5K_ERR(sc, "can't allocate descriptors\n");
- goto err;
- }
+ skb = ath_rxbuf_alloc(common,
+ common->rx_bufsize,
+ GFP_ATOMIC);
- /*
- * Allocate hardware transmit queues: one queue for
- * beacon frames and one data queue for each QoS
- * priority. Note that hw functions handle resetting
- * these queues at the needed time.
- */
- ret = ath5k_beaconq_setup(ah);
- if (ret < 0) {
- ATH5K_ERR(sc, "can't setup a beacon xmit queue\n");
- goto err_desc;
- }
- sc->bhalq = ret;
- sc->cabq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_CAB, 0);
- if (IS_ERR(sc->cabq)) {
- ATH5K_ERR(sc, "can't setup cab queue\n");
- ret = PTR_ERR(sc->cabq);
- goto err_bhal;
+ if (!skb) {
+ ATH5K_ERR(sc, "can't alloc skbuff of size %u\n",
+ common->rx_bufsize);
+ return NULL;
}
- sc->txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK);
- if (IS_ERR(sc->txq)) {
- ATH5K_ERR(sc, "can't setup xmit queue\n");
- ret = PTR_ERR(sc->txq);
- goto err_queues;
+ *skb_addr = pci_map_single(sc->pdev,
+ skb->data, common->rx_bufsize,
+ PCI_DMA_FROMDEVICE);
+ if (unlikely(pci_dma_mapping_error(sc->pdev, *skb_addr))) {
+ ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__);
+ dev_kfree_skb(skb);
+ return NULL;
}
+ return skb;
+ }
- tasklet_init(&sc->rxtq, ath5k_tasklet_rx, (unsigned long)sc);
- tasklet_init(&sc->txtq, ath5k_tasklet_tx, (unsigned long)sc);
- tasklet_init(&sc->calib, ath5k_tasklet_calibrate, (unsigned long)sc);
- tasklet_init(&sc->beacontq, ath5k_tasklet_beacon, (unsigned long)sc);
- tasklet_init(&sc->ani_tasklet, ath5k_tasklet_ani, (unsigned long)sc);
-
- INIT_WORK(&sc->reset_work, ath5k_reset_work);
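+ /*
+ * (Re)allocate the skb for an RX buffer and hook its self-linked
+ * descriptor onto the end of the RX descriptor chain.
+ */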
+ static int
+ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
+ {
+ struct ath5k_hw *ah = sc->ah;
+ struct sk_buff *skb = bf->skb;
+ struct ath5k_desc *ds;
+ int ret;
- ret = ath5k_eeprom_read_mac(ah, mac);
- if (ret) {
- ATH5K_ERR(sc, "unable to read address from EEPROM: 0x%04x\n",
- sc->pdev->device);
- goto err_queues;
+ if (!skb) {
+ skb = ath5k_rx_skb_alloc(sc, &bf->skbaddr);
+ if (!skb)
+ return -ENOMEM;
+ bf->skb = skb;
}
- SET_IEEE80211_PERM_ADDR(hw, mac);
- /* All MAC address bits matter for ACKs */
- memcpy(sc->bssidmask, ath_bcast_mac, ETH_ALEN);
- ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask);
-
- regulatory->current_rd = ah->ah_capabilities.cap_eeprom.ee_regdomain;
- ret = ath_regd_init(regulatory, hw->wiphy, ath5k_reg_notifier);
+ /*
+ * Set up descriptors. For receive we always terminate
+ * the descriptor list with a self-linked entry so we'll
+ * not get overrun under high load (as can happen with a
+ * 5212 when ANI processing enables PHY error frames).
+ *
+ * To ensure the last descriptor is self-linked we create
+ * each descriptor as self-linked and add it to the end. As
+ * each additional descriptor is added the previous self-linked
+ * entry is "fixed" naturally. This should be safe even
+ * if DMA is happening. When processing RX interrupts we
+ * never remove/process the last, self-linked, entry on the
+ * descriptor list. This ensures the hardware always has
+ * someplace to write a new frame.
+ */
+ ds = bf->desc;
+ ds->ds_link = bf->daddr; /* link to self */
+ ds->ds_data = bf->skbaddr;
+ ret = ath5k_hw_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0);
if (ret) {
- ATH5K_ERR(sc, "can't initialize regulatory system\n");
- goto err_queues;
+ ATH5K_ERR(sc, "%s: could not setup RX desc\n", __func__);
+ return ret;
}
- ret = ieee80211_register_hw(hw);
- if (ret) {
- ATH5K_ERR(sc, "can't register ieee80211 hw\n");
- goto err_queues;
- }
+ if (sc->rxlink != NULL)
+ *sc->rxlink = bf->daddr;
+ sc->rxlink = &ds->ds_link;
+ return 0;
+ }
- if (!ath_is_world_regd(regulatory))
- regulatory_hint(hw->wiphy, regulatory->alpha2);
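+ /*
+ * Map the 802.11 frame type to the packet type expected by the
+ * hardware in the TX descriptor.
+ */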
+ static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb)
+ {
+ struct ieee80211_hdr *hdr;
+ enum ath5k_pkt_type htype;
+ __le16 fc;
- ath5k_init_leds(sc);
+ hdr = (struct ieee80211_hdr *)skb->data;
+ fc = hdr->frame_control;
- ath5k_sysfs_register(sc);
+ if (ieee80211_is_beacon(fc))
+ htype = AR5K_PKT_TYPE_BEACON;
+ else if (ieee80211_is_probe_resp(fc))
+ htype = AR5K_PKT_TYPE_PROBE_RESP;
+ else if (ieee80211_is_atim(fc))
+ htype = AR5K_PKT_TYPE_ATIM;
+ else if (ieee80211_is_pspoll(fc))
+ htype = AR5K_PKT_TYPE_PSPOLL;
+ else
+ htype = AR5K_PKT_TYPE_NORMAL;
- return 0;
- err_queues:
- ath5k_txq_release(sc);
- err_bhal:
- ath5k_hw_release_tx_queue(ah, sc->bhalq);
- err_desc:
- ath5k_desc_free(sc, pdev);
- err:
- return ret;
+ return htype;
}
- static void
- ath5k_detach(struct pci_dev *pdev, struct ieee80211_hw *hw)
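+ /*
+ * Fill the TX descriptor for a buffer (rate, protection, multi-rate
+ * retries), add it to the given hardware queue and start DMA.
+ */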
+ static int
+ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
+ struct ath5k_txq *txq, int padsize)
{
- struct ath5k_softc *sc = hw->priv;
-
- /*
- * NB: the order of these is important:
- * o call the 802.11 layer before detaching ath5k_hw to
- * ensure callbacks into the driver to delete global
- * key cache entries can be handled
- * o reclaim the tx queue data structures after calling
- * the 802.11 layer as we'll get called back to reclaim
- * node state and potentially want to use them
- * o to cleanup the tx queues the hal is called, so detach
- * it last
- * XXX: ??? detach ath5k_hw ???
- * Other than that, it's straightforward...
- */
- ieee80211_unregister_hw(hw);
- ath5k_desc_free(sc, pdev);
- ath5k_txq_release(sc);
- ath5k_hw_release_tx_queue(sc->ah, sc->bhalq);
- ath5k_unregister_leds(sc);
-
- ath5k_sysfs_unregister(sc);
- /*
- * NB: can't reclaim these until after ieee80211_ifdetach
- * returns because we'll get called back to reclaim node
- * state and potentially want to use them.
- */
- }
+ struct ath5k_hw *ah = sc->ah;
+ struct ath5k_desc *ds = bf->desc;
+ struct sk_buff *skb = bf->skb;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ unsigned int pktlen, flags, keyidx = AR5K_TXKEYIX_INVALID;
+ struct ieee80211_rate *rate;
+ unsigned int mrr_rate[3], mrr_tries[3];
+ int i, ret;
+ u16 hw_rate;
+ u16 cts_rate = 0;
+ u16 duration = 0;
+ u8 rc_flags;
+ flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK;
+ /* XXX endianness */
+ bf->skbaddr = pci_map_single(sc->pdev, skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+ rate = ieee80211_get_tx_rate(sc->hw, info);
+ if (!rate) {
+ ret = -EINVAL;
+ goto err_unmap;
+ }
- /********************\
- * Channel/mode setup *
- \********************/
+ if (info->flags & IEEE80211_TX_CTL_NO_ACK)
+ flags |= AR5K_TXDESC_NOACK;
- /*
- * Convert IEEE channel number to MHz frequency.
- */
- static inline short
- ath5k_ieee2mhz(short chan)
- {
- if (chan <= 14 || chan >= 27)
- return ieee80211chan2mhz(chan);
- else
- return 2212 + chan * 20;
- }
+ rc_flags = info->control.rates[0].flags;
+ hw_rate = (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) ?
+ rate->hw_value_short : rate->hw_value;
- /*
- * Returns true for the channel numbers used without all_channels modparam.
- */
- static bool ath5k_is_standard_channel(short chan)
- {
- return ((chan <= 14) ||
- /* UNII 1,2 */
- ((chan & 3) == 0 && chan >= 36 && chan <= 64) ||
- /* midband */
- ((chan & 3) == 0 && chan >= 100 && chan <= 140) ||
- /* UNII-3 */
- ((chan & 3) == 1 && chan >= 149 && chan <= 165));
- }
+ pktlen = skb->len;
- static unsigned int
- ath5k_copy_channels(struct ath5k_hw *ah,
- struct ieee80211_channel *channels,
- unsigned int mode,
- unsigned int max)
- {
- unsigned int i, count, size, chfreq, freq, ch;
+ /* FIXME: If we are in g mode and rate is a CCK rate
+ * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
+ * from tx power (value is in dB units already) */
+ if (info->control.hw_key) {
+ keyidx = info->control.hw_key->hw_key_idx;
+ pktlen += info->control.hw_key->icv_len;
+ }
+ if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
+ flags |= AR5K_TXDESC_RTSENA;
+ cts_rate = ieee80211_get_rts_cts_rate(sc->hw, info)->hw_value;
+ duration = le16_to_cpu(ieee80211_rts_duration(sc->hw,
+ sc->vif, pktlen, info));
+ }
+ if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
+ flags |= AR5K_TXDESC_CTSENA;
+ cts_rate = ieee80211_get_rts_cts_rate(sc->hw, info)->hw_value;
+ duration = le16_to_cpu(ieee80211_ctstoself_duration(sc->hw,
+ sc->vif, pktlen, info));
+ }
+ ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
+ ieee80211_get_hdrlen_from_skb(skb), padsize,
+ get_hw_packet_type(skb),
+ (sc->power_level * 2),
+ hw_rate,
+ info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags,
+ cts_rate, duration);
+ if (ret)
+ goto err_unmap;
- if (!test_bit(mode, ah->ah_modes))
- return 0;
+ memset(mrr_rate, 0, sizeof(mrr_rate));
+ memset(mrr_tries, 0, sizeof(mrr_tries));
+ for (i = 0; i < 3; i++) {
+ rate = ieee80211_get_alt_retry_rate(sc->hw, info, i);
+ if (!rate)
+ break;
- switch (mode) {
- case AR5K_MODE_11A:
- case AR5K_MODE_11A_TURBO:
- /* 1..220, but 2GHz frequencies are filtered by check_channel */
- size = 220 ;
- chfreq = CHANNEL_5GHZ;
- break;
- case AR5K_MODE_11B:
- case AR5K_MODE_11G:
- case AR5K_MODE_11G_TURBO:
- size = 26;
- chfreq = CHANNEL_2GHZ;
- break;
- default:
- ATH5K_WARN(ah->ah_sc, "bad mode, not copying channels\n");
- return 0;
+ mrr_rate[i] = rate->hw_value;
+ mrr_tries[i] = info->control.rates[i + 1].count;
}
- for (i = 0, count = 0; i < size && max > 0; i++) {
- ch = i + 1 ;
- freq = ath5k_ieee2mhz(ch);
-
- /* Check if channel is supported by the chipset */
- if (!ath5k_channel_ok(ah, freq, chfreq))
- continue;
+ ath5k_hw_setup_mrr_tx_desc(ah, ds,
+ mrr_rate[0], mrr_tries[0],
+ mrr_rate[1], mrr_tries[1],
+ mrr_rate[2], mrr_tries[2]);
- if (!modparam_all_channels && !ath5k_is_standard_channel(ch))
- continue;
+ ds->ds_link = 0;
+ ds->ds_data = bf->skbaddr;
- /* Write channel info and increment counter */
- channels[count].center_freq = freq;
- channels[count].band = (chfreq == CHANNEL_2GHZ) ?
- IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
- switch (mode) {
- case AR5K_MODE_11A:
- case AR5K_MODE_11G:
- channels[count].hw_value = chfreq | CHANNEL_OFDM;
- break;
- case AR5K_MODE_11A_TURBO:
- case AR5K_MODE_11G_TURBO:
- channels[count].hw_value = chfreq |
- CHANNEL_OFDM | CHANNEL_TURBO;
- break;
- case AR5K_MODE_11B:
- channels[count].hw_value = CHANNEL_B;
- }
+ spin_lock_bh(&txq->lock);
+ list_add_tail(&bf->list, &txq->q);
+ txq->txq_len++;
+ if (txq->link == NULL) /* is this first packet? */
+ ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);
+ else /* no, so only link it */
+ *txq->link = bf->daddr;
- count++;
- max--;
- }
+ txq->link = &ds->ds_link;
+ ath5k_hw_start_tx_dma(ah, txq->qnum);
+ mmiowb();
+ spin_unlock_bh(&txq->lock);
- return count;
+ return 0;
+ err_unmap:
+ pci_unmap_single(sc->pdev, bf->skbaddr, skb->len, PCI_DMA_TODEVICE);
+ return ret;
}
- static void
- ath5k_setup_rate_idx(struct ath5k_softc *sc, struct ieee80211_supported_band *b)
+ /*******************\
+ * Descriptors setup *
+ \*******************/
+
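+ /*
+ * Allocate one block of coherent DMA memory holding all RX, TX and
+ * beacon descriptors, plus an array of ath5k_buf entries to track them.
+ */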
+ static int
+ ath5k_desc_alloc(struct ath5k_softc *sc, struct pci_dev *pdev)
{
- u8 i;
+ struct ath5k_desc *ds;
+ struct ath5k_buf *bf;
+ dma_addr_t da;
+ unsigned int i;
+ int ret;
- for (i = 0; i < AR5K_MAX_RATES; i++)
- sc->rate_idx[b->band][i] = -1;
+ /* allocate descriptors */
+ sc->desc_len = sizeof(struct ath5k_desc) *
+ (ATH_TXBUF + ATH_RXBUF + ATH_BCBUF + 1);
+ sc->desc = pci_alloc_consistent(pdev, sc->desc_len, &sc->desc_daddr);
+ if (sc->desc == NULL) {
+ ATH5K_ERR(sc, "can't allocate descriptors\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ ds = sc->desc;
+ da = sc->desc_daddr;
+ ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "DMA map: %p (%zu) -> %llx\n",
+ ds, sc->desc_len, (unsigned long long)sc->desc_daddr);
- for (i = 0; i < b->n_bitrates; i++) {
- sc->rate_idx[b->band][b->bitrates[i].hw_value] = i;
- if (b->bitrates[i].hw_value_short)
- sc->rate_idx[b->band][b->bitrates[i].hw_value_short] = i;
+ bf = kcalloc(1 + ATH_TXBUF + ATH_RXBUF + ATH_BCBUF,
+ sizeof(struct ath5k_buf), GFP_KERNEL);
+ if (bf == NULL) {
+ ATH5K_ERR(sc, "can't allocate bufptr\n");
+ ret = -ENOMEM;
+ goto err_free;
+ }
+ sc->bufptr = bf;
+
+ INIT_LIST_HEAD(&sc->rxbuf);
+ for (i = 0; i < ATH_RXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
+ bf->desc = ds;
+ bf->daddr = da;
+ list_add_tail(&bf->list, &sc->rxbuf);
+ }
+
+ INIT_LIST_HEAD(&sc->txbuf);
+ sc->txbuf_len = ATH_TXBUF;
+ for (i = 0; i < ATH_TXBUF; i++, bf++, ds++,
+ da += sizeof(*ds)) {
+ bf->desc = ds;
+ bf->daddr = da;
+ list_add_tail(&bf->list, &sc->txbuf);
}
+
+ /* beacon buffer */
+ bf->desc = ds;
+ bf->daddr = da;
+ sc->bbuf = bf;
+
+ return 0;
+ err_free:
+ pci_free_consistent(pdev, sc->desc_len, sc->desc, sc->desc_daddr);
+ err:
+ sc->desc = NULL;
+ return ret;
}
- static int
- ath5k_setup_bands(struct ieee80211_hw *hw)
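+ /* Free the beacon, TX and RX skbs and release the descriptor DMA memory. */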
+ static void
+ ath5k_desc_free(struct ath5k_softc *sc, struct pci_dev *pdev)
{
- struct ath5k_softc *sc = hw->priv;
- struct ath5k_hw *ah = sc->ah;
- struct ieee80211_supported_band *sband;
- int max_c, count_c = 0;
- int i;
+ struct ath5k_buf *bf;
- BUILD_BUG_ON(ARRAY_SIZE(sc->sbands) < IEEE80211_NUM_BANDS);
- max_c = ARRAY_SIZE(sc->channels);
+ ath5k_txbuf_free_skb(sc, sc->bbuf);
+ list_for_each_entry(bf, &sc->txbuf, list)
+ ath5k_txbuf_free_skb(sc, bf);
+ list_for_each_entry(bf, &sc->rxbuf, list)
+ ath5k_rxbuf_free_skb(sc, bf);
- /* 2GHz band */
- sband = &sc->sbands[IEEE80211_BAND_2GHZ];
- sband->band = IEEE80211_BAND_2GHZ;
- sband->bitrates = &sc->rates[IEEE80211_BAND_2GHZ][0];
+ /* Free memory associated with all descriptors */
+ pci_free_consistent(pdev, sc->desc_len, sc->desc, sc->desc_daddr);
+ sc->desc = NULL;
+ sc->desc_daddr = 0;
- if (test_bit(AR5K_MODE_11G, sc->ah->ah_capabilities.cap_mode)) {
- /* G mode */
- memcpy(sband->bitrates, &ath5k_rates[0],
- sizeof(struct ieee80211_rate) * 12);
- sband->n_bitrates = 12;
+ kfree(sc->bufptr);
+ sc->bufptr = NULL;
+ sc->bbuf = NULL;
+ }
- sband->channels = sc->channels;
- sband->n_channels = ath5k_copy_channels(ah, sband->channels,
- AR5K_MODE_11G, max_c);
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
- count_c = sband->n_channels;
- max_c -= count_c;
- } else if (test_bit(AR5K_MODE_11B, sc->ah->ah_capabilities.cap_mode)) {
- /* B mode */
- memcpy(sband->bitrates, &ath5k_rates[0],
- sizeof(struct ieee80211_rate) * 4);
- sband->n_bitrates = 4;
+ /**************\
+ * Queues setup *
+ \**************/
- /* 5211 only supports B rates and uses 4bit rate codes
- * (e.g normally we have 0x1B for 1M, but on 5211 we have 0x0B)
- * fix them up here:
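+ /*
+ * Set up a hardware transmit queue of the given type/subtype and
+ * initialize the corresponding driver state in sc->txqs.
+ */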
+ static struct ath5k_txq *
+ ath5k_txq_setup(struct ath5k_softc *sc,
+ int qtype, int subtype)
+ {
+ struct ath5k_hw *ah = sc->ah;
+ struct ath5k_txq *txq;
+ struct ath5k_txq_info qi = {
+ .tqi_subtype = subtype,
+ /* XXX: default values not correct for B and XR channels,
+ * but who cares? */
+ .tqi_aifs = AR5K_TUNE_AIFS,
+ .tqi_cw_min = AR5K_TUNE_CWMIN,
+ .tqi_cw_max = AR5K_TUNE_CWMAX
+ };
+ int qnum;
+
+ /*
+ * Enable interrupts only for EOL and DESC conditions.
+ * We mark tx descriptors to receive a DESC interrupt
+ * when a tx queue gets deep; otherwise we wait for the
+ * EOL to reap descriptors. Note that this is done to
+ * reduce interrupt load and this only defers reaping
+ * descriptors, never transmitting frames. Aside from
+ * reducing interrupts this also permits more concurrency.
+ * The only potential downside is if the tx queue backs
+ * up, in which case the top half of the kernel may back up
+ * due to a lack of tx descriptors.
+ */
+ qi.tqi_flags = AR5K_TXQ_FLAG_TXEOLINT_ENABLE |
+ AR5K_TXQ_FLAG_TXDESCINT_ENABLE;
+ qnum = ath5k_hw_setup_tx_queue(ah, qtype, &qi);
+ if (qnum < 0) {
+ /*
+ * NB: don't print a message, this happens
+ * normally on parts with too few tx queues
*/
- if (ah->ah_version == AR5K_AR5211) {
- for (i = 0; i < 4; i++) {
- sband->bitrates[i].hw_value =
- sband->bitrates[i].hw_value & 0xF;
- sband->bitrates[i].hw_value_short =
- sband->bitrates[i].hw_value_short & 0xF;
- }
- }
+ return ERR_PTR(qnum);
+ }
+ if (qnum >= ARRAY_SIZE(sc->txqs)) {
+ ATH5K_ERR(sc, "hw qnum %u out of range, max %tu!\n",
+ qnum, ARRAY_SIZE(sc->txqs));
+ ath5k_hw_release_tx_queue(ah, qnum);
+ return ERR_PTR(-EINVAL);
+ }
+ txq = &sc->txqs[qnum];
+ if (!txq->setup) {
+ txq->qnum = qnum;
+ txq->link = NULL;
+ INIT_LIST_HEAD(&txq->q);
+ spin_lock_init(&txq->lock);
+ txq->setup = true;
+ txq->txq_len = 0;
+ txq->txq_poll_mark = false;
+ txq->txq_stuck = 0;
+ }
+ return &sc->txqs[qnum];
+ }
- sband->channels = sc->channels;
- sband->n_channels = ath5k_copy_channels(ah, sband->channels,
- AR5K_MODE_11B, max_c);
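+ /* Set up the hardware beacon queue and return its queue number. */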
+ static int
+ ath5k_beaconq_setup(struct ath5k_hw *ah)
+ {
+ struct ath5k_txq_info qi = {
+ /* XXX: default values not correct for B and XR channels,
+ * but who cares? */
+ .tqi_aifs = AR5K_TUNE_AIFS,
+ .tqi_cw_min = AR5K_TUNE_CWMIN,
+ .tqi_cw_max = AR5K_TUNE_CWMAX,
+ /* NB: for dynamic turbo, don't enable any other interrupts */
+ .tqi_flags = AR5K_TXQ_FLAG_TXDESCINT_ENABLE
+ };
+
+ return ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_BEACON, &qi);
+ }
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
- count_c = sband->n_channels;
- max_c -= count_c;
- }
- ath5k_setup_rate_idx(sc, sband);
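+ /*
+ * Configure the beacon queue contention parameters for the current
+ * operating mode and set the CAB queue ready time.
+ */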
+ static int
+ ath5k_beaconq_config(struct ath5k_softc *sc)
+ {
+ struct ath5k_hw *ah = sc->ah;
+ struct ath5k_txq_info qi;
+ int ret;
- /* 5GHz band, A mode */
- if (test_bit(AR5K_MODE_11A, sc->ah->ah_capabilities.cap_mode)) {
- sband = &sc->sbands[IEEE80211_BAND_5GHZ];
- sband->band = IEEE80211_BAND_5GHZ;
- sband->bitrates = &sc->rates[IEEE80211_BAND_5GHZ][0];
+ ret = ath5k_hw_get_tx_queueprops(ah, sc->bhalq, &qi);
+ if (ret)
+ goto err;
- memcpy(sband->bitrates, &ath5k_rates[4],
- sizeof(struct ieee80211_rate) * 8);
- sband->n_bitrates = 8;
+ if (sc->opmode == NL80211_IFTYPE_AP ||
+ sc->opmode == NL80211_IFTYPE_MESH_POINT) {
+ /*
+ * Always burst out beacon and CAB traffic
+ * (aifs = cwmin = cwmax = 0)
+ */
+ qi.tqi_aifs = 0;
+ qi.tqi_cw_min = 0;
+ qi.tqi_cw_max = 0;
+ } else if (sc->opmode == NL80211_IFTYPE_ADHOC) {
+ /*
+ * Adhoc mode; backoff between 0 and (2 * cw_min).
+ */
+ qi.tqi_aifs = 0;
+ qi.tqi_cw_min = 0;
+ qi.tqi_cw_max = 2 * AR5K_TUNE_CWMIN;
+ }
- sband->channels = &sc->channels[count_c];
- sband->n_channels = ath5k_copy_channels(ah, sband->channels,
- AR5K_MODE_11A, max_c);
+ ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
+ "beacon queueprops tqi_aifs:%d tqi_cw_min:%d tqi_cw_max:%d\n",
+ qi.tqi_aifs, qi.tqi_cw_min, qi.tqi_cw_max);
- hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
+ ret = ath5k_hw_set_tx_queueprops(ah, sc->bhalq, &qi);
+ if (ret) {
+ ATH5K_ERR(sc, "%s: unable to update parameters for beacon "
+ "hardware queue!\n", __func__);
+ goto err;
}
- ath5k_setup_rate_idx(sc, sband);
+ ret = ath5k_hw_reset_tx_queue(ah, sc->bhalq); /* push to h/w */
+ if (ret)
+ goto err;
- ath5k_debug_dump_bands(sc);
+ /* reconfigure cabq with ready time to 80% of beacon_interval */
+ ret = ath5k_hw_get_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
+ if (ret)
+ goto err;
- return 0;
+ qi.tqi_ready_time = (sc->bintval * 80) / 100;
+ ret = ath5k_hw_set_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
+ if (ret)
+ goto err;
+
+ ret = ath5k_hw_reset_tx_queue(ah, AR5K_TX_QUEUE_ID_CAB);
+ err:
+ return ret;
}
- /*
- * Set/change channels. We always reset the chip.
- * To accomplish this we must first cleanup any pending DMA,
- * then restart stuff after a la ath5k_init.
- *
- * Called with sc->lock.
- */
- static int
- ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
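+ /*
+ * Free all pending buffers on a transmit queue and return them
+ * to the free list.
+ */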
+ static void
+ ath5k_txq_drainq(struct ath5k_softc *sc, struct ath5k_txq *txq)
{
- ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
- "channel set, resetting (%u -> %u MHz)\n",
- sc->curchan->center_freq, chan->center_freq);
+ struct ath5k_buf *bf, *bf0;
/*
- * To switch channels clear any pending DMA operations;
- * wait long enough for the RX fifo to drain, reset the
- * hardware at the new frequency, and then re-enable
- * the relevant bits of the h/w.
+ * NB: this assumes output has been stopped and
+ * we do not need to block ath5k_tx_tasklet
*/
- return ath5k_reset(sc, chan);
- }
+ spin_lock_bh(&txq->lock);
+ list_for_each_entry_safe(bf, bf0, &txq->q, list) {
+ ath5k_debug_printtxbuf(sc, bf);
- static void
- ath5k_setcurmode(struct ath5k_softc *sc, unsigned int mode)
- {
- sc->curmode = mode;
+ ath5k_txbuf_free_skb(sc, bf);
- if (mode == AR5K_MODE_11A) {
- sc->curband = &sc->sbands[IEEE80211_BAND_5GHZ];
- } else {
- sc->curband = &sc->sbands[IEEE80211_BAND_2GHZ];
+ spin_lock_bh(&sc->txbuflock);
+ list_move_tail(&bf->list, &sc->txbuf);
+ sc->txbuf_len++;
+ txq->txq_len--;
+ spin_unlock_bh(&sc->txbuflock);
}
+ txq->link = NULL;
+ txq->txq_poll_mark = false;
+ spin_unlock_bh(&txq->lock);
}
+ /*
+ * Drain the transmit queues and reclaim resources.
+ */
static void
- ath5k_mode_setup(struct ath5k_softc *sc)
+ ath5k_txq_cleanup(struct ath5k_softc *sc)
{
struct ath5k_hw *ah = sc->ah;
- u32 rfilt;
-
- /* configure rx filter */
- rfilt = sc->filter_flags;
- ath5k_hw_set_rx_filter(ah, rfilt);
-
- if (ath5k_hw_hasbssidmask(ah))
- ath5k_hw_set_bssid_mask(ah, sc->bssidmask);
+ unsigned int i;
- /* configure operational mode */
- ath5k_hw_set_opmode(ah, sc->opmode);
+ /* XXX return value */
+ if (likely(!test_bit(ATH_STAT_INVALID, sc->status))) {
+ /* don't touch the hardware if marked invalid */
+ ath5k_hw_stop_tx_dma(ah, sc->bhalq);
+ ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "beacon queue %x\n",
+ ath5k_hw_get_txdp(ah, sc->bhalq));
+ for (i = 0; i < ARRAY_SIZE(sc->txqs); i++)
+ if (sc->txqs[i].setup) {
+ ath5k_hw_stop_tx_dma(ah, sc->txqs[i].qnum);
+ ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "txq [%u] %x, "
+ "link %p\n",
+ sc->txqs[i].qnum,
+ ath5k_hw_get_txdp(ah,
+ sc->txqs[i].qnum),
+ sc->txqs[i].link);
+ }
+ }
- ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "mode setup opmode %d\n", sc->opmode);
- ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
+ for (i = 0; i < ARRAY_SIZE(sc->txqs); i++)
+ if (sc->txqs[i].setup)
+ ath5k_txq_drainq(sc, &sc->txqs[i]);
}
- static inline int
- ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix)
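+ /* Release all hardware transmit queues we have set up. */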
+ static void
+ ath5k_txq_release(struct ath5k_softc *sc)
{
- int rix;
-
- /* return base rate on errors */
- if (WARN(hw_rix < 0 || hw_rix >= AR5K_MAX_RATES,
- "hw_rix out of bounds: %x\n", hw_rix))
- return 0;
-
- rix = sc->rate_idx[sc->curband->band][hw_rix];
- if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix))
- rix = 0;
+ struct ath5k_txq *txq = sc->txqs;
+ unsigned int i;
- return rix;
+ for (i = 0; i < ARRAY_SIZE(sc->txqs); i++, txq++)
+ if (txq->setup) {
+ ath5k_hw_release_tx_queue(sc->ah, txq->qnum);
+ txq->setup = false;
+ }
}
- /***************\
- * Buffers setup *
- \***************/
-
- static
- struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_softc *sc, dma_addr_t *skb_addr)
- {
- struct ath_common *common = ath5k_hw_common(sc->ah);
- struct sk_buff *skb;
-
- /*
- * Allocate buffer with headroom_needed space for the
- * fake physical layer header at the start.
- */
- skb = ath_rxbuf_alloc(common,
- common->rx_bufsize,
- GFP_ATOMIC);
-
- if (!skb) {
- ATH5K_ERR(sc, "can't alloc skbuff of size %u\n",
- common->rx_bufsize);
- return NULL;
- }
- *skb_addr = pci_map_single(sc->pdev,
- skb->data, common->rx_bufsize,
- PCI_DMA_FROMDEVICE);
- if (unlikely(pci_dma_mapping_error(sc->pdev, *skb_addr))) {
- ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__);
- dev_kfree_skb(skb);
- return NULL;
- }
- return skb;
- }
+ /*************\
+ * RX Handling *
+ \*************/
+ /*
+ * Enable the receive h/w following a reset.
+ */
static int
- ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
+ ath5k_rx_start(struct ath5k_softc *sc)
{
struct ath5k_hw *ah = sc->ah;
- struct sk_buff *skb = bf->skb;
- struct ath5k_desc *ds;
+ struct ath_common *common = ath5k_hw_common(ah);
+ struct ath5k_buf *bf;
int ret;
- if (!skb) {
- skb = ath5k_rx_skb_alloc(sc, &bf->skbaddr);
- if (!skb)
- return -ENOMEM;
- bf->skb = skb;
- }
+ common->rx_bufsize = roundup(IEEE80211_MAX_FRAME_LEN, common->cachelsz);
- /*
- * Setup descriptors. For receive we always terminate
- * the descriptor list with a self-linked entry so we'll
- * not get overrun under high load (as can happen with a
- * 5212 when ANI processing enables PHY error frames).
- *
- * To ensure the last descriptor is self-linked we create
- * each descriptor as self-linked and add it to the end. As
- * each additional descriptor is added the previous self-linked
- * entry is "fixed" naturally. This should be safe even
- * if DMA is happening. When processing RX interrupts we
- * never remove/process the last, self-linked, entry on the
- * descriptor list. This ensures the hardware always has
- * someplace to write a new frame.
- */
- ds = bf->desc;
- ds->ds_link = bf->daddr; /* link to self */
- ds->ds_data = bf->skbaddr;
- ret = ath5k_hw_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0);
- if (ret) {
- ATH5K_ERR(sc, "%s: could not setup RX desc\n", __func__);
- return ret;
+ ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "cachelsz %u rx_bufsize %u\n",
+ common->cachelsz, common->rx_bufsize);
+
+ spin_lock_bh(&sc->rxbuflock);
+ sc->rxlink = NULL;
+ list_for_each_entry(bf, &sc->rxbuf, list) {
+ ret = ath5k_rxbuf_setup(sc, bf);
+ if (ret != 0) {
+ spin_unlock_bh(&sc->rxbuflock);
+ goto err;
+ }
}
+ bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list);
+ ath5k_hw_set_rxdp(ah, bf->daddr);
+ spin_unlock_bh(&sc->rxbuflock);
+
+ ath5k_hw_start_rx_dma(ah); /* enable recv descriptors */
+ ath5k_mode_setup(sc); /* set filters, etc. */
+ ath5k_hw_start_rx_pcu(ah); /* re-enable PCU/DMA engine */
- if (sc->rxlink != NULL)
- *sc->rxlink = bf->daddr;
- sc->rxlink = &ds->ds_link;
return 0;
+ err:
+ return ret;
}
- static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb)
+ /*
+ * Disable the receive h/w in preparation for a reset.
+ */
+ static void
+ ath5k_rx_stop(struct ath5k_softc *sc)
{
- struct ieee80211_hdr *hdr;
- enum ath5k_pkt_type htype;
- __le16 fc;
-
- hdr = (struct ieee80211_hdr *)skb->data;
- fc = hdr->frame_control;
+ struct ath5k_hw *ah = sc->ah;
- if (ieee80211_is_beacon(fc))
- htype = AR5K_PKT_TYPE_BEACON;
- else if (ieee80211_is_probe_resp(fc))
- htype = AR5K_PKT_TYPE_PROBE_RESP;
- else if (ieee80211_is_atim(fc))
- htype = AR5K_PKT_TYPE_ATIM;
- else if (ieee80211_is_pspoll(fc))
- htype = AR5K_PKT_TYPE_PSPOLL;
- else
- htype = AR5K_PKT_TYPE_NORMAL;
+ ath5k_hw_stop_rx_pcu(ah); /* disable PCU */
+ ath5k_hw_set_rx_filter(ah, 0); /* clear recv filter */
+ ath5k_hw_stop_rx_dma(ah); /* disable DMA engine */
- return htype;
+ ath5k_debug_printrxbuffs(sc, ah);
}
- static int
- ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
- struct ath5k_txq *txq, int padsize)
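+ /*
+ * Check whether the hardware decrypted a received frame and, if so,
+ * report RX_FLAG_DECRYPTED to mac80211.
+ */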
+ static unsigned int
+ ath5k_rx_decrypted(struct ath5k_softc *sc, struct sk_buff *skb,
+ struct ath5k_rx_status *rs)
{
struct ath5k_hw *ah = sc->ah;
- struct ath5k_desc *ds = bf->desc;
- struct sk_buff *skb = bf->skb;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- unsigned int pktlen, flags, keyidx = AR5K_TXKEYIX_INVALID;
- struct ieee80211_rate *rate;
- unsigned int mrr_rate[3], mrr_tries[3];
- int i, ret;
- u16 hw_rate;
- u16 cts_rate = 0;
- u16 duration = 0;
- u8 rc_flags;
+ struct ath_common *common = ath5k_hw_common(ah);
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ unsigned int keyix, hlen;
- flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK;
+ if (!(rs->rs_status & AR5K_RXERR_DECRYPT) &&
+ rs->rs_keyix != AR5K_RXKEYIX_INVALID)
+ return RX_FLAG_DECRYPTED;
- /* XXX endianness */
- bf->skbaddr = pci_map_single(sc->pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
+ /* Apparently when a default key is used to decrypt the packet
+ the hw does not set the index used to decrypt. In such cases
+ get the index from the packet. */
+ hlen = ieee80211_hdrlen(hdr->frame_control);
+ if (ieee80211_has_protected(hdr->frame_control) &&
+ !(rs->rs_status & AR5K_RXERR_DECRYPT) &&
+ skb->len >= hlen + 4) {
+ keyix = skb->data[hlen + 3] >> 6;
- rate = ieee80211_get_tx_rate(sc->hw, info);
- if (!rate) {
- ret = -EINVAL;
- goto err_unmap;
+ if (test_bit(keyix, common->keymap))
+ return RX_FLAG_DECRYPTED;
}
- if (info->flags & IEEE80211_TX_CTL_NO_ACK)
- flags |= AR5K_TXDESC_NOACK;
+ return 0;
+ }
- rc_flags = info->control.rates[0].flags;
- hw_rate = (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) ?
- rate->hw_value_short : rate->hw_value;
- pktlen = skb->len;
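+ /*
+ * Inspect IBSS beacons from our BSSID, work around buggy RX
+ * timestamps and resynchronize our beacon timers when needed.
+ */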
+ static void
+ ath5k_check_ibss_tsf(struct ath5k_softc *sc, struct sk_buff *skb,
+ struct ieee80211_rx_status *rxs)
+ {
+ struct ath_common *common = ath5k_hw_common(sc->ah);
+ u64 tsf, bc_tstamp;
+ u32 hw_tu;
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
- /* FIXME: If we are in g mode and rate is a CCK rate
- * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
- * from tx power (value is in dB units already) */
- if (info->control.hw_key) {
- keyidx = info->control.hw_key->hw_key_idx;
- pktlen += info->control.hw_key->icv_len;
- }
- if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
- flags |= AR5K_TXDESC_RTSENA;
- cts_rate = ieee80211_get_rts_cts_rate(sc->hw, info)->hw_value;
- duration = le16_to_cpu(ieee80211_rts_duration(sc->hw,
- sc->vif, pktlen, info));
- }
- if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
- flags |= AR5K_TXDESC_CTSENA;
- cts_rate = ieee80211_get_rts_cts_rate(sc->hw, info)->hw_value;
- duration = le16_to_cpu(ieee80211_ctstoself_duration(sc->hw,
- sc->vif, pktlen, info));
- }
- ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
- ieee80211_get_hdrlen_from_skb(skb), padsize,
- get_hw_packet_type(skb),
- (sc->power_level * 2),
- hw_rate,
- info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags,
- cts_rate, duration);
- if (ret)
- goto err_unmap;
+ if (ieee80211_is_beacon(mgmt->frame_control) &&
+ le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS &&
+ memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) == 0) {
+ /*
+ * Received an IBSS beacon with the same BSSID. Hardware *must*
+ * have updated the local TSF. We have to work around various
+ * hardware bugs, though...
+ */
+ tsf = ath5k_hw_get_tsf64(sc->ah);
+ bc_tstamp = le64_to_cpu(mgmt->u.beacon.timestamp);
+ hw_tu = TSF_TO_TU(tsf);
- memset(mrr_rate, 0, sizeof(mrr_rate));
- memset(mrr_tries, 0, sizeof(mrr_tries));
- for (i = 0; i < 3; i++) {
- rate = ieee80211_get_alt_retry_rate(sc->hw, info, i);
- if (!rate)
- break;
+ ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
+ "beacon %llx mactime %llx (diff %lld) tsf now %llx\n",
+ (unsigned long long)bc_tstamp,
+ (unsigned long long)rxs->mactime,
+ (unsigned long long)(rxs->mactime - bc_tstamp),
+ (unsigned long long)tsf);
- mrr_rate[i] = rate->hw_value;
- mrr_tries[i] = info->control.rates[i + 1].count;
- }
+ /*
+ * Sometimes the HW will give us a wrong tstamp in the rx
+ * status, causing the timestamp extension to go wrong.
+ * (This seems to happen especially with beacon frames bigger
+ * than 78 bytes, incl. FCS.)
+ * But we know that the receive timestamp must be later than the
+ * timestamp of the beacon since HW must have synced to that.
+ *
+ * NOTE: here we assume mactime to be after the frame was
+ * received, unlike mac80211, which defines it at the start.
+ */
+ if (bc_tstamp > rxs->mactime) {
+ ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
+ "fixing mactime from %llx to %llx\n",
+ (unsigned long long)rxs->mactime,
+ (unsigned long long)tsf);
+ rxs->mactime = tsf;
+ }
- ath5k_hw_setup_mrr_tx_desc(ah, ds,
- mrr_rate[0], mrr_tries[0],
- mrr_rate[1], mrr_tries[1],
- mrr_rate[2], mrr_tries[2]);
+ /*
+ * Local TSF might have moved higher than our beacon timers,
+ * in that case we have to update them to continue sending
+ * beacons. This also takes care of synchronizing beacon sending
+ * times with other stations.
+ */
+ if (hw_tu >= sc->nexttbtt)
+ ath5k_beacon_update_timers(sc, bc_tstamp);
+ }
+ }
- ds->ds_link = 0;
- ds->ds_data = bf->skbaddr;
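+ /* Track the average RSSI of beacons received from our BSSID. */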
+ static void
+ ath5k_update_beacon_rssi(struct ath5k_softc *sc, struct sk_buff *skb, int rssi)
+ {
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
+ struct ath5k_hw *ah = sc->ah;
+ struct ath_common *common = ath5k_hw_common(ah);
- spin_lock_bh(&txq->lock);
- list_add_tail(&bf->list, &txq->q);
- if (txq->link == NULL) /* is this first packet? */
- ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);
- else /* no, so only link it */
- *txq->link = bf->daddr;
+ /* only beacons from our BSSID */
+ if (!ieee80211_is_beacon(mgmt->frame_control) ||
+ memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) != 0)
+ return;
- txq->link = &ds->ds_link;
- ath5k_hw_start_tx_dma(ah, txq->qnum);
- mmiowb();
- spin_unlock_bh(&txq->lock);
+ ah->ah_beacon_rssi_avg = ath5k_moving_average(ah->ah_beacon_rssi_avg,
+ rssi);
- return 0;
- err_unmap:
- pci_unmap_single(sc->pdev, bf->skbaddr, skb->len, PCI_DMA_TODEVICE);
- return ret;
+ /* in IBSS mode we should keep RSSI statistics per neighbour */
+ /* le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS */
}
- /*******************\
- * Descriptors setup *
- \*******************/
-
- static int
- ath5k_desc_alloc(struct ath5k_softc *sc, struct pci_dev *pdev)
+ /*
+ * Compute padding position. skb must contain an IEEE 802.11 frame.
+ */
+ static int ath5k_common_padpos(struct sk_buff *skb)
{
- struct ath5k_desc *ds;
- struct ath5k_buf *bf;
- dma_addr_t da;
- unsigned int i;
- int ret;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ __le16 frame_control = hdr->frame_control;
+ int padpos = 24;
- /* allocate descriptors */
- sc->desc_len = sizeof(struct ath5k_desc) *
- (ATH_TXBUF + ATH_RXBUF + ATH_BCBUF + 1);
- sc->desc = pci_alloc_consistent(pdev, sc->desc_len, &sc->desc_daddr);
- if (sc->desc == NULL) {
- ATH5K_ERR(sc, "can't allocate descriptors\n");
- ret = -ENOMEM;
- goto err;
+ if (ieee80211_has_a4(frame_control)) {
+ padpos += ETH_ALEN;
}
- ds = sc->desc;
- da = sc->desc_daddr;
- ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "DMA map: %p (%zu) -> %llx\n",
- ds, sc->desc_len, (unsigned long long)sc->desc_daddr);
+ if (ieee80211_is_data_qos(frame_control)) {
+ padpos += IEEE80211_QOS_CTL_LEN;
+ }
+
+ return padpos;
+ }
+
+ /*
+ * This function expects an 802.11 frame and returns the number of
+ * bytes added, or -1 if we don't have enough header room.
+ */
+ static int ath5k_add_padding(struct sk_buff *skb)
+ {
+ int padpos = ath5k_common_padpos(skb);
+ int padsize = padpos & 3;
- bf = kcalloc(1 + ATH_TXBUF + ATH_RXBUF + ATH_BCBUF,
- sizeof(struct ath5k_buf), GFP_KERNEL);
- if (bf == NULL) {
- ATH5K_ERR(sc, "can't allocate bufptr\n");
- ret = -ENOMEM;
- goto err_free;
- }
- sc->bufptr = bf;
+ if (padsize && skb->len > padpos) {
- INIT_LIST_HEAD(&sc->rxbuf);
- for (i = 0; i < ATH_RXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
- bf->desc = ds;
- bf->daddr = da;
- list_add_tail(&bf->list, &sc->rxbuf);
- }
+ if (skb_headroom(skb) < padsize)
+ return -1;
- INIT_LIST_HEAD(&sc->txbuf);
- sc->txbuf_len = ATH_TXBUF;
- for (i = 0; i < ATH_TXBUF; i++, bf++, ds++,
- da += sizeof(*ds)) {
- bf->desc = ds;
- bf->daddr = da;
- list_add_tail(&bf->list, &sc->txbuf);
+ skb_push(skb, padsize);
+ memmove(skb->data, skb->data+padsize, padpos);
+ return padsize;
}
- /* beacon buffer */
- bf->desc = ds;
- bf->daddr = da;
- sc->bbuf = bf;
+ return 0;
+ }
+
+ /*
+ * The MAC header is padded to a 32-bit boundary if the
+ * packet payload is non-zero. The general calculation for
+ * padsize would take into account odd header lengths:
+ * padsize = 4 - (hdrlen & 3); however, since only
+ * even-length headers are used, padding can only be 0 or 2
+ * bytes and we can optimize this a bit. We must not try to
+ * remove padding from short control frames that do not have a
+ * payload.
+ *
+ * This function expects an 802.11 frame and returns the number of
+ * bytes removed.
+ */
+ static int ath5k_remove_padding(struct sk_buff *skb)
+ {
+ int padpos = ath5k_common_padpos(skb);
+ int padsize = padpos & 3;
+
+ if (padsize && skb->len >= padpos + padsize) {
+ memmove(skb->data + padsize, skb->data, padpos);
+ skb_pull(skb, padsize);
+ return padsize;
+ }
return 0;
- err_free:
- pci_free_consistent(pdev, sc->desc_len, sc->desc, sc->desc_daddr);
- err:
- sc->desc = NULL;
- return ret;
}
static void