We must not transmit packets we are not able to encrypt.
This fixes a bug where, in a small timeframe after machine resume and
before the encryption keys are reconfigured, packets could be sent
unencrypted and might leak information.
This also fixes three small resource leaks spotted while fixing the
security problem: properly deallocate the DMA slots in every DMA
allocation error path.
Signed-off-by: Michael Buesch <mb@bu3sch.de>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
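For illustration, the slot-rollback pattern this patch introduces in
dma_tx_fragment() looks like this in isolation. This is a minimal
standalone sketch with simplified, made-up types and helpers (struct
ring, request_slot(), generate_header() as defined here), not the
driver's real structures:

#include <stdio.h>

/* Hypothetical stand-in for the TX ring state (not b43's real
 * struct b43_dmaring). */
struct ring {
	int current_slot;
	int used_slots;
};

static int request_slot(struct ring *r)
{
	r->used_slots++;
	r->current_slot++;
	return r->current_slot;
}

/* Stand-in for a step that can fail, like b43_generate_txhdr(). */
static int generate_header(int fail)
{
	return fail ? -1 : 0;
}

static int tx_fragment(struct ring *r, int fail)
{
	/* Snapshot the ring state before allocating any slots... */
	int old_top_slot = r->current_slot;
	int old_used_slots = r->used_slots;

	request_slot(r);
	if (generate_header(fail)) {
		/* ...so every error path can roll the allocation back
		 * instead of leaking the slots. */
		r->current_slot = old_top_slot;
		r->used_slots = old_used_slots;
		return -1;
	}
	return 0;
}

int main(void)
{
	struct ring r = { .current_slot = -1, .used_slots = 0 };

	tx_fragment(&r, 1);	/* failing path */
	printf("%d %d\n", r.current_slot, r.used_slots);	/* -1 0: rolled back */
	return 0;
}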
static int dma_tx_fragment(struct b43_dmaring *ring,
			   struct sk_buff *skb,
			   struct ieee80211_tx_control *ctl)
{
const struct b43_dma_ops *ops = ring->ops;
u8 *header;
- int slot;
+ int slot, old_top_slot, old_used_slots;
int err;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_dmadesc_meta *meta_hdr;
	struct sk_buff *bounce_skb;
	u16 cookie;
	size_t hdrsize = b43_txhdr_size(ring->dev);
#define SLOTS_PER_PACKET 2
B43_WARN_ON(skb_shinfo(skb)->nr_frags);
+ old_top_slot = ring->current_slot;
+ old_used_slots = ring->used_slots;
+
/* Get a slot for the header. */
slot = request_slot(ring);
desc = ops->idx2desc(ring, slot, &meta_hdr);
header = &(ring->txhdr_cache[slot * hdrsize]);
cookie = generate_cookie(ring, slot);
- b43_generate_txhdr(ring->dev, header,
- skb->data, skb->len, ctl, cookie);
+ err = b43_generate_txhdr(ring->dev, header,
+ skb->data, skb->len, ctl, cookie);
+ if (unlikely(err)) {
+ ring->current_slot = old_top_slot;
+ ring->used_slots = old_used_slots;
+ return err;
+ }
meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
hdrsize, 1);
- if (dma_mapping_error(meta_hdr->dmaaddr))
+ if (dma_mapping_error(meta_hdr->dmaaddr)) {
+ ring->current_slot = old_top_slot;
+ ring->used_slots = old_used_slots;
return -EIO;
+ }
ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
hdrsize, 1, 0, 0);
	meta->skb = skb;
	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (dma_mapping_error(meta->dmaaddr)) {
bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
if (!bounce_skb) {
+ ring->current_slot = old_top_slot;
+ ring->used_slots = old_used_slots;
err = -ENOMEM;
goto out_unmap_hdr;
		}

		memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
		dev_kfree_skb_any(skb);
		skb = bounce_skb;
		meta->skb = skb;
		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
if (dma_mapping_error(meta->dmaaddr)) {
+ ring->current_slot = old_top_slot;
+ ring->used_slots = old_used_slots;
err = -EIO;
goto out_free_bounce;
}
B43_WARN_ON(ring->stopped);
err = dma_tx_fragment(ring, skb, ctl);
+ if (unlikely(err == -ENOKEY)) {
+ /* Drop this packet, as we don't have the encryption key
+ * anymore and must not transmit it unencrypted. */
+ dev_kfree_skb_any(skb);
+ err = 0;
+ goto out_unlock;
+ }
if (unlikely(err)) {
b43err(dev->wl, "DMA tx mapping failure\n");
goto out_unlock;
}
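The -ENOKEY case deliberately reports success after freeing the
packet: from mac80211's point of view the skb has been consumed, so
it must not be retried or handed to a fallback path. A standalone
sketch of that control flow (the function names here are hypothetical
stand-ins, not the driver's functions):

#include <stdio.h>
#include <errno.h>

#ifndef ENOKEY
#define ENOKEY 126	/* Linux errno value, defined here for portability */
#endif

/* Stand-in for dma_tx_fragment(): fails with -ENOKEY when no valid
 * encryption key is available. */
static int tx_fragment(int have_key)
{
	return have_key ? 0 : -ENOKEY;
}

static int op_tx(int have_key)
{
	int err = tx_fragment(have_key);

	if (err == -ENOKEY) {
		/* The packet must not go out unencrypted. Free it
		 * (the kernel code calls dev_kfree_skb_any() here) and
		 * report success so the stack treats it as consumed. */
		return 0;
	}
	if (err) {
		/* A real mapping failure is still reported upward. */
		return err;
	}
	return 0;	/* queued for transmission */
}

int main(void)
{
	printf("%d %d\n", op_tx(1), op_tx(0));	/* 0 0 */
	return 0;
}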
/* Generate a TX data header. */
-void b43_generate_txhdr(struct b43_wldev *dev,
- u8 *_txhdr,
- const unsigned char *fragment_data,
- unsigned int fragment_len,
- const struct ieee80211_tx_control *txctl,
- u16 cookie)
+int b43_generate_txhdr(struct b43_wldev *dev,
+ u8 *_txhdr,
+ const unsigned char *fragment_data,
+ unsigned int fragment_len,
+ const struct ieee80211_tx_control *txctl,
+ u16 cookie)
{
struct b43_txhdr *txhdr = (struct b43_txhdr *)_txhdr;
const struct b43_phy *phy = &dev->phy;
B43_WARN_ON(key_idx >= dev->max_nr_keys);
key = &(dev->key[key_idx]);
- if (likely(key->keyconf)) {
- /* This key is valid. Use it for encryption. */
-
- /* Hardware appends ICV. */
- plcp_fragment_len += txctl->icv_len;
-
- key_idx = b43_kidx_to_fw(dev, key_idx);
- mac_ctl |= (key_idx << B43_TXH_MAC_KEYIDX_SHIFT) &
- B43_TXH_MAC_KEYIDX;
- mac_ctl |= (key->algorithm << B43_TXH_MAC_KEYALG_SHIFT) &
- B43_TXH_MAC_KEYALG;
- wlhdr_len = ieee80211_get_hdrlen(fctl);
- iv_len = min((size_t) txctl->iv_len,
- ARRAY_SIZE(txhdr->iv));
- memcpy(txhdr->iv, ((u8 *) wlhdr) + wlhdr_len, iv_len);
+ if (unlikely(!key->keyconf)) {
+ /* This key is invalid. This might only happen
+ * in a short timeframe after machine resume before
+ * we were able to reconfigure keys.
+ * Drop this packet completely. Do not transmit it
+ * unencrypted to avoid leaking information. */
+ return -ENOKEY;
}
+
+ /* Hardware appends ICV. */
+ plcp_fragment_len += txctl->icv_len;
+
+ key_idx = b43_kidx_to_fw(dev, key_idx);
+ mac_ctl |= (key_idx << B43_TXH_MAC_KEYIDX_SHIFT) &
+ B43_TXH_MAC_KEYIDX;
+ mac_ctl |= (key->algorithm << B43_TXH_MAC_KEYALG_SHIFT) &
+ B43_TXH_MAC_KEYALG;
+ wlhdr_len = ieee80211_get_hdrlen(fctl);
+ iv_len = min((size_t) txctl->iv_len,
+ ARRAY_SIZE(txhdr->iv));
+ memcpy(txhdr->iv, ((u8 *) wlhdr) + wlhdr_len, iv_len);
}
	if (b43_is_old_txhdr_format(dev)) {
		b43_generate_plcp_hdr((struct b43_plcp_hdr4 *)(&txhdr->old_format.plcp),
				      plcp_fragment_len, rate);
	}
txhdr->phy_ctl = cpu_to_le16(phy_ctl);
txhdr->extra_ft = extra_ft;
+ return 0;
}
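The key index and algorithm are merged into the MAC control word with
the usual shift-and-mask idiom seen above. The same idiom in
isolation, with hypothetical mask and shift values (the real
B43_TXH_MAC_KEYIDX/B43_TXH_MAC_KEYALG definitions live in xmit.h):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical field layout, shaped like the B43_TXH_MAC_* fields: */
#define TXH_MAC_KEYIDX		0x0F000000	/* bits 24..27 */
#define TXH_MAC_KEYIDX_SHIFT	24
#define TXH_MAC_KEYALG		0x00070000	/* bits 16..18 */
#define TXH_MAC_KEYALG_SHIFT	16

int main(void)
{
	uint32_t mac_ctl = 0;
	unsigned int key_idx = 5, algorithm = 3;

	/* Shift each value into position, then mask it so it cannot
	 * spill into neighboring fields of the control word. */
	mac_ctl |= (key_idx << TXH_MAC_KEYIDX_SHIFT) & TXH_MAC_KEYIDX;
	mac_ctl |= (algorithm << TXH_MAC_KEYALG_SHIFT) & TXH_MAC_KEYALG;

	printf("0x%08x\n", mac_ctl);	/* 0x05030000 */
	return 0;
}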
static s8 b43_rssi_postprocess(struct b43_wldev *dev,
-void b43_generate_txhdr(struct b43_wldev *dev,
- u8 * txhdr,
- const unsigned char *fragment_data,
- unsigned int fragment_len,
- const struct ieee80211_tx_control *txctl, u16 cookie);
+int b43_generate_txhdr(struct b43_wldev *dev,
+ u8 * txhdr,
+ const unsigned char *fragment_data,
+ unsigned int fragment_len,
+ const struct ieee80211_tx_control *txctl, u16 cookie);
/* Transmit Status */
struct b43_txstatus {