void ath6kl_connect_event(struct ath6kl *ar, u16 channel, u8 *bssid,
u16 listen_int, u16 beacon_int,
enum network_type net_type, u8 beacon_ie_len,
u8 assoc_req_len, u8 assoc_resp_len,
u8 *assoc_info)
{
- unsigned long flags;
-
ath6kl_cfg80211_connect_event(ar, channel, bssid,
listen_int, beacon_int,
net_type, beacon_ie_len,
assoc_req_len, assoc_resp_len,
assoc_info);
netif_wake_queue(ar->net_dev);
/* Update connect & link status atomically */
- spin_lock_irqsave(&ar->lock, flags);
+ spin_lock_bh(&ar->lock);
set_bit(CONNECTED, &ar->flag);
clear_bit(CONNECT_PEND, &ar->flag);
netif_carrier_on(ar->net_dev);
- spin_unlock_irqrestore(&ar->lock, flags);
+ spin_unlock_bh(&ar->lock);
aggr_reset_state(ar->aggr_cntxt);
ar->reconnect_flag = 0;
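Every hunk in this patch makes the same substitution, and this first one shows the reasoning: ar->lock is only contended between process context and softirq context, so the irqsave variant (which also masks hard interrupts and has to carry the flags word around) does more than the code needs; disabling bottom halves is sufficient. A minimal self-contained sketch of that division of labor follows; demo_lock, demo_flag and both functions are hypothetical, not ath6kl code.

    #include <linux/bitops.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);	/* hypothetical lock */
    static unsigned long demo_flag;

    /* Process-context writer: only a softirq could race with us for
     * demo_lock, so disabling bottom halves on this CPU is enough. */
    static void demo_set_connected(void)
    {
    	spin_lock_bh(&demo_lock);
    	set_bit(0, &demo_flag);
    	spin_unlock_bh(&demo_lock);
    }

    /* Softirq-context reader (e.g. a tasklet body): bottom halves are
     * already disabled while this runs, so the plain variant suffices. */
    static void demo_tasklet_fn(unsigned long data)
    {
    	spin_lock(&demo_lock);
    	if (test_bit(0, &demo_flag))
    		clear_bit(0, &demo_flag);
    	spin_unlock(&demo_lock);
    }

The _bh writer and the plain-lock softirq reader give the same mutual exclusion the irqsave pair did, without disabling interrupts on every acquisition.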
void ath6kl_disconnect_event(struct ath6kl *ar, u8 reason, u8 *bssid,
u8 assoc_resp_len, u8 *assoc_info,
u16 prot_reason_status)
{
- unsigned long flags;
-
if (ar->nw_type == AP_NETWORK) {
if (!ath6kl_remove_sta(ar, bssid, prot_reason_status))
return;
}
/* update connect & link status atomically */
- spin_lock_irqsave(&ar->lock, flags);
+ spin_lock_bh(&ar->lock);
clear_bit(CONNECTED, &ar->flag);
netif_carrier_off(ar->net_dev);
- spin_unlock_irqrestore(&ar->lock, flags);
+ spin_unlock_bh(&ar->lock);
if ((reason != CSERV_DISCONNECT) || (ar->reconnect_flag != 1))
ar->reconnect_flag = 0;
static int ath6kl_open(struct net_device *dev)
{
struct ath6kl *ar = ath6kl_priv(dev);
- unsigned long flags;
- spin_lock_irqsave(&ar->lock, flags);
+ spin_lock_bh(&ar->lock);
set_bit(WLAN_ENABLED, &ar->flag);
if (test_bit(CONNECTED, &ar->flag)) {
netif_carrier_on(dev);
netif_wake_queue(dev);
} else
netif_carrier_off(dev);
- spin_unlock_irqrestore(&ar->lock, flags);
+ spin_unlock_bh(&ar->lock);
return 0;
}
static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
{
struct bus_request *bus_req;
- unsigned long flag;
- spin_lock_irqsave(&ar_sdio->lock, flag);
+ spin_lock_bh(&ar_sdio->lock);
if (list_empty(&ar_sdio->bus_req_freeq)) {
- spin_unlock_irqrestore(&ar_sdio->lock, flag);
+ spin_unlock_bh(&ar_sdio->lock);
return NULL;
}
bus_req = list_first_entry(&ar_sdio->bus_req_freeq,
struct bus_request, list);
list_del(&bus_req->list);
- spin_unlock_irqrestore(&ar_sdio->lock, flag);
+ spin_unlock_bh(&ar_sdio->lock);
ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
__func__, bus_req);
static void ath6kl_sdio_free_bus_req(struct ath6kl_sdio *ar_sdio,
struct bus_request *bus_req)
{
- unsigned long flag;
-
ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
__func__, bus_req);
- spin_lock_irqsave(&ar_sdio->lock, flag);
+ spin_lock_bh(&ar_sdio->lock);
list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
- spin_unlock_irqrestore(&ar_sdio->lock, flag);
+ spin_unlock_bh(&ar_sdio->lock);
}
static void ath6kl_sdio_setup_scat_data(struct hif_scatter_req *scat_req,
struct bus_request *req)
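Taken together, ath6kl_sdio_alloc_busreq() and ath6kl_sdio_free_bus_req() form a spinlock-protected free list: allocation pops the head of bus_req_freeq, freeing appends to the tail. A compressed, self-contained sketch of the same idiom under _bh locking (all demo_* names are hypothetical):

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct demo_req {			/* stands in for struct bus_request */
    	struct list_head list;
    };

    static LIST_HEAD(demo_freeq);	/* hypothetical free list */
    static DEFINE_SPINLOCK(demo_lock);

    /* Pop one request, or NULL if the pool is exhausted. */
    static struct demo_req *demo_alloc(void)
    {
    	struct demo_req *req = NULL;

    	spin_lock_bh(&demo_lock);
    	if (!list_empty(&demo_freeq)) {
    		req = list_first_entry(&demo_freeq, struct demo_req, list);
    		list_del(&req->list);
    	}
    	spin_unlock_bh(&demo_lock);

    	return req;
    }

    /* Return a request to the pool. */
    static void demo_free(struct demo_req *req)
    {
    	spin_lock_bh(&demo_lock);
    	list_add_tail(&req->list, &demo_freeq);
    	spin_unlock_bh(&demo_lock);
    }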
static void ath6kl_sdio_write_async_work(struct work_struct *work)
{
struct ath6kl_sdio *ar_sdio;
- unsigned long flags;
struct bus_request *req, *tmp_req;
ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work);
sdio_claim_host(ar_sdio->func);
- spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
+ spin_lock_bh(&ar_sdio->wr_async_lock);
list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
list_del(&req->list);
- spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
+ spin_unlock_bh(&ar_sdio->wr_async_lock);
__ath6kl_sdio_write_async(ar_sdio, req);
- spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
+ spin_lock_bh(&ar_sdio->wr_async_lock);
}
- spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
+ spin_unlock_bh(&ar_sdio->wr_async_lock);
sdio_release_host(ar_sdio->func);
}
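Note how ath6kl_sdio_write_async_work() drops wr_async_lock around each __ath6kl_sdio_write_async() call: the SDIO transfer can sleep, so it must not run under a spinlock. The same drain can also be written as a while/list_empty loop, which makes explicit that the queue head is re-read after every unlocked section rather than relying on a cursor cached before the lock was dropped. A sketch with hypothetical demo_* names:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct demo_req {
    	struct list_head list;
    };

    static LIST_HEAD(demo_asyncq);	/* hypothetical pending queue */
    static DEFINE_SPINLOCK(demo_async_lock);

    static void demo_process(struct demo_req *req)
    {
    	/* stands in for the potentially sleeping SDIO write */
    }

    /* Drain the queue; the lock is never held across demo_process(). */
    static void demo_drain_asyncq(void)
    {
    	struct demo_req *req;

    	spin_lock_bh(&demo_async_lock);
    	while (!list_empty(&demo_asyncq)) {
    		req = list_first_entry(&demo_asyncq, struct demo_req, list);
    		list_del(&req->list);
    		spin_unlock_bh(&demo_async_lock);
    		demo_process(req);	/* may sleep */
    		spin_lock_bh(&demo_async_lock);
    	}
    	spin_unlock_bh(&demo_async_lock);
    }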
static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
u32 length, u32 request,
struct htc_packet *packet)
{
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
struct bus_request *bus_req;
- unsigned long flags;
bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
bus_req->request = request;
bus_req->packet = packet;
- spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
+ spin_lock_bh(&ar_sdio->wr_async_lock);
list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
- spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
+ spin_unlock_bh(&ar_sdio->wr_async_lock);
queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
return 0;
static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar)
{
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
struct hif_scatter_req *node = NULL;
- unsigned long flag;
- spin_lock_irqsave(&ar_sdio->scat_lock, flag);
+ spin_lock_bh(&ar_sdio->scat_lock);
if (!list_empty(&ar_sdio->scat_req)) {
node = list_first_entry(&ar_sdio->scat_req,
struct hif_scatter_req, list);
list_del(&node->list);
}
- spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
+ spin_unlock_bh(&ar_sdio->scat_lock);
return node;
}
static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar,
struct hif_scatter_req *s_req)
{
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
- unsigned long flag;
- spin_lock_irqsave(&ar_sdio->scat_lock, flag);
+ spin_lock_bh(&ar_sdio->scat_lock);
list_add_tail(&s_req->list, &ar_sdio->scat_req);
- spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
+ spin_unlock_bh(&ar_sdio->scat_lock);
}
static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
struct hif_scatter_req *scat_req)
{
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
u32 request = scat_req->req;
int status = 0;
- unsigned long flags;
if (!scat_req->len)
return -EINVAL;
if (request & HIF_SYNCHRONOUS) {
sdio_claim_host(ar_sdio->func);
status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest);
sdio_release_host(ar_sdio->func);
} else {
- spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
+ spin_lock_bh(&ar_sdio->wr_async_lock);
list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq);
- spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
+ spin_unlock_bh(&ar_sdio->wr_async_lock);
queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
}
static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar)
{
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
struct hif_scatter_req *s_req, *tmp_req;
- unsigned long flag;
/* empty the free list */
- spin_lock_irqsave(&ar_sdio->scat_lock, flag);
+ spin_lock_bh(&ar_sdio->scat_lock);
list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) {
list_del(&s_req->list);
- spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
+ spin_unlock_bh(&ar_sdio->scat_lock);
if (s_req->busrequest)
ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest);
kfree(s_req->sgentries);
kfree(s_req);
- spin_lock_irqsave(&ar_sdio->scat_lock, flag);
+ spin_lock_bh(&ar_sdio->scat_lock);
}
- spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
+ spin_unlock_bh(&ar_sdio->scat_lock);
}
/* setup of HIF scatter resources */
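A closing caveat that applies to every conversion above: spin_lock_bh() masks only softirqs, so the patch is correct only on the assumption that none of these locks is ever taken from hard interrupt context. A lock shared with a hardirq handler has to keep the irqsave form, as in this contrasting sketch (hw_lock, hw_events and both functions are hypothetical):

    #include <linux/interrupt.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    static DEFINE_SPINLOCK(hw_lock);	/* hypothetical: taken in hardirq */
    static u32 hw_events;

    static irqreturn_t demo_hardirq(int irq, void *dev_id)
    {
    	/* Hard interrupts are disabled on this CPU while we run. */
    	spin_lock(&hw_lock);
    	hw_events++;
    	spin_unlock(&hw_lock);
    	return IRQ_HANDLED;
    }

    static u32 demo_read_events(void)
    {
    	unsigned long flags;
    	u32 val;

    	/* _bh would not keep demo_hardirq() out: if the interrupt
    	 * fired while we held hw_lock on this CPU, the handler would
    	 * spin on the lock forever. Hardirq-shared locks need irqsave. */
    	spin_lock_irqsave(&hw_lock, flags);
    	val = hw_events;
    	spin_unlock_irqrestore(&hw_lock, flags);
    	return val;
    }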