#define WL1271_IRQ_MAX_LOOPS 256
-static irqreturn_t wl1271_irq(int irq, void *cookie)
+static int wlcore_irq_locked(struct wl1271 *wl)
{
- int ret;
+ int ret = 0;
u32 intr;
int loopcount = WL1271_IRQ_MAX_LOOPS;
- struct wl1271 *wl = (struct wl1271 *)cookie;
bool done = false;
unsigned int defer_count;
unsigned long flags;
- /* TX might be handled here, avoid redundant work */
- set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
- cancel_work_sync(&wl->tx_work);
-
/*
* In case edge triggered interrupt must be used, we cannot iterate
* more than once without introducing race conditions with the hardirq.
*/
if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
loopcount = 1;
- mutex_lock(&wl->mutex);
-
wl1271_debug(DEBUG_IRQ, "IRQ work");
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
while (!done && loopcount--) {
clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
smp_mb__after_clear_bit();
ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
- if (ret < 0) {
- wl12xx_queue_recovery_work(wl);
+ if (ret < 0)
goto out;
- }
wlcore_hw_tx_immediate_compl(wl);
intr = le32_to_cpu(wl->fw_status_1->intr);
intr &= WLCORE_ALL_INTR_MASK;
if (!intr) {
done = true;
continue;
}
if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
wl1271_error("HW watchdog interrupt received! starting recovery.");
wl->watchdog_recovery = true;
- wl12xx_queue_recovery_work(wl);
+ ret = -EIO;
/* restarting the chip. ignore any other interrupt. */
goto out;
}
if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
wl1271_error("SW watchdog interrupt received! "
"starting recovery.");
wl->watchdog_recovery = true;
- wl12xx_queue_recovery_work(wl);
+ ret = -EIO;
/* restarting the chip. ignore any other interrupt. */
goto out;
}
if (likely(intr & WL1271_ACX_INTR_DATA)) {
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
ret = wlcore_rx(wl, wl->fw_status_1);
- if (ret < 0) {
- wl12xx_queue_recovery_work(wl);
+ if (ret < 0)
goto out;
- }
/* Check if any tx blocks were freed */
spin_lock_irqsave(&wl->wl_lock, flags);
if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
wl1271_tx_total_queue_count(wl) > 0) {
spin_unlock_irqrestore(&wl->wl_lock, flags);
/*
* In order to avoid starvation of the TX path,
* call the work function directly.
*/
ret = wlcore_tx_work_locked(wl);
- if (ret < 0) {
- wl12xx_queue_recovery_work(wl);
+ if (ret < 0)
goto out;
- }
} else {
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
/* check for tx results */
ret = wlcore_hw_tx_delayed_compl(wl);
- if (ret < 0) {
- wl12xx_queue_recovery_work(wl);
+ if (ret < 0)
goto out;
- }
/* Make sure the deferred queues don't get too long */
defer_count = skb_queue_len(&wl->deferred_tx_queue) +
skb_queue_len(&wl->deferred_rx_queue);
if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
wl1271_flush_deferred_work(wl);
}
if (intr & WL1271_ACX_INTR_EVENT_A) {
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
ret = wl1271_event_handle(wl, 0);
- if (ret < 0) {
- wl12xx_queue_recovery_work(wl);
+ if (ret < 0)
goto out;
- }
}
if (intr & WL1271_ACX_INTR_EVENT_B) {
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
ret = wl1271_event_handle(wl, 1);
- if (ret < 0) {
- wl12xx_queue_recovery_work(wl);
+ if (ret < 0)
goto out;
- }
}
if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_INIT_COMPLETE");
}
wl1271_ps_elp_sleep(wl);
out:
+ return ret;
+}
+
+static irqreturn_t wlcore_irq(int irq, void *cookie)
+{
+ int ret;
+ unsigned long flags;
+ struct wl1271 *wl = cookie;
+
+ /* TX might be handled here, avoid redundant work */
+ set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
+ cancel_work_sync(&wl->tx_work);
+
+ mutex_lock(&wl->mutex);
+
+ ret = wlcore_irq_locked(wl);
+ if (ret)
+ wl12xx_queue_recovery_work(wl);
+
spin_lock_irqsave(&wl->wl_lock, flags);
/* In case TX was not handled here, queue TX work */
clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
wl1271_tx_total_queue_count(wl) > 0)
ieee80211_queue_work(wl->hw, &wl->tx_work);
spin_unlock_irqrestore(&wl->wl_lock, flags);
mutex_unlock(&wl->mutex);
return IRQ_HANDLED;
}
/* don't talk to the HW if recovery is pending */
if (!pending_recovery)
- wl1271_irq(0, wl);
+ wlcore_irq(0, wl);
wlcore_enable_interrupts(wl);
}
if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
irqflags = IRQF_TRIGGER_RISING;
else
irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
- ret = request_threaded_irq(wl->irq, wl12xx_hardirq, wl1271_irq,
+ ret = request_threaded_irq(wl->irq, wl12xx_hardirq, wlcore_irq,
irqflags,
pdev->name, wl);
if (ret < 0) {
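
The core idea of the patch: wlcore_irq_locked() now reports failures through a negative errno instead of calling wl12xx_queue_recovery_work() at every error site, and the new wlcore_irq() wrapper makes the recovery decision in exactly one place. Below is a minimal userspace C sketch of that shape, assuming hypothetical stand-ins: demo_dev for struct wl1271, demo_irq_locked for wlcore_irq_locked, demo_irq for wlcore_irq, queue_recovery for wl12xx_queue_recovery_work, and a pthread mutex for wl->mutex. It illustrates the error-propagation refactor only; it is not driver code.

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical device context standing in for struct wl1271. */
struct demo_dev {
	pthread_mutex_t mutex;     /* stands in for wl->mutex */
	bool watchdog_fired;       /* simulates WL1271_ACX_INTR_WATCHDOG */
	bool recovery_queued;
};

/* Stand-in for wl12xx_queue_recovery_work(): after the refactor it is
 * called from exactly one place, the wrapper below. */
static void queue_recovery(struct demo_dev *dev)
{
	dev->recovery_queued = true;
	puts("recovery queued");
}

/* Like wlcore_irq_locked(): runs with the mutex held and only
 * *reports* failure through its return value. */
static int demo_irq_locked(struct demo_dev *dev)
{
	int ret = 0;

	if (dev->watchdog_fired) {
		/* restarting the chip. ignore any other interrupt. */
		ret = -EIO;
		goto out;
	}
	/* ... RX, TX completion and event handling would go here, each
	 * using `if (ret < 0) goto out;` instead of queueing recovery ... */
out:
	return ret;
}

/* Like wlcore_irq(): takes the lock, runs the worker, and converts an
 * error into a recovery request exactly once. */
static void demo_irq(struct demo_dev *dev)
{
	int ret;

	pthread_mutex_lock(&dev->mutex);
	ret = demo_irq_locked(dev);
	if (ret)
		queue_recovery(dev);
	pthread_mutex_unlock(&dev->mutex);
}

int main(void)
{
	struct demo_dev dev = { .mutex = PTHREAD_MUTEX_INITIALIZER };

	demo_irq(&dev);            /* clean pass: nothing queued */
	dev.watchdog_fired = true;
	demo_irq(&dev);            /* failing pass: prints "recovery queued" */
	return 0;
}

Funneling every error path through a single `if (ret) wl12xx_queue_recovery_work(wl);` removes the duplicated recovery calls scattered through the handler, and it lets other callers, such as the resume path above that now calls wlcore_irq(0, wl), inherit the same recovery policy for free.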