return ret;
}
- int wake;
++/*
++ * Default primary interrupt handler for threaded interrupts. Is
++ * assigned as primary handler when request_threaded_irq is called
++ * with handler == NULL. Useful for oneshot interrupts.
++ */
++static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
++{
++ return IRQ_WAKE_THREAD;
++}
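
/*
 * Illustrative (not part of this patch): a driver relying on the
 * default primary handler above. Passing handler == NULL together
 * with IRQF_ONESHOT keeps the line masked until the thread function
 * returns. foo_thread_fn, "foo" and foo_dev are hypothetical names.
 */
static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
	/* may sleep here, e.g. for I2C/SPI transactions */
	return IRQ_HANDLED;
}

	ret = request_threaded_irq(irq, NULL, foo_thread_fn,
				   IRQF_ONESHOT, "foo", foo_dev);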
++
++/*
++ * Primary handler for nested threaded interrupts. Should never be
++ * called.
++ */
++static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
++{
++ WARN(1, "Primary handler called for nested irq %d\n", irq);
++ return IRQ_NONE;
++}
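
/*
 * For context (sketch, not part of this patch): nested irqs are
 * invoked from the parent's interrupt thread via handle_nested_irq(),
 * which calls the child's thread_fn directly; locking, statistics and
 * the IRQ_INPROGRESS bookkeeping are elided here.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action = desc->action;

	might_sleep();
	action->thread_fn(action->irq, action->dev_id);
}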
++
+static int irq_wait_for_interrupt(struct irqaction *action)
+{
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if (test_and_clear_bit(IRQTF_RUNTHREAD,
+ &action->thread_flags)) {
+ __set_current_state(TASK_RUNNING);
+ return 0;
+ }
+ schedule();
+ }
+ return -1;
+}
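
/*
 * For context (excerpt sketch, not part of this patch): the wakeup
 * side in handle_IRQ_event() sets IRQTF_RUNTHREAD and wakes the
 * thread when the primary handler returns IRQ_WAKE_THREAD, unless
 * the thread already died.
 */
	case IRQ_WAKE_THREAD:
		if (likely(!test_bit(IRQTF_DIED, &action->thread_flags))) {
			set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
			wake_up_process(action->thread);
		}
		break;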
+
++/*
++ * Oneshot interrupts keep the irq line masked until the threaded
++ * handler has finished. Unmask if the interrupt has not been disabled and
++ * is marked MASKED.
++ */
++static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
++{
++ chip_bus_lock(irq, desc);
++ spin_lock_irq(&desc->lock);
++ if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
++ desc->status &= ~IRQ_MASKED;
++ desc->chip->unmask(irq);
++ }
++ spin_unlock_irq(&desc->lock);
++ chip_bus_sync_unlock(irq, desc);
++}
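
/*
 * For context (excerpt sketch, not part of this patch): the flow
 * handler side, e.g. in handle_level_irq(), keeps a oneshot line
 * masked instead of unmasking after the primary handler, so the
 * deferred unmask above is the only place the line gets reenabled.
 */
	if (unlikely(desc->status & IRQ_ONESHOT))
		desc->status |= IRQ_MASKED;
	else if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
		desc->chip->unmask(irq);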
++
+#ifdef CONFIG_SMP
+/*
+ * Check whether we need to change the affinity of the interrupt thread.
+ */
+static void
+irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
+{
+ cpumask_var_t mask;
+
+ if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
+ return;
+
+ /*
+	 * In case we are out of memory we set IRQTF_AFFINITY again and
+	 * retry on the next wakeup of the thread.
+ */
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
+ set_bit(IRQTF_AFFINITY, &action->thread_flags);
+ return;
+ }
+
+ spin_lock_irq(&desc->lock);
+ cpumask_copy(mask, desc->affinity);
+ spin_unlock_irq(&desc->lock);
+
+ set_cpus_allowed_ptr(current, mask);
+ free_cpumask_var(mask);
+}
+#else
+static inline void
+irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
+#endif
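
/*
 * For context (sketch, not part of this patch): IRQTF_AFFINITY is set
 * when the irq affinity changes; the thread then adjusts its own
 * cpumask above, where set_cpus_allowed_ptr() may safely be called.
 */
static void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action = desc->action;

	while (action) {
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
		action = action->next;
	}
}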
+
+/*
+ * Interrupt handler thread
+ */
+static int irq_thread(void *data)
+{
+ struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2, };
+ struct irqaction *action = data;
+ struct irq_desc *desc = irq_to_desc(action->irq);
++ int wake, oneshot = desc->status & IRQ_ONESHOT;
+
+	sched_setscheduler(current, SCHED_FIFO, &param);
+ current->irqaction = action;
+
+ while (!irq_wait_for_interrupt(action)) {
+
+ irq_thread_check_affinity(desc, action);
+
+ atomic_inc(&desc->threads_active);
+
+ spin_lock_irq(&desc->lock);
+ if (unlikely(desc->status & IRQ_DISABLED)) {
+ /*
+ * CHECKME: We might need a dedicated
+ * IRQ_THREAD_PENDING flag here, which
+ * retriggers the thread in check_irq_resend()
+ * but AFAICT IRQ_PENDING should be fine as it
+ * retriggers the interrupt itself --- tglx
+ */
+ desc->status |= IRQ_PENDING;
+ spin_unlock_irq(&desc->lock);
+ } else {
+ spin_unlock_irq(&desc->lock);
+
+ action->thread_fn(action->irq, action->dev_id);
++
++ if (oneshot)
++ irq_finalize_oneshot(action->irq, desc);
+ }
+
+ wake = atomic_dec_and_test(&desc->threads_active);
+
+ if (wake && waitqueue_active(&desc->wait_for_threads))
+ wake_up(&desc->wait_for_threads);
+ }
+
+ /*
+	 * Clear irqaction. Otherwise exit_irq_thread() would complain
+	 * about an active irq thread going into nirvana.
+ */
+ current->irqaction = NULL;
+ return 0;
+}
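
/*
 * For context (excerpt sketch, not part of this patch): the
 * threads_active / wait_for_threads pair is consumed by
 * synchronize_irq(), which waits for all threaded handlers to finish
 * after the hardirq part has become idle.
 */
	wait_event(desc->wait_for_threads,
		   !atomic_read(&desc->threads_active));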
+
+/*
+ * Called from do_exit()
+ */
+void exit_irq_thread(void)
+{
+ struct task_struct *tsk = current;
+
+ if (!tsk->irqaction)
+ return;
+
+ printk(KERN_ERR
+ "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
+ tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);
+
+ /*
+ * Set the THREAD DIED flag to prevent further wakeups of the
+ * soon to be gone threaded handler.
+ */
+	set_bit(IRQTF_DIED, &tsk->irqaction->thread_flags);
+}
+
/*
* Internal function to register an irqaction - typically used to
* allocate special interrupts that are part of the architecture.
rand_initialize_irq(irq);
}
- * Threaded handler ?
++	/* Oneshot interrupts are not allowed with shared interrupts */
++ if ((new->flags & IRQF_ONESHOT) && (new->flags & IRQF_SHARED))
++ return -EINVAL;
++
++ /*
++ * Check whether the interrupt nests into another interrupt
++ * thread.
++ */
++ nested = desc->status & IRQ_NESTED_THREAD;
++ if (nested) {
++ if (!new->thread_fn)
++ return -EINVAL;
++ /*
++ * Replace the primary handler which was provided from
++	 * the driver for non-nested interrupt handling by the
++ * dummy function which warns when called.
++ */
++ new->handler = irq_nested_primary_handler;
++ }
++
+ /*
- if (new->thread_fn) {
++ * Create a handler thread when a thread function is supplied
++ * and the interrupt does not nest into another interrupt
++ * thread.
+ */
++ if (new->thread_fn && !nested) {
+ struct task_struct *t;
+
+ t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
+ new->name);
+ if (IS_ERR(t))
+ return PTR_ERR(t);
+ /*
+ * We keep the reference to the task struct even if
+ * the thread dies to avoid that the interrupt code
+ * references an already freed task_struct.
+ */
+ get_task_struct(t);
+ new->thread = t;
+ }
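
/*
 * For context (excerpt sketch, not part of this patch): the matching
 * teardown in __free_irq() stops a live thread and drops the task
 * reference taken above.
 */
	if (action->thread) {
		if (!test_bit(IRQTF_DIED, &action->thread_flags))
			kthread_stop(action->thread);
		put_task_struct(action->thread);
	}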
+
/*
* The following block of code has to be executed atomically
*/
EXPORT_SYMBOL(free_irq);
/**
- * request_irq - allocate an interrupt line
+ * request_threaded_irq - allocate an interrupt line
* @irq: Interrupt line to allocate
- * @handler: Function to be called when the IRQ occurs
+ * @handler: Function to be called when the IRQ occurs.
+ * Primary handler for threaded interrupts
++ * If NULL and thread_fn != NULL the default
++ * primary handler is installed
+ * @thread_fn: Function called from the irq handler thread
+ * If NULL, no irq thread is created
* @irqflags: Interrupt type flags
* @devname: An ascii name for the claiming device
* @dev_id: A cookie passed back to the handler function