irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;
+	bool valid = true;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
-	cpumask_copy(mask, desc->irq_data.affinity);
+	/*
+	 * This code is triggered unconditionally. Check the affinity
+	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
+	 */
+	if (desc->irq_data.affinity)
+		cpumask_copy(mask, desc->irq_data.affinity);
+	else
+		valid = false;
	raw_spin_unlock_irq(&desc->lock);

-	set_cpus_allowed_ptr(current, mask);
+	if (valid)
+		set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
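
The hunk above follows a standard kernel pattern: take a snapshot of shared state while holding the lock, remember whether the snapshot is usable, and make the call that may sleep only after the lock has been dropped (desc->lock is a raw spinlock taken with interrupts disabled, so set_cpus_allowed_ptr() must not be called under it). Below is a minimal user-space sketch of the same idiom, assuming POSIX threads; worker_ctx, worker_check_affinity and the field names are hypothetical illustrations, not kernel API.

/*
 * Hypothetical user-space sketch of the idiom above: copy shared state
 * under a lock, record whether the source was valid, and make the
 * potentially blocking call only after the lock is released.
 */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>

struct worker_ctx {			/* hypothetical */
	pthread_mutex_t lock;
	cpu_set_t *affinity;		/* may be NULL until configured */
};

static void worker_check_affinity(struct worker_ctx *ctx)
{
	cpu_set_t snapshot;
	bool valid = true;

	pthread_mutex_lock(&ctx->lock);
	if (ctx->affinity)
		snapshot = *ctx->affinity;	/* copy under the lock */
	else
		valid = false;			/* nothing to apply yet */
	pthread_mutex_unlock(&ctx->lock);

	/* The blocking work happens outside the critical section. */
	if (valid)
		pthread_setaffinity_np(pthread_self(), sizeof(snapshot),
				       &snapshot);
}

The validity flag avoids dereferencing a possibly-NULL mask while keeping the critical section as short as possible.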

		/*
		 * We keep the reference to the task struct even if
		 * the thread dies to avoid that the interrupt code
		 * references an already freed task_struct.
		 */
		get_task_struct(t);
		new->thread = t;
+		/*
+		 * Tell the thread to set its affinity. This is
+		 * important for shared interrupt handlers as we do
+		 * not invoke setup_affinity() for the secondary
+		 * handlers as everything is already set up. Even for
+		 * interrupts marked with IRQF_NO_BALANCE this is
+		 * correct as we want the thread to move to the cpu(s)
+		 * on which the requesting code placed the interrupt.
+		 */
+		set_bit(IRQTF_AFFINITY, &new->thread_flags);
	}

	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
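
This second hunk is the other half of the IRQTF_AFFINITY handshake: the registering side sets the bit, and the interrupt thread consumes it with test_and_clear_bit() the next time it runs, so the sleeping affinity update always happens in the thread's own, schedulable context. A minimal sketch of that test-and-clear handshake, assuming C11 atomics; THREADF_AFFINITY and the helper names are made up for illustration.

/*
 * Sketch of the set_bit()/test_and_clear_bit() handshake behind
 * IRQTF_AFFINITY, using C11 atomics. All names are hypothetical.
 */
#include <stdatomic.h>
#include <stdbool.h>

#define THREADF_AFFINITY (1u << 0)	/* hypothetical flag bit */

static _Atomic unsigned int thread_flags;

/* Registering side: ask the thread to re-check its affinity. */
static void request_affinity_update(void)
{
	atomic_fetch_or(&thread_flags, THREADF_AFFINITY);
}

/*
 * Thread side: atomically test and clear the bit, so each request is
 * consumed exactly once, mirroring test_and_clear_bit() above.
 */
static bool affinity_update_pending(void)
{
	unsigned int old = atomic_fetch_and(&thread_flags,
					    ~THREADF_AFFINITY);
	return (old & THREADF_AFFINITY) != 0;
}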