From a68db763af9b676590c3fe9ec3f17bf18015eb2f Mon Sep 17 00:00:00 2001
From: Peter Chubb
Date: Thu, 23 Jun 2005 21:14:00 -0700
Subject: [PATCH] [IA64] Fix another IA64 preemption problem

There's another problem shown up by Ingo's recent patch to make
smp_processor_id() complain if it's called with preemption enabled.

local_finish_flush_tlb_mm() calls activate_context() in a situation
where it could be rescheduled to another processor.  This patch
disables preemption around the call.

Signed-off-by: Peter Chubb
Signed-off-by: Tony Luck
---
 arch/ia64/kernel/smp.c         | 3 +++
 include/asm-ia64/mmu_context.h | 3 +++
 2 files changed, 6 insertions(+)

diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index b49d4ddaab93..0166a9847095 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -231,13 +231,16 @@ smp_flush_tlb_all (void)
 void
 smp_flush_tlb_mm (struct mm_struct *mm)
 {
+	preempt_disable();
 	/* this happens for the common case of a single-threaded fork():  */
 	if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1))
 	{
 		local_finish_flush_tlb_mm(mm);
+		preempt_enable();
 		return;
 	}
 
+	preempt_enable();
 	/*
 	 * We could optimize this further by using mm->cpu_vm_mask to track which CPUs
 	 * have been running in the address space.  It's not clear that this is worth the
diff --git a/include/asm-ia64/mmu_context.h b/include/asm-ia64/mmu_context.h
index 0096e7e05012..e3e5fededb04 100644
--- a/include/asm-ia64/mmu_context.h
+++ b/include/asm-ia64/mmu_context.h
@@ -132,6 +132,9 @@ reload_context (mm_context_t context)
 	ia64_srlz_i();		/* srlz.i implies srlz.d */
 }
 
+/*
+ * Must be called with preemption off
+ */
 static inline void
 activate_context (struct mm_struct *mm)
 {
-- 
2.30.2
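
A minimal sketch (not part of the patch) of the pattern the fix relies on: code
that reads smp_processor_id() or per-CPU MMU state must keep preemption disabled
across the whole use, otherwise the task can be migrated to another CPU between
reading the CPU number and using it.  The function name
example_use_percpu_state() below is purely illustrative and does not exist in
the kernel tree.

#include <linux/preempt.h>
#include <linux/smp.h>

/* Illustrative helper only; not part of this patch or of the kernel. */
static void example_use_percpu_state(void)
{
	int cpu;

	preempt_disable();		/* pin the task to the current CPU */
	cpu = smp_processor_id();	/* now safe: no migration can occur */
	/* ... operate on per-CPU TLB/context state for 'cpu' ... */
	preempt_enable();		/* rescheduling/migration allowed again */
}

This mirrors what the patch does: smp_flush_tlb_mm() now disables preemption
around local_finish_flush_tlb_mm(), whose call chain reaches activate_context(),
which the new comment documents as requiring preemption off.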