local_flush_tlb_mm((struct mm_struct *)mm);
}
+/*
+ * Special variant of smp_call_function for use by TLB functions:
+ *
+ *  o No return value
+ *  o collapses to a normal function call on UP kernels
+ *  o collapses to a normal function call on systems with a single shared
+ *    primary cache.
+ *  o CONFIG_MIPS_MT_SMTC currently implies there is only one physical core.
+ */
+static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
+{
+#ifndef CONFIG_MIPS_MT_SMTC
+ smp_call_function(func, info, 1, 1);
+#endif
+}
+
+static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
+{
+ preempt_disable();
+
+ smp_on_other_tlbs(func, info);
+ func(info);
+
+ preempt_enable();
+}
+
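Both helpers keep the smp_call_function() calling convention: the callback
takes a single void * and returns nothing. A minimal sketch of a caller,
using a hypothetical callback and wrapper that are not part of this patch:

	static void example_tlb_ipi(void *info)
	{
		unsigned long vaddr = (unsigned long) info;

		local_flush_tlb_one(vaddr);	/* runs on the receiving CPU */
	}

	static void example_flush_one(unsigned long vaddr)
	{
		/* Remote CPUs first, then the local CPU, with preemption
		 * disabled across both so the task cannot migrate between
		 * the two flushes. */
		smp_on_each_tlb(example_tlb_ipi, (void *) vaddr);
	}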
/*
* The following tlb flush calls are invoked when old translations are
* being torn down, or pte attributes are changing. For single threaded
preempt_disable();
if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
- smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1);
+ smp_on_other_tlbs(flush_tlb_mm_ipi, (void *)mm);
} else {
int i;
for (i = 0; i < num_online_cpus(); i++)
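The hunk is cut off at the loop head; a sketch of the single-user fast path
it guards, assuming the conventional MIPS approach of clearing the ASID so
other CPUs allocate a fresh context on their next use of this mm instead of
being interrupted:

	/* Assumed loop body, not shown in the hunk above: invalidate
	 * the mm's context on every other CPU rather than sending IPIs. */
	for (i = 0; i < num_online_cpus(); i++)
		if (smp_processor_id() != i)
			cpu_context(i, mm) = 0;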
fd.vma = vma;
fd.addr1 = start;
fd.addr2 = end;
- smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1);
+ smp_on_other_tlbs(flush_tlb_range_ipi, (void *)&fd);
} else {
int i;
for (i = 0; i < num_online_cpus(); i++)
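The fd block packages the range for the cross-call. For context, a sketch of
the argument structure and IPI handler as they conventionally appear earlier
in this file (assumed; not part of this hunk):

	struct flush_tlb_data {
		struct vm_area_struct *vma;
		unsigned long addr1;
		unsigned long addr2;
	};

	static void flush_tlb_range_ipi(void *info)
	{
		struct flush_tlb_data *fd = (struct flush_tlb_data *) info;

		local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
	}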
fd.vma = vma;
fd.addr1 = page;
- smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1);
+ smp_on_other_tlbs(flush_tlb_page_ipi, (void *)&fd);
} else {
int i;
for (i = 0; i < num_online_cpus(); i++)
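flush_tlb_page() reuses the same flush_tlb_data block but fills only vma and
addr1; its handler, sketched under the same assumption:

	static void flush_tlb_page_ipi(void *info)
	{
		struct flush_tlb_data *fd = (struct flush_tlb_data *) info;

		local_flush_tlb_page(fd->vma, fd->addr1);
	}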
void flush_tlb_one(unsigned long vaddr)
{
- smp_call_function(flush_tlb_one_ipi, (void *) vaddr, 1, 1);
- local_flush_tlb_one(vaddr);
+ smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}
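This last hunk is more than a cleanup: the removed sequence could be
preempted between the cross-call and the local flush and migrate to a CPU
whose TLB it never flushed. Expanded per the smp_on_each_tlb() definition
above, the replacement is roughly:

	preempt_disable();
	smp_on_other_tlbs(flush_tlb_one_ipi, (void *) vaddr);
	flush_tlb_one_ipi((void *) vaddr);	/* local flush */
	preempt_enable();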
static DEFINE_PER_CPU(struct cpu, cpu_devices);