From: Dave Hansen
Date: Thu, 31 Jul 2014 15:40:54 +0000 (-0700)
Subject: x86/mm: Clean up the TLB flushing code
X-Git-Url: http://git.lede-project.org./?a=commitdiff_plain;h=4995ab9cf512e9a6cc07dfd6b1d4e2fc48ce7fef;p=openwrt%2Fstaging%2Fblogic.git

x86/mm: Clean up the TLB flushing code

The

	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)

line of code is not exactly the easiest to audit, especially when it
ends up at two different indentation levels.  This eliminates one of
the copy-n-paste versions.  It also gives us a unified exit point for
each path through this function.  We need this in a minute for our
tracepoint.

Signed-off-by: Dave Hansen
Link: http://lkml.kernel.org/r/20140731154054.44F1CDDC@viggo.jf.intel.com
Acked-by: Rik van Riel
Acked-by: Mel Gorman
Signed-off-by: H. Peter Anvin
---

diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index dd8dda167a24..378fbef279d2 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -161,23 +161,24 @@ void flush_tlb_current_task(void)
 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 				unsigned long end, unsigned long vmflag)
 {
+	bool need_flush_others_all = true;
 	unsigned long addr;
 	unsigned act_entries, tlb_entries = 0;
 	unsigned long nr_base_pages;
 
 	preempt_disable();
 	if (current->active_mm != mm)
-		goto flush_all;
+		goto out;
 
 	if (!current->mm) {
 		leave_mm(smp_processor_id());
-		goto flush_all;
+		goto out;
 	}
 
 	if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1
 					|| vmflag & VM_HUGETLB) {
 		local_flush_tlb();
-		goto flush_all;
+		goto out;
 	}
 
 	/* In modern CPU, last level tlb used for both data/ins */
@@ -196,22 +197,20 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
 		local_flush_tlb();
 	} else {
+		need_flush_others_all = false;
 		/* flush range by one by one 'invlpg' */
 		for (addr = start; addr < end; addr += PAGE_SIZE) {
 			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
 			__flush_tlb_single(addr);
 		}
-
-		if (cpumask_any_but(mm_cpumask(mm),
-				smp_processor_id()) < nr_cpu_ids)
-			flush_tlb_others(mm_cpumask(mm), mm, start, end);
-		preempt_enable();
-		return;
 	}
-
-flush_all:
+out:
+	if (need_flush_others_all) {
+		start = 0UL;
+		end = TLB_FLUSH_ALL;
+	}
 	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
-		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
+		flush_tlb_others(mm_cpumask(mm), mm, start, end);
 	preempt_enable();
 }
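
For reference, this is roughly how flush_tlb_mm_range() reads with the
patch applied.  It is reconstructed from the hunks above rather than
copied from the tree; the TLB-entry sizing in the middle of the
function is elided, and 'range_too_big' is only a stand-in for that
existing heuristic, not a variable that exists in the kernel:

	void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
					unsigned long end, unsigned long vmflag)
	{
		bool need_flush_others_all = true;
		unsigned long addr;
		bool range_too_big;	/* stand-in for the elided size check */

		preempt_disable();
		if (current->active_mm != mm)
			goto out;

		if (!current->mm) {
			leave_mm(smp_processor_id());
			goto out;
		}

		if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1
						|| vmflag & VM_HUGETLB) {
			local_flush_tlb();
			goto out;
		}

		/* ... act_entries/nr_base_pages sizing elided ... */

		if (range_too_big) {
			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
			local_flush_tlb();
		} else {
			need_flush_others_all = false;
			/* flush range by one by one 'invlpg' */
			for (addr = start; addr < end; addr += PAGE_SIZE) {
				count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
				__flush_tlb_single(addr);
			}
		}
	out:
		if (need_flush_others_all) {
			start = 0UL;
			end = TLB_FLUSH_ALL;
		}
		if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
			flush_tlb_others(mm_cpumask(mm), mm, start, end);
		preempt_enable();
	}

Every early bail-out now lands on the same out: label, and the only
decision left at that point is whether the remote flush should cover
the whole address space or just the (start, end) range that was
flushed locally.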