mm/page_alloc: when handling percpu_pagelist_fraction, don't needlessly recalculate...
author     Cody P Schafer <cody@linux.vnet.ibm.com>
Wed, 3 Jul 2013 22:01:34 +0000 (15:01 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
Wed, 3 Jul 2013 23:07:27 +0000 (16:07 -0700)
Simply move the calculation of the new 'high' value outside the
for_each_possible_cpu() loop: 'high' depends only on the zone's
managed page count and on percpu_pagelist_fraction, not on the CPU,
so one division per zone suffices.
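
To see the transformation in isolation, a minimal userspace sketch of
hoisting the loop-invariant division out of the per-CPU loop follows;
fake_zone, fake_setup_highmark and NCPUS are hypothetical stand-ins
for the kernel's struct zone, setup_pagelist_highmark() and the
possible-CPU mask, not kernel code:

	#include <stdio.h>

	#define NCPUS 4

	/* hypothetical stand-in for struct zone */
	struct fake_zone {
		unsigned long managed_pages;
	};

	/* stand-in for the percpu_pagelist_fraction sysctl value */
	static unsigned long fraction = 8;

	/* stand-in for setup_pagelist_highmark() */
	static void fake_setup_highmark(int cpu, unsigned long high)
	{
		printf("cpu %d: high = %lu\n", cpu, high);
	}

	int main(void)
	{
		struct fake_zone zone = { .managed_pages = 1UL << 20 };
		unsigned long high;
		int cpu;

		/*
		 * Before the patch the division ran once per CPU;
		 * after it, the loop-invariant 'high' is computed
		 * once per zone and merely reused in the loop.
		 */
		high = zone.managed_pages / fraction;
		for (cpu = 0; cpu < NCPUS; cpu++)
			fake_setup_highmark(cpu, high);

		return 0;
	}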

Signed-off-by: Cody P Schafer <cody@linux.vnet.ibm.com>
Cc: Gilad Ben-Yossef <gilad@benyossef.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@gmail.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/page_alloc.c

index 8125263be60f98f848acd1ee375a3d8d3f199baa..386de0f11beae28343a52e34d6a08c5c536f635d 100644 (file)
@@ -5575,7 +5575,6 @@ int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
  * cpu.  It is the fraction of total pages in each zone that a hot per cpu pagelist
  * can have before it gets flushed back to buddy allocator.
  */
-
 int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
        void __user *buffer, size_t *length, loff_t *ppos)
 {
@@ -5589,12 +5588,11 @@ int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
 
        mutex_lock(&pcp_batch_high_lock);
        for_each_populated_zone(zone) {
-               for_each_possible_cpu(cpu) {
-                       unsigned long  high;
-                       high = zone->managed_pages / percpu_pagelist_fraction;
+               unsigned long  high;
+               high = zone->managed_pages / percpu_pagelist_fraction;
+               for_each_possible_cpu(cpu)
                        setup_pagelist_highmark(
-                               per_cpu_ptr(zone->pageset, cpu), high);
-               }
+                                       per_cpu_ptr(zone->pageset, cpu), high);
        }
        mutex_unlock(&pcp_batch_high_lock);
        return 0;
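
Pieced together, the handler reads as follows once the hunks above
are applied. The local declarations and the proc_dointvec_minmax()
guard at the top are not visible in the diff context; they are
reconstructed from mm/page_alloc.c of this era and should be read as
an approximation rather than quoted source:

	int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
		void __user *buffer, size_t *length, loff_t *ppos)
	{
		struct zone *zone;	/* assumed declaration, not in the hunks */
		unsigned int cpu;	/* assumed declaration, not in the hunks */
		int ret;

		/* assumed guard: parse the sysctl write, bail on error or read */
		ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
		if (!write || (ret < 0))
			return ret;

		mutex_lock(&pcp_batch_high_lock);
		for_each_populated_zone(zone) {
			/* one division per zone instead of one per CPU */
			unsigned long high;
			high = zone->managed_pages / percpu_pagelist_fraction;
			for_each_possible_cpu(cpu)
				setup_pagelist_highmark(
						per_cpu_ptr(zone->pageset, cpu), high);
		}
		mutex_unlock(&pcp_batch_high_lock);
		return 0;
	}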