From 7ade3c997208566c5bf50ece8fc319a8caf0d41a Mon Sep 17 00:00:00 2001
From: Weijie Yang
Date: Thu, 9 Oct 2014 15:28:12 -0700
Subject: [PATCH] mm: page_alloc: avoid wakeup kswapd on the unintended node

When entering the page_alloc slowpath, we wake up kswapd on every pgdat
according to the zonelist and high_zoneidx.  However, this doesn't take
nodemask into account, and could prematurely wake up kswapd on some
unintended nodes.

This patch uses for_each_zone_zonelist_nodemask() instead of
for_each_zone_zonelist() in wake_all_kswapds() to avoid the above
situation.

Signed-off-by: Weijie Yang
Acked-by: Mel Gorman
Acked-by: Johannes Weiner
Cc: Rik van Riel
Cc: David Rientjes
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/page_alloc.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3a950144f80b..ae2f8474273c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2471,12 +2471,14 @@ __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
 static void wake_all_kswapds(unsigned int order,
 			     struct zonelist *zonelist,
 			     enum zone_type high_zoneidx,
-			     struct zone *preferred_zone)
+			     struct zone *preferred_zone,
+			     nodemask_t *nodemask)
 {
 	struct zoneref *z;
 	struct zone *zone;
 
-	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
+	for_each_zone_zonelist_nodemask(zone, z, zonelist,
+					high_zoneidx, nodemask)
 		wakeup_kswapd(zone, order, zone_idx(preferred_zone));
 }
 
@@ -2574,7 +2576,8 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 
 restart:
 	if (!(gfp_mask & __GFP_NO_KSWAPD))
-		wake_all_kswapds(order, zonelist, high_zoneidx, preferred_zone);
+		wake_all_kswapds(order, zonelist, high_zoneidx,
+				 preferred_zone, nodemask);
 
 	/*
 	 * OK, we're below the kswapd watermark and have kicked background
-- 
2.30.2
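
Note for readers unfamiliar with the mm zonelist iterators: the sketch
below is an illustrative userspace model of the behaviour this patch
relies on: walk the zonelist, but skip zones whose node is not set in
the allocation's nodemask, so only the intended nodes have their kswapd
woken. It is not kernel code; fake_zone, allowed_node, wakeup_kswapd_stub,
wake_kswapds_filtered and MAX_NODES are made-up stand-ins for struct zone,
nodemask_t, wakeup_kswapd() and wake_all_kswapds().

/*
 * Illustrative sketch only -- models the nodemask filtering added by
 * the patch, it is not the kernel implementation.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_NODES 4

struct fake_zone {
	int node;		/* node (pgdat) this zone belongs to */
	const char *name;	/* e.g. "Normal" */
};

/* Stand-in for nodemask_t: one flag per node. */
static bool allowed_node[MAX_NODES] = { true, false, true, false };

/* Stand-in for wakeup_kswapd(): just report which node would be woken. */
static void wakeup_kswapd_stub(const struct fake_zone *z)
{
	printf("wake kswapd on node %d (zone %s)\n", z->node, z->name);
}

/*
 * Old behaviour: every zone in the zonelist wakes its node's kswapd.
 * New behaviour (modelled here): zones on nodes outside the nodemask
 * are skipped, so kswapd is only woken on nodes the allocation may use.
 */
static void wake_kswapds_filtered(const struct fake_zone *zonelist, int n,
				  const bool *nodemask)
{
	for (int i = 0; i < n; i++) {
		if (nodemask && !nodemask[zonelist[i].node])
			continue;	/* unintended node: leave kswapd alone */
		wakeup_kswapd_stub(&zonelist[i]);
	}
}

int main(void)
{
	const struct fake_zone zonelist[] = {
		{ 0, "Normal" }, { 1, "Normal" },
		{ 2, "Normal" }, { 3, "Normal" },
	};

	/* Only nodes 0 and 2 are in the mask, so only they are woken. */
	wake_kswapds_filtered(zonelist, 4, allowed_node);
	return 0;
}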