mm: retain migratetype in freed page
authorMinchan Kim <minchan@kernel.org>
Mon, 8 Oct 2012 23:32:11 +0000 (16:32 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Tue, 9 Oct 2012 07:22:45 +0000 (16:22 +0900)
The page allocator caches the pageblock's migratetype in page->private while
the page sits on the PCP freelists, but that field is overwritten with the
order of the page once it is freed to the buddy allocator.  This patch stores
the migratetype of the page in the page->index field instead, so that it
remains available the whole time the page sits on a free_list.
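
For context, page->private cannot carry the migratetype once the page enters
the buddy allocator, because the buddy code reuses that field for the page
order.  A rough sketch of the relevant helpers in mm/page_alloc.c of that era
(shown for illustration only; the exact code may differ slightly):

	static inline void set_page_order(struct page *page, int order)
	{
		/* page->private now holds the buddy order, clobbering any
		 * migratetype that was cached there on the PCP path. */
		set_page_private(page, order);
		__SetPageBuddy(page);
	}

	static inline void rmv_page_order(struct page *page)
	{
		__ClearPageBuddy(page);
		set_page_private(page, 0);
	}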

This patch adds a new call in __free_pages_ok, so it costs a little extra,
but that path only handles high-order frees, so I believe the impact is
negligible.
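
The stored value can then be read back later without consulting the pageblock
bitmap.  A hypothetical reader, for illustration only and not part of this
patch (zone, order and mt are assumed to be in scope):

	struct page *page;

	list_for_each_entry(page, &zone->free_area[order].free_list[mt], lru) {
		int freed_mt = get_freepage_migratetype(page);
		/* freed_mt is the migratetype recorded when the page was
		 * freed or moved between free lists, even though
		 * page->private now holds the buddy order. */
	}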

Signed-off-by: Minchan Kim <minchan@kernel.org>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: Xishi Qiu <qiuxishi@huawei.com>
Cc: Wen Congyang <wency@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/mm.h
mm/page_alloc.c

index 4ed5c7367b9b185a162ee8d706fd073681b18376..b01e585ab4b538e26b7aba4daca7b57d31c37f59 100644 (file)
@@ -240,13 +240,13 @@ struct inode;
 /* It's valid only if the page is free path or free_list */
 static inline void set_freepage_migratetype(struct page *page, int migratetype)
 {
-       set_page_private(page, migratetype);
+       page->index = migratetype;
 }
 
 /* It's valid only if the page is free path or free_list */
 static inline int get_freepage_migratetype(struct page *page)
 {
-       return page_private(page);
+       return page->index;
 }
 
 /*
index 6aa0a8e89c5de328c02ee7078440beb664eaf0b8..94fd283dde9824c454e214df34116cec25c2601a 100644 (file)
@@ -729,6 +729,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 {
        unsigned long flags;
        int wasMlocked = __TestClearPageMlocked(page);
+       int migratetype;
 
        if (!free_pages_prepare(page, order))
                return;
@@ -737,8 +738,9 @@ static void __free_pages_ok(struct page *page, unsigned int order)
        if (unlikely(wasMlocked))
                free_page_mlock(page);
        __count_vm_events(PGFREE, 1 << order);
-       free_one_page(page_zone(page), page, order,
-                                       get_pageblock_migratetype(page));
+       migratetype = get_pageblock_migratetype(page);
+       set_freepage_migratetype(page, migratetype);
+       free_one_page(page_zone(page), page, order, migratetype);
        local_irq_restore(flags);
 }
 
@@ -959,6 +961,7 @@ static int move_freepages(struct zone *zone,
                order = page_order(page);
                list_move(&page->lru,
                          &zone->free_area[order].free_list[migratetype]);
+               set_freepage_migratetype(page, migratetype);
                page += 1 << order;
                pages_moved += 1 << order;
        }