From ee3c72a14bfecdf783738032ff3c73ef6412f5b3 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Tue, 16 Oct 2007 01:26:07 -0700
Subject: [PATCH] SLUB: Avoid touching page struct when freeing to per cpu slab

Set c->node to -1 if we allocate from a debug slab. The free fast path
can then test c->node instead of SlabDebug(page), which requires access
to the page struct cacheline.

Signed-off-by: Christoph Lameter
Tested-by: Alexey Dobriyan
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/slub.c | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 5d895d44c327..ea9fd72093d8 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1537,6 +1537,7 @@ debug:
 
 	c->page->inuse++;
 	c->page->freelist = object[c->offset];
+	c->node = -1;
 	slab_unlock(c->page);
 	return object;
 }
@@ -1560,8 +1561,7 @@ static void __always_inline *slab_alloc(struct kmem_cache *s,
 
 	local_irq_save(flags);
 	c = get_cpu_slab(s, smp_processor_id());
-	if (unlikely(!c->page || !c->freelist ||
-			!node_match(c, node)))
+	if (unlikely(!c->freelist || !node_match(c, node)))
 
 		object = __slab_alloc(s, gfpflags, node, addr, c);
 	else {
@@ -1670,7 +1670,7 @@ static void __always_inline slab_free(struct kmem_cache *s,
 	local_irq_save(flags);
 	debug_check_no_locks_freed(object, s->objsize);
 	c = get_cpu_slab(s, smp_processor_id());
-	if (likely(page == c->page && !SlabDebug(page))) {
+	if (likely(page == c->page && c->node >= 0)) {
 		object[c->offset] = c->freelist;
 		c->freelist = object;
 	} else
@@ -3250,12 +3250,16 @@ static unsigned long slab_objects(struct kmem_cache *s,
 
 	for_each_possible_cpu(cpu) {
 		struct page *page;
+		int node;
 		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
 
 		if (!c)
 			continue;
 
 		page = c->page;
+		node = c->node;
+		if (node < 0)
+			continue;
 		if (page) {
 			if (flags & SO_CPU) {
 				int x = 0;
@@ -3265,9 +3269,9 @@ static unsigned long slab_objects(struct kmem_cache *s,
 			else
 				x = 1;
 			total += x;
-			nodes[c->node] += x;
+			nodes[node] += x;
 		}
-		per_cpu[c->node]++;
+		per_cpu[node]++;
 	}
 }
 
-- 
2.30.2
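
A minimal sketch (not part of the patch) of the sentinel idea: c->node
doubles as a "debug slab" marker, so the free fast path decides from the
hot kmem_cache_cpu cacheline alone. The struct layout and function name
below are simplified assumptions for illustration, not the kernel's
definitions.

/* sketch only: simplified stand-ins, not kernel code */
struct page;			/* opaque here; the old test read page flags */

struct kmem_cache_cpu {
	void **freelist;	/* per-cpu list of free objects */
	struct page *page;	/* slab page currently in use */
	int node;		/* node of c->page, or -1 for a debug slab */
};

/*
 * Free fast path after the patch: c->node >= 0 replaces the old
 * !SlabDebug(page) test, so the common case never loads the page
 * struct cacheline.
 */
static inline int fast_free_ok(const struct kmem_cache_cpu *c,
			       const struct page *page)
{
	return page == c->page && c->node >= 0;
}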