From 4307d1e5ada595c87f9a4d16db16ba5edb70dcb1 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Wed, 7 Nov 2007 18:37:48 +0100
Subject: [PATCH] x86: ignore the sys_getcpu() tcache parameter

Don't use the vgetcpu tcache - it's causing problems for migrating
tasks: they'll see the old cache contents for up to a jiffy after the
migration, further increasing the cost of the migration.

In the worst case they see completely bogus information from the
tcache, when a sys_getcpu() call has "invalidated" the cache info by
incrementing both the jiffies _and_ the cpuid info in the cache, and
the following vdso_getcpu() call happens after vdso_jiffies has been
incremented.

Signed-off-by: Ingo Molnar
Signed-off-by: Ulrich Drepper
Signed-off-by: Thomas Gleixner
---
 arch/x86/vdso/vgetcpu.c | 19 ++-----------------
 kernel/sys.c            | 20 +-------------------
 2 files changed, 3 insertions(+), 36 deletions(-)

diff --git a/arch/x86/vdso/vgetcpu.c b/arch/x86/vdso/vgetcpu.c
index 91f6e85d0fc2..3b1ae1abfba9 100644
--- a/arch/x86/vdso/vgetcpu.c
+++ b/arch/x86/vdso/vgetcpu.c
@@ -13,32 +13,17 @@
 #include <asm/vgtod.h>
 #include "vextern.h"
 
-long __vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
+long __vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused)
 {
 	unsigned int dummy, p;
-	unsigned long j = 0;
 
-	/* Fast cache - only recompute value once per jiffies and avoid
-	   relatively costly rdtscp/cpuid otherwise.
-	   This works because the scheduler usually keeps the process
-	   on the same CPU and this syscall doesn't guarantee its
-	   results anyways.
-	   We do this here because otherwise user space would do it on
-	   its own in a likely inferior way (no access to jiffies).
-	   If you don't like it pass NULL. */
-	if (tcache && tcache->blob[0] == (j = *vdso_jiffies)) {
-		p = tcache->blob[1];
-	} else if (*vdso_vgetcpu_mode == VGETCPU_RDTSCP) {
+	if (*vdso_vgetcpu_mode == VGETCPU_RDTSCP) {
 		/* Load per CPU data from RDTSCP */
 		rdtscp(dummy, dummy, p);
 	} else {
 		/* Load per CPU data from GDT */
 		asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
 	}
-	if (tcache) {
-		tcache->blob[0] = j;
-		tcache->blob[1] = p;
-	}
 	if (cpu)
 		*cpu = p & 0xfff;
 	if (node)
diff --git a/kernel/sys.c b/kernel/sys.c
index 304b5410d746..d1fe71eb4546 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1750,7 +1750,7 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
 }
 
 asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep,
-			   struct getcpu_cache __user *cache)
+			   struct getcpu_cache __user *unused)
 {
 	int err = 0;
 	int cpu = raw_smp_processor_id();
@@ -1758,24 +1758,6 @@ asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep,
 		err |= put_user(cpu, cpup);
 	if (nodep)
 		err |= put_user(cpu_to_node(cpu), nodep);
-	if (cache) {
-		/*
-		 * The cache is not needed for this implementation,
-		 * but make sure user programs pass something
-		 * valid. vsyscall implementations can instead make
-		 * good use of the cache. Only use t0 and t1 because
-		 * these are available in both 32bit and 64bit ABI (no
-		 * need for a compat_getcpu). 32bit has enough
-		 * padding
-		 */
-		unsigned long t0, t1;
-		get_user(t0, &cache->blob[0]);
-		get_user(t1, &cache->blob[1]);
-		t0++;
-		t1++;
-		put_user(t0, &cache->blob[0]);
-		put_user(t1, &cache->blob[1]);
-	}
 	return err ? -EFAULT : 0;
 }
-- 
2.30.2
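
Usage note (not part of the patch itself): with this change the tcache
argument to both sys_getcpu() and __vdso_getcpu() is accepted but
ignored, so userspace should simply pass NULL. A minimal sketch of a
caller, assuming the glibc syscall(2) wrapper and the SYS_getcpu
constant from <sys/syscall.h>:

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	unsigned int cpu, node;

	/*
	 * The third (tcache) argument is ignored by the kernel after
	 * this patch; passing NULL is always correct.
	 */
	if (syscall(SYS_getcpu, &cpu, &node, NULL) != 0) {
		perror("getcpu");
		return 1;
	}
	printf("cpu=%u node=%u\n", cpu, node);
	return 0;
}

glibc also exposes the same information through sched_getcpu(), which
does not expose the cache parameter at all, so most callers are
unaffected by this change.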