From 82727018b0d33d188e9916bcf76f18387484cb04 Mon Sep 17 00:00:00 2001
From: Rik van Riel
Date: Mon, 7 Oct 2013 11:29:28 +0100
Subject: [PATCH] sched/numa: Call task_numa_free() from do_execve()

It is possible for a task in a numa group to call exec, and
have the new (unrelated) executable inherit the numa group
association from its former self.

This has the potential to break numa grouping, and is trivial
to fix.

Signed-off-by: Rik van Riel
Signed-off-by: Mel Gorman
Cc: Andrea Arcangeli
Cc: Johannes Weiner
Cc: Srikar Dronamraju
Signed-off-by: Peter Zijlstra
Link: http://lkml.kernel.org/r/1381141781-10992-51-git-send-email-mgorman@suse.de
Signed-off-by: Ingo Molnar
---
 fs/exec.c             | 1 +
 include/linux/sched.h | 4 ++++
 kernel/sched/fair.c   | 9 ++++++++-
 kernel/sched/sched.h  | 5 -----
 4 files changed, 13 insertions(+), 6 deletions(-)

diff --git a/fs/exec.c b/fs/exec.c
index 8875dd10ae7a..2ea437e5acf4 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1547,6 +1547,7 @@ static int do_execve_common(const char *filename,
 	current->fs->in_exec = 0;
 	current->in_execve = 0;
 	acct_update_integrals(current);
+	task_numa_free(current);
 	free_bprm(bprm);
 	if (displaced)
 		put_files_struct(displaced);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 724482200b83..f6385107c352 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1458,6 +1458,7 @@ struct task_struct {
 extern void task_numa_fault(int last_node, int node, int pages, int flags);
 extern pid_t task_numa_group_id(struct task_struct *p);
 extern void set_numabalancing_state(bool enabled);
+extern void task_numa_free(struct task_struct *p);
 #else
 static inline void task_numa_fault(int last_node, int node, int pages,
 				   int flags)
@@ -1470,6 +1471,9 @@ static inline pid_t task_numa_group_id(struct task_struct *p)
 static inline void set_numabalancing_state(bool enabled)
 {
 }
+static inline void task_numa_free(struct task_struct *p)
+{
+}
 #endif
 
 static inline struct pid *task_pid(struct task_struct *task)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 4c40e13310e9..c4df2de6ca4a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1418,6 +1418,7 @@ void task_numa_free(struct task_struct *p)
 {
 	struct numa_group *grp = p->numa_group;
 	int i;
+	void *numa_faults = p->numa_faults;
 
 	if (grp) {
 		for (i = 0; i < 2*nr_node_ids; i++)
@@ -1433,7 +1434,9 @@ void task_numa_free(struct task_struct *p)
 		put_numa_group(grp);
 	}
 
-	kfree(p->numa_faults);
+	p->numa_faults = NULL;
+	p->numa_faults_buffer = NULL;
+	kfree(numa_faults);
 }
 
 /*
@@ -1452,6 +1455,10 @@ void task_numa_fault(int last_cpupid, int node, int pages, int flags)
 	if (!p->mm)
 		return;
 
+	/* Do not worry about placement if exiting */
+	if (p->state == TASK_DEAD)
+		return;
+
 	/* Allocate buffer to track faults on a per-node basis */
 	if (unlikely(!p->numa_faults)) {
 		int size = sizeof(*p->numa_faults) * 2 * nr_node_ids;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 8037b10a256f..eeb1923812a1 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -559,11 +559,6 @@ static inline u64 rq_clock_task(struct rq *rq)
 #ifdef CONFIG_NUMA_BALANCING
 extern int migrate_task_to(struct task_struct *p, int cpu);
 extern int migrate_swap(struct task_struct *, struct task_struct *);
-extern void task_numa_free(struct task_struct *p);
-#else /* CONFIG_NUMA_BALANCING */
-static inline void task_numa_free(struct task_struct *p)
-{
-}
 #endif /* CONFIG_NUMA_BALANCING */
 
 #ifdef CONFIG_SMP
-- 
2.30.2