| author | Oleg Nesterov <oleg@redhat.com> | 2014-10-09 18:27:52 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-10-09 22:25:56 -0400 |
| commit | 498f237178a3d3151f7ebe329af9a4734e41f6ed | |
| tree | 84aab1a5f84bde657ae30fc8de7a9833927c1d51 | |
| parent | 74d2c3a05cc6c1eef2d7236a9919036ed85ddaaf | |
mempolicy: fix show_numa_map() vs exec() + do_set_mempolicy() race
Commit 9e7814404b77 ("hold task->mempolicy while numa_maps scans.") fixed the
race with the exiting task, but this is not enough.
The current code assumes that get_vma_policy(task) should either see
task->mempolicy == NULL or it should be equal to ->task_mempolicy saved
by hold_task_mempolicy(), so we can never race with __mpol_put(). But
this can only work if we can't race with do_set_mempolicy(), and thus
we can't race with another do_set_mempolicy() or do_exit() after that.
However, do_set_mempolicy()->down_write(mmap_sem) cannot prevent this race.
The task can exec, change its ->mm, and call do_set_mempolicy() after that;
in this case they take two different locks.
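To make the broken assumption concrete, here is a minimal, self-contained user-space sketch, not kernel code: `struct task`, `mpol_new()`, `mpol_get()` and `mpol_put()` are simplified stand-ins for the kernel's mempolicy refcounting. It shows that pinning the snapshot taken by hold_task_mempolicy() does not pin whatever task->mempolicy points to later.

```c
#include <stdio.h>
#include <stdlib.h>

struct mempolicy { int refcnt; int mode; };
struct task { struct mempolicy *mempolicy; };

static struct mempolicy *mpol_new(int mode)
{
	struct mempolicy *pol = malloc(sizeof(*pol));

	pol->refcnt = 1;
	pol->mode = mode;
	return pol;
}

static void mpol_get(struct mempolicy *pol) { pol->refcnt++; }

static void mpol_put(struct mempolicy *pol)
{
	if (--pol->refcnt == 0) {
		printf("policy mode=%d freed\n", pol->mode);
		free(pol);
	}
}

int main(void)
{
	struct task t = { .mempolicy = mpol_new(1) };

	/* seq_file ->start(): hold_task_mempolicy() pins the policy it sees now */
	struct mempolicy *saved = t.mempolicy;
	mpol_get(saved);

	/*
	 * Meanwhile the task execs and calls do_set_mempolicy() under the
	 * *new* mm's mmap_sem: the old policy is replaced and its original
	 * reference dropped.  Only the snapshot keeps it alive, and
	 * t.mempolicy now points at an object the reader never pinned.
	 */
	struct mempolicy *old = t.mempolicy;
	t.mempolicy = mpol_new(2);
	mpol_put(old);

	/*
	 * The old code's assumption -- "task->mempolicy is either NULL or the
	 * pinned snapshot" -- no longer holds, so dereferencing t.mempolicy
	 * here can race with a later free.
	 */
	printf("saved == current? %d\n", saved == t.mempolicy);

	mpol_put(saved);
	mpol_put(t.mempolicy);
	return 0;
}
```

This is why the patch below never dereferences task->mempolicy from show_numa_map(): it either uses the per-VMA policy returned by __get_vma_policy() or falls back to the snapshot pinned in proc_priv->task_mempolicy.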
Change hold_task_mempolicy() to use get_task_policy(), which never returns
NULL, and change show_numa_map() to use __get_vma_policy() or fall back to
proc_priv->task_mempolicy.
Note: this is the minimal fix; we will clean up this code later. I think
hold_task_mempolicy() and release_task_mempolicy() should die, and we can move
this logic into show_numa_map(). Or, at least, we can move get_task_policy()
outside of ->mmap_sem and the !CONFIG_NUMA code.
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: David Rientjes <rientjes@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | fs/proc/task_mmu.c | 33 |
1 file changed, 9 insertions(+), 24 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index adddf697c4ea..1acec26a3758 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -87,32 +87,14 @@ unsigned long task_statm(struct mm_struct *mm,
 
 #ifdef CONFIG_NUMA
 /*
- * These functions are for numa_maps but called in generic **maps seq_file
- * ->start(), ->stop() ops.
- *
- * numa_maps scans all vmas under mmap_sem and checks their mempolicy.
- * Each mempolicy object is controlled by reference counting. The problem here
- * is how to avoid accessing dead mempolicy object.
- *
- * Because we're holding mmap_sem while reading seq_file, it's safe to access
- * each vma's mempolicy, no vma objects will never drop refs to mempolicy.
- *
- * A task's mempolicy (task->mempolicy) has different behavior. task->mempolicy
- * is set and replaced under mmap_sem but unrefed and cleared under task_lock().
- * So, without task_lock(), we cannot trust get_vma_policy() because we cannot
- * gurantee the task never exits under us. But taking task_lock() around
- * get_vma_plicy() causes lock order problem.
- *
- * To access task->mempolicy without lock, we hold a reference count of an
- * object pointed by task->mempolicy and remember it. This will guarantee
- * that task->mempolicy points to an alive object or NULL in numa_maps accesses.
+ * Save get_task_policy() for show_numa_map().
  */
 static void hold_task_mempolicy(struct proc_maps_private *priv)
 {
 	struct task_struct *task = priv->task;
 
 	task_lock(task);
-	priv->task_mempolicy = task->mempolicy;
+	priv->task_mempolicy = get_task_policy(task);
 	mpol_get(priv->task_mempolicy);
 	task_unlock(task);
 }
@@ -1431,7 +1413,6 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
 	struct vm_area_struct *vma = v;
 	struct numa_maps *md = &numa_priv->md;
 	struct file *file = vma->vm_file;
-	struct task_struct *task = proc_priv->task;
 	struct mm_struct *mm = vma->vm_mm;
 	struct mm_walk walk = {};
 	struct mempolicy *pol;
@@ -1451,9 +1432,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
 	walk.private = md;
 	walk.mm = mm;
 
-	pol = get_vma_policy(task, vma, vma->vm_start);
-	mpol_to_str(buffer, sizeof(buffer), pol);
-	mpol_cond_put(pol);
+	pol = __get_vma_policy(vma, vma->vm_start);
+	if (pol) {
+		mpol_to_str(buffer, sizeof(buffer), pol);
+		mpol_cond_put(pol);
+	} else {
+		mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
+	}
 
 	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
 