| author | Oleg Nesterov <oleg@redhat.com> | 2014-10-09 18:25:19 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-10-09 22:25:48 -0400 |
| commit | 46c298cf69d0e9a27d33ff992a81bd7b441c7933 (patch) | |
| tree | 5bf50f1166996eb08fd7303ba93cc8bf8c5df7d9 /fs/proc | |
| parent | b1a8de1f534337b398c7778578a56ec4f018cb27 (diff) | |
fs/proc/task_mmu.c: don't use task->mm in m_start() and show_*map()
get_gate_vma(priv->task->mm) looks ugly and wrong: task->mm can be NULL, or
it can be changed by exec right after mm_access().
And in theory this race is not harmless: the task can exec and then later
exit, freeing the new mm_struct. In this case get_task_mm(oldmm) can't
help; get_gate_vma(task->mm) can read the freed/unmapped memory.
I think that priv->task should simply die and the hold_task_mempolicy()
logic can be simplified. The tail_vma logic asks for cleanups too.
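
To make the race concrete, here is a minimal sketch (not the kernel code itself, and simplified relative to what m_start() actually does); it assumes a struct task_struct *task and a struct proc_maps_private *priv from the surrounding context, and uses only interfaces already named in this patch (get_task_mm(), get_gate_vma(), mmap_sem):

        /* Sketch fragment, as it might appear inside m_start(). */
        struct mm_struct *mm = get_task_mm(task);       /* pin one specific mm_struct */
        if (!mm)
                return NULL;
        down_read(&mm->mmap_sem);

        /* Safe: the gate vma is derived from the pinned snapshot. */
        priv->tail_vma = get_gate_vma(mm);

        /*
         * Unsafe: the task may have passed through exec (and even exit) since
         * the snapshot was taken, so task->mm can be NULL or point to a
         * different, possibly already freed, mm_struct; the reference held on
         * the old mm does not protect the new one.
         */
        /* priv->tail_vma = get_gate_vma(task->mm); */

This is why the hunks below drop the cached task pointer in show_map() and show_smap() and compare against priv->tail_vma instead: the m->version bookkeeping is preserved, but task->mm is never dereferenced again after the snapshot.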
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs/proc')
-rw-r--r-- | fs/proc/task_mmu.c | 8 |
1 file changed, 3 insertions, 5 deletions
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index c34156888d70..289dfdc0ec09 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -170,7 +170,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
 		return mm;
 	down_read(&mm->mmap_sem);
 
-	tail_vma = get_gate_vma(priv->task->mm);
+	tail_vma = get_gate_vma(mm);
 	priv->tail_vma = tail_vma;
 	hold_task_mempolicy(priv);
 	/* Start with last addr hint */
@@ -351,12 +351,11 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
 {
 	struct vm_area_struct *vma = v;
 	struct proc_maps_private *priv = m->private;
-	struct task_struct *task = priv->task;
 
 	show_map_vma(m, vma, is_pid);
 
 	if (m->count < m->size)  /* vma is copied successfully */
-		m->version = (vma != get_gate_vma(task->mm))
+		m->version = (vma != priv->tail_vma)
 			? vma->vm_start : 0;
 	return 0;
 }
@@ -584,7 +583,6 @@ static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
 static int show_smap(struct seq_file *m, void *v, int is_pid)
 {
 	struct proc_maps_private *priv = m->private;
-	struct task_struct *task = priv->task;
 	struct vm_area_struct *vma = v;
 	struct mem_size_stats mss;
 	struct mm_walk smaps_walk = {
@@ -639,7 +637,7 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
 	show_smap_vma_flags(m, vma);
 
 	if (m->count < m->size)  /* vma is copied successfully */
-		m->version = (vma != get_gate_vma(task->mm))
+		m->version = (vma != priv->tail_vma)
 			? vma->vm_start : 0;
 	return 0;
 }