author:    Lee Schermerhorn <lee.schermerhorn@hp.com>  2008-04-28 05:13:10 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org>  2008-04-28 11:58:23 -0400
commit:    f4e53d910b7dde2685b177f1e7c3e3e0b4a42f7b
tree:      db4db3abbc0b63de88dabd13e9359ca86973935c /mm
parent:    846a16bf0fc80dc95a414ffce465e3cbf9680247
mempolicy: write lock mmap_sem while changing task mempolicy
A read of /proc/<pid>/numa_maps holds the target task's mmap_sem for read
while examining each vma's mempolicy. A vma's mempolicy can fall back to the
task's policy. However, the task could change its task policy and free the
one that show_numa_maps() is examining.
To prevent this, grab the mmap_sem for write when updating task mempolicy.
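
For illustration, a minimal sketch of the resulting reader/writer pairing
(simplified, not the verbatim kernel code; error handling omitted):

	/*
	 * Reader side (show_numa_maps() path): mmap_sem held for read
	 * while the vma walk may dereference task->mempolicy via
	 * get_vma_policy().
	 */
	down_read(&mm->mmap_sem);
	pol = get_vma_policy(task, vma, addr);	/* may be task->mempolicy */
	/* ... format policy and per-node stats ... */
	up_read(&mm->mmap_sem);

	/*
	 * Writer side (do_set_mempolicy()): now takes mmap_sem for write,
	 * so the old task policy cannot be freed out from under a reader.
	 */
	down_write(&mm->mmap_sem);
	mpol_put(current->mempolicy);	/* drop the old policy */
	current->mempolicy = new;
	up_write(&mm->mmap_sem);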
Pointed out to me by Christoph Lameter and extracted and reworked from
Christoph's alternative mempol reference counting patch.
This is analogous to the way that do_mbind() and do_get_mempolicy() prevent
races between tasks sharing an mm_struct [a.k.a. threads] setting and
querying a mempolicy for a particular address.
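
Roughly, those paths pair the same rwsem (again a simplified sketch, not
the exact code):

	/* do_mbind(): installs a policy over a range -- write side */
	down_write(&mm->mmap_sem);
	err = mbind_range(vma, start, end, new);
	up_write(&mm->mmap_sem);

	/*
	 * do_get_mempolicy() with MPOL_F_ADDR: queries the policy in
	 * effect at an address -- read side
	 */
	down_read(&mm->mmap_sem);
	vma = find_vma_intersection(mm, addr, addr + 1);
	pol = vma ? get_vma_policy(current, vma, addr) : NULL;
	up_read(&mm->mmap_sem);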
Note: this is necessary, but not sufficient, to allow us to stop taking an
extra reference on "other task's mempolicy" in get_vma_policy().  Subsequent
patches will complete this update, allowing us to simplify the tests for
whether we need to unref a mempolicy at various points in the code.
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
 mm/mempolicy.c | 13 +++++++++++++
 1 file changed, 13 insertions(+)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index e9fc1c1ae66c..c6c61ea6bb8c 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -591,16 +591,29 @@ static long do_set_mempolicy(unsigned short mode, unsigned short flags,
 			     nodemask_t *nodes)
 {
 	struct mempolicy *new;
+	struct mm_struct *mm = current->mm;
 
 	new = mpol_new(mode, flags, nodes);
 	if (IS_ERR(new))
 		return PTR_ERR(new);
+
+	/*
+	 * prevent changing our mempolicy while show_numa_maps()
+	 * is using it.
+	 * Note:  do_set_mempolicy() can be called at init time
+	 * with no 'mm'.
+	 */
+	if (mm)
+		down_write(&mm->mmap_sem);
 	mpol_put(current->mempolicy);
 	current->mempolicy = new;
 	mpol_set_task_struct_flag();
 	if (new && new->policy == MPOL_INTERLEAVE &&
 	    nodes_weight(new->v.nodes))
 		current->il_next = first_node(new->v.nodes);
+	if (mm)
+		up_write(&mm->mmap_sem);
+
 	return 0;
 }
 