author		Hugh Dickins <hugh@veritas.com>		2005-10-29 21:16:33 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-10-30 00:40:41 -0400
commit		deceb6cd17e6dfafe4c4f81b1b4153bc41b2cb70 (patch)
tree		2a722f50e8edef8609a49f65bfcb222e499c44cc /fs/proc
parent		c34d1b4d165c67b966bca4aba026443d7ff161eb (diff)
[PATCH] mm: follow_page with inner ptlock
Final step in pushing down the common core's page_table_lock. follow_page no
longer wants its caller to hold page_table_lock; it uses pte_offset_map_lock
itself, so no page_table_lock is taken in get_user_pages at all.
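A simplified sketch of the new locking pattern (not the exact mainline code:
follow_page_sketch is a hypothetical stand-in, and the hugepage, swap and
error paths are omitted). The point is that the pte lock is taken and
released inside the walk itself, rather than by the caller:

static struct page *follow_page_sketch(struct mm_struct *mm,
				       unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;
	struct page *page = NULL;

	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	/* the pte lock is taken here, not by the caller */
	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (pte_present(pte) && pfn_valid(pte_pfn(pte)))
		page = pfn_to_page(pte_pfn(pte));
	pte_unmap_unlock(ptep, ptl);
	return page;
}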
But get_user_pages (and get_futex_key) then need follow_page to pin the
page for them: so take Daniel's suggestion of passing bitflags to follow_page.
We need one for WRITE, another for TOUCH (it was the accessed flag before:
that vanished along with check_user_page_readable, but surely get_numa_maps is
wrong to mark every page it finds as accessed), and another for GET.
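As merged, those become bit definitions in include/linux/mm.h (values as in
the mainline patch), acted on inside follow_page while the pte lock from the
sketch above is still held; the fragment below is a simplified sketch of that:

#define FOLL_WRITE	0x01	/* check pte is writable */
#define FOLL_TOUCH	0x02	/* mark page accessed */
#define FOLL_GET	0x04	/* do get_page on page */

	/* in follow_page, with the pte lock held: */
	if (flags & FOLL_GET)
		get_page(page);		/* pin for get_user_pages/get_futex_key */
	if (flags & FOLL_TOUCH)
		mark_page_accessed(page);	/* touched only when asked */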
And another, ANON, to dispose of untouched_anonymous_page: it seems silly for
that to descend a second time; let follow_page observe whether there was no
page table and return ZERO_PAGE if so. Fix a minor bug there too: check
VM_LOCKED - make_pages_present ought to make readonly anonymous pages present.
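A sketch of that fallback, simplified from the mainline patch: when the walk
finds no page table at all and FOLL_ANON is set, follow_page hands back the
kernel's shared zero page instead of descending a second time:

#define FOLL_ANON	0x08	/* give ZERO_PAGE if no pgtable */

	/* in follow_page, when pgd/pud/pmd shows no page table: */
	if (flags & FOLL_ANON) {
		page = ZERO_PAGE(address);
		if (flags & FOLL_GET)
			get_page(page);
		BUG_ON(flags & FOLL_WRITE);	/* a write must fault in a real page */
		return page;
	}
	return NULL;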
Give get_numa_maps a cond_resched while we're there.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'fs/proc')
-rw-r--r--	fs/proc/task_mmu.c	3
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 7e5e7ec2e36d..d2fa42006d8f 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -419,7 +419,6 @@ static struct numa_maps *get_numa_maps(const struct vm_area_struct *vma)
 	for_each_node(i)
 		md->node[i] =0;
 
-	spin_lock(&mm->page_table_lock);
 	for (vaddr = vma->vm_start; vaddr < vma->vm_end; vaddr += PAGE_SIZE) {
 		page = follow_page(mm, vaddr, 0);
 		if (page) {
@@ -434,8 +433,8 @@ static struct numa_maps *get_numa_maps(const struct vm_area_struct *vma)
 				md->anon++;
 			md->node[page_to_nid(page)]++;
 		}
+		cond_resched();
 	}
-	spin_unlock(&mm->page_table_lock);
 	return md;
 }
 