author    Christoph Lameter <cl@linux-foundation.org>  2008-11-06 15:53:30 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-11-06 18:41:18 -0500
commit    0aedadf91a70a11c4a3e7c7d99b21e5528af8d5d
tree      9e2529fa79ff4134f99772b5ed3693316e2221a5  /mm/mempolicy.c
parent    17a1217e12d8c8434f8a3deef7bf980c724a6ac7
mm: move migrate_prep out from under mmap_sem
Move the migrate_prep() call outside the mmap_sem for the following system calls:

1. sys_move_pages
2. sys_migrate_pages
3. sys_mbind()

It really does not matter when we flush the lru. The system is free to add
pages onto the lru even during migration, which will make page migration
either skip the page (mbind, migrate_pages) or return a busy state
(move_pages).

Fixes this lockdep warning (and potential deadlock):

  Some VM path has
      mmap_sem -> kevent_wq          (via lru_add_drain_all())

  net/core/dev.c::dev_ioctl() has
      rtnl_lock -> mmap_sem          (the ioctl does copy_from_user(),
                                      which can page-fault)

  linkwatch_event has
      kevent_wq -> rtnl_lock

Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reported-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
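The three edges above close a cycle over mmap_sem, kevent_wq and rtnl_lock,
which is exactly the pattern lockdep objects to. As an illustration only,
here is a minimal userspace sketch of such a cycle, with pthread mutexes
standing in for the three kernel resources (in the kernel the kevent_wq edge
is really a wait on the keventd workqueue, not a mutex, and the sleep() calls
merely widen the race window; none of this is the kernel code itself):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Stand-ins for the three resources in the lock-order cycle. */
static pthread_mutex_t mmap_sem  = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t kevent_wq = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t rtnl_lock = PTHREAD_MUTEX_INITIALIZER;

/* VM path: mmap_sem -> kevent_wq (lru_add_drain_all() under mmap_sem). */
static void *vm_path(void *arg)
{
        pthread_mutex_lock(&mmap_sem);
        sleep(1);                       /* widen the race window */
        pthread_mutex_lock(&kevent_wq);
        pthread_mutex_unlock(&kevent_wq);
        pthread_mutex_unlock(&mmap_sem);
        return NULL;
}

/* dev_ioctl(): rtnl_lock -> mmap_sem (copy_from_user() can fault). */
static void *ioctl_path(void *arg)
{
        pthread_mutex_lock(&rtnl_lock);
        sleep(1);
        pthread_mutex_lock(&mmap_sem);
        pthread_mutex_unlock(&mmap_sem);
        pthread_mutex_unlock(&rtnl_lock);
        return NULL;
}

/* linkwatch_event: kevent_wq -> rtnl_lock. */
static void *linkwatch_path(void *arg)
{
        pthread_mutex_lock(&kevent_wq);
        sleep(1);
        pthread_mutex_lock(&rtnl_lock);
        pthread_mutex_unlock(&rtnl_lock);
        pthread_mutex_unlock(&kevent_wq);
        return NULL;
}

int main(void)
{
        pthread_t t[3];

        pthread_create(&t[0], NULL, vm_path, NULL);
        pthread_create(&t[1], NULL, ioctl_path, NULL);
        pthread_create(&t[2], NULL, linkwatch_path, NULL);
        for (int i = 0; i < 3; i++)
                pthread_join(t[i], NULL);  /* usually never returns: 3-way hold-and-wait */
        puts("no deadlock this run");
        return 0;
}

Built with -pthread, the three threads almost always wedge in a circular
hold-and-wait. The patch below breaks the first edge rather than reordering
the others: migrate_prep() (which ends up in lru_add_drain_all()) now runs
before mmap_sem is taken, so mmap_sem never waits on the keventd workqueue.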
Diffstat (limited to 'mm/mempolicy.c')
-rw-r--r--  mm/mempolicy.c | 18 +++++++++++-------
1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 36f42573a335..e9493b1c1117 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -489,12 +489,6 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 	int err;
 	struct vm_area_struct *first, *vma, *prev;
 
-	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
-
-		err = migrate_prep();
-		if (err)
-			return ERR_PTR(err);
-	}
 
 	first = find_vma(mm, start);
 	if (!first)
@@ -809,9 +803,13 @@ int do_migrate_pages(struct mm_struct *mm,
 	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
 {
 	int busy = 0;
-	int err = 0;
+	int err;
 	nodemask_t tmp;
 
+	err = migrate_prep();
+	if (err)
+		return err;
+
 	down_read(&mm->mmap_sem);
 
 	err = migrate_vmas(mm, from_nodes, to_nodes, flags);
@@ -974,6 +972,12 @@ static long do_mbind(unsigned long start, unsigned long len,
 		 start, start + len, mode, mode_flags,
 		 nmask ? nodes_addr(*nmask)[0] : -1);
 
+	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
+
+		err = migrate_prep();
+		if (err)
+			return err;
+	}
 	down_write(&mm->mmap_sem);
 	vma = check_range(mm, start, end, nmask,
 			  flags | MPOL_MF_INVERT, &pagelist);