diff options
author | Kirill A. Shutemov <kirill.shutemov@linux.intel.com> | 2013-11-14 17:30:56 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-11-14 19:32:14 -0500 |
commit | 117b0791ac42f2ec447bc864e70ad622b5604059 (patch) | |
tree | 3381f98791bcafd31ea4ae9d0fa566815112020d /mm/rmap.c | |
parent | bf929152e9f6c49b66fad4ebf08cc95b02ce48f5 (diff) |
mm, thp: move ptl taking inside page_check_address_pmd()
With split page table lock we can't know which lock we need to take
before we find the relevant pmd.
Let's move lock taking inside the function.
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Tested-by: Alex Thorlton <athorlton@sgi.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Dave Jones <davej@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kees Cook <keescook@chromium.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michael Kerrisk <mtk.manpages@gmail.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Robin Holt <robinmholt@gmail.com>
Cc: Sedat Dilek <sedat.dilek@gmail.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/rmap.c')
-rw-r--r-- | mm/rmap.c | 13 |
1 files changed, 5 insertions, 8 deletions
@@ -665,25 +665,23 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma, | |||
665 | unsigned long *vm_flags) | 665 | unsigned long *vm_flags) |
666 | { | 666 | { |
667 | struct mm_struct *mm = vma->vm_mm; | 667 | struct mm_struct *mm = vma->vm_mm; |
668 | spinlock_t *ptl; | ||
668 | int referenced = 0; | 669 | int referenced = 0; |
669 | 670 | ||
670 | if (unlikely(PageTransHuge(page))) { | 671 | if (unlikely(PageTransHuge(page))) { |
671 | pmd_t *pmd; | 672 | pmd_t *pmd; |
672 | 673 | ||
673 | spin_lock(&mm->page_table_lock); | ||
674 | /* | 674 | /* |
675 | * rmap might return false positives; we must filter | 675 | * rmap might return false positives; we must filter |
676 | * these out using page_check_address_pmd(). | 676 | * these out using page_check_address_pmd(). |
677 | */ | 677 | */ |
678 | pmd = page_check_address_pmd(page, mm, address, | 678 | pmd = page_check_address_pmd(page, mm, address, |
679 | PAGE_CHECK_ADDRESS_PMD_FLAG); | 679 | PAGE_CHECK_ADDRESS_PMD_FLAG, &ptl); |
680 | if (!pmd) { | 680 | if (!pmd) |
681 | spin_unlock(&mm->page_table_lock); | ||
682 | goto out; | 681 | goto out; |
683 | } | ||
684 | 682 | ||
685 | if (vma->vm_flags & VM_LOCKED) { | 683 | if (vma->vm_flags & VM_LOCKED) { |
686 | spin_unlock(&mm->page_table_lock); | 684 | spin_unlock(ptl); |
687 | *mapcount = 0; /* break early from loop */ | 685 | *mapcount = 0; /* break early from loop */ |
688 | *vm_flags |= VM_LOCKED; | 686 | *vm_flags |= VM_LOCKED; |
689 | goto out; | 687 | goto out; |
@@ -692,10 +690,9 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma, | |||
692 | /* go ahead even if the pmd is pmd_trans_splitting() */ | 690 | /* go ahead even if the pmd is pmd_trans_splitting() */ |
693 | if (pmdp_clear_flush_young_notify(vma, address, pmd)) | 691 | if (pmdp_clear_flush_young_notify(vma, address, pmd)) |
694 | referenced++; | 692 | referenced++; |
695 | spin_unlock(&mm->page_table_lock); | 693 | spin_unlock(ptl); |
696 | } else { | 694 | } else { |
697 | pte_t *pte; | 695 | pte_t *pte; |
698 | spinlock_t *ptl; | ||
699 | 696 | ||
700 | /* | 697 | /* |
701 | * rmap might return false positives; we must filter | 698 | * rmap might return false positives; we must filter |