diff options
author | Kirill A. Shutemov <kirill.shutemov@linux.intel.com> | 2013-11-14 17:31:02 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-11-14 19:32:14 -0500 |
commit | cb900f41215447433cbc456d1c4294e858a84d7c (patch) | |
tree | 1f3704d9a023a20baa2872d6639a58387ef2d7c2 /mm/mempolicy.c | |
parent | c389a250ab4cfa4a3775d9f2c45271618af6d5b2 (diff) |
mm, hugetlb: convert hugetlbfs to use split pmd lock
Hugetlb supports multiple page sizes. We use split lock only for PMD
level, but not for PUD.
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Tested-by: Alex Thorlton <athorlton@sgi.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Dave Jones <davej@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kees Cook <keescook@chromium.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michael Kerrisk <mtk.manpages@gmail.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Robin Holt <robinmholt@gmail.com>
Cc: Sedat Dilek <sedat.dilek@gmail.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/mempolicy.c')
-rw-r--r-- | mm/mempolicy.c | 5 |
1 files changed, 3 insertions, 2 deletions
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 4cc19f6ab6c6..c4403cdf3433 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -525,8 +525,9 @@ static void queue_pages_hugetlb_pmd_range(struct vm_area_struct *vma,
525 | #ifdef CONFIG_HUGETLB_PAGE | 525 | #ifdef CONFIG_HUGETLB_PAGE |
526 | int nid; | 526 | int nid; |
527 | struct page *page; | 527 | struct page *page; |
528 | spinlock_t *ptl; | ||
528 | 529 | ||
529 | spin_lock(&vma->vm_mm->page_table_lock); | 530 | ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, (pte_t *)pmd); |
530 | page = pte_page(huge_ptep_get((pte_t *)pmd)); | 531 | page = pte_page(huge_ptep_get((pte_t *)pmd)); |
531 | nid = page_to_nid(page); | 532 | nid = page_to_nid(page); |
532 | if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT)) | 533 | if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT)) |
@@ -536,7 +537,7 @@ static void queue_pages_hugetlb_pmd_range(struct vm_area_struct *vma,
536 | (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) | 537 | (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) |
537 | isolate_huge_page(page, private); | 538 | isolate_huge_page(page, private); |
538 | unlock: | 539 | unlock: |
539 | spin_unlock(&vma->vm_mm->page_table_lock); | 540 | spin_unlock(ptl); |
540 | #else | 541 | #else |
541 | BUG(); | 542 | BUG(); |
542 | #endif | 543 | #endif |