author    Kirill A. Shutemov <kirill.shutemov@linux.intel.com>  2012-12-12 16:50:59 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>        2012-12-12 20:38:31 -0500
commit    e180377f1ae48b3cbc559c9875d9b038f7f000c6 (patch)
tree      c51e0db181acc0372479bce5cd68b99abca7d8bb /mm
parent    cad7f613c4d010e1d0f05c9a4fb33c7ae40ba115 (diff)
thp: change split_huge_page_pmd() interface
Pass the vma instead of the mm, and add an address parameter.

In most cases we already have the vma on the stack. We provide
split_huge_page_pmd_mm() for the few cases where we have an mm but no vma.

This change is preparation for the huge zero pmd splitting implementation.
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@linux.intel.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
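
As a rough sketch of the resulting calling convention (the example_* callers
below are hypothetical, not part of this patch; only split_huge_page_pmd()
and split_huge_page_pmd_mm() come from the change itself): call sites that
already have a vma pass it together with the address, while mm-only call
sites go through the new split_huge_page_pmd_mm() wrapper.

static void example_split_with_vma(struct vm_area_struct *vma,
				   unsigned long addr, pmd_t *pmd)
{
	/* Old interface: split_huge_page_pmd(vma->vm_mm, pmd); */
	/* New interface: the vma and address let the callee compute and
	 * sanity-check the huge page boundary (addr & HPAGE_PMD_MASK). */
	split_huge_page_pmd(vma, addr, pmd);
}

static void example_split_without_vma(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmd)
{
	/* No vma at hand (e.g. the page walker): the wrapper looks one
	 * up with find_vma() before splitting. */
	split_huge_page_pmd_mm(mm, addr, pmd);
}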
Diffstat (limited to 'mm')
-rw-r--r--  mm/huge_memory.c | 19
-rw-r--r--  mm/memory.c      |  4
-rw-r--r--  mm/mempolicy.c   |  2
-rw-r--r--  mm/mprotect.c    |  2
-rw-r--r--  mm/mremap.c      |  2
-rw-r--r--  mm/pagewalk.c    |  2
6 files changed, 23 insertions(+), 8 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 7742fb36eb4d..de6aa5f3fdd2 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2475,9 +2475,14 @@ static int khugepaged(void *none)
 	return 0;
 }
 
-void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd)
+void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
+		pmd_t *pmd)
 {
 	struct page *page;
+	unsigned long haddr = address & HPAGE_PMD_MASK;
+	struct mm_struct *mm = vma->vm_mm;
+
+	BUG_ON(vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE);
 
 	spin_lock(&mm->page_table_lock);
 	if (unlikely(!pmd_trans_huge(*pmd))) {
@@ -2495,6 +2500,16 @@ void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd)
 	BUG_ON(pmd_trans_huge(*pmd));
 }
 
+void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
+		pmd_t *pmd)
+{
+	struct vm_area_struct *vma;
+
+	vma = find_vma(mm, address);
+	BUG_ON(vma == NULL);
+	split_huge_page_pmd(vma, address, pmd);
+}
+
 static void split_huge_page_address(struct mm_struct *mm,
 				    unsigned long address)
 {
@@ -2509,7 +2524,7 @@ static void split_huge_page_address(struct mm_struct *mm,
 	 * Caller holds the mmap_sem write mode, so a huge pmd cannot
 	 * materialize from under us.
 	 */
-	split_huge_page_pmd(mm, pmd);
+	split_huge_page_pmd_mm(mm, address, pmd);
 }
 
 void __vma_adjust_trans_huge(struct vm_area_struct *vma,
diff --git a/mm/memory.c b/mm/memory.c
index 259b34fe1347..f9a4b0cb8623 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1243,7 +1243,7 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
 					BUG();
 				}
 #endif
-				split_huge_page_pmd(vma->vm_mm, pmd);
+				split_huge_page_pmd(vma, addr, pmd);
 			} else if (zap_huge_pmd(tlb, vma, pmd, addr))
 				goto next;
 			/* fall through */
@@ -1512,7 +1512,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
 	}
 	if (pmd_trans_huge(*pmd)) {
 		if (flags & FOLL_SPLIT) {
-			split_huge_page_pmd(mm, pmd);
+			split_huge_page_pmd(vma, address, pmd);
 			goto split_fallthrough;
 		}
 		spin_lock(&mm->page_table_lock);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 05b28361a39b..0719e8dd4945 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -511,7 +511,7 @@ static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 	pmd = pmd_offset(pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
-		split_huge_page_pmd(vma->vm_mm, pmd);
+		split_huge_page_pmd(vma, addr, pmd);
 		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
 			continue;
 		if (check_pte_range(vma, pmd, addr, next, nodes,
diff --git a/mm/mprotect.c b/mm/mprotect.c
index a40992610ab6..e8c3938db6fa 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -90,7 +90,7 @@ static inline void change_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 		next = pmd_addr_end(addr, end);
 		if (pmd_trans_huge(*pmd)) {
 			if (next - addr != HPAGE_PMD_SIZE)
-				split_huge_page_pmd(vma->vm_mm, pmd);
+				split_huge_page_pmd(vma, addr, pmd);
 			else if (change_huge_pmd(vma, pmd, addr, newprot))
 				continue;
 			/* fall through */
diff --git a/mm/mremap.c b/mm/mremap.c
index 1b61c2d3307a..eabb24da6c9e 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -182,7 +182,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 				need_flush = true;
 				continue;
 			} else if (!err) {
-				split_huge_page_pmd(vma->vm_mm, old_pmd);
+				split_huge_page_pmd(vma, old_addr, old_pmd);
 			}
 			VM_BUG_ON(pmd_trans_huge(*old_pmd));
 		}
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 6c118d012bb5..35aa294656cd 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -58,7 +58,7 @@ again:
 		if (!walk->pte_entry)
 			continue;
 
-		split_huge_page_pmd(walk->mm, pmd);
+		split_huge_page_pmd_mm(walk->mm, addr, pmd);
 		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
 			goto again;
 		err = walk_pte_range(pmd, addr, next, walk);