author		Andrea Arcangeli <aarcange@redhat.com>	2011-01-13 18:46:43 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-01-13 20:32:40 -0500
commit		8ac1f8320a0073f28cf9e0491af4cd98f504f92a (patch)
tree		4dad891c302587fdc7b099b18e05d7dbc5526c64 /mm/memory.c
parent		64cc6ae001d70bc59e5f854e6b5678f59110df16 (diff)
thp: pte alloc trans splitting
pte alloc routines must wait for split_huge_page if the pmd is not present
and not null (i.e. pmd_trans_splitting).  The additional branches are
optimized away at compile time by pmd_trans_splitting() if the config option
is off.  However, we must pass the vma down in order to know which anon_vma
lock to wait for.
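
As a rough illustration of the pattern described above, here is a minimal
standalone C sketch (userspace, compilable on its own, not the kernel code
itself) of the check-under-lock / wait-after-unlock flow and of why the extra
branch costs nothing when the config option is off.  The pmd_t, the bit value,
and the stub helpers are simplified stand-ins chosen for illustration, not the
real kernel definitions.

#include <stdio.h>

typedef unsigned long pmd_t;		/* stand-in for the kernel's pmd_t */

#define PMD_SPLITTING	0x2UL		/* hypothetical "splitting" bit */

static int pmd_none(pmd_t pmd) { return pmd == 0; }

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int pmd_trans_splitting(pmd_t pmd) { return (pmd & PMD_SPLITTING) != 0; }
#else
/* With THP compiled out this is a constant 0, so the compiler can drop the
 * "else if" arm below entirely -- the additional branch vanishes. */
static int pmd_trans_splitting(pmd_t pmd) { (void)pmd; return 0; }
#endif

/* Mimics the shape of __pte_alloc() after this patch: decide under the
 * (elided) page_table_lock, but only wait after dropping it. */
static void pte_alloc_sketch(pmd_t *pmd)
{
	int wait_split_huge_page = 0;

	/* spin_lock(&mm->page_table_lock) would go here */
	if (pmd_none(*pmd))
		printf("pmd empty: populate it with the new page table\n");
	else if (pmd_trans_splitting(*pmd))
		wait_split_huge_page = 1;	/* cannot wait with the lock held */
	/* spin_unlock(&mm->page_table_lock) would go here */

	if (wait_split_huge_page)
		printf("wait on vma->anon_vma until the split completes\n");
}

int main(void)
{
	pmd_t none = 0, splitting = PMD_SPLITTING;

	pte_alloc_sketch(&none);	/* takes the populate path */
	pte_alloc_sketch(&splitting);	/* takes the wait path (if THP on) */
	return 0;
}

Built with CONFIG_TRANSPARENT_HUGEPAGE defined, the second call reports the
wait; without it the helper is a compile-time constant and the whole else-if
arm is dead code, which is the "optimized away at compile time" point made in
the message above.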
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	19
1 file changed, 13 insertions, 6 deletions
diff --git a/mm/memory.c b/mm/memory.c
index bdf19366b705..567bca80ea53 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -394,9 +394,11 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	}
 }
 
-int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
+int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
+		pmd_t *pmd, unsigned long address)
 {
 	pgtable_t new = pte_alloc_one(mm, address);
+	int wait_split_huge_page;
 	if (!new)
 		return -ENOMEM;
 
@@ -416,14 +418,18 @@ int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
 	smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
 
 	spin_lock(&mm->page_table_lock);
-	if (!pmd_present(*pmd)) {	/* Has another populated it ? */
+	wait_split_huge_page = 0;
+	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
 		mm->nr_ptes++;
 		pmd_populate(mm, pmd, new);
 		new = NULL;
-	}
+	} else if (unlikely(pmd_trans_splitting(*pmd)))
+		wait_split_huge_page = 1;
 	spin_unlock(&mm->page_table_lock);
 	if (new)
 		pte_free(mm, new);
+	if (wait_split_huge_page)
+		wait_split_huge_page(vma->anon_vma, pmd);
 	return 0;
 }
 
@@ -436,10 +442,11 @@ int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
 	smp_wmb(); /* See comment in __pte_alloc */
 
 	spin_lock(&init_mm.page_table_lock);
-	if (!pmd_present(*pmd)) {	/* Has another populated it ? */
+	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
 		pmd_populate_kernel(&init_mm, pmd, new);
 		new = NULL;
-	}
+	} else
+		VM_BUG_ON(pmd_trans_splitting(*pmd));
 	spin_unlock(&init_mm.page_table_lock);
 	if (new)
 		pte_free_kernel(&init_mm, new);
@@ -3253,7 +3260,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	pmd = pmd_alloc(mm, pud, address);
 	if (!pmd)
 		return VM_FAULT_OOM;
-	pte = pte_alloc_map(mm, pmd, address);
+	pte = pte_alloc_map(mm, vma, pmd, address);
 	if (!pte)
 		return VM_FAULT_OOM;
 