author	Vlastimil Babka <vbabka@suse.cz>	2014-10-09 18:27:00 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-09 22:25:53 -0400
commit	8b1645685acf3c7e0b93611fb4b328ef45c47e92 (patch)
tree	2446890b39f06e4ddbe6101e0644fb969a790b80 /mm/huge_memory.c
parent	447f05bb488bff4282088259b04f47f0f9f76760 (diff)
mm, THP: don't hold mmap_sem in khugepaged when allocating THP
When allocating a huge page for collapsing, khugepaged currently holds
mmap_sem for reading on the mm where collapsing occurs. Afterwards the
read lock is dropped before the write lock is taken on the same mmap_sem.
Holding mmap_sem during the whole huge page allocation is therefore
useless; the vma needs to be rechecked after taking the write lock anyway.
Furthermore, huge page allocation might involve a rather long sync
compaction, and thus block any mmap_sem writers and, for example, affect
workloads that perform frequent m(un)map or mprotect operations.
This patch simply releases the read lock before allocating a huge page.
It also deletes an outdated comment that assumed the vma must be stable
because the function was using alloc_hugepage_vma(). This has not been
true since commit 9f1b868a13ac ("mm: thp: khugepaged: add policy for
finding target node").
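The change boils down to moving one up_read() above the allocation. As a
minimal sketch of the before/after locking pattern (a userspace analogue
using POSIX rwlocks as a stand-in for mmap_sem; slow_alloc() and the
collapse_*() names are hypothetical, not kernel APIs):

#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>

static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;

/* Stand-in for a THP allocation that triggers long sync compaction. */
static void *slow_alloc(void)
{
	usleep(100 * 1000);
	return malloc(2 * 1024 * 1024);
}

/* Old behavior: the slow allocation runs with the read lock held,
 * stalling any writer (mmap/munmap/mprotect) queued behind it. */
static void *collapse_old(void)
{
	void *hpage;

	pthread_rwlock_rdlock(&mmap_sem);
	hpage = slow_alloc();		/* read lock held needlessly */
	pthread_rwlock_unlock(&mmap_sem);

	pthread_rwlock_wrlock(&mmap_sem);
	/* ... revalidate the vma and collapse ... */
	pthread_rwlock_unlock(&mmap_sem);
	return hpage;
}

/* New behavior: drop the read lock before allocating; since the vma
 * must be rechecked under the write lock anyway, nothing is lost. */
static void *collapse_new(void)
{
	void *hpage;

	pthread_rwlock_rdlock(&mmap_sem);
	/* ... pick the target vma and node under the read lock ... */
	pthread_rwlock_unlock(&mmap_sem);

	hpage = slow_alloc();		/* no lock held */

	pthread_rwlock_wrlock(&mmap_sem);
	/* ... revalidate the vma and collapse ... */
	pthread_rwlock_unlock(&mmap_sem);
	return hpage;
}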
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Minchan Kim <minchan@kernel.org>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Michal Nazarewicz <mina86@mina86.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Rik van Riel <riel@redhat.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--	mm/huge_memory.c	20
1 file changed, 7 insertions(+), 13 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index f8ffd9412ec5..55ab569c31b4 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2322,23 +2322,17 @@ static struct page
 		       int node)
 {
 	VM_BUG_ON_PAGE(*hpage, *hpage);
+
 	/*
-	 * Allocate the page while the vma is still valid and under
-	 * the mmap_sem read mode so there is no memory allocation
-	 * later when we take the mmap_sem in write mode. This is more
-	 * friendly behavior (OTOH it may actually hide bugs) to
-	 * filesystems in userland with daemons allocating memory in
-	 * the userland I/O paths. Allocating memory with the
-	 * mmap_sem in read mode is good idea also to allow greater
-	 * scalability.
+	 * Before allocating the hugepage, release the mmap_sem read lock.
+	 * The allocation can take potentially a long time if it involves
+	 * sync compaction, and we do not need to hold the mmap_sem during
+	 * that. We will recheck the vma after taking it again in write mode.
 	 */
+	up_read(&mm->mmap_sem);
+
 	*hpage = alloc_pages_exact_node(node, alloc_hugepage_gfpmask(
 		khugepaged_defrag(), __GFP_OTHER_NODE), HPAGE_PMD_ORDER);
-	/*
-	 * After allocating the hugepage, release the mmap_sem read lock in
-	 * preparation for taking it in write mode.
-	 */
-	up_read(&mm->mmap_sem);
 	if (unlikely(!*hpage)) {
 		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 		*hpage = ERR_PTR(-ENOMEM);