author     Andrea Arcangeli <aarcange@redhat.com>        2011-05-24 20:12:14 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-05-25 11:39:21 -0400
commit     692e0b35427a088bf75d9363788c61c7edbe93a5
tree       db1b4365b70fbe809753e6ac097b60fb4f7299a8  /mm/huge_memory.c
parent     9547d01bfb9c351dc19067f8a4cea9d3955f4125
mm: thp: optimize memcg charge in khugepaged
We don't need to hold the mmap_sem through mem_cgroup_newpage_charge(): the mmap_sem is only held to keep the vma stable, and we no longer need the vma to be stable once alloc_hugepage_vma() has returned.

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <jweiner@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
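The idea behind the patch is simply to narrow the read-side critical section: drop the lock as soon as the state it protects (the vma) is no longer needed, before doing slow, unrelated work (the memcg charge). Below is a minimal userspace sketch of that pattern with a POSIX rwlock; alloc_hugepage_like() and slow_charge_like() are illustrative stand-ins, not kernel APIs.

    /* Sketch only: hold the read lock just long enough to use the
     * protected state, then release it before the slow "charge" step,
     * mirroring what this patch does with mmap_sem in collapse_huge_page().
     * Build with: cc -pthread sketch.c
     */
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;
    static int vma_like_state = 42;          /* stands in for the vma */

    static int alloc_hugepage_like(int hint) /* needs the locked state */
    {
            return hint * 2;                 /* pretend allocation */
    }

    static int slow_charge_like(int page)    /* does NOT need the lock */
    {
            return page >= 0 ? 0 : -1;       /* pretend memcg charge */
    }

    int main(void)
    {
            pthread_rwlock_rdlock(&map_lock);
            int page = alloc_hugepage_like(vma_like_state);
            /* The protected state is no longer needed once allocation
             * returned, so drop the read lock before the slow charge. */
            pthread_rwlock_unlock(&map_lock);

            if (slow_charge_like(page) != 0) {
                    fprintf(stderr, "charge failed\n");
                    return EXIT_FAILURE;
            }
            printf("allocated and charged page token %d\n", page);
            return EXIT_SUCCESS;
    }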
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--  mm/huge_memory.c  21
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 90eef404ec2e..615d9743a3cb 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1771,12 +1771,9 @@ static void collapse_huge_page(struct mm_struct *mm,
 
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 #ifndef CONFIG_NUMA
+	up_read(&mm->mmap_sem);
 	VM_BUG_ON(!*hpage);
 	new_page = *hpage;
-	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
-		up_read(&mm->mmap_sem);
-		return;
-	}
 #else
 	VM_BUG_ON(*hpage);
 	/*
@@ -1791,22 +1788,26 @@ static void collapse_huge_page(struct mm_struct *mm,
 	 */
 	new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
 				      node, __GFP_OTHER_NODE);
+
+	/*
+	 * After allocating the hugepage, release the mmap_sem read lock in
+	 * preparation for taking it in write mode.
+	 */
+	up_read(&mm->mmap_sem);
 	if (unlikely(!new_page)) {
-		up_read(&mm->mmap_sem);
 		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 		*hpage = ERR_PTR(-ENOMEM);
 		return;
 	}
+#endif
+
 	count_vm_event(THP_COLLAPSE_ALLOC);
 	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
-		up_read(&mm->mmap_sem);
+#ifdef CONFIG_NUMA
 		put_page(new_page);
+#endif
 		return;
 	}
-#endif
-
-	/* after allocating the hugepage upgrade to mmap_sem write mode */
-	up_read(&mm->mmap_sem);
 
 	/*
 	 * Prevent all access to pagetables with the exception of