author     Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>      2012-03-21 19:33:57 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-03-21 20:54:57 -0400
commit     025c5b2451e42c9e8dfdecd6dc84956ce8f321b5 (patch)
tree       423b4ef1a0ce021360304a80f6e0ba902581a3ad /include/linux/huge_mm.h
parent     5aaabe831eb527e0d9284f0745d830a755f70393 (diff)
thp: optimize away unnecessary page table locking
Currently, when we check whether we can handle a thp as it is or need to split it into regular-sized pages, we take the page table lock before checking whether the given pmd maps a thp at all. Because of this, when the pmd turns out not to be a huge pmd we suffer unnecessary lock/unlock overhead. To remove that overhead, this patch introduces an optimized check function and replaces several similar pieces of logic with it.

[akpm@linux-foundation.org: checkpatch fixes]
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Jiri Slaby <jslaby@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
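For context, the open-coded pattern this patch targets looks roughly like the sketch below: the caller takes mm->page_table_lock before it even knows whether the pmd maps a thp, so the non-huge case pays a pointless lock/unlock round trip. This is an illustration only, not code from the kernel tree; the function name old_style_pmd_walk is made up, and it assumes the 2012-era scheme where huge pmds are serialized by the per-mm page_table_lock.

#include <linux/mm.h>
#include <linux/huge_mm.h>

/* Illustration only: the kind of pattern the new helper replaces. */
static void old_style_pmd_walk(struct vm_area_struct *vma, pmd_t *pmd)
{
	struct mm_struct *mm = vma->vm_mm;

	spin_lock(&mm->page_table_lock);
	if (pmd_trans_huge(*pmd)) {
		if (pmd_trans_splitting(*pmd)) {
			/* A split is in flight: drop the lock and wait. */
			spin_unlock(&mm->page_table_lock);
			wait_split_huge_page(vma->anon_vma, pmd);
		} else {
			/* Stable huge pmd: handle the thp as one unit. */
			spin_unlock(&mm->page_table_lock);
		}
		return;
	}
	/*
	 * Not a huge pmd: the lock/unlock above bought nothing.  This is
	 * the overhead the patch removes by testing pmd_trans_huge()
	 * before taking the lock.
	 */
	spin_unlock(&mm->page_table_lock);
	/* ... fall back to walking individual ptes ... */
}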
Diffstat (limited to 'include/linux/huge_mm.h')
-rw-r--r--  include/linux/huge_mm.h  17
1 file changed, 17 insertions(+), 0 deletions(-)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 1b921299abc4..f56cacb4fec3 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -113,6 +113,18 @@ extern void __vma_adjust_trans_huge(struct vm_area_struct *vma,
 				    unsigned long start,
 				    unsigned long end,
 				    long adjust_next);
+extern int __pmd_trans_huge_lock(pmd_t *pmd,
+				 struct vm_area_struct *vma);
+/* mmap_sem must be held on entry */
+static inline int pmd_trans_huge_lock(pmd_t *pmd,
+				      struct vm_area_struct *vma)
+{
+	VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem));
+	if (pmd_trans_huge(*pmd))
+		return __pmd_trans_huge_lock(pmd, vma);
+	else
+		return 0;
+}
 static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
 					 unsigned long start,
 					 unsigned long end,
@@ -176,6 +188,11 @@ static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
 					 long adjust_next)
 {
 }
+static inline int pmd_trans_huge_lock(pmd_t *pmd,
+				      struct vm_area_struct *vma)
+{
+	return 0;
+}
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 #endif /* _LINUX_HUGE_MM_H */
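For reference, a caller of the new helper would look something like the sketch below. The function name example_handle_pmd is made up, and the return-value convention (1 means *pmd is a stable huge pmd with mm->page_table_lock held on return; any other value means the lock is not held and the caller should fall back to the pte path) is the one used by __pmd_trans_huge_lock() elsewhere in this patch series, not something visible in this header alone.

#include <linux/mm.h>
#include <linux/huge_mm.h>

/*
 * Hypothetical caller (not part of this diff).  Assumed convention:
 * 1 == stable huge pmd, page_table_lock held on return;
 * anything else == lock not held, fall back to the pte path.
 */
static int example_handle_pmd(struct vm_area_struct *vma, pmd_t *pmd)
{
	if (pmd_trans_huge_lock(pmd, vma) == 1) {
		/* Operate on the whole huge mapping under the lock. */
		spin_unlock(&vma->vm_mm->page_table_lock);
		return 1;
	}
	/* Not huge (or a split just completed): walk individual ptes. */
	return 0;
}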