aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux/huge_mm.h
diff options
context:
space:
mode:
authorMel Gorman <mgorman@suse.de>2012-10-25 08:16:31 -0400
committerMel Gorman <mgorman@suse.de>2012-12-11 09:42:39 -0500
commitd10e63f29488b0f312a443f9507ea9b6fd3c9090 (patch)
treeb39e3caa5d25e9e5ebad84c606a724e25c6b8e91 /include/linux/huge_mm.h
parent1ba6e0b50b479cbadb8f05ebde3020da9ac87201 (diff)
mm: numa: Create basic numa page hinting infrastructure
Note: This patch started as "mm/mpol: Create special PROT_NONE infrastructure" and preserves the basic idea but steals *very* heavily from "autonuma: numa hinting page faults entry points" for the actual fault handlers without the migration parts. The end result is barely recognisable as either patch so all Signed-off and Reviewed-bys are dropped. If Peter, Ingo and Andrea are ok with this version, I will re-add the signed-offs-by to reflect the history. In order to facilitate a lazy -- fault driven -- migration of pages, create a special transient PAGE_NUMA variant, we can then use the 'spurious' protection faults to drive our migrations from. The meaning of PAGE_NUMA depends on the architecture but on x86 it is effectively PROT_NONE. Actual PROT_NONE mappings will not generate these NUMA faults for the reason that the page fault code checks the permission on the VMA (and will throw a segmentation fault on actual PROT_NONE mappings), before it ever calls handle_mm_fault. [dhillf@gmail.com: Fix typo] Signed-off-by: Mel Gorman <mgorman@suse.de> Reviewed-by: Rik van Riel <riel@redhat.com>
Diffstat (limited to 'include/linux/huge_mm.h')
-rw-r--r--include/linux/huge_mm.h10
1 files changed, 10 insertions, 0 deletions
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index b31cb7da0346..a1d26a98c655 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -159,6 +159,10 @@ static inline struct page *compound_trans_head(struct page *page)
 	}
 	return page;
 }
+
+extern int do_huge_pmd_numa_page(struct mm_struct *mm, unsigned long addr,
+				 pmd_t pmd, pmd_t *pmdp);
+
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
 #define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
 #define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
@@ -195,6 +199,12 @@ static inline int pmd_trans_huge_lock(pmd_t *pmd,
 {
 	return 0;
 }
+
+static inline int do_huge_pmd_numa_page(struct mm_struct *mm, unsigned long addr,
+					pmd_t pmd, pmd_t *pmdp)
+{
+}
+
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 #endif /* _LINUX_HUGE_MM_H */