about summary refs log tree commit diff stats
path: root/mm/hugetlb.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--mm/hugetlb.c23
1 file changed, 20 insertions, 3 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index acc0fb3cf067..58980676b842 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -27,6 +27,9 @@ unsigned long max_huge_pages;
27static struct list_head hugepage_freelists[MAX_NUMNODES]; 27static struct list_head hugepage_freelists[MAX_NUMNODES];
28static unsigned int nr_huge_pages_node[MAX_NUMNODES]; 28static unsigned int nr_huge_pages_node[MAX_NUMNODES];
29static unsigned int free_huge_pages_node[MAX_NUMNODES]; 29static unsigned int free_huge_pages_node[MAX_NUMNODES];
30static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
31unsigned long hugepages_treat_as_movable;
32
30/* 33/*
31 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages 34 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
32 */ 35 */
@@ -68,12 +71,13 @@ static struct page *dequeue_huge_page(struct vm_area_struct *vma,
68{ 71{
69 int nid; 72 int nid;
70 struct page *page = NULL; 73 struct page *page = NULL;
71 struct zonelist *zonelist = huge_zonelist(vma, address); 74 struct zonelist *zonelist = huge_zonelist(vma, address,
75 htlb_alloc_mask);
72 struct zone **z; 76 struct zone **z;
73 77
74 for (z = zonelist->zones; *z; z++) { 78 for (z = zonelist->zones; *z; z++) {
75 nid = zone_to_nid(*z); 79 nid = zone_to_nid(*z);
76 if (cpuset_zone_allowed_softwall(*z, GFP_HIGHUSER) && 80 if (cpuset_zone_allowed_softwall(*z, htlb_alloc_mask) &&
77 !list_empty(&hugepage_freelists[nid])) 81 !list_empty(&hugepage_freelists[nid]))
78 break; 82 break;
79 } 83 }
@@ -113,7 +117,7 @@ static int alloc_fresh_huge_page(void)
113 prev_nid = nid; 117 prev_nid = nid;
114 spin_unlock(&nid_lock); 118 spin_unlock(&nid_lock);
115 119
116 page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP|__GFP_NOWARN, 120 page = alloc_pages_node(nid, htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
117 HUGETLB_PAGE_ORDER); 121 HUGETLB_PAGE_ORDER);
118 if (page) { 122 if (page) {
119 set_compound_page_dtor(page, free_huge_page); 123 set_compound_page_dtor(page, free_huge_page);
@@ -263,6 +267,19 @@ int hugetlb_sysctl_handler(struct ctl_table *table, int write,
263 max_huge_pages = set_max_huge_pages(max_huge_pages); 267 max_huge_pages = set_max_huge_pages(max_huge_pages);
264 return 0; 268 return 0;
265} 269}
270
271int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
272 struct file *file, void __user *buffer,
273 size_t *length, loff_t *ppos)
274{
275 proc_dointvec(table, write, file, buffer, length, ppos);
276 if (hugepages_treat_as_movable)
277 htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
278 else
279 htlb_alloc_mask = GFP_HIGHUSER;
280 return 0;
281}
282
266#endif /* CONFIG_SYSCTL */ 283#endif /* CONFIG_SYSCTL */
267 284
268int hugetlb_report_meminfo(char *buf) 285int hugetlb_report_meminfo(char *buf)