author    Mel Gorman <mel@csn.ul.ie>                              2007-07-17 07:03:13 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-07-17 13:22:59 -0400
commit    396faf0303d273219db5d7eb4a2879ad977ed185
tree      96cb64fd6713ef7a924f4f878e259aea781f079a /mm
parent    2a1e274acf0b1c192face19a4be7c12d4503eaaf
Allow huge page allocations to use GFP_HIGHUSER_MOVABLE
Huge pages are not movable so are not allocated from ZONE_MOVABLE. However, as ZONE_MOVABLE will always have pages that can be migrated or reclaimed, it can be used to satisfy hugepage allocations even when the system has been running a long time. This allows an administrator to resize the hugepage pool at runtime depending on the size of ZONE_MOVABLE.

This patch adds a new sysctl called hugepages_treat_as_movable. When a non-zero value is written to it, future allocations for the huge page pool will use ZONE_MOVABLE. Despite huge pages being non-movable, we do not introduce additional external fragmentation of note as huge pages are always the largest contiguous block we care about.

[akpm@linux-foundation.org: various fixes]
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c    23
-rw-r--r--  mm/mempolicy.c   5
2 files changed, 23 insertions(+), 5 deletions(-)
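Note that the diffstat above is limited to 'mm', so the hook-up of the new sysctl does not appear in this diff: hugetlb_treat_movable_handler() still has to be registered through a ctl_table entry in kernel/sysctl.c. A sketch of what that entry would look like, inferred from the mm/ side of the patch rather than taken from this diff (field values are assumptions, following the CTL_UNNUMBERED convention of the time):

	/* kernel/sysctl.c (outside this mm-only diffstat) -- sketch only */
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "hugepages_treat_as_movable",
		.data		= &hugepages_treat_as_movable,
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= &hugetlb_treat_movable_handler,
	},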
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index acc0fb3cf067..58980676b842 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -27,6 +27,9 @@ unsigned long max_huge_pages;
 static struct list_head hugepage_freelists[MAX_NUMNODES];
 static unsigned int nr_huge_pages_node[MAX_NUMNODES];
 static unsigned int free_huge_pages_node[MAX_NUMNODES];
+static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
+unsigned long hugepages_treat_as_movable;
+
 /*
  * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
  */
@@ -68,12 +71,13 @@ static struct page *dequeue_huge_page(struct vm_area_struct *vma,
 {
 	int nid;
 	struct page *page = NULL;
-	struct zonelist *zonelist = huge_zonelist(vma, address);
+	struct zonelist *zonelist = huge_zonelist(vma, address,
+					htlb_alloc_mask);
 	struct zone **z;
 
 	for (z = zonelist->zones; *z; z++) {
 		nid = zone_to_nid(*z);
-		if (cpuset_zone_allowed_softwall(*z, GFP_HIGHUSER) &&
+		if (cpuset_zone_allowed_softwall(*z, htlb_alloc_mask) &&
 		    !list_empty(&hugepage_freelists[nid]))
 			break;
 	}
@@ -113,7 +117,7 @@ static int alloc_fresh_huge_page(void)
 	prev_nid = nid;
 	spin_unlock(&nid_lock);
 
-	page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP|__GFP_NOWARN,
+	page = alloc_pages_node(nid, htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
 					HUGETLB_PAGE_ORDER);
 	if (page) {
 		set_compound_page_dtor(page, free_huge_page);
@@ -263,6 +267,19 @@ int hugetlb_sysctl_handler(struct ctl_table *table, int write,
 	max_huge_pages = set_max_huge_pages(max_huge_pages);
 	return 0;
 }
+
+int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
+			struct file *file, void __user *buffer,
+			size_t *length, loff_t *ppos)
+{
+	proc_dointvec(table, write, file, buffer, length, ppos);
+	if (hugepages_treat_as_movable)
+		htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
+	else
+		htlb_alloc_mask = GFP_HIGHUSER;
+	return 0;
+}
+
 #endif /* CONFIG_SYSCTL */
 
 int hugetlb_report_meminfo(char *buf)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 4c0f99996811..9f4e9b95e8f2 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1203,7 +1203,8 @@ static inline unsigned interleave_nid(struct mempolicy *pol,
 
 #ifdef CONFIG_HUGETLBFS
 /* Return a zonelist suitable for a huge page allocation. */
-struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr)
+struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
+							gfp_t gfp_flags)
 {
 	struct mempolicy *pol = get_vma_policy(current, vma, addr);
 
@@ -1211,7 +1212,7 @@ struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr)
 		unsigned nid;
 
 		nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
-		return NODE_DATA(nid)->node_zonelists + gfp_zone(GFP_HIGHUSER);
+		return NODE_DATA(nid)->node_zonelists + gfp_zone(gfp_flags);
 	}
 	return zonelist_policy(GFP_HIGHUSER, pol);
 }
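Likewise outside this mm-only diffstat is the matching prototype change: since huge_zonelist() now takes a gfp_t, its declaration and the !CONFIG_NUMA stub in include/linux/mempolicy.h must grow the same argument. A hedged sketch, assuming the stub simply mirrors the NUMA path above:

	/* include/linux/mempolicy.h -- sketch, not part of this diff */
	#ifdef CONFIG_NUMA
	extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
			unsigned long addr, gfp_t gfp_flags);
	#else
	static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
			unsigned long addr, gfp_t gfp_flags)
	{
		/* no policy to consult: use node 0's zonelist for this mask */
		return NODE_DATA(0)->node_zonelists + gfp_zone(gfp_flags);
	}
	#endif

In use, an administrator writes 1 to /proc/sys/vm/hugepages_treat_as_movable and then grows the pool via /proc/sys/vm/nr_hugepages; the new pool pages are allocated with GFP_HIGHUSER_MOVABLE and so may come from ZONE_MOVABLE. Writing 0 reverts future pool allocations to GFP_HIGHUSER.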