aboutsummaryrefslogtreecommitdiffstats
path: root/mm/hugetlb.c
diff options
context:
space:
mode:
authorAneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>2012-07-31 19:41:54 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2012-07-31 21:42:40 -0400
commit47d38344abd0c7c6793b59ac741aa5b205fc197c (patch)
treea68c974d98720b48b0713d464ab45efaddfc8331 /mm/hugetlb.c
parent3965c9ae47d64aadf6f13b6fcd37767b83c0689a (diff)
hugetlb: rename max_hstate to hugetlb_max_hstate
This patchset implements a cgroup resource controller for HugeTLB pages. The controller allows limiting the HugeTLB usage per control group and enforces the controller limit during page fault. Since HugeTLB doesn't support page reclaim, enforcing the limit at page fault time implies that the application will get a SIGBUS signal if it tries to access HugeTLB pages beyond its limit. This requires the application to know beforehand how many HugeTLB pages it would require for its use. The goal is to control how many HugeTLB pages a group of tasks can allocate. It can be looked at as an extension of the existing quota interface which limits the number of HugeTLB pages per hugetlbfs superblock. HPC job schedulers require jobs to specify their resource requirements in the job file. Once their requirements can be met, job schedulers (like SLURM) will schedule the job. We need to make sure that the jobs won't consume more resources than requested. If they do, we should either error out or kill the application. This patch: Rename max_hstate to hugetlb_max_hstate. We will be using this from other subsystems like the hugetlb controller in later patches. Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> Acked-by: David Rientjes <rientjes@google.com> Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Acked-by: Hillf Danton <dhillf@gmail.com> Acked-by: Michal Hocko <mhocko@suse.cz> Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--mm/hugetlb.c14
1 files changed, 7 insertions, 7 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e198831276a3..c86830931cc6 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -34,7 +34,7 @@ const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
34static gfp_t htlb_alloc_mask = GFP_HIGHUSER; 34static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
35unsigned long hugepages_treat_as_movable; 35unsigned long hugepages_treat_as_movable;
36 36
37static int max_hstate; 37static int hugetlb_max_hstate;
38unsigned int default_hstate_idx; 38unsigned int default_hstate_idx;
39struct hstate hstates[HUGE_MAX_HSTATE]; 39struct hstate hstates[HUGE_MAX_HSTATE];
40 40
@@ -46,7 +46,7 @@ static unsigned long __initdata default_hstate_max_huge_pages;
46static unsigned long __initdata default_hstate_size; 46static unsigned long __initdata default_hstate_size;
47 47
48#define for_each_hstate(h) \ 48#define for_each_hstate(h) \
49 for ((h) = hstates; (h) < &hstates[max_hstate]; (h)++) 49 for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
50 50
51/* 51/*
52 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages 52 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
@@ -1897,9 +1897,9 @@ void __init hugetlb_add_hstate(unsigned order)
1897 printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n"); 1897 printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n");
1898 return; 1898 return;
1899 } 1899 }
1900 BUG_ON(max_hstate >= HUGE_MAX_HSTATE); 1900 BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
1901 BUG_ON(order == 0); 1901 BUG_ON(order == 0);
1902 h = &hstates[max_hstate++]; 1902 h = &hstates[hugetlb_max_hstate++];
1903 h->order = order; 1903 h->order = order;
1904 h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1); 1904 h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
1905 h->nr_huge_pages = 0; 1905 h->nr_huge_pages = 0;
@@ -1920,10 +1920,10 @@ static int __init hugetlb_nrpages_setup(char *s)
1920 static unsigned long *last_mhp; 1920 static unsigned long *last_mhp;
1921 1921
1922 /* 1922 /*
1923 * !max_hstate means we haven't parsed a hugepagesz= parameter yet, 1923 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
1924 * so this hugepages= parameter goes to the "default hstate". 1924 * so this hugepages= parameter goes to the "default hstate".
1925 */ 1925 */
1926 if (!max_hstate) 1926 if (!hugetlb_max_hstate)
1927 mhp = &default_hstate_max_huge_pages; 1927 mhp = &default_hstate_max_huge_pages;
1928 else 1928 else
1929 mhp = &parsed_hstate->max_huge_pages; 1929 mhp = &parsed_hstate->max_huge_pages;
@@ -1942,7 +1942,7 @@ static int __init hugetlb_nrpages_setup(char *s)
1942 * But we need to allocate >= MAX_ORDER hstates here early to still 1942 * But we need to allocate >= MAX_ORDER hstates here early to still
1943 * use the bootmem allocator. 1943 * use the bootmem allocator.
1944 */ 1944 */
1945 if (max_hstate && parsed_hstate->order >= MAX_ORDER) 1945 if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
1946 hugetlb_hstate_alloc_pages(parsed_hstate); 1946 hugetlb_hstate_alloc_pages(parsed_hstate);
1947 1947
1948 last_mhp = mhp; 1948 last_mhp = mhp;