author     Mike Kravetz <mike.kravetz@oracle.com>          2015-04-15 19:13:42 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-04-15 19:35:18 -0400
commit     7ca02d0ae586fe7df59632966a64f3f1a756ef05 (patch)
tree       71a66a1c50afb8b8e47c48058695693b7f405cb3 /mm/hugetlb.c
parent     1c5ecae3a93fa1ab51a784d77e9c9ed54e67c65f (diff)
hugetlbfs: accept subpool min_size mount option and setup accordingly
Make 'min_size=<value>' an option when mounting a hugetlbfs. This option
takes the same values as the 'size' option. min_size can be specified
without specifying size. If both are specified, min_size must be less
than or equal to size, else the mount will fail. If min_size is
specified, then at mount time an attempt is made to reserve min_size
pages. If the reservation fails, the mount fails. At umount time, the
reserved pages are released.
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Aneesh Kumar <aneesh.kumar@linux.vnet.ibm.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
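Usage illustration (not part of the commit): a minimal sketch of exercising the
new option from userspace via mount(2). The mount point /mnt/huge and the
2M/4M values are assumptions chosen for the example; min_size accepts the same
value formats as size.

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
        /*
         * Hypothetical example: ask for at least 2M of huge pages to be
         * reserved at mount time (min_size), with an upper bound of 4M
         * (size).  If the min_size reservation cannot be made, the mount
         * fails, matching the behavior described in the commit message.
         */
        if (mount("none", "/mnt/huge", "hugetlbfs", 0,
                  "min_size=2M,size=4M") != 0) {
                perror("mount hugetlbfs");
                return 1;
        }
        return 0;
}

At umount time the pages reserved for min_size are released, as described above.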
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--  mm/hugetlb.c  25
1 file changed, 21 insertions, 4 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 499cb72c74b1..995c8d65a95c 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -61,6 +61,9 @@ DEFINE_SPINLOCK(hugetlb_lock);
 static int num_fault_mutexes;
 static struct mutex *htlb_fault_mutex_table ____cacheline_aligned_in_smp;
 
+/* Forward declaration */
+static int hugetlb_acct_memory(struct hstate *h, long delta);
+
 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
 {
         bool free = (spool->count == 0) && (spool->used_hpages == 0);
@@ -68,12 +71,18 @@ static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
         spin_unlock(&spool->lock);
 
         /* If no pages are used, and no other handles to the subpool
-         * remain, free the subpool the subpool remain */
-        if (free)
+         * remain, give up any reservations based on minimum size and
+         * free the subpool */
+        if (free) {
+                if (spool->min_hpages != -1)
+                        hugetlb_acct_memory(spool->hstate,
+                                                -spool->min_hpages);
                 kfree(spool);
+        }
 }
 
-struct hugepage_subpool *hugepage_new_subpool(long nr_blocks)
+struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
+                                                long min_hpages)
 {
         struct hugepage_subpool *spool;
 
@@ -83,7 +92,15 @@ struct hugepage_subpool *hugepage_new_subpool(long nr_blocks)
 
         spin_lock_init(&spool->lock);
         spool->count = 1;
-        spool->max_hpages = nr_blocks;
+        spool->max_hpages = max_hpages;
+        spool->hstate = h;
+        spool->min_hpages = min_hpages;
+
+        if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
+                kfree(spool);
+                return NULL;
+        }
+        spool->rsv_hpages = min_hpages;
 
         return spool;
 }
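
A hedged sketch of the caller side (not part of this patch): a mount path could
create the subpool with the new three-argument hugepage_new_subpool() and fail
the mount when the minimum reservation cannot be made. The wrapper name
example_setup_subpool() and the sbinfo/-ENOMEM handling are assumptions for
illustration; -1 means "not specified", as in the hunks above.

/* Hedged caller sketch; assumes kernel context (hugetlb definitions, errno). */
static int example_setup_subpool(struct hugetlbfs_sb_info *sbinfo,
                                 struct hstate *h,
                                 long max_hpages, long min_hpages)
{
        if (max_hpages == -1 && min_hpages == -1)
                return 0;       /* neither size nor min_size requested */

        sbinfo->spool = hugepage_new_subpool(h, max_hpages, min_hpages);
        if (!sbinfo->spool)
                return -ENOMEM; /* min_hpages could not be reserved; fail the mount */

        return 0;
}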