path: root/mm/hugetlb.c
author    Mike Kravetz <mike.kravetz@oracle.com>  2019-05-13 20:19:20 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-05-14 12:47:48 -0400
commit    fd875dca7c71744cbb0ebbcde7d45e5ee05b7637 (patch)
tree      75e13f70af2f5dd2796c5133e49e8386038b0494 /mm/hugetlb.c
parent    299c83dce9ea3a79bb4b5511d2cb996b6b8e5111 (diff)
hugetlbfs: fix potential over/underflow setting node specific nr_hugepages
The number of node specific huge pages can be set via a file such as:

  /sys/devices/system/node/node1/hugepages/hugepages-2048kB/nr_hugepages

When a node specific value is specified, the global number of huge pages
must also be adjusted. This adjustment is calculated as the specified node
specific value + (global value - current node value). If the node specific
value provided by the user is large enough, this calculation could overflow
an unsigned long, leading to a smaller than expected number of huge pages.

To fix, check the calculation for overflow. If overflow is detected, use
ULONG_MAX as the requested value. This is in line with the user request to
allocate as many huge pages as possible.

It was also noticed that the above calculation was done outside the
hugetlb_lock, so the values could be inconsistent and result in underflow.
To fix, the calculation is moved within the routine set_max_huge_pages()
where the lock is held.

In addition, the code in __nr_hugepages_store_common() which tries to
handle the case of not being able to allocate a node mask would likely
result in incorrect behavior. Luckily, it is very unlikely we will ever
take this path. If we do, simply return ENOMEM.

Link: http://lkml.kernel.org/r/20190328220533.19884-1-mike.kravetz@oracle.com
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Reported-by: Jing Xiangfeng <jingxiangfeng@huawei.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Cc: David Rientjes <rientjes@google.com>
Cc: Alex Ghiti <alex@ghiti.fr>
Cc: Jing Xiangfeng <jingxiangfeng@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
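The overflow case can be reproduced with plain unsigned arithmetic. Below is
a minimal userspace C sketch of the same adjustment and clamp; global_pages,
node_pages, and adjust() are illustrative stand-ins for the hstate counters,
not kernel code (the kernel performs this under hugetlb_lock):

#include <limits.h>
#include <stdio.h>

/* Hypothetical stand-ins for h->nr_huge_pages and h->nr_huge_pages_node[nid]. */
static unsigned long global_pages = 200;
static unsigned long node_pages = 50;

/*
 * Mirror of the patch's adjustment: requested node specific value plus
 * (global value - current node value), clamped to ULONG_MAX on wraparound.
 */
static unsigned long adjust(unsigned long count)
{
        unsigned long old_count = count;

        count += global_pages - node_pages;
        if (count < old_count)          /* unsigned wraparound detected */
                count = ULONG_MAX;
        return count;
}

int main(void)
{
        /* A sane request: 100 node pages yields a global target of 250. */
        printf("%lu\n", adjust(100));
        /*
         * A huge request wraps around; without the clamp the global target
         * would silently become a small value (139 here on 64-bit).
         */
        printf("%lu\n", adjust(ULONG_MAX - 10));
        return 0;
}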
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--  mm/hugetlb.c  41
1 file changed, 34 insertions(+), 7 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 2f901a6e13d2..a81f2a8556c8 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2288,7 +2288,7 @@ found:
 }
 
 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
-static int set_max_huge_pages(struct hstate *h, unsigned long count,
+static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
                               nodemask_t *nodes_allowed)
 {
         unsigned long min_count, ret;
@@ -2296,6 +2296,26 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count,
         spin_lock(&hugetlb_lock);
 
         /*
+         * Check for a node specific request.
+         * Changing node specific huge page count may require a corresponding
+         * change to the global count. In any case, the passed node mask
+         * (nodes_allowed) will restrict alloc/free to the specified node.
+         */
+        if (nid != NUMA_NO_NODE) {
+                unsigned long old_count = count;
+
+                count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
+                /*
+                 * User may have specified a large count value which caused the
+                 * above calculation to overflow. In this case, they wanted
+                 * to allocate as many huge pages as possible. Set count to
+                 * largest possible value to align with their intention.
+                 */
+                if (count < old_count)
+                        count = ULONG_MAX;
+        }
+
+        /*
          * Gigantic pages runtime allocation depend on the capability for large
          * page range allocation.
          * If the system does not provide this feature, return an error when
@@ -2446,15 +2466,22 @@ static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
                 }
         } else if (nodes_allowed) {
                 /*
-                 * per node hstate attribute: adjust count to global,
-                 * but restrict alloc/free to the specified node.
+                 * Node specific request. count adjustment happens in
+                 * set_max_huge_pages() after acquiring hugetlb_lock.
                  */
-                count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
                 init_nodemask_of_node(nodes_allowed, nid);
-        } else
-                nodes_allowed = &node_states[N_MEMORY];
+        } else {
+                /*
+                 * Node specific request, but we could not allocate the few
+                 * words required for a node mask. We are unlikely to hit
+                 * this condition. Since we can not pass down the appropriate
+                 * node mask, just return ENOMEM.
+                 */
+                err = -ENOMEM;
+                goto out;
+        }
 
-        err = set_max_huge_pages(h, count, nodes_allowed);
+        err = set_max_huge_pages(h, count, nid, nodes_allowed);
 
 out:
         if (nodes_allowed != &node_states[N_MEMORY])
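For reference, the fixed path is reached by writing to the per-node sysfs
file named in the commit message. A minimal userspace sketch, assuming a
NUMA system with node1, 2048 kB huge pages, and root privileges:

#include <limits.h>
#include <stdio.h>

int main(void)
{
        /* Example node and page size; path taken from the commit message. */
        const char *path = "/sys/devices/system/node/node1/hugepages/"
                           "hugepages-2048kB/nr_hugepages";
        FILE *f = fopen(path, "w");

        if (!f) {
                perror("fopen");
                return 1;
        }
        /*
         * Before the fix, a value this large could wrap the global count
         * computed in __nr_hugepages_store_common(); it is now clamped to
         * ULONG_MAX inside set_max_huge_pages() under hugetlb_lock.
         */
        fprintf(f, "%lu\n", ULONG_MAX);
        fclose(f);
        return 0;
}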