author     Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>     2015-06-24 19:56:59 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-06-24 20:49:42 -0400
commit     641844f5616d7c6597309f560838f996466d7aac
tree       1b9fff7ae2ba5ec92959563c52a144c01735341a  /mm/hugetlb.c
parent     414e2fb8ce5a999571c21eb2ca4d66e53ddce800
mm/hugetlb: introduce minimum hugepage order
Currently the initial value of order in dissolve_free_huge_pages() is 64 or
32, which leads to the following static checker warning:
mm/hugetlb.c:1203 dissolve_free_huge_pages()
warn: potential right shift more than type allows '9,18,64'
This risks an infinite loop, because 1 << order (which can evaluate to 0) is
used as the step in a for-loop like this:

  for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order)
      ...
So this patch fixes it by using a global minimum_order calculated at boot time.
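As an aside (not part of the kernel change), a minimal user-space sketch of the
hazard the checker flags; the identifiers only mimic the pfn loop, and the
actual value of the oversized shift is compiler- and architecture-dependent:

#include <stdio.h>

int main(void)
{
	/* Mirrors the old initial value: 64 on 64-bit builds, 32 on 32-bit. */
	unsigned int order = 8 * sizeof(void *);
	unsigned long start_pfn = 0, end_pfn = 1024, pfn;

	/*
	 * Shifting by >= the width of the type is undefined behaviour in C;
	 * depending on how the target handles it, the result may be 0.
	 */
	unsigned long step = 1UL << order;

	printf("order=%u, step=%lu\n", order, step);
	if (step == 0) {
		/* With a zero step the kernel's scan loop would never advance. */
		printf("step == 0: the pfn loop would spin forever\n");
		return 1;
	}
	for (pfn = start_pfn; pfn < end_pfn; pfn += step)
		; /* dissolve_free_huge_page(pfn_to_page(pfn)) in the kernel */
	return 0;
}

On x86-64 the oversized shift typically wraps to a shift of 0 and yields 1,
which is still not a valid scan step; the point is that no well-defined value
exists, which is exactly what the checker warns about.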
   text    data     bss     dec     hex  filename
  28313     469   84236  113018   1b97a  mm/hugetlb.o
  28256     473   84236  112965   1b945  mm/hugetlb.o (patched)
Fixes: c8721bbbdd36 ("mm: memory-hotplug: enable memory hotplug to handle hugepage")
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--  mm/hugetlb.c  19
1 file changed, 11 insertions, 8 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 716465ae57aa..10de25cf1f99 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -40,6 +40,11 @@ int hugepages_treat_as_movable;
 int hugetlb_max_hstate __read_mostly;
 unsigned int default_hstate_idx;
 struct hstate hstates[HUGE_MAX_HSTATE];
+/*
+ * Minimum page order among possible hugepage sizes, set to a proper value
+ * at boot time.
+ */
+static unsigned int minimum_order __read_mostly = UINT_MAX;
 
 __initdata LIST_HEAD(huge_boot_pages);
 
@@ -1188,19 +1193,13 @@ static void dissolve_free_huge_page(struct page *page)
  */
 void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
 {
-	unsigned int order = 8 * sizeof(void *);
 	unsigned long pfn;
-	struct hstate *h;
 
 	if (!hugepages_supported())
 		return;
 
-	/* Set scan step to minimum hugepage size */
-	for_each_hstate(h)
-		if (order > huge_page_order(h))
-			order = huge_page_order(h);
-	VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << order));
-	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order)
+	VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << minimum_order));
+	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
 		dissolve_free_huge_page(pfn_to_page(pfn));
 }
 
@@ -1627,10 +1626,14 @@ static void __init hugetlb_init_hstates(void)
 	struct hstate *h;
 
 	for_each_hstate(h) {
+		if (minimum_order > huge_page_order(h))
+			minimum_order = huge_page_order(h);
+
 		/* oversize hugepages were init'ed in early boot */
 		if (!hstate_is_gigantic(h))
 			hugetlb_hstate_alloc_pages(h);
 	}
+	VM_BUG_ON(minimum_order == UINT_MAX);
 }
 
 static char * __init memfmt(char *buf, unsigned long n)
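As a concrete example (not stated in the commit, but consistent with the checker
output): with 4 KB base pages, the '9,18,64' above likely corresponds to 2 MB
hugepages (order 9), 1 GB hugepages (order 18) and the bogus 8 * sizeof(void *)
starting value. After this patch minimum_order is settled once in
hugetlb_init_hstates(), so on such a configuration dissolve_free_huge_pages()
advances by 1 << 9 = 512 pfns per iteration, i.e. one 2 MB hugepage at a time.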