aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorXishi Qiu <qiuxishi@huawei.com>2012-07-31 19:43:19 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2012-07-31 21:42:43 -0400
commitca57df79d4f64e1a4886606af4289d40636189c5 (patch)
treee56d0411e552dec588778caaaa5e7d9885dd1a2c
parent51a07e50b230d14e1b8bef50d66655d003fa006c (diff)
mm: setup pageblock_order before it's used by sparsemem
On architectures with CONFIG_HUGETLB_PAGE_SIZE_VARIABLE set, such as Itanium, pageblock_order is a variable with default value of 0. It's set to the right value by set_pageblock_order() in function free_area_init_core(). But pageblock_order may be used by sparse_init() before free_area_init_core() is called along path: sparse_init() ->sparse_early_usemaps_alloc_node() ->usemap_size() ->SECTION_BLOCKFLAGS_BITS ->((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS) The uninitialized pageblock_order will cause memory wasting because usemap_size() returns a much bigger value than is really needed. For example, on an Itanium platform, sparse_init() pageblock_order=0 usemap_size=24576 free_area_init_core() before pageblock_order=0, usemap_size=24576 free_area_init_core() after pageblock_order=12, usemap_size=8 That means 24K memory has been wasted for each section, so fix it by calling set_pageblock_order() from sparse_init(). Signed-off-by: Xishi Qiu <qiuxishi@huawei.com> Signed-off-by: Jiang Liu <liuj97@gmail.com> Cc: Tony Luck <tony.luck@intel.com> Cc: Yinghai Lu <yinghai@kernel.org> Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> Cc: David Rientjes <rientjes@google.com> Cc: Keping Chen <chenkeping@huawei.com> Cc: <stable@vger.kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--mm/internal.h2
-rw-r--r--mm/page_alloc.c4
-rw-r--r--mm/sparse.c3
3 files changed, 7 insertions, 2 deletions
diff --git a/mm/internal.h b/mm/internal.h
index da6b9b2ed3fc..3314f79d775a 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -353,3 +353,5 @@ extern u32 hwpoison_filter_enable;
 extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
         unsigned long, unsigned long,
         unsigned long, unsigned long);
+
+extern void set_pageblock_order(void);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 94fc475c3f94..6c7e3bd93a85 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4304,7 +4304,7 @@ static inline void setup_usemap(struct pglist_data *pgdat,
 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
 
 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
-static inline void __init set_pageblock_order(void)
+void __init set_pageblock_order(void)
 {
 	unsigned int order;
 
@@ -4332,7 +4332,7 @@ static inline void __init set_pageblock_order(void)
  * include/linux/pageblock-flags.h for the values of pageblock_order based on
  * the kernel config
  */
-static inline void set_pageblock_order(void)
+void __init set_pageblock_order(void)
 {
 }
 
diff --git a/mm/sparse.c b/mm/sparse.c
index c7bb952400c8..950981fd07c5 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -493,6 +493,9 @@ void __init sparse_init(void)
 	struct page **map_map;
 #endif
 
+	/* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
+	set_pageblock_order();
+
 	/*
 	 * map is using big page (aka 2M in x86 64 bit)
 	 * usemap is less one page (aka 24 bytes)