author     Jiang Liu <liuj97@gmail.com>                        2013-04-29 18:06:21 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>     2013-04-29 18:54:29 -0400
commit     69afade72a3e13e96a065f757891d384d466123f (patch)
tree       e294349336908e3ffac054f8fa9827226c9ed5b1
parent     2d42a40d592fadbd01cb639ac21a761fa31423f8 (diff)
mm: introduce common helper functions to deal with reserved/managed pages
The original goal of this patchset is to fix the bug reported at
https://bugzilla.kernel.org/show_bug.cgi?id=53501. It has since been
expanded to reduce common code used by memory initialization.

This is the first part, which applies to v3.9-rc1. It introduces the
following common helper functions to simplify free_initmem() and
free_initrd_mem() on different architectures:

adjust_managed_page_count(): will be used to adjust totalram_pages,
	totalhigh_pages and zone->managed_pages when reserving or
	unreserving a page.
__free_reserved_page(): free a reserved page into the buddy system
	without adjusting the page statistics.
free_reserved_page(): free a reserved page into the buddy system and
	adjust the page statistics.
mark_page_reserved(): mark a page as reserved and adjust the page
	statistics.
free_reserved_area(): free a contiguous range of pages by calling
	free_reserved_page().
free_initmem_default(): default method to free __init pages.

We have only tested this patchset on x86 platforms and have done basic
compilation tests using cross-compilers from ftp.kernel.org, so some
code may not compile on some architectures. Any help testing this
patchset is welcome!

There are several other parts still under development:
Part2: introduce free_highmem_page() to simplify freeing highmem pages
Part3: refine code to manage totalram_pages, totalhigh_pages and
	zone->managed_pages
Part4: introduce helper functions to simplify mem_init() and remove
	the global variable num_physpages

This patch:

Code to deal with reserved/managed pages is duplicated across many
architectures, so introduce common helper functions to reduce the
duplication. These helpers will also be used to concentrate the code
that modifies totalram_pages and zone->managed_pages, which makes that
code much clearer.

Signed-off-by: Jiang Liu <jiang.liu@huawei.com>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: "James E.J. Bottomley" <jejb@parisc-linux.org>
Cc: Anatolij Gustschin <agust@denx.de>
Cc: Aurelien Jacquiot <a-jacquiot@ti.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chen Liqin <liqin.chen@sunplusct.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: David Howells <dhowells@redhat.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Eric Biederman <ebiederm@xmission.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Guan Xuetao <gxt@mprc.pku.edu.cn>
Cc: Haavard Skinnemoen <hskinnemoen@gmail.com>
Cc: Hans-Christian Egtvedt <egtvedt@samfundet.no>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Helge Deller <deller@gmx.de>
Cc: Hirokazu Takata <takata@linux-m32r.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: James Hogan <james.hogan@imgtec.com>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Jiang Liu <jiang.liu@huawei.com>
Cc: Jiang Liu <liuj97@gmail.com>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Koichi Yasutake <yasutake.koichi@jp.panasonic.com>
Cc: Lennox Wu <lennox.wu@gmail.com>
Cc: Mark Salter <msalter@redhat.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Mikael Starvik <starvik@axis.com>
Cc: Mike Frysinger <vapier@gentoo.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Sam Ravnborg <sam@ravnborg.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Cc: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
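As a sketch of the intended effect (an illustration, not part of this patch), an architecture's free_initrd_mem() that currently open-codes the ClearPageReserved()/init_page_count()/free_page()/totalram_pages++ loop could collapse to a single call. The "before" body below stands in for the pattern duplicated across arch/*/mm/init.c; the poison value 0 (no poisoning) and the "initrd" label are illustrative assumptions:

/* Hypothetical arch/<arch>/mm/init.c, shown for illustration only. */
#include <linux/mm.h>

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
#if 0	/* before: the loop each architecture used to carry */
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
#endif
	/* after: one call into the common helper; 0 = no poisoning */
	free_reserved_area(start, end, 0, "initrd");
}
#endif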
-rw-r--r--  include/linux/mm.h   48
-rw-r--r--  mm/page_alloc.c      20
2 files changed, 68 insertions(+), 0 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f3c7b1f9d1d8..d064c73c925e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1295,6 +1295,54 @@ extern void free_area_init_node(int nid, unsigned long * zones_size,
 		unsigned long zone_start_pfn, unsigned long *zholes_size);
 extern void free_initmem(void);
 
+/*
+ * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK)
+ * into the buddy system. The freed pages will be poisoned with pattern
+ * "poison" if it's non-zero.
+ * Return pages freed into the buddy system.
+ */
+extern unsigned long free_reserved_area(unsigned long start, unsigned long end,
+					int poison, char *s);
+
+static inline void adjust_managed_page_count(struct page *page, long count)
+{
+	totalram_pages += count;
+}
+
+/* Free the reserved page into the buddy system, so it gets managed. */
+static inline void __free_reserved_page(struct page *page)
+{
+	ClearPageReserved(page);
+	init_page_count(page);
+	__free_page(page);
+}
+
+static inline void free_reserved_page(struct page *page)
+{
+	__free_reserved_page(page);
+	adjust_managed_page_count(page, 1);
+}
+
+static inline void mark_page_reserved(struct page *page)
+{
+	SetPageReserved(page);
+	adjust_managed_page_count(page, -1);
+}
+
+/*
+ * Default method to free all the __init memory into the buddy system.
+ * The freed pages will be poisoned with pattern "poison" if it is
+ * non-zero. Return pages freed into the buddy system.
+ */
+static inline unsigned long free_initmem_default(int poison)
+{
+	extern char __init_begin[], __init_end[];
+
+	return free_reserved_area(PAGE_ALIGN((unsigned long)&__init_begin),
+				  ((unsigned long)&__init_end) & PAGE_MASK,
+				  poison, "unused kernel");
+}
+
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 /*
  * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
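For completeness, a minimal sketch (an assumption about how later conversions in this series would look, not something this patch changes) of an architecture's free_initmem() reduced to the default helper introduced above:

/* Hypothetical arch/<arch>/mm/init.c after conversion. */
void free_initmem(void)
{
	/* 0: release the __init pages without poisoning them first */
	free_initmem_default(0);
}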
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index da7a2fe7332e..5c660f5ba3d3 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5121,6 +5121,26 @@ early_param("movablecore", cmdline_parse_movablecore);
 
 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 
+unsigned long free_reserved_area(unsigned long start, unsigned long end,
+				 int poison, char *s)
+{
+	unsigned long pages, pos;
+
+	pos = start = PAGE_ALIGN(start);
+	end &= PAGE_MASK;
+	for (pages = 0; pos < end; pos += PAGE_SIZE, pages++) {
+		if (poison)
+			memset((void *)pos, poison, PAGE_SIZE);
+		free_reserved_page(virt_to_page(pos));
+	}
+
+	if (pages && s)
+		pr_info("Freeing %s memory: %ldK (%lx - %lx)\n",
+			s, pages << (PAGE_SHIFT - 10), start, end);
+
+	return pages;
+}
+
 /**
  * set_dma_reserve - set the specified number of pages reserved in the first zone
  * @new_dma_reserve: The number of pages to mark reserved
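For reference, the pages << (PAGE_SHIFT - 10) expression in the pr_info() above converts a page count to KiB: with the common 4 KiB pages (PAGE_SHIFT = 12) the shift is 2, so freeing 170 pages reports "680K". Note also that the helper shrinks the range inward, since PAGE_ALIGN() rounds start up while end &= PAGE_MASK rounds end down, so a partially covered page at either boundary is left reserved rather than freed.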