about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--include/linux/mm.h48
-rw-r--r--mm/page_alloc.c20
2 files changed, 68 insertions, 0 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f3c7b1f9d1d8..d064c73c925e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1295,6 +1295,54 @@ extern void free_area_init_node(int nid, unsigned long * zones_size,
			unsigned long zone_start_pfn, unsigned long *zholes_size);
extern void free_initmem(void);

1298/*
1299 * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK)
1300 * into the buddy system. The freed pages will be poisoned with pattern
1301 * "poison" if it's non-zero.
1302 * Return pages freed into the buddy system.
1303 */
1304extern unsigned long free_reserved_area(unsigned long start, unsigned long end,
1305 int poison, char *s);
1306
1307static inline void adjust_managed_page_count(struct page *page, long count)
1308{
1309 totalram_pages += count;
1310}
1311
1312/* Free the reserved page into the buddy system, so it gets managed. */
1313static inline void __free_reserved_page(struct page *page)
1314{
1315 ClearPageReserved(page);
1316 init_page_count(page);
1317 __free_page(page);
1318}
1319
1320static inline void free_reserved_page(struct page *page)
1321{
1322 __free_reserved_page(page);
1323 adjust_managed_page_count(page, 1);
1324}
1325
1326static inline void mark_page_reserved(struct page *page)
1327{
1328 SetPageReserved(page);
1329 adjust_managed_page_count(page, -1);
1330}
1331
1332/*
1333 * Default method to free all the __init memory into the buddy system.
1334 * The freed pages will be poisoned with pattern "poison" if it is
1335 * non-zero. Return pages freed into the buddy system.
1336 */
1337static inline unsigned long free_initmem_default(int poison)
1338{
1339 extern char __init_begin[], __init_end[];
1340
1341 return free_reserved_area(PAGE_ALIGN((unsigned long)&__init_begin) ,
1342 ((unsigned long)&__init_end) & PAGE_MASK,
1343 poison, "unused kernel");
1344}
1345
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index da7a2fe7332e..5c660f5ba3d3 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5121,6 +5121,26 @@ early_param("movablecore", cmdline_parse_movablecore);

#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

5124unsigned long free_reserved_area(unsigned long start, unsigned long end,
5125 int poison, char *s)
5126{
5127 unsigned long pages, pos;
5128
5129 pos = start = PAGE_ALIGN(start);
5130 end &= PAGE_MASK;
5131 for (pages = 0; pos < end; pos += PAGE_SIZE, pages++) {
5132 if (poison)
5133 memset((void *)pos, poison, PAGE_SIZE);
5134 free_reserved_page(virt_to_page(pos));
5135 }
5136
5137 if (pages && s)
5138 pr_info("Freeing %s memory: %ldK (%lx - %lx)\n",
5139 s, pages << (PAGE_SHIFT - 10), start, end);
5140
5141 return pages;
5142}
5143
/**
 * set_dma_reserve - set the specified number of pages reserved in the first zone
 * @new_dma_reserve: The number of pages to mark reserved