author     Nathan Zimmer <nzimmer@sgi.com>                    2015-06-30 17:56:48 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>     2015-06-30 22:44:55 -0400
commit     92923ca3aacef63c92dc297a75ad0c6dfe4eab37 (patch)
tree       586db6346196f978ec9e55da7e9d525235436ab2 /mm/page_alloc.c
parent     1e8ce83cd17fd0f549a7ad145ddd2bfcdd7dfe37 (diff)
mm: meminit: only set page reserved in the memblock region
Currently each page struct is set as reserved upon initialization. This patch leaves the reserved bit clear and only sets the reserved bit when it is known the memory was allocated by the bootmem allocator. This makes it easier to distinguish between uninitialised struct pages and reserved struct pages in later patches.

Signed-off-by: Robin Holt <holt@sgi.com>
Signed-off-by: Nathan Zimmer <nzimmer@sgi.com>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Tested-by: Nate Zimmer <nzimmer@sgi.com>
Tested-by: Waiman Long <waiman.long@hp.com>
Tested-by: Daniel J Blueman <daniel@numascale.com>
Acked-by: Pekka Enberg <penberg@kernel.org>
Cc: Robin Holt <robinmholt@gmail.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Waiman Long <waiman.long@hp.com>
Cc: Scott Norton <scott.norton@hp.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
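Note that the diff below is limited to mm/page_alloc.c, so the call sites that feed the new reserve_bootmem_region() helper are not visible on this page. As a rough sketch only (the wrapper function name is made up for illustration; the real call site lives in the bootmem/nobootmem side of the series), a boot-time caller would walk memblock's reserved ranges and pass each one to the helper:

	/*
	 * Sketch only, not a hunk from this commit: walk every memblock
	 * reserved region and mark its struct pages PageReserved, since
	 * __init_single_page() no longer sets the bit unconditionally.
	 */
	static void __init mark_bootmem_regions_reserved(void)
	{
		phys_addr_t start, end;
		u64 i;

		for_each_reserved_mem_region(i, &start, &end)
			reserve_bootmem_region(start, end);
	}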
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--   mm/page_alloc.c   17
1 file changed, 16 insertions(+), 1 deletion(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index bc5da2cdfc84..39c8d56a4056 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -774,7 +774,6 @@ static void __meminit __init_single_page(struct page *page, unsigned long pfn,
 	init_page_count(page);
 	page_mapcount_reset(page);
 	page_cpupid_reset_last(page);
-	SetPageReserved(page);
 
 	/*
 	 * Mark the block movable so that blocks are reserved for
@@ -809,6 +808,22 @@ static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
 	return __init_single_page(pfn_to_page(pfn), pfn, zone, nid);
 }
 
+/*
+ * Initialised pages do not have PageReserved set. This function is
+ * called for each range allocated by the bootmem allocator and
+ * marks the pages PageReserved. The remaining valid pages are later
+ * sent to the buddy page allocator.
+ */
+void reserve_bootmem_region(unsigned long start, unsigned long end)
+{
+	unsigned long start_pfn = PFN_DOWN(start);
+	unsigned long end_pfn = PFN_UP(end);
+
+	for (; start_pfn < end_pfn; start_pfn++)
+		if (pfn_valid(start_pfn))
+			SetPageReserved(pfn_to_page(start_pfn));
+}
+
 static bool free_pages_prepare(struct page *page, unsigned int order)
 {
 	bool compound = PageCompound(page);
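One detail of the new helper worth noting: PFN_DOWN(start) rounds the start address down to its page frame number and PFN_UP(end) rounds the end address up, so a reserved byte range that only partially overlaps a page frame at either edge still gets that frame's struct page marked PageReserved. For example, with 4 KiB pages a reserved range of [0x1800, 0x3200) gives start_pfn = 1 and end_pfn = 4, so pfns 1 through 3 are marked reserved.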