author    Pavel Tatashin <pasha.tatashin@oracle.com>  2017-11-15 20:36:31 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-11-15 21:21:05 -0500
commit    a4a3ede2132ae0863e2d43e06f9b5697c51a7a3b (patch)
tree      140f54f3d41eee12a7464f69954c0f44d50b722c /mm/page_alloc.c
parent    ea1f5f3712afe895dfa4176ec87376b4a9ac23be (diff)
mm: zero reserved and unavailable struct pages
Some memory is reserved but unavailable: it is not present in memblock.memory (because it is not backed by physical pages), but it is present in memblock.reserved. Such memory has backing struct pages, but they are never initialized by going through __init_single_page(). In some cases these struct pages are accessed even though they do not contain any data. For example, page_to_pfn() may access page->flags if that is where the section information is stored (CONFIG_SPARSEMEM, SECTION_IN_PAGE_FLAGS).

One example of such memory: trim_low_memory_range() unconditionally reserves from pfn 0, but e820__memblock_setup() might provide the existing memory starting from pfn 1 (e.g. under KVM). Since struct pages are zeroed in __init_single_page(), and not at allocation time, we must zero such struct pages explicitly.

The patch adds a new memblock iterator:

  for_each_resv_unavail_range(i, p_start, p_end)

which iterates through ranges that are in the "reserved" list but not in the "memory" list, and we zero the corresponding struct pages explicitly by calling mm_zero_struct_page().

Here is a more detailed example of the problem that this patch is addressing. The test was run on qemu with the following arguments:

  -enable-kvm -cpu kvm64 -m 512 -smp 2

This patch reports that there are 98 unavailable pages: pfn 0 and the pfns in the range [159, 255]. Note that trim_low_memory_range() reserves only the pfns in the range [0, 15]; it does not reserve the [159, 255] ones.

e820__memblock_setup() reports to Linux that the following physical pfn ranges are available:

  [1, 158]
  [256, 130783]

Notice that exactly the unavailable pfns are missing!

Now, let's check what we have in zone 0: [1, 131039]. Pfn 0 is not part of the zone, but pfns [1, 158] are.

However, the bigger problem we have if we do not initialize these struct pages is with memory hotplug. That path operates at 2M boundaries (section_nr) and checks whether a 2M range of pages is hot-removable. It starts with the first pfn of the zone, rounds it down to a 2M boundary (struct pages are allocated at 2M boundaries when the vmemmap is created), and checks whether that section is hot-removable. In this case we start with pfn 1 and round it down to pfn 0. Later the pfn is converted to a struct page, and some of its fields are checked. If we do not zero struct pages, we get unpredictable results. In fact, when CONFIG_DEBUG_VM is enabled and all vmemmap memory is explicitly set to ones, the following panic is observed in a kernel test without this patch applied:

  BUG: unable to handle kernel NULL pointer dereference at (null)
  IP: is_pageblock_removable_nolock+0x35/0x90
  PGD 0 P4D 0
  Oops: 0000 [#1] PREEMPT
  ...
  task: ffff88001f4e2900 task.stack: ffffc90000314000
  RIP: 0010:is_pageblock_removable_nolock+0x35/0x90
  Call Trace:
   ? is_mem_section_removable+0x5a/0xd0
   show_mem_removable+0x6b/0xa0
   dev_attr_show+0x1b/0x50
   sysfs_kf_seq_show+0xa1/0x100
   kernfs_seq_show+0x22/0x30
   seq_read+0x1ac/0x3a0
   kernfs_fop_read+0x36/0x190
   ? security_file_permission+0x90/0xb0
   __vfs_read+0x16/0x30
   vfs_read+0x81/0x130
   SyS_read+0x44/0xa0
   entry_SYSCALL_64_fastpath+0x1f/0xbd

Link: http://lkml.kernel.org/r/20171013173214.27300-7-pasha.tatashin@oracle.com
Signed-off-by: Pavel Tatashin <pasha.tatashin@oracle.com>
Reviewed-by: Steven Sistare <steven.sistare@oracle.com>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Reviewed-by: Bob Picco <bob.picco@oracle.com>
Tested-by: Bob Picco <bob.picco@oracle.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Sam Ravnborg <sam@ravnborg.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
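For reference, the iterator and the zeroing helper named above live in header changes that are not part of the mm/page_alloc.c diff shown below. A minimal sketch of how they fit together, reconstructed from memory of the include/linux/memblock.h and include/linux/mm.h pieces of this series; treat the exact argument list of the underlying for_each_mem_range() call as an assumption:

  /*
   * Sketch: walk every range that is in memblock.reserved but not in
   * memblock.memory, i.e. "reserved && !memory".
   */
  #define for_each_resv_unavail_range(i, p_start, p_end)                       \
          for_each_mem_range(i, &memblock.reserved, &memblock.memory,          \
                             NUMA_NO_NODE, MEMBLOCK_NONE, p_start, p_end, NULL)

  /*
   * Sketch: zeroing a single struct page is a plain memset here; it is kept
   * behind a macro so an architecture could later supply an optimized variant.
   */
  #define mm_zero_struct_page(pp)  ((void)memset((pp), 0, sizeof(struct page)))

With these pieces, zero_resv_unavail() in the diff below simply walks the reserved-but-unavailable ranges and zeroes each struct page they cover.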
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c | 40
1 file changed, 40 insertions(+), 0 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 805f30dd1c26..c37343ef2889 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6215,6 +6215,44 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
         free_area_init_core(pgdat);
 }
 
+#ifdef CONFIG_HAVE_MEMBLOCK
+/*
+ * Only struct pages that are backed by physical memory are zeroed and
+ * initialized by going through __init_single_page(). But, there are some
+ * struct pages which are reserved in memblock allocator and their fields
+ * may be accessed (for example page_to_pfn() on some configuration accesses
+ * flags). We must explicitly zero those struct pages.
+ */
+void __paginginit zero_resv_unavail(void)
+{
+        phys_addr_t start, end;
+        unsigned long pfn;
+        u64 i, pgcnt;
+
+        /*
+         * Loop through ranges that are reserved, but do not have reported
+         * physical memory backing.
+         */
+        pgcnt = 0;
+        for_each_resv_unavail_range(i, &start, &end) {
+                for (pfn = PFN_DOWN(start); pfn < PFN_UP(end); pfn++) {
+                        mm_zero_struct_page(pfn_to_page(pfn));
+                        pgcnt++;
+                }
+        }
+
+        /*
+         * Struct pages that do not have backing memory. This could be because
+         * firmware is using some of this memory, or for some other reasons.
+         * Once memblock is changed so such behaviour is not allowed: i.e.
+         * list of "reserved" memory must be a subset of list of "memory", then
+         * this code can be removed.
+         */
+        if (pgcnt)
+                pr_info("Reserved but unavailable: %lld pages", pgcnt);
+}
+#endif /* CONFIG_HAVE_MEMBLOCK */
+
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 
 #if MAX_NUMNODES > 1
@@ -6638,6 +6676,7 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
                 node_set_state(nid, N_MEMORY);
                 check_for_memory(pgdat, nid);
         }
+        zero_resv_unavail();
 }
 
 static int __init cmdline_parse_core(char *p, unsigned long *core)
@@ -6801,6 +6840,7 @@ void __init free_area_init(unsigned long *zones_size)
 {
         free_area_init_node(0, zones_size,
                         __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
+        zero_resv_unavail();
 }
 
 static int page_alloc_cpu_dead(unsigned int cpu)
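Note that both call sites added above, in free_area_init_nodes() and free_area_init(), invoke zero_resv_unavail() unconditionally, while the function body is only compiled when CONFIG_HAVE_MEMBLOCK is set. That builds because the declaration side provides a no-op fallback; a rough sketch of that header piece, recalled from the rest of this commit rather than shown on this page, so the exact form is an assumption:

  /* include/linux/mm.h (sketch) */
  #ifdef CONFIG_HAVE_MEMBLOCK
  /* Zero struct pages for pfns that are reserved but not backed by memory. */
  void zero_resv_unavail(void);
  #else
  /* Without memblock there are no such ranges to walk; do nothing. */
  static inline void zero_resv_unavail(void) {}
  #endif

After boot on an affected configuration (e.g. the qemu setup from the commit message), the new pr_info() line can be checked with something like 'dmesg | grep "Reserved but unavailable"', which should report the 98 pages discussed above.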