author		Rick Edgecombe <rick.p.edgecombe@intel.com>	2019-04-25 20:11:35 -0400
committer	Ingo Molnar <mingo@kernel.org>			2019-04-30 06:37:57 -0400
commit		d63326928611600ad65baff54a70f53b02b3cdfe (patch)
tree		65cd76b106b5b15272b1960ca2ba600f9ff21571
parent		d253ca0c3865a8d9a8c01143cf20425e0be4d0ce (diff)
mm/hibernation: Make hibernation handle unmapped pages
Make hibernate handle unmapped pages on the direct map when
CONFIG_ARCH_HAS_SET_DIRECT_MAP=y is set. The set_direct_map_*()
functions allow pages to be left in invalid (unmapped) states, so
hibernate must now check whether a page has a valid mapping and handle
unmapped pages when doing a save operation.
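
For context, the resulting copy logic in kernel/power/snapshot.c looks
roughly like this (a sketch of the function whose comment this patch
updates, not part of the diff itself):

	static void safe_copy_page(void *dst, struct page *s_page)
	{
		if (kernel_page_present(s_page)) {
			do_copy_page(dst, page_address(s_page));
		} else {
			/* Temporarily map the page, copy it, unmap again. */
			kernel_map_pages(s_page, 1, 1);
			do_copy_page(dst, page_address(s_page));
			kernel_map_pages(s_page, 1, 0);
		}
	}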
Previously this check was done only when CONFIG_DEBUG_PAGEALLOC=y was
configured. It does not appear to have a significant impact on
hibernation performance: the save operation was measured at 819.02 MB/s
before this change and 813.32 MB/s after.
Before:
[ 4.670938] PM: Wrote 171996 kbytes in 0.21 seconds (819.02 MB/s)
After:
[ 4.504714] PM: Wrote 178932 kbytes in 0.22 seconds (813.32 MB/s)
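(The reported rate is simply size over time: 171996 kB / 0.21 s ≈ 819.0
MB/s before versus 178932 kB / 0.22 s ≈ 813.3 MB/s after, a slowdown of
well under 1%.)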
Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Pavel Machek <pavel@ucw.cz>
Cc: <akpm@linux-foundation.org>
Cc: <ard.biesheuvel@linaro.org>
Cc: <deneen.t.dock@intel.com>
Cc: <kernel-hardening@lists.openwall.com>
Cc: <kristen@linux.intel.com>
Cc: <linux_dti@icloud.com>
Cc: <will.deacon@arm.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Nadav Amit <nadav.amit@gmail.com>
Cc: Rafael J. Wysocki <rjw@rjwysocki.net>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/20190426001143.4983-16-namit@vmware.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
 arch/x86/mm/pageattr.c  |  4 ----
 include/linux/mm.h      | 18 ++++++------------
 kernel/power/snapshot.c |  5 +++--
 mm/page_alloc.c         |  7 +++++--
 4 files changed, 14 insertions(+), 20 deletions(-)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 3574550192c6..daf4d645e537 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -2257,7 +2257,6 @@ int set_direct_map_default_noflush(struct page *page)
 	return __set_pages_p(page, 1);
 }
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
 void __kernel_map_pages(struct page *page, int numpages, int enable)
 {
 	if (PageHighMem(page))
@@ -2302,11 +2301,8 @@ bool kernel_page_present(struct page *page)
 	pte = lookup_address((unsigned long)page_address(page), &level);
 	return (pte_val(*pte) & _PAGE_PRESENT);
 }
-
 #endif /* CONFIG_HIBERNATION */
 
-#endif /* CONFIG_DEBUG_PAGEALLOC */
-
 int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
 			    unsigned numpages, unsigned long page_flags)
 {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6b10c21630f5..083d7b4863ed 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2610,37 +2610,31 @@ static inline void kernel_poison_pages(struct page *page, int numpages,
 					int enable) { }
 #endif
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
 extern bool _debug_pagealloc_enabled;
-extern void __kernel_map_pages(struct page *page, int numpages, int enable);
 
 static inline bool debug_pagealloc_enabled(void)
 {
-	return _debug_pagealloc_enabled;
+	return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) && _debug_pagealloc_enabled;
 }
 
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP)
+extern void __kernel_map_pages(struct page *page, int numpages, int enable);
+
 static inline void
 kernel_map_pages(struct page *page, int numpages, int enable)
 {
-	if (!debug_pagealloc_enabled())
-		return;
-
 	__kernel_map_pages(page, numpages, enable);
 }
 #ifdef CONFIG_HIBERNATION
 extern bool kernel_page_present(struct page *page);
 #endif /* CONFIG_HIBERNATION */
-#else	/* CONFIG_DEBUG_PAGEALLOC */
+#else	/* CONFIG_DEBUG_PAGEALLOC || CONFIG_ARCH_HAS_SET_DIRECT_MAP */
 static inline void
 kernel_map_pages(struct page *page, int numpages, int enable) {}
 #ifdef CONFIG_HIBERNATION
 static inline bool kernel_page_present(struct page *page) { return true; }
 #endif /* CONFIG_HIBERNATION */
-static inline bool debug_pagealloc_enabled(void)
-{
-	return false;
-}
-#endif /* CONFIG_DEBUG_PAGEALLOC */
+#endif /* CONFIG_DEBUG_PAGEALLOC || CONFIG_ARCH_HAS_SET_DIRECT_MAP */
 
 #ifdef __HAVE_ARCH_GATE_AREA
 extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
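
The rewritten debug_pagealloc_enabled() relies on IS_ENABLED() folding
to a compile-time constant: when CONFIG_DEBUG_PAGEALLOC is off, the &&
short-circuits to 0 and the compiler can discard the guarded calls,
making the old #else stub unnecessary. A minimal user-space analogue of
the pattern (hypothetical macro and names, for illustration only):

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-in for the Kconfig-generated macro: 1 if built in, 0 if not. */
	#define MY_FEATURE_BUILTIN 0	/* hypothetical */

	static bool _my_feature_enabled = true;	/* runtime switch */

	static inline bool my_feature_enabled(void)
	{
		/* A constant 0 short-circuits; dead branches get dropped. */
		return MY_FEATURE_BUILTIN && _my_feature_enabled;
	}

	int main(void)
	{
		if (my_feature_enabled())
			printf("feature active\n");
		else
			printf("feature compiled out or disabled\n");
		return 0;
	}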
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index f08a1e4ee1d4..bc9558ab1e5b 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1342,8 +1342,9 @@ static inline void do_copy_page(long *dst, long *src)
  * safe_copy_page - Copy a page in a safe way.
  *
  * Check if the page we are going to copy is marked as present in the kernel
- * page tables (this always is the case if CONFIG_DEBUG_PAGEALLOC is not set
- * and in that case kernel_page_present() always returns 'true').
+ * page tables. This always is the case if CONFIG_DEBUG_PAGEALLOC or
+ * CONFIG_ARCH_HAS_SET_DIRECT_MAP is not set. In that case kernel_page_present()
+ * always returns 'true'.
  */
 static void safe_copy_page(void *dst, struct page *s_page)
 {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c02cff1ed56e..59661106da16 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1144,7 +1144,9 @@ static __always_inline bool free_pages_prepare(struct page *page,
 	}
 	arch_free_page(page, order);
 	kernel_poison_pages(page, 1 << order, 0);
-	kernel_map_pages(page, 1 << order, 0);
+	if (debug_pagealloc_enabled())
+		kernel_map_pages(page, 1 << order, 0);
+
 	kasan_free_nondeferred_pages(page, order);
 
 	return true;
@@ -2014,7 +2016,8 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
 	set_page_refcounted(page);
 
 	arch_alloc_page(page, order);
-	kernel_map_pages(page, 1 << order, 1);
+	if (debug_pagealloc_enabled())
+		kernel_map_pages(page, 1 << order, 1);
 	kasan_alloc_pages(page, order);
 	kernel_poison_pages(page, 1 << order, 1);
 	set_page_owner(page, order, gfp_flags);
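
With the runtime check hoisted to these two call sites,
kernel_map_pages() becomes an unconditional wrapper around
__kernel_map_pages(). That is what lets hibernate's safe_copy_page()
(sketched above) temporarily restore a direct-map entry even when the
debug_pagealloc runtime switch is off, which is exactly the situation
CONFIG_ARCH_HAS_SET_DIRECT_MAP introduces.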