author		Yinghai Lu <yinghai@kernel.org>	2013-01-24 15:20:14 -0500
committer	H. Peter Anvin <hpa@linux.intel.com>	2013-01-29 22:32:59 -0500
commit		8b78c21d72d9dbcb7230e97423a2cd8d8402c20c (patch)
tree		5cea2fec0c2bea041399fcf8c64275a37f893111
parent		72212675d1c96f5db8ec6fb35701879911193158 (diff)
x86, 64bit, mm: hibernate use generic mapping_init
We should set mappings only for usable memory ranges under max_pfn,
otherwise we cause the same problem that was fixed by

    x86, mm: Only direct map addresses that are marked as E820_RAM

Make it map only the ranges in the pfn_mapped array.

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/1359058816-7615-34-git-send-email-yinghai@kernel.org
Cc: Pavel Machek <pavel@ucw.cz>
Cc: Rafael J. Wysocki <rjw@sisk.pl>
Cc: linux-pm@vger.kernel.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
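As an illustration only (not part of the patch itself), the new call pattern amounts to handing kernel_ident_mapping_init() an x86_mapping_info whose allocator returns hibernation-safe pages and walking only the pfn_mapped[] ranges. The helper name map_usable_ranges() below is hypothetical, and the sketch assumes the declarations come in through the same headers hibernate_64.c already includes:

	/* Sketch of the new approach; map_usable_ranges() is a made-up name. */
	#include <linux/gfp.h>
	#include <linux/suspend.h>
	#include <asm/init.h>
	#include <asm/page.h>
	#include <asm/pgtable.h>

	static void *alloc_pgt_page(void *context)
	{
		/* Temporary page tables must live in "safe" pages. */
		return (void *)get_safe_page(GFP_ATOMIC);
	}

	static int map_usable_ranges(pgd_t *pgd)
	{
		struct x86_mapping_info info = {
			.alloc_pgt_page	= alloc_pgt_page,
			.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
			.kernel_mapping	= true,
		};
		unsigned long mstart, mend;
		int result, i;

		/* Map only usable RAM recorded in pfn_mapped[], not 0..max_pfn. */
		for (i = 0; i < nr_pfn_mapped; i++) {
			mstart = pfn_mapped[i].start << PAGE_SHIFT;
			mend   = pfn_mapped[i].end << PAGE_SHIFT;

			result = kernel_ident_mapping_init(&info, pgd,
							   mstart, mend);
			if (result)
				return result;
		}
		return 0;
	}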
-rw-r--r--	arch/x86/power/hibernate_64.c	66
1 file changed, 22 insertions, 44 deletions
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
index 460f314d13e5..a0fde91c16cf 100644
--- a/arch/x86/power/hibernate_64.c
+++ b/arch/x86/power/hibernate_64.c
@@ -11,6 +11,8 @@
 #include <linux/gfp.h>
 #include <linux/smp.h>
 #include <linux/suspend.h>
+
+#include <asm/init.h>
 #include <asm/proto.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -39,41 +41,21 @@ pgd_t *temp_level4_pgt;
 
 void *relocated_restore_code;
 
-static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
+static void *alloc_pgt_page(void *context)
 {
-	long i, j;
-
-	i = pud_index(address);
-	pud = pud + i;
-	for (; i < PTRS_PER_PUD; pud++, i++) {
-		unsigned long paddr;
-		pmd_t *pmd;
-
-		paddr = address + i*PUD_SIZE;
-		if (paddr >= end)
-			break;
-
-		pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
-		if (!pmd)
-			return -ENOMEM;
-		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
-		for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
-			unsigned long pe;
-
-			if (paddr >= end)
-				break;
-			pe = __PAGE_KERNEL_LARGE_EXEC | paddr;
-			pe &= __supported_pte_mask;
-			set_pmd(pmd, __pmd(pe));
-		}
-	}
-	return 0;
+	return (void *)get_safe_page(GFP_ATOMIC);
 }
 
 static int set_up_temporary_mappings(void)
 {
-	unsigned long start, end, next;
-	int error;
+	struct x86_mapping_info info = {
+		.alloc_pgt_page	= alloc_pgt_page,
+		.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
+		.kernel_mapping = true,
+	};
+	unsigned long mstart, mend;
+	int result;
+	int i;
 
 	temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC);
 	if (!temp_level4_pgt)
@@ -84,21 +66,17 @@ static int set_up_temporary_mappings(void)
 		init_level4_pgt[pgd_index(__START_KERNEL_map)]);
 
 	/* Set up the direct mapping from scratch */
-	start = (unsigned long)pfn_to_kaddr(0);
-	end = (unsigned long)pfn_to_kaddr(max_pfn);
-
-	for (; start < end; start = next) {
-		pud_t *pud = (pud_t *)get_safe_page(GFP_ATOMIC);
-		if (!pud)
-			return -ENOMEM;
-		next = start + PGDIR_SIZE;
-		if (next > end)
-			next = end;
-		if ((error = res_phys_pud_init(pud, __pa(start), __pa(next))))
-			return error;
-		set_pgd(temp_level4_pgt + pgd_index(start),
-			mk_kernel_pgd(__pa(pud)));
+	for (i = 0; i < nr_pfn_mapped; i++) {
+		mstart = pfn_mapped[i].start << PAGE_SHIFT;
+		mend   = pfn_mapped[i].end << PAGE_SHIFT;
+
+		result = kernel_ident_mapping_init(&info, temp_level4_pgt,
+						   mstart, mend);
+
+		if (result)
+			return result;
 	}
+
 	return 0;
 }
 