| author | Heiko Carstens <heiko.carstens@de.ibm.com> | 2009-09-22 16:58:44 -0400 |
|---|---|---|
| committer | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2009-09-22 16:58:44 -0400 |
| commit | 846955c8afe5ebca2f8841b042ca3342e08a092b (patch) | |
| tree | 85584d6f009932c18e55562c3303c6331aebe0ca /arch/s390 | |
| parent | 2e50195f58ec045bc4601ec94478d957974f4aa4 (diff) | |
[S390] hibernation: fix guest page hinting related crash
On resume, the system that loads the image to be resumed might have
unstable pages.
When the resume image is copied back and a write access happens to an
unstable page, this causes an exception and the system crashes.
To fix this, set all free pages to stable before copying the resume
image data. After everything has been restored, set all free pages of
the resumed system back to unstable.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
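For readers not fluent in s390 assembly: in the s390 ELF ABI the first integer argument is passed in %r2, so the "lghi %r2,1; brasl %r14,arch_set_page_states" sequences added below are plain calls to arch_set_page_states(1) and arch_set_page_states(0). The following minimal C sketch (not part of the patch; copy_back_image() is a hypothetical stand-in for the copy-back loop that swsusp_arch_resume performs in assembly) shows the ordering the patch establishes:

    /*
     * Illustrative sketch only -- the real ordering is implemented in
     * assembly in swsusp_arch_resume(); copy_back_image() is a
     * hypothetical placeholder for the image copy-back loop.
     */
    static void resume_ordering_sketch(void)
    {
    	/* Make all free pages stable so copying the image back never
    	 * writes to an unstable page (which would fault under guest
    	 * page hinting). */
    	arch_set_page_states(1);

    	copy_back_image();	/* hypothetical: restore saved page contents */

    	/* Image restored: return the resumed system's free pages to
    	 * the unstable state expected by guest page hinting. */
    	arch_set_page_states(0);
    }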
Diffstat (limited to 'arch/s390')

| -rw-r--r-- | arch/s390/kernel/swsusp_asm64.S | 7 |
| -rw-r--r-- | arch/s390/mm/page-states.c | 52 |

2 files changed, 51 insertions(+), 8 deletions(-)
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S
index 7cd6b096f0d1..9a86ccb91a8a 100644
--- a/arch/s390/kernel/swsusp_asm64.S
+++ b/arch/s390/kernel/swsusp_asm64.S
@@ -102,6 +102,9 @@ swsusp_arch_resume:
 	aghi	%r15,-STACK_FRAME_OVERHEAD
 	stg	%r1,__SF_BACKCHAIN(%r15)
 
+	/* Make all free pages stable */
+	lghi	%r2,1
+	brasl	%r14,arch_set_page_states
 #ifdef CONFIG_SMP
 	/* Save boot cpu number */
 	brasl	%r14,smp_get_phys_cpu_id
@@ -178,6 +181,10 @@ swsusp_arch_resume:
 	/* Activate DAT */
 	stosm	__SF_EMPTY(%r15),0x04
 
+	/* Make all free pages unstable */
+	lghi	%r2,0
+	brasl	%r14,arch_set_page_states
+
 	/* Return 0 */
 	lmg	%r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
 	lghi	%r2,0
diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
index f92ec203ad92..098923ae458f 100644
--- a/arch/s390/mm/page-states.c
+++ b/arch/s390/mm/page-states.c
@@ -50,28 +50,64 @@ void __init cmma_init(void)
 	cmma_flag = 0;
 }
 
-void arch_free_page(struct page *page, int order)
+static inline void set_page_unstable(struct page *page, int order)
 {
 	int i, rc;
 
-	if (!cmma_flag)
-		return;
 	for (i = 0; i < (1 << order); i++)
 		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
 			     : "=&d" (rc)
-			     : "a" ((page_to_pfn(page) + i) << PAGE_SHIFT),
+			     : "a" (page_to_phys(page + i)),
 			       "i" (ESSA_SET_UNUSED));
 }
 
-void arch_alloc_page(struct page *page, int order)
+void arch_free_page(struct page *page, int order)
 {
-	int i, rc;
-
 	if (!cmma_flag)
 		return;
+	set_page_unstable(page, order);
+}
+
+static inline void set_page_stable(struct page *page, int order)
+{
+	int i, rc;
+
 	for (i = 0; i < (1 << order); i++)
 		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
 			     : "=&d" (rc)
-			     : "a" ((page_to_pfn(page) + i) << PAGE_SHIFT),
+			     : "a" (page_to_phys(page + i)),
 			       "i" (ESSA_SET_STABLE));
 }
+
+void arch_alloc_page(struct page *page, int order)
+{
+	if (!cmma_flag)
+		return;
+	set_page_stable(page, order);
+}
+
+void arch_set_page_states(int make_stable)
+{
+	unsigned long flags, order, t;
+	struct list_head *l;
+	struct page *page;
+	struct zone *zone;
+
+	if (!cmma_flag)
+		return;
+	if (make_stable)
+		drain_local_pages(NULL);
+	for_each_populated_zone(zone) {
+		spin_lock_irqsave(&zone->lock, flags);
+		for_each_migratetype_order(order, t) {
+			list_for_each(l, &zone->free_area[order].free_list[t]) {
+				page = list_entry(l, struct page, lru);
+				if (make_stable)
+					set_page_stable(page, order);
+				else
+					set_page_unstable(page, order);
+			}
+		}
+		spin_unlock_irqrestore(&zone->lock, flags);
+	}
+}