author		Heiko Carstens <heiko.carstens@de.ibm.com>	2009-09-22 16:58:44 -0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2009-09-22 16:58:44 -0400
commit		846955c8afe5ebca2f8841b042ca3342e08a092b (patch)
tree		85584d6f009932c18e55562c3303c6331aebe0ca /arch/s390/mm
parent		2e50195f58ec045bc4601ec94478d957974f4aa4 (diff)
[S390] hibernation: fix guest page hinting related crash
On resume, the system that loads the to-be-resumed image might have unstable pages. When the resume image is copied back and a write access happens to an unstable page, this causes an exception and the system crashes.

To fix this, set all free pages to stable before copying the resumed image data. After everything has been restored, set all free pages of the resumed system back to unstable.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
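The core of the fix is the new arch_set_page_states() helper added in this patch: calling it with 1 marks every free page stable before the saved image is written back, and calling it with 0 hands the free pages of the resumed system back to the hypervisor as unstable. Below is a minimal sketch of how the s390 hibernation path could drive the helper; the surrounding function names are hypothetical placeholders, since the real call sites live outside arch/s390/mm and are not shown in this filtered diff.

	/* Sketch only: the actual call sites are in the s390 hibernation
	 * code, which this view (limited to arch/s390/mm) does not show.
	 * The two wrapper functions below are hypothetical. */

	void example_prepare_image_restore(void)	/* hypothetical */
	{
		/* Boot kernel, before the saved image is copied back:
		 * make all free pages stable so writing into them cannot
		 * fault on a page the hypervisor has discarded. */
		arch_set_page_states(1);
	}

	void example_finish_resume(void)		/* hypothetical */
	{
		/* Resumed kernel, after restore has completed: mark its
		 * free pages unstable again so guest page hinting keeps
		 * working as before. */
		arch_set_page_states(0);
	}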
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--	arch/s390/mm/page-states.c | 52
1 file changed, 44 insertions(+), 8 deletions(-)
diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
index f92ec203ad92..098923ae458f 100644
--- a/arch/s390/mm/page-states.c
+++ b/arch/s390/mm/page-states.c
@@ -50,28 +50,64 @@ void __init cmma_init(void)
 	cmma_flag = 0;
 }
 
-void arch_free_page(struct page *page, int order)
+static inline void set_page_unstable(struct page *page, int order)
 {
 	int i, rc;
 
-	if (!cmma_flag)
-		return;
 	for (i = 0; i < (1 << order); i++)
 		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
 			     : "=&d" (rc)
-			     : "a" ((page_to_pfn(page) + i) << PAGE_SHIFT),
+			     : "a" (page_to_phys(page + i)),
 			       "i" (ESSA_SET_UNUSED));
 }
 
-void arch_alloc_page(struct page *page, int order)
+void arch_free_page(struct page *page, int order)
 {
-	int i, rc;
-
 	if (!cmma_flag)
 		return;
+	set_page_unstable(page, order);
+}
+
+static inline void set_page_stable(struct page *page, int order)
+{
+	int i, rc;
+
 	for (i = 0; i < (1 << order); i++)
 		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
 			     : "=&d" (rc)
-			     : "a" ((page_to_pfn(page) + i) << PAGE_SHIFT),
+			     : "a" (page_to_phys(page + i)),
 			       "i" (ESSA_SET_STABLE));
 }
+
+void arch_alloc_page(struct page *page, int order)
+{
+	if (!cmma_flag)
+		return;
+	set_page_stable(page, order);
+}
+
+void arch_set_page_states(int make_stable)
+{
+	unsigned long flags, order, t;
+	struct list_head *l;
+	struct page *page;
+	struct zone *zone;
+
+	if (!cmma_flag)
+		return;
+	if (make_stable)
+		drain_local_pages(NULL);
+	for_each_populated_zone(zone) {
+		spin_lock_irqsave(&zone->lock, flags);
+		for_each_migratetype_order(order, t) {
+			list_for_each(l, &zone->free_area[order].free_list[t]) {
+				page = list_entry(l, struct page, lru);
+				if (make_stable)
+					set_page_stable(page, order);
+				else
+					set_page_unstable(page, order);
+			}
+		}
+		spin_unlock_irqrestore(&zone->lock, flags);
+	}
+}