Diffstat (limited to 'kernel/power/snapshot.c')
-rw-r--r--  kernel/power/snapshot.c  62
1 file changed, 18 insertions(+), 44 deletions(-)
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 0de28576807..06efa54f93d 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -711,10 +711,9 @@ static void mark_nosave_pages(struct memory_bitmap *bm)
 	list_for_each_entry(region, &nosave_regions, list) {
 		unsigned long pfn;
 
-		pr_debug("PM: Marking nosave pages: [mem %#010llx-%#010llx]\n",
-			 (unsigned long long) region->start_pfn << PAGE_SHIFT,
-			 ((unsigned long long) region->end_pfn << PAGE_SHIFT)
-				- 1);
+		pr_debug("PM: Marking nosave pages: %016lx - %016lx\n",
+			 region->start_pfn << PAGE_SHIFT,
+			 region->end_pfn << PAGE_SHIFT);
 
 		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
 			if (pfn_valid(pfn)) {
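The '-' side of the hunk above widens each pfn to unsigned long long before shifting. A minimal standalone sketch (ordinary user-space C, not from this file, assuming PAGE_SHIFT == 12 as on x86) of why that matters on 32-bit kernels, where a page frame at or above 4 GiB overflows unsigned long once shifted:

    #include <stdio.h>

    int main(void)
    {
            /* pfn 0x100000 corresponds to physical address 4 GiB,
             * which no longer fits in a 32-bit unsigned long once
             * shifted left by PAGE_SHIFT. */
            unsigned long pfn = 0x100000UL;

            /* On a 32-bit build the narrow shift wraps to 0. */
            printf("narrow: %#010lx\n", pfn << 12);
            /* Widening first preserves the full physical address. */
            printf("wide:   %#010llx\n", (unsigned long long)pfn << 12);
            return 0;
    }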
@@ -813,8 +812,7 @@ unsigned int snapshot_additional_pages(struct zone *zone)
 	unsigned int res;
 
 	res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
-	res += DIV_ROUND_UP(res * sizeof(struct bm_block),
-			    LINKED_PAGE_DATA_SIZE);
+	res += DIV_ROUND_UP(res * sizeof(struct bm_block), PAGE_SIZE);
 	return 2 * res;
 }
 
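The '-' side here sizes the bm_block array against LINKED_PAGE_DATA_SIZE rather than PAGE_SIZE because snapshot.c chains its metadata pages together, and each chained page gives up room for a next pointer. For reference, the relevant definitions elsewhere in snapshot.c look roughly like this (reproduced from memory, so treat as a sketch):

    #define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))

    struct linked_page {
            struct linked_page *next;
            char data[LINKED_PAGE_DATA_SIZE];
    } __attribute__((packed));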
@@ -860,9 +858,6 @@ static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
 	    PageReserved(page))
 		return NULL;
 
-	if (page_is_guard(page))
-		return NULL;
-
 	return page;
 }
 
@@ -925,9 +920,6 @@ static struct page *saveable_page(struct zone *zone, unsigned long pfn)
 	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
 		return NULL;
 
-	if (page_is_guard(page))
-		return NULL;
-
 	return page;
 }
 
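This hunk and the previous one drop the same page_is_guard() test from the two saveable-page predicates. The helper detects guard pages inserted by CONFIG_DEBUG_PAGEALLOC; such pages are intentionally unmapped and must be skipped while building the hibernation image. Roughly, as a sketch of the mm.h helper of that era (not verbatim):

    #ifdef CONFIG_DEBUG_PAGEALLOC
    static inline bool page_is_guard(struct page *page)
    {
            return test_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
    }
    #else
    static inline bool page_is_guard(struct page *page)
    {
            return false;
    }
    #endif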
@@ -1001,20 +993,20 @@ static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
 	s_page = pfn_to_page(src_pfn);
 	d_page = pfn_to_page(dst_pfn);
 	if (PageHighMem(s_page)) {
-		src = kmap_atomic(s_page);
-		dst = kmap_atomic(d_page);
+		src = kmap_atomic(s_page, KM_USER0);
+		dst = kmap_atomic(d_page, KM_USER1);
 		do_copy_page(dst, src);
-		kunmap_atomic(dst);
-		kunmap_atomic(src);
+		kunmap_atomic(dst, KM_USER1);
+		kunmap_atomic(src, KM_USER0);
 	} else {
 		if (PageHighMem(d_page)) {
 			/* Page pointed to by src may contain some kernel
 			 * data modified by kmap_atomic()
 			 */
 			safe_copy_page(buffer, s_page);
-			dst = kmap_atomic(d_page);
+			dst = kmap_atomic(d_page, KM_USER0);
 			copy_page(dst, buffer);
-			kunmap_atomic(dst);
+			kunmap_atomic(dst, KM_USER0);
 		} else {
 			safe_copy_page(page_address(d_page), s_page);
 		}
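The kmap_atomic() differences here, and in the later snapshot_read_next, copy_last_highmem_page and swap_two_pages_data hunks, are the same API change throughout: the '+' side uses the old slot-based interface, where the caller names a KM_* fixmap slot and simultaneous mappings need distinct slots, while the '-' side uses the stack-based interface ("mm: stack based kmap_atomic()", merged in 2.6.37), where slots are pushed and popped implicitly per CPU. Schematically (a kernel-style sketch, not compilable on its own):

    /* Old slot-based API: the caller picks the fixmap slot, and two
     * concurrent mappings need two distinct slots. */
    src = kmap_atomic(s_page, KM_USER0);
    dst = kmap_atomic(d_page, KM_USER1);
    do_copy_page(dst, src);
    kunmap_atomic(dst, KM_USER1);
    kunmap_atomic(src, KM_USER0);

    /* Stack-based API: slots are managed implicitly, so the only
     * rule is to unmap in the reverse order of mapping. */
    src = kmap_atomic(s_page);
    dst = kmap_atomic(d_page);
    do_copy_page(dst, src);
    kunmap_atomic(dst);
    kunmap_atomic(src);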
@@ -1347,9 +1339,6 @@ int hibernate_preallocate_memory(void)
 	count += highmem;
 	count -= totalreserve_pages;
 
-	/* Add number of pages required for page keys (s390 only). */
-	size += page_key_additional_pages(saveable);
-
 	/* Compute the maximum number of saveable pages to leave in memory. */
 	max_size = (count - (size + PAGES_FOR_IO)) / 2
 			- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
@@ -1673,8 +1662,6 @@ pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
 		buf[j] = memory_bm_next_pfn(bm);
 		if (unlikely(buf[j] == BM_END_OF_MAP))
 			break;
-		/* Save page key for data page (s390 only). */
-		page_key_read(buf + j);
 	}
 }
 
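This hunk and the other page_key_*() hunks below (in hibernate_preallocate_memory, unpack_orig_pfns, snapshot_write_next and snapshot_write_finalize) all belong to a single feature: saving and restoring s390 storage keys alongside the image data. On architectures that do not select the feature, the helpers compile away to no-op stubs, roughly as declared in kernel/power/power.h (a sketch, assuming the CONFIG_ARCH_SAVE_PAGE_KEYS guard used by s390):

    #ifndef CONFIG_ARCH_SAVE_PAGE_KEYS
    static inline unsigned long page_key_additional_pages(unsigned long pages)
    {
            return 0;
    }
    static inline int page_key_alloc(unsigned long pages) { return 0; }
    static inline void page_key_free(void) {}
    static inline void page_key_read(unsigned long *pfn) {}
    static inline void page_key_memorize(unsigned long *pfn) {}
    static inline void page_key_write(void *address) {}
    #endif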
@@ -1729,9 +1716,9 @@ int snapshot_read_next(struct snapshot_handle *handle)
 			 */
 			void *kaddr;
 
-			kaddr = kmap_atomic(page);
+			kaddr = kmap_atomic(page, KM_USER0);
 			copy_page(buffer, kaddr);
-			kunmap_atomic(kaddr);
+			kunmap_atomic(kaddr, KM_USER0);
 			handle->buffer = buffer;
 		} else {
 			handle->buffer = page_address(page);
@@ -1834,9 +1821,6 @@ static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
 		if (unlikely(buf[j] == BM_END_OF_MAP))
 			break;
 
-		/* Extract and buffer page key for data page (s390 only). */
-		page_key_memorize(buf + j);
-
 		if (memory_bm_pfn_present(bm, buf[j]))
 			memory_bm_set_bit(bm, buf[j]);
 		else
@@ -2015,9 +1999,9 @@ static void copy_last_highmem_page(void)
 	if (last_highmem_page) {
 		void *dst;
 
-		dst = kmap_atomic(last_highmem_page);
+		dst = kmap_atomic(last_highmem_page, KM_USER0);
 		copy_page(dst, buffer);
-		kunmap_atomic(dst);
+		kunmap_atomic(dst, KM_USER0);
 		last_highmem_page = NULL;
 	}
 }
@@ -2239,11 +2223,6 @@ int snapshot_write_next(struct snapshot_handle *handle)
 		if (error)
 			return error;
 
-		/* Allocate buffer for page keys. */
-		error = page_key_alloc(nr_copy_pages);
-		if (error)
-			return error;
-
 	} else if (handle->cur <= nr_meta_pages + 1) {
 		error = unpack_orig_pfns(buffer, &copy_bm);
 		if (error)
@@ -2264,8 +2243,6 @@ int snapshot_write_next(struct snapshot_handle *handle)
 		}
 	} else {
 		copy_last_highmem_page();
-		/* Restore page key for data page (s390 only). */
-		page_key_write(handle->buffer);
 		handle->buffer = get_buffer(&orig_bm, &ca);
 		if (IS_ERR(handle->buffer))
 			return PTR_ERR(handle->buffer);
@@ -2287,9 +2264,6 @@ int snapshot_write_next(struct snapshot_handle *handle)
 void snapshot_write_finalize(struct snapshot_handle *handle)
 {
 	copy_last_highmem_page();
-	/* Restore page key for data page (s390 only). */
-	page_key_write(handle->buffer);
-	page_key_free();
 	/* Free only if we have loaded the image entirely */
 	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
 		memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
@@ -2310,13 +2284,13 @@ swap_two_pages_data(struct page *p1, struct page *p2, void *buf)
 {
 	void *kaddr1, *kaddr2;
 
-	kaddr1 = kmap_atomic(p1);
-	kaddr2 = kmap_atomic(p2);
+	kaddr1 = kmap_atomic(p1, KM_USER0);
+	kaddr2 = kmap_atomic(p2, KM_USER1);
 	copy_page(buf, kaddr1);
 	copy_page(kaddr1, kaddr2);
 	copy_page(kaddr2, buf);
-	kunmap_atomic(kaddr2);
-	kunmap_atomic(kaddr1);
+	kunmap_atomic(kaddr2, KM_USER1);
+	kunmap_atomic(kaddr1, KM_USER0);
 }
 
 /**