Diffstat (limited to 'kernel/power/snapshot.c')
-rw-r--r--  kernel/power/snapshot.c | 69
1 file changed, 46 insertions(+), 23 deletions(-)
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index d3f795f01bbc..06efa54f93d6 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -41,12 +41,29 @@ static void swsusp_set_page_forbidden(struct page *);
 static void swsusp_unset_page_forbidden(struct page *);
 
 /*
+ * Number of bytes to reserve for memory allocations made by device drivers
+ * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
+ * cause image creation to fail (tunable via /sys/power/reserved_size).
+ */
+unsigned long reserved_size;
+
+void __init hibernate_reserved_size_init(void)
+{
+	reserved_size = SPARE_PAGES * PAGE_SIZE;
+}
+
+/*
  * Preferred image size in bytes (tunable via /sys/power/image_size).
  * When it is set to N, swsusp will do its best to ensure the image
  * size will not exceed N bytes, but if that is impossible, it will
  * try to create the smallest image possible.
  */
-unsigned long image_size = 500 * 1024 * 1024;
+unsigned long image_size;
+
+void __init hibernate_image_size_init(void)
+{
+	image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE;
+}
 
 /* List of PBEs needed for restoring the pages that were allocated before
  * the suspend and included in the suspend image, but have also been
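
The hunk above replaces the fixed 500 MB default for image_size with a boot-time computation (two fifths of total RAM) and introduces reserved_size, seeded from the old SPARE_PAGES constant. A rough userspace model of the two init helpers follows; the RAM size, SPARE_PAGES and PAGE_SIZE values are made-up stand-ins for the kernel's:

#include <stdio.h>

/* Stand-ins for the kernel symbols -- illustrative values only. */
#define PAGE_SIZE   4096UL
#define SPARE_PAGES ((1024UL * 1024UL) / PAGE_SIZE)  /* 1 MB worth of pages */

static unsigned long totalram_pages = 2097152;       /* pretend 8 GB of RAM */
static unsigned long reserved_size;
static unsigned long image_size;

/* Mirrors hibernate_reserved_size_init(): keep the old 1 MB as the default. */
static void init_reserved_size(void)
{
	reserved_size = SPARE_PAGES * PAGE_SIZE;
}

/* Mirrors hibernate_image_size_init(): default to 2/5 of total RAM. */
static void init_image_size(void)
{
	image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE;
}

int main(void)
{
	init_reserved_size();
	init_image_size();
	printf("reserved_size = %lu bytes\n", reserved_size); /* 1048576 */
	printf("image_size    = %lu bytes\n", image_size);    /* ~3.2 GB */
	return 0;
}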
@@ -979,8 +996,8 @@ static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
 		src = kmap_atomic(s_page, KM_USER0);
 		dst = kmap_atomic(d_page, KM_USER1);
 		do_copy_page(dst, src);
-		kunmap_atomic(src, KM_USER0);
 		kunmap_atomic(dst, KM_USER1);
+		kunmap_atomic(src, KM_USER0);
 	} else {
 		if (PageHighMem(d_page)) {
 			/* Page pointed to by src may contain some kernel
@@ -988,7 +1005,7 @@ static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
 			 */
 			safe_copy_page(buffer, s_page);
 			dst = kmap_atomic(d_page, KM_USER0);
-			memcpy(dst, buffer, PAGE_SIZE);
+			copy_page(dst, buffer);
 			kunmap_atomic(dst, KM_USER0);
 		} else {
 			safe_copy_page(page_address(d_page), s_page);
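
Two independent changes in the copy_data_page() hunks: the whole-page memcpy() becomes copy_page(), and the kunmap_atomic() calls are reordered so the mapping taken last (dst, KM_USER1) is released first. The LIFO order appears to anticipate the stack-based kmap_atomic() API, where unmaps must nest in reverse order of the maps. A toy model of that discipline (the toy_* names are illustrative, not kernel API):

#include <assert.h>
#include <stdio.h>

/* Toy model of stack-based atomic kmaps: unmaps must nest LIFO. */
static const void *kmap_stack[8];
static int kmap_top;

static void *toy_kmap_atomic(void *page)
{
	kmap_stack[kmap_top++] = page;
	return page;                    /* pretend this is the mapped address */
}

static void toy_kunmap_atomic(void *addr)
{
	/* Only the most recently created mapping may be torn down. */
	assert(kmap_top > 0 && kmap_stack[kmap_top - 1] == addr);
	kmap_top--;
}

int main(void)
{
	char s_page[1], d_page[1];
	void *src = toy_kmap_atomic(s_page);
	void *dst = toy_kmap_atomic(d_page);

	/* ... do_copy_page(dst, src) would run here ... */
	toy_kunmap_atomic(dst);   /* new order: dst (mapped last) goes first... */
	toy_kunmap_atomic(src);   /* ...then src; the old order trips the assert */
	printf("unmap order ok\n");
	return 0;
}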
@@ -1194,7 +1211,11 @@ static void free_unnecessary_pages(void)
 		to_free_highmem = alloc_highmem - save;
 	} else {
 		to_free_highmem = 0;
-		to_free_normal -= save - alloc_highmem;
+		save -= alloc_highmem;
+		if (to_free_normal > save)
+			to_free_normal -= save;
+		else
+			to_free_normal = 0;
 	}
 
 	memory_bm_position_reset(&copy_bm);
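
The old line `to_free_normal -= save - alloc_highmem;` could underflow: the variables are unsigned, so if `save - alloc_highmem` exceeded to_free_normal the result wrapped around to a huge page count. The replacement clamps at zero instead. A userspace demonstration with made-up counts:

#include <stdio.h>

int main(void)
{
	unsigned long to_free_normal = 10;              /* made-up page counts */
	unsigned long save = 120, alloc_highmem = 100;

	/* Old code: to_free_normal -= save - alloc_highmem;
	 * i.e. 10 - 20, which wraps to 18446744073709551606 on 64-bit. */

	/* New code: subtract, but clamp at zero. */
	save -= alloc_highmem;
	if (to_free_normal > save)
		to_free_normal -= save;
	else
		to_free_normal = 0;

	printf("to_free_normal = %lu\n", to_free_normal); /* 0, not a huge count */
	return 0;
}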
@@ -1258,11 +1279,13 @@ static unsigned long minimum_image_size(unsigned long saveable)
  * frame in use. We also need a number of page frames to be free during
  * hibernation for allocations made while saving the image and for device
  * drivers, in case they need to allocate memory from their hibernation
- * callbacks (these two numbers are given by PAGES_FOR_IO and SPARE_PAGES,
- * respectively, both of which are rough estimates). To make this happen, we
- * compute the total number of available page frames and allocate at least
+ * callbacks (these two numbers are given by PAGES_FOR_IO (which is a rough
+ * estimate) and reserved_size divided by PAGE_SIZE (which is tunable through
+ * /sys/power/reserved_size), respectively). To make this happen, we compute
+ * the total number of available page frames and allocate at least
  *
- * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2 + 2 * SPARE_PAGES
+ * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
+ *  + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
  *
  * of them, which corresponds to the maximum size of a hibernation image.
  *
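
To make the new floor concrete with invented round numbers: given 1,000,000 usable page frames, 20,000 metadata pages, PAGES_FOR_IO = 1024 (4 MB worth of 4 KB pages, its usual value), and reserved_size at its 1 MB default (so DIV_ROUND_UP(reserved_size, PAGE_SIZE) = 256), the kernel allocates at least (1000000 + 1024 + 20000) / 2 + 2 * 256 = 510512 + 512 = 511024 page frames.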
@@ -1317,13 +1340,16 @@ int hibernate_preallocate_memory(void)
 	count -= totalreserve_pages;
 
 	/* Compute the maximum number of saveable pages to leave in memory. */
-	max_size = (count - (size + PAGES_FOR_IO)) / 2 - 2 * SPARE_PAGES;
+	max_size = (count - (size + PAGES_FOR_IO)) / 2
+			- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
+	/* Compute the desired number of image pages specified by image_size. */
 	size = DIV_ROUND_UP(image_size, PAGE_SIZE);
 	if (size > max_size)
 		size = max_size;
 	/*
-	 * If the maximum is not less than the current number of saveable pages
-	 * in memory, allocate page frames for the image and we're done.
+	 * If the desired number of image pages is at least as large as the
+	 * current number of saveable pages in memory, allocate page frames for
+	 * the image and we're done.
 	 */
 	if (size >= saveable) {
 		pages = preallocate_image_highmem(save_highmem);
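
DIV_ROUND_UP(reserved_size, PAGE_SIZE) converts the byte-denominated sysfs knob back into whole page frames, rounding up so a partial page still costs a full frame; the same macro then produces the desired image page count, clamped to max_size. A small standalone check (the macro matches the definition in include/linux/kernel.h; the sizes are invented):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long page_size = 4096;
	unsigned long reserved_size = 1024 * 1024 + 1;  /* one byte over 1 MB */
	unsigned long image_size = 3UL << 30;           /* say, a 3 GB request */
	unsigned long max_size = 500000;                /* made-up frame budget */
	unsigned long size;

	/* One extra byte still reserves a whole frame: 257 pages, not 256. */
	printf("reserved pages = %lu\n", DIV_ROUND_UP(reserved_size, page_size));

	/* As in the hunk: the desired image size is clamped to the maximum. */
	size = DIV_ROUND_UP(image_size, page_size);
	if (size > max_size)
		size = max_size;
	printf("image pages = %lu\n", size);      /* 500000, down from 786432 */
	return 0;
}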
@@ -1512,11 +1538,8 @@ static int
 swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
 		unsigned int nr_pages, unsigned int nr_highmem)
 {
-	int error = 0;
-
 	if (nr_highmem > 0) {
-		error = get_highmem_buffer(PG_ANY);
-		if (error)
+		if (get_highmem_buffer(PG_ANY))
 			goto err_out;
 		if (nr_highmem > alloc_highmem) {
 			nr_highmem -= alloc_highmem;
@@ -1539,7 +1562,7 @@ swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
 
  err_out:
 	swsusp_free();
-	return error;
+	return -ENOMEM;
 }
 
 asmlinkage int swsusp_save(void)
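
With the `error` local gone, every failure path in swsusp_alloc() funnels through err_out and the function reports -ENOMEM directly, presumably the only error its callees could produce anyway. The shape of the pattern, with the kernel helpers stubbed out:

#include <errno.h>
#include <stdio.h>

/* Stubs standing in for the kernel helpers used by swsusp_alloc(). */
static int get_highmem_buffer(void) { return -ENOMEM; } /* pretend it fails */
static void swsusp_free(void) { }

static int toy_swsusp_alloc(unsigned int nr_highmem)
{
	if (nr_highmem > 0) {
		if (get_highmem_buffer())
			goto err_out;
		/* ... more allocations, each jumping to err_out on failure ... */
	}
	return 0;

 err_out:
	swsusp_free();          /* single cleanup point */
	return -ENOMEM;         /* the only error the function can report */
}

int main(void)
{
	printf("toy_swsusp_alloc -> %d\n", toy_swsusp_alloc(1)); /* -12 */
	return 0;
}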
@@ -1680,7 +1703,7 @@ int snapshot_read_next(struct snapshot_handle *handle)
 		memory_bm_position_reset(&orig_bm);
 		memory_bm_position_reset(&copy_bm);
 	} else if (handle->cur <= nr_meta_pages) {
-		memset(buffer, 0, PAGE_SIZE);
+		clear_page(buffer);
 		pack_pfns(buffer, &orig_bm);
 	} else {
 		struct page *page;
@@ -1694,7 +1717,7 @@ int snapshot_read_next(struct snapshot_handle *handle)
 			void *kaddr;
 
 			kaddr = kmap_atomic(page, KM_USER0);
-			memcpy(buffer, kaddr, PAGE_SIZE);
+			copy_page(buffer, kaddr);
 			kunmap_atomic(kaddr, KM_USER0);
 			handle->buffer = buffer;
 		} else {
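
clear_page() and copy_page() differ from memset()/memcpy() in their contract: the buffer must be a whole, page-aligned page, which frees architectures to use wide or cache-aware operations. Both call sites here (the meta-page buffer and the data-page copy) satisfy that. A sketch of the contract with plain C stand-ins (toy_* names and TOY_PAGE_SIZE are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define TOY_PAGE_SIZE 4096

/* Userspace stand-ins: the kernel versions may use arch-specific fast paths
 * and are only valid on page-aligned, page-sized memory. */
static void toy_clear_page(void *page) { memset(page, 0, TOY_PAGE_SIZE); }
static void toy_copy_page(void *to, const void *from)
{
	memcpy(to, from, TOY_PAGE_SIZE);
}

int main(void)
{
	/* Page alignment is part of the contract, hence aligned_alloc() (C11). */
	void *buffer = aligned_alloc(TOY_PAGE_SIZE, TOY_PAGE_SIZE);
	void *kaddr  = aligned_alloc(TOY_PAGE_SIZE, TOY_PAGE_SIZE);

	if (!buffer || !kaddr)
		return 1;
	toy_clear_page(buffer);               /* as in the nr_meta_pages branch */
	memset(kaddr, 0xab, TOY_PAGE_SIZE);
	toy_copy_page(buffer, kaddr);         /* as in the data-page branch */
	printf("buffer[0] = 0x%02x\n", ((unsigned char *)buffer)[0]);
	free(buffer);
	free(kaddr);
	return 0;
}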
@@ -1977,7 +2000,7 @@ static void copy_last_highmem_page(void)
 		void *dst;
 
 		dst = kmap_atomic(last_highmem_page, KM_USER0);
-		memcpy(dst, buffer, PAGE_SIZE);
+		copy_page(dst, buffer);
 		kunmap_atomic(dst, KM_USER0);
 		last_highmem_page = NULL;
 	}
@@ -2263,11 +2286,11 @@ swap_two_pages_data(struct page *p1, struct page *p2, void *buf)
 
 	kaddr1 = kmap_atomic(p1, KM_USER0);
 	kaddr2 = kmap_atomic(p2, KM_USER1);
-	memcpy(buf, kaddr1, PAGE_SIZE);
-	memcpy(kaddr1, kaddr2, PAGE_SIZE);
-	memcpy(kaddr2, buf, PAGE_SIZE);
-	kunmap_atomic(kaddr1, KM_USER0);
+	copy_page(buf, kaddr1);
+	copy_page(kaddr1, kaddr2);
+	copy_page(kaddr2, buf);
 	kunmap_atomic(kaddr2, KM_USER1);
+	kunmap_atomic(kaddr1, KM_USER0);
 }
 
 /**
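
The final hunk applies both earlier patterns to swap_two_pages_data(): copy_page() for the three page-sized moves through the bounce buffer, and LIFO ordering for the two kunmap_atomic() calls, matching the copy_data_page() fix above. The three-move swap itself, in miniature:

#include <stdio.h>
#include <string.h>

#define TOY_PAGE_SIZE 4096

/* Three-move swap through a scratch page, as in swap_two_pages_data(). */
static void toy_swap_pages(char *p1, char *p2, char *buf)
{
	memcpy(buf, p1, TOY_PAGE_SIZE);   /* buf <- p1 */
	memcpy(p1, p2, TOY_PAGE_SIZE);    /* p1  <- p2 */
	memcpy(p2, buf, TOY_PAGE_SIZE);   /* p2  <- old p1 */
}

int main(void)
{
	static char p1[TOY_PAGE_SIZE], p2[TOY_PAGE_SIZE], buf[TOY_PAGE_SIZE];

	memset(p1, 'A', sizeof(p1));
	memset(p2, 'B', sizeof(p2));
	toy_swap_pages(p1, p2, buf);
	printf("p1[0]=%c p2[0]=%c\n", p1[0], p2[0]);   /* p1[0]=B p2[0]=A */
	return 0;
}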