Diffstat (limited to 'kernel/power')
-rw-r--r--  kernel/power/hibernate.c |  1
-rw-r--r--  kernel/power/poweroff.c  |  2
-rw-r--r--  kernel/power/snapshot.c  | 86
-rw-r--r--  kernel/power/swap.c      |  6
4 files changed, 69 insertions(+), 26 deletions(-)
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index c77963938bca..8dc31e02ae12 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -338,7 +338,6 @@ int hibernation_snapshot(int platform_mode)
 		goto Close;
 
 	suspend_console();
-	hibernation_freeze_swap();
 	saved_mask = clear_gfp_allowed_mask(GFP_IOFS);
 	error = dpm_suspend_start(PMSG_FREEZE);
 	if (error)
diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
index e8b337006276..d52359374e85 100644
--- a/kernel/power/poweroff.c
+++ b/kernel/power/poweroff.c
@@ -24,7 +24,7 @@ static void do_poweroff(struct work_struct *dummy)
 
 static DECLARE_WORK(poweroff_work, do_poweroff);
 
-static void handle_poweroff(int key, struct tty_struct *tty)
+static void handle_poweroff(int key)
 {
 	/* run sysrq poweroff on boot cpu */
 	schedule_work_on(cpumask_first(cpu_online_mask), &poweroff_work);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 5e7edfb05e66..d3f795f01bbc 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1086,7 +1086,6 @@ void swsusp_free(void)
 	buffer = NULL;
 	alloc_normal = 0;
 	alloc_highmem = 0;
-	hibernation_thaw_swap();
 }
 
 /* Helper functions used for the shrinking of memory. */
@@ -1122,9 +1121,19 @@ static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
 	return nr_alloc;
 }
 
-static unsigned long preallocate_image_memory(unsigned long nr_pages)
+static unsigned long preallocate_image_memory(unsigned long nr_pages,
+					      unsigned long avail_normal)
 {
-	return preallocate_image_pages(nr_pages, GFP_IMAGE);
+	unsigned long alloc;
+
+	if (avail_normal <= alloc_normal)
+		return 0;
+
+	alloc = avail_normal - alloc_normal;
+	if (nr_pages < alloc)
+		alloc = nr_pages;
+
+	return preallocate_image_pages(alloc, GFP_IMAGE);
 }
 
 #ifdef CONFIG_HIGHMEM
@@ -1170,15 +1179,22 @@ static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
  */
 static void free_unnecessary_pages(void)
 {
-	unsigned long save_highmem, to_free_normal, to_free_highmem;
+	unsigned long save, to_free_normal, to_free_highmem;
 
-	to_free_normal = alloc_normal - count_data_pages();
-	save_highmem = count_highmem_pages();
-	if (alloc_highmem > save_highmem) {
-		to_free_highmem = alloc_highmem - save_highmem;
+	save = count_data_pages();
+	if (alloc_normal >= save) {
+		to_free_normal = alloc_normal - save;
+		save = 0;
+	} else {
+		to_free_normal = 0;
+		save -= alloc_normal;
+	}
+	save += count_highmem_pages();
+	if (alloc_highmem >= save) {
+		to_free_highmem = alloc_highmem - save;
 	} else {
 		to_free_highmem = 0;
-		to_free_normal -= save_highmem - alloc_highmem;
+		to_free_normal -= save - alloc_highmem;
 	}
 
 	memory_bm_position_reset(&copy_bm);
@@ -1259,7 +1275,7 @@ int hibernate_preallocate_memory(void)
 {
 	struct zone *zone;
 	unsigned long saveable, size, max_size, count, highmem, pages = 0;
-	unsigned long alloc, save_highmem, pages_highmem;
+	unsigned long alloc, save_highmem, pages_highmem, avail_normal;
 	struct timeval start, stop;
 	int error;
 
@@ -1296,6 +1312,7 @@ int hibernate_preallocate_memory(void)
 		else
 			count += zone_page_state(zone, NR_FREE_PAGES);
 	}
+	avail_normal = count;
 	count += highmem;
 	count -= totalreserve_pages;
 
@@ -1310,12 +1327,21 @@ int hibernate_preallocate_memory(void)
 	 */
 	if (size >= saveable) {
 		pages = preallocate_image_highmem(save_highmem);
-		pages += preallocate_image_memory(saveable - pages);
+		pages += preallocate_image_memory(saveable - pages, avail_normal);
 		goto out;
 	}
 
 	/* Estimate the minimum size of the image. */
 	pages = minimum_image_size(saveable);
+	/*
+	 * To avoid excessive pressure on the normal zone, leave room in it to
+	 * accommodate an image of the minimum size (unless it's already too
+	 * small, in which case don't preallocate pages from it at all).
+	 */
+	if (avail_normal > pages)
+		avail_normal -= pages;
+	else
+		avail_normal = 0;
 	if (size < pages)
 		size = min_t(unsigned long, pages, max_size);
 
@@ -1336,16 +1362,34 @@ int hibernate_preallocate_memory(void)
 	 */
 	pages_highmem = preallocate_image_highmem(highmem / 2);
 	alloc = (count - max_size) - pages_highmem;
-	pages = preallocate_image_memory(alloc);
-	if (pages < alloc)
-		goto err_out;
-	size = max_size - size;
-	alloc = size;
-	size = preallocate_highmem_fraction(size, highmem, count);
-	pages_highmem += size;
-	alloc -= size;
-	pages += preallocate_image_memory(alloc);
-	pages += pages_highmem;
+	pages = preallocate_image_memory(alloc, avail_normal);
+	if (pages < alloc) {
+		/* We have exhausted non-highmem pages, try highmem. */
+		alloc -= pages;
+		pages += pages_highmem;
+		pages_highmem = preallocate_image_highmem(alloc);
+		if (pages_highmem < alloc)
+			goto err_out;
+		pages += pages_highmem;
+		/*
+		 * size is the desired number of saveable pages to leave in
+		 * memory, so try to preallocate (all memory - size) pages.
+		 */
+		alloc = (count - pages) - size;
+		pages += preallocate_image_highmem(alloc);
+	} else {
+		/*
+		 * There are approximately max_size saveable pages at this point
+		 * and we want to reduce this number down to size.
+		 */
+		alloc = max_size - size;
+		size = preallocate_highmem_fraction(alloc, highmem, count);
+		pages_highmem += size;
+		alloc -= size;
+		size = preallocate_image_memory(alloc, avail_normal);
+		pages_highmem += preallocate_image_highmem(alloc - size);
+		pages += pages_highmem + size;
+	}
 
 	/*
 	 * We only need as many page frames for the image as there are saveable
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 5d0059eed3e4..e6a5bdf61a37 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -136,10 +136,10 @@ sector_t alloc_swapdev_block(int swap)
 {
 	unsigned long offset;
 
-	offset = swp_offset(get_swap_for_hibernation(swap));
+	offset = swp_offset(get_swap_page_of_type(swap));
 	if (offset) {
 		if (swsusp_extents_insert(offset))
-			swap_free_for_hibernation(swp_entry(swap, offset));
+			swap_free(swp_entry(swap, offset));
 		else
 			return swapdev_block(swap, offset);
 	}
@@ -163,7 +163,7 @@ void free_all_swap_pages(int swap)
 		ext = container_of(node, struct swsusp_extent, node);
 		rb_erase(node, &swsusp_extents);
 		for (offset = ext->start; offset <= ext->end; offset++)
-			swap_free_for_hibernation(swp_entry(swap, offset));
 
 		kfree(ext);
 	}
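
For reference, the clamping behaviour that the patch introduces in preallocate_image_memory() can be exercised in isolation. The sketch below is not part of the patch: preallocate_clamped() and the test harness are hypothetical stand-ins, with alloc_normal mimicking the kernel's counter of pages already preallocated from the normal (lowmem) zones, assuming the same arithmetic as the new function body.

#include <stdio.h>

/* Hypothetical stand-in for the kernel counter of pages already
 * preallocated from the normal zones (illustration only). */
static unsigned long alloc_normal;

/* Mirrors the clamp added by the patch: never request more pages than the
 * normal zones can still provide, i.e. avail_normal - alloc_normal. */
static unsigned long preallocate_clamped(unsigned long nr_pages,
					 unsigned long avail_normal)
{
	unsigned long alloc;

	if (avail_normal <= alloc_normal)
		return 0;		/* normal zones already exhausted */

	alloc = avail_normal - alloc_normal;
	if (nr_pages < alloc)
		alloc = nr_pages;

	return alloc;			/* the patch feeds this into preallocate_image_pages() */
}

int main(void)
{
	alloc_normal = 300;
	/* Requesting 1000 pages with only 800 normal pages available:
	 * the clamp limits the request to 800 - 300 = 500 pages. */
	printf("%lu\n", preallocate_clamped(1000, 800));	/* prints 500 */
	return 0;
}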