path: root/kernel/power
author	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2016-06-28 21:05:10 -0400
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2016-07-01 19:52:10 -0400
commit	307c5971c972ef2bfd541d2850b36a692c6354c9 (patch)
tree	e57099612a87150b5728612e77ae0330abecdb81 /kernel/power
parent	6dbecfd345a617888da370b13d5b190c9ff3df53 (diff)
PM / hibernate: Recycle safe pages after image restoration
One of the memory bitmaps used by the hibernation image restoration code is freed after the image has been loaded. That is not quite efficient, though, because the memory pages used for building that bitmap are known to be safe (i.e. they were not used by the image kernel before hibernation) and the arch-specific code finalizing the image restoration may need them. In that case it needs to allocate those pages again via the memory management subsystem, check whether they are really safe again by consulting the other bitmaps, and so on.

To avoid that, recycle those pages by putting them into the global list of known safe pages so that they can be given to the arch code right away when necessary.

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
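The effect of the recycling is easiest to see from the consumer side: the allocator that hands pages to the arch-specific restoration code checks the global safe_pages_list first and only falls back to the memory management subsystem when the list is empty. A simplified sketch of that consumer, modeled on the get_safe_page() helper in kernel/power/snapshot.c (illustrative, not the exact upstream code):

	/* Simplified sketch: hand out a page that is known to be safe. */
	unsigned long get_safe_page(gfp_t gfp_mask)
	{
		if (safe_pages_list) {
			/* Reuse a page recycled into the safe list. */
			void *ret = safe_pages_list;

			safe_pages_list = safe_pages_list->next;
			memset(ret, 0, PAGE_SIZE);
			return (unsigned long)ret;
		}
		/* Otherwise allocate a fresh page and mark it as safe. */
		return (unsigned long)get_image_page(gfp_mask, PG_SAFE);
	}

With memory_bm_recycle() feeding the pages of the no-longer-needed bitmap into safe_pages_list, such calls made by the arch code while finalizing the restoration can be served from that list instead of going back through the memory management subsystem and re-checking safety against the other bitmaps.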
Diffstat (limited to 'kernel/power')
-rw-r--r--	kernel/power/snapshot.c	40
1 file changed, 38 insertions(+), 2 deletions(-)
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 39bbad5fac5a..94b6fe6c9ae3 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -158,6 +158,14 @@ static struct page *alloc_image_page(gfp_t gfp_mask)
 	return page;
 }
 
+static void recycle_safe_page(void *page_address)
+{
+	struct linked_page *lp = page_address;
+
+	lp->next = safe_pages_list;
+	safe_pages_list = lp;
+}
+
 /**
  * free_image_page - free page represented by @addr, allocated with
  * get_image_page (page flags set by it must be cleared)
@@ -852,6 +860,34 @@ struct nosave_region {
 
 static LIST_HEAD(nosave_regions);
 
+static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
+{
+	struct rtree_node *node;
+
+	list_for_each_entry(node, &zone->nodes, list)
+		recycle_safe_page(node->data);
+
+	list_for_each_entry(node, &zone->leaves, list)
+		recycle_safe_page(node->data);
+}
+
+static void memory_bm_recycle(struct memory_bitmap *bm)
+{
+	struct mem_zone_bm_rtree *zone;
+	struct linked_page *p_list;
+
+	list_for_each_entry(zone, &bm->zones, list)
+		recycle_zone_bm_rtree(zone);
+
+	p_list = bm->p_list;
+	while (p_list) {
+		struct linked_page *lp = p_list;
+
+		p_list = lp->next;
+		recycle_safe_page(lp);
+	}
+}
+
 /**
  * register_nosave_region - register a range of page frames the contents
  * of which should not be saved during the suspend (to be used in the early
@@ -2542,9 +2578,9 @@ void snapshot_write_finalize(struct snapshot_handle *handle)
 	/* Restore page key for data page (s390 only). */
 	page_key_write(handle->buffer);
 	page_key_free();
-	/* Free only if we have loaded the image entirely */
+	/* Do that only if we have loaded the image entirely */
 	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
-		memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
+		memory_bm_recycle(&orig_bm);
 		free_highmem_data();
 	}
 }