author:    Joerg Roedel <jroedel@suse.de>  2014-07-21 06:27:00 -0400
committer: Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2014-07-28 19:47:44 -0400
commit:    6efde38f07690652bf0d93f5e4f1a5f496574806 (patch)
tree:      feb445fec8aa97bd2fd4cc06cf627f41cd04bbb3 /kernel/power
parent:    3a20cb1779616ebcaade393cc9beac0e03cbffef (diff)
PM / Hibernate: Iterate over set bits instead of PFNs in swsusp_free()
The existing implementation of swsusp_free() iterates over all page frame
numbers (PFNs) in the system and checks every bit in the two memory
bitmaps. This doesn't scale very well with large numbers of PFNs,
especially when the bitmaps are not populated very densely. Change the
algorithm to iterate over the set bits in the bitmaps instead, to make it
scale better in large memory configurations.

Also add a memory_bm_clear_current() helper function that clears the bit
for the last position returned from the memory bitmap.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
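[Editor's note] The change is easiest to see outside the kernel. Below is a
minimal userspace sketch of the two iteration strategies the changelog
contrasts; the flat word array and the helper names are assumptions for
illustration only (the kernel uses its radix-tree-backed struct
memory_bitmap and memory_bm_next_pfn()). The old style pays for every bit
position; the new style pays only for bits that are actually set.

/*
 * Illustrative userspace sketch, not the kernel code: visiting every
 * bit index versus jumping between set bits in a sparse bitmap.
 */
#include <stdio.h>

#define NWORDS 4096
#define BITS_PER_WORD (8 * sizeof(unsigned long))

static unsigned long map[NWORDS];

/* Old approach: visit every bit position and test it individually. */
static unsigned long visit_all(void)
{
	unsigned long i, hits = 0;

	for (i = 0; i < NWORDS * BITS_PER_WORD; i++)
		if (map[i / BITS_PER_WORD] & (1UL << (i % BITS_PER_WORD)))
			hits++;
	return hits;
}

/* New approach: skip empty words, peel off set bits one at a time. */
static unsigned long visit_set(void)
{
	unsigned long w, word, hits = 0;

	for (w = 0; w < NWORDS; w++)
		for (word = map[w]; word; word &= word - 1)
			hits++;	/* word &= word - 1 clears the lowest set bit */
	return hits;
}

int main(void)
{
	map[0] = 0x11;		/* sparse population: 3 set bits total */
	map[NWORDS - 1] = 0x4;
	printf("%lu %lu\n", visit_all(), visit_set());	/* prints: 3 3 */
	return 0;
}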
Diffstat (limited to 'kernel/power')
-rw-r--r--  kernel/power/snapshot.c | 53
1 file changed, 38 insertions(+), 15 deletions(-)
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 802f2415408e..5b71caf43d32 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -848,6 +848,17 @@ static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
 	clear_bit(bit, addr);
 }
 
+static void memory_bm_clear_current(struct memory_bitmap *bm)
+{
+	int bit;
+
+	bit = max(bm->cur.node_bit - 1, 0);
+	clear_bit(bit, bm->cur.node->data);
+
+	bit = max(bm->cur.bit - 1, 0);
+	clear_bit(bit, bm->cur.block->data);
+}
+
 static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
 {
 	void *addr;
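[Editor's note] On the "- 1" in the helper above: memory_bm_next_pfn()
leaves its cursor one position past the bit it just returned, so the bit
belonging to the last returned position is cur - 1, clamped to 0. A
standalone sketch of that cursor convention, using hypothetical types that
merely stand in for the kernel's:

#include <stdio.h>

struct cursor_bitmap {
	unsigned long word;	/* one 64-bit word stands in for the map */
	int cur;		/* next position to examine */
};

/* Return the next set bit at or after cur, advancing cur past it. */
static int next_set(struct cursor_bitmap *bm)
{
	for (; bm->cur < 64; bm->cur++)
		if (bm->word & (1UL << bm->cur))
			return bm->cur++;	/* return bit, leave cur past it */
	return -1;			/* end of map */
}

/* Clear the bit most recently returned by next_set(), like
 * memory_bm_clear_current(): the cursor minus one, clamped to 0. */
static void clear_current(struct cursor_bitmap *bm)
{
	int bit = bm->cur > 0 ? bm->cur - 1 : 0;

	bm->word &= ~(1UL << bit);
}

int main(void)
{
	struct cursor_bitmap bm = { .word = 0x28, .cur = 0 };	/* bits 3, 5 */
	int bit;

	while ((bit = next_set(&bm)) >= 0) {
		printf("found bit %d\n", bit);	/* prints 3, then 5 */
		clear_current(&bm);		/* clears the bit just seen */
	}
	return 0;
}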
@@ -1491,23 +1502,35 @@ static struct memory_bitmap copy_bm;
 
 void swsusp_free(void)
 {
-	struct zone *zone;
-	unsigned long pfn, max_zone_pfn;
+	unsigned long fb_pfn, fr_pfn;
+
+	memory_bm_position_reset(forbidden_pages_map);
+	memory_bm_position_reset(free_pages_map);
+
+loop:
+	fr_pfn = memory_bm_next_pfn(free_pages_map);
+	fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
 
-	for_each_populated_zone(zone) {
-		max_zone_pfn = zone_end_pfn(zone);
-		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
-			if (pfn_valid(pfn)) {
-				struct page *page = pfn_to_page(pfn);
+	/*
+	 * Find the next bit set in both bitmaps. This is guaranteed to
+	 * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
+	 */
+	do {
+		if (fb_pfn < fr_pfn)
+			fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
+		if (fr_pfn < fb_pfn)
+			fr_pfn = memory_bm_next_pfn(free_pages_map);
+	} while (fb_pfn != fr_pfn);
+
+	if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
+		struct page *page = pfn_to_page(fr_pfn);
 
-				if (swsusp_page_is_forbidden(page) &&
-				    swsusp_page_is_free(page)) {
-					swsusp_unset_page_forbidden(page);
-					swsusp_unset_page_free(page);
-					__free_page(page);
-				}
-			}
+		memory_bm_clear_current(forbidden_pages_map);
+		memory_bm_clear_current(free_pages_map);
+		__free_page(page);
+		goto loop;
 	}
+
 	nr_copy_pages = 0;
 	nr_meta_pages = 0;
 	restore_pblist = NULL;
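[Editor's note] The do/while in the new swsusp_free() is a classic
merge-style intersection of two ascending sequences: whichever stream is
behind advances until both report the same PFN, and BM_END_OF_MAP acts as a
common sentinel. A self-contained sketch of the same pattern, with
END-terminated sorted arrays standing in for the bitmaps (all names here
are illustrative assumptions):

#include <stdio.h>

#define END (~0UL)	/* plays the role of BM_END_OF_MAP */

/* Return the current element and advance, sticking at END. */
static unsigned long next(const unsigned long *seq, int *pos)
{
	unsigned long v = seq[*pos];

	if (v != END)
		(*pos)++;
	return v;
}

int main(void)
{
	/* ascending, END-terminated "streams" of pfns */
	const unsigned long forbidden[] = { 2, 5, 9, 12, END };
	const unsigned long free_[]     = { 1, 5, 7, 12, 40, END };
	int fb_pos = 0, fr_pos = 0;
	unsigned long fb, fr;

	for (;;) {
		fb = next(forbidden, &fb_pos);
		fr = next(free_, &fr_pos);

		/* advance whichever stream is behind until they meet */
		while (fb != fr) {
			if (fb < fr)
				fb = next(forbidden, &fb_pos);
			else
				fr = next(free_, &fr_pos);
		}
		if (fr == END)
			break;
		printf("common pfn: %lu\n", fr);	/* prints 5, then 12 */
	}
	return 0;
}

Because both streams only ever move forward and both end at the sentinel,
the walk is linear in the number of set bits rather than in the total
number of PFNs, which is the scalability point of the commit.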