Diffstat (limited to 'mm/swapfile.c')
 -rw-r--r--  mm/swapfile.c | 129
 1 file changed, 46 insertions(+), 83 deletions(-)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 1f3f9c59a73a..7c703ff2f36f 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -47,8 +47,6 @@ long nr_swap_pages;
 long total_swap_pages;
 static int least_priority;
 
-static bool swap_for_hibernation;
-
 static const char Bad_file[] = "Bad swap file entry ";
 static const char Unused_file[] = "Unused swap file entry ";
 static const char Bad_offset[] = "Bad swap offset entry ";
@@ -141,8 +139,7 @@ static int discard_swap(struct swap_info_struct *si)
 	nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
 	if (nr_blocks) {
 		err = blkdev_issue_discard(si->bdev, start_block,
-				nr_blocks, GFP_KERNEL,
-				BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
+				nr_blocks, GFP_KERNEL, BLKDEV_IFL_WAIT);
 		if (err)
 			return err;
 		cond_resched();
@@ -153,8 +150,7 @@ static int discard_swap(struct swap_info_struct *si)
 		nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
 
 		err = blkdev_issue_discard(si->bdev, start_block,
-				nr_blocks, GFP_KERNEL,
-				BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
+				nr_blocks, GFP_KERNEL, BLKDEV_IFL_WAIT);
 		if (err)
 			break;
 
@@ -193,8 +189,7 @@ static void discard_swap_cluster(struct swap_info_struct *si,
 			start_block <<= PAGE_SHIFT - 9;
 			nr_blocks <<= PAGE_SHIFT - 9;
 			if (blkdev_issue_discard(si->bdev, start_block,
-				    nr_blocks, GFP_NOIO, BLKDEV_IFL_WAIT |
-							BLKDEV_IFL_BARRIER))
+				    nr_blocks, GFP_NOIO, BLKDEV_IFL_WAIT))
 				break;
 		}
 
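Note: the << (PAGE_SHIFT - 9) shifts in the three hunks above convert page counts into the 512-byte sector counts that blkdev_issue_discard() expects. A worked illustration of the arithmetic, assuming 4 KiB pages; the helper name is ours, not a kernel function:

/*
 * Illustration only: pages to 512-byte sectors.
 * With PAGE_SHIFT = 12 and sectors of 1 << 9 bytes,
 * one page covers 1 << (12 - 9) = 8 sectors.
 */
static inline sector_t pages_to_sectors(sector_t nr_pages)
{
	return nr_pages << (PAGE_SHIFT - 9);	/* e.g. 16 pages -> 128 sectors */
}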
@@ -320,10 +315,8 @@ checks:
 	if (offset > si->highest_bit)
 		scan_base = offset = si->lowest_bit;
 
-	/* reuse swap entry of cache-only swap if not hibernation. */
-	if (vm_swap_full()
-		&& usage == SWAP_HAS_CACHE
-		&& si->swap_map[offset] == SWAP_HAS_CACHE) {
+	/* reuse swap entry of cache-only swap if not busy. */
+	if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
 		int swap_was_freed;
 		spin_unlock(&swap_lock);
 		swap_was_freed = __try_to_reclaim_swap(si, offset);
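Note: si->swap_map[offset] == SWAP_HAS_CACHE matches a slot whose map entry carries only the cache flag and no process references, which is what makes it safe to reclaim here. A small illustration, assuming the 0x40 flag value defined in this era's <linux/swap.h>; the helper name is ours:

/*
 * A swap_map entry equal to SWAP_HAS_CACHE (0x40 in this era's
 * <linux/swap.h>) has no SWAP_MAP count bits set: only the swap
 * cache pins the slot, so it can be reclaimed and reused.
 */
static inline bool swap_slot_cache_only(unsigned char count)
{
	return count == SWAP_HAS_CACHE;
}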
@@ -453,8 +446,6 @@ swp_entry_t get_swap_page(void)
 	spin_lock(&swap_lock);
 	if (nr_swap_pages <= 0)
 		goto noswap;
-	if (swap_for_hibernation)
-		goto noswap;
 	nr_swap_pages--;
 
 	for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) {
@@ -487,6 +478,28 @@ noswap:
 	return (swp_entry_t) {0};
 }
 
+/* The only caller of this function is now the suspend routine */
+swp_entry_t get_swap_page_of_type(int type)
+{
+	struct swap_info_struct *si;
+	pgoff_t offset;
+
+	spin_lock(&swap_lock);
+	si = swap_info[type];
+	if (si && (si->flags & SWP_WRITEOK)) {
+		nr_swap_pages--;
+		/* This is called for allocating swap entry, not cache */
+		offset = scan_swap_map(si, 1);
+		if (offset) {
+			spin_unlock(&swap_lock);
+			return swp_entry(type, offset);
+		}
+		nr_swap_pages++;
+	}
+	spin_unlock(&swap_lock);
+	return (swp_entry_t) {0};
+}
+
 static struct swap_info_struct *swap_info_get(swp_entry_t entry)
 {
 	struct swap_info_struct *p;
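As the comment in the new hunk says, get_swap_page_of_type() is now meant for the suspend path only. A hedged sketch of such a caller; the wrapper below is hypothetical, while swp_entry_t and swp_offset() are the real kernel types:

/*
 * Hypothetical caller sketch: allocate one swap slot of the given
 * type for the hibernation image and return its page offset, or 0
 * on failure. Offset 0 is never handed out (it holds the swap
 * header), so it can double as the error value.
 */
static pgoff_t hibernate_alloc_slot(int swap_type)
{
	swp_entry_t entry = get_swap_page_of_type(swap_type);

	return swp_offset(entry);	/* 0 if allocation failed */
}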
@@ -670,6 +683,24 @@ int try_to_free_swap(struct page *page)
 	if (page_swapcount(page))
 		return 0;
 
+	/*
+	 * Once hibernation has begun to create its image of memory,
+	 * there's a danger that one of the calls to try_to_free_swap()
+	 * - most probably a call from __try_to_reclaim_swap() while
+	 * hibernation is allocating its own swap pages for the image,
+	 * but conceivably even a call from memory reclaim - will free
+	 * the swap from a page which has already been recorded in the
+	 * image as a clean swapcache page, and then reuse its swap for
+	 * another page of the image. On waking from hibernation, the
+	 * original page might be freed under memory pressure, then
+	 * later read back in from swap, now with the wrong data.
+	 *
+	 * Hibernation clears bits from gfp_allowed_mask to prevent
+	 * memory reclaim from writing to disk, so check that here.
+	 */
+	if (!(gfp_allowed_mask & __GFP_IO))
+		return 0;
+
 	delete_from_swap_cache(page);
 	SetPageDirty(page);
 	return 1;
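The !(gfp_allowed_mask & __GFP_IO) test relies on the hibernation core masking I/O out of gfp_allowed_mask around the snapshot. A minimal sketch of that mechanism with assumed function names; the real restriction lives in the PM core, not in this file:

static gfp_t saved_gfp_mask;

/* Called before snapshotting: forbid reclaim from doing I/O. */
static void pm_restrict_gfp(void)
{
	saved_gfp_mask = gfp_allowed_mask;
	gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
}

/* Called after the image is written: restore normal allocation. */
static void pm_restore_gfp(void)
{
	gfp_allowed_mask = saved_gfp_mask;
}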
@@ -746,74 +777,6 @@ int mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep)
 #endif
 
 #ifdef CONFIG_HIBERNATION
-
-static pgoff_t hibernation_offset[MAX_SWAPFILES];
-/*
- * Once hibernation starts to use swap, we freeze swap_map[]. Otherwise,
- * saved swap_map[] image to the disk will be an incomplete because it's
- * changing without synchronization with hibernation snap shot.
- * At resume, we just make swap_for_hibernation=false. We can forget
- * used maps easily.
- */
-void hibernation_freeze_swap(void)
-{
-	int i;
-
-	spin_lock(&swap_lock);
-
-	printk(KERN_INFO "PM: Freeze Swap\n");
-	swap_for_hibernation = true;
-	for (i = 0; i < MAX_SWAPFILES; i++)
-		hibernation_offset[i] = 1;
-	spin_unlock(&swap_lock);
-}
-
-void hibernation_thaw_swap(void)
-{
-	spin_lock(&swap_lock);
-	if (swap_for_hibernation) {
-		printk(KERN_INFO "PM: Thaw Swap\n");
-		swap_for_hibernation = false;
-	}
-	spin_unlock(&swap_lock);
-}
-
-/*
- * Because updateing swap_map[] can make not-saved-status-change,
- * we use our own easy allocator.
- * Please see kernel/power/swap.c, Used swaps are recorded into
- * RB-tree.
- */
-swp_entry_t get_swap_for_hibernation(int type)
-{
-	pgoff_t off;
-	swp_entry_t val = {0};
-	struct swap_info_struct *si;
-
-	spin_lock(&swap_lock);
-
-	si = swap_info[type];
-	if (!si || !(si->flags & SWP_WRITEOK))
-		goto done;
-
-	for (off = hibernation_offset[type]; off < si->max; ++off) {
-		if (!si->swap_map[off])
-			break;
-	}
-	if (off < si->max) {
-		val = swp_entry(type, off);
-		hibernation_offset[type] = off + 1;
-	}
-done:
-	spin_unlock(&swap_lock);
-	return val;
-}
-
-void swap_free_for_hibernation(swp_entry_t ent)
-{
-	/* Nothing to do */
-}
-
 /*
  * Find the swap type that corresponds to given device (if any).
  *
@@ -2084,7 +2047,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
 		p->flags |= SWP_SOLIDSTATE;
 		p->cluster_next = 1 + (random32() % p->highest_bit);
 	}
-	if (discard_swap(p) == 0)
+	if (discard_swap(p) == 0 && (swap_flags & SWAP_FLAG_DISCARD))
 		p->flags |= SWP_DISCARDABLE;
 }
 
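With the last hunk, SWP_DISCARDABLE is set only when the initial discard_swap() succeeds and userspace passed SWAP_FLAG_DISCARD to swapon(2). A hedged userspace sketch of opting in; the flag value is taken from <linux/swap.h>, and whether <sys/swap.h> already defines it depends on the libc version:

#include <sys/swap.h>

#ifndef SWAP_FLAG_DISCARD
#define SWAP_FLAG_DISCARD 0x10000	/* from <linux/swap.h> */
#endif

/* Enable a swap device and ask the kernel to discard freed swap pages. */
int enable_swap_with_discard(const char *path)
{
	return swapon(path, SWAP_FLAG_DISCARD);	/* 0 on success, -1 + errno */
}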
