author		Jens Axboe <jaxboe@fusionio.com>	2010-10-19 03:13:04 -0400
committer	Jens Axboe <jaxboe@fusionio.com>	2010-10-19 03:13:04 -0400
commit		fa251f89903d73989e2f63e13d0eaed1e07ce0da
tree		3f7fe779941e3b6d67754dd7c44a32f48ea47c74	/mm/swapfile.c
parent		dd3932eddf428571762596e17b65f5dc92ca361b
parent		cd07202cc8262e1669edff0d97715f3dd9260917
Merge branch 'v2.6.36-rc8' into for-2.6.37/barrier
Conflicts:
block/blk-core.c
drivers/block/loop.c
mm/swapfile.c
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Diffstat (limited to 'mm/swapfile.c')
-rw-r--r--	mm/swapfile.c	120
1 file changed, 43 insertions(+), 77 deletions(-)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index e132e1708acc..9fc7bac7db0c 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -47,8 +47,6 @@ long nr_swap_pages;
 long total_swap_pages;
 static int least_priority;
 
-static bool swap_for_hibernation;
-
 static const char Bad_file[] = "Bad swap file entry ";
 static const char Unused_file[] = "Unused swap file entry ";
 static const char Bad_offset[] = "Bad swap offset entry ";
@@ -317,10 +315,8 @@ checks:
 	if (offset > si->highest_bit)
 		scan_base = offset = si->lowest_bit;
 
-	/* reuse swap entry of cache-only swap if not hibernation. */
-	if (vm_swap_full()
-	    && usage == SWAP_HAS_CACHE
-	    && si->swap_map[offset] == SWAP_HAS_CACHE) {
+	/* reuse swap entry of cache-only swap if not busy. */
+	if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
 		int swap_was_freed;
 		spin_unlock(&swap_lock);
 		swap_was_freed = __try_to_reclaim_swap(si, offset);
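The "busy" test above is vm_swap_full(). In kernels of this vintage it is, as far as I can tell, a one-line macro in include/linux/swap.h, so cache-only entries start being reclaimed once more than half of swap is allocated:

	/* Hedged sketch of the include/linux/swap.h macro this hunk relies on:
	 * "full" means over half of the swap space is already in use. */
	#define vm_swap_full() (nr_swap_pages*2 < total_swap_pages)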
@@ -450,8 +446,6 @@ swp_entry_t get_swap_page(void)
 	spin_lock(&swap_lock);
 	if (nr_swap_pages <= 0)
 		goto noswap;
-	if (swap_for_hibernation)
-		goto noswap;
 	nr_swap_pages--;
 
 	for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) {
@@ -484,6 +478,28 @@ noswap:
 	return (swp_entry_t) {0};
 }
 
+/* The only caller of this function is now the suspend routine */
+swp_entry_t get_swap_page_of_type(int type)
+{
+	struct swap_info_struct *si;
+	pgoff_t offset;
+
+	spin_lock(&swap_lock);
+	si = swap_info[type];
+	if (si && (si->flags & SWP_WRITEOK)) {
+		nr_swap_pages--;
+		/* This is called for allocating swap entry, not cache */
+		offset = scan_swap_map(si, 1);
+		if (offset) {
+			spin_unlock(&swap_lock);
+			return swp_entry(type, offset);
+		}
+		nr_swap_pages++;
+	}
+	spin_unlock(&swap_lock);
+	return (swp_entry_t) {0};
+}
+
 static struct swap_info_struct *swap_info_get(swp_entry_t entry)
 {
 	struct swap_info_struct *p;
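For context, the suspend-side caller that the new comment refers to lives in kernel/power/swap.c. Paraphrased from that file (so treat names and details as approximate), its allocator wraps get_swap_page_of_type() roughly like this:

	/* Approximate shape of alloc_swapdev_block() in kernel/power/swap.c:
	 * grab one swap page for the hibernation image and record it in the
	 * RB-tree of used extents, so it can be released again on error. */
	sector_t alloc_swapdev_block(int swap)
	{
		unsigned long offset;

		offset = swp_offset(get_swap_page_of_type(swap));
		if (offset) {
			if (swsusp_extents_insert(offset))
				swap_free(swp_entry(swap, offset));
			else
				return swapdev_block(swap, offset);
		}
		return 0;
	}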
@@ -667,6 +683,24 @@ int try_to_free_swap(struct page *page)
 	if (page_swapcount(page))
 		return 0;
 
+	/*
+	 * Once hibernation has begun to create its image of memory,
+	 * there's a danger that one of the calls to try_to_free_swap()
+	 * - most probably a call from __try_to_reclaim_swap() while
+	 * hibernation is allocating its own swap pages for the image,
+	 * but conceivably even a call from memory reclaim - will free
+	 * the swap from a page which has already been recorded in the
+	 * image as a clean swapcache page, and then reuse its swap for
+	 * another page of the image.  On waking from hibernation, the
+	 * original page might be freed under memory pressure, then
+	 * later read back in from swap, now with the wrong data.
+	 *
+	 * Hibernation clears bits from gfp_allowed_mask to prevent
+	 * memory reclaim from writing to disk, so check that here.
+	 */
+	if (!(gfp_allowed_mask & __GFP_IO))
+		return 0;
+
 	delete_from_swap_cache(page);
 	SetPageDirty(page);
 	return 1;
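The __GFP_IO test works because the hibernation core masks the globally allowed GFP bits before it starts snapshotting. In the 2.6.36-era tree the helper in mm/page_alloc.c looked roughly like the sketch below (the helper's name changed around this release, so take this as a hedged reconstruction); hibernate() calls it with GFP_IOFS, which clears __GFP_IO and makes the new check bail out:

	/* Hedged sketch of the mm/page_alloc.c helper: strip bits (e.g. GFP_IOFS)
	 * from gfp_allowed_mask for the duration of suspend/hibernation and
	 * return the previous mask so it can be restored afterwards. */
	gfp_t clear_gfp_allowed_mask(gfp_t mask)
	{
		gfp_t ret = gfp_allowed_mask;

		WARN_ON(!mutex_is_locked(&pm_mutex));
		gfp_allowed_mask &= ~mask;
		return ret;
	}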
@@ -743,74 +777,6 @@ int mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep)
 #endif
 
 #ifdef CONFIG_HIBERNATION
-
-static pgoff_t hibernation_offset[MAX_SWAPFILES];
-/*
- * Once hibernation starts to use swap, we freeze swap_map[]. Otherwise,
- * saved swap_map[] image to the disk will be an incomplete because it's
- * changing without synchronization with hibernation snap shot.
- * At resume, we just make swap_for_hibernation=false. We can forget
- * used maps easily.
- */
-void hibernation_freeze_swap(void)
-{
-	int i;
-
-	spin_lock(&swap_lock);
-
-	printk(KERN_INFO "PM: Freeze Swap\n");
-	swap_for_hibernation = true;
-	for (i = 0; i < MAX_SWAPFILES; i++)
-		hibernation_offset[i] = 1;
-	spin_unlock(&swap_lock);
-}
-
-void hibernation_thaw_swap(void)
-{
-	spin_lock(&swap_lock);
-	if (swap_for_hibernation) {
-		printk(KERN_INFO "PM: Thaw Swap\n");
-		swap_for_hibernation = false;
-	}
-	spin_unlock(&swap_lock);
-}
-
-/*
- * Because updateing swap_map[] can make not-saved-status-change,
- * we use our own easy allocator.
- * Please see kernel/power/swap.c, Used swaps are recorded into
- * RB-tree.
- */
-swp_entry_t get_swap_for_hibernation(int type)
-{
-	pgoff_t off;
-	swp_entry_t val = {0};
-	struct swap_info_struct *si;
-
-	spin_lock(&swap_lock);
-
-	si = swap_info[type];
-	if (!si || !(si->flags & SWP_WRITEOK))
-		goto done;
-
-	for (off = hibernation_offset[type]; off < si->max; ++off) {
-		if (!si->swap_map[off])
-			break;
-	}
-	if (off < si->max) {
-		val = swp_entry(type, off);
-		hibernation_offset[type] = off + 1;
-	}
-done:
-	spin_unlock(&swap_lock);
-	return val;
-}
-
-void swap_free_for_hibernation(swp_entry_t ent)
-{
-	/* Nothing to do */
-}
-
 /*
  * Find the swap type that corresponds to given device (if any).
  *
@@ -2081,7 +2047,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
 			p->flags |= SWP_SOLIDSTATE;
 			p->cluster_next = 1 + (random32() % p->highest_bit);
 		}
-		if (discard_swap(p) == 0)
+		if (discard_swap(p) == 0 && (swap_flags & SWAP_FLAG_DISCARD))
 			p->flags |= SWP_DISCARDABLE;
 	}
 
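After this change discard becomes opt-in: the device must support it (discard_swap() returns 0) and userspace must ask for it. A minimal, hypothetical user of the flag through the swapon(2) syscall:

	/* Hypothetical userspace snippet: request discard when enabling swap,
	 * so the kernel sets SWP_DISCARDABLE only if explicitly asked to. */
	#include <stdio.h>
	#include <sys/swap.h>

	#ifndef SWAP_FLAG_DISCARD
	#define SWAP_FLAG_DISCARD 0x10000	/* from <linux/swap.h> */
	#endif

	int enable_swap_with_discard(const char *dev)
	{
		if (swapon(dev, SWAP_FLAG_DISCARD) != 0) {
			perror("swapon");
			return -1;
		}
		return 0;
	}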