path: root/mm/swapfile.c
author	Dave Airlie <airlied@redhat.com>	2010-09-24 01:37:33 -0400
committer	Dave Airlie <airlied@redhat.com>	2010-09-24 01:37:33 -0400
commit	4a445f291ac3faa08f56eaeb6e44856b6b72b74c
tree	b66cc93311dc4c623113a87aa9b40730a21ac18e	/mm/swapfile.c
parent	cbc60ca04b342a4e1f2a1086a7277c077f07dbed
parent	a850ea30374ebed32a0724742601861853fde869
Merge remote branch 'origin/master' of /home/airlied/kernel//linux-2.6 into drm-core-next
Diffstat (limited to 'mm/swapfile.c')
-rw-r--r--	mm/swapfile.c	129
1 file changed, 46 insertions(+), 83 deletions(-)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 1f3f9c59a73a..7c703ff2f36f 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -47,8 +47,6 @@ long nr_swap_pages;
 long total_swap_pages;
 static int least_priority;
 
-static bool swap_for_hibernation;
-
 static const char Bad_file[] = "Bad swap file entry ";
 static const char Unused_file[] = "Unused swap file entry ";
 static const char Bad_offset[] = "Bad swap offset entry ";
@@ -141,8 +139,7 @@ static int discard_swap(struct swap_info_struct *si)
 		nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
 		if (nr_blocks) {
 			err = blkdev_issue_discard(si->bdev, start_block,
-					nr_blocks, GFP_KERNEL,
-					BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
+					nr_blocks, GFP_KERNEL, BLKDEV_IFL_WAIT);
 			if (err)
 				return err;
 			cond_resched();
@@ -153,8 +150,7 @@ static int discard_swap(struct swap_info_struct *si)
 		nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
 
 		err = blkdev_issue_discard(si->bdev, start_block,
-				nr_blocks, GFP_KERNEL,
-				BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
 		if (err)
+				nr_blocks, GFP_KERNEL, BLKDEV_IFL_WAIT);
 			break;
 
@@ -193,8 +189,7 @@ static void discard_swap_cluster(struct swap_info_struct *si,
 		start_block <<= PAGE_SHIFT - 9;
 		nr_blocks <<= PAGE_SHIFT - 9;
 		if (blkdev_issue_discard(si->bdev, start_block,
-			    nr_blocks, GFP_NOIO, BLKDEV_IFL_WAIT |
-					BLKDEV_IFL_BARRIER))
+			    nr_blocks, GFP_NOIO, BLKDEV_IFL_WAIT))
 			break;
 	}
 
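[Editor's note] The three hunks above stop passing BLKDEV_IFL_BARRIER to blkdev_issue_discard(), so swap discards are no longer issued as barriers; only BLKDEV_IFL_WAIT remains. For readers who want to poke at discard behaviour, the closest userspace analogue is the BLKDISCARD ioctl. A minimal, destructive sketch follows (the device path is a placeholder; run as root, and only against a disposable device):

/*
 * Minimal sketch: issue a discard to a block device via BLKDISCARD,
 * the userspace analogue of blkdev_issue_discard(). Destructive!
 * Build: cc -o blkdiscard-demo blkdiscard-demo.c   (run as root)
 */
#include <fcntl.h>
#include <linux/fs.h>		/* BLKDISCARD */
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	const char *dev = "/dev/sdX";		/* placeholder device */
	uint64_t range[2] = { 0, 1 << 20 };	/* byte offset, byte length */

	int fd = open(dev, O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, BLKDISCARD, &range) < 0)
		perror("BLKDISCARD");
	close(fd);
	return 0;
}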
@@ -320,10 +315,8 @@ checks:
 	if (offset > si->highest_bit)
 		scan_base = offset = si->lowest_bit;
 
-	/* reuse swap entry of cache-only swap if not hibernation. */
-	if (vm_swap_full()
-		&& usage == SWAP_HAS_CACHE
-		&& si->swap_map[offset] == SWAP_HAS_CACHE) {
+	/* reuse swap entry of cache-only swap if not busy. */
+	if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
 		int swap_was_freed;
 		spin_unlock(&swap_lock);
 		swap_was_freed = __try_to_reclaim_swap(si, offset);
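[Editor's note] For context on the gate above: vm_swap_full() in this era is defined in include/linux/swap.h as nr_swap_pages*2 < total_swap_pages, i.e. swap more than half full, and SWAP_HAS_CACHE marks a slot held only by the swap cache. A toy model of the check, with illustrative numbers rather than real kernel state:

/*
 * Toy model of the gate above: reclaim a cache-only slot only when
 * swap is more than half full. Numbers are illustrative, not kernel state.
 */
#include <stdbool.h>
#include <stdio.h>

#define SWAP_HAS_CACHE 0x40	/* same flag value as the kernel's swap_map */

static long nr_swap_pages = 100;	/* free slots (toy numbers) */
static long total_swap_pages = 300;

static bool vm_swap_full(void)
{
	return nr_swap_pages * 2 < total_swap_pages;
}

int main(void)
{
	unsigned char swap_map_entry = SWAP_HAS_CACHE;	/* cache-only slot */

	if (vm_swap_full() && swap_map_entry == SWAP_HAS_CACHE)
		puts("would try to reclaim this cache-only swap slot");
	return 0;
}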
@@ -453,8 +446,6 @@ swp_entry_t get_swap_page(void)
 	spin_lock(&swap_lock);
 	if (nr_swap_pages <= 0)
 		goto noswap;
-	if (swap_for_hibernation)
-		goto noswap;
 	nr_swap_pages--;
 
 	for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) {
@@ -487,6 +478,28 @@ noswap:
 	return (swp_entry_t) {0};
 }
 
+/* The only caller of this function is now the suspend routine */
+swp_entry_t get_swap_page_of_type(int type)
+{
+	struct swap_info_struct *si;
+	pgoff_t offset;
+
+	spin_lock(&swap_lock);
+	si = swap_info[type];
+	if (si && (si->flags & SWP_WRITEOK)) {
+		nr_swap_pages--;
+		/* This is called for allocating swap entry, not cache */
+		offset = scan_swap_map(si, 1);
+		if (offset) {
+			spin_unlock(&swap_lock);
+			return swp_entry(type, offset);
+		}
+		nr_swap_pages++;
+	}
+	spin_unlock(&swap_lock);
+	return (swp_entry_t) {0};
+}
+
 static struct swap_info_struct *swap_info_get(swp_entry_t entry)
 {
 	struct swap_info_struct *p;
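[Editor's note] get_swap_page_of_type() resurfaces here for the suspend path: it allocates one entry from a specific swap area, optimistically decrementing nr_swap_pages and undoing that if the scan fails. A self-contained toy model of that shape (all names and types are simplified stand-ins, not kernel code; scan_swap_map() is reduced to a first-fit scan):

/*
 * Toy model of get_swap_page_of_type(): allocate one slot from a
 * specific swap area, backing off if the scan finds nothing.
 */
#include <stdio.h>

#define MAX_SLOTS 8

struct toy_swap_info {
	int writeok;				/* stands in for SWP_WRITEOK */
	unsigned char swap_map[MAX_SLOTS];	/* 0 = free slot */
};

static long nr_swap_pages = MAX_SLOTS - 1;	/* offset 0 is never handed out */

/* Stand-in for scan_swap_map(): first free offset, 0 on failure. */
static unsigned long scan_swap_map(struct toy_swap_info *si)
{
	for (unsigned long off = 1; off < MAX_SLOTS; off++) {
		if (!si->swap_map[off]) {
			si->swap_map[off] = 1;
			return off;
		}
	}
	return 0;
}

static unsigned long get_swap_page_of_type(struct toy_swap_info *si)
{
	unsigned long offset = 0;

	if (si && si->writeok) {
		nr_swap_pages--;		/* optimistic, like the kernel */
		offset = scan_swap_map(si);
		if (!offset)
			nr_swap_pages++;	/* scan failed: undo the count */
	}
	return offset;
}

int main(void)
{
	struct toy_swap_info si = { .writeok = 1 };

	printf("first:  offset %lu\n", get_swap_page_of_type(&si));
	printf("second: offset %lu\n", get_swap_page_of_type(&si));
	printf("free slots left: %ld\n", nr_swap_pages);
	return 0;
}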
@@ -670,6 +683,24 @@ int try_to_free_swap(struct page *page)
 	if (page_swapcount(page))
 		return 0;
 
+	/*
+	 * Once hibernation has begun to create its image of memory,
+	 * there's a danger that one of the calls to try_to_free_swap()
+	 * - most probably a call from __try_to_reclaim_swap() while
+	 * hibernation is allocating its own swap pages for the image,
+	 * but conceivably even a call from memory reclaim - will free
+	 * the swap from a page which has already been recorded in the
+	 * image as a clean swapcache page, and then reuse its swap for
+	 * another page of the image. On waking from hibernation, the
+	 * original page might be freed under memory pressure, then
+	 * later read back in from swap, now with the wrong data.
+	 *
+	 * Hibernation clears bits from gfp_allowed_mask to prevent
+	 * memory reclaim from writing to disk, so check that here.
+	 */
+	if (!(gfp_allowed_mask & __GFP_IO))
+		return 0;
+
 	delete_from_swap_cache(page);
 	SetPageDirty(page);
 	return 1;
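[Editor's note] The new check keys off gfp_allowed_mask: while hibernation writes its image it masks __GFP_IO out of the globally allowed gfp bits, so paths that could start swap I/O can notice cheaply and back off. A toy model of that gate (the flag values mirror the kernel's gfp bits; everything else is illustrative):

/*
 * Toy model of the gfp_allowed_mask gate: a global mask of allowed
 * allocation behaviours that hibernation temporarily narrows.
 */
#include <stdbool.h>
#include <stdio.h>

#define __GFP_IO 0x40u		/* values mirror the kernel's gfp bits */
#define __GFP_FS 0x80u

static unsigned int gfp_allowed_mask = ~0u;	/* everything allowed */

static bool try_to_free_swap_allowed(void)
{
	/* Mirrors the new check: no swap freeing while I/O is masked off. */
	return gfp_allowed_mask & __GFP_IO;
}

int main(void)
{
	printf("before freeze: %d\n", try_to_free_swap_allowed());	/* 1 */

	gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);	/* image write begins */
	printf("during image write: %d\n", try_to_free_swap_allowed()); /* 0 */

	gfp_allowed_mask |= __GFP_IO | __GFP_FS;	/* resumed */
	printf("after thaw: %d\n", try_to_free_swap_allowed());	/* 1 */
	return 0;
}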
@@ -746,74 +777,6 @@ int mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep)
 #endif
 
 #ifdef CONFIG_HIBERNATION
-
-static pgoff_t hibernation_offset[MAX_SWAPFILES];
-/*
- * Once hibernation starts to use swap, we freeze swap_map[]. Otherwise,
- * saved swap_map[] image to the disk will be an incomplete because it's
- * changing without synchronization with hibernation snap shot.
- * At resume, we just make swap_for_hibernation=false. We can forget
- * used maps easily.
- */
-void hibernation_freeze_swap(void)
-{
-	int i;
-
-	spin_lock(&swap_lock);
-
-	printk(KERN_INFO "PM: Freeze Swap\n");
-	swap_for_hibernation = true;
-	for (i = 0; i < MAX_SWAPFILES; i++)
-		hibernation_offset[i] = 1;
-	spin_unlock(&swap_lock);
-}
-
-void hibernation_thaw_swap(void)
-{
-	spin_lock(&swap_lock);
-	if (swap_for_hibernation) {
-		printk(KERN_INFO "PM: Thaw Swap\n");
-		swap_for_hibernation = false;
-	}
-	spin_unlock(&swap_lock);
-}
-
-/*
- * Because updateing swap_map[] can make not-saved-status-change,
- * we use our own easy allocator.
- * Please see kernel/power/swap.c, Used swaps are recorded into
- * RB-tree.
- */
-swp_entry_t get_swap_for_hibernation(int type)
-{
-	pgoff_t off;
-	swp_entry_t val = {0};
-	struct swap_info_struct *si;
-
-	spin_lock(&swap_lock);
-
-	si = swap_info[type];
-	if (!si || !(si->flags & SWP_WRITEOK))
-		goto done;
-
-	for (off = hibernation_offset[type]; off < si->max; ++off) {
-		if (!si->swap_map[off])
-			break;
-	}
-	if (off < si->max) {
-		val = swp_entry(type, off);
-		hibernation_offset[type] = off + 1;
-	}
-done:
-	spin_unlock(&swap_lock);
-	return val;
-}
-
-void swap_free_for_hibernation(swp_entry_t ent)
-{
-	/* Nothing to do */
-}
-
 /*
  * Find the swap type that corresponds to given device (if any).
  *
@@ -2084,7 +2047,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
 			p->flags |= SWP_SOLIDSTATE;
 			p->cluster_next = 1 + (random32() % p->highest_bit);
 		}
-		if (discard_swap(p) == 0)
+		if (discard_swap(p) == 0 && (swap_flags & SWAP_FLAG_DISCARD))
 			p->flags |= SWP_DISCARDABLE;
 	}
 
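[Editor's note] The last hunk makes SWP_DISCARDABLE opt-in: even when discard_swap() succeeds, the flag is only set if the swapon caller asked for it with SWAP_FLAG_DISCARD. From userspace that request goes through swapon(2); a minimal sketch (the flag value matches include/linux/swap.h, the device path is a placeholder, and root is required):

/*
 * Minimal sketch: enable a swap device with discard requested,
 * matching the SWAP_FLAG_DISCARD gating above. Run as root.
 */
#include <stdio.h>
#include <sys/swap.h>

#ifndef SWAP_FLAG_DISCARD
#define SWAP_FLAG_DISCARD 0x10000	/* value from include/linux/swap.h */
#endif

int main(void)
{
	if (swapon("/dev/sdX2", SWAP_FLAG_DISCARD) < 0) {	/* placeholder */
		perror("swapon");
		return 1;
	}
	puts("swap enabled with discard requested");
	return 0;
}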