Diffstat (limited to 'mm/swapfile.c')
-rw-r--r--  mm/swapfile.c | 43
1 file changed, 9 insertions(+), 34 deletions(-)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index e5fd5385f0cc..cc367f7e75d8 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -395,6 +395,9 @@ void free_swap_and_cache(swp_entry_t entry)
 	struct swap_info_struct * p;
 	struct page *page = NULL;
 
+	if (is_migration_entry(entry))
+		return;
+
 	p = swap_info_get(entry);
 	if (p) {
 		if (swap_entry_free(p, swp_offset(entry)) == 1) {
@@ -615,15 +618,6 @@ static int unuse_mm(struct mm_struct *mm,
 	return 0;
 }
 
-#ifdef CONFIG_MIGRATION
-int remove_vma_swap(struct vm_area_struct *vma, struct page *page)
-{
-	swp_entry_t entry = { .val = page_private(page) };
-
-	return unuse_vma(vma, entry, page);
-}
-#endif
-
 /*
  * Scan swap_map from current position to next entry still in use.
  * Recycle to start on reaching the end, returning 0 when empty.
@@ -716,7 +710,6 @@ static int try_to_unuse(unsigned int type)
 		 */
 		swap_map = &si->swap_map[i];
 		entry = swp_entry(type, i);
-again:
 		page = read_swap_cache_async(entry, NULL, 0);
 		if (!page) {
 			/*
@@ -751,12 +744,6 @@ again:
 		wait_on_page_locked(page);
 		wait_on_page_writeback(page);
 		lock_page(page);
-		if (!PageSwapCache(page)) {
-			/* Page migration has occured */
-			unlock_page(page);
-			page_cache_release(page);
-			goto again;
-		}
 		wait_on_page_writeback(page);
 
 		/*
@@ -785,10 +772,8 @@ again:
 			while (*swap_map > 1 && !retval &&
 					(p = p->next) != &start_mm->mmlist) {
 				mm = list_entry(p, struct mm_struct, mmlist);
-				if (atomic_inc_return(&mm->mm_users) == 1) {
-					atomic_dec(&mm->mm_users);
+				if (!atomic_inc_not_zero(&mm->mm_users))
 					continue;
-				}
 				spin_unlock(&mmlist_lock);
 				mmput(prev_mm);
 				prev_mm = mm;
@@ -1407,19 +1392,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
 		if (!(p->flags & SWP_USED))
 			break;
 	error = -EPERM;
-	/*
-	 * Test if adding another swap device is possible. There are
-	 * two limiting factors: 1) the number of bits for the swap
-	 * type swp_entry_t definition and 2) the number of bits for
-	 * the swap type in the swap ptes as defined by the different
-	 * architectures. To honor both limitations a swap entry
-	 * with swap offset 0 and swap type ~0UL is created, encoded
-	 * to a swap pte, decoded to a swp_entry_t again and finally
-	 * the swap type part is extracted. This will mask all bits
-	 * from the initial ~0UL that can't be encoded in either the
-	 * swp_entry_t or the architecture definition of a swap pte.
-	 */
-	if (type > swp_type(pte_to_swp_entry(swp_entry_to_pte(swp_entry(~0UL,0))))) {
+	if (type >= MAX_SWAPFILES) {
 		spin_unlock(&swap_lock);
 		goto out;
 	}
@@ -1504,8 +1477,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
 		error = -EINVAL;
 		goto bad_swap;
 	}
-	page = read_cache_page(mapping, 0,
-			(filler_t *)mapping->a_ops->readpage, swap_file);
+	page = read_mapping_page(mapping, 0, swap_file);
 	if (IS_ERR(page)) {
 		error = PTR_ERR(page);
 		goto bad_swap;
@@ -1709,6 +1681,9 @@ int swap_duplicate(swp_entry_t entry)
 	unsigned long offset, type;
 	int result = 0;
 
+	if (is_migration_entry(entry))
+		return 1;
+
 	type = swp_type(entry);
 	if (type >= nr_swapfiles)
 		goto bad_file;