author    Steven Whitehouse <swhiteho@redhat.com>    2006-07-03 10:25:08 -0400
committer Steven Whitehouse <swhiteho@redhat.com>    2006-07-03 10:25:08 -0400
commit    0a1340c185734a57fbf4775927966ad4a1347b02 (patch)
tree      d9ed8f0dd809a7c542a3356601125ea5b5aaa804 /mm/swapfile.c
parent    af18ddb8864b096e3ed4732e2d4b21c956dcfe3a (diff)
parent    29454dde27d8e340bb1987bad9aa504af7081eba (diff)

Merge rsync://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6

Conflicts:

        include/linux/kernel.h
Diffstat (limited to 'mm/swapfile.c')
-rw-r--r--  mm/swapfile.c | 44
1 file changed, 9 insertions(+), 35 deletions(-)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index e5fd5385f0cc..e70d6c6d6fee 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -5,7 +5,6 @@
  * Swap reorganised 29.12.95, Stephen Tweedie
  */
 
-#include <linux/config.h>
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
 #include <linux/mman.h>
@@ -395,6 +394,9 @@ void free_swap_and_cache(swp_entry_t entry)
        struct swap_info_struct * p;
        struct page *page = NULL;
 
+       if (is_migration_entry(entry))
+               return;
+
        p = swap_info_get(entry);
        if (p) {
                if (swap_entry_free(p, swp_offset(entry)) == 1) {
@@ -615,15 +617,6 @@ static int unuse_mm(struct mm_struct *mm,
        return 0;
 }
 
-#ifdef CONFIG_MIGRATION
-int remove_vma_swap(struct vm_area_struct *vma, struct page *page)
-{
-       swp_entry_t entry = { .val = page_private(page) };
-
-       return unuse_vma(vma, entry, page);
-}
-#endif
-
 /*
  * Scan swap_map from current position to next entry still in use.
  * Recycle to start on reaching the end, returning 0 when empty.
@@ -716,7 +709,6 @@ static int try_to_unuse(unsigned int type)
                 */
                swap_map = &si->swap_map[i];
                entry = swp_entry(type, i);
-again:
                page = read_swap_cache_async(entry, NULL, 0);
                if (!page) {
                        /*
@@ -751,12 +743,6 @@ again:
                wait_on_page_locked(page);
                wait_on_page_writeback(page);
                lock_page(page);
-               if (!PageSwapCache(page)) {
-                       /* Page migration has occured */
-                       unlock_page(page);
-                       page_cache_release(page);
-                       goto again;
-               }
                wait_on_page_writeback(page);
 
                /*
@@ -785,10 +771,8 @@ again:
                while (*swap_map > 1 && !retval &&
                                (p = p->next) != &start_mm->mmlist) {
                        mm = list_entry(p, struct mm_struct, mmlist);
-                       if (atomic_inc_return(&mm->mm_users) == 1) {
-                               atomic_dec(&mm->mm_users);
+                       if (!atomic_inc_not_zero(&mm->mm_users))
                                continue;
-                       }
                        spin_unlock(&mmlist_lock);
                        mmput(prev_mm);
                        prev_mm = mm;
@@ -1407,19 +1391,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
                if (!(p->flags & SWP_USED))
                        break;
        error = -EPERM;
-       /*
-        * Test if adding another swap device is possible. There are
-        * two limiting factors: 1) the number of bits for the swap
-        * type swp_entry_t definition and 2) the number of bits for
-        * the swap type in the swap ptes as defined by the different
-        * architectures. To honor both limitations a swap entry
-        * with swap offset 0 and swap type ~0UL is created, encoded
-        * to a swap pte, decoded to a swp_entry_t again and finally
-        * the swap type part is extracted. This will mask all bits
-        * from the initial ~0UL that can't be encoded in either the
-        * swp_entry_t or the architecture definition of a swap pte.
-        */
-       if (type > swp_type(pte_to_swp_entry(swp_entry_to_pte(swp_entry(~0UL,0))))) {
+       if (type >= MAX_SWAPFILES) {
                spin_unlock(&swap_lock);
                goto out;
        }
@@ -1504,8 +1476,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
                error = -EINVAL;
                goto bad_swap;
        }
-       page = read_cache_page(mapping, 0,
-                       (filler_t *)mapping->a_ops->readpage, swap_file);
+       page = read_mapping_page(mapping, 0, swap_file);
        if (IS_ERR(page)) {
                error = PTR_ERR(page);
                goto bad_swap;
@@ -1709,6 +1680,9 @@ int swap_duplicate(swp_entry_t entry)
        unsigned long offset, type;
        int result = 0;
 
+       if (is_migration_entry(entry))
+               return 1;
+
        type = swp_type(entry);
        if (type >= nr_swapfiles)
                goto bad_file;
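
Note: the most instructive change this merge brings into try_to_unuse() is the
switch from an open-coded increment-then-check on mm->mm_users to
atomic_inc_not_zero(). The old sequence atomic_inc_return()/atomic_dec() could
momentarily raise a refcount that had already reached zero, i.e. briefly
"resurrect" an mm_struct that was being torn down. Below is a minimal
userspace sketch of the same idiom built on C11 atomics; inc_not_zero() and
the demo counter are illustrative stand-ins, not the kernel's
atomic_inc_not_zero() implementation.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical userspace analogue of atomic_inc_not_zero(): take a
 * reference only if at least one reference still exists. Unlike
 * "inc, then check, then dec", the count is never observed above
 * zero for an object that had already dropped to zero.
 */
static bool inc_not_zero(atomic_int *count)
{
        int old = atomic_load(count);

        while (old != 0) {
                /* On failure the CAS reloads 'old', so the zero test reruns. */
                if (atomic_compare_exchange_weak(count, &old, old + 1))
                        return true;    /* reference taken */
        }
        return false;   /* object already dying; caller must skip it */
}

int main(void)
{
        atomic_int users = 1;

        printf("live object grabbed: %d\n", inc_not_zero(&users));     /* 1 */
        atomic_store(&users, 0);        /* simulate the last reference dropping */
        printf("dead object grabbed: %d\n", inc_not_zero(&users));     /* 0 */
        return 0;
}

The is_migration_entry() checks added to free_swap_and_cache() and
swap_duplicate() follow a similar bail-out-early pattern: migration entries
reuse the swap-entry encoding with a reserved swap type, so both functions
must return before treating such an entry as an index into a real swap
device's tables.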