 include/linux/mm_inline.h |  22 +
 include/linux/swap.h      |   3 +
 mm/vmscan.c               | 100 +-
 3 files changed, 112 insertions(+), 13 deletions(-)
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 47762ca695a5..49cc68af01f8 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -38,3 +38,25 @@ del_page_from_lru(struct zone *zone, struct page *page)
 		zone->nr_inactive--;
 	}
 }
+
+/*
+ * Isolate one page from the LRU lists.
+ *
+ * - zone->lru_lock must be held
+ */
+static inline int __isolate_lru_page(struct page *page)
+{
+	if (unlikely(!TestClearPageLRU(page)))
+		return 0;
+
+	if (get_page_testone(page)) {
+		/*
+		 * It is being freed elsewhere
+		 */
+		__put_page(page);
+		SetPageLRU(page);
+		return -ENOENT;
+	}
+
+	return 1;
+}
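
Note the contract: on success, __isolate_lru_page() only clears PG_lru and takes a page reference; the page is still linked into its list, so the caller must unlink it while still holding zone->lru_lock. A minimal sketch of the expected calling sequence (it mirrors isolate_lru_page() in mm/vmscan.c below; nothing beyond this patch's own helpers is assumed):

	struct zone *zone = page_zone(page);
	int rc;

	spin_lock_irq(&zone->lru_lock);
	rc = __isolate_lru_page(page);
	if (rc == 1) {
		/*
		 * PG_lru is clear and we hold an extra reference,
		 * but the page is still on the list: unlink it.
		 */
		if (PageActive(page))
			del_page_from_active_list(zone, page);
		else
			del_page_from_inactive_list(zone, page);
	}
	spin_unlock_irq(&zone->lru_lock);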
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 556617bcf7ac..a49112536c02 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -175,6 +175,9 @@ extern int try_to_free_pages(struct zone **, gfp_t);
 extern int shrink_all_memory(int);
 extern int vm_swappiness;
 
+extern int isolate_lru_page(struct page *p);
+extern int putback_lru_pages(struct list_head *l);
+
 #ifdef CONFIG_MMU
 /* linux/mm/shmem.c */
 extern int shmem_unuse(swp_entry_t entry, struct page *page);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 428c5801d4b4..261a56ee11b6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -593,20 +593,18 @@ static int isolate_lru_pages(int nr_to_scan, struct list_head *src,
 		page = lru_to_page(src);
 		prefetchw_prev_lru_page(page, src, flags);
 
-		if (!TestClearPageLRU(page))
-			BUG();
-		list_del(&page->lru);
-		if (get_page_testone(page)) {
-			/*
-			 * It is being freed elsewhere
-			 */
-			__put_page(page);
-			SetPageLRU(page);
-			list_add(&page->lru, src);
-			continue;
-		} else {
-			list_add(&page->lru, dst);
+		switch (__isolate_lru_page(page)) {
+		case 1:
+			/* Successfully isolated the page */
+			list_move(&page->lru, dst);
 			nr_taken++;
+			break;
+		case -ENOENT:
+			/* Not possible to isolate */
+			list_move(&page->lru, src);
+			break;
+		default:
+			BUG();
 		}
 	}
 
@@ -614,6 +612,48 @@ static int isolate_lru_pages(int nr_to_scan, struct list_head *src,
 	return nr_taken;
 }
 
+static void lru_add_drain_per_cpu(void *dummy)
+{
+	lru_add_drain();
+}
+
+/*
+ * Isolate one page from the LRU lists. Does the necessary
+ * per-cpu cache draining if the page is not on the
+ * LRU lists yet.
+ *
+ * Result:
+ *  0 = page not on any LRU list
+ *  1 = page removed from the LRU lists; the caller holds a reference.
+ * -ENOENT = page is being freed elsewhere.
+ */
+int isolate_lru_page(struct page *page)
+{
+	int rc = 0;
+	struct zone *zone = page_zone(page);
+
+redo:
+	spin_lock_irq(&zone->lru_lock);
+	rc = __isolate_lru_page(page);
+	if (rc == 1) {
+		if (PageActive(page))
+			del_page_from_active_list(zone, page);
+		else
+			del_page_from_inactive_list(zone, page);
+	}
+	spin_unlock_irq(&zone->lru_lock);
+	if (rc == 0) {
+		/*
+		 * Maybe this page is still waiting for a cpu to drain it
+		 * from one of the lru lists?
+		 */
+		rc = schedule_on_each_cpu(lru_add_drain_per_cpu, NULL);
+		if (rc == 0 && PageLRU(page))
+			goto redo;
+	}
+	return rc;
+}
+
 /*
  * shrink_cache() adds the number of pages reclaimed to sc->nr_reclaimed
  */
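
Because schedule_on_each_cpu() waits for the per-cpu drain work to complete, isolate_lru_page() may sleep and must only be called from process context with interrupts enabled. A sketch of how a caller would collect pages onto a private list (the pagelist name is purely illustrative; this patch itself adds no such caller):

	LIST_HEAD(pagelist);

	if (isolate_lru_page(page) == 1)
		/*
		 * Page is off the LRU with an elevated refcount; it
		 * stays ours until handed to putback_lru_pages().
		 */
		list_add(&page->lru, &pagelist);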
@@ -679,6 +719,40 @@ done:
 	pagevec_release(&pvec);
 }
 
+static inline void move_to_lru(struct page *page)
+{
+	list_del(&page->lru);
+	if (PageActive(page)) {
+		/*
+		 * lru_cache_add_active checks that
+		 * the PG_active bit is off.
+		 */
+		ClearPageActive(page);
+		lru_cache_add_active(page);
+	} else {
+		lru_cache_add(page);
+	}
+	put_page(page);
+}
+
+/*
+ * Add the isolated pages on the list back to the LRU.
+ *
+ * Returns the number of pages put back.
+ */
+int putback_lru_pages(struct list_head *l)
+{
+	struct page *page;
+	struct page *page2;
+	int count = 0;
+
+	list_for_each_entry_safe(page, page2, l, lru) {
+		move_to_lru(page);
+		count++;
+	}
+	return count;
+}
+
 /*
  * This moves pages from the active list to the inactive list.
  *
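
The two exports are meant to be used as a pair: isolate pages onto a private list, operate on them, then hand the leftovers back. A sketch of the intended round trip (the isolation step is shown above; any real consumer of these hooks lives outside this patch):

	LIST_HEAD(pagelist);
	int nr_back;

	/*
	 * ... fill pagelist via isolate_lru_page(), work on it, and
	 * delete fully-consumed pages from the list ...
	 */

	/*
	 * Everything still on the list is returned to the appropriate
	 * active/inactive list; move_to_lru() also drops the reference
	 * taken at isolation time.
	 */
	nr_back = putback_lru_pages(&pagelist);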