Diffstat (limited to 'mm')
-rw-r--r--	mm/vmscan.c	100
1 file changed, 87 insertions(+), 13 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 428c5801d4b4..261a56ee11b6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -593,20 +593,18 @@ static int isolate_lru_pages(int nr_to_scan, struct list_head *src,
 		page = lru_to_page(src);
 		prefetchw_prev_lru_page(page, src, flags);
 
-		if (!TestClearPageLRU(page))
-			BUG();
-		list_del(&page->lru);
-		if (get_page_testone(page)) {
-			/*
-			 * It is being freed elsewhere
-			 */
-			__put_page(page);
-			SetPageLRU(page);
-			list_add(&page->lru, src);
-			continue;
-		} else {
-			list_add(&page->lru, dst);
+		switch (__isolate_lru_page(page)) {
+		case 1:
+			/* Succeeded to isolate page */
+			list_move(&page->lru, dst);
 			nr_taken++;
+			break;
+		case -ENOENT:
+			/* Not possible to isolate */
+			list_move(&page->lru, src);
+			break;
+		default:
+			BUG();
 		}
 	}
 
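For reference, the switch above depends on the new __isolate_lru_page() helper, which is introduced elsewhere in this patch and is not visible in this hunk. Below is a minimal sketch of the logic it plausibly encapsulates, reconstructed only from the lines removed above; the name carries a _sketch suffix to make clear it is not the patch's actual helper and may differ from it.

/*
 * Hedged reconstruction, not the patch's actual helper: the body is
 * inferred from the code removed in the hunk above.
 */
static int __isolate_lru_page_sketch(struct page *page)
{
	if (!TestClearPageLRU(page))
		return 0;		/* page was not on an LRU list */

	if (get_page_testone(page)) {
		/* It is being freed elsewhere; undo the LRU-bit clear. */
		__put_page(page);
		SetPageLRU(page);
		return -ENOENT;
	}

	return 1;			/* isolated, with an extra reference held */
}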
@@ -614,6 +612,48 @@ static int isolate_lru_pages(int nr_to_scan, struct list_head *src,
 	return nr_taken;
 }
 
+static void lru_add_drain_per_cpu(void *dummy)
+{
+	lru_add_drain();
+}
+
+/*
+ * Isolate one page from the LRU lists and put it on the
+ * indicated list. Do necessary cache draining if the
+ * page is not on the LRU lists yet.
+ *
+ * Result:
+ *  0 = page not on LRU list
+ *  1 = page removed from LRU list and added to the specified list.
+ * -ENOENT = page is being freed elsewhere.
+ */
+int isolate_lru_page(struct page *page)
+{
+	int rc = 0;
+	struct zone *zone = page_zone(page);
+
+redo:
+	spin_lock_irq(&zone->lru_lock);
+	rc = __isolate_lru_page(page);
+	if (rc == 1) {
+		if (PageActive(page))
+			del_page_from_active_list(zone, page);
+		else
+			del_page_from_inactive_list(zone, page);
+	}
+	spin_unlock_irq(&zone->lru_lock);
+	if (rc == 0) {
+		/*
+		 * Maybe this page is still waiting for a cpu to drain it
+		 * from one of the lru lists?
+		 */
+		rc = schedule_on_each_cpu(lru_add_drain_per_cpu, NULL);
+		if (rc == 0 && PageLRU(page))
+			goto redo;
+	}
+	return rc;
+}
+
 /*
  * shrink_cache() adds the number of pages reclaimed to sc->nr_reclaimed
  */
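The comment block above documents the return convention for the new isolate_lru_page() entry point. Purely as an illustration of that convention, a hypothetical caller (not part of the patch) might look like this:

/* Hypothetical caller, shown only to illustrate the documented return codes. */
static int try_isolate(struct page *page, struct list_head *pagelist)
{
	switch (isolate_lru_page(page)) {
	case 1:
		/* Removed from the LRU: park it on the caller's private list. */
		list_add(&page->lru, pagelist);
		return 0;
	case -ENOENT:
		/* The page is being freed elsewhere. */
		return -ENOENT;
	default:
		/* 0: the page is not on any LRU list. */
		return -EBUSY;
	}
}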
@@ -679,6 +719,40 @@ done:
 	pagevec_release(&pvec);
 }
 
+static inline void move_to_lru(struct page *page)
+{
+	list_del(&page->lru);
+	if (PageActive(page)) {
+		/*
+		 * lru_cache_add_active checks that
+		 * the PG_active bit is off.
+		 */
+		ClearPageActive(page);
+		lru_cache_add_active(page);
+	} else {
+		lru_cache_add(page);
+	}
+	put_page(page);
+}
+
+/*
+ * Add isolated pages on the list back to the LRU
+ *
+ * returns the number of pages put back.
+ */
+int putback_lru_pages(struct list_head *l)
+{
+	struct page *page;
+	struct page *page2;
+	int count = 0;
+
+	list_for_each_entry_safe(page, page2, l, lru) {
+		move_to_lru(page);
+		count++;
+	}
+	return count;
+}
+
 /*
  * This moves pages from the active list to the inactive list.
  *
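putback_lru_pages() is the counterpart to isolate_lru_page(): it drains a private list back onto the appropriate LRU via move_to_lru(), which also drops the page reference held by the isolated page through put_page(). A hypothetical pairing of the two entry points, for illustration only (the function name below is invented):

/*
 * Hypothetical sketch, not part of the patch: isolate a page, operate on
 * it off the LRU, then hand anything left over back to the LRU lists.
 */
static void isolate_and_putback_example(struct page *page)
{
	LIST_HEAD(isolated);

	if (isolate_lru_page(page) != 1)
		return;		/* not on the LRU, or being freed elsewhere */

	list_add(&page->lru, &isolated);

	/* ... process the isolated page here ... */

	/* Pages still on the list go back to the active/inactive LRU. */
	putback_lru_pages(&isolated);
}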