Diffstat (limited to 'mm/vmscan.c')
 mm/vmscan.c | 56 +++++++++++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 43 insertions(+), 13 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5cbf64dd79c1..ba4e87df3fc6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -579,6 +579,40 @@ redo:
 	put_page(page);		/* drop ref from isolate */
 }
 
+enum page_references {
+	PAGEREF_RECLAIM,
+	PAGEREF_RECLAIM_CLEAN,
+	PAGEREF_ACTIVATE,
+};
+
+static enum page_references page_check_references(struct page *page,
+						  struct scan_control *sc)
+{
+	unsigned long vm_flags;
+	int referenced;
+
+	referenced = page_referenced(page, 1, sc->mem_cgroup, &vm_flags);
+	if (!referenced)
+		return PAGEREF_RECLAIM;
+
+	/* Lumpy reclaim - ignore references */
+	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+		return PAGEREF_RECLAIM;
+
+	/*
+	 * Mlock lost the isolation race with us.  Let try_to_unmap()
+	 * move the page to the unevictable list.
+	 */
+	if (vm_flags & VM_LOCKED)
+		return PAGEREF_RECLAIM;
+
+	if (page_mapping_inuse(page))
+		return PAGEREF_ACTIVATE;
+
+	/* Reclaim if clean, defer dirty pages to writeback */
+	return PAGEREF_RECLAIM_CLEAN;
+}
+
 /*
  * shrink_page_list() returns the number of reclaimed pages
  */
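The new helper encodes a strict priority order: no references at all, then the lumpy-reclaim override, then the mlock isolation race, then mapping activity. As a reading aid only, here is a minimal userspace sketch of that decision order; classify() is a hypothetical stand-in, its boolean arguments stub out page_referenced(), the VM_LOCKED test and page_mapping_inuse(), and PAGE_ALLOC_COSTLY_ORDER is 3 as in the kernel:

	/* Illustration only -- not kernel code.  Each argument stubs out
	 * one of the kernel predicates so the ordering of the tests is
	 * visible. */
	#include <stdio.h>
	#include <stdbool.h>

	#define PAGE_ALLOC_COSTLY_ORDER 3	/* same value as the kernel's */

	enum page_references {
		PAGEREF_RECLAIM,
		PAGEREF_RECLAIM_CLEAN,
		PAGEREF_ACTIVATE,
	};

	static enum page_references
	classify(bool referenced, int order, bool vm_locked, bool mapping_inuse)
	{
		if (!referenced)
			return PAGEREF_RECLAIM;		/* unreferenced: just reclaim */
		if (order > PAGE_ALLOC_COSTLY_ORDER)
			return PAGEREF_RECLAIM;		/* lumpy reclaim ignores references */
		if (vm_locked)
			return PAGEREF_RECLAIM;		/* try_to_unmap() will move it */
		if (mapping_inuse)
			return PAGEREF_ACTIVATE;	/* keep it on the active list */
		return PAGEREF_RECLAIM_CLEAN;		/* reclaim, but only if clean */
	}

	int main(void)
	{
		/* order-9 allocation: references lose to lumpy reclaim */
		printf("%d\n", classify(true, 9, false, true));	/* 0 = PAGEREF_RECLAIM */
		/* order-0, referenced, mapping in use: activate */
		printf("%d\n", classify(true, 0, false, true));	/* 2 = PAGEREF_ACTIVATE */
		return 0;
	}
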
@@ -590,16 +624,15 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 	struct pagevec freed_pvec;
 	int pgactivate = 0;
 	unsigned long nr_reclaimed = 0;
-	unsigned long vm_flags;
 
 	cond_resched();
 
 	pagevec_init(&freed_pvec, 1);
 	while (!list_empty(page_list)) {
+		enum page_references references;
 		struct address_space *mapping;
 		struct page *page;
 		int may_enter_fs;
-		int referenced;
 
 		cond_resched();
 
@@ -641,17 +674,14 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			goto keep_locked;
 		}
 
-		referenced = page_referenced(page, 1,
-						sc->mem_cgroup, &vm_flags);
-		/*
-		 * In active use or really unfreeable?  Activate it.
-		 * If page which have PG_mlocked lost isoltation race,
-		 * try_to_unmap moves it to unevictable list
-		 */
-		if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
-					referenced && page_mapping_inuse(page)
-					&& !(vm_flags & VM_LOCKED))
+		references = page_check_references(page, sc);
+		switch (references) {
+		case PAGEREF_ACTIVATE:
 			goto activate_locked;
+		case PAGEREF_RECLAIM:
+		case PAGEREF_RECLAIM_CLEAN:
+			; /* try to reclaim the page below */
+		}
 
 		/*
 		 * Anonymous process memory has backing store?
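One design note on the switch above: it deliberately has no default label. Assuming -Wswitch is in effect (it is part of -Wall in both gcc and clang), the compiler then warns about any enumerator that is not handled, so a later addition to enum page_references cannot silently bypass this decision point. A sketch of the effect, with PAGEREF_KEEP as a purely hypothetical new value and classify_site() as a hypothetical caller:

	/* Sketch only: PAGEREF_KEEP is a hypothetical later addition. */
	enum page_references {
		PAGEREF_RECLAIM,
		PAGEREF_RECLAIM_CLEAN,
		PAGEREF_ACTIVATE,
		PAGEREF_KEEP,
	};

	void classify_site(enum page_references references)
	{
		switch (references) {	/* -Wswitch: enumeration value
					 * 'PAGEREF_KEEP' not handled in switch */
		case PAGEREF_ACTIVATE:
			return;
		case PAGEREF_RECLAIM:
		case PAGEREF_RECLAIM_CLEAN:
			;	/* fall through to the reclaim path */
		}
	}

The lone semicolon mirrors the one in the patch: a case label cannot directly precede the closing brace in C, so an empty statement is required.
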
@@ -685,7 +715,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		}
 
 		if (PageDirty(page)) {
-			if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && referenced)
+			if (references == PAGEREF_RECLAIM_CLEAN)
 				goto keep_locked;
 			if (!may_enter_fs)
 				goto keep_locked;
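
The dirty-page test is where the classification changes behavior, as far as these hunks show: the old test kept any referenced page below the costly order, while the new one keeps only PAGEREF_RECLAIM_CLEAN, so a dirty mlocked page that lost the isolation race now falls through to try_to_unmap(), which per the comment in the new helper moves it to the unevictable list. A small standalone sketch of the one disagreeing case (the predicates are stubbed booleans, not kernel calls):

	/* Illustration only: the one case where the old and new dirty-page
	 * tests disagree -- a referenced, mlocked, order-0 page. */
	#include <assert.h>
	#include <stdbool.h>

	#define PAGE_ALLOC_COSTLY_ORDER 3

	int main(void)
	{
		int order = 0;
		bool referenced = true;
		bool vm_locked = true;		/* lost the isolation race */

		/* old: keep any referenced page below the costly order */
		bool old_keep = order <= PAGE_ALLOC_COSTLY_ORDER && referenced;

		/* new: VM_LOCKED classifies as PAGEREF_RECLAIM, never
		 * PAGEREF_RECLAIM_CLEAN, so the page is not deferred here
		 * and try_to_unmap() can park it on the unevictable list */
		bool new_keep = referenced && order <= PAGE_ALLOC_COSTLY_ORDER &&
				!vm_locked;	/* ...and !page_mapping_inuse() */

		assert(old_keep && !new_keep);
		return 0;
	}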