| author | Takashi Iwai <tiwai@suse.de> | 2012-03-18 13:22:37 -0400 |
|---|---|---|
| committer | Takashi Iwai <tiwai@suse.de> | 2012-03-18 13:22:37 -0400 |
| commit | cb3f2adc03ab055b19c677a6283523861fafebdd (patch) | |
| tree | 59cfb6800f0635a4aec16c8e0da619f27e51ee79 /mm/vmscan.c | |
| parent | 44c76a960a62fcc46cbcaa0a22a34e666a729329 (diff) | |
| parent | 828006de1bddf83b6ecf03ec459c15f7c7c22db7 (diff) | |
Merge branch 'topic/asoc' into for-linus
Diffstat (limited to 'mm/vmscan.c')

| -rw-r--r-- | mm/vmscan.c | 124 |

1 file changed, 42 insertions, 82 deletions
```diff
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2880396f7953..c52b23552659 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -26,7 +26,6 @@
 #include <linux/buffer_head.h>	/* for try_to_release_page(),
 					buffer_heads_over_limit */
 #include <linux/mm_inline.h>
-#include <linux/pagevec.h>
 #include <linux/backing-dev.h>
 #include <linux/rmap.h>
 #include <linux/topology.h>
@@ -661,7 +660,7 @@ redo:
 	 * When racing with an mlock or AS_UNEVICTABLE clearing
 	 * (page is unlocked) make sure that if the other thread
 	 * does not observe our setting of PG_lru and fails
-	 * isolation/check_move_unevictable_page,
+	 * isolation/check_move_unevictable_pages,
 	 * we see PG_mlocked/AS_UNEVICTABLE cleared below and move
 	 * the page back to the evictable list.
 	 *
@@ -3499,100 +3498,61 @@ int page_evictable(struct page *page, struct vm_area_struct *vma)
 	return 1;
 }
 
+#ifdef CONFIG_SHMEM
 /**
- * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
- * @page: page to check evictability and move to appropriate lru list
- * @zone: zone page is in
+ * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list
+ * @pages: array of pages to check
+ * @nr_pages: number of pages to check
  *
- * Checks a page for evictability and moves the page to the appropriate
- * zone lru list.
+ * Checks pages for evictability and moves them to the appropriate lru list.
  *
- * Restrictions: zone->lru_lock must be held, page must be on LRU and must
- * have PageUnevictable set.
+ * This function is only used for SysV IPC SHM_UNLOCK.
  */
-static void check_move_unevictable_page(struct page *page, struct zone *zone)
+void check_move_unevictable_pages(struct page **pages, int nr_pages)
 {
 	struct lruvec *lruvec;
+	struct zone *zone = NULL;
+	int pgscanned = 0;
+	int pgrescued = 0;
+	int i;
 
-	VM_BUG_ON(PageActive(page));
-retry:
-	ClearPageUnevictable(page);
-	if (page_evictable(page, NULL)) {
-		enum lru_list l = page_lru_base_type(page);
-
-		__dec_zone_state(zone, NR_UNEVICTABLE);
-		lruvec = mem_cgroup_lru_move_lists(zone, page,
-						   LRU_UNEVICTABLE, l);
-		list_move(&page->lru, &lruvec->lists[l]);
-		__inc_zone_state(zone, NR_INACTIVE_ANON + l);
-		__count_vm_event(UNEVICTABLE_PGRESCUED);
-	} else {
-		/*
-		 * rotate unevictable list
-		 */
-		SetPageUnevictable(page);
-		lruvec = mem_cgroup_lru_move_lists(zone, page, LRU_UNEVICTABLE,
-						   LRU_UNEVICTABLE);
-		list_move(&page->lru, &lruvec->lists[LRU_UNEVICTABLE]);
-		if (page_evictable(page, NULL))
-			goto retry;
-	}
-}
-
-/**
- * scan_mapping_unevictable_pages - scan an address space for evictable pages
- * @mapping: struct address_space to scan for evictable pages
- *
- * Scan all pages in mapping.  Check unevictable pages for
- * evictability and move them to the appropriate zone lru list.
- */
-void scan_mapping_unevictable_pages(struct address_space *mapping)
-{
-	pgoff_t next = 0;
-	pgoff_t end = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >>
-			PAGE_CACHE_SHIFT;
-	struct zone *zone;
-	struct pagevec pvec;
-
-	if (mapping->nrpages == 0)
-		return;
-
-	pagevec_init(&pvec, 0);
-	while (next < end &&
-		pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
-		int i;
-		int pg_scanned = 0;
-
-		zone = NULL;
-
-		for (i = 0; i < pagevec_count(&pvec); i++) {
-			struct page *page = pvec.pages[i];
-			pgoff_t page_index = page->index;
-			struct zone *pagezone = page_zone(page);
+	for (i = 0; i < nr_pages; i++) {
+		struct page *page = pages[i];
+		struct zone *pagezone;
 
-			pg_scanned++;
-			if (page_index > next)
-				next = page_index;
-			next++;
+		pgscanned++;
+		pagezone = page_zone(page);
+		if (pagezone != zone) {
+			if (zone)
+				spin_unlock_irq(&zone->lru_lock);
+			zone = pagezone;
+			spin_lock_irq(&zone->lru_lock);
+		}
 
-			if (pagezone != zone) {
-				if (zone)
-					spin_unlock_irq(&zone->lru_lock);
-				zone = pagezone;
-				spin_lock_irq(&zone->lru_lock);
-			}
+		if (!PageLRU(page) || !PageUnevictable(page))
+			continue;
 
-			if (PageLRU(page) && PageUnevictable(page))
-				check_move_unevictable_page(page, zone);
+		if (page_evictable(page, NULL)) {
+			enum lru_list lru = page_lru_base_type(page);
+
+			VM_BUG_ON(PageActive(page));
+			ClearPageUnevictable(page);
+			__dec_zone_state(zone, NR_UNEVICTABLE);
+			lruvec = mem_cgroup_lru_move_lists(zone, page,
+						LRU_UNEVICTABLE, lru);
+			list_move(&page->lru, &lruvec->lists[lru]);
+			__inc_zone_state(zone, NR_INACTIVE_ANON + lru);
+			pgrescued++;
 		}
-		if (zone)
-			spin_unlock_irq(&zone->lru_lock);
-		pagevec_release(&pvec);
-
-		count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
 	}
 
+	if (zone) {
+		__count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
+		__count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
+		spin_unlock_irq(&zone->lru_lock);
+	}
 }
+#endif /* CONFIG_SHMEM */
 
 static void warn_scan_unevictable_pages(void)
 {
```
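
The shape of the rewrite: the old scan_mapping_unevictable_pages() did its own pagevec walk inside vmscan.c, while the new check_move_unevictable_pages() takes a caller-supplied page array, batches zone->lru_lock across runs of same-zone pages instead of locking per page, and flushes the PGSCANNED/PGRESCUED counters once under the final lock hold. For illustration, here is a minimal caller-side sketch, not part of this commit: unlock_mapping_sketch() is an invented name, and the walk is a simplified use of the 3.3-era pagevec API to feed pages into the new function in batches.

```c
/*
 * Hypothetical caller sketch (not from this commit): feed a mapping's
 * pages to check_move_unevictable_pages() in PAGEVEC_SIZE batches,
 * the way a SHM_UNLOCK-style path might after this change.
 */
#include <linux/pagevec.h>
#include <linux/sched.h>	/* cond_resched() */
#include <linux/swap.h>		/* check_move_unevictable_pages() */

static void unlock_mapping_sketch(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t next = 0;

	pagevec_init(&pvec, 0);		/* 0: pages are not "cold" */
	while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		/* resume the next lookup after the last page found */
		next = pvec.pages[pagevec_count(&pvec) - 1]->index + 1;
		check_move_unevictable_pages(pvec.pages,
					     pagevec_count(&pvec));
		pagevec_release(&pvec);
		cond_resched();		/* keep latency bounded between batches */
	}
}
```

The real SHM_UNLOCK caller (shmem_unlock_mapping() in mm/shmem.c) additionally has to cope with swap entries found in the mapping, which this simplified walk glosses over; the point here is only the batched calling convention that replaces the old per-page lock/unlock cycle.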