Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c  44
1 file changed, 6 insertions, 38 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 3b5860294bb6..7ea1440b53db 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -623,6 +623,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 * Try to allocate it some swap space here.
 		 */
 		if (PageAnon(page) && !PageSwapCache(page)) {
+			if (!(sc->gfp_mask & __GFP_IO))
+				goto keep_locked;
 			switch (try_to_munlock(page)) {
 			case SWAP_FAIL:		/* shouldn't happen */
 			case SWAP_AGAIN:
@@ -634,6 +636,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			}
 			if (!add_to_swap(page, GFP_ATOMIC))
 				goto activate_locked;
+			may_enter_fs = 1;
 		}
 #endif /* CONFIG_SWAP */
 
@@ -1386,9 +1389,9 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
 	file_prio = 200 - sc->swappiness;
 
 	/*
-	 *                  anon       recent_rotated[0]
-	 * %anon = 100 * ----------- / ----------------- * IO cost
-	 *               anon + file      rotate_sum
+	 * The amount of pressure on anon vs file pages is inversely
+	 * proportional to the fraction of recently scanned pages on
+	 * each list that were recently referenced and in active use.
 	 */
 	ap = (anon_prio + 1) * (zone->recent_scanned[0] + 1);
 	ap /= zone->recent_rotated[0] + 1;
@@ -2368,39 +2371,6 @@ int page_evictable(struct page *page, struct vm_area_struct *vma)
 	return 1;
 }
 
-static void show_page_path(struct page *page)
-{
-	char buf[256];
-	if (page_is_file_cache(page)) {
-		struct address_space *mapping = page->mapping;
-		struct dentry *dentry;
-		pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-
-		spin_lock(&mapping->i_mmap_lock);
-		dentry = d_find_alias(mapping->host);
-		printk(KERN_INFO "rescued: %s %lu\n",
-		       dentry_path(dentry, buf, 256), pgoff);
-		spin_unlock(&mapping->i_mmap_lock);
-	} else {
-#if defined(CONFIG_MM_OWNER) && defined(CONFIG_MMU)
-		struct anon_vma *anon_vma;
-		struct vm_area_struct *vma;
-
-		anon_vma = page_lock_anon_vma(page);
-		if (!anon_vma)
-			return;
-
-		list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
-			printk(KERN_INFO "rescued: anon %s\n",
-			       vma->vm_mm->owner->comm);
-			break;
-		}
-		page_unlock_anon_vma(anon_vma);
-#endif
-	}
-}
-
-
 /**
  * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
  * @page: page to check evictability and move to appropriate lru list
@@ -2421,8 +2391,6 @@ retry:
 	if (page_evictable(page, NULL)) {
 		enum lru_list l = LRU_INACTIVE_ANON + page_is_file_cache(page);
 
-		show_page_path(page);
-
 		__dec_zone_state(zone, NR_UNEVICTABLE);
 		list_move(&page->lru, &zone->lru[l].list);
 		__inc_zone_state(zone, NR_INACTIVE_ANON + l);
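
A note on the get_scan_ratio() hunk above: the removed ASCII formula and the new prose comment describe the same computation, which the ap/fp arithmetic that follows still performs. The sketch below is a minimal stand-alone C illustration of that proportion, not kernel code. It assumes anon_prio equals sc->swappiness (set just above the hunk in vmscan.c), assumes the file-side fp calculation (not visible in the hunk) is symmetric to ap, and the recent_scanned_*/recent_rotated_* variables and their values are invented stand-ins for the zone->recent_scanned[] and zone->recent_rotated[] counters.

#include <stdio.h>

int main(void)
{
	/* assumed default; vm.swappiness is 60 unless tuned */
	unsigned long swappiness = 60;
	unsigned long anon_prio = swappiness;		/* assumption: mirrors sc->swappiness */
	unsigned long file_prio = 200 - swappiness;	/* as in the hunk above */

	/* hypothetical per-zone reclaim statistics (invented numbers):
	 * anon pages were mostly re-referenced after being scanned,
	 * file pages mostly were not */
	unsigned long recent_scanned_anon = 1000, recent_rotated_anon = 800;
	unsigned long recent_scanned_file = 1000, recent_rotated_file = 100;

	/* same shape as the ap/fp computation in the diff: pressure is
	 * weighted by priority and divided by the rotated count, so a
	 * list whose pages keep getting referenced receives less pressure */
	unsigned long ap = (anon_prio + 1) * (recent_scanned_anon + 1);
	ap /= recent_rotated_anon + 1;

	unsigned long fp = (file_prio + 1) * (recent_scanned_file + 1);
	fp /= recent_rotated_file + 1;

	printf("anon share of scan pressure: %lu%%\n", 100 * ap / (ap + fp));
	printf("file share of scan pressure: %lu%%\n", 100 * fp / (ap + fp));
	return 0;
}

With these made-up numbers the file list ends up taking most of the pressure, which is the behaviour the new comment describes: heavy rotation (recent re-reference) on the anon list makes that list less attractive to reclaim.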