Diffstat (limited to 'mm/vmscan.c')
 -rw-r--r--  mm/vmscan.c | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c141b3e78071..62e7f62fb559 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -623,6 +623,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 * Try to allocate it some swap space here.
 		 */
 		if (PageAnon(page) && !PageSwapCache(page)) {
+			if (!(sc->gfp_mask & __GFP_IO))
+				goto keep_locked;
 			switch (try_to_munlock(page)) {
 			case SWAP_FAIL:		/* shouldn't happen */
 			case SWAP_AGAIN:
@@ -634,6 +636,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			}
 			if (!add_to_swap(page, GFP_ATOMIC))
 				goto activate_locked;
+			may_enter_fs = 1;
 		}
 #endif /* CONFIG_SWAP */
 
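For context, the two hunks above change when anonymous pages may be given swap space. Below is a condensed sketch of how this region of shrink_page_list() reads after the patch; the SWAP_MLOCK and SWAP_SUCCESS cases are not visible in this diff and are assumed from the unevictable-LRU code the patch sits on top of:

	if (PageAnon(page) && !PageSwapCache(page)) {
		/*
		 * The caller's gfp_mask forbids I/O, so we must not
		 * allocate swap and start swap writeback; keep the
		 * page locked out of this pass and let a later, less
		 * restricted reclaim pass handle it.
		 */
		if (!(sc->gfp_mask & __GFP_IO))
			goto keep_locked;
		switch (try_to_munlock(page)) {
		case SWAP_FAIL:		/* shouldn't happen */
		case SWAP_AGAIN:
			goto keep_locked;
		case SWAP_MLOCK:	/* assumed from surrounding code */
			goto cull_mlocked;
		case SWAP_SUCCESS:
			; /* fall through and add to swap cache */
		}
		if (!add_to_swap(page, GFP_ATOMIC))
			goto activate_locked;
		/*
		 * The page is now backed by the swap cache, and __GFP_IO
		 * was verified above, so writing this page out later may
		 * safely enter filesystem/swap code.
		 */
		may_enter_fs = 1;
	}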
@@ -1245,6 +1248,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 		list_add(&page->lru, &l_inactive);
 	}
 
+	spin_lock_irq(&zone->lru_lock);
 	/*
 	 * Count referenced pages from currently used mappings as
 	 * rotated, even though they are moved to the inactive list.
@@ -1260,7 +1264,6 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 
 	pgmoved = 0;
 	lru = LRU_BASE + file * LRU_FILE;
-	spin_lock_irq(&zone->lru_lock);
 	while (!list_empty(&l_inactive)) {
 		page = lru_to_page(&l_inactive);
 		prefetchw_prev_lru_page(page, &l_inactive, flags);
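Taken together, the two shrink_active_list() hunks widen the zone->lru_lock critical section so that the statistics update sitting between the old and new lock sites runs under the lock. A sketch of the resulting ordering; the recent_rotated[] line shown is the accounting this diff elides, inferred from the comment kept in the hunk, and should be treated as an assumption:

	spin_lock_irq(&zone->lru_lock);
	/*
	 * Count referenced pages from currently used mappings as
	 * rotated, even though they are moved to the inactive list.
	 */
	zone->recent_rotated[!!file] += pgmoved;	/* assumed elided accounting */

	pgmoved = 0;
	lru = LRU_BASE + file * LRU_FILE;
	/* the lock used to be taken only here, leaving the update above unlocked */
	while (!list_empty(&l_inactive)) {
		/* ... move each page onto the inactive lru ... */
	}

Bumping a shared per-zone counter outside the lru_lock could lose updates against other CPUs doing the same; taking the lock earlier closes that window at the cost of a slightly longer critical section.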
@@ -1386,9 +1389,9 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
 	file_prio = 200 - sc->swappiness;
 
 	/*
-	 *                anon       recent_rotated[0]
-	 * %anon = 100 * ----------- / ----------------- * IO cost
-	 *            anon + file       rotate_sum
+	 * The amount of pressure on anon vs file pages is inversely
+	 * proportional to the fraction of recently scanned pages on
+	 * each list that were recently referenced and in active use.
	 */
 	ap = (anon_prio + 1) * (zone->recent_scanned[0] + 1);
 	ap /= zone->recent_rotated[0] + 1;
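The rewritten comment describes the balancing rule: a list's share of reclaim pressure shrinks as the fraction of its recently scanned pages found in active use (recent_rotated) grows. The following standalone program sketches that arithmetic using the two statements above plus a file-side counterpart assumed to be symmetric; all input numbers are hypothetical:

	/*
	 * Build: gcc -o scan_ratio scan_ratio.c
	 * A sketch of the get_scan_ratio() pressure arithmetic, with
	 * made-up recent_scanned/recent_rotated history.
	 */
	#include <stdio.h>

	int main(void)
	{
		unsigned long swappiness = 60;		/* default vm.swappiness */
		unsigned long anon_prio = swappiness;
		unsigned long file_prio = 200 - swappiness;

		/* hypothetical history: anon mostly active, file mostly idle */
		unsigned long anon_scanned = 1000, anon_rotated = 900;
		unsigned long file_scanned = 1000, file_rotated = 100;

		/* the two statements from the hunk above */
		unsigned long ap = (anon_prio + 1) * (anon_scanned + 1)
					/ (anon_rotated + 1);
		/* assumed symmetric file-side calculation */
		unsigned long fp = (file_prio + 1) * (file_scanned + 1)
					/ (file_rotated + 1);

		printf("anon pressure %lu, file pressure %lu\n", ap, fp);
		printf("=> %lu%% of reclaim aimed at file pages\n",
		       100 * fp / (ap + fp));
		return 0;
	}

With these inputs the anon list, whose pages were almost all still in use, ends up with roughly 5% of the pressure and the file list with roughly 95%, matching the "inversely proportional" behaviour the new comment describes.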