author	Christoph Lameter <clameter@engr.sgi.com>	2006-03-22 03:08:45 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-03-22 10:54:02 -0500
commit	6e5ef1a96e6e3b123da56292bc35017c8c401491 (patch)
tree	db9532c53013bccd7ac9d186784bbc6fd343bca4 /mm/vmscan.c
parent	a7290ee08e434399660ace34427c17696e47c562 (diff)
[PATCH] vmscan: remove obsolete checks from shrink_list() and fix unlikely in refill_inactive_zone()

As suggested by Marcelo:

1. The optimization introduced recently for not calling page_referenced()
   during zone reclaim makes two additional checks in shrink_list unnecessary.

2. The if (unlikely(sc->may_swap)) in refill_inactive_zone is optimized for
   the zone_reclaim case.  However, most people's systems only do swap.
   Undo that.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Marcelo Tosatti <marcelo.tosatti@cyclades.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
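For context on point 2: the unlikely() annotation being dropped is the kernel's branch-prediction hint from include/linux/compiler.h, which expands to GCC's __builtin_expect().  The sketch below is illustrative only (struct sc_sketch and needs_swap_accounting() are made-up stand-ins, not kernel code) and shows why hinting a usually-true condition as unlikely is a pessimization:

	#define likely(x)	__builtin_expect(!!(x), 1)
	#define unlikely(x)	__builtin_expect(!!(x), 0)

	/* Illustrative stand-in for the may_swap field of struct scan_control. */
	struct sc_sketch {
		int may_swap;
	};

	static int needs_swap_accounting(const struct sc_sketch *sc)
	{
		/*
		 * unlikely() asks the compiler to lay out the code with the
		 * hinted branch off the hot path.  Zone reclaim runs with
		 * may_swap == 0, so the hint helped there, but on most systems
		 * reclaim does swap and may_swap is nearly always 1 -- the hint
		 * then penalizes the common case, which is why the patch drops
		 * it in favour of a plain "if (sc->may_swap)".
		 */
		if (unlikely(sc->may_swap))	/* after the patch: if (sc->may_swap) */
			return 1;
		return 0;
	}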
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	13
1 file changed, 2 insertions, 11 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 3914a94aa905..f713e9f6ac73 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -460,12 +460,9 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 * Anonymous process memory has backing store?
 		 * Try to allocate it some swap space here.
 		 */
-		if (PageAnon(page) && !PageSwapCache(page)) {
-			if (!sc->may_swap)
-				goto keep_locked;
+		if (PageAnon(page) && !PageSwapCache(page))
 			if (!add_to_swap(page, GFP_ATOMIC))
 				goto activate_locked;
-		}
 #endif /* CONFIG_SWAP */
 
 		mapping = page_mapping(page);
@@ -477,12 +474,6 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 * processes. Try to unmap it here.
 		 */
 		if (page_mapped(page) && mapping) {
-			/*
-			 * No unmapping if we do not swap
-			 */
-			if (!sc->may_swap)
-				goto keep_locked;
-
 			switch (try_to_unmap(page, 0)) {
 			case SWAP_FAIL:
 				goto activate_locked;
@@ -1205,7 +1196,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	struct pagevec pvec;
 	int reclaim_mapped = 0;
 
-	if (unlikely(sc->may_swap)) {
+	if (sc->may_swap) {
 		long mapped_ratio;
 		long distress;
 		long swap_tendency;