aboutsummaryrefslogtreecommitdiffstats
path: root/mm/vmscan.c
diff options
context:
space:
mode:
authorNick Piggin <nickpiggin@yahoo.com.au>2006-01-06 03:11:20 -0500
committerLinus Torvalds <torvalds@g5.osdl.org>2006-01-06 11:33:29 -0500
commita74609fafa2e5cc31d558012abaaa55ec9ad9da4 (patch)
tree0be653692864d99da345b575dfe2083994ee1d21 /mm/vmscan.c
parentd3cb487149bd706aa6aeb02042332a450978dc1c (diff)
[PATCH] mm: page_state opt
Optimise page_state manipulations by introducing interrupt unsafe accessors to page_state fields.  Callers must provide their own locking (either disable interrupts or not update from interrupt context).

Switch over the hot callsites that can easily be moved under interrupts off sections.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--mm/vmscan.c27
1 files changed, 15 insertions, 12 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7681d8ee04fe..be8235fb1939 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -645,16 +645,17 @@ static void shrink_cache(struct zone *zone, struct scan_control *sc)
 			goto done;
 
 		max_scan -= nr_scan;
-		if (current_is_kswapd())
-			mod_page_state_zone(zone, pgscan_kswapd, nr_scan);
-		else
-			mod_page_state_zone(zone, pgscan_direct, nr_scan);
 		nr_freed = shrink_list(&page_list, sc);
-		if (current_is_kswapd())
-			mod_page_state(kswapd_steal, nr_freed);
-		mod_page_state_zone(zone, pgsteal, nr_freed);
 
-		spin_lock_irq(&zone->lru_lock);
+		local_irq_disable();
+		if (current_is_kswapd()) {
+			__mod_page_state_zone(zone, pgscan_kswapd, nr_scan);
+			__mod_page_state(kswapd_steal, nr_freed);
+		} else
+			__mod_page_state_zone(zone, pgscan_direct, nr_scan);
+		__mod_page_state_zone(zone, pgsteal, nr_freed);
+
+		spin_lock(&zone->lru_lock);
 		/*
 		 * Put back any unfreeable pages.
 		 */
@@ -816,11 +817,13 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc)
 		}
 	}
 	zone->nr_active += pgmoved;
-	spin_unlock_irq(&zone->lru_lock);
-	pagevec_release(&pvec);
+	spin_unlock(&zone->lru_lock);
+
+	__mod_page_state_zone(zone, pgrefill, pgscanned);
+	__mod_page_state(pgdeactivate, pgdeactivate);
+	local_irq_enable();
 
-	mod_page_state_zone(zone, pgrefill, pgscanned);
-	mod_page_state(pgdeactivate, pgdeactivate);
+	pagevec_release(&pvec);
 }
 
 /*