From b555749aac87d7c2637f153e44bd77c7fdf4c65b Mon Sep 17 00:00:00 2001
From: Andrew Morton
Date: Tue, 6 Jan 2009 14:40:13 -0800
Subject: vmscan: shrink_active_list(): reduce lru_lock hold time

These three statements manipulate local variables and do not need the lock
coverage.

Cc: Johannes Weiner
Cc: Lee Schermerhorn
Cc: Rik van Riel
Signed-off-by: Linus Torvalds
---
 mm/vmscan.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

(limited to 'mm')

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 466a36b3bada..5daf606e0a35 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1237,6 +1237,13 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 		list_add(&page->lru, &l_inactive);
 	}
 
+	/*
+	 * Move the pages to the [file or anon] inactive list.
+	 */
+	pagevec_init(&pvec, 1);
+	pgmoved = 0;
+	lru = LRU_BASE + file * LRU_FILE;
+
 	spin_lock_irq(&zone->lru_lock);
 	/*
 	 * Count referenced pages from currently used mappings as
@@ -1247,13 +1254,6 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	if (scan_global_lru(sc))
 		zone->recent_rotated[!!file] += pgmoved;
 
-	/*
-	 * Move the pages to the [file or anon] inactive list.
-	 */
-	pagevec_init(&pvec, 1);
-
-	pgmoved = 0;
-	lru = LRU_BASE + file * LRU_FILE;
 	while (!list_empty(&l_inactive)) {
 		page = lru_to_page(&l_inactive);
 		prefetchw_prev_lru_page(page, &l_inactive, flags);
--
cgit v1.2.2
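
The pattern the patch applies is general: setup that only touches local
variables can be hoisted above the lock acquisition so that zone->lru_lock is
held only while shared state is actually manipulated. Below is a minimal
userspace sketch of the same idea, not kernel code; the counters struct, its
rotated field, and the prepare_batch() helper are hypothetical and exist only
for illustration.

/*
 * Sketch: keep the critical section to the shared-state update only.
 * Local-variable work is done before the lock is taken, mirroring the
 * patch, which moves pagevec_init(), pgmoved and lru setup above
 * spin_lock_irq(&zone->lru_lock).
 */
#include <pthread.h>

struct counters {
	pthread_mutex_t lock;
	unsigned long rotated;		/* shared state, protected by 'lock' */
};

static unsigned long prepare_batch(void)
{
	/* purely local computation; needs no lock coverage */
	unsigned long batch = 0;
	for (int i = 0; i < 16; i++)
		batch += i;
	return batch;
}

void account_rotated(struct counters *c)
{
	/* local setup done before taking the lock */
	unsigned long batch = prepare_batch();

	pthread_mutex_lock(&c->lock);
	c->rotated += batch;		/* only the shared update is under the lock */
	pthread_mutex_unlock(&c->lock);
}

The effect is the same as in the patch: the lock hold time shrinks to the
work that genuinely requires mutual exclusion, which matters for a hot lock
such as lru_lock.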