author	Linus Torvalds <torvalds@linux-foundation.org>	2011-01-17 17:42:19 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-01-17 17:42:19 -0500
commit	7a608572a282a74978e10fd6cd63090aebe29f5c (patch)
tree	03e52f73d7c35ffcea8f46e14ec569da818a7631
parent	9e8a462a0141b12e22c4a2f0c12e0542770401f0 (diff)
Revert "mm: batch activate_page() to reduce lock contention"
This reverts commit 744ed1442757767ffede5008bb13e0805085902e.

Chris Mason ended up chasing down some page allocation errors and pages
stuck waiting on the IO scheduler, and was able to narrow it down to two
commits: commit 744ed1442757 ("mm: batch activate_page() to reduce lock
contention") and d8505dee1a87 ("mm: simplify code of swap.c").

This reverts the first of them.

Reported-and-debugged-by: Chris Mason <chris.mason@oracle.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Jens Axboe <jaxboe@fusionio.com>
Cc: linux-mm <linux-mm@kvack.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	mm/internal.h	9
-rw-r--r--	mm/swap.c	90
-rw-r--r--	mm/vmscan.c	6
3 files changed, 13 insertions(+), 92 deletions(-)
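
For context: the mechanism being reverted had activate_page() park pages in a per-CPU pagevec and take zone->lru_lock once per batch rather than once per page. The sketch below is a minimal userspace model of that batching pattern, not kernel code; the names batch, batch_flush, and activate_page_batched are illustrative stand-ins, and PAGEVEC_SIZE reflects the 14-entry pagevecs of this era.

/*
 * Userspace model of per-CPU pagevec batching -- NOT the kernel code.
 * Each context buffers "activations" privately and takes the shared
 * lock once per flush instead of once per page, which is the
 * lock-contention saving the original commit was after.
 */
#include <pthread.h>
#include <stdio.h>

#define PAGEVEC_SIZE 14	/* a pagevec held 14 pages in kernels of this era */

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long nr_active;	/* stand-in for the zone's active LRU list */

struct batch {
	int count;
	int pages[PAGEVEC_SIZE];	/* stand-ins for struct page pointers */
};

/* One lock round-trip moves the whole batch, not a single page. */
static void batch_flush(struct batch *b)
{
	pthread_mutex_lock(&lru_lock);
	nr_active += b->count;
	pthread_mutex_unlock(&lru_lock);
	b->count = 0;
}

/* Analogue of the batched activate_page(): lock-free until the batch fills. */
static void activate_page_batched(struct batch *b, int page)
{
	b->pages[b->count++] = page;
	if (b->count == PAGEVEC_SIZE)
		batch_flush(b);
}

int main(void)
{
	struct batch b = { 0 };

	for (int i = 0; i < 100; i++)
		activate_page_batched(&b, i);
	batch_flush(&b);	/* drain the leftover tail, like lru_add_drain() */
	printf("activated %lu pages\n", nr_active);
	return 0;
}

The revert restores one lock round-trip per activated page. The plausible downside of batching, hinted at by the stuck pages in the report, is that a page sitting in a not-yet-drained per-CPU batch is temporarily off the LRU and invisible to other code paths; the commit message does not pin down the exact failure mechanism.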
diff --git a/mm/internal.h b/mm/internal.h
index 4c98630f0f77..69488205723d 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -39,15 +39,6 @@ static inline void __put_page(struct page *page)
 
 extern unsigned long highest_memmap_pfn;
 
-#ifdef CONFIG_SMP
-extern int putback_active_lru_page(struct zone *zone, struct page *page);
-#else
-static inline int putback_active_lru_page(struct zone *zone, struct page *page)
-{
-	return 0;
-}
-#endif
-
 /*
  * in mm/vmscan.c:
  */
diff --git a/mm/swap.c b/mm/swap.c
index bbc1ce9f9460..ab498ea04ae3 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -271,94 +271,27 @@ static void update_page_reclaim_stat(struct zone *zone, struct page *page,
 }
 
 /*
- * A page will go to active list either by activate_page or putback_lru_page.
- * In the activate_page case, the page hasn't active bit set. The page might
- * not in LRU list because it's isolated before it gets a chance to be moved to
- * active list. The window is small because pagevec just stores several pages.
- * For such case, we do nothing for such page.
- * In the putback_lru_page case, the page isn't in lru list but has active
- * bit set
+ * FIXME: speed this up?
  */
-static void __activate_page(struct page *page, void *arg)
+void activate_page(struct page *page)
 {
 	struct zone *zone = page_zone(page);
-	int file = page_is_file_cache(page);
-	int lru = page_lru_base_type(page);
-	bool putback = !PageLRU(page);
-
-	/* The page is isolated before it's moved to active list */
-	if (!PageLRU(page) && !PageActive(page))
-		return;
-	if ((PageLRU(page) && PageActive(page)) || PageUnevictable(page))
-		return;
-
-	if (!putback)
-		del_page_from_lru_list(zone, page, lru);
-	else
-		SetPageLRU(page);
-
-	SetPageActive(page);
-	lru += LRU_ACTIVE;
-	add_page_to_lru_list(zone, page, lru);
-
-	if (putback)
-		return;
-	__count_vm_event(PGACTIVATE);
-	update_page_reclaim_stat(zone, page, file, 1);
-}
-
-#ifdef CONFIG_SMP
-static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
-
-static void activate_page_drain(int cpu)
-{
-	struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);
 
-	if (pagevec_count(pvec))
-		pagevec_lru_move_fn(pvec, __activate_page, NULL);
-}
-
-void activate_page(struct page *page)
-{
+	spin_lock_irq(&zone->lru_lock);
 	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
-		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
-
-		page_cache_get(page);
-		if (!pagevec_add(pvec, page))
-			pagevec_lru_move_fn(pvec, __activate_page, NULL);
-		put_cpu_var(activate_page_pvecs);
-	}
-}
+		int file = page_is_file_cache(page);
+		int lru = page_lru_base_type(page);
+		del_page_from_lru_list(zone, page, lru);
 
-/* Caller should hold zone->lru_lock */
-int putback_active_lru_page(struct zone *zone, struct page *page)
-{
-	struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
+		SetPageActive(page);
+		lru += LRU_ACTIVE;
+		add_page_to_lru_list(zone, page, lru);
+		__count_vm_event(PGACTIVATE);
 
-	if (!pagevec_add(pvec, page)) {
-		spin_unlock_irq(&zone->lru_lock);
-		pagevec_lru_move_fn(pvec, __activate_page, NULL);
-		spin_lock_irq(&zone->lru_lock);
-	}
+		update_page_reclaim_stat(zone, page, file, 1);
 	}
-	put_cpu_var(activate_page_pvecs);
-	return 1;
-}
-
-#else
-static inline void activate_page_drain(int cpu)
-{
-}
-
-void activate_page(struct page *page)
-{
-	struct zone *zone = page_zone(page);
-
-	spin_lock_irq(&zone->lru_lock);
-	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page))
-		__activate_page(page, NULL);
 	spin_unlock_irq(&zone->lru_lock);
 }
-#endif
 
 /*
  * Mark a page as having seen activity.
@@ -457,7 +390,6 @@ static void drain_cpu_pagevecs(int cpu)
 		pagevec_move_tail(pvec);
 		local_irq_restore(flags);
 	}
-	activate_page_drain(cpu);
 }
 
 void lru_add_drain(void)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 99999a9b2b0b..47a50962ce81 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1271,16 +1271,14 @@ putback_lru_pages(struct zone *zone, struct scan_control *sc,
 			spin_lock_irq(&zone->lru_lock);
 			continue;
 		}
+		SetPageLRU(page);
 		lru = page_lru(page);
+		add_page_to_lru_list(zone, page, lru);
 		if (is_active_lru(lru)) {
 			int file = is_file_lru(lru);
 			int numpages = hpage_nr_pages(page);
 			reclaim_stat->recent_rotated[file] += numpages;
-			if (putback_active_lru_page(zone, page))
-				continue;
 		}
-		SetPageLRU(page);
-		add_page_to_lru_list(zone, page, lru);
 		if (!pagevec_add(&pvec, page)) {
 			spin_unlock_irq(&zone->lru_lock);
 			__pagevec_release(&pvec);