about summary refs log tree commit diff stats
path: root/mm/swap.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/swap.c')
-rw-r--r--  mm/swap.c | 78
1 files changed, 78 insertions, 0 deletions
diff --git a/mm/swap.c b/mm/swap.c
index c02f93611a84..4aea806d0d44 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -39,6 +39,7 @@ int page_cluster;
39 39
40static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs); 40static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
41static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs); 41static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
42static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
42 43
43/* 44/*
44 * This path almost never happens for VM activity - pages are normally 45 * This path almost never happens for VM activity - pages are normally
@@ -347,6 +348,60 @@ void add_page_to_unevictable_list(struct page *page)
347} 348}
348 349
349/* 350/*
351 * If the page can not be invalidated, it is moved to the
352 * inactive list to speed up its reclaim. It is moved to the
353 * head of the list, rather than the tail, to give the flusher
354 * threads some time to write it out, as this is much more
355 * effective than the single-page writeout from reclaim.
356 */
357static void lru_deactivate(struct page *page, struct zone *zone)
358{
359 int lru, file;
360
361 if (!PageLRU(page) || !PageActive(page))
362 return;
363
364 /* Some processes are using the page */
365 if (page_mapped(page))
366 return;
367
368 file = page_is_file_cache(page);
369 lru = page_lru_base_type(page);
370 del_page_from_lru_list(zone, page, lru + LRU_ACTIVE);
371 ClearPageActive(page);
372 ClearPageReferenced(page);
373 add_page_to_lru_list(zone, page, lru);
374 __count_vm_event(PGDEACTIVATE);
375
376 update_page_reclaim_stat(zone, page, file, 0);
377}
378
379static void ____pagevec_lru_deactivate(struct pagevec *pvec)
380{
381 int i;
382 struct zone *zone = NULL;
383
384 for (i = 0; i < pagevec_count(pvec); i++) {
385 struct page *page = pvec->pages[i];
386 struct zone *pagezone = page_zone(page);
387
388 if (pagezone != zone) {
389 if (zone)
390 spin_unlock_irq(&zone->lru_lock);
391 zone = pagezone;
392 spin_lock_irq(&zone->lru_lock);
393 }
394 lru_deactivate(page, zone);
395 }
396 if (zone)
397 spin_unlock_irq(&zone->lru_lock);
398
399 release_pages(pvec->pages, pvec->nr, pvec->cold);
400 pagevec_reinit(pvec);
401}
402
403
404/*
350 * Drain pages out of the cpu's pagevecs. 405 * Drain pages out of the cpu's pagevecs.
351 * Either "cpu" is the current CPU, and preemption has already been 406 * Either "cpu" is the current CPU, and preemption has already been
352 * disabled; or "cpu" is being hot-unplugged, and is already dead. 407 * disabled; or "cpu" is being hot-unplugged, and is already dead.
@@ -372,6 +427,29 @@ static void drain_cpu_pagevecs(int cpu)
372 pagevec_move_tail(pvec); 427 pagevec_move_tail(pvec);
373 local_irq_restore(flags); 428 local_irq_restore(flags);
374 } 429 }
430
431 pvec = &per_cpu(lru_deactivate_pvecs, cpu);
432 if (pagevec_count(pvec))
433 ____pagevec_lru_deactivate(pvec);
434}
435
436/**
437 * deactivate_page - forcefully deactivate a page
438 * @page: page to deactivate
439 *
440 * This function hints the VM that @page is a good reclaim candidate,
441 * for example if its invalidation fails due to the page being dirty
442 * or under writeback.
443 */
444void deactivate_page(struct page *page)
445{
446 if (likely(get_page_unless_zero(page))) {
447 struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
448
449 if (!pagevec_add(pvec, page))
450 ____pagevec_lru_deactivate(pvec);
451 put_cpu_var(lru_deactivate_pvecs);
452 }
375} 453}
376 454
377void lru_add_drain(void) 455void lru_add_drain(void)