Diffstat (limited to 'mm/swap.c')
-rw-r--r--	mm/swap.c	49
1 file changed, 3 insertions(+), 46 deletions(-)
diff --git a/mm/swap.c b/mm/swap.c
index 88895c249bc..8fd095c4ae5 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -86,9 +86,8 @@ int rotate_reclaimable_page(struct page *page)
 	zone = page_zone(page);
 	spin_lock_irqsave(&zone->lru_lock, flags);
 	if (PageLRU(page) && !PageActive(page)) {
-		list_del(&page->lru);
-		list_add_tail(&page->lru, &zone->inactive_list);
-		inc_page_state(pgrotated);
+		list_move_tail(&page->lru, &zone->inactive_list);
+		__count_vm_event(PGROTATED);
 	}
 	if (!test_clear_page_writeback(page))
 		BUG();
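
[Note] The first hunk folds the open-coded list_del() + list_add_tail() pair into list_move_tail(). That helper has long been defined in include/linux/list.h; a minimal sketch of what it does (debug/poisoning variants omitted):

	static inline void list_move_tail(struct list_head *list,
					  struct list_head *head)
	{
		__list_del(list->prev, list->next);	/* unlink from current position */
		list_add_tail(list, head);		/* re-insert just before head */
	}

The behaviour is identical to the removed two-call sequence, but it is one call shorter and skips the pointer poisoning that a full list_del() performs between the delete and the re-add.
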
@@ -108,7 +107,7 @@ void fastcall activate_page(struct page *page)
 		del_page_from_inactive_list(zone, page);
 		SetPageActive(page);
 		add_page_to_active_list(zone, page);
-		inc_page_state(pgactivate);
+		__count_vm_event(PGACTIVATE);
 	}
 	spin_unlock_irq(&zone->lru_lock);
 }
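
[Note] Both hunks also convert the old inc_page_state() page-state accounting to the VM event counters (PGROTATED, PGACTIVATE). The double-underscore __count_vm_event() is the variant that skips preemption protection, which is safe at both call sites because zone->lru_lock is held with interrupts disabled. A rough sketch of the per-cpu bookkeeping it performs, based on the vmstat implementation of this era:

	/* Per-cpu event bump; caller must already prevent preemption/migration. */
	static inline void __count_vm_event(enum vm_event_item item)
	{
		__get_cpu_var(vm_event_states).event[item]++;
	}
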
@@ -480,48 +479,6 @@ static int cpu_swap_callback(struct notifier_block *nfb,
 #endif /* CONFIG_HOTPLUG_CPU */
 #endif /* CONFIG_SMP */
 
-#ifdef CONFIG_SMP
-void percpu_counter_mod(struct percpu_counter *fbc, long amount)
-{
-	long count;
-	long *pcount;
-	int cpu = get_cpu();
-
-	pcount = per_cpu_ptr(fbc->counters, cpu);
-	count = *pcount + amount;
-	if (count >= FBC_BATCH || count <= -FBC_BATCH) {
-		spin_lock(&fbc->lock);
-		fbc->count += count;
-		*pcount = 0;
-		spin_unlock(&fbc->lock);
-	} else {
-		*pcount = count;
-	}
-	put_cpu();
-}
-EXPORT_SYMBOL(percpu_counter_mod);
-
-/*
- * Add up all the per-cpu counts, return the result. This is a more accurate
- * but much slower version of percpu_counter_read_positive()
- */
-long percpu_counter_sum(struct percpu_counter *fbc)
-{
-	long ret;
-	int cpu;
-
-	spin_lock(&fbc->lock);
-	ret = fbc->count;
-	for_each_possible_cpu(cpu) {
-		long *pcount = per_cpu_ptr(fbc->counters, cpu);
-		ret += *pcount;
-	}
-	spin_unlock(&fbc->lock);
-	return ret < 0 ? 0 : ret;
-}
-EXPORT_SYMBOL(percpu_counter_sum);
-#endif
-
 /*
  * Perform any setup for the swap system
  */
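
[Note] The final hunk drops the generic percpu_counter implementation from mm/swap.c wholesale. Because this diffstat is limited to mm/swap.c the destination is not visible here, but in mainline the code lives on in lib/percpu_counter.c with the same design: each CPU accumulates a local delta and folds it into the shared fbc->count under fbc->lock only once it reaches FBC_BATCH, so the lock is taken rarely while percpu_counter_sum() can still produce an exact total. A hypothetical caller (the counter name is invented for illustration) would use the pair like this:

	struct percpu_counter nr_foo;	/* hypothetical counter */

	/* Fast path: usually touches only this CPU's slot; takes
	 * fbc->lock only when the local delta reaches FBC_BATCH. */
	percpu_counter_mod(&nr_foo, 1);

	/* Slow but exact: folds every CPU's residue under the lock. */
	long total = percpu_counter_sum(&nr_foo);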