Diffstat (limited to 'mm/swap.c')
-rw-r--r--	mm/swap.c	27
1 file changed, 24 insertions(+), 3 deletions(-)
diff --git a/mm/swap.c b/mm/swap.c
index 76247424dea1..e9ec06d845e8 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -40,7 +40,7 @@ static void put_compound_page(struct page *page)
 	if (put_page_testzero(page)) {
 		void (*dtor)(struct page *page);
 
-		dtor = (void (*)(struct page *))page[1].mapping;
+		dtor = (void (*)(struct page *))page[1].lru.next;
 		(*dtor)(page);
 	}
 }
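
Note on the first hunk: the compound-page destructor is now read from page[1].lru.next rather than page[1].mapping, so whatever code sets up the compound page must stash the pointer in that same field. A minimal sketch of that writer side, assuming this layout; the helper name is hypothetical and not taken from this patch:

#include <linux/mm.h>

/* Hypothetical sketch, not part of this patch: stash the destructor in the
 * first tail page's lru.next so put_compound_page() above can recover it.
 */
static inline void set_compound_dtor_sketch(struct page *page,
					    void (*dtor)(struct page *))
{
	page[1].lru.next = (void *)dtor;
}
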
@@ -489,13 +489,34 @@ void percpu_counter_mod(struct percpu_counter *fbc, long amount)
 	if (count >= FBC_BATCH || count <= -FBC_BATCH) {
 		spin_lock(&fbc->lock);
 		fbc->count += count;
+		*pcount = 0;
 		spin_unlock(&fbc->lock);
-		count = 0;
+	} else {
+		*pcount = count;
 	}
-	*pcount = count;
 	put_cpu();
 }
 EXPORT_SYMBOL(percpu_counter_mod);
+
+/*
+ * Add up all the per-cpu counts, return the result.  This is a more accurate
+ * but much slower version of percpu_counter_read_positive()
+ */
+long percpu_counter_sum(struct percpu_counter *fbc)
+{
+	long ret;
+	int cpu;
+
+	spin_lock(&fbc->lock);
+	ret = fbc->count;
+	for_each_cpu(cpu) {
+		long *pcount = per_cpu_ptr(fbc->counters, cpu);
+		ret += *pcount;
+	}
+	spin_unlock(&fbc->lock);
+	return ret < 0 ? 0 : ret;
+}
+EXPORT_SYMBOL(percpu_counter_sum);
 #endif
 
 /*
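
The new comment spells out the trade-off: percpu_counter_sum() takes fbc->lock and walks every CPU's counter for an exact total, while percpu_counter_read_positive() is a cheap read that may lag by the amounts still batched per CPU. A hedged usage sketch follows; the caller below is hypothetical and not part of this patch:

#include <linux/percpu_counter.h>

/*
 * Hypothetical caller, not part of this patch: use the cheap approximate
 * read on the fast path and pay for the exact all-CPU sum only when the
 * approximation is too close to the limit for its error to be ignored.
 */
static int over_limit(struct percpu_counter *fbc, long limit, long slack)
{
	long approx = percpu_counter_read_positive(fbc);

	if (approx + slack < limit)
		return 0;			/* clearly under the limit */
	if (approx > limit + slack)
		return 1;			/* clearly over the limit */

	return percpu_counter_sum(fbc) > limit;	/* slow, exact path */
}
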