about · summary · refs · log · tree · commit · diff · stats
path: root/mm/swap.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/swap.c')
-rw-r--r--	mm/swap.c	28
1 files changed, 25 insertions, 3 deletions
diff --git a/mm/swap.c b/mm/swap.c
index cce3dda59c59..b524ea90bddb 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -393,7 +393,8 @@ void pagevec_strip(struct pagevec *pvec)
 		struct page *page = pvec->pages[i];
 
 		if (PagePrivate(page) && !TestSetPageLocked(page)) {
-			try_to_release_page(page, 0);
+			if (PagePrivate(page))
+				try_to_release_page(page, 0);
 			unlock_page(page);
 		}
 	}
@@ -489,13 +490,34 @@ void percpu_counter_mod(struct percpu_counter *fbc, long amount)
 	if (count >= FBC_BATCH || count <= -FBC_BATCH) {
 		spin_lock(&fbc->lock);
 		fbc->count += count;
+		*pcount = 0;
 		spin_unlock(&fbc->lock);
-		count = 0;
+	} else {
+		*pcount = count;
 	}
-	*pcount = count;
 	put_cpu();
 }
 EXPORT_SYMBOL(percpu_counter_mod);
+
+/*
+ * Add up all the per-cpu counts, return the result. This is a more accurate
+ * but much slower version of percpu_counter_read_positive()
+ */
+long percpu_counter_sum(struct percpu_counter *fbc)
+{
+	long ret;
+	int cpu;
+
+	spin_lock(&fbc->lock);
+	ret = fbc->count;
+	for_each_cpu(cpu) {
+		long *pcount = per_cpu_ptr(fbc->counters, cpu);
+		ret += *pcount;
+	}
+	spin_unlock(&fbc->lock);
+	return ret < 0 ? 0 : ret;
+}
+EXPORT_SYMBOL(percpu_counter_sum);
 #endif
 
 /*