author		Nikolay Borisov <nborisov@suse.com>	2017-07-12 17:37:51 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-07-12 19:26:05 -0400
commit		3e8f399da490e6ac20a3cfd6aa404c9aa961a9a2
tree		1e7ef3c2a6de595ad3d8d7f121b509f2ffeda371
parent		c945dccc80856107f109c36a7d0e29a371b5d1b5
writeback: rework wb_[dec|inc]_stat family of functions
Currently the writeback statistics code uses percpu counters to hold
various statistics.  Furthermore, we have two families of functions:
those which disable local irqs, and those which don't and whose names
begin with a double underscore.  However, both end up calling
__add_wb_stat(), which in turn calls percpu_counter_add_batch(), which
is already irq-safe.  Exploiting this fact allows us to eliminate the
__wb_* functions, since they add no protection beyond what we already
have.  Furthermore, refactor the wb_* functions to call __add_wb_stat()
directly, without the irq-disabling dance.  This will likely result in
better runtime for code which modifies the stat counters.

While at it, also document why percpu_counter_add_batch() is in fact
preempt- and irq-safe, since at least three people have been confused
by this.

Link: http://lkml.kernel.org/r/1498029937-27293-1-git-send-email-nborisov@suse.com
Signed-off-by: Nikolay Borisov <nborisov@suse.com>
Acked-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Jan Kara <jack@suse.cz>
Cc: Josef Bacik <jbacik@fb.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Jeff Layton <jlayton@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
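A minimal sketch of the rework described above, assuming the wb_stat
helpers live in include/linux/backing-dev.h as they do in kernels of
this era; the bodies are illustrative, not quoted from the patch:

/* Before this patch: the irq-safe wb_* variant wrapped the __wb_*
 * one in local_irq_save()/local_irq_restore(), even though the
 * underlying percpu_counter_add_batch() is already irq-safe.
 */
static inline void inc_wb_stat(struct bdi_writeback *wb,
			       enum wb_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_wb_stat(wb, item);	/* bottoms out in percpu_counter_add_batch() */
	local_irq_restore(flags);
}

/* After this patch: the __wb_* family is gone and the wb_* helpers
 * call __add_wb_stat() directly, relying on the counter's own
 * irq safety.
 */
static inline void __add_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item, s64 amount)
{
	percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void inc_wb_stat(struct bdi_writeback *wb,
			       enum wb_stat_item item)
{
	__add_wb_stat(wb, item, 1);
}

static inline void dec_wb_stat(struct bdi_writeback *wb,
			       enum wb_stat_item item)
{
	__add_wb_stat(wb, item, -1);
}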
Diffstat (limited to 'mm/page-writeback.c')
 mm/page-writeback.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
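The diffstat above covers only mm/page-writeback.c; the documentation
the commit message promises for percpu_counter_add_batch() lands in
lib/percpu_counter.c and is not shown here.  A hedged sketch of why
that function is preempt- and irq-safe, reconstructed from the
upstream SMP implementation (details may differ from the actual tree):

void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
			      s32 batch)
{
	s64 count;

	preempt_disable();	/* makes the read-modify-check preempt-safe */
	count = __this_cpu_read(*fbc->counters) + amount;
	if (count >= batch || count <= -batch) {
		unsigned long flags;

		/* Slow path: fold the local delta into the shared count
		 * under an irq-safe spinlock. */
		raw_spin_lock_irqsave(&fbc->lock, flags);
		fbc->count += count;
		__this_cpu_sub(*fbc->counters, count - amount);
		raw_spin_unlock_irqrestore(&fbc->lock, flags);
	} else {
		/* Fast path: this_cpu_add() is irq-safe by definition. */
		this_cpu_add(*fbc->counters, amount);
	}
	preempt_enable();
}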
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 0b60cc7ddac2..96e93b214d31 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -601,7 +601,7 @@ static inline void __wb_writeout_inc(struct bdi_writeback *wb)
 {
 	struct wb_domain *cgdom;
 
-	__inc_wb_stat(wb, WB_WRITTEN);
+	inc_wb_stat(wb, WB_WRITTEN);
 	wb_domain_writeout_inc(&global_wb_domain, &wb->completions,
 			       wb->bdi->max_prop_frac);
 
@@ -2435,8 +2435,8 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
 		__inc_lruvec_page_state(page, NR_FILE_DIRTY);
 		__inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
 		__inc_node_page_state(page, NR_DIRTIED);
-		__inc_wb_stat(wb, WB_RECLAIMABLE);
-		__inc_wb_stat(wb, WB_DIRTIED);
+		inc_wb_stat(wb, WB_RECLAIMABLE);
+		inc_wb_stat(wb, WB_DIRTIED);
 		task_io_account_write(PAGE_SIZE);
 		current->nr_dirtied++;
 		this_cpu_inc(bdp_ratelimits);
@@ -2741,7 +2741,7 @@ int test_clear_page_writeback(struct page *page)
 		if (bdi_cap_account_writeback(bdi)) {
 			struct bdi_writeback *wb = inode_to_wb(inode);
 
-			__dec_wb_stat(wb, WB_WRITEBACK);
+			dec_wb_stat(wb, WB_WRITEBACK);
 			__wb_writeout_inc(wb);
 		}
 	}
@@ -2786,7 +2786,7 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
 					page_index(page),
 					PAGECACHE_TAG_WRITEBACK);
 		if (bdi_cap_account_writeback(bdi))
-			__inc_wb_stat(inode_to_wb(inode), WB_WRITEBACK);
+			inc_wb_stat(inode_to_wb(inode), WB_WRITEBACK);
 
 		/*
 		 * We can come through here when swapping anonymous