aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2015-05-22 17:13:41 -0400
committerJens Axboe <axboe@fb.com>2015-06-02 10:33:35 -0400
commitec8a6f2643923ee5b74d24fa8d134240379f436b (patch)
tree4188866bc98fb571c3c872d0ad972c5ccc974ee4 /mm
parentdfb8ae567835425d27db8acc6c9fc5db88d38e2b (diff)
writeback: make congestion functions per bdi_writeback
Currently, all congestion functions take bdi (backing_dev_info) and always operate on the root wb (bdi->wb) and the congestion state from the block layer is propagated only for the root blkcg. This patch introduces {set|clear}_wb_congested() and wb_congested() which take a bdi_writeback_congested and bdi_writeback respectively. The bdi counterparts are now wrappers invoking the wb based functions on @bdi->wb. While converting clear_bdi_congested() to clear_wb_congested(), the local variable declaration order between @wqh and @bit is swapped for cosmetic reason. This patch just adds the new wb based functions. The following patches will apply them. v2: Updated for bdi_writeback_congested. Signed-off-by: Tejun Heo <tj@kernel.org> Reviewed-by: Jan Kara <jack@suse.cz> Cc: Jens Axboe <axboe@kernel.dk> Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'mm')
-rw-r--r--mm/backing-dev.c22
1 files changed, 11 insertions, 11 deletions
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 4c9386c98ec1..5029c4ad2f36 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -896,31 +896,31 @@ static wait_queue_head_t congestion_wqh[2] = {
896 __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]), 896 __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
897 __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1]) 897 __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
898 }; 898 };
899static atomic_t nr_bdi_congested[2]; 899static atomic_t nr_wb_congested[2];
900 900
901void clear_bdi_congested(struct backing_dev_info *bdi, int sync) 901void clear_wb_congested(struct bdi_writeback_congested *congested, int sync)
902{ 902{
903 enum wb_state bit;
904 wait_queue_head_t *wqh = &congestion_wqh[sync]; 903 wait_queue_head_t *wqh = &congestion_wqh[sync];
904 enum wb_state bit;
905 905
906 bit = sync ? WB_sync_congested : WB_async_congested; 906 bit = sync ? WB_sync_congested : WB_async_congested;
907 if (test_and_clear_bit(bit, &bdi->wb.congested->state)) 907 if (test_and_clear_bit(bit, &congested->state))
908 atomic_dec(&nr_bdi_congested[sync]); 908 atomic_dec(&nr_wb_congested[sync]);
909 smp_mb__after_atomic(); 909 smp_mb__after_atomic();
910 if (waitqueue_active(wqh)) 910 if (waitqueue_active(wqh))
911 wake_up(wqh); 911 wake_up(wqh);
912} 912}
913EXPORT_SYMBOL(clear_bdi_congested); 913EXPORT_SYMBOL(clear_wb_congested);
914 914
915void set_bdi_congested(struct backing_dev_info *bdi, int sync) 915void set_wb_congested(struct bdi_writeback_congested *congested, int sync)
916{ 916{
917 enum wb_state bit; 917 enum wb_state bit;
918 918
919 bit = sync ? WB_sync_congested : WB_async_congested; 919 bit = sync ? WB_sync_congested : WB_async_congested;
920 if (!test_and_set_bit(bit, &bdi->wb.congested->state)) 920 if (!test_and_set_bit(bit, &congested->state))
921 atomic_inc(&nr_bdi_congested[sync]); 921 atomic_inc(&nr_wb_congested[sync]);
922} 922}
923EXPORT_SYMBOL(set_bdi_congested); 923EXPORT_SYMBOL(set_wb_congested);
924 924
925/** 925/**
926 * congestion_wait - wait for a backing_dev to become uncongested 926 * congestion_wait - wait for a backing_dev to become uncongested
@@ -979,7 +979,7 @@ long wait_iff_congested(struct zone *zone, int sync, long timeout)
979 * encountered in the current zone, yield if necessary instead 979 * encountered in the current zone, yield if necessary instead
980 * of sleeping on the congestion queue 980 * of sleeping on the congestion queue
981 */ 981 */
982 if (atomic_read(&nr_bdi_congested[sync]) == 0 || 982 if (atomic_read(&nr_wb_congested[sync]) == 0 ||
983 !test_bit(ZONE_CONGESTED, &zone->flags)) { 983 !test_bit(ZONE_CONGESTED, &zone->flags)) {
984 cond_resched(); 984 cond_resched();
985 985