Diffstat (limited to 'block/blk-throttle.c')

 block/blk-throttle.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 3689f833afd..a19f58c6fc3 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -142,9 +142,9 @@ static inline struct throtl_grp *tg_of_blkg(struct blkio_group *blkg)
 	return NULL;
 }
 
-static inline int total_nr_queued(struct throtl_data *td)
+static inline unsigned int total_nr_queued(struct throtl_data *td)
 {
-	return (td->nr_queued[0] + td->nr_queued[1]);
+	return td->nr_queued[0] + td->nr_queued[1];
 }
 
 static inline struct throtl_grp *throtl_ref_get_tg(struct throtl_grp *tg)
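Note: the return type is widened to unsigned int to match the counters being summed, and the redundant parentheses around the return expression are dropped. For context, a paraphrased sketch of the field this helper sums (assumed from struct throtl_data in this era's blk-throttle.c; not part of this diff):

	struct throtl_data {
		/* ... */
		/* Total number of queued bios on READ and WRITE lists */
		unsigned int nr_queued[2];
		/* ... */
	};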
@@ -746,7 +746,7 @@ static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
 static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
 {
 	bool rw = bio_data_dir(bio);
-	bool sync = bio->bi_rw & REQ_SYNC;
+	bool sync = rw_is_sync(bio->bi_rw);
 
 	/* Charge the bio to the group */
 	tg->bytes_disp[rw] += bio->bi_size;
@@ -927,7 +927,7 @@ static int throtl_dispatch(struct request_queue *q)
 
 	bio_list_init(&bio_list_on_stack);
 
-	throtl_log(td, "dispatch nr_queued=%d read=%u write=%u",
+	throtl_log(td, "dispatch nr_queued=%u read=%u write=%u",
 			total_nr_queued(td), td->nr_queued[READ],
 			td->nr_queued[WRITE]);
 
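Note: the format specifier changes from %d to %u to match the unsigned return type that total_nr_queued() acquires in the first hunk.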
@@ -970,7 +970,7 @@ throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
 	struct delayed_work *dwork = &td->throtl_work;
 
 	/* schedule work if limits changed even if no bio is queued */
-	if (total_nr_queued(td) > 0 || td->limits_changed) {
+	if (total_nr_queued(td) || td->limits_changed) {
 		/*
 		 * We might have a work scheduled to be executed in future.
 		 * Cancel that and schedule a new one.
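Note: with total_nr_queued() now returning an unsigned value, the explicit "> 0" comparison adds nothing over a plain truth test, so it is dropped.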
@@ -1150,7 +1150,7 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop)
 
 	if (tg_no_rule_group(tg, rw)) {
 		blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size,
-					rw, bio->bi_rw & REQ_SYNC);
+					rw, rw_is_sync(bio->bi_rw));
 		rcu_read_unlock();
 		return 0;
 	}
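Note: this is the same REQ_SYNC-to-rw_is_sync() substitution as in throtl_charge_bio(), applied to the no-rule fast path so the dispatch statistics also account reads as synchronous.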