author:    Joe Perches <joe@perches.com>        2011-06-13 14:19:27 -0400
committer: Jens Axboe <jaxboe@fusionio.com>     2011-06-13 14:19:27 -0400
commit:    d2f31a5fd60d168b00fc4f7617b68a1287b21e90
tree:      c51b882926a840df57e376d21df74ec4177561a3 /block/blk-throttle.c
parent:    fd16d263194aa6b50b215eb593a567b59d744d6e
blk-throttle: Make total_nr_queued unsigned
The total of two unsigned values should also be unsigned.
Update throtl_log output to unsigned.
Update the total_nr_queued test to check for non-zero, matching the
other total_nr_queued tests.
Signed-off-by: Joe Perches <joe@perches.com>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
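As an aside (not part of the patch), here is a minimal userspace sketch of the
rationale; the struct and field names below only mimic the relevant
throtl_data counters and are assumptions for illustration, not kernel code:

/* Standalone illustration, compiles in userspace; not kernel code. */
#include <stdio.h>

struct fake_td {
	unsigned int nr_queued[2];	/* mimics throtl_data's READ/WRITE counters */
};

/* Before the patch: the sum of two unsigned counters came back as int. */
static inline int total_nr_queued_old(struct fake_td *td)
{
	return (td->nr_queued[0] + td->nr_queued[1]);
}

/* After the patch: the return type matches the unsigned operands. */
static inline unsigned int total_nr_queued_new(struct fake_td *td)
{
	return td->nr_queued[0] + td->nr_queued[1];
}

int main(void)
{
	struct fake_td td = { .nr_queued = { 3, 5 } };

	/* Old style: signed return printed with %d, even though the data is unsigned. */
	printf("old: nr_queued=%d\n", total_nr_queued_old(&td));

	/* New style: the format specifier agrees with the value's type. */
	printf("new: nr_queued=%u read=%u write=%u\n",
	       total_nr_queued_new(&td), td.nr_queued[0], td.nr_queued[1]);

	/* "> 0" on an unsigned value is just a non-zero test, so write it that way. */
	if (total_nr_queued_new(&td))
		printf("schedule throttle work\n");

	return 0;
}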
Diffstat (limited to 'block/blk-throttle.c')
-rw-r--r--   block/blk-throttle.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 3689f833afdc..f6a794120505 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -142,9 +142,9 @@ static inline struct throtl_grp *tg_of_blkg(struct blkio_group *blkg)
 	return NULL;
 }
 
-static inline int total_nr_queued(struct throtl_data *td)
+static inline unsigned int total_nr_queued(struct throtl_data *td)
 {
-	return (td->nr_queued[0] + td->nr_queued[1]);
+	return td->nr_queued[0] + td->nr_queued[1];
 }
 
 static inline struct throtl_grp *throtl_ref_get_tg(struct throtl_grp *tg)
@@ -927,7 +927,7 @@ static int throtl_dispatch(struct request_queue *q)
 
 	bio_list_init(&bio_list_on_stack);
 
-	throtl_log(td, "dispatch nr_queued=%d read=%u write=%u",
+	throtl_log(td, "dispatch nr_queued=%u read=%u write=%u",
 			total_nr_queued(td), td->nr_queued[READ],
 			td->nr_queued[WRITE]);
 
@@ -970,7 +970,7 @@ throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
 	struct delayed_work *dwork = &td->throtl_work;
 
 	/* schedule work if limits changed even if no bio is queued */
-	if (total_nr_queued(td) > 0 || td->limits_changed) {
+	if (total_nr_queued(td) || td->limits_changed) {
 		/*
 		 * We might have a work scheduled to be executed in future.
 		 * Cancel that and schedule a new one.