diff options
author | Tejun Heo <tj@kernel.org> | 2013-05-14 16:52:32 -0400 |
---|---|---|
committer | Tejun Heo <tj@kernel.org> | 2013-05-14 16:52:32 -0400 |
commit | 5b2c16aae0c074c3bb546c4c066ca7064684553c (patch) | |
tree | b7132e755036daf5f51438c312e5b936d4fdcefe /block/blk-throttle.c | |
parent | c9e0332e877c1a1ccfe4ba315a437c7a8cf6e575 (diff) |
blk-throttle: simplify throtl_grp flag handling
blk-throttle is still using function-defining macros to define flag
handling functions, which went out of style at least a decade ago.
Just define the flag as a bitmask and use direct bit operations.
This patch doesn't make any functional changes.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
Diffstat (limited to 'block/blk-throttle.c')
-rw-r--r-- | block/blk-throttle.c | 34 |
1 file changed, 9 insertions, 25 deletions
diff --git a/block/blk-throttle.c b/block/blk-throttle.c index b279110ba287..e8ef43d3fab3 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c | |||
@@ -36,6 +36,10 @@ struct throtl_service_queue { | |||
36 | #define THROTL_SERVICE_QUEUE_INITIALIZER \ | 36 | #define THROTL_SERVICE_QUEUE_INITIALIZER \ |
37 | (struct throtl_service_queue){ .pending_tree = RB_ROOT } | 37 | (struct throtl_service_queue){ .pending_tree = RB_ROOT } |
38 | 38 | ||
39 | enum tg_state_flags { | ||
40 | THROTL_TG_PENDING = 1 << 0, /* on parent's pending tree */ | ||
41 | }; | ||
42 | |||
39 | #define rb_entry_tg(node) rb_entry((node), struct throtl_grp, rb_node) | 43 | #define rb_entry_tg(node) rb_entry((node), struct throtl_grp, rb_node) |
40 | 44 | ||
41 | /* Per-cpu group stats */ | 45 | /* Per-cpu group stats */ |
@@ -136,26 +140,6 @@ static inline struct throtl_grp *td_root_tg(struct throtl_data *td) | |||
136 | return blkg_to_tg(td->queue->root_blkg); | 140 | return blkg_to_tg(td->queue->root_blkg); |
137 | } | 141 | } |
138 | 142 | ||
139 | enum tg_state_flags { | ||
140 | THROTL_TG_FLAG_on_rr = 0, /* on round-robin busy list */ | ||
141 | }; | ||
142 | |||
143 | #define THROTL_TG_FNS(name) \ | ||
144 | static inline void throtl_mark_tg_##name(struct throtl_grp *tg) \ | ||
145 | { \ | ||
146 | (tg)->flags |= (1 << THROTL_TG_FLAG_##name); \ | ||
147 | } \ | ||
148 | static inline void throtl_clear_tg_##name(struct throtl_grp *tg) \ | ||
149 | { \ | ||
150 | (tg)->flags &= ~(1 << THROTL_TG_FLAG_##name); \ | ||
151 | } \ | ||
152 | static inline int throtl_tg_##name(const struct throtl_grp *tg) \ | ||
153 | { \ | ||
154 | return ((tg)->flags & (1 << THROTL_TG_FLAG_##name)) != 0; \ | ||
155 | } | ||
156 | |||
157 | THROTL_TG_FNS(on_rr); | ||
158 | |||
159 | #define throtl_log_tg(td, tg, fmt, args...) do { \ | 143 | #define throtl_log_tg(td, tg, fmt, args...) do { \ |
160 | char __pbuf[128]; \ | 144 | char __pbuf[128]; \ |
161 | \ | 145 | \ |
@@ -369,25 +353,25 @@ static void __throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg) | |||
369 | struct throtl_service_queue *sq = &td->service_queue; | 353 | struct throtl_service_queue *sq = &td->service_queue; |
370 | 354 | ||
371 | tg_service_queue_add(sq, tg); | 355 | tg_service_queue_add(sq, tg); |
372 | throtl_mark_tg_on_rr(tg); | 356 | tg->flags |= THROTL_TG_PENDING; |
373 | sq->nr_pending++; | 357 | sq->nr_pending++; |
374 | } | 358 | } |
375 | 359 | ||
376 | static void throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg) | 360 | static void throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg) |
377 | { | 361 | { |
378 | if (!throtl_tg_on_rr(tg)) | 362 | if (!(tg->flags & THROTL_TG_PENDING)) |
379 | __throtl_enqueue_tg(td, tg); | 363 | __throtl_enqueue_tg(td, tg); |
380 | } | 364 | } |
381 | 365 | ||
382 | static void __throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg) | 366 | static void __throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg) |
383 | { | 367 | { |
384 | throtl_rb_erase(&tg->rb_node, &td->service_queue); | 368 | throtl_rb_erase(&tg->rb_node, &td->service_queue); |
385 | throtl_clear_tg_on_rr(tg); | 369 | tg->flags &= ~THROTL_TG_PENDING; |
386 | } | 370 | } |
387 | 371 | ||
388 | static void throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg) | 372 | static void throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg) |
389 | { | 373 | { |
390 | if (throtl_tg_on_rr(tg)) | 374 | if (tg->flags & THROTL_TG_PENDING) |
391 | __throtl_dequeue_tg(td, tg); | 375 | __throtl_dequeue_tg(td, tg); |
392 | } | 376 | } |
393 | 377 | ||
@@ -964,7 +948,7 @@ static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf, | |||
964 | throtl_start_new_slice(td, tg, 0); | 948 | throtl_start_new_slice(td, tg, 0); |
965 | throtl_start_new_slice(td, tg, 1); | 949 | throtl_start_new_slice(td, tg, 1); |
966 | 950 | ||
967 | if (throtl_tg_on_rr(tg)) { | 951 | if (tg->flags & THROTL_TG_PENDING) { |
968 | tg_update_disptime(td, tg); | 952 | tg_update_disptime(td, tg); |
969 | throtl_schedule_next_dispatch(td); | 953 | throtl_schedule_next_dispatch(td); |
970 | } | 954 | } |