author     Tejun Heo <tj@kernel.org>       2012-03-05 16:15:12 -0500
committer  Jens Axboe <axboe@kernel.dk>    2012-03-06 15:27:23 -0500
commit     5efd611351d1a847c72d74fb12ff4bd187c0cb2c
tree       bac28206d088426ab872eea98155cc100bd8d9b1 /block
parent     7ee9c5620504906e98451dc9a1945b2b9e892cb8
blkcg: add blkcg_{init|drain|exit}_queue()
Currently block core calls directly into blk-throttle for init, drain
and exit. This patch adds blkcg_{init|drain|exit}_queue() which wraps
the blk-throttle functions. This is to give more control and
visibility to blkcg core layer for proper layering. Further patches
will add logic common to blkcg policies to the functions.
While at it, collapse blk_throtl_release() into blk_throtl_exit().
There's no reason to keep them separate.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block')
 -rw-r--r--  block/blk-cgroup.c   | 42
 -rw-r--r--  block/blk-cgroup.h   |  7
 -rw-r--r--  block/blk-core.c     |  7
 -rw-r--r--  block/blk-sysfs.c    |  4
 -rw-r--r--  block/blk-throttle.c |  3
 -rw-r--r--  block/blk.h          |  2
 6 files changed, 55 insertions(+), 10 deletions(-)
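
Before the per-file hunks, here is a condensed view of the three wrappers this patch adds (pulled from the block/blk-cgroup.c hunk below with the kerneldoc trimmed, not a verbatim copy). They are deliberately thin pass-throughs so that later patches can attach policy-common logic to them:

/* Condensed sketch of the new wrappers; block core now calls only these. */
int blkcg_init_queue(struct request_queue *q)
{
	might_sleep();
	return blk_throtl_init(q);	/* logic common to blkcg policies is planned to land here */
}

void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);	/* callers hold the queue lock */
	blk_throtl_drain(q);
}

void blkcg_exit_queue(struct request_queue *q)
{
	blk_throtl_exit(q);	/* blk_throtl_exit() now also frees q->td */
}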
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index d42d826ece39..b302ce1d662b 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -20,6 +20,7 @@
 #include <linux/genhd.h>
 #include <linux/delay.h>
 #include "blk-cgroup.h"
+#include "blk.h"
 
 #define MAX_KEY_LEN 100
 
@@ -1459,6 +1460,47 @@ done:
 	return &blkcg->css;
 }
 
+/**
+ * blkcg_init_queue - initialize blkcg part of request queue
+ * @q: request_queue to initialize
+ *
+ * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
+ * part of new request_queue @q.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int blkcg_init_queue(struct request_queue *q)
+{
+	might_sleep();
+
+	return blk_throtl_init(q);
+}
+
+/**
+ * blkcg_drain_queue - drain blkcg part of request_queue
+ * @q: request_queue to drain
+ *
+ * Called from blk_drain_queue(). Responsible for draining blkcg part.
+ */
+void blkcg_drain_queue(struct request_queue *q)
+{
+	lockdep_assert_held(q->queue_lock);
+
+	blk_throtl_drain(q);
+}
+
+/**
+ * blkcg_exit_queue - exit and release blkcg part of request_queue
+ * @q: request_queue being released
+ *
+ * Called from blk_release_queue(). Responsible for exiting blkcg part.
+ */
+void blkcg_exit_queue(struct request_queue *q)
+{
+	blk_throtl_exit(q);
+}
+
 /*
  * We cannot support shared io contexts, as we have no mean to support
  * two tasks with the same ioc in two different groups without major rework
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index ca1fc637bd6e..3bc171080e93 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -215,6 +215,10 @@ struct blkio_policy_type {
 	enum blkio_policy_id plid;
 };
 
+extern int blkcg_init_queue(struct request_queue *q);
+extern void blkcg_drain_queue(struct request_queue *q);
+extern void blkcg_exit_queue(struct request_queue *q);
+
 /* Blkio controller policy registration */
 extern void blkio_policy_register(struct blkio_policy_type *);
 extern void blkio_policy_unregister(struct blkio_policy_type *);
@@ -233,6 +237,9 @@ struct blkio_group {
 struct blkio_policy_type {
 };
 
+static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
+static inline void blkcg_drain_queue(struct request_queue *q) { }
+static inline void blkcg_exit_queue(struct request_queue *q) { }
 static inline void blkio_policy_register(struct blkio_policy_type *blkiop) { }
 static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { }
 static inline void blkg_destroy_all(struct request_queue *q) { }
diff --git a/block/blk-core.c b/block/blk-core.c
index 5a1b8cc03003..c3434c6395b9 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -34,6 +34,7 @@
 #include <trace/events/block.h>
 
 #include "blk.h"
+#include "blk-cgroup.h"
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
@@ -280,7 +281,7 @@ EXPORT_SYMBOL(blk_stop_queue);
  *
  * This function does not cancel any asynchronous activity arising
  * out of elevator or throttling code. That would require elevaotor_exit()
- * and blk_throtl_exit() to be called with queue lock initialized.
+ * and blkcg_exit_queue() to be called with queue lock initialized.
  *
  */
 void blk_sync_queue(struct request_queue *q)
@@ -372,7 +373,7 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
 		if (q->elevator)
 			elv_drain_elevator(q);
 
-		blk_throtl_drain(q);
+		blkcg_drain_queue(q);
 
 		/*
 		 * This function might be called on a queue which failed
@@ -562,7 +563,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	 */
 	q->queue_lock = &q->__queue_lock;
 
-	if (blk_throtl_init(q))
+	if (blkcg_init_queue(q))
 		goto fail_id;
 
 	return q;
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index cf150011d808..00cdc987b525 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -9,6 +9,7 @@
 #include <linux/blktrace_api.h>
 
 #include "blk.h"
+#include "blk-cgroup.h"
 
 struct queue_sysfs_entry {
 	struct attribute attr;
@@ -486,7 +487,7 @@ static void blk_release_queue(struct kobject *kobj)
 		elevator_exit(q->elevator);
 	}
 
-	blk_throtl_exit(q);
+	blkcg_exit_queue(q);
 
 	if (rl->rq_pool)
 		mempool_destroy(rl->rq_pool);
@@ -494,7 +495,6 @@ static void blk_release_queue(struct kobject *kobj)
 	if (q->queue_tags)
 		__blk_queue_free_tags(q);
 
-	blk_throtl_release(q);
 	blk_trace_shutdown(q);
 
 	bdi_destroy(&q->backing_dev_info);
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index fe6a442b8482..ac6d0fe6e4ee 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1226,10 +1226,7 @@ void blk_throtl_exit(struct request_queue *q)
 	 * it.
 	 */
 	throtl_shutdown_wq(q);
-}
 
-void blk_throtl_release(struct request_queue *q)
-{
 	kfree(q->td);
 }
 
diff --git a/block/blk.h b/block/blk.h
index 7422f3133c5d..de15f920b38f 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -236,7 +236,6 @@ extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio);
 extern void blk_throtl_drain(struct request_queue *q);
 extern int blk_throtl_init(struct request_queue *q);
 extern void blk_throtl_exit(struct request_queue *q);
-extern void blk_throtl_release(struct request_queue *q);
 #else /* CONFIG_BLK_DEV_THROTTLING */
 static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 {
@@ -245,7 +244,6 @@ static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 static inline void blk_throtl_drain(struct request_queue *q) { }
 static inline int blk_throtl_init(struct request_queue *q) { return 0; }
 static inline void blk_throtl_exit(struct request_queue *q) { }
-static inline void blk_throtl_release(struct request_queue *q) { }
 #endif /* CONFIG_BLK_DEV_THROTTLING */
 
 #endif /* BLK_INTERNAL_H */
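
With the wrappers and the no-op stubs from block/blk-cgroup.h in place, the block core call sites reduce to roughly the following (a condensed sketch pulled from the blk-core.c and blk-sysfs.c hunks above; surrounding code and error labels omitted):

/* blk_alloc_queue_node(): initialize the blkcg side, bail out on failure */
if (blkcg_init_queue(q))
	goto fail_id;

/* blk_drain_queue(): called with q->queue_lock held */
blkcg_drain_queue(q);

/* blk_release_queue(): tear down the blkcg side; q->td is freed inside blk_throtl_exit() */
blkcg_exit_queue(q);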