diff options
author | Vivek Goyal <vgoyal@redhat.com> | 2010-09-15 17:06:35 -0400 |
---|---|---|
committer | Jens Axboe <jaxboe@fusionio.com> | 2010-09-16 02:42:52 -0400 |
commit | e43473b7f223ec866f7db273697e76c337c390f9 (patch) | |
tree | e90b52dbe4ec4ae37263a00e2bd9eaf5367cf72f /block/blk-core.c | |
parent | 4c9eefa16c6f124ffcc736cb719b24ea27f85017 (diff) |
blkio: Core implementation of throttle policy
o Actual implementation of throttling policy in block layer. Currently it
implements READ and WRITE bytes per second throttling logic. IOPS throttling
comes in later patches.
Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Diffstat (limited to 'block/blk-core.c')
-rw-r--r-- | block/blk-core.c | 24 |
1 file changed, 24 insertions, 0 deletions
diff --git a/block/blk-core.c b/block/blk-core.c index 8d07c1b7e701..797d5095eb83 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
@@ -382,6 +382,7 @@ void blk_sync_queue(struct request_queue *q) | |||
382 | del_timer_sync(&q->unplug_timer); | 382 | del_timer_sync(&q->unplug_timer); |
383 | del_timer_sync(&q->timeout); | 383 | del_timer_sync(&q->timeout); |
384 | cancel_work_sync(&q->unplug_work); | 384 | cancel_work_sync(&q->unplug_work); |
385 | throtl_shutdown_timer_wq(q); | ||
385 | } | 386 | } |
386 | EXPORT_SYMBOL(blk_sync_queue); | 387 | EXPORT_SYMBOL(blk_sync_queue); |
387 | 388 | ||
@@ -459,6 +460,8 @@ void blk_cleanup_queue(struct request_queue *q) | |||
459 | if (q->elevator) | 460 | if (q->elevator) |
460 | elevator_exit(q->elevator); | 461 | elevator_exit(q->elevator); |
461 | 462 | ||
463 | blk_throtl_exit(q); | ||
464 | |||
462 | blk_put_queue(q); | 465 | blk_put_queue(q); |
463 | } | 466 | } |
464 | EXPORT_SYMBOL(blk_cleanup_queue); | 467 | EXPORT_SYMBOL(blk_cleanup_queue); |
@@ -515,6 +518,11 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) | |||
515 | return NULL; | 518 | return NULL; |
516 | } | 519 | } |
517 | 520 | ||
521 | if (blk_throtl_init(q)) { | ||
522 | kmem_cache_free(blk_requestq_cachep, q); | ||
523 | return NULL; | ||
524 | } | ||
525 | |||
518 | setup_timer(&q->backing_dev_info.laptop_mode_wb_timer, | 526 | setup_timer(&q->backing_dev_info.laptop_mode_wb_timer, |
519 | laptop_mode_timer_fn, (unsigned long) q); | 527 | laptop_mode_timer_fn, (unsigned long) q); |
520 | init_timer(&q->unplug_timer); | 528 | init_timer(&q->unplug_timer); |
@@ -1522,6 +1530,15 @@ static inline void __generic_make_request(struct bio *bio) | |||
1522 | goto end_io; | 1530 | goto end_io; |
1523 | } | 1531 | } |
1524 | 1532 | ||
1533 | blk_throtl_bio(q, &bio); | ||
1534 | |||
1535 | /* | ||
1536 | * If bio = NULL, bio has been throttled and will be submitted | ||
1537 | * later. | ||
1538 | */ | ||
1539 | if (!bio) | ||
1540 | break; | ||
1541 | |||
1525 | trace_block_bio_queue(q, bio); | 1542 | trace_block_bio_queue(q, bio); |
1526 | 1543 | ||
1527 | ret = q->make_request_fn(q, bio); | 1544 | ret = q->make_request_fn(q, bio); |
@@ -2580,6 +2597,13 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work) | |||
2580 | } | 2597 | } |
2581 | EXPORT_SYMBOL(kblockd_schedule_work); | 2598 | EXPORT_SYMBOL(kblockd_schedule_work); |
2582 | 2599 | ||
2600 | int kblockd_schedule_delayed_work(struct request_queue *q, | ||
2601 | struct delayed_work *dwork, unsigned long delay) | ||
2602 | { | ||
2603 | return queue_delayed_work(kblockd_workqueue, dwork, delay); | ||
2604 | } | ||
2605 | EXPORT_SYMBOL(kblockd_schedule_delayed_work); | ||
2606 | |||
2583 | int __init blk_dev_init(void) | 2607 | int __init blk_dev_init(void) |
2584 | { | 2608 | { |
2585 | BUILD_BUG_ON(__REQ_NR_BITS > 8 * | 2609 | BUILD_BUG_ON(__REQ_NR_BITS > 8 * |