author     Tejun Heo <tj@kernel.org>    2013-05-14 16:52:37 -0400
committer  Tejun Heo <tj@kernel.org>    2013-05-14 16:52:37 -0400
commit     6e1a5704cbbd244a8db2d7d59215cf9a4c9a0d31 (patch)
tree       1d81963b53b4753eb5a492e5a15f8b68f81ed58c /block
parent     7f52f98c2a83339b89a27d01296354e5dbb90ad0 (diff)
blk-throttle: dispatch from throtl_pending_timer_fn()
Currently, blk_throtl_dispatch_work_fn() is responsible both for dispatching bio's from throtl_grp's according to their limits and for issuing the dispatched bio's.

This patch moves the dispatch part to throtl_pending_timer_fn() so that the work item is kicked iff there are bio's to issue.

This is to avoid work item execution at each step when hierarchy support is enabled. bio's will be dispatched towards the top-level service_queue from the timers at each layer, and the work item will only be used to issue the bio's which reached the top-level service_queue.

While fetching bio's to issue from bio_lists[], blk_throtl_dispatch_work_fn() fetches all READs before WRITEs. The original code also dispatched READs first, but if multiple throtl_grps were dispatched on the same run, WRITEs from a throtl_grp dispatched earlier would precede READs from throtl_grps dispatched later. While this is a behavior change, given that the previous code already prioritized READs and the block layer generally prioritizes and segregates READs from WRITEs, this isn't likely to make any noticeable difference.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
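To make the two-stage split concrete, below is a small stand-alone toy model written in plain user-space C. It is illustrative only, not the kernel code: the names (toy_pending_timer_fn, toy_dispatch_work_fn, and the tiny bio/bio_list types) are made up for this sketch, and throttling limits, timers, and locking are ignored. The "timer" stage moves bio's from a child queue to top-level per-direction lists and reports whether anything arrived; the "work" stage then issues everything that reached the top level, fetching all READs before WRITEs.

    /*
     * Toy model of the design described above -- plain user-space C, not
     * kernel code.  A "timer" stage moves bio's from a child group's queue
     * to the top-level per-direction lists; a "work" stage then issues
     * everything that reached the top level, READs before WRITEs.
     */
    #include <stdio.h>
    #include <stddef.h>

    #define READ  0
    #define WRITE 1

    struct bio { int rw; int id; struct bio *next; };
    struct bio_list { struct bio *head; struct bio *tail; };

    static void bio_list_add(struct bio_list *bl, struct bio *bio)
    {
            bio->next = NULL;
            if (bl->tail)
                    bl->tail->next = bio;
            else
                    bl->head = bio;
            bl->tail = bio;
    }

    static struct bio *bio_list_pop(struct bio_list *bl)
    {
            struct bio *bio = bl->head;

            if (bio) {
                    bl->head = bio->next;
                    if (!bl->head)
                            bl->tail = NULL;
            }
            return bio;
    }

    /* "timer" stage: dispatch a child's bio's towards the top level */
    static int toy_pending_timer_fn(struct bio_list *child, struct bio_list *top)
    {
            struct bio *bio;
            int nr = 0;

            while ((bio = bio_list_pop(child))) {   /* limits ignored in this toy */
                    bio_list_add(&top[bio->rw], bio);
                    nr++;
            }
            return nr;      /* kick the "work" stage only if something arrived */
    }

    /* "work" stage: issue what reached the top level, READs before WRITEs */
    static void toy_dispatch_work_fn(struct bio_list *top)
    {
            struct bio *bio;
            int rw;

            for (rw = READ; rw <= WRITE; rw++)
                    while ((bio = bio_list_pop(&top[rw])))
                            printf("issue %s bio %d\n",
                                   rw == READ ? "READ" : "WRITE", bio->id);
    }

    int main(void)
    {
            struct bio bios[] = {
                    { WRITE, 0, NULL }, { READ, 1, NULL },
                    { WRITE, 2, NULL }, { READ, 3, NULL },
            };
            struct bio_list child = { NULL, NULL };
            struct bio_list top[2] = { { NULL, NULL }, { NULL, NULL } };
            size_t i;

            for (i = 0; i < sizeof(bios) / sizeof(bios[0]); i++)
                    bio_list_add(&child, &bios[i]);

            /* issue only when the "timer" stage actually moved bio's up */
            if (toy_pending_timer_fn(&child, top))
                    toy_dispatch_work_fn(top);
            return 0;
    }

Running this prints the READ bio's (1, 3) before the WRITE bio's (0, 2), mirroring the READ-before-WRITE fetch order noted above; the actual patch operates under queue_lock and per-level pending timers, as shown in the diff below.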
Diffstat (limited to 'block')
-rw-r--r--    block/blk-throttle.c    69
1 file changed, 44 insertions(+), 25 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 8ee8e4e0a2ba..918d22240856 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -932,31 +932,26 @@ static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
         return nr_disp;
 }
 
+/**
+ * throtl_pending_timer_fn - timer function for service_queue->pending_timer
+ * @arg: the throtl_service_queue being serviced
+ *
+ * This timer is armed when a child throtl_grp with active bio's become
+ * pending and queued on the service_queue's pending_tree and expires when
+ * the first child throtl_grp should be dispatched.  This function
+ * dispatches bio's from the children throtl_grps and kicks
+ * throtl_data->dispatch_work if there are bio's ready to be issued.
+ */
 static void throtl_pending_timer_fn(unsigned long arg)
 {
         struct throtl_service_queue *sq = (void *)arg;
         struct throtl_data *td = sq_to_td(sq);
-
-        queue_work(kthrotld_workqueue, &td->dispatch_work);
-}
-
-/* work function to dispatch throttled bios */
-void blk_throtl_dispatch_work_fn(struct work_struct *work)
-{
-        struct throtl_data *td = container_of(work, struct throtl_data,
-                                               dispatch_work);
-        struct throtl_service_queue *sq = &td->service_queue;
         struct request_queue *q = td->queue;
-        struct bio_list bio_list_on_stack;
-        struct bio *bio;
-        struct blk_plug plug;
         bool dispatched = false;
-        int rw, ret;
+        int ret;
 
         spin_lock_irq(q->queue_lock);
 
-        bio_list_init(&bio_list_on_stack);
-
         while (true) {
                 throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
                            td->nr_queued[READ] + td->nr_queued[WRITE],
@@ -964,10 +959,6 @@ void blk_throtl_dispatch_work_fn(struct work_struct *work)
 
                 ret = throtl_select_dispatch(sq);
                 if (ret) {
-                        for (rw = READ; rw <= WRITE; rw++) {
-                                bio_list_merge(&bio_list_on_stack, &sq->bio_lists[rw]);
-                                bio_list_init(&sq->bio_lists[rw]);
-                        }
                         throtl_log(sq, "bios disp=%u", ret);
                         dispatched = true;
                 }
@@ -981,13 +972,41 @@ void blk_throtl_dispatch_work_fn(struct work_struct *work)
                 spin_lock_irq(q->queue_lock);
         }
 
+        if (dispatched)
+                queue_work(kthrotld_workqueue, &td->dispatch_work);
+
         spin_unlock_irq(q->queue_lock);
+}
 
-        /*
-         * If we dispatched some requests, unplug the queue to make sure
-         * immediate dispatch
-         */
-        if (dispatched) {
+/**
+ * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
+ * @work: work item being executed
+ *
+ * This function is queued for execution when bio's reach the bio_lists[]
+ * of throtl_data->service_queue.  Those bio's are ready and issued by this
+ * function.
+ */
+void blk_throtl_dispatch_work_fn(struct work_struct *work)
+{
+        struct throtl_data *td = container_of(work, struct throtl_data,
+                                               dispatch_work);
+        struct throtl_service_queue *td_sq = &td->service_queue;
+        struct request_queue *q = td->queue;
+        struct bio_list bio_list_on_stack;
+        struct bio *bio;
+        struct blk_plug plug;
+        int rw;
+
+        bio_list_init(&bio_list_on_stack);
+
+        spin_lock_irq(q->queue_lock);
+        for (rw = READ; rw <= WRITE; rw++) {
+                bio_list_merge(&bio_list_on_stack, &td_sq->bio_lists[rw]);
+                bio_list_init(&td_sq->bio_lists[rw]);
+        }
+        spin_unlock_irq(q->queue_lock);
+
+        if (!bio_list_empty(&bio_list_on_stack)) {
                 blk_start_plug(&plug);
                 while((bio = bio_list_pop(&bio_list_on_stack)))
                         generic_make_request(bio);