From e2494e1b42ebac402324105d57646489d19e2b01 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Thu, 2 Apr 2009 13:43:26 +0800 Subject: blktrace: fix pdu_len when tracing packet command requests Impact: output all of packet commands - not just the first 4 / 8 bytes Since commit d7e3c3249ef23b4617393c69fe464765b4ff1645 ("block: add large command support"), struct request->cmd has been changed from unsigned char cmd[BLK_MAX_CDB] to unsigned char *cmd. v1 -> v2 (by FUJITA Tomonori): make sure rq->cmd_len is always initialized, and then we can use rq->cmd_len instead of BLK_MAX_CDB. Signed-off-by: Li Zefan Acked-by: FUJITA Tomonori Cc: Arnaldo Carvalho de Melo Cc: Steven Rostedt Cc: Frederic Weisbecker Cc: Jens Axboe LKML-Reference: <49D4507E.2060602@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- block/blk-core.c | 1 + 1 file changed, 1 insertion(+) (limited to 'block') diff --git a/block/blk-core.c b/block/blk-core.c index 29bcfac6c688..859879d0a0bf 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -132,6 +132,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq) INIT_HLIST_NODE(&rq->hash); RB_CLEAR_NODE(&rq->rb_node); rq->cmd = rq->__cmd; + rq->cmd_len = BLK_MAX_CDB; rq->tag = -1; rq->ref_count = 1; } -- cgit v1.2.2 From 8feb4d20b4b867e7a44f7486ecb028cc01a564ae Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Wed, 1 Apr 2009 15:01:39 +0100 Subject: pata_artop: typo Fix a typo (this was in the original patch but for some reason was not merged along with the code fixes) Signed-off-by: Alan Cox Signed-off-by: Jeff Garzik --- block/blk-settings.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'block') diff --git a/block/blk-settings.c b/block/blk-settings.c index 59fd05d9f1d5..69c42adde52b 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -431,7 +431,7 @@ EXPORT_SYMBOL(blk_queue_segment_boundary); * * description: * set required memory and length alignment for direct dma transactions. - this is used when buiding direct io requests for the queue. + this is used when building direct io requests for the queue. * **/ void blk_queue_dma_alignment(struct request_queue *q, int mask) -- cgit v1.2.2 From 6c7e8cee6a9128eeb7f83c3ad1cb243f77f5cb16 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 27 Mar 2009 10:30:47 +0100 Subject: block: elevator quiescing helpers Simple helper functions to quiesce the request queue. These are currently only used for switching IO schedulers on-the-fly, but we can use them to properly switch IO accounting on and off as well. Signed-off-by: Jerome Marchand Signed-off-by: Jens Axboe --- block/blk.h | 4 ++++ block/elevator.c | 40 +++++++++++++++++++++++++++------------- 2 files changed, 31 insertions(+), 13 deletions(-) (limited to 'block') diff --git a/block/blk.h b/block/blk.h index 3ee94358b43d..22043c2886c7 100644 --- a/block/blk.h +++ b/block/blk.h @@ -70,6 +70,10 @@ void blk_queue_congestion_threshold(struct request_queue *q); int blk_dev_init(void); +void elv_quisce_start(struct request_queue *q); +void elv_quisce_end(struct request_queue *q); + + /* * Return the threshold (number of used requests) at which the queue is * considered to be congested.
It include a little hysteresis to keep the diff --git a/block/elevator.c b/block/elevator.c index ca6788a0195a..c6744913ff4a 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -587,6 +587,31 @@ static void elv_drain_elevator(struct request_queue *q) } } +/* + * Call with queue lock held, interrupts disabled + */ +void elv_quisce_start(struct request_queue *q) +{ + queue_flag_set(QUEUE_FLAG_ELVSWITCH, q); + + /* + * make sure we don't have any requests in flight + */ + elv_drain_elevator(q); + while (q->rq.elvpriv) { + blk_start_queueing(q); + spin_unlock_irq(q->queue_lock); + msleep(10); + spin_lock_irq(q->queue_lock); + elv_drain_elevator(q); + } +} + +void elv_quisce_end(struct request_queue *q) +{ + queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q); +} + void elv_insert(struct request_queue *q, struct request *rq, int where) { struct list_head *pos; @@ -1101,18 +1126,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) * Turn on BYPASS and drain all requests w/ elevator private data */ spin_lock_irq(q->queue_lock); - - queue_flag_set(QUEUE_FLAG_ELVSWITCH, q); - - elv_drain_elevator(q); - - while (q->rq.elvpriv) { - blk_start_queueing(q); - spin_unlock_irq(q->queue_lock); - msleep(10); - spin_lock_irq(q->queue_lock); - elv_drain_elevator(q); - } + elv_quisce_start(q); /* * Remember old elevator. @@ -1136,7 +1150,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) */ elevator_exit(old_elevator); spin_lock_irq(q->queue_lock); - queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q); + elv_quisce_end(q); spin_unlock_irq(q->queue_lock); blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name); -- cgit v1.2.2 From 26308eab69aa193f7b3fb50764a64ae14544a39b Mon Sep 17 00:00:00 2001 From: Jerome Marchand Date: Fri, 27 Mar 2009 10:31:51 +0100 Subject: block: fix inconsistency in I/O stat accounting code This forces in_flight to be zero when turning off or on the I/O stat accounting and stops updating I/O stats in attempt_merge() when accounting is turned off. 
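The fix below hinges on gating every accounting site (submission, merge, completion) on the same per-request test rather than on the disk's queue alone. A minimal userspace model of that gate, with stand-in types instead of the real struct request and gendisk, could look like the following sketch; the field names are only loose analogues of QUEUE_FLAG_IO_STAT and REQ_ELVPRIV.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures involved; only the
 * fields the gate looks at are modelled. */
struct queue   { bool io_stat_enabled; };      /* QUEUE_FLAG_IO_STAT analogue */
struct disk    { struct queue *queue; };
struct request {
        struct disk *rq_disk;
        bool elv_priv;                         /* REQ_ELVPRIV analogue */
};

/* Account a request only if it has a disk, the disk's queue has iostats
 * enabled, and the request carries elevator-private data, so submissions,
 * merges and completions are all gated by the same test. */
static bool do_io_stat(const struct request *rq)
{
        if (!rq->rq_disk || !rq->rq_disk->queue)
                return false;
        return rq->rq_disk->queue->io_stat_enabled && rq->elv_priv;
}

int main(void)
{
        struct queue q = { .io_stat_enabled = true };
        struct disk d = { .queue = &q };
        struct request rq = { .rq_disk = &d, .elv_priv = true };

        printf("account: %d\n", do_io_stat(&rq));   /* 1 */
        q.io_stat_enabled = false;
        printf("account: %d\n", do_io_stat(&rq));   /* 0 */
        return 0;
}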
Signed-off-by: Jerome Marchand Signed-off-by: Jens Axboe --- block/blk-core.c | 13 ++++--------- block/blk-merge.c | 29 +++++++++++++++++------------ block/blk-sysfs.c | 4 ++++ block/blk.h | 10 ++++++---- block/elevator.c | 2 +- 5 files changed, 32 insertions(+), 26 deletions(-) (limited to 'block') diff --git a/block/blk-core.c b/block/blk-core.c index 25572802dac2..3688abff2430 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -64,12 +64,11 @@ static struct workqueue_struct *kblockd_workqueue; static void drive_stat_acct(struct request *rq, int new_io) { - struct gendisk *disk = rq->rq_disk; struct hd_struct *part; int rw = rq_data_dir(rq); int cpu; - if (!blk_fs_request(rq) || !disk || !blk_do_io_stat(disk->queue)) + if (!blk_fs_request(rq) || !blk_do_io_stat(rq)) return; cpu = part_stat_lock(); @@ -1675,9 +1674,7 @@ EXPORT_SYMBOL(blkdev_dequeue_request); static void blk_account_io_completion(struct request *req, unsigned int bytes) { - struct gendisk *disk = req->rq_disk; - - if (!disk || !blk_do_io_stat(disk->queue)) + if (!blk_do_io_stat(req)) return; if (blk_fs_request(req)) { @@ -1694,9 +1691,7 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes) static void blk_account_io_done(struct request *req) { - struct gendisk *disk = req->rq_disk; - - if (!disk || !blk_do_io_stat(disk->queue)) + if (!blk_do_io_stat(req)) return; /* @@ -1711,7 +1706,7 @@ static void blk_account_io_done(struct request *req) int cpu; cpu = part_stat_lock(); - part = disk_map_sector_rcu(disk, req->sector); + part = disk_map_sector_rcu(req->rq_disk, req->sector); part_stat_inc(cpu, part, ios[rw]); part_stat_add(cpu, part, ticks[rw], duration); diff --git a/block/blk-merge.c b/block/blk-merge.c index e39cb24b7679..63760ca3da0f 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -338,6 +338,22 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req, return 1; } +static void blk_account_io_merge(struct request *req) +{ + if (blk_do_io_stat(req)) { + struct hd_struct *part; + int cpu; + + cpu = part_stat_lock(); + part = disk_map_sector_rcu(req->rq_disk, req->sector); + + part_round_stats(cpu, part); + part_dec_in_flight(part); + + part_stat_unlock(); + } +} + /* * Has to be called with the request spinlock acquired */ @@ -386,18 +402,7 @@ static int attempt_merge(struct request_queue *q, struct request *req, elv_merge_requests(q, req, next); - if (req->rq_disk) { - struct hd_struct *part; - int cpu; - - cpu = part_stat_lock(); - part = disk_map_sector_rcu(req->rq_disk, req->sector); - - part_round_stats(cpu, part); - part_dec_in_flight(part); - - part_stat_unlock(); - } + blk_account_io_merge(req); req->ioprio = ioprio_best(req->ioprio, next->ioprio); if (blk_rq_cpu_valid(next)) diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 3ff9bba3379a..73f36beff5cd 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -209,10 +209,14 @@ static ssize_t queue_iostats_store(struct request_queue *q, const char *page, ssize_t ret = queue_var_store(&stats, page, count); spin_lock_irq(q->queue_lock); + elv_quisce_start(q); + if (stats) queue_flag_set(QUEUE_FLAG_IO_STAT, q); else queue_flag_clear(QUEUE_FLAG_IO_STAT, q); + + elv_quisce_end(q); spin_unlock_irq(q->queue_lock); return ret; diff --git a/block/blk.h b/block/blk.h index 22043c2886c7..24fcaeeaf620 100644 --- a/block/blk.h +++ b/block/blk.h @@ -112,12 +112,14 @@ static inline int blk_cpu_to_group(int cpu) #endif } -static inline int blk_do_io_stat(struct request_queue *q) +static inline int 
blk_do_io_stat(struct request *rq) { - if (q) - return blk_queue_io_stat(q); + struct gendisk *disk = rq->rq_disk; - return 0; + if (!disk || !disk->queue) + return 0; + + return blk_queue_io_stat(disk->queue) && (rq->cmd_flags & REQ_ELVPRIV); } #endif diff --git a/block/elevator.c b/block/elevator.c index c6744913ff4a..fb81bcc14a8c 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -573,7 +573,7 @@ void elv_requeue_request(struct request_queue *q, struct request *rq) elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE); } -static void elv_drain_elevator(struct request_queue *q) +void elv_drain_elevator(struct request_queue *q) { static int printed; while (q->elevator->ops->elevator_dispatch_fn(q, 1)) -- cgit v1.2.2 From 2f5cb7381b737e24c8046fd4aeab571fb71315f5 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 7 Apr 2009 08:51:19 +0200 Subject: cfq-iosched: change dispatch logic to deal with single requests at a time The IO scheduler core calls into the IO scheduler dispatch_request hook to move requests from the IO scheduler and into the driver dispatch list. It only does so when the dispatch list is empty. CFQ moves several requests to the dispatch list, which can cause higher latencies if we suddenly have to switch to some important sync IO. Change the logic to move one request at a time instead. This should almost be functionally equivalent to what we did before, except that we now honor 'quantum' as the maximum queue depth at the device side from any single cfqq. If there's just a single active cfqq, we allow up to 4 times the normal quantum. Signed-off-by: Jens Axboe --- block/cfq-iosched.c | 166 ++++++++++++++++++++++++++++------------------------ 1 file changed, 90 insertions(+), 76 deletions(-) (limited to 'block') diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 9e809345f71a..a0102a507dae 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -160,6 +160,7 @@ struct cfq_queue { unsigned long slice_end; long slice_resid; + unsigned int slice_dispatch; /* pending metadata requests */ int meta_pending; @@ -774,10 +775,16 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd, if (cfqq) { cfq_log_cfqq(cfqd, cfqq, "set_active"); cfqq->slice_end = 0; + cfqq->slice_dispatch = 0; + + cfq_clear_cfqq_must_dispatch(cfqq); + cfq_clear_cfqq_wait_request(cfqq); cfq_clear_cfqq_must_alloc_slice(cfqq); cfq_clear_cfqq_fifo_expire(cfqq); cfq_mark_cfqq_slice_new(cfqq); cfq_clear_cfqq_queue_new(cfqq); + + del_timer(&cfqd->idle_slice_timer); } cfqd->active_queue = cfqq; @@ -1053,66 +1060,6 @@ keep_queue: return cfqq; } -/* - * Dispatch some requests from cfqq, moving them to the request queue - * dispatch list.
- */ -static int -__cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq, - int max_dispatch) -{ - int dispatched = 0; - - BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list)); - - do { - struct request *rq; - - /* - * follow expired path, else get first next available - */ - rq = cfq_check_fifo(cfqq); - if (rq == NULL) - rq = cfqq->next_rq; - - /* - * finally, insert request into driver dispatch list - */ - cfq_dispatch_insert(cfqd->queue, rq); - - dispatched++; - - if (!cfqd->active_cic) { - atomic_inc(&RQ_CIC(rq)->ioc->refcount); - cfqd->active_cic = RQ_CIC(rq); - } - - if (RB_EMPTY_ROOT(&cfqq->sort_list)) - break; - - /* - * If there is a non-empty RT cfqq waiting for current - * cfqq's timeslice to complete, pre-empt this cfqq - */ - if (!cfq_class_rt(cfqq) && cfqd->busy_rt_queues) - break; - - } while (dispatched < max_dispatch); - - /* - * expire an async queue immediately if it has used up its slice. idle - * queue always expire after 1 dispatch round. - */ - if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) && - dispatched >= cfq_prio_to_maxrq(cfqd, cfqq)) || - cfq_class_idle(cfqq))) { - cfqq->slice_end = jiffies + 1; - cfq_slice_expired(cfqd, 0); - } - - return dispatched; -} - static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq) { int dispatched = 0; @@ -1146,11 +1093,45 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd) return dispatched; } +/* + * Dispatch a request from cfqq, moving them to the request queue + * dispatch list. + */ +static void cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + struct request *rq; + + BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list)); + + /* + * follow expired path, else get first next available + */ + rq = cfq_check_fifo(cfqq); + if (!rq) + rq = cfqq->next_rq; + + /* + * insert request into driver dispatch list + */ + cfq_dispatch_insert(cfqd->queue, rq); + + if (!cfqd->active_cic) { + struct cfq_io_context *cic = RQ_CIC(rq); + + atomic_inc(&cic->ioc->refcount); + cfqd->active_cic = cic; + } +} + +/* + * Find the cfqq that we need to service and move a request from that to the + * dispatch list + */ static int cfq_dispatch_requests(struct request_queue *q, int force) { struct cfq_data *cfqd = q->elevator->elevator_data; struct cfq_queue *cfqq; - int dispatched; + unsigned int max_dispatch; if (!cfqd->busy_queues) return 0; @@ -1158,29 +1139,62 @@ static int cfq_dispatch_requests(struct request_queue *q, int force) if (unlikely(force)) return cfq_forced_dispatch(cfqd); - dispatched = 0; - while ((cfqq = cfq_select_queue(cfqd)) != NULL) { - int max_dispatch; + cfqq = cfq_select_queue(cfqd); + if (!cfqq) + return 0; + + /* + * If this is an async queue and we have sync IO in flight, let it wait + */ + if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq)) + return 0; - max_dispatch = cfqd->cfq_quantum; + max_dispatch = cfqd->cfq_quantum; + if (cfq_class_idle(cfqq)) + max_dispatch = 1; + + /* + * Does this cfqq already have too much IO in flight? 
+ */ + if (cfqq->dispatched >= max_dispatch) { + /* + * idle queue must always only have a single IO in flight + */ if (cfq_class_idle(cfqq)) - max_dispatch = 1; + return 0; - if (cfqq->dispatched >= max_dispatch && cfqd->busy_queues > 1) - break; + /* + * We have other queues, don't allow more IO from this one + */ + if (cfqd->busy_queues > 1) + return 0; - if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq)) - break; + /* + * we are the only queue, allow up to 4 times of 'quantum' + */ + if (cfqq->dispatched >= 4 * max_dispatch) + return 0; + } - cfq_clear_cfqq_must_dispatch(cfqq); - cfq_clear_cfqq_wait_request(cfqq); - del_timer(&cfqd->idle_slice_timer); + /* + * Dispatch a request from this cfqq + */ + cfq_dispatch_request(cfqd, cfqq); + cfqq->slice_dispatch++; - dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch); + /* + * expire an async queue immediately if it has used up its slice. idle + * queue always expire after 1 dispatch round. + */ + if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) && + cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) || + cfq_class_idle(cfqq))) { + cfqq->slice_end = jiffies + 1; + cfq_slice_expired(cfqd, 0); } - cfq_log(cfqd, "dispatched=%d", dispatched); - return dispatched; + cfq_log(cfqd, "dispatched a request"); + return 1; } /* -- cgit v1.2.2 From 75e50984f062de2abc4bd84c642923e2c48ce2ae Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 7 Apr 2009 08:56:14 +0200 Subject: cfq-iosched: kill two unused cfqq flags We only manipulate the must_dispatch and queue_new flags, they are not tested anymore. So get rid of them. Signed-off-by: Jens Axboe --- block/cfq-iosched.c | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) (limited to 'block') diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index a0102a507dae..11efcf196e74 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -179,11 +179,9 @@ enum cfqq_state_flags { CFQ_CFQQ_FLAG_wait_request, /* waiting for a request */ CFQ_CFQQ_FLAG_must_alloc, /* must be allowed rq alloc */ CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */ - CFQ_CFQQ_FLAG_must_dispatch, /* must dispatch, even if expired */ CFQ_CFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */ CFQ_CFQQ_FLAG_idle_window, /* slice idling enabled */ CFQ_CFQQ_FLAG_prio_changed, /* task priority has changed */ - CFQ_CFQQ_FLAG_queue_new, /* queue never been serviced */ CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */ CFQ_CFQQ_FLAG_sync, /* synchronous queue */ }; @@ -206,11 +204,9 @@ CFQ_CFQQ_FNS(on_rr); CFQ_CFQQ_FNS(wait_request); CFQ_CFQQ_FNS(must_alloc); CFQ_CFQQ_FNS(must_alloc_slice); -CFQ_CFQQ_FNS(must_dispatch); CFQ_CFQQ_FNS(fifo_expire); CFQ_CFQQ_FNS(idle_window); CFQ_CFQQ_FNS(prio_changed); -CFQ_CFQQ_FNS(queue_new); CFQ_CFQQ_FNS(slice_new); CFQ_CFQQ_FNS(sync); #undef CFQ_CFQQ_FNS @@ -777,12 +773,10 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd, cfqq->slice_end = 0; cfqq->slice_dispatch = 0; - cfq_clear_cfqq_must_dispatch(cfqq); cfq_clear_cfqq_wait_request(cfqq); cfq_clear_cfqq_must_alloc_slice(cfqq); cfq_clear_cfqq_fifo_expire(cfqq); cfq_mark_cfqq_slice_new(cfqq); - cfq_clear_cfqq_queue_new(cfqq); del_timer(&cfqd->idle_slice_timer); } @@ -802,7 +796,6 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq, if (cfq_cfqq_wait_request(cfqq)) del_timer(&cfqd->idle_slice_timer); - cfq_clear_cfqq_must_dispatch(cfqq); cfq_clear_cfqq_wait_request(cfqq); /* @@ -931,7 +924,6 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd) 
(sample_valid(cic->ttime_samples) && cic->ttime_mean > 2)) return; - cfq_mark_cfqq_must_dispatch(cfqq); cfq_mark_cfqq_wait_request(cfqq); /* @@ -1520,7 +1512,6 @@ retry: cfqq->cfqd = cfqd; cfq_mark_cfqq_prio_changed(cfqq); - cfq_mark_cfqq_queue_new(cfqq); cfq_init_prio_data(cfqq, ioc); @@ -1912,7 +1903,6 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, * just now */ if (cfq_cfqq_wait_request(cfqq)) { - cfq_mark_cfqq_must_dispatch(cfqq); del_timer(&cfqd->idle_slice_timer); blk_start_queueing(cfqd->queue); } @@ -1924,7 +1914,6 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, * this new queue is RT and the current one is BE */ cfq_preempt_queue(cfqd, cfqq); - cfq_mark_cfqq_must_dispatch(cfqq); blk_start_queueing(cfqd->queue); } } @@ -2201,10 +2190,8 @@ static void cfq_idle_slice_timer(unsigned long data) /* * not expired and it has a request pending, let it dispatch */ - if (!RB_EMPTY_ROOT(&cfqq->sort_list)) { - cfq_mark_cfqq_must_dispatch(cfqq); + if (!RB_EMPTY_ROOT(&cfqq->sort_list)) goto out_kick; - } } expire: cfq_slice_expired(cfqd, timed_out); -- cgit v1.2.2 From 2385327725419a76cfbca7258abd95908b8ba9eb Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 7 Apr 2009 08:59:11 +0200 Subject: block: remove unused REQ_UNPLUG The request inherits the unplug flag from the bio, but it isn't actually used. The bio flag stops at __make_request(), which tells it to unplug after submission. Passing it on to the request doesn't make any sense. Signed-off-by: Jens Axboe --- block/blk-core.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'block') diff --git a/block/blk-core.c b/block/blk-core.c index 3688abff2430..43fdedc524ee 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1123,8 +1123,6 @@ void init_request_from_bio(struct request *req, struct bio *bio) if (bio_sync(bio)) req->cmd_flags |= REQ_RW_SYNC; - if (bio_unplug(bio)) - req->cmd_flags |= REQ_UNPLUG; if (bio_rw_meta(bio)) req->cmd_flags |= REQ_RW_META; if (bio_noidle(bio)) -- cgit v1.2.2 From b029195dda0129b427c6e579a3bb3ae752da3a93 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 7 Apr 2009 11:38:31 +0200 Subject: cfq-iosched: don't let idling interfere with plugging When CFQ is waiting for a new request from a process, currently it'll immediately restart queuing when it sees such a request. This doesn't work very well with streamed IO, since we then end up splitting IO that would otherwise have been merged nicely. For a simple dd test, this causes 10x as many requests to be issued as we should have. Normally this goes unnoticed due to the low overhead of requests at the device side, but some hardware is very sensitive to request sizes and there it can cause big slow downs. 
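The cost being described is easy to picture: if the device queue is kicked for every incoming bio, a sequential stream goes out as many small requests, while deferring the kick lets contiguous bios merge into one large request first. The toy model below only illustrates that difference with made-up sizes; it is not the CFQ code itself.

#include <stdio.h>

/* Toy model: a fully sequential stream of 4KB bios either becomes one
 * request per bio (immediate kick) or merges into a single large request
 * when dispatch is deferred. Sizes and counts are illustrative only. */
#define NBIOS        8
#define BIO_SECTORS  8          /* 4KB bios, 512-byte sectors */

static int requests_if_kicked_immediately(void)
{
        return NBIOS;           /* each bio is dispatched before the next arrives */
}

static int requests_if_merging_allowed(void)
{
        int requests = 1;       /* first bio opens a request */
        long next_sector = BIO_SECTORS;

        for (int i = 1; i < NBIOS; i++) {
                long sector = (long)i * BIO_SECTORS;
                if (sector == next_sector) {    /* contiguous: back-merge */
                        next_sector += BIO_SECTORS;
                } else {                        /* gap: open a new request */
                        requests++;
                        next_sector = sector + BIO_SECTORS;
                }
        }
        return requests;
}

int main(void)
{
        printf("immediate kick: %d requests\n", requests_if_kicked_immediately());
        printf("deferred kick : %d requests\n", requests_if_merging_allowed());
        return 0;
}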
Signed-off-by: Jens Axboe --- block/cfq-iosched.c | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) (limited to 'block') diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 11efcf196e74..a4809de6fea6 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -177,6 +177,7 @@ struct cfq_queue { enum cfqq_state_flags { CFQ_CFQQ_FLAG_on_rr = 0, /* on round-robin busy list */ CFQ_CFQQ_FLAG_wait_request, /* waiting for a request */ + CFQ_CFQQ_FLAG_must_dispatch, /* must be allowed a dispatch */ CFQ_CFQQ_FLAG_must_alloc, /* must be allowed rq alloc */ CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */ CFQ_CFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */ @@ -202,6 +203,7 @@ static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \ CFQ_CFQQ_FNS(on_rr); CFQ_CFQQ_FNS(wait_request); +CFQ_CFQQ_FNS(must_dispatch); CFQ_CFQQ_FNS(must_alloc); CFQ_CFQQ_FNS(must_alloc_slice); CFQ_CFQQ_FNS(fifo_expire); @@ -774,6 +776,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd, cfqq->slice_dispatch = 0; cfq_clear_cfqq_wait_request(cfqq); + cfq_clear_cfqq_must_dispatch(cfqq); cfq_clear_cfqq_must_alloc_slice(cfqq); cfq_clear_cfqq_fifo_expire(cfqq); cfq_mark_cfqq_slice_new(cfqq); @@ -1009,7 +1012,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) /* * The active queue has run out of time, expire it and select new. */ - if (cfq_slice_used(cfqq)) + if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) goto expire; /* @@ -1173,6 +1176,7 @@ static int cfq_dispatch_requests(struct request_queue *q, int force) */ cfq_dispatch_request(cfqd, cfqq); cfqq->slice_dispatch++; + cfq_clear_cfqq_must_dispatch(cfqq); /* * expire an async queue immediately if it has used up its slice. idle @@ -1898,14 +1902,13 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, if (cfqq == cfqd->active_queue) { /* - * if we are waiting for a request for this queue, let it rip - * immediately and flag that we must not expire this queue - * just now + * Remember that we saw a request from this process, but + * don't start queuing just yet. Otherwise we risk seeing lots + * of tiny requests, because we disrupt the normal plugging + * and merging. */ - if (cfq_cfqq_wait_request(cfqq)) { - del_timer(&cfqd->idle_slice_timer); - blk_start_queueing(cfqd->queue); - } + if (cfq_cfqq_wait_request(cfqq)) + cfq_mark_cfqq_must_dispatch(cfqq); } else if (cfq_should_preempt(cfqd, cfqq, rq)) { /* * not the active queue - expire current slice if it is @@ -2174,6 +2177,12 @@ static void cfq_idle_slice_timer(unsigned long data) if (cfqq) { timed_out = 0; + /* + * We saw a request before the queue expired, let it through + */ + if (cfq_cfqq_must_dispatch(cfqq)) + goto out_kick; + /* * expired */ -- cgit v1.2.2 From f600abe2de81628c40effbb3f8eaf5af0d291e57 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 8 Apr 2009 14:22:01 +0200 Subject: block: fix bad spelling of quiesce Credit goes to Andrew Morton for spotting this one. 
Signed-off-by: Jens Axboe --- block/blk-sysfs.c | 4 ++-- block/blk.h | 4 ++-- block/elevator.c | 8 ++++---- 3 files changed, 8 insertions(+), 8 deletions(-) (limited to 'block') diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 73f36beff5cd..cac4e9febe6a 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -209,14 +209,14 @@ static ssize_t queue_iostats_store(struct request_queue *q, const char *page, ssize_t ret = queue_var_store(&stats, page, count); spin_lock_irq(q->queue_lock); - elv_quisce_start(q); + elv_quiesce_start(q); if (stats) queue_flag_set(QUEUE_FLAG_IO_STAT, q); else queue_flag_clear(QUEUE_FLAG_IO_STAT, q); - elv_quisce_end(q); + elv_quiesce_end(q); spin_unlock_irq(q->queue_lock); return ret; diff --git a/block/blk.h b/block/blk.h index 24fcaeeaf620..5dfc41267a08 100644 --- a/block/blk.h +++ b/block/blk.h @@ -70,8 +70,8 @@ void blk_queue_congestion_threshold(struct request_queue *q); int blk_dev_init(void); -void elv_quisce_start(struct request_queue *q); -void elv_quisce_end(struct request_queue *q); +void elv_quiesce_start(struct request_queue *q); +void elv_quiesce_end(struct request_queue *q); /* diff --git a/block/elevator.c b/block/elevator.c index fb81bcc14a8c..7073a9072577 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -590,7 +590,7 @@ void elv_drain_elevator(struct request_queue *q) /* * Call with queue lock held, interrupts disabled */ -void elv_quisce_start(struct request_queue *q) +void elv_quiesce_start(struct request_queue *q) { queue_flag_set(QUEUE_FLAG_ELVSWITCH, q); @@ -607,7 +607,7 @@ void elv_quisce_start(struct request_queue *q) } } -void elv_quisce_end(struct request_queue *q) +void elv_quiesce_end(struct request_queue *q) { queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q); } @@ -1126,7 +1126,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) * Turn on BYPASS and drain all requests w/ elevator private data */ spin_lock_irq(q->queue_lock); - elv_quisce_start(q); + elv_quiesce_start(q); /* * Remember old elevator. @@ -1150,7 +1150,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) */ elevator_exit(old_elevator); spin_lock_irq(q->queue_lock); - elv_quisce_end(q); + elv_quiesce_end(q); spin_unlock_irq(q->queue_lock); blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name); -- cgit v1.2.2 From b0b78f81a5a3bfe9442fcc3a9c13e298a742556a Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 8 Apr 2009 10:56:08 +0200 Subject: cfq-iosched: use rw_is_sync() to see if rw flags are sync or not Signed-off-by: Jens Axboe --- block/cfq-iosched.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'block') diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index a4809de6fea6..f36f8b4d04cf 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -2062,7 +2062,7 @@ static int cfq_may_queue(struct request_queue *q, int rw) if (!cic) return ELV_MQUEUE_MAY; - cfqq = cic_to_cfqq(cic, rw & REQ_RW_SYNC); + cfqq = cic_to_cfqq(cic, rw_is_sync(rw)); if (cfqq) { cfq_init_prio_data(cfqq, cic->ioc); cfq_prio_boost(cfqq); -- cgit v1.2.2 From ff6657c6c8ac99444e5dd4c4f7c1dc9271173382 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 8 Apr 2009 10:58:57 +0200 Subject: cfq-iosched: get rid of private SYNC/ASYNC defines We can just use the block layer BLK_RW_SYNC/ASYNC defines now. 
Signed-off-by: Jens Axboe --- block/cfq-iosched.c | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) (limited to 'block') diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index f36f8b4d04cf..cba8a5d91bfa 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -56,9 +56,6 @@ static DEFINE_SPINLOCK(ioc_gone_lock); #define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE) #define cfq_class_rt(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_RT) -#define ASYNC (0) -#define SYNC (1) - #define sample_valid(samples) ((samples) > 80) /* @@ -1333,14 +1330,14 @@ static void __cfq_exit_single_io_context(struct cfq_data *cfqd, if (ioc->ioc_data == cic) rcu_assign_pointer(ioc->ioc_data, NULL); - if (cic->cfqq[ASYNC]) { - cfq_exit_cfqq(cfqd, cic->cfqq[ASYNC]); - cic->cfqq[ASYNC] = NULL; + if (cic->cfqq[BLK_RW_ASYNC]) { + cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]); + cic->cfqq[BLK_RW_ASYNC] = NULL; } - if (cic->cfqq[SYNC]) { - cfq_exit_cfqq(cfqd, cic->cfqq[SYNC]); - cic->cfqq[SYNC] = NULL; + if (cic->cfqq[BLK_RW_SYNC]) { + cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]); + cic->cfqq[BLK_RW_SYNC] = NULL; } } @@ -1449,17 +1446,18 @@ static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic) spin_lock_irqsave(cfqd->queue->queue_lock, flags); - cfqq = cic->cfqq[ASYNC]; + cfqq = cic->cfqq[BLK_RW_ASYNC]; if (cfqq) { struct cfq_queue *new_cfqq; - new_cfqq = cfq_get_queue(cfqd, ASYNC, cic->ioc, GFP_ATOMIC); + new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->ioc, + GFP_ATOMIC); if (new_cfqq) { - cic->cfqq[ASYNC] = new_cfqq; + cic->cfqq[BLK_RW_ASYNC] = new_cfqq; cfq_put_queue(cfqq); } } - cfqq = cic->cfqq[SYNC]; + cfqq = cic->cfqq[BLK_RW_SYNC]; if (cfqq) cfq_mark_cfqq_prio_changed(cfqq); -- cgit v1.2.2 From 1d6bfbdf38f37bcc259e7c56c8c73664772651fb Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 8 Apr 2009 11:02:08 +0200 Subject: as-iosched: get rid of private REQ_SYNC/REQ_ASYNC defines We can just use the block layer BLK_RW_SYNC/ASYNC defines now. Signed-off-by: Jens Axboe --- block/as-iosched.c | 116 ++++++++++++++++++++++++++--------------------------- 1 file changed, 57 insertions(+), 59 deletions(-) (limited to 'block') diff --git a/block/as-iosched.c b/block/as-iosched.c index 631f6f44460a..c48fa670d221 100644 --- a/block/as-iosched.c +++ b/block/as-iosched.c @@ -17,9 +17,6 @@ #include #include -#define REQ_SYNC 1 -#define REQ_ASYNC 0 - /* * See Documentation/block/as-iosched.txt */ @@ -93,7 +90,7 @@ struct as_data { struct list_head fifo_list[2]; struct request *next_rq[2]; /* next in sort order */ - sector_t last_sector[2]; /* last REQ_SYNC & REQ_ASYNC sectors */ + sector_t last_sector[2]; /* last SYNC & ASYNC sectors */ unsigned long exit_prob; /* probability a task will exit while being waited on */ @@ -109,7 +106,7 @@ struct as_data { unsigned long last_check_fifo[2]; int changed_batch; /* 1: waiting for old batch to end */ int new_batch; /* 1: waiting on first read complete */ - int batch_data_dir; /* current batch REQ_SYNC / REQ_ASYNC */ + int batch_data_dir; /* current batch SYNC / ASYNC */ int write_batch_count; /* max # of reqs in a write batch */ int current_write_count; /* how many requests left this batch */ int write_batch_idled; /* has the write batch gone idle? 
*/ @@ -554,7 +551,7 @@ static void as_update_iohist(struct as_data *ad, struct as_io_context *aic, if (aic == NULL) return; - if (data_dir == REQ_SYNC) { + if (data_dir == BLK_RW_SYNC) { unsigned long in_flight = atomic_read(&aic->nr_queued) + atomic_read(&aic->nr_dispatched); spin_lock(&aic->lock); @@ -811,7 +808,7 @@ static void as_update_rq(struct as_data *ad, struct request *rq) */ static void update_write_batch(struct as_data *ad) { - unsigned long batch = ad->batch_expire[REQ_ASYNC]; + unsigned long batch = ad->batch_expire[BLK_RW_ASYNC]; long write_time; write_time = (jiffies - ad->current_batch_expires) + batch; @@ -855,7 +852,7 @@ static void as_completed_request(struct request_queue *q, struct request *rq) kblockd_schedule_work(q, &ad->antic_work); ad->changed_batch = 0; - if (ad->batch_data_dir == REQ_SYNC) + if (ad->batch_data_dir == BLK_RW_SYNC) ad->new_batch = 1; } WARN_ON(ad->nr_dispatched == 0); @@ -869,7 +866,7 @@ static void as_completed_request(struct request_queue *q, struct request *rq) if (ad->new_batch && ad->batch_data_dir == rq_is_sync(rq)) { update_write_batch(ad); ad->current_batch_expires = jiffies + - ad->batch_expire[REQ_SYNC]; + ad->batch_expire[BLK_RW_SYNC]; ad->new_batch = 0; } @@ -960,7 +957,7 @@ static inline int as_batch_expired(struct as_data *ad) if (ad->changed_batch || ad->new_batch) return 0; - if (ad->batch_data_dir == REQ_SYNC) + if (ad->batch_data_dir == BLK_RW_SYNC) /* TODO! add a check so a complete fifo gets written? */ return time_after(jiffies, ad->current_batch_expires); @@ -986,7 +983,7 @@ static void as_move_to_dispatch(struct as_data *ad, struct request *rq) */ ad->last_sector[data_dir] = rq->sector + rq->nr_sectors; - if (data_dir == REQ_SYNC) { + if (data_dir == BLK_RW_SYNC) { struct io_context *ioc = RQ_IOC(rq); /* In case we have to anticipate after this */ copy_io_context(&ad->io_context, &ioc); @@ -1025,41 +1022,41 @@ static void as_move_to_dispatch(struct as_data *ad, struct request *rq) static int as_dispatch_request(struct request_queue *q, int force) { struct as_data *ad = q->elevator->elevator_data; - const int reads = !list_empty(&ad->fifo_list[REQ_SYNC]); - const int writes = !list_empty(&ad->fifo_list[REQ_ASYNC]); + const int reads = !list_empty(&ad->fifo_list[BLK_RW_SYNC]); + const int writes = !list_empty(&ad->fifo_list[BLK_RW_ASYNC]); struct request *rq; if (unlikely(force)) { /* * Forced dispatch, accounting is useless. Reset * accounting states and dump fifo_lists. Note that - * batch_data_dir is reset to REQ_SYNC to avoid + * batch_data_dir is reset to BLK_RW_SYNC to avoid * screwing write batch accounting as write batch * accounting occurs on W->R transition. 
*/ int dispatched = 0; - ad->batch_data_dir = REQ_SYNC; + ad->batch_data_dir = BLK_RW_SYNC; ad->changed_batch = 0; ad->new_batch = 0; - while (ad->next_rq[REQ_SYNC]) { - as_move_to_dispatch(ad, ad->next_rq[REQ_SYNC]); + while (ad->next_rq[BLK_RW_SYNC]) { + as_move_to_dispatch(ad, ad->next_rq[BLK_RW_SYNC]); dispatched++; } - ad->last_check_fifo[REQ_SYNC] = jiffies; + ad->last_check_fifo[BLK_RW_SYNC] = jiffies; - while (ad->next_rq[REQ_ASYNC]) { - as_move_to_dispatch(ad, ad->next_rq[REQ_ASYNC]); + while (ad->next_rq[BLK_RW_ASYNC]) { + as_move_to_dispatch(ad, ad->next_rq[BLK_RW_ASYNC]); dispatched++; } - ad->last_check_fifo[REQ_ASYNC] = jiffies; + ad->last_check_fifo[BLK_RW_ASYNC] = jiffies; return dispatched; } /* Signal that the write batch was uncontended, so we can't time it */ - if (ad->batch_data_dir == REQ_ASYNC && !reads) { + if (ad->batch_data_dir == BLK_RW_ASYNC && !reads) { if (ad->current_write_count == 0 || !writes) ad->write_batch_idled = 1; } @@ -1076,8 +1073,8 @@ static int as_dispatch_request(struct request_queue *q, int force) */ rq = ad->next_rq[ad->batch_data_dir]; - if (ad->batch_data_dir == REQ_SYNC && ad->antic_expire) { - if (as_fifo_expired(ad, REQ_SYNC)) + if (ad->batch_data_dir == BLK_RW_SYNC && ad->antic_expire) { + if (as_fifo_expired(ad, BLK_RW_SYNC)) goto fifo_expired; if (as_can_anticipate(ad, rq)) { @@ -1090,7 +1087,7 @@ static int as_dispatch_request(struct request_queue *q, int force) /* we have a "next request" */ if (reads && !writes) ad->current_batch_expires = - jiffies + ad->batch_expire[REQ_SYNC]; + jiffies + ad->batch_expire[BLK_RW_SYNC]; goto dispatch_request; } } @@ -1101,20 +1098,20 @@ static int as_dispatch_request(struct request_queue *q, int force) */ if (reads) { - BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[REQ_SYNC])); + BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[BLK_RW_SYNC])); - if (writes && ad->batch_data_dir == REQ_SYNC) + if (writes && ad->batch_data_dir == BLK_RW_SYNC) /* * Last batch was a read, switch to writes */ goto dispatch_writes; - if (ad->batch_data_dir == REQ_ASYNC) { + if (ad->batch_data_dir == BLK_RW_ASYNC) { WARN_ON(ad->new_batch); ad->changed_batch = 1; } - ad->batch_data_dir = REQ_SYNC; - rq = rq_entry_fifo(ad->fifo_list[REQ_SYNC].next); + ad->batch_data_dir = BLK_RW_SYNC; + rq = rq_entry_fifo(ad->fifo_list[BLK_RW_SYNC].next); ad->last_check_fifo[ad->batch_data_dir] = jiffies; goto dispatch_request; } @@ -1125,9 +1122,9 @@ static int as_dispatch_request(struct request_queue *q, int force) if (writes) { dispatch_writes: - BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[REQ_ASYNC])); + BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[BLK_RW_ASYNC])); - if (ad->batch_data_dir == REQ_SYNC) { + if (ad->batch_data_dir == BLK_RW_SYNC) { ad->changed_batch = 1; /* @@ -1137,11 +1134,11 @@ dispatch_writes: */ ad->new_batch = 0; } - ad->batch_data_dir = REQ_ASYNC; + ad->batch_data_dir = BLK_RW_ASYNC; ad->current_write_count = ad->write_batch_count; ad->write_batch_idled = 0; - rq = rq_entry_fifo(ad->fifo_list[REQ_ASYNC].next); - ad->last_check_fifo[REQ_ASYNC] = jiffies; + rq = rq_entry_fifo(ad->fifo_list[BLK_RW_ASYNC].next); + ad->last_check_fifo[BLK_RW_ASYNC] = jiffies; goto dispatch_request; } @@ -1164,9 +1161,9 @@ fifo_expired: if (ad->nr_dispatched) return 0; - if (ad->batch_data_dir == REQ_ASYNC) + if (ad->batch_data_dir == BLK_RW_ASYNC) ad->current_batch_expires = jiffies + - ad->batch_expire[REQ_ASYNC]; + ad->batch_expire[BLK_RW_ASYNC]; else ad->new_batch = 1; @@ -1238,8 +1235,8 @@ static int as_queue_empty(struct request_queue *q) { struct as_data *ad = 
q->elevator->elevator_data; - return list_empty(&ad->fifo_list[REQ_ASYNC]) - && list_empty(&ad->fifo_list[REQ_SYNC]); + return list_empty(&ad->fifo_list[BLK_RW_ASYNC]) + && list_empty(&ad->fifo_list[BLK_RW_SYNC]); } static int @@ -1346,8 +1343,8 @@ static void as_exit_queue(struct elevator_queue *e) del_timer_sync(&ad->antic_timer); cancel_work_sync(&ad->antic_work); - BUG_ON(!list_empty(&ad->fifo_list[REQ_SYNC])); - BUG_ON(!list_empty(&ad->fifo_list[REQ_ASYNC])); + BUG_ON(!list_empty(&ad->fifo_list[BLK_RW_SYNC])); + BUG_ON(!list_empty(&ad->fifo_list[BLK_RW_ASYNC])); put_io_context(ad->io_context); kfree(ad); @@ -1372,18 +1369,18 @@ static void *as_init_queue(struct request_queue *q) init_timer(&ad->antic_timer); INIT_WORK(&ad->antic_work, as_work_handler); - INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]); - INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]); - ad->sort_list[REQ_SYNC] = RB_ROOT; - ad->sort_list[REQ_ASYNC] = RB_ROOT; - ad->fifo_expire[REQ_SYNC] = default_read_expire; - ad->fifo_expire[REQ_ASYNC] = default_write_expire; + INIT_LIST_HEAD(&ad->fifo_list[BLK_RW_SYNC]); + INIT_LIST_HEAD(&ad->fifo_list[BLK_RW_ASYNC]); + ad->sort_list[BLK_RW_SYNC] = RB_ROOT; + ad->sort_list[BLK_RW_ASYNC] = RB_ROOT; + ad->fifo_expire[BLK_RW_SYNC] = default_read_expire; + ad->fifo_expire[BLK_RW_ASYNC] = default_write_expire; ad->antic_expire = default_antic_expire; - ad->batch_expire[REQ_SYNC] = default_read_batch_expire; - ad->batch_expire[REQ_ASYNC] = default_write_batch_expire; + ad->batch_expire[BLK_RW_SYNC] = default_read_batch_expire; + ad->batch_expire[BLK_RW_ASYNC] = default_write_batch_expire; - ad->current_batch_expires = jiffies + ad->batch_expire[REQ_SYNC]; - ad->write_batch_count = ad->batch_expire[REQ_ASYNC] / 10; + ad->current_batch_expires = jiffies + ad->batch_expire[BLK_RW_SYNC]; + ad->write_batch_count = ad->batch_expire[BLK_RW_ASYNC] / 10; if (ad->write_batch_count < 2) ad->write_batch_count = 2; @@ -1432,11 +1429,11 @@ static ssize_t __FUNC(struct elevator_queue *e, char *page) \ struct as_data *ad = e->elevator_data; \ return as_var_show(jiffies_to_msecs((__VAR)), (page)); \ } -SHOW_FUNCTION(as_read_expire_show, ad->fifo_expire[REQ_SYNC]); -SHOW_FUNCTION(as_write_expire_show, ad->fifo_expire[REQ_ASYNC]); +SHOW_FUNCTION(as_read_expire_show, ad->fifo_expire[BLK_RW_SYNC]); +SHOW_FUNCTION(as_write_expire_show, ad->fifo_expire[BLK_RW_ASYNC]); SHOW_FUNCTION(as_antic_expire_show, ad->antic_expire); -SHOW_FUNCTION(as_read_batch_expire_show, ad->batch_expire[REQ_SYNC]); -SHOW_FUNCTION(as_write_batch_expire_show, ad->batch_expire[REQ_ASYNC]); +SHOW_FUNCTION(as_read_batch_expire_show, ad->batch_expire[BLK_RW_SYNC]); +SHOW_FUNCTION(as_write_batch_expire_show, ad->batch_expire[BLK_RW_ASYNC]); #undef SHOW_FUNCTION #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \ @@ -1451,13 +1448,14 @@ static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) *(__PTR) = msecs_to_jiffies(*(__PTR)); \ return ret; \ } -STORE_FUNCTION(as_read_expire_store, &ad->fifo_expire[REQ_SYNC], 0, INT_MAX); -STORE_FUNCTION(as_write_expire_store, &ad->fifo_expire[REQ_ASYNC], 0, INT_MAX); +STORE_FUNCTION(as_read_expire_store, &ad->fifo_expire[BLK_RW_SYNC], 0, INT_MAX); +STORE_FUNCTION(as_write_expire_store, + &ad->fifo_expire[BLK_RW_ASYNC], 0, INT_MAX); STORE_FUNCTION(as_antic_expire_store, &ad->antic_expire, 0, INT_MAX); STORE_FUNCTION(as_read_batch_expire_store, - &ad->batch_expire[REQ_SYNC], 0, INT_MAX); + &ad->batch_expire[BLK_RW_SYNC], 0, INT_MAX); STORE_FUNCTION(as_write_batch_expire_store, - 
&ad->batch_expire[REQ_ASYNC], 0, INT_MAX); + &ad->batch_expire[BLK_RW_ASYNC], 0, INT_MAX); #undef STORE_FUNCTION #define AS_ATTR(name) \ -- cgit v1.2.2 From d6ceb25e8d8bccf826848c2621a50d02c0a7f4ae Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 14 Apr 2009 14:18:16 +0200 Subject: cfq-iosched: don't delay queue kick for a merged request "Zhang, Yanmin" reports that commit b029195dda0129b427c6e579a3bb3ae752da3a93 introduced a regression of about 50% with sequential threaded read workloads. The test case is: tiotest -k0 -k1 -k3 -f 80 -t 32 which starts 32 threads each reading an 80MB file. Twiddle the kick queue logic so that we do start IO immediately if it appears to be a fully merged request. We can't really detect that, so just check if the request is bigger than a page or not. The assumption is that since single bio issues will first queue a single request with just one page attached and then later do merges on that, if we already have more than a page worth of data in the request, then the request is most likely good to go. Verified that this doesn't cause a regression with the test case that commit b029195dda0129b427c6e579a3bb3ae752da3a93 was fixing. It does not; we still see maximum-sized requests for the queue-then-merge cases. Signed-off-by: Jens Axboe --- block/cfq-iosched.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'block') diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index cba8a5d91bfa..56e9d8503cf1 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -1903,10 +1903,17 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, * Remember that we saw a request from this process, but * don't start queuing just yet. Otherwise we risk seeing lots * of tiny requests, because we disrupt the normal plugging - * and merging. + * and merging. If the request is already larger than a single + * page, let it rip immediately. For that case we assume that + * merging is already done. */ - if (cfq_cfqq_wait_request(cfqq)) + if (cfq_cfqq_wait_request(cfqq)) { + if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE) { + del_timer(&cfqd->idle_slice_timer); + blk_start_queueing(cfqd->queue); + } cfq_mark_cfqq_must_dispatch(cfqq); + } } else if (cfq_should_preempt(cfqd, cfqq, rq)) { /* * not the active queue - expire current slice if it is -- cgit v1.2.2 From 91e463c8f5f7a5b09d3d034c01a88567af60c504 Mon Sep 17 00:00:00 2001 From: FUJITA Tomonori Date: Mon, 13 Apr 2009 20:03:10 +0200 Subject: block: fix SG_IO to return a proper error value blk_rq_unmap_user() returns -EFAULT if a program passes an invalid address to the kernel. SG_IO path needs to pass the returned value to user space instead of ignoring it.
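The change that follows is the usual keep-the-first-error pattern: run the cleanup step unconditionally, but report whichever error happened first. A self-contained sketch of the pattern, with placeholder helpers standing in for the real block-layer calls:

#include <stdio.h>

#define EFAULT 14

/* Placeholder helpers standing in for "copy the status block back to user
 * space" and "unmap the user buffer"; either step can fail on its own. */
static int copy_status_to_user(int fail) { return fail ? -EFAULT : 0; }
static int unmap_user_buffer(int fail)   { return fail ? -EFAULT : 0; }

/* Always run the cleanup, but only let its return value through when no
 * earlier error was recorded. */
static int complete_request(int copy_fails, int unmap_fails)
{
        int ret = copy_status_to_user(copy_fails);
        int r = unmap_user_buffer(unmap_fails);

        if (!ret)
                ret = r;
        return ret;
}

int main(void)
{
        printf("%d\n", complete_request(0, 0));   /* 0: both steps fine        */
        printf("%d\n", complete_request(1, 0));   /* -14: first error kept     */
        printf("%d\n", complete_request(0, 1));   /* -14: unmap error returned */
        return 0;
}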
Signed-off-by: FUJITA Tomonori Signed-off-by: Jens Axboe --- block/scsi_ioctl.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'block') diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c index 626ee274c5c4..84b7f8709f41 100644 --- a/block/scsi_ioctl.c +++ b/block/scsi_ioctl.c @@ -217,7 +217,7 @@ static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq, static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr, struct bio *bio) { - int ret = 0; + int r, ret = 0; /* * fill in all the output members @@ -242,7 +242,9 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr, ret = -EFAULT; } - blk_rq_unmap_user(bio); + r = blk_rq_unmap_user(bio); + if (!ret) + ret = r; blk_put_request(rq); return ret; -- cgit v1.2.2 From 15afd1cc7b624e1c94fdf824ec5af611050b150c Mon Sep 17 00:00:00 2001 From: Nikanth Karthikesan Date: Wed, 15 Apr 2009 10:35:31 +0530 Subject: block: Remove code handling bio_alloc failure with __GFP_WAIT Remove code handling bio_alloc failure with __GFP_WAIT. GFP_KERNEL implies __GFP_WAIT. Signed-off-by: Nikanth Karthikesan Signed-off-by: Jens Axboe --- block/blk-barrier.c | 3 --- block/ioctl.c | 2 -- 2 files changed, 5 deletions(-) (limited to 'block') diff --git a/block/blk-barrier.c b/block/blk-barrier.c index f7dae57e6cab..20b4111fa050 100644 --- a/block/blk-barrier.c +++ b/block/blk-barrier.c @@ -319,9 +319,6 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector) return -ENXIO; bio = bio_alloc(GFP_KERNEL, 0); - if (!bio) - return -ENOMEM; - bio->bi_end_io = bio_end_empty_barrier; bio->bi_private = &wait; bio->bi_bdev = bdev; diff --git a/block/ioctl.c b/block/ioctl.c index 0f22e629b13c..ad474d4bbcce 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -146,8 +146,6 @@ static int blk_ioctl_discard(struct block_device *bdev, uint64_t start, struct bio *bio; bio = bio_alloc(GFP_KERNEL, 0); - if (!bio) - return -ENOMEM; bio->bi_end_io = blk_ioc_discard_endio; bio->bi_bdev = bdev; -- cgit v1.2.2 From 40bb54d197a10fe86d73073ee8202ba2812a05fa Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 15 Apr 2009 12:11:10 +0200 Subject: cfq-iosched: no need to save interrupts in cfq_kick_queue() It's called from the workqueue handlers from process context, so we always have irqs enabled when entered. Signed-off-by: Jens Axboe --- block/cfq-iosched.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'block') diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 56e9d8503cf1..e59f2108272f 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -2157,11 +2157,10 @@ static void cfq_kick_queue(struct work_struct *work) struct cfq_data *cfqd = container_of(work, struct cfq_data, unplug_work); struct request_queue *q = cfqd->queue; - unsigned long flags; - spin_lock_irqsave(q->queue_lock, flags); + spin_lock_irq(q->queue_lock); blk_start_queueing(q); - spin_unlock_irqrestore(q->queue_lock, flags); + spin_unlock_irq(q->queue_lock); } /* -- cgit v1.2.2 From 2d870722965211de072bb36b446a4df99dae07e1 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 15 Apr 2009 12:12:46 +0200 Subject: cfq-iosched: tweak kick logic a bit more We only kick the dispatch for an idling queue, if we think it's a (somewhat) fully merged request. Also allow a kick if we have other busy queues in the system, since we don't want to risk waiting for a potential merge in that case. It's better to get some work done and proceed. 
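Read as a predicate, the resulting condition is: while the active queue is idling, kick the device immediately only if the new request is already bigger than a page (so merging has presumably been done) or other queues are busy (so waiting could stall useful work); otherwise just mark the queue as before. A small standalone model of that decision, assuming a 4KB page size:

#include <stdbool.h>
#include <stdio.h>

#define MODEL_PAGE_SIZE 4096u   /* stand-in for PAGE_CACHE_SIZE */

/* Kick the device queue right away only when waiting is unlikely to buy a
 * merge: the request already spans more than one page, or other queues
 * would otherwise sit idle behind this one. */
static bool should_kick_now(unsigned int rq_bytes, unsigned int busy_queues)
{
        return rq_bytes > MODEL_PAGE_SIZE || busy_queues > 1;
}

int main(void)
{
        printf("%d\n", should_kick_now(4096, 1));   /* 0: single page, quiet system */
        printf("%d\n", should_kick_now(65536, 1));  /* 1: presumably merged already */
        printf("%d\n", should_kick_now(4096, 3));   /* 1: others are waiting        */
        return 0;
}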
Signed-off-by: Jens Axboe --- block/cfq-iosched.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'block') diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index e59f2108272f..579f31822469 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -1905,10 +1905,13 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, * of tiny requests, because we disrupt the normal plugging * and merging. If the request is already larger than a single * page, let it rip immediately. For that case we assume that - * merging is already done. + * merging is already done. Ditto for a busy system that + * has other work pending, don't risk delaying until the + * idle timer unplug to continue working. */ if (cfq_cfqq_wait_request(cfqq)) { - if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE) { + if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE || + cfqd->busy_queues > 1) { del_timer(&cfqd->idle_slice_timer); blk_start_queueing(cfqd->queue); } -- cgit v1.2.2 From 9481ffdc61738a91baf0f8b7fb20922768ae1b8e Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 15 Apr 2009 12:14:13 +0200 Subject: cfq-iosched: log responsible 'cfqq' in idle timer arm Makes it easier to read the traces. Signed-off-by: Jens Axboe --- block/cfq-iosched.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'block') diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 579f31822469..090a4ee75b9d 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -936,7 +936,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd) sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT)); mod_timer(&cfqd->idle_slice_timer, jiffies + sl); - cfq_log(cfqd, "arm_idle: %lu", sl); + cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl); } /* -- cgit v1.2.2 From a36e71f996e25d6213f57951f7ae1874086ec57e Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 15 Apr 2009 12:15:11 +0200 Subject: cfq-iosched: add close cooperator code If we have processes that are working in close proximity to each other on disk, we don't want to idle wait. Instead allow the close process to issue a request, getting better aggregate bandwidth. The anticipatory scheduler has similar checks, noop and deadline do not need it since they don't care about process <-> io mappings. The code for CFQ is a little more involved though, since we split request queues into per-process contexts. This fixes a performance problem with eg dump(8), since it uses several processes in some silly attempt to speed IO up. Even if dump(8) isn't really a valid case (it should be fixed by using CLONE_IO), there are other cases where we see close processes and where idling ends up hurting performance. Credit goes to Jeff Moyer for writing the initial implementation. Signed-off-by: Jens Axboe --- block/cfq-iosched.c | 223 ++++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 198 insertions(+), 25 deletions(-) (limited to 'block') diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 090a4ee75b9d..0d3b70de3d80 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -80,6 +80,14 @@ struct cfq_data { * rr list of queues with requests and the count of them */ struct cfq_rb_root service_tree; + + /* + * Each priority tree is sorted by next_request position. These + * trees are used when determining if two or more queues are + * interleaving requests (see cfq_close_cooperator). 
+ */ + struct rb_root prio_trees[CFQ_PRIO_LISTS]; + unsigned int busy_queues; /* * Used to track any pending rt requests so we can pre-empt current @@ -144,6 +152,8 @@ struct cfq_queue { struct rb_node rb_node; /* service_tree key */ unsigned long rb_key; + /* prio tree member */ + struct rb_node p_node; /* sorted list of pending requests */ struct rb_root sort_list; /* if fifo isn't expired, next request to serve */ @@ -182,6 +192,7 @@ enum cfqq_state_flags { CFQ_CFQQ_FLAG_prio_changed, /* task priority has changed */ CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */ CFQ_CFQQ_FLAG_sync, /* synchronous queue */ + CFQ_CFQQ_FLAG_coop, /* has done a coop jump of the queue */ }; #define CFQ_CFQQ_FNS(name) \ @@ -208,6 +219,7 @@ CFQ_CFQQ_FNS(idle_window); CFQ_CFQQ_FNS(prio_changed); CFQ_CFQQ_FNS(slice_new); CFQ_CFQQ_FNS(sync); +CFQ_CFQQ_FNS(coop); #undef CFQ_CFQQ_FNS #define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \ @@ -416,13 +428,17 @@ static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root) return NULL; } +static void rb_erase_init(struct rb_node *n, struct rb_root *root) +{ + rb_erase(n, root); + RB_CLEAR_NODE(n); +} + static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root) { if (root->left == n) root->left = NULL; - - rb_erase(n, &root->rb); - RB_CLEAR_NODE(n); + rb_erase_init(n, &root->rb); } /* @@ -467,8 +483,8 @@ static unsigned long cfq_slice_offset(struct cfq_data *cfqd, * requests waiting to be processed. It is sorted in the order that * we will service the queues. */ -static void cfq_service_tree_add(struct cfq_data *cfqd, - struct cfq_queue *cfqq, int add_front) +static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq, + int add_front) { struct rb_node **p, *parent; struct cfq_queue *__cfqq; @@ -541,6 +557,63 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, rb_insert_color(&cfqq->rb_node, &cfqd->service_tree.rb); } +static struct cfq_queue * +cfq_prio_tree_lookup(struct cfq_data *cfqd, int ioprio, sector_t sector, + struct rb_node **ret_parent, struct rb_node ***rb_link) +{ + struct rb_root *root = &cfqd->prio_trees[ioprio]; + struct rb_node **p, *parent; + struct cfq_queue *cfqq = NULL; + + parent = NULL; + p = &root->rb_node; + while (*p) { + struct rb_node **n; + + parent = *p; + cfqq = rb_entry(parent, struct cfq_queue, p_node); + + /* + * Sort strictly based on sector. Smallest to the left, + * largest to the right. + */ + if (sector > cfqq->next_rq->sector) + n = &(*p)->rb_right; + else if (sector < cfqq->next_rq->sector) + n = &(*p)->rb_left; + else + break; + p = n; + } + + *ret_parent = parent; + if (rb_link) + *rb_link = p; + return NULL; +} + +static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + struct rb_root *root = &cfqd->prio_trees[cfqq->ioprio]; + struct rb_node **p, *parent; + struct cfq_queue *__cfqq; + + if (!RB_EMPTY_NODE(&cfqq->p_node)) + rb_erase_init(&cfqq->p_node, root); + + if (cfq_class_idle(cfqq)) + return; + if (!cfqq->next_rq) + return; + + __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->ioprio, cfqq->next_rq->sector, + &parent, &p); + BUG_ON(__cfqq); + + rb_link_node(&cfqq->p_node, parent, p); + rb_insert_color(&cfqq->p_node, root); +} + /* * Update cfqq's position in the service tree. */ @@ -549,8 +622,10 @@ static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq) /* * Resorting requires the cfqq to be on the RR list already. 
*/ - if (cfq_cfqq_on_rr(cfqq)) + if (cfq_cfqq_on_rr(cfqq)) { cfq_service_tree_add(cfqd, cfqq, 0); + cfq_prio_tree_add(cfqd, cfqq); + } } /* @@ -581,6 +656,8 @@ static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) if (!RB_EMPTY_NODE(&cfqq->rb_node)) cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree); + if (!RB_EMPTY_NODE(&cfqq->p_node)) + rb_erase_init(&cfqq->p_node, &cfqd->prio_trees[cfqq->ioprio]); BUG_ON(!cfqd->busy_queues); cfqd->busy_queues--; @@ -610,7 +687,7 @@ static void cfq_add_rq_rb(struct request *rq) { struct cfq_queue *cfqq = RQ_CFQQ(rq); struct cfq_data *cfqd = cfqq->cfqd; - struct request *__alias; + struct request *__alias, *prev; cfqq->queued[rq_is_sync(rq)]++; @@ -627,7 +704,15 @@ static void cfq_add_rq_rb(struct request *rq) /* * check if this request is a better next-serve candidate */ + prev = cfqq->next_rq; cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq); + + /* + * adjust priority tree position, if ->next_rq changes + */ + if (prev != cfqq->next_rq) + cfq_prio_tree_add(cfqd, cfqq); + BUG_ON(!cfqq->next_rq); } @@ -840,11 +925,15 @@ static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd) /* * Get and set a new active queue for service. */ -static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd) +static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd, + struct cfq_queue *cfqq) { - struct cfq_queue *cfqq; + if (!cfqq) { + cfqq = cfq_get_next_queue(cfqd); + if (cfqq) + cfq_clear_cfqq_coop(cfqq); + } - cfqq = cfq_get_next_queue(cfqd); __cfq_set_active_queue(cfqd, cfqq); return cfqq; } @@ -868,17 +957,89 @@ static inline int cfq_rq_close(struct cfq_data *cfqd, struct request *rq) return cfq_dist_from_last(cfqd, rq) <= cic->seek_mean; } -static int cfq_close_cooperator(struct cfq_data *cfq_data, - struct cfq_queue *cfqq) +static struct cfq_queue *cfqq_close(struct cfq_data *cfqd, + struct cfq_queue *cur_cfqq) +{ + struct rb_root *root = &cfqd->prio_trees[cur_cfqq->ioprio]; + struct rb_node *parent, *node; + struct cfq_queue *__cfqq; + sector_t sector = cfqd->last_position; + + if (RB_EMPTY_ROOT(root)) + return NULL; + + /* + * First, if we find a request starting at the end of the last + * request, choose it. + */ + __cfqq = cfq_prio_tree_lookup(cfqd, cur_cfqq->ioprio, + sector, &parent, NULL); + if (__cfqq) + return __cfqq; + + /* + * If the exact sector wasn't found, the parent of the NULL leaf + * will contain the closest sector. + */ + __cfqq = rb_entry(parent, struct cfq_queue, p_node); + if (cfq_rq_close(cfqd, __cfqq->next_rq)) + return __cfqq; + + if (__cfqq->next_rq->sector < sector) + node = rb_next(&__cfqq->p_node); + else + node = rb_prev(&__cfqq->p_node); + if (!node) + return NULL; + + __cfqq = rb_entry(node, struct cfq_queue, p_node); + if (cfq_rq_close(cfqd, __cfqq->next_rq)) + return __cfqq; + + return NULL; +} + +/* + * cfqd - obvious + * cur_cfqq - passed in so that we don't decide that the current queue is + * closely cooperating with itself. + * + * So, basically we're assuming that that cur_cfqq has dispatched at least + * one request, and that cfqd->last_position reflects a position on the disk + * associated with the I/O issued by cur_cfqq. I'm not sure this is a valid + * assumption. + */ +static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd, + struct cfq_queue *cur_cfqq, + int probe) { + struct cfq_queue *cfqq; + + /* + * A valid cfq_io_context is necessary to compare requests against + * the seek_mean of the current cfqq. 
+ */ + if (!cfqd->active_cic) + return NULL; + /* * We should notice if some of the queues are cooperating, eg * working closely on the same area of the disk. In that case, * we can group them together and don't waste time idling. */ - return 0; + cfqq = cfqq_close(cfqd, cur_cfqq); + if (!cfqq) + return NULL; + + if (cfq_cfqq_coop(cfqq)) + return NULL; + + if (!probe) + cfq_mark_cfqq_coop(cfqq); + return cfqq; } + #define CIC_SEEKY(cic) ((cic)->seek_mean > (8 * 1024)) static void cfq_arm_slice_timer(struct cfq_data *cfqd) @@ -917,13 +1078,6 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd) if (!cic || !atomic_read(&cic->ioc->nr_tasks)) return; - /* - * See if this prio level has a good candidate - */ - if (cfq_close_cooperator(cfqd, cfqq) && - (sample_valid(cic->ttime_samples) && cic->ttime_mean > 2)) - return; - cfq_mark_cfqq_wait_request(cfqq); /* @@ -1000,7 +1154,7 @@ cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq) */ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) { - struct cfq_queue *cfqq; + struct cfq_queue *cfqq, *new_cfqq = NULL; cfqq = cfqd->active_queue; if (!cfqq) @@ -1033,6 +1187,16 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) if (!RB_EMPTY_ROOT(&cfqq->sort_list)) goto keep_queue; + /* + * If another queue has a request waiting within our mean seek + * distance, let it run. The expire code will check for close + * cooperators and put the close queue at the front of the service + * tree. + */ + new_cfqq = cfq_close_cooperator(cfqd, cfqq, 0); + if (new_cfqq) + goto expire; + /* * No requests pending. If the active queue still has requests in * flight or is idling for a new request, allow either of these @@ -1047,7 +1211,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) expire: cfq_slice_expired(cfqd, 0); new_queue: - cfqq = cfq_set_active_queue(cfqd); + cfqq = cfq_set_active_queue(cfqd, new_cfqq); keep_queue: return cfqq; } @@ -1508,6 +1672,7 @@ retry: } RB_CLEAR_NODE(&cfqq->rb_node); + RB_CLEAR_NODE(&cfqq->p_node); INIT_LIST_HEAD(&cfqq->fifo); atomic_set(&cfqq->ref, 0); @@ -2000,16 +2165,24 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq) * or if we want to idle in case it has no pending requests. */ if (cfqd->active_queue == cfqq) { + const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list); + if (cfq_cfqq_slice_new(cfqq)) { cfq_set_prio_slice(cfqd, cfqq); cfq_clear_cfqq_slice_new(cfqq); } + /* + * If there are no requests waiting in this queue, and + * there are other queues ready to issue requests, AND + * those other queues are issuing requests within our + * mean seek distance, give them a chance to run instead + * of idling. + */ if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq)) cfq_slice_expired(cfqd, 1); - else if (sync && !rq_noidle(rq) && - RB_EMPTY_ROOT(&cfqq->sort_list)) { + else if (cfqq_empty && !cfq_close_cooperator(cfqd, cfqq, 1) && + sync && !rq_noidle(rq)) cfq_arm_slice_timer(cfqd); - } } if (!cfqd->rq_in_driver) -- cgit v1.2.2
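Taken together, the close-cooperator code boils down to a nearest-neighbour query: index each priority level's queues by the sector of their next request and, when the active queue would otherwise idle, pick the queue whose next request lies closest to the last completed position, accepting it only if it falls within the current mean seek distance. A simplified userspace model of that decision, with a linear scan standing in for the per-priority rbtree and purely illustrative numbers:

#include <stdio.h>
#include <stdlib.h>

struct queue_pos {
        int  id;
        long next_sector;   /* sector of the queue's next pending request */
};

/* Find the queue whose next request sits closest to last_pos and accept it
 * as a cooperator only if it is within seek_mean sectors, mirroring the
 * cfqq_close()/cfq_rq_close() checks. */
static const struct queue_pos *
find_close_cooperator(const struct queue_pos *qs, int n,
                      long last_pos, long seek_mean)
{
        const struct queue_pos *best = NULL;
        long best_dist = 0;

        for (int i = 0; i < n; i++) {
                long dist = labs(qs[i].next_sector - last_pos);
                if (!best || dist < best_dist) {
                        best = &qs[i];
                        best_dist = dist;
                }
        }
        return (best && best_dist <= seek_mean) ? best : NULL;
}

int main(void)
{
        const struct queue_pos queues[] = {
                { 1, 1000 }, { 2, 5000 }, { 3, 1048 },
        };
        const struct queue_pos *coop =
                find_close_cooperator(queues, 3, 1040, 64);

        if (coop)
                printf("dispatch from cooperating queue %d (sector %ld)\n",
                       coop->id, coop->next_sector);
        else
                printf("no close cooperator, keep idling\n");
        return 0;
}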