Diffstat (limited to 'block')

 -rw-r--r--  block/blk-core.c     | 108
 -rw-r--r--  block/blk-flush.c    | 439
 -rw-r--r--  block/blk-settings.c |   7
 -rw-r--r--  block/blk-sysfs.c    |   2
 -rw-r--r--  block/blk-throttle.c |   6
 -rw-r--r--  block/blk.h          |  12
 -rw-r--r--  block/cfq-iosched.c  |  57
 -rw-r--r--  block/elevator.c     |   9
 -rw-r--r--  block/genhd.c        |  16
 9 files changed, 411 insertions(+), 245 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 518dd423a5fe..74d496ccf4d7 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -149,39 +149,29 @@ EXPORT_SYMBOL(blk_rq_init);
149 | static void req_bio_endio(struct request *rq, struct bio *bio, | 149 | static void req_bio_endio(struct request *rq, struct bio *bio, |
150 | unsigned int nbytes, int error) | 150 | unsigned int nbytes, int error) |
151 | { | 151 | { |
152 | struct request_queue *q = rq->q; | 152 | if (error) |
153 | 153 | clear_bit(BIO_UPTODATE, &bio->bi_flags); | |
154 | if (&q->flush_rq != rq) { | 154 | else if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) |
155 | if (error) | 155 | error = -EIO; |
156 | clear_bit(BIO_UPTODATE, &bio->bi_flags); | 156 | |
157 | else if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) | 157 | if (unlikely(nbytes > bio->bi_size)) { |
158 | error = -EIO; | 158 | printk(KERN_ERR "%s: want %u bytes done, %u left\n", |
159 | 159 | __func__, nbytes, bio->bi_size); | |
160 | if (unlikely(nbytes > bio->bi_size)) { | 160 | nbytes = bio->bi_size; |
161 | printk(KERN_ERR "%s: want %u bytes done, %u left\n", | 161 | } |
162 | __func__, nbytes, bio->bi_size); | ||
163 | nbytes = bio->bi_size; | ||
164 | } | ||
165 | 162 | ||
166 | if (unlikely(rq->cmd_flags & REQ_QUIET)) | 163 | if (unlikely(rq->cmd_flags & REQ_QUIET)) |
167 | set_bit(BIO_QUIET, &bio->bi_flags); | 164 | set_bit(BIO_QUIET, &bio->bi_flags); |
168 | 165 | ||
169 | bio->bi_size -= nbytes; | 166 | bio->bi_size -= nbytes; |
170 | bio->bi_sector += (nbytes >> 9); | 167 | bio->bi_sector += (nbytes >> 9); |
171 | 168 | ||
172 | if (bio_integrity(bio)) | 169 | if (bio_integrity(bio)) |
173 | bio_integrity_advance(bio, nbytes); | 170 | bio_integrity_advance(bio, nbytes); |
174 | 171 | ||
175 | if (bio->bi_size == 0) | 172 | /* don't actually finish bio if it's part of flush sequence */ |
176 | bio_endio(bio, error); | 173 | if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ)) |
177 | } else { | 174 | bio_endio(bio, error); |
178 | /* | ||
179 | * Okay, this is the sequenced flush request in | ||
180 | * progress, just record the error; | ||
181 | */ | ||
182 | if (error && !q->flush_err) | ||
183 | q->flush_err = error; | ||
184 | } | ||
185 | } | 175 | } |
186 | 176 | ||
187 | void blk_dump_rq_flags(struct request *rq, char *msg) | 177 | void blk_dump_rq_flags(struct request *rq, char *msg) |
@@ -390,13 +380,16 @@ EXPORT_SYMBOL(blk_stop_queue);
390 | * that its ->make_request_fn will not re-add plugging prior to calling | 380 | * that its ->make_request_fn will not re-add plugging prior to calling |
391 | * this function. | 381 | * this function. |
392 | * | 382 | * |
383 | * This function does not cancel any asynchronous activity arising | ||
384 | * out of elevator or throttling code. That would require elevator_exit() | ||
385 | * and blk_throtl_exit() to be called with queue lock initialized. | ||
386 | * | ||
393 | */ | 387 | */ |
394 | void blk_sync_queue(struct request_queue *q) | 388 | void blk_sync_queue(struct request_queue *q) |
395 | { | 389 | { |
396 | del_timer_sync(&q->unplug_timer); | 390 | del_timer_sync(&q->unplug_timer); |
397 | del_timer_sync(&q->timeout); | 391 | del_timer_sync(&q->timeout); |
398 | cancel_work_sync(&q->unplug_work); | 392 | cancel_work_sync(&q->unplug_work); |
399 | throtl_shutdown_timer_wq(q); | ||
400 | } | 393 | } |
401 | EXPORT_SYMBOL(blk_sync_queue); | 394 | EXPORT_SYMBOL(blk_sync_queue); |
402 | 395 | ||
@@ -457,6 +450,11 @@ void blk_put_queue(struct request_queue *q)
457 | kobject_put(&q->kobj); | 450 | kobject_put(&q->kobj); |
458 | } | 451 | } |
459 | 452 | ||
453 | /* | ||
454 | * Note: If a driver supplied the queue lock, it should not zap that lock | ||
455 | * unexpectedly as some queue cleanup components like elevator_exit() and | ||
456 | * blk_throtl_exit() need queue lock. | ||
457 | */ | ||
460 | void blk_cleanup_queue(struct request_queue *q) | 458 | void blk_cleanup_queue(struct request_queue *q) |
461 | { | 459 | { |
462 | /* | 460 | /* |
@@ -475,6 +473,8 @@ void blk_cleanup_queue(struct request_queue *q)
475 | if (q->elevator) | 473 | if (q->elevator) |
476 | elevator_exit(q->elevator); | 474 | elevator_exit(q->elevator); |
477 | 475 | ||
476 | blk_throtl_exit(q); | ||
477 | |||
478 | blk_put_queue(q); | 478 | blk_put_queue(q); |
479 | } | 479 | } |
480 | EXPORT_SYMBOL(blk_cleanup_queue); | 480 | EXPORT_SYMBOL(blk_cleanup_queue); |
@@ -541,7 +541,9 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
541 | init_timer(&q->unplug_timer); | 541 | init_timer(&q->unplug_timer); |
542 | setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q); | 542 | setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q); |
543 | INIT_LIST_HEAD(&q->timeout_list); | 543 | INIT_LIST_HEAD(&q->timeout_list); |
544 | INIT_LIST_HEAD(&q->pending_flushes); | 544 | INIT_LIST_HEAD(&q->flush_queue[0]); |
545 | INIT_LIST_HEAD(&q->flush_queue[1]); | ||
546 | INIT_LIST_HEAD(&q->flush_data_in_flight); | ||
545 | INIT_WORK(&q->unplug_work, blk_unplug_work); | 547 | INIT_WORK(&q->unplug_work, blk_unplug_work); |
546 | 548 | ||
547 | kobject_init(&q->kobj, &blk_queue_ktype); | 549 | kobject_init(&q->kobj, &blk_queue_ktype); |
@@ -549,6 +551,12 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
549 | mutex_init(&q->sysfs_lock); | 551 | mutex_init(&q->sysfs_lock); |
550 | spin_lock_init(&q->__queue_lock); | 552 | spin_lock_init(&q->__queue_lock); |
551 | 553 | ||
554 | /* | ||
555 | * By default initialize queue_lock to internal lock and driver can | ||
556 | * override it later if need be. | ||
557 | */ | ||
558 | q->queue_lock = &q->__queue_lock; | ||
559 | |||
552 | return q; | 560 | return q; |
553 | } | 561 | } |
554 | EXPORT_SYMBOL(blk_alloc_queue_node); | 562 | EXPORT_SYMBOL(blk_alloc_queue_node); |
@@ -633,7 +641,10 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
633 | q->unprep_rq_fn = NULL; | 641 | q->unprep_rq_fn = NULL; |
634 | q->unplug_fn = generic_unplug_device; | 642 | q->unplug_fn = generic_unplug_device; |
635 | q->queue_flags = QUEUE_FLAG_DEFAULT; | 643 | q->queue_flags = QUEUE_FLAG_DEFAULT; |
636 | q->queue_lock = lock; | 644 | |
645 | /* Override internal queue lock with supplied lock pointer */ | ||
646 | if (lock) | ||
647 | q->queue_lock = lock; | ||
637 | 648 | ||
638 | /* | 649 | /* |
639 | * This also sets hw/phys segments, boundary and size | 650 | * This also sets hw/phys segments, boundary and size |
@@ -762,6 +773,25 @@ static void freed_request(struct request_queue *q, int sync, int priv)
762 | } | 773 | } |
763 | 774 | ||
764 | /* | 775 | /* |
776 | * Determine if elevator data should be initialized when allocating the | ||
777 | * request associated with @bio. | ||
778 | */ | ||
779 | static bool blk_rq_should_init_elevator(struct bio *bio) | ||
780 | { | ||
781 | if (!bio) | ||
782 | return true; | ||
783 | |||
784 | /* | ||
785 | * Flush requests do not use the elevator so skip initialization. | ||
786 | * This allows a request to share the flush and elevator data. | ||
787 | */ | ||
788 | if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) | ||
789 | return false; | ||
790 | |||
791 | return true; | ||
792 | } | ||
793 | |||
794 | /* | ||
765 | * Get a free request, queue_lock must be held. | 795 | * Get a free request, queue_lock must be held. |
766 | * Returns NULL on failure, with queue_lock held. | 796 | * Returns NULL on failure, with queue_lock held. |
767 | * Returns !NULL on success, with queue_lock *not held*. | 797 | * Returns !NULL on success, with queue_lock *not held*. |
@@ -773,7 +803,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
773 | struct request_list *rl = &q->rq; | 803 | struct request_list *rl = &q->rq; |
774 | struct io_context *ioc = NULL; | 804 | struct io_context *ioc = NULL; |
775 | const bool is_sync = rw_is_sync(rw_flags) != 0; | 805 | const bool is_sync = rw_is_sync(rw_flags) != 0; |
776 | int may_queue, priv; | 806 | int may_queue, priv = 0; |
777 | 807 | ||
778 | may_queue = elv_may_queue(q, rw_flags); | 808 | may_queue = elv_may_queue(q, rw_flags); |
779 | if (may_queue == ELV_MQUEUE_NO) | 809 | if (may_queue == ELV_MQUEUE_NO) |
@@ -817,9 +847,11 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
817 | rl->count[is_sync]++; | 847 | rl->count[is_sync]++; |
818 | rl->starved[is_sync] = 0; | 848 | rl->starved[is_sync] = 0; |
819 | 849 | ||
820 | priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); | 850 | if (blk_rq_should_init_elevator(bio)) { |
821 | if (priv) | 851 | priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); |
822 | rl->elvpriv++; | 852 | if (priv) |
853 | rl->elvpriv++; | ||
854 | } | ||
823 | 855 | ||
824 | if (blk_queue_io_stat(q)) | 856 | if (blk_queue_io_stat(q)) |
825 | rw_flags |= REQ_IO_STAT; | 857 | rw_flags |= REQ_IO_STAT; |
@@ -1220,7 +1252,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
1220 | spin_lock_irq(q->queue_lock); | 1252 | spin_lock_irq(q->queue_lock); |
1221 | 1253 | ||
1222 | if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) { | 1254 | if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) { |
1223 | where = ELEVATOR_INSERT_FRONT; | 1255 | where = ELEVATOR_INSERT_FLUSH; |
1224 | goto get_rq; | 1256 | goto get_rq; |
1225 | } | 1257 | } |
1226 | 1258 | ||
@@ -1805,7 +1837,7 @@ static void blk_account_io_done(struct request *req)
1805 | * normal IO on queueing nor completion. Accounting the | 1837 | * normal IO on queueing nor completion. Accounting the |
1806 | * containing request is enough. | 1838 | * containing request is enough. |
1807 | */ | 1839 | */ |
1808 | if (blk_do_io_stat(req) && req != &req->q->flush_rq) { | 1840 | if (blk_do_io_stat(req) && !(req->cmd_flags & REQ_FLUSH_SEQ)) { |
1809 | unsigned long duration = jiffies - req->start_time; | 1841 | unsigned long duration = jiffies - req->start_time; |
1810 | const int rw = rq_data_dir(req); | 1842 | const int rw = rq_data_dir(req); |
1811 | struct hd_struct *part; | 1843 | struct hd_struct *part; |
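The get_request() hunk above initializes elevator-private data only when blk_rq_should_init_elevator() allows it, so requests carrying REQ_FLUSH or REQ_FUA never take an elvpriv reference and their private space stays free for flush sequencing. A minimal standalone C sketch of that decision; the EX_* flag values and function names are illustrative stand-ins, not the kernel's encodings:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative flag values -- not the kernel's REQ_* encoding. */
#define EX_REQ_FLUSH (1u << 0)
#define EX_REQ_FUA   (1u << 1)

/*
 * Mirrors the intent of blk_rq_should_init_elevator(): a request allocated
 * for a FLUSH/FUA bio skips elevator-private setup so the flush machinery
 * can use that space instead.  A NULL bio (a bare request allocation with
 * no bio attached) still gets elevator data.
 */
static bool should_init_elevator(const unsigned int *bio_flags)
{
    if (!bio_flags)
        return true;
    if (*bio_flags & (EX_REQ_FLUSH | EX_REQ_FUA))
        return false;
    return true;
}

int main(void)
{
    unsigned int plain = 0;
    unsigned int flush_fua = EX_REQ_FLUSH | EX_REQ_FUA;

    printf("plain write -> init elevator data: %d\n", should_init_elevator(&plain));
    printf("FLUSH|FUA   -> init elevator data: %d\n", should_init_elevator(&flush_fua));
    printf("no bio      -> init elevator data: %d\n", should_init_elevator(NULL));
    return 0;
}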
diff --git a/block/blk-flush.c b/block/blk-flush.c
index b27d0208611b..0bd8c9c5d6e5 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -1,6 +1,69 @@
1 | /* | 1 | /* |
2 | * Functions to sequence FLUSH and FUA writes. | 2 | * Functions to sequence FLUSH and FUA writes. |
3 | * | ||
4 | * Copyright (C) 2011 Max Planck Institute for Gravitational Physics | ||
5 | * Copyright (C) 2011 Tejun Heo <tj@kernel.org> | ||
6 | * | ||
7 | * This file is released under the GPLv2. | ||
8 | * | ||
9 | * REQ_{FLUSH|FUA} requests are decomposed to sequences consisted of three | ||
10 | * optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request | ||
11 | * properties and hardware capability. | ||
12 | * | ||
13 | * If a request doesn't have data, only REQ_FLUSH makes sense, which | ||
14 | * indicates a simple flush request. If there is data, REQ_FLUSH indicates | ||
15 | * that the device cache should be flushed before the data is executed, and | ||
16 | * REQ_FUA means that the data must be on non-volatile media on request | ||
17 | * completion. | ||
18 | * | ||
19 | * If the device doesn't have writeback cache, FLUSH and FUA don't make any | ||
20 | * difference. The requests are either completed immediately if there's no | ||
21 | * data or executed as normal requests otherwise. | ||
22 | * | ||
23 | * If the device has writeback cache and supports FUA, REQ_FLUSH is | ||
24 | * translated to PREFLUSH but REQ_FUA is passed down directly with DATA. | ||
25 | * | ||
26 | * If the device has writeback cache and doesn't support FUA, REQ_FLUSH is | ||
27 | * translated to PREFLUSH and REQ_FUA to POSTFLUSH. | ||
28 | * | ||
29 | * The actual execution of flush is double buffered. Whenever a request | ||
30 | * needs to execute PRE or POSTFLUSH, it queues at | ||
31 | * q->flush_queue[q->flush_pending_idx]. Once certain criteria are met, a | ||
32 | * flush is issued and the pending_idx is toggled. When the flush | ||
33 | * completes, all the requests which were pending are proceeded to the next | ||
34 | * step. This allows arbitrary merging of different types of FLUSH/FUA | ||
35 | * requests. | ||
36 | * | ||
37 | * Currently, the following conditions are used to determine when to issue | ||
38 | * flush. | ||
39 | * | ||
40 | * C1. At any given time, only one flush shall be in progress. This makes | ||
41 | * double buffering sufficient. | ||
42 | * | ||
43 | * C2. Flush is deferred if any request is executing DATA of its sequence. | ||
44 | * This avoids issuing separate POSTFLUSHes for requests which shared | ||
45 | * PREFLUSH. | ||
46 | * | ||
47 | * C3. The second condition is ignored if there is a request which has | ||
48 | * waited longer than FLUSH_PENDING_TIMEOUT. This is to avoid | ||
49 | * starvation in the unlikely case where there are continuous stream of | ||
50 | * FUA (without FLUSH) requests. | ||
51 | * | ||
52 | * For devices which support FUA, it isn't clear whether C2 (and thus C3) | ||
53 | * is beneficial. | ||
54 | * | ||
55 | * Note that a sequenced FLUSH/FUA request with DATA is completed twice. | ||
56 | * Once while executing DATA and again after the whole sequence is | ||
57 | * complete. The first completion updates the contained bio but doesn't | ||
58 | * finish it so that the bio submitter is notified only after the whole | ||
59 | * sequence is complete. This is implemented by testing REQ_FLUSH_SEQ in | ||
60 | * req_bio_endio(). | ||
61 | * | ||
62 | * The above peculiarity requires that each FLUSH/FUA request has only one | ||
63 | * bio attached to it, which is guaranteed as they aren't allowed to be | ||
64 | * merged in the usual way. | ||
3 | */ | 65 | */ |
66 | |||
4 | #include <linux/kernel.h> | 67 | #include <linux/kernel.h> |
5 | #include <linux/module.h> | 68 | #include <linux/module.h> |
6 | #include <linux/bio.h> | 69 | #include <linux/bio.h> |
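The new header comment describes how each REQ_FLUSH/REQ_FUA request is decomposed into optional PREFLUSH, DATA and POSTFLUSH steps, depending on what the device advertises. A small standalone sketch of that decision, mirroring the blk_flush_policy() helper added in the next hunk; the EX_* constants are illustrative, not the kernel's flag encodings:

#include <stdio.h>

/* Illustrative stand-ins for the queue/request flags and sequence steps. */
#define EX_REQ_FLUSH      (1u << 0)   /* flush the write cache */
#define EX_REQ_FUA        (1u << 1)   /* data must reach non-volatile media */

#define EX_FSEQ_PREFLUSH  (1u << 0)
#define EX_FSEQ_DATA      (1u << 1)
#define EX_FSEQ_POSTFLUSH (1u << 2)

/*
 * Which steps does a request need?  @fflags is what the queue advertises
 * (writeback cache, native FUA), @rq_flags is what the request asked for.
 */
static unsigned int flush_policy(unsigned int fflags, unsigned int rq_flags,
                                 int has_data)
{
    unsigned int policy = 0;

    if (fflags & EX_REQ_FLUSH) {            /* device has a writeback cache */
        if (rq_flags & EX_REQ_FLUSH)
            policy |= EX_FSEQ_PREFLUSH;
        if (has_data)
            policy |= EX_FSEQ_DATA;
        if (!(fflags & EX_REQ_FUA) && (rq_flags & EX_REQ_FUA))
            policy |= EX_FSEQ_POSTFLUSH;    /* emulate FUA with a post-flush */
    }
    return policy;
}

int main(void)
{
    /* writeback cache without FUA: a FLUSH|FUA write needs all three steps */
    printf("no native FUA: policy = %#x\n",
           flush_policy(EX_REQ_FLUSH, EX_REQ_FLUSH | EX_REQ_FUA, 1));
    /* writeback cache with FUA: FUA is passed through, no post-flush needed */
    printf("native FUA:    policy = %#x\n",
           flush_policy(EX_REQ_FLUSH | EX_REQ_FUA, EX_REQ_FLUSH | EX_REQ_FUA, 1));
    return 0;
}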
@@ -11,58 +74,143 @@
11 | 74 | ||
12 | /* FLUSH/FUA sequences */ | 75 | /* FLUSH/FUA sequences */ |
13 | enum { | 76 | enum { |
14 | QUEUE_FSEQ_STARTED = (1 << 0), /* flushing in progress */ | 77 | REQ_FSEQ_PREFLUSH = (1 << 0), /* pre-flushing in progress */ |
15 | QUEUE_FSEQ_PREFLUSH = (1 << 1), /* pre-flushing in progress */ | 78 | REQ_FSEQ_DATA = (1 << 1), /* data write in progress */ |
16 | QUEUE_FSEQ_DATA = (1 << 2), /* data write in progress */ | 79 | REQ_FSEQ_POSTFLUSH = (1 << 2), /* post-flushing in progress */ |
17 | QUEUE_FSEQ_POSTFLUSH = (1 << 3), /* post-flushing in progress */ | 80 | REQ_FSEQ_DONE = (1 << 3), |
18 | QUEUE_FSEQ_DONE = (1 << 4), | 81 | |
82 | REQ_FSEQ_ACTIONS = REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA | | ||
83 | REQ_FSEQ_POSTFLUSH, | ||
84 | |||
85 | /* | ||
86 | * If flush has been pending longer than the following timeout, | ||
87 | * it's issued even if flush_data requests are still in flight. | ||
88 | */ | ||
89 | FLUSH_PENDING_TIMEOUT = 5 * HZ, | ||
19 | }; | 90 | }; |
20 | 91 | ||
21 | static struct request *queue_next_fseq(struct request_queue *q); | 92 | static bool blk_kick_flush(struct request_queue *q); |
22 | 93 | ||
23 | unsigned blk_flush_cur_seq(struct request_queue *q) | 94 | static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq) |
24 | { | 95 | { |
25 | if (!q->flush_seq) | 96 | unsigned int policy = 0; |
26 | return 0; | 97 | |
27 | return 1 << ffz(q->flush_seq); | 98 | if (fflags & REQ_FLUSH) { |
99 | if (rq->cmd_flags & REQ_FLUSH) | ||
100 | policy |= REQ_FSEQ_PREFLUSH; | ||
101 | if (blk_rq_sectors(rq)) | ||
102 | policy |= REQ_FSEQ_DATA; | ||
103 | if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA)) | ||
104 | policy |= REQ_FSEQ_POSTFLUSH; | ||
105 | } | ||
106 | return policy; | ||
28 | } | 107 | } |
29 | 108 | ||
30 | static struct request *blk_flush_complete_seq(struct request_queue *q, | 109 | static unsigned int blk_flush_cur_seq(struct request *rq) |
31 | unsigned seq, int error) | ||
32 | { | 110 | { |
33 | struct request *next_rq = NULL; | 111 | return 1 << ffz(rq->flush.seq); |
34 | 112 | } | |
35 | if (error && !q->flush_err) | 113 | |
36 | q->flush_err = error; | 114 | static void blk_flush_restore_request(struct request *rq) |
37 | 115 | { | |
38 | BUG_ON(q->flush_seq & seq); | 116 | /* |
39 | q->flush_seq |= seq; | 117 | * After flush data completion, @rq->bio is %NULL but we need to |
40 | 118 | * complete the bio again. @rq->biotail is guaranteed to equal the | |
41 | if (blk_flush_cur_seq(q) != QUEUE_FSEQ_DONE) { | 119 | * original @rq->bio. Restore it. |
42 | /* not complete yet, queue the next flush sequence */ | 120 | */ |
43 | next_rq = queue_next_fseq(q); | 121 | rq->bio = rq->biotail; |
44 | } else { | 122 | |
45 | /* complete this flush request */ | 123 | /* make @rq a normal request */ |
46 | __blk_end_request_all(q->orig_flush_rq, q->flush_err); | 124 | rq->cmd_flags &= ~REQ_FLUSH_SEQ; |
47 | q->orig_flush_rq = NULL; | 125 | rq->end_io = NULL; |
48 | q->flush_seq = 0; | 126 | } |
49 | 127 | ||
50 | /* dispatch the next flush if there's one */ | 128 | /** |
51 | if (!list_empty(&q->pending_flushes)) { | 129 | * blk_flush_complete_seq - complete flush sequence |
52 | next_rq = list_entry_rq(q->pending_flushes.next); | 130 | * @rq: FLUSH/FUA request being sequenced |
53 | list_move(&next_rq->queuelist, &q->queue_head); | 131 | * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero) |
54 | } | 132 | * @error: whether an error occurred |
133 | * | ||
134 | * @rq just completed @seq part of its flush sequence, record the | ||
135 | * completion and trigger the next step. | ||
136 | * | ||
137 | * CONTEXT: | ||
138 | * spin_lock_irq(q->queue_lock) | ||
139 | * | ||
140 | * RETURNS: | ||
141 | * %true if requests were added to the dispatch queue, %false otherwise. | ||
142 | */ | ||
143 | static bool blk_flush_complete_seq(struct request *rq, unsigned int seq, | ||
144 | int error) | ||
145 | { | ||
146 | struct request_queue *q = rq->q; | ||
147 | struct list_head *pending = &q->flush_queue[q->flush_pending_idx]; | ||
148 | bool queued = false; | ||
149 | |||
150 | BUG_ON(rq->flush.seq & seq); | ||
151 | rq->flush.seq |= seq; | ||
152 | |||
153 | if (likely(!error)) | ||
154 | seq = blk_flush_cur_seq(rq); | ||
155 | else | ||
156 | seq = REQ_FSEQ_DONE; | ||
157 | |||
158 | switch (seq) { | ||
159 | case REQ_FSEQ_PREFLUSH: | ||
160 | case REQ_FSEQ_POSTFLUSH: | ||
161 | /* queue for flush */ | ||
162 | if (list_empty(pending)) | ||
163 | q->flush_pending_since = jiffies; | ||
164 | list_move_tail(&rq->flush.list, pending); | ||
165 | break; | ||
166 | |||
167 | case REQ_FSEQ_DATA: | ||
168 | list_move_tail(&rq->flush.list, &q->flush_data_in_flight); | ||
169 | list_add(&rq->queuelist, &q->queue_head); | ||
170 | queued = true; | ||
171 | break; | ||
172 | |||
173 | case REQ_FSEQ_DONE: | ||
174 | /* | ||
175 | * @rq was previously adjusted by blk_flush_issue() for | ||
176 | * flush sequencing and may already have gone through the | ||
177 | * flush data request completion path. Restore @rq for | ||
178 | * normal completion and end it. | ||
179 | */ | ||
180 | BUG_ON(!list_empty(&rq->queuelist)); | ||
181 | list_del_init(&rq->flush.list); | ||
182 | blk_flush_restore_request(rq); | ||
183 | __blk_end_request_all(rq, error); | ||
184 | break; | ||
185 | |||
186 | default: | ||
187 | BUG(); | ||
55 | } | 188 | } |
56 | return next_rq; | 189 | |
190 | return blk_kick_flush(q) | queued; | ||
57 | } | 191 | } |
58 | 192 | ||
59 | static void blk_flush_complete_seq_end_io(struct request_queue *q, | 193 | static void flush_end_io(struct request *flush_rq, int error) |
60 | unsigned seq, int error) | ||
61 | { | 194 | { |
195 | struct request_queue *q = flush_rq->q; | ||
196 | struct list_head *running = &q->flush_queue[q->flush_running_idx]; | ||
62 | bool was_empty = elv_queue_empty(q); | 197 | bool was_empty = elv_queue_empty(q); |
63 | struct request *next_rq; | 198 | bool queued = false; |
199 | struct request *rq, *n; | ||
200 | |||
201 | BUG_ON(q->flush_pending_idx == q->flush_running_idx); | ||
202 | |||
203 | /* account completion of the flush request */ | ||
204 | q->flush_running_idx ^= 1; | ||
205 | elv_completed_request(q, flush_rq); | ||
206 | |||
207 | /* and push the waiting requests to the next stage */ | ||
208 | list_for_each_entry_safe(rq, n, running, flush.list) { | ||
209 | unsigned int seq = blk_flush_cur_seq(rq); | ||
64 | 210 | ||
65 | next_rq = blk_flush_complete_seq(q, seq, error); | 211 | BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH); |
212 | queued |= blk_flush_complete_seq(rq, seq, error); | ||
213 | } | ||
66 | 214 | ||
67 | /* | 215 | /* |
68 | * Moving a request silently to empty queue_head may stall the | 216 | * Moving a request silently to empty queue_head may stall the |
@@ -70,127 +218,154 @@ static void blk_flush_complete_seq_end_io(struct request_queue *q,
70 | * from request completion path and calling directly into | 218 | * from request completion path and calling directly into |
71 | * request_fn may confuse the driver. Always use kblockd. | 219 | * request_fn may confuse the driver. Always use kblockd. |
72 | */ | 220 | */ |
73 | if (was_empty && next_rq) | 221 | if (queued && was_empty) |
74 | __blk_run_queue(q, true); | 222 | __blk_run_queue(q, true); |
75 | } | 223 | } |
76 | 224 | ||
77 | static void pre_flush_end_io(struct request *rq, int error) | 225 | /** |
226 | * blk_kick_flush - consider issuing flush request | ||
227 | * @q: request_queue being kicked | ||
228 | * | ||
229 | * Flush related states of @q have changed, consider issuing flush request. | ||
230 | * Please read the comment at the top of this file for more info. | ||
231 | * | ||
232 | * CONTEXT: | ||
233 | * spin_lock_irq(q->queue_lock) | ||
234 | * | ||
235 | * RETURNS: | ||
236 | * %true if flush was issued, %false otherwise. | ||
237 | */ | ||
238 | static bool blk_kick_flush(struct request_queue *q) | ||
78 | { | 239 | { |
79 | elv_completed_request(rq->q, rq); | 240 | struct list_head *pending = &q->flush_queue[q->flush_pending_idx]; |
80 | blk_flush_complete_seq_end_io(rq->q, QUEUE_FSEQ_PREFLUSH, error); | 241 | struct request *first_rq = |
242 | list_first_entry(pending, struct request, flush.list); | ||
243 | |||
244 | /* C1 described at the top of this file */ | ||
245 | if (q->flush_pending_idx != q->flush_running_idx || list_empty(pending)) | ||
246 | return false; | ||
247 | |||
248 | /* C2 and C3 */ | ||
249 | if (!list_empty(&q->flush_data_in_flight) && | ||
250 | time_before(jiffies, | ||
251 | q->flush_pending_since + FLUSH_PENDING_TIMEOUT)) | ||
252 | return false; | ||
253 | |||
254 | /* | ||
255 | * Issue flush and toggle pending_idx. This makes pending_idx | ||
256 | * different from running_idx, which means flush is in flight. | ||
257 | */ | ||
258 | blk_rq_init(q, &q->flush_rq); | ||
259 | q->flush_rq.cmd_type = REQ_TYPE_FS; | ||
260 | q->flush_rq.cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ; | ||
261 | q->flush_rq.rq_disk = first_rq->rq_disk; | ||
262 | q->flush_rq.end_io = flush_end_io; | ||
263 | |||
264 | q->flush_pending_idx ^= 1; | ||
265 | elv_insert(q, &q->flush_rq, ELEVATOR_INSERT_REQUEUE); | ||
266 | return true; | ||
81 | } | 267 | } |
82 | 268 | ||
83 | static void flush_data_end_io(struct request *rq, int error) | 269 | static void flush_data_end_io(struct request *rq, int error) |
84 | { | 270 | { |
85 | elv_completed_request(rq->q, rq); | 271 | struct request_queue *q = rq->q; |
86 | blk_flush_complete_seq_end_io(rq->q, QUEUE_FSEQ_DATA, error); | 272 | bool was_empty = elv_queue_empty(q); |
87 | } | ||
88 | 273 | ||
89 | static void post_flush_end_io(struct request *rq, int error) | 274 | /* |
90 | { | 275 | * After populating an empty queue, kick it to avoid stall. Read |
91 | elv_completed_request(rq->q, rq); | 276 | * the comment in flush_end_io(). |
92 | blk_flush_complete_seq_end_io(rq->q, QUEUE_FSEQ_POSTFLUSH, error); | 277 | */ |
278 | if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error) && was_empty) | ||
279 | __blk_run_queue(q, true); | ||
93 | } | 280 | } |
94 | 281 | ||
95 | static void init_flush_request(struct request *rq, struct gendisk *disk) | 282 | /** |
283 | * blk_insert_flush - insert a new FLUSH/FUA request | ||
284 | * @rq: request to insert | ||
285 | * | ||
286 | * To be called from elv_insert() for %ELEVATOR_INSERT_FLUSH insertions. | ||
287 | * @rq is being submitted. Analyze what needs to be done and put it on the | ||
288 | * right queue. | ||
289 | * | ||
290 | * CONTEXT: | ||
291 | * spin_lock_irq(q->queue_lock) | ||
292 | */ | ||
293 | void blk_insert_flush(struct request *rq) | ||
96 | { | 294 | { |
97 | rq->cmd_type = REQ_TYPE_FS; | 295 | struct request_queue *q = rq->q; |
98 | rq->cmd_flags = WRITE_FLUSH; | 296 | unsigned int fflags = q->flush_flags; /* may change, cache */ |
99 | rq->rq_disk = disk; | 297 | unsigned int policy = blk_flush_policy(fflags, rq); |
100 | } | ||
101 | 298 | ||
102 | static struct request *queue_next_fseq(struct request_queue *q) | 299 | BUG_ON(rq->end_io); |
103 | { | 300 | BUG_ON(!rq->bio || rq->bio != rq->biotail); |
104 | struct request *orig_rq = q->orig_flush_rq; | ||
105 | struct request *rq = &q->flush_rq; | ||
106 | 301 | ||
107 | blk_rq_init(q, rq); | 302 | /* |
303 | * @policy now records what operations need to be done. Adjust | ||
304 | * REQ_FLUSH and FUA for the driver. | ||
305 | */ | ||
306 | rq->cmd_flags &= ~REQ_FLUSH; | ||
307 | if (!(fflags & REQ_FUA)) | ||
308 | rq->cmd_flags &= ~REQ_FUA; | ||
108 | 309 | ||
109 | switch (blk_flush_cur_seq(q)) { | 310 | /* |
110 | case QUEUE_FSEQ_PREFLUSH: | 311 | * If there's data but flush is not necessary, the request can be |
111 | init_flush_request(rq, orig_rq->rq_disk); | 312 | * processed directly without going through flush machinery. Queue |
112 | rq->end_io = pre_flush_end_io; | 313 | * for normal execution. |
113 | break; | 314 | */ |
114 | case QUEUE_FSEQ_DATA: | 315 | if ((policy & REQ_FSEQ_DATA) && |
115 | init_request_from_bio(rq, orig_rq->bio); | 316 | !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) { |
116 | /* | 317 | list_add(&rq->queuelist, &q->queue_head); |
117 | * orig_rq->rq_disk may be different from | 318 | return; |
118 | * bio->bi_bdev->bd_disk if orig_rq got here through | ||
119 | * remapping drivers. Make sure rq->rq_disk points | ||
120 | * to the same one as orig_rq. | ||
121 | */ | ||
122 | rq->rq_disk = orig_rq->rq_disk; | ||
123 | rq->cmd_flags &= ~(REQ_FLUSH | REQ_FUA); | ||
124 | rq->cmd_flags |= orig_rq->cmd_flags & (REQ_FLUSH | REQ_FUA); | ||
125 | rq->end_io = flush_data_end_io; | ||
126 | break; | ||
127 | case QUEUE_FSEQ_POSTFLUSH: | ||
128 | init_flush_request(rq, orig_rq->rq_disk); | ||
129 | rq->end_io = post_flush_end_io; | ||
130 | break; | ||
131 | default: | ||
132 | BUG(); | ||
133 | } | 319 | } |
134 | 320 | ||
135 | elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE); | 321 | /* |
136 | return rq; | 322 | * @rq should go through flush machinery. Mark it part of flush |
323 | * sequence and submit for further processing. | ||
324 | */ | ||
325 | memset(&rq->flush, 0, sizeof(rq->flush)); | ||
326 | INIT_LIST_HEAD(&rq->flush.list); | ||
327 | rq->cmd_flags |= REQ_FLUSH_SEQ; | ||
328 | rq->end_io = flush_data_end_io; | ||
329 | |||
330 | blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0); | ||
137 | } | 331 | } |
138 | 332 | ||
139 | struct request *blk_do_flush(struct request_queue *q, struct request *rq) | 333 | /** |
334 | * blk_abort_flushes - @q is being aborted, abort flush requests | ||
335 | * @q: request_queue being aborted | ||
336 | * | ||
337 | * To be called from elv_abort_queue(). @q is being aborted. Prepare all | ||
338 | * FLUSH/FUA requests for abortion. | ||
339 | * | ||
340 | * CONTEXT: | ||
341 | * spin_lock_irq(q->queue_lock) | ||
342 | */ | ||
343 | void blk_abort_flushes(struct request_queue *q) | ||
140 | { | 344 | { |
141 | unsigned int fflags = q->flush_flags; /* may change, cache it */ | 345 | struct request *rq, *n; |
142 | bool has_flush = fflags & REQ_FLUSH, has_fua = fflags & REQ_FUA; | 346 | int i; |
143 | bool do_preflush = has_flush && (rq->cmd_flags & REQ_FLUSH); | ||
144 | bool do_postflush = has_flush && !has_fua && (rq->cmd_flags & REQ_FUA); | ||
145 | unsigned skip = 0; | ||
146 | 347 | ||
147 | /* | 348 | /* |
148 | * Special case. If there's data but flush is not necessary, | 349 | * Requests in flight for data are already owned by the dispatch |
149 | * the request can be issued directly. | 350 | * queue or the device driver. Just restore for normal completion. |
150 | * | ||
151 | * Flush w/o data should be able to be issued directly too but | ||
152 | * currently some drivers assume that rq->bio contains | ||
153 | * non-zero data if it isn't NULL and empty FLUSH requests | ||
154 | * getting here usually have bio's without data. | ||
155 | */ | 351 | */ |
156 | if (blk_rq_sectors(rq) && !do_preflush && !do_postflush) { | 352 | list_for_each_entry_safe(rq, n, &q->flush_data_in_flight, flush.list) { |
157 | rq->cmd_flags &= ~REQ_FLUSH; | 353 | list_del_init(&rq->flush.list); |
158 | if (!has_fua) | 354 | blk_flush_restore_request(rq); |
159 | rq->cmd_flags &= ~REQ_FUA; | ||
160 | return rq; | ||
161 | } | 355 | } |
162 | 356 | ||
163 | /* | 357 | /* |
164 | * Sequenced flushes can't be processed in parallel. If | 358 | * We need to give away requests on flush queues. Restore for |
165 | * another one is already in progress, queue for later | 359 | * normal completion and put them on the dispatch queue. |
166 | * processing. | ||
167 | */ | 360 | */ |
168 | if (q->flush_seq) { | 361 | for (i = 0; i < ARRAY_SIZE(q->flush_queue); i++) { |
169 | list_move_tail(&rq->queuelist, &q->pending_flushes); | 362 | list_for_each_entry_safe(rq, n, &q->flush_queue[i], |
170 | return NULL; | 363 | flush.list) { |
364 | list_del_init(&rq->flush.list); | ||
365 | blk_flush_restore_request(rq); | ||
366 | list_add_tail(&rq->queuelist, &q->queue_head); | ||
367 | } | ||
171 | } | 368 | } |
172 | |||
173 | /* | ||
174 | * Start a new flush sequence | ||
175 | */ | ||
176 | q->flush_err = 0; | ||
177 | q->flush_seq |= QUEUE_FSEQ_STARTED; | ||
178 | |||
179 | /* adjust FLUSH/FUA of the original request and stash it away */ | ||
180 | rq->cmd_flags &= ~REQ_FLUSH; | ||
181 | if (!has_fua) | ||
182 | rq->cmd_flags &= ~REQ_FUA; | ||
183 | blk_dequeue_request(rq); | ||
184 | q->orig_flush_rq = rq; | ||
185 | |||
186 | /* skip unneded sequences and return the first one */ | ||
187 | if (!do_preflush) | ||
188 | skip |= QUEUE_FSEQ_PREFLUSH; | ||
189 | if (!blk_rq_sectors(rq)) | ||
190 | skip |= QUEUE_FSEQ_DATA; | ||
191 | if (!do_postflush) | ||
192 | skip |= QUEUE_FSEQ_POSTFLUSH; | ||
193 | return blk_flush_complete_seq(q, skip, 0); | ||
194 | } | 369 | } |
195 | 370 | ||
196 | static void bio_end_flush(struct bio *bio, int err) | 371 | static void bio_end_flush(struct bio *bio, int err) |
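The double buffering described in the header comment works like this: requests needing a pre- or post-flush park on flush_queue[flush_pending_idx]; blk_kick_flush() toggles pending_idx when it issues the flush, so pending_idx != running_idx means a flush is in flight; flush_end_io() toggles running_idx back and advances every request that was waiting on it. A toy standalone model of that bookkeeping, with counters standing in for the real request lists (the C2/C3 timeout rule is omitted for brevity); all names here are illustrative:

#include <stdio.h>

struct flushq {
    int pending_idx;        /* where new flush waiters are parked */
    int running_idx;        /* whose flush is currently in flight */
    int queued[2];          /* waiters on each of the two lists   */
};

static void queue_for_flush(struct flushq *q)
{
    q->queued[q->pending_idx]++;
}

static void kick_flush(struct flushq *q)
{
    /* C1: only one flush in flight, and only if someone is waiting */
    if (q->pending_idx != q->running_idx || !q->queued[q->pending_idx])
        return;
    q->pending_idx ^= 1;    /* pending != running => flush in flight */
    printf("issuing flush for %d request(s)\n", q->queued[q->running_idx]);
}

static void flush_done(struct flushq *q)
{
    q->running_idx ^= 1;    /* catch up with pending_idx */
    printf("flush done, %d request(s) proceed\n", q->queued[q->running_idx ^ 1]);
    q->queued[q->running_idx ^ 1] = 0;
}

int main(void)
{
    struct flushq q = { 0 };

    queue_for_flush(&q);    /* A needs a pre-flush   */
    queue_for_flush(&q);    /* B needs one too       */
    kick_flush(&q);         /* one flush serves both A and B */
    queue_for_flush(&q);    /* C arrives while that flush is in flight */
    flush_done(&q);         /* A and B advance; C waits on the other list */
    kick_flush(&q);
    flush_done(&q);         /* now C advances */
    return 0;
}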
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 36c8c1f2af18..df649fa59ded 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -176,13 +176,6 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
176 | blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS); | 176 | blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS); |
177 | 177 | ||
178 | /* | 178 | /* |
179 | * If the caller didn't supply a lock, fall back to our embedded | ||
180 | * per-queue locks | ||
181 | */ | ||
182 | if (!q->queue_lock) | ||
183 | q->queue_lock = &q->__queue_lock; | ||
184 | |||
185 | /* | ||
186 | * by default assume old behaviour and bounce for any highmem page | 179 | * by default assume old behaviour and bounce for any highmem page |
187 | */ | 180 | */ |
188 | blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); | 181 | blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); |
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 41fb69150b4d..261c75c665ae 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -471,8 +471,6 @@ static void blk_release_queue(struct kobject *kobj)
471 | 471 | ||
472 | blk_sync_queue(q); | 472 | blk_sync_queue(q); |
473 | 473 | ||
474 | blk_throtl_exit(q); | ||
475 | |||
476 | if (rl->rq_pool) | 474 | if (rl->rq_pool) |
477 | mempool_destroy(rl->rq_pool); | 475 | mempool_destroy(rl->rq_pool); |
478 | 476 | ||
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index e36cc10a346c..061dee66e2a6 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -968,7 +968,7 @@ static void throtl_update_blkio_group_write_iops(void *key,
968 | throtl_schedule_delayed_work(td, 0); | 968 | throtl_schedule_delayed_work(td, 0); |
969 | } | 969 | } |
970 | 970 | ||
971 | void throtl_shutdown_timer_wq(struct request_queue *q) | 971 | static void throtl_shutdown_wq(struct request_queue *q) |
972 | { | 972 | { |
973 | struct throtl_data *td = q->td; | 973 | struct throtl_data *td = q->td; |
974 | 974 | ||
@@ -1102,7 +1102,7 @@ void blk_throtl_exit(struct request_queue *q)
1102 | 1102 | ||
1103 | BUG_ON(!td); | 1103 | BUG_ON(!td); |
1104 | 1104 | ||
1105 | throtl_shutdown_timer_wq(q); | 1105 | throtl_shutdown_wq(q); |
1106 | 1106 | ||
1107 | spin_lock_irq(q->queue_lock); | 1107 | spin_lock_irq(q->queue_lock); |
1108 | throtl_release_tgs(td); | 1108 | throtl_release_tgs(td); |
@@ -1132,7 +1132,7 @@ void blk_throtl_exit(struct request_queue *q)
1132 | * update limits through cgroup and another work got queued, cancel | 1132 | * update limits through cgroup and another work got queued, cancel |
1133 | * it. | 1133 | * it. |
1134 | */ | 1134 | */ |
1135 | throtl_shutdown_timer_wq(q); | 1135 | throtl_shutdown_wq(q); |
1136 | throtl_td_free(td); | 1136 | throtl_td_free(td); |
1137 | } | 1137 | } |
1138 | 1138 | ||
diff --git a/block/blk.h b/block/blk.h
index 2db8f32838e7..284b500852bd 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -51,21 +51,17 @@ static inline void blk_clear_rq_complete(struct request *rq)
51 | */ | 51 | */ |
52 | #define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash)) | 52 | #define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash)) |
53 | 53 | ||
54 | struct request *blk_do_flush(struct request_queue *q, struct request *rq); | 54 | void blk_insert_flush(struct request *rq); |
55 | void blk_abort_flushes(struct request_queue *q); | ||
55 | 56 | ||
56 | static inline struct request *__elv_next_request(struct request_queue *q) | 57 | static inline struct request *__elv_next_request(struct request_queue *q) |
57 | { | 58 | { |
58 | struct request *rq; | 59 | struct request *rq; |
59 | 60 | ||
60 | while (1) { | 61 | while (1) { |
61 | while (!list_empty(&q->queue_head)) { | 62 | if (!list_empty(&q->queue_head)) { |
62 | rq = list_entry_rq(q->queue_head.next); | 63 | rq = list_entry_rq(q->queue_head.next); |
63 | if (!(rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) || | 64 | return rq; |
64 | rq == &q->flush_rq) | ||
65 | return rq; | ||
66 | rq = blk_do_flush(q, rq); | ||
67 | if (rq) | ||
68 | return rq; | ||
69 | } | 65 | } |
70 | 66 | ||
71 | if (!q->elevator->ops->elevator_dispatch_fn(q, 0)) | 67 | if (!q->elevator->ops->elevator_dispatch_fn(q, 0)) |
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index ea83a4f0c27d..89dc745c7d94 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -54,9 +54,9 @@ static const int cfq_hist_divisor = 4;
54 | #define CFQQ_SEEKY(cfqq) (hweight32(cfqq->seek_history) > 32/8) | 54 | #define CFQQ_SEEKY(cfqq) (hweight32(cfqq->seek_history) > 32/8) |
55 | 55 | ||
56 | #define RQ_CIC(rq) \ | 56 | #define RQ_CIC(rq) \ |
57 | ((struct cfq_io_context *) (rq)->elevator_private) | 57 | ((struct cfq_io_context *) (rq)->elevator_private[0]) |
58 | #define RQ_CFQQ(rq) (struct cfq_queue *) ((rq)->elevator_private2) | 58 | #define RQ_CFQQ(rq) (struct cfq_queue *) ((rq)->elevator_private[1]) |
59 | #define RQ_CFQG(rq) (struct cfq_group *) ((rq)->elevator_private3) | 59 | #define RQ_CFQG(rq) (struct cfq_group *) ((rq)->elevator_private[2]) |
60 | 60 | ||
61 | static struct kmem_cache *cfq_pool; | 61 | static struct kmem_cache *cfq_pool; |
62 | static struct kmem_cache *cfq_ioc_pool; | 62 | static struct kmem_cache *cfq_ioc_pool; |
@@ -146,7 +146,6 @@ struct cfq_queue {
146 | struct cfq_rb_root *service_tree; | 146 | struct cfq_rb_root *service_tree; |
147 | struct cfq_queue *new_cfqq; | 147 | struct cfq_queue *new_cfqq; |
148 | struct cfq_group *cfqg; | 148 | struct cfq_group *cfqg; |
149 | struct cfq_group *orig_cfqg; | ||
150 | /* Number of sectors dispatched from queue in single dispatch round */ | 149 | /* Number of sectors dispatched from queue in single dispatch round */ |
151 | unsigned long nr_sectors; | 150 | unsigned long nr_sectors; |
152 | }; | 151 | }; |
@@ -285,7 +284,6 @@ struct cfq_data {
285 | unsigned int cfq_slice_idle; | 284 | unsigned int cfq_slice_idle; |
286 | unsigned int cfq_group_idle; | 285 | unsigned int cfq_group_idle; |
287 | unsigned int cfq_latency; | 286 | unsigned int cfq_latency; |
288 | unsigned int cfq_group_isolation; | ||
289 | 287 | ||
290 | unsigned int cic_index; | 288 | unsigned int cic_index; |
291 | struct list_head cic_list; | 289 | struct list_head cic_list; |
@@ -1187,32 +1185,6 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1187 | int new_cfqq = 1; | 1185 | int new_cfqq = 1; |
1188 | int group_changed = 0; | 1186 | int group_changed = 0; |
1189 | 1187 | ||
1190 | #ifdef CONFIG_CFQ_GROUP_IOSCHED | ||
1191 | if (!cfqd->cfq_group_isolation | ||
1192 | && cfqq_type(cfqq) == SYNC_NOIDLE_WORKLOAD | ||
1193 | && cfqq->cfqg && cfqq->cfqg != &cfqd->root_group) { | ||
1194 | /* Move this cfq to root group */ | ||
1195 | cfq_log_cfqq(cfqd, cfqq, "moving to root group"); | ||
1196 | if (!RB_EMPTY_NODE(&cfqq->rb_node)) | ||
1197 | cfq_group_service_tree_del(cfqd, cfqq->cfqg); | ||
1198 | cfqq->orig_cfqg = cfqq->cfqg; | ||
1199 | cfqq->cfqg = &cfqd->root_group; | ||
1200 | cfqd->root_group.ref++; | ||
1201 | group_changed = 1; | ||
1202 | } else if (!cfqd->cfq_group_isolation | ||
1203 | && cfqq_type(cfqq) == SYNC_WORKLOAD && cfqq->orig_cfqg) { | ||
1204 | /* cfqq is sequential now needs to go to its original group */ | ||
1205 | BUG_ON(cfqq->cfqg != &cfqd->root_group); | ||
1206 | if (!RB_EMPTY_NODE(&cfqq->rb_node)) | ||
1207 | cfq_group_service_tree_del(cfqd, cfqq->cfqg); | ||
1208 | cfq_put_cfqg(cfqq->cfqg); | ||
1209 | cfqq->cfqg = cfqq->orig_cfqg; | ||
1210 | cfqq->orig_cfqg = NULL; | ||
1211 | group_changed = 1; | ||
1212 | cfq_log_cfqq(cfqd, cfqq, "moved to origin group"); | ||
1213 | } | ||
1214 | #endif | ||
1215 | |||
1216 | service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq), | 1188 | service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq), |
1217 | cfqq_type(cfqq)); | 1189 | cfqq_type(cfqq)); |
1218 | if (cfq_class_idle(cfqq)) { | 1190 | if (cfq_class_idle(cfqq)) { |
@@ -2542,7 +2514,7 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
2542 | static void cfq_put_queue(struct cfq_queue *cfqq) | 2514 | static void cfq_put_queue(struct cfq_queue *cfqq) |
2543 | { | 2515 | { |
2544 | struct cfq_data *cfqd = cfqq->cfqd; | 2516 | struct cfq_data *cfqd = cfqq->cfqd; |
2545 | struct cfq_group *cfqg, *orig_cfqg; | 2517 | struct cfq_group *cfqg; |
2546 | 2518 | ||
2547 | BUG_ON(cfqq->ref <= 0); | 2519 | BUG_ON(cfqq->ref <= 0); |
2548 | 2520 | ||
@@ -2554,7 +2526,6 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
2554 | BUG_ON(rb_first(&cfqq->sort_list)); | 2526 | BUG_ON(rb_first(&cfqq->sort_list)); |
2555 | BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]); | 2527 | BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]); |
2556 | cfqg = cfqq->cfqg; | 2528 | cfqg = cfqq->cfqg; |
2557 | orig_cfqg = cfqq->orig_cfqg; | ||
2558 | 2529 | ||
2559 | if (unlikely(cfqd->active_queue == cfqq)) { | 2530 | if (unlikely(cfqd->active_queue == cfqq)) { |
2560 | __cfq_slice_expired(cfqd, cfqq, 0); | 2531 | __cfq_slice_expired(cfqd, cfqq, 0); |
@@ -2564,8 +2535,6 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
2564 | BUG_ON(cfq_cfqq_on_rr(cfqq)); | 2535 | BUG_ON(cfq_cfqq_on_rr(cfqq)); |
2565 | kmem_cache_free(cfq_pool, cfqq); | 2536 | kmem_cache_free(cfq_pool, cfqq); |
2566 | cfq_put_cfqg(cfqg); | 2537 | cfq_put_cfqg(cfqg); |
2567 | if (orig_cfqg) | ||
2568 | cfq_put_cfqg(orig_cfqg); | ||
2569 | } | 2538 | } |
2570 | 2539 | ||
2571 | /* | 2540 | /* |
@@ -3613,12 +3582,12 @@ static void cfq_put_request(struct request *rq)
3613 | 3582 | ||
3614 | put_io_context(RQ_CIC(rq)->ioc); | 3583 | put_io_context(RQ_CIC(rq)->ioc); |
3615 | 3584 | ||
3616 | rq->elevator_private = NULL; | 3585 | rq->elevator_private[0] = NULL; |
3617 | rq->elevator_private2 = NULL; | 3586 | rq->elevator_private[1] = NULL; |
3618 | 3587 | ||
3619 | /* Put down rq reference on cfqg */ | 3588 | /* Put down rq reference on cfqg */ |
3620 | cfq_put_cfqg(RQ_CFQG(rq)); | 3589 | cfq_put_cfqg(RQ_CFQG(rq)); |
3621 | rq->elevator_private3 = NULL; | 3590 | rq->elevator_private[2] = NULL; |
3622 | 3591 | ||
3623 | cfq_put_queue(cfqq); | 3592 | cfq_put_queue(cfqq); |
3624 | } | 3593 | } |
@@ -3705,13 +3674,13 @@ new_queue:
3705 | } | 3674 | } |
3706 | 3675 | ||
3707 | cfqq->allocated[rw]++; | 3676 | cfqq->allocated[rw]++; |
3708 | cfqq->ref++; | ||
3709 | rq->elevator_private = cic; | ||
3710 | rq->elevator_private2 = cfqq; | ||
3711 | rq->elevator_private3 = cfq_ref_get_cfqg(cfqq->cfqg); | ||
3712 | 3677 | ||
3713 | spin_unlock_irqrestore(q->queue_lock, flags); | 3678 | spin_unlock_irqrestore(q->queue_lock, flags); |
3714 | 3679 | ||
3680 | cfqq->ref++; | ||
3681 | rq->elevator_private[0] = cic; | ||
3682 | rq->elevator_private[1] = cfqq; | ||
3683 | rq->elevator_private[2] = cfq_ref_get_cfqg(cfqq->cfqg); | ||
3715 | return 0; | 3684 | return 0; |
3716 | 3685 | ||
3717 | queue_fail: | 3686 | queue_fail: |
@@ -3953,7 +3922,6 @@ static void *cfq_init_queue(struct request_queue *q)
3953 | cfqd->cfq_slice_idle = cfq_slice_idle; | 3922 | cfqd->cfq_slice_idle = cfq_slice_idle; |
3954 | cfqd->cfq_group_idle = cfq_group_idle; | 3923 | cfqd->cfq_group_idle = cfq_group_idle; |
3955 | cfqd->cfq_latency = 1; | 3924 | cfqd->cfq_latency = 1; |
3956 | cfqd->cfq_group_isolation = 0; | ||
3957 | cfqd->hw_tag = -1; | 3925 | cfqd->hw_tag = -1; |
3958 | /* | 3926 | /* |
3959 | * we optimistically start assuming sync ops weren't delayed in last | 3927 | * we optimistically start assuming sync ops weren't delayed in last |
@@ -4029,7 +3997,6 @@ SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
4029 | SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1); | 3997 | SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1); |
4030 | SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0); | 3998 | SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0); |
4031 | SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0); | 3999 | SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0); |
4032 | SHOW_FUNCTION(cfq_group_isolation_show, cfqd->cfq_group_isolation, 0); | ||
4033 | #undef SHOW_FUNCTION | 4000 | #undef SHOW_FUNCTION |
4034 | 4001 | ||
4035 | #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ | 4002 | #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ |
@@ -4063,7 +4030,6 @@ STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
4063 | STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, | 4030 | STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, |
4064 | UINT_MAX, 0); | 4031 | UINT_MAX, 0); |
4065 | STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0); | 4032 | STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0); |
4066 | STORE_FUNCTION(cfq_group_isolation_store, &cfqd->cfq_group_isolation, 0, 1, 0); | ||
4067 | #undef STORE_FUNCTION | 4033 | #undef STORE_FUNCTION |
4068 | 4034 | ||
4069 | #define CFQ_ATTR(name) \ | 4035 | #define CFQ_ATTR(name) \ |
@@ -4081,7 +4047,6 @@ static struct elv_fs_entry cfq_attrs[] = {
4081 | CFQ_ATTR(slice_idle), | 4047 | CFQ_ATTR(slice_idle), |
4082 | CFQ_ATTR(group_idle), | 4048 | CFQ_ATTR(group_idle), |
4083 | CFQ_ATTR(low_latency), | 4049 | CFQ_ATTR(low_latency), |
4084 | CFQ_ATTR(group_isolation), | ||
4085 | __ATTR_NULL | 4050 | __ATTR_NULL |
4086 | }; | 4051 | }; |
4087 | 4052 | ||
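CFQ's switch from three named elevator_private fields to the elevator_private[] array pairs with the note in the blk-core.c hunk that skipping elevator init "allows a request to share the flush and elevator data": a request is owned either by the elevator or by the flush machinery, never both, so the two sets of fields can overlay each other. A standalone sketch of that space-sharing idea; this is only an illustration, not the actual struct request layout:

#include <stdio.h>

struct list_node {
    struct list_node *next, *prev;
};

struct toy_request {
    unsigned int cmd_flags;
    union {                              /* anonymous union (C11) */
        /* normal request: per-elevator pointers (CFQ: cic, cfqq, cfqg) */
        void *elevator_private[3];
        /* REQ_FLUSH_SEQ request: flush sequencing state */
        struct {
            unsigned int seq;
            struct list_node list;
        } flush;
    };
};

int main(void)
{
    struct toy_request rq = { .cmd_flags = 0 };

    rq.flush.seq = 1;   /* the flush machinery owns this request now */
    printf("flush.seq=%u, elevator side %zu bytes, flush side %zu bytes\n",
           rq.flush.seq, sizeof(rq.elevator_private), sizeof(rq.flush));
    return 0;
}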
diff --git a/block/elevator.c b/block/elevator.c
index 236e93c1f46c..fabf3675c913 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -673,6 +673,11 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
673 | q->elevator->ops->elevator_add_req_fn(q, rq); | 673 | q->elevator->ops->elevator_add_req_fn(q, rq); |
674 | break; | 674 | break; |
675 | 675 | ||
676 | case ELEVATOR_INSERT_FLUSH: | ||
677 | rq->cmd_flags |= REQ_SOFTBARRIER; | ||
678 | blk_insert_flush(rq); | ||
679 | break; | ||
680 | |||
676 | default: | 681 | default: |
677 | printk(KERN_ERR "%s: bad insertion point %d\n", | 682 | printk(KERN_ERR "%s: bad insertion point %d\n", |
678 | __func__, where); | 683 | __func__, where); |
@@ -759,7 +764,7 @@ int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
759 | if (e->ops->elevator_set_req_fn) | 764 | if (e->ops->elevator_set_req_fn) |
760 | return e->ops->elevator_set_req_fn(q, rq, gfp_mask); | 765 | return e->ops->elevator_set_req_fn(q, rq, gfp_mask); |
761 | 766 | ||
762 | rq->elevator_private = NULL; | 767 | rq->elevator_private[0] = NULL; |
763 | return 0; | 768 | return 0; |
764 | } | 769 | } |
765 | 770 | ||
@@ -785,6 +790,8 @@ void elv_abort_queue(struct request_queue *q)
785 | { | 790 | { |
786 | struct request *rq; | 791 | struct request *rq; |
787 | 792 | ||
793 | blk_abort_flushes(q); | ||
794 | |||
788 | while (!list_empty(&q->queue_head)) { | 795 | while (!list_empty(&q->queue_head)) { |
789 | rq = list_entry_rq(q->queue_head.next); | 796 | rq = list_entry_rq(q->queue_head.next); |
790 | rq->cmd_flags |= REQ_QUIET; | 797 | rq->cmd_flags |= REQ_QUIET; |
diff --git a/block/genhd.c b/block/genhd.c
index cbf1112a885c..3e2b57b55e38 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1158,14 +1158,14 @@ static int diskstats_show(struct seq_file *seqf, void *v)
1158 | "%u %lu %lu %llu %u %u %u %u\n", | 1158 | "%u %lu %lu %llu %u %u %u %u\n", |
1159 | MAJOR(part_devt(hd)), MINOR(part_devt(hd)), | 1159 | MAJOR(part_devt(hd)), MINOR(part_devt(hd)), |
1160 | disk_name(gp, hd->partno, buf), | 1160 | disk_name(gp, hd->partno, buf), |
1161 | part_stat_read(hd, ios[0]), | 1161 | part_stat_read(hd, ios[READ]), |
1162 | part_stat_read(hd, merges[0]), | 1162 | part_stat_read(hd, merges[READ]), |
1163 | (unsigned long long)part_stat_read(hd, sectors[0]), | 1163 | (unsigned long long)part_stat_read(hd, sectors[READ]), |
1164 | jiffies_to_msecs(part_stat_read(hd, ticks[0])), | 1164 | jiffies_to_msecs(part_stat_read(hd, ticks[READ])), |
1165 | part_stat_read(hd, ios[1]), | 1165 | part_stat_read(hd, ios[WRITE]), |
1166 | part_stat_read(hd, merges[1]), | 1166 | part_stat_read(hd, merges[WRITE]), |
1167 | (unsigned long long)part_stat_read(hd, sectors[1]), | 1167 | (unsigned long long)part_stat_read(hd, sectors[WRITE]), |
1168 | jiffies_to_msecs(part_stat_read(hd, ticks[1])), | 1168 | jiffies_to_msecs(part_stat_read(hd, ticks[WRITE])), |
1169 | part_in_flight(hd), | 1169 | part_in_flight(hd), |
1170 | jiffies_to_msecs(part_stat_read(hd, io_ticks)), | 1170 | jiffies_to_msecs(part_stat_read(hd, io_ticks)), |
1171 | jiffies_to_msecs(part_stat_read(hd, time_in_queue)) | 1171 | jiffies_to_msecs(part_stat_read(hd, time_in_queue)) |
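The diskstats_show() hunk swaps the bare 0/1 array indices for the READ/WRITE direction constants, which documents which column is which. A tiny standalone example of the same convention; the struct and numbers here are made up for illustration:

#include <stdio.h>

enum { EX_READ = 0, EX_WRITE = 1 };  /* illustrative direction indices */

struct toy_diskstats {
    unsigned long ios[2];
    unsigned long sectors[2];
};

int main(void)
{
    struct toy_diskstats hd = {
        .ios     = { 120, 45 },
        .sectors = { 960, 360 },
    };

    printf("reads:  %lu ios, %lu sectors\n", hd.ios[EX_READ], hd.sectors[EX_READ]);
    printf("writes: %lu ios, %lu sectors\n", hd.ios[EX_WRITE], hd.sectors[EX_WRITE]);
    return 0;
}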