 block/blk-core.c       | 21
 block/blk-flush.c      | 98
 block/blk.h            |  4
 include/linux/blkdev.h | 24
 4 files changed, 72 insertions, 75 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index d316662682c8..8870ae40179d 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -136,7 +136,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
 {
 	struct request_queue *q = rq->q;
 
-	if (&q->bar_rq != rq) {
+	if (&q->flush_rq != rq) {
 		if (error)
 			clear_bit(BIO_UPTODATE, &bio->bi_flags);
 		else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
@@ -160,13 +160,12 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
 		if (bio->bi_size == 0)
 			bio_endio(bio, error);
 	} else {
-
 		/*
-		 * Okay, this is the barrier request in progress, just
-		 * record the error;
+		 * Okay, this is the sequenced flush request in
+		 * progress, just record the error;
 		 */
-		if (error && !q->orderr)
-			q->orderr = error;
+		if (error && !q->flush_err)
+			q->flush_err = error;
 	}
 }
 
@@ -520,7 +519,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	init_timer(&q->unplug_timer);
 	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
 	INIT_LIST_HEAD(&q->timeout_list);
-	INIT_LIST_HEAD(&q->pending_barriers);
+	INIT_LIST_HEAD(&q->pending_flushes);
 	INIT_WORK(&q->unplug_work, blk_unplug_work);
 
 	kobject_init(&q->kobj, &blk_queue_ktype);
@@ -1764,11 +1763,11 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes)
 static void blk_account_io_done(struct request *req)
 {
 	/*
-	 * Account IO completion.  bar_rq isn't accounted as a normal
-	 * IO on queueing nor completion.  Accounting the containing
-	 * request is enough.
+	 * Account IO completion.  flush_rq isn't accounted as a
+	 * normal IO on queueing nor completion.  Accounting the
+	 * containing request is enough.
 	 */
-	if (blk_do_io_stat(req) && req != &req->q->bar_rq) {
+	if (blk_do_io_stat(req) && req != &req->q->flush_rq) {
 		unsigned long duration = jiffies - req->start_time;
 		const int rw = rq_data_dir(req);
 		struct hd_struct *part;
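Aside, not part of the patch: the req_bio_endio() and blk_account_io_done() hunks above tell the queue's internal flush request apart from normal requests by address comparison alone. Since each request_queue embeds exactly one flush_rq, pointer identity is a sufficient test. A minimal standalone sketch of that pattern, with stand-in types rather than the real kernel structures:

#include <stdio.h>

struct request { int dummy; };

struct request_queue {
	struct request flush_rq;	/* embedded, exactly one per queue */
};

/* stand-in for the "req != &req->q->flush_rq" identity test above */
static int is_internal_flush(struct request_queue *q, struct request *rq)
{
	return rq == &q->flush_rq;
}

int main(void)
{
	struct request_queue q = { { 0 } };
	struct request normal = { 0 };

	/* prints "1 0": the embedded request matches, a normal one doesn't */
	printf("%d %d\n", is_internal_flush(&q, &q.flush_rq),
	       is_internal_flush(&q, &normal));
	return 0;
}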
diff --git a/block/blk-flush.c b/block/blk-flush.c
index e8b2e5c091b1..dd873225da97 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -9,41 +9,38 @@
 
 #include "blk.h"
 
-static struct request *queue_next_ordseq(struct request_queue *q);
+static struct request *queue_next_fseq(struct request_queue *q);
 
-/*
- * Cache flushing for ordered writes handling
- */
-unsigned blk_ordered_cur_seq(struct request_queue *q)
+unsigned blk_flush_cur_seq(struct request_queue *q)
 {
-	if (!q->ordseq)
+	if (!q->flush_seq)
 		return 0;
-	return 1 << ffz(q->ordseq);
+	return 1 << ffz(q->flush_seq);
 }
 
-static struct request *blk_ordered_complete_seq(struct request_queue *q,
-						unsigned seq, int error)
+static struct request *blk_flush_complete_seq(struct request_queue *q,
+					      unsigned seq, int error)
 {
 	struct request *next_rq = NULL;
 
-	if (error && !q->orderr)
-		q->orderr = error;
+	if (error && !q->flush_err)
+		q->flush_err = error;
 
-	BUG_ON(q->ordseq & seq);
-	q->ordseq |= seq;
+	BUG_ON(q->flush_seq & seq);
+	q->flush_seq |= seq;
 
-	if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE) {
-		/* not complete yet, queue the next ordered sequence */
-		next_rq = queue_next_ordseq(q);
+	if (blk_flush_cur_seq(q) != QUEUE_FSEQ_DONE) {
+		/* not complete yet, queue the next flush sequence */
+		next_rq = queue_next_fseq(q);
 	} else {
-		/* complete this barrier request */
-		__blk_end_request_all(q->orig_bar_rq, q->orderr);
-		q->orig_bar_rq = NULL;
-		q->ordseq = 0;
+		/* complete this flush request */
+		__blk_end_request_all(q->orig_flush_rq, q->flush_err);
+		q->orig_flush_rq = NULL;
+		q->flush_seq = 0;
 
-		/* dispatch the next barrier if there's one */
-		if (!list_empty(&q->pending_barriers)) {
-			next_rq = list_entry_rq(q->pending_barriers.next);
+		/* dispatch the next flush if there's one */
+		if (!list_empty(&q->pending_flushes)) {
+			next_rq = list_entry_rq(q->pending_flushes.next);
 			list_move(&next_rq->queuelist, &q->queue_head);
 		}
 	}
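Aside, not part of the patch: the "1 << ffz(flush_seq)" idiom in blk_flush_cur_seq() above drives the whole state machine. Each completed stage sets its bit in flush_seq, so the lowest clear bit always names the next stage to run, and blk_flush_complete_seq() loops the machine forward until the DONE bit is reached. A standalone userspace sketch, with ffz() reimplemented here because in the kernel it is an arch-provided helper:

#include <stdio.h>

#define FSEQ_STARTED	(1 << 0)	/* mirrors QUEUE_FSEQ_STARTED */
#define FSEQ_DONE	(1 << 4)	/* mirrors QUEUE_FSEQ_DONE */

/* first zero bit; stand-in for the kernel's ffz() */
static unsigned ffz(unsigned long x)
{
	unsigned i = 0;

	while (x & (1UL << i))
		i++;
	return i;
}

int main(void)
{
	unsigned flush_seq = FSEQ_STARTED;

	/* each completed stage sets its bit; the lowest clear bit names
	 * the next stage, exactly as blk_flush_cur_seq() computes it */
	while ((1U << ffz(flush_seq)) != FSEQ_DONE) {
		printf("next stage bit: 0x%x\n", 1U << ffz(flush_seq));
		flush_seq |= 1U << ffz(flush_seq);
	}
	printf("sequence done\n");
	return 0;
}

Running it prints the PREFLUSH (0x2), DATA (0x4), and POSTFLUSH (0x8) bits in order before reporting completion.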
@@ -53,19 +50,19 @@ static struct request *blk_ordered_complete_seq(struct request_queue *q,
 static void pre_flush_end_io(struct request *rq, int error)
 {
 	elv_completed_request(rq->q, rq);
-	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error);
+	blk_flush_complete_seq(rq->q, QUEUE_FSEQ_PREFLUSH, error);
 }
 
-static void bar_end_io(struct request *rq, int error)
+static void flush_data_end_io(struct request *rq, int error)
 {
 	elv_completed_request(rq->q, rq);
-	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error);
+	blk_flush_complete_seq(rq->q, QUEUE_FSEQ_DATA, error);
 }
 
 static void post_flush_end_io(struct request *rq, int error)
 {
 	elv_completed_request(rq->q, rq);
-	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
+	blk_flush_complete_seq(rq->q, QUEUE_FSEQ_POSTFLUSH, error);
 }
 
 static void queue_flush(struct request_queue *q, struct request *rq,
@@ -74,34 +71,34 @@ static void queue_flush(struct request_queue *q, struct request *rq,
 	blk_rq_init(q, rq);
 	rq->cmd_type = REQ_TYPE_FS;
 	rq->cmd_flags = REQ_FLUSH;
-	rq->rq_disk = q->orig_bar_rq->rq_disk;
+	rq->rq_disk = q->orig_flush_rq->rq_disk;
 	rq->end_io = end_io;
 
 	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 }
 
-static struct request *queue_next_ordseq(struct request_queue *q)
+static struct request *queue_next_fseq(struct request_queue *q)
 {
-	struct request *rq = &q->bar_rq;
+	struct request *rq = &q->flush_rq;
 
-	switch (blk_ordered_cur_seq(q)) {
-	case QUEUE_ORDSEQ_PREFLUSH:
+	switch (blk_flush_cur_seq(q)) {
+	case QUEUE_FSEQ_PREFLUSH:
 		queue_flush(q, rq, pre_flush_end_io);
 		break;
 
-	case QUEUE_ORDSEQ_BAR:
+	case QUEUE_FSEQ_DATA:
 		/* initialize proxy request and queue it */
 		blk_rq_init(q, rq);
-		init_request_from_bio(rq, q->orig_bar_rq->bio);
+		init_request_from_bio(rq, q->orig_flush_rq->bio);
 		rq->cmd_flags &= ~REQ_HARDBARRIER;
 		if (q->ordered & QUEUE_ORDERED_DO_FUA)
 			rq->cmd_flags |= REQ_FUA;
-		rq->end_io = bar_end_io;
+		rq->end_io = flush_data_end_io;
 
 		elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 		break;
 
-	case QUEUE_ORDSEQ_POSTFLUSH:
+	case QUEUE_FSEQ_POSTFLUSH:
 		queue_flush(q, rq, post_flush_end_io);
 		break;
 
@@ -111,19 +108,20 @@ static struct request *queue_next_ordseq(struct request_queue *q)
 	return rq;
 }
 
-struct request *blk_do_ordered(struct request_queue *q, struct request *rq)
+struct request *blk_do_flush(struct request_queue *q, struct request *rq)
 {
 	unsigned skip = 0;
 
 	if (!(rq->cmd_flags & REQ_HARDBARRIER))
 		return rq;
 
-	if (q->ordseq) {
+	if (q->flush_seq) {
 		/*
-		 * Barrier is already in progress and they can't be
-		 * processed in parallel.  Queue for later processing.
+		 * Sequenced flush is already in progress and they
+		 * can't be processed in parallel.  Queue for later
+		 * processing.
 		 */
-		list_move_tail(&rq->queuelist, &q->pending_barriers);
+		list_move_tail(&rq->queuelist, &q->pending_flushes);
 		return NULL;
 	}
 
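Aside, not part of the patch: the q->flush_seq test in blk_do_flush() above allows only one flush sequence in flight, since the queue embeds a single flush_rq. A barrier that arrives mid-sequence is parked on pending_flushes and re-dispatched by blk_flush_complete_seq() once the current sequence reaches QUEUE_FSEQ_DONE. A minimal sketch of that park-and-redispatch pattern, using illustrative names rather than kernel API:

#include <stdio.h>

static unsigned flush_seq;		/* non-zero while a sequence runs */
static const char *pending[8];		/* stand-in for q->pending_flushes */
static int head, tail;			/* simple FIFO indices */

static void do_flush(const char *rq)
{
	if (flush_seq) {
		pending[tail++] = rq;	/* park it for later */
		return;
	}
	flush_seq = 1;			/* FSEQ_STARTED */
	printf("dispatching flush sequence for %s\n", rq);
}

static void complete_seq(void)
{
	flush_seq = 0;			/* current sequence finished */
	if (head != tail)		/* re-dispatch the next parked one */
		do_flush(pending[head++]);
}

int main(void)
{
	do_flush("A");		/* starts immediately */
	do_flush("B");		/* parked behind A */
	complete_seq();		/* A done, B dispatched */
	complete_seq();		/* B done, nothing pending */
	return 0;
}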
@@ -138,11 +136,11 @@ struct request *blk_do_ordered(struct request_queue *q, struct request *rq)
 	}
 
 	/*
-	 * Start a new ordered sequence
+	 * Start a new flush sequence
 	 */
-	q->orderr = 0;
+	q->flush_err = 0;
 	q->ordered = q->next_ordered;
-	q->ordseq |= QUEUE_ORDSEQ_STARTED;
+	q->flush_seq |= QUEUE_FSEQ_STARTED;
 
 	/*
 	 * For an empty barrier, there's no actual BAR request, which
@@ -154,19 +152,19 @@ struct request *blk_do_ordered(struct request_queue *q, struct request *rq)
 
 	/* stash away the original request */
 	blk_dequeue_request(rq);
-	q->orig_bar_rq = rq;
+	q->orig_flush_rq = rq;
 
 	if (!(q->ordered & QUEUE_ORDERED_DO_PREFLUSH))
-		skip |= QUEUE_ORDSEQ_PREFLUSH;
+		skip |= QUEUE_FSEQ_PREFLUSH;
 
 	if (!(q->ordered & QUEUE_ORDERED_DO_BAR))
-		skip |= QUEUE_ORDSEQ_BAR;
+		skip |= QUEUE_FSEQ_DATA;
 
 	if (!(q->ordered & QUEUE_ORDERED_DO_POSTFLUSH))
-		skip |= QUEUE_ORDSEQ_POSTFLUSH;
+		skip |= QUEUE_FSEQ_POSTFLUSH;
 
 	/* complete skipped sequences and return the first sequence */
-	return blk_ordered_complete_seq(q, skip, 0);
+	return blk_flush_complete_seq(q, skip, 0);
 }
 
 static void bio_end_empty_barrier(struct bio *bio, int err)
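Aside, not part of the patch: the skip mask built above is how optional stages are elided. Each QUEUE_ORDERED_DO_* capability the device does not need becomes a pre-completed QUEUE_FSEQ_* bit, so blk_flush_complete_seq(q, skip, 0) starts the machine at the first stage that must actually run. A worked standalone example, with stand-in names for the kernel constants:

#include <stdio.h>

enum {
	FSEQ_STARTED	= 1 << 0,	/* mirror the QUEUE_FSEQ_* bits */
	FSEQ_PREFLUSH	= 1 << 1,
	FSEQ_DATA	= 1 << 2,
	FSEQ_POSTFLUSH	= 1 << 3,
	FSEQ_DONE	= 1 << 4,

	DO_PREFLUSH	= 1 << 0,	/* stand-ins for QUEUE_ORDERED_DO_* */
	DO_BAR		= 1 << 1,
	DO_POSTFLUSH	= 1 << 2,
};

int main(void)
{
	/* e.g. a write-through device: data write only, no cache flushes */
	unsigned ordered = DO_BAR;
	unsigned skip = 0;

	if (!(ordered & DO_PREFLUSH))
		skip |= FSEQ_PREFLUSH;
	if (!(ordered & DO_BAR))
		skip |= FSEQ_DATA;
	if (!(ordered & DO_POSTFLUSH))
		skip |= FSEQ_POSTFLUSH;

	/* skip == FSEQ_PREFLUSH | FSEQ_POSTFLUSH == 0xa, so the sequence
	 * begins directly at the data write stage */
	printf("skip mask: 0x%x\n", skip);
	return 0;
}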
diff --git a/block/blk.h b/block/blk.h
index 08081e4b294e..24b92bd78f37 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -51,7 +51,7 @@ static inline void blk_clear_rq_complete(struct request *rq)
  */
 #define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))
 
-struct request *blk_do_ordered(struct request_queue *q, struct request *rq);
+struct request *blk_do_flush(struct request_queue *q, struct request *rq);
 
 static inline struct request *__elv_next_request(struct request_queue *q)
 {
@@ -60,7 +60,7 @@ static inline struct request *__elv_next_request(struct request_queue *q)
 	while (1) {
 		while (!list_empty(&q->queue_head)) {
 			rq = list_entry_rq(q->queue_head.next);
-			rq = blk_do_ordered(q, rq);
+			rq = blk_do_flush(q, rq);
 			if (rq)
 				return rq;
 		}
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 20a3710a481b..1cd83ec077db 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -357,13 +357,13 @@ struct request_queue
 	/*
 	 * for flush operations
 	 */
+	unsigned int		ordered, next_ordered;
 	unsigned int		flush_flags;
-
-	unsigned int		ordered, next_ordered, ordseq;
-	int			orderr;
-	struct request		bar_rq;
-	struct request		*orig_bar_rq;
-	struct list_head	pending_barriers;
+	unsigned int		flush_seq;
+	int			flush_err;
+	struct request		flush_rq;
+	struct request		*orig_flush_rq;
+	struct list_head	pending_flushes;
 
 	struct mutex		sysfs_lock;
 
@@ -490,13 +490,13 @@ enum {
 	QUEUE_ORDERED_DO_FUA,
 
 	/*
-	 * Ordered operation sequence
+	 * FLUSH/FUA sequences.
 	 */
-	QUEUE_ORDSEQ_STARTED	= (1 << 0), /* flushing in progress */
-	QUEUE_ORDSEQ_PREFLUSH	= (1 << 1), /* pre-flushing in progress */
-	QUEUE_ORDSEQ_BAR	= (1 << 2), /* barrier write in progress */
-	QUEUE_ORDSEQ_POSTFLUSH	= (1 << 3), /* post-flushing in progress */
-	QUEUE_ORDSEQ_DONE	= (1 << 4),
+	QUEUE_FSEQ_STARTED	= (1 << 0), /* flushing in progress */
+	QUEUE_FSEQ_PREFLUSH	= (1 << 1), /* pre-flushing in progress */
+	QUEUE_FSEQ_DATA		= (1 << 2), /* data write in progress */
+	QUEUE_FSEQ_POSTFLUSH	= (1 << 3), /* post-flushing in progress */
+	QUEUE_FSEQ_DONE		= (1 << 4),
 };
 
 #define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)