author     Linus Torvalds <torvalds@linux-foundation.org>  2010-10-22 20:07:18 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-10-22 20:07:18 -0400
commit     a2887097f25cd38cadfc11d10769e2b349fb5eca (patch)
tree       cd4adcb305365d6ba9acd2c02d4eb9d0125c6f8d /block
parent     8abfc6e7a45eb74e51904bbae676fae008b11366 (diff)
parent     005a1d15f5a6b2bb4ada80349513effbf22b4588 (diff)
Merge branch 'for-2.6.37/barrier' of git://git.kernel.dk/linux-2.6-block
* 'for-2.6.37/barrier' of git://git.kernel.dk/linux-2.6-block: (46 commits)
  xen-blkfront: disable barrier/flush write support
  Added blk-lib.c and blk-barrier.c was renamed to blk-flush.c
  block: remove BLKDEV_IFL_WAIT
  aic7xxx_old: removed unused 'req' variable
  block: remove the BH_Eopnotsupp flag
  block: remove the BLKDEV_IFL_BARRIER flag
  block: remove the WRITE_BARRIER flag
  swap: do not send discards as barriers
  fat: do not send discards as barriers
  ext4: do not send discards as barriers
  jbd2: replace barriers with explicit flush / FUA usage
  jbd2: Modify ASYNC_COMMIT code to not rely on queue draining on barrier
  jbd: replace barriers with explicit flush / FUA usage
  nilfs2: replace barriers with explicit flush / FUA usage
  reiserfs: replace barriers with explicit flush / FUA usage
  gfs2: replace barriers with explicit flush / FUA usage
  btrfs: replace barriers with explicit flush / FUA usage
  xfs: replace barriers with explicit flush / FUA usage
  block: pass gfp_mask and flags to sb_issue_discard
  dm: convey that all flushes are processed as empty
  ...
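For orientation: the series replaces the old WRITE_BARRIER / queue-ordering machinery with explicit cache flushes (REQ_FLUSH) and forced-unit-access writes (REQ_FUA). A minimal, hypothetical sketch of what a journalling filesystem's commit path looks like after the conversion, assuming the WRITE_FLUSH_FUA helper introduced alongside this series (the function name below is made up for illustration):

#include <linux/fs.h>
#include <linux/bio.h>

/* Illustrative only: commit-block submission, post-2.6.37 style. */
static void example_submit_commit_block(struct bio *bio)
{
	/* previously: submit_bio(WRITE_BARRIER, bio); */
	submit_bio(WRITE_FLUSH_FUA, bio);	/* preflush the cache, then a FUA write */
}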
Diffstat (limited to 'block')
-rw-r--r--  block/Makefile          2
-rw-r--r--  block/blk-barrier.c   350
-rw-r--r--  block/blk-core.c       72
-rw-r--r--  block/blk-flush.c     262
-rw-r--r--  block/blk-lib.c        39
-rw-r--r--  block/blk-settings.c   20
-rw-r--r--  block/blk.h             8
-rw-r--r--  block/elevator.c       79
-rw-r--r--  block/ioctl.c           4
9 files changed, 350 insertions(+), 486 deletions(-)
diff --git a/block/Makefile b/block/Makefile
index c850d5ef80a2..0fec4b3fab51 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -3,7 +3,7 @@
 #
 
 obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \
-			blk-barrier.o blk-settings.o blk-ioc.o blk-map.o \
+			blk-flush.o blk-settings.o blk-ioc.o blk-map.o \
 			blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
 			blk-iopoll.o blk-lib.o ioctl.o genhd.o scsi_ioctl.o
 
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
deleted file mode 100644
index f0faefca032f..000000000000
--- a/block/blk-barrier.c
+++ /dev/null
@@ -1,350 +0,0 @@
1/*
2 * Functions related to barrier IO handling
3 */
4#include <linux/kernel.h>
5#include <linux/module.h>
6#include <linux/bio.h>
7#include <linux/blkdev.h>
8#include <linux/gfp.h>
9
10#include "blk.h"
11
12/**
13 * blk_queue_ordered - does this queue support ordered writes
14 * @q: the request queue
15 * @ordered: one of QUEUE_ORDERED_*
16 *
17 * Description:
18 * For journalled file systems, doing ordered writes on a commit
19 * block instead of explicitly doing wait_on_buffer (which is bad
20 * for performance) can be a big win. Block drivers supporting this
21 * feature should call this function and indicate so.
22 *
23 **/
24int blk_queue_ordered(struct request_queue *q, unsigned ordered)
25{
26 if (ordered != QUEUE_ORDERED_NONE &&
27 ordered != QUEUE_ORDERED_DRAIN &&
28 ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
29 ordered != QUEUE_ORDERED_DRAIN_FUA &&
30 ordered != QUEUE_ORDERED_TAG &&
31 ordered != QUEUE_ORDERED_TAG_FLUSH &&
32 ordered != QUEUE_ORDERED_TAG_FUA) {
33 printk(KERN_ERR "blk_queue_ordered: bad value %d\n", ordered);
34 return -EINVAL;
35 }
36
37 q->ordered = ordered;
38 q->next_ordered = ordered;
39
40 return 0;
41}
42EXPORT_SYMBOL(blk_queue_ordered);
43
44/*
45 * Cache flushing for ordered writes handling
46 */
47unsigned blk_ordered_cur_seq(struct request_queue *q)
48{
49 if (!q->ordseq)
50 return 0;
51 return 1 << ffz(q->ordseq);
52}
53
54unsigned blk_ordered_req_seq(struct request *rq)
55{
56 struct request_queue *q = rq->q;
57
58 BUG_ON(q->ordseq == 0);
59
60 if (rq == &q->pre_flush_rq)
61 return QUEUE_ORDSEQ_PREFLUSH;
62 if (rq == &q->bar_rq)
63 return QUEUE_ORDSEQ_BAR;
64 if (rq == &q->post_flush_rq)
65 return QUEUE_ORDSEQ_POSTFLUSH;
66
67 /*
68 * !fs requests don't need to follow barrier ordering. Always
69 * put them at the front. This fixes the following deadlock.
70 *
71 * http://thread.gmane.org/gmane.linux.kernel/537473
72 */
73 if (rq->cmd_type != REQ_TYPE_FS)
74 return QUEUE_ORDSEQ_DRAIN;
75
76 if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
77 (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR))
78 return QUEUE_ORDSEQ_DRAIN;
79 else
80 return QUEUE_ORDSEQ_DONE;
81}
82
83bool blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
84{
85 struct request *rq;
86
87 if (error && !q->orderr)
88 q->orderr = error;
89
90 BUG_ON(q->ordseq & seq);
91 q->ordseq |= seq;
92
93 if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE)
94 return false;
95
96 /*
97 * Okay, sequence complete.
98 */
99 q->ordseq = 0;
100 rq = q->orig_bar_rq;
101 __blk_end_request_all(rq, q->orderr);
102 return true;
103}
104
105static void pre_flush_end_io(struct request *rq, int error)
106{
107 elv_completed_request(rq->q, rq);
108 blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error);
109}
110
111static void bar_end_io(struct request *rq, int error)
112{
113 elv_completed_request(rq->q, rq);
114 blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error);
115}
116
117static void post_flush_end_io(struct request *rq, int error)
118{
119 elv_completed_request(rq->q, rq);
120 blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
121}
122
123static void queue_flush(struct request_queue *q, unsigned which)
124{
125 struct request *rq;
126 rq_end_io_fn *end_io;
127
128 if (which == QUEUE_ORDERED_DO_PREFLUSH) {
129 rq = &q->pre_flush_rq;
130 end_io = pre_flush_end_io;
131 } else {
132 rq = &q->post_flush_rq;
133 end_io = post_flush_end_io;
134 }
135
136 blk_rq_init(q, rq);
137 rq->cmd_type = REQ_TYPE_FS;
138 rq->cmd_flags = REQ_HARDBARRIER | REQ_FLUSH;
139 rq->rq_disk = q->orig_bar_rq->rq_disk;
140 rq->end_io = end_io;
141
142 elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
143}
144
145static inline bool start_ordered(struct request_queue *q, struct request **rqp)
146{
147 struct request *rq = *rqp;
148 unsigned skip = 0;
149
150 q->orderr = 0;
151 q->ordered = q->next_ordered;
152 q->ordseq |= QUEUE_ORDSEQ_STARTED;
153
154 /*
155 * For an empty barrier, there's no actual BAR request, which
156 * in turn makes POSTFLUSH unnecessary. Mask them off.
157 */
158 if (!blk_rq_sectors(rq)) {
159 q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
160 QUEUE_ORDERED_DO_POSTFLUSH);
161 /*
162 * Empty barrier on a write-through device w/ ordered
163 * tag has no command to issue and without any command
164 * to issue, ordering by tag can't be used. Drain
165 * instead.
166 */
167 if ((q->ordered & QUEUE_ORDERED_BY_TAG) &&
168 !(q->ordered & QUEUE_ORDERED_DO_PREFLUSH)) {
169 q->ordered &= ~QUEUE_ORDERED_BY_TAG;
170 q->ordered |= QUEUE_ORDERED_BY_DRAIN;
171 }
172 }
173
174 /* stash away the original request */
175 blk_dequeue_request(rq);
176 q->orig_bar_rq = rq;
177 rq = NULL;
178
179 /*
180 * Queue ordered sequence. As we stack them at the head, we
181 * need to queue in reverse order. Note that we rely on that
182 * no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
183 * request gets inbetween ordered sequence.
184 */
185 if (q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) {
186 queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH);
187 rq = &q->post_flush_rq;
188 } else
189 skip |= QUEUE_ORDSEQ_POSTFLUSH;
190
191 if (q->ordered & QUEUE_ORDERED_DO_BAR) {
192 rq = &q->bar_rq;
193
194 /* initialize proxy request and queue it */
195 blk_rq_init(q, rq);
196 if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
197 rq->cmd_flags |= REQ_WRITE;
198 if (q->ordered & QUEUE_ORDERED_DO_FUA)
199 rq->cmd_flags |= REQ_FUA;
200 init_request_from_bio(rq, q->orig_bar_rq->bio);
201 rq->end_io = bar_end_io;
202
203 elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
204 } else
205 skip |= QUEUE_ORDSEQ_BAR;
206
207 if (q->ordered & QUEUE_ORDERED_DO_PREFLUSH) {
208 queue_flush(q, QUEUE_ORDERED_DO_PREFLUSH);
209 rq = &q->pre_flush_rq;
210 } else
211 skip |= QUEUE_ORDSEQ_PREFLUSH;
212
213 if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && queue_in_flight(q))
214 rq = NULL;
215 else
216 skip |= QUEUE_ORDSEQ_DRAIN;
217
218 *rqp = rq;
219
220 /*
221 * Complete skipped sequences. If whole sequence is complete,
222 * return false to tell elevator that this request is gone.
223 */
224 return !blk_ordered_complete_seq(q, skip, 0);
225}
226
227bool blk_do_ordered(struct request_queue *q, struct request **rqp)
228{
229 struct request *rq = *rqp;
230 const int is_barrier = rq->cmd_type == REQ_TYPE_FS &&
231 (rq->cmd_flags & REQ_HARDBARRIER);
232
233 if (!q->ordseq) {
234 if (!is_barrier)
235 return true;
236
237 if (q->next_ordered != QUEUE_ORDERED_NONE)
238 return start_ordered(q, rqp);
239 else {
240 /*
241 * Queue ordering not supported. Terminate
242 * with prejudice.
243 */
244 blk_dequeue_request(rq);
245 __blk_end_request_all(rq, -EOPNOTSUPP);
246 *rqp = NULL;
247 return false;
248 }
249 }
250
251 /*
252 * Ordered sequence in progress
253 */
254
255 /* Special requests are not subject to ordering rules. */
256 if (rq->cmd_type != REQ_TYPE_FS &&
257 rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
258 return true;
259
260 if (q->ordered & QUEUE_ORDERED_BY_TAG) {
261 /* Ordered by tag. Blocking the next barrier is enough. */
262 if (is_barrier && rq != &q->bar_rq)
263 *rqp = NULL;
264 } else {
265 /* Ordered by draining. Wait for turn. */
266 WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
267 if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
268 *rqp = NULL;
269 }
270
271 return true;
272}
273
274static void bio_end_empty_barrier(struct bio *bio, int err)
275{
276 if (err) {
277 if (err == -EOPNOTSUPP)
278 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
279 clear_bit(BIO_UPTODATE, &bio->bi_flags);
280 }
281 if (bio->bi_private)
282 complete(bio->bi_private);
283 bio_put(bio);
284}
285
286/**
287 * blkdev_issue_flush - queue a flush
288 * @bdev: blockdev to issue flush for
289 * @gfp_mask: memory allocation flags (for bio_alloc)
290 * @error_sector: error sector
291 * @flags: BLKDEV_IFL_* flags to control behaviour
292 *
293 * Description:
294 * Issue a flush for the block device in question. Caller can supply
295 * room for storing the error offset in case of a flush error, if they
296 * wish to. If WAIT flag is not passed then caller may check only what
297 * request was pushed in some internal queue for later handling.
298 */
299int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
300 sector_t *error_sector, unsigned long flags)
301{
302 DECLARE_COMPLETION_ONSTACK(wait);
303 struct request_queue *q;
304 struct bio *bio;
305 int ret = 0;
306
307 if (bdev->bd_disk == NULL)
308 return -ENXIO;
309
310 q = bdev_get_queue(bdev);
311 if (!q)
312 return -ENXIO;
313
314 /*
315 * some block devices may not have their queue correctly set up here
316 * (e.g. loop device without a backing file) and so issuing a flush
317 * here will panic. Ensure there is a request function before issuing
318 * the barrier.
319 */
320 if (!q->make_request_fn)
321 return -ENXIO;
322
323 bio = bio_alloc(gfp_mask, 0);
324 bio->bi_end_io = bio_end_empty_barrier;
325 bio->bi_bdev = bdev;
326 if (test_bit(BLKDEV_WAIT, &flags))
327 bio->bi_private = &wait;
328
329 bio_get(bio);
330 submit_bio(WRITE_BARRIER, bio);
331 if (test_bit(BLKDEV_WAIT, &flags)) {
332 wait_for_completion(&wait);
333 /*
334 * The driver must store the error location in ->bi_sector, if
335 * it supports it. For non-stacked drivers, this should be
336 * copied from blk_rq_pos(rq).
337 */
338 if (error_sector)
339 *error_sector = bio->bi_sector;
340 }
341
342 if (bio_flagged(bio, BIO_EOPNOTSUPP))
343 ret = -EOPNOTSUPP;
344 else if (!bio_flagged(bio, BIO_UPTODATE))
345 ret = -EIO;
346
347 bio_put(bio);
348 return ret;
349}
350EXPORT_SYMBOL(blkdev_issue_flush);
diff --git a/block/blk-core.c b/block/blk-core.c
index 500eb859886e..45141469e89e 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -139,7 +139,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
 {
 	struct request_queue *q = rq->q;
 
-	if (&q->bar_rq != rq) {
+	if (&q->flush_rq != rq) {
 		if (error)
 			clear_bit(BIO_UPTODATE, &bio->bi_flags);
 		else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
@@ -163,13 +163,12 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
 		if (bio->bi_size == 0)
 			bio_endio(bio, error);
 	} else {
-
 		/*
-		 * Okay, this is the barrier request in progress, just
-		 * record the error;
+		 * Okay, this is the sequenced flush request in
+		 * progress, just record the error;
 		 */
-		if (error && !q->orderr)
-			q->orderr = error;
+		if (error && !q->flush_err)
+			q->flush_err = error;
 	}
 }
 
@@ -531,6 +530,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	init_timer(&q->unplug_timer);
 	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
 	INIT_LIST_HEAD(&q->timeout_list);
+	INIT_LIST_HEAD(&q->pending_flushes);
 	INIT_WORK(&q->unplug_work, blk_unplug_work);
 
 	kobject_init(&q->kobj, &blk_queue_ktype);
@@ -1053,22 +1053,6 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
 }
 EXPORT_SYMBOL(blk_insert_request);
 
-/*
- * add-request adds a request to the linked list.
- * queue lock is held and interrupts disabled, as we muck with the
- * request queue list.
- */
-static inline void add_request(struct request_queue *q, struct request *req)
-{
-	drive_stat_acct(req, 1);
-
-	/*
-	 * elevator indicated where it wants this request to be
-	 * inserted at elevator_merge time
-	 */
-	__elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
-}
-
 static void part_round_stats_single(int cpu, struct hd_struct *part,
 				    unsigned long now)
 {
@@ -1217,13 +1201,16 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 	const bool sync = !!(bio->bi_rw & REQ_SYNC);
 	const bool unplug = !!(bio->bi_rw & REQ_UNPLUG);
 	const unsigned long ff = bio->bi_rw & REQ_FAILFAST_MASK;
+	int where = ELEVATOR_INSERT_SORT;
 	int rw_flags;
 
-	if ((bio->bi_rw & REQ_HARDBARRIER) &&
-	    (q->next_ordered == QUEUE_ORDERED_NONE)) {
+	/* REQ_HARDBARRIER is no more */
+	if (WARN_ONCE(bio->bi_rw & REQ_HARDBARRIER,
+		      "block: HARDBARRIER is deprecated, use FLUSH/FUA instead\n")) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
 	}
+
 	/*
 	 * low level driver can indicate that it wants pages above a
 	 * certain limit bounced to low memory (ie for highmem, or even
@@ -1233,7 +1220,12 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 
 	spin_lock_irq(q->queue_lock);
 
-	if (unlikely((bio->bi_rw & REQ_HARDBARRIER)) || elv_queue_empty(q))
+	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
+		where = ELEVATOR_INSERT_FRONT;
+		goto get_rq;
+	}
+
+	if (elv_queue_empty(q))
 		goto get_rq;
 
 	el_ret = elv_merge(q, &req, bio);
@@ -1330,7 +1322,10 @@ get_rq:
 	req->cpu = blk_cpu_to_group(smp_processor_id());
 	if (queue_should_plug(q) && elv_queue_empty(q))
 		blk_plug_device(q);
-	add_request(q, req);
+
+	/* insert the request into the elevator */
+	drive_stat_acct(req, 1);
+	__elv_add_request(q, req, where, 0);
 out:
 	if (unplug || !queue_should_plug(q))
 		__generic_unplug_device(q);
@@ -1530,6 +1525,19 @@ static inline void __generic_make_request(struct bio *bio)
 		if (bio_check_eod(bio, nr_sectors))
 			goto end_io;
 
+		/*
+		 * Filter flush bio's early so that make_request based
+		 * drivers without flush support don't have to worry
+		 * about them.
+		 */
+		if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
+			bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
+			if (!nr_sectors) {
+				err = 0;
+				goto end_io;
+			}
+		}
+
 		if ((bio->bi_rw & REQ_DISCARD) &&
 		    (!blk_queue_discard(q) ||
 		     ((bio->bi_rw & REQ_SECURE) &&
@@ -1794,11 +1802,11 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes)
 static void blk_account_io_done(struct request *req)
 {
 	/*
-	 * Account IO completion. bar_rq isn't accounted as a normal
-	 * IO on queueing nor completion. Accounting the containing
-	 * request is enough.
+	 * Account IO completion. flush_rq isn't accounted as a
+	 * normal IO on queueing nor completion. Accounting the
+	 * containing request is enough.
 	 */
-	if (blk_do_io_stat(req) && req != &req->q->bar_rq) {
+	if (blk_do_io_stat(req) && req != &req->q->flush_rq) {
 		unsigned long duration = jiffies - req->start_time;
 		const int rw = rq_data_dir(req);
 		struct hd_struct *part;
@@ -2523,9 +2531,7 @@ EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
 static void __blk_rq_prep_clone(struct request *dst, struct request *src)
 {
 	dst->cpu = src->cpu;
-	dst->cmd_flags = (rq_data_dir(src) | REQ_NOMERGE);
-	if (src->cmd_flags & REQ_DISCARD)
-		dst->cmd_flags |= REQ_DISCARD;
+	dst->cmd_flags = (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE;
 	dst->cmd_type = src->cmd_type;
 	dst->__sector = blk_rq_pos(src);
 	dst->__data_len = blk_rq_bytes(src);
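With the blk-core.c changes above, FLUSH/FUA intent rides on the bio itself: __make_request() front-inserts such bios and __generic_make_request() strips the bits (and completes empty flushes early) when the queue advertised no flush capability. A hedged sketch of a caller issuing a data-less flush bio; the function name is illustrative only:

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/fs.h>

/* Illustrative only: issue an empty flush against a block device. */
static void example_issue_empty_flush(struct block_device *bdev, struct bio *bio)
{
	bio->bi_bdev = bdev;
	/* nr_sectors == 0, so a queue without flush_flags completes it early */
	submit_bio(WRITE_FLUSH, bio);
}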
diff --git a/block/blk-flush.c b/block/blk-flush.c
new file mode 100644
index 000000000000..54b123d6563e
--- /dev/null
+++ b/block/blk-flush.c
@@ -0,0 +1,262 @@
1/*
2 * Functions to sequence FLUSH and FUA writes.
3 */
4#include <linux/kernel.h>
5#include <linux/module.h>
6#include <linux/bio.h>
7#include <linux/blkdev.h>
8#include <linux/gfp.h>
9
10#include "blk.h"
11
12/* FLUSH/FUA sequences */
13enum {
14 QUEUE_FSEQ_STARTED = (1 << 0), /* flushing in progress */
15 QUEUE_FSEQ_PREFLUSH = (1 << 1), /* pre-flushing in progress */
16 QUEUE_FSEQ_DATA = (1 << 2), /* data write in progress */
17 QUEUE_FSEQ_POSTFLUSH = (1 << 3), /* post-flushing in progress */
18 QUEUE_FSEQ_DONE = (1 << 4),
19};
20
21static struct request *queue_next_fseq(struct request_queue *q);
22
23unsigned blk_flush_cur_seq(struct request_queue *q)
24{
25 if (!q->flush_seq)
26 return 0;
27 return 1 << ffz(q->flush_seq);
28}
29
30static struct request *blk_flush_complete_seq(struct request_queue *q,
31 unsigned seq, int error)
32{
33 struct request *next_rq = NULL;
34
35 if (error && !q->flush_err)
36 q->flush_err = error;
37
38 BUG_ON(q->flush_seq & seq);
39 q->flush_seq |= seq;
40
41 if (blk_flush_cur_seq(q) != QUEUE_FSEQ_DONE) {
42 /* not complete yet, queue the next flush sequence */
43 next_rq = queue_next_fseq(q);
44 } else {
45 /* complete this flush request */
46 __blk_end_request_all(q->orig_flush_rq, q->flush_err);
47 q->orig_flush_rq = NULL;
48 q->flush_seq = 0;
49
50 /* dispatch the next flush if there's one */
51 if (!list_empty(&q->pending_flushes)) {
52 next_rq = list_entry_rq(q->pending_flushes.next);
53 list_move(&next_rq->queuelist, &q->queue_head);
54 }
55 }
56 return next_rq;
57}
58
59static void blk_flush_complete_seq_end_io(struct request_queue *q,
60 unsigned seq, int error)
61{
62 bool was_empty = elv_queue_empty(q);
63 struct request *next_rq;
64
65 next_rq = blk_flush_complete_seq(q, seq, error);
66
67 /*
68 * Moving a request silently to empty queue_head may stall the
69 * queue. Kick the queue in those cases.
70 */
71 if (was_empty && next_rq)
72 __blk_run_queue(q);
73}
74
75static void pre_flush_end_io(struct request *rq, int error)
76{
77 elv_completed_request(rq->q, rq);
78 blk_flush_complete_seq_end_io(rq->q, QUEUE_FSEQ_PREFLUSH, error);
79}
80
81static void flush_data_end_io(struct request *rq, int error)
82{
83 elv_completed_request(rq->q, rq);
84 blk_flush_complete_seq_end_io(rq->q, QUEUE_FSEQ_DATA, error);
85}
86
87static void post_flush_end_io(struct request *rq, int error)
88{
89 elv_completed_request(rq->q, rq);
90 blk_flush_complete_seq_end_io(rq->q, QUEUE_FSEQ_POSTFLUSH, error);
91}
92
93static void init_flush_request(struct request *rq, struct gendisk *disk)
94{
95 rq->cmd_type = REQ_TYPE_FS;
96 rq->cmd_flags = WRITE_FLUSH;
97 rq->rq_disk = disk;
98}
99
100static struct request *queue_next_fseq(struct request_queue *q)
101{
102 struct request *orig_rq = q->orig_flush_rq;
103 struct request *rq = &q->flush_rq;
104
105 blk_rq_init(q, rq);
106
107 switch (blk_flush_cur_seq(q)) {
108 case QUEUE_FSEQ_PREFLUSH:
109 init_flush_request(rq, orig_rq->rq_disk);
110 rq->end_io = pre_flush_end_io;
111 break;
112 case QUEUE_FSEQ_DATA:
113 init_request_from_bio(rq, orig_rq->bio);
114 /*
115 * orig_rq->rq_disk may be different from
116 * bio->bi_bdev->bd_disk if orig_rq got here through
117 * remapping drivers. Make sure rq->rq_disk points
118 * to the same one as orig_rq.
119 */
120 rq->rq_disk = orig_rq->rq_disk;
121 rq->cmd_flags &= ~(REQ_FLUSH | REQ_FUA);
122 rq->cmd_flags |= orig_rq->cmd_flags & (REQ_FLUSH | REQ_FUA);
123 rq->end_io = flush_data_end_io;
124 break;
125 case QUEUE_FSEQ_POSTFLUSH:
126 init_flush_request(rq, orig_rq->rq_disk);
127 rq->end_io = post_flush_end_io;
128 break;
129 default:
130 BUG();
131 }
132
133 elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
134 return rq;
135}
136
137struct request *blk_do_flush(struct request_queue *q, struct request *rq)
138{
139 unsigned int fflags = q->flush_flags; /* may change, cache it */
140 bool has_flush = fflags & REQ_FLUSH, has_fua = fflags & REQ_FUA;
141 bool do_preflush = has_flush && (rq->cmd_flags & REQ_FLUSH);
142 bool do_postflush = has_flush && !has_fua && (rq->cmd_flags & REQ_FUA);
143 unsigned skip = 0;
144
145 /*
146 * Special case. If there's data but flush is not necessary,
147 * the request can be issued directly.
148 *
149 * Flush w/o data should be able to be issued directly too but
150 * currently some drivers assume that rq->bio contains
151 * non-zero data if it isn't NULL and empty FLUSH requests
152 * getting here usually have bio's without data.
153 */
154 if (blk_rq_sectors(rq) && !do_preflush && !do_postflush) {
155 rq->cmd_flags &= ~REQ_FLUSH;
156 if (!has_fua)
157 rq->cmd_flags &= ~REQ_FUA;
158 return rq;
159 }
160
161 /*
162 * Sequenced flushes can't be processed in parallel. If
163 * another one is already in progress, queue for later
164 * processing.
165 */
166 if (q->flush_seq) {
167 list_move_tail(&rq->queuelist, &q->pending_flushes);
168 return NULL;
169 }
170
171 /*
172 * Start a new flush sequence
173 */
174 q->flush_err = 0;
175 q->flush_seq |= QUEUE_FSEQ_STARTED;
176
177 /* adjust FLUSH/FUA of the original request and stash it away */
178 rq->cmd_flags &= ~REQ_FLUSH;
179 if (!has_fua)
180 rq->cmd_flags &= ~REQ_FUA;
181 blk_dequeue_request(rq);
182 q->orig_flush_rq = rq;
183
184 /* skip unneded sequences and return the first one */
185 if (!do_preflush)
186 skip |= QUEUE_FSEQ_PREFLUSH;
187 if (!blk_rq_sectors(rq))
188 skip |= QUEUE_FSEQ_DATA;
189 if (!do_postflush)
190 skip |= QUEUE_FSEQ_POSTFLUSH;
191 return blk_flush_complete_seq(q, skip, 0);
192}
193
194static void bio_end_flush(struct bio *bio, int err)
195{
196 if (err)
197 clear_bit(BIO_UPTODATE, &bio->bi_flags);
198 if (bio->bi_private)
199 complete(bio->bi_private);
200 bio_put(bio);
201}
202
203/**
204 * blkdev_issue_flush - queue a flush
205 * @bdev: blockdev to issue flush for
206 * @gfp_mask: memory allocation flags (for bio_alloc)
207 * @error_sector: error sector
208 *
209 * Description:
210 * Issue a flush for the block device in question. Caller can supply
211 * room for storing the error offset in case of a flush error, if they
212 * wish to. If WAIT flag is not passed then caller may check only what
213 * request was pushed in some internal queue for later handling.
214 */
215int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
216 sector_t *error_sector)
217{
218 DECLARE_COMPLETION_ONSTACK(wait);
219 struct request_queue *q;
220 struct bio *bio;
221 int ret = 0;
222
223 if (bdev->bd_disk == NULL)
224 return -ENXIO;
225
226 q = bdev_get_queue(bdev);
227 if (!q)
228 return -ENXIO;
229
230 /*
231 * some block devices may not have their queue correctly set up here
232 * (e.g. loop device without a backing file) and so issuing a flush
233 * here will panic. Ensure there is a request function before issuing
234 * the flush.
235 */
236 if (!q->make_request_fn)
237 return -ENXIO;
238
239 bio = bio_alloc(gfp_mask, 0);
240 bio->bi_end_io = bio_end_flush;
241 bio->bi_bdev = bdev;
242 bio->bi_private = &wait;
243
244 bio_get(bio);
245 submit_bio(WRITE_FLUSH, bio);
246 wait_for_completion(&wait);
247
248 /*
249 * The driver must store the error location in ->bi_sector, if
250 * it supports it. For non-stacked drivers, this should be
251 * copied from blk_rq_pos(rq).
252 */
253 if (error_sector)
254 *error_sector = bio->bi_sector;
255
256 if (!bio_flagged(bio, BIO_UPTODATE))
257 ret = -EIO;
258
259 bio_put(bio);
260 return ret;
261}
262EXPORT_SYMBOL(blkdev_issue_flush);
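A usage sketch for the new blkdev_issue_flush() prototype above: the BLKDEV_IFL_* flags argument is gone and the call now always waits for completion. The wrapper name and the superblock argument are illustrative:

#include <linux/fs.h>
#include <linux/blkdev.h>

/* Illustrative only: flush the device backing a filesystem. */
static int example_flush_backing_dev(struct super_block *sb)
{
	/* waits for the flush; no error-offset reporting requested (NULL) */
	return blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
}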
diff --git a/block/blk-lib.c b/block/blk-lib.c
index c392029a104e..1a320d2406b0 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -39,8 +39,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 {
 	DECLARE_COMPLETION_ONSTACK(wait);
 	struct request_queue *q = bdev_get_queue(bdev);
-	int type = flags & BLKDEV_IFL_BARRIER ?
-		DISCARD_BARRIER : DISCARD_NOBARRIER;
+	int type = REQ_WRITE | REQ_DISCARD;
 	unsigned int max_discard_sectors;
 	struct bio *bio;
 	int ret = 0;
@@ -62,10 +61,10 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		max_discard_sectors &= ~(disc_sects - 1);
 	}
 
-	if (flags & BLKDEV_IFL_SECURE) {
+	if (flags & BLKDEV_DISCARD_SECURE) {
 		if (!blk_queue_secdiscard(q))
 			return -EOPNOTSUPP;
-		type |= DISCARD_SECURE;
+		type |= REQ_SECURE;
 	}
 
 	while (nr_sects && !ret) {
@@ -78,8 +77,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		bio->bi_sector = sector;
 		bio->bi_end_io = blkdev_discard_end_io;
 		bio->bi_bdev = bdev;
-		if (flags & BLKDEV_IFL_WAIT)
-			bio->bi_private = &wait;
+		bio->bi_private = &wait;
 
 		if (nr_sects > max_discard_sectors) {
 			bio->bi_size = max_discard_sectors << 9;
@@ -93,8 +91,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		bio_get(bio);
 		submit_bio(type, bio);
 
-		if (flags & BLKDEV_IFL_WAIT)
-			wait_for_completion(&wait);
+		wait_for_completion(&wait);
 
 		if (bio_flagged(bio, BIO_EOPNOTSUPP))
 			ret = -EOPNOTSUPP;
@@ -140,7 +137,6 @@ static void bio_batch_end_io(struct bio *bio, int err)
  * @sector: start sector
  * @nr_sects: number of sectors to write
  * @gfp_mask: memory allocation flags (for bio_alloc)
- * @flags: BLKDEV_IFL_* flags to control behaviour
  *
  * Description:
  *  Generate and issue number of bios with zerofiled pages.
@@ -149,7 +145,7 @@ static void bio_batch_end_io(struct bio *bio, int err)
  */
 
 int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
-			sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
+			sector_t nr_sects, gfp_t gfp_mask)
 {
 	int ret;
 	struct bio *bio;
@@ -162,12 +158,6 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 	bb.wait = &wait;
 	bb.end_io = NULL;
 
-	if (flags & BLKDEV_IFL_BARRIER) {
-		/* issue async barrier before the data */
-		ret = blkdev_issue_flush(bdev, gfp_mask, NULL, 0);
-		if (ret)
-			return ret;
-	}
 submit:
 	ret = 0;
 	while (nr_sects != 0) {
@@ -181,8 +171,7 @@ submit:
 		bio->bi_sector = sector;
 		bio->bi_bdev = bdev;
 		bio->bi_end_io = bio_batch_end_io;
-		if (flags & BLKDEV_IFL_WAIT)
-			bio->bi_private = &bb;
+		bio->bi_private = &bb;
 
 		while (nr_sects != 0) {
 			sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects);
@@ -199,18 +188,10 @@ submit:
 		issued++;
 		submit_bio(WRITE, bio);
 	}
-	/*
-	 * When all data bios are in flight. Send final barrier if requeted.
-	 */
-	if (nr_sects == 0 && flags & BLKDEV_IFL_BARRIER)
-		ret = blkdev_issue_flush(bdev, gfp_mask, NULL,
-					flags & BLKDEV_IFL_WAIT);
-
 
-	if (flags & BLKDEV_IFL_WAIT)
-		/* Wait for bios in-flight */
-		while ( issued != atomic_read(&bb.done))
-			wait_for_completion(&wait);
+	/* Wait for bios in-flight */
+	while (issued != atomic_read(&bb.done))
+		wait_for_completion(&wait);
 
 	if (!test_bit(BIO_UPTODATE, &bb.flags))
 		/* One of bios in the batch was completed with error.*/
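The blk-lib.c changes above leave blkdev_issue_discard() taking only BLKDEV_DISCARD_SECURE in its flags and drop the flags argument from blkdev_issue_zeroout(); both now always wait. A hedged sketch of the resulting call convention (the helper name is made up):

#include <linux/blkdev.h>

/* Illustrative only: discard a range, then write zeroes over it. */
static int example_discard_then_zero(struct block_device *bdev,
				     sector_t start, sector_t nr_sects)
{
	int ret;

	ret = blkdev_issue_discard(bdev, start, nr_sects, GFP_KERNEL, 0);
	if (ret)
		return ret;
	return blkdev_issue_zeroout(bdev, start, nr_sects, GFP_KERNEL);
}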
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 315b88c8cbbb..701859fb9647 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -792,6 +792,26 @@ void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
 }
 EXPORT_SYMBOL(blk_queue_update_dma_alignment);
 
+/**
+ * blk_queue_flush - configure queue's cache flush capability
+ * @q:		the request queue for the device
+ * @flush:	0, REQ_FLUSH or REQ_FLUSH | REQ_FUA
+ *
+ * Tell block layer cache flush capability of @q.  If it supports
+ * flushing, REQ_FLUSH should be set.  If it supports bypassing
+ * write cache for individual writes, REQ_FUA should be set.
+ */
+void blk_queue_flush(struct request_queue *q, unsigned int flush)
+{
+	WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA));
+
+	if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA)))
+		flush &= ~REQ_FUA;
+
+	q->flush_flags = flush & (REQ_FLUSH | REQ_FUA);
+}
+EXPORT_SYMBOL_GPL(blk_queue_flush);
+
 static int __init blk_settings_init(void)
 {
 	blk_max_low_pfn = max_low_pfn - 1;
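Driver-side sketch for the blk_queue_flush() helper added above: a hypothetical driver with a volatile write cache (and optional FUA support) advertises what the flush machinery may use. Function and parameter names are illustrative:

#include <linux/blkdev.h>

/* Illustrative only: declare cache-flush capability at probe time. */
static void example_declare_cache(struct request_queue *q, bool wce, bool fua)
{
	unsigned int flush = 0;

	if (wce)
		flush |= REQ_FLUSH;
	if (wce && fua)		/* REQ_FUA without REQ_FLUSH is rejected above */
		flush |= REQ_FUA;
	blk_queue_flush(q, flush);
}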
diff --git a/block/blk.h b/block/blk.h
index f864012ec300..1e675e5ade02 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -51,6 +51,8 @@ static inline void blk_clear_rq_complete(struct request *rq)
  */
 #define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))
 
+struct request *blk_do_flush(struct request_queue *q, struct request *rq);
+
 static inline struct request *__elv_next_request(struct request_queue *q)
 {
 	struct request *rq;
@@ -58,7 +60,11 @@ static inline struct request *__elv_next_request(struct request_queue *q)
 	while (1) {
 		while (!list_empty(&q->queue_head)) {
 			rq = list_entry_rq(q->queue_head.next);
-			if (blk_do_ordered(q, &rq))
+			if (!(rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) ||
+			    rq == &q->flush_rq)
+				return rq;
+			rq = blk_do_flush(q, rq);
+			if (rq)
 				return rq;
 		}
 
diff --git a/block/elevator.c b/block/elevator.c
index 4e11559aa2b0..282e8308f7e2 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -617,8 +617,6 @@ void elv_quiesce_end(struct request_queue *q)
 
 void elv_insert(struct request_queue *q, struct request *rq, int where)
 {
-	struct list_head *pos;
-	unsigned ordseq;
 	int unplug_it = 1;
 
 	trace_block_rq_insert(q, rq);
@@ -626,9 +624,16 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 	rq->q = q;
 
 	switch (where) {
+	case ELEVATOR_INSERT_REQUEUE:
+		/*
+		 * Most requeues happen because of a busy condition,
+		 * don't force unplug of the queue for that case.
+		 * Clear unplug_it and fall through.
+		 */
+		unplug_it = 0;
+
 	case ELEVATOR_INSERT_FRONT:
 		rq->cmd_flags |= REQ_SOFTBARRIER;
-
 		list_add(&rq->queuelist, &q->queue_head);
 		break;
 
@@ -668,36 +673,6 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 		q->elevator->ops->elevator_add_req_fn(q, rq);
 		break;
 
-	case ELEVATOR_INSERT_REQUEUE:
-		/*
-		 * If ordered flush isn't in progress, we do front
-		 * insertion; otherwise, requests should be requeued
-		 * in ordseq order.
-		 */
-		rq->cmd_flags |= REQ_SOFTBARRIER;
-
-		/*
-		 * Most requeues happen because of a busy condition,
-		 * don't force unplug of the queue for that case.
-		 */
-		unplug_it = 0;
-
-		if (q->ordseq == 0) {
-			list_add(&rq->queuelist, &q->queue_head);
-			break;
-		}
-
-		ordseq = blk_ordered_req_seq(rq);
-
-		list_for_each(pos, &q->queue_head) {
-			struct request *pos_rq = list_entry_rq(pos);
-			if (ordseq <= blk_ordered_req_seq(pos_rq))
-				break;
-		}
-
-		list_add_tail(&rq->queuelist, pos);
-		break;
-
 	default:
 		printk(KERN_ERR "%s: bad insertion point %d\n",
 		       __func__, where);
@@ -716,26 +691,8 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 void __elv_add_request(struct request_queue *q, struct request *rq, int where,
 		       int plug)
 {
-	if (q->ordcolor)
-		rq->cmd_flags |= REQ_ORDERED_COLOR;
-
 	if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
-		/*
-		 * toggle ordered color
-		 */
-		if (rq->cmd_flags & REQ_HARDBARRIER)
-			q->ordcolor ^= 1;
-
-		/*
-		 * barriers implicitly indicate back insertion
-		 */
-		if (where == ELEVATOR_INSERT_SORT)
-			where = ELEVATOR_INSERT_BACK;
-
-		/*
-		 * this request is scheduling boundary, update
-		 * end_sector
-		 */
+		/* barriers are scheduling boundary, update end_sector */
 		if (rq->cmd_type == REQ_TYPE_FS ||
 		    (rq->cmd_flags & REQ_DISCARD)) {
 			q->end_sector = rq_end_sector(rq);
@@ -855,24 +812,6 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
 		    e->ops->elevator_completed_req_fn)
 			e->ops->elevator_completed_req_fn(q, rq);
 	}
-
-	/*
-	 * Check if the queue is waiting for fs requests to be
-	 * drained for flush sequence.
-	 */
-	if (unlikely(q->ordseq)) {
-		struct request *next = NULL;
-
-		if (!list_empty(&q->queue_head))
-			next = list_entry_rq(q->queue_head.next);
-
-		if (!queue_in_flight(q) &&
-		    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
-		    (!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) {
-			blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
-			__blk_run_queue(q);
-		}
-	}
 }
 
 #define to_elv(atr)	container_of((atr), struct elv_fs_entry, attr)
diff --git a/block/ioctl.c b/block/ioctl.c
index 2c15fe0912c4..d724ceb1d465 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -116,7 +116,7 @@ static int blkdev_reread_part(struct block_device *bdev)
 static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
 			     uint64_t len, int secure)
 {
-	unsigned long flags = BLKDEV_IFL_WAIT;
+	unsigned long flags = 0;
 
 	if (start & 511)
 		return -EINVAL;
@@ -128,7 +128,7 @@ static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
 	if (start + len > (bdev->bd_inode->i_size >> 9))
 		return -EINVAL;
 	if (secure)
-		flags |= BLKDEV_IFL_SECURE;
+		flags |= BLKDEV_DISCARD_SECURE;
 	return blkdev_issue_discard(bdev, start, len, GFP_KERNEL, flags);
 }
 