author     Christoph Hellwig <hch@lst.de>   2014-02-10 11:29:00 -0500
committer  Jens Axboe <axboe@fb.com>        2014-02-10 11:29:00 -0500
commit     18741986a4b1dc4b1f171634c4191abc3b0fa023 (patch)
tree       d0f632fa9b205d5fbcc76ff1cf8cba63112c7da8
parent     ce2c350b2cfe5b5ca5023a6b1ec4d21821d39add (diff)
blk-mq: rework flush sequencing logic
Switch to using a preallocated flush_rq for blk-mq, similar to what's done
with the old request path.  This allows us to set up the request properly
with a tag from the actually allowed range and ->rq_disk as needed by
some drivers.  To make life easier we also switch to dynamic allocation
of ->flush_rq for the old path.

This effectively reverts most of "blk-mq: fix for flush deadlock" and
"blk-mq: Don't reserve a tag for flush request".

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
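In short, both I/O paths now carry a queue-lifetime ->flush_rq instead of building or allocating a flush request on demand. A condensed view of the allocation and teardown sites touched by the hunks below (error unwinding omitted; this is a reading aid, not a drop-in snippet):

	/* legacy path: blk_init_queue_node() preallocates a bare request */
	uninit_q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);

	/* blk-mq path: blk_mq_init_queue() also reserves the driver's per-request pdu space */
	q->flush_rq = kzalloc(round_up(sizeof(struct request) + reg->cmd_size,
				cache_line_size()), GFP_KERNEL);

	/* both are released together with the queue in blk_release_queue() */
	kfree(q->flush_rq);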
-rw-r--r--  block/blk-core.c        |  15
-rw-r--r--  block/blk-flush.c       | 105
-rw-r--r--  block/blk-mq.c          |  54
-rw-r--r--  block/blk-mq.h          |   1
-rw-r--r--  block/blk-sysfs.c       |   2
-rw-r--r--  include/linux/blk-mq.h  |   5
-rw-r--r--  include/linux/blkdev.h  |  11
7 files changed, 76 insertions(+), 117 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 06636f3ad424..853f92749202 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -693,11 +693,20 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 	if (!uninit_q)
 		return NULL;
 
+	uninit_q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
+	if (!uninit_q->flush_rq)
+		goto out_cleanup_queue;
+
 	q = blk_init_allocated_queue(uninit_q, rfn, lock);
 	if (!q)
-		blk_cleanup_queue(uninit_q);
-
+		goto out_free_flush_rq;
 	return q;
+
+out_free_flush_rq:
+	kfree(uninit_q->flush_rq);
+out_cleanup_queue:
+	blk_cleanup_queue(uninit_q);
+	return NULL;
 }
 EXPORT_SYMBOL(blk_init_queue_node);
 
@@ -1127,7 +1136,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
 struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
 {
 	if (q->mq_ops)
-		return blk_mq_alloc_request(q, rw, gfp_mask, false);
+		return blk_mq_alloc_request(q, rw, gfp_mask);
 	else
 		return blk_old_get_request(q, rw, gfp_mask);
 }
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 9143e85226c7..66e2b697f5db 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -130,20 +130,26 @@ static void blk_flush_restore_request(struct request *rq)
 	blk_clear_rq_complete(rq);
 }
 
-static void mq_flush_data_run(struct work_struct *work)
+static void mq_flush_run(struct work_struct *work)
 {
 	struct request *rq;
 
-	rq = container_of(work, struct request, mq_flush_data);
+	rq = container_of(work, struct request, mq_flush_work);
 
 	memset(&rq->csd, 0, sizeof(rq->csd));
 	blk_mq_run_request(rq, true, false);
 }
 
-static void blk_mq_flush_data_insert(struct request *rq)
+static bool blk_flush_queue_rq(struct request *rq)
 {
-	INIT_WORK(&rq->mq_flush_data, mq_flush_data_run);
-	kblockd_schedule_work(rq->q, &rq->mq_flush_data);
+	if (rq->q->mq_ops) {
+		INIT_WORK(&rq->mq_flush_work, mq_flush_run);
+		kblockd_schedule_work(rq->q, &rq->mq_flush_work);
+		return false;
+	} else {
+		list_add_tail(&rq->queuelist, &rq->q->queue_head);
+		return true;
+	}
 }
 
 /**
@@ -187,12 +193,7 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
 
 	case REQ_FSEQ_DATA:
 		list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
-		if (q->mq_ops)
-			blk_mq_flush_data_insert(rq);
-		else {
-			list_add(&rq->queuelist, &q->queue_head);
-			queued = true;
-		}
+		queued = blk_flush_queue_rq(rq);
 		break;
 
 	case REQ_FSEQ_DONE:
@@ -216,9 +217,6 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
 	}
 
 	kicked = blk_kick_flush(q);
-	/* blk_mq_run_flush will run queue */
-	if (q->mq_ops)
-		return queued;
 	return kicked | queued;
 }
 
@@ -230,10 +228,9 @@ static void flush_end_io(struct request *flush_rq, int error)
 	struct request *rq, *n;
 	unsigned long flags = 0;
 
-	if (q->mq_ops) {
-		blk_mq_free_request(flush_rq);
+	if (q->mq_ops)
 		spin_lock_irqsave(&q->mq_flush_lock, flags);
-	}
+
 	running = &q->flush_queue[q->flush_running_idx];
 	BUG_ON(q->flush_pending_idx == q->flush_running_idx);
 
@@ -263,48 +260,14 @@ static void flush_end_io(struct request *flush_rq, int error)
 	 * kblockd.
 	 */
 	if (queued || q->flush_queue_delayed) {
-		if (!q->mq_ops)
-			blk_run_queue_async(q);
-		else
-		/*
-		 * This can be optimized to only run queues with requests
-		 * queued if necessary.
-		 */
-			blk_mq_run_queues(q, true);
+		WARN_ON(q->mq_ops);
+		blk_run_queue_async(q);
 	}
 	q->flush_queue_delayed = 0;
 	if (q->mq_ops)
 		spin_unlock_irqrestore(&q->mq_flush_lock, flags);
 }
 
-static void mq_flush_work(struct work_struct *work)
-{
-	struct request_queue *q;
-	struct request *rq;
-
-	q = container_of(work, struct request_queue, mq_flush_work);
-
-	rq = blk_mq_alloc_request(q, WRITE_FLUSH|REQ_FLUSH_SEQ,
-		__GFP_WAIT|GFP_ATOMIC, false);
-	rq->cmd_type = REQ_TYPE_FS;
-	rq->end_io = flush_end_io;
-
-	blk_mq_run_request(rq, true, false);
-}
-
-/*
- * We can't directly use q->flush_rq, because it doesn't have tag and is not in
- * hctx->rqs[]. so we must allocate a new request, since we can't sleep here,
- * so offload the work to workqueue.
- *
- * Note: we assume a flush request finished in any hardware queue will flush
- * the whole disk cache.
- */
-static void mq_run_flush(struct request_queue *q)
-{
-	kblockd_schedule_work(q, &q->mq_flush_work);
-}
-
 /**
  * blk_kick_flush - consider issuing flush request
  * @q: request_queue being kicked
@@ -339,19 +302,31 @@ static bool blk_kick_flush(struct request_queue *q)
 	 * different from running_idx, which means flush is in flight.
 	 */
 	q->flush_pending_idx ^= 1;
+
 	if (q->mq_ops) {
-		mq_run_flush(q);
-		return true;
+		struct blk_mq_ctx *ctx = first_rq->mq_ctx;
+		struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);
+
+		blk_mq_rq_init(hctx, q->flush_rq);
+		q->flush_rq->mq_ctx = ctx;
+
+		/*
+		 * Reuse the tag value from the first waiting request,
+		 * with blk-mq the tag is generated during request
+		 * allocation and drivers can rely on it being inside
+		 * the range they asked for.
+		 */
+		q->flush_rq->tag = first_rq->tag;
+	} else {
+		blk_rq_init(q, q->flush_rq);
 	}
 
-	blk_rq_init(q, &q->flush_rq);
-	q->flush_rq.cmd_type = REQ_TYPE_FS;
-	q->flush_rq.cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
-	q->flush_rq.rq_disk = first_rq->rq_disk;
-	q->flush_rq.end_io = flush_end_io;
+	q->flush_rq->cmd_type = REQ_TYPE_FS;
+	q->flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
+	q->flush_rq->rq_disk = first_rq->rq_disk;
+	q->flush_rq->end_io = flush_end_io;
 
-	list_add_tail(&q->flush_rq.queuelist, &q->queue_head);
-	return true;
+	return blk_flush_queue_rq(q->flush_rq);
 }
 
 static void flush_data_end_io(struct request *rq, int error)
@@ -407,11 +382,8 @@ void blk_insert_flush(struct request *rq)
 	/*
 	 * @policy now records what operations need to be done.  Adjust
 	 * REQ_FLUSH and FUA for the driver.
-	 * We keep REQ_FLUSH for mq to track flush requests. For !FUA,
-	 * we never dispatch the request directly.
 	 */
-	if (rq->cmd_flags & REQ_FUA)
-		rq->cmd_flags &= ~REQ_FLUSH;
+	rq->cmd_flags &= ~REQ_FLUSH;
 	if (!(fflags & REQ_FUA))
 		rq->cmd_flags &= ~REQ_FUA;
 
@@ -560,5 +532,4 @@ EXPORT_SYMBOL(blkdev_issue_flush);
 void blk_mq_init_flush(struct request_queue *q)
 {
 	spin_lock_init(&q->mq_flush_lock);
-	INIT_WORK(&q->mq_flush_work, mq_flush_work);
 }
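Putting the blk-flush.c changes above together: the flush request is no longer allocated from a workqueue; blk_kick_flush() re-initializes the preallocated q->flush_rq in place and both paths funnel through blk_flush_queue_rq(). A condensed sketch of the blk-mq branch after this patch (locking and the surrounding flush state machine omitted; see the hunks above for the authoritative version):

	/* inside blk_kick_flush(), condensed from the hunk above */
	struct blk_mq_ctx *ctx = first_rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);

	blk_mq_rq_init(hctx, q->flush_rq);	/* recycle the preallocated request */
	q->flush_rq->mq_ctx = ctx;
	q->flush_rq->tag = first_rq->tag;	/* borrow the first waiter's tag */
	q->flush_rq->end_io = flush_end_io;

	return blk_flush_queue_rq(q->flush_rq);	/* kblockd work on mq, queue_head otherwise */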
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 14c8f35946e1..a59b0565e940 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -194,27 +194,9 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 }
 
 static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
-					      gfp_t gfp, bool reserved,
-					      int rw)
+					      gfp_t gfp, bool reserved)
 {
-	struct request *req;
-	bool is_flush = false;
-	/*
-	 * flush need allocate a request, leave at least one request for
-	 * non-flush IO to avoid deadlock
-	 */
-	if ((rw & REQ_FLUSH) && !(rw & REQ_FLUSH_SEQ)) {
-		if (atomic_inc_return(&hctx->pending_flush) >=
-		    hctx->queue_depth - hctx->reserved_tags - 1) {
-			atomic_dec(&hctx->pending_flush);
-			return NULL;
-		}
-		is_flush = true;
-	}
-	req = blk_mq_alloc_rq(hctx, gfp, reserved);
-	if (!req && is_flush)
-		atomic_dec(&hctx->pending_flush);
-	return req;
+	return blk_mq_alloc_rq(hctx, gfp, reserved);
 }
 
 static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
@@ -227,7 +209,7 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
 		struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 		struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
-		rq = __blk_mq_alloc_request(hctx, gfp & ~__GFP_WAIT, reserved, rw);
+		rq = __blk_mq_alloc_request(hctx, gfp & ~__GFP_WAIT, reserved);
 		if (rq) {
 			blk_mq_rq_ctx_init(q, ctx, rq, rw);
 			break;
@@ -244,15 +226,14 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
 	return rq;
 }
 
-struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
-				     gfp_t gfp, bool reserved)
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp)
 {
 	struct request *rq;
 
 	if (blk_mq_queue_enter(q))
 		return NULL;
 
-	rq = blk_mq_alloc_request_pinned(q, rw, gfp, reserved);
+	rq = blk_mq_alloc_request_pinned(q, rw, gfp, false);
 	if (rq)
 		blk_mq_put_ctx(rq->mq_ctx);
 	return rq;
@@ -276,7 +257,7 @@ EXPORT_SYMBOL(blk_mq_alloc_reserved_request);
 /*
  * Re-init and set pdu, if we have it
  */
-static void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq)
+void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq)
 {
 	blk_rq_init(hctx->queue, rq);
 
@@ -290,9 +271,6 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
 	const int tag = rq->tag;
 	struct request_queue *q = rq->q;
 
-	if ((rq->cmd_flags & REQ_FLUSH) && !(rq->cmd_flags & REQ_FLUSH_SEQ))
-		atomic_dec(&hctx->pending_flush);
-
 	blk_mq_rq_init(hctx, rq);
 	blk_mq_put_tag(hctx->tags, tag);
 
@@ -946,14 +924,14 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
 	trace_block_getrq(q, bio, rw);
-	rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false, bio->bi_rw);
+	rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false);
 	if (likely(rq))
-		blk_mq_rq_ctx_init(q, ctx, rq, bio->bi_rw);
+		blk_mq_rq_ctx_init(q, ctx, rq, rw);
 	else {
 		blk_mq_put_ctx(ctx);
 		trace_block_sleeprq(q, bio, rw);
-		rq = blk_mq_alloc_request_pinned(q, bio->bi_rw,
-				__GFP_WAIT|GFP_ATOMIC, false);
+		rq = blk_mq_alloc_request_pinned(q, rw, __GFP_WAIT|GFP_ATOMIC,
+				false);
 		ctx = rq->mq_ctx;
 		hctx = q->mq_ops->map_queue(q, ctx->cpu);
 	}
@@ -1230,9 +1208,7 @@ static int blk_mq_init_hw_queues(struct request_queue *q,
 		hctx->queue_num = i;
 		hctx->flags = reg->flags;
 		hctx->queue_depth = reg->queue_depth;
-		hctx->reserved_tags = reg->reserved_tags;
 		hctx->cmd_size = reg->cmd_size;
-		atomic_set(&hctx->pending_flush, 0);
 
 		blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
 					blk_mq_hctx_notify, hctx);
@@ -1412,9 +1388,14 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
 	blk_mq_init_flush(q);
 	blk_mq_init_cpu_queues(q, reg->nr_hw_queues);
 
-	if (blk_mq_init_hw_queues(q, reg, driver_data))
+	q->flush_rq = kzalloc(round_up(sizeof(struct request) + reg->cmd_size,
+				cache_line_size()), GFP_KERNEL);
+	if (!q->flush_rq)
 		goto err_hw;
 
+	if (blk_mq_init_hw_queues(q, reg, driver_data))
+		goto err_flush_rq;
+
 	blk_mq_map_swqueue(q);
 
 	mutex_lock(&all_q_mutex);
@@ -1422,6 +1403,9 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
 	mutex_unlock(&all_q_mutex);
 
 	return q;
+
+err_flush_rq:
+	kfree(q->flush_rq);
 err_hw:
 	kfree(q->mq_map);
 err_map:
diff --git a/block/blk-mq.h b/block/blk-mq.h
index f29b645f0e1c..ed0035cd458e 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -28,6 +28,7 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_init_flush(struct request_queue *q);
 void blk_mq_drain_queue(struct request_queue *q);
 void blk_mq_free_queue(struct request_queue *q);
+void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq);
 
 /*
  * CPU hotplug helpers
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 8095c4a21fc0..7500f876dae4 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -549,6 +549,8 @@ static void blk_release_queue(struct kobject *kobj)
 	if (q->mq_ops)
 		blk_mq_free_queue(q);
 
+	kfree(q->flush_rq);
+
 	blk_trace_shutdown(q);
 
 	bdi_destroy(&q->backing_dev_info);
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 468be242db90..18ba8a627f46 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -36,15 +36,12 @@ struct blk_mq_hw_ctx {
 	struct list_head	page_list;
 	struct blk_mq_tags	*tags;
 
-	atomic_t		pending_flush;
-
 	unsigned long		queued;
 	unsigned long		run;
 #define BLK_MQ_MAX_DISPATCH_ORDER	10
 	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
 
 	unsigned int		queue_depth;
-	unsigned int		reserved_tags;
 	unsigned int		numa_node;
 	unsigned int		cmd_size;	/* per-request extra data */
 
@@ -129,7 +126,7 @@ void blk_mq_insert_request(struct request_queue *, struct request *,
 void blk_mq_run_queues(struct request_queue *q, bool async);
 void blk_mq_free_request(struct request *rq);
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
-struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, bool reserved);
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp);
 struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp);
 struct request *blk_mq_rq_from_tag(struct request_queue *q, unsigned int tag);
 
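For callers of the allocation API, the prototype change above means the reserved flag simply disappears from normal allocations, while reserved-tag users keep going through blk_mq_alloc_reserved_request(), whose prototype is unchanged. An illustrative call-site update, mirroring the blk_get_request() hunk earlier in this commit:

	-	rq = blk_mq_alloc_request(q, rw, gfp_mask, false);
	+	rq = blk_mq_alloc_request(q, rw, gfp_mask);

		/* reserved allocations are untouched by this patch */
		rq = blk_mq_alloc_reserved_request(q, rw, gfp_mask);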
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0375654adb28..b2d25ecbcbc1 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -101,7 +101,7 @@ struct request {
 	};
 	union {
 		struct call_single_data csd;
-		struct work_struct mq_flush_data;
+		struct work_struct mq_flush_work;
 	};
 
 	struct request_queue *q;
@@ -451,13 +451,8 @@ struct request_queue {
 	unsigned long		flush_pending_since;
 	struct list_head	flush_queue[2];
 	struct list_head	flush_data_in_flight;
-	union {
-		struct request	flush_rq;
-		struct {
-			spinlock_t	mq_flush_lock;
-			struct work_struct	mq_flush_work;
-		};
-	};
+	struct request		*flush_rq;
+	spinlock_t		mq_flush_lock;
 
 	struct mutex		sysfs_lock;
 