Diffstat (limited to 'block')
-rw-r--r--  block/elevator.c  |  3
-rw-r--r--  block/ll_rw_blk.c | 42
2 files changed, 21 insertions, 24 deletions
diff --git a/block/elevator.c b/block/elevator.c
index 96a61e029ce5..2fc269f69726 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -323,7 +323,8 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
 	/*
 	 * toggle ordered color
 	 */
-	q->ordcolor ^= 1;
+	if (blk_barrier_rq(rq))
+		q->ordcolor ^= 1;
 
 	/*
 	 * barriers implicitly indicate back insertion
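The elevator.c hunk narrows the ordered-colour toggle: previously every request passing through __elv_add_request() flipped q->ordcolor, after the change only barrier requests do, so the colour tracks barrier generations. A minimal user-space model of the before/after behaviour (model_queue, model_request and the is_barrier flag are illustrative stand-ins, not the kernel structures):

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stand-ins for request_queue_t and struct request. */
struct model_queue   { int ordcolor; };
struct model_request { bool is_barrier; };

/* Old behaviour: every inserted request flipped the ordered colour,
 * whether or not it was a barrier. */
static void add_request_old(struct model_queue *q, struct model_request *rq)
{
	(void)rq;
	q->ordcolor ^= 1;
}

/* New behaviour: only barrier requests flip the colour, so it really
 * does alternate once per barrier "generation". */
static void add_request_new(struct model_queue *q, struct model_request *rq)
{
	if (rq->is_barrier)
		q->ordcolor ^= 1;
}

int main(void)
{
	struct model_request reqs[] = {
		{ false }, { false }, { true }, { false }, { true },
	};
	struct model_queue old_q = { 0 }, new_q = { 0 };

	for (unsigned i = 0; i < sizeof(reqs) / sizeof(reqs[0]); i++) {
		add_request_old(&old_q, &reqs[i]);
		add_request_new(&new_q, &reqs[i]);
		printf("req %u barrier=%d  old ordcolor=%d  new ordcolor=%d\n",
		       i, (int)reqs[i].is_barrier, old_q.ordcolor, new_q.ordcolor);
	}
	return 0;
}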
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index d38b4afa37ef..ee5ed98db4cd 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -508,7 +508,7 @@ static inline struct request *start_ordered(request_queue_t *q,
 
 int blk_do_ordered(request_queue_t *q, struct request **rqp)
 {
-	struct request *rq = *rqp, *allowed_rq;
+	struct request *rq = *rqp;
 	int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
 
 	if (!q->ordseq) {
@@ -532,32 +532,26 @@ int blk_do_ordered(request_queue_t *q, struct request **rqp)
 		}
 	}
 
+	/*
+	 * Ordered sequence in progress
+	 */
+
+	/* Special requests are not subject to ordering rules. */
+	if (!blk_fs_request(rq) &&
+	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
+		return 1;
+
 	if (q->ordered & QUEUE_ORDERED_TAG) {
+		/* Ordered by tag. Blocking the next barrier is enough. */
 		if (is_barrier && rq != &q->bar_rq)
 			*rqp = NULL;
-		return 1;
-	}
-
-	switch (blk_ordered_cur_seq(q)) {
-	case QUEUE_ORDSEQ_PREFLUSH:
-		allowed_rq = &q->pre_flush_rq;
-		break;
-	case QUEUE_ORDSEQ_BAR:
-		allowed_rq = &q->bar_rq;
-		break;
-	case QUEUE_ORDSEQ_POSTFLUSH:
-		allowed_rq = &q->post_flush_rq;
-		break;
-	default:
-		allowed_rq = NULL;
-		break;
+	} else {
+		/* Ordered by draining. Wait for turn. */
+		WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
+		if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
+			*rqp = NULL;
 	}
 
-	if (rq != allowed_rq &&
-	    (blk_fs_request(rq) || rq == &q->pre_flush_rq ||
-	     rq == &q->post_flush_rq))
-		*rqp = NULL;
-
 	return 1;
 }
 
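With the allowed_rq switch gone, the drain path in blk_do_ordered() reduces to a single comparison: a request whose ordered-sequence stage lies beyond the stage the queue is currently processing is held back by clearing *rqp, and a request from an already-completed stage trips the WARN_ON. A rough user-space sketch of that comparison (the stage values and the may_dispatch() helper below are simplified assumptions, not the kernel's definitions):

#include <assert.h>
#include <stdio.h>

/* Simplified stand-ins for the ordered-sequence stages; the kernel's
 * enum has additional states. */
enum ordseq { ORDSEQ_PREFLUSH = 1, ORDSEQ_BAR = 2, ORDSEQ_POSTFLUSH = 4 };

/* Model of the draining decision: a request may dispatch only when its
 * stage matches the queue's current stage; a later stage must wait, and
 * an earlier stage would be a bug (modelled here by the assert, which
 * plays the role of the WARN_ON). */
static int may_dispatch(enum ordseq cur_seq, enum ordseq req_seq)
{
	assert(req_seq >= cur_seq);
	return req_seq <= cur_seq;
}

int main(void)
{
	/* While the pre-flush is in flight, the barrier and post-flush
	 * requests must wait their turn. */
	printf("pre-flush now:  %d\n", may_dispatch(ORDSEQ_PREFLUSH, ORDSEQ_PREFLUSH));
	printf("barrier now:    %d\n", may_dispatch(ORDSEQ_PREFLUSH, ORDSEQ_BAR));
	printf("post-flush now: %d\n", may_dispatch(ORDSEQ_PREFLUSH, ORDSEQ_POSTFLUSH));
	return 0;
}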
@@ -2579,6 +2573,8 @@ void disk_round_stats(struct gendisk *disk)
 	disk->stamp = now;
 }
 
+EXPORT_SYMBOL_GPL(disk_round_stats);
+
 /*
  * queue lock must be held
  */
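The EXPORT_SYMBOL_GPL() addition makes disk_round_stats() linkable from GPL modules; its in-tree callers in ll_rw_blk.c invoke it under the queue lock. A hedged sketch of how a GPL block driver module might call it, assuming it already holds a valid gendisk and its request queue (the function name below is illustrative):

#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>

/*
 * Illustrative only: fold the time since the last stamp into the disk
 * statistics before sampling them, mirroring the in-tree callers that
 * hold the queue lock around disk_round_stats().
 */
static void example_sample_stats(request_queue_t *q, struct gendisk *disk)
{
	spin_lock_irq(q->queue_lock);
	disk_round_stats(disk);		/* now resolvable from GPL modules */
	spin_unlock_irq(q->queue_lock);
}

MODULE_LICENSE("GPL");			/* required to link against a _GPL export */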
@@ -3451,7 +3447,7 @@ int __init blk_dev_init(void)
 	iocontext_cachep = kmem_cache_create("blkdev_ioc",
 			sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
 
-	for (i = 0; i < NR_CPUS; i++)
+	for_each_cpu(i)
 		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
 
 	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
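The last hunk switches the per-CPU completion list initialisation from a fixed 0..NR_CPUS-1 loop to for_each_cpu(), which in this kernel iterates only the CPUs in the possible-CPU map (the helper was later renamed for_each_possible_cpu()). A small user-space model of the difference, with a hypothetical possible-CPU array standing in for cpu_possible_map:

#include <stdio.h>

#define NR_CPUS 8	/* compile-time maximum, as in the old loop bound */

int main(void)
{
	/* Stand-in for the possible-CPU mask: only CPUs 0 and 1 exist. */
	int cpu_possible[NR_CPUS] = { 1, 1, 0, 0, 0, 0, 0, 0 };
	int old_iterations = 0, new_iterations = 0;

	/* Old: touch every per-CPU slot up to the compile-time maximum. */
	for (int i = 0; i < NR_CPUS; i++)
		old_iterations++;

	/* New: visit only the CPUs that can actually come online. */
	for (int i = 0; i < NR_CPUS; i++)
		if (cpu_possible[i])
			new_iterations++;

	printf("old loop: %d iterations, new loop: %d iterations\n",
	       old_iterations, new_iterations);
	return 0;
}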