Diffstat (limited to 'block/ll_rw_blk.c')
-rw-r--r--  block/ll_rw_blk.c  |  53
 1 file changed, 26 insertions(+), 27 deletions(-)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 8e27d0ab0d7c..03d9c82b0fe7 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -304,6 +304,7 @@ static inline void rq_init(request_queue_t *q, struct request *rq)
  * blk_queue_ordered - does this queue support ordered writes
  * @q:        the request queue
  * @ordered:  one of QUEUE_ORDERED_*
+ * @prepare_flush_fn: rq setup helper for cache flush ordered writes
  *
  * Description:
  *   For journalled file systems, doing ordered writes on a commit
@@ -332,6 +333,7 @@ int blk_queue_ordered(request_queue_t *q, unsigned ordered,
                 return -EINVAL;
         }
 
+        q->ordered = ordered;
         q->next_ordered = ordered;
         q->prepare_flush_fn = prepare_flush_fn;
 
@@ -452,7 +454,7 @@ static void queue_flush(request_queue_t *q, unsigned which)
         rq->end_io = end_io;
         q->prepare_flush_fn(q, rq);
 
-        __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+        elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 }
 
 static inline struct request *start_ordered(request_queue_t *q,
@@ -488,7 +490,7 @@ static inline struct request *start_ordered(request_queue_t *q,
         else
                 q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
 
-        __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+        elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 
         if (q->ordered & QUEUE_ORDERED_PREFLUSH) {
                 queue_flush(q, QUEUE_ORDERED_PREFLUSH);
@@ -506,7 +508,7 @@ static inline struct request *start_ordered(request_queue_t *q,
 
 int blk_do_ordered(request_queue_t *q, struct request **rqp)
 {
-        struct request *rq = *rqp, *allowed_rq;
+        struct request *rq = *rqp;
         int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
 
         if (!q->ordseq) {
@@ -530,32 +532,26 @@ int blk_do_ordered(request_queue_t *q, struct request **rqp)
                 }
         }
 
+        /*
+         * Ordered sequence in progress
+         */
+
+        /* Special requests are not subject to ordering rules. */
+        if (!blk_fs_request(rq) &&
+            rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
+                return 1;
+
         if (q->ordered & QUEUE_ORDERED_TAG) {
+                /* Ordered by tag.  Blocking the next barrier is enough. */
                 if (is_barrier && rq != &q->bar_rq)
                         *rqp = NULL;
-                return 1;
-        }
-
-        switch (blk_ordered_cur_seq(q)) {
-        case QUEUE_ORDSEQ_PREFLUSH:
-                allowed_rq = &q->pre_flush_rq;
-                break;
-        case QUEUE_ORDSEQ_BAR:
-                allowed_rq = &q->bar_rq;
-                break;
-        case QUEUE_ORDSEQ_POSTFLUSH:
-                allowed_rq = &q->post_flush_rq;
-                break;
-        default:
-                allowed_rq = NULL;
-                break;
+        } else {
+                /* Ordered by draining.  Wait for turn. */
+                WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
+                if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
+                        *rqp = NULL;
         }
 
-        if (rq != allowed_rq &&
-            (blk_fs_request(rq) || rq == &q->pre_flush_rq ||
-             rq == &q->post_flush_rq))
-                *rqp = NULL;
-
         return 1;
 }
 
@@ -662,7 +658,7 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
  *    Enables a low level driver to set an upper limit on the size of
  *    received requests.
  **/
-void blk_queue_max_sectors(request_queue_t *q, unsigned short max_sectors)
+void blk_queue_max_sectors(request_queue_t *q, unsigned int max_sectors)
 {
         if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
                 max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
@@ -2577,6 +2573,8 @@ void disk_round_stats(struct gendisk *disk)
         disk->stamp = now;
 }
 
+EXPORT_SYMBOL_GPL(disk_round_stats);
+
 /*
  * queue lock must be held
  */
@@ -2632,6 +2630,7 @@ EXPORT_SYMBOL(blk_put_request);
 /**
  * blk_end_sync_rq - executes a completion event on a request
  * @rq: request to complete
+ * @error: end io status of the request
  */
 void blk_end_sync_rq(struct request *rq, int error)
 {
@@ -3153,7 +3152,7 @@ static int __end_that_request_first(struct request *req, int uptodate,
         if (blk_fs_request(req) && req->rq_disk) {
                 const int rw = rq_data_dir(req);
 
-                __disk_stat_add(req->rq_disk, sectors[rw], nr_bytes >> 9);
+                disk_stat_add(req->rq_disk, sectors[rw], nr_bytes >> 9);
         }
 
         total_bytes = bio_nbytes = 0;
@@ -3448,7 +3447,7 @@ int __init blk_dev_init(void)
         iocontext_cachep = kmem_cache_create("blkdev_ioc",
                         sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
 
-        for (i = 0; i < NR_CPUS; i++)
+        for_each_cpu(i)
                 INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
 
         open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);