aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMartin K. Petersen <martin.petersen@oracle.com>2012-09-18 12:19:25 -0400
committerJens Axboe <axboe@kernel.dk>2012-09-20 08:31:38 -0400
commite2a60da74fc8215c68509a89e9a69c66363153db (patch)
treec23dd6540dc211e2b2583c3e950a7f6977e3f1df
parentd41570b7469724005eb78448a69289900f911963 (diff)
block: Clean up special command handling logic
Remove special-casing of non-rw fs style requests (discard). The nomerge flags are consolidated in blk_types.h, and rq_mergeable() and bio_mergeable() have been modified to use them. bio_is_rw() is used in place of bio_has_data() a few places. This is done to distinguish true reads and writes from other fs type requests that carry a payload (e.g. write same). Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com> Acked-by: Mike Snitzer <snitzer@redhat.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--block/blk-core.c13
-rw-r--r--block/blk-merge.c22
-rw-r--r--block/blk.h5
-rw-r--r--block/elevator.c6
-rw-r--r--include/linux/bio.h23
-rw-r--r--include/linux/blk_types.h4
-rw-r--r--include/linux/blkdev.h22
7 files changed, 46 insertions, 49 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 2d739ca10923..5cc29299f6ac 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1657,8 +1657,8 @@ generic_make_request_checks(struct bio *bio)
1657 goto end_io; 1657 goto end_io;
1658 } 1658 }
1659 1659
1660 if (unlikely(!(bio->bi_rw & REQ_DISCARD) && 1660 if (likely(bio_is_rw(bio) &&
1661 nr_sectors > queue_max_hw_sectors(q))) { 1661 nr_sectors > queue_max_hw_sectors(q))) {
1662 printk(KERN_ERR "bio too big device %s (%u > %u)\n", 1662 printk(KERN_ERR "bio too big device %s (%u > %u)\n",
1663 bdevname(bio->bi_bdev, b), 1663 bdevname(bio->bi_bdev, b),
1664 bio_sectors(bio), 1664 bio_sectors(bio),
@@ -1699,8 +1699,7 @@ generic_make_request_checks(struct bio *bio)
1699 1699
1700 if ((bio->bi_rw & REQ_DISCARD) && 1700 if ((bio->bi_rw & REQ_DISCARD) &&
1701 (!blk_queue_discard(q) || 1701 (!blk_queue_discard(q) ||
1702 ((bio->bi_rw & REQ_SECURE) && 1702 ((bio->bi_rw & REQ_SECURE) && !blk_queue_secdiscard(q)))) {
1703 !blk_queue_secdiscard(q)))) {
1704 err = -EOPNOTSUPP; 1703 err = -EOPNOTSUPP;
1705 goto end_io; 1704 goto end_io;
1706 } 1705 }
@@ -1818,7 +1817,7 @@ void submit_bio(int rw, struct bio *bio)
1818 * If it's a regular read/write or a barrier with data attached, 1817 * If it's a regular read/write or a barrier with data attached,
1819 * go through the normal accounting stuff before submission. 1818 * go through the normal accounting stuff before submission.
1820 */ 1819 */
1821 if (bio_has_data(bio) && !(rw & REQ_DISCARD)) { 1820 if (bio_has_data(bio)) {
1822 if (rw & WRITE) { 1821 if (rw & WRITE) {
1823 count_vm_events(PGPGOUT, count); 1822 count_vm_events(PGPGOUT, count);
1824 } else { 1823 } else {
@@ -1864,7 +1863,7 @@ EXPORT_SYMBOL(submit_bio);
1864 */ 1863 */
1865int blk_rq_check_limits(struct request_queue *q, struct request *rq) 1864int blk_rq_check_limits(struct request_queue *q, struct request *rq)
1866{ 1865{
1867 if (rq->cmd_flags & REQ_DISCARD) 1866 if (!rq_mergeable(rq))
1868 return 0; 1867 return 0;
1869 1868
1870 if (blk_rq_sectors(rq) > queue_max_sectors(q) || 1869 if (blk_rq_sectors(rq) > queue_max_sectors(q) ||
@@ -2338,7 +2337,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
2338 req->buffer = bio_data(req->bio); 2337 req->buffer = bio_data(req->bio);
2339 2338
2340 /* update sector only for requests with clear definition of sector */ 2339 /* update sector only for requests with clear definition of sector */
2341 if (req->cmd_type == REQ_TYPE_FS || (req->cmd_flags & REQ_DISCARD)) 2340 if (req->cmd_type == REQ_TYPE_FS)
2342 req->__sector += total_bytes >> 9; 2341 req->__sector += total_bytes >> 9;
2343 2342
2344 /* mixed attributes always follow the first bio */ 2343 /* mixed attributes always follow the first bio */
diff --git a/block/blk-merge.c b/block/blk-merge.c
index e76279e41162..86710ca408b8 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -418,18 +418,6 @@ static int attempt_merge(struct request_queue *q, struct request *req,
418 return 0; 418 return 0;
419 419
420 /* 420 /*
421 * Don't merge file system requests and discard requests
422 */
423 if ((req->cmd_flags & REQ_DISCARD) != (next->cmd_flags & REQ_DISCARD))
424 return 0;
425
426 /*
427 * Don't merge discard requests and secure discard requests
428 */
429 if ((req->cmd_flags & REQ_SECURE) != (next->cmd_flags & REQ_SECURE))
430 return 0;
431
432 /*
433 * not contiguous 421 * not contiguous
434 */ 422 */
435 if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next)) 423 if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
@@ -521,15 +509,7 @@ int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
521 509
522bool blk_rq_merge_ok(struct request *rq, struct bio *bio) 510bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
523{ 511{
524 if (!rq_mergeable(rq)) 512 if (!rq_mergeable(rq) || !bio_mergeable(bio))
525 return false;
526
527 /* don't merge file system requests and discard requests */
528 if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
529 return false;
530
531 /* don't merge discard requests and secure discard requests */
532 if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE))
533 return false; 513 return false;
534 514
535 /* different data direction or already started, don't merge */ 515 /* different data direction or already started, don't merge */
diff --git a/block/blk.h b/block/blk.h
index 2a0ea32d249f..ca51543b248c 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -171,14 +171,13 @@ static inline int queue_congestion_off_threshold(struct request_queue *q)
171 * 171 *
172 * a) it's attached to a gendisk, and 172 * a) it's attached to a gendisk, and
173 * b) the queue had IO stats enabled when this request was started, and 173 * b) the queue had IO stats enabled when this request was started, and
174 * c) it's a file system request or a discard request 174 * c) it's a file system request
175 */ 175 */
176static inline int blk_do_io_stat(struct request *rq) 176static inline int blk_do_io_stat(struct request *rq)
177{ 177{
178 return rq->rq_disk && 178 return rq->rq_disk &&
179 (rq->cmd_flags & REQ_IO_STAT) && 179 (rq->cmd_flags & REQ_IO_STAT) &&
180 (rq->cmd_type == REQ_TYPE_FS || 180 (rq->cmd_type == REQ_TYPE_FS);
181 (rq->cmd_flags & REQ_DISCARD));
182} 181}
183 182
184/* 183/*
diff --git a/block/elevator.c b/block/elevator.c
index 6a55d418896f..9b1d42b62f20 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -562,8 +562,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
562 562
563 if (rq->cmd_flags & REQ_SOFTBARRIER) { 563 if (rq->cmd_flags & REQ_SOFTBARRIER) {
564 /* barriers are scheduling boundary, update end_sector */ 564 /* barriers are scheduling boundary, update end_sector */
565 if (rq->cmd_type == REQ_TYPE_FS || 565 if (rq->cmd_type == REQ_TYPE_FS) {
566 (rq->cmd_flags & REQ_DISCARD)) {
567 q->end_sector = rq_end_sector(rq); 566 q->end_sector = rq_end_sector(rq);
568 q->boundary_rq = rq; 567 q->boundary_rq = rq;
569 } 568 }
@@ -605,8 +604,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
605 if (elv_attempt_insert_merge(q, rq)) 604 if (elv_attempt_insert_merge(q, rq))
606 break; 605 break;
607 case ELEVATOR_INSERT_SORT: 606 case ELEVATOR_INSERT_SORT:
608 BUG_ON(rq->cmd_type != REQ_TYPE_FS && 607 BUG_ON(rq->cmd_type != REQ_TYPE_FS);
609 !(rq->cmd_flags & REQ_DISCARD));
610 rq->cmd_flags |= REQ_SORTED; 608 rq->cmd_flags |= REQ_SORTED;
611 q->nr_sorted++; 609 q->nr_sorted++;
612 if (rq_mergeable(rq)) { 610 if (rq_mergeable(rq)) {
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 52b9cbc3e4da..e54305cacc98 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -386,9 +386,28 @@ static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
386/* 386/*
387 * Check whether this bio carries any data or not. A NULL bio is allowed. 387 * Check whether this bio carries any data or not. A NULL bio is allowed.
388 */ 388 */
389static inline int bio_has_data(struct bio *bio) 389static inline bool bio_has_data(struct bio *bio)
390{ 390{
391 return bio && bio->bi_io_vec != NULL; 391 if (bio && bio->bi_vcnt)
392 return true;
393
394 return false;
395}
396
397static inline bool bio_is_rw(struct bio *bio)
398{
399 if (!bio_has_data(bio))
400 return false;
401
402 return true;
403}
404
405static inline bool bio_mergeable(struct bio *bio)
406{
407 if (bio->bi_rw & REQ_NOMERGE_FLAGS)
408 return false;
409
410 return true;
392} 411}
393 412
394/* 413/*
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 3eefbb291192..1b229664f573 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -194,6 +194,10 @@ enum rq_flag_bits {
194 REQ_DISCARD | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE) 194 REQ_DISCARD | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE)
195#define REQ_CLONE_MASK REQ_COMMON_MASK 195#define REQ_CLONE_MASK REQ_COMMON_MASK
196 196
197/* This mask is used for both bio and request merge checking */
198#define REQ_NOMERGE_FLAGS \
199 (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA)
200
197#define REQ_RAHEAD (1 << __REQ_RAHEAD) 201#define REQ_RAHEAD (1 << __REQ_RAHEAD)
198#define REQ_THROTTLED (1 << __REQ_THROTTLED) 202#define REQ_THROTTLED (1 << __REQ_THROTTLED)
199 203
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 4a2ab7c85393..3a6fea7460f1 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -540,8 +540,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
540 540
541#define blk_account_rq(rq) \ 541#define blk_account_rq(rq) \
542 (((rq)->cmd_flags & REQ_STARTED) && \ 542 (((rq)->cmd_flags & REQ_STARTED) && \
543 ((rq)->cmd_type == REQ_TYPE_FS || \ 543 ((rq)->cmd_type == REQ_TYPE_FS))
544 ((rq)->cmd_flags & REQ_DISCARD)))
545 544
546#define blk_pm_request(rq) \ 545#define blk_pm_request(rq) \
547 ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \ 546 ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \
@@ -595,17 +594,16 @@ static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
595 rl->flags &= ~flag; 594 rl->flags &= ~flag;
596} 595}
597 596
597static inline bool rq_mergeable(struct request *rq)
598{
599 if (rq->cmd_type != REQ_TYPE_FS)
600 return false;
598 601
599/* 602 if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
600 * mergeable request must not have _NOMERGE or _BARRIER bit set, nor may 603 return false;
601 * it already be started by driver. 604
602 */ 605 return true;
603#define RQ_NOMERGE_FLAGS \ 606}
604 (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA | REQ_DISCARD)
605#define rq_mergeable(rq) \
606 (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
607 (((rq)->cmd_flags & REQ_DISCARD) || \
608 (rq)->cmd_type == REQ_TYPE_FS))
609 607
610/* 608/*
611 * q->prep_rq_fn return values 609 * q->prep_rq_fn return values