Diffstat (limited to 'include/linux/blkdev.h')
 include/linux/blkdev.h | 34 ++++++++++++++++++++++++++++------
 1 file changed, 28 insertions(+), 6 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 796016e63c1d..01a696b0a4d3 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -435,7 +435,6 @@ struct request_queue {
 	struct delayed_work	delay_work;
 
 	struct backing_dev_info	*backing_dev_info;
-	struct disk_devt *disk_devt;
 
 	/*
 	 * The queue owner gets to use this for whatever they like.
@@ -611,7 +610,6 @@ struct request_queue {
 #define QUEUE_FLAG_FLUSH_NQ    25	/* flush not queueuable */
 #define QUEUE_FLAG_DAX         26	/* device supports DAX */
 #define QUEUE_FLAG_STATS       27	/* track rq completion times */
-#define QUEUE_FLAG_RESTART     28	/* queue needs restart at completion */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
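The QUEUE_FLAG_* values above are bit numbers in the queue's flag word, composed with shifts exactly as QUEUE_FLAG_DEFAULT does; in the kernel they are read and written with test_bit()/set_bit() on q->queue_flags. A minimal userspace sketch of that bitmask arithmetic (flag names and values copied from the diff; everything else is illustrative, not kernel code):

#include <stdio.h>

#define QUEUE_FLAG_FLUSH_NQ	25	/* flush not queueuable */
#define QUEUE_FLAG_DAX		26	/* device supports DAX */
#define QUEUE_FLAG_STATS	27	/* track rq completion times */

int main(void)
{
	unsigned long queue_flags = 0;

	/* Set one flag, then test two of them with (1 << flag) masks. */
	queue_flags |= 1UL << QUEUE_FLAG_DAX;

	printf("DAX:   %d\n", !!(queue_flags & (1UL << QUEUE_FLAG_DAX)));
	printf("STATS: %d\n", !!(queue_flags & (1UL << QUEUE_FLAG_STATS)));
	return 0;
}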
@@ -1674,12 +1672,36 @@ static inline bool bios_segs_mergeable(struct request_queue *q,
 	return true;
 }
 
-static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
-		struct bio *next)
+static inline bool bio_will_gap(struct request_queue *q,
+		struct request *prev_rq,
+		struct bio *prev,
+		struct bio *next)
 {
 	if (bio_has_data(prev) && queue_virt_boundary(q)) {
 		struct bio_vec pb, nb;
 
+		/*
+		 * don't merge if the 1st bio starts with non-zero
+		 * offset, otherwise it is quite difficult to respect
+		 * sg gap limit. We work hard to merge a huge number of small
+		 * single bios in case of mkfs.
+		 */
+		if (prev_rq)
+			bio_get_first_bvec(prev_rq->bio, &pb);
+		else
+			bio_get_first_bvec(prev, &pb);
+		if (pb.bv_offset)
+			return true;
+
+		/*
+		 * We don't need to worry about the situation that the
+		 * merged segment ends in unaligned virt boundary:
+		 *
+		 * - if 'pb' ends aligned, the merged segment ends aligned
+		 * - if 'pb' ends unaligned, the next bio must include
+		 *   one single bvec of 'nb', otherwise the 'nb' can't
+		 *   merge with 'pb'
+		 */
 		bio_get_last_bvec(prev, &pb);
 		bio_get_first_bvec(next, &nb);
 
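The comments in the new code encode two rules: refuse the merge outright when the resulting request's first bvec starts at a non-zero offset, and otherwise only compare prev's last bvec against next's first bvec across the virt boundary. A minimal userspace sketch of that boundary comparison, assuming the usual mask semantics of queue_virt_boundary() (simplified types, illustrative only):

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's struct bio_vec. */
struct bio_vec {
	unsigned int	bv_offset;
	unsigned int	bv_len;
};

/*
 * Gap rule modeled on the check bio_will_gap() performs after
 * bio_get_last_bvec(prev, &pb) / bio_get_first_bvec(next, &nb):
 * the two bvecs may share a segment only if 'nb' starts at offset 0
 * and 'pb' ends on the virt boundary.
 */
static bool bvec_gap(unsigned long virt_boundary_mask,
		     const struct bio_vec *pb, const struct bio_vec *nb)
{
	if (!virt_boundary_mask)
		return false;	/* no boundary constraint, never a gap */
	return nb->bv_offset ||
	       ((pb->bv_offset + pb->bv_len) & virt_boundary_mask);
}

int main(void)
{
	unsigned long mask = 0xfff;	/* 4 KiB virt boundary */
	struct bio_vec a = { .bv_offset = 0,   .bv_len = 4096 };
	struct bio_vec b = { .bv_offset = 0,   .bv_len = 512  };
	struct bio_vec c = { .bv_offset = 512, .bv_len = 512  };

	printf("a..b gap: %d\n", bvec_gap(mask, &a, &b));	/* 0: mergeable */
	printf("a..c gap: %d\n", bvec_gap(mask, &a, &c));	/* 1: gap */
	return 0;
}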
@@ -1692,12 +1714,12 @@ static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
 
 static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
 {
-	return bio_will_gap(req->q, req->biotail, bio);
+	return bio_will_gap(req->q, req, req->biotail, bio);
 }
 
 static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
 {
-	return bio_will_gap(req->q, bio, req->bio);
+	return bio_will_gap(req->q, NULL, bio, req->bio);
 }
 
 int kblockd_schedule_work(struct work_struct *work);
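The two callers differ because only a back merge keeps the existing request's head bio: req_gap_back_merge() passes the request so bio_will_gap() can check the offset of its very first bvec, while req_gap_front_merge() passes NULL because the incoming bio becomes the new head. A hedged miniature model of that asymmetry (hypothetical types, not the kernel API):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct bio_vec { unsigned int bv_offset; unsigned int bv_len; };
struct mini_bio { struct bio_vec first_bvec; };
struct mini_req { struct mini_bio *bio; };	/* ->bio is the request head */

/*
 * The first-bvec rule this patch adds: refuse to merge whenever the bio
 * at the head of the resulting request starts at a non-zero offset. For
 * a back merge the head is prev_rq->bio; for a front merge (prev_rq ==
 * NULL) the incoming bio itself becomes the head.
 */
static bool head_offset_blocks_merge(const struct mini_req *prev_rq,
				     const struct mini_bio *prev)
{
	const struct bio_vec *pb =
		prev_rq ? &prev_rq->bio->first_bvec : &prev->first_bvec;

	return pb->bv_offset != 0;
}

int main(void)
{
	struct mini_bio head = { .first_bvec = { .bv_offset = 512, .bv_len = 512 } };
	struct mini_bio incoming = { .first_bvec = { .bv_offset = 0, .bv_len = 4096 } };
	struct mini_req rq = { .bio = &head };

	/* Back merge: the request keeps its head, so rq.bio decides. */
	printf("back merge blocked:  %d\n", head_offset_blocks_merge(&rq, &head));
	/* Front merge: the incoming bio becomes the new head. */
	printf("front merge blocked: %d\n", head_offset_blocks_merge(NULL, &incoming));
	return 0;
}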