Diffstat (limited to 'include/linux/blkdev.h')
 include/linux/blkdev.h | 82 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 70 insertions(+), 12 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 4a2ab7c85393..1756001210d2 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -270,6 +270,7 @@ struct queue_limits {
 	unsigned int		io_min;
 	unsigned int		io_opt;
 	unsigned int		max_discard_sectors;
+	unsigned int		max_write_same_sectors;
 	unsigned int		discard_granularity;
 	unsigned int		discard_alignment;
 
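The new queue_limits field sits alongside the other topology limits and defaults to zero, so a device advertises WRITE SAME support only if its driver opts in via the setter declared later in this patch. A minimal sketch of how the field can be consulted (the helper name is hypothetical):

#include <linux/blkdev.h>

static inline bool my_queue_supports_write_same(struct request_queue *q)
{
	/* 0 is the default: no WRITE SAME support advertised */
	return q->limits.max_write_same_sectors != 0;
}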
@@ -540,8 +541,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 
 #define blk_account_rq(rq) \
 	(((rq)->cmd_flags & REQ_STARTED) && \
-	 ((rq)->cmd_type == REQ_TYPE_FS || \
-	  ((rq)->cmd_flags & REQ_DISCARD)))
+	 ((rq)->cmd_type == REQ_TYPE_FS))
 
 #define blk_pm_request(rq) \
 	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \
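With this series discards are issued as regular REQ_TYPE_FS requests, so the REQ_DISCARD special case in the accounting test becomes redundant rather than lost functionality. Illustratively (my_account_io() is a hypothetical stand-in for the real accounting calls):

	/* Started filesystem requests are accounted; discards now pass
	 * the REQ_TYPE_FS test on their own.
	 */
	if (blk_account_rq(rq))
		my_account_io(rq);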
@@ -595,17 +595,39 @@ static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
 	rl->flags &= ~flag;
 }
 
+static inline bool rq_mergeable(struct request *rq)
+{
+	if (rq->cmd_type != REQ_TYPE_FS)
+		return false;
 
-/*
- * mergeable request must not have _NOMERGE or _BARRIER bit set, nor may
- * it already be started by driver.
- */
-#define RQ_NOMERGE_FLAGS \
-	(REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA | REQ_DISCARD)
-#define rq_mergeable(rq) \
-	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
-	 (((rq)->cmd_flags & REQ_DISCARD) || \
-	  (rq)->cmd_type == REQ_TYPE_FS))
+	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
+		return false;
+
+	return true;
+}
+
+static inline bool blk_check_merge_flags(unsigned int flags1,
+					 unsigned int flags2)
+{
+	if ((flags1 & REQ_DISCARD) != (flags2 & REQ_DISCARD))
+		return false;
+
+	if ((flags1 & REQ_SECURE) != (flags2 & REQ_SECURE))
+		return false;
+
+	if ((flags1 & REQ_WRITE_SAME) != (flags2 & REQ_WRITE_SAME))
+		return false;
+
+	return true;
+}
+
+static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
+{
+	if (bio_data(a) == bio_data(b))
+		return true;
+
+	return false;
+}
 
 /*
  * q->prep_rq_fn return values
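Together these three helpers replace the old RQ_NOMERGE_FLAGS macro: a request must be a mergeable filesystem request, both sides must agree on the DISCARD, SECURE and WRITE SAME flags, and WRITE SAME bios must point at the same payload buffer. A sketch of a merge gate built from them, in the spirit of the block layer's merge path (the function name is hypothetical):

#include <linux/blkdev.h>

static bool my_rq_bio_may_merge(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq))
		return false;

	/* DISCARD/SECURE/WRITE SAME requests only merge with their own kind */
	if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
		return false;

	/* WRITE SAME replicates one buffer, so both bios must share it */
	if ((rq->cmd_flags & REQ_WRITE_SAME) &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	return true;
}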
@@ -802,6 +824,28 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
 	return blk_rq_cur_bytes(rq) >> 9;
 }
 
+static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
+						     unsigned int cmd_flags)
+{
+	if (unlikely(cmd_flags & REQ_DISCARD))
+		return q->limits.max_discard_sectors;
+
+	if (unlikely(cmd_flags & REQ_WRITE_SAME))
+		return q->limits.max_write_same_sectors;
+
+	return q->limits.max_sectors;
+}
+
+static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
+{
+	struct request_queue *q = rq->q;
+
+	if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
+		return q->limits.max_hw_sectors;
+
+	return blk_queue_get_max_sectors(q, rq->cmd_flags);
+}
+
 /*
  * Request issue related functions.
  */
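blk_rq_get_max_sectors() routes BLOCK_PC (passthrough) requests straight to the hardware limit, while filesystem requests pick the limit that matches their command type. A sketch of the kind of size check a merge or split path can build on it (the helper name is hypothetical):

#include <linux/blkdev.h>

static bool my_rq_fits(struct request *rq, unsigned int extra_sectors)
{
	/* Compare the grown size against the command-type-aware limit */
	return blk_rq_sectors(rq) + extra_sectors <= blk_rq_get_max_sectors(rq);
}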
@@ -857,6 +901,8 @@ extern void blk_queue_max_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_max_discard_sectors(struct request_queue *q,
 		unsigned int max_discard_sectors);
+extern void blk_queue_max_write_same_sectors(struct request_queue *q,
+		unsigned int max_write_same_sectors);
 extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
 extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
 extern void blk_queue_alignment_offset(struct request_queue *q,
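A driver that knows its device's WRITE SAME capability calls the new setter next to the existing limit setters during queue configuration. A hypothetical probe-path sketch (the function name and the 0xffff device limit are illustrative):

#include <linux/blkdev.h>

static void my_drv_configure_queue(struct request_queue *q)
{
	/* Advertise WRITE SAME up to 65535 sectors; 0 would disable it */
	blk_queue_max_write_same_sectors(q, 0xffff);
}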
@@ -987,6 +1033,8 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
 extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
 extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
+extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
+		sector_t nr_sects, gfp_t gfp_mask, struct page *page);
 extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask);
 static inline int sb_issue_discard(struct super_block *sb, sector_t block,
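blkdev_issue_write_same() mirrors blkdev_issue_discard(): the caller supplies a single page whose contents the device replicates across the range. One likely use, sketched under the assumption that the shared zero page is an acceptable payload (the wrapper name is hypothetical):

#include <linux/blkdev.h>
#include <linux/mm.h>

static int my_zero_range(struct block_device *bdev, sector_t sector,
			 sector_t nr_sects)
{
	/* Replicate the zero page across nr_sects sectors; returns an
	 * error if the device does not support WRITE SAME.
	 */
	return blkdev_issue_write_same(bdev, sector, nr_sects, GFP_KERNEL,
				       ZERO_PAGE(0));
}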
@@ -1164,6 +1212,16 @@ static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
 	return queue_discard_zeroes_data(bdev_get_queue(bdev));
 }
 
+static inline unsigned int bdev_write_same(struct block_device *bdev)
+{
+	struct request_queue *q = bdev_get_queue(bdev);
+
+	if (q)
+		return q->limits.max_write_same_sectors;
+
+	return 0;
+}
+
 static inline int queue_dma_alignment(struct request_queue *q)
 {
 	return q ? q->dma_alignment : 511;
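bdev_write_same() gives upper layers a cheap capability probe: a nonzero return is the device's WRITE SAME limit in sectors, zero means unsupported. A hypothetical fallback pattern built on it (the function name is illustrative; blkdev_issue_zeroout() is the existing slow path):

#include <linux/blkdev.h>
#include <linux/mm.h>

static int my_zeroout(struct block_device *bdev, sector_t sector,
		      sector_t nr_sects)
{
	/* Offload via WRITE SAME when advertised, else write zeroes */
	if (bdev_write_same(bdev))
		return blkdev_issue_write_same(bdev, sector, nr_sects,
					       GFP_KERNEL, ZERO_PAGE(0));

	return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL);
}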