author     Linus Torvalds <torvalds@linux-foundation.org>  2012-10-10 20:04:23 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-10-10 20:04:23 -0400
commit     ce40be7a820bb393ac4ac69865f018d2f4038cf0
tree       b1fe5a93346eb06f22b1c303d63ec5456d7212ab  /include/linux/blkdev.h
parent     ba0a5a36f60e4c1152af3a2ae2813251974405bf
parent     02f3939e1a9357b7c370a4a69717cf9c02452737
Merge branch 'for-3.7/core' of git://git.kernel.dk/linux-block
Pull block IO update from Jens Axboe:
"Core block IO bits for 3.7. Not a huge round this time, it contains:
- First series from Kent cleaning up and generalizing bio allocation
and freeing.
- WRITE_SAME support from Martin. [See the sketch after this quote.]
- Mikulas patches to prevent O_DIRECT crashes when someone changes
the block size of a device.
- Make bio_split() work on data-less bio's (like trim/discards).
- A few other minor fixups."
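The WRITE_SAME support called out in the list above is what most of the
blkdev.h hunks below expose: a new queue limit, blkdev_issue_write_same(),
and a bdev_write_same() capability check. As a minimal sketch of how a
caller might use the pair to zero a range -- roughly what the "Make
blkdev_issue_zeroout use WRITE SAME" commit in this series does inside
blkdev_issue_zeroout(); the zero_range() helper name is mine:

static int zero_range(struct block_device *bdev, sector_t sector,
		      sector_t nr_sects)
{
	/* A non-zero max_write_same_sectors means the device supports
	 * the command; see bdev_write_same() in the diff below. */
	if (bdev_write_same(bdev))
		return blkdev_issue_write_same(bdev, sector, nr_sects,
					       GFP_KERNEL, ZERO_PAGE(0));

	/* Fall back to writing zeroed bios the slow way. */
	return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL);
}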
Fixed up silent semantic mis-merge as per Mikulas Patocka and Andrew
Morton. It is due to the VM no longer using a prio-tree (see commit
6b2dbba8b6ac: "mm: replace vma prio_tree with an interval tree").
So make set_blocksize() use mapping_mapped() instead of open-coding the
internal VM knowledge that has changed.
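A minimal sketch of the shape of that fix, assuming only the public
mapping_mapped() predicate (illustrative; the real check sits inside
set_blocksize() in fs/block_dev.c, and the helper name here is mine):

static int bdev_is_mmapped(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	/* mapping_mapped() is the VM's public "is this address_space
	 * mapped into userspace" predicate, so set_blocksize() no longer
	 * needs to know whether i_mmap is a prio-tree or an interval
	 * tree. A mapped device refuses a block size change. */
	return mapping_mapped(mapping);
}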
* 'for-3.7/core' of git://git.kernel.dk/linux-block: (26 commits)
block: makes bio_split support bio without data
scatterlist: refactor the sg_nents
scatterlist: add sg_nents
fs: fix include/percpu-rwsem.h export error
percpu-rw-semaphore: fix documentation typos
fs/block_dev.c:1644:5: sparse: symbol 'blkdev_mmap' was not declared
blockdev: turn a rw semaphore into a percpu rw semaphore
Fix a crash when block device is read and block size is changed at the same time
block: fix request_queue->flags initialization
block: lift the initial queue bypass mode on blk_register_queue() instead of blk_init_allocated_queue()
block: ioctl to zero block ranges
block: Make blkdev_issue_zeroout use WRITE SAME
block: Implement support for WRITE SAME
block: Consolidate command flag and queue limit checks for merges
block: Clean up special command handling logic
block/blk-tag.c: Remove useless kfree
block: remove the duplicated setting for congestion_threshold
block: reject invalid queue attribute values
block: Add bio_clone_bioset(), bio_clone_kmalloc()
block: Consolidate bio_alloc_bioset(), bio_kmalloc()
...
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--  include/linux/blkdev.h | 82
1 file changed, 70 insertions(+), 12 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 4a2ab7c85393..1756001210d2 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -270,6 +270,7 @@ struct queue_limits {
 	unsigned int		io_min;
 	unsigned int		io_opt;
 	unsigned int		max_discard_sectors;
+	unsigned int		max_write_same_sectors;
 	unsigned int		discard_granularity;
 	unsigned int		discard_alignment;
 
@@ -540,8 +541,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 
 #define blk_account_rq(rq) \
 	(((rq)->cmd_flags & REQ_STARTED) && \
-	 ((rq)->cmd_type == REQ_TYPE_FS || \
-	  ((rq)->cmd_flags & REQ_DISCARD)))
+	 ((rq)->cmd_type == REQ_TYPE_FS))
 
 #define blk_pm_request(rq) \
 	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \
@@ -595,17 +595,39 @@ static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
 	rl->flags &= ~flag;
 }
 
+static inline bool rq_mergeable(struct request *rq)
+{
+	if (rq->cmd_type != REQ_TYPE_FS)
+		return false;
 
-/*
- * mergeable request must not have _NOMERGE or _BARRIER bit set, nor may
- * it already be started by driver.
- */
-#define RQ_NOMERGE_FLAGS	\
-	(REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA | REQ_DISCARD)
-#define rq_mergeable(rq)	\
-	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
-	 (((rq)->cmd_flags & REQ_DISCARD) || \
-	  (rq)->cmd_type == REQ_TYPE_FS))
+	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
+		return false;
+
+	return true;
+}
+
+static inline bool blk_check_merge_flags(unsigned int flags1,
+					 unsigned int flags2)
+{
+	if ((flags1 & REQ_DISCARD) != (flags2 & REQ_DISCARD))
+		return false;
+
+	if ((flags1 & REQ_SECURE) != (flags2 & REQ_SECURE))
+		return false;
+
+	if ((flags1 & REQ_WRITE_SAME) != (flags2 & REQ_WRITE_SAME))
+		return false;
+
+	return true;
+}
+
+static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
+{
+	if (bio_data(a) == bio_data(b))
+		return true;
+
+	return false;
+}
 
 /*
  * q->prep_rq_fn return values
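The three inline helpers added above replace the old RQ_NOMERGE_FLAGS /
rq_mergeable() macro pair. A sketch of how a merge path combines them,
modeled loosely on the 3.7-era blk_rq_merge_ok() in block/blk-merge.c
(simplified, not verbatim):

static bool can_merge(struct request *rq, struct bio *bio)
{
	/* Non-filesystem and no-merge requests never merge. */
	if (!rq_mergeable(rq))
		return false;

	/* DISCARD, SECURE and WRITE SAME must not mix with other types. */
	if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
		return false;

	/* WRITE SAME requests may only merge bios repeating one buffer. */
	if ((rq->cmd_flags & REQ_WRITE_SAME) &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	return true;
}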
@@ -802,6 +824,28 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
 	return blk_rq_cur_bytes(rq) >> 9;
 }
 
+static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
+						     unsigned int cmd_flags)
+{
+	if (unlikely(cmd_flags & REQ_DISCARD))
+		return q->limits.max_discard_sectors;
+
+	if (unlikely(cmd_flags & REQ_WRITE_SAME))
+		return q->limits.max_write_same_sectors;
+
+	return q->limits.max_sectors;
+}
+
+static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
+{
+	struct request_queue *q = rq->q;
+
+	if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
+		return q->limits.max_hw_sectors;
+
+	return blk_queue_get_max_sectors(q, rq->cmd_flags);
+}
+
 /*
  * Request issue related functions.
  */
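blk_rq_get_max_sectors() folds the per-command-type limits (discard,
write same, plain I/O) into a single lookup. The kind of size check this
enables in merge code, sketched under the same caveat (the pattern only,
not the kernel's exact code):

static bool back_merge_fits(struct request *rq, struct bio *bio)
{
	/* Reject the merge if the combined request would exceed the
	 * limit that applies to this command type. */
	return blk_rq_sectors(rq) + bio_sectors(bio) <=
	       blk_rq_get_max_sectors(rq);
}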
@@ -857,6 +901,8 @@ extern void blk_queue_max_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_max_discard_sectors(struct request_queue *q,
 		unsigned int max_discard_sectors);
+extern void blk_queue_max_write_same_sectors(struct request_queue *q,
+		unsigned int max_write_same_sectors);
 extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
 extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
 extern void blk_queue_alignment_offset(struct request_queue *q,
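The new limit defaults to zero, i.e. "not supported", so only drivers
that opt in see WRITE SAME requests. A hypothetical probe-time snippet
(the function name and the 0xffff sector count are illustrative, not
from this patch):

static void my_driver_init_queue(struct request_queue *q)
{
	/* Advertise WRITE SAME; a real driver would derive the count
	 * from the device's reported capabilities. */
	blk_queue_max_write_same_sectors(q, 0xffff);
}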
@@ -987,6 +1033,8 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
 extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
 extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
+extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
+		sector_t nr_sects, gfp_t gfp_mask, struct page *page);
 extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask);
 static inline int sb_issue_discard(struct super_block *sb, sector_t block,
@@ -1164,6 +1212,16 @@ static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
 	return queue_discard_zeroes_data(bdev_get_queue(bdev));
 }
 
+static inline unsigned int bdev_write_same(struct block_device *bdev)
+{
+	struct request_queue *q = bdev_get_queue(bdev);
+
+	if (q)
+		return q->limits.max_write_same_sectors;
+
+	return 0;
+}
+
 static inline int queue_dma_alignment(struct request_queue *q)
 {
 	return q ? q->dma_alignment : 511;