author    Mark Brown <broonie@kernel.org>	2015-10-12 13:09:27 -0400
committer Mark Brown <broonie@kernel.org>	2015-10-12 13:09:27 -0400
commit    79828b4fa835f73cdaf4bffa48696abdcbea9d02 (patch)
tree      5e0fa7156acb75ba603022bc807df8f2fedb97a8 /include/linux/blkdev.h
parent    721b51fcf91898299d96f4b72cb9434cda29dce6 (diff)
parent    8c1a9d6323abf0fb1e5dad96cf3f1c783505ea5a (diff)

    Merge remote-tracking branch 'asoc/fix/rt5645' into asoc-fix-rt5645
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--	include/linux/blkdev.h	46
1 file changed, 30 insertions(+), 16 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index d4068c17d0df..38a5ff772a37 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -213,14 +213,6 @@ typedef int (prep_rq_fn) (struct request_queue *, struct request *);
 typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
 
 struct bio_vec;
-struct bvec_merge_data {
-	struct block_device *bi_bdev;
-	sector_t bi_sector;
-	unsigned bi_size;
-	unsigned long bi_rw;
-};
-typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
-			     struct bio_vec *);
 typedef void (softirq_done_fn)(struct request *);
 typedef int (dma_drain_needed_fn)(struct request *);
 typedef int (lld_busy_fn) (struct request_queue *q);
@@ -258,6 +250,7 @@ struct blk_queue_tag {
 struct queue_limits {
 	unsigned long		bounce_pfn;
 	unsigned long		seg_boundary_mask;
+	unsigned long		virt_boundary_mask;
 
 	unsigned int		max_hw_sectors;
 	unsigned int		chunk_sectors;
@@ -268,6 +261,7 @@ struct queue_limits {
 	unsigned int		io_min;
 	unsigned int		io_opt;
 	unsigned int		max_discard_sectors;
+	unsigned int		max_hw_discard_sectors;
 	unsigned int		max_write_same_sectors;
 	unsigned int		discard_granularity;
 	unsigned int		discard_alignment;
@@ -305,7 +299,6 @@ struct request_queue {
 	make_request_fn		*make_request_fn;
 	prep_rq_fn		*prep_rq_fn;
 	unprep_rq_fn		*unprep_rq_fn;
-	merge_bvec_fn		*merge_bvec_fn;
 	softirq_done_fn		*softirq_done_fn;
 	rq_timed_out_fn		*rq_timed_out_fn;
 	dma_drain_needed_fn	*dma_drain_needed;
@@ -462,6 +455,7 @@ struct request_queue {
 
 	struct blk_mq_tag_set	*tag_set;
 	struct list_head	tag_set_list;
+	struct bio_set		*bio_split;
 };
 
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
@@ -486,7 +480,6 @@ struct request_queue {
 #define QUEUE_FLAG_DEAD        19	/* queue tear-down finished */
 #define QUEUE_FLAG_INIT_DONE   20	/* queue is initialized */
 #define QUEUE_FLAG_NO_SG_MERGE 21	/* don't attempt to merge SG segments*/
-#define QUEUE_FLAG_SG_GAPS     22	/* queue doesn't support SG gaps */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
@@ -591,7 +584,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 
 #define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)
 
-#define rq_data_dir(rq)		(((rq)->cmd_flags & 1) != 0)
+#define rq_data_dir(rq)		((int)((rq)->cmd_flags & 1))
 
 /*
  * Driver can handle struct request, if it either has an old style
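The rq_data_dir() change above keeps the macro's READ (0) / WRITE (1) result but makes it a plain int rather than a boolean comparison, so it can index per-direction arrays directly. A minimal caller sketch, assuming a hypothetical my_dev stats structure:

#include <linux/blkdev.h>

struct my_dev {			/* hypothetical driver state */
	u64 bytes[2];		/* indexed by rq_data_dir(): READ=0, WRITE=1 */
};

static void my_dev_account(struct my_dev *dev, struct request *rq)
{
	dev->bytes[rq_data_dir(rq)] += blk_rq_bytes(rq);
}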
@@ -782,6 +775,8 @@ extern void blk_rq_unprep_clone(struct request *rq);
 extern int blk_insert_cloned_request(struct request_queue *q,
 				     struct request *rq);
 extern void blk_delay_queue(struct request_queue *, unsigned long);
+extern void blk_queue_split(struct request_queue *, struct bio **,
+			    struct bio_set *);
 extern void blk_recount_segments(struct request_queue *, struct bio *);
 extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
 extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
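blk_queue_split() pairs with the new bio_split pool added to struct request_queue: a bio-based driver calls it at the top of its make_request_fn so bios exceeding the queue limits are split before processing, replacing the per-driver merge_bvec_fn veto removed above. A minimal sketch, assuming hypothetical my_make_request/my_process_bio:

#include <linux/blkdev.h>
#include <linux/bio.h>

static void my_process_bio(struct bio *bio);	/* hypothetical */

static void my_make_request(struct request_queue *q, struct bio *bio)
{
	/* Split oversized bios against q's limits; the remainder is
	 * resubmitted through generic_make_request() by the helper,
	 * and *bio is updated to the first split piece. */
	blk_queue_split(q, &bio, q->bio_split);

	my_process_bio(bio);
}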
@@ -986,9 +981,9 @@ extern int blk_queue_dma_drain(struct request_queue *q,
 			       void *buf, unsigned int size);
 extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
+extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
 extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
 extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
-extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
 extern void blk_queue_dma_alignment(struct request_queue *, int);
 extern void blk_queue_update_dma_alignment(struct request_queue *, int);
 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
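The new blk_queue_virt_boundary() setter is the declarative replacement for QUEUE_FLAG_SG_GAPS and merge_bvec_fn: instead of vetoing merges in the driver, the driver advertises the alignment window within which scatter-gather elements must be contiguous, and the block core splits bios to honor it. A sketch for hardware that cannot tolerate a gap inside any page-sized window (my_init_queue is hypothetical):

#include <linux/blkdev.h>

static void my_init_queue(struct request_queue *q)
{
	/* Segments handed to the device must not leave a hole inside
	 * any PAGE_SIZE-aligned window (cf. NVMe-style PRP lists). */
	blk_queue_virt_boundary(q, PAGE_SIZE - 1);
}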
@@ -1138,6 +1133,7 @@ extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
 enum blk_default_limits {
 	BLK_MAX_SEGMENTS	= 128,
 	BLK_SAFE_MAX_SECTORS	= 255,
+	BLK_DEF_MAX_SECTORS	= 2560,
 	BLK_MAX_SEGMENT_SIZE	= 65536,
 	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
 };
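For scale: BLK_DEF_MAX_SECTORS is counted in 512-byte sectors, so 2560 sectors permits requests of up to 1.25 MiB, versus the 255-sector (127.5 KiB) BLK_SAFE_MAX_SECTORS fallback.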
@@ -1154,6 +1150,11 @@ static inline unsigned long queue_segment_boundary(struct request_queue *q)
 	return q->limits.seg_boundary_mask;
 }
 
+static inline unsigned long queue_virt_boundary(struct request_queue *q)
+{
+	return q->limits.virt_boundary_mask;
+}
+
 static inline unsigned int queue_max_sectors(struct request_queue *q)
 {
 	return q->limits.max_sectors;
@@ -1354,6 +1355,19 @@ static inline void put_dev_sector(Sector p)
 	page_cache_release(p.v);
 }
 
+/*
+ * Check if adding a bio_vec after bprv with offset would create a gap in
+ * the SG list. Most drivers don't care about this, but some do.
+ */
+static inline bool bvec_gap_to_prev(struct request_queue *q,
+				    struct bio_vec *bprv, unsigned int offset)
+{
+	if (!queue_virt_boundary(q))
+		return false;
+	return offset ||
+	       ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
+}
+
 struct work_struct;
 int kblockd_schedule_work(struct work_struct *work);
 int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
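The predicate reads: a gap exists unless the new element starts at offset zero and the previous one ends exactly on a boundary. A small userspace sketch of the same arithmetic (the mask and vector values are made-up examples, not from the patch):

#include <stdbool.h>
#include <stdio.h>

/* Mirror of the bvec_gap_to_prev() arithmetic, for experimentation. */
static bool gap(unsigned long mask, unsigned prev_off, unsigned prev_len,
		unsigned next_off)
{
	if (!mask)
		return false;		/* no virt boundary: never a gap */
	return next_off || ((prev_off + prev_len) & mask);
}

int main(void)
{
	unsigned long mask = 0xFFF;	/* 4 KiB windows */

	printf("%d\n", gap(mask, 0, 0x1000, 0));     /* 0: prev ends on boundary */
	printf("%d\n", gap(mask, 0, 0x0600, 0));     /* 1: prev ends mid-window */
	printf("%d\n", gap(mask, 0, 0x1000, 0x200)); /* 1: next starts past 0 */
	return 0;
}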
@@ -1555,8 +1569,8 @@ struct block_device_operations {
 	int (*rw_page)(struct block_device *, sector_t, struct page *, int rw);
 	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
 	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
-	long (*direct_access)(struct block_device *, sector_t,
-					void **, unsigned long *pfn, long size);
+	long (*direct_access)(struct block_device *, sector_t, void __pmem **,
+			unsigned long *pfn);
 	unsigned int (*check_events) (struct gendisk *disk,
 				      unsigned int clearing);
 	/* ->media_changed() is DEPRECATED, use ->check_events() instead */
@@ -1574,8 +1588,8 @@ extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
 extern int bdev_read_page(struct block_device *, sector_t, struct page *);
 extern int bdev_write_page(struct block_device *, sector_t, struct page *,
 						struct writeback_control *);
-extern long bdev_direct_access(struct block_device *, sector_t, void **addr,
-				unsigned long *pfn, long size);
+extern long bdev_direct_access(struct block_device *, sector_t,
+		void __pmem **addr, unsigned long *pfn, long size);
 #else /* CONFIG_BLOCK */
 
 struct block_device;
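Both DAX entry points now hand back a __pmem-annotated pointer (checkable with sparse); bdev_direct_access() keeps its size argument and returns the number of bytes addressable at *addr. A hedged caller sketch (my_map_sector is hypothetical; error handling and persistence barriers are omitted):

#include <linux/blkdev.h>

static long my_map_sector(struct block_device *bdev, sector_t sector,
			  void __pmem **addr, unsigned long *pfn)
{
	/* Ask for at least one 512-byte sector of directly addressable
	 * media; a negative return is an errno from the driver. */
	return bdev_direct_access(bdev, sector, addr, pfn, 512);
}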