author		Linus Torvalds <torvalds@linux-foundation.org>	2014-10-18 14:53:51 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-18 14:53:51 -0400
commit		d3dc366bbaf07c125561e90d6da4bb147741101a (patch)
tree		6eb7e79a8ec9df1fa705393c6d15ccea3d104661 /include
parent		511c41d9e6665a07aca94eb00983cf6d77dd87ff (diff)
parent		e19a8a0ad2d255316830ead05b59c5a704434cbb (diff)
Merge branch 'for-3.18/core' of git://git.kernel.dk/linux-block
Pull core block layer changes from Jens Axboe:
 "This is the core block IO pull request for 3.18.  Apart from the new
  and improved flush machinery for blk-mq, this is all mostly bug fixes
  and cleanups.

   - blk-mq timeout updates and fixes from Christoph.

   - Removal of REQ_END, also from Christoph.  We pass it through the
     ->queue_rq() hook for blk-mq instead, freeing up one of the request
     bits.  The space was overly tight on 32-bit, so Martin also killed
     REQ_KERNEL since it's no longer used.

   - blk integrity updates and fixes from Martin and Gu Zheng.

   - Update to the flush machinery for blk-mq from Ming Lei.  Now we
     have a per hardware context flush request, which both cleans up the
     code and should scale better for flush intensive workloads on
     blk-mq.

   - Improve the error printing, from Rob Elliott.

   - Backing device improvements and cleanups from Tejun.

   - Fixup of a misplaced rq_complete() tracepoint from Hannes.

   - Make blk_get_request() return error pointers, fixing up issues
     where we NULL deref when a device goes bad or missing.  From Joe
     Lawrence.

   - Prep work for drastically reducing the memory consumption of dm
     devices from Junichi Nomura.  This allows creating clone bio sets
     without preallocating a lot of memory.

   - Fix a blk-mq hang on certain combinations of queue depths and
     hardware queues from me.

   - Limit memory consumption for blk-mq devices for crash dump
     scenarios and drivers that use crazy high depths (certain SCSI
     shared tag setups).  We now just use a single queue and limited
     depth for that"

* 'for-3.18/core' of git://git.kernel.dk/linux-block: (58 commits)
  block: Remove REQ_KERNEL
  blk-mq: allocate cpumask on the home node
  bio-integrity: remove the needless fail handle of bip_slab creating
  block: include func name in __get_request prints
  block: make blk_update_request print prefix match ratelimited prefix
  blk-merge: don't compute bi_phys_segments from bi_vcnt for cloned bio
  block: fix alignment_offset math that assumes io_min is a power-of-2
  blk-mq: Make bt_clear_tag() easier to read
  blk-mq: fix potential hang if rolling wakeup depth is too high
  block: add bioset_create_nobvec()
  block: use bio_clone_fast() in blk_rq_prep_clone()
  block: misplaced rq_complete tracepoint
  sd: Honor block layer integrity handling flags
  block: Replace strnicmp with strncasecmp
  block: Add T10 Protection Information functions
  block: Don't merge requests if integrity flags differ
  block: Integrity checksum flag
  block: Relocate bio integrity flags
  block: Add a disk flag to block integrity profile
  block: Add prefix to block integrity profile flags
  ...
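The blk_get_request() change called out above moves that function from returning NULL on failure to returning an error pointer. Below is a standalone userspace sketch of the caller-side difference, with simplified stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers from <linux/err.h>; the toy allocator and its failure mode are hypothetical, not the block layer's code.

#include <errno.h>
#include <stdio.h>

/* Simplified userspace stand-ins for the kernel's err.h helpers. */
#define MAX_ERRNO 4095
static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct request { int tag; };

/* Hypothetical allocator mimicking the new convention: a dying queue
 * returns ERR_PTR(-ENODEV) instead of a bare NULL. */
static struct request *toy_get_request(int queue_dying)
{
	static struct request rq = { .tag = 1 };

	if (queue_dying)
		return ERR_PTR(-ENODEV);
	return &rq;
}

int main(void)
{
	struct request *rq = toy_get_request(1);

	/* Old style was "if (!rq)" -- an error pointer is not NULL, so
	 * callers now check IS_ERR() and propagate PTR_ERR(). */
	if (IS_ERR(rq)) {
		printf("request allocation failed: %ld\n", PTR_ERR(rq));
		return 1;
	}
	printf("got request with tag %d\n", rq->tag);
	return 0;
}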
Diffstat (limited to 'include')
-rw-r--r--	include/linux/backing-dev.h	4
-rw-r--r--	include/linux/bio.h	65
-rw-r--r--	include/linux/blk-mq.h	22
-rw-r--r--	include/linux/blk_types.h	18
-rw-r--r--	include/linux/blkdev.h	71
-rw-r--r--	include/linux/crc-t10dif.h	5
-rw-r--r--	include/linux/fs.h	2
-rw-r--r--	include/linux/nfs_fs.h	4
-rw-r--r--	include/linux/t10-pi.h	22
-rw-r--r--	include/scsi/scsi_cmnd.h	36
10 files changed, 159 insertions, 90 deletions
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index e488e9459a93..5da6012b7a14 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -28,12 +28,10 @@ struct dentry;
  * Bits in backing_dev_info.state
  */
 enum bdi_state {
-	BDI_wb_alloc,		/* Default embedded wb allocated */
 	BDI_async_congested,	/* The async (write) queue is getting full */
 	BDI_sync_congested,	/* The sync queue is getting full */
 	BDI_registered,		/* bdi_register() was done */
 	BDI_writeback_running,	/* Writeback is in progress */
-	BDI_unused,		/* Available bits start here */
 };
 
 typedef int (congested_fn)(void *, int);
@@ -50,7 +48,6 @@ enum bdi_stat_item {
 
 struct bdi_writeback {
 	struct backing_dev_info *bdi;	/* our parent bdi */
-	unsigned int nr;
 
 	unsigned long last_old_flush;	/* last old data flush */
 
@@ -124,7 +121,6 @@ void bdi_start_background_writeback(struct backing_dev_info *bdi);
 void bdi_writeback_workfn(struct work_struct *work);
 int bdi_has_dirty_io(struct backing_dev_info *bdi);
 void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);
-void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2);
 
 extern spinlock_t bdi_lock;
 extern struct list_head bdi_list;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index b39e5000ff58..7347f486ceca 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -292,7 +292,24 @@ static inline unsigned bio_segments(struct bio *bio)
  */
 #define bio_get(bio)	atomic_inc(&(bio)->bi_cnt)
 
+enum bip_flags {
+	BIP_BLOCK_INTEGRITY	= 1 << 0, /* block layer owns integrity data */
+	BIP_MAPPED_INTEGRITY	= 1 << 1, /* ref tag has been remapped */
+	BIP_CTRL_NOCHECK	= 1 << 2, /* disable HBA integrity checking */
+	BIP_DISK_NOCHECK	= 1 << 3, /* disable disk integrity checking */
+	BIP_IP_CHECKSUM		= 1 << 4, /* IP checksum */
+};
+
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
+
+static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
+{
+	if (bio->bi_rw & REQ_INTEGRITY)
+		return bio->bi_integrity;
+
+	return NULL;
+}
+
 /*
  * bio integrity payload
  */
@@ -301,21 +318,40 @@ struct bio_integrity_payload {
 
 	struct bvec_iter	bip_iter;
 
-	/* kill - should just use bip_vec */
-	void			*bip_buf;	/* generated integrity data */
-
 	bio_end_io_t		*bip_end_io;	/* saved I/O completion fn */
 
 	unsigned short		bip_slab;	/* slab the bip came from */
 	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
 	unsigned short		bip_max_vcnt;	/* integrity bio_vec slots */
-	unsigned		bip_owns_buf:1;	/* should free bip_buf */
+	unsigned short		bip_flags;	/* control flags */
 
 	struct work_struct	bip_work;	/* I/O completion */
 
 	struct bio_vec		*bip_vec;
 	struct bio_vec		bip_inline_vecs[0];/* embedded bvec array */
 };
+
+static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
+{
+	struct bio_integrity_payload *bip = bio_integrity(bio);
+
+	if (bip)
+		return bip->bip_flags & flag;
+
+	return false;
+}
+
+static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
+{
+	return bip->bip_iter.bi_sector;
+}
+
+static inline void bip_set_seed(struct bio_integrity_payload *bip,
+				sector_t seed)
+{
+	bip->bip_iter.bi_sector = seed;
+}
+
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
 extern void bio_trim(struct bio *bio, int offset, int size);
@@ -342,6 +378,7 @@ static inline struct bio *bio_next_split(struct bio *bio, int sectors,
 }
 
 extern struct bio_set *bioset_create(unsigned int, unsigned int);
+extern struct bio_set *bioset_create_nobvec(unsigned int, unsigned int);
 extern void bioset_free(struct bio_set *);
 extern mempool_t *biovec_create_pool(int pool_entries);
 
@@ -353,7 +390,6 @@ extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
 extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);
 
 extern struct bio_set *fs_bio_set;
-unsigned int bio_integrity_tag_size(struct bio *bio);
 
 static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
 {
@@ -661,14 +697,10 @@ struct biovec_slab {
 	for_each_bio(_bio)						\
 		bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)
 
-#define bio_integrity(bio) (bio->bi_integrity != NULL)
-
 extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
 extern void bio_integrity_free(struct bio *);
 extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
-extern int bio_integrity_enabled(struct bio *bio);
-extern int bio_integrity_set_tag(struct bio *, void *, unsigned int);
-extern int bio_integrity_get_tag(struct bio *, void *, unsigned int);
+extern bool bio_integrity_enabled(struct bio *bio);
 extern int bio_integrity_prep(struct bio *);
 extern void bio_integrity_endio(struct bio *, int);
 extern void bio_integrity_advance(struct bio *, unsigned int);
@@ -680,14 +712,14 @@ extern void bio_integrity_init(void);
 
 #else /* CONFIG_BLK_DEV_INTEGRITY */
 
-static inline int bio_integrity(struct bio *bio)
+static inline void *bio_integrity(struct bio *bio)
 {
-	return 0;
+	return NULL;
 }
 
-static inline int bio_integrity_enabled(struct bio *bio)
+static inline bool bio_integrity_enabled(struct bio *bio)
 {
-	return 0;
+	return false;
 }
 
 static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
@@ -733,6 +765,11 @@ static inline void bio_integrity_init(void)
 	return;
 }
 
+static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
+{
+	return false;
+}
+
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
 #endif /* CONFIG_BLOCK */
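The new bio_integrity() accessor above hands out the payload only when REQ_INTEGRITY is set in bi_rw, and bio_integrity_flagged() folds the NULL check into the flag test. The following standalone sketch mirrors that accessor pattern in plain C; the toy_bio/toy_bip types and TOY_REQ_INTEGRITY flag are stand-ins for illustration only, not the kernel structures, while the enum values copy the bip_flags added in this diff.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Flag values mirror the enum bip_flags added in this diff. */
enum bip_flags {
	BIP_BLOCK_INTEGRITY	= 1 << 0,
	BIP_MAPPED_INTEGRITY	= 1 << 1,
	BIP_CTRL_NOCHECK	= 1 << 2,
	BIP_DISK_NOCHECK	= 1 << 3,
	BIP_IP_CHECKSUM		= 1 << 4,
};

#define TOY_REQ_INTEGRITY (1u << 0)	/* stand-in for REQ_INTEGRITY */

struct toy_bip { unsigned short bip_flags; };

struct toy_bio {
	unsigned int rw_flags;		/* stand-in for bio->bi_rw */
	struct toy_bip *integrity;	/* stand-in for bio->bi_integrity */
};

/* Accessor pattern: only hand out the payload when the request flag says
 * integrity data is attached. */
static struct toy_bip *toy_bio_integrity(struct toy_bio *bio)
{
	if (bio->rw_flags & TOY_REQ_INTEGRITY)
		return bio->integrity;
	return NULL;
}

/* Flag test that is safe to call on bios without a payload. */
static bool toy_bio_integrity_flagged(struct toy_bio *bio, enum bip_flags flag)
{
	struct toy_bip *bip = toy_bio_integrity(bio);

	return bip ? (bip->bip_flags & flag) != 0 : false;
}

int main(void)
{
	struct toy_bip bip = { .bip_flags = BIP_IP_CHECKSUM };
	struct toy_bio with = { .rw_flags = TOY_REQ_INTEGRITY, .integrity = &bip };
	struct toy_bio without = { 0 };

	printf("with payload, IP checksum: %d\n",
	       toy_bio_integrity_flagged(&with, BIP_IP_CHECKSUM));
	printf("no payload, IP checksum:  %d\n",
	       toy_bio_integrity_flagged(&without, BIP_IP_CHECKSUM));
	return 0;
}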
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index c13a0c09faea..c9be1589415a 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -4,6 +4,7 @@
 #include <linux/blkdev.h>
 
 struct blk_mq_tags;
+struct blk_flush_queue;
 
 struct blk_mq_cpu_notifier {
 	struct list_head list;
@@ -34,6 +35,7 @@ struct blk_mq_hw_ctx {
 
 	struct request_queue	*queue;
 	unsigned int		queue_num;
+	struct blk_flush_queue	*fq;
 
 	void			*driver_data;
 
@@ -77,8 +79,9 @@ struct blk_mq_tag_set {
 	struct list_head	tag_list;
 };
 
-typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *);
+typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *, bool);
 typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
+typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
 typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
 typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
 typedef int (init_request_fn)(void *, struct request *, unsigned int,
@@ -86,6 +89,9 @@ typedef int (init_request_fn)(void *, struct request *, unsigned int,
 typedef void (exit_request_fn)(void *, struct request *, unsigned int,
 		unsigned int);
 
+typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
+		bool);
+
 struct blk_mq_ops {
 	/*
 	 * Queue request
@@ -100,7 +106,7 @@ struct blk_mq_ops {
 	/*
 	 * Called on request timeout
 	 */
-	rq_timed_out_fn		*timeout;
+	timeout_fn		*timeout;
 
 	softirq_done_fn		*complete;
 
@@ -115,6 +121,10 @@ struct blk_mq_ops {
 	/*
 	 * Called for every command allocated by the block layer to allow
 	 * the driver to set up driver specific data.
+	 *
+	 * Tag greater than or equal to queue_depth is for setting up
+	 * flush request.
+	 *
 	 * Ditto for exit/teardown.
 	 */
 	init_request_fn		*init_request;
@@ -160,8 +170,9 @@ struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
 struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
 
-void blk_mq_end_io(struct request *rq, int error);
-void __blk_mq_end_io(struct request *rq, int error);
+void blk_mq_start_request(struct request *rq);
+void blk_mq_end_request(struct request *rq, int error);
+void __blk_mq_end_request(struct request *rq, int error);
 
 void blk_mq_requeue_request(struct request *rq);
 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
@@ -174,7 +185,8 @@ void blk_mq_stop_hw_queues(struct request_queue *q);
 void blk_mq_start_hw_queues(struct request_queue *q);
 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
-void blk_mq_tag_busy_iter(struct blk_mq_tags *tags, void (*fn)(void *data, unsigned long *), void *data);
+void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
+		void *priv);
 
 /*
  * Driver command data is immediately after the request. So subtract request
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 66c2167f04a9..445d59231bc4 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -78,9 +78,11 @@ struct bio {
 	struct io_context	*bi_ioc;
 	struct cgroup_subsys_state *bi_css;
 #endif
+	union {
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 	struct bio_integrity_payload *bi_integrity; /* data integrity */
 #endif
+	};
 
 	unsigned short		bi_vcnt;	/* how many bio_vec's */
 
@@ -118,10 +120,8 @@ struct bio {
 #define BIO_USER_MAPPED 6	/* contains user pages */
 #define BIO_EOPNOTSUPP	7	/* not supported */
 #define BIO_NULL_MAPPED 8	/* contains invalid user pages */
-#define BIO_FS_INTEGRITY 9	/* fs owns integrity data, not block layer */
-#define BIO_QUIET	10	/* Make BIO Quiet */
-#define BIO_MAPPED_INTEGRITY 11/* integrity metadata has been remapped */
-#define BIO_SNAP_STABLE	12	/* bio data must be snapshotted during write */
+#define BIO_QUIET	9	/* Make BIO Quiet */
+#define BIO_SNAP_STABLE	10	/* bio data must be snapshotted during write */
 
 /*
  * Flags starting here get preserved by bio_reset() - this includes
@@ -162,6 +162,7 @@ enum rq_flag_bits {
 	__REQ_WRITE_SAME,	/* write same block many times */
 
 	__REQ_NOIDLE,		/* don't anticipate more IO after this one */
+	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
 	__REQ_FUA,		/* forced unit access */
 	__REQ_FLUSH,		/* request for cache flush */
 
@@ -186,9 +187,7 @@ enum rq_flag_bits {
 	__REQ_FLUSH_SEQ,	/* request for flush sequence */
 	__REQ_IO_STAT,		/* account I/O stat */
 	__REQ_MIXED_MERGE,	/* merge of different types, fail separately */
-	__REQ_KERNEL, 		/* direct IO to kernel pages */
 	__REQ_PM,		/* runtime pm request */
-	__REQ_END,		/* last of chain of requests */
 	__REQ_HASHED,		/* on IO scheduler merge hash */
 	__REQ_MQ_INFLIGHT,	/* track inflight for MQ */
 	__REQ_NR_BITS,		/* stops here */
@@ -204,13 +203,14 @@ enum rq_flag_bits {
 #define REQ_DISCARD		(1ULL << __REQ_DISCARD)
 #define REQ_WRITE_SAME		(1ULL << __REQ_WRITE_SAME)
 #define REQ_NOIDLE		(1ULL << __REQ_NOIDLE)
+#define REQ_INTEGRITY		(1ULL << __REQ_INTEGRITY)
 
 #define REQ_FAILFAST_MASK \
 	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
 #define REQ_COMMON_MASK \
 	(REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | \
 	 REQ_DISCARD | REQ_WRITE_SAME | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | \
-	 REQ_SECURE)
+	 REQ_SECURE | REQ_INTEGRITY)
 #define REQ_CLONE_MASK		REQ_COMMON_MASK
 
 #define BIO_NO_ADVANCE_ITER_MASK	(REQ_DISCARD|REQ_WRITE_SAME)
@@ -240,9 +240,7 @@ enum rq_flag_bits {
 #define REQ_IO_STAT		(1ULL << __REQ_IO_STAT)
 #define REQ_MIXED_MERGE		(1ULL << __REQ_MIXED_MERGE)
 #define REQ_SECURE		(1ULL << __REQ_SECURE)
-#define REQ_KERNEL		(1ULL << __REQ_KERNEL)
 #define REQ_PM			(1ULL << __REQ_PM)
-#define REQ_END			(1ULL << __REQ_END)
 #define REQ_HASHED		(1ULL << __REQ_HASHED)
 #define REQ_MQ_INFLIGHT		(1ULL << __REQ_MQ_INFLIGHT)
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 87be398166d3..0207a78a8d82 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -36,6 +36,7 @@ struct request;
 struct sg_io_hdr;
 struct bsg_job;
 struct blkcg_gq;
+struct blk_flush_queue;
 
 #define BLKDEV_MIN_RQ	4
 #define BLKDEV_MAX_RQ	128	/* Default maximum */
@@ -455,14 +456,7 @@ struct request_queue {
 	 */
 	unsigned int		flush_flags;
 	unsigned int		flush_not_queueable:1;
-	unsigned int		flush_queue_delayed:1;
-	unsigned int		flush_pending_idx:1;
-	unsigned int		flush_running_idx:1;
-	unsigned long		flush_pending_since;
-	struct list_head	flush_queue[2];
-	struct list_head	flush_data_in_flight;
-	struct request		*flush_rq;
-	spinlock_t		mq_flush_lock;
+	struct blk_flush_queue	*fq;
 
 	struct list_head	requeue_list;
 	spinlock_t		requeue_lock;
@@ -865,7 +859,7 @@ extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
 
 static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
 {
-	return bdev->bd_disk->queue;
+	return bdev->bd_disk->queue;	/* this is never NULL */
 }
 
 /*
@@ -1285,10 +1279,9 @@ static inline int queue_alignment_offset(struct request_queue *q)
 static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
 {
 	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
-	unsigned int alignment = (sector << 9) & (granularity - 1);
+	unsigned int alignment = sector_div(sector, granularity >> 9) << 9;
 
-	return (granularity + lim->alignment_offset - alignment)
-		& (granularity - 1);
+	return (granularity + lim->alignment_offset - alignment) % granularity;
 }
 
 static inline int bdev_alignment_offset(struct block_device *bdev)
@@ -1464,32 +1457,31 @@ static inline uint64_t rq_io_start_time_ns(struct request *req)
 
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 
-#define INTEGRITY_FLAG_READ	2	/* verify data integrity on read */
-#define INTEGRITY_FLAG_WRITE	4	/* generate data integrity on write */
+enum blk_integrity_flags {
+	BLK_INTEGRITY_VERIFY		= 1 << 0,
+	BLK_INTEGRITY_GENERATE		= 1 << 1,
+	BLK_INTEGRITY_DEVICE_CAPABLE	= 1 << 2,
+	BLK_INTEGRITY_IP_CHECKSUM	= 1 << 3,
+};
 
-struct blk_integrity_exchg {
+struct blk_integrity_iter {
 	void			*prot_buf;
 	void			*data_buf;
-	sector_t		sector;
+	sector_t		seed;
 	unsigned int		data_size;
-	unsigned short		sector_size;
+	unsigned short		interval;
 	const char		*disk_name;
 };
 
-typedef void (integrity_gen_fn) (struct blk_integrity_exchg *);
-typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *);
-typedef void (integrity_set_tag_fn) (void *, void *, unsigned int);
-typedef void (integrity_get_tag_fn) (void *, void *, unsigned int);
+typedef int (integrity_processing_fn) (struct blk_integrity_iter *);
 
 struct blk_integrity {
-	integrity_gen_fn	*generate_fn;
-	integrity_vrfy_fn	*verify_fn;
-	integrity_set_tag_fn	*set_tag_fn;
-	integrity_get_tag_fn	*get_tag_fn;
+	integrity_processing_fn	*generate_fn;
+	integrity_processing_fn	*verify_fn;
 
 	unsigned short		flags;
 	unsigned short		tuple_size;
-	unsigned short		sector_size;
+	unsigned short		interval;
 	unsigned short		tag_size;
 
 	const char		*name;
@@ -1504,10 +1496,10 @@ extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
 extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
 				   struct scatterlist *);
 extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
-extern int blk_integrity_merge_rq(struct request_queue *, struct request *,
+extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
 				  struct request *);
-extern int blk_integrity_merge_bio(struct request_queue *, struct request *,
+extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
 				   struct bio *);
 
 static inline
 struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
@@ -1520,12 +1512,9 @@ static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
 	return disk->integrity;
 }
 
-static inline int blk_integrity_rq(struct request *rq)
+static inline bool blk_integrity_rq(struct request *rq)
 {
-	if (rq->bio == NULL)
-		return 0;
-
-	return bio_integrity(rq->bio);
+	return rq->cmd_flags & REQ_INTEGRITY;
 }
 
 static inline void blk_queue_max_integrity_segments(struct request_queue *q,
@@ -1590,15 +1579,15 @@ static inline unsigned short queue_max_integrity_segments(struct request_queue *
 {
 	return 0;
 }
-static inline int blk_integrity_merge_rq(struct request_queue *rq,
+static inline bool blk_integrity_merge_rq(struct request_queue *rq,
 					 struct request *r1,
 					 struct request *r2)
 {
 	return 0;
 }
-static inline int blk_integrity_merge_bio(struct request_queue *rq,
+static inline bool blk_integrity_merge_bio(struct request_queue *rq,
 					  struct request *r,
 					  struct bio *b)
 {
 	return 0;
 }
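The queue_limit_alignment_offset() hunk above replaces power-of-two masking with a sector_div()-style remainder plus a modulo, so the result stays correct when io_min is not a power of two (for example a RAID stripe of 3 x 64 KiB). The standalone sketch below reproduces only the arithmetic of the old and new versions; toy_queue_limits carries just the three fields the calculation uses, sector_div() is approximated with the % operator, and the 192 KiB io_min is a chosen example, not taken from the patch.

#include <stdint.h>
#include <stdio.h>

struct toy_queue_limits {
	unsigned int physical_block_size;
	unsigned int io_min;
	unsigned int alignment_offset;
};

static unsigned int max_u32(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

/* Old math: only valid when granularity is a power of two. */
static int alignment_offset_old(const struct toy_queue_limits *lim, uint64_t sector)
{
	unsigned int granularity = max_u32(lim->physical_block_size, lim->io_min);
	unsigned int alignment = (sector << 9) & (granularity - 1);

	return (granularity + lim->alignment_offset - alignment) & (granularity - 1);
}

/* New math: remainder in sectors, rescaled to bytes, then a real modulo. */
static int alignment_offset_new(const struct toy_queue_limits *lim, uint64_t sector)
{
	unsigned int granularity = max_u32(lim->physical_block_size, lim->io_min);
	unsigned int alignment = (unsigned int)(sector % (granularity >> 9)) << 9;

	return (granularity + lim->alignment_offset - alignment) % granularity;
}

int main(void)
{
	/* io_min of 192 KiB (3 * 64 KiB) is not a power of two. */
	struct toy_queue_limits lim = {
		.physical_block_size = 4096,
		.io_min = 196608,
		.alignment_offset = 0,
	};
	uint64_t sector = 384;	/* 192 KiB into the device: exactly one granule, so 0 is correct */

	printf("old: %d bytes\n", alignment_offset_old(&lim, sector));	/* wrong: 65536 */
	printf("new: %d bytes\n", alignment_offset_new(&lim, sector));	/* correct: 0 */
	return 0;
}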
diff --git a/include/linux/crc-t10dif.h b/include/linux/crc-t10dif.h
index b3cb71f0d3b0..cf53d0773ce3 100644
--- a/include/linux/crc-t10dif.h
+++ b/include/linux/crc-t10dif.h
@@ -6,7 +6,8 @@
 #define CRC_T10DIF_DIGEST_SIZE 2
 #define CRC_T10DIF_BLOCK_SIZE 1
 
-__u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer, size_t len);
-__u16 crc_t10dif(unsigned char const *, size_t);
+extern __u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer,
+				size_t len);
+extern __u16 crc_t10dif(unsigned char const *, size_t);
 
 #endif
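crc_t10dif() produces the 16-bit guard tag used by the T10 PI profiles added later in this series. Below is a standalone bit-by-bit sketch of that checksum, assuming the usual T10-DIF parameters (polynomial 0x8BB7, initial value 0, no reflection, no final XOR); the kernel's real implementation is table driven or uses PCLMULQDQ, so this is for illustration only.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Bitwise CRC16 with the T10-DIF polynomial 0x8BB7 (MSB-first). */
static uint16_t crc_t10dif_ref(uint16_t crc, const unsigned char *buf, size_t len)
{
	for (size_t i = 0; i < len; i++) {
		crc ^= (uint16_t)buf[i] << 8;
		for (int bit = 0; bit < 8; bit++) {
			if (crc & 0x8000)
				crc = (uint16_t)((crc << 1) ^ 0x8BB7);
			else
				crc = (uint16_t)(crc << 1);
		}
	}
	return crc;
}

int main(void)
{
	unsigned char sector[512];

	memset(sector, 0xa5, sizeof(sector));	/* arbitrary payload */
	printf("guard tag: 0x%04x\n", crc_t10dif_ref(0, sector, sizeof(sector)));
	return 0;
}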
diff --git a/include/linux/fs.h b/include/linux/fs.h
index ab4f1a10da20..a957d4366c24 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -192,8 +192,6 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
 #define READ			0
 #define WRITE			RW_MASK
 #define READA			RWA_MASK
-#define KERNEL_READ		(READ|REQ_KERNEL)
-#define KERNEL_WRITE		(WRITE|REQ_KERNEL)
 
 #define READ_SYNC		(READ | REQ_SYNC)
 #define WRITE_SYNC		(WRITE | REQ_SYNC | REQ_NOIDLE)
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 28d649054d5f..c72d1ad41ad4 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -448,10 +448,10 @@ static inline struct rpc_cred *nfs_file_cred(struct file *file)
 extern ssize_t nfs_direct_IO(int, struct kiocb *, struct iov_iter *, loff_t);
 extern ssize_t nfs_file_direct_read(struct kiocb *iocb,
 			struct iov_iter *iter,
-			loff_t pos, bool uio);
+			loff_t pos);
 extern ssize_t nfs_file_direct_write(struct kiocb *iocb,
 			struct iov_iter *iter,
-			loff_t pos, bool uio);
+			loff_t pos);
 
 /*
  * linux/fs/nfs/dir.c
diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h
new file mode 100644
index 000000000000..6a8b9942632d
--- /dev/null
+++ b/include/linux/t10-pi.h
@@ -0,0 +1,22 @@
+#ifndef _LINUX_T10_PI_H
+#define _LINUX_T10_PI_H
+
+#include <linux/types.h>
+#include <linux/blkdev.h>
+
+/*
+ * T10 Protection Information tuple.
+ */
+struct t10_pi_tuple {
+	__be16 guard_tag;	/* Checksum */
+	__be16 app_tag;		/* Opaque storage */
+	__be32 ref_tag;		/* Target LBA or indirect LBA */
+};
+
+
+extern struct blk_integrity t10_pi_type1_crc;
+extern struct blk_integrity t10_pi_type1_ip;
+extern struct blk_integrity t10_pi_type3_crc;
+extern struct blk_integrity t10_pi_type3_ip;
+
+#endif
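Each protection interval carries one 8-byte t10_pi_tuple appended to its data: a 2-byte guard (CRC or IP checksum), a 2-byte application tag, and a 4-byte reference tag, all big-endian on the wire. A standalone sketch of packing one tuple follows, using glibc's htobe16()/htobe32() in place of the kernel's cpu_to_be*() helpers; the guard value here is a placeholder, not a real CRC, and the toy struct only mirrors the header's layout.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors struct t10_pi_tuple from the new header: 8 bytes per interval. */
struct toy_t10_pi_tuple {
	uint16_t guard_tag;	/* checksum of the data interval */
	uint16_t app_tag;	/* opaque storage */
	uint32_t ref_tag;	/* target LBA (Type 1) or indirect LBA */
} __attribute__((packed));

int main(void)
{
	struct toy_t10_pi_tuple pi = {
		.guard_tag = htobe16(0xBEEF),	/* placeholder, not a real CRC */
		.app_tag   = htobe16(0),
		.ref_tag   = htobe32(1234),	/* low 32 bits of the LBA for Type 1 */
	};
	const unsigned char *p = (const unsigned char *)&pi;

	printf("tuple size: %zu bytes\n", sizeof(pi));
	for (size_t i = 0; i < sizeof(pi); i++)
		printf("%02x ", p[i]);
	printf("\n");
	return 0;
}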
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 73f349044941..522a5f27f553 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -10,9 +10,10 @@
 #include <scsi/scsi_device.h>
 
 struct Scsi_Host;
-struct scsi_device;
 struct scsi_driver;
 
+#include <scsi/scsi_device.h>
+
 /*
  * MAX_COMMAND_SIZE is:
  *	The longest fixed-length SCSI CDB as per the SCSI standard.
@@ -81,6 +82,7 @@ struct scsi_cmnd {
 
 	unsigned char prot_op;
 	unsigned char prot_type;
+	unsigned char prot_flags;
 
 	unsigned short cmd_len;
 	enum dma_data_direction sc_data_direction;
@@ -252,6 +254,14 @@ static inline unsigned char scsi_get_prot_op(struct scsi_cmnd *scmd)
 	return scmd->prot_op;
 }
 
+enum scsi_prot_flags {
+	SCSI_PROT_TRANSFER_PI		= 1 << 0,
+	SCSI_PROT_GUARD_CHECK		= 1 << 1,
+	SCSI_PROT_REF_CHECK		= 1 << 2,
+	SCSI_PROT_REF_INCREMENT		= 1 << 3,
+	SCSI_PROT_IP_CHECKSUM		= 1 << 4,
+};
+
 /*
  * The controller usually does not know anything about the target it
  * is communicating with.  However, when DIX is enabled the controller
@@ -280,6 +290,17 @@ static inline sector_t scsi_get_lba(struct scsi_cmnd *scmd)
 	return blk_rq_pos(scmd->request);
 }
 
+static inline unsigned int scsi_prot_interval(struct scsi_cmnd *scmd)
+{
+	return scmd->device->sector_size;
+}
+
+static inline u32 scsi_prot_ref_tag(struct scsi_cmnd *scmd)
+{
+	return blk_rq_pos(scmd->request) >>
+		(ilog2(scsi_prot_interval(scmd)) - 9) & 0xffffffff;
+}
+
 static inline unsigned scsi_prot_sg_count(struct scsi_cmnd *cmd)
 {
 	return cmd->prot_sdb ? cmd->prot_sdb->table.nents : 0;
@@ -316,17 +337,12 @@ static inline void set_driver_byte(struct scsi_cmnd *cmd, char status)
 static inline unsigned scsi_transfer_length(struct scsi_cmnd *scmd)
 {
 	unsigned int xfer_len = scsi_out(scmd)->length;
-	unsigned int prot_op = scsi_get_prot_op(scmd);
-	unsigned int sector_size = scmd->device->sector_size;
+	unsigned int prot_interval = scsi_prot_interval(scmd);
 
-	switch (prot_op) {
-	case SCSI_PROT_NORMAL:
-	case SCSI_PROT_WRITE_STRIP:
-	case SCSI_PROT_READ_INSERT:
-		return xfer_len;
-	}
+	if (scmd->prot_flags & SCSI_PROT_TRANSFER_PI)
+		xfer_len += (xfer_len >> ilog2(prot_interval)) * 8;
 
-	return xfer_len + (xfer_len >> ilog2(sector_size)) * 8;
+	return xfer_len;
 }
 
 #endif /* _SCSI_SCSI_CMND_H */
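The new scsi_prot_ref_tag() above derives the initial reference tag by rescaling the request's 512-byte sector position to the protection interval, and scsi_transfer_length() now adds 8 bytes of PI per interval whenever SCSI_PROT_TRANSFER_PI is set instead of switching on the protection operation. The standalone sketch below reproduces only that arithmetic, assuming a 4096-byte logical block / protection interval and a plain integer log2 helper in place of the kernel's ilog2(); the example values are chosen for illustration.

#include <stdint.h>
#include <stdio.h>

#define SCSI_PROT_TRANSFER_PI (1u << 0)	/* matches the new enum scsi_prot_flags */

static unsigned int ilog2_u32(uint32_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

/* Reference tag: request position in 512-byte sectors, rescaled to the
 * protection interval, truncated to 32 bits. */
static uint32_t ref_tag(uint64_t rq_pos_512, unsigned int prot_interval)
{
	return (uint32_t)(rq_pos_512 >> (ilog2_u32(prot_interval) - 9));
}

/* Transfer length: add 8 bytes of PI per interval when PI travels with the data. */
static unsigned int transfer_length(unsigned int xfer_len, unsigned int prot_interval,
				    unsigned int prot_flags)
{
	if (prot_flags & SCSI_PROT_TRANSFER_PI)
		xfer_len += (xfer_len >> ilog2_u32(prot_interval)) * 8;
	return xfer_len;
}

int main(void)
{
	unsigned int interval = 4096;	/* 4K logical blocks */
	uint64_t rq_pos = 2048;		/* 512-byte sectors => block 256 on 4K blocks */
	unsigned int xfer = 64 * 1024;	/* 64 KiB of data = 16 intervals */

	printf("ref tag: %u\n", ref_tag(rq_pos, interval));
	printf("wire length with PI: %u bytes\n",
	       transfer_length(xfer, interval, SCSI_PROT_TRANSFER_PI));
	return 0;
}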