author     Martin K. Petersen <martin.petersen@oracle.com>  2010-09-10 14:50:10 -0400
committer  Jens Axboe <axboe@carl.home.kernel.dk>           2010-09-10 14:50:10 -0400
commit     13f05c8d8e98bbdce89158bfdb2e380940695a88 (patch)
tree       055215e7e2b1bdc684ead64daa61b30b35eaa3c5
parent     c8bf1336824ebd698d37b71763e1c43190f2229a (diff)
block/scsi: Provide a limit on the number of integrity segments
Some controllers have a hardware limit on the number of protection
information scatter-gather list segments they can handle.

Introduce a max_integrity_segments limit in the block layer and provide
a new scsi_host_template setting that allows HBA drivers to provide a
value suitable for the hardware.

Add support for honoring the integrity segment limit when merging both
bios and requests.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <axboe@carl.home.kernel.dk>
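[Editor's note: as an illustration of the driver-facing side, here is a
minimal sketch, not part of this patch; the driver name and sizes are
hypothetical. A DIX-capable LLD advertises its hardware limit by filling
in the new sg_prot_tablesize field of its scsi_host_template:]

	#include <scsi/scsi_host.h>

	/* Hypothetical driver; sizes are illustrative only. */
	static struct scsi_host_template example_dix_sht = {
		.module			= THIS_MODULE,
		.name			= "example_dix_hba",
		.sg_tablesize		= 128,	/* data SG segment limit */
		.sg_prot_tablesize	= 128,	/* protection SG segment limit */
	};

[scsi_host_alloc() copies the value into the Scsi_Host, and
__scsi_alloc_queue() hands it to blk_queue_max_integrity_segments()
whenever scsi_host_prot_dma() reports a DIX-capable host. A driver that
leaves the field zero is clamped to SCSI_MAX_PROT_SG_SEGMENTS, i.e.
effectively unlimited; a non-zero value must be at least sg_tablesize or
the BUG_ON in __scsi_alloc_queue() fires. The resulting limit is exported
read-only via sysfs as queue/max_integrity_segments.]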
-rw-r--r--  block/blk-integrity.c      | 93
-rw-r--r--  block/blk-merge.c          | 23
-rw-r--r--  block/blk-settings.c       |  3
-rw-r--r--  block/blk-sysfs.c          | 11
-rw-r--r--  block/blk.h                |  8
-rw-r--r--  drivers/scsi/hosts.c       |  1
-rw-r--r--  drivers/scsi/scsi_lib.c    | 26
-rw-r--r--  drivers/scsi/scsi_sysfs.c  |  2
-rw-r--r--  include/linux/bio.h        |  4
-rw-r--r--  include/linux/blkdev.h     | 33
-rw-r--r--  include/scsi/scsi.h        |  6
-rw-r--r--  include/scsi/scsi_host.h   |  7
12 files changed, 167 insertions(+), 50 deletions(-)
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index edce1ef7933d..885cbb59967e 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -32,24 +32,37 @@ static struct kmem_cache *integrity_cachep;
 
 /**
  * blk_rq_count_integrity_sg - Count number of integrity scatterlist elements
- * @rq: request with integrity metadata attached
+ * @q: request queue
+ * @bio: bio with integrity metadata attached
  *
  * Description: Returns the number of elements required in a
- * scatterlist corresponding to the integrity metadata in a request.
+ * scatterlist corresponding to the integrity metadata in a bio.
  */
-int blk_rq_count_integrity_sg(struct request *rq)
+int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio)
 {
-	struct bio_vec *iv, *ivprv;
-	struct req_iterator iter;
-	unsigned int segments;
+	struct bio_vec *iv, *ivprv = NULL;
+	unsigned int segments = 0;
+	unsigned int seg_size = 0;
+	unsigned int i = 0;
 
-	ivprv = NULL;
-	segments = 0;
+	bio_for_each_integrity_vec(iv, bio, i) {
 
-	rq_for_each_integrity_segment(iv, rq, iter) {
+		if (ivprv) {
+			if (!BIOVEC_PHYS_MERGEABLE(ivprv, iv))
+				goto new_segment;
+
+			if (!BIOVEC_SEG_BOUNDARY(q, ivprv, iv))
+				goto new_segment;
+
+			if (seg_size + iv->bv_len > queue_max_segment_size(q))
+				goto new_segment;
 
-		if (!ivprv || !BIOVEC_PHYS_MERGEABLE(ivprv, iv))
+			seg_size += iv->bv_len;
+		} else {
+new_segment:
 			segments++;
+			seg_size = iv->bv_len;
+		}
 
 		ivprv = iv;
 	}
@@ -60,30 +73,34 @@ EXPORT_SYMBOL(blk_rq_count_integrity_sg);
 
 /**
  * blk_rq_map_integrity_sg - Map integrity metadata into a scatterlist
- * @rq: request with integrity metadata attached
+ * @q: request queue
+ * @bio: bio with integrity metadata attached
  * @sglist: target scatterlist
  *
  * Description: Map the integrity vectors in request into a
  * scatterlist. The scatterlist must be big enough to hold all
  * elements. I.e. sized using blk_rq_count_integrity_sg().
  */
-int blk_rq_map_integrity_sg(struct request *rq, struct scatterlist *sglist)
+int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio,
+			    struct scatterlist *sglist)
 {
-	struct bio_vec *iv, *ivprv;
-	struct req_iterator iter;
-	struct scatterlist *sg;
-	unsigned int segments;
+	struct bio_vec *iv, *ivprv = NULL;
+	struct scatterlist *sg = NULL;
+	unsigned int segments = 0;
+	unsigned int i = 0;
 
-	ivprv = NULL;
-	sg = NULL;
-	segments = 0;
-
-	rq_for_each_integrity_segment(iv, rq, iter) {
+	bio_for_each_integrity_vec(iv, bio, i) {
 
 		if (ivprv) {
 			if (!BIOVEC_PHYS_MERGEABLE(ivprv, iv))
 				goto new_segment;
 
+			if (!BIOVEC_SEG_BOUNDARY(q, ivprv, iv))
+				goto new_segment;
+
+			if (sg->length + iv->bv_len > queue_max_segment_size(q))
+				goto new_segment;
+
 			sg->length += iv->bv_len;
 		} else {
 new_segment:
@@ -162,6 +179,40 @@ int blk_integrity_compare(struct gendisk *gd1, struct gendisk *gd2)
 }
 EXPORT_SYMBOL(blk_integrity_compare);
 
+int blk_integrity_merge_rq(struct request_queue *q, struct request *req,
+			   struct request *next)
+{
+	if (blk_integrity_rq(req) != blk_integrity_rq(next))
+		return -1;
+
+	if (req->nr_integrity_segments + next->nr_integrity_segments >
+	    q->limits.max_integrity_segments)
+		return -1;
+
+	return 0;
+}
+EXPORT_SYMBOL(blk_integrity_merge_rq);
+
+int blk_integrity_merge_bio(struct request_queue *q, struct request *req,
+			    struct bio *bio)
+{
+	int nr_integrity_segs;
+	struct bio *next = bio->bi_next;
+
+	bio->bi_next = NULL;
+	nr_integrity_segs = blk_rq_count_integrity_sg(q, bio);
+	bio->bi_next = next;
+
+	if (req->nr_integrity_segments + nr_integrity_segs >
+	    q->limits.max_integrity_segments)
+		return -1;
+
+	req->nr_integrity_segments += nr_integrity_segs;
+
+	return 0;
+}
+EXPORT_SYMBOL(blk_integrity_merge_bio);
+
 struct integrity_sysfs_entry {
 	struct attribute attr;
 	ssize_t (*show)(struct blk_integrity *, char *);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 3b0cd4249671..6a725461654d 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -205,12 +205,11 @@ static inline int ll_new_hw_segment(struct request_queue *q,
 {
 	int nr_phys_segs = bio_phys_segments(q, bio);
 
-	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q)) {
-		req->cmd_flags |= REQ_NOMERGE;
-		if (req == q->last_merge)
-			q->last_merge = NULL;
-		return 0;
-	}
+	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
+		goto no_merge;
+
+	if (bio_integrity(bio) && blk_integrity_merge_bio(q, req, bio))
+		goto no_merge;
 
 	/*
 	 * This will form the start of a new hw segment. Bump both
@@ -218,6 +217,12 @@ static inline int ll_new_hw_segment(struct request_queue *q,
 	 */
 	req->nr_phys_segments += nr_phys_segs;
 	return 1;
+
+no_merge:
+	req->cmd_flags |= REQ_NOMERGE;
+	if (req == q->last_merge)
+		q->last_merge = NULL;
+	return 0;
 }
 
 int ll_back_merge_fn(struct request_queue *q, struct request *req,
@@ -301,6 +306,9 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 	if (total_phys_segments > queue_max_segments(q))
 		return 0;
 
+	if (blk_integrity_rq(req) && blk_integrity_merge_rq(q, req, next))
+		return 0;
+
 	/* Merge is OK... */
 	req->nr_phys_segments = total_phys_segments;
 	return 1;
@@ -372,9 +380,6 @@ static int attempt_merge(struct request_queue *q, struct request *req,
 	    || next->special)
 		return 0;
 
-	if (blk_integrity_rq(req) != blk_integrity_rq(next))
-		return 0;
-
 	/*
 	 * If we are allowed to merge, then append bio list
 	 * from next to rq and release next. merge_requests_fn
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 8d592b559bd3..f8f2ddf20613 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -111,6 +111,7 @@ EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
 void blk_set_default_limits(struct queue_limits *lim)
 {
 	lim->max_segments = BLK_MAX_SEGMENTS;
+	lim->max_integrity_segments = 0;
 	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
 	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
 	lim->max_sectors = BLK_DEF_MAX_SECTORS;
@@ -509,6 +510,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 					    b->seg_boundary_mask);
 
 	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
+	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
+						 b->max_integrity_segments);
 
 	t->max_segment_size = min_not_zero(t->max_segment_size,
 					   b->max_segment_size);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 001ab18078f5..b014f7739e98 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -112,6 +112,11 @@ static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
 	return queue_var_show(queue_max_segments(q), (page));
 }
 
+static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
+{
+	return queue_var_show(q->limits.max_integrity_segments, (page));
+}
+
 static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
 {
 	if (test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
@@ -288,6 +293,11 @@ static struct queue_sysfs_entry queue_max_segments_entry = {
 	.show = queue_max_segments_show,
 };
 
+static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
+	.attr = {.name = "max_integrity_segments", .mode = S_IRUGO },
+	.show = queue_max_integrity_segments_show,
+};
+
 static struct queue_sysfs_entry queue_max_segment_size_entry = {
 	.attr = {.name = "max_segment_size", .mode = S_IRUGO },
 	.show = queue_max_segment_size_show,
@@ -375,6 +385,7 @@ static struct attribute *default_attrs[] = {
 	&queue_max_hw_sectors_entry.attr,
 	&queue_max_sectors_entry.attr,
 	&queue_max_segments_entry.attr,
+	&queue_max_integrity_segments_entry.attr,
 	&queue_max_segment_size_entry.attr,
 	&queue_iosched_entry.attr,
 	&queue_hw_sector_size_entry.attr,
diff --git a/block/blk.h b/block/blk.h
index 6e7dc87141e4..6738831ba447 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -132,14 +132,6 @@ static inline int queue_congestion_off_threshold(struct request_queue *q)
 	return q->nr_congestion_off;
 }
 
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
-
-#define rq_for_each_integrity_segment(bvl, _rq, _iter)	\
-	__rq_for_each_bio(_iter.bio, _rq)		\
-		bip_for_each_vec(bvl, _iter.bio->bi_integrity, _iter.i)
-
-#endif /* BLK_DEV_INTEGRITY */
-
 static inline int blk_cpu_to_group(int cpu)
 {
 #ifdef CONFIG_SCHED_MC
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 8a8f803439e1..10478153641b 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -376,6 +376,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
 	shost->this_id = sht->this_id;
 	shost->can_queue = sht->can_queue;
 	shost->sg_tablesize = sht->sg_tablesize;
+	shost->sg_prot_tablesize = sht->sg_prot_tablesize;
 	shost->cmd_per_lun = sht->cmd_per_lun;
 	shost->unchecked_isa_dma = sht->unchecked_isa_dma;
 	shost->use_clustering = sht->use_clustering;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9ade720422c6..861c0b937ac9 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -968,11 +968,13 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
  */
 int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
 {
-	int error = scsi_init_sgtable(cmd->request, &cmd->sdb, gfp_mask);
+	struct request *rq = cmd->request;
+
+	int error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask);
 	if (error)
 		goto err_exit;
 
-	if (blk_bidi_rq(cmd->request)) {
+	if (blk_bidi_rq(rq)) {
 		struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
 			scsi_sdb_cache, GFP_ATOMIC);
 		if (!bidi_sdb) {
@@ -980,28 +982,28 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
 			goto err_exit;
 		}
 
-		cmd->request->next_rq->special = bidi_sdb;
-		error = scsi_init_sgtable(cmd->request->next_rq, bidi_sdb,
-					  GFP_ATOMIC);
+		rq->next_rq->special = bidi_sdb;
+		error = scsi_init_sgtable(rq->next_rq, bidi_sdb, GFP_ATOMIC);
 		if (error)
 			goto err_exit;
 	}
 
-	if (blk_integrity_rq(cmd->request)) {
+	if (blk_integrity_rq(rq)) {
 		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
 		int ivecs, count;
 
 		BUG_ON(prot_sdb == NULL);
-		ivecs = blk_rq_count_integrity_sg(cmd->request);
+		ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
 
 		if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) {
 			error = BLKPREP_DEFER;
 			goto err_exit;
 		}
 
-		count = blk_rq_map_integrity_sg(cmd->request,
+		count = blk_rq_map_integrity_sg(rq->q, rq->bio,
 						prot_sdb->table.sgl);
 		BUG_ON(unlikely(count > ivecs));
+		BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q)));
 
 		cmd->prot_sdb = prot_sdb;
 		cmd->prot_sdb->table.nents = count;
@@ -1625,6 +1627,14 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
 	blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
 					SCSI_MAX_SG_CHAIN_SEGMENTS));
 
+	if (scsi_host_prot_dma(shost)) {
+		shost->sg_prot_tablesize =
+			min_not_zero(shost->sg_prot_tablesize,
+				     (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
+		BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
+		blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
+	}
+
 	blk_queue_max_hw_sectors(q, shost->max_sectors);
 	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
 	blk_queue_segment_boundary(q, shost->dma_boundary);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index c3f67373a4f8..20ad59dff730 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -251,6 +251,7 @@ shost_rd_attr(host_busy, "%hu\n");
 shost_rd_attr(cmd_per_lun, "%hd\n");
 shost_rd_attr(can_queue, "%hd\n");
 shost_rd_attr(sg_tablesize, "%hu\n");
+shost_rd_attr(sg_prot_tablesize, "%hu\n");
 shost_rd_attr(unchecked_isa_dma, "%d\n");
 shost_rd_attr(prot_capabilities, "%u\n");
 shost_rd_attr(prot_guard_type, "%hd\n");
@@ -262,6 +263,7 @@ static struct attribute *scsi_sysfs_shost_attrs[] = {
 	&dev_attr_cmd_per_lun.attr,
 	&dev_attr_can_queue.attr,
 	&dev_attr_sg_tablesize.attr,
+	&dev_attr_sg_prot_tablesize.attr,
 	&dev_attr_unchecked_isa_dma.attr,
 	&dev_attr_proc_name.attr,
 	&dev_attr_scan.attr,
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 5274103434ad..2c3fd7421607 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -496,6 +496,10 @@ static inline struct bio *bio_list_get(struct bio_list *bl)
 #define bip_for_each_vec(bvl, bip, i)				\
 	__bip_for_each_vec(bvl, bip, i, (bip)->bip_idx)
 
+#define bio_for_each_integrity_vec(_bvl, _bio, _iter)		\
+	for_each_bio(_bio)					\
+		bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)
+
 #define bio_integrity(bio) (bio->bi_integrity != NULL)
 
 extern struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *, gfp_t, unsigned int, struct bio_set *);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 2c54906f678f..7e661106270a 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -124,6 +124,9 @@ struct request {
 	 * physical address coalescing is performed.
 	 */
 	unsigned short nr_phys_segments;
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+	unsigned short nr_integrity_segments;
+#endif
 
 	unsigned short ioprio;
 
@@ -243,6 +246,7 @@ struct queue_limits {
 
 	unsigned short		logical_block_size;
 	unsigned short		max_segments;
+	unsigned short		max_integrity_segments;
 
 	unsigned char		misaligned;
 	unsigned char		discard_misaligned;
@@ -1213,8 +1217,13 @@ struct blk_integrity {
 extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
 extern void blk_integrity_unregister(struct gendisk *);
 extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
-extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
-extern int blk_rq_count_integrity_sg(struct request *);
+extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
+				   struct scatterlist *);
+extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
+extern int blk_integrity_merge_rq(struct request_queue *, struct request *,
+				  struct request *);
+extern int blk_integrity_merge_bio(struct request_queue *, struct request *,
+				   struct bio *);
 
 static inline
 struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
@@ -1235,16 +1244,32 @@ static inline int blk_integrity_rq(struct request *rq)
 	return bio_integrity(rq->bio);
 }
 
+static inline void blk_queue_max_integrity_segments(struct request_queue *q,
+						    unsigned int segs)
+{
+	q->limits.max_integrity_segments = segs;
+}
+
+static inline unsigned short
+queue_max_integrity_segments(struct request_queue *q)
+{
+	return q->limits.max_integrity_segments;
+}
+
 #else /* CONFIG_BLK_DEV_INTEGRITY */
 
 #define blk_integrity_rq(rq)			(0)
-#define blk_rq_count_integrity_sg(a)		(0)
-#define blk_rq_map_integrity_sg(a, b)		(0)
+#define blk_rq_count_integrity_sg(a, b)		(0)
+#define blk_rq_map_integrity_sg(a, b, c)	(0)
 #define bdev_get_integrity(a)			(0)
 #define blk_get_integrity(a)			(0)
 #define blk_integrity_compare(a, b)		(0)
 #define blk_integrity_register(a, b)		(0)
 #define blk_integrity_unregister(a)		do { } while (0);
+#define blk_queue_max_integrity_segments(a, b)	do { } while (0);
+#define queue_max_integrity_segments(a)		(0)
+#define blk_integrity_merge_rq(a, b, c)		(0)
+#define blk_integrity_merge_bio(a, b, c)	(0)
 
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 8fcb6e0e9e72..d63533a4a59e 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -32,6 +32,12 @@ struct scsi_cmnd;
 #endif
 
 /*
+ * DIX-capable adapters effectively support infinite chaining for the
+ * protection information scatterlist
+ */
+#define SCSI_MAX_PROT_SG_SEGMENTS	0xFFFF
+
+/*
  * Special value for scanning to specify scanning or rescanning of all
  * possible channels, (target) ids, or luns on a given shost.
  */
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index b7bdecb7b76e..d0a6a845f204 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -388,6 +388,7 @@ struct scsi_host_template {
 	 * of scatter-gather.
 	 */
 	unsigned short sg_tablesize;
+	unsigned short sg_prot_tablesize;
 
 	/*
 	 * Set this if the host adapter has limitations beside segment count.
@@ -599,6 +600,7 @@ struct Scsi_Host {
 	int can_queue;
 	short cmd_per_lun;
 	short unsigned int sg_tablesize;
+	short unsigned int sg_prot_tablesize;
 	short unsigned int max_sectors;
 	unsigned long dma_boundary;
 	/*
@@ -823,6 +825,11 @@ static inline unsigned int scsi_host_get_prot(struct Scsi_Host *shost)
 	return shost->prot_capabilities;
 }
 
+static inline int scsi_host_prot_dma(struct Scsi_Host *shost)
+{
+	return shost->prot_capabilities >= SHOST_DIX_TYPE0_PROTECTION;
+}
+
 static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsigned int target_type)
 {
 	static unsigned char cap[] = { 0,
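
[Editor's note: for completeness, a hedged sketch of the consuming side.
The helper below is hypothetical and not part of this patch;
scsi_prot_sg_count(), scsi_prot_sglist() and scsi_for_each_prot_sg() are
pre-existing <scsi/scsi_cmnd.h> accessors. Once scsi_init_io() has mapped
the integrity metadata, an LLD can walk the protection scatterlist
knowing it holds at most queue_max_integrity_segments() entries:]

	#include <scsi/scsi_cmnd.h>

	/* Hypothetical LLD helper: total protection bytes in a command. */
	static unsigned int example_prot_bytes(struct scsi_cmnd *cmd)
	{
		struct scatterlist *sg;
		unsigned int bytes = 0;
		int i;

		/* nents was capped by blk_rq_count/map_integrity_sg() */
		scsi_for_each_prot_sg(cmd, sg, scsi_prot_sg_count(cmd), i)
			bytes += sg->length;

		return bytes;
	}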