author		Linus Torvalds <torvalds@linux-foundation.org>	2018-12-28 17:48:06 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-12-28 17:48:06 -0500
commit		938edb8a31b976c9a92eb0cd4ff481e93f76c1f1 (patch)
tree		0854d5f6859d51032f1d853eaa8ab0e8647fb0cb /block
parent		af7ddd8a627c62a835524b3f5b471edbbbcce025 (diff)
parent		da7903092b880b25971ca9103cb0b934a44ace2b (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
"This is mostly update of the usual drivers: smarpqi, lpfc, qedi,
megaraid_sas, libsas, zfcp, mpt3sas, hisi_sas.
Additionally, we have a pile of annotation, unused variable and minor
updates.
The big API change is the updates for Christoph's DMA rework which
include removing the DISABLE_CLUSTERING flag.
And finally there are a couple of target tree updates"
* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (259 commits)
scsi: isci: request: mark expected switch fall-through
scsi: isci: remote_node_context: mark expected switch fall-throughs
scsi: isci: remote_device: Mark expected switch fall-throughs
scsi: isci: phy: Mark expected switch fall-through
scsi: iscsi: Capture iscsi debug messages using tracepoints
scsi: myrb: Mark expected switch fall-throughs
scsi: megaraid: fix out-of-bound array accesses
scsi: mpt3sas: mpt3sas_scsih: Mark expected switch fall-through
scsi: fcoe: remove set but not used variable 'port'
scsi: smartpqi: call pqi_free_interrupts() in pqi_shutdown()
scsi: smartpqi: fix build warnings
scsi: smartpqi: update driver version
scsi: smartpqi: add ofa support
scsi: smartpqi: increase fw status register read timeout
scsi: smartpqi: bump driver version
scsi: smartpqi: add smp_utils support
scsi: smartpqi: correct lun reset issues
scsi: smartpqi: correct volume status
scsi: smartpqi: do not offline disks for transient did no connect conditions
scsi: smartpqi: allow for larger raid maps
...
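The DISABLE_CLUSTERING removal called out in the message above is visible to every low-level driver: the scsi_host_template flag is gone, and a host that cannot handle multi-page segments now states that constraint through its DMA limits instead. A minimal before/after sketch, assuming the common conversion pattern; the dma_boundary replacement shown here should be checked against the individual driver commits in the series, and "example" is a hypothetical driver:

#include <scsi/scsi_host.h>

/* Old pattern: the template flag suppressed all segment merging. */
static struct scsi_host_template example_old_tmpl = {
        .name           = "example",
        .use_clustering = DISABLE_CLUSTERING,   /* field removed by this series */
};

/* New pattern: the same "one page per segment" constraint is stated as a
 * DMA boundary, so the block layer never builds a segment that crosses a
 * page boundary. */
static struct scsi_host_template example_new_tmpl = {
        .name           = "example",
        .dma_boundary   = PAGE_SIZE - 1,
};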
Diffstat (limited to 'block')
-rw-r--r--	block/blk-merge.c	18
-rw-r--r--	block/blk-settings.c	3
-rw-r--r--	block/blk-sysfs.c	5
3 files changed, 8 insertions(+), 18 deletions(-)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index e7f1c6cf0167..71e9ac03f621 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -195,7 +195,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
                         goto split;
                 }
 
-                if (bvprvp && blk_queue_cluster(q)) {
+                if (bvprvp) {
                         if (seg_size + bv.bv_len > queue_max_segment_size(q))
                                 goto new_segment;
                         if (!biovec_phys_mergeable(q, bvprvp, &bv))
@@ -295,7 +295,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
                                              bool no_sg_merge)
 {
         struct bio_vec bv, bvprv = { NULL };
-        int cluster, prev = 0;
+        int prev = 0;
         unsigned int seg_size, nr_phys_segs;
         struct bio *fbio, *bbio;
         struct bvec_iter iter;
@@ -313,7 +313,6 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
         }
 
         fbio = bio;
-        cluster = blk_queue_cluster(q);
         seg_size = 0;
         nr_phys_segs = 0;
         for_each_bio(bio) {
@@ -325,7 +324,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
                         if (no_sg_merge)
                                 goto new_segment;
 
-                        if (prev && cluster) {
+                        if (prev) {
                                 if (seg_size + bv.bv_len
                                     > queue_max_segment_size(q))
                                         goto new_segment;
@@ -395,9 +394,6 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 {
         struct bio_vec end_bv = { NULL }, nxt_bv;
 
-        if (!blk_queue_cluster(q))
-                return 0;
-
         if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
             queue_max_segment_size(q))
                 return 0;
@@ -414,12 +410,12 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 static inline void
 __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
                      struct scatterlist *sglist, struct bio_vec *bvprv,
-                     struct scatterlist **sg, int *nsegs, int *cluster)
+                     struct scatterlist **sg, int *nsegs)
 {
 
         int nbytes = bvec->bv_len;
 
-        if (*sg && *cluster) {
+        if (*sg) {
                 if ((*sg)->length + nbytes > queue_max_segment_size(q))
                         goto new_segment;
                 if (!biovec_phys_mergeable(q, bvprv, bvec))
@@ -465,12 +461,12 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
 {
         struct bio_vec bvec, bvprv = { NULL };
         struct bvec_iter iter;
-        int cluster = blk_queue_cluster(q), nsegs = 0;
+        int nsegs = 0;
 
         for_each_bio(bio)
                 bio_for_each_segment(bvec, bio, iter)
                         __blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
-                                             &nsegs, &cluster);
+                                             &nsegs);
 
         return nsegs;
 }
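Taken together, the blk-merge.c hunks reduce the per-bvec merge decision to two checks that now apply to every queue. A condensed sketch of that logic using the helpers the diff already touches (illustrative, not a verbatim kernel function):

/* May "bv" be folded into the physical segment that currently ends at
 * "bvprv" and spans "seg_size" bytes?  With the cluster flag gone, only
 * the size cap and physical contiguity matter; a queue that must never
 * merge simply advertises max_segment_size == PAGE_SIZE. */
static bool bvec_can_extend_segment(struct request_queue *q,
                                    struct bio_vec *bvprv,
                                    struct bio_vec *bv,
                                    unsigned int seg_size)
{
        if (seg_size + bv->bv_len > queue_max_segment_size(q))
                return false;   /* would exceed the segment size cap */
        return biovec_phys_mergeable(q, bvprv, bv);
}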
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 3abe831e92c8..3e7038e475ee 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -56,7 +56,6 @@ void blk_set_default_limits(struct queue_limits *lim)
         lim->alignment_offset = 0;
         lim->io_opt = 0;
         lim->misaligned = 0;
-        lim->cluster = 1;
         lim->zoned = BLK_ZONED_NONE;
 }
 EXPORT_SYMBOL(blk_set_default_limits);
@@ -547,8 +546,6 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
         t->io_min = max(t->io_min, b->io_min);
         t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
 
-        t->cluster &= b->cluster;
-
         /* Physical block size a multiple of the logical block size? */
         if (t->physical_block_size & (t->logical_block_size - 1)) {
                 t->physical_block_size = t->logical_block_size;
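Note that nothing replaces t->cluster &= b->cluster; in blk_stack_limits(): the equivalent information now travels in max_segment_size, which the function already stacks by keeping the stricter value. A sketch of that pre-existing stacking step, assuming the min_not_zero() form blk_stack_limits() uses for segment sizes:

/* When stacking limits (e.g. dm on top of a SCSI host), the smaller
 * non-zero segment-size limit wins, so a bottom device that set
 * max_segment_size = PAGE_SIZE propagates its "no multi-page segments"
 * constraint upward without needing a separate cluster bit. */
t->max_segment_size = min_not_zero(t->max_segment_size,
                                   b->max_segment_size);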
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 0619c8922893..590d1ef2f961 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -132,10 +132,7 @@ static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *
 
 static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
 {
-        if (blk_queue_cluster(q))
-                return queue_var_show(queue_max_segment_size(q), (page));
-
-        return queue_var_show(PAGE_SIZE, (page));
+        return queue_var_show(queue_max_segment_size(q), (page));
 }
 
 static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
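With the PAGE_SIZE special case gone from queue_max_segment_size_show(), a driver that previously relied on the cluster flag to make sysfs report one-page segments must now set the limit explicitly at queue setup. A hypothetical init snippet; example_init_queue is illustrative, while blk_queue_max_segment_size() is the existing block-layer API:

#include <linux/blkdev.h>

/* Hypothetical setup path for a device that cannot merge across pages. */
static void example_init_queue(struct request_queue *q)
{
        /* Equivalent of the old !cluster case: sysfs now reports PAGE_SIZE
         * because the advertised limit really is PAGE_SIZE. */
        blk_queue_max_segment_size(q, PAGE_SIZE);
}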