diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2017-12-21 14:13:37 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-12-21 14:13:37 -0500 |
| commit | 9035a8961b504d0997369509ab8c6b1f0a4ee33d (patch) | |
| tree | 99aad197f1c60efe849185fff7a8f14a7792f748 | |
| parent | 409232a45096a2c0d06634269a7c1c3412ecf910 (diff) | |
| parent | 6d0e4827b72afc71349784336d5eb6df4df106e6 (diff) | |
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
"It's been a few weeks, so here's a small collection of fixes that
should go into the current series.
This contains:
- NVMe pull request from Christoph, with a few important fixes.
- kyber hang fix from Omar.
- A blk-throttle fix from Shaohua, fixing a case where we double
charge a bio.
- Two call_single_data alignment fixes from me, fixing up some
unfortunate changes that went into 4.14 without being properly
reviewed on the block side (since nobody was CC'ed on the
patch...).
- A bounce buffer fix in two parts, one from me and one from Ming.
- Revert bdi debug error handling patch. It's causing boot issues for
some folks, and a week down the line, we're still no closer to a
fix. Revert this patch for now until it's figured out, then we can
retry for 4.16"
* 'for-linus' of git://git.kernel.dk/linux-block:
Revert "bdi: add error handle for bdi_debug_register"
null_blk: unalign call_single_data
block: unalign call_single_data in struct request
block-throttle: avoid double charge
block: fix blk_rq_append_bio
block: don't let passthrough IO go into .make_request_fn()
nvme: setup streams after initializing namespace head
nvme: check hw sectors before setting chunk sectors
nvme: call blk_integrity_unregister after queue is cleaned up
nvme-fc: remove double put reference if admin connect fails
nvme: set discard_alignment to zero
kyber: fix another domain token wait queue hang
| -rw-r--r-- | block/bio.c | 2 | ||||
| -rw-r--r-- | block/blk-map.c | 38 | ||||
| -rw-r--r-- | block/blk-throttle.c | 8 | ||||
| -rw-r--r-- | block/bounce.c | 6 | ||||
| -rw-r--r-- | block/kyber-iosched.c | 37 | ||||
| -rw-r--r-- | drivers/block/null_blk.c | 4 | ||||
| -rw-r--r-- | drivers/nvme/host/core.c | 11 | ||||
| -rw-r--r-- | drivers/nvme/host/fc.c | 1 | ||||
| -rw-r--r-- | drivers/scsi/osd/osd_initiator.c | 4 | ||||
| -rw-r--r-- | drivers/target/target_core_pscsi.c | 4 | ||||
| -rw-r--r-- | include/linux/bio.h | 2 | ||||
| -rw-r--r-- | include/linux/blk_types.h | 9 | ||||
| -rw-r--r-- | include/linux/blkdev.h | 25 | ||||
| -rw-r--r-- | mm/backing-dev.c | 5 |
14 files changed, 94 insertions, 62 deletions
diff --git a/block/bio.c b/block/bio.c index 8bfdea58159b..9ef6cf3addb3 100644 --- a/block/bio.c +++ b/block/bio.c | |||
| @@ -599,6 +599,8 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src) | |||
| 599 | bio->bi_disk = bio_src->bi_disk; | 599 | bio->bi_disk = bio_src->bi_disk; |
| 600 | bio->bi_partno = bio_src->bi_partno; | 600 | bio->bi_partno = bio_src->bi_partno; |
| 601 | bio_set_flag(bio, BIO_CLONED); | 601 | bio_set_flag(bio, BIO_CLONED); |
| 602 | if (bio_flagged(bio_src, BIO_THROTTLED)) | ||
| 603 | bio_set_flag(bio, BIO_THROTTLED); | ||
| 602 | bio->bi_opf = bio_src->bi_opf; | 604 | bio->bi_opf = bio_src->bi_opf; |
| 603 | bio->bi_write_hint = bio_src->bi_write_hint; | 605 | bio->bi_write_hint = bio_src->bi_write_hint; |
| 604 | bio->bi_iter = bio_src->bi_iter; | 606 | bio->bi_iter = bio_src->bi_iter; |
diff --git a/block/blk-map.c b/block/blk-map.c index b21f8e86f120..d3a94719f03f 100644 --- a/block/blk-map.c +++ b/block/blk-map.c | |||
| @@ -12,22 +12,29 @@ | |||
| 12 | #include "blk.h" | 12 | #include "blk.h" |
| 13 | 13 | ||
| 14 | /* | 14 | /* |
| 15 | * Append a bio to a passthrough request. Only works can be merged into | 15 | * Append a bio to a passthrough request. Only works if the bio can be merged |
| 16 | * the request based on the driver constraints. | 16 | * into the request based on the driver constraints. |
| 17 | */ | 17 | */ |
| 18 | int blk_rq_append_bio(struct request *rq, struct bio *bio) | 18 | int blk_rq_append_bio(struct request *rq, struct bio **bio) |
| 19 | { | 19 | { |
| 20 | blk_queue_bounce(rq->q, &bio); | 20 | struct bio *orig_bio = *bio; |
| 21 | |||
| 22 | blk_queue_bounce(rq->q, bio); | ||
| 21 | 23 | ||
| 22 | if (!rq->bio) { | 24 | if (!rq->bio) { |
| 23 | blk_rq_bio_prep(rq->q, rq, bio); | 25 | blk_rq_bio_prep(rq->q, rq, *bio); |
| 24 | } else { | 26 | } else { |
| 25 | if (!ll_back_merge_fn(rq->q, rq, bio)) | 27 | if (!ll_back_merge_fn(rq->q, rq, *bio)) { |
| 28 | if (orig_bio != *bio) { | ||
| 29 | bio_put(*bio); | ||
| 30 | *bio = orig_bio; | ||
| 31 | } | ||
| 26 | return -EINVAL; | 32 | return -EINVAL; |
| 33 | } | ||
| 27 | 34 | ||
| 28 | rq->biotail->bi_next = bio; | 35 | rq->biotail->bi_next = *bio; |
| 29 | rq->biotail = bio; | 36 | rq->biotail = *bio; |
| 30 | rq->__data_len += bio->bi_iter.bi_size; | 37 | rq->__data_len += (*bio)->bi_iter.bi_size; |
| 31 | } | 38 | } |
| 32 | 39 | ||
| 33 | return 0; | 40 | return 0; |
| @@ -73,14 +80,12 @@ static int __blk_rq_map_user_iov(struct request *rq, | |||
| 73 | * We link the bounce buffer in and could have to traverse it | 80 | * We link the bounce buffer in and could have to traverse it |
| 74 | * later so we have to get a ref to prevent it from being freed | 81 | * later so we have to get a ref to prevent it from being freed |
| 75 | */ | 82 | */ |
| 76 | ret = blk_rq_append_bio(rq, bio); | 83 | ret = blk_rq_append_bio(rq, &bio); |
| 77 | bio_get(bio); | ||
| 78 | if (ret) { | 84 | if (ret) { |
| 79 | bio_endio(bio); | ||
| 80 | __blk_rq_unmap_user(orig_bio); | 85 | __blk_rq_unmap_user(orig_bio); |
| 81 | bio_put(bio); | ||
| 82 | return ret; | 86 | return ret; |
| 83 | } | 87 | } |
| 88 | bio_get(bio); | ||
| 84 | 89 | ||
| 85 | return 0; | 90 | return 0; |
| 86 | } | 91 | } |
| @@ -213,7 +218,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, | |||
| 213 | int reading = rq_data_dir(rq) == READ; | 218 | int reading = rq_data_dir(rq) == READ; |
| 214 | unsigned long addr = (unsigned long) kbuf; | 219 | unsigned long addr = (unsigned long) kbuf; |
| 215 | int do_copy = 0; | 220 | int do_copy = 0; |
| 216 | struct bio *bio; | 221 | struct bio *bio, *orig_bio; |
| 217 | int ret; | 222 | int ret; |
| 218 | 223 | ||
| 219 | if (len > (queue_max_hw_sectors(q) << 9)) | 224 | if (len > (queue_max_hw_sectors(q) << 9)) |
| @@ -236,10 +241,11 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, | |||
| 236 | if (do_copy) | 241 | if (do_copy) |
| 237 | rq->rq_flags |= RQF_COPY_USER; | 242 | rq->rq_flags |= RQF_COPY_USER; |
| 238 | 243 | ||
| 239 | ret = blk_rq_append_bio(rq, bio); | 244 | orig_bio = bio; |
| 245 | ret = blk_rq_append_bio(rq, &bio); | ||
| 240 | if (unlikely(ret)) { | 246 | if (unlikely(ret)) { |
| 241 | /* request is too big */ | 247 | /* request is too big */ |
| 242 | bio_put(bio); | 248 | bio_put(orig_bio); |
| 243 | return ret; | 249 | return ret; |
| 244 | } | 250 | } |
| 245 | 251 | ||
diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 825bc29767e6..d19f416d6101 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c | |||
| @@ -2226,13 +2226,7 @@ again: | |||
| 2226 | out_unlock: | 2226 | out_unlock: |
| 2227 | spin_unlock_irq(q->queue_lock); | 2227 | spin_unlock_irq(q->queue_lock); |
| 2228 | out: | 2228 | out: |
| 2229 | /* | 2229 | bio_set_flag(bio, BIO_THROTTLED); |
| 2230 | * As multiple blk-throtls may stack in the same issue path, we | ||
| 2231 | * don't want bios to leave with the flag set. Clear the flag if | ||
| 2232 | * being issued. | ||
| 2233 | */ | ||
| 2234 | if (!throttled) | ||
| 2235 | bio_clear_flag(bio, BIO_THROTTLED); | ||
| 2236 | 2230 | ||
| 2237 | #ifdef CONFIG_BLK_DEV_THROTTLING_LOW | 2231 | #ifdef CONFIG_BLK_DEV_THROTTLING_LOW |
| 2238 | if (throttled || !td->track_bio_latency) | 2232 | if (throttled || !td->track_bio_latency) |
diff --git a/block/bounce.c b/block/bounce.c index fceb1a96480b..1d05c422c932 100644 --- a/block/bounce.c +++ b/block/bounce.c | |||
| @@ -200,6 +200,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig, | |||
| 200 | unsigned i = 0; | 200 | unsigned i = 0; |
| 201 | bool bounce = false; | 201 | bool bounce = false; |
| 202 | int sectors = 0; | 202 | int sectors = 0; |
| 203 | bool passthrough = bio_is_passthrough(*bio_orig); | ||
| 203 | 204 | ||
| 204 | bio_for_each_segment(from, *bio_orig, iter) { | 205 | bio_for_each_segment(from, *bio_orig, iter) { |
| 205 | if (i++ < BIO_MAX_PAGES) | 206 | if (i++ < BIO_MAX_PAGES) |
| @@ -210,13 +211,14 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig, | |||
| 210 | if (!bounce) | 211 | if (!bounce) |
| 211 | return; | 212 | return; |
| 212 | 213 | ||
| 213 | if (sectors < bio_sectors(*bio_orig)) { | 214 | if (!passthrough && sectors < bio_sectors(*bio_orig)) { |
| 214 | bio = bio_split(*bio_orig, sectors, GFP_NOIO, bounce_bio_split); | 215 | bio = bio_split(*bio_orig, sectors, GFP_NOIO, bounce_bio_split); |
| 215 | bio_chain(bio, *bio_orig); | 216 | bio_chain(bio, *bio_orig); |
| 216 | generic_make_request(*bio_orig); | 217 | generic_make_request(*bio_orig); |
| 217 | *bio_orig = bio; | 218 | *bio_orig = bio; |
| 218 | } | 219 | } |
| 219 | bio = bio_clone_bioset(*bio_orig, GFP_NOIO, bounce_bio_set); | 220 | bio = bio_clone_bioset(*bio_orig, GFP_NOIO, passthrough ? NULL : |
| 221 | bounce_bio_set); | ||
| 220 | 222 | ||
| 221 | bio_for_each_segment_all(to, bio, i) { | 223 | bio_for_each_segment_all(to, bio, i) { |
| 222 | struct page *page = to->bv_page; | 224 | struct page *page = to->bv_page; |
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c index b4df317c2916..f95c60774ce8 100644 --- a/block/kyber-iosched.c +++ b/block/kyber-iosched.c | |||
| @@ -100,9 +100,13 @@ struct kyber_hctx_data { | |||
| 100 | unsigned int cur_domain; | 100 | unsigned int cur_domain; |
| 101 | unsigned int batching; | 101 | unsigned int batching; |
| 102 | wait_queue_entry_t domain_wait[KYBER_NUM_DOMAINS]; | 102 | wait_queue_entry_t domain_wait[KYBER_NUM_DOMAINS]; |
| 103 | struct sbq_wait_state *domain_ws[KYBER_NUM_DOMAINS]; | ||
| 103 | atomic_t wait_index[KYBER_NUM_DOMAINS]; | 104 | atomic_t wait_index[KYBER_NUM_DOMAINS]; |
| 104 | }; | 105 | }; |
| 105 | 106 | ||
| 107 | static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags, | ||
| 108 | void *key); | ||
| 109 | |||
| 106 | static int rq_sched_domain(const struct request *rq) | 110 | static int rq_sched_domain(const struct request *rq) |
| 107 | { | 111 | { |
| 108 | unsigned int op = rq->cmd_flags; | 112 | unsigned int op = rq->cmd_flags; |
| @@ -385,6 +389,9 @@ static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) | |||
| 385 | 389 | ||
| 386 | for (i = 0; i < KYBER_NUM_DOMAINS; i++) { | 390 | for (i = 0; i < KYBER_NUM_DOMAINS; i++) { |
| 387 | INIT_LIST_HEAD(&khd->rqs[i]); | 391 | INIT_LIST_HEAD(&khd->rqs[i]); |
| 392 | init_waitqueue_func_entry(&khd->domain_wait[i], | ||
| 393 | kyber_domain_wake); | ||
| 394 | khd->domain_wait[i].private = hctx; | ||
| 388 | INIT_LIST_HEAD(&khd->domain_wait[i].entry); | 395 | INIT_LIST_HEAD(&khd->domain_wait[i].entry); |
| 389 | atomic_set(&khd->wait_index[i], 0); | 396 | atomic_set(&khd->wait_index[i], 0); |
| 390 | } | 397 | } |
| @@ -524,35 +531,39 @@ static int kyber_get_domain_token(struct kyber_queue_data *kqd, | |||
| 524 | int nr; | 531 | int nr; |
| 525 | 532 | ||
| 526 | nr = __sbitmap_queue_get(domain_tokens); | 533 | nr = __sbitmap_queue_get(domain_tokens); |
| 527 | if (nr >= 0) | ||
| 528 | return nr; | ||
| 529 | 534 | ||
| 530 | /* | 535 | /* |
| 531 | * If we failed to get a domain token, make sure the hardware queue is | 536 | * If we failed to get a domain token, make sure the hardware queue is |
| 532 | * run when one becomes available. Note that this is serialized on | 537 | * run when one becomes available. Note that this is serialized on |
| 533 | * khd->lock, but we still need to be careful about the waker. | 538 | * khd->lock, but we still need to be careful about the waker. |
| 534 | */ | 539 | */ |
| 535 | if (list_empty_careful(&wait->entry)) { | 540 | if (nr < 0 && list_empty_careful(&wait->entry)) { |
| 536 | init_waitqueue_func_entry(wait, kyber_domain_wake); | ||
| 537 | wait->private = hctx; | ||
| 538 | ws = sbq_wait_ptr(domain_tokens, | 541 | ws = sbq_wait_ptr(domain_tokens, |
| 539 | &khd->wait_index[sched_domain]); | 542 | &khd->wait_index[sched_domain]); |
| 543 | khd->domain_ws[sched_domain] = ws; | ||
| 540 | add_wait_queue(&ws->wait, wait); | 544 | add_wait_queue(&ws->wait, wait); |
| 541 | 545 | ||
| 542 | /* | 546 | /* |
| 543 | * Try again in case a token was freed before we got on the wait | 547 | * Try again in case a token was freed before we got on the wait |
| 544 | * queue. The waker may have already removed the entry from the | 548 | * queue. |
| 545 | * wait queue, but list_del_init() is okay with that. | ||
| 546 | */ | 549 | */ |
| 547 | nr = __sbitmap_queue_get(domain_tokens); | 550 | nr = __sbitmap_queue_get(domain_tokens); |
| 548 | if (nr >= 0) { | 551 | } |
| 549 | unsigned long flags; | ||
| 550 | 552 | ||
| 551 | spin_lock_irqsave(&ws->wait.lock, flags); | 553 | /* |
| 552 | list_del_init(&wait->entry); | 554 | * If we got a token while we were on the wait queue, remove ourselves |
| 553 | spin_unlock_irqrestore(&ws->wait.lock, flags); | 555 | * from the wait queue to ensure that all wake ups make forward |
| 554 | } | 556 | * progress. It's possible that the waker already deleted the entry |
| 557 | * between the !list_empty_careful() check and us grabbing the lock, but | ||
| 558 | * list_del_init() is okay with that. | ||
| 559 | */ | ||
| 560 | if (nr >= 0 && !list_empty_careful(&wait->entry)) { | ||
| 561 | ws = khd->domain_ws[sched_domain]; | ||
| 562 | spin_lock_irq(&ws->wait.lock); | ||
| 563 | list_del_init(&wait->entry); | ||
| 564 | spin_unlock_irq(&ws->wait.lock); | ||
| 555 | } | 565 | } |
| 566 | |||
| 556 | return nr; | 567 | return nr; |
| 557 | } | 568 | } |
| 558 | 569 | ||
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index ccb9975a97fa..ad0477ae820f 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c | |||
| @@ -35,13 +35,13 @@ static inline u64 mb_per_tick(int mbps) | |||
| 35 | struct nullb_cmd { | 35 | struct nullb_cmd { |
| 36 | struct list_head list; | 36 | struct list_head list; |
| 37 | struct llist_node ll_list; | 37 | struct llist_node ll_list; |
| 38 | call_single_data_t csd; | 38 | struct __call_single_data csd; |
| 39 | struct request *rq; | 39 | struct request *rq; |
| 40 | struct bio *bio; | 40 | struct bio *bio; |
| 41 | unsigned int tag; | 41 | unsigned int tag; |
| 42 | blk_status_t error; | ||
| 42 | struct nullb_queue *nq; | 43 | struct nullb_queue *nq; |
| 43 | struct hrtimer timer; | 44 | struct hrtimer timer; |
| 44 | blk_status_t error; | ||
| 45 | }; | 45 | }; |
| 46 | 46 | ||
| 47 | struct nullb_queue { | 47 | struct nullb_queue { |
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index f837d666cbd4..1e46e60b8f10 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c | |||
| @@ -1287,7 +1287,7 @@ static void nvme_config_discard(struct nvme_ctrl *ctrl, | |||
| 1287 | BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) < | 1287 | BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) < |
| 1288 | NVME_DSM_MAX_RANGES); | 1288 | NVME_DSM_MAX_RANGES); |
| 1289 | 1289 | ||
| 1290 | queue->limits.discard_alignment = size; | 1290 | queue->limits.discard_alignment = 0; |
| 1291 | queue->limits.discard_granularity = size; | 1291 | queue->limits.discard_granularity = size; |
| 1292 | 1292 | ||
| 1293 | blk_queue_max_discard_sectors(queue, UINT_MAX); | 1293 | blk_queue_max_discard_sectors(queue, UINT_MAX); |
| @@ -1705,7 +1705,8 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl, | |||
| 1705 | blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors); | 1705 | blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors); |
| 1706 | blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX)); | 1706 | blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX)); |
| 1707 | } | 1707 | } |
| 1708 | if (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) | 1708 | if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && |
| 1709 | is_power_of_2(ctrl->max_hw_sectors)) | ||
| 1709 | blk_queue_chunk_sectors(q, ctrl->max_hw_sectors); | 1710 | blk_queue_chunk_sectors(q, ctrl->max_hw_sectors); |
| 1710 | blk_queue_virt_boundary(q, ctrl->page_size - 1); | 1711 | blk_queue_virt_boundary(q, ctrl->page_size - 1); |
| 1711 | if (ctrl->vwc & NVME_CTRL_VWC_PRESENT) | 1712 | if (ctrl->vwc & NVME_CTRL_VWC_PRESENT) |
| @@ -2869,7 +2870,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) | |||
| 2869 | 2870 | ||
| 2870 | blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift); | 2871 | blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift); |
| 2871 | nvme_set_queue_limits(ctrl, ns->queue); | 2872 | nvme_set_queue_limits(ctrl, ns->queue); |
| 2872 | nvme_setup_streams_ns(ctrl, ns); | ||
| 2873 | 2873 | ||
| 2874 | id = nvme_identify_ns(ctrl, nsid); | 2874 | id = nvme_identify_ns(ctrl, nsid); |
| 2875 | if (!id) | 2875 | if (!id) |
| @@ -2880,6 +2880,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) | |||
| 2880 | 2880 | ||
| 2881 | if (nvme_init_ns_head(ns, nsid, id, &new)) | 2881 | if (nvme_init_ns_head(ns, nsid, id, &new)) |
| 2882 | goto out_free_id; | 2882 | goto out_free_id; |
| 2883 | nvme_setup_streams_ns(ctrl, ns); | ||
| 2883 | 2884 | ||
| 2884 | #ifdef CONFIG_NVME_MULTIPATH | 2885 | #ifdef CONFIG_NVME_MULTIPATH |
| 2885 | /* | 2886 | /* |
| @@ -2965,8 +2966,6 @@ static void nvme_ns_remove(struct nvme_ns *ns) | |||
| 2965 | return; | 2966 | return; |
| 2966 | 2967 | ||
| 2967 | if (ns->disk && ns->disk->flags & GENHD_FL_UP) { | 2968 | if (ns->disk && ns->disk->flags & GENHD_FL_UP) { |
| 2968 | if (blk_get_integrity(ns->disk)) | ||
| 2969 | blk_integrity_unregister(ns->disk); | ||
| 2970 | nvme_mpath_remove_disk_links(ns); | 2969 | nvme_mpath_remove_disk_links(ns); |
| 2971 | sysfs_remove_group(&disk_to_dev(ns->disk)->kobj, | 2970 | sysfs_remove_group(&disk_to_dev(ns->disk)->kobj, |
| 2972 | &nvme_ns_id_attr_group); | 2971 | &nvme_ns_id_attr_group); |
| @@ -2974,6 +2973,8 @@ static void nvme_ns_remove(struct nvme_ns *ns) | |||
| 2974 | nvme_nvm_unregister_sysfs(ns); | 2973 | nvme_nvm_unregister_sysfs(ns); |
| 2975 | del_gendisk(ns->disk); | 2974 | del_gendisk(ns->disk); |
| 2976 | blk_cleanup_queue(ns->queue); | 2975 | blk_cleanup_queue(ns->queue); |
| 2976 | if (blk_get_integrity(ns->disk)) | ||
| 2977 | blk_integrity_unregister(ns->disk); | ||
| 2977 | } | 2978 | } |
| 2978 | 2979 | ||
| 2979 | mutex_lock(&ns->ctrl->subsys->lock); | 2980 | mutex_lock(&ns->ctrl->subsys->lock); |
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index 0a8af4daef89..794e66e4aa20 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c | |||
| @@ -3221,7 +3221,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, | |||
| 3221 | 3221 | ||
| 3222 | /* initiate nvme ctrl ref counting teardown */ | 3222 | /* initiate nvme ctrl ref counting teardown */ |
| 3223 | nvme_uninit_ctrl(&ctrl->ctrl); | 3223 | nvme_uninit_ctrl(&ctrl->ctrl); |
| 3224 | nvme_put_ctrl(&ctrl->ctrl); | ||
| 3225 | 3224 | ||
| 3226 | /* Remove core ctrl ref. */ | 3225 | /* Remove core ctrl ref. */ |
| 3227 | nvme_put_ctrl(&ctrl->ctrl); | 3226 | nvme_put_ctrl(&ctrl->ctrl); |
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index a4f28b7e4c65..e18877177f1b 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c | |||
| @@ -1576,7 +1576,9 @@ static struct request *_make_request(struct request_queue *q, bool has_write, | |||
| 1576 | return req; | 1576 | return req; |
| 1577 | 1577 | ||
| 1578 | for_each_bio(bio) { | 1578 | for_each_bio(bio) { |
| 1579 | ret = blk_rq_append_bio(req, bio); | 1579 | struct bio *bounce_bio = bio; |
| 1580 | |||
| 1581 | ret = blk_rq_append_bio(req, &bounce_bio); | ||
| 1580 | if (ret) | 1582 | if (ret) |
| 1581 | return ERR_PTR(ret); | 1583 | return ERR_PTR(ret); |
| 1582 | } | 1584 | } |
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 7c69b4a9694d..0d99b242e82e 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c | |||
| @@ -920,7 +920,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, | |||
| 920 | " %d i: %d bio: %p, allocating another" | 920 | " %d i: %d bio: %p, allocating another" |
| 921 | " bio\n", bio->bi_vcnt, i, bio); | 921 | " bio\n", bio->bi_vcnt, i, bio); |
| 922 | 922 | ||
| 923 | rc = blk_rq_append_bio(req, bio); | 923 | rc = blk_rq_append_bio(req, &bio); |
| 924 | if (rc) { | 924 | if (rc) { |
| 925 | pr_err("pSCSI: failed to append bio\n"); | 925 | pr_err("pSCSI: failed to append bio\n"); |
| 926 | goto fail; | 926 | goto fail; |
| @@ -938,7 +938,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, | |||
| 938 | } | 938 | } |
| 939 | 939 | ||
| 940 | if (bio) { | 940 | if (bio) { |
| 941 | rc = blk_rq_append_bio(req, bio); | 941 | rc = blk_rq_append_bio(req, &bio); |
| 942 | if (rc) { | 942 | if (rc) { |
| 943 | pr_err("pSCSI: failed to append bio\n"); | 943 | pr_err("pSCSI: failed to append bio\n"); |
| 944 | goto fail; | 944 | goto fail; |
diff --git a/include/linux/bio.h b/include/linux/bio.h index 82f0c8fd7be8..23d29b39f71e 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h | |||
| @@ -492,6 +492,8 @@ extern unsigned int bvec_nr_vecs(unsigned short idx); | |||
| 492 | 492 | ||
| 493 | #define bio_set_dev(bio, bdev) \ | 493 | #define bio_set_dev(bio, bdev) \ |
| 494 | do { \ | 494 | do { \ |
| 495 | if ((bio)->bi_disk != (bdev)->bd_disk) \ | ||
| 496 | bio_clear_flag(bio, BIO_THROTTLED);\ | ||
| 495 | (bio)->bi_disk = (bdev)->bd_disk; \ | 497 | (bio)->bi_disk = (bdev)->bd_disk; \ |
| 496 | (bio)->bi_partno = (bdev)->bd_partno; \ | 498 | (bio)->bi_partno = (bdev)->bd_partno; \ |
| 497 | } while (0) | 499 | } while (0) |
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index a1e628e032da..9e7d8bd776d2 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h | |||
| @@ -50,8 +50,6 @@ struct blk_issue_stat { | |||
| 50 | struct bio { | 50 | struct bio { |
| 51 | struct bio *bi_next; /* request queue link */ | 51 | struct bio *bi_next; /* request queue link */ |
| 52 | struct gendisk *bi_disk; | 52 | struct gendisk *bi_disk; |
| 53 | u8 bi_partno; | ||
| 54 | blk_status_t bi_status; | ||
| 55 | unsigned int bi_opf; /* bottom bits req flags, | 53 | unsigned int bi_opf; /* bottom bits req flags, |
| 56 | * top bits REQ_OP. Use | 54 | * top bits REQ_OP. Use |
| 57 | * accessors. | 55 | * accessors. |
| @@ -59,8 +57,8 @@ struct bio { | |||
| 59 | unsigned short bi_flags; /* status, etc and bvec pool number */ | 57 | unsigned short bi_flags; /* status, etc and bvec pool number */ |
| 60 | unsigned short bi_ioprio; | 58 | unsigned short bi_ioprio; |
| 61 | unsigned short bi_write_hint; | 59 | unsigned short bi_write_hint; |
| 62 | 60 | blk_status_t bi_status; | |
| 63 | struct bvec_iter bi_iter; | 61 | u8 bi_partno; |
| 64 | 62 | ||
| 65 | /* Number of segments in this BIO after | 63 | /* Number of segments in this BIO after |
| 66 | * physical address coalescing is performed. | 64 | * physical address coalescing is performed. |
| @@ -74,8 +72,9 @@ struct bio { | |||
| 74 | unsigned int bi_seg_front_size; | 72 | unsigned int bi_seg_front_size; |
| 75 | unsigned int bi_seg_back_size; | 73 | unsigned int bi_seg_back_size; |
| 76 | 74 | ||
| 77 | atomic_t __bi_remaining; | 75 | struct bvec_iter bi_iter; |
| 78 | 76 | ||
| 77 | atomic_t __bi_remaining; | ||
| 79 | bio_end_io_t *bi_end_io; | 78 | bio_end_io_t *bi_end_io; |
| 80 | 79 | ||
| 81 | void *bi_private; | 80 | void *bi_private; |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 8089ca17db9a..0ce8a372d506 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
| @@ -135,7 +135,7 @@ typedef __u32 __bitwise req_flags_t; | |||
| 135 | struct request { | 135 | struct request { |
| 136 | struct list_head queuelist; | 136 | struct list_head queuelist; |
| 137 | union { | 137 | union { |
| 138 | call_single_data_t csd; | 138 | struct __call_single_data csd; |
| 139 | u64 fifo_time; | 139 | u64 fifo_time; |
| 140 | }; | 140 | }; |
| 141 | 141 | ||
| @@ -241,14 +241,24 @@ struct request { | |||
| 241 | struct request *next_rq; | 241 | struct request *next_rq; |
| 242 | }; | 242 | }; |
| 243 | 243 | ||
| 244 | static inline bool blk_op_is_scsi(unsigned int op) | ||
| 245 | { | ||
| 246 | return op == REQ_OP_SCSI_IN || op == REQ_OP_SCSI_OUT; | ||
| 247 | } | ||
| 248 | |||
| 249 | static inline bool blk_op_is_private(unsigned int op) | ||
| 250 | { | ||
| 251 | return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT; | ||
| 252 | } | ||
| 253 | |||
| 244 | static inline bool blk_rq_is_scsi(struct request *rq) | 254 | static inline bool blk_rq_is_scsi(struct request *rq) |
| 245 | { | 255 | { |
| 246 | return req_op(rq) == REQ_OP_SCSI_IN || req_op(rq) == REQ_OP_SCSI_OUT; | 256 | return blk_op_is_scsi(req_op(rq)); |
| 247 | } | 257 | } |
| 248 | 258 | ||
| 249 | static inline bool blk_rq_is_private(struct request *rq) | 259 | static inline bool blk_rq_is_private(struct request *rq) |
| 250 | { | 260 | { |
| 251 | return req_op(rq) == REQ_OP_DRV_IN || req_op(rq) == REQ_OP_DRV_OUT; | 261 | return blk_op_is_private(req_op(rq)); |
| 252 | } | 262 | } |
| 253 | 263 | ||
| 254 | static inline bool blk_rq_is_passthrough(struct request *rq) | 264 | static inline bool blk_rq_is_passthrough(struct request *rq) |
| @@ -256,6 +266,13 @@ static inline bool blk_rq_is_passthrough(struct request *rq) | |||
| 256 | return blk_rq_is_scsi(rq) || blk_rq_is_private(rq); | 266 | return blk_rq_is_scsi(rq) || blk_rq_is_private(rq); |
| 257 | } | 267 | } |
| 258 | 268 | ||
| 269 | static inline bool bio_is_passthrough(struct bio *bio) | ||
| 270 | { | ||
| 271 | unsigned op = bio_op(bio); | ||
| 272 | |||
| 273 | return blk_op_is_scsi(op) || blk_op_is_private(op); | ||
| 274 | } | ||
| 275 | |||
| 259 | static inline unsigned short req_get_ioprio(struct request *req) | 276 | static inline unsigned short req_get_ioprio(struct request *req) |
| 260 | { | 277 | { |
| 261 | return req->ioprio; | 278 | return req->ioprio; |
| @@ -948,7 +965,7 @@ extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, | |||
| 948 | extern void blk_rq_unprep_clone(struct request *rq); | 965 | extern void blk_rq_unprep_clone(struct request *rq); |
| 949 | extern blk_status_t blk_insert_cloned_request(struct request_queue *q, | 966 | extern blk_status_t blk_insert_cloned_request(struct request_queue *q, |
| 950 | struct request *rq); | 967 | struct request *rq); |
| 951 | extern int blk_rq_append_bio(struct request *rq, struct bio *bio); | 968 | extern int blk_rq_append_bio(struct request *rq, struct bio **bio); |
| 952 | extern void blk_delay_queue(struct request_queue *, unsigned long); | 969 | extern void blk_delay_queue(struct request_queue *, unsigned long); |
| 953 | extern void blk_queue_split(struct request_queue *, struct bio **); | 970 | extern void blk_queue_split(struct request_queue *, struct bio **); |
| 954 | extern void blk_recount_segments(struct request_queue *, struct bio *); | 971 | extern void blk_recount_segments(struct request_queue *, struct bio *); |
diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 84b2dc76f140..b5f940ce0143 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c | |||
| @@ -882,13 +882,10 @@ int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args) | |||
| 882 | if (IS_ERR(dev)) | 882 | if (IS_ERR(dev)) |
| 883 | return PTR_ERR(dev); | 883 | return PTR_ERR(dev); |
| 884 | 884 | ||
| 885 | if (bdi_debug_register(bdi, dev_name(dev))) { | ||
| 886 | device_destroy(bdi_class, dev->devt); | ||
| 887 | return -ENOMEM; | ||
| 888 | } | ||
| 889 | cgwb_bdi_register(bdi); | 885 | cgwb_bdi_register(bdi); |
| 890 | bdi->dev = dev; | 886 | bdi->dev = dev; |
| 891 | 887 | ||
| 888 | bdi_debug_register(bdi, dev_name(dev)); | ||
| 892 | set_bit(WB_registered, &bdi->wb.state); | 889 | set_bit(WB_registered, &bdi->wb.state); |
| 893 | 890 | ||
| 894 | spin_lock_bh(&bdi_lock); | 891 | spin_lock_bh(&bdi_lock); |
