| author | Linus Torvalds <torvalds@linux-foundation.org> | 2019-11-08 21:15:55 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-11-08 21:15:55 -0500 |
| commit | 5cb8418cb533222709f362d264653a634eb8c7ac (patch) | |
| tree | a99c00061c4e937b3f813a42a3225611fd75ac56 | |
| parent | abf6c39796f9cccd0d258d05f2fa39a8c77eabc2 (diff) | |
| parent | 65de03e251382306a4575b1779c57c87889eee49 (diff) | |
Merge tag 'for-linus-2019-11-08' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
- Two NVMe device removal crash fixes, and a compat fixup for an
ioctl that was introduced in this release (Anton, Charles, Max - via
Keith)
- Missing error path mutex unlock for drbd (Dan)
- cgroup writeback fixup on dead memcg (Tejun)
- blkcg online stats print fix (Tejun)
* tag 'for-linus-2019-11-08' of git://git.kernel.dk/linux-block:
cgroup,writeback: don't switch wbs immediately on dead wbs if the memcg is dead
block: drbd: remove a stray unlock in __drbd_send_protocol()
blkcg: make blkcg_print_stat() print stats only for online blkgs
nvme: change nvme_passthru_cmd64 to explicitly mark rsvd
nvme-multipath: fix crash in nvme_mpath_clear_ctrl_paths
nvme-rdma: fix a segmentation fault during module unload
```
-rw-r--r--  block/blk-cgroup.c              | 13
-rw-r--r--  drivers/block/drbd/drbd_main.c  |  1
-rw-r--r--  drivers/nvme/host/multipath.c   |  2
-rw-r--r--  drivers/nvme/host/rdma.c        |  8
-rw-r--r--  fs/fs-writeback.c               |  9
-rw-r--r--  include/uapi/linux/nvme_ioctl.h |  1
6 files changed, 25 insertions, 9 deletions
```
```diff
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 5d21027b1faf..1eb8895be4c6 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -934,9 +934,14 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
                 int i;
                 bool has_stats = false;
 
+                spin_lock_irq(&blkg->q->queue_lock);
+
+                if (!blkg->online)
+                        goto skip;
+
                 dname = blkg_dev_name(blkg);
                 if (!dname)
-                        continue;
+                        goto skip;
 
                 /*
                  * Hooray string manipulation, count is the size written NOT
@@ -946,8 +951,6 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
                  */
                 off += scnprintf(buf+off, size-off, "%s ", dname);
 
-                spin_lock_irq(&blkg->q->queue_lock);
-
                 blkg_rwstat_recursive_sum(blkg, NULL,
                                 offsetof(struct blkcg_gq, stat_bytes), &rwstat);
                 rbytes = rwstat.cnt[BLKG_RWSTAT_READ];
@@ -960,8 +963,6 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
                 wios = rwstat.cnt[BLKG_RWSTAT_WRITE];
                 dios = rwstat.cnt[BLKG_RWSTAT_DISCARD];
 
-                spin_unlock_irq(&blkg->q->queue_lock);
-
                 if (rbytes || wbytes || rios || wios) {
                         has_stats = true;
                         off += scnprintf(buf+off, size-off,
@@ -999,6 +1000,8 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
                                         seq_commit(sf, -1);
                                 }
                         }
+        skip:
+                spin_unlock_irq(&blkg->q->queue_lock);
         }
 
         rcu_read_unlock();
```
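The net effect of the hunk above is that the queue lock is taken once per blkg before anything is read, and that offline blkgs are skipped entirely. Below is a hypothetical userspace sketch of that shape (lock, liveness check, reads, single unlock); the struct and field names are invented for illustration and are not the kernel's blkcg structures.

```c
/* Sketch only: stats are reported only while the group is online, and the
 * online check and the stat reads happen under one lock. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct group_stats {
        pthread_mutex_t lock;
        bool online;
        unsigned long long rbytes, wbytes;
};

static struct group_stats g = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .online = true, .rbytes = 4096, .wbytes = 8192,
};

static void print_group_stats(struct group_stats *gs)
{
        pthread_mutex_lock(&gs->lock);

        if (!gs->online)
                goto skip;      /* offline groups are not reported at all */

        printf("rbytes=%llu wbytes=%llu\n", gs->rbytes, gs->wbytes);
skip:
        pthread_mutex_unlock(&gs->lock);
}

int main(void)
{
        print_group_stats(&g);
        g.online = false;       /* simulate the group going offline */
        print_group_stats(&g);  /* prints nothing */
        return 0;
}
```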
```diff
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 5b248763a672..a18155cdce41 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -786,7 +786,6 @@ int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cm
 
         if (nc->tentative && connection->agreed_pro_version < 92) {
                 rcu_read_unlock();
-                mutex_unlock(&sock->mutex);
                 drbd_err(connection, "--dry-run is not supported by peer");
                 return -EOPNOTSUPP;
         }
```
```diff
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index fc99a40c1ec4..e0f064dcbd02 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -158,9 +158,11 @@ void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
         struct nvme_ns *ns;
 
         mutex_lock(&ctrl->scan_lock);
+        down_read(&ctrl->namespaces_rwsem);
         list_for_each_entry(ns, &ctrl->namespaces, list)
                 if (nvme_mpath_clear_current_path(ns))
                         kblockd_schedule_work(&ns->head->requeue_work);
+        up_read(&ctrl->namespaces_rwsem);
         mutex_unlock(&ctrl->scan_lock);
 }
 
```
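The two added lines take the reader side of the namespaces rwsem, so the list walk cannot race with namespace addition or removal. As a rough userspace analogue, a reader/writer lock around the traversal looks like the sketch below; the names and the plain array standing in for the namespace list are made up for the example.

```c
/* Sketch only: walk a shared namespace list under the read lock so a
 * concurrent writer (add/remove) cannot change it mid-traversal. */
#include <pthread.h>
#include <stdio.h>

#define NR_NS 3

static pthread_mutex_t scan_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_rwlock_t namespaces_rwsem = PTHREAD_RWLOCK_INITIALIZER;
static int namespaces[NR_NS] = { 1, 2, 3 };     /* stand-in for the ns list */

static void clear_paths(void)
{
        pthread_mutex_lock(&scan_lock);
        pthread_rwlock_rdlock(&namespaces_rwsem);   /* walk can't race removal */

        for (int i = 0; i < NR_NS; i++)
                printf("clearing current path for ns %d\n", namespaces[i]);

        pthread_rwlock_unlock(&namespaces_rwsem);
        pthread_mutex_unlock(&scan_lock);
}

int main(void)
{
        clear_paths();
        return 0;
}
```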
```diff
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index f19a28b4e997..cb4c3000a57e 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -2133,8 +2133,16 @@ err_unreg_client:
 
 static void __exit nvme_rdma_cleanup_module(void)
 {
+        struct nvme_rdma_ctrl *ctrl;
+
         nvmf_unregister_transport(&nvme_rdma_transport);
         ib_unregister_client(&nvme_rdma_ib_client);
+
+        mutex_lock(&nvme_rdma_ctrl_mutex);
+        list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list)
+                nvme_delete_ctrl(&ctrl->ctrl);
+        mutex_unlock(&nvme_rdma_ctrl_mutex);
+        flush_workqueue(nvme_delete_wq);
 }
 
 module_init(nvme_rdma_init_module);
```
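The added teardown code deletes any controllers still on the transport's list and then flushes the delete workqueue, so no deletion work can run after the module has been unloaded. A hypothetical userspace sketch of that ordering, with threads and pthread_join standing in for work items and flush_workqueue(), and with all names invented for the example, might look like this:

```c
/* Sketch only: stop new controllers first, schedule deletion of whatever is
 * still on the list, then wait for those deletions before returning. */
#include <pthread.h>
#include <stdio.h>

#define NR_CTRL 2

struct ctrl { int id; pthread_t delete_work; };

static struct ctrl ctrl_list[NR_CTRL] = { { .id = 0 }, { .id = 1 } };
static pthread_mutex_t ctrl_list_lock = PTHREAD_MUTEX_INITIALIZER;

static void *delete_ctrl_work(void *arg)
{
        struct ctrl *c = arg;
        printf("tearing down ctrl %d\n", c->id);
        return NULL;
}

static void cleanup_module_sketch(void)
{
        /* 1. stop new controllers from appearing (unregister the transport) */

        /* 2. schedule deletion of everything still on the list */
        pthread_mutex_lock(&ctrl_list_lock);
        for (int i = 0; i < NR_CTRL; i++)
                pthread_create(&ctrl_list[i].delete_work, NULL,
                               delete_ctrl_work, &ctrl_list[i]);
        pthread_mutex_unlock(&ctrl_list_lock);

        /* 3. "flush the workqueue": don't return while deletions still run,
         *    otherwise their code would execute after the module is gone */
        for (int i = 0; i < NR_CTRL; i++)
                pthread_join(ctrl_list[i].delete_work, NULL);
}

int main(void)
{
        cleanup_module_sketch();
        return 0;
}
```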
```diff
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 8461a6322039..335607b8c5c0 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -576,10 +576,13 @@ void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
         spin_unlock(&inode->i_lock);
 
         /*
-         * A dying wb indicates that the memcg-blkcg mapping has changed
-         * and a new wb is already serving the memcg. Switch immediately.
+         * A dying wb indicates that either the blkcg associated with the
+         * memcg changed or the associated memcg is dying. In the first
+         * case, a replacement wb should already be available and we should
+         * refresh the wb immediately. In the second case, trying to
+         * refresh will keep failing.
          */
-        if (unlikely(wb_dying(wbc->wb)))
+        if (unlikely(wb_dying(wbc->wb) && !css_is_dying(wbc->wb->memcg_css)))
                 inode_switch_wbs(inode, wbc->wb_id);
 }
 EXPORT_SYMBOL_GPL(wbc_attach_and_unlock_inode);
```
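The new condition switches an inode away from a dying wb only when the owning memcg is still alive, since a dead memcg will never get a replacement wb and the switch would otherwise be retried on every writeback. A tiny standalone sketch of that guard, with stand-in types rather than the kernel's, is shown below:

```c
/* Sketch only: retry the writeback-structure switch only when a live
 * replacement can actually exist. */
#include <stdbool.h>
#include <stdio.h>

struct memcg { bool dying; };
struct wb    { bool dying; struct memcg *memcg; };

/* Decide whether attaching to this wb should trigger a switch. */
static bool should_switch_wb(const struct wb *wb)
{
        return wb->dying && !wb->memcg->dying;
}

int main(void)
{
        struct memcg live = { .dying = false }, dead = { .dying = true };
        struct wb wb_replaced = { .dying = true, .memcg = &live };
        struct wb wb_orphaned = { .dying = true, .memcg = &dead };

        printf("dying wb, live memcg -> switch: %d\n",
               should_switch_wb(&wb_replaced));   /* 1: a new wb exists */
        printf("dying wb, dead memcg -> switch: %d\n",
               should_switch_wb(&wb_orphaned));   /* 0: nothing to switch to */
        return 0;
}
```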
```diff
diff --git a/include/uapi/linux/nvme_ioctl.h b/include/uapi/linux/nvme_ioctl.h
index e168dc59e9a0..d99b5a772698 100644
--- a/include/uapi/linux/nvme_ioctl.h
+++ b/include/uapi/linux/nvme_ioctl.h
@@ -63,6 +63,7 @@ struct nvme_passthru_cmd64 {
         __u32   cdw14;
         __u32   cdw15;
         __u32   timeout_ms;
+        __u32   rsvd2;
         __u64   result;
 };
 
```
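Making the reserved field explicit matters because, without it, the compiler pads the struct differently across ABIs: __u64 needs 8-byte alignment on x86-64 but only 4-byte alignment on i386, so the offset of result differs between a 32-bit userspace and a 64-bit kernel. The following standalone sketch (a simplified tail of the struct, not the real UAPI header) demonstrates the difference with offsetof():

```c
/* Sketch only: why the implicit hole before the 64-bit result field breaks
 * 32-bit compat, and why naming it as rsvd2 makes both layouts agree. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct cmd64_implicit {         /* old layout: the hole is implicit */
        uint32_t timeout_ms;
        uint64_t result;
};

struct cmd64_explicit {         /* new layout: the hole is named */
        uint32_t timeout_ms;
        uint32_t rsvd2;
        uint64_t result;
};

int main(void)
{
        printf("implicit: offsetof(result) = %zu\n",
               offsetof(struct cmd64_implicit, result)); /* 8 on x86-64, 4 on i386 */
        printf("explicit: offsetof(result) = %zu\n",
               offsetof(struct cmd64_explicit, result)); /* 8 on both */
        return 0;
}
```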