author     Christoph Hellwig <hch@lst.de>   2015-11-28 09:40:19 -0500
committer  Jens Axboe <axboe@fb.com>        2015-12-01 12:59:40 -0500
commit     f3ca80fc11c3af566eacd99cf821c1a48035c63b (patch)
tree       1db6d155210c4f11a48a2285eb7691313fa73496
parent     5bae7f73d378a986671a3cad717c721b38f80d9e (diff)
nvme: move chardev and sysfs interface to common code
For this we need to add a proper controller init routine and a list of
all controllers, in addition to the list of PCIe controllers, which
stays in pci.c. Note that we now remove the sysfs device when the last
reference to a controller is dropped - the old code would have kept it
around longer, which doesn't make much sense.

This requires a new ->reset_ctrl operation to implement controller
resets, and uses the ->reg_write32 operation to implement subsystem
resets. We also now store cached copies of the NVMe compliance version
and of whether the controller is attached to a subsystem in the generic
controller structure.
Signed-off-by: Christoph Hellwig <hch@lst.de>
[Fixes for pr merge]
Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
-rw-r--r--  drivers/nvme/host/core.c | 215
-rw-r--r--  drivers/nvme/host/nvme.h |  18
-rw-r--r--  drivers/nvme/host/pci.c  | 195
-rw-r--r--  drivers/nvme/host/scsi.c |  13
4 files changed, 241 insertions, 200 deletions
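Before the per-file hunks, here is a minimal sketch (not part of the patch) of how a transport driver is expected to plug into the relocated common code, modeled on the PCIe changes below. The "foo" transport, its structure and all of its callbacks are hypothetical placeholders; only the nvme_ctrl_ops fields and the nvme_init_ctrl()/nvme_put_ctrl() entry points come from the patch itself:

```c
#include "nvme.h"

struct foo_ctrl {
	struct nvme_ctrl ctrl;		/* embedded generic controller */
	/* transport-specific state would go here */
};

static int foo_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
{
	/* read a 32-bit controller register over the transport */
	*val = 0;
	return 0;
}

static int foo_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
{
	/* write a 32-bit register; used e.g. for NSSR subsystem resets */
	return 0;
}

static int foo_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
{
	*val = 0;
	return 0;
}

static bool foo_io_incapable(struct nvme_ctrl *ctrl)
{
	return false;
}

static int foo_reset_ctrl(struct nvme_ctrl *ctrl)
{
	/* schedule or perform a transport-level controller reset */
	return 0;
}

static void foo_free_ctrl(struct nvme_ctrl *ctrl)
{
	/* release transport-specific resources */
}

static const struct nvme_ctrl_ops foo_ctrl_ops = {
	.reg_read32	= foo_reg_read32,
	.reg_write32	= foo_reg_write32,
	.reg_read64	= foo_reg_read64,
	.io_incapable	= foo_io_incapable,
	.reset_ctrl	= foo_reset_ctrl,	/* new operation added by this patch */
	.free_ctrl	= foo_free_ctrl,
};

static int foo_probe(struct device *dev, struct foo_ctrl *foo)
{
	int ret;

	/* allocates the instance number and creates the nvme%d char device */
	ret = nvme_init_ctrl(&foo->ctrl, dev, &foo_ctrl_ops, 0 /* quirks */);
	if (ret)
		return ret;

	/* ... transport setup, admin queue, nvme_init_identify(), ... */
	return 0;
}

static void foo_remove(struct foo_ctrl *foo)
{
	nvme_remove_namespaces(&foo->ctrl);
	/* dropping the last reference tears down chardev + sysfs in core.c */
	nvme_put_ctrl(&foo->ctrl);
}
```

The point of the split is that everything behind nvme_init_ctrl() - instance allocation, the nvme class device, the reset_controller sysfs attribute and the character device - now lives in core.c and is shared by any transport.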
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 1b8498434b49..9c7dfd1476a7 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -31,11 +31,19 @@ | |||
31 | 31 | ||
32 | #include "nvme.h" | 32 | #include "nvme.h" |
33 | 33 | ||
34 | #define NVME_MINORS (1U << MINORBITS) | ||
35 | |||
34 | static int nvme_major; | 36 | static int nvme_major; |
35 | module_param(nvme_major, int, 0); | 37 | module_param(nvme_major, int, 0); |
36 | 38 | ||
39 | static int nvme_char_major; | ||
40 | module_param(nvme_char_major, int, 0); | ||
41 | |||
42 | static LIST_HEAD(nvme_ctrl_list); | ||
37 | DEFINE_SPINLOCK(dev_list_lock); | 43 | DEFINE_SPINLOCK(dev_list_lock); |
38 | 44 | ||
45 | static struct class *nvme_class; | ||
46 | |||
39 | static void nvme_free_ns(struct kref *kref) | 47 | static void nvme_free_ns(struct kref *kref) |
40 | { | 48 | { |
41 | struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref); | 49 | struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref); |
@@ -367,7 +375,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) | |||
367 | metadata, meta_len, io.slba, NULL, 0); | 375 | metadata, meta_len, io.slba, NULL, 0); |
368 | } | 376 | } |
369 | 377 | ||
370 | int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns, | 378 | static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns, |
371 | struct nvme_passthru_cmd __user *ucmd) | 379 | struct nvme_passthru_cmd __user *ucmd) |
372 | { | 380 | { |
373 | struct nvme_passthru_cmd cmd; | 381 | struct nvme_passthru_cmd cmd; |
@@ -792,6 +800,12 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) | |||
792 | u64 cap; | 800 | u64 cap; |
793 | int ret, page_shift; | 801 | int ret, page_shift; |
794 | 802 | ||
803 | ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs); | ||
804 | if (ret) { | ||
805 | dev_err(ctrl->dev, "Reading VS failed (%d)\n", ret); | ||
806 | return ret; | ||
807 | } | ||
808 | |||
795 | ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &cap); | 809 | ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &cap); |
796 | if (ret) { | 810 | if (ret) { |
797 | dev_err(ctrl->dev, "Reading CAP failed (%d)\n", ret); | 811 | dev_err(ctrl->dev, "Reading CAP failed (%d)\n", ret); |
@@ -799,6 +813,9 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) | |||
799 | } | 813 | } |
800 | page_shift = NVME_CAP_MPSMIN(cap) + 12; | 814 | page_shift = NVME_CAP_MPSMIN(cap) + 12; |
801 | 815 | ||
816 | if (ctrl->vs >= NVME_VS(1, 1)) | ||
817 | ctrl->subsystem = NVME_CAP_NSSRC(cap); | ||
818 | |||
802 | ret = nvme_identify_ctrl(ctrl, &id); | 819 | ret = nvme_identify_ctrl(ctrl, &id); |
803 | if (ret) { | 820 | if (ret) { |
804 | dev_err(ctrl->dev, "Identify Controller failed (%d)\n", ret); | 821 | dev_err(ctrl->dev, "Identify Controller failed (%d)\n", ret); |
@@ -833,17 +850,84 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) | |||
833 | return 0; | 850 | return 0; |
834 | } | 851 | } |
835 | 852 | ||
836 | static void nvme_free_ctrl(struct kref *kref) | 853 | static int nvme_dev_open(struct inode *inode, struct file *file) |
837 | { | 854 | { |
838 | struct nvme_ctrl *ctrl = container_of(kref, struct nvme_ctrl, kref); | 855 | struct nvme_ctrl *ctrl; |
856 | int instance = iminor(inode); | ||
857 | int ret = -ENODEV; | ||
839 | 858 | ||
840 | ctrl->ops->free_ctrl(ctrl); | 859 | spin_lock(&dev_list_lock); |
860 | list_for_each_entry(ctrl, &nvme_ctrl_list, node) { | ||
861 | if (ctrl->instance != instance) | ||
862 | continue; | ||
863 | |||
864 | if (!ctrl->admin_q) { | ||
865 | ret = -EWOULDBLOCK; | ||
866 | break; | ||
867 | } | ||
868 | if (!kref_get_unless_zero(&ctrl->kref)) | ||
869 | break; | ||
870 | file->private_data = ctrl; | ||
871 | ret = 0; | ||
872 | break; | ||
873 | } | ||
874 | spin_unlock(&dev_list_lock); | ||
875 | |||
876 | return ret; | ||
841 | } | 877 | } |
842 | 878 | ||
843 | void nvme_put_ctrl(struct nvme_ctrl *ctrl) | 879 | static int nvme_dev_release(struct inode *inode, struct file *file) |
844 | { | 880 | { |
845 | kref_put(&ctrl->kref, nvme_free_ctrl); | 881 | nvme_put_ctrl(file->private_data); |
882 | return 0; | ||
883 | } | ||
884 | |||
885 | static long nvme_dev_ioctl(struct file *file, unsigned int cmd, | ||
886 | unsigned long arg) | ||
887 | { | ||
888 | struct nvme_ctrl *ctrl = file->private_data; | ||
889 | void __user *argp = (void __user *)arg; | ||
890 | struct nvme_ns *ns; | ||
891 | |||
892 | switch (cmd) { | ||
893 | case NVME_IOCTL_ADMIN_CMD: | ||
894 | return nvme_user_cmd(ctrl, NULL, argp); | ||
895 | case NVME_IOCTL_IO_CMD: | ||
896 | if (list_empty(&ctrl->namespaces)) | ||
897 | return -ENOTTY; | ||
898 | ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list); | ||
899 | return nvme_user_cmd(ctrl, ns, argp); | ||
900 | case NVME_IOCTL_RESET: | ||
901 | dev_warn(ctrl->dev, "resetting controller\n"); | ||
902 | return ctrl->ops->reset_ctrl(ctrl); | ||
903 | case NVME_IOCTL_SUBSYS_RESET: | ||
904 | return nvme_reset_subsystem(ctrl); | ||
905 | default: | ||
906 | return -ENOTTY; | ||
907 | } | ||
908 | } | ||
909 | |||
910 | static const struct file_operations nvme_dev_fops = { | ||
911 | .owner = THIS_MODULE, | ||
912 | .open = nvme_dev_open, | ||
913 | .release = nvme_dev_release, | ||
914 | .unlocked_ioctl = nvme_dev_ioctl, | ||
915 | .compat_ioctl = nvme_dev_ioctl, | ||
916 | }; | ||
917 | |||
918 | static ssize_t nvme_sysfs_reset(struct device *dev, | ||
919 | struct device_attribute *attr, const char *buf, | ||
920 | size_t count) | ||
921 | { | ||
922 | struct nvme_ctrl *ctrl = dev_get_drvdata(dev); | ||
923 | int ret; | ||
924 | |||
925 | ret = ctrl->ops->reset_ctrl(ctrl); | ||
926 | if (ret < 0) | ||
927 | return ret; | ||
928 | return count; | ||
846 | } | 929 | } |
930 | static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset); | ||
847 | 931 | ||
848 | static int ns_cmp(void *priv, struct list_head *a, struct list_head *b) | 932 | static int ns_cmp(void *priv, struct list_head *a, struct list_head *b) |
849 | { | 933 | { |
@@ -1009,6 +1093,104 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl) | |||
1009 | nvme_ns_remove(ns); | 1093 | nvme_ns_remove(ns); |
1010 | } | 1094 | } |
1011 | 1095 | ||
1096 | static DEFINE_IDA(nvme_instance_ida); | ||
1097 | |||
1098 | static int nvme_set_instance(struct nvme_ctrl *ctrl) | ||
1099 | { | ||
1100 | int instance, error; | ||
1101 | |||
1102 | do { | ||
1103 | if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL)) | ||
1104 | return -ENODEV; | ||
1105 | |||
1106 | spin_lock(&dev_list_lock); | ||
1107 | error = ida_get_new(&nvme_instance_ida, &instance); | ||
1108 | spin_unlock(&dev_list_lock); | ||
1109 | } while (error == -EAGAIN); | ||
1110 | |||
1111 | if (error) | ||
1112 | return -ENODEV; | ||
1113 | |||
1114 | ctrl->instance = instance; | ||
1115 | return 0; | ||
1116 | } | ||
1117 | |||
1118 | static void nvme_release_instance(struct nvme_ctrl *ctrl) | ||
1119 | { | ||
1120 | spin_lock(&dev_list_lock); | ||
1121 | ida_remove(&nvme_instance_ida, ctrl->instance); | ||
1122 | spin_unlock(&dev_list_lock); | ||
1123 | } | ||
1124 | |||
1125 | static void nvme_free_ctrl(struct kref *kref) | ||
1126 | { | ||
1127 | struct nvme_ctrl *ctrl = container_of(kref, struct nvme_ctrl, kref); | ||
1128 | |||
1129 | spin_lock(&dev_list_lock); | ||
1130 | list_del(&ctrl->node); | ||
1131 | spin_unlock(&dev_list_lock); | ||
1132 | |||
1133 | put_device(ctrl->device); | ||
1134 | nvme_release_instance(ctrl); | ||
1135 | device_destroy(nvme_class, MKDEV(nvme_char_major, ctrl->instance)); | ||
1136 | |||
1137 | ctrl->ops->free_ctrl(ctrl); | ||
1138 | } | ||
1139 | |||
1140 | void nvme_put_ctrl(struct nvme_ctrl *ctrl) | ||
1141 | { | ||
1142 | kref_put(&ctrl->kref, nvme_free_ctrl); | ||
1143 | } | ||
1144 | |||
1145 | /* | ||
1146 | * Initialize a NVMe controller structures. This needs to be called during | ||
1147 | * earliest initialization so that we have the initialized structured around | ||
1148 | * during probing. | ||
1149 | */ | ||
1150 | int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, | ||
1151 | const struct nvme_ctrl_ops *ops, unsigned long quirks) | ||
1152 | { | ||
1153 | int ret; | ||
1154 | |||
1155 | INIT_LIST_HEAD(&ctrl->namespaces); | ||
1156 | kref_init(&ctrl->kref); | ||
1157 | ctrl->dev = dev; | ||
1158 | ctrl->ops = ops; | ||
1159 | ctrl->quirks = quirks; | ||
1160 | |||
1161 | ret = nvme_set_instance(ctrl); | ||
1162 | if (ret) | ||
1163 | goto out; | ||
1164 | |||
1165 | ctrl->device = device_create(nvme_class, ctrl->dev, | ||
1166 | MKDEV(nvme_char_major, ctrl->instance), | ||
1167 | dev, "nvme%d", ctrl->instance); | ||
1168 | if (IS_ERR(ctrl->device)) { | ||
1169 | ret = PTR_ERR(ctrl->device); | ||
1170 | goto out_release_instance; | ||
1171 | } | ||
1172 | get_device(ctrl->device); | ||
1173 | dev_set_drvdata(ctrl->device, ctrl); | ||
1174 | |||
1175 | ret = device_create_file(ctrl->device, &dev_attr_reset_controller); | ||
1176 | if (ret) | ||
1177 | goto out_put_device; | ||
1178 | |||
1179 | spin_lock(&dev_list_lock); | ||
1180 | list_add_tail(&ctrl->node, &nvme_ctrl_list); | ||
1181 | spin_unlock(&dev_list_lock); | ||
1182 | |||
1183 | return 0; | ||
1184 | |||
1185 | out_put_device: | ||
1186 | put_device(ctrl->device); | ||
1187 | device_destroy(nvme_class, MKDEV(nvme_char_major, ctrl->instance)); | ||
1188 | out_release_instance: | ||
1189 | nvme_release_instance(ctrl); | ||
1190 | out: | ||
1191 | return ret; | ||
1192 | } | ||
1193 | |||
1012 | int __init nvme_core_init(void) | 1194 | int __init nvme_core_init(void) |
1013 | { | 1195 | { |
1014 | int result; | 1196 | int result; |
@@ -1019,10 +1201,31 @@ int __init nvme_core_init(void) | |||
1019 | else if (result > 0) | 1201 | else if (result > 0) |
1020 | nvme_major = result; | 1202 | nvme_major = result; |
1021 | 1203 | ||
1204 | result = __register_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme", | ||
1205 | &nvme_dev_fops); | ||
1206 | if (result < 0) | ||
1207 | goto unregister_blkdev; | ||
1208 | else if (result > 0) | ||
1209 | nvme_char_major = result; | ||
1210 | |||
1211 | nvme_class = class_create(THIS_MODULE, "nvme"); | ||
1212 | if (IS_ERR(nvme_class)) { | ||
1213 | result = PTR_ERR(nvme_class); | ||
1214 | goto unregister_chrdev; | ||
1215 | } | ||
1216 | |||
1022 | return 0; | 1217 | return 0; |
1218 | |||
1219 | unregister_chrdev: | ||
1220 | __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme"); | ||
1221 | unregister_blkdev: | ||
1222 | unregister_blkdev(nvme_major, "nvme"); | ||
1223 | return result; | ||
1023 | } | 1224 | } |
1024 | 1225 | ||
1025 | void nvme_core_exit(void) | 1226 | void nvme_core_exit(void) |
1026 | { | 1227 | { |
1027 | unregister_blkdev(nvme_major, "nvme"); | 1228 | unregister_blkdev(nvme_major, "nvme"); |
1229 | class_destroy(nvme_class); | ||
1230 | __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme"); | ||
1028 | } | 1231 | } |
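The ioctl and sysfs handlers that now live in core.c (nvme_dev_ioctl() and nvme_sysfs_reset() above) are reached through the per-controller character device and class device created by nvme_init_ctrl(). As a purely illustrative userspace example (not part of the patch, assuming the first controller shows up as /dev/nvme0 and that NVME_IOCTL_RESET is available from <linux/nvme_ioctl.h>):

```c
/* Hypothetical userspace example, not part of the patch. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/nvme_ioctl.h>	/* NVME_IOCTL_RESET, NVME_IOCTL_SUBSYS_RESET */

int main(void)
{
	int fd = open("/dev/nvme0", O_RDWR);	/* char device created by nvme_init_ctrl() */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* ends up in nvme_dev_ioctl() -> ctrl->ops->reset_ctrl() in the driver */
	if (ioctl(fd, NVME_IOCTL_RESET) < 0) {
		perror("NVME_IOCTL_RESET");
		close(fd);
		return 1;
	}

	close(fd);
	return 0;
}
```

The same reset can also be triggered through the relocated reset_controller sysfs attribute, e.g. by writing to /sys/class/nvme/nvme0/reset_controller.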
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index dfedaaa2633b..93378be874e1 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -19,8 +19,6 @@ | |||
19 | #include <linux/kref.h> | 19 | #include <linux/kref.h> |
20 | #include <linux/blk-mq.h> | 20 | #include <linux/blk-mq.h> |
21 | 21 | ||
22 | struct nvme_passthru_cmd; | ||
23 | |||
24 | extern unsigned char nvme_io_timeout; | 22 | extern unsigned char nvme_io_timeout; |
25 | #define NVME_IO_TIMEOUT (nvme_io_timeout * HZ) | 23 | #define NVME_IO_TIMEOUT (nvme_io_timeout * HZ) |
26 | 24 | ||
@@ -56,6 +54,7 @@ struct nvme_ctrl { | |||
56 | struct blk_mq_tag_set *tagset; | 54 | struct blk_mq_tag_set *tagset; |
57 | struct list_head namespaces; | 55 | struct list_head namespaces; |
58 | struct device *device; /* char device */ | 56 | struct device *device; /* char device */ |
57 | struct list_head node; | ||
59 | 58 | ||
60 | char name[12]; | 59 | char name[12]; |
61 | char serial[20]; | 60 | char serial[20]; |
@@ -71,6 +70,8 @@ struct nvme_ctrl { | |||
71 | u16 abort_limit; | 70 | u16 abort_limit; |
72 | u8 event_limit; | 71 | u8 event_limit; |
73 | u8 vwc; | 72 | u8 vwc; |
73 | u32 vs; | ||
74 | bool subsystem; | ||
74 | unsigned long quirks; | 75 | unsigned long quirks; |
75 | }; | 76 | }; |
76 | 77 | ||
@@ -100,6 +101,7 @@ struct nvme_ctrl_ops { | |||
100 | int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val); | 101 | int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val); |
101 | int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val); | 102 | int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val); |
102 | bool (*io_incapable)(struct nvme_ctrl *ctrl); | 103 | bool (*io_incapable)(struct nvme_ctrl *ctrl); |
104 | int (*reset_ctrl)(struct nvme_ctrl *ctrl); | ||
103 | void (*free_ctrl)(struct nvme_ctrl *ctrl); | 105 | void (*free_ctrl)(struct nvme_ctrl *ctrl); |
104 | }; | 106 | }; |
105 | 107 | ||
@@ -123,6 +125,13 @@ static inline bool nvme_io_incapable(struct nvme_ctrl *ctrl) | |||
123 | return val & NVME_CSTS_CFS; | 125 | return val & NVME_CSTS_CFS; |
124 | } | 126 | } |
125 | 127 | ||
128 | static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl) | ||
129 | { | ||
130 | if (!ctrl->subsystem) | ||
131 | return -ENOTTY; | ||
132 | return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65); | ||
133 | } | ||
134 | |||
126 | static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector) | 135 | static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector) |
127 | { | 136 | { |
128 | return (sector >> (ns->lba_shift - 9)); | 137 | return (sector >> (ns->lba_shift - 9)); |
@@ -194,6 +203,8 @@ static inline int nvme_error_status(u16 status) | |||
194 | int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap); | 203 | int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap); |
195 | int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap); | 204 | int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap); |
196 | int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl); | 205 | int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl); |
206 | int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, | ||
207 | const struct nvme_ctrl_ops *ops, unsigned long quirks); | ||
197 | void nvme_put_ctrl(struct nvme_ctrl *ctrl); | 208 | void nvme_put_ctrl(struct nvme_ctrl *ctrl); |
198 | int nvme_init_identify(struct nvme_ctrl *ctrl); | 209 | int nvme_init_identify(struct nvme_ctrl *ctrl); |
199 | 210 | ||
@@ -224,9 +235,6 @@ int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11, | |||
224 | 235 | ||
225 | extern spinlock_t dev_list_lock; | 236 | extern spinlock_t dev_list_lock; |
226 | 237 | ||
227 | int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns, | ||
228 | struct nvme_passthru_cmd __user *ucmd); | ||
229 | |||
230 | struct sg_io_hdr; | 238 | struct sg_io_hdr; |
231 | 239 | ||
232 | int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr); | 240 | int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr); |
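One detail worth calling out in the nvme.h hunk above: the new nvme_reset_subsystem() helper writes 0x4E564D65 to NVME_REG_NSSR, which is simply the ASCII string "NVMe" packed into a 32-bit value, as the specification requires to trigger a subsystem reset. A standalone illustration of that constant (hypothetical test snippet, not part of the patch):

```c
#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* "NVMe" packed big-endian into the NSSR register value */
	uint32_t nssr = ((uint32_t)'N' << 24) | ('V' << 16) | ('M' << 8) | 'e';

	assert(nssr == 0x4E564D65);	/* the value nvme_reset_subsystem() writes */
	return 0;
}
```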
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 697dc1fb5ef9..87ad57bcc7ed 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -38,15 +38,11 @@ | |||
38 | #include <linux/slab.h> | 38 | #include <linux/slab.h> |
39 | #include <linux/t10-pi.h> | 39 | #include <linux/t10-pi.h> |
40 | #include <linux/types.h> | 40 | #include <linux/types.h> |
41 | #include <linux/pr.h> | ||
42 | #include <scsi/sg.h> | ||
43 | #include <linux/io-64-nonatomic-lo-hi.h> | 41 | #include <linux/io-64-nonatomic-lo-hi.h> |
44 | #include <asm/unaligned.h> | 42 | #include <asm/unaligned.h> |
45 | 43 | ||
46 | #include <uapi/linux/nvme_ioctl.h> | ||
47 | #include "nvme.h" | 44 | #include "nvme.h" |
48 | 45 | ||
49 | #define NVME_MINORS (1U << MINORBITS) | ||
50 | #define NVME_Q_DEPTH 1024 | 46 | #define NVME_Q_DEPTH 1024 |
51 | #define NVME_AQ_DEPTH 256 | 47 | #define NVME_AQ_DEPTH 256 |
52 | #define SQ_SIZE(depth) (depth * sizeof(struct nvme_command)) | 48 | #define SQ_SIZE(depth) (depth * sizeof(struct nvme_command)) |
@@ -64,9 +60,6 @@ unsigned char shutdown_timeout = 5; | |||
64 | module_param(shutdown_timeout, byte, 0644); | 60 | module_param(shutdown_timeout, byte, 0644); |
65 | MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown"); | 61 | MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown"); |
66 | 62 | ||
67 | static int nvme_char_major; | ||
68 | module_param(nvme_char_major, int, 0); | ||
69 | |||
70 | static int use_threaded_interrupts; | 63 | static int use_threaded_interrupts; |
71 | module_param(use_threaded_interrupts, int, 0); | 64 | module_param(use_threaded_interrupts, int, 0); |
72 | 65 | ||
@@ -79,8 +72,6 @@ static struct task_struct *nvme_thread; | |||
79 | static struct workqueue_struct *nvme_workq; | 72 | static struct workqueue_struct *nvme_workq; |
80 | static wait_queue_head_t nvme_kthread_wait; | 73 | static wait_queue_head_t nvme_kthread_wait; |
81 | 74 | ||
82 | static struct class *nvme_class; | ||
83 | |||
84 | struct nvme_dev; | 75 | struct nvme_dev; |
85 | struct nvme_queue; | 76 | struct nvme_queue; |
86 | struct nvme_iod; | 77 | struct nvme_iod; |
@@ -1505,15 +1496,6 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev) | |||
1505 | return result; | 1496 | return result; |
1506 | } | 1497 | } |
1507 | 1498 | ||
1508 | static int nvme_subsys_reset(struct nvme_dev *dev) | ||
1509 | { | ||
1510 | if (!dev->subsystem) | ||
1511 | return -ENOTTY; | ||
1512 | |||
1513 | writel(0x4E564D65, dev->bar + NVME_REG_NSSR); /* "NVMe" */ | ||
1514 | return 0; | ||
1515 | } | ||
1516 | |||
1517 | static int nvme_kthread(void *data) | 1499 | static int nvme_kthread(void *data) |
1518 | { | 1500 | { |
1519 | struct nvme_dev *dev, *next; | 1501 | struct nvme_dev *dev, *next; |
@@ -2113,42 +2095,11 @@ static void nvme_release_prp_pools(struct nvme_dev *dev) | |||
2113 | dma_pool_destroy(dev->prp_small_pool); | 2095 | dma_pool_destroy(dev->prp_small_pool); |
2114 | } | 2096 | } |
2115 | 2097 | ||
2116 | static DEFINE_IDA(nvme_instance_ida); | ||
2117 | |||
2118 | static int nvme_set_instance(struct nvme_dev *dev) | ||
2119 | { | ||
2120 | int instance, error; | ||
2121 | |||
2122 | do { | ||
2123 | if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL)) | ||
2124 | return -ENODEV; | ||
2125 | |||
2126 | spin_lock(&dev_list_lock); | ||
2127 | error = ida_get_new(&nvme_instance_ida, &instance); | ||
2128 | spin_unlock(&dev_list_lock); | ||
2129 | } while (error == -EAGAIN); | ||
2130 | |||
2131 | if (error) | ||
2132 | return -ENODEV; | ||
2133 | |||
2134 | dev->ctrl.instance = instance; | ||
2135 | return 0; | ||
2136 | } | ||
2137 | |||
2138 | static void nvme_release_instance(struct nvme_dev *dev) | ||
2139 | { | ||
2140 | spin_lock(&dev_list_lock); | ||
2141 | ida_remove(&nvme_instance_ida, dev->ctrl.instance); | ||
2142 | spin_unlock(&dev_list_lock); | ||
2143 | } | ||
2144 | |||
2145 | static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl) | 2098 | static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl) |
2146 | { | 2099 | { |
2147 | struct nvme_dev *dev = to_nvme_dev(ctrl); | 2100 | struct nvme_dev *dev = to_nvme_dev(ctrl); |
2148 | 2101 | ||
2149 | put_device(dev->dev); | 2102 | put_device(dev->dev); |
2150 | put_device(ctrl->device); | ||
2151 | nvme_release_instance(dev); | ||
2152 | if (dev->tagset.tags) | 2103 | if (dev->tagset.tags) |
2153 | blk_mq_free_tag_set(&dev->tagset); | 2104 | blk_mq_free_tag_set(&dev->tagset); |
2154 | if (dev->ctrl.admin_q) | 2105 | if (dev->ctrl.admin_q) |
@@ -2158,69 +2109,6 @@ static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl) | |||
2158 | kfree(dev); | 2109 | kfree(dev); |
2159 | } | 2110 | } |
2160 | 2111 | ||
2161 | static int nvme_dev_open(struct inode *inode, struct file *f) | ||
2162 | { | ||
2163 | struct nvme_dev *dev; | ||
2164 | int instance = iminor(inode); | ||
2165 | int ret = -ENODEV; | ||
2166 | |||
2167 | spin_lock(&dev_list_lock); | ||
2168 | list_for_each_entry(dev, &dev_list, node) { | ||
2169 | if (dev->ctrl.instance == instance) { | ||
2170 | if (!dev->ctrl.admin_q) { | ||
2171 | ret = -EWOULDBLOCK; | ||
2172 | break; | ||
2173 | } | ||
2174 | if (!kref_get_unless_zero(&dev->ctrl.kref)) | ||
2175 | break; | ||
2176 | f->private_data = dev; | ||
2177 | ret = 0; | ||
2178 | break; | ||
2179 | } | ||
2180 | } | ||
2181 | spin_unlock(&dev_list_lock); | ||
2182 | |||
2183 | return ret; | ||
2184 | } | ||
2185 | |||
2186 | static int nvme_dev_release(struct inode *inode, struct file *f) | ||
2187 | { | ||
2188 | struct nvme_dev *dev = f->private_data; | ||
2189 | nvme_put_ctrl(&dev->ctrl); | ||
2190 | return 0; | ||
2191 | } | ||
2192 | |||
2193 | static long nvme_dev_ioctl(struct file *f, unsigned int cmd, unsigned long arg) | ||
2194 | { | ||
2195 | struct nvme_dev *dev = f->private_data; | ||
2196 | struct nvme_ns *ns; | ||
2197 | |||
2198 | switch (cmd) { | ||
2199 | case NVME_IOCTL_ADMIN_CMD: | ||
2200 | return nvme_user_cmd(&dev->ctrl, NULL, (void __user *)arg); | ||
2201 | case NVME_IOCTL_IO_CMD: | ||
2202 | if (list_empty(&dev->ctrl.namespaces)) | ||
2203 | return -ENOTTY; | ||
2204 | ns = list_first_entry(&dev->ctrl.namespaces, struct nvme_ns, list); | ||
2205 | return nvme_user_cmd(&dev->ctrl, ns, (void __user *)arg); | ||
2206 | case NVME_IOCTL_RESET: | ||
2207 | dev_warn(dev->dev, "resetting controller\n"); | ||
2208 | return nvme_reset(dev); | ||
2209 | case NVME_IOCTL_SUBSYS_RESET: | ||
2210 | return nvme_subsys_reset(dev); | ||
2211 | default: | ||
2212 | return -ENOTTY; | ||
2213 | } | ||
2214 | } | ||
2215 | |||
2216 | static const struct file_operations nvme_dev_fops = { | ||
2217 | .owner = THIS_MODULE, | ||
2218 | .open = nvme_dev_open, | ||
2219 | .release = nvme_dev_release, | ||
2220 | .unlocked_ioctl = nvme_dev_ioctl, | ||
2221 | .compat_ioctl = nvme_dev_ioctl, | ||
2222 | }; | ||
2223 | |||
2224 | static void nvme_probe_work(struct work_struct *work) | 2112 | static void nvme_probe_work(struct work_struct *work) |
2225 | { | 2113 | { |
2226 | struct nvme_dev *dev = container_of(work, struct nvme_dev, probe_work); | 2114 | struct nvme_dev *dev = container_of(work, struct nvme_dev, probe_work); |
@@ -2372,21 +2260,6 @@ static int nvme_reset(struct nvme_dev *dev) | |||
2372 | return ret; | 2260 | return ret; |
2373 | } | 2261 | } |
2374 | 2262 | ||
2375 | static ssize_t nvme_sysfs_reset(struct device *dev, | ||
2376 | struct device_attribute *attr, const char *buf, | ||
2377 | size_t count) | ||
2378 | { | ||
2379 | struct nvme_dev *ndev = dev_get_drvdata(dev); | ||
2380 | int ret; | ||
2381 | |||
2382 | ret = nvme_reset(ndev); | ||
2383 | if (ret < 0) | ||
2384 | return ret; | ||
2385 | |||
2386 | return count; | ||
2387 | } | ||
2388 | static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset); | ||
2389 | |||
2390 | static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val) | 2263 | static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val) |
2391 | { | 2264 | { |
2392 | *val = readl(to_nvme_dev(ctrl)->bar + off); | 2265 | *val = readl(to_nvme_dev(ctrl)->bar + off); |
@@ -2412,11 +2285,17 @@ static bool nvme_pci_io_incapable(struct nvme_ctrl *ctrl) | |||
2412 | return !dev->bar || dev->online_queues < 2; | 2285 | return !dev->bar || dev->online_queues < 2; |
2413 | } | 2286 | } |
2414 | 2287 | ||
2288 | static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl) | ||
2289 | { | ||
2290 | return nvme_reset(to_nvme_dev(ctrl)); | ||
2291 | } | ||
2292 | |||
2415 | static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { | 2293 | static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { |
2416 | .reg_read32 = nvme_pci_reg_read32, | 2294 | .reg_read32 = nvme_pci_reg_read32, |
2417 | .reg_write32 = nvme_pci_reg_write32, | 2295 | .reg_write32 = nvme_pci_reg_write32, |
2418 | .reg_read64 = nvme_pci_reg_read64, | 2296 | .reg_read64 = nvme_pci_reg_read64, |
2419 | .io_incapable = nvme_pci_io_incapable, | 2297 | .io_incapable = nvme_pci_io_incapable, |
2298 | .reset_ctrl = nvme_pci_reset_ctrl, | ||
2420 | .free_ctrl = nvme_pci_free_ctrl, | 2299 | .free_ctrl = nvme_pci_free_ctrl, |
2421 | }; | 2300 | }; |
2422 | 2301 | ||
@@ -2441,51 +2320,28 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
2441 | if (!dev->queues) | 2320 | if (!dev->queues) |
2442 | goto free; | 2321 | goto free; |
2443 | 2322 | ||
2444 | INIT_LIST_HEAD(&dev->ctrl.namespaces); | ||
2445 | INIT_WORK(&dev->reset_work, nvme_reset_work); | ||
2446 | dev->dev = get_device(&pdev->dev); | 2323 | dev->dev = get_device(&pdev->dev); |
2447 | pci_set_drvdata(pdev, dev); | 2324 | pci_set_drvdata(pdev, dev); |
2448 | 2325 | ||
2449 | dev->ctrl.ops = &nvme_pci_ctrl_ops; | 2326 | INIT_LIST_HEAD(&dev->node); |
2450 | dev->ctrl.dev = dev->dev; | 2327 | INIT_WORK(&dev->scan_work, nvme_dev_scan); |
2451 | dev->ctrl.quirks = id->driver_data; | 2328 | INIT_WORK(&dev->probe_work, nvme_probe_work); |
2329 | INIT_WORK(&dev->reset_work, nvme_reset_work); | ||
2452 | 2330 | ||
2453 | result = nvme_set_instance(dev); | 2331 | result = nvme_setup_prp_pools(dev); |
2454 | if (result) | 2332 | if (result) |
2455 | goto put_pci; | 2333 | goto put_pci; |
2456 | 2334 | ||
2457 | result = nvme_setup_prp_pools(dev); | 2335 | result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops, |
2336 | id->driver_data); | ||
2458 | if (result) | 2337 | if (result) |
2459 | goto release; | ||
2460 | |||
2461 | kref_init(&dev->ctrl.kref); | ||
2462 | dev->ctrl.device = device_create(nvme_class, &pdev->dev, | ||
2463 | MKDEV(nvme_char_major, dev->ctrl.instance), | ||
2464 | dev, "nvme%d", dev->ctrl.instance); | ||
2465 | if (IS_ERR(dev->ctrl.device)) { | ||
2466 | result = PTR_ERR(dev->ctrl.device); | ||
2467 | goto release_pools; | 2338 | goto release_pools; |
2468 | } | ||
2469 | get_device(dev->ctrl.device); | ||
2470 | dev_set_drvdata(dev->ctrl.device, dev); | ||
2471 | |||
2472 | result = device_create_file(dev->ctrl.device, &dev_attr_reset_controller); | ||
2473 | if (result) | ||
2474 | goto put_dev; | ||
2475 | 2339 | ||
2476 | INIT_LIST_HEAD(&dev->node); | ||
2477 | INIT_WORK(&dev->scan_work, nvme_dev_scan); | ||
2478 | INIT_WORK(&dev->probe_work, nvme_probe_work); | ||
2479 | schedule_work(&dev->probe_work); | 2340 | schedule_work(&dev->probe_work); |
2480 | return 0; | 2341 | return 0; |
2481 | 2342 | ||
2482 | put_dev: | ||
2483 | device_destroy(nvme_class, MKDEV(nvme_char_major, dev->ctrl.instance)); | ||
2484 | put_device(dev->ctrl.device); | ||
2485 | release_pools: | 2343 | release_pools: |
2486 | nvme_release_prp_pools(dev); | 2344 | nvme_release_prp_pools(dev); |
2487 | release: | ||
2488 | nvme_release_instance(dev); | ||
2489 | put_pci: | 2345 | put_pci: |
2490 | put_device(dev->dev); | 2346 | put_device(dev->dev); |
2491 | free: | 2347 | free: |
@@ -2523,11 +2379,9 @@ static void nvme_remove(struct pci_dev *pdev) | |||
2523 | flush_work(&dev->probe_work); | 2379 | flush_work(&dev->probe_work); |
2524 | flush_work(&dev->reset_work); | 2380 | flush_work(&dev->reset_work); |
2525 | flush_work(&dev->scan_work); | 2381 | flush_work(&dev->scan_work); |
2526 | device_remove_file(dev->ctrl.device, &dev_attr_reset_controller); | ||
2527 | nvme_remove_namespaces(&dev->ctrl); | 2382 | nvme_remove_namespaces(&dev->ctrl); |
2528 | nvme_dev_shutdown(dev); | 2383 | nvme_dev_shutdown(dev); |
2529 | nvme_dev_remove_admin(dev); | 2384 | nvme_dev_remove_admin(dev); |
2530 | device_destroy(nvme_class, MKDEV(nvme_char_major, dev->ctrl.instance)); | ||
2531 | nvme_free_queues(dev, 0); | 2385 | nvme_free_queues(dev, 0); |
2532 | nvme_release_cmb(dev); | 2386 | nvme_release_cmb(dev); |
2533 | nvme_release_prp_pools(dev); | 2387 | nvme_release_prp_pools(dev); |
@@ -2610,29 +2464,12 @@ static int __init nvme_init(void) | |||
2610 | if (result < 0) | 2464 | if (result < 0) |
2611 | goto kill_workq; | 2465 | goto kill_workq; |
2612 | 2466 | ||
2613 | result = __register_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme", | ||
2614 | &nvme_dev_fops); | ||
2615 | if (result < 0) | ||
2616 | goto unregister_blkdev; | ||
2617 | else if (result > 0) | ||
2618 | nvme_char_major = result; | ||
2619 | |||
2620 | nvme_class = class_create(THIS_MODULE, "nvme"); | ||
2621 | if (IS_ERR(nvme_class)) { | ||
2622 | result = PTR_ERR(nvme_class); | ||
2623 | goto unregister_chrdev; | ||
2624 | } | ||
2625 | |||
2626 | result = pci_register_driver(&nvme_driver); | 2467 | result = pci_register_driver(&nvme_driver); |
2627 | if (result) | 2468 | if (result) |
2628 | goto destroy_class; | 2469 | goto core_exit; |
2629 | return 0; | 2470 | return 0; |
2630 | 2471 | ||
2631 | destroy_class: | 2472 | core_exit: |
2632 | class_destroy(nvme_class); | ||
2633 | unregister_chrdev: | ||
2634 | __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme"); | ||
2635 | unregister_blkdev: | ||
2636 | nvme_core_exit(); | 2473 | nvme_core_exit(); |
2637 | kill_workq: | 2474 | kill_workq: |
2638 | destroy_workqueue(nvme_workq); | 2475 | destroy_workqueue(nvme_workq); |
@@ -2644,8 +2481,6 @@ static void __exit nvme_exit(void) | |||
2644 | pci_unregister_driver(&nvme_driver); | 2481 | pci_unregister_driver(&nvme_driver); |
2645 | nvme_core_exit(); | 2482 | nvme_core_exit(); |
2646 | destroy_workqueue(nvme_workq); | 2483 | destroy_workqueue(nvme_workq); |
2647 | class_destroy(nvme_class); | ||
2648 | __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme"); | ||
2649 | BUG_ON(nvme_thread && !IS_ERR(nvme_thread)); | 2484 | BUG_ON(nvme_thread && !IS_ERR(nvme_thread)); |
2650 | _nvme_check_size(); | 2485 | _nvme_check_size(); |
2651 | } | 2486 | } |
diff --git a/drivers/nvme/host/scsi.c b/drivers/nvme/host/scsi.c
index eaf725610fe2..e947e298a737 100644
--- a/drivers/nvme/host/scsi.c
+++ b/drivers/nvme/host/scsi.c
@@ -600,7 +600,7 @@ static int nvme_trans_unit_serial_page(struct nvme_ns *ns, | |||
600 | } | 600 | } |
601 | 601 | ||
602 | static int nvme_fill_device_id_eui64(struct nvme_ns *ns, struct sg_io_hdr *hdr, | 602 | static int nvme_fill_device_id_eui64(struct nvme_ns *ns, struct sg_io_hdr *hdr, |
603 | u8 *inq_response, int alloc_len, u32 vs) | 603 | u8 *inq_response, int alloc_len) |
604 | { | 604 | { |
605 | struct nvme_id_ns *id_ns; | 605 | struct nvme_id_ns *id_ns; |
606 | int nvme_sc, res; | 606 | int nvme_sc, res; |
@@ -615,7 +615,7 @@ static int nvme_fill_device_id_eui64(struct nvme_ns *ns, struct sg_io_hdr *hdr, | |||
615 | eui = id_ns->eui64; | 615 | eui = id_ns->eui64; |
616 | len = sizeof(id_ns->eui64); | 616 | len = sizeof(id_ns->eui64); |
617 | 617 | ||
618 | if (vs >= NVME_VS(1, 2)) { | 618 | if (ns->ctrl->vs >= NVME_VS(1, 2)) { |
619 | if (bitmap_empty(eui, len * 8)) { | 619 | if (bitmap_empty(eui, len * 8)) { |
620 | eui = id_ns->nguid; | 620 | eui = id_ns->nguid; |
621 | len = sizeof(id_ns->nguid); | 621 | len = sizeof(id_ns->nguid); |
@@ -687,14 +687,9 @@ static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr, | |||
687 | u8 *resp, int alloc_len) | 687 | u8 *resp, int alloc_len) |
688 | { | 688 | { |
689 | int res; | 689 | int res; |
690 | u32 vs; | ||
691 | 690 | ||
692 | res = ns->ctrl->ops->reg_read32(ns->ctrl, NVME_REG_VS, &vs); | 691 | if (ns->ctrl->vs >= NVME_VS(1, 1)) { |
693 | if (res) | 692 | res = nvme_fill_device_id_eui64(ns, hdr, resp, alloc_len); |
694 | return res; | ||
695 | |||
696 | if (vs >= NVME_VS(1, 1)) { | ||
697 | res = nvme_fill_device_id_eui64(ns, hdr, resp, alloc_len, vs); | ||
698 | if (res != -EOPNOTSUPP) | 693 | if (res != -EOPNOTSUPP) |
699 | return res; | 694 | return res; |
700 | } | 695 | } |