about summary refs log tree commit diff stats
path: root/drivers/block
diff options
context:
space:
mode:
authorKeith Busch <keith.busch@intel.com>2015-06-05 12:30:08 -0400
committerJens Axboe <axboe@fb.com>2015-06-05 12:30:08 -0400
commit4cc06521ee1f153e0d292413a5bff7bbbdee92d0 (patch)
treedc984fc9b1a477b77f072271a9630b35b7dbdd58 /drivers/block
parent8b70f45e2eb275da886b9c9dee190436d12d876a (diff)
NVMe: add sysfs and ioctl controller reset
We need the ability to perform an nvme controller reset as discussed on the mailing list thread: http://lists.infradead.org/pipermail/linux-nvme/2015-March/001585.html This adds a sysfs entry that when written to will perform an NVMe controller reset if the controller was successfully initialized in the first place. This also adds locking around resetting the device in the async probe method so the driver can't schedule two resets. Signed-off-by: Keith Busch <keith.busch@intel.com> Cc: Brandon Schultz <brandon.schulz@hgst.com> Cc: David Sariel <david.sariel@pmcs.com> Updated by Jens to: 1) Merge this with the ioctl reset patch from David Sariel. The ioctl path now shares the reset code from the sysfs path. 2) Don't flush work if we fail issuing the reset. Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'drivers/block')
-rw-r--r--drivers/block/nvme-core.c53
1 file changed, 53 insertions, 0 deletions
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 513908ff46c4..9682e29b4171 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -80,6 +80,7 @@ static wait_queue_head_t nvme_kthread_wait;
80static struct class *nvme_class; 80static struct class *nvme_class;
81 81
82static void nvme_reset_failed_dev(struct work_struct *ws); 82static void nvme_reset_failed_dev(struct work_struct *ws);
83static int nvme_reset(struct nvme_dev *dev);
83static int nvme_process_cq(struct nvme_queue *nvmeq); 84static int nvme_process_cq(struct nvme_queue *nvmeq);
84 85
85struct async_cmd_info { 86struct async_cmd_info {
@@ -2689,6 +2690,9 @@ static long nvme_dev_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
2689 return -ENOTTY; 2690 return -ENOTTY;
2690 ns = list_first_entry(&dev->namespaces, struct nvme_ns, list); 2691 ns = list_first_entry(&dev->namespaces, struct nvme_ns, list);
2691 return nvme_user_cmd(dev, ns, (void __user *)arg); 2692 return nvme_user_cmd(dev, ns, (void __user *)arg);
2693 case NVME_IOCTL_RESET:
2694 dev_warn(dev->dev, "resetting controller\n");
2695 return nvme_reset(dev);
2692 default: 2696 default:
2693 return -ENOTTY; 2697 return -ENOTTY;
2694 } 2698 }
@@ -2839,6 +2843,44 @@ static void nvme_reset_workfn(struct work_struct *work)
2839 dev->reset_workfn(work); 2843 dev->reset_workfn(work);
2840} 2844}
2841 2845
2846static int nvme_reset(struct nvme_dev *dev)
2847{
2848 int ret = -EBUSY;
2849
2850 if (!dev->admin_q || blk_queue_dying(dev->admin_q))
2851 return -ENODEV;
2852
2853 spin_lock(&dev_list_lock);
2854 if (!work_pending(&dev->reset_work)) {
2855 dev->reset_workfn = nvme_reset_failed_dev;
2856 queue_work(nvme_workq, &dev->reset_work);
2857 ret = 0;
2858 }
2859 spin_unlock(&dev_list_lock);
2860
2861 if (!ret) {
2862 flush_work(&dev->reset_work);
2863 return 0;
2864 }
2865
2866 return ret;
2867}
2868
2869static ssize_t nvme_sysfs_reset(struct device *dev,
2870 struct device_attribute *attr, const char *buf,
2871 size_t count)
2872{
2873 struct nvme_dev *ndev = dev_get_drvdata(dev);
2874 int ret;
2875
2876 ret = nvme_reset(ndev);
2877 if (ret < 0)
2878 return ret;
2879
2880 return count;
2881}
2882static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
2883
2842static void nvme_async_probe(struct work_struct *work); 2884static void nvme_async_probe(struct work_struct *work);
2843static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) 2885static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2844{ 2886{
@@ -2883,12 +2925,20 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2883 goto release_pools; 2925 goto release_pools;
2884 } 2926 }
2885 get_device(dev->device); 2927 get_device(dev->device);
2928 dev_set_drvdata(dev->device, dev);
2929
2930 result = device_create_file(dev->device, &dev_attr_reset_controller);
2931 if (result)
2932 goto put_dev;
2886 2933
2887 INIT_LIST_HEAD(&dev->node); 2934 INIT_LIST_HEAD(&dev->node);
2888 INIT_WORK(&dev->probe_work, nvme_async_probe); 2935 INIT_WORK(&dev->probe_work, nvme_async_probe);
2889 schedule_work(&dev->probe_work); 2936 schedule_work(&dev->probe_work);
2890 return 0; 2937 return 0;
2891 2938
2939 put_dev:
2940 device_destroy(nvme_class, MKDEV(nvme_char_major, dev->instance));
2941 put_device(dev->device);
2892 release_pools: 2942 release_pools:
2893 nvme_release_prp_pools(dev); 2943 nvme_release_prp_pools(dev);
2894 release: 2944 release:
@@ -2919,10 +2969,12 @@ static void nvme_async_probe(struct work_struct *work)
2919 nvme_set_irq_hints(dev); 2969 nvme_set_irq_hints(dev);
2920 return; 2970 return;
2921 reset: 2971 reset:
2972 spin_lock(&dev_list_lock);
2922 if (!work_busy(&dev->reset_work)) { 2973 if (!work_busy(&dev->reset_work)) {
2923 dev->reset_workfn = nvme_reset_failed_dev; 2974 dev->reset_workfn = nvme_reset_failed_dev;
2924 queue_work(nvme_workq, &dev->reset_work); 2975 queue_work(nvme_workq, &dev->reset_work);
2925 } 2976 }
2977 spin_unlock(&dev_list_lock);
2926} 2978}
2927 2979
2928static void nvme_reset_notify(struct pci_dev *pdev, bool prepare) 2980static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
@@ -2952,6 +3004,7 @@ static void nvme_remove(struct pci_dev *pdev)
2952 pci_set_drvdata(pdev, NULL); 3004 pci_set_drvdata(pdev, NULL);
2953 flush_work(&dev->probe_work); 3005 flush_work(&dev->probe_work);
2954 flush_work(&dev->reset_work); 3006 flush_work(&dev->reset_work);
3007 device_remove_file(dev->device, &dev_attr_reset_controller);
2955 nvme_dev_shutdown(dev); 3008 nvme_dev_shutdown(dev);
2956 nvme_dev_remove(dev); 3009 nvme_dev_remove(dev);
2957 nvme_dev_remove_admin(dev); 3010 nvme_dev_remove_admin(dev);