author    Keith Busch <keith.busch@intel.com>  2014-05-13 12:32:46 -0400
committer Matthew Wilcox <matthew.r.wilcox@intel.com>  2014-06-03 22:58:34 -0400
commit    a51afb54339c5e9ee72df66ae0f2ac5aacfed365 (patch)
tree      fcaf5e2e3ccc53195f844954cff0713053803dd4
parent    de672b9748f78dcbc663e12ea44cb24dc287baf0 (diff)
NVMe: Fix nvme get/put queue semantics
The routines to get and lock nvme queues required the caller to "put"
or "unlock" them even if getting one returned NULL. This patch fixes
that.

Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
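For illustration, a minimal sketch of the caller contract this patch establishes, assuming the driver internals shown in the diff below; example_submit() is a hypothetical caller, not code from the patch:

/*
 * Sketch only: get_nvmeq() now releases the RCU read lock and the
 * per-CPU reference itself when no queue is available, so callers
 * pair put_nvmeq() only with a successful get.
 */
static void example_submit(struct nvme_dev *dev, struct bio *bio)
{
	struct nvme_queue *nvmeq = get_nvmeq(dev);

	if (!nvmeq) {
		/* nothing to put: get_nvmeq() already cleaned up */
		bio_endio(bio, -EIO);
		return;
	}

	/* ... build and submit the command on nvmeq ... */

	put_nvmeq(nvmeq);	/* paired only with a successful get */
}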
 drivers/block/nvme-core.c | 29 +++++++++++++++++++---------
 1 file changed, 21 insertions(+), 8 deletions(-)
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 12c57eb7c915..29a3e85873b5 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -285,9 +285,17 @@ static struct nvme_queue *raw_nvmeq(struct nvme_dev *dev, int qid)
 
 static struct nvme_queue *get_nvmeq(struct nvme_dev *dev) __acquires(RCU)
 {
+	struct nvme_queue *nvmeq;
 	unsigned queue_id = get_cpu_var(*dev->io_queue);
+
 	rcu_read_lock();
-	return rcu_dereference(dev->queues[queue_id]);
+	nvmeq = rcu_dereference(dev->queues[queue_id]);
+	if (nvmeq)
+		return nvmeq;
+
+	rcu_read_unlock();
+	put_cpu_var(*dev->io_queue);
+	return NULL;
 }
 
 static void put_nvmeq(struct nvme_queue *nvmeq) __releases(RCU)
@@ -299,8 +307,15 @@ static void put_nvmeq(struct nvme_queue *nvmeq) __releases(RCU)
 static struct nvme_queue *lock_nvmeq(struct nvme_dev *dev, int q_idx)
 							__acquires(RCU)
 {
+	struct nvme_queue *nvmeq;
+
 	rcu_read_lock();
-	return rcu_dereference(dev->queues[q_idx]);
+	nvmeq = rcu_dereference(dev->queues[q_idx]);
+	if (nvmeq)
+		return nvmeq;
+
+	rcu_read_unlock();
+	return NULL;
 }
 
 static void unlock_nvmeq(struct nvme_queue *nvmeq) __releases(RCU)
@@ -809,7 +824,6 @@ static void nvme_make_request(struct request_queue *q, struct bio *bio)
 	int result = -EBUSY;
 
 	if (!nvmeq) {
-		put_nvmeq(NULL);
 		bio_endio(bio, -EIO);
 		return;
 	}
@@ -884,10 +898,8 @@ static int nvme_submit_sync_cmd(struct nvme_dev *dev, int q_idx,
 	struct nvme_queue *nvmeq;
 
 	nvmeq = lock_nvmeq(dev, q_idx);
-	if (!nvmeq) {
-		unlock_nvmeq(nvmeq);
+	if (!nvmeq)
 		return -ENODEV;
-	}
 
 	cmdinfo.task = current;
 	cmdinfo.status = -EINTR;
@@ -912,9 +924,10 @@ static int nvme_submit_sync_cmd(struct nvme_dev *dev, int q_idx,
 
 	if (cmdinfo.status == -EINTR) {
 		nvmeq = lock_nvmeq(dev, q_idx);
-		if (nvmeq) {
+		if (nvmeq) {
 			nvme_abort_command(nvmeq, cmdid);
-		unlock_nvmeq(nvmeq);
+			unlock_nvmeq(nvmeq);
+		}
 		return -EINTR;
 	}
 
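The same rule applies to the lock/unlock pair used on the synchronous path in the last two hunks: lock_nvmeq() now drops the RCU read lock itself on failure, so unlock_nvmeq() is never called with NULL. A minimal sketch of that contract; example_sync() is a hypothetical caller, not code from the driver:

/*
 * Sketch only: unlock_nvmeq() is paired solely with a
 * successful lock_nvmeq().
 */
static int example_sync(struct nvme_dev *dev, int q_idx)
{
	struct nvme_queue *nvmeq = lock_nvmeq(dev, q_idx);

	if (!nvmeq)
		return -ENODEV;	/* no unlock needed: lock_nvmeq() cleaned up */

	/* ... issue the synchronous command and wait ... */

	unlock_nvmeq(nvmeq);
	return 0;
}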