author		Linus Torvalds <torvalds@linux-foundation.org>	2014-04-11 19:45:59 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-04-11 19:45:59 -0400
commit		3e8072d48b2dd0898e99698018b2045f8cd49965 (patch)
tree		5710e46918d4e358f22fb6038ad81d1abdd3f1f8
parent		a63b747b41d6f6c9116fb2260381a3c96fe5dc02 (diff)
parent		edd10d33283899fb15d99a290dcc9ceb3604ca78 (diff)
Merge git://git.infradead.org/users/willy/linux-nvme
Pull NVMe driver updates from Matthew Wilcox:
 "Various updates to the NVMe driver.  The most user-visible change is
  that drive hotplugging now works and CPU hotplug while an NVMe drive
  is installed should also work better"

* git://git.infradead.org/users/willy/linux-nvme:
  NVMe: Retry failed commands with non-fatal errors
  NVMe: Add getgeo to block ops
  NVMe: Start-stop nvme_thread during device add-remove.
  NVMe: Make I/O timeout a module parameter
  NVMe: CPU hot plug notification
  NVMe: per-cpu io queues
  NVMe: Replace DEFINE_PCI_DEVICE_TABLE
  NVMe: Fix divide-by-zero in nvme_trans_io_get_num_cmds
  NVMe: IOCTL path RCU protect queue access
  NVMe: RCU protected access to io queues
  NVMe: Initialize device reference count earlier
  NVMe: Add CONFIG_PM_SLEEP to suspend/resume functions
-rw-r--r--	drivers/block/nvme-core.c	684
-rw-r--r--	drivers/block/nvme-scsi.c	43
-rw-r--r--	include/linux/nvme.h		21
-rw-r--r--	include/uapi/linux/nvme.h	1
4 files changed, 504 insertions(+), 245 deletions(-)
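Before the diff itself, a brief orientation on the series' central change: the per-cpu, RCU-protected I/O queue lookup. The sketch below is condensed from the nvme-core.c hunks that follow — it is kernel-only code and assumes the rest of struct nvme_dev as modified by this patch set, so treat it as a reading aid rather than a standalone snippet.

/*
 * Condensed from the nvme-core.c changes below: dev->queues[] is now an
 * RCU-protected array and dev->io_queue is a per-cpu queue id, so the
 * submission path looks up "this CPU's queue" under rcu_read_lock()
 * instead of indexing dev->queues[get_cpu() + 1] directly.
 */
static struct nvme_queue *get_nvmeq(struct nvme_dev *dev) __acquires(RCU)
{
	unsigned queue_id = get_cpu_var(*dev->io_queue);	/* per-cpu qid */

	rcu_read_lock();
	return rcu_dereference(dev->queues[queue_id]);
}

static void put_nvmeq(struct nvme_queue *nvmeq) __releases(RCU)
{
	rcu_read_unlock();
	put_cpu_var(nvmeq->dev->io_queue);
}

Teardown takes the matching path in the hunks below: a queue is unpublished with rcu_assign_pointer(dev->queues[i], NULL) and released via call_rcu(&nvmeq->r_head, nvme_free_queue), which is why readers only ever hold rcu_read_lock() across a lookup.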
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index da085ff10d25..7c64fa756cce 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * NVM Express device driver 2 * NVM Express device driver
3 * Copyright (c) 2011, Intel Corporation. 3 * Copyright (c) 2011-2014, Intel Corporation.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License, 6 * under the terms and conditions of the GNU General Public License,
@@ -20,10 +20,12 @@
20#include <linux/bio.h> 20#include <linux/bio.h>
21#include <linux/bitops.h> 21#include <linux/bitops.h>
22#include <linux/blkdev.h> 22#include <linux/blkdev.h>
23#include <linux/cpu.h>
23#include <linux/delay.h> 24#include <linux/delay.h>
24#include <linux/errno.h> 25#include <linux/errno.h>
25#include <linux/fs.h> 26#include <linux/fs.h>
26#include <linux/genhd.h> 27#include <linux/genhd.h>
28#include <linux/hdreg.h>
27#include <linux/idr.h> 29#include <linux/idr.h>
28#include <linux/init.h> 30#include <linux/init.h>
29#include <linux/interrupt.h> 31#include <linux/interrupt.h>
@@ -35,6 +37,7 @@
35#include <linux/module.h> 37#include <linux/module.h>
36#include <linux/moduleparam.h> 38#include <linux/moduleparam.h>
37#include <linux/pci.h> 39#include <linux/pci.h>
40#include <linux/percpu.h>
38#include <linux/poison.h> 41#include <linux/poison.h>
39#include <linux/ptrace.h> 42#include <linux/ptrace.h>
40#include <linux/sched.h> 43#include <linux/sched.h>
@@ -47,6 +50,11 @@
47#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command)) 50#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command))
48#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion)) 51#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion))
49#define ADMIN_TIMEOUT (60 * HZ) 52#define ADMIN_TIMEOUT (60 * HZ)
53#define IOD_TIMEOUT (4 * NVME_IO_TIMEOUT)
54
55unsigned char io_timeout = 30;
56module_param(io_timeout, byte, 0644);
57MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
50 58
51static int nvme_major; 59static int nvme_major;
52module_param(nvme_major, int, 0); 60module_param(nvme_major, int, 0);
@@ -58,6 +66,7 @@ static DEFINE_SPINLOCK(dev_list_lock);
58static LIST_HEAD(dev_list); 66static LIST_HEAD(dev_list);
59static struct task_struct *nvme_thread; 67static struct task_struct *nvme_thread;
60static struct workqueue_struct *nvme_workq; 68static struct workqueue_struct *nvme_workq;
69static wait_queue_head_t nvme_kthread_wait;
61 70
62static void nvme_reset_failed_dev(struct work_struct *ws); 71static void nvme_reset_failed_dev(struct work_struct *ws);
63 72
@@ -74,6 +83,7 @@ struct async_cmd_info {
74 * commands and one for I/O commands). 83 * commands and one for I/O commands).
75 */ 84 */
76struct nvme_queue { 85struct nvme_queue {
86 struct rcu_head r_head;
77 struct device *q_dmadev; 87 struct device *q_dmadev;
78 struct nvme_dev *dev; 88 struct nvme_dev *dev;
79 char irqname[24]; /* nvme4294967295-65535\0 */ 89 char irqname[24]; /* nvme4294967295-65535\0 */
@@ -85,6 +95,7 @@ struct nvme_queue {
85 wait_queue_head_t sq_full; 95 wait_queue_head_t sq_full;
86 wait_queue_t sq_cong_wait; 96 wait_queue_t sq_cong_wait;
87 struct bio_list sq_cong; 97 struct bio_list sq_cong;
98 struct list_head iod_bio;
88 u32 __iomem *q_db; 99 u32 __iomem *q_db;
89 u16 q_depth; 100 u16 q_depth;
90 u16 cq_vector; 101 u16 cq_vector;
@@ -95,6 +106,7 @@ struct nvme_queue {
95 u8 cq_phase; 106 u8 cq_phase;
96 u8 cqe_seen; 107 u8 cqe_seen;
97 u8 q_suspended; 108 u8 q_suspended;
109 cpumask_var_t cpu_mask;
98 struct async_cmd_info cmdinfo; 110 struct async_cmd_info cmdinfo;
99 unsigned long cmdid_data[]; 111 unsigned long cmdid_data[];
100}; 112};
@@ -118,7 +130,7 @@ static inline void _nvme_check_size(void)
118 BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512); 130 BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
119} 131}
120 132
121typedef void (*nvme_completion_fn)(struct nvme_dev *, void *, 133typedef void (*nvme_completion_fn)(struct nvme_queue *, void *,
122 struct nvme_completion *); 134 struct nvme_completion *);
123 135
124struct nvme_cmd_info { 136struct nvme_cmd_info {
@@ -190,7 +202,7 @@ static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
190#define CMD_CTX_FLUSH (0x318 + CMD_CTX_BASE) 202#define CMD_CTX_FLUSH (0x318 + CMD_CTX_BASE)
191#define CMD_CTX_ABORT (0x31C + CMD_CTX_BASE) 203#define CMD_CTX_ABORT (0x31C + CMD_CTX_BASE)
192 204
193static void special_completion(struct nvme_dev *dev, void *ctx, 205static void special_completion(struct nvme_queue *nvmeq, void *ctx,
194 struct nvme_completion *cqe) 206 struct nvme_completion *cqe)
195{ 207{
196 if (ctx == CMD_CTX_CANCELLED) 208 if (ctx == CMD_CTX_CANCELLED)
@@ -198,26 +210,26 @@ static void special_completion(struct nvme_dev *dev, void *ctx,
198 if (ctx == CMD_CTX_FLUSH) 210 if (ctx == CMD_CTX_FLUSH)
199 return; 211 return;
200 if (ctx == CMD_CTX_ABORT) { 212 if (ctx == CMD_CTX_ABORT) {
201 ++dev->abort_limit; 213 ++nvmeq->dev->abort_limit;
202 return; 214 return;
203 } 215 }
204 if (ctx == CMD_CTX_COMPLETED) { 216 if (ctx == CMD_CTX_COMPLETED) {
205 dev_warn(&dev->pci_dev->dev, 217 dev_warn(nvmeq->q_dmadev,
206 "completed id %d twice on queue %d\n", 218 "completed id %d twice on queue %d\n",
207 cqe->command_id, le16_to_cpup(&cqe->sq_id)); 219 cqe->command_id, le16_to_cpup(&cqe->sq_id));
208 return; 220 return;
209 } 221 }
210 if (ctx == CMD_CTX_INVALID) { 222 if (ctx == CMD_CTX_INVALID) {
211 dev_warn(&dev->pci_dev->dev, 223 dev_warn(nvmeq->q_dmadev,
212 "invalid id %d completed on queue %d\n", 224 "invalid id %d completed on queue %d\n",
213 cqe->command_id, le16_to_cpup(&cqe->sq_id)); 225 cqe->command_id, le16_to_cpup(&cqe->sq_id));
214 return; 226 return;
215 } 227 }
216 228
217 dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx); 229 dev_warn(nvmeq->q_dmadev, "Unknown special completion %p\n", ctx);
218} 230}
219 231
220static void async_completion(struct nvme_dev *dev, void *ctx, 232static void async_completion(struct nvme_queue *nvmeq, void *ctx,
221 struct nvme_completion *cqe) 233 struct nvme_completion *cqe)
222{ 234{
223 struct async_cmd_info *cmdinfo = ctx; 235 struct async_cmd_info *cmdinfo = ctx;
@@ -262,14 +274,34 @@ static void *cancel_cmdid(struct nvme_queue *nvmeq, int cmdid,
262 return ctx; 274 return ctx;
263} 275}
264 276
265struct nvme_queue *get_nvmeq(struct nvme_dev *dev) 277static struct nvme_queue *raw_nvmeq(struct nvme_dev *dev, int qid)
278{
279 return rcu_dereference_raw(dev->queues[qid]);
280}
281
282static struct nvme_queue *get_nvmeq(struct nvme_dev *dev) __acquires(RCU)
283{
284 unsigned queue_id = get_cpu_var(*dev->io_queue);
285 rcu_read_lock();
286 return rcu_dereference(dev->queues[queue_id]);
287}
288
289static void put_nvmeq(struct nvme_queue *nvmeq) __releases(RCU)
266{ 290{
267 return dev->queues[get_cpu() + 1]; 291 rcu_read_unlock();
292 put_cpu_var(nvmeq->dev->io_queue);
268} 293}
269 294
270void put_nvmeq(struct nvme_queue *nvmeq) 295static struct nvme_queue *lock_nvmeq(struct nvme_dev *dev, int q_idx)
296 __acquires(RCU)
271{ 297{
272 put_cpu(); 298 rcu_read_lock();
299 return rcu_dereference(dev->queues[q_idx]);
300}
301
302static void unlock_nvmeq(struct nvme_queue *nvmeq) __releases(RCU)
303{
304 rcu_read_unlock();
273} 305}
274 306
275/** 307/**
@@ -284,6 +316,10 @@ static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
284 unsigned long flags; 316 unsigned long flags;
285 u16 tail; 317 u16 tail;
286 spin_lock_irqsave(&nvmeq->q_lock, flags); 318 spin_lock_irqsave(&nvmeq->q_lock, flags);
319 if (nvmeq->q_suspended) {
320 spin_unlock_irqrestore(&nvmeq->q_lock, flags);
321 return -EBUSY;
322 }
287 tail = nvmeq->sq_tail; 323 tail = nvmeq->sq_tail;
288 memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd)); 324 memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
289 if (++tail == nvmeq->q_depth) 325 if (++tail == nvmeq->q_depth)
@@ -323,6 +359,7 @@ nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
323 iod->npages = -1; 359 iod->npages = -1;
324 iod->length = nbytes; 360 iod->length = nbytes;
325 iod->nents = 0; 361 iod->nents = 0;
362 iod->first_dma = 0ULL;
326 iod->start_time = jiffies; 363 iod->start_time = jiffies;
327 } 364 }
328 365
@@ -371,19 +408,31 @@ static void nvme_end_io_acct(struct bio *bio, unsigned long start_time)
371 part_stat_unlock(); 408 part_stat_unlock();
372} 409}
373 410
374static void bio_completion(struct nvme_dev *dev, void *ctx, 411static void bio_completion(struct nvme_queue *nvmeq, void *ctx,
375 struct nvme_completion *cqe) 412 struct nvme_completion *cqe)
376{ 413{
377 struct nvme_iod *iod = ctx; 414 struct nvme_iod *iod = ctx;
378 struct bio *bio = iod->private; 415 struct bio *bio = iod->private;
379 u16 status = le16_to_cpup(&cqe->status) >> 1; 416 u16 status = le16_to_cpup(&cqe->status) >> 1;
380 417
418 if (unlikely(status)) {
419 if (!(status & NVME_SC_DNR ||
420 bio->bi_rw & REQ_FAILFAST_MASK) &&
421 (jiffies - iod->start_time) < IOD_TIMEOUT) {
422 if (!waitqueue_active(&nvmeq->sq_full))
423 add_wait_queue(&nvmeq->sq_full,
424 &nvmeq->sq_cong_wait);
425 list_add_tail(&iod->node, &nvmeq->iod_bio);
426 wake_up(&nvmeq->sq_full);
427 return;
428 }
429 }
381 if (iod->nents) { 430 if (iod->nents) {
382 dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents, 431 dma_unmap_sg(nvmeq->q_dmadev, iod->sg, iod->nents,
383 bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 432 bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
384 nvme_end_io_acct(bio, iod->start_time); 433 nvme_end_io_acct(bio, iod->start_time);
385 } 434 }
386 nvme_free_iod(dev, iod); 435 nvme_free_iod(nvmeq->dev, iod);
387 if (status) 436 if (status)
388 bio_endio(bio, -EIO); 437 bio_endio(bio, -EIO);
389 else 438 else
@@ -391,8 +440,8 @@ static void bio_completion(struct nvme_dev *dev, void *ctx,
391} 440}
392 441
393/* length is in bytes. gfp flags indicates whether we may sleep. */ 442/* length is in bytes. gfp flags indicates whether we may sleep. */
394int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd, 443int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod, int total_len,
395 struct nvme_iod *iod, int total_len, gfp_t gfp) 444 gfp_t gfp)
396{ 445{
397 struct dma_pool *pool; 446 struct dma_pool *pool;
398 int length = total_len; 447 int length = total_len;
@@ -405,7 +454,6 @@ int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
405 dma_addr_t prp_dma; 454 dma_addr_t prp_dma;
406 int nprps, i; 455 int nprps, i;
407 456
408 cmd->prp1 = cpu_to_le64(dma_addr);
409 length -= (PAGE_SIZE - offset); 457 length -= (PAGE_SIZE - offset);
410 if (length <= 0) 458 if (length <= 0)
411 return total_len; 459 return total_len;
@@ -420,7 +468,7 @@ int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
420 } 468 }
421 469
422 if (length <= PAGE_SIZE) { 470 if (length <= PAGE_SIZE) {
423 cmd->prp2 = cpu_to_le64(dma_addr); 471 iod->first_dma = dma_addr;
424 return total_len; 472 return total_len;
425 } 473 }
426 474
@@ -435,13 +483,12 @@ int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
435 483
436 prp_list = dma_pool_alloc(pool, gfp, &prp_dma); 484 prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
437 if (!prp_list) { 485 if (!prp_list) {
438 cmd->prp2 = cpu_to_le64(dma_addr); 486 iod->first_dma = dma_addr;
439 iod->npages = -1; 487 iod->npages = -1;
440 return (total_len - length) + PAGE_SIZE; 488 return (total_len - length) + PAGE_SIZE;
441 } 489 }
442 list[0] = prp_list; 490 list[0] = prp_list;
443 iod->first_dma = prp_dma; 491 iod->first_dma = prp_dma;
444 cmd->prp2 = cpu_to_le64(prp_dma);
445 i = 0; 492 i = 0;
446 for (;;) { 493 for (;;) {
447 if (i == PAGE_SIZE / 8) { 494 if (i == PAGE_SIZE / 8) {
@@ -480,10 +527,11 @@ static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
480 527
481 bio_chain(split, bio); 528 bio_chain(split, bio);
482 529
483 if (bio_list_empty(&nvmeq->sq_cong)) 530 if (!waitqueue_active(&nvmeq->sq_full))
484 add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait); 531 add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
485 bio_list_add(&nvmeq->sq_cong, split); 532 bio_list_add(&nvmeq->sq_cong, split);
486 bio_list_add(&nvmeq->sq_cong, bio); 533 bio_list_add(&nvmeq->sq_cong, bio);
534 wake_up(&nvmeq->sq_full);
487 535
488 return 0; 536 return 0;
489} 537}
@@ -536,25 +584,13 @@ static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
536 return length; 584 return length;
537} 585}
538 586
539/*
540 * We reuse the small pool to allocate the 16-byte range here as it is not
541 * worth having a special pool for these or additional cases to handle freeing
542 * the iod.
543 */
544static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns, 587static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
545 struct bio *bio, struct nvme_iod *iod, int cmdid) 588 struct bio *bio, struct nvme_iod *iod, int cmdid)
546{ 589{
547 struct nvme_dsm_range *range; 590 struct nvme_dsm_range *range =
591 (struct nvme_dsm_range *)iod_list(iod)[0];
548 struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail]; 592 struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
549 593
550 range = dma_pool_alloc(nvmeq->dev->prp_small_pool, GFP_ATOMIC,
551 &iod->first_dma);
552 if (!range)
553 return -ENOMEM;
554
555 iod_list(iod)[0] = (__le64 *)range;
556 iod->npages = 0;
557
558 range->cattr = cpu_to_le32(0); 594 range->cattr = cpu_to_le32(0);
559 range->nlb = cpu_to_le32(bio->bi_iter.bi_size >> ns->lba_shift); 595 range->nlb = cpu_to_le32(bio->bi_iter.bi_size >> ns->lba_shift);
560 range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector)); 596 range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
@@ -601,44 +637,22 @@ int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
601 return nvme_submit_flush(nvmeq, ns, cmdid); 637 return nvme_submit_flush(nvmeq, ns, cmdid);
602} 638}
603 639
604/* 640static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod)
605 * Called with local interrupts disabled and the q_lock held. May not sleep.
606 */
607static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
608 struct bio *bio)
609{ 641{
642 struct bio *bio = iod->private;
643 struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
610 struct nvme_command *cmnd; 644 struct nvme_command *cmnd;
611 struct nvme_iod *iod; 645 int cmdid;
612 enum dma_data_direction dma_dir;
613 int cmdid, length, result;
614 u16 control; 646 u16 control;
615 u32 dsmgmt; 647 u32 dsmgmt;
616 int psegs = bio_phys_segments(ns->queue, bio);
617
618 if ((bio->bi_rw & REQ_FLUSH) && psegs) {
619 result = nvme_submit_flush_data(nvmeq, ns);
620 if (result)
621 return result;
622 }
623 648
624 result = -ENOMEM;
625 iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC);
626 if (!iod)
627 goto nomem;
628 iod->private = bio;
629
630 result = -EBUSY;
631 cmdid = alloc_cmdid(nvmeq, iod, bio_completion, NVME_IO_TIMEOUT); 649 cmdid = alloc_cmdid(nvmeq, iod, bio_completion, NVME_IO_TIMEOUT);
632 if (unlikely(cmdid < 0)) 650 if (unlikely(cmdid < 0))
633 goto free_iod; 651 return cmdid;
634 652
635 if (bio->bi_rw & REQ_DISCARD) { 653 if (bio->bi_rw & REQ_DISCARD)
636 result = nvme_submit_discard(nvmeq, ns, bio, iod, cmdid); 654 return nvme_submit_discard(nvmeq, ns, bio, iod, cmdid);
637 if (result) 655 if ((bio->bi_rw & REQ_FLUSH) && !iod->nents)
638 goto free_cmdid;
639 return result;
640 }
641 if ((bio->bi_rw & REQ_FLUSH) && !psegs)
642 return nvme_submit_flush(nvmeq, ns, cmdid); 656 return nvme_submit_flush(nvmeq, ns, cmdid);
643 657
644 control = 0; 658 control = 0;
@@ -652,42 +666,85 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
652 dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH; 666 dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
653 667
654 cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail]; 668 cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
655
656 memset(cmnd, 0, sizeof(*cmnd)); 669 memset(cmnd, 0, sizeof(*cmnd));
657 if (bio_data_dir(bio)) {
658 cmnd->rw.opcode = nvme_cmd_write;
659 dma_dir = DMA_TO_DEVICE;
660 } else {
661 cmnd->rw.opcode = nvme_cmd_read;
662 dma_dir = DMA_FROM_DEVICE;
663 }
664
665 result = nvme_map_bio(nvmeq, iod, bio, dma_dir, psegs);
666 if (result <= 0)
667 goto free_cmdid;
668 length = result;
669 670
671 cmnd->rw.opcode = bio_data_dir(bio) ? nvme_cmd_write : nvme_cmd_read;
670 cmnd->rw.command_id = cmdid; 672 cmnd->rw.command_id = cmdid;
671 cmnd->rw.nsid = cpu_to_le32(ns->ns_id); 673 cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
672 length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length, 674 cmnd->rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
673 GFP_ATOMIC); 675 cmnd->rw.prp2 = cpu_to_le64(iod->first_dma);
674 cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector)); 676 cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
675 cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1); 677 cmnd->rw.length =
678 cpu_to_le16((bio->bi_iter.bi_size >> ns->lba_shift) - 1);
676 cmnd->rw.control = cpu_to_le16(control); 679 cmnd->rw.control = cpu_to_le16(control);
677 cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt); 680 cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
678 681
679 nvme_start_io_acct(bio);
680 if (++nvmeq->sq_tail == nvmeq->q_depth) 682 if (++nvmeq->sq_tail == nvmeq->q_depth)
681 nvmeq->sq_tail = 0; 683 nvmeq->sq_tail = 0;
682 writel(nvmeq->sq_tail, nvmeq->q_db); 684 writel(nvmeq->sq_tail, nvmeq->q_db);
683 685
684 return 0; 686 return 0;
687}
688
689/*
690 * Called with local interrupts disabled and the q_lock held. May not sleep.
691 */
692static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
693 struct bio *bio)
694{
695 struct nvme_iod *iod;
696 int psegs = bio_phys_segments(ns->queue, bio);
697 int result;
698
699 if ((bio->bi_rw & REQ_FLUSH) && psegs) {
700 result = nvme_submit_flush_data(nvmeq, ns);
701 if (result)
702 return result;
703 }
704
705 iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC);
706 if (!iod)
707 return -ENOMEM;
708
709 iod->private = bio;
710 if (bio->bi_rw & REQ_DISCARD) {
711 void *range;
712 /*
713 * We reuse the small pool to allocate the 16-byte range here
714 * as it is not worth having a special pool for these or
715 * additional cases to handle freeing the iod.
716 */
717 range = dma_pool_alloc(nvmeq->dev->prp_small_pool,
718 GFP_ATOMIC,
719 &iod->first_dma);
720 if (!range) {
721 result = -ENOMEM;
722 goto free_iod;
723 }
724 iod_list(iod)[0] = (__le64 *)range;
725 iod->npages = 0;
726 } else if (psegs) {
727 result = nvme_map_bio(nvmeq, iod, bio,
728 bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
729 psegs);
730 if (result <= 0)
731 goto free_iod;
732 if (nvme_setup_prps(nvmeq->dev, iod, result, GFP_ATOMIC) !=
733 result) {
734 result = -ENOMEM;
735 goto free_iod;
736 }
737 nvme_start_io_acct(bio);
738 }
739 if (unlikely(nvme_submit_iod(nvmeq, iod))) {
740 if (!waitqueue_active(&nvmeq->sq_full))
741 add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
742 list_add_tail(&iod->node, &nvmeq->iod_bio);
743 }
744 return 0;
685 745
686 free_cmdid:
687 free_cmdid(nvmeq, cmdid, NULL);
688 free_iod: 746 free_iod:
689 nvme_free_iod(nvmeq->dev, iod); 747 nvme_free_iod(nvmeq->dev, iod);
690 nomem:
691 return result; 748 return result;
692} 749}
693 750
@@ -711,7 +768,7 @@ static int nvme_process_cq(struct nvme_queue *nvmeq)
711 } 768 }
712 769
713 ctx = free_cmdid(nvmeq, cqe.command_id, &fn); 770 ctx = free_cmdid(nvmeq, cqe.command_id, &fn);
714 fn(nvmeq->dev, ctx, &cqe); 771 fn(nvmeq, ctx, &cqe);
715 } 772 }
716 773
717 /* If the controller ignores the cq head doorbell and continuously 774 /* If the controller ignores the cq head doorbell and continuously
@@ -747,7 +804,7 @@ static void nvme_make_request(struct request_queue *q, struct bio *bio)
747 if (!nvmeq->q_suspended && bio_list_empty(&nvmeq->sq_cong)) 804 if (!nvmeq->q_suspended && bio_list_empty(&nvmeq->sq_cong))
748 result = nvme_submit_bio_queue(nvmeq, ns, bio); 805 result = nvme_submit_bio_queue(nvmeq, ns, bio);
749 if (unlikely(result)) { 806 if (unlikely(result)) {
750 if (bio_list_empty(&nvmeq->sq_cong)) 807 if (!waitqueue_active(&nvmeq->sq_full))
751 add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait); 808 add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
752 bio_list_add(&nvmeq->sq_cong, bio); 809 bio_list_add(&nvmeq->sq_cong, bio);
753 } 810 }
@@ -791,7 +848,7 @@ struct sync_cmd_info {
791 int status; 848 int status;
792}; 849};
793 850
794static void sync_completion(struct nvme_dev *dev, void *ctx, 851static void sync_completion(struct nvme_queue *nvmeq, void *ctx,
795 struct nvme_completion *cqe) 852 struct nvme_completion *cqe)
796{ 853{
797 struct sync_cmd_info *cmdinfo = ctx; 854 struct sync_cmd_info *cmdinfo = ctx;
@@ -804,27 +861,46 @@ static void sync_completion(struct nvme_dev *dev, void *ctx,
804 * Returns 0 on success. If the result is negative, it's a Linux error code; 861 * Returns 0 on success. If the result is negative, it's a Linux error code;
805 * if the result is positive, it's an NVM Express status code 862 * if the result is positive, it's an NVM Express status code
806 */ 863 */
807int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd, 864static int nvme_submit_sync_cmd(struct nvme_dev *dev, int q_idx,
865 struct nvme_command *cmd,
808 u32 *result, unsigned timeout) 866 u32 *result, unsigned timeout)
809{ 867{
810 int cmdid; 868 int cmdid, ret;
811 struct sync_cmd_info cmdinfo; 869 struct sync_cmd_info cmdinfo;
870 struct nvme_queue *nvmeq;
871
872 nvmeq = lock_nvmeq(dev, q_idx);
873 if (!nvmeq) {
874 unlock_nvmeq(nvmeq);
875 return -ENODEV;
876 }
812 877
813 cmdinfo.task = current; 878 cmdinfo.task = current;
814 cmdinfo.status = -EINTR; 879 cmdinfo.status = -EINTR;
815 880
816 cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion, 881 cmdid = alloc_cmdid(nvmeq, &cmdinfo, sync_completion, timeout);
817 timeout); 882 if (cmdid < 0) {
818 if (cmdid < 0) 883 unlock_nvmeq(nvmeq);
819 return cmdid; 884 return cmdid;
885 }
820 cmd->common.command_id = cmdid; 886 cmd->common.command_id = cmdid;
821 887
822 set_current_state(TASK_KILLABLE); 888 set_current_state(TASK_KILLABLE);
823 nvme_submit_cmd(nvmeq, cmd); 889 ret = nvme_submit_cmd(nvmeq, cmd);
890 if (ret) {
891 free_cmdid(nvmeq, cmdid, NULL);
892 unlock_nvmeq(nvmeq);
893 set_current_state(TASK_RUNNING);
894 return ret;
895 }
896 unlock_nvmeq(nvmeq);
824 schedule_timeout(timeout); 897 schedule_timeout(timeout);
825 898
826 if (cmdinfo.status == -EINTR) { 899 if (cmdinfo.status == -EINTR) {
827 nvme_abort_command(nvmeq, cmdid); 900 nvmeq = lock_nvmeq(dev, q_idx);
901 if (nvmeq)
902 nvme_abort_command(nvmeq, cmdid);
903 unlock_nvmeq(nvmeq);
828 return -EINTR; 904 return -EINTR;
829 } 905 }
830 906
@@ -845,20 +921,26 @@ static int nvme_submit_async_cmd(struct nvme_queue *nvmeq,
845 return cmdid; 921 return cmdid;
846 cmdinfo->status = -EINTR; 922 cmdinfo->status = -EINTR;
847 cmd->common.command_id = cmdid; 923 cmd->common.command_id = cmdid;
848 nvme_submit_cmd(nvmeq, cmd); 924 return nvme_submit_cmd(nvmeq, cmd);
849 return 0;
850} 925}
851 926
852int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd, 927int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
853 u32 *result) 928 u32 *result)
854{ 929{
855 return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT); 930 return nvme_submit_sync_cmd(dev, 0, cmd, result, ADMIN_TIMEOUT);
931}
932
933int nvme_submit_io_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
934 u32 *result)
935{
936 return nvme_submit_sync_cmd(dev, smp_processor_id() + 1, cmd, result,
937 NVME_IO_TIMEOUT);
856} 938}
857 939
858static int nvme_submit_admin_cmd_async(struct nvme_dev *dev, 940static int nvme_submit_admin_cmd_async(struct nvme_dev *dev,
859 struct nvme_command *cmd, struct async_cmd_info *cmdinfo) 941 struct nvme_command *cmd, struct async_cmd_info *cmdinfo)
860{ 942{
861 return nvme_submit_async_cmd(dev->queues[0], cmd, cmdinfo, 943 return nvme_submit_async_cmd(raw_nvmeq(dev, 0), cmd, cmdinfo,
862 ADMIN_TIMEOUT); 944 ADMIN_TIMEOUT);
863} 945}
864 946
@@ -985,6 +1067,7 @@ static void nvme_abort_cmd(int cmdid, struct nvme_queue *nvmeq)
985 struct nvme_command cmd; 1067 struct nvme_command cmd;
986 struct nvme_dev *dev = nvmeq->dev; 1068 struct nvme_dev *dev = nvmeq->dev;
987 struct nvme_cmd_info *info = nvme_cmd_info(nvmeq); 1069 struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
1070 struct nvme_queue *adminq;
988 1071
989 if (!nvmeq->qid || info[cmdid].aborted) { 1072 if (!nvmeq->qid || info[cmdid].aborted) {
990 if (work_busy(&dev->reset_work)) 1073 if (work_busy(&dev->reset_work))
@@ -1001,7 +1084,8 @@ static void nvme_abort_cmd(int cmdid, struct nvme_queue *nvmeq)
1001 if (!dev->abort_limit) 1084 if (!dev->abort_limit)
1002 return; 1085 return;
1003 1086
1004 a_cmdid = alloc_cmdid(dev->queues[0], CMD_CTX_ABORT, special_completion, 1087 adminq = rcu_dereference(dev->queues[0]);
1088 a_cmdid = alloc_cmdid(adminq, CMD_CTX_ABORT, special_completion,
1005 ADMIN_TIMEOUT); 1089 ADMIN_TIMEOUT);
1006 if (a_cmdid < 0) 1090 if (a_cmdid < 0)
1007 return; 1091 return;
@@ -1018,7 +1102,7 @@ static void nvme_abort_cmd(int cmdid, struct nvme_queue *nvmeq)
1018 1102
1019 dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", cmdid, 1103 dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", cmdid,
1020 nvmeq->qid); 1104 nvmeq->qid);
1021 nvme_submit_cmd(dev->queues[0], &cmd); 1105 nvme_submit_cmd(adminq, &cmd);
1022} 1106}
1023 1107
1024/** 1108/**
@@ -1051,23 +1135,38 @@ static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
1051 dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n", cmdid, 1135 dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n", cmdid,
1052 nvmeq->qid); 1136 nvmeq->qid);
1053 ctx = cancel_cmdid(nvmeq, cmdid, &fn); 1137 ctx = cancel_cmdid(nvmeq, cmdid, &fn);
1054 fn(nvmeq->dev, ctx, &cqe); 1138 fn(nvmeq, ctx, &cqe);
1055 } 1139 }
1056} 1140}
1057 1141
1058static void nvme_free_queue(struct nvme_queue *nvmeq) 1142static void nvme_free_queue(struct rcu_head *r)
1059{ 1143{
1144 struct nvme_queue *nvmeq = container_of(r, struct nvme_queue, r_head);
1145
1060 spin_lock_irq(&nvmeq->q_lock); 1146 spin_lock_irq(&nvmeq->q_lock);
1061 while (bio_list_peek(&nvmeq->sq_cong)) { 1147 while (bio_list_peek(&nvmeq->sq_cong)) {
1062 struct bio *bio = bio_list_pop(&nvmeq->sq_cong); 1148 struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
1063 bio_endio(bio, -EIO); 1149 bio_endio(bio, -EIO);
1064 } 1150 }
1151 while (!list_empty(&nvmeq->iod_bio)) {
1152 static struct nvme_completion cqe = {
1153 .status = cpu_to_le16(
1154 (NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1),
1155 };
1156 struct nvme_iod *iod = list_first_entry(&nvmeq->iod_bio,
1157 struct nvme_iod,
1158 node);
1159 list_del(&iod->node);
1160 bio_completion(nvmeq, iod, &cqe);
1161 }
1065 spin_unlock_irq(&nvmeq->q_lock); 1162 spin_unlock_irq(&nvmeq->q_lock);
1066 1163
1067 dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth), 1164 dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
1068 (void *)nvmeq->cqes, nvmeq->cq_dma_addr); 1165 (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
1069 dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth), 1166 dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
1070 nvmeq->sq_cmds, nvmeq->sq_dma_addr); 1167 nvmeq->sq_cmds, nvmeq->sq_dma_addr);
1168 if (nvmeq->qid)
1169 free_cpumask_var(nvmeq->cpu_mask);
1071 kfree(nvmeq); 1170 kfree(nvmeq);
1072} 1171}
1073 1172
@@ -1076,9 +1175,10 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest)
1076 int i; 1175 int i;
1077 1176
1078 for (i = dev->queue_count - 1; i >= lowest; i--) { 1177 for (i = dev->queue_count - 1; i >= lowest; i--) {
1079 nvme_free_queue(dev->queues[i]); 1178 struct nvme_queue *nvmeq = raw_nvmeq(dev, i);
1179 rcu_assign_pointer(dev->queues[i], NULL);
1180 call_rcu(&nvmeq->r_head, nvme_free_queue);
1080 dev->queue_count--; 1181 dev->queue_count--;
1081 dev->queues[i] = NULL;
1082 } 1182 }
1083} 1183}
1084 1184
@@ -1098,6 +1198,7 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
1098 return 1; 1198 return 1;
1099 } 1199 }
1100 nvmeq->q_suspended = 1; 1200 nvmeq->q_suspended = 1;
1201 nvmeq->dev->online_queues--;
1101 spin_unlock_irq(&nvmeq->q_lock); 1202 spin_unlock_irq(&nvmeq->q_lock);
1102 1203
1103 irq_set_affinity_hint(vector, NULL); 1204 irq_set_affinity_hint(vector, NULL);
@@ -1116,7 +1217,7 @@ static void nvme_clear_queue(struct nvme_queue *nvmeq)
1116 1217
1117static void nvme_disable_queue(struct nvme_dev *dev, int qid) 1218static void nvme_disable_queue(struct nvme_dev *dev, int qid)
1118{ 1219{
1119 struct nvme_queue *nvmeq = dev->queues[qid]; 1220 struct nvme_queue *nvmeq = raw_nvmeq(dev, qid);
1120 1221
1121 if (!nvmeq) 1222 if (!nvmeq)
1122 return; 1223 return;
@@ -1152,6 +1253,9 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
1152 if (!nvmeq->sq_cmds) 1253 if (!nvmeq->sq_cmds)
1153 goto free_cqdma; 1254 goto free_cqdma;
1154 1255
1256 if (qid && !zalloc_cpumask_var(&nvmeq->cpu_mask, GFP_KERNEL))
1257 goto free_sqdma;
1258
1155 nvmeq->q_dmadev = dmadev; 1259 nvmeq->q_dmadev = dmadev;
1156 nvmeq->dev = dev; 1260 nvmeq->dev = dev;
1157 snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d", 1261 snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d",
@@ -1162,15 +1266,20 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
1162 init_waitqueue_head(&nvmeq->sq_full); 1266 init_waitqueue_head(&nvmeq->sq_full);
1163 init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread); 1267 init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
1164 bio_list_init(&nvmeq->sq_cong); 1268 bio_list_init(&nvmeq->sq_cong);
1269 INIT_LIST_HEAD(&nvmeq->iod_bio);
1165 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; 1270 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
1166 nvmeq->q_depth = depth; 1271 nvmeq->q_depth = depth;
1167 nvmeq->cq_vector = vector; 1272 nvmeq->cq_vector = vector;
1168 nvmeq->qid = qid; 1273 nvmeq->qid = qid;
1169 nvmeq->q_suspended = 1; 1274 nvmeq->q_suspended = 1;
1170 dev->queue_count++; 1275 dev->queue_count++;
1276 rcu_assign_pointer(dev->queues[qid], nvmeq);
1171 1277
1172 return nvmeq; 1278 return nvmeq;
1173 1279
1280 free_sqdma:
1281 dma_free_coherent(dmadev, SQ_SIZE(depth), (void *)nvmeq->sq_cmds,
1282 nvmeq->sq_dma_addr);
1174 free_cqdma: 1283 free_cqdma:
1175 dma_free_coherent(dmadev, CQ_SIZE(depth), (void *)nvmeq->cqes, 1284 dma_free_coherent(dmadev, CQ_SIZE(depth), (void *)nvmeq->cqes,
1176 nvmeq->cq_dma_addr); 1285 nvmeq->cq_dma_addr);
@@ -1203,6 +1312,7 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
1203 memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth)); 1312 memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
1204 nvme_cancel_ios(nvmeq, false); 1313 nvme_cancel_ios(nvmeq, false);
1205 nvmeq->q_suspended = 0; 1314 nvmeq->q_suspended = 0;
1315 dev->online_queues++;
1206} 1316}
1207 1317
1208static int nvme_create_queue(struct nvme_queue *nvmeq, int qid) 1318static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
@@ -1311,12 +1421,11 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
1311 if (result < 0) 1421 if (result < 0)
1312 return result; 1422 return result;
1313 1423
1314 nvmeq = dev->queues[0]; 1424 nvmeq = raw_nvmeq(dev, 0);
1315 if (!nvmeq) { 1425 if (!nvmeq) {
1316 nvmeq = nvme_alloc_queue(dev, 0, 64, 0); 1426 nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
1317 if (!nvmeq) 1427 if (!nvmeq)
1318 return -ENOMEM; 1428 return -ENOMEM;
1319 dev->queues[0] = nvmeq;
1320 } 1429 }
1321 1430
1322 aqa = nvmeq->q_depth - 1; 1431 aqa = nvmeq->q_depth - 1;
@@ -1418,7 +1527,6 @@ void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
1418static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) 1527static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
1419{ 1528{
1420 struct nvme_dev *dev = ns->dev; 1529 struct nvme_dev *dev = ns->dev;
1421 struct nvme_queue *nvmeq;
1422 struct nvme_user_io io; 1530 struct nvme_user_io io;
1423 struct nvme_command c; 1531 struct nvme_command c;
1424 unsigned length, meta_len; 1532 unsigned length, meta_len;
@@ -1492,22 +1600,14 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
1492 c.rw.metadata = cpu_to_le64(meta_dma_addr); 1600 c.rw.metadata = cpu_to_le64(meta_dma_addr);
1493 } 1601 }
1494 1602
1495 length = nvme_setup_prps(dev, &c.common, iod, length, GFP_KERNEL); 1603 length = nvme_setup_prps(dev, iod, length, GFP_KERNEL);
1604 c.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
1605 c.rw.prp2 = cpu_to_le64(iod->first_dma);
1496 1606
1497 nvmeq = get_nvmeq(dev);
1498 /*
1499 * Since nvme_submit_sync_cmd sleeps, we can't keep preemption
1500 * disabled. We may be preempted at any point, and be rescheduled
1501 * to a different CPU. That will cause cacheline bouncing, but no
1502 * additional races since q_lock already protects against other CPUs.
1503 */
1504 put_nvmeq(nvmeq);
1505 if (length != (io.nblocks + 1) << ns->lba_shift) 1607 if (length != (io.nblocks + 1) << ns->lba_shift)
1506 status = -ENOMEM; 1608 status = -ENOMEM;
1507 else if (!nvmeq || nvmeq->q_suspended)
1508 status = -EBUSY;
1509 else 1609 else
1510 status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT); 1610 status = nvme_submit_io_cmd(dev, &c, NULL);
1511 1611
1512 if (meta_len) { 1612 if (meta_len) {
1513 if (status == NVME_SC_SUCCESS && !(io.opcode & 1)) { 1613 if (status == NVME_SC_SUCCESS && !(io.opcode & 1)) {
@@ -1572,8 +1672,9 @@ static int nvme_user_admin_cmd(struct nvme_dev *dev,
1572 length); 1672 length);
1573 if (IS_ERR(iod)) 1673 if (IS_ERR(iod))
1574 return PTR_ERR(iod); 1674 return PTR_ERR(iod);
1575 length = nvme_setup_prps(dev, &c.common, iod, length, 1675 length = nvme_setup_prps(dev, iod, length, GFP_KERNEL);
1576 GFP_KERNEL); 1676 c.common.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
1677 c.common.prp2 = cpu_to_le64(iod->first_dma);
1577 } 1678 }
1578 1679
1579 timeout = cmd.timeout_ms ? msecs_to_jiffies(cmd.timeout_ms) : 1680 timeout = cmd.timeout_ms ? msecs_to_jiffies(cmd.timeout_ms) :
@@ -1581,8 +1682,7 @@ static int nvme_user_admin_cmd(struct nvme_dev *dev,
1581 if (length != cmd.data_len) 1682 if (length != cmd.data_len)
1582 status = -ENOMEM; 1683 status = -ENOMEM;
1583 else 1684 else
1584 status = nvme_submit_sync_cmd(dev->queues[0], &c, &cmd.result, 1685 status = nvme_submit_sync_cmd(dev, 0, &c, &cmd.result, timeout);
1585 timeout);
1586 1686
1587 if (cmd.data_len) { 1687 if (cmd.data_len) {
1588 nvme_unmap_user_pages(dev, cmd.opcode & 1, iod); 1688 nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
@@ -1653,25 +1753,51 @@ static void nvme_release(struct gendisk *disk, fmode_t mode)
1653 kref_put(&dev->kref, nvme_free_dev); 1753 kref_put(&dev->kref, nvme_free_dev);
1654} 1754}
1655 1755
1756static int nvme_getgeo(struct block_device *bd, struct hd_geometry *geo)
1757{
1758 /* some standard values */
1759 geo->heads = 1 << 6;
1760 geo->sectors = 1 << 5;
1761 geo->cylinders = get_capacity(bd->bd_disk) >> 11;
1762 return 0;
1763}
1764
1656static const struct block_device_operations nvme_fops = { 1765static const struct block_device_operations nvme_fops = {
1657 .owner = THIS_MODULE, 1766 .owner = THIS_MODULE,
1658 .ioctl = nvme_ioctl, 1767 .ioctl = nvme_ioctl,
1659 .compat_ioctl = nvme_compat_ioctl, 1768 .compat_ioctl = nvme_compat_ioctl,
1660 .open = nvme_open, 1769 .open = nvme_open,
1661 .release = nvme_release, 1770 .release = nvme_release,
1771 .getgeo = nvme_getgeo,
1662}; 1772};
1663 1773
1774static void nvme_resubmit_iods(struct nvme_queue *nvmeq)
1775{
1776 struct nvme_iod *iod, *next;
1777
1778 list_for_each_entry_safe(iod, next, &nvmeq->iod_bio, node) {
1779 if (unlikely(nvme_submit_iod(nvmeq, iod)))
1780 break;
1781 list_del(&iod->node);
1782 if (bio_list_empty(&nvmeq->sq_cong) &&
1783 list_empty(&nvmeq->iod_bio))
1784 remove_wait_queue(&nvmeq->sq_full,
1785 &nvmeq->sq_cong_wait);
1786 }
1787}
1788
1664static void nvme_resubmit_bios(struct nvme_queue *nvmeq) 1789static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
1665{ 1790{
1666 while (bio_list_peek(&nvmeq->sq_cong)) { 1791 while (bio_list_peek(&nvmeq->sq_cong)) {
1667 struct bio *bio = bio_list_pop(&nvmeq->sq_cong); 1792 struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
1668 struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data; 1793 struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
1669 1794
1670 if (bio_list_empty(&nvmeq->sq_cong)) 1795 if (bio_list_empty(&nvmeq->sq_cong) &&
1796 list_empty(&nvmeq->iod_bio))
1671 remove_wait_queue(&nvmeq->sq_full, 1797 remove_wait_queue(&nvmeq->sq_full,
1672 &nvmeq->sq_cong_wait); 1798 &nvmeq->sq_cong_wait);
1673 if (nvme_submit_bio_queue(nvmeq, ns, bio)) { 1799 if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
1674 if (bio_list_empty(&nvmeq->sq_cong)) 1800 if (!waitqueue_active(&nvmeq->sq_full))
1675 add_wait_queue(&nvmeq->sq_full, 1801 add_wait_queue(&nvmeq->sq_full,
1676 &nvmeq->sq_cong_wait); 1802 &nvmeq->sq_cong_wait);
1677 bio_list_add_head(&nvmeq->sq_cong, bio); 1803 bio_list_add_head(&nvmeq->sq_cong, bio);
@@ -1700,8 +1826,10 @@ static int nvme_kthread(void *data)
1700 queue_work(nvme_workq, &dev->reset_work); 1826 queue_work(nvme_workq, &dev->reset_work);
1701 continue; 1827 continue;
1702 } 1828 }
1829 rcu_read_lock();
1703 for (i = 0; i < dev->queue_count; i++) { 1830 for (i = 0; i < dev->queue_count; i++) {
1704 struct nvme_queue *nvmeq = dev->queues[i]; 1831 struct nvme_queue *nvmeq =
1832 rcu_dereference(dev->queues[i]);
1705 if (!nvmeq) 1833 if (!nvmeq)
1706 continue; 1834 continue;
1707 spin_lock_irq(&nvmeq->q_lock); 1835 spin_lock_irq(&nvmeq->q_lock);
@@ -1710,9 +1838,11 @@ static int nvme_kthread(void *data)
1710 nvme_process_cq(nvmeq); 1838 nvme_process_cq(nvmeq);
1711 nvme_cancel_ios(nvmeq, true); 1839 nvme_cancel_ios(nvmeq, true);
1712 nvme_resubmit_bios(nvmeq); 1840 nvme_resubmit_bios(nvmeq);
1841 nvme_resubmit_iods(nvmeq);
1713 unlock: 1842 unlock:
1714 spin_unlock_irq(&nvmeq->q_lock); 1843 spin_unlock_irq(&nvmeq->q_lock);
1715 } 1844 }
1845 rcu_read_unlock();
1716 } 1846 }
1717 spin_unlock(&dev_list_lock); 1847 spin_unlock(&dev_list_lock);
1718 schedule_timeout(round_jiffies_relative(HZ)); 1848 schedule_timeout(round_jiffies_relative(HZ));
@@ -1787,6 +1917,143 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
1787 return NULL; 1917 return NULL;
1788} 1918}
1789 1919
1920static int nvme_find_closest_node(int node)
1921{
1922 int n, val, min_val = INT_MAX, best_node = node;
1923
1924 for_each_online_node(n) {
1925 if (n == node)
1926 continue;
1927 val = node_distance(node, n);
1928 if (val < min_val) {
1929 min_val = val;
1930 best_node = n;
1931 }
1932 }
1933 return best_node;
1934}
1935
1936static void nvme_set_queue_cpus(cpumask_t *qmask, struct nvme_queue *nvmeq,
1937 int count)
1938{
1939 int cpu;
1940 for_each_cpu(cpu, qmask) {
1941 if (cpumask_weight(nvmeq->cpu_mask) >= count)
1942 break;
1943 if (!cpumask_test_and_set_cpu(cpu, nvmeq->cpu_mask))
1944 *per_cpu_ptr(nvmeq->dev->io_queue, cpu) = nvmeq->qid;
1945 }
1946}
1947
1948static void nvme_add_cpus(cpumask_t *mask, const cpumask_t *unassigned_cpus,
1949 const cpumask_t *new_mask, struct nvme_queue *nvmeq, int cpus_per_queue)
1950{
1951 int next_cpu;
1952 for_each_cpu(next_cpu, new_mask) {
1953 cpumask_or(mask, mask, get_cpu_mask(next_cpu));
1954 cpumask_or(mask, mask, topology_thread_cpumask(next_cpu));
1955 cpumask_and(mask, mask, unassigned_cpus);
1956 nvme_set_queue_cpus(mask, nvmeq, cpus_per_queue);
1957 }
1958}
1959
1960static void nvme_create_io_queues(struct nvme_dev *dev)
1961{
1962 unsigned i, max;
1963
1964 max = min(dev->max_qid, num_online_cpus());
1965 for (i = dev->queue_count; i <= max; i++)
1966 if (!nvme_alloc_queue(dev, i, dev->q_depth, i - 1))
1967 break;
1968
1969 max = min(dev->queue_count - 1, num_online_cpus());
1970 for (i = dev->online_queues; i <= max; i++)
1971 if (nvme_create_queue(raw_nvmeq(dev, i), i))
1972 break;
1973}
1974
1975/*
1976 * If there are fewer queues than online cpus, this will try to optimally
1977 * assign a queue to multiple cpus by grouping cpus that are "close" together:
1978 * thread siblings, core, socket, closest node, then whatever else is
1979 * available.
1980 */
1981static void nvme_assign_io_queues(struct nvme_dev *dev)
1982{
1983 unsigned cpu, cpus_per_queue, queues, remainder, i;
1984 cpumask_var_t unassigned_cpus;
1985
1986 nvme_create_io_queues(dev);
1987
1988 queues = min(dev->online_queues - 1, num_online_cpus());
1989 if (!queues)
1990 return;
1991
1992 cpus_per_queue = num_online_cpus() / queues;
1993 remainder = queues - (num_online_cpus() - queues * cpus_per_queue);
1994
1995 if (!alloc_cpumask_var(&unassigned_cpus, GFP_KERNEL))
1996 return;
1997
1998 cpumask_copy(unassigned_cpus, cpu_online_mask);
1999 cpu = cpumask_first(unassigned_cpus);
2000 for (i = 1; i <= queues; i++) {
2001 struct nvme_queue *nvmeq = lock_nvmeq(dev, i);
2002 cpumask_t mask;
2003
2004 cpumask_clear(nvmeq->cpu_mask);
2005 if (!cpumask_weight(unassigned_cpus)) {
2006 unlock_nvmeq(nvmeq);
2007 break;
2008 }
2009
2010 mask = *get_cpu_mask(cpu);
2011 nvme_set_queue_cpus(&mask, nvmeq, cpus_per_queue);
2012 if (cpus_weight(mask) < cpus_per_queue)
2013 nvme_add_cpus(&mask, unassigned_cpus,
2014 topology_thread_cpumask(cpu),
2015 nvmeq, cpus_per_queue);
2016 if (cpus_weight(mask) < cpus_per_queue)
2017 nvme_add_cpus(&mask, unassigned_cpus,
2018 topology_core_cpumask(cpu),
2019 nvmeq, cpus_per_queue);
2020 if (cpus_weight(mask) < cpus_per_queue)
2021 nvme_add_cpus(&mask, unassigned_cpus,
2022 cpumask_of_node(cpu_to_node(cpu)),
2023 nvmeq, cpus_per_queue);
2024 if (cpus_weight(mask) < cpus_per_queue)
2025 nvme_add_cpus(&mask, unassigned_cpus,
2026 cpumask_of_node(
2027 nvme_find_closest_node(
2028 cpu_to_node(cpu))),
2029 nvmeq, cpus_per_queue);
2030 if (cpus_weight(mask) < cpus_per_queue)
2031 nvme_add_cpus(&mask, unassigned_cpus,
2032 unassigned_cpus,
2033 nvmeq, cpus_per_queue);
2034
2035 WARN(cpumask_weight(nvmeq->cpu_mask) != cpus_per_queue,
2036 "nvme%d qid:%d mis-matched queue-to-cpu assignment\n",
2037 dev->instance, i);
2038
2039 irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
2040 nvmeq->cpu_mask);
2041 cpumask_andnot(unassigned_cpus, unassigned_cpus,
2042 nvmeq->cpu_mask);
2043 cpu = cpumask_next(cpu, unassigned_cpus);
2044 if (remainder && !--remainder)
2045 cpus_per_queue++;
2046 unlock_nvmeq(nvmeq);
2047 }
2048 WARN(cpumask_weight(unassigned_cpus), "nvme%d unassigned online cpus\n",
2049 dev->instance);
2050 i = 0;
2051 cpumask_andnot(unassigned_cpus, cpu_possible_mask, cpu_online_mask);
2052 for_each_cpu(cpu, unassigned_cpus)
2053 *per_cpu_ptr(dev->io_queue, cpu) = (i++ % queues) + 1;
2054 free_cpumask_var(unassigned_cpus);
2055}
2056
1790static int set_queue_count(struct nvme_dev *dev, int count) 2057static int set_queue_count(struct nvme_dev *dev, int count)
1791{ 2058{
1792 int status; 2059 int status;
@@ -1805,13 +2072,26 @@ static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
1805 return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride); 2072 return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride);
1806} 2073}
1807 2074
2075static int nvme_cpu_notify(struct notifier_block *self,
2076 unsigned long action, void *hcpu)
2077{
2078 struct nvme_dev *dev = container_of(self, struct nvme_dev, nb);
2079 switch (action) {
2080 case CPU_ONLINE:
2081 case CPU_DEAD:
2082 nvme_assign_io_queues(dev);
2083 break;
2084 }
2085 return NOTIFY_OK;
2086}
2087
1808static int nvme_setup_io_queues(struct nvme_dev *dev) 2088static int nvme_setup_io_queues(struct nvme_dev *dev)
1809{ 2089{
1810 struct nvme_queue *adminq = dev->queues[0]; 2090 struct nvme_queue *adminq = raw_nvmeq(dev, 0);
1811 struct pci_dev *pdev = dev->pci_dev; 2091 struct pci_dev *pdev = dev->pci_dev;
1812 int result, cpu, i, vecs, nr_io_queues, size, q_depth; 2092 int result, i, vecs, nr_io_queues, size;
1813 2093
1814 nr_io_queues = num_online_cpus(); 2094 nr_io_queues = num_possible_cpus();
1815 result = set_queue_count(dev, nr_io_queues); 2095 result = set_queue_count(dev, nr_io_queues);
1816 if (result < 0) 2096 if (result < 0)
1817 return result; 2097 return result;
@@ -1830,7 +2110,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
1830 size = db_bar_size(dev, nr_io_queues); 2110 size = db_bar_size(dev, nr_io_queues);
1831 } while (1); 2111 } while (1);
1832 dev->dbs = ((void __iomem *)dev->bar) + 4096; 2112 dev->dbs = ((void __iomem *)dev->bar) + 4096;
1833 dev->queues[0]->q_db = dev->dbs; 2113 adminq->q_db = dev->dbs;
1834 } 2114 }
1835 2115
1836 /* Deregister the admin queue's interrupt */ 2116 /* Deregister the admin queue's interrupt */
@@ -1856,6 +2136,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
1856 * number of interrupts. 2136 * number of interrupts.
1857 */ 2137 */
1858 nr_io_queues = vecs; 2138 nr_io_queues = vecs;
2139 dev->max_qid = nr_io_queues;
1859 2140
1860 result = queue_request_irq(dev, adminq, adminq->irqname); 2141 result = queue_request_irq(dev, adminq, adminq->irqname);
1861 if (result) { 2142 if (result) {
@@ -1864,49 +2145,13 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
1864 } 2145 }
1865 2146
1866 /* Free previously allocated queues that are no longer usable */ 2147 /* Free previously allocated queues that are no longer usable */
1867 spin_lock(&dev_list_lock); 2148 nvme_free_queues(dev, nr_io_queues + 1);
1868 for (i = dev->queue_count - 1; i > nr_io_queues; i--) { 2149 nvme_assign_io_queues(dev);
1869 struct nvme_queue *nvmeq = dev->queues[i];
1870
1871 spin_lock_irq(&nvmeq->q_lock);
1872 nvme_cancel_ios(nvmeq, false);
1873 spin_unlock_irq(&nvmeq->q_lock);
1874
1875 nvme_free_queue(nvmeq);
1876 dev->queue_count--;
1877 dev->queues[i] = NULL;
1878 }
1879 spin_unlock(&dev_list_lock);
1880
1881 cpu = cpumask_first(cpu_online_mask);
1882 for (i = 0; i < nr_io_queues; i++) {
1883 irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
1884 cpu = cpumask_next(cpu, cpu_online_mask);
1885 }
1886
1887 q_depth = min_t(int, NVME_CAP_MQES(readq(&dev->bar->cap)) + 1,
1888 NVME_Q_DEPTH);
1889 for (i = dev->queue_count - 1; i < nr_io_queues; i++) {
1890 dev->queues[i + 1] = nvme_alloc_queue(dev, i + 1, q_depth, i);
1891 if (!dev->queues[i + 1]) {
1892 result = -ENOMEM;
1893 goto free_queues;
1894 }
1895 }
1896
1897 for (; i < num_possible_cpus(); i++) {
1898 int target = i % rounddown_pow_of_two(dev->queue_count - 1);
1899 dev->queues[i + 1] = dev->queues[target + 1];
1900 }
1901 2150
1902 for (i = 1; i < dev->queue_count; i++) { 2151 dev->nb.notifier_call = &nvme_cpu_notify;
1903 result = nvme_create_queue(dev->queues[i], i); 2152 result = register_hotcpu_notifier(&dev->nb);
1904 if (result) { 2153 if (result)
1905 for (--i; i > 0; i--) 2154 goto free_queues;
1906 nvme_disable_queue(dev, i);
1907 goto free_queues;
1908 }
1909 }
1910 2155
1911 return 0; 2156 return 0;
1912 2157
@@ -1985,6 +2230,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
1985 2230
1986static int nvme_dev_map(struct nvme_dev *dev) 2231static int nvme_dev_map(struct nvme_dev *dev)
1987{ 2232{
2233 u64 cap;
1988 int bars, result = -ENOMEM; 2234 int bars, result = -ENOMEM;
1989 struct pci_dev *pdev = dev->pci_dev; 2235 struct pci_dev *pdev = dev->pci_dev;
1990 2236
@@ -2008,7 +2254,9 @@ static int nvme_dev_map(struct nvme_dev *dev)
2008 result = -ENODEV; 2254 result = -ENODEV;
2009 goto unmap; 2255 goto unmap;
2010 } 2256 }
2011 dev->db_stride = 1 << NVME_CAP_STRIDE(readq(&dev->bar->cap)); 2257 cap = readq(&dev->bar->cap);
2258 dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH);
2259 dev->db_stride = 1 << NVME_CAP_STRIDE(cap);
2012 dev->dbs = ((void __iomem *)dev->bar) + 4096; 2260 dev->dbs = ((void __iomem *)dev->bar) + 4096;
2013 2261
2014 return 0; 2262 return 0;
@@ -2164,7 +2412,7 @@ static void nvme_disable_io_queues(struct nvme_dev *dev)
2164 atomic_set(&dq.refcount, 0); 2412 atomic_set(&dq.refcount, 0);
2165 dq.worker = &worker; 2413 dq.worker = &worker;
2166 for (i = dev->queue_count - 1; i > 0; i--) { 2414 for (i = dev->queue_count - 1; i > 0; i--) {
2167 struct nvme_queue *nvmeq = dev->queues[i]; 2415 struct nvme_queue *nvmeq = raw_nvmeq(dev, i);
2168 2416
2169 if (nvme_suspend_queue(nvmeq)) 2417 if (nvme_suspend_queue(nvmeq))
2170 continue; 2418 continue;
@@ -2177,19 +2425,38 @@ static void nvme_disable_io_queues(struct nvme_dev *dev)
2177 kthread_stop(kworker_task); 2425 kthread_stop(kworker_task);
2178} 2426}
2179 2427
2428/*
2429* Remove the node from the device list and check
2430* for whether or not we need to stop the nvme_thread.
2431*/
2432static void nvme_dev_list_remove(struct nvme_dev *dev)
2433{
2434 struct task_struct *tmp = NULL;
2435
2436 spin_lock(&dev_list_lock);
2437 list_del_init(&dev->node);
2438 if (list_empty(&dev_list) && !IS_ERR_OR_NULL(nvme_thread)) {
2439 tmp = nvme_thread;
2440 nvme_thread = NULL;
2441 }
2442 spin_unlock(&dev_list_lock);
2443
2444 if (tmp)
2445 kthread_stop(tmp);
2446}
2447
2180static void nvme_dev_shutdown(struct nvme_dev *dev) 2448static void nvme_dev_shutdown(struct nvme_dev *dev)
2181{ 2449{
2182 int i; 2450 int i;
2183 2451
2184 dev->initialized = 0; 2452 dev->initialized = 0;
2453 unregister_hotcpu_notifier(&dev->nb);
2185 2454
2186 spin_lock(&dev_list_lock); 2455 nvme_dev_list_remove(dev);
2187 list_del_init(&dev->node);
2188 spin_unlock(&dev_list_lock);
2189 2456
2190 if (!dev->bar || (dev->bar && readl(&dev->bar->csts) == -1)) { 2457 if (!dev->bar || (dev->bar && readl(&dev->bar->csts) == -1)) {
2191 for (i = dev->queue_count - 1; i >= 0; i--) { 2458 for (i = dev->queue_count - 1; i >= 0; i--) {
2192 struct nvme_queue *nvmeq = dev->queues[i]; 2459 struct nvme_queue *nvmeq = raw_nvmeq(dev, i);
2193 nvme_suspend_queue(nvmeq); 2460 nvme_suspend_queue(nvmeq);
2194 nvme_clear_queue(nvmeq); 2461 nvme_clear_queue(nvmeq);
2195 } 2462 }
@@ -2282,6 +2549,7 @@ static void nvme_free_dev(struct kref *kref)
2282 struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref); 2549 struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
2283 2550
2284 nvme_free_namespaces(dev); 2551 nvme_free_namespaces(dev);
2552 free_percpu(dev->io_queue);
2285 kfree(dev->queues); 2553 kfree(dev->queues);
2286 kfree(dev->entry); 2554 kfree(dev->entry);
2287 kfree(dev); 2555 kfree(dev);
@@ -2325,6 +2593,7 @@ static const struct file_operations nvme_dev_fops = {
2325static int nvme_dev_start(struct nvme_dev *dev) 2593static int nvme_dev_start(struct nvme_dev *dev)
2326{ 2594{
2327 int result; 2595 int result;
2596 bool start_thread = false;
2328 2597
2329 result = nvme_dev_map(dev); 2598 result = nvme_dev_map(dev);
2330 if (result) 2599 if (result)
@@ -2335,9 +2604,24 @@ static int nvme_dev_start(struct nvme_dev *dev)
2335 goto unmap; 2604 goto unmap;
2336 2605
2337 spin_lock(&dev_list_lock); 2606 spin_lock(&dev_list_lock);
2607 if (list_empty(&dev_list) && IS_ERR_OR_NULL(nvme_thread)) {
2608 start_thread = true;
2609 nvme_thread = NULL;
2610 }
2338 list_add(&dev->node, &dev_list); 2611 list_add(&dev->node, &dev_list);
2339 spin_unlock(&dev_list_lock); 2612 spin_unlock(&dev_list_lock);
2340 2613
2614 if (start_thread) {
2615 nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
2616 wake_up(&nvme_kthread_wait);
2617 } else
2618 wait_event_killable(nvme_kthread_wait, nvme_thread);
2619
2620 if (IS_ERR_OR_NULL(nvme_thread)) {
2621 result = nvme_thread ? PTR_ERR(nvme_thread) : -EINTR;
2622 goto disable;
2623 }
2624
2341 result = nvme_setup_io_queues(dev); 2625 result = nvme_setup_io_queues(dev);
2342 if (result && result != -EBUSY) 2626 if (result && result != -EBUSY)
2343 goto disable; 2627 goto disable;
@@ -2346,9 +2630,7 @@ static int nvme_dev_start(struct nvme_dev *dev)
2346 2630
2347 disable: 2631 disable:
2348 nvme_disable_queue(dev, 0); 2632 nvme_disable_queue(dev, 0);
2349 spin_lock(&dev_list_lock); 2633 nvme_dev_list_remove(dev);
2350 list_del_init(&dev->node);
2351 spin_unlock(&dev_list_lock);
2352 unmap: 2634 unmap:
2353 nvme_dev_unmap(dev); 2635 nvme_dev_unmap(dev);
2354 return result; 2636 return result;
@@ -2367,18 +2649,10 @@ static int nvme_remove_dead_ctrl(void *arg)
2367 2649
2368static void nvme_remove_disks(struct work_struct *ws) 2650static void nvme_remove_disks(struct work_struct *ws)
2369{ 2651{
2370 int i;
2371 struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work); 2652 struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
2372 2653
2373 nvme_dev_remove(dev); 2654 nvme_dev_remove(dev);
2374 spin_lock(&dev_list_lock); 2655 nvme_free_queues(dev, 1);
2375 for (i = dev->queue_count - 1; i > 0; i--) {
2376 BUG_ON(!dev->queues[i] || !dev->queues[i]->q_suspended);
2377 nvme_free_queue(dev->queues[i]);
2378 dev->queue_count--;
2379 dev->queues[i] = NULL;
2380 }
2381 spin_unlock(&dev_list_lock);
2382} 2656}
2383 2657
2384static int nvme_dev_resume(struct nvme_dev *dev) 2658static int nvme_dev_resume(struct nvme_dev *dev)
@@ -2441,6 +2715,9 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2441 GFP_KERNEL); 2715 GFP_KERNEL);
2442 if (!dev->queues) 2716 if (!dev->queues)
2443 goto free; 2717 goto free;
2718 dev->io_queue = alloc_percpu(unsigned short);
2719 if (!dev->io_queue)
2720 goto free;
2444 2721
2445 INIT_LIST_HEAD(&dev->namespaces); 2722 INIT_LIST_HEAD(&dev->namespaces);
2446 dev->reset_workfn = nvme_reset_failed_dev; 2723 dev->reset_workfn = nvme_reset_failed_dev;
@@ -2455,6 +2732,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2455 if (result) 2732 if (result)
2456 goto release; 2733 goto release;
2457 2734
2735 kref_init(&dev->kref);
2458 result = nvme_dev_start(dev); 2736 result = nvme_dev_start(dev);
2459 if (result) { 2737 if (result) {
2460 if (result == -EBUSY) 2738 if (result == -EBUSY)
@@ -2462,7 +2740,6 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2462 goto release_pools; 2740 goto release_pools;
2463 } 2741 }
2464 2742
2465 kref_init(&dev->kref);
2466 result = nvme_dev_add(dev); 2743 result = nvme_dev_add(dev);
2467 if (result) 2744 if (result)
2468 goto shutdown; 2745 goto shutdown;
@@ -2491,6 +2768,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2491 release: 2768 release:
2492 nvme_release_instance(dev); 2769 nvme_release_instance(dev);
2493 free: 2770 free:
2771 free_percpu(dev->io_queue);
2494 kfree(dev->queues); 2772 kfree(dev->queues);
2495 kfree(dev->entry); 2773 kfree(dev->entry);
2496 kfree(dev); 2774 kfree(dev);
@@ -2517,6 +2795,7 @@ static void nvme_remove(struct pci_dev *pdev)
2517 nvme_dev_remove(dev); 2795 nvme_dev_remove(dev);
2518 nvme_dev_shutdown(dev); 2796 nvme_dev_shutdown(dev);
2519 nvme_free_queues(dev, 0); 2797 nvme_free_queues(dev, 0);
2798 rcu_barrier();
2520 nvme_release_instance(dev); 2799 nvme_release_instance(dev);
2521 nvme_release_prp_pools(dev); 2800 nvme_release_prp_pools(dev);
2522 kref_put(&dev->kref, nvme_free_dev); 2801 kref_put(&dev->kref, nvme_free_dev);
@@ -2529,6 +2808,7 @@ static void nvme_remove(struct pci_dev *pdev)
 #define nvme_slot_reset NULL
 #define nvme_error_resume NULL
 
+#ifdef CONFIG_PM_SLEEP
 static int nvme_suspend(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
@@ -2549,6 +2829,7 @@ static int nvme_resume(struct device *dev)
 	}
 	return 0;
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume);
 
@@ -2563,7 +2844,7 @@ static const struct pci_error_handlers nvme_err_handler = {
 /* Move to pci_ids.h later */
 #define PCI_CLASS_STORAGE_EXPRESS	0x010802
 
-static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = {
+static const struct pci_device_id nvme_id_table[] = {
 	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
 	{ 0, }
 };
@@ -2585,14 +2866,11 @@ static int __init nvme_init(void)
 {
 	int result;
 
-	nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
-	if (IS_ERR(nvme_thread))
-		return PTR_ERR(nvme_thread);
+	init_waitqueue_head(&nvme_kthread_wait);
 
-	result = -ENOMEM;
 	nvme_workq = create_singlethread_workqueue("nvme");
 	if (!nvme_workq)
-		goto kill_kthread;
+		return -ENOMEM;
 
 	result = register_blkdev(nvme_major, "nvme");
 	if (result < 0)
@@ -2609,8 +2887,6 @@ static int __init nvme_init(void)
 	unregister_blkdev(nvme_major, "nvme");
  kill_workq:
 	destroy_workqueue(nvme_workq);
- kill_kthread:
-	kthread_stop(nvme_thread);
 	return result;
 }
 
@@ -2619,11 +2895,11 @@ static void __exit nvme_exit(void)
 	pci_unregister_driver(&nvme_driver);
 	unregister_blkdev(nvme_major, "nvme");
 	destroy_workqueue(nvme_workq);
-	kthread_stop(nvme_thread);
+	BUG_ON(nvme_thread && !IS_ERR(nvme_thread));
 }
 
 MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
 MODULE_LICENSE("GPL");
-MODULE_VERSION("0.8");
+MODULE_VERSION("0.9");
 module_init(nvme_init);
 module_exit(nvme_exit);
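
The probe path above now allocates a per-CPU queue index (dev->io_queue), so each CPU can be steered to its own I/O queue and the mapping can be rebuilt on CPU hotplug. As a rough userspace model of the idea (example values only, not the driver's actual assignment policy), CPUs can be spread across the available I/O queues while queue 0 stays reserved for admin commands:

#include <stdio.h>

int main(void)
{
	unsigned nr_cpus = 8, nr_io_queues = 3;	/* example values, not probed from hardware */
	unsigned short io_queue[8];		/* stands in for the per-CPU map */

	for (unsigned cpu = 0; cpu < nr_cpus; cpu++) {
		/* queue 0 is the admin queue, so I/O queues are numbered from 1 */
		io_queue[cpu] = 1 + cpu % nr_io_queues;
		printf("cpu %u -> io queue %u\n", cpu, (unsigned)io_queue[cpu]);
	}
	return 0;
}
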
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
index 4a0ceb64e269..2c3f5be06da1 100644
--- a/drivers/block/nvme-scsi.c
+++ b/drivers/block/nvme-scsi.c
@@ -1562,13 +1562,14 @@ static int nvme_trans_send_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 			res = PTR_ERR(iod);
 			goto out;
 		}
-		length = nvme_setup_prps(dev, &c.common, iod, tot_len,
-						GFP_KERNEL);
+		length = nvme_setup_prps(dev, iod, tot_len, GFP_KERNEL);
 		if (length != tot_len) {
 			res = -ENOMEM;
 			goto out_unmap;
 		}
 
+		c.dlfw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+		c.dlfw.prp2 = cpu_to_le64(iod->first_dma);
 		c.dlfw.numd = cpu_to_le32((tot_len/BYTES_TO_DWORDS) - 1);
 		c.dlfw.offset = cpu_to_le32(offset/BYTES_TO_DWORDS);
 	} else if (opcode == nvme_admin_activate_fw) {
@@ -2033,7 +2034,6 @@ static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	int res = SNTI_TRANSLATION_SUCCESS;
 	int nvme_sc;
 	struct nvme_dev *dev = ns->dev;
-	struct nvme_queue *nvmeq;
 	u32 num_cmds;
 	struct nvme_iod *iod;
 	u64 unit_len;
@@ -2045,7 +2045,7 @@ static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	struct nvme_command c;
 	u8 opcode = (is_write ? nvme_cmd_write : nvme_cmd_read);
 	u16 control;
-	u32 max_blocks = nvme_block_nr(ns, dev->max_hw_sectors);
+	u32 max_blocks = queue_max_hw_sectors(ns->queue);
 
 	num_cmds = nvme_trans_io_get_num_cmds(hdr, cdb_info, max_blocks);
 
@@ -2093,8 +2093,7 @@ static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 			res = PTR_ERR(iod);
 			goto out;
 		}
-		retcode = nvme_setup_prps(dev, &c.common, iod, unit_len,
-						GFP_KERNEL);
+		retcode = nvme_setup_prps(dev, iod, unit_len, GFP_KERNEL);
 		if (retcode != unit_len) {
 			nvme_unmap_user_pages(dev,
 				(is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
@@ -2103,21 +2102,12 @@ static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 			res = -ENOMEM;
 			goto out;
 		}
+		c.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+		c.rw.prp2 = cpu_to_le64(iod->first_dma);
 
 		nvme_offset += unit_num_blocks;
 
-		nvmeq = get_nvmeq(dev);
-		/*
-		 * Since nvme_submit_sync_cmd sleeps, we can't keep
-		 * preemption disabled.  We may be preempted at any
-		 * point, and be rescheduled to a different CPU.  That
-		 * will cause cacheline bouncing, but no additional
-		 * races since q_lock already protects against other
-		 * CPUs.
-		 */
-		put_nvmeq(nvmeq);
-		nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL,
-						NVME_IO_TIMEOUT);
+		nvme_sc = nvme_submit_io_cmd(dev, &c, NULL);
 		if (nvme_sc != NVME_SC_SUCCESS) {
 			nvme_unmap_user_pages(dev,
 				(is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
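
In the I/O path above the PRP fields are now filled in by the caller: prp1 comes from the first mapped scatterlist entry and prp2 from iod->first_dma (the second page, or the DMA address of a PRP list for longer transfers). A small standalone sketch of the page accounting behind that, assuming a 4 KiB page size and using a purely illustrative helper name:

#include <stdio.h>

#define PRP_PAGE_SIZE 4096u	/* assumed page size for the example */

/* prp_entries() is illustrative only; it mirrors the kind of accounting
 * nvme_setup_prps() has to do when deciding how many PRP entries a
 * transfer needs. */
static unsigned prp_entries(unsigned long long dma_addr, unsigned len)
{
	unsigned offset = dma_addr & (PRP_PAGE_SIZE - 1);	/* offset into the first page */
	unsigned first = PRP_PAGE_SIZE - offset;		/* bytes PRP1 can cover */

	if (len <= first)
		return 1;					/* PRP1 alone is enough */
	len -= first;
	return 1 + (len + PRP_PAGE_SIZE - 1) / PRP_PAGE_SIZE;	/* one entry per further page */
}

int main(void)
{
	printf("8 KiB, page aligned:  %u PRP entries\n", prp_entries(0x1000, 8192));
	printf("8 KiB, offset 0x200:  %u PRP entries\n", prp_entries(0x1200, 8192));
	return 0;
}
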
@@ -2644,7 +2634,6 @@ static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 {
 	int res = SNTI_TRANSLATION_SUCCESS;
 	int nvme_sc;
-	struct nvme_queue *nvmeq;
 	struct nvme_command c;
 	u8 immed, pcmod, pc, no_flush, start;
 
@@ -2671,10 +2660,7 @@ static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 		c.common.opcode = nvme_cmd_flush;
 		c.common.nsid = cpu_to_le32(ns->ns_id);
 
-		nvmeq = get_nvmeq(ns->dev);
-		put_nvmeq(nvmeq);
-		nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
-
+		nvme_sc = nvme_submit_io_cmd(ns->dev, &c, NULL);
 		res = nvme_trans_status_code(hdr, nvme_sc);
 		if (res)
 			goto out;
@@ -2697,15 +2683,12 @@ static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
 	int res = SNTI_TRANSLATION_SUCCESS;
 	int nvme_sc;
 	struct nvme_command c;
-	struct nvme_queue *nvmeq;
 
 	memset(&c, 0, sizeof(c));
 	c.common.opcode = nvme_cmd_flush;
 	c.common.nsid = cpu_to_le32(ns->ns_id);
 
-	nvmeq = get_nvmeq(ns->dev);
-	put_nvmeq(nvmeq);
-	nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
+	nvme_sc = nvme_submit_io_cmd(ns->dev, &c, NULL);
 
 	res = nvme_trans_status_code(hdr, nvme_sc);
 	if (res)
@@ -2872,7 +2855,6 @@ static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	struct nvme_dev *dev = ns->dev;
 	struct scsi_unmap_parm_list *plist;
 	struct nvme_dsm_range *range;
-	struct nvme_queue *nvmeq;
 	struct nvme_command c;
 	int i, nvme_sc, res = -ENOMEM;
 	u16 ndesc, list_len;
@@ -2914,10 +2896,7 @@ static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	c.dsm.nr = cpu_to_le32(ndesc - 1);
 	c.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
 
-	nvmeq = get_nvmeq(dev);
-	put_nvmeq(nvmeq);
-
-	nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
+	nvme_sc = nvme_submit_io_cmd(dev, &c, NULL);
 	res = nvme_trans_status_code(hdr, nvme_sc);
 
 	dma_free_coherent(&dev->pci_dev->dev, ndesc * sizeof(*range),
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 6b9aafed225f..a50173ca1d72 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -66,20 +66,25 @@ enum {
 
 #define NVME_VS(major, minor)	(major << 16 | minor)
 
-#define NVME_IO_TIMEOUT	(5 * HZ)
+extern unsigned char io_timeout;
+#define NVME_IO_TIMEOUT	(io_timeout * HZ)
 
 /*
  * Represents an NVM Express device.  Each nvme_dev is a PCI function.
  */
 struct nvme_dev {
 	struct list_head node;
-	struct nvme_queue **queues;
+	struct nvme_queue __rcu **queues;
+	unsigned short __percpu *io_queue;
 	u32 __iomem *dbs;
 	struct pci_dev *pci_dev;
 	struct dma_pool *prp_page_pool;
 	struct dma_pool *prp_small_pool;
 	int instance;
-	int queue_count;
+	unsigned queue_count;
+	unsigned online_queues;
+	unsigned max_qid;
+	int q_depth;
 	u32 db_stride;
 	u32 ctrl_config;
 	struct msix_entry *entry;
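
NVME_IO_TIMEOUT above is no longer a fixed 5 seconds; it is computed from io_timeout, an unsigned char holding the timeout in seconds, so it can be tuned but is capped at 255 s. A standalone sketch of that arithmetic (the HZ value and the starting value are assumptions for the example):

#include <stdio.h>

#define HZ 250				/* assumed tick rate, for the example only */

unsigned char io_timeout = 30;		/* timeout in seconds; example starting value */
#define NVME_IO_TIMEOUT	(io_timeout * HZ)

int main(void)
{
	printf("%u s -> %u jiffies\n", (unsigned)io_timeout, (unsigned)NVME_IO_TIMEOUT);
	io_timeout = 255;		/* the largest value an unsigned char allows */
	printf("%u s -> %u jiffies\n", (unsigned)io_timeout, (unsigned)NVME_IO_TIMEOUT);
	return 0;
}
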
@@ -89,6 +94,7 @@ struct nvme_dev {
 	struct miscdevice miscdev;
 	work_func_t reset_workfn;
 	struct work_struct reset_work;
+	struct notifier_block nb;
 	char name[12];
 	char serial[20];
 	char model[40];
@@ -131,6 +137,7 @@ struct nvme_iod {
 	int length;		/* Of data, in bytes */
 	unsigned long start_time;
 	dma_addr_t first_dma;
+	struct list_head node;
 	struct scatterlist sg[0];
 };
 
@@ -146,16 +153,12 @@ static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
  */
 void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod);
 
-int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
-			struct nvme_iod *iod, int total_len, gfp_t gfp);
+int nvme_setup_prps(struct nvme_dev *, struct nvme_iod *, int, gfp_t);
 struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
 				unsigned long addr, unsigned length);
 void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
 			struct nvme_iod *iod);
-struct nvme_queue *get_nvmeq(struct nvme_dev *dev);
-void put_nvmeq(struct nvme_queue *nvmeq);
-int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
-			u32 *result, unsigned timeout);
+int nvme_submit_io_cmd(struct nvme_dev *, struct nvme_command *, u32 *);
 int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns);
 int nvme_submit_admin_cmd(struct nvme_dev *, struct nvme_command *,
 			u32 *result);
diff --git a/include/uapi/linux/nvme.h b/include/uapi/linux/nvme.h
index e5ab62201119..096fe1c6f83d 100644
--- a/include/uapi/linux/nvme.h
+++ b/include/uapi/linux/nvme.h
@@ -434,6 +434,7 @@ enum {
 	NVME_SC_REFTAG_CHECK		= 0x284,
 	NVME_SC_COMPARE_FAILED		= 0x285,
 	NVME_SC_ACCESS_DENIED		= 0x286,
+	NVME_SC_DNR			= 0x4000,
 };
 
 struct nvme_completion {
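
NVME_SC_DNR is the Do Not Retry bit of an NVMe completion status: a failed command whose status has this bit clear is a non-fatal error that the host may resubmit. A minimal userspace sketch of the check (nvme_should_retry() is a hypothetical helper, not a driver function):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NVME_SC_DNR	0x4000		/* Do Not Retry, as added above */

/* nvme_should_retry() is illustrative only and not part of the driver. */
static bool nvme_should_retry(uint16_t status)
{
	/* Only retry when the controller did not set the Do Not Retry bit. */
	return !(status & NVME_SC_DNR);
}

int main(void)
{
	printf("status 0x0286 -> retry? %d\n", nvme_should_retry(0x0286));
	printf("status 0x4286 -> retry? %d\n", nvme_should_retry(0x4286));
	return 0;
}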