 drivers/block/nvme.c | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+), 0 deletions(-)
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
index 1c3cd6cc0ad9..60c3786bc787 100644
--- a/drivers/block/nvme.c
+++ b/drivers/block/nvme.c
@@ -44,6 +44,9 @@
 static int nvme_major;
 module_param(nvme_major, int, 0);
 
+static int use_threaded_interrupts;
+module_param(use_threaded_interrupts, int, 0);
+
 /*
  * Represents an NVM Express device.  Each nvme_dev is a PCI function.
  */
@@ -455,6 +458,25 @@ static irqreturn_t nvme_irq(int irq, void *data)
 	return nvme_process_cq(data);
 }
 
+static irqreturn_t nvme_irq_thread(int irq, void *data)
+{
+	irqreturn_t result;
+	struct nvme_queue *nvmeq = data;
+	spin_lock(&nvmeq->q_lock);
+	result = nvme_process_cq(nvmeq);
+	spin_unlock(&nvmeq->q_lock);
+	return result;
+}
+
+static irqreturn_t nvme_irq_check(int irq, void *data)
+{
+	struct nvme_queue *nvmeq = data;
+	struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];
+	if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
+		return IRQ_NONE;
+	return IRQ_WAKE_THREAD;
+}
+
 static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid)
 {
 	spin_lock_irq(&nvmeq->q_lock);
@@ -630,6 +652,11 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
 static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
 						const char *name)
 {
+	if (use_threaded_interrupts)
+		return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
+					nvme_irq_check, nvme_irq_thread,
+					IRQF_DISABLED | IRQF_SHARED,
+					name, nvmeq);
 	return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
 				IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
 }