author	Matthew Wilcox <matthew.r.wilcox@intel.com>	2011-02-06 07:28:06 -0500
committer	Matthew Wilcox <matthew.r.wilcox@intel.com>	2011-11-04 15:52:55 -0400
commit	58ffacb545f76fc2c65d1fbfa5acf5184a2a09e6 (patch)
tree	382cf8004b88f9d2bd2b86d78121938afa863e42 /drivers/block/nvme.c
parent	b1ad37efcafe396ac3944853589688dd0ec3c64e (diff)
NVMe: Add a module parameter to use a threaded interrupt
We're currently calling bio_endio from hard interrupt context. This is
not a good idea for preemptible kernels as it will cause longer latencies.
Using a threaded interrupt will run the entire queue processing mechanism
(including bio_endio) in a thread, which can be preempted. Unfortunately,
it also adds about 7us of latency to the single-I/O case, so make it a
module parameter for the moment.
Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
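
For context, the split-handler scheme the patch switches to works roughly as sketched below. This is a minimal illustration, not driver code: the my_* names are placeholders, and the hardware check is faked with a flag. request_threaded_irq() registers a cheap primary handler that runs in hard interrupt context plus a thread function that runs preemptibly; returning IRQ_WAKE_THREAD from the primary handler wakes the thread.

#include <linux/interrupt.h>

/* Placeholder device; a real driver would check hardware state instead. */
struct my_dev {
	bool work_pending;
};

/* Primary handler: hard interrupt context, so only peek at the device. */
static irqreturn_t my_irq_check(int irq, void *data)
{
	struct my_dev *dev = data;
	if (!dev->work_pending)
		return IRQ_NONE;	/* interrupt was not ours */
	return IRQ_WAKE_THREAD;		/* defer the real work to the thread */
}

/* Threaded handler: runs in a preemptible kernel thread. */
static irqreturn_t my_irq_thread(int irq, void *data)
{
	struct my_dev *dev = data;
	dev->work_pending = false;	/* stand-in for draining a queue */
	return IRQ_HANDLED;
}

static int my_setup_irq(struct my_dev *dev, int irq)
{
	return request_threaded_irq(irq, my_irq_check, my_irq_thread,
				    IRQF_SHARED, "my_dev", dev);
}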
Diffstat (limited to 'drivers/block/nvme.c')
-rw-r--r--	drivers/block/nvme.c	27
1 file changed, 27 insertions(+), 0 deletions(-)
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
index 1c3cd6cc0ad9..60c3786bc787 100644
--- a/drivers/block/nvme.c
+++ b/drivers/block/nvme.c
@@ -44,6 +44,9 @@
 static int nvme_major;
 module_param(nvme_major, int, 0);
 
+static int use_threaded_interrupts;
+module_param(use_threaded_interrupts, int, 0);
+
 /*
  * Represents an NVM Express device. Each nvme_dev is a PCI function.
  */
@@ -455,6 +458,25 @@ static irqreturn_t nvme_irq(int irq, void *data)
 	return nvme_process_cq(data);
 }
 
+static irqreturn_t nvme_irq_thread(int irq, void *data)
+{
+	irqreturn_t result;
+	struct nvme_queue *nvmeq = data;
+	spin_lock(&nvmeq->q_lock);
+	result = nvme_process_cq(nvmeq);
+	spin_unlock(&nvmeq->q_lock);
+	return result;
+}
+
+static irqreturn_t nvme_irq_check(int irq, void *data)
+{
+	struct nvme_queue *nvmeq = data;
+	struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];
+	if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
+		return IRQ_NONE;
+	return IRQ_WAKE_THREAD;
+}
+
 static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid)
 {
 	spin_lock_irq(&nvmeq->q_lock);
@@ -630,6 +652,11 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
 static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
 						const char *name)
 {
+	if (use_threaded_interrupts)
+		return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
+					nvme_irq_check, nvme_irq_thread,
+					IRQF_DISABLED | IRQF_SHARED,
+					name, nvmeq);
 	return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
 				IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
 }
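
With the patch applied the threaded path is opt-in: loading the driver with, for example, modprobe nvme use_threaded_interrupts=1 registers nvme_irq_check/nvme_irq_thread, while the default of 0 keeps the original nvme_irq hard-interrupt handler. Note that nvme_irq_check only compares the phase bit of the completion entry at cq_head against cq_phase, so a spurious wakeup on the shared interrupt line returns IRQ_NONE without touching q_lock; the lock is taken only in the thread, where nvme_process_cq does the actual completion work.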