about summary refs log tree commit diff stats
path: root/drivers/block/nvme-core.c
diff options
context:
space:
mode:
authorMatthew Wilcox <matthew.r.wilcox@intel.com>2013-06-24 12:03:57 -0400
committerMatthew Wilcox <matthew.r.wilcox@intel.com>2013-06-24 13:57:27 -0400
commit7d8224574cbd2326a6be00f319f5f7597abec3f6 (patch)
treefcba1402f98bffa6a38f2a00d104e43e865804c8 /drivers/block/nvme-core.c
parentbc57a0f7a44cfcf3e9873f6c6b8dcecdca486b1f (diff)
NVMe: Call nvme_process_cq from submission path
Since we have the queue locked, it makes sense to check if there are any completion queue entries on the queue before we release the lock. If there are, it may save an interrupt and reduce latency for the I/Os that happened to complete. This happens fairly often for some workloads.

Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
Diffstat (limited to 'drivers/block/nvme-core.c')
-rw-r--r--drivers/block/nvme-core.c39
1 file changed, 20 insertions, 19 deletions
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index eb4a91f3bf41..07d527c66eb4 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -738,25 +738,6 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
738 return result; 738 return result;
739} 739}
740 740
741static void nvme_make_request(struct request_queue *q, struct bio *bio)
742{
743 struct nvme_ns *ns = q->queuedata;
744 struct nvme_queue *nvmeq = get_nvmeq(ns->dev);
745 int result = -EBUSY;
746
747 spin_lock_irq(&nvmeq->q_lock);
748 if (bio_list_empty(&nvmeq->sq_cong))
749 result = nvme_submit_bio_queue(nvmeq, ns, bio);
750 if (unlikely(result)) {
751 if (bio_list_empty(&nvmeq->sq_cong))
752 add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
753 bio_list_add(&nvmeq->sq_cong, bio);
754 }
755
756 spin_unlock_irq(&nvmeq->q_lock);
757 put_nvmeq(nvmeq);
758}
759
760static int nvme_process_cq(struct nvme_queue *nvmeq) 741static int nvme_process_cq(struct nvme_queue *nvmeq)
761{ 742{
762 u16 head, phase; 743 u16 head, phase;
@@ -797,6 +778,26 @@ static int nvme_process_cq(struct nvme_queue *nvmeq)
797 return 1; 778 return 1;
798} 779}
799 780
781static void nvme_make_request(struct request_queue *q, struct bio *bio)
782{
783 struct nvme_ns *ns = q->queuedata;
784 struct nvme_queue *nvmeq = get_nvmeq(ns->dev);
785 int result = -EBUSY;
786
787 spin_lock_irq(&nvmeq->q_lock);
788 if (bio_list_empty(&nvmeq->sq_cong))
789 result = nvme_submit_bio_queue(nvmeq, ns, bio);
790 if (unlikely(result)) {
791 if (bio_list_empty(&nvmeq->sq_cong))
792 add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
793 bio_list_add(&nvmeq->sq_cong, bio);
794 }
795
796 nvme_process_cq(nvmeq);
797 spin_unlock_irq(&nvmeq->q_lock);
798 put_nvmeq(nvmeq);
799}
800
800static irqreturn_t nvme_irq(int irq, void *data) 801static irqreturn_t nvme_irq(int irq, void *data)
801{ 802{
802 irqreturn_t result; 803 irqreturn_t result;