author    Sagi Grimberg <sagi@grimberg.me>  2017-06-18 10:28:10 -0400
committer Jens Axboe <axboe@kernel.dk>      2017-06-28 10:14:13 -0400
commit    442e19b7ccb25337be7bfff96df94c38c037ee9f (patch)
tree      592702bd98fadae1685eaf5c23f70aac73c04e32
parent    920d13a884c0595451658a7b48af8ac16918628f (diff)
nvme-pci: open-code polling logic in nvme_poll
Given that the code is simple enough, this seems better than passing
a tag by reference at each call site; it also lets us get rid of
__nvme_process_cq.
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--  drivers/nvme/host/pci.c | 40
1 file changed, 21 insertions(+), 19 deletions(-)
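The interface change described in the message is easiest to see in isolation. Below is a minimal user-space sketch (hypothetical names and types, not the kernel code) contrasting the old tag-by-reference idiom, where a match is signalled by writing -1 back through the pointer, with the open-coded style that simply returns whether the tag was found:

#include <stdio.h>

struct cqe { int command_id; };

/* Tiny fake completion ring with three pending completions. */
static struct cqe ring[] = { { 3 }, { 7 }, { 9 } };
static int head;

static int read_cqe(struct cqe *out)
{
	if (head >= 3)
		return 0;
	*out = ring[head++];
	return 1;
}

/* Old idiom: optional tag passed by reference; a match is signalled
 * by writing -1 back through the pointer (tag may be NULL). */
static void process_cq_old(int *tag)
{
	struct cqe cqe;

	while (read_cqe(&cqe)) {
		printf("handled %d\n", cqe.command_id);
		if (tag && *tag == cqe.command_id) {
			*tag = -1;
			break;
		}
	}
}

/* New idiom: the poll path open-codes the loop and returns the result. */
static int poll_new(int tag)
{
	struct cqe cqe;
	int found = 0;

	while (read_cqe(&cqe)) {
		printf("handled %d\n", cqe.command_id);
		if (tag == cqe.command_id) {
			found = 1;
			break;
		}
	}
	return found;
}

int main(void)
{
	int t = 7;

	process_cq_old(&t);                    /* old: must test t == -1 */
	printf("old found: %d\n", t == -1);

	head = 0;                              /* reset the fake ring */
	printf("new found: %d\n", poll_new(9));
	return 0;
}

Returning the result directly removes the in/out parameter and the NULL-tag special case, which is exactly what lets the patch below delete the __nvme_process_cq wrapper.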
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index d309b6c90511..2a9ee769ce9e 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -785,7 +785,7 @@ static inline bool nvme_read_cqe(struct nvme_queue *nvmeq,
 	return false;
 }
 
-static void __nvme_process_cq(struct nvme_queue *nvmeq, int *tag)
+static void nvme_process_cq(struct nvme_queue *nvmeq)
 {
 	struct nvme_completion cqe;
 	int consumed = 0;
@@ -793,11 +793,6 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, int *tag)
 	while (nvme_read_cqe(nvmeq, &cqe)) {
 		nvme_handle_cqe(nvmeq, &cqe);
 		consumed++;
-
-		if (tag && *tag == cqe.command_id) {
-			*tag = -1;
-			break;
-		}
 	}
 
 	if (consumed) {
@@ -806,11 +801,6 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, int *tag)
 	}
 }
 
-static void nvme_process_cq(struct nvme_queue *nvmeq)
-{
-	__nvme_process_cq(nvmeq, NULL);
-}
-
 static irqreturn_t nvme_irq(int irq, void *data)
 {
 	irqreturn_t result;
@@ -833,16 +823,28 @@ static irqreturn_t nvme_irq_check(int irq, void *data)
 
 static int __nvme_poll(struct nvme_queue *nvmeq, unsigned int tag)
 {
-	if (nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase)) {
-		spin_lock_irq(&nvmeq->q_lock);
-		__nvme_process_cq(nvmeq, &tag);
-		spin_unlock_irq(&nvmeq->q_lock);
+	struct nvme_completion cqe;
+	int found = 0, consumed = 0;
 
-		if (tag == -1)
-			return 1;
-	}
+	if (!nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase))
+		return 0;
 
-	return 0;
+	spin_lock_irq(&nvmeq->q_lock);
+	while (nvme_read_cqe(nvmeq, &cqe)) {
+		nvme_handle_cqe(nvmeq, &cqe);
+		consumed++;
+
+		if (tag == cqe.command_id) {
+			found = 1;
+			break;
+		}
+	}
+
+	if (consumed)
+		nvme_ring_cq_doorbell(nvmeq);
+	spin_unlock_irq(&nvmeq->q_lock);
+
+	return found;
 }
 
 static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
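For context, nvme_poll, whose signature is visible at the bottom of the last hunk, is the blk-mq ->poll callback and is not changed by this patch. In kernels of this vintage it simply forwards to __nvme_poll; roughly (recalled for illustration, not part of the diff):

static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
{
	/* sketch: the nvme_queue is stashed in the hw context's driver_data */
	struct nvme_queue *nvmeq = hctx->driver_data;

	return __nvme_poll(nvmeq, tag);
}

So after the patch the poll path answers "was this tag completed?" directly through __nvme_poll's return value instead of mutating a caller-owned tag variable.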