author     Jon Derrick <jonathan.derrick@intel.com>  2015-07-20 12:14:08 -0400
committer  Jens Axboe <axboe@fb.com>                 2015-07-21 11:40:09 -0400
commit     498c43949c7b8f57e0afb8195019cf5a7ba72de0 (patch)
tree       e8f8806ac007f39d9786584b94a04fb2e87ff332
parent     0034af036554c39eefd14d835a8ec3496ac46712 (diff)
NVMe: Unify SQ entry writing and doorbell ringing
This patch changes sq_cmd writers to instead create their command on the
stack. __nvme_submit_cmd copies the sq entry to the queue and writes the
doorbell.

Signed-off-by: Jon Derrick <jonathan.derrick@intel.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
 drivers/block/nvme-core.c | 80 ++++++++++++++++++------------------------
 1 file changed, 35 insertions(+), 45 deletions(-)
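All three converted paths now funnel into __nvme_submit_cmd, which already
carries the submission-queue bookkeeping. As a rough sketch, its shape can be
inferred from the duplicated code this patch deletes (copy the stack-built
command into the SQ ring, advance the tail with wrap-around, ring the
doorbell); the exact in-tree body may differ in detail:

	/*
	 * Sketch of the shared helper, inferred from the logic removed
	 * below; not copied verbatim from the tree.
	 */
	static void __nvme_submit_cmd(struct nvme_queue *nvmeq,
							struct nvme_command *cmd)
	{
		u16 tail = nvmeq->sq_tail;

		/* Copy the caller's stack-built command into the SQ ring. */
		memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));

		/* Advance the tail, wrapping at queue depth. */
		if (++tail == nvmeq->q_depth)
			tail = 0;

		/* Ring the doorbell so the controller sees the new entry. */
		writel(tail, nvmeq->q_db);
		nvmeq->sq_tail = tail;
	}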
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index d844ec4a2b85..e09ad6cc6dec 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -730,18 +730,16 @@ static int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod,
 static void nvme_submit_priv(struct nvme_queue *nvmeq, struct request *req,
 		struct nvme_iod *iod)
 {
-	struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+	struct nvme_command cmnd;
 
-	memcpy(cmnd, req->cmd, sizeof(struct nvme_command));
-	cmnd->rw.command_id = req->tag;
+	memcpy(&cmnd, req->cmd, sizeof(cmnd));
+	cmnd.rw.command_id = req->tag;
 	if (req->nr_phys_segments) {
-		cmnd->rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
-		cmnd->rw.prp2 = cpu_to_le64(iod->first_dma);
+		cmnd.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+		cmnd.rw.prp2 = cpu_to_le64(iod->first_dma);
 	}
 
-	if (++nvmeq->sq_tail == nvmeq->q_depth)
-		nvmeq->sq_tail = 0;
-	writel(nvmeq->sq_tail, nvmeq->q_db);
+	__nvme_submit_cmd(nvmeq, &cmnd);
 }
 
 /*
@@ -754,45 +752,41 @@ static void nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 {
 	struct nvme_dsm_range *range =
 				(struct nvme_dsm_range *)iod_list(iod)[0];
-	struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+	struct nvme_command cmnd;
 
 	range->cattr = cpu_to_le32(0);
 	range->nlb = cpu_to_le32(blk_rq_bytes(req) >> ns->lba_shift);
 	range->slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
 
-	memset(cmnd, 0, sizeof(*cmnd));
-	cmnd->dsm.opcode = nvme_cmd_dsm;
-	cmnd->dsm.command_id = req->tag;
-	cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
-	cmnd->dsm.prp1 = cpu_to_le64(iod->first_dma);
-	cmnd->dsm.nr = 0;
-	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
+	memset(&cmnd, 0, sizeof(cmnd));
+	cmnd.dsm.opcode = nvme_cmd_dsm;
+	cmnd.dsm.command_id = req->tag;
+	cmnd.dsm.nsid = cpu_to_le32(ns->ns_id);
+	cmnd.dsm.prp1 = cpu_to_le64(iod->first_dma);
+	cmnd.dsm.nr = 0;
+	cmnd.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
 
-	if (++nvmeq->sq_tail == nvmeq->q_depth)
-		nvmeq->sq_tail = 0;
-	writel(nvmeq->sq_tail, nvmeq->q_db);
+	__nvme_submit_cmd(nvmeq, &cmnd);
 }
 
 static void nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 								int cmdid)
 {
-	struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+	struct nvme_command cmnd;
 
-	memset(cmnd, 0, sizeof(*cmnd));
-	cmnd->common.opcode = nvme_cmd_flush;
-	cmnd->common.command_id = cmdid;
-	cmnd->common.nsid = cpu_to_le32(ns->ns_id);
+	memset(&cmnd, 0, sizeof(cmnd));
+	cmnd.common.opcode = nvme_cmd_flush;
+	cmnd.common.command_id = cmdid;
+	cmnd.common.nsid = cpu_to_le32(ns->ns_id);
 
-	if (++nvmeq->sq_tail == nvmeq->q_depth)
-		nvmeq->sq_tail = 0;
-	writel(nvmeq->sq_tail, nvmeq->q_db);
+	__nvme_submit_cmd(nvmeq, &cmnd);
 }
 
 static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod,
 							struct nvme_ns *ns)
 {
 	struct request *req = iod_get_private(iod);
-	struct nvme_command *cmnd;
+	struct nvme_command cmnd;
 	u16 control = 0;
 	u32 dsmgmt = 0;
 
@@ -804,19 +798,17 @@ static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod,
 	if (req->cmd_flags & REQ_RAHEAD)
 		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
 
-	cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
-	memset(cmnd, 0, sizeof(*cmnd));
-
-	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
-	cmnd->rw.command_id = req->tag;
-	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
-	cmnd->rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
-	cmnd->rw.prp2 = cpu_to_le64(iod->first_dma);
-	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
-	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
+	memset(&cmnd, 0, sizeof(cmnd));
+	cmnd.rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
+	cmnd.rw.command_id = req->tag;
+	cmnd.rw.nsid = cpu_to_le32(ns->ns_id);
+	cmnd.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+	cmnd.rw.prp2 = cpu_to_le64(iod->first_dma);
+	cmnd.rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
+	cmnd.rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
 
 	if (blk_integrity_rq(req)) {
-		cmnd->rw.metadata = cpu_to_le64(sg_dma_address(iod->meta_sg));
+		cmnd.rw.metadata = cpu_to_le64(sg_dma_address(iod->meta_sg));
 		switch (ns->pi_type) {
 		case NVME_NS_DPS_PI_TYPE3:
 			control |= NVME_RW_PRINFO_PRCHK_GUARD;
@@ -825,19 +817,17 @@ static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod,
 		case NVME_NS_DPS_PI_TYPE2:
 			control |= NVME_RW_PRINFO_PRCHK_GUARD |
 					NVME_RW_PRINFO_PRCHK_REF;
-			cmnd->rw.reftag = cpu_to_le32(
+			cmnd.rw.reftag = cpu_to_le32(
 					nvme_block_nr(ns, blk_rq_pos(req)));
 			break;
 		}
 	} else if (ns->ms)
 		control |= NVME_RW_PRINFO_PRACT;
 
-	cmnd->rw.control = cpu_to_le16(control);
-	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
+	cmnd.rw.control = cpu_to_le16(control);
+	cmnd.rw.dsmgmt = cpu_to_le32(dsmgmt);
 
-	if (++nvmeq->sq_tail == nvmeq->q_depth)
-		nvmeq->sq_tail = 0;
-	writel(nvmeq->sq_tail, nvmeq->q_db);
+	__nvme_submit_cmd(nvmeq, &cmnd);
 
 	return 0;
 }
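After the patch, every submitter follows the same shape: build a struct
nvme_command on the stack, fill the per-command fields, and let the helper
own the tail increment, wrap-around, and doorbell write. Condensed from the
flush path above:

	struct nvme_command cmnd;

	memset(&cmnd, 0, sizeof(cmnd));
	cmnd.common.opcode = nvme_cmd_flush;		/* per-command setup */
	cmnd.common.command_id = cmdid;
	cmnd.common.nsid = cpu_to_le32(ns->ns_id);

	__nvme_submit_cmd(nvmeq, &cmnd);		/* copy into SQ + doorbell */

Centralizing the bookkeeping removes three copies of the wrap-around
arithmetic and leaves a single place that writes SQ entries.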