author     Asias He <asias@redhat.com>              2013-05-06 04:38:29 -0400
committer  Michael S. Tsirkin <mst@redhat.com>      2013-07-07 07:38:58 -0400
commit     3c63f66a0dcdd6cb8bcacf210181f2b3baed19be
tree       0bf1ae5f675e9423ad3a1ef398ee86e09de4f88b
parent     9871831283e79575deb63fa341e9c7f3c1223d10
vhost-scsi: Rename struct tcm_vhost_cmd *tv_cmd to *cmd
This way, we use cmd for struct tcm_vhost_cmd and evt for struct tcm_vhost_evt.

Signed-off-by: Asias He <asias@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
-rw-r--r--  drivers/vhost/scsi.c  |  142
1 file changed, 71 insertions(+), 71 deletions(-)
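Most of the hunks below rename the local variable at call sites built around the container_of() idiom. As a reader aid, here is a minimal, self-contained sketch of that idiom, using hypothetical simplified structs rather than the real kernel definitions, showing how a callback such as tcm_vhost_queue_status() recovers the enclosing tcm_vhost_cmd from its embedded se_cmd member. The rename from tv_cmd to cmd changes none of this pointer arithmetic; it only shortens the variable name.

/*
 * Simplified, userspace illustration of container_of(): given a pointer
 * to an embedded member (se_cmd), recover the enclosing structure.
 * The struct layouts here are stand-ins, not the kernel definitions.
 */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct se_cmd {
	int scsi_status;
};

struct tcm_vhost_cmd {
	int tvc_tag;
	struct se_cmd tvc_se_cmd;	/* embedded member, not a pointer */
};

/* Mimics how the completion callbacks get from se_cmd back to cmd. */
static void complete_cmd(struct se_cmd *se_cmd)
{
	struct tcm_vhost_cmd *cmd =
		container_of(se_cmd, struct tcm_vhost_cmd, tvc_se_cmd);

	printf("completing tag %d, status %d\n",
	       cmd->tvc_tag, cmd->tvc_se_cmd.scsi_status);
}

int main(void)
{
	struct tcm_vhost_cmd cmd = { .tvc_tag = 7 };

	complete_cmd(&cmd.tvc_se_cmd);
	return 0;
}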
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 7fb18595c744..03765e17c154 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -494,28 +494,28 @@ static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)
 	return 0;
 }
 
-static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd)
+static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *cmd)
 {
-	struct vhost_scsi *vs = tv_cmd->tvc_vhost;
+	struct vhost_scsi *vs = cmd->tvc_vhost;
 
-	llist_add(&tv_cmd->tvc_completion_list, &vs->vs_completion_list);
+	llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
 
 	vhost_work_queue(&vs->dev, &vs->vs_completion_work);
 }
 
 static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
 {
-	struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
+	struct tcm_vhost_cmd *cmd = container_of(se_cmd,
 				struct tcm_vhost_cmd, tvc_se_cmd);
-	vhost_scsi_complete_cmd(tv_cmd);
+	vhost_scsi_complete_cmd(cmd);
 	return 0;
 }
 
 static int tcm_vhost_queue_status(struct se_cmd *se_cmd)
 {
-	struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
+	struct tcm_vhost_cmd *cmd = container_of(se_cmd,
 				struct tcm_vhost_cmd, tvc_se_cmd);
-	vhost_scsi_complete_cmd(tv_cmd);
+	vhost_scsi_complete_cmd(cmd);
 	return 0;
 }
 
@@ -556,24 +556,24 @@ tcm_vhost_allocate_evt(struct vhost_scsi *vs,
 	return evt;
 }
 
-static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
+static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *cmd)
 {
-	struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
+	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
 
 	/* TODO locking against target/backend threads? */
 	transport_generic_free_cmd(se_cmd, 1);
 
-	if (tv_cmd->tvc_sgl_count) {
+	if (cmd->tvc_sgl_count) {
 		u32 i;
-		for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
-			put_page(sg_page(&tv_cmd->tvc_sgl[i]));
+		for (i = 0; i < cmd->tvc_sgl_count; i++)
+			put_page(sg_page(&cmd->tvc_sgl[i]));
 
-		kfree(tv_cmd->tvc_sgl);
+		kfree(cmd->tvc_sgl);
 	}
 
-	tcm_vhost_put_inflight(tv_cmd->inflight);
+	tcm_vhost_put_inflight(cmd->inflight);
 
-	kfree(tv_cmd);
+	kfree(cmd);
 }
 
 static void
@@ -656,7 +656,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
 					vs_completion_work);
 	DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
 	struct virtio_scsi_cmd_resp v_rsp;
-	struct tcm_vhost_cmd *tv_cmd;
+	struct tcm_vhost_cmd *cmd;
 	struct llist_node *llnode;
 	struct se_cmd *se_cmd;
 	int ret, vq;
@@ -664,32 +664,32 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
 	bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
 	llnode = llist_del_all(&vs->vs_completion_list);
 	while (llnode) {
-		tv_cmd = llist_entry(llnode, struct tcm_vhost_cmd,
+		cmd = llist_entry(llnode, struct tcm_vhost_cmd,
 				tvc_completion_list);
 		llnode = llist_next(llnode);
-		se_cmd = &tv_cmd->tvc_se_cmd;
+		se_cmd = &cmd->tvc_se_cmd;
 
 		pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
-			tv_cmd, se_cmd->residual_count, se_cmd->scsi_status);
+			cmd, se_cmd->residual_count, se_cmd->scsi_status);
 
 		memset(&v_rsp, 0, sizeof(v_rsp));
 		v_rsp.resid = se_cmd->residual_count;
 		/* TODO is status_qualifier field needed? */
 		v_rsp.status = se_cmd->scsi_status;
 		v_rsp.sense_len = se_cmd->scsi_sense_length;
-		memcpy(v_rsp.sense, tv_cmd->tvc_sense_buf,
+		memcpy(v_rsp.sense, cmd->tvc_sense_buf,
 		       v_rsp.sense_len);
-		ret = copy_to_user(tv_cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
+		ret = copy_to_user(cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
 		if (likely(ret == 0)) {
 			struct vhost_scsi_virtqueue *q;
-			vhost_add_used(tv_cmd->tvc_vq, tv_cmd->tvc_vq_desc, 0);
-			q = container_of(tv_cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
+			vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
+			q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
 			vq = q - vs->vqs;
 			__set_bit(vq, signal);
 		} else
 			pr_err("Faulted on virtio_scsi_cmd_resp\n");
 
-		vhost_scsi_free_cmd(tv_cmd);
+		vhost_scsi_free_cmd(cmd);
 	}
 
 	vq = -1;
@@ -705,7 +705,7 @@ vhost_scsi_allocate_cmd(struct vhost_virtqueue *vq,
 			u32 exp_data_len,
 			int data_direction)
 {
-	struct tcm_vhost_cmd *tv_cmd;
+	struct tcm_vhost_cmd *cmd;
 	struct tcm_vhost_nexus *tv_nexus;
 
 	tv_nexus = tpg->tpg_nexus;
@@ -714,19 +714,19 @@ vhost_scsi_allocate_cmd(struct vhost_virtqueue *vq,
 		return ERR_PTR(-EIO);
 	}
 
-	tv_cmd = kzalloc(sizeof(struct tcm_vhost_cmd), GFP_ATOMIC);
-	if (!tv_cmd) {
+	cmd = kzalloc(sizeof(struct tcm_vhost_cmd), GFP_ATOMIC);
+	if (!cmd) {
 		pr_err("Unable to allocate struct tcm_vhost_cmd\n");
 		return ERR_PTR(-ENOMEM);
 	}
-	tv_cmd->tvc_tag = v_req->tag;
-	tv_cmd->tvc_task_attr = v_req->task_attr;
-	tv_cmd->tvc_exp_data_len = exp_data_len;
-	tv_cmd->tvc_data_direction = data_direction;
-	tv_cmd->tvc_nexus = tv_nexus;
-	tv_cmd->inflight = tcm_vhost_get_inflight(vq);
+	cmd->tvc_tag = v_req->tag;
+	cmd->tvc_task_attr = v_req->task_attr;
+	cmd->tvc_exp_data_len = exp_data_len;
+	cmd->tvc_data_direction = data_direction;
+	cmd->tvc_nexus = tv_nexus;
+	cmd->inflight = tcm_vhost_get_inflight(vq);
 
-	return tv_cmd;
+	return cmd;
 }
 
 /*
@@ -783,7 +783,7 @@ out:
 }
 
 static int
-vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd,
+vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd,
 			  struct iovec *iov,
 			  unsigned int niov,
 			  int write)
@@ -802,25 +802,25 @@ vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd,
 
 	/* TODO overflow checking */
 
-	sg = kmalloc(sizeof(tv_cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC);
+	sg = kmalloc(sizeof(cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC);
 	if (!sg)
 		return -ENOMEM;
 	pr_debug("%s sg %p sgl_count %u is_err %d\n", __func__,
 	       sg, sgl_count, !sg);
 	sg_init_table(sg, sgl_count);
 
-	tv_cmd->tvc_sgl = sg;
-	tv_cmd->tvc_sgl_count = sgl_count;
+	cmd->tvc_sgl = sg;
+	cmd->tvc_sgl_count = sgl_count;
 
 	pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count);
 	for (i = 0; i < niov; i++) {
 		ret = vhost_scsi_map_to_sgl(sg, sgl_count, &iov[i], write);
 		if (ret < 0) {
-			for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
-				put_page(sg_page(&tv_cmd->tvc_sgl[i]));
-			kfree(tv_cmd->tvc_sgl);
-			tv_cmd->tvc_sgl = NULL;
-			tv_cmd->tvc_sgl_count = 0;
+			for (i = 0; i < cmd->tvc_sgl_count; i++)
+				put_page(sg_page(&cmd->tvc_sgl[i]));
+			kfree(cmd->tvc_sgl);
+			cmd->tvc_sgl = NULL;
+			cmd->tvc_sgl_count = 0;
 			return ret;
 		}
 
@@ -832,15 +832,15 @@ vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd,
 
 static void tcm_vhost_submission_work(struct work_struct *work)
 {
-	struct tcm_vhost_cmd *tv_cmd =
+	struct tcm_vhost_cmd *cmd =
 		container_of(work, struct tcm_vhost_cmd, work);
 	struct tcm_vhost_nexus *tv_nexus;
-	struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
+	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
 	struct scatterlist *sg_ptr, *sg_bidi_ptr = NULL;
 	int rc, sg_no_bidi = 0;
 
-	if (tv_cmd->tvc_sgl_count) {
-		sg_ptr = tv_cmd->tvc_sgl;
+	if (cmd->tvc_sgl_count) {
+		sg_ptr = cmd->tvc_sgl;
 /* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */
 #if 0
 	if (se_cmd->se_cmd_flags & SCF_BIDI) {
@@ -851,13 +851,13 @@ static void tcm_vhost_submission_work(struct work_struct *work)
 	} else {
 		sg_ptr = NULL;
 	}
-	tv_nexus = tv_cmd->tvc_nexus;
+	tv_nexus = cmd->tvc_nexus;
 
 	rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
-			tv_cmd->tvc_cdb, &tv_cmd->tvc_sense_buf[0],
-			tv_cmd->tvc_lun, tv_cmd->tvc_exp_data_len,
-			tv_cmd->tvc_task_attr, tv_cmd->tvc_data_direction,
-			0, sg_ptr, tv_cmd->tvc_sgl_count,
+			cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
+			cmd->tvc_lun, cmd->tvc_exp_data_len,
+			cmd->tvc_task_attr, cmd->tvc_data_direction,
+			0, sg_ptr, cmd->tvc_sgl_count,
 			sg_bidi_ptr, sg_no_bidi);
 	if (rc < 0) {
 		transport_send_check_condition_and_sense(se_cmd,
@@ -891,7 +891,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 	struct tcm_vhost_tpg **vs_tpg;
 	struct virtio_scsi_cmd_req v_req;
 	struct tcm_vhost_tpg *tpg;
-	struct tcm_vhost_cmd *tv_cmd;
+	struct tcm_vhost_cmd *cmd;
 	u32 exp_data_len, data_first, data_num, data_direction;
 	unsigned out, in, i;
 	int head, ret;
@@ -988,46 +988,46 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 		for (i = 0; i < data_num; i++)
 			exp_data_len += vq->iov[data_first + i].iov_len;
 
-		tv_cmd = vhost_scsi_allocate_cmd(vq, tpg, &v_req,
+		cmd = vhost_scsi_allocate_cmd(vq, tpg, &v_req,
 					exp_data_len, data_direction);
-		if (IS_ERR(tv_cmd)) {
+		if (IS_ERR(cmd)) {
 			vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n",
-					PTR_ERR(tv_cmd));
+					PTR_ERR(cmd));
 			goto err_cmd;
 		}
 		pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
-			": %d\n", tv_cmd, exp_data_len, data_direction);
+			": %d\n", cmd, exp_data_len, data_direction);
 
-		tv_cmd->tvc_vhost = vs;
-		tv_cmd->tvc_vq = vq;
-		tv_cmd->tvc_resp = vq->iov[out].iov_base;
+		cmd->tvc_vhost = vs;
+		cmd->tvc_vq = vq;
+		cmd->tvc_resp = vq->iov[out].iov_base;
 
 		/*
-		 * Copy in the recieved CDB descriptor into tv_cmd->tvc_cdb
+		 * Copy in the recieved CDB descriptor into cmd->tvc_cdb
 		 * that will be used by tcm_vhost_new_cmd_map() and down into
 		 * target_setup_cmd_from_cdb()
 		 */
-		memcpy(tv_cmd->tvc_cdb, v_req.cdb, TCM_VHOST_MAX_CDB_SIZE);
+		memcpy(cmd->tvc_cdb, v_req.cdb, TCM_VHOST_MAX_CDB_SIZE);
 		/*
 		 * Check that the recieved CDB size does not exceeded our
 		 * hardcoded max for tcm_vhost
 		 */
 		/* TODO what if cdb was too small for varlen cdb header? */
-		if (unlikely(scsi_command_size(tv_cmd->tvc_cdb) >
+		if (unlikely(scsi_command_size(cmd->tvc_cdb) >
 				TCM_VHOST_MAX_CDB_SIZE)) {
 			vq_err(vq, "Received SCSI CDB with command_size: %d that"
 				" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
-				scsi_command_size(tv_cmd->tvc_cdb),
+				scsi_command_size(cmd->tvc_cdb),
 				TCM_VHOST_MAX_CDB_SIZE);
 			goto err_free;
 		}
-		tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
+		cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
 
 		pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
-			 tv_cmd->tvc_cdb[0], tv_cmd->tvc_lun);
+			 cmd->tvc_cdb[0], cmd->tvc_lun);
 
 		if (data_direction != DMA_NONE) {
-			ret = vhost_scsi_map_iov_to_sgl(tv_cmd,
+			ret = vhost_scsi_map_iov_to_sgl(cmd,
 					&vq->iov[data_first], data_num,
 					data_direction == DMA_TO_DEVICE);
 			if (unlikely(ret)) {
@@ -1041,22 +1041,22 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 		 * complete the virtio-scsi request in TCM callback context via
 		 * tcm_vhost_queue_data_in() and tcm_vhost_queue_status()
 		 */
-		tv_cmd->tvc_vq_desc = head;
+		cmd->tvc_vq_desc = head;
 		/*
 		 * Dispatch tv_cmd descriptor for cmwq execution in process
 		 * context provided by tcm_vhost_workqueue. This also ensures
 		 * tv_cmd is executed on the same kworker CPU as this vhost
 		 * thread to gain positive L2 cache locality effects..
 		 */
-		INIT_WORK(&tv_cmd->work, tcm_vhost_submission_work);
-		queue_work(tcm_vhost_workqueue, &tv_cmd->work);
+		INIT_WORK(&cmd->work, tcm_vhost_submission_work);
+		queue_work(tcm_vhost_workqueue, &cmd->work);
 	}
 
 	mutex_unlock(&vq->mutex);
 	return;
 
 err_free:
-	vhost_scsi_free_cmd(tv_cmd);
+	vhost_scsi_free_cmd(cmd);
 err_cmd:
 	vhost_scsi_send_bad_target(vs, vq, head, out);
 	mutex_unlock(&vq->mutex);