aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/vhost
diff options
context:
space:
mode:
authorAsias He <asias@redhat.com>2013-04-26 23:16:49 -0400
committerMichael S. Tsirkin <mst@redhat.com>2013-05-01 03:02:51 -0400
commitf2f0173d6a95fa60e7934f62ce27d6bd24e4e09c (patch)
tree9b875c3e325d3cdc6c0ddd0cfa198e8e6deff9b0 /drivers/vhost
parent3ab2e420ec1caf4ead233f3161ac7d86fe5d2a9f (diff)
tcm_vhost: Wait for pending requests in vhost_scsi_flush()
Unlike tcm_vhost_evt requests, tcm_vhost_cmd requests are passed to the target core system, so we cannot make sure all the pending requests will be finished by flushing the virt queue. In this patch, we refcount every tcm_vhost_cmd request to make vhost_scsi_flush() wait for all the pending requests issued before the flush operation to be finished. This is useful when we call vhost_scsi_clear_endpoint() to stop tcm_vhost. No new requests will be passed to the target core system because we clear the endpoint by setting vs_tpg to NULL, and we wait for all the old requests. These guarantee that no requests will be leaked and that existing requests will be completed. Signed-off-by: Asias He <asias@redhat.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Diffstat (limited to 'drivers/vhost')
-rw-r--r--drivers/vhost/tcm_vhost.c90
-rw-r--r--drivers/vhost/tcm_vhost.h3
2 files changed, 92 insertions, 1 deletion
diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
index 99d3480450e7..afb530887936 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/tcm_vhost.c
@@ -74,8 +74,19 @@ enum {
74#define VHOST_SCSI_MAX_VQ 128 74#define VHOST_SCSI_MAX_VQ 128
75#define VHOST_SCSI_MAX_EVENT 128 75#define VHOST_SCSI_MAX_EVENT 128
76 76
77struct vhost_scsi_inflight {
78 /* Wait for the flush operation to finish */
79 struct completion comp;
80 /* Refcount for the inflight reqs */
81 struct kref kref;
82};
83
77struct vhost_scsi_virtqueue { 84struct vhost_scsi_virtqueue {
78 struct vhost_virtqueue vq; 85 struct vhost_virtqueue vq;
86 /* Track inflight reqs, protected by vq->mutex */
87 struct vhost_scsi_inflight inflights[2];
88 /* Indicate current inflight in use, protected by vq->mutex */
89 int inflight_idx;
79}; 90};
80 91
81struct vhost_scsi { 92struct vhost_scsi {
@@ -111,6 +122,59 @@ static int iov_num_pages(struct iovec *iov)
111 ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT; 122 ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT;
112} 123}
113 124
125void tcm_vhost_done_inflight(struct kref *kref)
126{
127 struct vhost_scsi_inflight *inflight;
128
129 inflight = container_of(kref, struct vhost_scsi_inflight, kref);
130 complete(&inflight->comp);
131}
132
133static void tcm_vhost_init_inflight(struct vhost_scsi *vs,
134 struct vhost_scsi_inflight *old_inflight[])
135{
136 struct vhost_scsi_inflight *new_inflight;
137 struct vhost_virtqueue *vq;
138 int idx, i;
139
140 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
141 vq = &vs->vqs[i].vq;
142
143 mutex_lock(&vq->mutex);
144
145 /* store old inflight */
146 idx = vs->vqs[i].inflight_idx;
147 if (old_inflight)
148 old_inflight[i] = &vs->vqs[i].inflights[idx];
149
150 /* setup new inflight */
151 vs->vqs[i].inflight_idx = idx ^ 1;
152 new_inflight = &vs->vqs[i].inflights[idx ^ 1];
153 kref_init(&new_inflight->kref);
154 init_completion(&new_inflight->comp);
155
156 mutex_unlock(&vq->mutex);
157 }
158}
159
160static struct vhost_scsi_inflight *
161tcm_vhost_get_inflight(struct vhost_virtqueue *vq)
162{
163 struct vhost_scsi_inflight *inflight;
164 struct vhost_scsi_virtqueue *svq;
165
166 svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
167 inflight = &svq->inflights[svq->inflight_idx];
168 kref_get(&inflight->kref);
169
170 return inflight;
171}
172
173static void tcm_vhost_put_inflight(struct vhost_scsi_inflight *inflight)
174{
175 kref_put(&inflight->kref, tcm_vhost_done_inflight);
176}
177
114static int tcm_vhost_check_true(struct se_portal_group *se_tpg) 178static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
115{ 179{
116 return 1; 180 return 1;
@@ -407,6 +471,8 @@ static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
407 kfree(tv_cmd->tvc_sgl); 471 kfree(tv_cmd->tvc_sgl);
408 } 472 }
409 473
474 tcm_vhost_put_inflight(tv_cmd->inflight);
475
410 kfree(tv_cmd); 476 kfree(tv_cmd);
411} 477}
412 478
@@ -533,6 +599,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
533} 599}
534 600
535static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd( 601static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
602 struct vhost_virtqueue *vq,
536 struct tcm_vhost_tpg *tv_tpg, 603 struct tcm_vhost_tpg *tv_tpg,
537 struct virtio_scsi_cmd_req *v_req, 604 struct virtio_scsi_cmd_req *v_req,
538 u32 exp_data_len, 605 u32 exp_data_len,
@@ -557,6 +624,7 @@ static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
557 tv_cmd->tvc_exp_data_len = exp_data_len; 624 tv_cmd->tvc_exp_data_len = exp_data_len;
558 tv_cmd->tvc_data_direction = data_direction; 625 tv_cmd->tvc_data_direction = data_direction;
559 tv_cmd->tvc_nexus = tv_nexus; 626 tv_cmd->tvc_nexus = tv_nexus;
627 tv_cmd->inflight = tcm_vhost_get_inflight(vq);
560 628
561 return tv_cmd; 629 return tv_cmd;
562} 630}
@@ -812,7 +880,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
812 for (i = 0; i < data_num; i++) 880 for (i = 0; i < data_num; i++)
813 exp_data_len += vq->iov[data_first + i].iov_len; 881 exp_data_len += vq->iov[data_first + i].iov_len;
814 882
815 tv_cmd = vhost_scsi_allocate_cmd(tv_tpg, &v_req, 883 tv_cmd = vhost_scsi_allocate_cmd(vq, tv_tpg, &v_req,
816 exp_data_len, data_direction); 884 exp_data_len, data_direction);
817 if (IS_ERR(tv_cmd)) { 885 if (IS_ERR(tv_cmd)) {
818 vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n", 886 vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n",
@@ -949,12 +1017,29 @@ static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
949 1017
950static void vhost_scsi_flush(struct vhost_scsi *vs) 1018static void vhost_scsi_flush(struct vhost_scsi *vs)
951{ 1019{
1020 struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
952 int i; 1021 int i;
953 1022
1023 /* Init new inflight and remember the old inflight */
1024 tcm_vhost_init_inflight(vs, old_inflight);
1025
1026 /*
1027 * The inflight->kref was initialized to 1. We decrement it here to
1028 * indicate the start of the flush operation so that it will reach 0
1029 * when all the reqs are finished.
1030 */
1031 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1032 kref_put(&old_inflight[i]->kref, tcm_vhost_done_inflight);
1033
1034 /* Flush both the vhost poll and vhost work */
954 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) 1035 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
955 vhost_scsi_flush_vq(vs, i); 1036 vhost_scsi_flush_vq(vs, i);
956 vhost_work_flush(&vs->dev, &vs->vs_completion_work); 1037 vhost_work_flush(&vs->dev, &vs->vs_completion_work);
957 vhost_work_flush(&vs->dev, &vs->vs_event_work); 1038 vhost_work_flush(&vs->dev, &vs->vs_event_work);
1039
1040 /* Wait for all reqs issued before the flush to be finished */
1041 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1042 wait_for_completion(&old_inflight[i]->comp);
958} 1043}
959 1044
960/* 1045/*
@@ -1185,6 +1270,9 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
1185 s->vqs[i].vq.handle_kick = vhost_scsi_handle_kick; 1270 s->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
1186 } 1271 }
1187 r = vhost_dev_init(&s->dev, vqs, VHOST_SCSI_MAX_VQ); 1272 r = vhost_dev_init(&s->dev, vqs, VHOST_SCSI_MAX_VQ);
1273
1274 tcm_vhost_init_inflight(s, NULL);
1275
1188 if (r < 0) { 1276 if (r < 0) {
1189 kfree(vqs); 1277 kfree(vqs);
1190 kfree(s); 1278 kfree(s);
diff --git a/drivers/vhost/tcm_vhost.h b/drivers/vhost/tcm_vhost.h
index 514b9fda230e..26a57c2fdf92 100644
--- a/drivers/vhost/tcm_vhost.h
+++ b/drivers/vhost/tcm_vhost.h
@@ -2,6 +2,7 @@
2#define TCM_VHOST_NAMELEN 256 2#define TCM_VHOST_NAMELEN 256
3#define TCM_VHOST_MAX_CDB_SIZE 32 3#define TCM_VHOST_MAX_CDB_SIZE 32
4 4
5struct vhost_scsi_inflight;
5struct tcm_vhost_cmd { 6struct tcm_vhost_cmd {
6 /* Descriptor from vhost_get_vq_desc() for virt_queue segment */ 7 /* Descriptor from vhost_get_vq_desc() for virt_queue segment */
7 int tvc_vq_desc; 8 int tvc_vq_desc;
@@ -37,6 +38,8 @@ struct tcm_vhost_cmd {
37 unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER]; 38 unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
38 /* Completed commands list, serviced from vhost worker thread */ 39 /* Completed commands list, serviced from vhost worker thread */
39 struct llist_node tvc_completion_list; 40 struct llist_node tvc_completion_list;
41 /* Used to track inflight cmd */
42 struct vhost_scsi_inflight *inflight;
40}; 43};
41 44
42struct tcm_vhost_nexus { 45struct tcm_vhost_nexus {