Diffstat (limited to 'drivers/vhost/tcm_vhost.c')
-rw-r--r-- | drivers/vhost/tcm_vhost.c | 90
1 file changed, 89 insertions, 1 deletion
diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
index 99d3480450e7..afb530887936 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/tcm_vhost.c
@@ -74,8 +74,19 @@ enum {
 #define VHOST_SCSI_MAX_VQ	128
 #define VHOST_SCSI_MAX_EVENT	128
 
+struct vhost_scsi_inflight {
+	/* Wait for the flush operation to finish */
+	struct completion comp;
+	/* Refcount for the inflight reqs */
+	struct kref kref;
+};
+
 struct vhost_scsi_virtqueue {
 	struct vhost_virtqueue vq;
+	/* Track inflight reqs, protected by vq->mutex */
+	struct vhost_scsi_inflight inflights[2];
+	/* Indicate current inflight in use, protected by vq->mutex */
+	int inflight_idx;
 };
 
 struct vhost_scsi {
@@ -111,6 +122,59 @@ static int iov_num_pages(struct iovec *iov)
 	       ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT;
 }
 
+void tcm_vhost_done_inflight(struct kref *kref)
+{
+	struct vhost_scsi_inflight *inflight;
+
+	inflight = container_of(kref, struct vhost_scsi_inflight, kref);
+	complete(&inflight->comp);
+}
+
+static void tcm_vhost_init_inflight(struct vhost_scsi *vs,
+				    struct vhost_scsi_inflight *old_inflight[])
+{
+	struct vhost_scsi_inflight *new_inflight;
+	struct vhost_virtqueue *vq;
+	int idx, i;
+
+	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+		vq = &vs->vqs[i].vq;
+
+		mutex_lock(&vq->mutex);
+
+		/* store old inflight */
+		idx = vs->vqs[i].inflight_idx;
+		if (old_inflight)
+			old_inflight[i] = &vs->vqs[i].inflights[idx];
+
+		/* set up new inflight */
+		vs->vqs[i].inflight_idx = idx ^ 1;
+		new_inflight = &vs->vqs[i].inflights[idx ^ 1];
+		kref_init(&new_inflight->kref);
+		init_completion(&new_inflight->comp);
+
+		mutex_unlock(&vq->mutex);
+	}
+}
+
+static struct vhost_scsi_inflight *
+tcm_vhost_get_inflight(struct vhost_virtqueue *vq)
+{
+	struct vhost_scsi_inflight *inflight;
+	struct vhost_scsi_virtqueue *svq;
+
+	svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
+	inflight = &svq->inflights[svq->inflight_idx];
+	kref_get(&inflight->kref);
+
+	return inflight;
+}
+
+static void tcm_vhost_put_inflight(struct vhost_scsi_inflight *inflight)
+{
+	kref_put(&inflight->kref, tcm_vhost_done_inflight);
+}
+
 static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
 {
 	return 1;
@@ -407,6 +471,8 @@ static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
 		kfree(tv_cmd->tvc_sgl);
 	}
 
+	tcm_vhost_put_inflight(tv_cmd->inflight);
+
 	kfree(tv_cmd);
 }
 
@@ -533,6 +599,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
 }
 
 static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
+	struct vhost_virtqueue *vq,
 	struct tcm_vhost_tpg *tv_tpg,
 	struct virtio_scsi_cmd_req *v_req,
 	u32 exp_data_len,
@@ -557,6 +624,7 @@ static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
 	tv_cmd->tvc_exp_data_len = exp_data_len;
 	tv_cmd->tvc_data_direction = data_direction;
 	tv_cmd->tvc_nexus = tv_nexus;
+	tv_cmd->inflight = tcm_vhost_get_inflight(vq);
 
 	return tv_cmd;
 }
@@ -812,7 +880,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
 		for (i = 0; i < data_num; i++)
 			exp_data_len += vq->iov[data_first + i].iov_len;
 
-		tv_cmd = vhost_scsi_allocate_cmd(tv_tpg, &v_req,
+		tv_cmd = vhost_scsi_allocate_cmd(vq, tv_tpg, &v_req,
 					exp_data_len, data_direction);
 		if (IS_ERR(tv_cmd)) {
 			vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n",
@@ -949,12 +1017,29 @@ static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
 
 static void vhost_scsi_flush(struct vhost_scsi *vs)
 {
+	struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
 	int i;
 
+	/* Init new inflight and remember the old inflight */
+	tcm_vhost_init_inflight(vs, old_inflight);
+
+	/*
+	 * The inflight->kref was initialized to 1. We decrement it here to
+	 * indicate the start of the flush operation so that it will reach 0
+	 * when all the reqs are finished.
+	 */
+	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
+		kref_put(&old_inflight[i]->kref, tcm_vhost_done_inflight);
+
+	/* Flush both the vhost poll and vhost work */
 	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
 		vhost_scsi_flush_vq(vs, i);
 	vhost_work_flush(&vs->dev, &vs->vs_completion_work);
 	vhost_work_flush(&vs->dev, &vs->vs_event_work);
+
+	/* Wait for all reqs issued before the flush to be finished */
+	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
+		wait_for_completion(&old_inflight[i]->comp);
 }
 
 /*
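
Note on the hunk above: the flush relies on a two-generation counter. Each virtqueue keeps two vhost_scsi_inflight slots, every request takes a reference on the current slot, and a flush swaps slots, drops the initial reference of the old slot, and waits for its refcount to reach zero. The following is a minimal userspace sketch of that pattern, assuming C11 atomics and a POSIX semaphore in place of kref and struct completion; the names inflight, inflight_get, inflight_put, and flush are illustrative only and are not part of this patch.

/* Build with: cc -std=c11 -pthread sketch.c
 * Userspace analogue of the two-generation inflight tracking added above.
 * atomic_int stands in for kref, sem_t for struct completion. */
#include <stdatomic.h>
#include <semaphore.h>
#include <stdio.h>

struct inflight {
	atomic_int refs;	/* plays the role of kref              */
	sem_t done;		/* plays the role of struct completion */
};

static struct inflight gens[2];	/* two generations, like inflights[2] */
static int cur;			/* like inflight_idx                  */

static void inflight_init(struct inflight *i)
{
	atomic_store(&i->refs, 1);	/* kref_init() starts at 1 */
	sem_init(&i->done, 0, 0);	/* init_completion()       */
}

static struct inflight *inflight_get(void)	/* like tcm_vhost_get_inflight() */
{
	atomic_fetch_add(&gens[cur].refs, 1);
	return &gens[cur];
}

static void inflight_put(struct inflight *i)	/* like tcm_vhost_put_inflight() */
{
	/* last reference signals the waiter, like tcm_vhost_done_inflight() */
	if (atomic_fetch_sub(&i->refs, 1) == 1)
		sem_post(&i->done);
}

static void flush(void)				/* like vhost_scsi_flush() */
{
	struct inflight *old = &gens[cur];

	cur ^= 1;			/* new requests now pin the other slot */
	inflight_init(&gens[cur]);
	inflight_put(old);		/* drop the initial reference          */
	sem_wait(&old->done);		/* wait until every old request is put */
}

int main(void)
{
	inflight_init(&gens[0]);

	struct inflight *req = inflight_get();	/* a request starts    */
	inflight_put(req);			/* ...and completes    */

	flush();	/* returns once the old generation has drained */
	puts("flush saw all old requests finish");
	return 0;
}

In this single-threaded demo the request completes before flush() is called, so sem_wait() returns immediately; in the driver the put happens from request completion, which is why the flush also drains the vhost work queues before waiting.
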
@@ -1185,6 +1270,9 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
 		s->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
 	}
 	r = vhost_dev_init(&s->dev, vqs, VHOST_SCSI_MAX_VQ);
+
+	tcm_vhost_init_inflight(s, NULL);
+
 	if (r < 0) {
 		kfree(vqs);
 		kfree(s);