aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/vhost/tcm_vhost.c
diff options
context:
space:
mode:
authorAsias He <asias@redhat.com>2013-04-26 23:16:48 -0400
committerMichael S. Tsirkin <mst@redhat.com>2013-05-01 03:02:45 -0400
commit3ab2e420ec1caf4ead233f3161ac7d86fe5d2a9f (patch)
tree6c6237f7bbad368dfbdae34895430280af0d19b0 /drivers/vhost/tcm_vhost.c
parentbc7562355fda8075793bf66094cda573206ec693 (diff)
vhost: Allow device specific fields per vq
This is useful for any device who wants device specific fields per vq. For example, tcm_vhost wants a per vq field to track requests which are in flight on the vq. Also, on top of this we can add patches to move things like ubufs from vhost.h out to net.c. Signed-off-by: Asias He <asias@redhat.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Diffstat (limited to 'drivers/vhost/tcm_vhost.c')
-rw-r--r--drivers/vhost/tcm_vhost.c55
1 file changed, 37 insertions(+), 18 deletions(-)
diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
index 1677238d281f..99d3480450e7 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/tcm_vhost.c
@@ -74,13 +74,17 @@ enum {
74#define VHOST_SCSI_MAX_VQ 128 74#define VHOST_SCSI_MAX_VQ 128
75#define VHOST_SCSI_MAX_EVENT 128 75#define VHOST_SCSI_MAX_EVENT 128
76 76
77struct vhost_scsi_virtqueue {
78 struct vhost_virtqueue vq;
79};
80
77struct vhost_scsi { 81struct vhost_scsi {
78 /* Protected by vhost_scsi->dev.mutex */ 82 /* Protected by vhost_scsi->dev.mutex */
79 struct tcm_vhost_tpg **vs_tpg; 83 struct tcm_vhost_tpg **vs_tpg;
80 char vs_vhost_wwpn[TRANSPORT_IQN_LEN]; 84 char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
81 85
82 struct vhost_dev dev; 86 struct vhost_dev dev;
83 struct vhost_virtqueue vqs[VHOST_SCSI_MAX_VQ]; 87 struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];
84 88
85 struct vhost_work vs_completion_work; /* cmd completion work item */ 89 struct vhost_work vs_completion_work; /* cmd completion work item */
86 struct llist_head vs_completion_list; /* cmd completion queue */ 90 struct llist_head vs_completion_list; /* cmd completion queue */
@@ -366,7 +370,7 @@ static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
366static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct vhost_scsi *vs, 370static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct vhost_scsi *vs,
367 u32 event, u32 reason) 371 u32 event, u32 reason)
368{ 372{
369 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT]; 373 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
370 struct tcm_vhost_evt *evt; 374 struct tcm_vhost_evt *evt;
371 375
372 if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) { 376 if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
@@ -409,7 +413,7 @@ static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
409static void tcm_vhost_do_evt_work(struct vhost_scsi *vs, 413static void tcm_vhost_do_evt_work(struct vhost_scsi *vs,
410 struct tcm_vhost_evt *evt) 414 struct tcm_vhost_evt *evt)
411{ 415{
412 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT]; 416 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
413 struct virtio_scsi_event *event = &evt->event; 417 struct virtio_scsi_event *event = &evt->event;
414 struct virtio_scsi_event __user *eventp; 418 struct virtio_scsi_event __user *eventp;
415 unsigned out, in; 419 unsigned out, in;
@@ -460,7 +464,7 @@ static void tcm_vhost_evt_work(struct vhost_work *work)
460{ 464{
461 struct vhost_scsi *vs = container_of(work, struct vhost_scsi, 465 struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
462 vs_event_work); 466 vs_event_work);
463 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT]; 467 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
464 struct tcm_vhost_evt *evt; 468 struct tcm_vhost_evt *evt;
465 struct llist_node *llnode; 469 struct llist_node *llnode;
466 470
@@ -511,8 +515,10 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
511 v_rsp.sense_len); 515 v_rsp.sense_len);
512 ret = copy_to_user(tv_cmd->tvc_resp, &v_rsp, sizeof(v_rsp)); 516 ret = copy_to_user(tv_cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
513 if (likely(ret == 0)) { 517 if (likely(ret == 0)) {
518 struct vhost_scsi_virtqueue *q;
514 vhost_add_used(tv_cmd->tvc_vq, tv_cmd->tvc_vq_desc, 0); 519 vhost_add_used(tv_cmd->tvc_vq, tv_cmd->tvc_vq_desc, 0);
515 vq = tv_cmd->tvc_vq - vs->vqs; 520 q = container_of(tv_cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
521 vq = q - vs->vqs;
516 __set_bit(vq, signal); 522 __set_bit(vq, signal);
517 } else 523 } else
518 pr_err("Faulted on virtio_scsi_cmd_resp\n"); 524 pr_err("Faulted on virtio_scsi_cmd_resp\n");
@@ -523,7 +529,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
523 vq = -1; 529 vq = -1;
524 while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1)) 530 while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
525 < VHOST_SCSI_MAX_VQ) 531 < VHOST_SCSI_MAX_VQ)
526 vhost_signal(&vs->dev, &vs->vqs[vq]); 532 vhost_signal(&vs->dev, &vs->vqs[vq].vq);
527} 533}
528 534
529static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd( 535static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
@@ -938,7 +944,7 @@ static void vhost_scsi_handle_kick(struct vhost_work *work)
938 944
939static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index) 945static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
940{ 946{
941 vhost_poll_flush(&vs->dev.vqs[index].poll); 947 vhost_poll_flush(&vs->vqs[index].vq.poll);
942} 948}
943 949
944static void vhost_scsi_flush(struct vhost_scsi *vs) 950static void vhost_scsi_flush(struct vhost_scsi *vs)
@@ -975,7 +981,7 @@ static int vhost_scsi_set_endpoint(
975 /* Verify that ring has been setup correctly. */ 981 /* Verify that ring has been setup correctly. */
976 for (index = 0; index < vs->dev.nvqs; ++index) { 982 for (index = 0; index < vs->dev.nvqs; ++index) {
977 /* Verify that ring has been setup correctly. */ 983 /* Verify that ring has been setup correctly. */
978 if (!vhost_vq_access_ok(&vs->vqs[index])) { 984 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
979 ret = -EFAULT; 985 ret = -EFAULT;
980 goto out; 986 goto out;
981 } 987 }
@@ -1022,7 +1028,7 @@ static int vhost_scsi_set_endpoint(
1022 memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn, 1028 memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
1023 sizeof(vs->vs_vhost_wwpn)); 1029 sizeof(vs->vs_vhost_wwpn));
1024 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { 1030 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1025 vq = &vs->vqs[i]; 1031 vq = &vs->vqs[i].vq;
1026 /* Flushing the vhost_work acts as synchronize_rcu */ 1032 /* Flushing the vhost_work acts as synchronize_rcu */
1027 mutex_lock(&vq->mutex); 1033 mutex_lock(&vq->mutex);
1028 rcu_assign_pointer(vq->private_data, vs_tpg); 1034 rcu_assign_pointer(vq->private_data, vs_tpg);
@@ -1063,7 +1069,7 @@ static int vhost_scsi_clear_endpoint(
1063 mutex_lock(&vs->dev.mutex); 1069 mutex_lock(&vs->dev.mutex);
1064 /* Verify that ring has been setup correctly. */ 1070 /* Verify that ring has been setup correctly. */
1065 for (index = 0; index < vs->dev.nvqs; ++index) { 1071 for (index = 0; index < vs->dev.nvqs; ++index) {
1066 if (!vhost_vq_access_ok(&vs->vqs[index])) { 1072 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1067 ret = -EFAULT; 1073 ret = -EFAULT;
1068 goto err_dev; 1074 goto err_dev;
1069 } 1075 }
@@ -1103,7 +1109,7 @@ static int vhost_scsi_clear_endpoint(
1103 } 1109 }
1104 if (match) { 1110 if (match) {
1105 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { 1111 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1106 vq = &vs->vqs[i]; 1112 vq = &vs->vqs[i].vq;
1107 /* Flushing the vhost_work acts as synchronize_rcu */ 1113 /* Flushing the vhost_work acts as synchronize_rcu */
1108 mutex_lock(&vq->mutex); 1114 mutex_lock(&vq->mutex);
1109 rcu_assign_pointer(vq->private_data, NULL); 1115 rcu_assign_pointer(vq->private_data, NULL);
@@ -1151,24 +1157,36 @@ static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
1151static int vhost_scsi_open(struct inode *inode, struct file *f) 1157static int vhost_scsi_open(struct inode *inode, struct file *f)
1152{ 1158{
1153 struct vhost_scsi *s; 1159 struct vhost_scsi *s;
1160 struct vhost_virtqueue **vqs;
1154 int r, i; 1161 int r, i;
1155 1162
1156 s = kzalloc(sizeof(*s), GFP_KERNEL); 1163 s = kzalloc(sizeof(*s), GFP_KERNEL);
1157 if (!s) 1164 if (!s)
1158 return -ENOMEM; 1165 return -ENOMEM;
1159 1166
1167 vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
1168 if (!vqs) {
1169 kfree(s);
1170 return -ENOMEM;
1171 }
1172
1160 vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work); 1173 vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work);
1161 vhost_work_init(&s->vs_event_work, tcm_vhost_evt_work); 1174 vhost_work_init(&s->vs_event_work, tcm_vhost_evt_work);
1162 1175
1163 s->vs_events_nr = 0; 1176 s->vs_events_nr = 0;
1164 s->vs_events_missed = false; 1177 s->vs_events_missed = false;
1165 1178
1166 s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick; 1179 vqs[VHOST_SCSI_VQ_CTL] = &s->vqs[VHOST_SCSI_VQ_CTL].vq;
1167 s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick; 1180 vqs[VHOST_SCSI_VQ_EVT] = &s->vqs[VHOST_SCSI_VQ_EVT].vq;
1168 for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) 1181 s->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
1169 s->vqs[i].handle_kick = vhost_scsi_handle_kick; 1182 s->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
1170 r = vhost_dev_init(&s->dev, s->vqs, VHOST_SCSI_MAX_VQ); 1183 for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
1184 vqs[i] = &s->vqs[i].vq;
1185 s->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
1186 }
1187 r = vhost_dev_init(&s->dev, vqs, VHOST_SCSI_MAX_VQ);
1171 if (r < 0) { 1188 if (r < 0) {
1189 kfree(vqs);
1172 kfree(s); 1190 kfree(s);
1173 return r; 1191 return r;
1174 } 1192 }
@@ -1190,6 +1208,7 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
1190 vhost_dev_cleanup(&s->dev, false); 1208 vhost_dev_cleanup(&s->dev, false);
1191 /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */ 1209 /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
1192 vhost_scsi_flush(s); 1210 vhost_scsi_flush(s);
1211 kfree(s->dev.vqs);
1193 kfree(s); 1212 kfree(s);
1194 return 0; 1213 return 0;
1195} 1214}
@@ -1205,7 +1224,7 @@ static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
1205 u32 events_missed; 1224 u32 events_missed;
1206 u64 features; 1225 u64 features;
1207 int r, abi_version = VHOST_SCSI_ABI_VERSION; 1226 int r, abi_version = VHOST_SCSI_ABI_VERSION;
1208 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT]; 1227 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1209 1228
1210 switch (ioctl) { 1229 switch (ioctl) {
1211 case VHOST_SCSI_SET_ENDPOINT: 1230 case VHOST_SCSI_SET_ENDPOINT:
@@ -1333,7 +1352,7 @@ static void tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
1333 else 1352 else
1334 reason = VIRTIO_SCSI_EVT_RESET_REMOVED; 1353 reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
1335 1354
1336 vq = &vs->vqs[VHOST_SCSI_VQ_EVT]; 1355 vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1337 mutex_lock(&vq->mutex); 1356 mutex_lock(&vq->mutex);
1338 tcm_vhost_send_evt(vs, tpg, lun, 1357 tcm_vhost_send_evt(vs, tpg, lun,
1339 VIRTIO_SCSI_T_TRANSPORT_RESET, reason); 1358 VIRTIO_SCSI_T_TRANSPORT_RESET, reason);