diff options
author | Asias He <asias@redhat.com> | 2013-04-25 03:35:21 -0400 |
---|---|---|
committer | Nicholas Bellinger <nab@linux-iscsi.org> | 2013-04-25 04:06:41 -0400 |
commit | a6c9af87363c5e964d981fe0ec4aa73061a0c793 (patch) | |
tree | 527c2f5179730e9763b6d1a7e969383e907205a0 | |
parent | f2b7daf5b13f69ac09e706fb5b906478212a4100 (diff) |
tcm_vhost: Add hotplug/hotunplug support
In commit 365a7150094 ([SCSI] virtio-scsi: hotplug support for
virtio-scsi), hotplug support was added to virtio-scsi.
This patch adds hotplug and hotunplug support to tcm_vhost.
You can create or delete a LUN in targetcli to hotplug or hotunplug a
LUN in the guest.
Signed-off-by: Asias He <asias@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
-rw-r--r-- | drivers/vhost/tcm_vhost.c | 210 | ||||
-rw-r--r-- | drivers/vhost/tcm_vhost.h | 10 |
2 files changed, 218 insertions, 2 deletions
diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c index 822cd1fbff5e..5340fd759c6e 100644 --- a/drivers/vhost/tcm_vhost.c +++ b/drivers/vhost/tcm_vhost.c | |||
@@ -71,6 +71,7 @@ enum { | |||
71 | 71 | ||
72 | #define VHOST_SCSI_MAX_TARGET 256 | 72 | #define VHOST_SCSI_MAX_TARGET 256 |
73 | #define VHOST_SCSI_MAX_VQ 128 | 73 | #define VHOST_SCSI_MAX_VQ 128 |
74 | #define VHOST_SCSI_MAX_EVENT 128 | ||
74 | 75 | ||
75 | struct vhost_scsi { | 76 | struct vhost_scsi { |
76 | /* Protected by vhost_scsi->dev.mutex */ | 77 | /* Protected by vhost_scsi->dev.mutex */ |
@@ -82,6 +83,12 @@ struct vhost_scsi { | |||
82 | 83 | ||
83 | struct vhost_work vs_completion_work; /* cmd completion work item */ | 84 | struct vhost_work vs_completion_work; /* cmd completion work item */ |
84 | struct llist_head vs_completion_list; /* cmd completion queue */ | 85 | struct llist_head vs_completion_list; /* cmd completion queue */ |
86 | |||
87 | struct vhost_work vs_event_work; /* evt injection work item */ | ||
88 | struct llist_head vs_event_list; /* evt injection queue */ | ||
89 | |||
90 | bool vs_events_missed; /* any missed events, protected by vq->mutex */ | ||
91 | int vs_events_nr; /* num of pending events, protected by vq->mutex */ | ||
85 | }; | 92 | }; |
86 | 93 | ||
87 | /* Local pointer to allocated TCM configfs fabric module */ | 94 | /* Local pointer to allocated TCM configfs fabric module */ |
@@ -349,6 +356,37 @@ static int tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd) | |||
349 | return 0; | 356 | return 0; |
350 | } | 357 | } |
351 | 358 | ||
359 | static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt) | ||
360 | { | ||
361 | vs->vs_events_nr--; | ||
362 | kfree(evt); | ||
363 | } | ||
364 | |||
365 | static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct vhost_scsi *vs, | ||
366 | u32 event, u32 reason) | ||
367 | { | ||
368 | struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT]; | ||
369 | struct tcm_vhost_evt *evt; | ||
370 | |||
371 | if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) { | ||
372 | vs->vs_events_missed = true; | ||
373 | return NULL; | ||
374 | } | ||
375 | |||
376 | evt = kzalloc(sizeof(*evt), GFP_KERNEL); | ||
377 | if (!evt) { | ||
378 | vq_err(vq, "Failed to allocate tcm_vhost_evt\n"); | ||
379 | vs->vs_events_missed = true; | ||
380 | return NULL; | ||
381 | } | ||
382 | |||
383 | evt->event.event = event; | ||
384 | evt->event.reason = reason; | ||
385 | vs->vs_events_nr++; | ||
386 | |||
387 | return evt; | ||
388 | } | ||
389 | |||
352 | static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd) | 390 | static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd) |
353 | { | 391 | { |
354 | struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd; | 392 | struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd; |
@@ -367,6 +405,75 @@ static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd) | |||
367 | kfree(tv_cmd); | 405 | kfree(tv_cmd); |
368 | } | 406 | } |
369 | 407 | ||
408 | static void tcm_vhost_do_evt_work(struct vhost_scsi *vs, | ||
409 | struct tcm_vhost_evt *evt) | ||
410 | { | ||
411 | struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT]; | ||
412 | struct virtio_scsi_event *event = &evt->event; | ||
413 | struct virtio_scsi_event __user *eventp; | ||
414 | unsigned out, in; | ||
415 | int head, ret; | ||
416 | |||
417 | if (!vq->private_data) { | ||
418 | vs->vs_events_missed = true; | ||
419 | return; | ||
420 | } | ||
421 | |||
422 | again: | ||
423 | vhost_disable_notify(&vs->dev, vq); | ||
424 | head = vhost_get_vq_desc(&vs->dev, vq, vq->iov, | ||
425 | ARRAY_SIZE(vq->iov), &out, &in, | ||
426 | NULL, NULL); | ||
427 | if (head < 0) { | ||
428 | vs->vs_events_missed = true; | ||
429 | return; | ||
430 | } | ||
431 | if (head == vq->num) { | ||
432 | if (vhost_enable_notify(&vs->dev, vq)) | ||
433 | goto again; | ||
434 | vs->vs_events_missed = true; | ||
435 | return; | ||
436 | } | ||
437 | |||
438 | if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) { | ||
439 | vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n", | ||
440 | vq->iov[out].iov_len); | ||
441 | vs->vs_events_missed = true; | ||
442 | return; | ||
443 | } | ||
444 | |||
445 | if (vs->vs_events_missed) { | ||
446 | event->event |= VIRTIO_SCSI_T_EVENTS_MISSED; | ||
447 | vs->vs_events_missed = false; | ||
448 | } | ||
449 | |||
450 | eventp = vq->iov[out].iov_base; | ||
451 | ret = __copy_to_user(eventp, event, sizeof(*event)); | ||
452 | if (!ret) | ||
453 | vhost_add_used_and_signal(&vs->dev, vq, head, 0); | ||
454 | else | ||
455 | vq_err(vq, "Faulted on tcm_vhost_send_event\n"); | ||
456 | } | ||
457 | |||
458 | static void tcm_vhost_evt_work(struct vhost_work *work) | ||
459 | { | ||
460 | struct vhost_scsi *vs = container_of(work, struct vhost_scsi, | ||
461 | vs_event_work); | ||
462 | struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT]; | ||
463 | struct tcm_vhost_evt *evt; | ||
464 | struct llist_node *llnode; | ||
465 | |||
466 | mutex_lock(&vq->mutex); | ||
467 | llnode = llist_del_all(&vs->vs_event_list); | ||
468 | while (llnode) { | ||
469 | evt = llist_entry(llnode, struct tcm_vhost_evt, list); | ||
470 | llnode = llist_next(llnode); | ||
471 | tcm_vhost_do_evt_work(vs, evt); | ||
472 | tcm_vhost_free_evt(vs, evt); | ||
473 | } | ||
474 | mutex_unlock(&vq->mutex); | ||
475 | } | ||
476 | |||
370 | /* Fill in status and signal that we are done processing this command | 477 | /* Fill in status and signal that we are done processing this command |
371 | * | 478 | * |
372 | * This is scheduled in the vhost work queue so we are called with the owner | 479 | * This is scheduled in the vhost work queue so we are called with the owner |
@@ -777,9 +884,46 @@ static void vhost_scsi_ctl_handle_kick(struct vhost_work *work) | |||
777 | pr_debug("%s: The handling func for control queue.\n", __func__); | 884 | pr_debug("%s: The handling func for control queue.\n", __func__); |
778 | } | 885 | } |
779 | 886 | ||
887 | static void tcm_vhost_send_evt(struct vhost_scsi *vs, struct tcm_vhost_tpg *tpg, | ||
888 | struct se_lun *lun, u32 event, u32 reason) | ||
889 | { | ||
890 | struct tcm_vhost_evt *evt; | ||
891 | |||
892 | evt = tcm_vhost_allocate_evt(vs, event, reason); | ||
893 | if (!evt) | ||
894 | return; | ||
895 | |||
896 | if (tpg && lun) { | ||
897 | /* TODO: share lun setup code with virtio-scsi.ko */ | ||
898 | /* | ||
899 | * Note: evt->event is zeroed when we allocate it and | ||
900 | * lun[4-7] need to be zero according to virtio-scsi spec. | ||
901 | */ | ||
902 | evt->event.lun[0] = 0x01; | ||
903 | evt->event.lun[1] = tpg->tport_tpgt & 0xFF; | ||
904 | if (lun->unpacked_lun >= 256) | ||
905 | evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40 ; | ||
906 | evt->event.lun[3] = lun->unpacked_lun & 0xFF; | ||
907 | } | ||
908 | |||
909 | llist_add(&evt->list, &vs->vs_event_list); | ||
910 | vhost_work_queue(&vs->dev, &vs->vs_event_work); | ||
911 | } | ||
912 | |||
780 | static void vhost_scsi_evt_handle_kick(struct vhost_work *work) | 913 | static void vhost_scsi_evt_handle_kick(struct vhost_work *work) |
781 | { | 914 | { |
782 | pr_debug("%s: The handling func for event queue.\n", __func__); | 915 | struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, |
916 | poll.work); | ||
917 | struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev); | ||
918 | |||
919 | mutex_lock(&vq->mutex); | ||
920 | if (!vq->private_data) | ||
921 | goto out; | ||
922 | |||
923 | if (vs->vs_events_missed) | ||
924 | tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0); | ||
925 | out: | ||
926 | mutex_unlock(&vq->mutex); | ||
783 | } | 927 | } |
784 | 928 | ||
785 | static void vhost_scsi_handle_kick(struct vhost_work *work) | 929 | static void vhost_scsi_handle_kick(struct vhost_work *work) |
@@ -803,6 +947,7 @@ static void vhost_scsi_flush(struct vhost_scsi *vs) | |||
803 | for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) | 947 | for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) |
804 | vhost_scsi_flush_vq(vs, i); | 948 | vhost_scsi_flush_vq(vs, i); |
805 | vhost_work_flush(&vs->dev, &vs->vs_completion_work); | 949 | vhost_work_flush(&vs->dev, &vs->vs_completion_work); |
950 | vhost_work_flush(&vs->dev, &vs->vs_event_work); | ||
806 | } | 951 | } |
807 | 952 | ||
808 | /* | 953 | /* |
@@ -864,6 +1009,7 @@ static int vhost_scsi_set_endpoint( | |||
864 | goto out; | 1009 | goto out; |
865 | } | 1010 | } |
866 | tv_tpg->tv_tpg_vhost_count++; | 1011 | tv_tpg->tv_tpg_vhost_count++; |
1012 | tv_tpg->vhost_scsi = vs; | ||
867 | vs_tpg[tv_tpg->tport_tpgt] = tv_tpg; | 1013 | vs_tpg[tv_tpg->tport_tpgt] = tv_tpg; |
868 | smp_mb__after_atomic_inc(); | 1014 | smp_mb__after_atomic_inc(); |
869 | match = true; | 1015 | match = true; |
@@ -949,6 +1095,7 @@ static int vhost_scsi_clear_endpoint( | |||
949 | goto err_tpg; | 1095 | goto err_tpg; |
950 | } | 1096 | } |
951 | tv_tpg->tv_tpg_vhost_count--; | 1097 | tv_tpg->tv_tpg_vhost_count--; |
1098 | tv_tpg->vhost_scsi = NULL; | ||
952 | vs->vs_tpg[target] = NULL; | 1099 | vs->vs_tpg[target] = NULL; |
953 | match = true; | 1100 | match = true; |
954 | mutex_unlock(&tv_tpg->tv_tpg_mutex); | 1101 | mutex_unlock(&tv_tpg->tv_tpg_mutex); |
@@ -969,6 +1116,7 @@ static int vhost_scsi_clear_endpoint( | |||
969 | vhost_scsi_flush(vs); | 1116 | vhost_scsi_flush(vs); |
970 | kfree(vs->vs_tpg); | 1117 | kfree(vs->vs_tpg); |
971 | vs->vs_tpg = NULL; | 1118 | vs->vs_tpg = NULL; |
1119 | WARN_ON(vs->vs_events_nr); | ||
972 | mutex_unlock(&vs->dev.mutex); | 1120 | mutex_unlock(&vs->dev.mutex); |
973 | mutex_unlock(&tcm_vhost_mutex); | 1121 | mutex_unlock(&tcm_vhost_mutex); |
974 | return 0; | 1122 | return 0; |
@@ -1009,6 +1157,10 @@ static int vhost_scsi_open(struct inode *inode, struct file *f) | |||
1009 | return -ENOMEM; | 1157 | return -ENOMEM; |
1010 | 1158 | ||
1011 | vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work); | 1159 | vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work); |
1160 | vhost_work_init(&s->vs_event_work, tcm_vhost_evt_work); | ||
1161 | |||
1162 | s->vs_events_nr = 0; | ||
1163 | s->vs_events_missed = false; | ||
1012 | 1164 | ||
1013 | s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick; | 1165 | s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick; |
1014 | s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick; | 1166 | s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick; |
@@ -1035,6 +1187,8 @@ static int vhost_scsi_release(struct inode *inode, struct file *f) | |||
1035 | vhost_scsi_clear_endpoint(s, &t); | 1187 | vhost_scsi_clear_endpoint(s, &t); |
1036 | vhost_dev_stop(&s->dev); | 1188 | vhost_dev_stop(&s->dev); |
1037 | vhost_dev_cleanup(&s->dev, false); | 1189 | vhost_dev_cleanup(&s->dev, false); |
1190 | /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */ | ||
1191 | vhost_scsi_flush(s); | ||
1038 | kfree(s); | 1192 | kfree(s); |
1039 | return 0; | 1193 | return 0; |
1040 | } | 1194 | } |
@@ -1139,28 +1293,80 @@ static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport) | |||
1139 | return "Unknown"; | 1293 | return "Unknown"; |
1140 | } | 1294 | } |
1141 | 1295 | ||
1296 | static void tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg, | ||
1297 | struct se_lun *lun, bool plug) | ||
1298 | { | ||
1299 | |||
1300 | struct vhost_scsi *vs = tpg->vhost_scsi; | ||
1301 | struct vhost_virtqueue *vq; | ||
1302 | u32 reason; | ||
1303 | |||
1304 | if (!vs) | ||
1305 | return; | ||
1306 | |||
1307 | mutex_lock(&vs->dev.mutex); | ||
1308 | if (!vhost_has_feature(&vs->dev, VIRTIO_SCSI_F_HOTPLUG)) { | ||
1309 | mutex_unlock(&vs->dev.mutex); | ||
1310 | return; | ||
1311 | } | ||
1312 | |||
1313 | if (plug) | ||
1314 | reason = VIRTIO_SCSI_EVT_RESET_RESCAN; | ||
1315 | else | ||
1316 | reason = VIRTIO_SCSI_EVT_RESET_REMOVED; | ||
1317 | |||
1318 | vq = &vs->vqs[VHOST_SCSI_VQ_EVT]; | ||
1319 | mutex_lock(&vq->mutex); | ||
1320 | tcm_vhost_send_evt(vs, tpg, lun, | ||
1321 | VIRTIO_SCSI_T_TRANSPORT_RESET, reason); | ||
1322 | mutex_unlock(&vq->mutex); | ||
1323 | mutex_unlock(&vs->dev.mutex); | ||
1324 | } | ||
1325 | |||
1326 | static void tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun) | ||
1327 | { | ||
1328 | tcm_vhost_do_plug(tpg, lun, true); | ||
1329 | } | ||
1330 | |||
1331 | static void tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun) | ||
1332 | { | ||
1333 | tcm_vhost_do_plug(tpg, lun, false); | ||
1334 | } | ||
1335 | |||
1142 | static int tcm_vhost_port_link(struct se_portal_group *se_tpg, | 1336 | static int tcm_vhost_port_link(struct se_portal_group *se_tpg, |
1143 | struct se_lun *lun) | 1337 | struct se_lun *lun) |
1144 | { | 1338 | { |
1145 | struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg, | 1339 | struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg, |
1146 | struct tcm_vhost_tpg, se_tpg); | 1340 | struct tcm_vhost_tpg, se_tpg); |
1147 | 1341 | ||
1342 | mutex_lock(&tcm_vhost_mutex); | ||
1343 | |||
1148 | mutex_lock(&tv_tpg->tv_tpg_mutex); | 1344 | mutex_lock(&tv_tpg->tv_tpg_mutex); |
1149 | tv_tpg->tv_tpg_port_count++; | 1345 | tv_tpg->tv_tpg_port_count++; |
1150 | mutex_unlock(&tv_tpg->tv_tpg_mutex); | 1346 | mutex_unlock(&tv_tpg->tv_tpg_mutex); |
1151 | 1347 | ||
1348 | tcm_vhost_hotplug(tv_tpg, lun); | ||
1349 | |||
1350 | mutex_unlock(&tcm_vhost_mutex); | ||
1351 | |||
1152 | return 0; | 1352 | return 0; |
1153 | } | 1353 | } |
1154 | 1354 | ||
1155 | static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg, | 1355 | static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg, |
1156 | struct se_lun *se_lun) | 1356 | struct se_lun *lun) |
1157 | { | 1357 | { |
1158 | struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg, | 1358 | struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg, |
1159 | struct tcm_vhost_tpg, se_tpg); | 1359 | struct tcm_vhost_tpg, se_tpg); |
1160 | 1360 | ||
1361 | mutex_lock(&tcm_vhost_mutex); | ||
1362 | |||
1161 | mutex_lock(&tv_tpg->tv_tpg_mutex); | 1363 | mutex_lock(&tv_tpg->tv_tpg_mutex); |
1162 | tv_tpg->tv_tpg_port_count--; | 1364 | tv_tpg->tv_tpg_port_count--; |
1163 | mutex_unlock(&tv_tpg->tv_tpg_mutex); | 1365 | mutex_unlock(&tv_tpg->tv_tpg_mutex); |
1366 | |||
1367 | tcm_vhost_hotunplug(tv_tpg, lun); | ||
1368 | |||
1369 | mutex_unlock(&tcm_vhost_mutex); | ||
1164 | } | 1370 | } |
1165 | 1371 | ||
1166 | static struct se_node_acl *tcm_vhost_make_nodeacl( | 1372 | static struct se_node_acl *tcm_vhost_make_nodeacl( |
diff --git a/drivers/vhost/tcm_vhost.h b/drivers/vhost/tcm_vhost.h index 1d2ae7a60e11..a545a5b766a3 100644 --- a/drivers/vhost/tcm_vhost.h +++ b/drivers/vhost/tcm_vhost.h | |||
@@ -53,6 +53,7 @@ struct tcm_vhost_nacl { | |||
53 | struct se_node_acl se_node_acl; | 53 | struct se_node_acl se_node_acl; |
54 | }; | 54 | }; |
55 | 55 | ||
56 | struct vhost_scsi; | ||
56 | struct tcm_vhost_tpg { | 57 | struct tcm_vhost_tpg { |
57 | /* Vhost port target portal group tag for TCM */ | 58 | /* Vhost port target portal group tag for TCM */ |
58 | u16 tport_tpgt; | 59 | u16 tport_tpgt; |
@@ -70,6 +71,8 @@ struct tcm_vhost_tpg { | |||
70 | struct tcm_vhost_tport *tport; | 71 | struct tcm_vhost_tport *tport; |
71 | /* Returned by tcm_vhost_make_tpg() */ | 72 | /* Returned by tcm_vhost_make_tpg() */ |
72 | struct se_portal_group se_tpg; | 73 | struct se_portal_group se_tpg; |
74 | /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */ | ||
75 | struct vhost_scsi *vhost_scsi; | ||
73 | }; | 76 | }; |
74 | 77 | ||
75 | struct tcm_vhost_tport { | 78 | struct tcm_vhost_tport { |
@@ -83,6 +86,13 @@ struct tcm_vhost_tport { | |||
83 | struct se_wwn tport_wwn; | 86 | struct se_wwn tport_wwn; |
84 | }; | 87 | }; |
85 | 88 | ||
89 | struct tcm_vhost_evt { | ||
90 | /* event to be sent to guest */ | ||
91 | struct virtio_scsi_event event; | ||
92 | /* event list, serviced from vhost worker thread */ | ||
93 | struct llist_node list; | ||
94 | }; | ||
95 | |||
86 | /* | 96 | /* |
87 | * As per request from MST, keep TCM_VHOST related ioctl defines out of | 97 | * As per request from MST, keep TCM_VHOST related ioctl defines out of |
88 | * linux/vhost.h (user-space) for now.. | 98 | * linux/vhost.h (user-space) for now.. |