 drivers/vhost/net.c       | 64 +++++++++++++++++++++++++++++++++++++++++-----------------------
 drivers/vhost/tcm_vhost.c | 55 +++++++++++++++++++++++++++++++++------------------
 drivers/vhost/vhost.c     | 88 ++++++++++++++++++++++++++++++++++++++++--------------------------------------------
 drivers/vhost/vhost.h     |  4 ++--
 4 files changed, 124 insertions(+), 87 deletions(-)
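The change is mechanical but worth stating up front: struct vhost_dev stops embedding the virtqueue array and instead holds an array of pointers, so vhost_net and vhost_scsi can each wrap struct vhost_virtqueue in a device-private type and, in later patches, hang per-vq state off that wrapper. A minimal sketch of the before/after layout (the _before/_after names are illustrative stand-ins, not kernel identifiers):

struct vhost_virtqueue { int placeholder; };	/* stand-in for the real struct */

/* Before this patch: the core owned a flat array of virtqueues, so a
 * device had nowhere to keep per-vq state of its own. */
struct vhost_dev_before {
	struct vhost_virtqueue *vqs;		/* vqs[i] lives in the core */
};

/* After this patch: the core holds only pointers; the device embeds
 * each vq in a wrapper it controls. */
struct vhost_net_virtqueue_sketch {
	struct vhost_virtqueue vq;
	/* device-specific per-vq fields can be added here later */
};

struct vhost_dev_after {
	struct vhost_virtqueue **vqs;		/* vqs[i] == &wrapper[i].vq */
};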
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 87c216c1e54e..176aa030dc5f 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -64,9 +64,13 @@ enum {
 	VHOST_NET_VQ_MAX = 2,
 };
 
+struct vhost_net_virtqueue {
+	struct vhost_virtqueue vq;
+};
+
 struct vhost_net {
 	struct vhost_dev dev;
-	struct vhost_virtqueue vqs[VHOST_NET_VQ_MAX];
+	struct vhost_net_virtqueue vqs[VHOST_NET_VQ_MAX];
 	struct vhost_poll poll[VHOST_NET_VQ_MAX];
 	/* Number of TX recently submitted.
 	 * Protected by tx vq lock. */
@@ -198,7 +202,7 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
  * read-size critical section for our kind of RCU. */
 static void handle_tx(struct vhost_net *net)
 {
-	struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_TX];
+	struct vhost_virtqueue *vq = &net->vqs[VHOST_NET_VQ_TX].vq;
 	unsigned out, in, s;
 	int head;
 	struct msghdr msg = {
@@ -417,7 +421,7 @@ err:
  * read-size critical section for our kind of RCU. */
 static void handle_rx(struct vhost_net *net)
 {
-	struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX];
+	struct vhost_virtqueue *vq = &net->vqs[VHOST_NET_VQ_RX].vq;
 	unsigned uninitialized_var(in), log;
 	struct vhost_log *vq_log;
 	struct msghdr msg = {
@@ -559,17 +563,26 @@ static int vhost_net_open(struct inode *inode, struct file *f)
 {
 	struct vhost_net *n = kmalloc(sizeof *n, GFP_KERNEL);
 	struct vhost_dev *dev;
+	struct vhost_virtqueue **vqs;
 	int r;
 
 	if (!n)
 		return -ENOMEM;
+	vqs = kmalloc(VHOST_NET_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
+	if (!vqs) {
+		kfree(n);
+		return -ENOMEM;
+	}
 
 	dev = &n->dev;
-	n->vqs[VHOST_NET_VQ_TX].handle_kick = handle_tx_kick;
-	n->vqs[VHOST_NET_VQ_RX].handle_kick = handle_rx_kick;
-	r = vhost_dev_init(dev, n->vqs, VHOST_NET_VQ_MAX);
+	vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq;
+	vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq;
+	n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick;
+	n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick;
+	r = vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);
 	if (r < 0) {
 		kfree(n);
+		kfree(vqs);
 		return r;
 	}
 
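One subtlety in the open path above: the pointer array is a separate allocation from struct vhost_net. vhost_dev_init() stores it as dev->vqs, so the error path must free both n and vqs, and the normal path frees it later as kfree(n->dev.vqs) in vhost_net_release(). A minimal userspace mock of that ownership contract (mock_dev_init is a stand-in, not the kernel function):

#include <stdlib.h>

struct vhost_virtqueue { int placeholder; };
struct vhost_dev { struct vhost_virtqueue **vqs; int nvqs; };

/* The core borrows the caller-allocated pointer array; it never copies
 * or frees it itself. */
static long mock_dev_init(struct vhost_dev *dev,
			  struct vhost_virtqueue **vqs, int nvqs)
{
	dev->vqs = vqs;
	dev->nvqs = nvqs;
	return 0;
}

int main(void)
{
	struct vhost_virtqueue tx, rx;
	struct vhost_dev dev;
	struct vhost_virtqueue **vqs = malloc(2 * sizeof(*vqs));

	if (!vqs)
		return 1;
	vqs[0] = &tx;
	vqs[1] = &rx;
	mock_dev_init(&dev, vqs, 2);
	/* ... device lifetime ... */
	free(dev.vqs);	/* mirrors kfree(n->dev.vqs) in vhost_net_release() */
	return 0;
}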
@@ -584,7 +597,9 @@ static int vhost_net_open(struct inode *inode, struct file *f)
 static void vhost_net_disable_vq(struct vhost_net *n,
 				 struct vhost_virtqueue *vq)
 {
-	struct vhost_poll *poll = n->poll + (vq - n->vqs);
+	struct vhost_net_virtqueue *nvq =
+		container_of(vq, struct vhost_net_virtqueue, vq);
+	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
 	if (!vq->private_data)
 		return;
 	vhost_poll_stop(poll);
@@ -593,7 +608,9 @@ static void vhost_net_disable_vq(struct vhost_net *n,
 static int vhost_net_enable_vq(struct vhost_net *n,
 				struct vhost_virtqueue *vq)
 {
-	struct vhost_poll *poll = n->poll + (vq - n->vqs);
+	struct vhost_net_virtqueue *nvq =
+		container_of(vq, struct vhost_net_virtqueue, vq);
+	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
 	struct socket *sock;
 
 	sock = rcu_dereference_protected(vq->private_data,
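The two helpers above can no longer index n->poll with (vq - n->vqs), because n->vqs is now an array of wrappers rather than of vhost_virtqueue. They recover the wrapper with container_of() and take the index from wrapper-array arithmetic instead. A standalone userspace demonstration of that arithmetic, assuming only the struct layout this patch introduces:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct vhost_virtqueue { int placeholder; };
struct vhost_net_virtqueue { struct vhost_virtqueue vq; };

int main(void)
{
	struct vhost_net_virtqueue vqs[2];
	/* The core hands back a pointer to the embedded member... */
	struct vhost_virtqueue *vq = &vqs[1].vq;
	/* ...container_of() steps back to the enclosing wrapper... */
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);

	/* ...and wrapper-array arithmetic yields the index. */
	printf("index = %td\n", nvq - vqs);	/* prints: index = 1 */
	return 0;
}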
@@ -621,30 +638,30 @@ static struct socket *vhost_net_stop_vq(struct vhost_net *n,
 static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
 			   struct socket **rx_sock)
 {
-	*tx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_TX);
-	*rx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_RX);
+	*tx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_TX].vq);
+	*rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq);
 }
 
 static void vhost_net_flush_vq(struct vhost_net *n, int index)
 {
 	vhost_poll_flush(n->poll + index);
-	vhost_poll_flush(&n->dev.vqs[index].poll);
+	vhost_poll_flush(&n->vqs[index].vq.poll);
 }
 
 static void vhost_net_flush(struct vhost_net *n)
 {
 	vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
 	vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
-	if (n->dev.vqs[VHOST_NET_VQ_TX].ubufs) {
-		mutex_lock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
+	if (n->vqs[VHOST_NET_VQ_TX].vq.ubufs) {
+		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
 		n->tx_flush = true;
-		mutex_unlock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
+		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
 		/* Wait for all lower device DMAs done. */
-		vhost_ubuf_put_and_wait(n->dev.vqs[VHOST_NET_VQ_TX].ubufs);
-		mutex_lock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
+		vhost_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].vq.ubufs);
+		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
 		n->tx_flush = false;
-		kref_init(&n->dev.vqs[VHOST_NET_VQ_TX].ubufs->kref);
-		mutex_unlock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
+		kref_init(&n->vqs[VHOST_NET_VQ_TX].vq.ubufs->kref);
+		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
 	}
 }
 
@@ -665,6 +682,7 @@ static int vhost_net_release(struct inode *inode, struct file *f)
 	/* We do an extra flush before freeing memory,
 	 * since jobs can re-queue themselves. */
 	vhost_net_flush(n);
+	kfree(n->dev.vqs);
 	kfree(n);
 	return 0;
 }
@@ -750,7 +768,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
 		r = -ENOBUFS;
 		goto err;
 	}
-	vq = n->vqs + index;
+	vq = &n->vqs[index].vq;
 	mutex_lock(&vq->mutex);
 
 	/* Verify that ring has been setup correctly. */
@@ -870,10 +888,10 @@ static int vhost_net_set_features(struct vhost_net *n, u64 features)
 	n->dev.acked_features = features;
 	smp_wmb();
 	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
-		mutex_lock(&n->vqs[i].mutex);
-		n->vqs[i].vhost_hlen = vhost_hlen;
-		n->vqs[i].sock_hlen = sock_hlen;
-		mutex_unlock(&n->vqs[i].mutex);
+		mutex_lock(&n->vqs[i].vq.mutex);
+		n->vqs[i].vq.vhost_hlen = vhost_hlen;
+		n->vqs[i].vq.sock_hlen = sock_hlen;
+		mutex_unlock(&n->vqs[i].vq.mutex);
 	}
 	vhost_net_flush(n);
 	mutex_unlock(&n->dev.mutex);
diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
index 1677238d281f..99d3480450e7 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/tcm_vhost.c
@@ -74,13 +74,17 @@ enum {
 #define VHOST_SCSI_MAX_VQ	128
 #define VHOST_SCSI_MAX_EVENT	128
 
+struct vhost_scsi_virtqueue {
+	struct vhost_virtqueue vq;
+};
+
 struct vhost_scsi {
 	/* Protected by vhost_scsi->dev.mutex */
 	struct tcm_vhost_tpg **vs_tpg;
 	char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
 
 	struct vhost_dev dev;
-	struct vhost_virtqueue vqs[VHOST_SCSI_MAX_VQ];
+	struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];
 
 	struct vhost_work vs_completion_work; /* cmd completion work item */
 	struct llist_head vs_completion_list; /* cmd completion queue */
@@ -366,7 +370,7 @@ static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
 static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct vhost_scsi *vs,
 	u32 event, u32 reason)
 {
-	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
+	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
 	struct tcm_vhost_evt *evt;
 
 	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
@@ -409,7 +413,7 @@ static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
 static void tcm_vhost_do_evt_work(struct vhost_scsi *vs,
 	struct tcm_vhost_evt *evt)
 {
-	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
+	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
 	struct virtio_scsi_event *event = &evt->event;
 	struct virtio_scsi_event __user *eventp;
 	unsigned out, in;
@@ -460,7 +464,7 @@ static void tcm_vhost_evt_work(struct vhost_work *work)
 {
 	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
 					vs_event_work);
-	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
+	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
 	struct tcm_vhost_evt *evt;
 	struct llist_node *llnode;
 
@@ -511,8 +515,10 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
 					v_rsp.sense_len);
 		ret = copy_to_user(tv_cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
 		if (likely(ret == 0)) {
+			struct vhost_scsi_virtqueue *q;
 			vhost_add_used(tv_cmd->tvc_vq, tv_cmd->tvc_vq_desc, 0);
-			vq = tv_cmd->tvc_vq - vs->vqs;
+			q = container_of(tv_cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
+			vq = q - vs->vqs;
 			__set_bit(vq, signal);
 		} else
 			pr_err("Faulted on virtio_scsi_cmd_resp\n");
@@ -523,7 +529,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
 	vq = -1;
 	while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
 		< VHOST_SCSI_MAX_VQ)
-		vhost_signal(&vs->dev, &vs->vqs[vq]);
+		vhost_signal(&vs->dev, &vs->vqs[vq].vq);
 }
 
 static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
@@ -938,7 +944,7 @@ static void vhost_scsi_handle_kick(struct vhost_work *work)
 
 static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
 {
-	vhost_poll_flush(&vs->dev.vqs[index].poll);
+	vhost_poll_flush(&vs->vqs[index].vq.poll);
 }
 
 static void vhost_scsi_flush(struct vhost_scsi *vs)
@@ -975,7 +981,7 @@ static int vhost_scsi_set_endpoint(
 	/* Verify that ring has been setup correctly. */
 	for (index = 0; index < vs->dev.nvqs; ++index) {
 		/* Verify that ring has been setup correctly. */
-		if (!vhost_vq_access_ok(&vs->vqs[index])) {
+		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
 			ret = -EFAULT;
 			goto out;
 		}
@@ -1022,7 +1028,7 @@ static int vhost_scsi_set_endpoint(
 		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
 		       sizeof(vs->vs_vhost_wwpn));
 		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
-			vq = &vs->vqs[i];
+			vq = &vs->vqs[i].vq;
 			/* Flushing the vhost_work acts as synchronize_rcu */
 			mutex_lock(&vq->mutex);
 			rcu_assign_pointer(vq->private_data, vs_tpg);
@@ -1063,7 +1069,7 @@ static int vhost_scsi_clear_endpoint(
 	mutex_lock(&vs->dev.mutex);
 	/* Verify that ring has been setup correctly. */
 	for (index = 0; index < vs->dev.nvqs; ++index) {
-		if (!vhost_vq_access_ok(&vs->vqs[index])) {
+		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
 			ret = -EFAULT;
 			goto err_dev;
 		}
@@ -1103,7 +1109,7 @@ static int vhost_scsi_clear_endpoint(
 	}
 	if (match) {
 		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
-			vq = &vs->vqs[i];
+			vq = &vs->vqs[i].vq;
 			/* Flushing the vhost_work acts as synchronize_rcu */
 			mutex_lock(&vq->mutex);
 			rcu_assign_pointer(vq->private_data, NULL);
@@ -1151,24 +1157,36 @@ static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
 static int vhost_scsi_open(struct inode *inode, struct file *f)
 {
 	struct vhost_scsi *s;
+	struct vhost_virtqueue **vqs;
 	int r, i;
 
 	s = kzalloc(sizeof(*s), GFP_KERNEL);
 	if (!s)
 		return -ENOMEM;
 
+	vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
+	if (!vqs) {
+		kfree(s);
+		return -ENOMEM;
+	}
+
 	vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work);
 	vhost_work_init(&s->vs_event_work, tcm_vhost_evt_work);
 
 	s->vs_events_nr = 0;
 	s->vs_events_missed = false;
 
-	s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick;
-	s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick;
-	for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++)
-		s->vqs[i].handle_kick = vhost_scsi_handle_kick;
-	r = vhost_dev_init(&s->dev, s->vqs, VHOST_SCSI_MAX_VQ);
+	vqs[VHOST_SCSI_VQ_CTL] = &s->vqs[VHOST_SCSI_VQ_CTL].vq;
+	vqs[VHOST_SCSI_VQ_EVT] = &s->vqs[VHOST_SCSI_VQ_EVT].vq;
+	s->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
+	s->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
+	for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
+		vqs[i] = &s->vqs[i].vq;
+		s->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
+	}
+	r = vhost_dev_init(&s->dev, vqs, VHOST_SCSI_MAX_VQ);
 	if (r < 0) {
+		kfree(vqs);
 		kfree(s);
 		return r;
 	}
@@ -1190,6 +1208,7 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
 	vhost_dev_cleanup(&s->dev, false);
 	/* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
 	vhost_scsi_flush(s);
+	kfree(s->dev.vqs);
 	kfree(s);
 	return 0;
 }
@@ -1205,7 +1224,7 @@ static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
 	u32 events_missed;
 	u64 features;
 	int r, abi_version = VHOST_SCSI_ABI_VERSION;
-	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
+	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
 
 	switch (ioctl) {
 	case VHOST_SCSI_SET_ENDPOINT:
@@ -1333,7 +1352,7 @@ static void tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
 	else
 		reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
 
-	vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
+	vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
 	mutex_lock(&vq->mutex);
 	tcm_vhost_send_evt(vs, tpg, lun,
 			   VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 4eecdb867d53..bef8b6bae186 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -269,27 +269,27 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
 	bool zcopy;
 
 	for (i = 0; i < dev->nvqs; ++i) {
-		dev->vqs[i].indirect = kmalloc(sizeof *dev->vqs[i].indirect *
-					       UIO_MAXIOV, GFP_KERNEL);
-		dev->vqs[i].log = kmalloc(sizeof *dev->vqs[i].log * UIO_MAXIOV,
-					  GFP_KERNEL);
-		dev->vqs[i].heads = kmalloc(sizeof *dev->vqs[i].heads *
-					    UIO_MAXIOV, GFP_KERNEL);
+		dev->vqs[i]->indirect = kmalloc(sizeof *dev->vqs[i]->indirect *
+					       UIO_MAXIOV, GFP_KERNEL);
+		dev->vqs[i]->log = kmalloc(sizeof *dev->vqs[i]->log * UIO_MAXIOV,
+					  GFP_KERNEL);
+		dev->vqs[i]->heads = kmalloc(sizeof *dev->vqs[i]->heads *
+					    UIO_MAXIOV, GFP_KERNEL);
 		zcopy = vhost_zcopy_mask & (0x1 << i);
 		if (zcopy)
-			dev->vqs[i].ubuf_info =
-				kmalloc(sizeof *dev->vqs[i].ubuf_info *
-					UIO_MAXIOV, GFP_KERNEL);
-		if (!dev->vqs[i].indirect || !dev->vqs[i].log ||
-		    !dev->vqs[i].heads ||
-		    (zcopy && !dev->vqs[i].ubuf_info))
+			dev->vqs[i]->ubuf_info =
+				kmalloc(sizeof *dev->vqs[i]->ubuf_info *
+					UIO_MAXIOV, GFP_KERNEL);
+		if (!dev->vqs[i]->indirect || !dev->vqs[i]->log ||
+		    !dev->vqs[i]->heads ||
+		    (zcopy && !dev->vqs[i]->ubuf_info))
 			goto err_nomem;
 	}
 	return 0;
 
 err_nomem:
 	for (; i >= 0; --i)
-		vhost_vq_free_iovecs(&dev->vqs[i]);
+		vhost_vq_free_iovecs(dev->vqs[i]);
 	return -ENOMEM;
 }
 
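Note the err_nomem unwind above: it starts at the failing index i and counts down through 0, so vhost_vq_free_iovecs() also runs on the partially allocated entry at i. That is safe because vhost_dev_init() pre-clears the pointers and kfree(NULL) is a no-op. The same idiom in a generic, self-contained userspace form (alloc_all is a made-up name):

#include <stdlib.h>

/* On failure at index i, free entries i down to 0. The entry at the
 * failed index may be NULL or half-built; free(NULL) being a no-op
 * lets a single loop handle both cases. */
static int alloc_all(void **slots, int n)
{
	int i;

	for (i = 0; i < n; ++i) {
		slots[i] = malloc(64);
		if (!slots[i])
			goto err_nomem;
	}
	return 0;

err_nomem:
	for (; i >= 0; --i) {	/* note: includes the failed index */
		free(slots[i]);
		slots[i] = NULL;
	}
	return -1;
}

int main(void)
{
	void *slots[4];
	int i;

	if (alloc_all(slots, 4))
		return 1;
	for (i = 0; i < 4; ++i)
		free(slots[i]);
	return 0;
}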
@@ -298,11 +298,11 @@ static void vhost_dev_free_iovecs(struct vhost_dev *dev)
 	int i;
 
 	for (i = 0; i < dev->nvqs; ++i)
-		vhost_vq_free_iovecs(&dev->vqs[i]);
+		vhost_vq_free_iovecs(dev->vqs[i]);
 }
 
 long vhost_dev_init(struct vhost_dev *dev,
-		    struct vhost_virtqueue *vqs, int nvqs)
+		    struct vhost_virtqueue **vqs, int nvqs)
 {
 	int i;
 
@@ -318,16 +318,16 @@ long vhost_dev_init(struct vhost_dev *dev,
 	dev->worker = NULL;
 
 	for (i = 0; i < dev->nvqs; ++i) {
-		dev->vqs[i].log = NULL;
-		dev->vqs[i].indirect = NULL;
-		dev->vqs[i].heads = NULL;
-		dev->vqs[i].ubuf_info = NULL;
-		dev->vqs[i].dev = dev;
-		mutex_init(&dev->vqs[i].mutex);
-		vhost_vq_reset(dev, dev->vqs + i);
-		if (dev->vqs[i].handle_kick)
-			vhost_poll_init(&dev->vqs[i].poll,
-					dev->vqs[i].handle_kick, POLLIN, dev);
+		dev->vqs[i]->log = NULL;
+		dev->vqs[i]->indirect = NULL;
+		dev->vqs[i]->heads = NULL;
+		dev->vqs[i]->ubuf_info = NULL;
+		dev->vqs[i]->dev = dev;
+		mutex_init(&dev->vqs[i]->mutex);
+		vhost_vq_reset(dev, dev->vqs[i]);
+		if (dev->vqs[i]->handle_kick)
+			vhost_poll_init(&dev->vqs[i]->poll,
+					dev->vqs[i]->handle_kick, POLLIN, dev);
 	}
 
 	return 0;
@@ -430,9 +430,9 @@ void vhost_dev_stop(struct vhost_dev *dev)
 	int i;
 
 	for (i = 0; i < dev->nvqs; ++i) {
-		if (dev->vqs[i].kick && dev->vqs[i].handle_kick) {
-			vhost_poll_stop(&dev->vqs[i].poll);
-			vhost_poll_flush(&dev->vqs[i].poll);
+		if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
+			vhost_poll_stop(&dev->vqs[i]->poll);
+			vhost_poll_flush(&dev->vqs[i]->poll);
 		}
 	}
 }
@@ -443,17 +443,17 @@ void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
 	int i;
 
 	for (i = 0; i < dev->nvqs; ++i) {
-		if (dev->vqs[i].error_ctx)
-			eventfd_ctx_put(dev->vqs[i].error_ctx);
-		if (dev->vqs[i].error)
-			fput(dev->vqs[i].error);
-		if (dev->vqs[i].kick)
-			fput(dev->vqs[i].kick);
-		if (dev->vqs[i].call_ctx)
-			eventfd_ctx_put(dev->vqs[i].call_ctx);
-		if (dev->vqs[i].call)
-			fput(dev->vqs[i].call);
-		vhost_vq_reset(dev, dev->vqs + i);
+		if (dev->vqs[i]->error_ctx)
+			eventfd_ctx_put(dev->vqs[i]->error_ctx);
+		if (dev->vqs[i]->error)
+			fput(dev->vqs[i]->error);
+		if (dev->vqs[i]->kick)
+			fput(dev->vqs[i]->kick);
+		if (dev->vqs[i]->call_ctx)
+			eventfd_ctx_put(dev->vqs[i]->call_ctx);
+		if (dev->vqs[i]->call)
+			fput(dev->vqs[i]->call);
+		vhost_vq_reset(dev, dev->vqs[i]);
 	}
 	vhost_dev_free_iovecs(dev);
 	if (dev->log_ctx)
@@ -524,14 +524,14 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
 
 	for (i = 0; i < d->nvqs; ++i) {
 		int ok;
-		mutex_lock(&d->vqs[i].mutex);
+		mutex_lock(&d->vqs[i]->mutex);
 		/* If ring is inactive, will check when it's enabled. */
-		if (d->vqs[i].private_data)
-			ok = vq_memory_access_ok(d->vqs[i].log_base, mem,
+		if (d->vqs[i]->private_data)
+			ok = vq_memory_access_ok(d->vqs[i]->log_base, mem,
 						 log_all);
 		else
 			ok = 1;
-		mutex_unlock(&d->vqs[i].mutex);
+		mutex_unlock(&d->vqs[i]->mutex);
 		if (!ok)
 			return 0;
 	}
@@ -641,7 +641,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
 	if (idx >= d->nvqs)
 		return -ENOBUFS;
 
-	vq = d->vqs + idx;
+	vq = d->vqs[idx];
 
 	mutex_lock(&vq->mutex);
 
@@ -852,7 +852,7 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
 		for (i = 0; i < d->nvqs; ++i) {
 			struct vhost_virtqueue *vq;
 			void __user *base = (void __user *)(unsigned long)p;
-			vq = d->vqs + i;
+			vq = d->vqs[i];
 			mutex_lock(&vq->mutex);
 			/* If ring is inactive, will check when it's enabled. */
 			if (vq->private_data && !vq_log_access_ok(d, vq, base))
@@ -879,9 +879,9 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
 		} else
 			filep = eventfp;
 		for (i = 0; i < d->nvqs; ++i) {
-			mutex_lock(&d->vqs[i].mutex);
-			d->vqs[i].log_ctx = d->log_ctx;
-			mutex_unlock(&d->vqs[i].mutex);
+			mutex_lock(&d->vqs[i]->mutex);
+			d->vqs[i]->log_ctx = d->log_ctx;
+			mutex_unlock(&d->vqs[i]->mutex);
 		}
 		if (ctx)
 			eventfd_ctx_put(ctx);
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 17261e277c02..f3afa8a41fe0 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -150,7 +150,7 @@ struct vhost_dev {
 	struct mm_struct *mm;
 	struct mutex mutex;
 	unsigned acked_features;
-	struct vhost_virtqueue *vqs;
+	struct vhost_virtqueue **vqs;
 	int nvqs;
 	struct file *log_file;
 	struct eventfd_ctx *log_ctx;
@@ -159,7 +159,7 @@ struct vhost_dev {
 	struct task_struct *worker;
 };
 
-long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue *vqs, int nvqs);
+long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
 long vhost_dev_check_owner(struct vhost_dev *);
 long vhost_dev_reset_owner(struct vhost_dev *);
 void vhost_dev_cleanup(struct vhost_dev *, bool locked);