author		Michael S. Tsirkin <mst@redhat.com>	2012-12-03 17:17:14 -0500
committer	Michael S. Tsirkin <mst@redhat.com>	2012-12-06 10:09:18 -0500
commit		1280c27f8e29acf4af2da914e80ec27c3dbd5c01 (patch)
tree		6b488908fab086a906656894801544210bd64335 /drivers
parent		935cdee7ee159569b0aaa10bd9244660f6672b08 (diff)
vhost-net: flush outstanding DMAs on memory change
When the memory map changes, we need to flush outstanding DMAs, as they might in theory reference old memory addresses. To do this, simply stop initiating new DMAs and wait for the ubufs ref count to drop to 0; afterwards, reset the count back to 1.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
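The message describes a standard quiesce pattern: hold one reference on behalf of the creator plus one per in-flight DMA; to flush, drop the creator's reference, wait for the count to hit 0, then re-arm it at 1 for reuse. A minimal, self-contained sketch of that pattern follows, assuming 2012-era kref internals (kref.refcount is an atomic_t). The dma_tracker names are hypothetical stand-ins for the real vhost_ubuf_ref machinery, not the actual kernel code.

	#include <linux/kernel.h>
	#include <linux/kref.h>
	#include <linux/wait.h>

	struct dma_tracker {
		struct kref kref;	/* 1 for the creator + 1 per in-flight DMA */
		wait_queue_head_t wait;	/* woken when the last reference drops */
	};

	static void dma_tracker_release(struct kref *kref)
	{
		struct dma_tracker *t = container_of(kref, struct dma_tracker, kref);

		wake_up(&t->wait);
	}

	static void dma_tracker_init(struct dma_tracker *t)
	{
		kref_init(&t->kref);	/* count starts at 1 (creator's reference) */
		init_waitqueue_head(&t->wait);
	}

	/* Each DMA takes a reference when started... */
	static void dma_tracker_get(struct dma_tracker *t)
	{
		kref_get(&t->kref);
	}

	/* ...and drops it on completion. */
	static void dma_tracker_put(struct dma_tracker *t)
	{
		kref_put(&t->kref, dma_tracker_release);
	}

	/*
	 * Flush: drop the creator's reference, wait for every outstanding
	 * DMA to drop its own, then reset the count to 1 so the tracker
	 * can be reused without reallocation.
	 */
	static void dma_tracker_put_and_wait(struct dma_tracker *t)
	{
		kref_put(&t->kref, dma_tracker_release);
		wait_event(t->wait, atomic_read(&t->kref.refcount) == 0);
		kref_init(&t->kref);
	}

Note that wait_event() re-checks its condition after every wakeup, so the wake_up() in the release callback cannot be lost even if it fires before the flusher goes to sleep.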
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/vhost/net.c	28
1 file changed, 19 insertions(+), 9 deletions(-)
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 1802ab662082..6a86deb39a72 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -83,6 +83,8 @@ struct vhost_net {
 	/* Number of times zerocopy TX recently failed.
 	 * Protected by tx vq lock. */
 	unsigned tx_zcopy_err;
+	/* Flush in progress. Protected by tx vq lock. */
+	bool tx_flush;
 };
 
 static void vhost_net_tx_packet(struct vhost_net *net)
@@ -101,7 +103,11 @@ static void vhost_net_tx_err(struct vhost_net *net)
 
 static bool vhost_net_tx_select_zcopy(struct vhost_net *net)
 {
-	return net->tx_packets / 64 >= net->tx_zcopy_err;
+	/* TX flush waits for outstanding DMAs to be done.
+	 * Don't start new DMAs.
+	 */
+	return !net->tx_flush &&
+	       net->tx_packets / 64 >= net->tx_zcopy_err;
 }
 
 static bool vhost_sock_zcopy(struct socket *sock)
@@ -679,6 +685,17 @@ static void vhost_net_flush(struct vhost_net *n)
 {
 	vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
 	vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
+	if (n->dev.vqs[VHOST_NET_VQ_TX].ubufs) {
+		mutex_lock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
+		n->tx_flush = true;
+		mutex_unlock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
+		/* Wait for all lower device DMAs done. */
+		vhost_ubuf_put_and_wait(n->dev.vqs[VHOST_NET_VQ_TX].ubufs);
+		mutex_lock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
+		n->tx_flush = false;
+		kref_init(&n->dev.vqs[VHOST_NET_VQ_TX].ubufs->kref);
+		mutex_unlock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
+	}
 }
 
 static int vhost_net_release(struct inode *inode, struct file *f)
@@ -686,18 +703,10 @@ static int vhost_net_release(struct inode *inode, struct file *f)
 	struct vhost_net *n = f->private_data;
 	struct socket *tx_sock;
 	struct socket *rx_sock;
-	int i;
 
 	vhost_net_stop(n, &tx_sock, &rx_sock);
 	vhost_net_flush(n);
 	vhost_dev_stop(&n->dev);
-	for (i = 0; i < n->dev.nvqs; ++i) {
-		/* Wait for all lower device DMAs done. */
-		if (n->dev.vqs[i].ubufs)
-			vhost_ubuf_put_and_wait(n->dev.vqs[i].ubufs);
-
-		vhost_zerocopy_signal_used(n, &n->dev.vqs[i]);
-	}
 	vhost_dev_cleanup(&n->dev, false);
 	if (tx_sock)
 		fput(tx_sock->file);
@@ -826,6 +835,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
 
 		n->tx_packets = 0;
 		n->tx_zcopy_err = 0;
+		n->tx_flush = false;
 	}
 
 	mutex_unlock(&vq->mutex);
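For completeness, here is how the two halves of the patch fit together on the producer side, as a hedged sketch rather than the actual handle_tx() code: the tx vq mutex makes tx_flush stable while a worker decides whether to start a zerocopy DMA, so once the flusher has set the flag and released the mutex, no new references can appear while it waits. start_zerocopy_tx and dma_tracker are hypothetical names condensing what vhost_net_tx_select_zcopy() and the ubufs refcounting do together.

	/* Caller holds the tx vq mutex, so tx_flush cannot change under us. */
	static bool start_zerocopy_tx(struct vhost_net *net, struct dma_tracker *t)
	{
		if (net->tx_flush)
			return false;	/* flush in progress: fall back to copying */

		dma_tracker_get(t);	/* the DMA holds this ref until completion */
		return true;
	}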