author     Asias He <asias@redhat.com>                2013-05-06 04:38:24 -0400
committer  Michael S. Tsirkin <mst@redhat.com>        2013-05-06 06:25:47 -0400
commit     fe729a57c8d85b6b38d62b64215d5f064e682940 (patch)
tree       2fb5b303ac7a516034ea0f3d3fca99dee98d1178 /drivers/vhost
parent     e40ab7484f8e9cc9f5e59ae53214800e50f5615e (diff)
vhost-net: Cleanup vhost_ubuf and vhost_zcopy
- Rename vhost_ubuf to vhost_net_ubuf
- Rename vhost_zcopy_mask to vhost_net_zcopy_mask
- Make funcs static
Signed-off-by: Asias He <asias@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
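
The helpers being renamed here implement a flush-by-refcount pattern: each in-flight zero-copy buffer holds a reference on a vhost_net_ubuf_ref, the transmit-completion callback drops it, and a flusher blocks until the count hits zero. Below is a minimal userspace sketch of that pattern, an illustrative pthreads analogue rather than the kernel code; every name in it is hypothetical.

	/* Userspace analogue of the vhost_net_ubuf_ref lifecycle:
	 * one reference per in-flight zero-copy buffer, plus a
	 * waiter that blocks until all references are put.
	 */
	#include <pthread.h>
	#include <stdlib.h>

	struct ubuf_ref {
		int refcount;            /* plays the role of kref */
		pthread_mutex_t lock;
		pthread_cond_t done;     /* plays the role of wait_queue_head_t */
	};

	static struct ubuf_ref *ubuf_alloc(void)
	{
		struct ubuf_ref *u = malloc(sizeof(*u));
		u->refcount = 1;         /* like kref_init() */
		pthread_mutex_init(&u->lock, NULL);
		pthread_cond_init(&u->done, NULL);
		return u;
	}

	/* like vhost_net_ubuf_put(): drop one reference, signal at zero */
	static void ubuf_put(struct ubuf_ref *u)
	{
		pthread_mutex_lock(&u->lock);
		if (--u->refcount == 0)
			pthread_cond_broadcast(&u->done);  /* like wake_up() */
		pthread_mutex_unlock(&u->lock);
	}

	/* like vhost_net_ubuf_put_and_wait(): drop ours, wait out the rest */
	static void ubuf_put_and_wait(struct ubuf_ref *u)
	{
		pthread_mutex_lock(&u->lock);
		if (--u->refcount == 0)
			pthread_cond_broadcast(&u->done);
		while (u->refcount != 0)
			pthread_cond_wait(&u->done, &u->lock);
		pthread_mutex_unlock(&u->lock);
		free(u);                 /* like the kfree() in the real code */
	}

In the kernel version below, kref plus wait_event play the roles of the mutex/condvar pair in this sketch.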
Diffstat (limited to 'drivers/vhost')
-rw-r--r--  drivers/vhost/net.c | 58
1 file changed, 30 insertions(+), 28 deletions(-)
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 06b2447bce71..2b51e2336aa2 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -70,7 +70,7 @@ enum {
 	VHOST_NET_VQ_MAX = 2,
 };
 
-struct vhost_ubuf_ref {
+struct vhost_net_ubuf_ref {
 	struct kref kref;
 	wait_queue_head_t wait;
 	struct vhost_virtqueue *vq;
@@ -93,7 +93,7 @@ struct vhost_net_virtqueue {
 	struct ubuf_info *ubuf_info;
 	/* Reference counting for outstanding ubufs.
 	 * Protected by vq mutex. Writers must also take device mutex. */
-	struct vhost_ubuf_ref *ubufs;
+	struct vhost_net_ubuf_ref *ubufs;
 };
 
 struct vhost_net {
@@ -110,24 +110,25 @@ struct vhost_net {
 	bool tx_flush;
 };
 
-static unsigned vhost_zcopy_mask __read_mostly;
+static unsigned vhost_net_zcopy_mask __read_mostly;
 
-void vhost_enable_zcopy(int vq)
+static void vhost_net_enable_zcopy(int vq)
 {
-	vhost_zcopy_mask |= 0x1 << vq;
+	vhost_net_zcopy_mask |= 0x1 << vq;
 }
 
-static void vhost_zerocopy_done_signal(struct kref *kref)
+static void vhost_net_zerocopy_done_signal(struct kref *kref)
 {
-	struct vhost_ubuf_ref *ubufs = container_of(kref, struct vhost_ubuf_ref,
-						    kref);
+	struct vhost_net_ubuf_ref *ubufs;
+
+	ubufs = container_of(kref, struct vhost_net_ubuf_ref, kref);
 	wake_up(&ubufs->wait);
 }
 
-struct vhost_ubuf_ref *vhost_ubuf_alloc(struct vhost_virtqueue *vq,
-					bool zcopy)
+static struct vhost_net_ubuf_ref *
+vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
 {
-	struct vhost_ubuf_ref *ubufs;
+	struct vhost_net_ubuf_ref *ubufs;
 	/* No zero copy backend? Nothing to count. */
 	if (!zcopy)
 		return NULL;
@@ -140,14 +141,14 @@ struct vhost_ubuf_ref *vhost_ubuf_alloc(struct vhost_virtqueue *vq,
 	return ubufs;
 }
 
-void vhost_ubuf_put(struct vhost_ubuf_ref *ubufs)
+static void vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
 {
-	kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
+	kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
 }
 
-void vhost_ubuf_put_and_wait(struct vhost_ubuf_ref *ubufs)
+static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
 {
-	kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
+	kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
 	wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount));
 	kfree(ubufs);
 }
@@ -159,7 +160,7 @@ static void vhost_net_clear_ubuf_info(struct vhost_net *n)
 	int i;
 
 	for (i = 0; i < n->dev.nvqs; ++i) {
-		zcopy = vhost_zcopy_mask & (0x1 << i);
+		zcopy = vhost_net_zcopy_mask & (0x1 << i);
 		if (zcopy)
 			kfree(n->vqs[i].ubuf_info);
 	}
@@ -171,7 +172,7 @@ int vhost_net_set_ubuf_info(struct vhost_net *n)
 	int i;
 
 	for (i = 0; i < n->dev.nvqs; ++i) {
-		zcopy = vhost_zcopy_mask & (0x1 << i);
+		zcopy = vhost_net_zcopy_mask & (0x1 << i);
 		if (!zcopy)
 			continue;
 		n->vqs[i].ubuf_info = kmalloc(sizeof(*n->vqs[i].ubuf_info) *
@@ -183,7 +184,7 @@ int vhost_net_set_ubuf_info(struct vhost_net *n)
 
 err:
 	while (i--) {
-		zcopy = vhost_zcopy_mask & (0x1 << i);
+		zcopy = vhost_net_zcopy_mask & (0x1 << i);
 		if (!zcopy)
 			continue;
 		kfree(n->vqs[i].ubuf_info);
@@ -305,7 +306,7 @@ static int vhost_zerocopy_signal_used(struct vhost_net *net,
 
 static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
 {
-	struct vhost_ubuf_ref *ubufs = ubuf->ctx;
+	struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
 	struct vhost_virtqueue *vq = ubufs->vq;
 	int cnt = atomic_read(&ubufs->kref.refcount);
 
@@ -322,7 +323,7 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
 	/* set len to mark this desc buffers done DMA */
 	vq->heads[ubuf->desc].len = success ?
 		VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
-	vhost_ubuf_put(ubufs);
+	vhost_net_ubuf_put(ubufs);
 }
 
 /* Expects to be always run from workqueue - which acts as
@@ -345,7 +346,7 @@ static void handle_tx(struct vhost_net *net)
 	int err;
 	size_t hdr_size;
 	struct socket *sock;
-	struct vhost_ubuf_ref *uninitialized_var(ubufs);
+	struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
 	bool zcopy, zcopy_used;
 
 	/* TODO: check that we are running from vhost_worker? */
@@ -441,7 +442,7 @@ static void handle_tx(struct vhost_net *net)
 		if (unlikely(err < 0)) {
 			if (zcopy_used) {
 				if (ubufs)
-					vhost_ubuf_put(ubufs);
+					vhost_net_ubuf_put(ubufs);
 				nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
 					% UIO_MAXIOV;
 			}
@@ -795,7 +796,7 @@ static void vhost_net_flush(struct vhost_net *n)
 		n->tx_flush = true;
 		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
 		/* Wait for all lower device DMAs done. */
-		vhost_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
+		vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
 		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
 		n->tx_flush = false;
 		kref_init(&n->vqs[VHOST_NET_VQ_TX].ubufs->kref);
@@ -896,7 +897,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
 	struct socket *sock, *oldsock;
 	struct vhost_virtqueue *vq;
 	struct vhost_net_virtqueue *nvq;
-	struct vhost_ubuf_ref *ubufs, *oldubufs = NULL;
+	struct vhost_net_ubuf_ref *ubufs, *oldubufs = NULL;
 	int r;
 
 	mutex_lock(&n->dev.mutex);
@@ -927,7 +928,8 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
 	oldsock = rcu_dereference_protected(vq->private_data,
 					    lockdep_is_held(&vq->mutex));
 	if (sock != oldsock) {
-		ubufs = vhost_ubuf_alloc(vq, sock && vhost_sock_zcopy(sock));
+		ubufs = vhost_net_ubuf_alloc(vq,
+					     sock && vhost_sock_zcopy(sock));
 		if (IS_ERR(ubufs)) {
 			r = PTR_ERR(ubufs);
 			goto err_ubufs;
@@ -953,7 +955,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
 	mutex_unlock(&vq->mutex);
 
 	if (oldubufs) {
-		vhost_ubuf_put_and_wait(oldubufs);
+		vhost_net_ubuf_put_and_wait(oldubufs);
 		mutex_lock(&vq->mutex);
 		vhost_zerocopy_signal_used(n, vq);
 		mutex_unlock(&vq->mutex);
@@ -971,7 +973,7 @@ err_used:
 	rcu_assign_pointer(vq->private_data, oldsock);
 	vhost_net_enable_vq(n, vq);
 	if (ubufs)
-		vhost_ubuf_put_and_wait(ubufs);
+		vhost_net_ubuf_put_and_wait(ubufs);
 err_ubufs:
 	fput(sock->file);
 err_vq:
@@ -1133,7 +1135,7 @@ static struct miscdevice vhost_net_misc = {
 static int vhost_net_init(void)
 {
 	if (experimental_zcopytx)
-		vhost_net_enable_zcopy(VHOST_NET_VQ_TX);
+		vhost_net_enable_zcopy(VHOST_NET_VQ_TX);
 	return misc_register(&vhost_net_misc);
 }
 module_init(vhost_net_init);
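
The final hunk is the only remaining caller of the enable helper: module init sets the TX bit in vhost_net_zcopy_mask when experimental_zcopytx is set, and the ubuf_info setup paths above test that bit per virtqueue. A minimal sketch of this per-virtqueue bitmask pattern follows; it is a userspace analogue with hypothetical names, not the kernel code.

	/* Per-virtqueue feature bitmask, as used by vhost_net_zcopy_mask:
	 * bit i set means zero-copy is enabled for virtqueue i.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	enum { VQ_RX = 0, VQ_TX = 1 };

	static unsigned zcopy_mask;

	static void enable_zcopy(int vq)
	{
		zcopy_mask |= 0x1 << vq;          /* set this vq's bit */
	}

	static bool vq_is_zcopy(int vq)
	{
		return zcopy_mask & (0x1 << vq);  /* test this vq's bit */
	}

	int main(void)
	{
		enable_zcopy(VQ_TX);  /* mirrors vhost_net_init() with zcopytx on */
		printf("tx zcopy: %d, rx zcopy: %d\n",
		       vq_is_zcopy(VQ_TX), vq_is_zcopy(VQ_RX));
		return 0;
	}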