author     Linus Torvalds <torvalds@linux-foundation.org>  2013-05-07 13:13:52 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-05-07 13:13:52 -0400
commit     eac84105cddf8686440aaa9fbcb58093e37e4180 (patch)
tree       36e247af959104ef4a055a2d9660e48b1eb56283
parent     a26ea93a3d19c2b79e8b382356014eba607ce477 (diff)
parent     a18cc421649dfdc21edb6c4c867dd05447d7df8c (diff)
Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost
Pull more vhost fixes from Michael Tsirkin:
"This fixes some minor issues in the patches that have been merged.
We also finally drop the workaround disabling event_idx for scsi: it
was always questionable, and now we know it's not needed.
There's also a memory leak fix"
* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
vhost-scsi: Enable VIRTIO_RING_F_EVENT_IDX
vhost: drop virtio_net.h dependency
vhost-net: Cleanup vhost_ubuf and vhost_zcopy
vhost: Remove vhost_enable_zcopy in vhost.h
vhost: Remove comments for hdr in vhost.h
vhost: Move VHOST_NET_FEATURES to net.c
vhost-net: Free ubuf when vhost_dev_set_owner fails
vhost: Export vhost_dev_set_owner
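For context, VHOST_SET_OWNER — the ioctl whose error path the leak fix and the vhost_dev_set_owner export rework below — is the first call a userspace driver such as QEMU issues on a vhost fd. A minimal, hypothetical standalone sketch of that sequence (not from this series):

/* Open the vhost-net device and claim ownership. VHOST_SET_OWNER binds
 * the device to the calling process and starts the vhost worker thread;
 * the patches below restructure its setup and error handling. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

int main(void)
{
        int fd = open("/dev/vhost-net", O_RDWR);

        if (fd < 0) {
                perror("open /dev/vhost-net");
                return 1;
        }
        if (ioctl(fd, VHOST_SET_OWNER) < 0)
                perror("VHOST_SET_OWNER");
        return 0;
}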
-rw-r--r--  drivers/vhost/net.c   | 100
-rw-r--r--  drivers/vhost/scsi.c  |   8
-rw-r--r--  drivers/vhost/vhost.c |   4
-rw-r--r--  drivers/vhost/vhost.h |  10
4 files changed, 71 insertions, 51 deletions
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index a3645bd163d8..2b51e2336aa2 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -59,12 +59,18 @@ MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
 #define VHOST_DMA_IS_DONE(len) ((len) >= VHOST_DMA_DONE_LEN)
 
 enum {
+        VHOST_NET_FEATURES = VHOST_FEATURES |
+                             (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
+                             (1ULL << VIRTIO_NET_F_MRG_RXBUF),
+};
+
+enum {
         VHOST_NET_VQ_RX = 0,
         VHOST_NET_VQ_TX = 1,
         VHOST_NET_VQ_MAX = 2,
 };
 
-struct vhost_ubuf_ref {
+struct vhost_net_ubuf_ref {
         struct kref kref;
         wait_queue_head_t wait;
         struct vhost_virtqueue *vq;
@@ -87,7 +93,7 @@ struct vhost_net_virtqueue {
         struct ubuf_info *ubuf_info;
         /* Reference counting for outstanding ubufs.
          * Protected by vq mutex. Writers must also take device mutex. */
-        struct vhost_ubuf_ref *ubufs;
+        struct vhost_net_ubuf_ref *ubufs;
 };
 
 struct vhost_net {
@@ -104,24 +110,25 @@ struct vhost_net {
         bool tx_flush;
 };
 
-static unsigned vhost_zcopy_mask __read_mostly;
+static unsigned vhost_net_zcopy_mask __read_mostly;
 
-void vhost_enable_zcopy(int vq)
+static void vhost_net_enable_zcopy(int vq)
 {
-        vhost_zcopy_mask |= 0x1 << vq;
+        vhost_net_zcopy_mask |= 0x1 << vq;
 }
 
-static void vhost_zerocopy_done_signal(struct kref *kref)
+static void vhost_net_zerocopy_done_signal(struct kref *kref)
 {
-        struct vhost_ubuf_ref *ubufs = container_of(kref, struct vhost_ubuf_ref,
-                                                    kref);
+        struct vhost_net_ubuf_ref *ubufs;
+
+        ubufs = container_of(kref, struct vhost_net_ubuf_ref, kref);
         wake_up(&ubufs->wait);
 }
 
-struct vhost_ubuf_ref *vhost_ubuf_alloc(struct vhost_virtqueue *vq,
-                                        bool zcopy)
+static struct vhost_net_ubuf_ref *
+vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
 {
-        struct vhost_ubuf_ref *ubufs;
+        struct vhost_net_ubuf_ref *ubufs;
         /* No zero copy backend? Nothing to count. */
         if (!zcopy)
                 return NULL;
@@ -134,25 +141,38 @@ struct vhost_ubuf_ref *vhost_ubuf_alloc(struct vhost_virtqueue *vq,
         return ubufs;
 }
 
-void vhost_ubuf_put(struct vhost_ubuf_ref *ubufs)
+static void vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
 {
-        kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
+        kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
 }
 
-void vhost_ubuf_put_and_wait(struct vhost_ubuf_ref *ubufs)
+static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
 {
-        kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
+        kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
         wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount));
         kfree(ubufs);
 }
 
+static void vhost_net_clear_ubuf_info(struct vhost_net *n)
+{
+
+        bool zcopy;
+        int i;
+
+        for (i = 0; i < n->dev.nvqs; ++i) {
+                zcopy = vhost_net_zcopy_mask & (0x1 << i);
+                if (zcopy)
+                        kfree(n->vqs[i].ubuf_info);
+        }
+}
+
 int vhost_net_set_ubuf_info(struct vhost_net *n)
 {
         bool zcopy;
         int i;
 
         for (i = 0; i < n->dev.nvqs; ++i) {
-                zcopy = vhost_zcopy_mask & (0x1 << i);
+                zcopy = vhost_net_zcopy_mask & (0x1 << i);
                 if (!zcopy)
                         continue;
                 n->vqs[i].ubuf_info = kmalloc(sizeof(*n->vqs[i].ubuf_info) *
@@ -164,7 +184,7 @@ int vhost_net_set_ubuf_info(struct vhost_net *n)
 
 err:
         while (i--) {
-                zcopy = vhost_zcopy_mask & (0x1 << i);
+                zcopy = vhost_net_zcopy_mask & (0x1 << i);
                 if (!zcopy)
                         continue;
                 kfree(n->vqs[i].ubuf_info);
@@ -286,7 +306,7 @@ static int vhost_zerocopy_signal_used(struct vhost_net *net,
 
 static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
 {
-        struct vhost_ubuf_ref *ubufs = ubuf->ctx;
+        struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
         struct vhost_virtqueue *vq = ubufs->vq;
         int cnt = atomic_read(&ubufs->kref.refcount);
 
@@ -303,7 +323,7 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
         /* set len to mark this desc buffers done DMA */
         vq->heads[ubuf->desc].len = success ?
                 VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
-        vhost_ubuf_put(ubufs);
+        vhost_net_ubuf_put(ubufs);
 }
 
 /* Expects to be always run from workqueue - which acts as
@@ -326,7 +346,7 @@ static void handle_tx(struct vhost_net *net)
         int err;
         size_t hdr_size;
         struct socket *sock;
-        struct vhost_ubuf_ref *uninitialized_var(ubufs);
+        struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
         bool zcopy, zcopy_used;
 
         /* TODO: check that we are running from vhost_worker? */
@@ -422,7 +442,7 @@ static void handle_tx(struct vhost_net *net)
         if (unlikely(err < 0)) {
                 if (zcopy_used) {
                         if (ubufs)
-                                vhost_ubuf_put(ubufs);
+                                vhost_net_ubuf_put(ubufs);
                         nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
                                 % UIO_MAXIOV;
                 }
@@ -776,7 +796,7 @@ static void vhost_net_flush(struct vhost_net *n)
                 n->tx_flush = true;
                 mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
                 /* Wait for all lower device DMAs done. */
-                vhost_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
+                vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
                 mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
                 n->tx_flush = false;
                 kref_init(&n->vqs[VHOST_NET_VQ_TX].ubufs->kref);
@@ -877,7 +897,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
         struct socket *sock, *oldsock;
         struct vhost_virtqueue *vq;
         struct vhost_net_virtqueue *nvq;
-        struct vhost_ubuf_ref *ubufs, *oldubufs = NULL;
+        struct vhost_net_ubuf_ref *ubufs, *oldubufs = NULL;
         int r;
 
         mutex_lock(&n->dev.mutex);
@@ -908,7 +928,8 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
         oldsock = rcu_dereference_protected(vq->private_data,
                                             lockdep_is_held(&vq->mutex));
         if (sock != oldsock) {
-                ubufs = vhost_ubuf_alloc(vq, sock && vhost_sock_zcopy(sock));
+                ubufs = vhost_net_ubuf_alloc(vq,
+                                             sock && vhost_sock_zcopy(sock));
                 if (IS_ERR(ubufs)) {
                         r = PTR_ERR(ubufs);
                         goto err_ubufs;
@@ -934,7 +955,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
         mutex_unlock(&vq->mutex);
 
         if (oldubufs) {
-                vhost_ubuf_put_and_wait(oldubufs);
+                vhost_net_ubuf_put_and_wait(oldubufs);
                 mutex_lock(&vq->mutex);
                 vhost_zerocopy_signal_used(n, vq);
                 mutex_unlock(&vq->mutex);
@@ -952,7 +973,7 @@ err_used:
         rcu_assign_pointer(vq->private_data, oldsock);
         vhost_net_enable_vq(n, vq);
         if (ubufs)
-                vhost_ubuf_put_and_wait(ubufs);
+                vhost_net_ubuf_put_and_wait(ubufs);
 err_ubufs:
         fput(sock->file);
 err_vq:
@@ -1027,6 +1048,23 @@ static int vhost_net_set_features(struct vhost_net *n, u64 features)
         return 0;
 }
 
+static long vhost_net_set_owner(struct vhost_net *n)
+{
+        int r;
+
+        mutex_lock(&n->dev.mutex);
+        r = vhost_net_set_ubuf_info(n);
+        if (r)
+                goto out;
+        r = vhost_dev_set_owner(&n->dev);
+        if (r)
+                vhost_net_clear_ubuf_info(n);
+        vhost_net_flush(n);
+out:
+        mutex_unlock(&n->dev.mutex);
+        return r;
+}
+
 static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
                             unsigned long arg)
 {
@@ -1055,19 +1093,15 @@ static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
                 return vhost_net_set_features(n, features);
         case VHOST_RESET_OWNER:
                 return vhost_net_reset_owner(n);
+        case VHOST_SET_OWNER:
+                return vhost_net_set_owner(n);
         default:
                 mutex_lock(&n->dev.mutex);
-                if (ioctl == VHOST_SET_OWNER) {
-                        r = vhost_net_set_ubuf_info(n);
-                        if (r)
-                                goto out;
-                }
                 r = vhost_dev_ioctl(&n->dev, ioctl, argp);
                 if (r == -ENOIOCTLCMD)
                         r = vhost_vring_ioctl(&n->dev, ioctl, argp);
                 else
                         vhost_net_flush(n);
-out:
                 mutex_unlock(&n->dev.mutex);
                 return r;
         }
@@ -1101,7 +1135,7 @@ static struct miscdevice vhost_net_misc = {
 static int vhost_net_init(void)
 {
         if (experimental_zcopytx)
-                vhost_enable_zcopy(VHOST_NET_VQ_TX);
+                vhost_net_enable_zcopy(VHOST_NET_VQ_TX);
         return misc_register(&vhost_net_misc);
 }
 module_init(vhost_net_init);
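The renamed vhost_net_ubuf_put_and_wait() above follows a common kernel idiom: the kref release callback only wakes waiters, and the flusher frees the object itself once the refcount has dropped to zero. A minimal sketch of the idiom, detached from vhost (hypothetical names; note kref.refcount was still an atomic_t in this kernel generation, as the wait_event() in the diff relies on):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/wait.h>

struct flush_ref {
        struct kref kref;
        wait_queue_head_t wait;
};

/* Release callback: only signal; freeing is the waiter's job. */
static void flush_ref_done(struct kref *kref)
{
        struct flush_ref *r = container_of(kref, struct flush_ref, kref);

        wake_up(&r->wait);
}

/* Drop our reference, sleep until every outstanding one is gone, free. */
static void flush_ref_put_and_wait(struct flush_ref *r)
{
        kref_put(&r->kref, flush_ref_done);
        wait_event(r->wait, !atomic_read(&r->kref.refcount));
        kfree(r);
}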
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 5179f7aa1b0b..701420297225 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -162,14 +162,8 @@ enum {
         VHOST_SCSI_VQ_IO = 2,
 };
 
-/*
- * VIRTIO_RING_F_EVENT_IDX seems broken. Not sure the bug is in
- * kernel but disabling it helps.
- * TODO: debug and remove the workaround.
- */
 enum {
-        VHOST_SCSI_FEATURES = (VHOST_FEATURES & (~VIRTIO_RING_F_EVENT_IDX)) |
-                              (1ULL << VIRTIO_SCSI_F_HOTPLUG)
+        VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG)
 };
 
 #define VHOST_SCSI_MAX_TARGET   256
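One reason the dropped workaround was "always questionable": virtio feature macros such as VIRTIO_RING_F_EVENT_IDX are bit numbers, not masks, so ANDing with ~VIRTIO_RING_F_EVENT_IDX (that is, ~29) never actually cleared bit 29 — event_idx was enabled all along, which is how we now know it works. A small userspace demonstration (assumes only the bit value 29 from <linux/virtio_ring.h>):

#include <stdio.h>

#define VIRTIO_RING_F_EVENT_IDX 29      /* bit number, per <linux/virtio_ring.h> */

int main(void)
{
        unsigned long long features = 1ULL << VIRTIO_RING_F_EVENT_IDX;

        /* The dropped expression ANDs with ~29, which touches only the
         * low bits of the word, so the event_idx bit stays set: */
        printf("after old mask: %d\n",
               !!((features & ~(unsigned long long)VIRTIO_RING_F_EVENT_IDX) &
                  (1ULL << VIRTIO_RING_F_EVENT_IDX)));          /* prints 1 */

        /* Clearing a feature takes ~(1ULL << bit): */
        printf("after bit mask: %d\n",
               !!((features & ~(1ULL << VIRTIO_RING_F_EVENT_IDX)) &
                  (1ULL << VIRTIO_RING_F_EVENT_IDX)));          /* prints 0 */
        return 0;
}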
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 749b5ab5bfbb..beee7f5787e6 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -13,7 +13,7 @@
 
 #include <linux/eventfd.h>
 #include <linux/vhost.h>
-#include <linux/virtio_net.h>
+#include <linux/socket.h> /* memcpy_fromiovec */
 #include <linux/mm.h>
 #include <linux/mmu_context.h>
 #include <linux/miscdevice.h>
@@ -344,7 +344,7 @@ static int vhost_attach_cgroups(struct vhost_dev *dev)
 }
 
 /* Caller should have device mutex */
-static long vhost_dev_set_owner(struct vhost_dev *dev)
+long vhost_dev_set_owner(struct vhost_dev *dev)
 {
         struct task_struct *worker;
         int err;
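The include swap works because vhost.c's only remaining use of that header was memcpy_fromiovec(), which <linux/socket.h> declared in kernels of this era. The helper copies from a user iovec into a kernel buffer, consuming (advancing) the iovec as it goes; a kernel-context sketch with a hypothetical wrapper:

#include <linux/socket.h>       /* memcpy_fromiovec() declaration */
#include <linux/uio.h>          /* struct iovec */

/* Pull a fixed-size header out of a user iovec; returns 0 on success
 * or -EFAULT if the user pages cannot be read. The iovec's iov_base
 * and iov_len are advanced past the copied bytes. */
static int pull_user_hdr(struct iovec *iov, unsigned char *hdr, int len)
{
        return memcpy_fromiovec(hdr, iov, len);
}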
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index b58f4ae82cb8..a7ad63592987 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -99,9 +99,6 @@ struct vhost_virtqueue {
         u64 log_addr;
 
         struct iovec iov[UIO_MAXIOV];
-        /* hdr is used to store the virtio header.
-         * Since each iovec has >= 1 byte length, we never need more than
-         * header length entries to store the header. */
         struct iovec *indirect;
         struct vring_used_elem *heads;
         /* We use a kind of RCU to access private pointer.
@@ -135,6 +132,7 @@ struct vhost_dev {
 };
 
 long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
+long vhost_dev_set_owner(struct vhost_dev *dev);
 long vhost_dev_check_owner(struct vhost_dev *);
 struct vhost_memory *vhost_dev_reset_owner_prepare(void);
 void vhost_dev_reset_owner(struct vhost_dev *, struct vhost_memory *);
@@ -177,9 +175,6 @@ enum {
                          (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
                          (1ULL << VIRTIO_RING_F_EVENT_IDX) |
                          (1ULL << VHOST_F_LOG_ALL),
-        VHOST_NET_FEATURES = VHOST_FEATURES |
-                         (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
-                         (1ULL << VIRTIO_NET_F_MRG_RXBUF),
 };
 
 static inline int vhost_has_feature(struct vhost_dev *dev, int bit)
@@ -191,7 +186,4 @@ static inline int vhost_has_feature(struct vhost_dev *dev, int bit)
         acked_features = rcu_dereference_index_check(dev->acked_features, 1);
         return acked_features & (1 << bit);
 }
-
-void vhost_enable_zcopy(int vq);
-
 #endif
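For completeness, vhost_has_feature() above is how backends consult the guest-acked feature bits at runtime; a hypothetical helper modeled on how net.c sizes the virtio-net header:

#include <linux/virtio_net.h>

/* Sketch: pick the header size the guest expects once features are acked. */
static size_t guest_hdr_len(struct vhost_dev *dev)
{
        if (vhost_has_feature(dev, VIRTIO_NET_F_MRG_RXBUF))
                return sizeof(struct virtio_net_hdr_mrg_rxbuf);
        return sizeof(struct virtio_net_hdr);
}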