author	Stefan Hajnoczi <stefanha@redhat.com>	2018-11-05 05:35:47 -0500
committer	Michael S. Tsirkin <mst@redhat.com>	2018-12-06 14:28:38 -0500
commit	834e772c8db0c6a275d75315d90aba4ebbb1e249 (patch)
tree	9883c42fbb27228dacc9685f4e39f077016574a5 /drivers/vhost
parent	78b1a52e05c9db11d293342e8d6d8a230a04b4e7 (diff)
vhost/vsock: fix use-after-free in network stack callers
If the network stack calls .send_pkt()/.cancel_pkt() during .release(),
a struct vhost_vsock use-after-free is possible. This occurs because
.release() does not wait for other CPUs to stop using struct
vhost_vsock.

Switch to an RCU-enabled hashtable (indexed by guest CID) so that
.release() can wait for other CPUs by calling synchronize_rcu(). This
also eliminates vhost_vsock_lock acquisition in the data path, so it
could have a positive effect on performance.

This is CVE-2018-14625 "kernel: use-after-free Read in
vhost_transport_send_pkt".

Cc: stable@vger.kernel.org
Reported-and-tested-by: syzbot+bd391451452fb0b93039@syzkaller.appspotmail.com
Reported-by: syzbot+e3e074963495f92a89ed@syzkaller.appspotmail.com
Reported-by: syzbot+d5a0a170c5069658b141@syzkaller.appspotmail.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
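The fix follows the classic RCU unpublish-then-wait idiom: writers remove the entry from the hashtable under a spinlock, then call synchronize_rcu() before tearing the object down, while readers traverse the table under rcu_read_lock() with no lock in the data path. A minimal kernel-style sketch of that idiom follows; all names here (demo_lock, demo_hash, demo_obj, demo_get, demo_del) are invented for illustration and do not appear in the patch, whose real code is in the diff below.

/*
 * Illustrative sketch only; the patch's equivalents are
 * vhost_vsock_lock, vhost_vsock_hash and vhost_vsock_get().
 */
#include <linux/hashtable.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);			/* serializes writers */
static DEFINE_READ_MOSTLY_HASHTABLE(demo_hash, 8);	/* 2^8 buckets */

struct demo_obj {
	u32 key;
	struct hlist_node hash;
};

/* Reader: caller must hold rcu_read_lock() (or demo_lock) for as long
 * as it dereferences the returned pointer.
 */
static struct demo_obj *demo_get(u32 key)
{
	struct demo_obj *obj;

	hash_for_each_possible_rcu(demo_hash, obj, hash, key) {
		if (obj->key == key)
			return obj;
	}
	return NULL;
}

/* Writer: unpublish the object, wait for readers to drain, then free.
 * After synchronize_rcu() returns, no CPU can still hold a reference
 * obtained from demo_get(), so freeing is safe.
 */
static void demo_del(struct demo_obj *obj)
{
	spin_lock_bh(&demo_lock);
	hash_del_rcu(&obj->hash);
	spin_unlock_bh(&demo_lock);

	synchronize_rcu();
	kfree(obj);
}

In the diff below, .release() plays the role of demo_del() (hash_del_rcu() under vhost_vsock_lock, then synchronize_rcu() before teardown), and .send_pkt()/.cancel_pkt() play the role of callers of demo_get() under rcu_read_lock().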
Diffstat (limited to 'drivers/vhost')
-rw-r--r--	drivers/vhost/vsock.c | 57
1 file changed, 33 insertions(+), 24 deletions(-)
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 731e2ea2aeca..98ed5be132c6 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -15,6 +15,7 @@
 #include <net/sock.h>
 #include <linux/virtio_vsock.h>
 #include <linux/vhost.h>
+#include <linux/hashtable.h>
 
 #include <net/af_vsock.h>
 #include "vhost.h"
@@ -27,14 +28,14 @@ enum {
 
 /* Used to track all the vhost_vsock instances on the system. */
 static DEFINE_SPINLOCK(vhost_vsock_lock);
-static LIST_HEAD(vhost_vsock_list);
+static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8);
 
 struct vhost_vsock {
 	struct vhost_dev dev;
 	struct vhost_virtqueue vqs[2];
 
-	/* Link to global vhost_vsock_list, protected by vhost_vsock_lock */
-	struct list_head list;
+	/* Link to global vhost_vsock_hash, writes use vhost_vsock_lock */
+	struct hlist_node hash;
 
 	struct vhost_work send_pkt_work;
 	spinlock_t send_pkt_list_lock;
@@ -50,11 +51,14 @@ static u32 vhost_transport_get_local_cid(void)
 	return VHOST_VSOCK_DEFAULT_HOST_CID;
 }
 
-static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid)
+/* Callers that dereference the return value must hold vhost_vsock_lock or the
+ * RCU read lock.
+ */
+static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
 {
 	struct vhost_vsock *vsock;
 
-	list_for_each_entry(vsock, &vhost_vsock_list, list) {
+	hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) {
 		u32 other_cid = vsock->guest_cid;
 
 		/* Skip instances that have no CID yet */
@@ -69,17 +73,6 @@ static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid)
 	return NULL;
 }
 
-static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
-{
-	struct vhost_vsock *vsock;
-
-	spin_lock_bh(&vhost_vsock_lock);
-	vsock = __vhost_vsock_get(guest_cid);
-	spin_unlock_bh(&vhost_vsock_lock);
-
-	return vsock;
-}
-
 static void
 vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
 			    struct vhost_virtqueue *vq)
@@ -210,9 +203,12 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
 	struct vhost_vsock *vsock;
 	int len = pkt->len;
 
+	rcu_read_lock();
+
 	/* Find the vhost_vsock according to guest context id */
 	vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
 	if (!vsock) {
+		rcu_read_unlock();
 		virtio_transport_free_pkt(pkt);
 		return -ENODEV;
 	}
@@ -225,6 +221,8 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
 	spin_unlock_bh(&vsock->send_pkt_list_lock);
 
 	vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
+
+	rcu_read_unlock();
 	return len;
 }
 
@@ -234,12 +232,15 @@ vhost_transport_cancel_pkt(struct vsock_sock *vsk)
 	struct vhost_vsock *vsock;
 	struct virtio_vsock_pkt *pkt, *n;
 	int cnt = 0;
+	int ret = -ENODEV;
 	LIST_HEAD(freeme);
 
+	rcu_read_lock();
+
 	/* Find the vhost_vsock according to guest context id */
 	vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
 	if (!vsock)
-		return -ENODEV;
+		goto out;
 
 	spin_lock_bh(&vsock->send_pkt_list_lock);
 	list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
@@ -265,7 +266,10 @@ vhost_transport_cancel_pkt(struct vsock_sock *vsk)
 		vhost_poll_queue(&tx_vq->poll);
 	}
 
-	return 0;
+	ret = 0;
+out:
+	rcu_read_unlock();
+	return ret;
 }
 
 static struct virtio_vsock_pkt *
@@ -533,10 +537,6 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
 	spin_lock_init(&vsock->send_pkt_list_lock);
 	INIT_LIST_HEAD(&vsock->send_pkt_list);
 	vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
-
-	spin_lock_bh(&vhost_vsock_lock);
-	list_add_tail(&vsock->list, &vhost_vsock_list);
-	spin_unlock_bh(&vhost_vsock_lock);
 	return 0;
 
 out:
@@ -585,9 +585,13 @@ static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
 	struct vhost_vsock *vsock = file->private_data;
 
 	spin_lock_bh(&vhost_vsock_lock);
-	list_del(&vsock->list);
+	if (vsock->guest_cid)
+		hash_del_rcu(&vsock->hash);
 	spin_unlock_bh(&vhost_vsock_lock);
 
+	/* Wait for other CPUs to finish using vsock */
+	synchronize_rcu();
+
 	/* Iterating over all connections for all CIDs to find orphans is
 	 * inefficient. Room for improvement here. */
 	vsock_for_each_connected_socket(vhost_vsock_reset_orphans);
@@ -628,12 +632,17 @@ static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
 
 	/* Refuse if CID is already in use */
 	spin_lock_bh(&vhost_vsock_lock);
-	other = __vhost_vsock_get(guest_cid);
+	other = vhost_vsock_get(guest_cid);
 	if (other && other != vsock) {
 		spin_unlock_bh(&vhost_vsock_lock);
 		return -EADDRINUSE;
 	}
+
+	if (vsock->guest_cid)
+		hash_del_rcu(&vsock->hash);
+
 	vsock->guest_cid = guest_cid;
+	hash_add_rcu(vhost_vsock_hash, &vsock->hash, guest_cid);
 	spin_unlock_bh(&vhost_vsock_lock);
 
 	return 0;