author     Stefano Garzarella <sgarzare@redhat.com>   2019-07-05 07:04:52 -0400
committer  David S. Miller <davem@davemloft.net>      2019-07-08 18:35:17 -0400
commit     0deab087b16abb755dca6da5d3685375f8ff8c85 (patch)
tree       f23b480f35390cf3b8aa59d79207a5c72a4afb80 /net/vmw_vsock
parent     1a2d405c0081e466cc56309652de3ce8467b3812 (diff)
vsock/virtio: use RCU to avoid use-after-free on the_virtio_vsock
Some callbacks used by the upper layers can run while we are in .remove(). A potential use-after-free can happen, because we free the_virtio_vsock without knowing whether those callbacks have finished.

To solve this issue, we move the assignment of the_virtio_vsock to the end of .probe(), once all initialization has finished, and to the beginning of .remove(), before releasing resources. For the same reason, we do the same for vdev->priv.

We use RCU to make sure that all callbacks that use the_virtio_vsock have finished before we free it. This is not required for callbacks that use vdev->priv, because after vdev->config->del_vqs() we are sure they have finished and will no longer be invoked.

We also take the mutex during .remove() to prevent .probe() from running while we are resetting the device.

Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
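For context, here is a minimal, self-contained sketch (not part of the patch) of the RCU publish/unpublish pattern described above: publish the RCU-protected global only after initialization is complete, clear it and call synchronize_rcu() before tearing anything down, and have readers dereference it only under rcu_read_lock(). The names my_dev, my_dev_probe and my_dev_remove are hypothetical; only the mutex/RCU usage mirrors what the patch does with the_virtio_vsock.

/*
 * Sketch of the publish/unpublish pattern; names are hypothetical,
 * only the mutex/RCU primitives mirror the patch.
 */
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

struct my_dev {
	u32 id;
};

/* RCU-protected global, playing the role of the_virtio_vsock. */
static struct my_dev *the_dev;
static DEFINE_MUTEX(the_dev_mutex);

/* Reader side: callbacks only touch the global under rcu_read_lock(). */
static u32 my_dev_get_id(void)
{
	struct my_dev *dev;
	u32 id = 0;

	rcu_read_lock();
	dev = rcu_dereference(the_dev);
	if (dev)
		id = dev->id;
	rcu_read_unlock();

	return id;
}

/* .probe()-like path: publish the pointer only after full initialization. */
static int my_dev_probe(void)
{
	struct my_dev *dev;

	mutex_lock(&the_dev_mutex);

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		mutex_unlock(&the_dev_mutex);
		return -ENOMEM;
	}
	dev->id = 42;			/* ... complete initialization ... */

	rcu_assign_pointer(the_dev, dev);	/* publish last */

	mutex_unlock(&the_dev_mutex);
	return 0;
}

/* .remove()-like path: unpublish first, wait for readers, then free. */
static void my_dev_remove(void)
{
	struct my_dev *dev;

	mutex_lock(&the_dev_mutex);

	dev = rcu_dereference_protected(the_dev,
					lockdep_is_held(&the_dev_mutex));
	rcu_assign_pointer(the_dev, NULL);
	synchronize_rcu();		/* in-flight readers have finished */

	/* ... release device resources here ... */

	mutex_unlock(&the_dev_mutex);
	kfree(dev);
}

Applied to the driver, this is why the patch moves rcu_assign_pointer(the_virtio_vsock, vsock) to the end of .probe(), and puts the NULL assignment plus synchronize_rcu() at the very top of .remove(), with the_virtio_vsock_mutex held across the whole teardown.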
Diffstat (limited to 'net/vmw_vsock')
-rw-r--r--  net/vmw_vsock/virtio_transport.c  70
1 file changed, 46 insertions(+), 24 deletions(-)
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index 9c287e3e393c..3eaec60aa64f 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -65,19 +65,22 @@ struct virtio_vsock {
 	u32 guest_cid;
 };
 
-static struct virtio_vsock *virtio_vsock_get(void)
-{
-	return the_virtio_vsock;
-}
-
 static u32 virtio_transport_get_local_cid(void)
 {
-	struct virtio_vsock *vsock = virtio_vsock_get();
+	struct virtio_vsock *vsock;
+	u32 ret;
 
-	if (!vsock)
-		return VMADDR_CID_ANY;
+	rcu_read_lock();
+	vsock = rcu_dereference(the_virtio_vsock);
+	if (!vsock) {
+		ret = VMADDR_CID_ANY;
+		goto out_rcu;
+	}
 
-	return vsock->guest_cid;
+	ret = vsock->guest_cid;
+out_rcu:
+	rcu_read_unlock();
+	return ret;
 }
 
 static void virtio_transport_loopback_work(struct work_struct *work)
@@ -197,14 +200,18 @@ virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
 	struct virtio_vsock *vsock;
 	int len = pkt->len;
 
-	vsock = virtio_vsock_get();
+	rcu_read_lock();
+	vsock = rcu_dereference(the_virtio_vsock);
 	if (!vsock) {
 		virtio_transport_free_pkt(pkt);
-		return -ENODEV;
+		len = -ENODEV;
+		goto out_rcu;
 	}
 
-	if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid)
-		return virtio_transport_send_pkt_loopback(vsock, pkt);
+	if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid) {
+		len = virtio_transport_send_pkt_loopback(vsock, pkt);
+		goto out_rcu;
+	}
 
 	if (pkt->reply)
 		atomic_inc(&vsock->queued_replies);
@@ -214,6 +221,9 @@ virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
 	spin_unlock_bh(&vsock->send_pkt_list_lock);
 
 	queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
+
+out_rcu:
+	rcu_read_unlock();
 	return len;
 }
 
@@ -222,12 +232,14 @@ virtio_transport_cancel_pkt(struct vsock_sock *vsk)
 {
 	struct virtio_vsock *vsock;
 	struct virtio_vsock_pkt *pkt, *n;
-	int cnt = 0;
+	int cnt = 0, ret;
 	LIST_HEAD(freeme);
 
-	vsock = virtio_vsock_get();
+	rcu_read_lock();
+	vsock = rcu_dereference(the_virtio_vsock);
 	if (!vsock) {
-		return -ENODEV;
+		ret = -ENODEV;
+		goto out_rcu;
 	}
 
 	spin_lock_bh(&vsock->send_pkt_list_lock);
@@ -255,7 +267,11 @@ virtio_transport_cancel_pkt(struct vsock_sock *vsk)
 		queue_work(virtio_vsock_workqueue, &vsock->rx_work);
 	}
 
-	return 0;
+	ret = 0;
+
+out_rcu:
+	rcu_read_unlock();
+	return ret;
 }
 
 static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
@@ -565,7 +581,8 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
 		return ret;
 
 	/* Only one virtio-vsock device per guest is supported */
-	if (the_virtio_vsock) {
+	if (rcu_dereference_protected(the_virtio_vsock,
+				lockdep_is_held(&the_virtio_vsock_mutex))) {
 		ret = -EBUSY;
 		goto out;
 	}
@@ -590,8 +607,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
 	vsock->rx_buf_max_nr = 0;
 	atomic_set(&vsock->queued_replies, 0);
 
-	vdev->priv = vsock;
-	the_virtio_vsock = vsock;
 	mutex_init(&vsock->tx_lock);
 	mutex_init(&vsock->rx_lock);
 	mutex_init(&vsock->event_lock);
@@ -613,6 +628,9 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
 	virtio_vsock_event_fill(vsock);
 	mutex_unlock(&vsock->event_lock);
 
+	vdev->priv = vsock;
+	rcu_assign_pointer(the_virtio_vsock, vsock);
+
 	mutex_unlock(&the_virtio_vsock_mutex);
 	return 0;
 
@@ -627,6 +645,12 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
 	struct virtio_vsock *vsock = vdev->priv;
 	struct virtio_vsock_pkt *pkt;
 
+	mutex_lock(&the_virtio_vsock_mutex);
+
+	vdev->priv = NULL;
+	rcu_assign_pointer(the_virtio_vsock, NULL);
+	synchronize_rcu();
+
 	flush_work(&vsock->loopback_work);
 	flush_work(&vsock->rx_work);
 	flush_work(&vsock->tx_work);
@@ -666,12 +690,10 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
 	}
 	spin_unlock_bh(&vsock->loopback_list_lock);
 
-	mutex_lock(&the_virtio_vsock_mutex);
-	the_virtio_vsock = NULL;
-	mutex_unlock(&the_virtio_vsock_mutex);
-
 	vdev->config->del_vqs(vdev);
 
+	mutex_unlock(&the_virtio_vsock_mutex);
+
 	kfree(vsock);
 }
 