Diffstat (limited to 'drivers/vhost/vhost.c')
 drivers/vhost/vhost.c | 138 ++++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 104 insertions(+), 34 deletions(-)
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 7aa4eea930f1..ea966b356352 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -37,6 +37,9 @@ enum {
 	VHOST_MEMORY_F_LOG = 0x1,
 };
 
+#define vhost_used_event(vq) ((u16 __user *)&vq->avail->ring[vq->num])
+#define vhost_avail_event(vq) ((u16 __user *)&vq->used->ring[vq->num])
+
 static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
 			    poll_table *pt)
 {
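
The two macros above address the event fields that VIRTIO_RING_F_EVENT_IDX appends to the rings: the guest publishes used_event as the u16 immediately past avail->ring[num], and the host publishes avail_event as the u16 immediately past used->ring[num]. A standalone userspace illustration of where those two extra bytes per ring land (not part of the patch; the types are simplified stand-ins for the vring structures), which is also why the access_ok() ranges later in this patch grow by two bytes:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_used_elem { uint32_t id, len; };	/* mirrors struct vring_used_elem */

int main(void)
{
	unsigned num = 256;	/* ring size */
	/* avail ring: flags, idx, ring[num], then used_event */
	size_t used_event_off = 2 * sizeof(uint16_t) + num * sizeof(uint16_t);
	/* used ring: flags, idx, ring[num], then avail_event */
	size_t avail_event_off = 2 * sizeof(uint16_t) +
				 num * sizeof(struct demo_used_elem);

	printf("used_event at avail+%zu, avail_event at used+%zu, 2 bytes each\n",
	       used_event_off, avail_event_off);
	return 0;
}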
@@ -161,6 +164,8 @@ static void vhost_vq_reset(struct vhost_dev *dev,
 	vq->last_avail_idx = 0;
 	vq->avail_idx = 0;
 	vq->last_used_idx = 0;
+	vq->signalled_used = 0;
+	vq->signalled_used_valid = false;
 	vq->used_flags = 0;
 	vq->log_used = false;
 	vq->log_addr = -1ull;
@@ -489,16 +494,17 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
 	return 1;
 }
 
-static int vq_access_ok(unsigned int num,
+static int vq_access_ok(struct vhost_dev *d, unsigned int num,
 			struct vring_desc __user *desc,
 			struct vring_avail __user *avail,
 			struct vring_used __user *used)
 {
+	size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
 	return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
 	       access_ok(VERIFY_READ, avail,
-			 sizeof *avail + num * sizeof *avail->ring) &&
+			 sizeof *avail + num * sizeof *avail->ring + s) &&
 	       access_ok(VERIFY_WRITE, used,
-			sizeof *used + num * sizeof *used->ring);
+			sizeof *used + num * sizeof *used->ring + s);
 }
 
 /* Can we log writes? */
@@ -514,9 +520,11 @@ int vhost_log_access_ok(struct vhost_dev *dev)
 
 /* Verify access for write logging. */
 /* Caller should have vq mutex and device mutex */
-static int vq_log_access_ok(struct vhost_virtqueue *vq, void __user *log_base)
+static int vq_log_access_ok(struct vhost_dev *d, struct vhost_virtqueue *vq,
+			    void __user *log_base)
 {
 	struct vhost_memory *mp;
+	size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
 
 	mp = rcu_dereference_protected(vq->dev->memory,
 				       lockdep_is_held(&vq->mutex));
@@ -524,15 +532,15 @@ static int vq_log_access_ok(struct vhost_virtqueue *vq, void __user *log_base)
 			    vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) &&
 		(!vq->log_used || log_access_ok(log_base, vq->log_addr,
 					sizeof *vq->used +
-					vq->num * sizeof *vq->used->ring));
+					vq->num * sizeof *vq->used->ring + s));
 }
 
 /* Can we start vq? */
 /* Caller should have vq mutex and device mutex */
 int vhost_vq_access_ok(struct vhost_virtqueue *vq)
 {
-	return vq_access_ok(vq->num, vq->desc, vq->avail, vq->used) &&
-		vq_log_access_ok(vq, vq->log_base);
+	return vq_access_ok(vq->dev, vq->num, vq->desc, vq->avail, vq->used) &&
+		vq_log_access_ok(vq->dev, vq, vq->log_base);
 }
 
 static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
@@ -577,6 +585,7 @@ static int init_used(struct vhost_virtqueue *vq,
 
 	if (r)
 		return r;
+	vq->signalled_used_valid = false;
 	return get_user(vq->last_used_idx, &used->idx);
 }
 
@@ -674,7 +683,7 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
 		 * If it is not, we don't as size might not have been setup.
 		 * We will verify when backend is configured. */
 		if (vq->private_data) {
-			if (!vq_access_ok(vq->num,
+			if (!vq_access_ok(d, vq->num,
 				(void __user *)(unsigned long)a.desc_user_addr,
 				(void __user *)(unsigned long)a.avail_user_addr,
 				(void __user *)(unsigned long)a.used_user_addr)) {
@@ -818,7 +827,7 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, unsigned long arg)
 			vq = d->vqs + i;
 			mutex_lock(&vq->mutex);
 			/* If ring is inactive, will check when it's enabled. */
-			if (vq->private_data && !vq_log_access_ok(vq, base))
+			if (vq->private_data && !vq_log_access_ok(d, vq, base))
 				r = -EFAULT;
 			else
 				vq->log_base = base;
@@ -1219,6 +1228,10 @@ int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
 
 	/* On success, increment avail index. */
 	vq->last_avail_idx++;
+
+	/* Assume notifications from guest are disabled at this point,
+	 * if they aren't we would need to update avail_event index. */
+	BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
 	return head;
 }
 
@@ -1267,6 +1280,12 @@ int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
 			eventfd_signal(vq->log_ctx, 1);
 	}
 	vq->last_used_idx++;
+	/* If the driver never bothers to signal in a very long while,
+	 * used index might wrap around. If that happens, invalidate
+	 * signalled_used index we stored. TODO: make sure driver
+	 * signals at least once in 2^16 and remove this. */
+	if (unlikely(vq->last_used_idx == vq->signalled_used))
+		vq->signalled_used_valid = false;
 	return 0;
 }
 
@@ -1275,6 +1294,7 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
 			    unsigned count)
 {
 	struct vring_used_elem __user *used;
+	u16 old, new;
 	int start;
 
 	start = vq->last_used_idx % vq->num;
@@ -1292,7 +1312,14 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
 			  ((void __user *)used - (void __user *)vq->used),
 			  count * sizeof *used);
 	}
-	vq->last_used_idx += count;
+	old = vq->last_used_idx;
+	new = (vq->last_used_idx += count);
+	/* If the driver never bothers to signal in a very long while,
+	 * used index might wrap around. If that happens, invalidate
+	 * signalled_used index we stored. TODO: make sure driver
+	 * signals at least once in 2^16 and remove this. */
+	if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
+		vq->signalled_used_valid = false;
 	return 0;
 }
 
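
The unsigned comparison added above asks, in mod-2^16 arithmetic, whether the remembered vq->signalled_used falls inside the window (old, new] that last_used_idx just advanced through; if so, the cached value can no longer be trusted and is invalidated. A small userspace model of just that test (illustration only, not from the patch):

#include <assert.h>
#include <stdint.h>

/* Nonzero when 'signalled' lies in (old, new] modulo 2^16, i.e. the used
 * index stepped over the remembered signalled value while advancing. */
static int stepped_over(uint16_t old, uint16_t new, uint16_t signalled)
{
	return (uint16_t)(new - signalled) < (uint16_t)(new - old);
}

int main(void)
{
	assert(stepped_over(0xfff0, 0x0010, 0xfff8));	/* inside the window */
	assert(!stepped_over(0xfff0, 0x0010, 0x0020));	/* still ahead of it */
	return 0;
}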
@@ -1331,29 +1358,47 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
 	return r;
 }
 
-/* This actually signals the guest, using eventfd. */
-void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
+static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 {
-	__u16 flags;
-
+	__u16 old, new, event;
+	bool v;
 	/* Flush out used index updates. This is paired
 	 * with the barrier that the Guest executes when enabling
 	 * interrupts. */
 	smp_mb();
 
-	if (__get_user(flags, &vq->avail->flags)) {
-		vq_err(vq, "Failed to get flags");
-		return;
+	if (vhost_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
+	    unlikely(vq->avail_idx == vq->last_avail_idx))
+		return true;
+
+	if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
+		__u16 flags;
+		if (__get_user(flags, &vq->avail->flags)) {
+			vq_err(vq, "Failed to get flags");
+			return true;
+		}
+		return !(flags & VRING_AVAIL_F_NO_INTERRUPT);
 	}
+	old = vq->signalled_used;
+	v = vq->signalled_used_valid;
+	new = vq->signalled_used = vq->last_used_idx;
+	vq->signalled_used_valid = true;
 
-	/* If they don't want an interrupt, don't signal, unless empty. */
-	if ((flags & VRING_AVAIL_F_NO_INTERRUPT) &&
-	    (vq->avail_idx != vq->last_avail_idx ||
-	     !vhost_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY)))
-		return;
+	if (unlikely(!v))
+		return true;
 
+	if (get_user(event, vhost_used_event(vq))) {
+		vq_err(vq, "Failed to get used event idx");
+		return true;
+	}
+	return vring_need_event(event, new, old);
+}
+
+/* This actually signals the guest, using eventfd. */
+void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
+{
 	/* Signal the Guest tell them we used something up. */
-	if (vq->call_ctx)
+	if (vq->call_ctx && vhost_notify(dev, vq))
 		eventfd_signal(vq->call_ctx, 1);
 }
 
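
vhost_notify() delegates the final decision to vring_need_event(), which is supplied by the companion virtio_ring change rather than by this file. For reference, it reduces to a single unsigned comparison that is true when the guest's published event index falls inside the batch of used entries added since the interrupt was last sent; a userspace rendering (the kernel helper lives in include/linux/virtio_ring.h and uses __u16):

#include <stdint.h>

/* True when event_idx lies in [old, new_idx) modulo 2^16, i.e. the used
 * index has moved past the point at which the guest asked to be signalled. */
static inline int vring_need_event(uint16_t event_idx, uint16_t new_idx,
				   uint16_t old)
{
	return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}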
@@ -1376,7 +1421,7 @@ void vhost_add_used_and_signal_n(struct vhost_dev *dev,
 }
 
 /* OK, now we need to know about added descriptors. */
-bool vhost_enable_notify(struct vhost_virtqueue *vq)
+bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 {
 	u16 avail_idx;
 	int r;
@@ -1384,11 +1429,34 @@ bool vhost_enable_notify(struct vhost_virtqueue *vq)
 	if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
 		return false;
 	vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
-	r = put_user(vq->used_flags, &vq->used->flags);
-	if (r) {
-		vq_err(vq, "Failed to enable notification at %p: %d\n",
-		       &vq->used->flags, r);
-		return false;
+	if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
+		r = put_user(vq->used_flags, &vq->used->flags);
+		if (r) {
+			vq_err(vq, "Failed to enable notification at %p: %d\n",
+			       &vq->used->flags, r);
+			return false;
+		}
+	} else {
+		r = put_user(vq->avail_idx, vhost_avail_event(vq));
+		if (r) {
+			vq_err(vq, "Failed to update avail event index at %p: %d\n",
+			       vhost_avail_event(vq), r);
+			return false;
+		}
+	}
+	if (unlikely(vq->log_used)) {
+		void __user *used;
+		/* Make sure data is seen before log. */
+		smp_wmb();
+		used = vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX) ?
+			&vq->used->flags : vhost_avail_event(vq);
+		/* Log used flags or event index entry write. Both are 16 bit
+		 * fields. */
+		log_write(vq->log_base, vq->log_addr +
+			   (used - (void __user *)vq->used),
+			  sizeof(u16));
+		if (vq->log_ctx)
+			eventfd_signal(vq->log_ctx, 1);
 	}
 	/* They could have slipped one in as we were doing that: make
 	 * sure it's written, then check again. */
@@ -1404,15 +1472,17 @@ bool vhost_enable_notify(struct vhost_virtqueue *vq)
 }
 
 /* We don't need to be notified again. */
-void vhost_disable_notify(struct vhost_virtqueue *vq)
+void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 {
 	int r;
 
 	if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
 		return;
 	vq->used_flags |= VRING_USED_F_NO_NOTIFY;
-	r = put_user(vq->used_flags, &vq->used->flags);
-	if (r)
-		vq_err(vq, "Failed to enable notification at %p: %d\n",
-		       &vq->used->flags, r);
+	if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
+		r = put_user(vq->used_flags, &vq->used->flags);
+		if (r)
+			vq_err(vq, "Failed to enable notification at %p: %d\n",
+			       &vq->used->flags, r);
+	}
 }
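
Since vhost_enable_notify()/vhost_disable_notify() now need the device to test the feature bit, every caller gains a struct vhost_dev argument. A hedged sketch of the caller-side pattern this implies, loosely modelled on the handlers in drivers/vhost/net.c (demo_handle_vq and demo_process_one are illustrative names, not part of the patch): notifications stay disabled while the ring has work, and when it runs dry the handler re-enables them and re-checks once to close the race noted in vhost_enable_notify().

static void demo_handle_vq(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	vhost_disable_notify(dev, vq);		/* was vhost_disable_notify(vq) */
	for (;;) {
		if (demo_process_one(dev, vq))	/* hypothetical: consume one buffer */
			continue;
		/* Ring looked empty: re-enable guest notifications, then
		 * re-check in case a buffer slipped in meanwhile. */
		if (unlikely(vhost_enable_notify(dev, vq))) {
			vhost_disable_notify(dev, vq);
			continue;
		}
		break;
	}
}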