diff options
| author | Krishna Kumar <krkumar2@in.ibm.com> | 2011-03-01 06:36:37 -0500 |
|---|---|---|
| committer | Michael S. Tsirkin <mst@redhat.com> | 2011-03-08 11:02:47 -0500 |
| commit | d47effe1be0c4fc983306a9c704632e3a087eed8 (patch) | |
| tree | d0705237c61d02734e79c02440bad7a439b6ac68 | |
| parent | 1fc050a13473348f5c439de2bb41c8e92dba5588 (diff) | |
vhost: Cleanup vhost.c and net.c
Minor cleanup of vhost.c and net.c to match coding style.
Signed-off-by: Krishna Kumar <krkumar2@in.ibm.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
| -rw-r--r-- | drivers/vhost/net.c | 19 | ||||
| -rw-r--r-- | drivers/vhost/vhost.c | 53 |
2 files changed, 49 insertions(+), 23 deletions(-)
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index f616cefc95ba..59dad9fe52dd 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
| @@ -60,6 +60,7 @@ static int move_iovec_hdr(struct iovec *from, struct iovec *to, | |||
| 60 | { | 60 | { |
| 61 | int seg = 0; | 61 | int seg = 0; |
| 62 | size_t size; | 62 | size_t size; |
| 63 | |||
| 63 | while (len && seg < iov_count) { | 64 | while (len && seg < iov_count) { |
| 64 | size = min(from->iov_len, len); | 65 | size = min(from->iov_len, len); |
| 65 | to->iov_base = from->iov_base; | 66 | to->iov_base = from->iov_base; |
| @@ -79,6 +80,7 @@ static void copy_iovec_hdr(const struct iovec *from, struct iovec *to, | |||
| 79 | { | 80 | { |
| 80 | int seg = 0; | 81 | int seg = 0; |
| 81 | size_t size; | 82 | size_t size; |
| 83 | |||
| 82 | while (len && seg < iovcount) { | 84 | while (len && seg < iovcount) { |
| 83 | size = min(from->iov_len, len); | 85 | size = min(from->iov_len, len); |
| 84 | to->iov_base = from->iov_base; | 86 | to->iov_base = from->iov_base; |
| @@ -296,17 +298,16 @@ static void handle_rx_big(struct vhost_net *net) | |||
| 296 | .msg_iov = vq->iov, | 298 | .msg_iov = vq->iov, |
| 297 | .msg_flags = MSG_DONTWAIT, | 299 | .msg_flags = MSG_DONTWAIT, |
| 298 | }; | 300 | }; |
| 299 | |||
| 300 | struct virtio_net_hdr hdr = { | 301 | struct virtio_net_hdr hdr = { |
| 301 | .flags = 0, | 302 | .flags = 0, |
| 302 | .gso_type = VIRTIO_NET_HDR_GSO_NONE | 303 | .gso_type = VIRTIO_NET_HDR_GSO_NONE |
| 303 | }; | 304 | }; |
| 304 | |||
| 305 | size_t len, total_len = 0; | 305 | size_t len, total_len = 0; |
| 306 | int err; | 306 | int err; |
| 307 | size_t hdr_size; | 307 | size_t hdr_size; |
| 308 | /* TODO: check that we are running from vhost_worker? */ | 308 | /* TODO: check that we are running from vhost_worker? */ |
| 309 | struct socket *sock = rcu_dereference_check(vq->private_data, 1); | 309 | struct socket *sock = rcu_dereference_check(vq->private_data, 1); |
| 310 | |||
| 310 | if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue)) | 311 | if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue)) |
| 311 | return; | 312 | return; |
| 312 | 313 | ||
| @@ -405,18 +406,17 @@ static void handle_rx_mergeable(struct vhost_net *net) | |||
| 405 | .msg_iov = vq->iov, | 406 | .msg_iov = vq->iov, |
| 406 | .msg_flags = MSG_DONTWAIT, | 407 | .msg_flags = MSG_DONTWAIT, |
| 407 | }; | 408 | }; |
| 408 | |||
| 409 | struct virtio_net_hdr_mrg_rxbuf hdr = { | 409 | struct virtio_net_hdr_mrg_rxbuf hdr = { |
| 410 | .hdr.flags = 0, | 410 | .hdr.flags = 0, |
| 411 | .hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE | 411 | .hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE |
| 412 | }; | 412 | }; |
| 413 | |||
| 414 | size_t total_len = 0; | 413 | size_t total_len = 0; |
| 415 | int err, headcount; | 414 | int err, headcount; |
| 416 | size_t vhost_hlen, sock_hlen; | 415 | size_t vhost_hlen, sock_hlen; |
| 417 | size_t vhost_len, sock_len; | 416 | size_t vhost_len, sock_len; |
| 418 | /* TODO: check that we are running from vhost_worker? */ | 417 | /* TODO: check that we are running from vhost_worker? */ |
| 419 | struct socket *sock = rcu_dereference_check(vq->private_data, 1); | 418 | struct socket *sock = rcu_dereference_check(vq->private_data, 1); |
| 419 | |||
| 420 | if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue)) | 420 | if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue)) |
| 421 | return; | 421 | return; |
| 422 | 422 | ||
| @@ -654,6 +654,7 @@ static struct socket *get_raw_socket(int fd) | |||
| 654 | } uaddr; | 654 | } uaddr; |
| 655 | int uaddr_len = sizeof uaddr, r; | 655 | int uaddr_len = sizeof uaddr, r; |
| 656 | struct socket *sock = sockfd_lookup(fd, &r); | 656 | struct socket *sock = sockfd_lookup(fd, &r); |
| 657 | |||
| 657 | if (!sock) | 658 | if (!sock) |
| 658 | return ERR_PTR(-ENOTSOCK); | 659 | return ERR_PTR(-ENOTSOCK); |
| 659 | 660 | ||
| @@ -682,6 +683,7 @@ static struct socket *get_tap_socket(int fd) | |||
| 682 | { | 683 | { |
| 683 | struct file *file = fget(fd); | 684 | struct file *file = fget(fd); |
| 684 | struct socket *sock; | 685 | struct socket *sock; |
| 686 | |||
| 685 | if (!file) | 687 | if (!file) |
| 686 | return ERR_PTR(-EBADF); | 688 | return ERR_PTR(-EBADF); |
| 687 | sock = tun_get_socket(file); | 689 | sock = tun_get_socket(file); |
| @@ -696,6 +698,7 @@ static struct socket *get_tap_socket(int fd) | |||
| 696 | static struct socket *get_socket(int fd) | 698 | static struct socket *get_socket(int fd) |
| 697 | { | 699 | { |
| 698 | struct socket *sock; | 700 | struct socket *sock; |
| 701 | |||
| 699 | /* special case to disable backend */ | 702 | /* special case to disable backend */ |
| 700 | if (fd == -1) | 703 | if (fd == -1) |
| 701 | return NULL; | 704 | return NULL; |
| @@ -741,9 +744,9 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd) | |||
| 741 | oldsock = rcu_dereference_protected(vq->private_data, | 744 | oldsock = rcu_dereference_protected(vq->private_data, |
| 742 | lockdep_is_held(&vq->mutex)); | 745 | lockdep_is_held(&vq->mutex)); |
| 743 | if (sock != oldsock) { | 746 | if (sock != oldsock) { |
| 744 | vhost_net_disable_vq(n, vq); | 747 | vhost_net_disable_vq(n, vq); |
| 745 | rcu_assign_pointer(vq->private_data, sock); | 748 | rcu_assign_pointer(vq->private_data, sock); |
| 746 | vhost_net_enable_vq(n, vq); | 749 | vhost_net_enable_vq(n, vq); |
| 747 | } | 750 | } |
| 748 | 751 | ||
| 749 | mutex_unlock(&vq->mutex); | 752 | mutex_unlock(&vq->mutex); |
| @@ -768,6 +771,7 @@ static long vhost_net_reset_owner(struct vhost_net *n) | |||
| 768 | struct socket *tx_sock = NULL; | 771 | struct socket *tx_sock = NULL; |
| 769 | struct socket *rx_sock = NULL; | 772 | struct socket *rx_sock = NULL; |
| 770 | long err; | 773 | long err; |
| 774 | |||
| 771 | mutex_lock(&n->dev.mutex); | 775 | mutex_lock(&n->dev.mutex); |
| 772 | err = vhost_dev_check_owner(&n->dev); | 776 | err = vhost_dev_check_owner(&n->dev); |
| 773 | if (err) | 777 | if (err) |
| @@ -829,6 +833,7 @@ static long vhost_net_ioctl(struct file *f, unsigned int ioctl, | |||
| 829 | struct vhost_vring_file backend; | 833 | struct vhost_vring_file backend; |
| 830 | u64 features; | 834 | u64 features; |
| 831 | int r; | 835 | int r; |
| 836 | |||
| 832 | switch (ioctl) { | 837 | switch (ioctl) { |
| 833 | case VHOST_NET_SET_BACKEND: | 838 | case VHOST_NET_SET_BACKEND: |
| 834 | if (copy_from_user(&backend, argp, sizeof backend)) | 839 | if (copy_from_user(&backend, argp, sizeof backend)) |
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index ade0568c07a4..b0cc7f8ca4de 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
| @@ -41,8 +41,8 @@ static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh, | |||
| 41 | poll_table *pt) | 41 | poll_table *pt) |
| 42 | { | 42 | { |
| 43 | struct vhost_poll *poll; | 43 | struct vhost_poll *poll; |
| 44 | poll = container_of(pt, struct vhost_poll, table); | ||
| 45 | 44 | ||
| 45 | poll = container_of(pt, struct vhost_poll, table); | ||
| 46 | poll->wqh = wqh; | 46 | poll->wqh = wqh; |
| 47 | add_wait_queue(wqh, &poll->wait); | 47 | add_wait_queue(wqh, &poll->wait); |
| 48 | } | 48 | } |
| @@ -85,6 +85,7 @@ void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn, | |||
| 85 | void vhost_poll_start(struct vhost_poll *poll, struct file *file) | 85 | void vhost_poll_start(struct vhost_poll *poll, struct file *file) |
| 86 | { | 86 | { |
| 87 | unsigned long mask; | 87 | unsigned long mask; |
| 88 | |||
| 88 | mask = file->f_op->poll(file, &poll->table); | 89 | mask = file->f_op->poll(file, &poll->table); |
| 89 | if (mask) | 90 | if (mask) |
| 90 | vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask); | 91 | vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask); |
| @@ -101,6 +102,7 @@ static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work, | |||
| 101 | unsigned seq) | 102 | unsigned seq) |
| 102 | { | 103 | { |
| 103 | int left; | 104 | int left; |
| 105 | |||
| 104 | spin_lock_irq(&dev->work_lock); | 106 | spin_lock_irq(&dev->work_lock); |
| 105 | left = seq - work->done_seq; | 107 | left = seq - work->done_seq; |
| 106 | spin_unlock_irq(&dev->work_lock); | 108 | spin_unlock_irq(&dev->work_lock); |
| @@ -222,6 +224,7 @@ static int vhost_worker(void *data) | |||
| 222 | static long vhost_dev_alloc_iovecs(struct vhost_dev *dev) | 224 | static long vhost_dev_alloc_iovecs(struct vhost_dev *dev) |
| 223 | { | 225 | { |
| 224 | int i; | 226 | int i; |
| 227 | |||
| 225 | for (i = 0; i < dev->nvqs; ++i) { | 228 | for (i = 0; i < dev->nvqs; ++i) { |
| 226 | dev->vqs[i].indirect = kmalloc(sizeof *dev->vqs[i].indirect * | 229 | dev->vqs[i].indirect = kmalloc(sizeof *dev->vqs[i].indirect * |
| 227 | UIO_MAXIOV, GFP_KERNEL); | 230 | UIO_MAXIOV, GFP_KERNEL); |
| @@ -235,6 +238,7 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev) | |||
| 235 | goto err_nomem; | 238 | goto err_nomem; |
| 236 | } | 239 | } |
| 237 | return 0; | 240 | return 0; |
| 241 | |||
| 238 | err_nomem: | 242 | err_nomem: |
| 239 | for (; i >= 0; --i) { | 243 | for (; i >= 0; --i) { |
| 240 | kfree(dev->vqs[i].indirect); | 244 | kfree(dev->vqs[i].indirect); |
| @@ -247,6 +251,7 @@ err_nomem: | |||
| 247 | static void vhost_dev_free_iovecs(struct vhost_dev *dev) | 251 | static void vhost_dev_free_iovecs(struct vhost_dev *dev) |
| 248 | { | 252 | { |
| 249 | int i; | 253 | int i; |
| 254 | |||
| 250 | for (i = 0; i < dev->nvqs; ++i) { | 255 | for (i = 0; i < dev->nvqs; ++i) { |
| 251 | kfree(dev->vqs[i].indirect); | 256 | kfree(dev->vqs[i].indirect); |
| 252 | dev->vqs[i].indirect = NULL; | 257 | dev->vqs[i].indirect = NULL; |
| @@ -296,26 +301,28 @@ long vhost_dev_check_owner(struct vhost_dev *dev) | |||
| 296 | } | 301 | } |
| 297 | 302 | ||
| 298 | struct vhost_attach_cgroups_struct { | 303 | struct vhost_attach_cgroups_struct { |
| 299 | struct vhost_work work; | 304 | struct vhost_work work; |
| 300 | struct task_struct *owner; | 305 | struct task_struct *owner; |
| 301 | int ret; | 306 | int ret; |
| 302 | }; | 307 | }; |
| 303 | 308 | ||
| 304 | static void vhost_attach_cgroups_work(struct vhost_work *work) | 309 | static void vhost_attach_cgroups_work(struct vhost_work *work) |
| 305 | { | 310 | { |
| 306 | struct vhost_attach_cgroups_struct *s; | 311 | struct vhost_attach_cgroups_struct *s; |
| 307 | s = container_of(work, struct vhost_attach_cgroups_struct, work); | 312 | |
| 308 | s->ret = cgroup_attach_task_all(s->owner, current); | 313 | s = container_of(work, struct vhost_attach_cgroups_struct, work); |
| 314 | s->ret = cgroup_attach_task_all(s->owner, current); | ||
| 309 | } | 315 | } |
| 310 | 316 | ||
| 311 | static int vhost_attach_cgroups(struct vhost_dev *dev) | 317 | static int vhost_attach_cgroups(struct vhost_dev *dev) |
| 312 | { | 318 | { |
| 313 | struct vhost_attach_cgroups_struct attach; | 319 | struct vhost_attach_cgroups_struct attach; |
| 314 | attach.owner = current; | 320 | |
| 315 | vhost_work_init(&attach.work, vhost_attach_cgroups_work); | 321 | attach.owner = current; |
| 316 | vhost_work_queue(dev, &attach.work); | 322 | vhost_work_init(&attach.work, vhost_attach_cgroups_work); |
| 317 | vhost_work_flush(dev, &attach.work); | 323 | vhost_work_queue(dev, &attach.work); |
| 318 | return attach.ret; | 324 | vhost_work_flush(dev, &attach.work); |
| 325 | return attach.ret; | ||
| 319 | } | 326 | } |
| 320 | 327 | ||
| 321 | /* Caller should have device mutex */ | 328 | /* Caller should have device mutex */ |
| @@ -323,11 +330,13 @@ static long vhost_dev_set_owner(struct vhost_dev *dev) | |||
| 323 | { | 330 | { |
| 324 | struct task_struct *worker; | 331 | struct task_struct *worker; |
| 325 | int err; | 332 | int err; |
| 333 | |||
| 326 | /* Is there an owner already? */ | 334 | /* Is there an owner already? */ |
| 327 | if (dev->mm) { | 335 | if (dev->mm) { |
| 328 | err = -EBUSY; | 336 | err = -EBUSY; |
| 329 | goto err_mm; | 337 | goto err_mm; |
| 330 | } | 338 | } |
| 339 | |||
| 331 | /* No owner, become one */ | 340 | /* No owner, become one */ |
| 332 | dev->mm = get_task_mm(current); | 341 | dev->mm = get_task_mm(current); |
| 333 | worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid); | 342 | worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid); |
| @@ -380,6 +389,7 @@ long vhost_dev_reset_owner(struct vhost_dev *dev) | |||
| 380 | void vhost_dev_cleanup(struct vhost_dev *dev) | 389 | void vhost_dev_cleanup(struct vhost_dev *dev) |
| 381 | { | 390 | { |
| 382 | int i; | 391 | int i; |
| 392 | |||
| 383 | for (i = 0; i < dev->nvqs; ++i) { | 393 | for (i = 0; i < dev->nvqs; ++i) { |
| 384 | if (dev->vqs[i].kick && dev->vqs[i].handle_kick) { | 394 | if (dev->vqs[i].kick && dev->vqs[i].handle_kick) { |
| 385 | vhost_poll_stop(&dev->vqs[i].poll); | 395 | vhost_poll_stop(&dev->vqs[i].poll); |
| @@ -421,6 +431,7 @@ void vhost_dev_cleanup(struct vhost_dev *dev) | |||
| 421 | static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz) | 431 | static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz) |
| 422 | { | 432 | { |
| 423 | u64 a = addr / VHOST_PAGE_SIZE / 8; | 433 | u64 a = addr / VHOST_PAGE_SIZE / 8; |
| 434 | |||
| 424 | /* Make sure 64 bit math will not overflow. */ | 435 | /* Make sure 64 bit math will not overflow. */ |
| 425 | if (a > ULONG_MAX - (unsigned long)log_base || | 436 | if (a > ULONG_MAX - (unsigned long)log_base || |
| 426 | a + (unsigned long)log_base > ULONG_MAX) | 437 | a + (unsigned long)log_base > ULONG_MAX) |
| @@ -461,6 +472,7 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem, | |||
| 461 | int log_all) | 472 | int log_all) |
| 462 | { | 473 | { |
| 463 | int i; | 474 | int i; |
| 475 | |||
| 464 | for (i = 0; i < d->nvqs; ++i) { | 476 | for (i = 0; i < d->nvqs; ++i) { |
| 465 | int ok; | 477 | int ok; |
| 466 | mutex_lock(&d->vqs[i].mutex); | 478 | mutex_lock(&d->vqs[i].mutex); |
| @@ -527,6 +539,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) | |||
| 527 | { | 539 | { |
| 528 | struct vhost_memory mem, *newmem, *oldmem; | 540 | struct vhost_memory mem, *newmem, *oldmem; |
| 529 | unsigned long size = offsetof(struct vhost_memory, regions); | 541 | unsigned long size = offsetof(struct vhost_memory, regions); |
| 542 | |||
| 530 | if (copy_from_user(&mem, m, size)) | 543 | if (copy_from_user(&mem, m, size)) |
| 531 | return -EFAULT; | 544 | return -EFAULT; |
| 532 | if (mem.padding) | 545 | if (mem.padding) |
| @@ -544,7 +557,8 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) | |||
| 544 | return -EFAULT; | 557 | return -EFAULT; |
| 545 | } | 558 | } |
| 546 | 559 | ||
| 547 | if (!memory_access_ok(d, newmem, vhost_has_feature(d, VHOST_F_LOG_ALL))) { | 560 | if (!memory_access_ok(d, newmem, |
| 561 | vhost_has_feature(d, VHOST_F_LOG_ALL))) { | ||
| 548 | kfree(newmem); | 562 | kfree(newmem); |
| 549 | return -EFAULT; | 563 | return -EFAULT; |
| 550 | } | 564 | } |
| @@ -560,6 +574,7 @@ static int init_used(struct vhost_virtqueue *vq, | |||
| 560 | struct vring_used __user *used) | 574 | struct vring_used __user *used) |
| 561 | { | 575 | { |
| 562 | int r = put_user(vq->used_flags, &used->flags); | 576 | int r = put_user(vq->used_flags, &used->flags); |
| 577 | |||
| 563 | if (r) | 578 | if (r) |
| 564 | return r; | 579 | return r; |
| 565 | return get_user(vq->last_used_idx, &used->idx); | 580 | return get_user(vq->last_used_idx, &used->idx); |
| @@ -849,6 +864,7 @@ static const struct vhost_memory_region *find_region(struct vhost_memory *mem, | |||
| 849 | { | 864 | { |
| 850 | struct vhost_memory_region *reg; | 865 | struct vhost_memory_region *reg; |
| 851 | int i; | 866 | int i; |
| 867 | |||
| 852 | /* linear search is not brilliant, but we really have on the order of 6 | 868 | /* linear search is not brilliant, but we really have on the order of 6 |
| 853 | * regions in practice */ | 869 | * regions in practice */ |
| 854 | for (i = 0; i < mem->nregions; ++i) { | 870 | for (i = 0; i < mem->nregions; ++i) { |
| @@ -871,6 +887,7 @@ static int set_bit_to_user(int nr, void __user *addr) | |||
| 871 | void *base; | 887 | void *base; |
| 872 | int bit = nr + (log % PAGE_SIZE) * 8; | 888 | int bit = nr + (log % PAGE_SIZE) * 8; |
| 873 | int r; | 889 | int r; |
| 890 | |||
| 874 | r = get_user_pages_fast(log, 1, 1, &page); | 891 | r = get_user_pages_fast(log, 1, 1, &page); |
| 875 | if (r < 0) | 892 | if (r < 0) |
| 876 | return r; | 893 | return r; |
| @@ -888,6 +905,7 @@ static int log_write(void __user *log_base, | |||
| 888 | { | 905 | { |
| 889 | u64 write_page = write_address / VHOST_PAGE_SIZE; | 906 | u64 write_page = write_address / VHOST_PAGE_SIZE; |
| 890 | int r; | 907 | int r; |
| 908 | |||
| 891 | if (!write_length) | 909 | if (!write_length) |
| 892 | return 0; | 910 | return 0; |
| 893 | write_length += write_address % VHOST_PAGE_SIZE; | 911 | write_length += write_address % VHOST_PAGE_SIZE; |
| @@ -1037,8 +1055,8 @@ static int get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq, | |||
| 1037 | i, count); | 1055 | i, count); |
| 1038 | return -EINVAL; | 1056 | return -EINVAL; |
| 1039 | } | 1057 | } |
| 1040 | if (unlikely(memcpy_fromiovec((unsigned char *)&desc, vq->indirect, | 1058 | if (unlikely(memcpy_fromiovec((unsigned char *)&desc, |
| 1041 | sizeof desc))) { | 1059 | vq->indirect, sizeof desc))) { |
| 1042 | vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n", | 1060 | vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n", |
| 1043 | i, (size_t)indirect->addr + i * sizeof desc); | 1061 | i, (size_t)indirect->addr + i * sizeof desc); |
| 1044 | return -EINVAL; | 1062 | return -EINVAL; |
| @@ -1317,6 +1335,7 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads, | |||
| 1317 | void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq) | 1335 | void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq) |
| 1318 | { | 1336 | { |
| 1319 | __u16 flags; | 1337 | __u16 flags; |
| 1338 | |||
| 1320 | /* Flush out used index updates. This is paired | 1339 | /* Flush out used index updates. This is paired |
| 1321 | * with the barrier that the Guest executes when enabling | 1340 | * with the barrier that the Guest executes when enabling |
| 1322 | * interrupts. */ | 1341 | * interrupts. */ |
| @@ -1361,6 +1380,7 @@ bool vhost_enable_notify(struct vhost_virtqueue *vq) | |||
| 1361 | { | 1380 | { |
| 1362 | u16 avail_idx; | 1381 | u16 avail_idx; |
| 1363 | int r; | 1382 | int r; |
| 1383 | |||
| 1364 | if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY)) | 1384 | if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY)) |
| 1365 | return false; | 1385 | return false; |
| 1366 | vq->used_flags &= ~VRING_USED_F_NO_NOTIFY; | 1386 | vq->used_flags &= ~VRING_USED_F_NO_NOTIFY; |
| @@ -1387,6 +1407,7 @@ bool vhost_enable_notify(struct vhost_virtqueue *vq) | |||
| 1387 | void vhost_disable_notify(struct vhost_virtqueue *vq) | 1407 | void vhost_disable_notify(struct vhost_virtqueue *vq) |
| 1388 | { | 1408 | { |
| 1389 | int r; | 1409 | int r; |
| 1410 | |||
| 1390 | if (vq->used_flags & VRING_USED_F_NO_NOTIFY) | 1411 | if (vq->used_flags & VRING_USED_F_NO_NOTIFY) |
| 1391 | return; | 1412 | return; |
| 1392 | vq->used_flags |= VRING_USED_F_NO_NOTIFY; | 1413 | vq->used_flags |= VRING_USED_F_NO_NOTIFY; |
