about summary refs log tree commit diff stats
path: root/drivers/vhost/vhost.c
diff options
context:
space:
mode:
authorStefan Hajnoczi <stefanha@redhat.com>2018-04-10 22:35:41 -0400
committerDavid S. Miller <davem@davemloft.net>2018-04-11 10:54:06 -0400
commitddd3d4081ffa806ffef28eaeefde757ba2b6812a (patch)
treee55d8ec43f5c5ce6d42a3af79d69361113819d7b /drivers/vhost/vhost.c
parentd14d2b78090c7de0557362b26a4ca591aa6a9faa (diff)
vhost: return bool from *_access_ok() functions
Currently vhost *_access_ok() functions return int. This is error-prone because there are two popular conventions: 1. 0 means failure, 1 means success; 2. -errno means failure, 0 means success. Although vhost mostly uses #1, it does not do so consistently; umem_access_ok() uses #2. This patch changes the return type from int to bool so that false means failure and true means success. This eliminates a potential source of errors. Suggested-by: Linus Torvalds <torvalds@linux-foundation.org> Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> Acked-by: Michael S. Tsirkin <mst@redhat.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/vhost/vhost.c')
-rw-r--r--drivers/vhost/vhost.c66
1 file changed, 33 insertions(+), 33 deletions(-)
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 9a18535fafba..f3bd8e941224 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -641,14 +641,14 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
641} 641}
642EXPORT_SYMBOL_GPL(vhost_dev_cleanup); 642EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
643 643
644static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz) 644static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
645{ 645{
646 u64 a = addr / VHOST_PAGE_SIZE / 8; 646 u64 a = addr / VHOST_PAGE_SIZE / 8;
647 647
648 /* Make sure 64 bit math will not overflow. */ 648 /* Make sure 64 bit math will not overflow. */
649 if (a > ULONG_MAX - (unsigned long)log_base || 649 if (a > ULONG_MAX - (unsigned long)log_base ||
650 a + (unsigned long)log_base > ULONG_MAX) 650 a + (unsigned long)log_base > ULONG_MAX)
651 return 0; 651 return false;
652 652
653 return access_ok(VERIFY_WRITE, log_base + a, 653 return access_ok(VERIFY_WRITE, log_base + a,
654 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8); 654 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
@@ -661,30 +661,30 @@ static bool vhost_overflow(u64 uaddr, u64 size)
661} 661}
662 662
663/* Caller should have vq mutex and device mutex. */ 663/* Caller should have vq mutex and device mutex. */
664static int vq_memory_access_ok(void __user *log_base, struct vhost_umem *umem, 664static bool vq_memory_access_ok(void __user *log_base, struct vhost_umem *umem,
665 int log_all) 665 int log_all)
666{ 666{
667 struct vhost_umem_node *node; 667 struct vhost_umem_node *node;
668 668
669 if (!umem) 669 if (!umem)
670 return 0; 670 return false;
671 671
672 list_for_each_entry(node, &umem->umem_list, link) { 672 list_for_each_entry(node, &umem->umem_list, link) {
673 unsigned long a = node->userspace_addr; 673 unsigned long a = node->userspace_addr;
674 674
675 if (vhost_overflow(node->userspace_addr, node->size)) 675 if (vhost_overflow(node->userspace_addr, node->size))
676 return 0; 676 return false;
677 677
678 678
679 if (!access_ok(VERIFY_WRITE, (void __user *)a, 679 if (!access_ok(VERIFY_WRITE, (void __user *)a,
680 node->size)) 680 node->size))
681 return 0; 681 return false;
682 else if (log_all && !log_access_ok(log_base, 682 else if (log_all && !log_access_ok(log_base,
683 node->start, 683 node->start,
684 node->size)) 684 node->size))
685 return 0; 685 return false;
686 } 686 }
687 return 1; 687 return true;
688} 688}
689 689
690static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq, 690static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq,
@@ -701,13 +701,13 @@ static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq,
701 701
702/* Can we switch to this memory table? */ 702/* Can we switch to this memory table? */
703/* Caller should have device mutex but not vq mutex */ 703/* Caller should have device mutex but not vq mutex */
704static int memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem, 704static bool memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem,
705 int log_all) 705 int log_all)
706{ 706{
707 int i; 707 int i;
708 708
709 for (i = 0; i < d->nvqs; ++i) { 709 for (i = 0; i < d->nvqs; ++i) {
710 int ok; 710 bool ok;
711 bool log; 711 bool log;
712 712
713 mutex_lock(&d->vqs[i]->mutex); 713 mutex_lock(&d->vqs[i]->mutex);
@@ -717,12 +717,12 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem,
717 ok = vq_memory_access_ok(d->vqs[i]->log_base, 717 ok = vq_memory_access_ok(d->vqs[i]->log_base,
718 umem, log); 718 umem, log);
719 else 719 else
720 ok = 1; 720 ok = true;
721 mutex_unlock(&d->vqs[i]->mutex); 721 mutex_unlock(&d->vqs[i]->mutex);
722 if (!ok) 722 if (!ok)
723 return 0; 723 return false;
724 } 724 }
725 return 1; 725 return true;
726} 726}
727 727
728static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len, 728static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
@@ -959,21 +959,21 @@ static void vhost_iotlb_notify_vq(struct vhost_dev *d,
959 spin_unlock(&d->iotlb_lock); 959 spin_unlock(&d->iotlb_lock);
960} 960}
961 961
962static int umem_access_ok(u64 uaddr, u64 size, int access) 962static bool umem_access_ok(u64 uaddr, u64 size, int access)
963{ 963{
964 unsigned long a = uaddr; 964 unsigned long a = uaddr;
965 965
966 /* Make sure 64 bit math will not overflow. */ 966 /* Make sure 64 bit math will not overflow. */
967 if (vhost_overflow(uaddr, size)) 967 if (vhost_overflow(uaddr, size))
968 return -EFAULT; 968 return false;
969 969
970 if ((access & VHOST_ACCESS_RO) && 970 if ((access & VHOST_ACCESS_RO) &&
971 !access_ok(VERIFY_READ, (void __user *)a, size)) 971 !access_ok(VERIFY_READ, (void __user *)a, size))
972 return -EFAULT; 972 return false;
973 if ((access & VHOST_ACCESS_WO) && 973 if ((access & VHOST_ACCESS_WO) &&
974 !access_ok(VERIFY_WRITE, (void __user *)a, size)) 974 !access_ok(VERIFY_WRITE, (void __user *)a, size))
975 return -EFAULT; 975 return false;
976 return 0; 976 return true;
977} 977}
978 978
979static int vhost_process_iotlb_msg(struct vhost_dev *dev, 979static int vhost_process_iotlb_msg(struct vhost_dev *dev,
@@ -988,7 +988,7 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
988 ret = -EFAULT; 988 ret = -EFAULT;
989 break; 989 break;
990 } 990 }
991 if (umem_access_ok(msg->uaddr, msg->size, msg->perm)) { 991 if (!umem_access_ok(msg->uaddr, msg->size, msg->perm)) {
992 ret = -EFAULT; 992 ret = -EFAULT;
993 break; 993 break;
994 } 994 }
@@ -1135,10 +1135,10 @@ static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
1135 return 0; 1135 return 0;
1136} 1136}
1137 1137
1138static int vq_access_ok(struct vhost_virtqueue *vq, unsigned int num, 1138static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
1139 struct vring_desc __user *desc, 1139 struct vring_desc __user *desc,
1140 struct vring_avail __user *avail, 1140 struct vring_avail __user *avail,
1141 struct vring_used __user *used) 1141 struct vring_used __user *used)
1142 1142
1143{ 1143{
1144 size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0; 1144 size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
@@ -1161,8 +1161,8 @@ static void vhost_vq_meta_update(struct vhost_virtqueue *vq,
1161 vq->meta_iotlb[type] = node; 1161 vq->meta_iotlb[type] = node;
1162} 1162}
1163 1163
1164static int iotlb_access_ok(struct vhost_virtqueue *vq, 1164static bool iotlb_access_ok(struct vhost_virtqueue *vq,
1165 int access, u64 addr, u64 len, int type) 1165 int access, u64 addr, u64 len, int type)
1166{ 1166{
1167 const struct vhost_umem_node *node; 1167 const struct vhost_umem_node *node;
1168 struct vhost_umem *umem = vq->iotlb; 1168 struct vhost_umem *umem = vq->iotlb;
@@ -1220,7 +1220,7 @@ EXPORT_SYMBOL_GPL(vq_iotlb_prefetch);
1220 1220
1221/* Can we log writes? */ 1221/* Can we log writes? */
1222/* Caller should have device mutex but not vq mutex */ 1222/* Caller should have device mutex but not vq mutex */
1223int vhost_log_access_ok(struct vhost_dev *dev) 1223bool vhost_log_access_ok(struct vhost_dev *dev)
1224{ 1224{
1225 return memory_access_ok(dev, dev->umem, 1); 1225 return memory_access_ok(dev, dev->umem, 1);
1226} 1226}
@@ -1228,8 +1228,8 @@ EXPORT_SYMBOL_GPL(vhost_log_access_ok);
1228 1228
1229/* Verify access for write logging. */ 1229/* Verify access for write logging. */
1230/* Caller should have vq mutex and device mutex */ 1230/* Caller should have vq mutex and device mutex */
1231static int vq_log_access_ok(struct vhost_virtqueue *vq, 1231static bool vq_log_access_ok(struct vhost_virtqueue *vq,
1232 void __user *log_base) 1232 void __user *log_base)
1233{ 1233{
1234 size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0; 1234 size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
1235 1235
@@ -1242,14 +1242,14 @@ static int vq_log_access_ok(struct vhost_virtqueue *vq,
1242 1242
1243/* Can we start vq? */ 1243/* Can we start vq? */
1244/* Caller should have vq mutex and device mutex */ 1244/* Caller should have vq mutex and device mutex */
1245int vhost_vq_access_ok(struct vhost_virtqueue *vq) 1245bool vhost_vq_access_ok(struct vhost_virtqueue *vq)
1246{ 1246{
1247 if (!vq_log_access_ok(vq, vq->log_base)) 1247 if (!vq_log_access_ok(vq, vq->log_base))
1248 return 0; 1248 return false;
1249 1249
1250 /* Access validation occurs at prefetch time with IOTLB */ 1250 /* Access validation occurs at prefetch time with IOTLB */
1251 if (vq->iotlb) 1251 if (vq->iotlb)
1252 return 1; 1252 return true;
1253 1253
1254 return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used); 1254 return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used);
1255} 1255}