path: root/drivers/vhost
author     Jason Wang <jasowang@redhat.com>    2019-05-24 04:12:14 -0400
committer  Michael S. Tsirkin <mst@redhat.com>    2019-06-05 16:23:52 -0400
commit     7b5d753ebc22c5b6935a70ce9a857dc6220784f8 (patch)
tree       1c99f78be7ca1df166ad2b2bf48ef44dfe6b8845 /drivers/vhost
parent     1ab5d1385af40272ca44a5cd38af7e13da6ed847 (diff)
vhost: fine grain userspace memory accessors
This is used to hide the metadata addresses from the virtqueue helpers. It will allow a later patch to implement vmap-based fast access to the metadata.

Signed-off-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
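In short, once every metadata access goes through a named helper, a follow-up change can switch the helper bodies to a vmap-backed fast path without touching any call site. A minimal sketch of that idea, using hypothetical field names that are not part of this patch:

static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq,
				      __virtio16 *idx)
{
	/* Hypothetical fast path: read the avail index through a kernel
	 * mapping of the avail ring, if one has been set up. */
	if (vq->avail_kmap) {
		*idx = vq->avail_kmap->idx;
		return 0;
	}
	/* Otherwise fall back to the userspace accessor used today. */
	return vhost_get_avail(vq, *idx, &vq->avail->idx);
}

The call sites converted below (vhost_get_vq_desc(), vhost_vq_avail_empty(), vhost_enable_notify(), and friends) would then not need to change again.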
Diffstat (limited to 'drivers/vhost')
-rw-r--r--  drivers/vhost/vhost.c | 94
1 file changed, 77 insertions(+), 17 deletions(-)
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 3fb58015f8b3..b89faedbb9bd 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -887,6 +887,34 @@ static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
 	ret; \
 })
 
+static inline int vhost_put_avail_event(struct vhost_virtqueue *vq)
+{
+	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
+			      vhost_avail_event(vq));
+}
+
+static inline int vhost_put_used(struct vhost_virtqueue *vq,
+				 struct vring_used_elem *head, int idx,
+				 int count)
+{
+	return vhost_copy_to_user(vq, vq->used->ring + idx, head,
+				  count * sizeof(*head));
+}
+
+static inline int vhost_put_used_flags(struct vhost_virtqueue *vq)
+
+{
+	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
+			      &vq->used->flags);
+}
+
+static inline int vhost_put_used_idx(struct vhost_virtqueue *vq)
+
+{
+	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
+			      &vq->used->idx);
+}
+
 #define vhost_get_user(vq, x, ptr, type) \
 ({ \
 	int ret; \
@@ -925,6 +953,43 @@ static void vhost_dev_unlock_vqs(struct vhost_dev *d)
 		mutex_unlock(&d->vqs[i]->mutex);
 }
 
+static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq,
+				      __virtio16 *idx)
+{
+	return vhost_get_avail(vq, *idx, &vq->avail->idx);
+}
+
+static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
+				       __virtio16 *head, int idx)
+{
+	return vhost_get_avail(vq, *head,
+			       &vq->avail->ring[idx & (vq->num - 1)]);
+}
+
+static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq,
+					__virtio16 *flags)
+{
+	return vhost_get_avail(vq, *flags, &vq->avail->flags);
+}
+
+static inline int vhost_get_used_event(struct vhost_virtqueue *vq,
+				       __virtio16 *event)
+{
+	return vhost_get_avail(vq, *event, vhost_used_event(vq));
+}
+
+static inline int vhost_get_used_idx(struct vhost_virtqueue *vq,
+				     __virtio16 *idx)
+{
+	return vhost_get_used(vq, *idx, &vq->used->idx);
+}
+
+static inline int vhost_get_desc(struct vhost_virtqueue *vq,
+				 struct vring_desc *desc, int idx)
+{
+	return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc));
+}
+
 static int vhost_new_umem_range(struct vhost_umem *umem,
 				u64 start, u64 size, u64 end,
 				u64 userspace_addr, int perm)
@@ -1862,8 +1927,7 @@ EXPORT_SYMBOL_GPL(vhost_log_write);
 static int vhost_update_used_flags(struct vhost_virtqueue *vq)
 {
 	void __user *used;
-	if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
-			   &vq->used->flags) < 0)
+	if (vhost_put_used_flags(vq))
 		return -EFAULT;
 	if (unlikely(vq->log_used)) {
 		/* Make sure the flag is seen before log. */
@@ -1880,8 +1944,7 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq)
 
 static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
 {
-	if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
-			   vhost_avail_event(vq)))
+	if (vhost_put_avail_event(vq))
 		return -EFAULT;
 	if (unlikely(vq->log_used)) {
 		void __user *used;
@@ -1917,7 +1980,7 @@ int vhost_vq_init_access(struct vhost_virtqueue *vq)
 		r = -EFAULT;
 		goto err;
 	}
-	r = vhost_get_used(vq, last_used_idx, &vq->used->idx);
+	r = vhost_get_used_idx(vq, &last_used_idx);
 	if (r) {
 		vq_err(vq, "Can't access used idx at %p\n",
 		       &vq->used->idx);
@@ -2116,7 +2179,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
 	last_avail_idx = vq->last_avail_idx;
 
 	if (vq->avail_idx == vq->last_avail_idx) {
-		if (unlikely(vhost_get_avail(vq, avail_idx, &vq->avail->idx))) {
+		if (unlikely(vhost_get_avail_idx(vq, &avail_idx))) {
 			vq_err(vq, "Failed to access avail idx at %p\n",
 				&vq->avail->idx);
 			return -EFAULT;
@@ -2143,8 +2206,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
 
 	/* Grab the next descriptor number they're advertising, and increment
 	 * the index we've seen. */
-	if (unlikely(vhost_get_avail(vq, ring_head,
-		     &vq->avail->ring[last_avail_idx & (vq->num - 1)]))) {
+	if (unlikely(vhost_get_avail_head(vq, &ring_head, last_avail_idx))) {
 		vq_err(vq, "Failed to read head: idx %d address %p\n",
 			last_avail_idx,
 			&vq->avail->ring[last_avail_idx % vq->num]);
@@ -2179,8 +2241,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
 			       i, vq->num, head);
 			return -EINVAL;
 		}
-		ret = vhost_copy_from_user(vq, &desc, vq->desc + i,
-					   sizeof desc);
+		ret = vhost_get_desc(vq, &desc, i);
 		if (unlikely(ret)) {
 			vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
 				i, vq->desc + i);
@@ -2273,7 +2334,7 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
 
 	start = vq->last_used_idx & (vq->num - 1);
 	used = vq->used->ring + start;
-	if (vhost_copy_to_user(vq, used, heads, count * sizeof *used)) {
+	if (vhost_put_used(vq, heads, start, count)) {
 		vq_err(vq, "Failed to write used");
 		return -EFAULT;
 	}
@@ -2315,8 +2376,7 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
 
 	/* Make sure buffer is written before we update index. */
 	smp_wmb();
-	if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
-		       &vq->used->idx)) {
+	if (vhost_put_used_idx(vq)) {
 		vq_err(vq, "Failed to increment used idx");
 		return -EFAULT;
 	}
@@ -2349,7 +2409,7 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 
 	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
 		__virtio16 flags;
-		if (vhost_get_avail(vq, flags, &vq->avail->flags)) {
+		if (vhost_get_avail_flags(vq, &flags)) {
 			vq_err(vq, "Failed to get flags");
 			return true;
 		}
@@ -2363,7 +2423,7 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 	if (unlikely(!v))
 		return true;
 
-	if (vhost_get_avail(vq, event, vhost_used_event(vq))) {
+	if (vhost_get_used_event(vq, &event)) {
 		vq_err(vq, "Failed to get used event idx");
 		return true;
 	}
@@ -2408,7 +2468,7 @@ bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 	if (vq->avail_idx != vq->last_avail_idx)
 		return false;
 
-	r = vhost_get_avail(vq, avail_idx, &vq->avail->idx);
+	r = vhost_get_avail_idx(vq, &avail_idx);
 	if (unlikely(r))
 		return false;
 	vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
@@ -2444,7 +2504,7 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 	/* They could have slipped one in as we were doing that: make
 	 * sure it's written, then check again. */
 	smp_mb();
-	r = vhost_get_avail(vq, avail_idx, &vq->avail->idx);
+	r = vhost_get_avail_idx(vq, &avail_idx);
 	if (r) {
 		vq_err(vq, "Failed to check avail idx at %p: %d\n",
 		       &vq->avail->idx, r);