author		Michael S. Tsirkin <mst@redhat.com>	2014-10-24 07:04:47 -0400
committer	Michael S. Tsirkin <mst@redhat.com>	2014-12-09 05:05:29 -0500
commit		3b1bbe89351a8003857aeb5cbef3595f5d0ee609 (patch)
tree		d4600391db01ebfd0a34dbfca8d9dc0d729d34dd /drivers/vhost
parent		64f7f0510c7e6f61eab080e3f51d314849f47ac1 (diff)
vhost: virtio 1.0 endian-ness support
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Diffstat (limited to 'drivers/vhost')
-rw-r--r--	drivers/vhost/vhost.c	86
1 file changed, 49 insertions(+), 37 deletions(-)
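Note: the accessors used throughout the diff below (cpu_to_vhost16/32, vhost16/32/64_to_cpu) are not defined in this file; they come from the vhost.h helpers introduced in the same series. The following is only a minimal sketch of the shape those helpers are expected to have, assuming byte order is selected per virtqueue by whether the guest negotiated VIRTIO_F_VERSION_1; it is an approximation for context, not part of this patch.

	/* Sketch of the companion vhost.h helpers (assumed, not this patch). */
	static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
	{
		/* Assumption: VIRTIO 1.0 devices always use little-endian fields. */
		return vhost_has_feature(vq, VIRTIO_F_VERSION_1);
	}

	static inline u16 vhost16_to_cpu(struct vhost_virtqueue *vq, __virtio16 val)
	{
		return __virtio16_to_cpu(vhost_is_little_endian(vq), val);
	}

	static inline __virtio16 cpu_to_vhost16(struct vhost_virtqueue *vq, u16 val)
	{
		return __cpu_to_virtio16(vhost_is_little_endian(vq), val);
	}
	/* The 32- and 64-bit variants follow the same pattern. */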
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 6a408377598a..ed71b5347a76 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -33,8 +33,8 @@ enum {
 	VHOST_MEMORY_F_LOG = 0x1,
 };
 
-#define vhost_used_event(vq) ((u16 __user *)&vq->avail->ring[vq->num])
-#define vhost_avail_event(vq) ((u16 __user *)&vq->used->ring[vq->num])
+#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
+#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])
 
 static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
 			    poll_table *pt)
@@ -1001,7 +1001,7 @@ EXPORT_SYMBOL_GPL(vhost_log_write);
 static int vhost_update_used_flags(struct vhost_virtqueue *vq)
 {
 	void __user *used;
-	if (__put_user(vq->used_flags, &vq->used->flags) < 0)
+	if (__put_user(cpu_to_vhost16(vq, vq->used_flags), &vq->used->flags) < 0)
 		return -EFAULT;
 	if (unlikely(vq->log_used)) {
 		/* Make sure the flag is seen before log. */
@@ -1019,7 +1019,7 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq)
 
 static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
 {
-	if (__put_user(vq->avail_idx, vhost_avail_event(vq)))
+	if (__put_user(cpu_to_vhost16(vq, vq->avail_idx), vhost_avail_event(vq)))
 		return -EFAULT;
 	if (unlikely(vq->log_used)) {
 		void __user *used;
@@ -1038,7 +1038,7 @@ static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
 
 int vhost_init_used(struct vhost_virtqueue *vq)
 {
-	u16 last_used_idx;
+	__virtio16 last_used_idx;
 	int r;
 	if (!vq->private_data)
 		return 0;
@@ -1052,7 +1052,7 @@ int vhost_init_used(struct vhost_virtqueue *vq)
 	r = __get_user(last_used_idx, &vq->used->idx);
 	if (r)
 		return r;
-	vq->last_used_idx = last_used_idx;
+	vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(vhost_init_used);
@@ -1094,16 +1094,16 @@ static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
 /* Each buffer in the virtqueues is actually a chain of descriptors. This
  * function returns the next descriptor in the chain,
  * or -1U if we're at the end. */
-static unsigned next_desc(struct vring_desc *desc)
+static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
 {
 	unsigned int next;
 
 	/* If this descriptor says it doesn't chain, we're done. */
-	if (!(desc->flags & VRING_DESC_F_NEXT))
+	if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT)))
 		return -1U;
 
 	/* Check they're not leading us off end of descriptors. */
-	next = desc->next;
+	next = vhost16_to_cpu(vq, desc->next);
 	/* Make sure compiler knows to grab that: we don't want it changing! */
 	/* We will use the result as an index in an array, so most
 	 * architectures only need a compiler barrier here. */
@@ -1120,18 +1120,19 @@ static int get_indirect(struct vhost_virtqueue *vq,
 {
 	struct vring_desc desc;
 	unsigned int i = 0, count, found = 0;
+	u32 len = vhost32_to_cpu(vq, indirect->len);
 	int ret;
 
 	/* Sanity check */
-	if (unlikely(indirect->len % sizeof desc)) {
+	if (unlikely(len % sizeof desc)) {
 		vq_err(vq, "Invalid length in indirect descriptor: "
 		       "len 0x%llx not multiple of 0x%zx\n",
-		       (unsigned long long)indirect->len,
+		       (unsigned long long)len,
 		       sizeof desc);
 		return -EINVAL;
 	}
 
-	ret = translate_desc(vq, indirect->addr, indirect->len, vq->indirect,
+	ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect,
 			     UIO_MAXIOV);
 	if (unlikely(ret < 0)) {
 		vq_err(vq, "Translation failure %d in indirect.\n", ret);
@@ -1142,7 +1143,7 @@ static int get_indirect(struct vhost_virtqueue *vq,
 	 * architectures only need a compiler barrier here. */
 	read_barrier_depends();
 
-	count = indirect->len / sizeof desc;
+	count = len / sizeof desc;
 	/* Buffers are chained via a 16 bit next field, so
 	 * we can have at most 2^16 of these. */
 	if (unlikely(count > USHRT_MAX + 1)) {
@@ -1162,16 +1163,17 @@ static int get_indirect(struct vhost_virtqueue *vq,
 		if (unlikely(memcpy_fromiovec((unsigned char *)&desc,
 					      vq->indirect, sizeof desc))) {
 			vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
-			       i, (size_t)indirect->addr + i * sizeof desc);
+			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
 			return -EINVAL;
 		}
-		if (unlikely(desc.flags & VRING_DESC_F_INDIRECT)) {
+		if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
 			vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
-			       i, (size_t)indirect->addr + i * sizeof desc);
+			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
 			return -EINVAL;
 		}
 
-		ret = translate_desc(vq, desc.addr, desc.len, iov + iov_count,
+		ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
+				     vhost32_to_cpu(vq, desc.len), iov + iov_count,
 				     iov_size - iov_count);
 		if (unlikely(ret < 0)) {
 			vq_err(vq, "Translation failure %d indirect idx %d\n",
@@ -1179,11 +1181,11 @@ static int get_indirect(struct vhost_virtqueue *vq,
 			return ret;
 		}
 		/* If this is an input descriptor, increment that count. */
-		if (desc.flags & VRING_DESC_F_WRITE) {
+		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE)) {
 			*in_num += ret;
 			if (unlikely(log)) {
-				log[*log_num].addr = desc.addr;
-				log[*log_num].len = desc.len;
+				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
+				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
 				++*log_num;
 			}
 		} else {
@@ -1196,7 +1198,7 @@ static int get_indirect(struct vhost_virtqueue *vq,
 			}
 			*out_num += ret;
 		}
-	} while ((i = next_desc(&desc)) != -1);
+	} while ((i = next_desc(vq, &desc)) != -1);
 	return 0;
 }
 
@@ -1216,15 +1218,18 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
 	struct vring_desc desc;
 	unsigned int i, head, found = 0;
 	u16 last_avail_idx;
+	__virtio16 avail_idx;
+	__virtio16 ring_head;
 	int ret;
 
 	/* Check it isn't doing very strange things with descriptor numbers. */
 	last_avail_idx = vq->last_avail_idx;
-	if (unlikely(__get_user(vq->avail_idx, &vq->avail->idx))) {
+	if (unlikely(__get_user(avail_idx, &vq->avail->idx))) {
 		vq_err(vq, "Failed to access avail idx at %p\n",
 		       &vq->avail->idx);
 		return -EFAULT;
 	}
+	vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
 
 	if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
 		vq_err(vq, "Guest moved used index from %u to %u",
@@ -1241,7 +1246,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
 
 	/* Grab the next descriptor number they're advertising, and increment
 	 * the index we've seen. */
-	if (unlikely(__get_user(head,
+	if (unlikely(__get_user(ring_head,
 				&vq->avail->ring[last_avail_idx % vq->num]))) {
 		vq_err(vq, "Failed to read head: idx %d address %p\n",
 		       last_avail_idx,
@@ -1249,6 +1254,8 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
 		return -EFAULT;
 	}
 
+	head = vhost16_to_cpu(vq, ring_head);
+
 	/* If their number is silly, that's an error. */
 	if (unlikely(head >= vq->num)) {
 		vq_err(vq, "Guest says index %u > %u is available",
@@ -1281,7 +1288,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
 			       i, vq->desc + i);
 			return -EFAULT;
 		}
-		if (desc.flags & VRING_DESC_F_INDIRECT) {
+		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) {
 			ret = get_indirect(vq, iov, iov_size,
 					   out_num, in_num,
 					   log, log_num, &desc);
@@ -1293,20 +1300,21 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
 			continue;
 		}
 
-		ret = translate_desc(vq, desc.addr, desc.len, iov + iov_count,
+		ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
+				     vhost32_to_cpu(vq, desc.len), iov + iov_count,
 				     iov_size - iov_count);
 		if (unlikely(ret < 0)) {
 			vq_err(vq, "Translation failure %d descriptor idx %d\n",
 			       ret, i);
 			return ret;
 		}
-		if (desc.flags & VRING_DESC_F_WRITE) {
+		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE)) {
 			/* If this is an input descriptor,
 			 * increment that count. */
 			*in_num += ret;
 			if (unlikely(log)) {
-				log[*log_num].addr = desc.addr;
-				log[*log_num].len = desc.len;
+				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
+				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
 				++*log_num;
 			}
 		} else {
@@ -1319,7 +1327,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
 			}
 			*out_num += ret;
 		}
-	} while ((i = next_desc(&desc)) != -1);
+	} while ((i = next_desc(vq, &desc)) != -1);
 
 	/* On success, increment avail index. */
 	vq->last_avail_idx++;
@@ -1342,7 +1350,10 @@ EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
  * want to notify the guest, using eventfd. */
 int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
 {
-	struct vring_used_elem heads = { head, len };
+	struct vring_used_elem heads = {
+		cpu_to_vhost32(vq, head),
+		cpu_to_vhost32(vq, len)
+	};
 
 	return vhost_add_used_n(vq, &heads, 1);
 }
@@ -1411,7 +1422,7 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
 
 	/* Make sure buffer is written before we update index. */
 	smp_wmb();
-	if (__put_user(vq->last_used_idx, &vq->used->idx)) {
+	if (__put_user(cpu_to_vhost16(vq, vq->last_used_idx), &vq->used->idx)) {
 		vq_err(vq, "Failed to increment used idx");
 		return -EFAULT;
 	}
@@ -1429,7 +1440,8 @@ EXPORT_SYMBOL_GPL(vhost_add_used_n);
 
 static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 {
-	__u16 old, new, event;
+	__u16 old, new;
+	__virtio16 event;
 	bool v;
 	/* Flush out used index updates. This is paired
 	 * with the barrier that the Guest executes when enabling
@@ -1441,12 +1453,12 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 		return true;
 
 	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
-		__u16 flags;
+		__virtio16 flags;
 		if (__get_user(flags, &vq->avail->flags)) {
 			vq_err(vq, "Failed to get flags");
 			return true;
 		}
-		return !(flags & VRING_AVAIL_F_NO_INTERRUPT);
+		return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT));
 	}
 	old = vq->signalled_used;
 	v = vq->signalled_used_valid;
@@ -1460,7 +1472,7 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 		vq_err(vq, "Failed to get used event idx");
 		return true;
 	}
-	return vring_need_event(event, new, old);
+	return vring_need_event(vhost16_to_cpu(vq, event), new, old);
 }
 
 /* This actually signals the guest, using eventfd. */
@@ -1495,7 +1507,7 @@ EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
 /* OK, now we need to know about added descriptors. */
 bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 {
-	u16 avail_idx;
+	__virtio16 avail_idx;
 	int r;
 
 	if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
@@ -1526,7 +1538,7 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 		return false;
 	}
 
-	return avail_idx != vq->avail_idx;
+	return vhost16_to_cpu(vq, avail_idx) != vq->avail_idx;
 }
 EXPORT_SYMBOL_GPL(vhost_enable_notify);
 
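For readers unfamiliar with the VIRTIO 1.0 byte-order rules this patch implements: ring fields of a 1.0 device are little-endian on the wire, while a legacy device uses guest-native order (treated as host order by vhost). The standalone user-space sketch below illustrates what the 16-bit conversion boils down to; the names and the demo itself are illustrative only and not part of this patch.

	/* Standalone illustration of the 1.0 vs. legacy conversion (assumed semantics). */
	#include <endian.h>
	#include <stdint.h>
	#include <stdio.h>

	static uint16_t demo_virtio16_to_cpu(int virtio_1, uint16_t wire)
	{
		/* VIRTIO 1.0: field is little-endian; legacy: pass through as native. */
		return virtio_1 ? le16toh(wire) : wire;
	}

	int main(void)
	{
		uint16_t wire = htole16(0x1234);	/* as a 1.0 guest stores it */

		printf("virtio 1.0 view: 0x%04x\n", demo_virtio16_to_cpu(1, wire));
		printf("legacy view:     0x%04x\n", demo_virtio16_to_cpu(0, wire));
		return 0;
	}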