aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAl Viro <viro@zeniv.linux.org.uk>2017-07-14 16:54:08 -0400
committerAl Viro <viro@zeniv.linux.org.uk>2017-12-02 21:24:03 -0500
commitd1038084415c413b3c11c536f28fc5571ed00153 (patch)
tree96168fa79b406376a3bcfb489ba07cdf4e37b8d9
parent53f58d8ed862a3675659fb3162abfc5dd5f1025b (diff)
vmci: the same on the send side...
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c89
1 file changed, 20 insertions, 69 deletions
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index 09314b991fdf..0339538c182d 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -129,20 +129,6 @@
129 * *_MEM state, and vice versa. 129 * *_MEM state, and vice versa.
130 */ 130 */
131 131
132/*
133 * VMCIMemcpy{To,From}QueueFunc() prototypes. Functions of these
134 * types are passed around to enqueue and dequeue routines. Note that
135 * often the functions passed are simply wrappers around memcpy
136 * itself.
137 *
138 * Note: In order for the memcpy typedefs to be compatible with the VMKernel,
139 * there's an unused last parameter for the hosted side. In
140 * ESX, that parameter holds a buffer type.
141 */
142typedef int vmci_memcpy_to_queue_func(struct vmci_queue *queue,
143 u64 queue_offset, const void *src,
144 size_t src_offset, size_t size);
145
146/* The Kernel specific component of the struct vmci_queue structure. */ 132/* The Kernel specific component of the struct vmci_queue structure. */
147struct vmci_queue_kern_if { 133struct vmci_queue_kern_if {
148 struct mutex __mutex; /* Protects the queue. */ 134 struct mutex __mutex; /* Protects the queue. */
@@ -348,11 +334,10 @@ static void *qp_alloc_queue(u64 size, u32 flags)
348 * by traversing the offset -> page translation structure for the queue. 334 * by traversing the offset -> page translation structure for the queue.
349 * Assumes that offset + size does not wrap around in the queue. 335 * Assumes that offset + size does not wrap around in the queue.
350 */ 336 */
351static int __qp_memcpy_to_queue(struct vmci_queue *queue, 337static int qp_memcpy_to_queue_iter(struct vmci_queue *queue,
352 u64 queue_offset, 338 u64 queue_offset,
353 const void *src, 339 struct iov_iter *from,
354 size_t size, 340 size_t size)
355 bool is_iovec)
356{ 341{
357 struct vmci_queue_kern_if *kernel_if = queue->kernel_if; 342 struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
358 size_t bytes_copied = 0; 343 size_t bytes_copied = 0;
@@ -377,23 +362,12 @@ static int __qp_memcpy_to_queue(struct vmci_queue *queue,
377 else 362 else
378 to_copy = size - bytes_copied; 363 to_copy = size - bytes_copied;
379 364
380 if (is_iovec) { 365 if (!copy_from_iter_full((u8 *)va + page_offset, to_copy,
381 struct msghdr *msg = (struct msghdr *)src; 366 from)) {
382 int err; 367 if (kernel_if->host)
383 368 kunmap(kernel_if->u.h.page[page_index]);
384 /* The iovec will track bytes_copied internally. */ 369 return VMCI_ERROR_INVALID_ARGS;
385 err = memcpy_from_msg((u8 *)va + page_offset,
386 msg, to_copy);
387 if (err != 0) {
388 if (kernel_if->host)
389 kunmap(kernel_if->u.h.page[page_index]);
390 return VMCI_ERROR_INVALID_ARGS;
391 }
392 } else {
393 memcpy((u8 *)va + page_offset,
394 (u8 *)src + bytes_copied, to_copy);
395 } 370 }
396
397 bytes_copied += to_copy; 371 bytes_copied += to_copy;
398 if (kernel_if->host) 372 if (kernel_if->host)
399 kunmap(kernel_if->u.h.page[page_index]); 373 kunmap(kernel_if->u.h.page[page_index]);
@@ -554,30 +528,6 @@ static int qp_populate_ppn_set(u8 *call_buf, const struct ppn_set *ppn_set)
554 return VMCI_SUCCESS; 528 return VMCI_SUCCESS;
555} 529}
556 530
557static int qp_memcpy_to_queue(struct vmci_queue *queue,
558 u64 queue_offset,
559 const void *src, size_t src_offset, size_t size)
560{
561 return __qp_memcpy_to_queue(queue, queue_offset,
562 (u8 *)src + src_offset, size, false);
563}
564
565/*
566 * Copies from a given iovec from a VMCI Queue.
567 */
568static int qp_memcpy_to_queue_iov(struct vmci_queue *queue,
569 u64 queue_offset,
570 const void *msg,
571 size_t src_offset, size_t size)
572{
573
574 /*
575 * We ignore src_offset because src is really a struct iovec * and will
576 * maintain offset internally.
577 */
578 return __qp_memcpy_to_queue(queue, queue_offset, msg, size, true);
579}
580
581/* 531/*
582 * Allocates kernel VA space of specified size plus space for the queue 532 * Allocates kernel VA space of specified size plus space for the queue
583 * and kernel interface. This is different from the guest queue allocator, 533 * and kernel interface. This is different from the guest queue allocator,
@@ -2590,12 +2540,11 @@ static bool qp_wait_for_ready_queue(struct vmci_qp *qpair)
2590static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q, 2540static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
2591 struct vmci_queue *consume_q, 2541 struct vmci_queue *consume_q,
2592 const u64 produce_q_size, 2542 const u64 produce_q_size,
2593 const void *buf, 2543 struct iov_iter *from)
2594 size_t buf_size,
2595 vmci_memcpy_to_queue_func memcpy_to_queue)
2596{ 2544{
2597 s64 free_space; 2545 s64 free_space;
2598 u64 tail; 2546 u64 tail;
2547 size_t buf_size = iov_iter_count(from);
2599 size_t written; 2548 size_t written;
2600 ssize_t result; 2549 ssize_t result;
2601 2550
@@ -2615,15 +2564,15 @@ static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
2615 written = (size_t) (free_space > buf_size ? buf_size : free_space); 2564 written = (size_t) (free_space > buf_size ? buf_size : free_space);
2616 tail = vmci_q_header_producer_tail(produce_q->q_header); 2565 tail = vmci_q_header_producer_tail(produce_q->q_header);
2617 if (likely(tail + written < produce_q_size)) { 2566 if (likely(tail + written < produce_q_size)) {
2618 result = memcpy_to_queue(produce_q, tail, buf, 0, written); 2567 result = qp_memcpy_to_queue_iter(produce_q, tail, from, written);
2619 } else { 2568 } else {
2620 /* Tail pointer wraps around. */ 2569 /* Tail pointer wraps around. */
2621 2570
2622 const size_t tmp = (size_t) (produce_q_size - tail); 2571 const size_t tmp = (size_t) (produce_q_size - tail);
2623 2572
2624 result = memcpy_to_queue(produce_q, tail, buf, 0, tmp); 2573 result = qp_memcpy_to_queue_iter(produce_q, tail, from, tmp);
2625 if (result >= VMCI_SUCCESS) 2574 if (result >= VMCI_SUCCESS)
2626 result = memcpy_to_queue(produce_q, 0, buf, tmp, 2575 result = qp_memcpy_to_queue_iter(produce_q, 0, from,
2627 written - tmp); 2576 written - tmp);
2628 } 2577 }
2629 2578
@@ -3078,18 +3027,21 @@ ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair,
3078 int buf_type) 3027 int buf_type)
3079{ 3028{
3080 ssize_t result; 3029 ssize_t result;
3030 struct iov_iter from;
3031 struct kvec v = {.iov_base = (void *)buf, .iov_len = buf_size};
3081 3032
3082 if (!qpair || !buf) 3033 if (!qpair || !buf)
3083 return VMCI_ERROR_INVALID_ARGS; 3034 return VMCI_ERROR_INVALID_ARGS;
3084 3035
3036 iov_iter_kvec(&from, WRITE | ITER_KVEC, &v, 1, buf_size);
3037
3085 qp_lock(qpair); 3038 qp_lock(qpair);
3086 3039
3087 do { 3040 do {
3088 result = qp_enqueue_locked(qpair->produce_q, 3041 result = qp_enqueue_locked(qpair->produce_q,
3089 qpair->consume_q, 3042 qpair->consume_q,
3090 qpair->produce_q_size, 3043 qpair->produce_q_size,
3091 buf, buf_size, 3044 &from);
3092 qp_memcpy_to_queue);
3093 3045
3094 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3046 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3095 !qp_wait_for_ready_queue(qpair)) 3047 !qp_wait_for_ready_queue(qpair))
@@ -3219,8 +3171,7 @@ ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
3219 result = qp_enqueue_locked(qpair->produce_q, 3171 result = qp_enqueue_locked(qpair->produce_q,
3220 qpair->consume_q, 3172 qpair->consume_q,
3221 qpair->produce_q_size, 3173 qpair->produce_q_size,
3222 msg, msg_data_left(msg), 3174 &msg->msg_iter);
3223 qp_memcpy_to_queue_iov);
3224 3175
3225 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3176 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3226 !qp_wait_for_ready_queue(qpair)) 3177 !qp_wait_for_ready_queue(qpair))