aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2018-01-31 22:21:14 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2018-01-31 22:21:14 -0500
commitd76e0a050e0f5e7c00e6d334f758178bbc16eb98 (patch)
tree9d0299f385da06f082fad6d206cc1351ee0de7f4
parent40b9672a2f071cbf931eb144997a21332bc0a747 (diff)
parentd1038084415c413b3c11c536f28fc5571ed00153 (diff)
Merge branch 'work.vmci' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull vmci iov_iter updates from Al Viro: "Get rid of "is it an iovec or an entire array?" flags in vmci - just use iov_iter. Simplifies the living hell out of that code..." * 'work.vmci' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: vmci: the same on the send side... vmci: simplify qp_dequeue_locked() vmci: get rid of qp_memcpy_from_queue() vmci: fix buf_size in case of iovec-based accesses
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c179
1 file changed, 46 insertions, 133 deletions
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index 8af5c2672f71..0339538c182d 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -129,23 +129,6 @@
129 * *_MEM state, and vice versa. 129 * *_MEM state, and vice versa.
130 */ 130 */
131 131
132/*
133 * VMCIMemcpy{To,From}QueueFunc() prototypes. Functions of these
134 * types are passed around to enqueue and dequeue routines. Note that
135 * often the functions passed are simply wrappers around memcpy
136 * itself.
137 *
138 * Note: In order for the memcpy typedefs to be compatible with the VMKernel,
139 * there's an unused last parameter for the hosted side. In
140 * ESX, that parameter holds a buffer type.
141 */
142typedef int vmci_memcpy_to_queue_func(struct vmci_queue *queue,
143 u64 queue_offset, const void *src,
144 size_t src_offset, size_t size);
145typedef int vmci_memcpy_from_queue_func(void *dest, size_t dest_offset,
146 const struct vmci_queue *queue,
147 u64 queue_offset, size_t size);
148
149/* The Kernel specific component of the struct vmci_queue structure. */ 132/* The Kernel specific component of the struct vmci_queue structure. */
150struct vmci_queue_kern_if { 133struct vmci_queue_kern_if {
151 struct mutex __mutex; /* Protects the queue. */ 134 struct mutex __mutex; /* Protects the queue. */
@@ -351,11 +334,10 @@ static void *qp_alloc_queue(u64 size, u32 flags)
351 * by traversing the offset -> page translation structure for the queue. 334 * by traversing the offset -> page translation structure for the queue.
352 * Assumes that offset + size does not wrap around in the queue. 335 * Assumes that offset + size does not wrap around in the queue.
353 */ 336 */
354static int __qp_memcpy_to_queue(struct vmci_queue *queue, 337static int qp_memcpy_to_queue_iter(struct vmci_queue *queue,
355 u64 queue_offset, 338 u64 queue_offset,
356 const void *src, 339 struct iov_iter *from,
357 size_t size, 340 size_t size)
358 bool is_iovec)
359{ 341{
360 struct vmci_queue_kern_if *kernel_if = queue->kernel_if; 342 struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
361 size_t bytes_copied = 0; 343 size_t bytes_copied = 0;
@@ -380,23 +362,12 @@ static int __qp_memcpy_to_queue(struct vmci_queue *queue,
380 else 362 else
381 to_copy = size - bytes_copied; 363 to_copy = size - bytes_copied;
382 364
383 if (is_iovec) { 365 if (!copy_from_iter_full((u8 *)va + page_offset, to_copy,
384 struct msghdr *msg = (struct msghdr *)src; 366 from)) {
385 int err; 367 if (kernel_if->host)
386 368 kunmap(kernel_if->u.h.page[page_index]);
387 /* The iovec will track bytes_copied internally. */ 369 return VMCI_ERROR_INVALID_ARGS;
388 err = memcpy_from_msg((u8 *)va + page_offset,
389 msg, to_copy);
390 if (err != 0) {
391 if (kernel_if->host)
392 kunmap(kernel_if->u.h.page[page_index]);
393 return VMCI_ERROR_INVALID_ARGS;
394 }
395 } else {
396 memcpy((u8 *)va + page_offset,
397 (u8 *)src + bytes_copied, to_copy);
398 } 370 }
399
400 bytes_copied += to_copy; 371 bytes_copied += to_copy;
401 if (kernel_if->host) 372 if (kernel_if->host)
402 kunmap(kernel_if->u.h.page[page_index]); 373 kunmap(kernel_if->u.h.page[page_index]);
@@ -411,11 +382,9 @@ static int __qp_memcpy_to_queue(struct vmci_queue *queue,
411 * by traversing the offset -> page translation structure for the queue. 382 * by traversing the offset -> page translation structure for the queue.
412 * Assumes that offset + size does not wrap around in the queue. 383 * Assumes that offset + size does not wrap around in the queue.
413 */ 384 */
414static int __qp_memcpy_from_queue(void *dest, 385static int qp_memcpy_from_queue_iter(struct iov_iter *to,
415 const struct vmci_queue *queue, 386 const struct vmci_queue *queue,
416 u64 queue_offset, 387 u64 queue_offset, size_t size)
417 size_t size,
418 bool is_iovec)
419{ 388{
420 struct vmci_queue_kern_if *kernel_if = queue->kernel_if; 389 struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
421 size_t bytes_copied = 0; 390 size_t bytes_copied = 0;
@@ -427,6 +396,7 @@ static int __qp_memcpy_from_queue(void *dest,
427 (queue_offset + bytes_copied) & (PAGE_SIZE - 1); 396 (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
428 void *va; 397 void *va;
429 size_t to_copy; 398 size_t to_copy;
399 int err;
430 400
431 if (kernel_if->host) 401 if (kernel_if->host)
432 va = kmap(kernel_if->u.h.page[page_index]); 402 va = kmap(kernel_if->u.h.page[page_index]);
@@ -440,23 +410,12 @@ static int __qp_memcpy_from_queue(void *dest,
440 else 410 else
441 to_copy = size - bytes_copied; 411 to_copy = size - bytes_copied;
442 412
443 if (is_iovec) { 413 err = copy_to_iter((u8 *)va + page_offset, to_copy, to);
444 struct msghdr *msg = dest; 414 if (err != to_copy) {
445 int err; 415 if (kernel_if->host)
446 416 kunmap(kernel_if->u.h.page[page_index]);
447 /* The iovec will track bytes_copied internally. */ 417 return VMCI_ERROR_INVALID_ARGS;
448 err = memcpy_to_msg(msg, (u8 *)va + page_offset,
449 to_copy);
450 if (err != 0) {
451 if (kernel_if->host)
452 kunmap(kernel_if->u.h.page[page_index]);
453 return VMCI_ERROR_INVALID_ARGS;
454 }
455 } else {
456 memcpy((u8 *)dest + bytes_copied,
457 (u8 *)va + page_offset, to_copy);
458 } 418 }
459
460 bytes_copied += to_copy; 419 bytes_copied += to_copy;
461 if (kernel_if->host) 420 if (kernel_if->host)
462 kunmap(kernel_if->u.h.page[page_index]); 421 kunmap(kernel_if->u.h.page[page_index]);
@@ -569,54 +528,6 @@ static int qp_populate_ppn_set(u8 *call_buf, const struct ppn_set *ppn_set)
569 return VMCI_SUCCESS; 528 return VMCI_SUCCESS;
570} 529}
571 530
572static int qp_memcpy_to_queue(struct vmci_queue *queue,
573 u64 queue_offset,
574 const void *src, size_t src_offset, size_t size)
575{
576 return __qp_memcpy_to_queue(queue, queue_offset,
577 (u8 *)src + src_offset, size, false);
578}
579
580static int qp_memcpy_from_queue(void *dest,
581 size_t dest_offset,
582 const struct vmci_queue *queue,
583 u64 queue_offset, size_t size)
584{
585 return __qp_memcpy_from_queue((u8 *)dest + dest_offset,
586 queue, queue_offset, size, false);
587}
588
589/*
590 * Copies from a given iovec from a VMCI Queue.
591 */
592static int qp_memcpy_to_queue_iov(struct vmci_queue *queue,
593 u64 queue_offset,
594 const void *msg,
595 size_t src_offset, size_t size)
596{
597
598 /*
599 * We ignore src_offset because src is really a struct iovec * and will
600 * maintain offset internally.
601 */
602 return __qp_memcpy_to_queue(queue, queue_offset, msg, size, true);
603}
604
605/*
606 * Copies to a given iovec from a VMCI Queue.
607 */
608static int qp_memcpy_from_queue_iov(void *dest,
609 size_t dest_offset,
610 const struct vmci_queue *queue,
611 u64 queue_offset, size_t size)
612{
613 /*
614 * We ignore dest_offset because dest is really a struct iovec * and
615 * will maintain offset internally.
616 */
617 return __qp_memcpy_from_queue(dest, queue, queue_offset, size, true);
618}
619
620/* 531/*
621 * Allocates kernel VA space of specified size plus space for the queue 532 * Allocates kernel VA space of specified size plus space for the queue
622 * and kernel interface. This is different from the guest queue allocator, 533 * and kernel interface. This is different from the guest queue allocator,
@@ -2629,12 +2540,11 @@ static bool qp_wait_for_ready_queue(struct vmci_qp *qpair)
2629static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q, 2540static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
2630 struct vmci_queue *consume_q, 2541 struct vmci_queue *consume_q,
2631 const u64 produce_q_size, 2542 const u64 produce_q_size,
2632 const void *buf, 2543 struct iov_iter *from)
2633 size_t buf_size,
2634 vmci_memcpy_to_queue_func memcpy_to_queue)
2635{ 2544{
2636 s64 free_space; 2545 s64 free_space;
2637 u64 tail; 2546 u64 tail;
2547 size_t buf_size = iov_iter_count(from);
2638 size_t written; 2548 size_t written;
2639 ssize_t result; 2549 ssize_t result;
2640 2550
@@ -2654,15 +2564,15 @@ static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
2654 written = (size_t) (free_space > buf_size ? buf_size : free_space); 2564 written = (size_t) (free_space > buf_size ? buf_size : free_space);
2655 tail = vmci_q_header_producer_tail(produce_q->q_header); 2565 tail = vmci_q_header_producer_tail(produce_q->q_header);
2656 if (likely(tail + written < produce_q_size)) { 2566 if (likely(tail + written < produce_q_size)) {
2657 result = memcpy_to_queue(produce_q, tail, buf, 0, written); 2567 result = qp_memcpy_to_queue_iter(produce_q, tail, from, written);
2658 } else { 2568 } else {
2659 /* Tail pointer wraps around. */ 2569 /* Tail pointer wraps around. */
2660 2570
2661 const size_t tmp = (size_t) (produce_q_size - tail); 2571 const size_t tmp = (size_t) (produce_q_size - tail);
2662 2572
2663 result = memcpy_to_queue(produce_q, tail, buf, 0, tmp); 2573 result = qp_memcpy_to_queue_iter(produce_q, tail, from, tmp);
2664 if (result >= VMCI_SUCCESS) 2574 if (result >= VMCI_SUCCESS)
2665 result = memcpy_to_queue(produce_q, 0, buf, tmp, 2575 result = qp_memcpy_to_queue_iter(produce_q, 0, from,
2666 written - tmp); 2576 written - tmp);
2667 } 2577 }
2668 2578
@@ -2690,11 +2600,10 @@ static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
2690static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q, 2600static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q,
2691 struct vmci_queue *consume_q, 2601 struct vmci_queue *consume_q,
2692 const u64 consume_q_size, 2602 const u64 consume_q_size,
2693 void *buf, 2603 struct iov_iter *to,
2694 size_t buf_size,
2695 vmci_memcpy_from_queue_func memcpy_from_queue,
2696 bool update_consumer) 2604 bool update_consumer)
2697{ 2605{
2606 size_t buf_size = iov_iter_count(to);
2698 s64 buf_ready; 2607 s64 buf_ready;
2699 u64 head; 2608 u64 head;
2700 size_t read; 2609 size_t read;
@@ -2716,15 +2625,15 @@ static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q,
2716 read = (size_t) (buf_ready > buf_size ? buf_size : buf_ready); 2625 read = (size_t) (buf_ready > buf_size ? buf_size : buf_ready);
2717 head = vmci_q_header_consumer_head(produce_q->q_header); 2626 head = vmci_q_header_consumer_head(produce_q->q_header);
2718 if (likely(head + read < consume_q_size)) { 2627 if (likely(head + read < consume_q_size)) {
2719 result = memcpy_from_queue(buf, 0, consume_q, head, read); 2628 result = qp_memcpy_from_queue_iter(to, consume_q, head, read);
2720 } else { 2629 } else {
2721 /* Head pointer wraps around. */ 2630 /* Head pointer wraps around. */
2722 2631
2723 const size_t tmp = (size_t) (consume_q_size - head); 2632 const size_t tmp = (size_t) (consume_q_size - head);
2724 2633
2725 result = memcpy_from_queue(buf, 0, consume_q, head, tmp); 2634 result = qp_memcpy_from_queue_iter(to, consume_q, head, tmp);
2726 if (result >= VMCI_SUCCESS) 2635 if (result >= VMCI_SUCCESS)
2727 result = memcpy_from_queue(buf, tmp, consume_q, 0, 2636 result = qp_memcpy_from_queue_iter(to, consume_q, 0,
2728 read - tmp); 2637 read - tmp);
2729 2638
2730 } 2639 }
@@ -3118,18 +3027,21 @@ ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair,
3118 int buf_type) 3027 int buf_type)
3119{ 3028{
3120 ssize_t result; 3029 ssize_t result;
3030 struct iov_iter from;
3031 struct kvec v = {.iov_base = (void *)buf, .iov_len = buf_size};
3121 3032
3122 if (!qpair || !buf) 3033 if (!qpair || !buf)
3123 return VMCI_ERROR_INVALID_ARGS; 3034 return VMCI_ERROR_INVALID_ARGS;
3124 3035
3036 iov_iter_kvec(&from, WRITE | ITER_KVEC, &v, 1, buf_size);
3037
3125 qp_lock(qpair); 3038 qp_lock(qpair);
3126 3039
3127 do { 3040 do {
3128 result = qp_enqueue_locked(qpair->produce_q, 3041 result = qp_enqueue_locked(qpair->produce_q,
3129 qpair->consume_q, 3042 qpair->consume_q,
3130 qpair->produce_q_size, 3043 qpair->produce_q_size,
3131 buf, buf_size, 3044 &from);
3132 qp_memcpy_to_queue);
3133 3045
3134 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3046 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3135 !qp_wait_for_ready_queue(qpair)) 3047 !qp_wait_for_ready_queue(qpair))
@@ -3159,18 +3071,21 @@ ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
3159 int buf_type) 3071 int buf_type)
3160{ 3072{
3161 ssize_t result; 3073 ssize_t result;
3074 struct iov_iter to;
3075 struct kvec v = {.iov_base = buf, .iov_len = buf_size};
3162 3076
3163 if (!qpair || !buf) 3077 if (!qpair || !buf)
3164 return VMCI_ERROR_INVALID_ARGS; 3078 return VMCI_ERROR_INVALID_ARGS;
3165 3079
3080 iov_iter_kvec(&to, READ | ITER_KVEC, &v, 1, buf_size);
3081
3166 qp_lock(qpair); 3082 qp_lock(qpair);
3167 3083
3168 do { 3084 do {
3169 result = qp_dequeue_locked(qpair->produce_q, 3085 result = qp_dequeue_locked(qpair->produce_q,
3170 qpair->consume_q, 3086 qpair->consume_q,
3171 qpair->consume_q_size, 3087 qpair->consume_q_size,
3172 buf, buf_size, 3088 &to, true);
3173 qp_memcpy_from_queue, true);
3174 3089
3175 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3090 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3176 !qp_wait_for_ready_queue(qpair)) 3091 !qp_wait_for_ready_queue(qpair))
@@ -3200,19 +3115,22 @@ ssize_t vmci_qpair_peek(struct vmci_qp *qpair,
3200 size_t buf_size, 3115 size_t buf_size,
3201 int buf_type) 3116 int buf_type)
3202{ 3117{
3118 struct iov_iter to;
3119 struct kvec v = {.iov_base = buf, .iov_len = buf_size};
3203 ssize_t result; 3120 ssize_t result;
3204 3121
3205 if (!qpair || !buf) 3122 if (!qpair || !buf)
3206 return VMCI_ERROR_INVALID_ARGS; 3123 return VMCI_ERROR_INVALID_ARGS;
3207 3124
3125 iov_iter_kvec(&to, READ | ITER_KVEC, &v, 1, buf_size);
3126
3208 qp_lock(qpair); 3127 qp_lock(qpair);
3209 3128
3210 do { 3129 do {
3211 result = qp_dequeue_locked(qpair->produce_q, 3130 result = qp_dequeue_locked(qpair->produce_q,
3212 qpair->consume_q, 3131 qpair->consume_q,
3213 qpair->consume_q_size, 3132 qpair->consume_q_size,
3214 buf, buf_size, 3133 &to, false);
3215 qp_memcpy_from_queue, false);
3216 3134
3217 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3135 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3218 !qp_wait_for_ready_queue(qpair)) 3136 !qp_wait_for_ready_queue(qpair))
@@ -3253,8 +3171,7 @@ ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
3253 result = qp_enqueue_locked(qpair->produce_q, 3171 result = qp_enqueue_locked(qpair->produce_q,
3254 qpair->consume_q, 3172 qpair->consume_q,
3255 qpair->produce_q_size, 3173 qpair->produce_q_size,
3256 msg, iov_size, 3174 &msg->msg_iter);
3257 qp_memcpy_to_queue_iov);
3258 3175
3259 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3176 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3260 !qp_wait_for_ready_queue(qpair)) 3177 !qp_wait_for_ready_queue(qpair))
@@ -3295,9 +3212,7 @@ ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
3295 result = qp_dequeue_locked(qpair->produce_q, 3212 result = qp_dequeue_locked(qpair->produce_q,
3296 qpair->consume_q, 3213 qpair->consume_q,
3297 qpair->consume_q_size, 3214 qpair->consume_q_size,
3298 msg, iov_size, 3215 &msg->msg_iter, true);
3299 qp_memcpy_from_queue_iov,
3300 true);
3301 3216
3302 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3217 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3303 !qp_wait_for_ready_queue(qpair)) 3218 !qp_wait_for_ready_queue(qpair))
@@ -3339,9 +3254,7 @@ ssize_t vmci_qpair_peekv(struct vmci_qp *qpair,
3339 result = qp_dequeue_locked(qpair->produce_q, 3254 result = qp_dequeue_locked(qpair->produce_q,
3340 qpair->consume_q, 3255 qpair->consume_q,
3341 qpair->consume_q_size, 3256 qpair->consume_q_size,
3342 msg, iov_size, 3257 &msg->msg_iter, false);
3343 qp_memcpy_from_queue_iov,
3344 false);
3345 3258
3346 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3259 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3347 !qp_wait_for_ready_queue(qpair)) 3260 !qp_wait_for_ready_queue(qpair))