author		Al Viro <viro@zeniv.linux.org.uk>	2017-07-14 16:13:07 -0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2017-12-02 21:24:02 -0500
commit		ce3d6e7d4200617e6086c1a83220cf6efd3c92cf
tree		3a1b1c793176d19b5c3f37184ba8ccff43372aca
parent		19c5b89d8d582cce9a90335de212cf477fe15b95
vmci: get rid of qp_memcpy_from_queue()
switch both of its users to qp_memcpy_from_queue_iov() - just
make it take iov_iter * instead of msghdr * and arrange for an
iov_iter for it in all cases.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
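
For reference, the caller-side pattern the patch introduces is: wrap the flat
destination buffer in a single-element kvec, build an ITER_KVEC iov_iter over
it, and let copy_to_iter() advance the iterator while copying out of the
queue. Below is a minimal sketch of that pattern outside the VMCI code; the
helper name copy_out_via_iter() is hypothetical, and the iov_iter_kvec() call
uses the READ | ITER_KVEC form current in this tree.

#include <linux/errno.h>
#include <linux/uio.h>	/* struct kvec, struct iov_iter, iov_iter_kvec(), copy_to_iter() */

/*
 * Hypothetical helper (not part of the patch) condensing the pattern the
 * callers now follow: wrap a flat kernel buffer in a one-element kvec,
 * point an ITER_KVEC iov_iter at it, and fill it with copy_to_iter().
 * copy_to_iter() returns the number of bytes actually copied and advances
 * the iterator, so no separate bytes_copied bookkeeping is needed.
 */
static int copy_out_via_iter(void *dst_buf, size_t dst_size,
			     const void *src, size_t len)
{
	struct kvec v = { .iov_base = dst_buf, .iov_len = dst_size };
	struct iov_iter to;

	/* READ | ITER_KVEC as in this tree; later kernels changed this API. */
	iov_iter_kvec(&to, READ | ITER_KVEC, &v, 1, dst_size);

	if (copy_to_iter(src, len, &to) != len)
		return -EFAULT;	/* the VMCI code maps this to VMCI_ERROR_INVALID_ARGS */

	return 0;
}

With both msghdr-based callers (via &msg->msg_iter) and flat-buffer callers
(via a kvec-backed iterator) feeding an iov_iter into the copy path,
qp_memcpy_from_queue_iov() covers every user and the duplicate
qp_memcpy_from_queue() helper can be dropped.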
-rw-r--r--	drivers/misc/vmw_vmci/vmci_queue_pair.c	| 37 +++++++++++++++++--------------------
1 file changed, 17 insertions(+), 20 deletions(-)
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index 7387dedcda67..a0c10f8cba30 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -441,13 +441,11 @@ static int __qp_memcpy_from_queue(void *dest,
 			to_copy = size - bytes_copied;
 
 		if (is_iovec) {
-			struct msghdr *msg = dest;
+			struct iov_iter *to = dest;
 			int err;
 
-			/* The iovec will track bytes_copied internally. */
-			err = memcpy_to_msg(msg, (u8 *)va + page_offset,
-					    to_copy);
-			if (err != 0) {
+			err = copy_to_iter((u8 *)va + page_offset, to_copy, to);
+			if (err != to_copy) {
 				if (kernel_if->host)
 					kunmap(kernel_if->u.h.page[page_index]);
 				return VMCI_ERROR_INVALID_ARGS;
@@ -577,15 +575,6 @@ static int qp_memcpy_to_queue(struct vmci_queue *queue,
 				    (u8 *)src + src_offset, size, false);
 }
 
-static int qp_memcpy_from_queue(void *dest,
-				size_t dest_offset,
-				const struct vmci_queue *queue,
-				u64 queue_offset, size_t size)
-{
-	return __qp_memcpy_from_queue((u8 *)dest + dest_offset,
-				      queue, queue_offset, size, false);
-}
-
 /*
  * Copies from a given iovec from a VMCI Queue.
  */
@@ -3159,18 +3148,22 @@ ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
 			   int buf_type)
 {
 	ssize_t result;
+	struct iov_iter to;
+	struct kvec v = {.iov_base = buf, .iov_len = buf_size};
 
 	if (!qpair || !buf)
 		return VMCI_ERROR_INVALID_ARGS;
 
+	iov_iter_kvec(&to, READ | ITER_KVEC, &v, 1, buf_size);
+
 	qp_lock(qpair);
 
 	do {
 		result = qp_dequeue_locked(qpair->produce_q,
 					   qpair->consume_q,
 					   qpair->consume_q_size,
-					   buf, buf_size,
-					   qp_memcpy_from_queue, true);
+					   &to, buf_size,
+					   qp_memcpy_from_queue_iov, true);
 
 		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
 		    !qp_wait_for_ready_queue(qpair))
@@ -3200,19 +3193,23 @@ ssize_t vmci_qpair_peek(struct vmci_qp *qpair,
 			size_t buf_size,
 			int buf_type)
 {
+	struct iov_iter to;
+	struct kvec v = {.iov_base = buf, .iov_len = buf_size};
 	ssize_t result;
 
 	if (!qpair || !buf)
 		return VMCI_ERROR_INVALID_ARGS;
 
+	iov_iter_kvec(&to, READ | ITER_KVEC, &v, 1, buf_size);
+
 	qp_lock(qpair);
 
 	do {
 		result = qp_dequeue_locked(qpair->produce_q,
 					   qpair->consume_q,
 					   qpair->consume_q_size,
-					   buf, buf_size,
-					   qp_memcpy_from_queue, false);
+					   &to, buf_size,
+					   qp_memcpy_from_queue_iov, false);
 
 		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
 		    !qp_wait_for_ready_queue(qpair))
@@ -3295,7 +3292,7 @@ ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
 		result = qp_dequeue_locked(qpair->produce_q,
 					   qpair->consume_q,
 					   qpair->consume_q_size,
-					   msg, msg_data_left(msg),
+					   &msg->msg_iter, msg_data_left(msg),
 					   qp_memcpy_from_queue_iov,
 					   true);
 
@@ -3339,7 +3336,7 @@ ssize_t vmci_qpair_peekv(struct vmci_qp *qpair,
 		result = qp_dequeue_locked(qpair->produce_q,
 					   qpair->consume_q,
 					   qpair->consume_q_size,
-					   msg, msg_data_left(msg),
+					   &msg->msg_iter, msg_data_left(msg),
 					   qp_memcpy_from_queue_iov,
 					   false);
 