author     Andy King <acking@vmware.com>                     2013-08-23 12:22:13 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2013-08-28 00:42:12 -0400
commit     45412befe8fee657effc15112af05ca9dbea61fc (patch)
tree       9eabdc57bd7da7c3359d7d55a153ef26e2706c4c /drivers/misc
parent     440ab3b3039834508250975d07d52d41883cf520 (diff)
VMCI: Remove non-blocking/pinned queuepair support
We added this for a special case that doesn't exist on Linux. Remove the
non-blocking/pinned queuepair code and simplify the driver in preparation
for adding virtual IOMMU support.

Acked-by: Aditya Sarwade <asarwade@vmware.com>
Signed-off-by: Andy King <acking@vmware.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
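The shape of the simplification is easiest to see in the queue-pair locking
helpers: before this patch the queue mutex was taken only when the pair was
allowed to block; afterwards it is taken unconditionally. A condensed
before/after sketch of qp_lock(), pulled from the hunks below (not a complete
listing):

    /* Before: the mutex is skipped for non-blocking queue pairs. */
    static void qp_lock(const struct vmci_qp *qpair)
    {
            if (vmci_can_block(qpair->flags))
                    qp_acquire_queue_mutex(qpair->produce_q);
    }

    /* After: every queue pair may block, so always take the mutex. */
    static void qp_lock(const struct vmci_qp *qpair)
    {
            qp_acquire_queue_mutex(qpair->produce_q);
    }

qp_unlock() is simplified the same way, and the vmci_can_block() /
vmci_qp_pinned() helpers are dropped from vmci_queue_pair.h.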
Diffstat (limited to 'drivers/misc')
-rw-r--r--  drivers/misc/vmw_vmci/vmci_queue_pair.c | 149
-rw-r--r--  drivers/misc/vmw_vmci/vmci_queue_pair.h |  18
2 files changed, 22 insertions(+), 145 deletions(-)
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index 8ff2e5ee8fb8..8698e0c5bdb4 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -148,12 +148,10 @@ typedef int vmci_memcpy_from_queue_func(void *dest, size_t dest_offset,
 struct vmci_queue_kern_if {
         struct page **page;
         struct page **header_page;
-        void *va;
         struct mutex __mutex;   /* Protects the queue. */
         struct mutex *mutex;    /* Shared by producer and consumer queues. */
         bool host;
         size_t num_pages;
-        bool mapped;
 };
 
 /*
@@ -267,11 +265,6 @@ static void qp_free_queue(void *q, u64 size)
         if (queue) {
                 u64 i = DIV_ROUND_UP(size, PAGE_SIZE);
 
-                if (queue->kernel_if->mapped) {
-                        vunmap(queue->kernel_if->va);
-                        queue->kernel_if->va = NULL;
-                }
-
                 while (i)
                         __free_page(queue->kernel_if->page[--i]);
 
@@ -311,8 +304,6 @@ static void *qp_alloc_queue(u64 size, u32 flags)
         queue->kernel_if->header_page = NULL;   /* Unused in guest. */
         queue->kernel_if->page = (struct page **)(queue->kernel_if + 1);
         queue->kernel_if->host = false;
-        queue->kernel_if->va = NULL;
-        queue->kernel_if->mapped = false;
 
         for (i = 0; i < num_data_pages; i++) {
                 queue->kernel_if->page[i] = alloc_pages(GFP_KERNEL, 0);
@@ -320,16 +311,6 @@ static void *qp_alloc_queue(u64 size, u32 flags)
                         goto fail;
         }
 
-        if (vmci_qp_pinned(flags)) {
-                queue->kernel_if->va =
-                    vmap(queue->kernel_if->page, num_data_pages, VM_MAP,
-                         PAGE_KERNEL);
-                if (!queue->kernel_if->va)
-                        goto fail;
-
-                queue->kernel_if->mapped = true;
-        }
-
         return (void *)queue;
 
  fail:
@@ -359,11 +340,7 @@ static int __qp_memcpy_to_queue(struct vmci_queue *queue,
                 void *va;
                 size_t to_copy;
 
-                if (!kernel_if->mapped)
-                        va = kmap(kernel_if->page[page_index]);
-                else
-                        va = (void *)((u8 *)kernel_if->va +
-                                      (page_index * PAGE_SIZE));
+                va = kmap(kernel_if->page[page_index]);
 
                 if (size - bytes_copied > PAGE_SIZE - page_offset)
                         /* Enough payload to fill up from this page. */
@@ -388,8 +365,7 @@ static int __qp_memcpy_to_queue(struct vmci_queue *queue,
                 }
 
                 bytes_copied += to_copy;
-                if (!kernel_if->mapped)
-                        kunmap(kernel_if->page[page_index]);
+                kunmap(kernel_if->page[page_index]);
         }
 
         return VMCI_SUCCESS;
@@ -417,11 +393,7 @@ static int __qp_memcpy_from_queue(void *dest,
                 void *va;
                 size_t to_copy;
 
-                if (!kernel_if->mapped)
-                        va = kmap(kernel_if->page[page_index]);
-                else
-                        va = (void *)((u8 *)kernel_if->va +
-                                      (page_index * PAGE_SIZE));
+                va = kmap(kernel_if->page[page_index]);
 
                 if (size - bytes_copied > PAGE_SIZE - page_offset)
                         /* Enough payload to fill up this page. */
@@ -446,8 +418,7 @@ static int __qp_memcpy_from_queue(void *dest,
                 }
 
                 bytes_copied += to_copy;
-                if (!kernel_if->mapped)
-                        kunmap(kernel_if->page[page_index]);
+                kunmap(kernel_if->page[page_index]);
         }
 
         return VMCI_SUCCESS;
@@ -634,8 +605,6 @@ static struct vmci_queue *qp_host_alloc_queue(u64 size)
                 queue->kernel_if->header_page =
                     (struct page **)((u8 *)queue + queue_size);
                 queue->kernel_if->page = &queue->kernel_if->header_page[1];
-                queue->kernel_if->va = NULL;
-                queue->kernel_if->mapped = false;
         }
 
         return queue;
@@ -1720,21 +1689,6 @@ static int qp_broker_attach(struct qp_broker_entry *entry,
                         if (result < VMCI_SUCCESS)
                                 return result;
 
-                        /*
-                         * Preemptively load in the headers if non-blocking to
-                         * prevent blocking later.
-                         */
-                        if (entry->qp.flags & VMCI_QPFLAG_NONBLOCK) {
-                                result = qp_host_map_queues(entry->produce_q,
-                                                            entry->consume_q);
-                                if (result < VMCI_SUCCESS) {
-                                        qp_host_unregister_user_memory(
-                                                entry->produce_q,
-                                                entry->consume_q);
-                                        return result;
-                                }
-                        }
-
                         entry->state = VMCIQPB_ATTACHED_MEM;
                 } else {
                         entry->state = VMCIQPB_ATTACHED_NO_MEM;
@@ -1749,24 +1703,6 @@ static int qp_broker_attach(struct qp_broker_entry *entry,
 
                         return VMCI_ERROR_UNAVAILABLE;
                 } else {
-                        /*
-                         * For non-blocking queue pairs, we cannot rely on
-                         * enqueue/dequeue to map in the pages on the
-                         * host-side, since it may block, so we make an
-                         * attempt here.
-                         */
-
-                        if (flags & VMCI_QPFLAG_NONBLOCK) {
-                                result =
-                                    qp_host_map_queues(entry->produce_q,
-                                                       entry->consume_q);
-                                if (result < VMCI_SUCCESS)
-                                        return result;
-
-                                entry->qp.flags |= flags &
-                                    (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED);
-                        }
-
                         /* The host side has successfully attached to a queue pair. */
                         entry->state = VMCIQPB_ATTACHED_MEM;
                 }
@@ -2543,24 +2479,19 @@ void vmci_qp_guest_endpoints_exit(void)
  * Since non-blocking isn't yet implemented on the host personality we
  * have no reason to acquire a spin lock. So to avoid the use of an
  * unnecessary lock only acquire the mutex if we can block.
- * Note: It is assumed that QPFLAG_PINNED implies QPFLAG_NONBLOCK. Therefore
- * we can use the same locking function for access to both the queue
- * and the queue headers as it is the same logic. Assert this behvior.
  */
 static void qp_lock(const struct vmci_qp *qpair)
 {
-        if (vmci_can_block(qpair->flags))
-                qp_acquire_queue_mutex(qpair->produce_q);
+        qp_acquire_queue_mutex(qpair->produce_q);
 }
 
 /*
  * Helper routine that unlocks the queue pair after calling
- * qp_lock. Respects non-blocking and pinning flags.
+ * qp_lock.
  */
 static void qp_unlock(const struct vmci_qp *qpair)
 {
-        if (vmci_can_block(qpair->flags))
-                qp_release_queue_mutex(qpair->produce_q);
+        qp_release_queue_mutex(qpair->produce_q);
 }
 
 /*
@@ -2568,17 +2499,12 @@ static void qp_unlock(const struct vmci_qp *qpair)
  * currently not mapped, it will be attempted to do so.
  */
 static int qp_map_queue_headers(struct vmci_queue *produce_q,
-                                struct vmci_queue *consume_q,
-                                bool can_block)
+                                struct vmci_queue *consume_q)
 {
         int result;
 
         if (NULL == produce_q->q_header || NULL == consume_q->q_header) {
-                if (can_block)
-                        result = qp_host_map_queues(produce_q, consume_q);
-                else
-                        result = VMCI_ERROR_QUEUEPAIR_NOT_READY;
-
+                result = qp_host_map_queues(produce_q, consume_q);
                 if (result < VMCI_SUCCESS)
                         return (produce_q->saved_header &&
                                 consume_q->saved_header) ?
@@ -2601,8 +2527,7 @@ static int qp_get_queue_headers(const struct vmci_qp *qpair,
 {
         int result;
 
-        result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q,
-                                      vmci_can_block(qpair->flags));
+        result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q);
         if (result == VMCI_SUCCESS) {
                 *produce_q_header = qpair->produce_q->q_header;
                 *consume_q_header = qpair->consume_q->q_header;
@@ -2645,9 +2570,6 @@ static bool qp_wait_for_ready_queue(struct vmci_qp *qpair)
 {
         unsigned int generation;
 
-        if (qpair->flags & VMCI_QPFLAG_NONBLOCK)
-                return false;
-
         qpair->blocked++;
         generation = qpair->generation;
         qp_unlock(qpair);
@@ -2674,15 +2596,14 @@ static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
                                  const u64 produce_q_size,
                                  const void *buf,
                                  size_t buf_size,
-                                 vmci_memcpy_to_queue_func memcpy_to_queue,
-                                 bool can_block)
+                                 vmci_memcpy_to_queue_func memcpy_to_queue)
 {
         s64 free_space;
         u64 tail;
         size_t written;
         ssize_t result;
 
-        result = qp_map_queue_headers(produce_q, consume_q, can_block);
+        result = qp_map_queue_headers(produce_q, consume_q);
         if (unlikely(result != VMCI_SUCCESS))
                 return result;
 
@@ -2737,15 +2658,14 @@ static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q,
                                  void *buf,
                                  size_t buf_size,
                                  vmci_memcpy_from_queue_func memcpy_from_queue,
-                                 bool update_consumer,
-                                 bool can_block)
+                                 bool update_consumer)
 {
         s64 buf_ready;
         u64 head;
         size_t read;
         ssize_t result;
 
-        result = qp_map_queue_headers(produce_q, consume_q, can_block);
+        result = qp_map_queue_headers(produce_q, consume_q);
         if (unlikely(result != VMCI_SUCCESS))
                 return result;
 
@@ -2842,32 +2762,11 @@ int vmci_qpair_alloc(struct vmci_qp **qpair,
         route = vmci_guest_code_active() ?
             VMCI_ROUTE_AS_GUEST : VMCI_ROUTE_AS_HOST;
 
-        /* If NONBLOCK or PINNED is set, we better be the guest personality. */
-        if ((!vmci_can_block(flags) || vmci_qp_pinned(flags)) &&
-            VMCI_ROUTE_AS_GUEST != route) {
-                pr_devel("Not guest personality w/ NONBLOCK OR PINNED set");
+        if (flags & (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED)) {
+                pr_devel("NONBLOCK OR PINNED set");
                 return VMCI_ERROR_INVALID_ARGS;
         }
 
-        /*
-         * Limit the size of pinned QPs and check sanity.
-         *
-         * Pinned pages implies non-blocking mode. Mutexes aren't acquired
-         * when the NONBLOCK flag is set in qpair code; and also should not be
-         * acquired when the PINNED flagged is set. Since pinning pages
-         * implies we want speed, it makes no sense not to have NONBLOCK
-         * set if PINNED is set. Hence enforce this implication.
-         */
-        if (vmci_qp_pinned(flags)) {
-                if (vmci_can_block(flags)) {
-                        pr_err("Attempted to enable pinning w/o non-blocking");
-                        return VMCI_ERROR_INVALID_ARGS;
-                }
-
-                if (produce_qsize + consume_qsize > VMCI_MAX_PINNED_QP_MEMORY)
-                        return VMCI_ERROR_NO_RESOURCES;
-        }
-
         my_qpair = kzalloc(sizeof(*my_qpair), GFP_KERNEL);
         if (!my_qpair)
                 return VMCI_ERROR_NO_MEM;
@@ -3195,8 +3094,7 @@ ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair,
                                    qpair->consume_q,
                                    qpair->produce_q_size,
                                    buf, buf_size,
-                                   qp_memcpy_to_queue,
-                                   vmci_can_block(qpair->flags));
+                                   qp_memcpy_to_queue);
 
         if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
             !qp_wait_for_ready_queue(qpair))
@@ -3237,8 +3135,7 @@ ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
                                    qpair->consume_q,
                                    qpair->consume_q_size,
                                    buf, buf_size,
-                                   qp_memcpy_from_queue, true,
-                                   vmci_can_block(qpair->flags));
+                                   qp_memcpy_from_queue, true);
 
         if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
             !qp_wait_for_ready_queue(qpair))
@@ -3280,8 +3177,7 @@ ssize_t vmci_qpair_peek(struct vmci_qp *qpair,
                                    qpair->consume_q,
                                    qpair->consume_q_size,
                                    buf, buf_size,
-                                   qp_memcpy_from_queue, false,
-                                   vmci_can_block(qpair->flags));
+                                   qp_memcpy_from_queue, false);
 
         if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
             !qp_wait_for_ready_queue(qpair))
@@ -3323,8 +3219,7 @@ ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
                                    qpair->consume_q,
                                    qpair->produce_q_size,
                                    iov, iov_size,
-                                   qp_memcpy_to_queue_iov,
-                                   vmci_can_block(qpair->flags));
+                                   qp_memcpy_to_queue_iov);
 
         if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
             !qp_wait_for_ready_queue(qpair))
@@ -3367,7 +3262,7 @@ ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
                                    qpair->consume_q_size,
                                    iov, iov_size,
                                    qp_memcpy_from_queue_iov,
-                                   true, vmci_can_block(qpair->flags));
+                                   true);
 
         if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
             !qp_wait_for_ready_queue(qpair))
@@ -3411,7 +3306,7 @@ ssize_t vmci_qpair_peekv(struct vmci_qp *qpair,
                                    qpair->consume_q_size,
                                    iov, iov_size,
                                    qp_memcpy_from_queue_iov,
-                                   false, vmci_can_block(qpair->flags));
+                                   false);
 
         if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
             !qp_wait_for_ready_queue(qpair))
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.h b/drivers/misc/vmw_vmci/vmci_queue_pair.h
index 58c6959f6b6d..ed177f04ef24 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.h
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.h
@@ -146,24 +146,6 @@ VMCI_QP_PAGESTORE_IS_WELLFORMED(struct vmci_qp_page_store *page_store)
         return page_store->len >= 2;
 }
 
-/*
- * Helper function to check if the non-blocking flag
- * is set for a given queue pair.
- */
-static inline bool vmci_can_block(u32 flags)
-{
-        return !(flags & VMCI_QPFLAG_NONBLOCK);
-}
-
-/*
- * Helper function to check if the queue pair is pinned
- * into memory.
- */
-static inline bool vmci_qp_pinned(u32 flags)
-{
-        return flags & VMCI_QPFLAG_PINNED;
-}
-
 void vmci_qp_broker_exit(void);
 int vmci_qp_broker_alloc(struct vmci_handle handle, u32 peer,
                          u32 flags, u32 priv_flags,