author	Andy King <acking@vmware.com>	2013-08-23 12:22:14 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2013-08-28 00:42:12 -0400
commit	6d6dfb4f4aa9ee352a199b5379942350bdd26e64 (patch)
tree	91c12892923e6402d34246112947abdf03e5538d
parent	45412befe8fee657effc15112af05ca9dbea61fc (diff)
VMCI: Add support for virtual IOMMU
This patch adds support for virtual IOMMU to the vmci module. We switch to
DMA consistent mappings for guest queuepair and doorbell pages that are
passed to the device. We still allocate each page individually, since
there's no guarantee that we'll get a contiguous block of physical memory
for an entire queuepair (especially since we allow up to 128 MiB!).

Also made the split between guest and host in the kernelIf struct much
clearer. Now it's obvious which fields are which.

Acked-by: George Zhang <georgezhang@vmware.com>
Acked-by: Aditya Sarwade <asarwade@vmware.com>
Signed-off-by: Andy King <acking@vmware.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--	drivers/misc/vmw_vmci/vmci_driver.c	2
-rw-r--r--	drivers/misc/vmw_vmci/vmci_driver.h	7
-rw-r--r--	drivers/misc/vmw_vmci/vmci_guest.c	22
-rw-r--r--	drivers/misc/vmw_vmci/vmci_queue_pair.c	180
4 files changed, 128 insertions(+), 83 deletions(-)
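
Editor's note: before the diff, a minimal standalone sketch of the allocation
pattern this patch adopts; it is not code from the patch itself. Each page is
allocated with dma_alloc_coherent() so the device sees a valid bus address
even when a virtual IOMMU remaps DMA, instead of handing raw PFNs from
vmalloc()/alloc_pages() to the device. The function name, device pointer, and
page count are illustrative.

	static int example_alloc_dma_pages(struct device *dev, size_t num_pages,
					   void **vas, dma_addr_t *pas)
	{
		size_t i;

		for (i = 0; i < num_pages; i++) {
			/* One coherent mapping per page; no contiguity assumed. */
			vas[i] = dma_alloc_coherent(dev, PAGE_SIZE, &pas[i],
						    GFP_KERNEL);
			if (!vas[i]) {
				/* Unwind the pages mapped so far. */
				while (i)
					dma_free_coherent(dev, PAGE_SIZE,
							  vas[--i], pas[i]);
				return -ENOMEM;
			}
		}
		return 0;
	}
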
diff --git a/drivers/misc/vmw_vmci/vmci_driver.c b/drivers/misc/vmw_vmci/vmci_driver.c
index 7b3fce2da6c3..3dee7ae123e7 100644
--- a/drivers/misc/vmw_vmci/vmci_driver.c
+++ b/drivers/misc/vmw_vmci/vmci_driver.c
@@ -113,5 +113,5 @@ module_exit(vmci_drv_exit);
 
 MODULE_AUTHOR("VMware, Inc.");
 MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface.");
-MODULE_VERSION("1.0.0.0-k");
+MODULE_VERSION("1.1.0.0-k");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/vmw_vmci/vmci_driver.h b/drivers/misc/vmw_vmci/vmci_driver.h
index f69156a1f30c..cee9e977d318 100644
--- a/drivers/misc/vmw_vmci/vmci_driver.h
+++ b/drivers/misc/vmw_vmci/vmci_driver.h
@@ -35,6 +35,13 @@ struct vmci_obj {
 	enum vmci_obj_type type;
 };
 
+/*
+ * Needed by other components of this module. It's okay to have one global
+ * instance of this because there can only ever be one VMCI device. Our
+ * virtual hardware enforces this.
+ */
+extern struct pci_dev *vmci_pdev;
+
 u32 vmci_get_context_id(void);
 int vmci_send_datagram(struct vmci_datagram *dg);
 
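
Editor's note: an illustrative (not from the patch) use of the new vmci_pdev
extern from another compilation unit of the module, which is exactly what the
queue-pair code below does; the local variable names are hypothetical.

	dma_addr_t pa;
	void *va;

	/* DMA against the one-and-only VMCI PCI device. */
	va = dma_alloc_coherent(&vmci_pdev->dev, PAGE_SIZE, &pa, GFP_KERNEL);
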
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
index 60c01999f489..b3a2b763ecf2 100644
--- a/drivers/misc/vmw_vmci/vmci_guest.c
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -65,9 +65,11 @@ struct vmci_guest_device {
 
 	void *data_buffer;
 	void *notification_bitmap;
+	dma_addr_t notification_base;
 };
 
 /* vmci_dev singleton device and supporting data*/
+struct pci_dev *vmci_pdev;
 static struct vmci_guest_device *vmci_dev_g;
 static DEFINE_SPINLOCK(vmci_dev_spinlock);
 
@@ -528,7 +530,9 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
 	 * well.
 	 */
 	if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
-		vmci_dev->notification_bitmap = vmalloc(PAGE_SIZE);
+		vmci_dev->notification_bitmap = dma_alloc_coherent(
+			&pdev->dev, PAGE_SIZE, &vmci_dev->notification_base,
+			GFP_KERNEL);
 		if (!vmci_dev->notification_bitmap) {
 			dev_warn(&pdev->dev,
 				 "Unable to allocate notification bitmap\n");
@@ -546,6 +550,7 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
 	/* Set up global device so that we can start sending datagrams */
 	spin_lock_irq(&vmci_dev_spinlock);
 	vmci_dev_g = vmci_dev;
+	vmci_pdev = pdev;
 	spin_unlock_irq(&vmci_dev_spinlock);
 
 	/*
@@ -553,9 +558,8 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
 	 * used.
 	 */
 	if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
-		struct page *page =
-			vmalloc_to_page(vmci_dev->notification_bitmap);
-		unsigned long bitmap_ppn = page_to_pfn(page);
+		unsigned long bitmap_ppn =
+			vmci_dev->notification_base >> PAGE_SHIFT;
 		if (!vmci_dbell_register_notification_bitmap(bitmap_ppn)) {
 			dev_warn(&pdev->dev,
 				 "VMCI device unable to register notification bitmap with PPN 0x%x\n",
@@ -665,11 +669,14 @@ err_remove_bitmap:
 	if (vmci_dev->notification_bitmap) {
 		iowrite32(VMCI_CONTROL_RESET,
 			  vmci_dev->iobase + VMCI_CONTROL_ADDR);
-		vfree(vmci_dev->notification_bitmap);
+		dma_free_coherent(&pdev->dev, PAGE_SIZE,
+				  vmci_dev->notification_bitmap,
+				  vmci_dev->notification_base);
 	}
 
 err_remove_vmci_dev_g:
 	spin_lock_irq(&vmci_dev_spinlock);
+	vmci_pdev = NULL;
 	vmci_dev_g = NULL;
 	spin_unlock_irq(&vmci_dev_spinlock);
 
@@ -699,6 +706,7 @@ static void vmci_guest_remove_device(struct pci_dev *pdev)
 
 	spin_lock_irq(&vmci_dev_spinlock);
 	vmci_dev_g = NULL;
+	vmci_pdev = NULL;
 	spin_unlock_irq(&vmci_dev_spinlock);
 
 	dev_dbg(&pdev->dev, "Resetting vmci device\n");
@@ -727,7 +735,9 @@ static void vmci_guest_remove_device(struct pci_dev *pdev)
 		 * device, so we can safely free it here.
 		 */
 
-		vfree(vmci_dev->notification_bitmap);
+		dma_free_coherent(&pdev->dev, PAGE_SIZE,
+				  vmci_dev->notification_bitmap,
+				  vmci_dev->notification_base);
 	}
 
 	vfree(vmci_dev->data_buffer);
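
Editor's note: the PPN handed to vmci_dbell_register_notification_bitmap() is
now derived from the coherent DMA address rather than from page_to_pfn(). A
worked example, assuming 4 KiB pages (PAGE_SHIFT == 12) and an illustrative
bus address:

	/* notification_base == 0x12345000 (illustrative) */
	unsigned long bitmap_ppn = 0x12345000UL >> PAGE_SHIFT;	/* == 0x12345 */
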
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index 8698e0c5bdb4..a0515a6d6ebd 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -21,6 +21,7 @@
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/pagemap.h>
+#include <linux/pci.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/uio.h>
@@ -146,12 +147,20 @@ typedef int vmci_memcpy_from_queue_func(void *dest, size_t dest_offset,
 
 /* The Kernel specific component of the struct vmci_queue structure. */
 struct vmci_queue_kern_if {
-	struct page **page;
-	struct page **header_page;
 	struct mutex __mutex;	/* Protects the queue. */
 	struct mutex *mutex;	/* Shared by producer and consumer queues. */
-	bool host;
-	size_t num_pages;
+	size_t num_pages;	/* Number of pages incl. header. */
+	bool host;		/* Host or guest? */
+	union {
+		struct {
+			dma_addr_t *pas;
+			void **vas;
+		} g;		/* Used by the guest. */
+		struct {
+			struct page **page;
+			struct page **header_page;
+		} h;		/* Used by the host. */
+	} u;
 };
 
 /*
@@ -263,59 +272,65 @@ static void qp_free_queue(void *q, u64 size)
 	struct vmci_queue *queue = q;
 
 	if (queue) {
-		u64 i = DIV_ROUND_UP(size, PAGE_SIZE);
+		u64 i;
 
-		while (i)
-			__free_page(queue->kernel_if->page[--i]);
+		/* Given size does not include header, so add in a page here. */
+		for (i = 0; i < DIV_ROUND_UP(size, PAGE_SIZE) + 1; i++) {
+			dma_free_coherent(&vmci_pdev->dev, PAGE_SIZE,
+					  queue->kernel_if->u.g.vas[i],
+					  queue->kernel_if->u.g.pas[i]);
+		}
 
-		vfree(queue->q_header);
+		vfree(queue);
 	}
 }
 
 /*
- * Allocates kernel VA space of specified size, plus space for the
- * queue structure/kernel interface and the queue header. Allocates
- * physical pages for the queue data pages.
- *
- * PAGE m:      struct vmci_queue_header (struct vmci_queue->q_header)
- * PAGE m+1:    struct vmci_queue
- * PAGE m+1+q:  struct vmci_queue_kern_if (struct vmci_queue->kernel_if)
- * PAGE n-size: Data pages (struct vmci_queue->kernel_if->page[])
+ * Allocates kernel queue pages of specified size with IOMMU mappings,
+ * plus space for the queue structure/kernel interface and the queue
+ * header.
  */
 static void *qp_alloc_queue(u64 size, u32 flags)
 {
 	u64 i;
 	struct vmci_queue *queue;
-	struct vmci_queue_header *q_header;
-	const u64 num_data_pages = DIV_ROUND_UP(size, PAGE_SIZE);
-	const uint queue_size =
-	    PAGE_SIZE +
-	    sizeof(*queue) + sizeof(*(queue->kernel_if)) +
-	    num_data_pages * sizeof(*(queue->kernel_if->page));
+	const size_t num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
+	const size_t pas_size = num_pages * sizeof(*queue->kernel_if->u.g.pas);
+	const size_t vas_size = num_pages * sizeof(*queue->kernel_if->u.g.vas);
+	const size_t queue_size =
+		sizeof(*queue) + sizeof(*queue->kernel_if) +
+		pas_size + vas_size;
 
-	q_header = vmalloc(queue_size);
-	if (!q_header)
+	queue = vmalloc(queue_size);
+	if (!queue)
 		return NULL;
 
-	queue = (void *)q_header + PAGE_SIZE;
-	queue->q_header = q_header;
+	queue->q_header = NULL;
 	queue->saved_header = NULL;
 	queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
-	queue->kernel_if->header_page = NULL;	/* Unused in guest. */
-	queue->kernel_if->page = (struct page **)(queue->kernel_if + 1);
+	queue->kernel_if->mutex = NULL;
+	queue->kernel_if->num_pages = num_pages;
+	queue->kernel_if->u.g.pas = (dma_addr_t *)(queue->kernel_if + 1);
+	queue->kernel_if->u.g.vas =
+		(void **)((u8 *)queue->kernel_if->u.g.pas + pas_size);
 	queue->kernel_if->host = false;
 
-	for (i = 0; i < num_data_pages; i++) {
-		queue->kernel_if->page[i] = alloc_pages(GFP_KERNEL, 0);
-		if (!queue->kernel_if->page[i])
-			goto fail;
+	for (i = 0; i < num_pages; i++) {
+		queue->kernel_if->u.g.vas[i] =
+			dma_alloc_coherent(&vmci_pdev->dev, PAGE_SIZE,
+					   &queue->kernel_if->u.g.pas[i],
+					   GFP_KERNEL);
+		if (!queue->kernel_if->u.g.vas[i]) {
+			/* Size excl. the header. */
+			qp_free_queue(queue, i * PAGE_SIZE);
+			return NULL;
+		}
 	}
 
-	return (void *)queue;
+	/* Queue header is the first page. */
+	queue->q_header = queue->kernel_if->u.g.vas[0];
 
- fail:
-	qp_free_queue(queue, i * PAGE_SIZE);
-	return NULL;
+	return queue;
 }
 
 /*
@@ -334,13 +349,18 @@ static int __qp_memcpy_to_queue(struct vmci_queue *queue,
 	size_t bytes_copied = 0;
 
 	while (bytes_copied < size) {
-		u64 page_index = (queue_offset + bytes_copied) / PAGE_SIZE;
-		size_t page_offset =
+		const u64 page_index =
+			(queue_offset + bytes_copied) / PAGE_SIZE;
+		const size_t page_offset =
 			(queue_offset + bytes_copied) & (PAGE_SIZE - 1);
 		void *va;
 		size_t to_copy;
 
-		va = kmap(kernel_if->page[page_index]);
+		if (kernel_if->host)
+			va = kmap(kernel_if->u.h.page[page_index]);
+		else
+			va = kernel_if->u.g.vas[page_index + 1];
+			/* Skip header. */
 
 		if (size - bytes_copied > PAGE_SIZE - page_offset)
 			/* Enough payload to fill up from this page. */
@@ -356,7 +376,8 @@ static int __qp_memcpy_to_queue(struct vmci_queue *queue,
 			err = memcpy_fromiovec((u8 *)va + page_offset,
 					       iov, to_copy);
 			if (err != 0) {
-				kunmap(kernel_if->page[page_index]);
+				if (kernel_if->host)
+					kunmap(kernel_if->u.h.page[page_index]);
 				return VMCI_ERROR_INVALID_ARGS;
 			}
 		} else {
@@ -365,7 +386,8 @@ static int __qp_memcpy_to_queue(struct vmci_queue *queue,
 		}
 
 		bytes_copied += to_copy;
-		kunmap(kernel_if->page[page_index]);
+		if (kernel_if->host)
+			kunmap(kernel_if->u.h.page[page_index]);
 	}
 
 	return VMCI_SUCCESS;
@@ -387,13 +409,18 @@ static int __qp_memcpy_from_queue(void *dest,
 	size_t bytes_copied = 0;
 
 	while (bytes_copied < size) {
-		u64 page_index = (queue_offset + bytes_copied) / PAGE_SIZE;
-		size_t page_offset =
+		const u64 page_index =
+			(queue_offset + bytes_copied) / PAGE_SIZE;
+		const size_t page_offset =
 			(queue_offset + bytes_copied) & (PAGE_SIZE - 1);
 		void *va;
 		size_t to_copy;
 
-		va = kmap(kernel_if->page[page_index]);
+		if (kernel_if->host)
+			va = kmap(kernel_if->u.h.page[page_index]);
+		else
+			va = kernel_if->u.g.vas[page_index + 1];
+			/* Skip header. */
 
 		if (size - bytes_copied > PAGE_SIZE - page_offset)
 			/* Enough payload to fill up this page. */
@@ -409,7 +436,8 @@ static int __qp_memcpy_from_queue(void *dest,
 			err = memcpy_toiovec(iov, (u8 *)va + page_offset,
 					     to_copy);
 			if (err != 0) {
-				kunmap(kernel_if->page[page_index]);
+				if (kernel_if->host)
+					kunmap(kernel_if->u.h.page[page_index]);
 				return VMCI_ERROR_INVALID_ARGS;
 			}
 		} else {
@@ -418,7 +446,8 @@ static int __qp_memcpy_from_queue(void *dest,
 		}
 
 		bytes_copied += to_copy;
-		kunmap(kernel_if->page[page_index]);
+		if (kernel_if->host)
+			kunmap(kernel_if->u.h.page[page_index]);
 	}
 
 	return VMCI_SUCCESS;
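
Editor's note: the "page_index + 1" in the guest paths above skips the header
page, since vas[0] is the queue header and data pages start at vas[1]. A
worked example with illustrative numbers, assuming 4 KiB pages:

	/*
	 * For a copy starting at queue_offset 5000:
	 *
	 *   page_index  = 5000 / PAGE_SIZE        == 1
	 *   page_offset = 5000 & (PAGE_SIZE - 1)  == 904
	 *
	 * so the guest touches vas[page_index + 1] == vas[2], i.e. the
	 * second data page, at byte offset 904.
	 */
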
@@ -460,12 +489,11 @@ static int qp_alloc_ppn_set(void *prod_q,
 		return VMCI_ERROR_NO_MEM;
 	}
 
-	produce_ppns[0] = page_to_pfn(vmalloc_to_page(produce_q->q_header));
-	for (i = 1; i < num_produce_pages; i++) {
+	for (i = 0; i < num_produce_pages; i++) {
 		unsigned long pfn;
 
 		produce_ppns[i] =
-		    page_to_pfn(produce_q->kernel_if->page[i - 1]);
+			produce_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;
 		pfn = produce_ppns[i];
 
 		/* Fail allocation if PFN isn't supported by hypervisor. */
@@ -474,12 +502,11 @@ static int qp_alloc_ppn_set(void *prod_q,
 		goto ppn_error;
 	}
 
-	consume_ppns[0] = page_to_pfn(vmalloc_to_page(consume_q->q_header));
-	for (i = 1; i < num_consume_pages; i++) {
+	for (i = 0; i < num_consume_pages; i++) {
 		unsigned long pfn;
 
 		consume_ppns[i] =
-		    page_to_pfn(consume_q->kernel_if->page[i - 1]);
+			consume_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;
 		pfn = consume_ppns[i];
 
 		/* Fail allocation if PFN isn't supported by hypervisor. */
@@ -590,21 +617,20 @@ static struct vmci_queue *qp_host_alloc_queue(u64 size)
 	const size_t num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
 	const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));
 	const size_t queue_page_size =
-	    num_pages * sizeof(*queue->kernel_if->page);
+		num_pages * sizeof(*queue->kernel_if->u.h.page);
 
 	queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL);
 	if (queue) {
 		queue->q_header = NULL;
 		queue->saved_header = NULL;
-		queue->kernel_if =
-		    (struct vmci_queue_kern_if *)((u8 *)queue +
-						  sizeof(*queue));
+		queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
 		queue->kernel_if->host = true;
 		queue->kernel_if->mutex = NULL;
 		queue->kernel_if->num_pages = num_pages;
-		queue->kernel_if->header_page =
-		    (struct page **)((u8 *)queue + queue_size);
-		queue->kernel_if->page = &queue->kernel_if->header_page[1];
+		queue->kernel_if->u.h.header_page =
+			(struct page **)((u8 *)queue + queue_size);
+		queue->kernel_if->u.h.page =
+			&queue->kernel_if->u.h.header_page[1];
 	}
 
 	return queue;
@@ -711,11 +737,12 @@ static int qp_host_get_user_memory(u64 produce_uva,
 				     current->mm,
 				     (uintptr_t) produce_uva,
 				     produce_q->kernel_if->num_pages,
-				     1, 0, produce_q->kernel_if->header_page, NULL);
+				     1, 0,
+				     produce_q->kernel_if->u.h.header_page, NULL);
 	if (retval < produce_q->kernel_if->num_pages) {
 		pr_warn("get_user_pages(produce) failed (retval=%d)", retval);
-		qp_release_pages(produce_q->kernel_if->header_page, retval,
-				 false);
+		qp_release_pages(produce_q->kernel_if->u.h.header_page,
+				 retval, false);
 		err = VMCI_ERROR_NO_MEM;
 		goto out;
 	}
@@ -724,12 +751,13 @@ static int qp_host_get_user_memory(u64 produce_uva,
 				     current->mm,
 				     (uintptr_t) consume_uva,
 				     consume_q->kernel_if->num_pages,
-				     1, 0, consume_q->kernel_if->header_page, NULL);
+				     1, 0,
+				     consume_q->kernel_if->u.h.header_page, NULL);
 	if (retval < consume_q->kernel_if->num_pages) {
 		pr_warn("get_user_pages(consume) failed (retval=%d)", retval);
-		qp_release_pages(consume_q->kernel_if->header_page, retval,
-				 false);
-		qp_release_pages(produce_q->kernel_if->header_page,
+		qp_release_pages(consume_q->kernel_if->u.h.header_page,
+				 retval, false);
+		qp_release_pages(produce_q->kernel_if->u.h.header_page,
 				 produce_q->kernel_if->num_pages, false);
 		err = VMCI_ERROR_NO_MEM;
 	}
@@ -772,15 +800,15 @@ static int qp_host_register_user_memory(struct vmci_qp_page_store *page_store,
 static void qp_host_unregister_user_memory(struct vmci_queue *produce_q,
 					   struct vmci_queue *consume_q)
 {
-	qp_release_pages(produce_q->kernel_if->header_page,
+	qp_release_pages(produce_q->kernel_if->u.h.header_page,
 			 produce_q->kernel_if->num_pages, true);
-	memset(produce_q->kernel_if->header_page, 0,
-	       sizeof(*produce_q->kernel_if->header_page) *
+	memset(produce_q->kernel_if->u.h.header_page, 0,
+	       sizeof(*produce_q->kernel_if->u.h.header_page) *
 	       produce_q->kernel_if->num_pages);
-	qp_release_pages(consume_q->kernel_if->header_page,
+	qp_release_pages(consume_q->kernel_if->u.h.header_page,
 			 consume_q->kernel_if->num_pages, true);
-	memset(consume_q->kernel_if->header_page, 0,
-	       sizeof(*consume_q->kernel_if->header_page) *
+	memset(consume_q->kernel_if->u.h.header_page, 0,
+	       sizeof(*consume_q->kernel_if->u.h.header_page) *
 	       consume_q->kernel_if->num_pages);
 }
 
@@ -803,12 +831,12 @@ static int qp_host_map_queues(struct vmci_queue *produce_q,
 	if (produce_q->q_header != consume_q->q_header)
 		return VMCI_ERROR_QUEUEPAIR_MISMATCH;
 
-	if (produce_q->kernel_if->header_page == NULL ||
-	    *produce_q->kernel_if->header_page == NULL)
+	if (produce_q->kernel_if->u.h.header_page == NULL ||
+	    *produce_q->kernel_if->u.h.header_page == NULL)
 		return VMCI_ERROR_UNAVAILABLE;
 
-	headers[0] = *produce_q->kernel_if->header_page;
-	headers[1] = *consume_q->kernel_if->header_page;
+	headers[0] = *produce_q->kernel_if->u.h.header_page;
+	headers[1] = *consume_q->kernel_if->u.h.header_page;
 
 	produce_q->q_header = vmap(headers, 2, VM_MAP, PAGE_KERNEL);
 	if (produce_q->q_header != NULL) {