Diffstat (limited to 'drivers/xen/xenbus/xenbus_client.c')
 -rw-r--r--  drivers/xen/xenbus/xenbus_client.c | 387
 1 file changed, 286 insertions(+), 101 deletions(-)
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index ca744102b666..96b2011d25f3 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -52,17 +52,25 @@
 struct xenbus_map_node {
         struct list_head next;
         union {
-                struct vm_struct *area; /* PV */
-                struct page *page;      /* HVM */
+                struct {
+                        struct vm_struct *area;
+                } pv;
+                struct {
+                        struct page *pages[XENBUS_MAX_RING_PAGES];
+                        void *addr;
+                } hvm;
         };
-        grant_handle_t handle;
+        grant_handle_t handles[XENBUS_MAX_RING_PAGES];
+        unsigned int nr_handles;
 };
 
 static DEFINE_SPINLOCK(xenbus_valloc_lock);
 static LIST_HEAD(xenbus_valloc_pages);
 
 struct xenbus_ring_ops {
-        int (*map)(struct xenbus_device *dev, int gnt, void **vaddr);
+        int (*map)(struct xenbus_device *dev,
+                   grant_ref_t *gnt_refs, unsigned int nr_grefs,
+                   void **vaddr);
         int (*unmap)(struct xenbus_device *dev, void *vaddr);
 };
 
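Both new array members above are sized by XENBUS_MAX_RING_PAGES, which is not defined in this file. A minimal sketch of what the companion header change (include/xen/xenbus.h) is assumed to provide; the order value is an assumption for illustration, not taken from this hunk:

    /* Assumed companion definitions in include/xen/xenbus.h; the order
     * value is illustrative, not part of this diff. */
    #define XENBUS_MAX_RING_PAGE_ORDER  4
    #define XENBUS_MAX_RING_PAGES       (1U << XENBUS_MAX_RING_PAGE_ORDER)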
@@ -355,17 +363,39 @@ static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
 /**
  * xenbus_grant_ring
  * @dev: xenbus device
- * @ring_mfn: mfn of ring to grant
-
- * Grant access to the given @ring_mfn to the peer of the given device. Return
- * a grant reference on success, or -errno on error. On error, the device will
- * switch to XenbusStateClosing, and the error will be saved in the store.
+ * @vaddr: starting virtual address of the ring
+ * @nr_pages: number of pages to be granted
+ * @grefs: grant reference array to be filled in
+ *
+ * Grant access to the given @vaddr to the peer of the given device.
+ * Then fill in @grefs with grant references. Return 0 on success, or
+ * -errno on error. On error, the device will switch to
+ * XenbusStateClosing, and the error will be saved in the store.
  */
-int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn)
+int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
+                      unsigned int nr_pages, grant_ref_t *grefs)
 {
-        int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0);
-        if (err < 0)
-                xenbus_dev_fatal(dev, err, "granting access to ring page");
+        int err;
+        int i, j;
+
+        for (i = 0; i < nr_pages; i++) {
+                unsigned long addr = (unsigned long)vaddr +
+                        (PAGE_SIZE * i);
+                err = gnttab_grant_foreign_access(dev->otherend_id,
+                                                  virt_to_mfn(addr), 0);
+                if (err < 0) {
+                        xenbus_dev_fatal(dev, err,
+                                         "granting access to ring page");
+                        goto fail;
+                }
+                grefs[i] = err;
+        }
+
+        return 0;
+
+fail:
+        for (j = 0; j < i; j++)
+                gnttab_end_foreign_access_ref(grefs[j], 0);
         return err;
 }
 EXPORT_SYMBOL_GPL(xenbus_grant_ring);
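A hedged sketch of how a frontend might use the reworked xenbus_grant_ring() to share a ring larger than one page. The helper name my_front_setup_ring, the page-allocation strategy and the error path are illustrative assumptions, not part of this patch:

    #include <linux/gfp.h>
    #include <linux/mm.h>              /* get_order() */
    #include <xen/xenbus.h>
    #include <xen/grant_table.h>

    /* Illustrative only: grant an nr_pages ring to the peer domain. */
    static int my_front_setup_ring(struct xenbus_device *dev,
                                   unsigned int nr_pages,
                                   grant_ref_t *grefs, void **ring)
    {
            int err;

            /* The ring must start page aligned; __get_free_pages() gives that. */
            *ring = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                             get_order(nr_pages * PAGE_SIZE));
            if (!*ring)
                    return -ENOMEM;

            /* Fills grefs[0..nr_pages-1]; on failure xenbus_grant_ring()
             * has already ended the grants it managed to create. */
            err = xenbus_grant_ring(dev, *ring, nr_pages, grefs);
            if (err < 0) {
                    free_pages((unsigned long)*ring,
                               get_order(nr_pages * PAGE_SIZE));
                    *ring = NULL;
            }
            return err;
    }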
@@ -419,62 +449,130 @@ EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
 /**
  * xenbus_map_ring_valloc
  * @dev: xenbus device
- * @gnt_ref: grant reference
+ * @gnt_refs: grant reference array
+ * @nr_grefs: number of grant references
  * @vaddr: pointer to address to be filled out by mapping
  *
- * Based on Rusty Russell's skeleton driver's map_page.
- * Map a page of memory into this domain from another domain's grant table.
- * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
- * page to that address, and sets *vaddr to that address.
- * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
- * or -ENOMEM on error. If an error is returned, device will switch to
+ * Map @nr_grefs pages of memory into this domain from another
+ * domain's grant table. xenbus_map_ring_valloc allocates @nr_grefs
+ * pages of virtual address space, maps the pages to that address, and
+ * sets *vaddr to that address. Returns 0 on success, and GNTST_*
+ * (see xen/include/interface/grant_table.h) or -ENOMEM / -EINVAL on
+ * error. If an error is returned, device will switch to
  * XenbusStateClosing and the error message will be saved in XenStore.
  */
-int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
+int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
+                           unsigned int nr_grefs, void **vaddr)
 {
-        return ring_ops->map(dev, gnt_ref, vaddr);
+        return ring_ops->map(dev, gnt_refs, nr_grefs, vaddr);
 }
 EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
 
+/* N.B. sizeof(phys_addr_t) doesn't always equal to sizeof(unsigned
+ * long), e.g. 32-on-64.  Caller is responsible for preparing the
+ * right array to feed into this function */
+static int __xenbus_map_ring(struct xenbus_device *dev,
+                             grant_ref_t *gnt_refs,
+                             unsigned int nr_grefs,
+                             grant_handle_t *handles,
+                             phys_addr_t *addrs,
+                             unsigned int flags,
+                             bool *leaked)
+{
+        struct gnttab_map_grant_ref map[XENBUS_MAX_RING_PAGES];
+        struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_PAGES];
+        int i, j;
+        int err = GNTST_okay;
+
+        if (nr_grefs > XENBUS_MAX_RING_PAGES)
+                return -EINVAL;
+
+        for (i = 0; i < nr_grefs; i++) {
+                memset(&map[i], 0, sizeof(map[i]));
+                gnttab_set_map_op(&map[i], addrs[i], flags, gnt_refs[i],
+                                  dev->otherend_id);
+                handles[i] = INVALID_GRANT_HANDLE;
+        }
+
+        gnttab_batch_map(map, i);
+
+        for (i = 0; i < nr_grefs; i++) {
+                if (map[i].status != GNTST_okay) {
+                        err = map[i].status;
+                        xenbus_dev_fatal(dev, map[i].status,
+                                         "mapping in shared page %d from domain %d",
+                                         gnt_refs[i], dev->otherend_id);
+                        goto fail;
+                } else
+                        handles[i] = map[i].handle;
+        }
+
+        return GNTST_okay;
+
+fail:
+        for (i = j = 0; i < nr_grefs; i++) {
+                if (handles[i] != INVALID_GRANT_HANDLE) {
+                        memset(&unmap[j], 0, sizeof(unmap[j]));
+                        gnttab_set_unmap_op(&unmap[j], (phys_addr_t)addrs[i],
+                                            GNTMAP_host_map, handles[i]);
+                        j++;
+                }
+        }
+
+        if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, j))
+                BUG();
+
+        *leaked = false;
+        for (i = 0; i < j; i++) {
+                if (unmap[i].status != GNTST_okay) {
+                        *leaked = true;
+                        break;
+                }
+        }
+
+        return err;
+}
+
 static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
-                                     int gnt_ref, void **vaddr)
+                                     grant_ref_t *gnt_refs,
+                                     unsigned int nr_grefs,
+                                     void **vaddr)
 {
-        struct gnttab_map_grant_ref op = {
-                .flags = GNTMAP_host_map | GNTMAP_contains_pte,
-                .ref   = gnt_ref,
-                .dom   = dev->otherend_id,
-        };
         struct xenbus_map_node *node;
         struct vm_struct *area;
-        pte_t *pte;
+        pte_t *ptes[XENBUS_MAX_RING_PAGES];
+        phys_addr_t phys_addrs[XENBUS_MAX_RING_PAGES];
+        int err = GNTST_okay;
+        int i;
+        bool leaked;
 
         *vaddr = NULL;
 
+        if (nr_grefs > XENBUS_MAX_RING_PAGES)
+                return -EINVAL;
+
         node = kzalloc(sizeof(*node), GFP_KERNEL);
         if (!node)
                 return -ENOMEM;
 
-        area = alloc_vm_area(PAGE_SIZE, &pte);
+        area = alloc_vm_area(PAGE_SIZE * nr_grefs, ptes);
         if (!area) {
                 kfree(node);
                 return -ENOMEM;
         }
 
-        op.host_addr = arbitrary_virt_to_machine(pte).maddr;
+        for (i = 0; i < nr_grefs; i++)
+                phys_addrs[i] = arbitrary_virt_to_machine(ptes[i]).maddr;
 
-        gnttab_batch_map(&op, 1);
-
-        if (op.status != GNTST_okay) {
-                free_vm_area(area);
-                kfree(node);
-                xenbus_dev_fatal(dev, op.status,
-                                 "mapping in shared page %d from domain %d",
-                                 gnt_ref, dev->otherend_id);
-                return op.status;
-        }
+        err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
+                                phys_addrs,
+                                GNTMAP_host_map | GNTMAP_contains_pte,
+                                &leaked);
+        if (err)
+                goto failed;
 
-        node->handle = op.handle;
-        node->area = area;
+        node->nr_handles = nr_grefs;
+        node->pv.area = area;
 
         spin_lock(&xenbus_valloc_lock);
         list_add(&node->next, &xenbus_valloc_pages);
@@ -482,14 +580,33 @@ static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
 
         *vaddr = area->addr;
         return 0;
+
+failed:
+        if (!leaked)
+                free_vm_area(area);
+        else
+                pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs);
+
+        kfree(node);
+        return err;
 }
 
 static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
-                                      int gnt_ref, void **vaddr)
+                                      grant_ref_t *gnt_ref,
+                                      unsigned int nr_grefs,
+                                      void **vaddr)
 {
         struct xenbus_map_node *node;
+        int i;
         int err;
         void *addr;
+        bool leaked = false;
+        /* Why do we need two arrays? See comment of __xenbus_map_ring */
+        phys_addr_t phys_addrs[XENBUS_MAX_RING_PAGES];
+        unsigned long addrs[XENBUS_MAX_RING_PAGES];
+
+        if (nr_grefs > XENBUS_MAX_RING_PAGES)
+                return -EINVAL;
 
         *vaddr = NULL;
 
@@ -497,15 +614,32 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
         if (!node)
                 return -ENOMEM;
 
-        err = alloc_xenballooned_pages(1, &node->page, false /* lowmem */);
+        err = alloc_xenballooned_pages(nr_grefs, node->hvm.pages,
+                                       false /* lowmem */);
         if (err)
                 goto out_err;
 
-        addr = pfn_to_kaddr(page_to_pfn(node->page));
+        for (i = 0; i < nr_grefs; i++) {
+                unsigned long pfn = page_to_pfn(node->hvm.pages[i]);
+                phys_addrs[i] = (unsigned long)pfn_to_kaddr(pfn);
+                addrs[i] = (unsigned long)pfn_to_kaddr(pfn);
+        }
+
+        err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles,
+                                phys_addrs, GNTMAP_host_map, &leaked);
+        node->nr_handles = nr_grefs;
 
-        err = xenbus_map_ring(dev, gnt_ref, &node->handle, addr);
         if (err)
-                goto out_err_free_ballooned_pages;
+                goto out_free_ballooned_pages;
+
+        addr = vmap(node->hvm.pages, nr_grefs, VM_MAP | VM_IOREMAP,
+                    PAGE_KERNEL);
+        if (!addr) {
+                err = -ENOMEM;
+                goto out_xenbus_unmap_ring;
+        }
+
+        node->hvm.addr = addr;
 
         spin_lock(&xenbus_valloc_lock);
         list_add(&node->next, &xenbus_valloc_pages);
@@ -514,8 +648,16 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
         *vaddr = addr;
         return 0;
 
- out_err_free_ballooned_pages:
-        free_xenballooned_pages(1, &node->page);
+ out_xenbus_unmap_ring:
+        if (!leaked)
+                xenbus_unmap_ring(dev, node->handles, node->nr_handles,
+                                  addrs);
+        else
+                pr_alert("leaking %p size %u page(s)",
+                         addr, nr_grefs);
+ out_free_ballooned_pages:
+        if (!leaked)
+                free_xenballooned_pages(nr_grefs, node->hvm.pages);
  out_err:
         kfree(node);
         return err;
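On the consuming side of the split, a hedged sketch of how a backend might use the array-based xenbus_map_ring_valloc(); it assumes the caller has already read the grant references from the frontend's xenstore entries, and the helper names are hypothetical:

    /* Illustrative only: map a frontend's multi-page ring into this domain. */
    static int my_back_map_ring(struct xenbus_device *dev,
                                grant_ref_t *grefs, unsigned int nr_grefs,
                                void **ring)
    {
            int err;

            /* Allocates nr_grefs pages of virtual address space and maps
             * every grant in grefs[] into it. */
            err = xenbus_map_ring_valloc(dev, grefs, nr_grefs, ring);
            if (err)
                    return err;     /* device already switched to Closing */

            /* ... use *ring as the shared ring ... */
            return 0;
    }

    /* Counterpart: unmaps the grants and frees the address space. */
    static void my_back_unmap_ring(struct xenbus_device *dev, void *ring)
    {
            xenbus_unmap_ring_vfree(dev, ring);
    }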
@@ -525,35 +667,37 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
 /**
  * xenbus_map_ring
  * @dev: xenbus device
- * @gnt_ref: grant reference
- * @handle: pointer to grant handle to be filled
- * @vaddr: address to be mapped to
+ * @gnt_refs: grant reference array
+ * @nr_grefs: number of grant reference
+ * @handles: pointer to grant handle to be filled
+ * @vaddrs: addresses to be mapped to
+ * @leaked: fail to clean up a failed map, caller should not free vaddr
  *
- * Map a page of memory into this domain from another domain's grant table.
+ * Map pages of memory into this domain from another domain's grant table.
  * xenbus_map_ring does not allocate the virtual address space (you must do
- * this yourself!). It only maps in the page to the specified address.
+ * this yourself!). It only maps in the pages to the specified address.
  * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
- * or -ENOMEM on error. If an error is returned, device will switch to
- * XenbusStateClosing and the error message will be saved in XenStore.
+ * or -ENOMEM / -EINVAL on error. If an error is returned, device will switch to
+ * XenbusStateClosing and the first error message will be saved in XenStore.
+ * Further more if we fail to map the ring, caller should check @leaked.
+ * If @leaked is not zero it means xenbus_map_ring fails to clean up, caller
+ * should not free the address space of @vaddr.
  */
-int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
-                    grant_handle_t *handle, void *vaddr)
+int xenbus_map_ring(struct xenbus_device *dev, grant_ref_t *gnt_refs,
+                    unsigned int nr_grefs, grant_handle_t *handles,
+                    unsigned long *vaddrs, bool *leaked)
 {
-        struct gnttab_map_grant_ref op;
-
-        gnttab_set_map_op(&op, (unsigned long)vaddr, GNTMAP_host_map, gnt_ref,
-                          dev->otherend_id);
+        phys_addr_t phys_addrs[XENBUS_MAX_RING_PAGES];
+        int i;
 
-        gnttab_batch_map(&op, 1);
+        if (nr_grefs > XENBUS_MAX_RING_PAGES)
+                return -EINVAL;
 
-        if (op.status != GNTST_okay) {
-                xenbus_dev_fatal(dev, op.status,
-                                 "mapping in shared page %d from domain %d",
-                                 gnt_ref, dev->otherend_id);
-        } else
-                *handle = op.handle;
+        for (i = 0; i < nr_grefs; i++)
+                phys_addrs[i] = (unsigned long)vaddrs[i];
 
-        return op.status;
+        return __xenbus_map_ring(dev, gnt_refs, nr_grefs, handles,
+                                 phys_addrs, GNTMAP_host_map, leaked);
 }
 EXPORT_SYMBOL_GPL(xenbus_map_ring);
 
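A hedged sketch of a caller that manages its own (for example ballooned) pages and uses the lower-level xenbus_map_ring() directly, honouring the new @leaked contract; the unmap counterpart, xenbus_unmap_ring(), is updated further down. The helper name and the surrounding setup are assumptions:

    /* Illustrative only: map grants into caller-provided pages. */
    static int my_map_into_pages(struct xenbus_device *dev,
                                 grant_ref_t *grefs, unsigned int nr_grefs,
                                 struct page **pages, grant_handle_t *handles)
    {
            unsigned long vaddrs[XENBUS_MAX_RING_PAGES];
            bool leaked = false;
            unsigned int i;
            int err;

            if (nr_grefs > XENBUS_MAX_RING_PAGES)
                    return -EINVAL;

            for (i = 0; i < nr_grefs; i++)
                    vaddrs[i] = (unsigned long)pfn_to_kaddr(page_to_pfn(pages[i]));

            err = xenbus_map_ring(dev, grefs, nr_grefs, handles, vaddrs, &leaked);
            if (err && leaked) {
                    /* Cleanup inside xenbus_map_ring() failed: the pages must
                     * not be freed or returned to the balloon by the caller. */
                    pr_warn("xenbus ring map leaked %u page(s)\n", nr_grefs);
            }
            return err;
    }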
@@ -579,14 +723,15 @@ EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
 static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
 {
         struct xenbus_map_node *node;
-        struct gnttab_unmap_grant_ref op = {
-                .host_addr = (unsigned long)vaddr,
-        };
+        struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_PAGES];
         unsigned int level;
+        int i;
+        bool leaked = false;
+        int err;
 
         spin_lock(&xenbus_valloc_lock);
         list_for_each_entry(node, &xenbus_valloc_pages, next) {
-                if (node->area->addr == vaddr) {
+                if (node->pv.area->addr == vaddr) {
                         list_del(&node->next);
                         goto found;
                 }
@@ -601,22 +746,41 @@ static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
                 return GNTST_bad_virt_addr;
         }
 
-        op.handle = node->handle;
-        op.host_addr = arbitrary_virt_to_machine(
-                lookup_address((unsigned long)vaddr, &level)).maddr;
+        for (i = 0; i < node->nr_handles; i++) {
+                unsigned long addr;
+
+                memset(&unmap[i], 0, sizeof(unmap[i]));
+                addr = (unsigned long)vaddr + (PAGE_SIZE * i);
+                unmap[i].host_addr = arbitrary_virt_to_machine(
+                        lookup_address(addr, &level)).maddr;
+                unmap[i].dev_bus_addr = 0;
+                unmap[i].handle = node->handles[i];
+        }
 
-        if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
+        if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
                 BUG();
 
-        if (op.status == GNTST_okay)
-                free_vm_area(node->area);
+        err = GNTST_okay;
+        leaked = false;
+        for (i = 0; i < node->nr_handles; i++) {
+                if (unmap[i].status != GNTST_okay) {
+                        leaked = true;
+                        xenbus_dev_error(dev, unmap[i].status,
+                                         "unmapping page at handle %d error %d",
+                                         node->handles[i], unmap[i].status);
+                        err = unmap[i].status;
+                        break;
+                }
+        }
+
+        if (!leaked)
+                free_vm_area(node->pv.area);
         else
-                xenbus_dev_error(dev, op.status,
-                                 "unmapping page at handle %d error %d",
-                                 node->handle, op.status);
+                pr_alert("leaking VM area %p size %u page(s)",
+                         node->pv.area, node->nr_handles);
 
         kfree(node);
-        return op.status;
+        return err;
 }
 
 static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
@@ -624,10 +788,12 @@ static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
         int rv;
         struct xenbus_map_node *node;
         void *addr;
+        unsigned long addrs[XENBUS_MAX_RING_PAGES];
+        int i;
 
         spin_lock(&xenbus_valloc_lock);
         list_for_each_entry(node, &xenbus_valloc_pages, next) {
-                addr = pfn_to_kaddr(page_to_pfn(node->page));
+                addr = node->hvm.addr;
                 if (addr == vaddr) {
                         list_del(&node->next);
                         goto found;
@@ -643,12 +809,16 @@ static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
                 return GNTST_bad_virt_addr;
         }
 
-        rv = xenbus_unmap_ring(dev, node->handle, addr);
+        for (i = 0; i < node->nr_handles; i++)
+                addrs[i] = (unsigned long)pfn_to_kaddr(page_to_pfn(node->hvm.pages[i]));
 
+        rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles,
+                               addrs);
         if (!rv)
-                free_xenballooned_pages(1, &node->page);
+                vunmap(vaddr);
         else
-                WARN(1, "Leaking %p\n", vaddr);
+                WARN(1, "Leaking %p, size %u page(s)\n", vaddr,
+                     node->nr_handles);
 
         kfree(node);
         return rv;
@@ -657,29 +827,44 @@ static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
 /**
  * xenbus_unmap_ring
  * @dev: xenbus device
- * @handle: grant handle
- * @vaddr: addr to unmap
+ * @handles: grant handle array
+ * @nr_handles: number of handles in the array
+ * @vaddrs: addresses to unmap
  *
- * Unmap a page of memory in this domain that was imported from another domain.
+ * Unmap memory in this domain that was imported from another domain.
  * Returns 0 on success and returns GNTST_* on error
  * (see xen/include/interface/grant_table.h).
  */
 int xenbus_unmap_ring(struct xenbus_device *dev,
-                      grant_handle_t handle, void *vaddr)
+                      grant_handle_t *handles, unsigned int nr_handles,
+                      unsigned long *vaddrs)
 {
-        struct gnttab_unmap_grant_ref op;
+        struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_PAGES];
+        int i;
+        int err;
 
-        gnttab_set_unmap_op(&op, (unsigned long)vaddr, GNTMAP_host_map, handle);
+        if (nr_handles > XENBUS_MAX_RING_PAGES)
+                return -EINVAL;
 
-        if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
+        for (i = 0; i < nr_handles; i++)
+                gnttab_set_unmap_op(&unmap[i], vaddrs[i],
+                                    GNTMAP_host_map, handles[i]);
+
+        if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
                 BUG();
 
-        if (op.status != GNTST_okay)
-                xenbus_dev_error(dev, op.status,
-                                 "unmapping page at handle %d error %d",
-                                 handle, op.status);
+        err = GNTST_okay;
+        for (i = 0; i < nr_handles; i++) {
+                if (unmap[i].status != GNTST_okay) {
+                        xenbus_dev_error(dev, unmap[i].status,
+                                         "unmapping page at handle %d error %d",
+                                         handles[i], unmap[i].status);
+                        err = unmap[i].status;
+                        break;
+                }
+        }
 
-        return op.status;
+        return err;
 }
 EXPORT_SYMBOL_GPL(xenbus_unmap_ring);
 
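And the matching teardown for the xenbus_map_ring() sketch above, again illustrative rather than part of the patch: the caller passes the same handle array plus the page addresses back in, and only treats the pages as reusable if every grant was actually unmapped:

    /* Illustrative only: counterpart to my_map_into_pages() above. */
    static int my_unmap_pages(struct xenbus_device *dev,
                              struct page **pages, grant_handle_t *handles,
                              unsigned int nr_handles)
    {
            unsigned long vaddrs[XENBUS_MAX_RING_PAGES];
            unsigned int i;
            int err;

            if (nr_handles > XENBUS_MAX_RING_PAGES)
                    return -EINVAL;

            for (i = 0; i < nr_handles; i++)
                    vaddrs[i] = (unsigned long)pfn_to_kaddr(page_to_pfn(pages[i]));

            /* 0 (GNTST_okay) on success, first failing GNTST_* otherwise. */
            err = xenbus_unmap_ring(dev, handles, nr_handles, vaddrs);
            if (err) {
                    /* At least one grant is still mapped: keep the pages. */
                    WARN(1, "xenbus ring unmap failed, leaking %u page(s)\n",
                         nr_handles);
            }
            return err;
    }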
