diff options
author | Robin Murphy <robin.murphy@arm.com> | 2017-10-13 14:23:39 -0400 |
---|---|---|
committer | Alex Williamson <alex.williamson@redhat.com> | 2017-11-06 12:27:06 -0500 |
commit | 1c7e7c0278df968221a5edb1a293423e13b13814 (patch) | |
tree | f17e2a6ec535867d06bd76321d76deb4159c4e9a /drivers/iommu | |
parent | 105a004e2187609a74f75d55fd0f9a054b49d60a (diff) |
iommu/ipmmu-vmsa: Unify domain alloc/free
We have two implementations for ipmmu_ops->alloc depending on
CONFIG_IOMMU_DMA, the difference being whether they accept the
IOMMU_DOMAIN_DMA type or not. However, iommu_dma_get_cookie() is
guaranteed to return an error when !CONFIG_IOMMU_DMA, so if
ipmmu_domain_alloc_dma() was actually checking and handling the return
value correctly, it would behave the same as ipmmu_domain_alloc()
anyway.
Similarly for freeing; iommu_put_dma_cookie() is robust by design.
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Diffstat (limited to 'drivers/iommu')
-rw-r--r-- | drivers/iommu/ipmmu-vmsa.c | 65 |
1 file changed, 24 insertions(+), 41 deletions(-)
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 00e88a88ee3a..37154075c00a 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c | |||
@@ -528,6 +528,27 @@ static struct iommu_domain *__ipmmu_domain_alloc(unsigned type) | |||
528 | return &domain->io_domain; | 528 | return &domain->io_domain; |
529 | } | 529 | } |
530 | 530 | ||
531 | static struct iommu_domain *ipmmu_domain_alloc(unsigned type) | ||
532 | { | ||
533 | struct iommu_domain *io_domain = NULL; | ||
534 | |||
535 | switch (type) { | ||
536 | case IOMMU_DOMAIN_UNMANAGED: | ||
537 | io_domain = __ipmmu_domain_alloc(type); | ||
538 | break; | ||
539 | |||
540 | case IOMMU_DOMAIN_DMA: | ||
541 | io_domain = __ipmmu_domain_alloc(type); | ||
542 | if (io_domain && iommu_get_dma_cookie(io_domain)) { | ||
543 | kfree(io_domain); | ||
544 | io_domain = NULL; | ||
545 | } | ||
546 | break; | ||
547 | } | ||
548 | |||
549 | return io_domain; | ||
550 | } | ||
551 | |||
531 | static void ipmmu_domain_free(struct iommu_domain *io_domain) | 552 | static void ipmmu_domain_free(struct iommu_domain *io_domain) |
532 | { | 553 | { |
533 | struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); | 554 | struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); |
@@ -536,6 +557,7 @@ static void ipmmu_domain_free(struct iommu_domain *io_domain) | |||
536 | * Free the domain resources. We assume that all devices have already | 557 | * Free the domain resources. We assume that all devices have already |
537 | * been detached. | 558 | * been detached. |
538 | */ | 559 | */ |
560 | iommu_put_dma_cookie(io_domain); | ||
539 | ipmmu_domain_destroy_context(domain); | 561 | ipmmu_domain_destroy_context(domain); |
540 | free_io_pgtable_ops(domain->iop); | 562 | free_io_pgtable_ops(domain->iop); |
541 | kfree(domain); | 563 | kfree(domain); |
@@ -671,14 +693,6 @@ static int ipmmu_of_xlate(struct device *dev, | |||
671 | 693 | ||
672 | #if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA) | 694 | #if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA) |
673 | 695 | ||
674 | static struct iommu_domain *ipmmu_domain_alloc(unsigned type) | ||
675 | { | ||
676 | if (type != IOMMU_DOMAIN_UNMANAGED) | ||
677 | return NULL; | ||
678 | |||
679 | return __ipmmu_domain_alloc(type); | ||
680 | } | ||
681 | |||
682 | static int ipmmu_add_device(struct device *dev) | 696 | static int ipmmu_add_device(struct device *dev) |
683 | { | 697 | { |
684 | struct ipmmu_vmsa_device *mmu = NULL; | 698 | struct ipmmu_vmsa_device *mmu = NULL; |
@@ -779,37 +793,6 @@ static const struct iommu_ops ipmmu_ops = { | |||
779 | static DEFINE_SPINLOCK(ipmmu_slave_devices_lock); | 793 | static DEFINE_SPINLOCK(ipmmu_slave_devices_lock); |
780 | static LIST_HEAD(ipmmu_slave_devices); | 794 | static LIST_HEAD(ipmmu_slave_devices); |
781 | 795 | ||
782 | static struct iommu_domain *ipmmu_domain_alloc_dma(unsigned type) | ||
783 | { | ||
784 | struct iommu_domain *io_domain = NULL; | ||
785 | |||
786 | switch (type) { | ||
787 | case IOMMU_DOMAIN_UNMANAGED: | ||
788 | io_domain = __ipmmu_domain_alloc(type); | ||
789 | break; | ||
790 | |||
791 | case IOMMU_DOMAIN_DMA: | ||
792 | io_domain = __ipmmu_domain_alloc(type); | ||
793 | if (io_domain) | ||
794 | iommu_get_dma_cookie(io_domain); | ||
795 | break; | ||
796 | } | ||
797 | |||
798 | return io_domain; | ||
799 | } | ||
800 | |||
801 | static void ipmmu_domain_free_dma(struct iommu_domain *io_domain) | ||
802 | { | ||
803 | switch (io_domain->type) { | ||
804 | case IOMMU_DOMAIN_DMA: | ||
805 | iommu_put_dma_cookie(io_domain); | ||
806 | /* fall-through */ | ||
807 | default: | ||
808 | ipmmu_domain_free(io_domain); | ||
809 | break; | ||
810 | } | ||
811 | } | ||
812 | |||
813 | static int ipmmu_add_device_dma(struct device *dev) | 796 | static int ipmmu_add_device_dma(struct device *dev) |
814 | { | 797 | { |
815 | struct iommu_group *group; | 798 | struct iommu_group *group; |
@@ -878,8 +861,8 @@ static struct iommu_group *ipmmu_find_group_dma(struct device *dev) | |||
878 | } | 861 | } |
879 | 862 | ||
880 | static const struct iommu_ops ipmmu_ops = { | 863 | static const struct iommu_ops ipmmu_ops = { |
881 | .domain_alloc = ipmmu_domain_alloc_dma, | 864 | .domain_alloc = ipmmu_domain_alloc, |
882 | .domain_free = ipmmu_domain_free_dma, | 865 | .domain_free = ipmmu_domain_free, |
883 | .attach_dev = ipmmu_attach_device, | 866 | .attach_dev = ipmmu_attach_device, |
884 | .detach_dev = ipmmu_detach_device, | 867 | .detach_dev = ipmmu_detach_device, |
885 | .map = ipmmu_map, | 868 | .map = ipmmu_map, |