| author | Joerg Roedel <jroedel@suse.de> | 2015-03-26 08:43:17 -0400 |
|---|---|---|
| committer | Joerg Roedel <jroedel@suse.de> | 2015-03-31 09:32:14 -0400 |
| commit | bcd516a32416aadd4f1ac40540407aa3b4ffd222 (patch) | |
| tree | 8ae4b5c3016b557ae4d3cc58d5074462c2a44dfb /drivers/iommu/rockchip-iommu.c | |
| parent | 5914c5fdde926604f65b249b662b26e3bc2c5923 (diff) | |
iommu/rockchip: Make use of domain_alloc and domain_free
Implement domain_alloc and domain_free iommu-ops as a
replacement for domain_init/domain_destroy.
Tested-by: Heiko Stuebner <heiko@sntech.de>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Diffstat (limited to 'drivers/iommu/rockchip-iommu.c')
-rw-r--r-- | drivers/iommu/rockchip-iommu.c | 40 |
1 file changed, 24 insertions(+), 16 deletions(-)
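
For readers skimming the diff below, the heart of the conversion is the pattern condensed here: the driver embeds a `struct iommu_domain` inside its own `rk_iommu_domain` and recovers the outer structure with `container_of()`, instead of stashing a private pointer in `domain->priv`. This is a minimal sketch distilled from the hunks that follow (page-table allocation, locks, and error unwinding elided), not a standalone driver.

```c
#include <linux/iommu.h>	/* struct iommu_domain, IOMMU_DOMAIN_UNMANAGED */
#include <linux/slab.h>		/* kzalloc(), kfree() */
#include <linux/kernel.h>	/* container_of() */

/* Sketch of the allocation pattern introduced by this patch; details elided. */
struct rk_iommu_domain {
	/* ... page tables, locks, iommus list ... */
	struct iommu_domain domain;	/* embedded generic domain */
};

static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
{
	/* Recover the driver domain from the embedded member. */
	return container_of(dom, struct rk_iommu_domain, domain);
}

static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
{
	struct rk_iommu_domain *rk_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
	if (!rk_domain)
		return NULL;

	/* ... allocate the page directory, init locks and lists ... */

	return &rk_domain->domain;	/* the core tracks this pointer from now on */
}

static void rk_iommu_domain_free(struct iommu_domain *domain)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);

	/* ... free page tables ... */
	kfree(rk_domain);
}
```

Compared with `domain->priv`, the embedded-domain-plus-`container_of()` approach allocates the generic and driver-specific state as one object and removes a pointer indirection; the `IOMMU_DOMAIN_UNMANAGED` check and the `err_dt` unwinding are visible in the full hunks below.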
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 9f74fddcd304..4015560bf486 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -80,6 +80,8 @@ struct rk_iommu_domain {
 	u32 *dt; /* page directory table */
 	spinlock_t iommus_lock; /* lock for iommus list */
 	spinlock_t dt_lock; /* lock for modifying page directory table */
+
+	struct iommu_domain domain;
 };
 
 struct rk_iommu {
@@ -100,6 +102,11 @@ static inline void rk_table_flush(u32 *va, unsigned int count)
 	outer_flush_range(pa_start, pa_end);
 }
 
+static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
+{
+	return container_of(dom, struct rk_iommu_domain, domain);
+}
+
 /**
  * Inspired by _wait_for in intel_drv.h
  * This is NOT safe for use in interrupt context.
@@ -503,7 +510,7 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
 static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
 					 dma_addr_t iova)
 {
-	struct rk_iommu_domain *rk_domain = domain->priv;
+	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
 	unsigned long flags;
 	phys_addr_t pt_phys, phys = 0;
 	u32 dte, pte;
@@ -639,7 +646,7 @@ unwind:
 static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
 			phys_addr_t paddr, size_t size, int prot)
 {
-	struct rk_iommu_domain *rk_domain = domain->priv;
+	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
 	unsigned long flags;
 	dma_addr_t iova = (dma_addr_t)_iova;
 	u32 *page_table, *pte_addr;
@@ -670,7 +677,7 @@ static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
 static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
 			     size_t size)
 {
-	struct rk_iommu_domain *rk_domain = domain->priv;
+	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
 	unsigned long flags;
 	dma_addr_t iova = (dma_addr_t)_iova;
 	phys_addr_t pt_phys;
@@ -726,7 +733,7 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
 				  struct device *dev)
 {
 	struct rk_iommu *iommu;
-	struct rk_iommu_domain *rk_domain = domain->priv;
+	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
 	unsigned long flags;
 	int ret;
 	phys_addr_t dte_addr;
@@ -778,7 +785,7 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
 				   struct device *dev)
 {
 	struct rk_iommu *iommu;
-	struct rk_iommu_domain *rk_domain = domain->priv;
+	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
 	unsigned long flags;
 
 	/* Allow 'virtual devices' (eg drm) to detach from domain */
@@ -804,13 +811,16 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
 	dev_info(dev, "Detached from iommu domain\n");
 }
 
-static int rk_iommu_domain_init(struct iommu_domain *domain)
+static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
 {
 	struct rk_iommu_domain *rk_domain;
 
+	if (type != IOMMU_DOMAIN_UNMANAGED)
+		return NULL;
+
 	rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
 	if (!rk_domain)
-		return -ENOMEM;
+		return NULL;
 
 	/*
 	 * rk32xx iommus use a 2 level pagetable.
@@ -827,17 +837,16 @@ static int rk_iommu_domain_init(struct iommu_domain *domain)
 	spin_lock_init(&rk_domain->dt_lock);
 	INIT_LIST_HEAD(&rk_domain->iommus);
 
-	domain->priv = rk_domain;
+	return &rk_domain->domain;
 
-	return 0;
 err_dt:
 	kfree(rk_domain);
-	return -ENOMEM;
+	return NULL;
 }
 
-static void rk_iommu_domain_destroy(struct iommu_domain *domain)
+static void rk_iommu_domain_free(struct iommu_domain *domain)
 {
-	struct rk_iommu_domain *rk_domain = domain->priv;
+	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
 	int i;
 
 	WARN_ON(!list_empty(&rk_domain->iommus));
@@ -852,8 +861,7 @@ static void rk_iommu_domain_destroy(struct iommu_domain *domain)
 	}
 
 	free_page((unsigned long)rk_domain->dt);
-	kfree(domain->priv);
-	domain->priv = NULL;
+	kfree(rk_domain);
 }
 
 static bool rk_iommu_is_dev_iommu_master(struct device *dev)
@@ -952,8 +960,8 @@ static void rk_iommu_remove_device(struct device *dev)
 }
 
 static const struct iommu_ops rk_iommu_ops = {
-	.domain_init = rk_iommu_domain_init,
-	.domain_destroy = rk_iommu_domain_destroy,
+	.domain_alloc = rk_iommu_domain_alloc,
+	.domain_free = rk_iommu_domain_free,
 	.attach_dev = rk_iommu_attach_device,
 	.detach_dev = rk_iommu_detach_device,
 	.map = rk_iommu_map,