-rw-r--r--	drivers/gpu/drm/etnaviv/Kconfig			  2
-rw-r--r--	drivers/gpu/drm/etnaviv/etnaviv_drv.h		  1
-rw-r--r--	drivers/gpu/drm/etnaviv/etnaviv_iommu.c		138
-rw-r--r--	drivers/gpu/drm/etnaviv/etnaviv_iommu.h		  7
-rw-r--r--	drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c	106
-rw-r--r--	drivers/gpu/drm/etnaviv/etnaviv_mmu.c		 33
-rw-r--r--	drivers/gpu/drm/etnaviv/etnaviv_mmu.h		 28
7 files changed, 158 insertions, 157 deletions
diff --git a/drivers/gpu/drm/etnaviv/Kconfig b/drivers/gpu/drm/etnaviv/Kconfig
index 38b477b5fbf9..a29b8f59eb15 100644
--- a/drivers/gpu/drm/etnaviv/Kconfig
+++ b/drivers/gpu/drm/etnaviv/Kconfig
@@ -7,8 +7,6 @@ config DRM_ETNAVIV
 	select SHMEM
 	select SYNC_FILE
 	select TMPFS
-	select IOMMU_API
-	select IOMMU_SUPPORT
 	select WANT_DEV_COREDUMP
 	select CMA if HAVE_DMA_CONTIGUOUS
 	select DMA_CMA if HAVE_DMA_CONTIGUOUS
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index 058389f93b69..d157d9379e68 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -26,7 +26,6 @@
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
 #include <linux/list.h>
-#include <linux/iommu.h>
 #include <linux/types.h>
 #include <linux/sizes.h>
 
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
index 2ffdb27e4ac2..14e24ac6573f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
@@ -14,7 +14,6 @@
  * this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <linux/iommu.h>
 #include <linux/platform_device.h>
 #include <linux/sizes.h>
 #include <linux/slab.h>
@@ -31,127 +30,115 @@
 
 #define GPU_MEM_START 0x80000000
 
-struct etnaviv_iommu_domain_pgtable {
-	u32 *pgtable;
-	dma_addr_t paddr;
+struct etnaviv_iommuv1_domain {
+	struct etnaviv_iommu_domain base;
+	u32 *pgtable_cpu;
+	dma_addr_t pgtable_dma;
 };
 
-struct etnaviv_iommu_domain {
-	struct iommu_domain domain;
-	struct device *dev;
-	void *bad_page_cpu;
-	dma_addr_t bad_page_dma;
-	struct etnaviv_iommu_domain_pgtable pgtable;
-};
-
-static struct etnaviv_iommu_domain *to_etnaviv_domain(struct iommu_domain *domain)
+static struct etnaviv_iommuv1_domain *
+to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
 {
-	return container_of(domain, struct etnaviv_iommu_domain, domain);
+	return container_of(domain, struct etnaviv_iommuv1_domain, base);
 }
 
-static int __etnaviv_iommu_init(struct etnaviv_iommu_domain *etnaviv_domain)
+static int __etnaviv_iommu_init(struct etnaviv_iommuv1_domain *etnaviv_domain)
 {
 	u32 *p;
 	int i;
 
-	etnaviv_domain->bad_page_cpu = dma_alloc_coherent(etnaviv_domain->dev,
-						  SZ_4K,
-						  &etnaviv_domain->bad_page_dma,
-						  GFP_KERNEL);
-	if (!etnaviv_domain->bad_page_cpu)
+	etnaviv_domain->base.bad_page_cpu = dma_alloc_coherent(
+						etnaviv_domain->base.dev,
+						SZ_4K,
+						&etnaviv_domain->base.bad_page_dma,
+						GFP_KERNEL);
+	if (!etnaviv_domain->base.bad_page_cpu)
 		return -ENOMEM;
 
-	p = etnaviv_domain->bad_page_cpu;
+	p = etnaviv_domain->base.bad_page_cpu;
 	for (i = 0; i < SZ_4K / 4; i++)
 		*p++ = 0xdead55aa;
 
-	etnaviv_domain->pgtable.pgtable =
-			dma_alloc_coherent(etnaviv_domain->dev, PT_SIZE,
-					   &etnaviv_domain->pgtable.paddr,
+	etnaviv_domain->pgtable_cpu =
+			dma_alloc_coherent(etnaviv_domain->base.dev, PT_SIZE,
+					   &etnaviv_domain->pgtable_dma,
 					   GFP_KERNEL);
-	if (!etnaviv_domain->pgtable.pgtable) {
-		dma_free_coherent(etnaviv_domain->dev, SZ_4K,
-				  etnaviv_domain->bad_page_cpu,
-				  etnaviv_domain->bad_page_dma);
+	if (!etnaviv_domain->pgtable_cpu) {
+		dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
+				  etnaviv_domain->base.bad_page_cpu,
+				  etnaviv_domain->base.bad_page_dma);
 		return -ENOMEM;
 	}
 
 	for (i = 0; i < PT_ENTRIES; i++)
-		etnaviv_domain->pgtable.pgtable[i] =
-			etnaviv_domain->bad_page_dma;
+		etnaviv_domain->pgtable_cpu[i] =
+			etnaviv_domain->base.bad_page_dma;
 
 	return 0;
 }
 
-static void etnaviv_domain_free(struct iommu_domain *domain)
+static void etnaviv_iommuv1_domain_free(struct etnaviv_iommu_domain *domain)
 {
-	struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
+	struct etnaviv_iommuv1_domain *etnaviv_domain =
+			to_etnaviv_domain(domain);
 
-	dma_free_coherent(etnaviv_domain->dev, PT_SIZE,
-			  etnaviv_domain->pgtable.pgtable,
-			  etnaviv_domain->pgtable.paddr);
+	dma_free_coherent(etnaviv_domain->base.dev, PT_SIZE,
+			  etnaviv_domain->pgtable_cpu,
+			  etnaviv_domain->pgtable_dma);
 
-	dma_free_coherent(etnaviv_domain->dev, SZ_4K,
-			  etnaviv_domain->bad_page_cpu,
-			  etnaviv_domain->bad_page_dma);
+	dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
+			  etnaviv_domain->base.bad_page_cpu,
+			  etnaviv_domain->base.bad_page_dma);
 
 	kfree(etnaviv_domain);
 }
 
-static int etnaviv_iommuv1_map(struct iommu_domain *domain, unsigned long iova,
-	   phys_addr_t paddr, size_t size, int prot)
+static int etnaviv_iommuv1_map(struct etnaviv_iommu_domain *domain,
+			       unsigned long iova, phys_addr_t paddr,
+			       size_t size, int prot)
 {
-	struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
+	struct etnaviv_iommuv1_domain *etnaviv_domain = to_etnaviv_domain(domain);
 	unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
 
 	if (size != SZ_4K)
 		return -EINVAL;
 
-	etnaviv_domain->pgtable.pgtable[index] = paddr;
+	etnaviv_domain->pgtable_cpu[index] = paddr;
 
 	return 0;
 }
 
-static size_t etnaviv_iommuv1_unmap(struct iommu_domain *domain,
-	unsigned long iova, size_t size)
+static size_t etnaviv_iommuv1_unmap(struct etnaviv_iommu_domain *domain,
+				    unsigned long iova, size_t size)
 {
-	struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
+	struct etnaviv_iommuv1_domain *etnaviv_domain =
+			to_etnaviv_domain(domain);
 	unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
 
 	if (size != SZ_4K)
 		return -EINVAL;
 
-	etnaviv_domain->pgtable.pgtable[index] = etnaviv_domain->bad_page_dma;
+	etnaviv_domain->pgtable_cpu[index] = etnaviv_domain->base.bad_page_dma;
 
 	return SZ_4K;
 }
 
-static size_t etnaviv_iommuv1_dump_size(struct iommu_domain *domain)
+static size_t etnaviv_iommuv1_dump_size(struct etnaviv_iommu_domain *domain)
 {
 	return PT_SIZE;
 }
 
-static void etnaviv_iommuv1_dump(struct iommu_domain *domain, void *buf)
+static void etnaviv_iommuv1_dump(struct etnaviv_iommu_domain *domain, void *buf)
 {
-	struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
+	struct etnaviv_iommuv1_domain *etnaviv_domain =
+			to_etnaviv_domain(domain);
 
-	memcpy(buf, etnaviv_domain->pgtable.pgtable, PT_SIZE);
+	memcpy(buf, etnaviv_domain->pgtable_cpu, PT_SIZE);
 }
 
-static const struct etnaviv_iommu_ops etnaviv_iommu_ops = {
-	.ops = {
-		.domain_free = etnaviv_domain_free,
-		.map = etnaviv_iommuv1_map,
-		.unmap = etnaviv_iommuv1_unmap,
-		.pgsize_bitmap = SZ_4K,
-	},
-	.dump_size = etnaviv_iommuv1_dump_size,
-	.dump = etnaviv_iommuv1_dump,
-};
-
 void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
 {
-	struct etnaviv_iommu_domain *etnaviv_domain =
+	struct etnaviv_iommuv1_domain *etnaviv_domain =
 			to_etnaviv_domain(gpu->mmu->domain);
 	u32 pgtable;
 
@@ -163,7 +150,7 @@ void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
 	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);
 
 	/* set page table address in MC */
-	pgtable = (u32)etnaviv_domain->pgtable.paddr;
+	pgtable = (u32)etnaviv_domain->pgtable_dma;
 
 	gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable);
 	gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable);
@@ -172,28 +159,37 @@ void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
 	gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
 }
 
-struct iommu_domain *etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu)
+const struct etnaviv_iommu_domain_ops etnaviv_iommuv1_ops = {
+	.free = etnaviv_iommuv1_domain_free,
+	.map = etnaviv_iommuv1_map,
+	.unmap = etnaviv_iommuv1_unmap,
+	.dump_size = etnaviv_iommuv1_dump_size,
+	.dump = etnaviv_iommuv1_dump,
+};
+
+struct etnaviv_iommu_domain *
+etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu)
 {
-	struct etnaviv_iommu_domain *etnaviv_domain;
+	struct etnaviv_iommuv1_domain *etnaviv_domain;
+	struct etnaviv_iommu_domain *domain;
 	int ret;
 
 	etnaviv_domain = kzalloc(sizeof(*etnaviv_domain), GFP_KERNEL);
 	if (!etnaviv_domain)
 		return NULL;
 
-	etnaviv_domain->dev = gpu->dev;
+	domain = &etnaviv_domain->base;
 
-	etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING;
-	etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops;
-	etnaviv_domain->domain.pgsize_bitmap = SZ_4K;
-	etnaviv_domain->domain.geometry.aperture_start = GPU_MEM_START;
-	etnaviv_domain->domain.geometry.aperture_end = GPU_MEM_START + PT_ENTRIES * SZ_4K - 1;
+	domain->dev = gpu->dev;
+	domain->base = GPU_MEM_START;
+	domain->size = PT_ENTRIES * SZ_4K;
+	domain->ops = &etnaviv_iommuv1_ops;
 
 	ret = __etnaviv_iommu_init(etnaviv_domain);
 	if (ret)
 		goto out_free;
 
-	return &etnaviv_domain->domain;
+	return &etnaviv_domain->base;
 
 out_free:
 	kfree(etnaviv_domain);
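
Note (not part of the patch): the v1 backend now uses the classic embedded-base pattern. struct etnaviv_iommuv1_domain places the generic struct etnaviv_iommu_domain as its first member, and to_etnaviv_domain() recovers the outer struct with container_of(). A minimal sketch using the names from this patch; the calling context (gpu, iova, paddr) is illustrative only:

	struct etnaviv_iommu_domain *domain;
	struct etnaviv_iommuv1_domain *v1;

	domain = etnaviv_iommuv1_domain_alloc(gpu);	/* returns &v1->base */
	v1 = to_etnaviv_domain(domain);			/* container_of() back to the outer struct */

	/* v1 translation is one flat table, one 32-bit PTE per 4K page:
	 * map is a single store, unmap re-points the slot at the scratch
	 * ("bad") page */
	v1->pgtable_cpu[(iova - GPU_MEM_START) / SZ_4K] = paddr;
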
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.h b/drivers/gpu/drm/etnaviv/etnaviv_iommu.h
index 8b51e7c16feb..01d59bf70d78 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.h
@@ -18,11 +18,14 @@
 #define __ETNAVIV_IOMMU_H__
 
 struct etnaviv_gpu;
+struct etnaviv_iommu_domain;
 
-struct iommu_domain *etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu);
+struct etnaviv_iommu_domain *
+etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu);
 void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu);
 
-struct iommu_domain *etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu);
+struct etnaviv_iommu_domain *
+etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu);
 void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu);
 
 #endif /* __ETNAVIV_IOMMU_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
index d794e8c0dd7e..fc60fc8ddbf0 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -14,7 +14,6 @@
  * this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <linux/iommu.h>
 #include <linux/platform_device.h>
 #include <linux/sizes.h>
 #include <linux/slab.h>
@@ -40,10 +39,7 @@
 #define MMUv2_MAX_STLB_ENTRIES		1024
 
 struct etnaviv_iommuv2_domain {
-	struct iommu_domain domain;
-	struct device *dev;
-	void *bad_page_cpu;
-	dma_addr_t bad_page_dma;
+	struct etnaviv_iommu_domain base;
 	/* M(aster) TLB aka first level pagetable */
 	u32 *mtlb_cpu;
 	dma_addr_t mtlb_dma;
@@ -52,13 +48,15 @@ struct etnaviv_iommuv2_domain {
 	dma_addr_t stlb_dma[1024];
 };
 
-static struct etnaviv_iommuv2_domain *to_etnaviv_domain(struct iommu_domain *domain)
+static struct etnaviv_iommuv2_domain *
+to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
 {
-	return container_of(domain, struct etnaviv_iommuv2_domain, domain);
+	return container_of(domain, struct etnaviv_iommuv2_domain, base);
 }
 
-static int etnaviv_iommuv2_map(struct iommu_domain *domain, unsigned long iova,
-	   phys_addr_t paddr, size_t size, int prot)
+static int etnaviv_iommuv2_map(struct etnaviv_iommu_domain *domain,
+			       unsigned long iova, phys_addr_t paddr,
+			       size_t size, int prot)
 {
 	struct etnaviv_iommuv2_domain *etnaviv_domain =
 			to_etnaviv_domain(domain);
@@ -68,7 +66,7 @@ static int etnaviv_iommuv2_map(struct iommu_domain *domain, unsigned long iova,
 	if (size != SZ_4K)
 		return -EINVAL;
 
-	if (prot & IOMMU_WRITE)
+	if (prot & ETNAVIV_PROT_WRITE)
 		entry |= MMUv2_PTE_WRITEABLE;
 
 	mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
@@ -79,8 +77,8 @@ static int etnaviv_iommuv2_map(struct iommu_domain *domain, unsigned long iova,
 	return 0;
 }
 
-static size_t etnaviv_iommuv2_unmap(struct iommu_domain *domain,
-	unsigned long iova, size_t size)
+static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_domain *domain,
+				    unsigned long iova, size_t size)
 {
 	struct etnaviv_iommuv2_domain *etnaviv_domain =
 			to_etnaviv_domain(domain);
@@ -103,19 +101,20 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
 	int ret, i, j;
 
 	/* allocate scratch page */
-	etnaviv_domain->bad_page_cpu = dma_alloc_coherent(etnaviv_domain->dev,
-						  SZ_4K,
-						  &etnaviv_domain->bad_page_dma,
-						  GFP_KERNEL);
-	if (!etnaviv_domain->bad_page_cpu) {
+	etnaviv_domain->base.bad_page_cpu = dma_alloc_coherent(
+						etnaviv_domain->base.dev,
+						SZ_4K,
+						&etnaviv_domain->base.bad_page_dma,
+						GFP_KERNEL);
+	if (!etnaviv_domain->base.bad_page_cpu) {
 		ret = -ENOMEM;
 		goto fail_mem;
 	}
-	p = etnaviv_domain->bad_page_cpu;
+	p = etnaviv_domain->base.bad_page_cpu;
 	for (i = 0; i < SZ_4K / 4; i++)
 		*p++ = 0xdead55aa;
 
-	etnaviv_domain->mtlb_cpu = dma_alloc_coherent(etnaviv_domain->dev,
+	etnaviv_domain->mtlb_cpu = dma_alloc_coherent(etnaviv_domain->base.dev,
 						  SZ_4K,
 						  &etnaviv_domain->mtlb_dma,
 						  GFP_KERNEL);
@@ -127,7 +126,7 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
 	/* pre-populate STLB pages (may want to switch to on-demand later) */
 	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
 		etnaviv_domain->stlb_cpu[i] =
-				dma_alloc_coherent(etnaviv_domain->dev,
+				dma_alloc_coherent(etnaviv_domain->base.dev,
 						   SZ_4K,
 						   &etnaviv_domain->stlb_dma[i],
 						   GFP_KERNEL);
@@ -146,19 +145,19 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
 	return 0;
 
 fail_mem:
-	if (etnaviv_domain->bad_page_cpu)
-		dma_free_coherent(etnaviv_domain->dev, SZ_4K,
-				  etnaviv_domain->bad_page_cpu,
-				  etnaviv_domain->bad_page_dma);
+	if (etnaviv_domain->base.bad_page_cpu)
+		dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
+				  etnaviv_domain->base.bad_page_cpu,
+				  etnaviv_domain->base.bad_page_dma);
 
 	if (etnaviv_domain->mtlb_cpu)
-		dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+		dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
 				  etnaviv_domain->mtlb_cpu,
 				  etnaviv_domain->mtlb_dma);
 
 	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
 		if (etnaviv_domain->stlb_cpu[i])
-			dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+			dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
 					  etnaviv_domain->stlb_cpu[i],
 					  etnaviv_domain->stlb_dma[i]);
 	}
@@ -166,23 +165,23 @@ fail_mem:
 	return ret;
 }
 
-static void etnaviv_iommuv2_domain_free(struct iommu_domain *domain)
+static void etnaviv_iommuv2_domain_free(struct etnaviv_iommu_domain *domain)
 {
 	struct etnaviv_iommuv2_domain *etnaviv_domain =
 			to_etnaviv_domain(domain);
 	int i;
 
-	dma_free_coherent(etnaviv_domain->dev, SZ_4K,
-			  etnaviv_domain->bad_page_cpu,
-			  etnaviv_domain->bad_page_dma);
+	dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
+			  etnaviv_domain->base.bad_page_cpu,
+			  etnaviv_domain->base.bad_page_dma);
 
-	dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+	dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
 			  etnaviv_domain->mtlb_cpu,
 			  etnaviv_domain->mtlb_dma);
 
 	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
 		if (etnaviv_domain->stlb_cpu[i])
-			dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+			dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
 					  etnaviv_domain->stlb_cpu[i],
 					  etnaviv_domain->stlb_dma[i]);
 	}
@@ -190,7 +189,7 @@ static void etnaviv_iommuv2_domain_free(struct iommu_domain *domain)
 	vfree(etnaviv_domain);
 }
 
-static size_t etnaviv_iommuv2_dump_size(struct iommu_domain *domain)
+static size_t etnaviv_iommuv2_dump_size(struct etnaviv_iommu_domain *domain)
 {
 	struct etnaviv_iommuv2_domain *etnaviv_domain =
 			to_etnaviv_domain(domain);
@@ -204,7 +203,7 @@ static size_t etnaviv_iommuv2_dump_size(struct iommu_domain *domain)
 	return dump_size;
 }
 
-static void etnaviv_iommuv2_dump(struct iommu_domain *domain, void *buf)
+static void etnaviv_iommuv2_dump(struct etnaviv_iommu_domain *domain, void *buf)
 {
 	struct etnaviv_iommuv2_domain *etnaviv_domain =
 			to_etnaviv_domain(domain);
@@ -217,17 +216,6 @@ static void etnaviv_iommuv2_dump(struct iommu_domain *domain, void *buf)
 		memcpy(buf, etnaviv_domain->stlb_cpu[i], SZ_4K);
 }
 
-static const struct etnaviv_iommu_ops etnaviv_iommu_ops = {
-	.ops = {
-		.domain_free = etnaviv_iommuv2_domain_free,
-		.map = etnaviv_iommuv2_map,
-		.unmap = etnaviv_iommuv2_unmap,
-		.pgsize_bitmap = SZ_4K,
-	},
-	.dump_size = etnaviv_iommuv2_dump_size,
-	.dump = etnaviv_iommuv2_dump,
-};
-
 void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
 {
 	struct etnaviv_iommuv2_domain *etnaviv_domain =
@@ -240,35 +228,45 @@ void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
 
 	prefetch = etnaviv_buffer_config_mmuv2(gpu,
 				(u32)etnaviv_domain->mtlb_dma,
-				(u32)etnaviv_domain->bad_page_dma);
+				(u32)etnaviv_domain->base.bad_page_dma);
 	etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(gpu->buffer),
 			     prefetch);
 	etnaviv_gpu_wait_idle(gpu, 100);
 
 	gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
 }
-struct iommu_domain *etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
+
+const struct etnaviv_iommu_domain_ops etnaviv_iommuv2_ops = {
+	.free = etnaviv_iommuv2_domain_free,
+	.map = etnaviv_iommuv2_map,
+	.unmap = etnaviv_iommuv2_unmap,
+	.dump_size = etnaviv_iommuv2_dump_size,
+	.dump = etnaviv_iommuv2_dump,
+};
+
+struct etnaviv_iommu_domain *
+etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
 {
 	struct etnaviv_iommuv2_domain *etnaviv_domain;
+	struct etnaviv_iommu_domain *domain;
 	int ret;
 
 	etnaviv_domain = vzalloc(sizeof(*etnaviv_domain));
 	if (!etnaviv_domain)
 		return NULL;
 
-	etnaviv_domain->dev = gpu->dev;
+	domain = &etnaviv_domain->base;
 
-	etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING;
-	etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops;
-	etnaviv_domain->domain.pgsize_bitmap = SZ_4K;
-	etnaviv_domain->domain.geometry.aperture_start = 0;
-	etnaviv_domain->domain.geometry.aperture_end = ~0UL & ~(SZ_4K - 1);
+	domain->dev = gpu->dev;
+	domain->base = 0;
+	domain->size = (u64)SZ_1G * 4;
+	domain->ops = &etnaviv_iommuv2_ops;
 
 	ret = etnaviv_iommuv2_init(etnaviv_domain);
 	if (ret)
 		goto out_free;
 
-	return &etnaviv_domain->domain;
+	return &etnaviv_domain->base;
 
 out_free:
 	vfree(etnaviv_domain);
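
Note (not part of the patch): MMUv2 translation above is a two-level walk, and the iova is split into a master-TLB (MTLB) index and a second-level (STLB) index, as in the body of etnaviv_iommuv2_map(). A fragment sketching the arithmetic, assuming the driver's usual constants (MTLB index in iova bits 31:22, STLB index in bits 21:12; the mask/shift definitions themselves are outside this diff):

	/* 1024 MTLB entries x 1024 PTEs x 4K pages = the full 4 GiB
	 * aperture that domain->size = (u64)SZ_1G * 4 advertises */
	mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
	stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

	etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = entry;
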
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index 95e1671aee53..35074b944778 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -22,7 +22,7 @@
 #include "etnaviv_iommu.h"
 #include "etnaviv_mmu.h"
 
-static void etnaviv_domain_unmap(struct iommu_domain *domain,
+static void etnaviv_domain_unmap(struct etnaviv_iommu_domain *domain,
 				 unsigned long iova, size_t size)
 {
 	size_t unmapped_page, unmapped = 0;
@@ -44,8 +44,9 @@ static void etnaviv_domain_unmap(struct etnaviv_iommu_domain *domain,
 	}
 }
 
-static int etnaviv_domain_map(struct iommu_domain *domain, unsigned long iova,
-	      phys_addr_t paddr, size_t size, int prot)
+static int etnaviv_domain_map(struct etnaviv_iommu_domain *domain,
+			      unsigned long iova, phys_addr_t paddr,
+			      size_t size, int prot)
 {
 	unsigned long orig_iova = iova;
 	size_t pgsize = SZ_4K;
@@ -78,7 +79,7 @@ static int etnaviv_domain_map(struct iommu_domain *domain, unsigned long iova,
 static int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
 			     struct sg_table *sgt, unsigned len, int prot)
 {
-	struct iommu_domain *domain = iommu->domain;
+	struct etnaviv_iommu_domain *domain = iommu->domain;
 	struct scatterlist *sg;
 	unsigned int da = iova;
 	unsigned int i, j;
@@ -117,7 +118,7 @@ fail:
 static void etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
 				struct sg_table *sgt, unsigned len)
 {
-	struct iommu_domain *domain = iommu->domain;
+	struct etnaviv_iommu_domain *domain = iommu->domain;
 	struct scatterlist *sg;
 	unsigned int da = iova;
 	int i;
@@ -278,7 +279,7 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
 	mmu->last_iova = node->start + etnaviv_obj->base.size;
 	mapping->iova = node->start;
 	ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
-				IOMMU_READ | IOMMU_WRITE);
+				ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);
 
 	if (ret < 0) {
 		drm_mm_remove_node(node);
@@ -312,7 +313,7 @@ void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
 void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
 {
 	drm_mm_takedown(&mmu->mm);
-	iommu_domain_free(mmu->domain);
+	mmu->domain->ops->free(mmu->domain);
 	kfree(mmu);
 }
 
@@ -344,9 +345,7 @@ struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu)
 	mutex_init(&mmu->lock);
 	INIT_LIST_HEAD(&mmu->mappings);
 
-	drm_mm_init(&mmu->mm, mmu->domain->geometry.aperture_start,
-		    mmu->domain->geometry.aperture_end -
-		    mmu->domain->geometry.aperture_start + 1);
+	drm_mm_init(&mmu->mm, mmu->domain->base, mmu->domain->size);
 
 	return mmu;
 }
@@ -378,7 +377,7 @@ int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
 		return ret;
 	}
 	ret = etnaviv_domain_map(mmu->domain, vram_node->start, paddr,
-				 size, IOMMU_READ);
+				 size, ETNAVIV_PROT_READ);
 	if (ret < 0) {
 		drm_mm_remove_node(vram_node);
 		mutex_unlock(&mmu->lock);
@@ -408,18 +407,10 @@ void etnaviv_iommu_put_suballoc_va(struct etnaviv_gpu *gpu,
 }
 size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
 {
-	struct etnaviv_iommu_ops *ops;
-
-	ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);
-
-	return ops->dump_size(iommu->domain);
+	return iommu->domain->ops->dump_size(iommu->domain);
 }
 
 void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf)
 {
-	struct etnaviv_iommu_ops *ops;
-
-	ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);
-
-	ops->dump(iommu->domain, buf);
+	iommu->domain->ops->dump(iommu->domain, buf);
 }
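
Note (not part of the patch): the dump wrappers show the payoff of the new indirection, since callers dispatch straight through the domain's ops table instead of the old container_of() dance on struct iommu_ops. A sketch of a caller (the surrounding context is hypothetical; the two wrappers are the ones defined above):

	size_t len = etnaviv_iommu_dump_size(iommu);	/* -> domain->ops->dump_size() */
	void *buf = vmalloc(len);

	if (buf)
		etnaviv_iommu_dump(iommu, buf);		/* -> domain->ops->dump() */
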
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
index d072eda7a00d..ab603f5166b1 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
@@ -17,7 +17,8 @@
 #ifndef __ETNAVIV_MMU_H__
 #define __ETNAVIV_MMU_H__
 
-#include <linux/iommu.h>
+#define ETNAVIV_PROT_READ	(1 << 0)
+#define ETNAVIV_PROT_WRITE	(1 << 1)
 
 enum etnaviv_iommu_version {
 	ETNAVIV_IOMMU_V1 = 0,
@@ -26,16 +27,31 @@ enum etnaviv_iommu_version {
 
 struct etnaviv_gpu;
 struct etnaviv_vram_mapping;
+struct etnaviv_iommu_domain;
 
-struct etnaviv_iommu_ops {
-	struct iommu_ops ops;
-	size_t (*dump_size)(struct iommu_domain *);
-	void (*dump)(struct iommu_domain *, void *);
+struct etnaviv_iommu_domain_ops {
+	void (*free)(struct etnaviv_iommu_domain *);
+	int (*map)(struct etnaviv_iommu_domain *domain, unsigned long iova,
+		   phys_addr_t paddr, size_t size, int prot);
+	size_t (*unmap)(struct etnaviv_iommu_domain *domain, unsigned long iova,
+			size_t size);
+	size_t (*dump_size)(struct etnaviv_iommu_domain *);
+	void (*dump)(struct etnaviv_iommu_domain *, void *);
+};
+
+struct etnaviv_iommu_domain {
+	struct device *dev;
+	void *bad_page_cpu;
+	dma_addr_t bad_page_dma;
+	u64 base;
+	u64 size;
+
+	const struct etnaviv_iommu_domain_ops *ops;
 };
 
 struct etnaviv_iommu {
 	struct etnaviv_gpu *gpu;
-	struct iommu_domain *domain;
+	struct etnaviv_iommu_domain *domain;
 
 	enum etnaviv_iommu_version version;
 
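
Note (not part of the patch): with this header, mapping permissions are expressed with the driver-private ETNAVIV_PROT_* flags rather than IOMMU_READ/IOMMU_WRITE. A sketch of a call through the new interface (the wrapper function is hypothetical; the ops signature and the flags are the ones defined in this header):

	/* map one 4K page readable and writeable by the GPU */
	static int example_map_page(struct etnaviv_iommu *mmu, u32 iova,
				    phys_addr_t paddr)
	{
		return mmu->domain->ops->map(mmu->domain, iova, paddr, SZ_4K,
					     ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);
	}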