author     Joerg Roedel <jroedel@suse.de>    2015-03-26 08:43:13 -0400
committer  Joerg Roedel <jroedel@suse.de>    2015-03-31 09:32:12 -0400
commit     b5cbb386ae312b2028cee61c96f65581ac0d44ec (patch)
tree       2464e1453cfedb361b43fac61358bc71d3cf551c
parent     d5f1a81ccb8947828f60b50d8e9ed7617259a9ec (diff)
iommu/tegra-gart: Make use of domain_alloc and domain_free
Implement domain_alloc and domain_free iommu-ops as a
replacement for domain_init/domain_destroy.
Tested-by: Thierry Reding <treding@nvidia.com>
Acked-by: Thierry Reding <treding@nvidia.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
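
The core of the conversion is the embed-and-container_of pattern: the driver wraps the generic struct iommu_domain in its own struct gart_domain and recovers the outer structure from the generic pointer in every callback. Below is a minimal, self-contained sketch of that pattern in plain userspace C; the names sketch_domain_alloc/sketch_domain_free are hypothetical and the container_of() definition is the standard C idiom, not a copy of the kernel macro.

/*
 * Sketch only: a userspace illustration of the embed-and-container_of
 * pattern this patch adopts.  Not kernel code.
 */
#include <stddef.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct iommu_domain { int type; };      /* stand-in for the generic handle */

struct gart_domain {
        struct iommu_domain domain;     /* embedded generic handle */
        void *gart;                     /* stand-in for driver-private state */
};

/* domain_alloc: allocate the container, hand back the embedded handle */
static struct iommu_domain *sketch_domain_alloc(void)
{
        struct gart_domain *gd = calloc(1, sizeof(*gd));

        return gd ? &gd->domain : NULL;
}

/* every other op recovers the container from the generic handle */
static void sketch_domain_free(struct iommu_domain *dom)
{
        free(container_of(dom, struct gart_domain, domain));
}

int main(void)
{
        struct iommu_domain *dom = sketch_domain_alloc();

        if (dom)
                sketch_domain_free(dom);
        return 0;
}

The benefit over the old domain->priv hook is that the allocation is sized and typed by the driver itself, so the untyped priv pointer, and the NULL checks it forced on the driver, can go away.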
-rw-r--r--   drivers/iommu/tegra-gart.c   67
1 file changed, 46 insertions(+), 21 deletions(-)
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index c48da057dbb1..fc588a1ffeef 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -63,11 +63,21 @@ struct gart_device {
         struct device *dev;
 };
 
+struct gart_domain {
+        struct iommu_domain domain;     /* generic domain handle */
+        struct gart_device *gart;       /* link to gart device */
+};
+
 static struct gart_device *gart_handle; /* unique for a system */
 
 #define GART_PTE(_pfn) \
         (GART_ENTRY_PHYS_ADDR_VALID | ((_pfn) << PAGE_SHIFT))
 
+static struct gart_domain *to_gart_domain(struct iommu_domain *dom)
+{
+        return container_of(dom, struct gart_domain, domain);
+}
+
 /*
  * Any interaction between any block on PPSB and a block on APB or AHB
  * must have these read-back to ensure the APB/AHB bus transaction is
@@ -156,6 +166,7 @@ static inline bool gart_iova_range_valid(struct gart_device *gart,
 static int gart_iommu_attach_dev(struct iommu_domain *domain,
                                  struct device *dev)
 {
+        struct gart_domain *gart_domain = to_gart_domain(domain);
         struct gart_device *gart;
         struct gart_client *client, *c;
         int err = 0;
@@ -163,7 +174,7 @@ static int gart_iommu_attach_dev(struct iommu_domain *domain,
         gart = gart_handle;
         if (!gart)
                 return -EINVAL;
-        domain->priv = gart;
+        gart_domain->gart = gart;
 
         domain->geometry.aperture_start = gart->iovmm_base;
         domain->geometry.aperture_end = gart->iovmm_base +
@@ -198,7 +209,8 @@ fail:
 static void gart_iommu_detach_dev(struct iommu_domain *domain,
                                   struct device *dev)
 {
-        struct gart_device *gart = domain->priv;
+        struct gart_domain *gart_domain = to_gart_domain(domain);
+        struct gart_device *gart = gart_domain->gart;
         struct gart_client *c;
 
         spin_lock(&gart->client_lock);
@@ -216,33 +228,44 @@ out:
         spin_unlock(&gart->client_lock);
 }
 
-static int gart_iommu_domain_init(struct iommu_domain *domain)
+static struct iommu_domain *gart_iommu_domain_alloc(unsigned type)
 {
-        return 0;
+        struct gart_domain *gart_domain;
+
+        if (type != IOMMU_DOMAIN_UNMANAGED)
+                return NULL;
+
+        gart_domain = kzalloc(sizeof(*gart_domain), GFP_KERNEL);
+        if (!gart_domain)
+                return NULL;
+
+        return &gart_domain->domain;
 }
 
-static void gart_iommu_domain_destroy(struct iommu_domain *domain)
+static void gart_iommu_domain_free(struct iommu_domain *domain)
 {
-        struct gart_device *gart = domain->priv;
-
-        if (!gart)
-                return;
+        struct gart_domain *gart_domain = to_gart_domain(domain);
+        struct gart_device *gart = gart_domain->gart;
 
-        spin_lock(&gart->client_lock);
-        if (!list_empty(&gart->client)) {
-                struct gart_client *c;
+        if (gart) {
+                spin_lock(&gart->client_lock);
+                if (!list_empty(&gart->client)) {
+                        struct gart_client *c;
 
-                list_for_each_entry(c, &gart->client, list)
-                        gart_iommu_detach_dev(domain, c->dev);
+                        list_for_each_entry(c, &gart->client, list)
+                                gart_iommu_detach_dev(domain, c->dev);
+                }
+                spin_unlock(&gart->client_lock);
         }
-        spin_unlock(&gart->client_lock);
-        domain->priv = NULL;
+
+        kfree(gart_domain);
 }
 
 static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
                           phys_addr_t pa, size_t bytes, int prot)
 {
-        struct gart_device *gart = domain->priv;
+        struct gart_domain *gart_domain = to_gart_domain(domain);
+        struct gart_device *gart = gart_domain->gart;
         unsigned long flags;
         unsigned long pfn;
 
@@ -265,7 +288,8 @@ static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
 static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
                                size_t bytes)
 {
-        struct gart_device *gart = domain->priv;
+        struct gart_domain *gart_domain = to_gart_domain(domain);
+        struct gart_device *gart = gart_domain->gart;
         unsigned long flags;
 
         if (!gart_iova_range_valid(gart, iova, bytes))
@@ -281,7 +305,8 @@ static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
 static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
                                            dma_addr_t iova)
 {
-        struct gart_device *gart = domain->priv;
+        struct gart_domain *gart_domain = to_gart_domain(domain);
+        struct gart_device *gart = gart_domain->gart;
         unsigned long pte;
         phys_addr_t pa;
         unsigned long flags;
@@ -310,8 +335,8 @@ static bool gart_iommu_capable(enum iommu_cap cap)
 
 static const struct iommu_ops gart_iommu_ops = {
         .capable = gart_iommu_capable,
-        .domain_init = gart_iommu_domain_init,
-        .domain_destroy = gart_iommu_domain_destroy,
+        .domain_alloc = gart_iommu_domain_alloc,
+        .domain_free = gart_iommu_domain_free,
         .attach_dev = gart_iommu_attach_dev,
         .detach_dev = gart_iommu_detach_dev,
         .map = gart_iommu_map,
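
On the caller side, the IOMMU core drives these two ops from iommu_domain_alloc() and iommu_domain_free(). The sketch below is a simplified reconstruction of the v4.1-era core paths from memory, not a verbatim excerpt of drivers/iommu/iommu.c:

/* Simplified sketch of the v4.1-era core paths (not verbatim). */
struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
        struct iommu_domain *domain;

        if (bus == NULL || bus->iommu_ops == NULL)
                return NULL;

        /* the driver sizes and allocates its own container */
        domain = bus->iommu_ops->domain_alloc(IOMMU_DOMAIN_UNMANAGED);
        if (!domain)
                return NULL;

        domain->ops = bus->iommu_ops;
        domain->type = IOMMU_DOMAIN_UNMANAGED;

        return domain;
}

void iommu_domain_free(struct iommu_domain *domain)
{
        domain->ops->domain_free(domain);
}

Because the ops return and accept the embedded handle directly, the core never touches driver-private memory; tegra-gart's gart_domain stays invisible outside the driver.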