author		Dmitry Osipenko <digetx@gmail.com>	2018-12-12 15:39:06 -0500
committer	Joerg Roedel <jroedel@suse.de>		2019-01-16 07:54:14 -0500
commit		e7e2367041179318591d3a656f146e20418f99bb (patch)
tree		28afa798bbbba3e66e33d350548e80b4b7788539
parent		cc0e1205766b8be1e6f7985991ad80c8c5e791c2 (diff)
iommu/tegra: gart: Simplify clients-tracking code
GART is a simple IOMMU provider that has a single address space. There is
no need to set up a global list of clients and manage it to track the
active domain, hence a lot of code can be safely removed and replaced
with a simpler alternative.

Signed-off-by: Dmitry Osipenko <digetx@gmail.com>
Acked-by: Thierry Reding <treding@nvidia.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
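The replacement scheme boils down to one lock guarding a single
active-domain pointer plus a counter of attached devices. A minimal
standalone sketch of that pattern follows; names (gart_state,
sketch_attach, sketch_detach) are hypothetical and the per-device
dev->archdata.iommu bookkeeping from the actual patch is omitted, so
this is an illustration of the idea rather than the driver code itself:

	/*
	 * Userspace sketch of the tracking scheme the patch switches to:
	 * one lock, one active-domain pointer, one attached-device counter.
	 * The real driver additionally records the attached domain per
	 * device (dev->archdata.iommu) to reject duplicate attaches.
	 */
	#include <errno.h>
	#include <pthread.h>
	#include <stddef.h>

	struct gart_state {
		pthread_mutex_t dom_lock;	/* protects the fields below */
		unsigned int active_devices;	/* devices attached to the domain */
		void *active_domain;		/* the one domain allowed at a time */
	};

	static int sketch_attach(struct gart_state *g, void *domain)
	{
		int ret = 0;

		pthread_mutex_lock(&g->dom_lock);
		if (g->active_domain && g->active_domain != domain) {
			ret = -EBUSY;	/* a different domain is already active */
		} else {
			g->active_domain = domain;	/* first attach claims the GART */
			g->active_devices++;		/* one more device on this domain */
		}
		pthread_mutex_unlock(&g->dom_lock);

		return ret;
	}

	static void sketch_detach(struct gart_state *g)
	{
		pthread_mutex_lock(&g->dom_lock);
		if (--g->active_devices == 0)	/* last device gone: release the GART */
			g->active_domain = NULL;
		pthread_mutex_unlock(&g->dom_lock);
	}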
-rw-r--r--	drivers/iommu/tegra-gart.c	155
1 file changed, 40 insertions(+), 115 deletions(-)
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index 74c9be13f043..ad348c61d5e7 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -23,7 +23,6 @@
 
 #include <linux/io.h>
 #include <linux/iommu.h>
-#include <linux/list.h>
 #include <linux/moduleparam.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
@@ -46,30 +45,20 @@
 #define GART_PAGE_MASK		\
 	(~(GART_PAGE_SIZE - 1) & ~GART_ENTRY_PHYS_ADDR_VALID)
 
-struct gart_client {
-	struct device		*dev;
-	struct list_head	list;
-};
-
 struct gart_device {
 	void __iomem		*regs;
 	u32			*savedata;
 	u32			page_count;	/* total remappable size */
 	dma_addr_t		iovmm_base;	/* offset to vmm_area */
 	spinlock_t		pte_lock;	/* for pagetable */
-	struct list_head	client;
-	spinlock_t		client_lock;	/* for client list */
+	spinlock_t		dom_lock;	/* for active domain */
+	unsigned int		active_devices;	/* number of active devices */
 	struct iommu_domain	*active_domain;	/* current active domain */
 	struct device		*dev;
 
 	struct iommu_device	iommu;		/* IOMMU Core handle */
 };
 
-struct gart_domain {
-	struct iommu_domain	domain;		/* generic domain handle */
-	struct gart_device	*gart;		/* link to gart device */
-};
-
 static struct gart_device *gart_handle; /* unique for a system */
 
 static bool gart_debug;
@@ -77,11 +66,6 @@ static bool gart_debug;
 #define GART_PTE(_pfn)		\
 	(GART_ENTRY_PHYS_ADDR_VALID | ((_pfn) << PAGE_SHIFT))
 
-static struct gart_domain *to_gart_domain(struct iommu_domain *dom)
-{
-	return container_of(dom, struct gart_domain, domain);
-}
-
 /*
  * Any interaction between any block on PPSB and a block on APB or AHB
  * must have these read-back to ensure the APB/AHB bus transaction is
@@ -170,125 +154,70 @@ static inline bool gart_iova_range_valid(struct gart_device *gart,
 static int gart_iommu_attach_dev(struct iommu_domain *domain,
 				 struct device *dev)
 {
-	struct gart_domain *gart_domain = to_gart_domain(domain);
 	struct gart_device *gart = gart_handle;
-	struct gart_client *client, *c;
-	int err = 0;
-
-	client = kzalloc(sizeof(*c), GFP_KERNEL);
-	if (!client)
-		return -ENOMEM;
-	client->dev = dev;
-
-	spin_lock(&gart->client_lock);
-	list_for_each_entry(c, &gart->client, list) {
-		if (c->dev == dev) {
-			dev_err(gart->dev,
-				"%s is already attached\n", dev_name(dev));
-			err = -EINVAL;
-			goto fail;
-		}
-	}
-	if (gart->active_domain && gart->active_domain != domain) {
-		dev_err(gart->dev, "Only one domain can be active at a time\n");
-		err = -EINVAL;
-		goto fail;
-	}
-	gart->active_domain = domain;
-	gart_domain->gart = gart;
-	list_add(&client->list, &gart->client);
-	spin_unlock(&gart->client_lock);
-	dev_dbg(gart->dev, "Attached %s\n", dev_name(dev));
-	return 0;
+	int ret = 0;
 
-fail:
-	kfree(client);
-	spin_unlock(&gart->client_lock);
-	return err;
-}
+	spin_lock(&gart->dom_lock);
 
-static void __gart_iommu_detach_dev(struct iommu_domain *domain,
-				    struct device *dev)
-{
-	struct gart_domain *gart_domain = to_gart_domain(domain);
-	struct gart_device *gart = gart_domain->gart;
-	struct gart_client *c;
-
-	list_for_each_entry(c, &gart->client, list) {
-		if (c->dev == dev) {
-			list_del(&c->list);
-			kfree(c);
-			if (list_empty(&gart->client)) {
-				gart->active_domain = NULL;
-				gart_domain->gart = NULL;
-			}
-			dev_dbg(gart->dev, "Detached %s\n", dev_name(dev));
-			return;
-		}
+	if (gart->active_domain && gart->active_domain != domain) {
+		ret = -EBUSY;
+	} else if (dev->archdata.iommu != domain) {
+		dev->archdata.iommu = domain;
+		gart->active_domain = domain;
+		gart->active_devices++;
 	}
 
-	dev_err(gart->dev, "Couldn't find %s to detach\n", dev_name(dev));
+	spin_unlock(&gart->dom_lock);
+
+	return ret;
 }
 
 static void gart_iommu_detach_dev(struct iommu_domain *domain,
 				  struct device *dev)
 {
-	struct gart_domain *gart_domain = to_gart_domain(domain);
-	struct gart_device *gart = gart_domain->gart;
+	struct gart_device *gart = gart_handle;
+
+	spin_lock(&gart->dom_lock);
 
-	spin_lock(&gart->client_lock);
-	__gart_iommu_detach_dev(domain, dev);
-	spin_unlock(&gart->client_lock);
+	if (dev->archdata.iommu == domain) {
+		dev->archdata.iommu = NULL;
+
+		if (--gart->active_devices == 0)
+			gart->active_domain = NULL;
+	}
+
+	spin_unlock(&gart->dom_lock);
 }
 
 static struct iommu_domain *gart_iommu_domain_alloc(unsigned type)
 {
-	struct gart_domain *gart_domain;
-	struct gart_device *gart;
+	struct gart_device *gart = gart_handle;
+	struct iommu_domain *domain;
 
 	if (type != IOMMU_DOMAIN_UNMANAGED)
 		return NULL;
 
-	gart = gart_handle;
-	if (!gart)
-		return NULL;
-
-	gart_domain = kzalloc(sizeof(*gart_domain), GFP_KERNEL);
-	if (!gart_domain)
-		return NULL;
-
-	gart_domain->domain.geometry.aperture_start = gart->iovmm_base;
-	gart_domain->domain.geometry.aperture_end = gart->iovmm_base +
+	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+	if (domain) {
+		domain->geometry.aperture_start = gart->iovmm_base;
+		domain->geometry.aperture_end = gart->iovmm_base +
 					gart->page_count * GART_PAGE_SIZE - 1;
-	gart_domain->domain.geometry.force_aperture = true;
+		domain->geometry.force_aperture = true;
+	}
 
-	return &gart_domain->domain;
+	return domain;
 }
 
 static void gart_iommu_domain_free(struct iommu_domain *domain)
 {
-	struct gart_domain *gart_domain = to_gart_domain(domain);
-	struct gart_device *gart = gart_domain->gart;
-
-	if (gart) {
-		spin_lock(&gart->client_lock);
-		if (!list_empty(&gart->client)) {
-			struct gart_client *c, *tmp;
-
-			list_for_each_entry_safe(c, tmp, &gart->client, list)
-				__gart_iommu_detach_dev(domain, c->dev);
-		}
-		spin_unlock(&gart->client_lock);
-	}
-
-	kfree(gart_domain);
+	WARN_ON(gart_handle->active_domain == domain);
+	kfree(domain);
 }
 
 static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
 			  phys_addr_t pa, size_t bytes, int prot)
 {
-	struct gart_domain *gart_domain = to_gart_domain(domain);
-	struct gart_device *gart = gart_domain->gart;
+	struct gart_device *gart = gart_handle;
 	unsigned long flags;
 	unsigned long pfn;
 	unsigned long pte;
@@ -319,8 +248,7 @@ static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
 static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
 			       size_t bytes)
 {
-	struct gart_domain *gart_domain = to_gart_domain(domain);
-	struct gart_device *gart = gart_domain->gart;
+	struct gart_device *gart = gart_handle;
 	unsigned long flags;
 
 	if (!gart_iova_range_valid(gart, iova, bytes))
@@ -335,8 +263,7 @@ static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
 static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
 					   dma_addr_t iova)
 {
-	struct gart_domain *gart_domain = to_gart_domain(domain);
-	struct gart_device *gart = gart_domain->gart;
+	struct gart_device *gart = gart_handle;
 	unsigned long pte;
 	phys_addr_t pa;
 	unsigned long flags;
@@ -395,8 +322,7 @@ static int gart_iommu_of_xlate(struct device *dev,
 
 static void gart_iommu_sync(struct iommu_domain *domain)
 {
-	struct gart_domain *gart_domain = to_gart_domain(domain);
-	struct gart_device *gart = gart_domain->gart;
+	struct gart_device *gart = gart_handle;
 
 	FLUSH_GART_REGS(gart);
 }
@@ -483,8 +409,7 @@ struct gart_device *tegra_gart_probe(struct device *dev, struct tegra_mc *mc)
 	gart->dev = dev;
 	gart_regs = mc->regs + GART_REG_BASE;
 	spin_lock_init(&gart->pte_lock);
-	spin_lock_init(&gart->client_lock);
-	INIT_LIST_HEAD(&gart->client);
+	spin_lock_init(&gart->dom_lock);
 	gart->regs = gart_regs;
 	gart->iovmm_base = (dma_addr_t)res_remap->start;
 	gart->page_count = (resource_size(res_remap) >> GART_PAGE_SHIFT);