author	Linus Torvalds <torvalds@linux-foundation.org>	2012-12-20 13:07:25 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-12-20 13:07:25 -0500
commit	787314c35fbb97e02823a1b8eb8cfa58f366cd49 (patch)
tree	3fe5a484c1846c80361217a726997484533e8344 /drivers/iommu
parent	6491d4d02893d9787ba67279595990217177b351 (diff)
parent	9c6ecf6a3ade2dc4b03a239af68058b22897af41 (diff)
Merge tag 'iommu-updates-v3.8' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull IOMMU updates from Joerg Roedel:
 "A few new features this merge window. The most important one is
  probably that dma-debug now warns if a dma-handle is not checked with
  dma_mapping_error by the device driver. This requires minor changes
  to some architectures which make use of dma-debug. Most of these
  changes have the respective Acks by the arch maintainers.

  Besides that there are updates to the AMD IOMMU driver to refactor
  the IOMMU-Groups support and to make sure it does not trigger a
  hardware erratum.

  The OMAP changes (for which I pulled in a branch from Tony Lindgren's
  tree) have a conflict in linux-next with the arm-soc tree. The
  conflict is in the file arch/arm/mach-omap2/clock44xx_data.c, which
  is deleted in the arm-soc tree. It is safe to delete the file there
  too to resolve the conflict; similar changes are done in the arm-soc
  tree in the common clock framework migration. A missing hunk from the
  patch in the IOMMU tree will be submitted as a separate patch when
  the merge window is closed."

* tag 'iommu-updates-v3.8' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (29 commits)
  ARM: dma-mapping: support debug_dma_mapping_error
  ARM: OMAP4: hwmod data: ipu and dsp to use parent clocks instead of leaf clocks
  iommu/omap: Adapt to runtime pm
  iommu/omap: Migrate to hwmod framework
  iommu/omap: Keep mmu enabled when requested
  iommu/omap: Remove redundant clock handling on ISR
  iommu/amd: Remove obsolete comment
  iommu/amd: Don't use 512GB pages
  iommu/tegra: smmu: Move bus_set_iommu after probe for multi arch
  iommu/tegra: gart: Move bus_set_iommu after probe for multi arch
  iommu/tegra: smmu: Remove unnecessary PTC/TLB flush all
  tile: dma_debug: add debug_dma_mapping_error support
  sh: dma_debug: add debug_dma_mapping_error support
  powerpc: dma_debug: add debug_dma_mapping_error support
  mips: dma_debug: add debug_dma_mapping_error support
  microblaze: dma-mapping: support debug_dma_mapping_error
  ia64: dma_debug: add debug_dma_mapping_error support
  c6x: dma_debug: add debug_dma_mapping_error support
  ARM64: dma_debug: add debug_dma_mapping_error support
  intel-iommu: Prevent devices with RMRRs from being placed into SI Domain
  ...
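The dma-debug feature is easiest to see from the driver side: after this
series, a handle returned by dma_map_single()/dma_map_page() must be passed
to dma_mapping_error() before use, or dma-debug (CONFIG_DMA_API_DEBUG) warns.
A minimal sketch of the expected pattern; the helper and buffer names are
illustrative, not from this tree:

	#include <linux/dma-mapping.h>

	/* Hypothetical helper: map a buffer and validate the handle. */
	static int example_map_buffer(struct device *dev, void *buf,
				      size_t len, dma_addr_t *handle)
	{
		dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

		/*
		 * This is the check dma-debug now enforces: a possible
		 * mapping error must be tested with dma_mapping_error()
		 * before the handle is used or unmapped.
		 */
		if (dma_mapping_error(dev, addr))
			return -ENOMEM;

		*handle = addr;
		return 0;
	}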
Diffstat (limited to 'drivers/iommu')
-rw-r--r--	drivers/iommu/amd_iommu.c	196
-rw-r--r--	drivers/iommu/amd_iommu_types.h	1
-rw-r--r--	drivers/iommu/intel-iommu.c	31
-rw-r--r--	drivers/iommu/omap-iommu.c	68
-rw-r--r--	drivers/iommu/omap-iommu.h	3
-rw-r--r--	drivers/iommu/omap-iommu2.c	36
-rw-r--r--	drivers/iommu/tegra-gart.c	2
-rw-r--r--	drivers/iommu/tegra-smmu.c	6
8 files changed, 215 insertions, 128 deletions
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 55074cba20eb..c1c74e030a58 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -57,17 +57,9 @@
  * physically contiguous memory regions it is mapping into page sizes
  * that we support.
  *
- * Traditionally the IOMMU core just handed us the mappings directly,
- * after making sure the size is an order of a 4KiB page and that the
- * mapping has natural alignment.
- *
- * To retain this behavior, we currently advertise that we support
- * all page sizes that are an order of 4KiB.
- *
- * If at some point we'd like to utilize the IOMMU core's new behavior,
- * we could change this to advertise the real page sizes we support.
+ * 512GB Pages are not supported due to a hardware bug
  */
-#define AMD_IOMMU_PGSIZES	(~0xFFFUL)
+#define AMD_IOMMU_PGSIZES	((~0xFFFUL) & ~(2ULL << 38))
 
 static DEFINE_RWLOCK(amd_iommu_devtable_lock);
 
@@ -140,6 +132,9 @@ static void free_dev_data(struct iommu_dev_data *dev_data)
 	list_del(&dev_data->dev_data_list);
 	spin_unlock_irqrestore(&dev_data_list_lock, flags);
 
+	if (dev_data->group)
+		iommu_group_put(dev_data->group);
+
 	kfree(dev_data);
 }
 
@@ -274,41 +269,23 @@ static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
 	*from = to;
 }
 
-#define REQ_ACS_FLAGS	(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
-
-static int iommu_init_device(struct device *dev)
+static struct pci_bus *find_hosted_bus(struct pci_bus *bus)
 {
-	struct pci_dev *dma_pdev = NULL, *pdev = to_pci_dev(dev);
-	struct iommu_dev_data *dev_data;
-	struct iommu_group *group;
-	u16 alias;
-	int ret;
-
-	if (dev->archdata.iommu)
-		return 0;
-
-	dev_data = find_dev_data(get_device_id(dev));
-	if (!dev_data)
-		return -ENOMEM;
-
-	alias = amd_iommu_alias_table[dev_data->devid];
-	if (alias != dev_data->devid) {
-		struct iommu_dev_data *alias_data;
+	while (!bus->self) {
+		if (!pci_is_root_bus(bus))
+			bus = bus->parent;
+		else
+			return ERR_PTR(-ENODEV);
+	}
 
-		alias_data = find_dev_data(alias);
-		if (alias_data == NULL) {
-			pr_err("AMD-Vi: Warning: Unhandled device %s\n",
-				dev_name(dev));
-			free_dev_data(dev_data);
-			return -ENOTSUPP;
-		}
-		dev_data->alias_data = alias_data;
+	return bus;
+}
 
-		dma_pdev = pci_get_bus_and_slot(alias >> 8, alias & 0xff);
-	}
+#define REQ_ACS_FLAGS	(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
 
-	if (dma_pdev == NULL)
-		dma_pdev = pci_dev_get(pdev);
+static struct pci_dev *get_isolation_root(struct pci_dev *pdev)
+{
+	struct pci_dev *dma_pdev = pdev;
 
 	/* Account for quirked devices */
 	swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
@@ -330,14 +307,9 @@ static int iommu_init_device(struct device *dev)
 	 * Finding the next device may require skipping virtual buses.
 	 */
 	while (!pci_is_root_bus(dma_pdev->bus)) {
-		struct pci_bus *bus = dma_pdev->bus;
-
-		while (!bus->self) {
-			if (!pci_is_root_bus(bus))
-				bus = bus->parent;
-			else
-				goto root_bus;
-		}
+		struct pci_bus *bus = find_hosted_bus(dma_pdev->bus);
+		if (IS_ERR(bus))
+			break;
 
 		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
 			break;
@@ -345,19 +317,137 @@ static int iommu_init_device(struct device *dev)
 		swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
 	}
 
-root_bus:
-	group = iommu_group_get(&dma_pdev->dev);
-	pci_dev_put(dma_pdev);
+	return dma_pdev;
+}
+
+static int use_pdev_iommu_group(struct pci_dev *pdev, struct device *dev)
+{
+	struct iommu_group *group = iommu_group_get(&pdev->dev);
+	int ret;
+
 	if (!group) {
 		group = iommu_group_alloc();
 		if (IS_ERR(group))
 			return PTR_ERR(group);
+
+		WARN_ON(&pdev->dev != dev);
 	}
 
 	ret = iommu_group_add_device(group, dev);
-
 	iommu_group_put(group);
+	return ret;
+}
+
+static int use_dev_data_iommu_group(struct iommu_dev_data *dev_data,
+				    struct device *dev)
+{
+	if (!dev_data->group) {
+		struct iommu_group *group = iommu_group_alloc();
+		if (IS_ERR(group))
+			return PTR_ERR(group);
+
+		dev_data->group = group;
+	}
+
+	return iommu_group_add_device(dev_data->group, dev);
+}
+
+static int init_iommu_group(struct device *dev)
+{
+	struct iommu_dev_data *dev_data;
+	struct iommu_group *group;
+	struct pci_dev *dma_pdev;
+	int ret;
+
+	group = iommu_group_get(dev);
+	if (group) {
+		iommu_group_put(group);
+		return 0;
+	}
+
+	dev_data = find_dev_data(get_device_id(dev));
+	if (!dev_data)
+		return -ENOMEM;
+
+	if (dev_data->alias_data) {
+		u16 alias;
+		struct pci_bus *bus;
+
+		if (dev_data->alias_data->group)
+			goto use_group;
+
+		/*
+		 * If the alias device exists, it's effectively just a first
+		 * level quirk for finding the DMA source.
+		 */
+		alias = amd_iommu_alias_table[dev_data->devid];
+		dma_pdev = pci_get_bus_and_slot(alias >> 8, alias & 0xff);
+		if (dma_pdev) {
+			dma_pdev = get_isolation_root(dma_pdev);
+			goto use_pdev;
+		}
+
+		/*
+		 * If the alias is virtual, try to find a parent device
+		 * and test whether the IOMMU group is actualy rooted above
+		 * the alias. Be careful to also test the parent device if
+		 * we think the alias is the root of the group.
+		 */
+		bus = pci_find_bus(0, alias >> 8);
+		if (!bus)
+			goto use_group;
+
+		bus = find_hosted_bus(bus);
+		if (IS_ERR(bus) || !bus->self)
+			goto use_group;
+
+		dma_pdev = get_isolation_root(pci_dev_get(bus->self));
+		if (dma_pdev != bus->self || (dma_pdev->multifunction &&
+		    !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)))
+			goto use_pdev;
+
+		pci_dev_put(dma_pdev);
+		goto use_group;
+	}
+
+	dma_pdev = get_isolation_root(pci_dev_get(to_pci_dev(dev)));
+use_pdev:
+	ret = use_pdev_iommu_group(dma_pdev, dev);
+	pci_dev_put(dma_pdev);
+	return ret;
+use_group:
+	return use_dev_data_iommu_group(dev_data->alias_data, dev);
+}
+
+static int iommu_init_device(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct iommu_dev_data *dev_data;
+	u16 alias;
+	int ret;
+
+	if (dev->archdata.iommu)
+		return 0;
+
+	dev_data = find_dev_data(get_device_id(dev));
+	if (!dev_data)
+		return -ENOMEM;
+
+	alias = amd_iommu_alias_table[dev_data->devid];
+	if (alias != dev_data->devid) {
+		struct iommu_dev_data *alias_data;
+
+		alias_data = find_dev_data(alias);
+		if (alias_data == NULL) {
+			pr_err("AMD-Vi: Warning: Unhandled device %s\n",
+				dev_name(dev));
+			free_dev_data(dev_data);
+			return -ENOTSUPP;
+		}
+		dev_data->alias_data = alias_data;
+	}
 
+	ret = init_iommu_group(dev);
 	if (ret)
 		return ret;
 
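The AMD_IOMMU_PGSIZES change reads more easily with the bitmap convention
spelled out: bit n set means 2^n-byte pages are advertised, ~0xFFFUL sets
every bit from 12 (4KiB) upward, and 2ULL << 38 is 1ULL << 39, i.e. the
512GB bit that the erratum workaround clears. A standalone sanity check
(userspace C, assuming an LP64 build; not kernel code):

	#include <stdio.h>

	#define AMD_IOMMU_PGSIZES	((~0xFFFUL) & ~(2ULL << 38))

	int main(void)
	{
		unsigned long long bit_512g = 2ULL << 38;	/* == 1ULL << 39 */

		printf("512GB == 2^%d bytes\n", 39);
		printf("4KiB advertised:  %d\n",
		       !!(AMD_IOMMU_PGSIZES & (1ULL << 12)));
		printf("512GB advertised: %d\n",
		       !!(AMD_IOMMU_PGSIZES & bit_512g));
		return 0;	/* prints 1, then 0 */
	}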
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index c9aa3d079ff0..e38ab438bb34 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -426,6 +426,7 @@ struct iommu_dev_data {
 	struct iommu_dev_data *alias_data;/* The alias dev_data */
 	struct protection_domain *domain; /* Domain the device is bound to */
 	atomic_t bind;			  /* Domain attach reference count */
+	struct iommu_group *group;	  /* IOMMU group for virtual aliases */
 	u16 devid;			  /* PCI Device ID */
 	bool iommu_v2;			  /* Device can make use of IOMMUv2 */
 	bool passthrough;		  /* Default for device is pt_domain */
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 9476c1b96090..c2c07a4a7f21 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2327,8 +2327,39 @@ static int domain_add_dev_info(struct dmar_domain *domain,
 	return 0;
 }
 
+static bool device_has_rmrr(struct pci_dev *dev)
+{
+	struct dmar_rmrr_unit *rmrr;
+	int i;
+
+	for_each_rmrr_units(rmrr) {
+		for (i = 0; i < rmrr->devices_cnt; i++) {
+			/*
+			 * Return TRUE if this RMRR contains the device that
+			 * is passed in.
+			 */
+			if (rmrr->devices[i] == dev)
+				return true;
+		}
+	}
+	return false;
+}
+
 static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
 {
+
+	/*
+	 * We want to prevent any device associated with an RMRR from
+	 * getting placed into the SI Domain. This is done because
+	 * problems exist when devices are moved in and out of domains
+	 * and their respective RMRR info is lost. We exempt USB devices
+	 * from this process due to their usage of RMRRs that are known
+	 * to not be needed after BIOS hand-off to OS.
+	 */
+	if (device_has_rmrr(pdev) &&
+	    (pdev->class >> 8) != PCI_CLASS_SERIAL_USB)
+		return 0;
+
 	if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
 		return 1;
 
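The USB exemption hinges on how pdev->class is packed: it holds base class,
subclass, and programming interface in 24 bits, so shifting right by 8 drops
the prog-if and leaves the 16-bit value that PCI_CLASS_SERIAL_USB (0x0c03 in
pci_ids.h) encodes. A small illustration with a representative EHCI class
code (the value is an example, not taken from this tree):

	#include <stdio.h>

	#define PCI_CLASS_SERIAL_USB	0x0c03	/* base 0x0c, subclass 0x03 */

	int main(void)
	{
		/* Example: an EHCI controller reports base class 0x0c,
		 * subclass 0x03, prog-if 0x20, packed as 0x0c0320. */
		unsigned int class = 0x0c0320;

		/* >> 8 discards the prog-if, keeping class + subclass */
		printf("is USB: %d\n", (class >> 8) == PCI_CLASS_SERIAL_USB);
		return 0;	/* prints 1 */
	}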
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index badc17c2bcb4..18108c1405e2 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -16,13 +16,13 @@
 #include <linux/slab.h>
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
-#include <linux/clk.h>
 #include <linux/platform_device.h>
 #include <linux/iommu.h>
 #include <linux/omap-iommu.h>
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
 #include <linux/io.h>
+#include <linux/pm_runtime.h>
 
 #include <asm/cacheflush.h>
 
@@ -143,31 +143,44 @@ EXPORT_SYMBOL_GPL(omap_iommu_arch_version);
 static int iommu_enable(struct omap_iommu *obj)
 {
 	int err;
+	struct platform_device *pdev = to_platform_device(obj->dev);
+	struct iommu_platform_data *pdata = pdev->dev.platform_data;
 
-	if (!obj)
+	if (!obj || !pdata)
 		return -EINVAL;
 
 	if (!arch_iommu)
 		return -ENODEV;
 
-	clk_enable(obj->clk);
+	if (pdata->deassert_reset) {
+		err = pdata->deassert_reset(pdev, pdata->reset_name);
+		if (err) {
+			dev_err(obj->dev, "deassert_reset failed: %d\n", err);
+			return err;
+		}
+	}
+
+	pm_runtime_get_sync(obj->dev);
 
 	err = arch_iommu->enable(obj);
 
-	clk_disable(obj->clk);
 	return err;
 }
 
 static void iommu_disable(struct omap_iommu *obj)
 {
-	if (!obj)
-		return;
+	struct platform_device *pdev = to_platform_device(obj->dev);
+	struct iommu_platform_data *pdata = pdev->dev.platform_data;
 
-	clk_enable(obj->clk);
+	if (!obj || !pdata)
+		return;
 
 	arch_iommu->disable(obj);
 
-	clk_disable(obj->clk);
+	pm_runtime_put_sync(obj->dev);
+
+	if (pdata->assert_reset)
+		pdata->assert_reset(pdev, pdata->reset_name);
 }
 
 /*
@@ -290,7 +303,7 @@ static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
 	if (!obj || !obj->nr_tlb_entries || !e)
 		return -EINVAL;
 
-	clk_enable(obj->clk);
+	pm_runtime_get_sync(obj->dev);
 
 	iotlb_lock_get(obj, &l);
 	if (l.base == obj->nr_tlb_entries) {
@@ -320,7 +333,7 @@ static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
 
 	cr = iotlb_alloc_cr(obj, e);
 	if (IS_ERR(cr)) {
-		clk_disable(obj->clk);
+		pm_runtime_put_sync(obj->dev);
 		return PTR_ERR(cr);
 	}
 
@@ -334,7 +347,7 @@ static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
 		l.vict = l.base;
 	iotlb_lock_set(obj, &l);
 out:
-	clk_disable(obj->clk);
+	pm_runtime_put_sync(obj->dev);
 	return err;
 }
 
@@ -364,7 +377,7 @@ static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
 	int i;
 	struct cr_regs cr;
 
-	clk_enable(obj->clk);
+	pm_runtime_get_sync(obj->dev);
 
 	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
 		u32 start;
@@ -383,7 +396,7 @@ static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
 			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
 		}
 	}
-	clk_disable(obj->clk);
+	pm_runtime_put_sync(obj->dev);
 
 	if (i == obj->nr_tlb_entries)
 		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
@@ -397,7 +410,7 @@ static void flush_iotlb_all(struct omap_iommu *obj)
 {
 	struct iotlb_lock l;
 
-	clk_enable(obj->clk);
+	pm_runtime_get_sync(obj->dev);
 
 	l.base = 0;
 	l.vict = 0;
@@ -405,7 +418,7 @@ static void flush_iotlb_all(struct omap_iommu *obj)
 
 	iommu_write_reg(obj, 1, MMU_GFLUSH);
 
-	clk_disable(obj->clk);
+	pm_runtime_put_sync(obj->dev);
 }
 
 #if defined(CONFIG_OMAP_IOMMU_DEBUG) || defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)
@@ -415,11 +428,11 @@ ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
 	if (!obj || !buf)
 		return -EINVAL;
 
-	clk_enable(obj->clk);
+	pm_runtime_get_sync(obj->dev);
 
 	bytes = arch_iommu->dump_ctx(obj, buf, bytes);
 
-	clk_disable(obj->clk);
+	pm_runtime_put_sync(obj->dev);
 
 	return bytes;
 }
@@ -433,7 +446,7 @@ __dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
 	struct cr_regs tmp;
 	struct cr_regs *p = crs;
 
-	clk_enable(obj->clk);
+	pm_runtime_get_sync(obj->dev);
 	iotlb_lock_get(obj, &saved);
 
 	for_each_iotlb_cr(obj, num, i, tmp) {
@@ -443,7 +456,7 @@ __dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
 	}
 
 	iotlb_lock_set(obj, &saved);
-	clk_disable(obj->clk);
+	pm_runtime_put_sync(obj->dev);
 
 	return p - crs;
 }
@@ -807,9 +820,7 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
 	if (!obj->refcount)
 		return IRQ_NONE;
 
-	clk_enable(obj->clk);
 	errs = iommu_report_fault(obj, &da);
-	clk_disable(obj->clk);
 	if (errs == 0)
 		return IRQ_HANDLED;
 
@@ -931,17 +942,10 @@ static int __devinit omap_iommu_probe(struct platform_device *pdev)
 	struct resource *res;
 	struct iommu_platform_data *pdata = pdev->dev.platform_data;
 
-	if (pdev->num_resources != 2)
-		return -EINVAL;
-
 	obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
 	if (!obj)
 		return -ENOMEM;
 
-	obj->clk = clk_get(&pdev->dev, pdata->clk_name);
-	if (IS_ERR(obj->clk))
-		goto err_clk;
-
 	obj->nr_tlb_entries = pdata->nr_tlb_entries;
 	obj->name = pdata->name;
 	obj->dev = &pdev->dev;
@@ -984,6 +988,9 @@ static int __devinit omap_iommu_probe(struct platform_device *pdev)
 		goto err_irq;
 	platform_set_drvdata(pdev, obj);
 
+	pm_runtime_irq_safe(obj->dev);
+	pm_runtime_enable(obj->dev);
+
 	dev_info(&pdev->dev, "%s registered\n", obj->name);
 	return 0;
 
@@ -992,8 +999,6 @@ err_irq:
err_ioremap:
 	release_mem_region(res->start, resource_size(res));
err_mem:
-	clk_put(obj->clk);
-err_clk:
 	kfree(obj);
 	return err;
 }
@@ -1014,7 +1019,8 @@ static int __devexit omap_iommu_remove(struct platform_device *pdev)
 	release_mem_region(res->start, resource_size(res));
 	iounmap(obj->regbase);
 
-	clk_put(obj->clk);
+	pm_runtime_disable(obj->dev);
+
 	dev_info(&pdev->dev, "%s removed\n", obj->name);
 	kfree(obj);
 	return 0;
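The omap-iommu conversion above follows the standard runtime-PM recipe:
enable runtime PM once in probe, then bracket each register access with a
get/put pair where clk_enable()/clk_disable() used to sit. A bare sketch of
the pattern (driver names are placeholders; the callbacks that actually gate
the clocks live in the platform/hwmod layer):

	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>

	static int example_probe(struct platform_device *pdev)
	{
		pm_runtime_irq_safe(&pdev->dev); /* allow get_sync in atomic context */
		pm_runtime_enable(&pdev->dev);
		return 0;
	}

	static void example_touch_hw(struct device *dev)
	{
		pm_runtime_get_sync(dev);	/* powers the block; was clk_enable() */
		/* ... MMIO accesses ... */
		pm_runtime_put_sync(dev);	/* was clk_disable() */
	}

	static int example_remove(struct platform_device *pdev)
	{
		pm_runtime_disable(&pdev->dev);
		return 0;
	}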
diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h
index 2b5f3c04d167..120084206602 100644
--- a/drivers/iommu/omap-iommu.h
+++ b/drivers/iommu/omap-iommu.h
@@ -29,7 +29,6 @@ struct iotlb_entry {
 struct omap_iommu {
 	const char	*name;
 	struct module	*owner;
-	struct clk	*clk;
 	void __iomem	*regbase;
 	struct device	*dev;
 	void		*isr_priv;
@@ -116,8 +115,6 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
  * MMU Register offsets
  */
 #define MMU_REVISION		0x00
-#define MMU_SYSCONFIG		0x10
-#define MMU_SYSSTATUS		0x14
 #define MMU_IRQSTATUS		0x18
 #define MMU_IRQENABLE		0x1c
 #define MMU_WALKING_ST		0x40
diff --git a/drivers/iommu/omap-iommu2.c b/drivers/iommu/omap-iommu2.c
index c02020292377..d745094a69dd 100644
--- a/drivers/iommu/omap-iommu2.c
+++ b/drivers/iommu/omap-iommu2.c
@@ -28,19 +28,6 @@
  */
 #define IOMMU_ARCH_VERSION	0x00000011
 
-/* SYSCONF */
-#define MMU_SYS_IDLE_SHIFT	3
-#define MMU_SYS_IDLE_FORCE	(0 << MMU_SYS_IDLE_SHIFT)
-#define MMU_SYS_IDLE_NONE	(1 << MMU_SYS_IDLE_SHIFT)
-#define MMU_SYS_IDLE_SMART	(2 << MMU_SYS_IDLE_SHIFT)
-#define MMU_SYS_IDLE_MASK	(3 << MMU_SYS_IDLE_SHIFT)
-
-#define MMU_SYS_SOFTRESET	(1 << 1)
-#define MMU_SYS_AUTOIDLE	1
-
-/* SYSSTATUS */
-#define MMU_SYS_RESETDONE	1
-
 /* IRQSTATUS & IRQENABLE */
 #define MMU_IRQ_MULTIHITFAULT	(1 << 4)
 #define MMU_IRQ_TABLEWALKFAULT	(1 << 3)
@@ -97,7 +84,6 @@ static void __iommu_set_twl(struct omap_iommu *obj, bool on)
 static int omap2_iommu_enable(struct omap_iommu *obj)
 {
 	u32 l, pa;
-	unsigned long timeout;
 
 	if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K))
 		return -EINVAL;
@@ -106,29 +92,10 @@ static int omap2_iommu_enable(struct omap_iommu *obj)
 	if (!IS_ALIGNED(pa, SZ_16K))
 		return -EINVAL;
 
-	iommu_write_reg(obj, MMU_SYS_SOFTRESET, MMU_SYSCONFIG);
-
-	timeout = jiffies + msecs_to_jiffies(20);
-	do {
-		l = iommu_read_reg(obj, MMU_SYSSTATUS);
-		if (l & MMU_SYS_RESETDONE)
-			break;
-	} while (!time_after(jiffies, timeout));
-
-	if (!(l & MMU_SYS_RESETDONE)) {
-		dev_err(obj->dev, "can't take mmu out of reset\n");
-		return -ENODEV;
-	}
-
 	l = iommu_read_reg(obj, MMU_REVISION);
 	dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
 		 (l >> 4) & 0xf, l & 0xf);
 
-	l = iommu_read_reg(obj, MMU_SYSCONFIG);
-	l &= ~MMU_SYS_IDLE_MASK;
-	l |= (MMU_SYS_IDLE_SMART | MMU_SYS_AUTOIDLE);
-	iommu_write_reg(obj, l, MMU_SYSCONFIG);
-
 	iommu_write_reg(obj, pa, MMU_TTB);
 
 	__iommu_set_twl(obj, true);
@@ -142,7 +109,6 @@ static void omap2_iommu_disable(struct omap_iommu *obj)
 
 	l &= ~MMU_CNTL_MASK;
 	iommu_write_reg(obj, l, MMU_CNTL);
-	iommu_write_reg(obj, MMU_SYS_IDLE_FORCE, MMU_SYSCONFIG);
 
 	dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
 }
@@ -271,8 +237,6 @@ omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len)
 	char *p = buf;
 
 	pr_reg(REVISION);
-	pr_reg(SYSCONFIG);
-	pr_reg(SYSSTATUS);
 	pr_reg(IRQSTATUS);
 	pr_reg(IRQENABLE);
 	pr_reg(WALKING_ST);
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index c16e8fc8a4bd..4c9db62814ff 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -398,6 +398,7 @@ static int tegra_gart_probe(struct platform_device *pdev)
 	do_gart_setup(gart, NULL);
 
 	gart_handle = gart;
+	bus_set_iommu(&platform_bus_type, &gart_iommu_ops);
 	return 0;
 
 fail:
@@ -450,7 +451,6 @@ static struct platform_driver tegra_gart_driver = {
 
 static int __devinit tegra_gart_init(void)
 {
-	bus_set_iommu(&platform_bus_type, &gart_iommu_ops);
 	return platform_driver_register(&tegra_gart_driver);
 }
 
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 4252d743963d..25c1210c0832 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -694,10 +694,8 @@ static void __smmu_iommu_unmap(struct smmu_as *as, dma_addr_t iova)
 	*pte = _PTE_VACANT(iova);
 	FLUSH_CPU_DCACHE(pte, page, sizeof(*pte));
 	flush_ptc_and_tlb(as->smmu, as, iova, pte, page, 0);
-	if (!--(*count)) {
+	if (!--(*count))
 		free_ptbl(as, iova);
-		smmu_flush_regs(as->smmu, 0);
-	}
 }
 
 static void __smmu_iommu_map_pfn(struct smmu_as *as, dma_addr_t iova,
@@ -1232,6 +1230,7 @@ static int tegra_smmu_probe(struct platform_device *pdev)
 
 	smmu_debugfs_create(smmu);
 	smmu_handle = smmu;
+	bus_set_iommu(&platform_bus_type, &smmu_iommu_ops);
 	return 0;
 }
 
@@ -1276,7 +1275,6 @@ static struct platform_driver tegra_smmu_driver = {
 
 static int __devinit tegra_smmu_init(void)
 {
-	bus_set_iommu(&platform_bus_type, &smmu_iommu_ops);
 	return platform_driver_register(&tegra_smmu_driver);
 }
 
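Both Tegra changes make the same fix: on a multi-platform kernel the module
init functions run on every machine it boots, so calling bus_set_iommu()
there would install Tegra IOMMU ops even where no such hardware exists.
Registering from probe ties it to an actual device match. In outline,
condensed from the two diffs above (smmu shown; gart is identical in shape):

	/* before: ran unconditionally at module init */
	static int __devinit tegra_smmu_init(void)
	{
		bus_set_iommu(&platform_bus_type, &smmu_iommu_ops);
		return platform_driver_register(&tegra_smmu_driver);
	}

	/* after: only reached once a Tegra SMMU device has matched */
	static int tegra_smmu_probe(struct platform_device *pdev)
	{
		/* ... hardware setup ... */
		bus_set_iommu(&platform_bus_type, &smmu_iommu_ops);
		return 0;
	}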