diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2015-04-20 13:50:05 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-04-20 13:50:05 -0400 |
| commit | 79319a052cb0ae862954fe9f6e606417f1698ddb (patch) | |
| tree | 8de4379dd3534fd5a92e15a4781d25f759e4f8b7 | |
| parent | 6496edfce95f943e1da43631c2f437509e56af7f (diff) | |
| parent | 7f65ef01e131650d455875598099cd06fea6096b (diff) | |
Merge tag 'iommu-updates-v4.1' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull IOMMU updates from Joerg Roedel:
"Not much this time, but the changes include:
- moving domain allocation into the iommu drivers to prepare for the
introduction of default domains for devices
- fixing the IO page-table code in the AMD IOMMU driver to correctly
encode large page sizes
- extension of the PCI support in the ARM-SMMU driver
- various fixes and cleanups"
* tag 'iommu-updates-v4.1' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (34 commits)
iommu/amd: Correctly encode huge pages in iommu page tables
iommu/amd: Optimize amd_iommu_iova_to_phys for new fetch_pte interface
iommu/amd: Optimize alloc_new_range for new fetch_pte interface
iommu/amd: Optimize iommu_unmap_page for new fetch_pte interface
iommu/amd: Return the pte page-size in fetch_pte
iommu/amd: Add support for contiguous dma allocator
iommu/amd: Don't allocate with __GFP_ZERO in alloc_coherent
iommu/amd: Ignore BUS_NOTIFY_UNBOUND_DRIVER event
iommu/amd: Use BUS_NOTIFY_REMOVED_DEVICE
iommu/tegra: smmu: Compute PFN mask at runtime
iommu/tegra: gart: Set aperture at domain initialization time
iommu/tegra: Setup aperture
iommu: Remove domain_init and domain_free iommu_ops
iommu/fsl: Make use of domain_alloc and domain_free
iommu/rockchip: Make use of domain_alloc and domain_free
iommu/ipmmu-vmsa: Make use of domain_alloc and domain_free
iommu/shmobile: Make use of domain_alloc and domain_free
iommu/msm: Make use of domain_alloc and domain_free
iommu/tegra-gart: Make use of domain_alloc and domain_free
iommu/tegra-smmu: Make use of domain_alloc and domain_free
...
| -rw-r--r-- | drivers/iommu/amd_iommu.c | 250 | ||||
| -rw-r--r-- | drivers/iommu/amd_iommu_types.h | 13 | ||||
| -rw-r--r-- | drivers/iommu/amd_iommu_v2.c | 2 | ||||
| -rw-r--r-- | drivers/iommu/arm-smmu.c | 171 | ||||
| -rw-r--r-- | drivers/iommu/exynos-iommu.c | 87 | ||||
| -rw-r--r-- | drivers/iommu/fsl_pamu_domain.c | 60 | ||||
| -rw-r--r-- | drivers/iommu/fsl_pamu_domain.h | 2 | ||||
| -rw-r--r-- | drivers/iommu/intel-iommu.c | 61 | ||||
| -rw-r--r-- | drivers/iommu/intel_irq_remapping.c | 12 | ||||
| -rw-r--r-- | drivers/iommu/io-pgtable-arm.c | 5 | ||||
| -rw-r--r-- | drivers/iommu/iommu.c | 26 | ||||
| -rw-r--r-- | drivers/iommu/ipmmu-vmsa.c | 41 | ||||
| -rw-r--r-- | drivers/iommu/msm_iommu.c | 73 | ||||
| -rw-r--r-- | drivers/iommu/omap-iommu.c | 49 | ||||
| -rw-r--r-- | drivers/iommu/rockchip-iommu.c | 40 | ||||
| -rw-r--r-- | drivers/iommu/shmobile-iommu.c | 39 | ||||
| -rw-r--r-- | drivers/iommu/tegra-gart.c | 88 | ||||
| -rw-r--r-- | drivers/iommu/tegra-smmu.c | 59 | ||||
| -rw-r--r-- | include/linux/iommu.h | 33 |
19 files changed, 634 insertions(+), 477 deletions(-)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 48882c126245..e43d48956dea 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c | |||
| @@ -33,6 +33,7 @@ | |||
| 33 | #include <linux/export.h> | 33 | #include <linux/export.h> |
| 34 | #include <linux/irq.h> | 34 | #include <linux/irq.h> |
| 35 | #include <linux/msi.h> | 35 | #include <linux/msi.h> |
| 36 | #include <linux/dma-contiguous.h> | ||
| 36 | #include <asm/irq_remapping.h> | 37 | #include <asm/irq_remapping.h> |
| 37 | #include <asm/io_apic.h> | 38 | #include <asm/io_apic.h> |
| 38 | #include <asm/apic.h> | 39 | #include <asm/apic.h> |
| @@ -126,6 +127,11 @@ static int __init alloc_passthrough_domain(void); | |||
| 126 | * | 127 | * |
| 127 | ****************************************************************************/ | 128 | ****************************************************************************/ |
| 128 | 129 | ||
| 130 | static struct protection_domain *to_pdomain(struct iommu_domain *dom) | ||
| 131 | { | ||
| 132 | return container_of(dom, struct protection_domain, domain); | ||
| 133 | } | ||
| 134 | |||
| 129 | static struct iommu_dev_data *alloc_dev_data(u16 devid) | 135 | static struct iommu_dev_data *alloc_dev_data(u16 devid) |
| 130 | { | 136 | { |
| 131 | struct iommu_dev_data *dev_data; | 137 | struct iommu_dev_data *dev_data; |
| @@ -1321,7 +1327,9 @@ static u64 *alloc_pte(struct protection_domain *domain, | |||
| 1321 | * This function checks if there is a PTE for a given dma address. If | 1327 | * This function checks if there is a PTE for a given dma address. If |
| 1322 | * there is one, it returns the pointer to it. | 1328 | * there is one, it returns the pointer to it. |
| 1323 | */ | 1329 | */ |
| 1324 | static u64 *fetch_pte(struct protection_domain *domain, unsigned long address) | 1330 | static u64 *fetch_pte(struct protection_domain *domain, |
| 1331 | unsigned long address, | ||
| 1332 | unsigned long *page_size) | ||
| 1325 | { | 1333 | { |
| 1326 | int level; | 1334 | int level; |
| 1327 | u64 *pte; | 1335 | u64 *pte; |
| @@ -1329,8 +1337,9 @@ static u64 *fetch_pte(struct protection_domain *domain, unsigned long address) | |||
| 1329 | if (address > PM_LEVEL_SIZE(domain->mode)) | 1337 | if (address > PM_LEVEL_SIZE(domain->mode)) |
| 1330 | return NULL; | 1338 | return NULL; |
| 1331 | 1339 | ||
| 1332 | level = domain->mode - 1; | 1340 | level = domain->mode - 1; |
| 1333 | pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; | 1341 | pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; |
| 1342 | *page_size = PTE_LEVEL_PAGE_SIZE(level); | ||
| 1334 | 1343 | ||
| 1335 | while (level > 0) { | 1344 | while (level > 0) { |
| 1336 | 1345 | ||
| @@ -1339,19 +1348,9 @@ static u64 *fetch_pte(struct protection_domain *domain, unsigned long address) | |||
| 1339 | return NULL; | 1348 | return NULL; |
| 1340 | 1349 | ||
| 1341 | /* Large PTE */ | 1350 | /* Large PTE */ |
| 1342 | if (PM_PTE_LEVEL(*pte) == 0x07) { | 1351 | if (PM_PTE_LEVEL(*pte) == 7 || |
| 1343 | unsigned long pte_mask, __pte; | 1352 | PM_PTE_LEVEL(*pte) == 0) |
| 1344 | 1353 | break; | |
| 1345 | /* | ||
| 1346 | * If we have a series of large PTEs, make | ||
| 1347 | * sure to return a pointer to the first one. | ||
| 1348 | */ | ||
| 1349 | pte_mask = PTE_PAGE_SIZE(*pte); | ||
| 1350 | pte_mask = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1); | ||
| 1351 | __pte = ((unsigned long)pte) & pte_mask; | ||
| 1352 | |||
| 1353 | return (u64 *)__pte; | ||
| 1354 | } | ||
| 1355 | 1354 | ||
| 1356 | /* No level skipping support yet */ | 1355 | /* No level skipping support yet */ |
| 1357 | if (PM_PTE_LEVEL(*pte) != level) | 1356 | if (PM_PTE_LEVEL(*pte) != level) |
| @@ -1360,8 +1359,21 @@ static u64 *fetch_pte(struct protection_domain *domain, unsigned long address) | |||
| 1360 | level -= 1; | 1359 | level -= 1; |
| 1361 | 1360 | ||
| 1362 | /* Walk to the next level */ | 1361 | /* Walk to the next level */ |
| 1363 | pte = IOMMU_PTE_PAGE(*pte); | 1362 | pte = IOMMU_PTE_PAGE(*pte); |
| 1364 | pte = &pte[PM_LEVEL_INDEX(level, address)]; | 1363 | pte = &pte[PM_LEVEL_INDEX(level, address)]; |
| 1364 | *page_size = PTE_LEVEL_PAGE_SIZE(level); | ||
| 1365 | } | ||
| 1366 | |||
| 1367 | if (PM_PTE_LEVEL(*pte) == 0x07) { | ||
| 1368 | unsigned long pte_mask; | ||
| 1369 | |||
| 1370 | /* | ||
| 1371 | * If we have a series of large PTEs, make | ||
| 1372 | * sure to return a pointer to the first one. | ||
| 1373 | */ | ||
| 1374 | *page_size = pte_mask = PTE_PAGE_SIZE(*pte); | ||
| 1375 | pte_mask = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1); | ||
| 1376 | pte = (u64 *)(((unsigned long)pte) & pte_mask); | ||
| 1365 | } | 1377 | } |
| 1366 | 1378 | ||
| 1367 | return pte; | 1379 | return pte; |
| @@ -1383,13 +1395,14 @@ static int iommu_map_page(struct protection_domain *dom, | |||
| 1383 | u64 __pte, *pte; | 1395 | u64 __pte, *pte; |
| 1384 | int i, count; | 1396 | int i, count; |
| 1385 | 1397 | ||
| 1398 | BUG_ON(!IS_ALIGNED(bus_addr, page_size)); | ||
| 1399 | BUG_ON(!IS_ALIGNED(phys_addr, page_size)); | ||
| 1400 | |||
| 1386 | if (!(prot & IOMMU_PROT_MASK)) | 1401 | if (!(prot & IOMMU_PROT_MASK)) |
| 1387 | return -EINVAL; | 1402 | return -EINVAL; |
| 1388 | 1403 | ||
| 1389 | bus_addr = PAGE_ALIGN(bus_addr); | 1404 | count = PAGE_SIZE_PTE_COUNT(page_size); |
| 1390 | phys_addr = PAGE_ALIGN(phys_addr); | 1405 | pte = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL); |
| 1391 | count = PAGE_SIZE_PTE_COUNT(page_size); | ||
| 1392 | pte = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL); | ||
| 1393 | 1406 | ||
| 1394 | if (!pte) | 1407 | if (!pte) |
| 1395 | return -ENOMEM; | 1408 | return -ENOMEM; |
| @@ -1398,7 +1411,7 @@ static int iommu_map_page(struct protection_domain *dom, | |||
| 1398 | if (IOMMU_PTE_PRESENT(pte[i])) | 1411 | if (IOMMU_PTE_PRESENT(pte[i])) |
| 1399 | return -EBUSY; | 1412 | return -EBUSY; |
| 1400 | 1413 | ||
| 1401 | if (page_size > PAGE_SIZE) { | 1414 | if (count > 1) { |
| 1402 | __pte = PAGE_SIZE_PTE(phys_addr, page_size); | 1415 | __pte = PAGE_SIZE_PTE(phys_addr, page_size); |
| 1403 | __pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_P | IOMMU_PTE_FC; | 1416 | __pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_P | IOMMU_PTE_FC; |
| 1404 | } else | 1417 | } else |
| @@ -1421,7 +1434,8 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom, | |||
| 1421 | unsigned long bus_addr, | 1434 | unsigned long bus_addr, |
| 1422 | unsigned long page_size) | 1435 | unsigned long page_size) |
| 1423 | { | 1436 | { |
| 1424 | unsigned long long unmap_size, unmapped; | 1437 | unsigned long long unmapped; |
| 1438 | unsigned long unmap_size; | ||
| 1425 | u64 *pte; | 1439 | u64 *pte; |
| 1426 | 1440 | ||
| 1427 | BUG_ON(!is_power_of_2(page_size)); | 1441 | BUG_ON(!is_power_of_2(page_size)); |
| @@ -1430,28 +1444,12 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom, | |||
| 1430 | 1444 | ||
| 1431 | while (unmapped < page_size) { | 1445 | while (unmapped < page_size) { |
| 1432 | 1446 | ||
| 1433 | pte = fetch_pte(dom, bus_addr); | 1447 | pte = fetch_pte(dom, bus_addr, &unmap_size); |
| 1434 | 1448 | ||
| 1435 | if (!pte) { | 1449 | if (pte) { |
| 1436 | /* | 1450 | int i, count; |
| 1437 | * No PTE for this address | 1451 | |
| 1438 | * move forward in 4kb steps | 1452 | count = PAGE_SIZE_PTE_COUNT(unmap_size); |
| 1439 | */ | ||
| 1440 | unmap_size = PAGE_SIZE; | ||
| 1441 | } else if (PM_PTE_LEVEL(*pte) == 0) { | ||
| 1442 | /* 4kb PTE found for this address */ | ||
| 1443 | unmap_size = PAGE_SIZE; | ||
| 1444 | *pte = 0ULL; | ||
| 1445 | } else { | ||
| 1446 | int count, i; | ||
| 1447 | |||
| 1448 | /* Large PTE found which maps this address */ | ||
| 1449 | unmap_size = PTE_PAGE_SIZE(*pte); | ||
| 1450 | |||
| 1451 | /* Only unmap from the first pte in the page */ | ||
| 1452 | if ((unmap_size - 1) & bus_addr) | ||
| 1453 | break; | ||
| 1454 | count = PAGE_SIZE_PTE_COUNT(unmap_size); | ||
| 1455 | for (i = 0; i < count; i++) | 1453 | for (i = 0; i < count; i++) |
| 1456 | pte[i] = 0ULL; | 1454 | pte[i] = 0ULL; |
| 1457 | } | 1455 | } |
| @@ -1599,7 +1597,7 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom, | |||
| 1599 | { | 1597 | { |
| 1600 | int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT; | 1598 | int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT; |
| 1601 | struct amd_iommu *iommu; | 1599 | struct amd_iommu *iommu; |
| 1602 | unsigned long i, old_size; | 1600 | unsigned long i, old_size, pte_pgsize; |
| 1603 | 1601 | ||
| 1604 | #ifdef CONFIG_IOMMU_STRESS | 1602 | #ifdef CONFIG_IOMMU_STRESS |
| 1605 | populate = false; | 1603 | populate = false; |
| @@ -1672,12 +1670,13 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom, | |||
| 1672 | */ | 1670 | */ |
| 1673 | for (i = dma_dom->aperture[index]->offset; | 1671 | for (i = dma_dom->aperture[index]->offset; |
| 1674 | i < dma_dom->aperture_size; | 1672 | i < dma_dom->aperture_size; |
| 1675 | i += PAGE_SIZE) { | 1673 | i += pte_pgsize) { |
| 1676 | u64 *pte = fetch_pte(&dma_dom->domain, i); | 1674 | u64 *pte = fetch_pte(&dma_dom->domain, i, &pte_pgsize); |
| 1677 | if (!pte || !IOMMU_PTE_PRESENT(*pte)) | 1675 | if (!pte || !IOMMU_PTE_PRESENT(*pte)) |
| 1678 | continue; | 1676 | continue; |
| 1679 | 1677 | ||
| 1680 | dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT, 1); | 1678 | dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT, |
| 1679 | pte_pgsize >> 12); | ||
| 1681 | } | 1680 | } |
| 1682 | 1681 | ||
| 1683 | update_domain(&dma_dom->domain); | 1682 | update_domain(&dma_dom->domain); |
| @@ -2422,16 +2421,6 @@ static int device_change_notifier(struct notifier_block *nb, | |||
| 2422 | dev_data = get_dev_data(dev); | 2421 | dev_data = get_dev_data(dev); |
| 2423 | 2422 | ||
| 2424 | switch (action) { | 2423 | switch (action) { |
| 2425 | case BUS_NOTIFY_UNBOUND_DRIVER: | ||
| 2426 | |||
| 2427 | domain = domain_for_device(dev); | ||
| 2428 | |||
| 2429 | if (!domain) | ||
| 2430 | goto out; | ||
| 2431 | if (dev_data->passthrough) | ||
| 2432 | break; | ||
| 2433 | detach_device(dev); | ||
| 2434 | break; | ||
| 2435 | case BUS_NOTIFY_ADD_DEVICE: | 2424 | case BUS_NOTIFY_ADD_DEVICE: |
| 2436 | 2425 | ||
| 2437 | iommu_init_device(dev); | 2426 | iommu_init_device(dev); |
| @@ -2467,7 +2456,7 @@ static int device_change_notifier(struct notifier_block *nb, | |||
| 2467 | dev->archdata.dma_ops = &amd_iommu_dma_ops; | 2456 | dev->archdata.dma_ops = &amd_iommu_dma_ops; |
| 2468 | 2457 | ||
| 2469 | break; | 2458 | break; |
| 2470 | case BUS_NOTIFY_DEL_DEVICE: | 2459 | case BUS_NOTIFY_REMOVED_DEVICE: |
| 2471 | 2460 | ||
| 2472 | iommu_uninit_device(dev); | 2461 | iommu_uninit_device(dev); |
| 2473 | 2462 | ||
| @@ -2923,38 +2912,42 @@ static void *alloc_coherent(struct device *dev, size_t size, | |||
| 2923 | dma_addr_t *dma_addr, gfp_t flag, | 2912 | dma_addr_t *dma_addr, gfp_t flag, |
| 2924 | struct dma_attrs *attrs) | 2913 | struct dma_attrs *attrs) |
| 2925 | { | 2914 | { |
| 2926 | unsigned long flags; | ||
| 2927 | void *virt_addr; | ||
| 2928 | struct protection_domain *domain; | ||
| 2929 | phys_addr_t paddr; | ||
| 2930 | u64 dma_mask = dev->coherent_dma_mask; | 2915 | u64 dma_mask = dev->coherent_dma_mask; |
| 2916 | struct protection_domain *domain; | ||
| 2917 | unsigned long flags; | ||
| 2918 | struct page *page; | ||
| 2931 | 2919 | ||
| 2932 | INC_STATS_COUNTER(cnt_alloc_coherent); | 2920 | INC_STATS_COUNTER(cnt_alloc_coherent); |
| 2933 | 2921 | ||
| 2934 | domain = get_domain(dev); | 2922 | domain = get_domain(dev); |
| 2935 | if (PTR_ERR(domain) == -EINVAL) { | 2923 | if (PTR_ERR(domain) == -EINVAL) { |
| 2936 | virt_addr = (void *)__get_free_pages(flag, get_order(size)); | 2924 | page = alloc_pages(flag, get_order(size)); |
| 2937 | *dma_addr = __pa(virt_addr); | 2925 | *dma_addr = page_to_phys(page); |
| 2938 | return virt_addr; | 2926 | return page_address(page); |
| 2939 | } else if (IS_ERR(domain)) | 2927 | } else if (IS_ERR(domain)) |
| 2940 | return NULL; | 2928 | return NULL; |
| 2941 | 2929 | ||
| 2930 | size = PAGE_ALIGN(size); | ||
| 2942 | dma_mask = dev->coherent_dma_mask; | 2931 | dma_mask = dev->coherent_dma_mask; |
| 2943 | flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); | 2932 | flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); |
| 2944 | flag |= __GFP_ZERO; | ||
| 2945 | 2933 | ||
| 2946 | virt_addr = (void *)__get_free_pages(flag, get_order(size)); | 2934 | page = alloc_pages(flag | __GFP_NOWARN, get_order(size)); |
| 2947 | if (!virt_addr) | 2935 | if (!page) { |
| 2948 | return NULL; | 2936 | if (!(flag & __GFP_WAIT)) |
| 2937 | return NULL; | ||
| 2949 | 2938 | ||
| 2950 | paddr = virt_to_phys(virt_addr); | 2939 | page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT, |
| 2940 | get_order(size)); | ||
| 2941 | if (!page) | ||
| 2942 | return NULL; | ||
| 2943 | } | ||
| 2951 | 2944 | ||
| 2952 | if (!dma_mask) | 2945 | if (!dma_mask) |
| 2953 | dma_mask = *dev->dma_mask; | 2946 | dma_mask = *dev->dma_mask; |
| 2954 | 2947 | ||
| 2955 | spin_lock_irqsave(&domain->lock, flags); | 2948 | spin_lock_irqsave(&domain->lock, flags); |
| 2956 | 2949 | ||
| 2957 | *dma_addr = __map_single(dev, domain->priv, paddr, | 2950 | *dma_addr = __map_single(dev, domain->priv, page_to_phys(page), |
| 2958 | size, DMA_BIDIRECTIONAL, true, dma_mask); | 2951 | size, DMA_BIDIRECTIONAL, true, dma_mask); |
| 2959 | 2952 | ||
| 2960 | if (*dma_addr == DMA_ERROR_CODE) { | 2953 | if (*dma_addr == DMA_ERROR_CODE) { |
| @@ -2966,11 +2959,12 @@ static void *alloc_coherent(struct device *dev, size_t size, | |||
| 2966 | 2959 | ||
| 2967 | spin_unlock_irqrestore(&domain->lock, flags); | 2960 | spin_unlock_irqrestore(&domain->lock, flags); |
| 2968 | 2961 | ||
| 2969 | return virt_addr; | 2962 | return page_address(page); |
| 2970 | 2963 | ||
| 2971 | out_free: | 2964 | out_free: |
| 2972 | 2965 | ||
| 2973 | free_pages((unsigned long)virt_addr, get_order(size)); | 2966 | if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT)) |
| 2967 | __free_pages(page, get_order(size)); | ||
| 2974 | 2968 | ||
| 2975 | return NULL; | 2969 | return NULL; |
| 2976 | } | 2970 | } |
| @@ -2982,11 +2976,15 @@ static void free_coherent(struct device *dev, size_t size, | |||
| 2982 | void *virt_addr, dma_addr_t dma_addr, | 2976 | void *virt_addr, dma_addr_t dma_addr, |
| 2983 | struct dma_attrs *attrs) | 2977 | struct dma_attrs *attrs) |
| 2984 | { | 2978 | { |
| 2985 | unsigned long flags; | ||
| 2986 | struct protection_domain *domain; | 2979 | struct protection_domain *domain; |
| 2980 | unsigned long flags; | ||
| 2981 | struct page *page; | ||
| 2987 | 2982 | ||
| 2988 | INC_STATS_COUNTER(cnt_free_coherent); | 2983 | INC_STATS_COUNTER(cnt_free_coherent); |
| 2989 | 2984 | ||
| 2985 | page = virt_to_page(virt_addr); | ||
| 2986 | size = PAGE_ALIGN(size); | ||
| 2987 | |||
| 2990 | domain = get_domain(dev); | 2988 | domain = get_domain(dev); |
| 2991 | if (IS_ERR(domain)) | 2989 | if (IS_ERR(domain)) |
| 2992 | goto free_mem; | 2990 | goto free_mem; |
| @@ -3000,7 +2998,8 @@ static void free_coherent(struct device *dev, size_t size, | |||
| 3000 | spin_unlock_irqrestore(&domain->lock, flags); | 2998 | spin_unlock_irqrestore(&domain->lock, flags); |
| 3001 | 2999 | ||
| 3002 | free_mem: | 3000 | free_mem: |
| 3003 | free_pages((unsigned long)virt_addr, get_order(size)); | 3001 | if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT)) |
| 3002 | __free_pages(page, get_order(size)); | ||
| 3004 | } | 3003 | } |
| 3005 | 3004 | ||
| 3006 | /* | 3005 | /* |
| @@ -3236,42 +3235,45 @@ static int __init alloc_passthrough_domain(void) | |||
| 3236 | 3235 | ||
| 3237 | return 0; | 3236 | return 0; |
| 3238 | } | 3237 | } |
| 3239 | static int amd_iommu_domain_init(struct iommu_domain *dom) | 3238 | |
| 3239 | static struct iommu_domain *amd_iommu_domain_alloc(unsigned type) | ||
| 3240 | { | 3240 | { |
| 3241 | struct protection_domain *domain; | 3241 | struct protection_domain *pdomain; |
| 3242 | 3242 | ||
| 3243 | domain = protection_domain_alloc(); | 3243 | /* We only support unmanaged domains for now */ |
| 3244 | if (!domain) | 3244 | if (type != IOMMU_DOMAIN_UNMANAGED) |
| 3245 | goto out_free; | 3245 | return NULL; |
| 3246 | 3246 | ||
| 3247 | domain->mode = PAGE_MODE_3_LEVEL; | 3247 | pdomain = protection_domain_alloc(); |
| 3248 | domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL); | 3248 | if (!pdomain) |
| 3249 | if (!domain->pt_root) | ||
| 3250 | goto out_free; | 3249 | goto out_free; |
| 3251 | 3250 | ||
| 3252 | domain->iommu_domain = dom; | 3251 | pdomain->mode = PAGE_MODE_3_LEVEL; |
| 3253 | 3252 | pdomain->pt_root = (void *)get_zeroed_page(GFP_KERNEL); | |
| 3254 | dom->priv = domain; | 3253 | if (!pdomain->pt_root) |
| 3254 | goto out_free; | ||
| 3255 | 3255 | ||
| 3256 | dom->geometry.aperture_start = 0; | 3256 | pdomain->domain.geometry.aperture_start = 0; |
| 3257 | dom->geometry.aperture_end = ~0ULL; | 3257 | pdomain->domain.geometry.aperture_end = ~0ULL; |
| 3258 | dom->geometry.force_aperture = true; | 3258 | pdomain->domain.geometry.force_aperture = true; |
| 3259 | 3259 | ||
| 3260 | return 0; | 3260 | return &pdomain->domain; |
| 3261 | 3261 | ||
| 3262 | out_free: | 3262 | out_free: |
| 3263 | protection_domain_free(domain); | 3263 | protection_domain_free(pdomain); |
| 3264 | 3264 | ||
| 3265 | return -ENOMEM; | 3265 | return NULL; |
| 3266 | } | 3266 | } |
| 3267 | 3267 | ||
| 3268 | static void amd_iommu_domain_destroy(struct iommu_domain *dom) | 3268 | static void amd_iommu_domain_free(struct iommu_domain *dom) |
| 3269 | { | 3269 | { |
| 3270 | struct protection_domain *domain = dom->priv; | 3270 | struct protection_domain *domain; |
| 3271 | 3271 | ||
| 3272 | if (!domain) | 3272 | if (!dom) |
| 3273 | return; | 3273 | return; |
| 3274 | 3274 | ||
| 3275 | domain = to_pdomain(dom); | ||
| 3276 | |||
| 3275 | if (domain->dev_cnt > 0) | 3277 | if (domain->dev_cnt > 0) |
| 3276 | cleanup_domain(domain); | 3278 | cleanup_domain(domain); |
| 3277 | 3279 | ||
| @@ -3284,8 +3286,6 @@ static void amd_iommu_domain_destroy(struct iommu_domain *dom) | |||
| 3284 | free_gcr3_table(domain); | 3286 | free_gcr3_table(domain); |
| 3285 | 3287 | ||
| 3286 | protection_domain_free(domain); | 3288 | protection_domain_free(domain); |
| 3287 | |||
| 3288 | dom->priv = NULL; | ||
| 3289 | } | 3289 | } |
| 3290 | 3290 | ||
| 3291 | static void amd_iommu_detach_device(struct iommu_domain *dom, | 3291 | static void amd_iommu_detach_device(struct iommu_domain *dom, |
| @@ -3313,7 +3313,7 @@ static void amd_iommu_detach_device(struct iommu_domain *dom, | |||
| 3313 | static int amd_iommu_attach_device(struct iommu_domain *dom, | 3313 | static int amd_iommu_attach_device(struct iommu_domain *dom, |
| 3314 | struct device *dev) | 3314 | struct device *dev) |
| 3315 | { | 3315 | { |
| 3316 | struct protection_domain *domain = dom->priv; | 3316 | struct protection_domain *domain = to_pdomain(dom); |
| 3317 | struct iommu_dev_data *dev_data; | 3317 | struct iommu_dev_data *dev_data; |
| 3318 | struct amd_iommu *iommu; | 3318 | struct amd_iommu *iommu; |
| 3319 | int ret; | 3319 | int ret; |
| @@ -3340,7 +3340,7 @@ static int amd_iommu_attach_device(struct iommu_domain *dom, | |||
| 3340 | static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova, | 3340 | static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova, |
| 3341 | phys_addr_t paddr, size_t page_size, int iommu_prot) | 3341 | phys_addr_t paddr, size_t page_size, int iommu_prot) |
| 3342 | { | 3342 | { |
| 3343 | struct protection_domain *domain = dom->priv; | 3343 | struct protection_domain *domain = to_pdomain(dom); |
| 3344 | int prot = 0; | 3344 | int prot = 0; |
| 3345 | int ret; | 3345 | int ret; |
| 3346 | 3346 | ||
| @@ -3362,7 +3362,7 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova, | |||
| 3362 | static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova, | 3362 | static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova, |
| 3363 | size_t page_size) | 3363 | size_t page_size) |
| 3364 | { | 3364 | { |
| 3365 | struct protection_domain *domain = dom->priv; | 3365 | struct protection_domain *domain = to_pdomain(dom); |
| 3366 | size_t unmap_size; | 3366 | size_t unmap_size; |
| 3367 | 3367 | ||
| 3368 | if (domain->mode == PAGE_MODE_NONE) | 3368 | if (domain->mode == PAGE_MODE_NONE) |
| @@ -3380,28 +3380,22 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova, | |||
| 3380 | static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom, | 3380 | static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom, |
| 3381 | dma_addr_t iova) | 3381 | dma_addr_t iova) |
| 3382 | { | 3382 | { |
| 3383 | struct protection_domain *domain = dom->priv; | 3383 | struct protection_domain *domain = to_pdomain(dom); |
| 3384 | unsigned long offset_mask; | 3384 | unsigned long offset_mask, pte_pgsize; |
| 3385 | phys_addr_t paddr; | ||
| 3386 | u64 *pte, __pte; | 3385 | u64 *pte, __pte; |
| 3387 | 3386 | ||
| 3388 | if (domain->mode == PAGE_MODE_NONE) | 3387 | if (domain->mode == PAGE_MODE_NONE) |
| 3389 | return iova; | 3388 | return iova; |
| 3390 | 3389 | ||
| 3391 | pte = fetch_pte(domain, iova); | 3390 | pte = fetch_pte(domain, iova, &pte_pgsize); |
| 3392 | 3391 | ||
| 3393 | if (!pte || !IOMMU_PTE_PRESENT(*pte)) | 3392 | if (!pte || !IOMMU_PTE_PRESENT(*pte)) |
| 3394 | return 0; | 3393 | return 0; |
| 3395 | 3394 | ||
| 3396 | if (PM_PTE_LEVEL(*pte) == 0) | 3395 | offset_mask = pte_pgsize - 1; |
| 3397 | offset_mask = PAGE_SIZE - 1; | 3396 | __pte = *pte & PM_ADDR_MASK; |
| 3398 | else | ||
| 3399 | offset_mask = PTE_PAGE_SIZE(*pte) - 1; | ||
| 3400 | |||
| 3401 | __pte = *pte & PM_ADDR_MASK; | ||
| 3402 | paddr = (__pte & ~offset_mask) | (iova & offset_mask); | ||
| 3403 | 3397 | ||
| 3404 | return paddr; | 3398 | return (__pte & ~offset_mask) | (iova & offset_mask); |
| 3405 | } | 3399 | } |
| 3406 | 3400 | ||
| 3407 | static bool amd_iommu_capable(enum iommu_cap cap) | 3401 | static bool amd_iommu_capable(enum iommu_cap cap) |
| @@ -3420,8 +3414,8 @@ static bool amd_iommu_capable(enum iommu_cap cap) | |||
| 3420 | 3414 | ||
| 3421 | static const struct iommu_ops amd_iommu_ops = { | 3415 | static const struct iommu_ops amd_iommu_ops = { |
| 3422 | .capable = amd_iommu_capable, | 3416 | .capable = amd_iommu_capable, |
| 3423 | .domain_init = amd_iommu_domain_init, | 3417 | .domain_alloc = amd_iommu_domain_alloc, |
| 3424 | .domain_destroy = amd_iommu_domain_destroy, | 3418 | .domain_free = amd_iommu_domain_free, |
| 3425 | .attach_dev = amd_iommu_attach_device, | 3419 | .attach_dev = amd_iommu_attach_device, |
| 3426 | .detach_dev = amd_iommu_detach_device, | 3420 | .detach_dev = amd_iommu_detach_device, |
| 3427 | .map = amd_iommu_map, | 3421 | .map = amd_iommu_map, |
| @@ -3483,7 +3477,7 @@ EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier); | |||
| 3483 | 3477 | ||
| 3484 | void amd_iommu_domain_direct_map(struct iommu_domain *dom) | 3478 | void amd_iommu_domain_direct_map(struct iommu_domain *dom) |
| 3485 | { | 3479 | { |
| 3486 | struct protection_domain *domain = dom->priv; | 3480 | struct protection_domain *domain = to_pdomain(dom); |
| 3487 | unsigned long flags; | 3481 | unsigned long flags; |
| 3488 | 3482 | ||
| 3489 | spin_lock_irqsave(&domain->lock, flags); | 3483 | spin_lock_irqsave(&domain->lock, flags); |
| @@ -3504,7 +3498,7 @@ EXPORT_SYMBOL(amd_iommu_domain_direct_map); | |||
| 3504 | 3498 | ||
| 3505 | int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids) | 3499 | int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids) |
| 3506 | { | 3500 | { |
| 3507 | struct protection_domain *domain = dom->priv; | 3501 | struct protection_domain *domain = to_pdomain(dom); |
| 3508 | unsigned long flags; | 3502 | unsigned long flags; |
| 3509 | int levels, ret; | 3503 | int levels, ret; |
| 3510 | 3504 | ||
| @@ -3616,7 +3610,7 @@ static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid, | |||
| 3616 | int amd_iommu_flush_page(struct iommu_domain *dom, int pasid, | 3610 | int amd_iommu_flush_page(struct iommu_domain *dom, int pasid, |
| 3617 | u64 address) | 3611 | u64 address) |
| 3618 | { | 3612 | { |
| 3619 | struct protection_domain *domain = dom->priv; | 3613 | struct protection_domain *domain = to_pdomain(dom); |
| 3620 | unsigned long flags; | 3614 | unsigned long flags; |
| 3621 | int ret; | 3615 | int ret; |
| 3622 | 3616 | ||
| @@ -3638,7 +3632,7 @@ static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid) | |||
| 3638 | 3632 | ||
| 3639 | int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid) | 3633 | int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid) |
| 3640 | { | 3634 | { |
| 3641 | struct protection_domain *domain = dom->priv; | 3635 | struct protection_domain *domain = to_pdomain(dom); |
| 3642 | unsigned long flags; | 3636 | unsigned long flags; |
| 3643 | int ret; | 3637 | int ret; |
| 3644 | 3638 | ||
| @@ -3718,7 +3712,7 @@ static int __clear_gcr3(struct protection_domain *domain, int pasid) | |||
| 3718 | int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid, | 3712 | int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid, |
| 3719 | unsigned long cr3) | 3713 | unsigned long cr3) |
| 3720 | { | 3714 | { |
| 3721 | struct protection_domain *domain = dom->priv; | 3715 | struct protection_domain *domain = to_pdomain(dom); |
| 3722 | unsigned long flags; | 3716 | unsigned long flags; |
| 3723 | int ret; | 3717 | int ret; |
| 3724 | 3718 | ||
| @@ -3732,7 +3726,7 @@ EXPORT_SYMBOL(amd_iommu_domain_set_gcr3); | |||
| 3732 | 3726 | ||
| 3733 | int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid) | 3727 | int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid) |
| 3734 | { | 3728 | { |
| 3735 | struct protection_domain *domain = dom->priv; | 3729 | struct protection_domain *domain = to_pdomain(dom); |
| 3736 | unsigned long flags; | 3730 | unsigned long flags; |
| 3737 | int ret; | 3731 | int ret; |
| 3738 | 3732 | ||
| @@ -3765,17 +3759,17 @@ EXPORT_SYMBOL(amd_iommu_complete_ppr); | |||
| 3765 | 3759 | ||
| 3766 | struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev) | 3760 | struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev) |
| 3767 | { | 3761 | { |
| 3768 | struct protection_domain *domain; | 3762 | struct protection_domain *pdomain; |
| 3769 | 3763 | ||
| 3770 | domain = get_domain(&pdev->dev); | 3764 | pdomain = get_domain(&pdev->dev); |
| 3771 | if (IS_ERR(domain)) | 3765 | if (IS_ERR(pdomain)) |
| 3772 | return NULL; | 3766 | return NULL; |
| 3773 | 3767 | ||
| 3774 | /* Only return IOMMUv2 domains */ | 3768 | /* Only return IOMMUv2 domains */ |
| 3775 | if (!(domain->flags & PD_IOMMUV2_MASK)) | 3769 | if (!(pdomain->flags & PD_IOMMUV2_MASK)) |
| 3776 | return NULL; | 3770 | return NULL; |
| 3777 | 3771 | ||
| 3778 | return domain->iommu_domain; | 3772 | return &pdomain->domain; |
| 3779 | } | 3773 | } |
| 3780 | EXPORT_SYMBOL(amd_iommu_get_v2_domain); | 3774 | EXPORT_SYMBOL(amd_iommu_get_v2_domain); |
| 3781 | 3775 | ||
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index c4fffb710c58..05030e523771 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h | |||
| @@ -282,6 +282,12 @@ | |||
| 282 | #define PTE_PAGE_SIZE(pte) \ | 282 | #define PTE_PAGE_SIZE(pte) \ |
| 283 | (1ULL << (1 + ffz(((pte) | 0xfffULL)))) | 283 | (1ULL << (1 + ffz(((pte) | 0xfffULL)))) |
| 284 | 284 | ||
| 285 | /* | ||
| 286 | * Takes a page-table level and returns the default page-size for this level | ||
| 287 | */ | ||
| 288 | #define PTE_LEVEL_PAGE_SIZE(level) \ | ||
| 289 | (1ULL << (12 + (9 * (level)))) | ||
| 290 | |||
| 285 | #define IOMMU_PTE_P (1ULL << 0) | 291 | #define IOMMU_PTE_P (1ULL << 0) |
| 286 | #define IOMMU_PTE_TV (1ULL << 1) | 292 | #define IOMMU_PTE_TV (1ULL << 1) |
| 287 | #define IOMMU_PTE_U (1ULL << 59) | 293 | #define IOMMU_PTE_U (1ULL << 59) |
| @@ -400,6 +406,8 @@ struct iommu_domain; | |||
| 400 | struct protection_domain { | 406 | struct protection_domain { |
| 401 | struct list_head list; /* for list of all protection domains */ | 407 | struct list_head list; /* for list of all protection domains */ |
| 402 | struct list_head dev_list; /* List of all devices in this domain */ | 408 | struct list_head dev_list; /* List of all devices in this domain */ |
| 409 | struct iommu_domain domain; /* generic domain handle used by | ||
| 410 | iommu core code */ | ||
| 403 | spinlock_t lock; /* mostly used to lock the page table*/ | 411 | spinlock_t lock; /* mostly used to lock the page table*/ |
| 404 | struct mutex api_lock; /* protect page tables in the iommu-api path */ | 412 | struct mutex api_lock; /* protect page tables in the iommu-api path */ |
| 405 | u16 id; /* the domain id written to the device table */ | 413 | u16 id; /* the domain id written to the device table */ |
| @@ -411,10 +419,7 @@ struct protection_domain { | |||
| 411 | bool updated; /* complete domain flush required */ | 419 | bool updated; /* complete domain flush required */ |
| 412 | unsigned dev_cnt; /* devices assigned to this domain */ | 420 | unsigned dev_cnt; /* devices assigned to this domain */ |
| 413 | unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */ | 421 | unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */ |
| 414 | void *priv; /* private data */ | 422 | void *priv; /* private data */ |
| 415 | struct iommu_domain *iommu_domain; /* Pointer to generic | ||
| 416 | domain structure */ | ||
| 417 | |||
| 418 | }; | 423 | }; |
| 419 | 424 | ||
| 420 | /* | 425 | /* |
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c index 6d5a5c44453b..a1cbba9056fd 100644 --- a/drivers/iommu/amd_iommu_v2.c +++ b/drivers/iommu/amd_iommu_v2.c | |||
| @@ -417,7 +417,7 @@ static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm) | |||
| 417 | dev_state = pasid_state->device_state; | 417 | dev_state = pasid_state->device_state; |
| 418 | run_inv_ctx_cb = !pasid_state->invalid; | 418 | run_inv_ctx_cb = !pasid_state->invalid; |
| 419 | 419 | ||
| 420 | if (run_inv_ctx_cb && pasid_state->device_state->inv_ctx_cb) | 420 | if (run_inv_ctx_cb && dev_state->inv_ctx_cb) |
| 421 | dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid); | 421 | dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid); |
| 422 | 422 | ||
| 423 | unbind_pasid(pasid_state); | 423 | unbind_pasid(pasid_state); |
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index a3adde6519f0..9f7e1d34a32b 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c | |||
| @@ -343,6 +343,7 @@ struct arm_smmu_domain { | |||
| 343 | struct arm_smmu_cfg cfg; | 343 | struct arm_smmu_cfg cfg; |
| 344 | enum arm_smmu_domain_stage stage; | 344 | enum arm_smmu_domain_stage stage; |
| 345 | struct mutex init_mutex; /* Protects smmu pointer */ | 345 | struct mutex init_mutex; /* Protects smmu pointer */ |
| 346 | struct iommu_domain domain; | ||
| 346 | }; | 347 | }; |
| 347 | 348 | ||
| 348 | static struct iommu_ops arm_smmu_ops; | 349 | static struct iommu_ops arm_smmu_ops; |
| @@ -360,6 +361,11 @@ static struct arm_smmu_option_prop arm_smmu_options[] = { | |||
| 360 | { 0, NULL}, | 361 | { 0, NULL}, |
| 361 | }; | 362 | }; |
| 362 | 363 | ||
| 364 | static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom) | ||
| 365 | { | ||
| 366 | return container_of(dom, struct arm_smmu_domain, domain); | ||
| 367 | } | ||
| 368 | |||
| 363 | static void parse_driver_options(struct arm_smmu_device *smmu) | 369 | static void parse_driver_options(struct arm_smmu_device *smmu) |
| 364 | { | 370 | { |
| 365 | int i = 0; | 371 | int i = 0; |
| @@ -645,7 +651,7 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev) | |||
| 645 | u32 fsr, far, fsynr, resume; | 651 | u32 fsr, far, fsynr, resume; |
| 646 | unsigned long iova; | 652 | unsigned long iova; |
| 647 | struct iommu_domain *domain = dev; | 653 | struct iommu_domain *domain = dev; |
| 648 | struct arm_smmu_domain *smmu_domain = domain->priv; | 654 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
| 649 | struct arm_smmu_cfg *cfg = &smmu_domain->cfg; | 655 | struct arm_smmu_cfg *cfg = &smmu_domain->cfg; |
| 650 | struct arm_smmu_device *smmu = smmu_domain->smmu; | 656 | struct arm_smmu_device *smmu = smmu_domain->smmu; |
| 651 | void __iomem *cb_base; | 657 | void __iomem *cb_base; |
| @@ -730,6 +736,20 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain, | |||
| 730 | stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; | 736 | stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; |
| 731 | cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); | 737 | cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); |
| 732 | 738 | ||
| 739 | if (smmu->version > ARM_SMMU_V1) { | ||
| 740 | /* | ||
| 741 | * CBA2R. | ||
| 742 | * *Must* be initialised before CBAR thanks to VMID16 | ||
| 743 | * architectural oversight affected some implementations. | ||
| 744 | */ | ||
| 745 | #ifdef CONFIG_64BIT | ||
| 746 | reg = CBA2R_RW64_64BIT; | ||
| 747 | #else | ||
| 748 | reg = CBA2R_RW64_32BIT; | ||
| 749 | #endif | ||
| 750 | writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx)); | ||
| 751 | } | ||
| 752 | |||
| 733 | /* CBAR */ | 753 | /* CBAR */ |
| 734 | reg = cfg->cbar; | 754 | reg = cfg->cbar; |
| 735 | if (smmu->version == ARM_SMMU_V1) | 755 | if (smmu->version == ARM_SMMU_V1) |
| @@ -747,16 +767,6 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain, | |||
| 747 | } | 767 | } |
| 748 | writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx)); | 768 | writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx)); |
| 749 | 769 | ||
| 750 | if (smmu->version > ARM_SMMU_V1) { | ||
| 751 | /* CBA2R */ | ||
| 752 | #ifdef CONFIG_64BIT | ||
| 753 | reg = CBA2R_RW64_64BIT; | ||
| 754 | #else | ||
| 755 | reg = CBA2R_RW64_32BIT; | ||
| 756 | #endif | ||
| 757 | writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx)); | ||
| 758 | } | ||
| 759 | |||
| 760 | /* TTBRs */ | 770 | /* TTBRs */ |
| 761 | if (stage1) { | 771 | if (stage1) { |
| 762 | reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0]; | 772 | reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0]; |
| @@ -836,7 +846,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, | |||
| 836 | struct io_pgtable_ops *pgtbl_ops; | 846 | struct io_pgtable_ops *pgtbl_ops; |
| 837 | struct io_pgtable_cfg pgtbl_cfg; | 847 | struct io_pgtable_cfg pgtbl_cfg; |
| 838 | enum io_pgtable_fmt fmt; | 848 | enum io_pgtable_fmt fmt; |
| 839 | struct arm_smmu_domain *smmu_domain = domain->priv; | 849 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
| 840 | struct arm_smmu_cfg *cfg = &smmu_domain->cfg; | 850 | struct arm_smmu_cfg *cfg = &smmu_domain->cfg; |
| 841 | 851 | ||
| 842 | mutex_lock(&smmu_domain->init_mutex); | 852 | mutex_lock(&smmu_domain->init_mutex); |
| @@ -958,7 +968,7 @@ out_unlock: | |||
| 958 | 968 | ||
| 959 | static void arm_smmu_destroy_domain_context(struct iommu_domain *domain) | 969 | static void arm_smmu_destroy_domain_context(struct iommu_domain *domain) |
| 960 | { | 970 | { |
| 961 | struct arm_smmu_domain *smmu_domain = domain->priv; | 971 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
| 962 | struct arm_smmu_device *smmu = smmu_domain->smmu; | 972 | struct arm_smmu_device *smmu = smmu_domain->smmu; |
| 963 | struct arm_smmu_cfg *cfg = &smmu_domain->cfg; | 973 | struct arm_smmu_cfg *cfg = &smmu_domain->cfg; |
| 964 | void __iomem *cb_base; | 974 | void __iomem *cb_base; |
| @@ -985,10 +995,12 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain) | |||
| 985 | __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx); | 995 | __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx); |
| 986 | } | 996 | } |
| 987 | 997 | ||
| 988 | static int arm_smmu_domain_init(struct iommu_domain *domain) | 998 | static struct iommu_domain *arm_smmu_domain_alloc(unsigned type) |
| 989 | { | 999 | { |
| 990 | struct arm_smmu_domain *smmu_domain; | 1000 | struct arm_smmu_domain *smmu_domain; |
| 991 | 1001 | ||
| 1002 | if (type != IOMMU_DOMAIN_UNMANAGED) | ||
| 1003 | return NULL; | ||
| 992 | /* | 1004 | /* |
| 993 | * Allocate the domain and initialise some of its data structures. | 1005 | * Allocate the domain and initialise some of its data structures. |
| 994 | * We can't really do anything meaningful until we've added a | 1006 | * We can't really do anything meaningful until we've added a |
| @@ -996,17 +1008,17 @@ static int arm_smmu_domain_init(struct iommu_domain *domain) | |||
| 996 | */ | 1008 | */ |
| 997 | smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL); | 1009 | smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL); |
| 998 | if (!smmu_domain) | 1010 | if (!smmu_domain) |
| 999 | return -ENOMEM; | 1011 | return NULL; |
| 1000 | 1012 | ||
| 1001 | mutex_init(&smmu_domain->init_mutex); | 1013 | mutex_init(&smmu_domain->init_mutex); |
| 1002 | spin_lock_init(&smmu_domain->pgtbl_lock); | 1014 | spin_lock_init(&smmu_domain->pgtbl_lock); |
| 1003 | domain->priv = smmu_domain; | 1015 | |
| 1004 | return 0; | 1016 | return &smmu_domain->domain; |
| 1005 | } | 1017 | } |
| 1006 | 1018 | ||
| 1007 | static void arm_smmu_domain_destroy(struct iommu_domain *domain) | 1019 | static void arm_smmu_domain_free(struct iommu_domain *domain) |
| 1008 | { | 1020 | { |
| 1009 | struct arm_smmu_domain *smmu_domain = domain->priv; | 1021 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
| 1010 | 1022 | ||
| 1011 | /* | 1023 | /* |
| 1012 | * Free the domain resources. We assume that all devices have | 1024 | * Free the domain resources. We assume that all devices have |
| @@ -1143,7 +1155,7 @@ static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain, | |||
| 1143 | static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) | 1155 | static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) |
| 1144 | { | 1156 | { |
| 1145 | int ret; | 1157 | int ret; |
| 1146 | struct arm_smmu_domain *smmu_domain = domain->priv; | 1158 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
| 1147 | struct arm_smmu_device *smmu; | 1159 | struct arm_smmu_device *smmu; |
| 1148 | struct arm_smmu_master_cfg *cfg; | 1160 | struct arm_smmu_master_cfg *cfg; |
| 1149 | 1161 | ||
| @@ -1187,7 +1199,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) | |||
| 1187 | 1199 | ||
| 1188 | static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev) | 1200 | static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev) |
| 1189 | { | 1201 | { |
| 1190 | struct arm_smmu_domain *smmu_domain = domain->priv; | 1202 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
| 1191 | struct arm_smmu_master_cfg *cfg; | 1203 | struct arm_smmu_master_cfg *cfg; |
| 1192 | 1204 | ||
| 1193 | cfg = find_smmu_master_cfg(dev); | 1205 | cfg = find_smmu_master_cfg(dev); |
| @@ -1203,7 +1215,7 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, | |||
| 1203 | { | 1215 | { |
| 1204 | int ret; | 1216 | int ret; |
| 1205 | unsigned long flags; | 1217 | unsigned long flags; |
| 1206 | struct arm_smmu_domain *smmu_domain = domain->priv; | 1218 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
| 1207 | struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops; | 1219 | struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops; |
| 1208 | 1220 | ||
| 1209 | if (!ops) | 1221 | if (!ops) |
| @@ -1220,7 +1232,7 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, | |||
| 1220 | { | 1232 | { |
| 1221 | size_t ret; | 1233 | size_t ret; |
| 1222 | unsigned long flags; | 1234 | unsigned long flags; |
| 1223 | struct arm_smmu_domain *smmu_domain = domain->priv; | 1235 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
| 1224 | struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops; | 1236 | struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops; |
| 1225 | 1237 | ||
| 1226 | if (!ops) | 1238 | if (!ops) |
| @@ -1235,7 +1247,7 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, | |||
| 1235 | static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain, | 1247 | static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain, |
| 1236 | dma_addr_t iova) | 1248 | dma_addr_t iova) |
| 1237 | { | 1249 | { |
| 1238 | struct arm_smmu_domain *smmu_domain = domain->priv; | 1250 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
| 1239 | struct arm_smmu_device *smmu = smmu_domain->smmu; | 1251 | struct arm_smmu_device *smmu = smmu_domain->smmu; |
| 1240 | struct arm_smmu_cfg *cfg = &smmu_domain->cfg; | 1252 | struct arm_smmu_cfg *cfg = &smmu_domain->cfg; |
| 1241 | struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops; | 1253 | struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops; |
| @@ -1281,7 +1293,7 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain, | |||
| 1281 | { | 1293 | { |
| 1282 | phys_addr_t ret; | 1294 | phys_addr_t ret; |
| 1283 | unsigned long flags; | 1295 | unsigned long flags; |
| 1284 | struct arm_smmu_domain *smmu_domain = domain->priv; | 1296 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
| 1285 | struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops; | 1297 | struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops; |
| 1286 | 1298 | ||
| 1287 | if (!ops) | 1299 | if (!ops) |
| @@ -1329,61 +1341,83 @@ static void __arm_smmu_release_pci_iommudata(void *data) | |||
| 1329 | kfree(data); | 1341 | kfree(data); |
| 1330 | } | 1342 | } |
| 1331 | 1343 | ||
| 1332 | static int arm_smmu_add_device(struct device *dev) | 1344 | static int arm_smmu_add_pci_device(struct pci_dev *pdev) |
| 1333 | { | 1345 | { |
| 1334 | struct arm_smmu_device *smmu; | 1346 | int i, ret; |
| 1335 | struct arm_smmu_master_cfg *cfg; | 1347 | u16 sid; |
| 1336 | struct iommu_group *group; | 1348 | struct iommu_group *group; |
| 1337 | void (*releasefn)(void *) = NULL; | 1349 | struct arm_smmu_master_cfg *cfg; |
| 1338 | int ret; | ||
| 1339 | |||
| 1340 | smmu = find_smmu_for_device(dev); | ||
| 1341 | if (!smmu) | ||
| 1342 | return -ENODEV; | ||
| 1343 | 1350 | ||
| 1344 | group = iommu_group_alloc(); | 1351 | group = iommu_group_get_for_dev(&pdev->dev); |
| 1345 | if (IS_ERR(group)) { | 1352 | if (IS_ERR(group)) |
| 1346 | dev_err(dev, "Failed to allocate IOMMU group\n"); | ||
| 1347 | return PTR_ERR(group); | 1353 | return PTR_ERR(group); |
| 1348 | } | ||
| 1349 | |||
| 1350 | if (dev_is_pci(dev)) { | ||
| 1351 | struct pci_dev *pdev = to_pci_dev(dev); | ||
| 1352 | 1354 | ||
| 1355 | cfg = iommu_group_get_iommudata(group); | ||
| 1356 | if (!cfg) { | ||
| 1353 | cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); | 1357 | cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); |
| 1354 | if (!cfg) { | 1358 | if (!cfg) { |
| 1355 | ret = -ENOMEM; | 1359 | ret = -ENOMEM; |
| 1356 | goto out_put_group; | 1360 | goto out_put_group; |
| 1357 | } | 1361 | } |
| 1358 | 1362 | ||
| 1359 | cfg->num_streamids = 1; | 1363 | iommu_group_set_iommudata(group, cfg, |
| 1360 | /* | 1364 | __arm_smmu_release_pci_iommudata); |
| 1361 | * Assume Stream ID == Requester ID for now. | 1365 | } |
| 1362 | * We need a way to describe the ID mappings in FDT. | ||
| 1363 | */ | ||
| 1364 | pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, | ||
| 1365 | &cfg->streamids[0]); | ||
| 1366 | releasefn = __arm_smmu_release_pci_iommudata; | ||
| 1367 | } else { | ||
| 1368 | struct arm_smmu_master *master; | ||
| 1369 | |||
| 1370 | master = find_smmu_master(smmu, dev->of_node); | ||
| 1371 | if (!master) { | ||
| 1372 | ret = -ENODEV; | ||
| 1373 | goto out_put_group; | ||
| 1374 | } | ||
| 1375 | 1366 | ||
| 1376 | cfg = &master->cfg; | 1367 | if (cfg->num_streamids >= MAX_MASTER_STREAMIDS) { |
| 1368 | ret = -ENOSPC; | ||
| 1369 | goto out_put_group; | ||
| 1377 | } | 1370 | } |
| 1378 | 1371 | ||
| 1379 | iommu_group_set_iommudata(group, cfg, releasefn); | 1372 | /* |
| 1380 | ret = iommu_group_add_device(group, dev); | 1373 | * Assume Stream ID == Requester ID for now. |
| 1374 | * We need a way to describe the ID mappings in FDT. | ||
| 1375 | */ | ||
| 1376 | pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid); | ||
| 1377 | for (i = 0; i < cfg->num_streamids; ++i) | ||
| 1378 | if (cfg->streamids[i] == sid) | ||
| 1379 | break; | ||
| 1380 | |||
| 1381 | /* Avoid duplicate SIDs, as this can lead to SMR conflicts */ | ||
| 1382 | if (i == cfg->num_streamids) | ||
| 1383 | cfg->streamids[cfg->num_streamids++] = sid; | ||
| 1381 | 1384 | ||
| 1385 | return 0; | ||
| 1382 | out_put_group: | 1386 | out_put_group: |
| 1383 | iommu_group_put(group); | 1387 | iommu_group_put(group); |
| 1384 | return ret; | 1388 | return ret; |
| 1385 | } | 1389 | } |
| 1386 | 1390 | ||
| 1391 | static int arm_smmu_add_platform_device(struct device *dev) | ||
| 1392 | { | ||
| 1393 | struct iommu_group *group; | ||
| 1394 | struct arm_smmu_master *master; | ||
| 1395 | struct arm_smmu_device *smmu = find_smmu_for_device(dev); | ||
| 1396 | |||
| 1397 | if (!smmu) | ||
| 1398 | return -ENODEV; | ||
| 1399 | |||
| 1400 | master = find_smmu_master(smmu, dev->of_node); | ||
| 1401 | if (!master) | ||
| 1402 | return -ENODEV; | ||
| 1403 | |||
| 1404 | /* No automatic group creation for platform devices */ | ||
| 1405 | group = iommu_group_alloc(); | ||
| 1406 | if (IS_ERR(group)) | ||
| 1407 | return PTR_ERR(group); | ||
| 1408 | |||
| 1409 | iommu_group_set_iommudata(group, &master->cfg, NULL); | ||
| 1410 | return iommu_group_add_device(group, dev); | ||
| 1411 | } | ||
| 1412 | |||
| 1413 | static int arm_smmu_add_device(struct device *dev) | ||
| 1414 | { | ||
| 1415 | if (dev_is_pci(dev)) | ||
| 1416 | return arm_smmu_add_pci_device(to_pci_dev(dev)); | ||
| 1417 | |||
| 1418 | return arm_smmu_add_platform_device(dev); | ||
| 1419 | } | ||
| 1420 | |||
| 1387 | static void arm_smmu_remove_device(struct device *dev) | 1421 | static void arm_smmu_remove_device(struct device *dev) |
| 1388 | { | 1422 | { |
| 1389 | iommu_group_remove_device(dev); | 1423 | iommu_group_remove_device(dev); |
| @@ -1392,7 +1426,7 @@ static void arm_smmu_remove_device(struct device *dev) | |||
| 1392 | static int arm_smmu_domain_get_attr(struct iommu_domain *domain, | 1426 | static int arm_smmu_domain_get_attr(struct iommu_domain *domain, |
| 1393 | enum iommu_attr attr, void *data) | 1427 | enum iommu_attr attr, void *data) |
| 1394 | { | 1428 | { |
| 1395 | struct arm_smmu_domain *smmu_domain = domain->priv; | 1429 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
| 1396 | 1430 | ||
| 1397 | switch (attr) { | 1431 | switch (attr) { |
| 1398 | case DOMAIN_ATTR_NESTING: | 1432 | case DOMAIN_ATTR_NESTING: |
| @@ -1407,7 +1441,7 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain, | |||
| 1407 | enum iommu_attr attr, void *data) | 1441 | enum iommu_attr attr, void *data) |
| 1408 | { | 1442 | { |
| 1409 | int ret = 0; | 1443 | int ret = 0; |
| 1410 | struct arm_smmu_domain *smmu_domain = domain->priv; | 1444 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
| 1411 | 1445 | ||
| 1412 | mutex_lock(&smmu_domain->init_mutex); | 1446 | mutex_lock(&smmu_domain->init_mutex); |
| 1413 | 1447 | ||
| @@ -1435,8 +1469,8 @@ out_unlock: | |||
| 1435 | 1469 | ||
| 1436 | static struct iommu_ops arm_smmu_ops = { | 1470 | static struct iommu_ops arm_smmu_ops = { |
| 1437 | .capable = arm_smmu_capable, | 1471 | .capable = arm_smmu_capable, |
| 1438 | .domain_init = arm_smmu_domain_init, | 1472 | .domain_alloc = arm_smmu_domain_alloc, |
| 1439 | .domain_destroy = arm_smmu_domain_destroy, | 1473 | .domain_free = arm_smmu_domain_free, |
| 1440 | .attach_dev = arm_smmu_attach_dev, | 1474 | .attach_dev = arm_smmu_attach_dev, |
| 1441 | .detach_dev = arm_smmu_detach_dev, | 1475 | .detach_dev = arm_smmu_detach_dev, |
| 1442 | .map = arm_smmu_map, | 1476 | .map = arm_smmu_map, |
| @@ -1633,6 +1667,15 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) | |||
| 1633 | size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK); | 1667 | size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK); |
| 1634 | smmu->pa_size = size; | 1668 | smmu->pa_size = size; |
| 1635 | 1669 | ||
| 1670 | /* | ||
| 1671 | * What the page table walker can address actually depends on which | ||
| 1672 | * descriptor format is in use, but since a) we don't know that yet, | ||
| 1673 | * and b) it can vary per context bank, this will have to do... | ||
| 1674 | */ | ||
| 1675 | if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size))) | ||
| 1676 | dev_warn(smmu->dev, | ||
| 1677 | "failed to set DMA mask for table walker\n"); | ||
| 1678 | |||
| 1636 | if (smmu->version == ARM_SMMU_V1) { | 1679 | if (smmu->version == ARM_SMMU_V1) { |
| 1637 | smmu->va_size = smmu->ipa_size; | 1680 | smmu->va_size = smmu->ipa_size; |
| 1638 | size = SZ_4K | SZ_2M | SZ_1G; | 1681 | size = SZ_4K | SZ_2M | SZ_1G; |
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index dc14fec4ede1..3e898504a7c4 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c | |||
| @@ -200,6 +200,7 @@ struct exynos_iommu_domain { | |||
| 200 | short *lv2entcnt; /* free lv2 entry counter for each section */ | 200 | short *lv2entcnt; /* free lv2 entry counter for each section */ |
| 201 | spinlock_t lock; /* lock for this structure */ | 201 | spinlock_t lock; /* lock for this structure */ |
| 202 | spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */ | 202 | spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */ |
| 203 | struct iommu_domain domain; /* generic domain data structure */ | ||
| 203 | }; | 204 | }; |
| 204 | 205 | ||
| 205 | struct sysmmu_drvdata { | 206 | struct sysmmu_drvdata { |
| @@ -214,6 +215,11 @@ struct sysmmu_drvdata { | |||
| 214 | phys_addr_t pgtable; | 215 | phys_addr_t pgtable; |
| 215 | }; | 216 | }; |
| 216 | 217 | ||
| 218 | static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom) | ||
| 219 | { | ||
| 220 | return container_of(dom, struct exynos_iommu_domain, domain); | ||
| 221 | } | ||
| 222 | |||
| 217 | static bool set_sysmmu_active(struct sysmmu_drvdata *data) | 223 | static bool set_sysmmu_active(struct sysmmu_drvdata *data) |
| 218 | { | 224 | { |
| 219 | /* return true if the System MMU was not active previously | 225 | /* return true if the System MMU was not active previously |
| @@ -696,58 +702,60 @@ static inline void pgtable_flush(void *vastart, void *vaend) | |||
| 696 | virt_to_phys(vaend)); | 702 | virt_to_phys(vaend)); |
| 697 | } | 703 | } |
| 698 | 704 | ||
| 699 | static int exynos_iommu_domain_init(struct iommu_domain *domain) | 705 | static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type) |
| 700 | { | 706 | { |
| 701 | struct exynos_iommu_domain *priv; | 707 | struct exynos_iommu_domain *exynos_domain; |
| 702 | int i; | 708 | int i; |
| 703 | 709 | ||
| 704 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | 710 | if (type != IOMMU_DOMAIN_UNMANAGED) |
| 705 | if (!priv) | 711 | return NULL; |
| 706 | return -ENOMEM; | 712 | |
| 713 | exynos_domain = kzalloc(sizeof(*exynos_domain), GFP_KERNEL); | ||
| 714 | if (!exynos_domain) | ||
| 715 | return NULL; | ||
| 707 | 716 | ||
| 708 | priv->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2); | 717 | exynos_domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2); |
| 709 | if (!priv->pgtable) | 718 | if (!exynos_domain->pgtable) |
| 710 | goto err_pgtable; | 719 | goto err_pgtable; |
| 711 | 720 | ||
| 712 | priv->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1); | 721 | exynos_domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1); |
| 713 | if (!priv->lv2entcnt) | 722 | if (!exynos_domain->lv2entcnt) |
| 714 | goto err_counter; | 723 | goto err_counter; |
| 715 | 724 | ||
| 716 | /* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */ | 725 | /* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */ |
| 717 | for (i = 0; i < NUM_LV1ENTRIES; i += 8) { | 726 | for (i = 0; i < NUM_LV1ENTRIES; i += 8) { |
| 718 | priv->pgtable[i + 0] = ZERO_LV2LINK; | 727 | exynos_domain->pgtable[i + 0] = ZERO_LV2LINK; |
| 719 | priv->pgtable[i + 1] = ZERO_LV2LINK; | 728 | exynos_domain->pgtable[i + 1] = ZERO_LV2LINK; |
| 720 | priv->pgtable[i + 2] = ZERO_LV2LINK; | 729 | exynos_domain->pgtable[i + 2] = ZERO_LV2LINK; |
| 721 | priv->pgtable[i + 3] = ZERO_LV2LINK; | 730 | exynos_domain->pgtable[i + 3] = ZERO_LV2LINK; |
| 722 | priv->pgtable[i + 4] = ZERO_LV2LINK; | 731 | exynos_domain->pgtable[i + 4] = ZERO_LV2LINK; |
| 723 | priv->pgtable[i + 5] = ZERO_LV2LINK; | 732 | exynos_domain->pgtable[i + 5] = ZERO_LV2LINK; |
| 724 | priv->pgtable[i + 6] = ZERO_LV2LINK; | 733 | exynos_domain->pgtable[i + 6] = ZERO_LV2LINK; |
| 725 | priv->pgtable[i + 7] = ZERO_LV2LINK; | 734 | exynos_domain->pgtable[i + 7] = ZERO_LV2LINK; |
| 726 | } | 735 | } |
| 727 | 736 | ||
| 728 | pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES); | 737 | pgtable_flush(exynos_domain->pgtable, exynos_domain->pgtable + NUM_LV1ENTRIES); |
| 729 | 738 | ||
| 730 | spin_lock_init(&priv->lock); | 739 | spin_lock_init(&exynos_domain->lock); |
| 731 | spin_lock_init(&priv->pgtablelock); | 740 | spin_lock_init(&exynos_domain->pgtablelock); |
| 732 | INIT_LIST_HEAD(&priv->clients); | 741 | INIT_LIST_HEAD(&exynos_domain->clients); |
| 733 | 742 | ||
| 734 | domain->geometry.aperture_start = 0; | 743 | exynos_domain->domain.geometry.aperture_start = 0; |
| 735 | domain->geometry.aperture_end = ~0UL; | 744 | exynos_domain->domain.geometry.aperture_end = ~0UL; |
| 736 | domain->geometry.force_aperture = true; | 745 | exynos_domain->domain.geometry.force_aperture = true; |
| 737 | 746 | ||
| 738 | domain->priv = priv; | 747 | return &exynos_domain->domain; |
| 739 | return 0; | ||
| 740 | 748 | ||
| 741 | err_counter: | 749 | err_counter: |
| 742 | free_pages((unsigned long)priv->pgtable, 2); | 750 | free_pages((unsigned long)exynos_domain->pgtable, 2); |
| 743 | err_pgtable: | 751 | err_pgtable: |
| 744 | kfree(priv); | 752 | kfree(exynos_domain); |
| 745 | return -ENOMEM; | 753 | return NULL; |
| 746 | } | 754 | } |
| 747 | 755 | ||
| 748 | static void exynos_iommu_domain_destroy(struct iommu_domain *domain) | 756 | static void exynos_iommu_domain_free(struct iommu_domain *domain) |
| 749 | { | 757 | { |
| 750 | struct exynos_iommu_domain *priv = domain->priv; | 758 | struct exynos_iommu_domain *priv = to_exynos_domain(domain); |
| 751 | struct exynos_iommu_owner *owner; | 759 | struct exynos_iommu_owner *owner; |
| 752 | unsigned long flags; | 760 | unsigned long flags; |
| 753 | int i; | 761 | int i; |
| @@ -773,15 +781,14 @@ static void exynos_iommu_domain_destroy(struct iommu_domain *domain) | |||
| 773 | 781 | ||
| 774 | free_pages((unsigned long)priv->pgtable, 2); | 782 | free_pages((unsigned long)priv->pgtable, 2); |
| 775 | free_pages((unsigned long)priv->lv2entcnt, 1); | 783 | free_pages((unsigned long)priv->lv2entcnt, 1); |
| 776 | kfree(domain->priv); | 784 | kfree(priv); |
| 777 | domain->priv = NULL; | ||
| 778 | } | 785 | } |
| 779 | 786 | ||
| 780 | static int exynos_iommu_attach_device(struct iommu_domain *domain, | 787 | static int exynos_iommu_attach_device(struct iommu_domain *domain, |
| 781 | struct device *dev) | 788 | struct device *dev) |
| 782 | { | 789 | { |
| 783 | struct exynos_iommu_owner *owner = dev->archdata.iommu; | 790 | struct exynos_iommu_owner *owner = dev->archdata.iommu; |
| 784 | struct exynos_iommu_domain *priv = domain->priv; | 791 | struct exynos_iommu_domain *priv = to_exynos_domain(domain); |
| 785 | phys_addr_t pagetable = virt_to_phys(priv->pgtable); | 792 | phys_addr_t pagetable = virt_to_phys(priv->pgtable); |
| 786 | unsigned long flags; | 793 | unsigned long flags; |
| 787 | int ret; | 794 | int ret; |
| @@ -812,7 +819,7 @@ static void exynos_iommu_detach_device(struct iommu_domain *domain, | |||
| 812 | struct device *dev) | 819 | struct device *dev) |
| 813 | { | 820 | { |
| 814 | struct exynos_iommu_owner *owner; | 821 | struct exynos_iommu_owner *owner; |
| 815 | struct exynos_iommu_domain *priv = domain->priv; | 822 | struct exynos_iommu_domain *priv = to_exynos_domain(domain); |
| 816 | phys_addr_t pagetable = virt_to_phys(priv->pgtable); | 823 | phys_addr_t pagetable = virt_to_phys(priv->pgtable); |
| 817 | unsigned long flags; | 824 | unsigned long flags; |
| 818 | 825 | ||
| @@ -988,7 +995,7 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size, | |||
| 988 | static int exynos_iommu_map(struct iommu_domain *domain, unsigned long l_iova, | 995 | static int exynos_iommu_map(struct iommu_domain *domain, unsigned long l_iova, |
| 989 | phys_addr_t paddr, size_t size, int prot) | 996 | phys_addr_t paddr, size_t size, int prot) |
| 990 | { | 997 | { |
| 991 | struct exynos_iommu_domain *priv = domain->priv; | 998 | struct exynos_iommu_domain *priv = to_exynos_domain(domain); |
| 992 | sysmmu_pte_t *entry; | 999 | sysmmu_pte_t *entry; |
| 993 | sysmmu_iova_t iova = (sysmmu_iova_t)l_iova; | 1000 | sysmmu_iova_t iova = (sysmmu_iova_t)l_iova; |
| 994 | unsigned long flags; | 1001 | unsigned long flags; |
| @@ -1042,7 +1049,7 @@ static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *priv, | |||
| 1042 | static size_t exynos_iommu_unmap(struct iommu_domain *domain, | 1049 | static size_t exynos_iommu_unmap(struct iommu_domain *domain, |
| 1043 | unsigned long l_iova, size_t size) | 1050 | unsigned long l_iova, size_t size) |
| 1044 | { | 1051 | { |
| 1045 | struct exynos_iommu_domain *priv = domain->priv; | 1052 | struct exynos_iommu_domain *priv = to_exynos_domain(domain); |
| 1046 | sysmmu_iova_t iova = (sysmmu_iova_t)l_iova; | 1053 | sysmmu_iova_t iova = (sysmmu_iova_t)l_iova; |
| 1047 | sysmmu_pte_t *ent; | 1054 | sysmmu_pte_t *ent; |
| 1048 | size_t err_pgsize; | 1055 | size_t err_pgsize; |
| @@ -1119,7 +1126,7 @@ err: | |||
| 1119 | static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain, | 1126 | static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain, |
| 1120 | dma_addr_t iova) | 1127 | dma_addr_t iova) |
| 1121 | { | 1128 | { |
| 1122 | struct exynos_iommu_domain *priv = domain->priv; | 1129 | struct exynos_iommu_domain *priv = to_exynos_domain(domain); |
| 1123 | sysmmu_pte_t *entry; | 1130 | sysmmu_pte_t *entry; |
| 1124 | unsigned long flags; | 1131 | unsigned long flags; |
| 1125 | phys_addr_t phys = 0; | 1132 | phys_addr_t phys = 0; |
| @@ -1171,8 +1178,8 @@ static void exynos_iommu_remove_device(struct device *dev) | |||
| 1171 | } | 1178 | } |
| 1172 | 1179 | ||
| 1173 | static const struct iommu_ops exynos_iommu_ops = { | 1180 | static const struct iommu_ops exynos_iommu_ops = { |
| 1174 | .domain_init = exynos_iommu_domain_init, | 1181 | .domain_alloc = exynos_iommu_domain_alloc, |
| 1175 | .domain_destroy = exynos_iommu_domain_destroy, | 1182 | .domain_free = exynos_iommu_domain_free, |
| 1176 | .attach_dev = exynos_iommu_attach_device, | 1183 | .attach_dev = exynos_iommu_attach_device, |
| 1177 | .detach_dev = exynos_iommu_detach_device, | 1184 | .detach_dev = exynos_iommu_detach_device, |
| 1178 | .map = exynos_iommu_map, | 1185 | .map = exynos_iommu_map, |
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c index ceebd287b660..1d452930c890 100644 --- a/drivers/iommu/fsl_pamu_domain.c +++ b/drivers/iommu/fsl_pamu_domain.c | |||
| @@ -33,6 +33,11 @@ static struct kmem_cache *fsl_pamu_domain_cache; | |||
| 33 | static struct kmem_cache *iommu_devinfo_cache; | 33 | static struct kmem_cache *iommu_devinfo_cache; |
| 34 | static DEFINE_SPINLOCK(device_domain_lock); | 34 | static DEFINE_SPINLOCK(device_domain_lock); |
| 35 | 35 | ||
| 36 | static struct fsl_dma_domain *to_fsl_dma_domain(struct iommu_domain *dom) | ||
| 37 | { | ||
| 38 | return container_of(dom, struct fsl_dma_domain, iommu_domain); | ||
| 39 | } | ||
| 40 | |||
| 36 | static int __init iommu_init_mempool(void) | 41 | static int __init iommu_init_mempool(void) |
| 37 | { | 42 | { |
| 38 | fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain", | 43 | fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain", |
| @@ -65,7 +70,7 @@ static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t i | |||
| 65 | struct dma_window *win_ptr = &dma_domain->win_arr[0]; | 70 | struct dma_window *win_ptr = &dma_domain->win_arr[0]; |
| 66 | struct iommu_domain_geometry *geom; | 71 | struct iommu_domain_geometry *geom; |
| 67 | 72 | ||
| 68 | geom = &dma_domain->iommu_domain->geometry; | 73 | geom = &dma_domain->iommu_domain.geometry; |
| 69 | 74 | ||
| 70 | if (!win_cnt || !dma_domain->geom_size) { | 75 | if (!win_cnt || !dma_domain->geom_size) { |
| 71 | pr_debug("Number of windows/geometry not configured for the domain\n"); | 76 | pr_debug("Number of windows/geometry not configured for the domain\n"); |
| @@ -123,7 +128,7 @@ static int map_win(int liodn, struct fsl_dma_domain *dma_domain) | |||
| 123 | { | 128 | { |
| 124 | int ret; | 129 | int ret; |
| 125 | struct dma_window *wnd = &dma_domain->win_arr[0]; | 130 | struct dma_window *wnd = &dma_domain->win_arr[0]; |
| 126 | phys_addr_t wnd_addr = dma_domain->iommu_domain->geometry.aperture_start; | 131 | phys_addr_t wnd_addr = dma_domain->iommu_domain.geometry.aperture_start; |
| 127 | unsigned long flags; | 132 | unsigned long flags; |
| 128 | 133 | ||
| 129 | spin_lock_irqsave(&iommu_lock, flags); | 134 | spin_lock_irqsave(&iommu_lock, flags); |
| @@ -172,7 +177,7 @@ static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr | |||
| 172 | } else { | 177 | } else { |
| 173 | phys_addr_t wnd_addr; | 178 | phys_addr_t wnd_addr; |
| 174 | 179 | ||
| 175 | wnd_addr = dma_domain->iommu_domain->geometry.aperture_start; | 180 | wnd_addr = dma_domain->iommu_domain.geometry.aperture_start; |
| 176 | 181 | ||
| 177 | ret = pamu_config_ppaace(liodn, wnd_addr, | 182 | ret = pamu_config_ppaace(liodn, wnd_addr, |
| 178 | wnd->size, | 183 | wnd->size, |
| @@ -384,7 +389,7 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d | |||
| 384 | static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain, | 389 | static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain, |
| 385 | dma_addr_t iova) | 390 | dma_addr_t iova) |
| 386 | { | 391 | { |
| 387 | struct fsl_dma_domain *dma_domain = domain->priv; | 392 | struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); |
| 388 | 393 | ||
| 389 | if (iova < domain->geometry.aperture_start || | 394 | if (iova < domain->geometry.aperture_start || |
| 390 | iova > domain->geometry.aperture_end) | 395 | iova > domain->geometry.aperture_end) |
| @@ -398,11 +403,9 @@ static bool fsl_pamu_capable(enum iommu_cap cap) | |||
| 398 | return cap == IOMMU_CAP_CACHE_COHERENCY; | 403 | return cap == IOMMU_CAP_CACHE_COHERENCY; |
| 399 | } | 404 | } |
| 400 | 405 | ||
| 401 | static void fsl_pamu_domain_destroy(struct iommu_domain *domain) | 406 | static void fsl_pamu_domain_free(struct iommu_domain *domain) |
| 402 | { | 407 | { |
| 403 | struct fsl_dma_domain *dma_domain = domain->priv; | 408 | struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); |
| 404 | |||
| 405 | domain->priv = NULL; | ||
| 406 | 409 | ||
| 407 | /* remove all the devices from the device list */ | 410 | /* remove all the devices from the device list */ |
| 408 | detach_device(NULL, dma_domain); | 411 | detach_device(NULL, dma_domain); |
| @@ -413,23 +416,24 @@ static void fsl_pamu_domain_destroy(struct iommu_domain *domain) | |||
| 413 | kmem_cache_free(fsl_pamu_domain_cache, dma_domain); | 416 | kmem_cache_free(fsl_pamu_domain_cache, dma_domain); |
| 414 | } | 417 | } |
| 415 | 418 | ||
| 416 | static int fsl_pamu_domain_init(struct iommu_domain *domain) | 419 | static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type) |
| 417 | { | 420 | { |
| 418 | struct fsl_dma_domain *dma_domain; | 421 | struct fsl_dma_domain *dma_domain; |
| 419 | 422 | ||
| 423 | if (type != IOMMU_DOMAIN_UNMANAGED) | ||
| 424 | return NULL; | ||
| 425 | |||
| 420 | dma_domain = iommu_alloc_dma_domain(); | 426 | dma_domain = iommu_alloc_dma_domain(); |
| 421 | if (!dma_domain) { | 427 | if (!dma_domain) { |
| 422 | pr_debug("dma_domain allocation failed\n"); | 428 | pr_debug("dma_domain allocation failed\n"); |
| 423 | return -ENOMEM; | 429 | return NULL; |
| 424 | } | 430 | } |
| 425 | domain->priv = dma_domain; | ||
| 426 | dma_domain->iommu_domain = domain; | ||
| 427 | /* defaul geometry 64 GB i.e. maximum system address */ | 431 | /* defaul geometry 64 GB i.e. maximum system address */ |
| 428 | domain->geometry.aperture_start = 0; | 432 | dma_domain->iommu_domain. geometry.aperture_start = 0; |
| 429 | domain->geometry.aperture_end = (1ULL << 36) - 1; | 433 | dma_domain->iommu_domain.geometry.aperture_end = (1ULL << 36) - 1; |
| 430 | domain->geometry.force_aperture = true; | 434 | dma_domain->iommu_domain.geometry.force_aperture = true; |
| 431 | 435 | ||
| 432 | return 0; | 436 | return &dma_domain->iommu_domain; |
| 433 | } | 437 | } |
| 434 | 438 | ||
| 435 | /* Configure geometry settings for all LIODNs associated with domain */ | 439 | /* Configure geometry settings for all LIODNs associated with domain */ |
| @@ -499,7 +503,7 @@ static int disable_domain_win(struct fsl_dma_domain *dma_domain, u32 wnd_nr) | |||
| 499 | 503 | ||
| 500 | static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr) | 504 | static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr) |
| 501 | { | 505 | { |
| 502 | struct fsl_dma_domain *dma_domain = domain->priv; | 506 | struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); |
| 503 | unsigned long flags; | 507 | unsigned long flags; |
| 504 | int ret; | 508 | int ret; |
| 505 | 509 | ||
| @@ -530,7 +534,7 @@ static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr) | |||
| 530 | static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr, | 534 | static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr, |
| 531 | phys_addr_t paddr, u64 size, int prot) | 535 | phys_addr_t paddr, u64 size, int prot) |
| 532 | { | 536 | { |
| 533 | struct fsl_dma_domain *dma_domain = domain->priv; | 537 | struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); |
| 534 | struct dma_window *wnd; | 538 | struct dma_window *wnd; |
| 535 | int pamu_prot = 0; | 539 | int pamu_prot = 0; |
| 536 | int ret; | 540 | int ret; |
| @@ -607,7 +611,7 @@ static int handle_attach_device(struct fsl_dma_domain *dma_domain, | |||
| 607 | int num) | 611 | int num) |
| 608 | { | 612 | { |
| 609 | unsigned long flags; | 613 | unsigned long flags; |
| 610 | struct iommu_domain *domain = dma_domain->iommu_domain; | 614 | struct iommu_domain *domain = &dma_domain->iommu_domain; |
| 611 | int ret = 0; | 615 | int ret = 0; |
| 612 | int i; | 616 | int i; |
| 613 | 617 | ||
| @@ -653,7 +657,7 @@ static int handle_attach_device(struct fsl_dma_domain *dma_domain, | |||
| 653 | static int fsl_pamu_attach_device(struct iommu_domain *domain, | 657 | static int fsl_pamu_attach_device(struct iommu_domain *domain, |
| 654 | struct device *dev) | 658 | struct device *dev) |
| 655 | { | 659 | { |
| 656 | struct fsl_dma_domain *dma_domain = domain->priv; | 660 | struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); |
| 657 | const u32 *liodn; | 661 | const u32 *liodn; |
| 658 | u32 liodn_cnt; | 662 | u32 liodn_cnt; |
| 659 | int len, ret = 0; | 663 | int len, ret = 0; |
| @@ -691,7 +695,7 @@ static int fsl_pamu_attach_device(struct iommu_domain *domain, | |||
| 691 | static void fsl_pamu_detach_device(struct iommu_domain *domain, | 695 | static void fsl_pamu_detach_device(struct iommu_domain *domain, |
| 692 | struct device *dev) | 696 | struct device *dev) |
| 693 | { | 697 | { |
| 694 | struct fsl_dma_domain *dma_domain = domain->priv; | 698 | struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); |
| 695 | const u32 *prop; | 699 | const u32 *prop; |
| 696 | int len; | 700 | int len; |
| 697 | struct pci_dev *pdev = NULL; | 701 | struct pci_dev *pdev = NULL; |
| @@ -723,7 +727,7 @@ static void fsl_pamu_detach_device(struct iommu_domain *domain, | |||
| 723 | static int configure_domain_geometry(struct iommu_domain *domain, void *data) | 727 | static int configure_domain_geometry(struct iommu_domain *domain, void *data) |
| 724 | { | 728 | { |
| 725 | struct iommu_domain_geometry *geom_attr = data; | 729 | struct iommu_domain_geometry *geom_attr = data; |
| 726 | struct fsl_dma_domain *dma_domain = domain->priv; | 730 | struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); |
| 727 | dma_addr_t geom_size; | 731 | dma_addr_t geom_size; |
| 728 | unsigned long flags; | 732 | unsigned long flags; |
| 729 | 733 | ||
| @@ -813,7 +817,7 @@ static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool en | |||
| 813 | static int fsl_pamu_set_domain_attr(struct iommu_domain *domain, | 817 | static int fsl_pamu_set_domain_attr(struct iommu_domain *domain, |
| 814 | enum iommu_attr attr_type, void *data) | 818 | enum iommu_attr attr_type, void *data) |
| 815 | { | 819 | { |
| 816 | struct fsl_dma_domain *dma_domain = domain->priv; | 820 | struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); |
| 817 | int ret = 0; | 821 | int ret = 0; |
| 818 | 822 | ||
| 819 | switch (attr_type) { | 823 | switch (attr_type) { |
| @@ -838,7 +842,7 @@ static int fsl_pamu_set_domain_attr(struct iommu_domain *domain, | |||
| 838 | static int fsl_pamu_get_domain_attr(struct iommu_domain *domain, | 842 | static int fsl_pamu_get_domain_attr(struct iommu_domain *domain, |
| 839 | enum iommu_attr attr_type, void *data) | 843 | enum iommu_attr attr_type, void *data) |
| 840 | { | 844 | { |
| 841 | struct fsl_dma_domain *dma_domain = domain->priv; | 845 | struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); |
| 842 | int ret = 0; | 846 | int ret = 0; |
| 843 | 847 | ||
| 844 | switch (attr_type) { | 848 | switch (attr_type) { |
| @@ -999,7 +1003,7 @@ static void fsl_pamu_remove_device(struct device *dev) | |||
| 999 | 1003 | ||
| 1000 | static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count) | 1004 | static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count) |
| 1001 | { | 1005 | { |
| 1002 | struct fsl_dma_domain *dma_domain = domain->priv; | 1006 | struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); |
| 1003 | unsigned long flags; | 1007 | unsigned long flags; |
| 1004 | int ret; | 1008 | int ret; |
| 1005 | 1009 | ||
| @@ -1048,15 +1052,15 @@ static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count) | |||
| 1048 | 1052 | ||
| 1049 | static u32 fsl_pamu_get_windows(struct iommu_domain *domain) | 1053 | static u32 fsl_pamu_get_windows(struct iommu_domain *domain) |
| 1050 | { | 1054 | { |
| 1051 | struct fsl_dma_domain *dma_domain = domain->priv; | 1055 | struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); |
| 1052 | 1056 | ||
| 1053 | return dma_domain->win_cnt; | 1057 | return dma_domain->win_cnt; |
| 1054 | } | 1058 | } |
| 1055 | 1059 | ||
| 1056 | static const struct iommu_ops fsl_pamu_ops = { | 1060 | static const struct iommu_ops fsl_pamu_ops = { |
| 1057 | .capable = fsl_pamu_capable, | 1061 | .capable = fsl_pamu_capable, |
| 1058 | .domain_init = fsl_pamu_domain_init, | 1062 | .domain_alloc = fsl_pamu_domain_alloc, |
| 1059 | .domain_destroy = fsl_pamu_domain_destroy, | 1063 | .domain_free = fsl_pamu_domain_free, |
| 1060 | .attach_dev = fsl_pamu_attach_device, | 1064 | .attach_dev = fsl_pamu_attach_device, |
| 1061 | .detach_dev = fsl_pamu_detach_device, | 1065 | .detach_dev = fsl_pamu_detach_device, |
| 1062 | .domain_window_enable = fsl_pamu_window_enable, | 1066 | .domain_window_enable = fsl_pamu_window_enable, |
diff --git a/drivers/iommu/fsl_pamu_domain.h b/drivers/iommu/fsl_pamu_domain.h index c90293f99709..f2b0f741d3de 100644 --- a/drivers/iommu/fsl_pamu_domain.h +++ b/drivers/iommu/fsl_pamu_domain.h | |||
| @@ -71,7 +71,7 @@ struct fsl_dma_domain { | |||
| 71 | u32 stash_id; | 71 | u32 stash_id; |
| 72 | struct pamu_stash_attribute dma_stash; | 72 | struct pamu_stash_attribute dma_stash; |
| 73 | u32 snoop_id; | 73 | u32 snoop_id; |
| 74 | struct iommu_domain *iommu_domain; | 74 | struct iommu_domain iommu_domain; |
| 75 | spinlock_t domain_lock; | 75 | spinlock_t domain_lock; |
| 76 | }; | 76 | }; |
| 77 | 77 | ||
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 4fc1f8a7f98e..a35927cd42e5 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
| @@ -339,7 +339,7 @@ struct dmar_domain { | |||
| 339 | DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED); | 339 | DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED); |
| 340 | /* bitmap of iommus this domain uses*/ | 340 | /* bitmap of iommus this domain uses*/ |
| 341 | 341 | ||
| 342 | struct list_head devices; /* all devices' list */ | 342 | struct list_head devices; /* all devices' list */ |
| 343 | struct iova_domain iovad; /* iova's that belong to this domain */ | 343 | struct iova_domain iovad; /* iova's that belong to this domain */ |
| 344 | 344 | ||
| 345 | struct dma_pte *pgd; /* virtual address */ | 345 | struct dma_pte *pgd; /* virtual address */ |
| @@ -358,6 +358,9 @@ struct dmar_domain { | |||
| 358 | 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */ | 358 | 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */ |
| 359 | spinlock_t iommu_lock; /* protect iommu set in domain */ | 359 | spinlock_t iommu_lock; /* protect iommu set in domain */ |
| 360 | u64 max_addr; /* maximum mapped address */ | 360 | u64 max_addr; /* maximum mapped address */ |
| 361 | |||
| 362 | struct iommu_domain domain; /* generic domain data structure for | ||
| 363 | iommu core */ | ||
| 361 | }; | 364 | }; |
| 362 | 365 | ||
| 363 | /* PCI domain-device relationship */ | 366 | /* PCI domain-device relationship */ |
| @@ -449,6 +452,12 @@ static LIST_HEAD(device_domain_list); | |||
| 449 | 452 | ||
| 450 | static const struct iommu_ops intel_iommu_ops; | 453 | static const struct iommu_ops intel_iommu_ops; |
| 451 | 454 | ||
| 455 | /* Convert generic 'struct iommu_domain to private struct dmar_domain */ | ||
| 456 | static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom) | ||
| 457 | { | ||
| 458 | return container_of(dom, struct dmar_domain, domain); | ||
| 459 | } | ||
| 460 | |||
| 452 | static int __init intel_iommu_setup(char *str) | 461 | static int __init intel_iommu_setup(char *str) |
| 453 | { | 462 | { |
| 454 | if (!str) | 463 | if (!str) |
| @@ -595,12 +604,13 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain) | |||
| 595 | { | 604 | { |
| 596 | struct dmar_drhd_unit *drhd; | 605 | struct dmar_drhd_unit *drhd; |
| 597 | struct intel_iommu *iommu; | 606 | struct intel_iommu *iommu; |
| 598 | int i, found = 0; | 607 | bool found = false; |
| 608 | int i; | ||
| 599 | 609 | ||
| 600 | domain->iommu_coherency = 1; | 610 | domain->iommu_coherency = 1; |
| 601 | 611 | ||
| 602 | for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) { | 612 | for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) { |
| 603 | found = 1; | 613 | found = true; |
| 604 | if (!ecap_coherent(g_iommus[i]->ecap)) { | 614 | if (!ecap_coherent(g_iommus[i]->ecap)) { |
| 605 | domain->iommu_coherency = 0; | 615 | domain->iommu_coherency = 0; |
| 606 | break; | 616 | break; |
| @@ -1267,7 +1277,7 @@ static struct device_domain_info * | |||
| 1267 | iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu, | 1277 | iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu, |
| 1268 | u8 bus, u8 devfn) | 1278 | u8 bus, u8 devfn) |
| 1269 | { | 1279 | { |
| 1270 | int found = 0; | 1280 | bool found = false; |
| 1271 | unsigned long flags; | 1281 | unsigned long flags; |
| 1272 | struct device_domain_info *info; | 1282 | struct device_domain_info *info; |
| 1273 | struct pci_dev *pdev; | 1283 | struct pci_dev *pdev; |
| @@ -1282,7 +1292,7 @@ iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu, | |||
| 1282 | list_for_each_entry(info, &domain->devices, link) | 1292 | list_for_each_entry(info, &domain->devices, link) |
| 1283 | if (info->iommu == iommu && info->bus == bus && | 1293 | if (info->iommu == iommu && info->bus == bus && |
| 1284 | info->devfn == devfn) { | 1294 | info->devfn == devfn) { |
| 1285 | found = 1; | 1295 | found = true; |
| 1286 | break; | 1296 | break; |
| 1287 | } | 1297 | } |
| 1288 | spin_unlock_irqrestore(&device_domain_lock, flags); | 1298 | spin_unlock_irqrestore(&device_domain_lock, flags); |
| @@ -4269,7 +4279,7 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain, | |||
| 4269 | struct device_domain_info *info, *tmp; | 4279 | struct device_domain_info *info, *tmp; |
| 4270 | struct intel_iommu *iommu; | 4280 | struct intel_iommu *iommu; |
| 4271 | unsigned long flags; | 4281 | unsigned long flags; |
| 4272 | int found = 0; | 4282 | bool found = false; |
| 4273 | u8 bus, devfn; | 4283 | u8 bus, devfn; |
| 4274 | 4284 | ||
| 4275 | iommu = device_to_iommu(dev, &bus, &devfn); | 4285 | iommu = device_to_iommu(dev, &bus, &devfn); |
| @@ -4301,7 +4311,7 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain, | |||
| 4301 | * update iommu count and coherency | 4311 | * update iommu count and coherency |
| 4302 | */ | 4312 | */ |
| 4303 | if (info->iommu == iommu) | 4313 | if (info->iommu == iommu) |
| 4304 | found = 1; | 4314 | found = true; |
| 4305 | } | 4315 | } |
| 4306 | 4316 | ||
| 4307 | spin_unlock_irqrestore(&device_domain_lock, flags); | 4317 | spin_unlock_irqrestore(&device_domain_lock, flags); |
| @@ -4339,44 +4349,45 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width) | |||
| 4339 | return 0; | 4349 | return 0; |
| 4340 | } | 4350 | } |
| 4341 | 4351 | ||
| 4342 | static int intel_iommu_domain_init(struct iommu_domain *domain) | 4352 | static struct iommu_domain *intel_iommu_domain_alloc(unsigned type) |
| 4343 | { | 4353 | { |
| 4344 | struct dmar_domain *dmar_domain; | 4354 | struct dmar_domain *dmar_domain; |
| 4355 | struct iommu_domain *domain; | ||
| 4356 | |||
| 4357 | if (type != IOMMU_DOMAIN_UNMANAGED) | ||
| 4358 | return NULL; | ||
| 4345 | 4359 | ||
| 4346 | dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE); | 4360 | dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE); |
| 4347 | if (!dmar_domain) { | 4361 | if (!dmar_domain) { |
| 4348 | printk(KERN_ERR | 4362 | printk(KERN_ERR |
| 4349 | "intel_iommu_domain_init: dmar_domain == NULL\n"); | 4363 | "intel_iommu_domain_init: dmar_domain == NULL\n"); |
| 4350 | return -ENOMEM; | 4364 | return NULL; |
| 4351 | } | 4365 | } |
| 4352 | if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { | 4366 | if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { |
| 4353 | printk(KERN_ERR | 4367 | printk(KERN_ERR |
| 4354 | "intel_iommu_domain_init() failed\n"); | 4368 | "intel_iommu_domain_init() failed\n"); |
| 4355 | domain_exit(dmar_domain); | 4369 | domain_exit(dmar_domain); |
| 4356 | return -ENOMEM; | 4370 | return NULL; |
| 4357 | } | 4371 | } |
| 4358 | domain_update_iommu_cap(dmar_domain); | 4372 | domain_update_iommu_cap(dmar_domain); |
| 4359 | domain->priv = dmar_domain; | ||
| 4360 | 4373 | ||
| 4374 | domain = &dmar_domain->domain; | ||
| 4361 | domain->geometry.aperture_start = 0; | 4375 | domain->geometry.aperture_start = 0; |
| 4362 | domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw); | 4376 | domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw); |
| 4363 | domain->geometry.force_aperture = true; | 4377 | domain->geometry.force_aperture = true; |
| 4364 | 4378 | ||
| 4365 | return 0; | 4379 | return domain; |
| 4366 | } | 4380 | } |
| 4367 | 4381 | ||
| 4368 | static void intel_iommu_domain_destroy(struct iommu_domain *domain) | 4382 | static void intel_iommu_domain_free(struct iommu_domain *domain) |
| 4369 | { | 4383 | { |
| 4370 | struct dmar_domain *dmar_domain = domain->priv; | 4384 | domain_exit(to_dmar_domain(domain)); |
| 4371 | |||
| 4372 | domain->priv = NULL; | ||
| 4373 | domain_exit(dmar_domain); | ||
| 4374 | } | 4385 | } |
| 4375 | 4386 | ||
| 4376 | static int intel_iommu_attach_device(struct iommu_domain *domain, | 4387 | static int intel_iommu_attach_device(struct iommu_domain *domain, |
| 4377 | struct device *dev) | 4388 | struct device *dev) |
| 4378 | { | 4389 | { |
| 4379 | struct dmar_domain *dmar_domain = domain->priv; | 4390 | struct dmar_domain *dmar_domain = to_dmar_domain(domain); |
| 4380 | struct intel_iommu *iommu; | 4391 | struct intel_iommu *iommu; |
| 4381 | int addr_width; | 4392 | int addr_width; |
| 4382 | u8 bus, devfn; | 4393 | u8 bus, devfn; |
| @@ -4441,16 +4452,14 @@ static int intel_iommu_attach_device(struct iommu_domain *domain, | |||
| 4441 | static void intel_iommu_detach_device(struct iommu_domain *domain, | 4452 | static void intel_iommu_detach_device(struct iommu_domain *domain, |
| 4442 | struct device *dev) | 4453 | struct device *dev) |
| 4443 | { | 4454 | { |
| 4444 | struct dmar_domain *dmar_domain = domain->priv; | 4455 | domain_remove_one_dev_info(to_dmar_domain(domain), dev); |
| 4445 | |||
| 4446 | domain_remove_one_dev_info(dmar_domain, dev); | ||
| 4447 | } | 4456 | } |
| 4448 | 4457 | ||
| 4449 | static int intel_iommu_map(struct iommu_domain *domain, | 4458 | static int intel_iommu_map(struct iommu_domain *domain, |
| 4450 | unsigned long iova, phys_addr_t hpa, | 4459 | unsigned long iova, phys_addr_t hpa, |
| 4451 | size_t size, int iommu_prot) | 4460 | size_t size, int iommu_prot) |
| 4452 | { | 4461 | { |
| 4453 | struct dmar_domain *dmar_domain = domain->priv; | 4462 | struct dmar_domain *dmar_domain = to_dmar_domain(domain); |
| 4454 | u64 max_addr; | 4463 | u64 max_addr; |
| 4455 | int prot = 0; | 4464 | int prot = 0; |
| 4456 | int ret; | 4465 | int ret; |
| @@ -4487,7 +4496,7 @@ static int intel_iommu_map(struct iommu_domain *domain, | |||
| 4487 | static size_t intel_iommu_unmap(struct iommu_domain *domain, | 4496 | static size_t intel_iommu_unmap(struct iommu_domain *domain, |
| 4488 | unsigned long iova, size_t size) | 4497 | unsigned long iova, size_t size) |
| 4489 | { | 4498 | { |
| 4490 | struct dmar_domain *dmar_domain = domain->priv; | 4499 | struct dmar_domain *dmar_domain = to_dmar_domain(domain); |
| 4491 | struct page *freelist = NULL; | 4500 | struct page *freelist = NULL; |
| 4492 | struct intel_iommu *iommu; | 4501 | struct intel_iommu *iommu; |
| 4493 | unsigned long start_pfn, last_pfn; | 4502 | unsigned long start_pfn, last_pfn; |
| @@ -4535,7 +4544,7 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain, | |||
| 4535 | static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, | 4544 | static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, |
| 4536 | dma_addr_t iova) | 4545 | dma_addr_t iova) |
| 4537 | { | 4546 | { |
| 4538 | struct dmar_domain *dmar_domain = domain->priv; | 4547 | struct dmar_domain *dmar_domain = to_dmar_domain(domain); |
| 4539 | struct dma_pte *pte; | 4548 | struct dma_pte *pte; |
| 4540 | int level = 0; | 4549 | int level = 0; |
| 4541 | u64 phys = 0; | 4550 | u64 phys = 0; |
| @@ -4594,8 +4603,8 @@ static void intel_iommu_remove_device(struct device *dev) | |||
| 4594 | 4603 | ||
| 4595 | static const struct iommu_ops intel_iommu_ops = { | 4604 | static const struct iommu_ops intel_iommu_ops = { |
| 4596 | .capable = intel_iommu_capable, | 4605 | .capable = intel_iommu_capable, |
| 4597 | .domain_init = intel_iommu_domain_init, | 4606 | .domain_alloc = intel_iommu_domain_alloc, |
| 4598 | .domain_destroy = intel_iommu_domain_destroy, | 4607 | .domain_free = intel_iommu_domain_free, |
| 4599 | .attach_dev = intel_iommu_attach_device, | 4608 | .attach_dev = intel_iommu_attach_device, |
| 4600 | .detach_dev = intel_iommu_detach_device, | 4609 | .detach_dev = intel_iommu_detach_device, |
| 4601 | .map = intel_iommu_map, | 4610 | .map = intel_iommu_map, |
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c index 14de1ab223c8..6c25b3c5b729 100644 --- a/drivers/iommu/intel_irq_remapping.c +++ b/drivers/iommu/intel_irq_remapping.c | |||
| @@ -631,7 +631,7 @@ static int __init intel_enable_irq_remapping(void) | |||
| 631 | { | 631 | { |
| 632 | struct dmar_drhd_unit *drhd; | 632 | struct dmar_drhd_unit *drhd; |
| 633 | struct intel_iommu *iommu; | 633 | struct intel_iommu *iommu; |
| 634 | int setup = 0; | 634 | bool setup = false; |
| 635 | int eim = 0; | 635 | int eim = 0; |
| 636 | 636 | ||
| 637 | if (x2apic_supported()) { | 637 | if (x2apic_supported()) { |
| @@ -697,7 +697,7 @@ static int __init intel_enable_irq_remapping(void) | |||
| 697 | */ | 697 | */ |
| 698 | for_each_iommu(iommu, drhd) { | 698 | for_each_iommu(iommu, drhd) { |
| 699 | iommu_set_irq_remapping(iommu, eim); | 699 | iommu_set_irq_remapping(iommu, eim); |
| 700 | setup = 1; | 700 | setup = true; |
| 701 | } | 701 | } |
| 702 | 702 | ||
| 703 | if (!setup) | 703 | if (!setup) |
| @@ -856,7 +856,7 @@ static int __init parse_ioapics_under_ir(void) | |||
| 856 | { | 856 | { |
| 857 | struct dmar_drhd_unit *drhd; | 857 | struct dmar_drhd_unit *drhd; |
| 858 | struct intel_iommu *iommu; | 858 | struct intel_iommu *iommu; |
| 859 | int ir_supported = 0; | 859 | bool ir_supported = false; |
| 860 | int ioapic_idx; | 860 | int ioapic_idx; |
| 861 | 861 | ||
| 862 | for_each_iommu(iommu, drhd) | 862 | for_each_iommu(iommu, drhd) |
| @@ -864,7 +864,7 @@ static int __init parse_ioapics_under_ir(void) | |||
| 864 | if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu)) | 864 | if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu)) |
| 865 | return -1; | 865 | return -1; |
| 866 | 866 | ||
| 867 | ir_supported = 1; | 867 | ir_supported = true; |
| 868 | } | 868 | } |
| 869 | 869 | ||
| 870 | if (!ir_supported) | 870 | if (!ir_supported) |
| @@ -917,7 +917,7 @@ static void disable_irq_remapping(void) | |||
| 917 | static int reenable_irq_remapping(int eim) | 917 | static int reenable_irq_remapping(int eim) |
| 918 | { | 918 | { |
| 919 | struct dmar_drhd_unit *drhd; | 919 | struct dmar_drhd_unit *drhd; |
| 920 | int setup = 0; | 920 | bool setup = false; |
| 921 | struct intel_iommu *iommu = NULL; | 921 | struct intel_iommu *iommu = NULL; |
| 922 | 922 | ||
| 923 | for_each_iommu(iommu, drhd) | 923 | for_each_iommu(iommu, drhd) |
| @@ -933,7 +933,7 @@ static int reenable_irq_remapping(int eim) | |||
| 933 | 933 | ||
| 934 | /* Set up interrupt remapping for iommu.*/ | 934 | /* Set up interrupt remapping for iommu.*/ |
| 935 | iommu_set_irq_remapping(iommu, eim); | 935 | iommu_set_irq_remapping(iommu, eim); |
| 936 | setup = 1; | 936 | setup = true; |
| 937 | } | 937 | } |
| 938 | 938 | ||
| 939 | if (!setup) | 939 | if (!setup) |
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index b610a8dee238..4e460216bd16 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c | |||
| @@ -116,6 +116,8 @@ | |||
| 116 | #define ARM_32_LPAE_TCR_EAE (1 << 31) | 116 | #define ARM_32_LPAE_TCR_EAE (1 << 31) |
| 117 | #define ARM_64_LPAE_S2_TCR_RES1 (1 << 31) | 117 | #define ARM_64_LPAE_S2_TCR_RES1 (1 << 31) |
| 118 | 118 | ||
| 119 | #define ARM_LPAE_TCR_EPD1 (1 << 23) | ||
| 120 | |||
| 119 | #define ARM_LPAE_TCR_TG0_4K (0 << 14) | 121 | #define ARM_LPAE_TCR_TG0_4K (0 << 14) |
| 120 | #define ARM_LPAE_TCR_TG0_64K (1 << 14) | 122 | #define ARM_LPAE_TCR_TG0_64K (1 << 14) |
| 121 | #define ARM_LPAE_TCR_TG0_16K (2 << 14) | 123 | #define ARM_LPAE_TCR_TG0_16K (2 << 14) |
| @@ -621,6 +623,9 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie) | |||
| 621 | } | 623 | } |
| 622 | 624 | ||
| 623 | reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT; | 625 | reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT; |
| 626 | |||
| 627 | /* Disable speculative walks through TTBR1 */ | ||
| 628 | reg |= ARM_LPAE_TCR_EPD1; | ||
| 624 | cfg->arm_lpae_s1_cfg.tcr = reg; | 629 | cfg->arm_lpae_s1_cfg.tcr = reg; |
| 625 | 630 | ||
| 626 | /* MAIRs */ | 631 | /* MAIRs */ |
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 72e683df0731..d4f527e56679 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c | |||
| @@ -901,36 +901,24 @@ EXPORT_SYMBOL_GPL(iommu_set_fault_handler); | |||
| 901 | struct iommu_domain *iommu_domain_alloc(struct bus_type *bus) | 901 | struct iommu_domain *iommu_domain_alloc(struct bus_type *bus) |
| 902 | { | 902 | { |
| 903 | struct iommu_domain *domain; | 903 | struct iommu_domain *domain; |
| 904 | int ret; | ||
| 905 | 904 | ||
| 906 | if (bus == NULL || bus->iommu_ops == NULL) | 905 | if (bus == NULL || bus->iommu_ops == NULL) |
| 907 | return NULL; | 906 | return NULL; |
| 908 | 907 | ||
| 909 | domain = kzalloc(sizeof(*domain), GFP_KERNEL); | 908 | domain = bus->iommu_ops->domain_alloc(IOMMU_DOMAIN_UNMANAGED); |
| 910 | if (!domain) | 909 | if (!domain) |
| 911 | return NULL; | 910 | return NULL; |
| 912 | 911 | ||
| 913 | domain->ops = bus->iommu_ops; | 912 | domain->ops = bus->iommu_ops; |
| 914 | 913 | domain->type = IOMMU_DOMAIN_UNMANAGED; | |
| 915 | ret = domain->ops->domain_init(domain); | ||
| 916 | if (ret) | ||
| 917 | goto out_free; | ||
| 918 | 914 | ||
| 919 | return domain; | 915 | return domain; |
| 920 | |||
| 921 | out_free: | ||
| 922 | kfree(domain); | ||
| 923 | |||
| 924 | return NULL; | ||
| 925 | } | 916 | } |
| 926 | EXPORT_SYMBOL_GPL(iommu_domain_alloc); | 917 | EXPORT_SYMBOL_GPL(iommu_domain_alloc); |
| 927 | 918 | ||
| 928 | void iommu_domain_free(struct iommu_domain *domain) | 919 | void iommu_domain_free(struct iommu_domain *domain) |
| 929 | { | 920 | { |
| 930 | if (likely(domain->ops->domain_destroy != NULL)) | 921 | domain->ops->domain_free(domain); |
| 931 | domain->ops->domain_destroy(domain); | ||
| 932 | |||
| 933 | kfree(domain); | ||
| 934 | } | 922 | } |
| 935 | EXPORT_SYMBOL_GPL(iommu_domain_free); | 923 | EXPORT_SYMBOL_GPL(iommu_domain_free); |
| 936 | 924 | ||
| @@ -1049,6 +1037,9 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova, | |||
| 1049 | domain->ops->pgsize_bitmap == 0UL)) | 1037 | domain->ops->pgsize_bitmap == 0UL)) |
| 1050 | return -ENODEV; | 1038 | return -ENODEV; |
| 1051 | 1039 | ||
| 1040 | if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) | ||
| 1041 | return -EINVAL; | ||
| 1042 | |||
| 1052 | /* find out the minimum page size supported */ | 1043 | /* find out the minimum page size supported */ |
| 1053 | min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap); | 1044 | min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap); |
| 1054 | 1045 | ||
| @@ -1100,6 +1091,9 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size) | |||
| 1100 | domain->ops->pgsize_bitmap == 0UL)) | 1091 | domain->ops->pgsize_bitmap == 0UL)) |
| 1101 | return -ENODEV; | 1092 | return -ENODEV; |
| 1102 | 1093 | ||
| 1094 | if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) | ||
| 1095 | return -EINVAL; | ||
| 1096 | |||
| 1103 | /* find out the minimum page size supported */ | 1097 | /* find out the minimum page size supported */ |
| 1104 | min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap); | 1098 | min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap); |
| 1105 | 1099 | ||
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index bc39bdf7b99b..1a67c531a07e 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c | |||
| @@ -38,7 +38,7 @@ struct ipmmu_vmsa_device { | |||
| 38 | 38 | ||
| 39 | struct ipmmu_vmsa_domain { | 39 | struct ipmmu_vmsa_domain { |
| 40 | struct ipmmu_vmsa_device *mmu; | 40 | struct ipmmu_vmsa_device *mmu; |
| 41 | struct iommu_domain *io_domain; | 41 | struct iommu_domain io_domain; |
| 42 | 42 | ||
| 43 | struct io_pgtable_cfg cfg; | 43 | struct io_pgtable_cfg cfg; |
| 44 | struct io_pgtable_ops *iop; | 44 | struct io_pgtable_ops *iop; |
| @@ -56,6 +56,11 @@ struct ipmmu_vmsa_archdata { | |||
| 56 | static DEFINE_SPINLOCK(ipmmu_devices_lock); | 56 | static DEFINE_SPINLOCK(ipmmu_devices_lock); |
| 57 | static LIST_HEAD(ipmmu_devices); | 57 | static LIST_HEAD(ipmmu_devices); |
| 58 | 58 | ||
| 59 | static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom) | ||
| 60 | { | ||
| 61 | return container_of(dom, struct ipmmu_vmsa_domain, io_domain); | ||
| 62 | } | ||
| 63 | |||
| 59 | #define TLB_LOOP_TIMEOUT 100 /* 100us */ | 64 | #define TLB_LOOP_TIMEOUT 100 /* 100us */ |
| 60 | 65 | ||
| 61 | /* ----------------------------------------------------------------------------- | 66 | /* ----------------------------------------------------------------------------- |
| @@ -428,7 +433,7 @@ static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain) | |||
| 428 | * TODO: We need to look up the faulty device based on the I/O VA. Use | 433 | * TODO: We need to look up the faulty device based on the I/O VA. Use |
| 429 | * the IOMMU device for now. | 434 | * the IOMMU device for now. |
| 430 | */ | 435 | */ |
| 431 | if (!report_iommu_fault(domain->io_domain, mmu->dev, iova, 0)) | 436 | if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0)) |
| 432 | return IRQ_HANDLED; | 437 | return IRQ_HANDLED; |
| 433 | 438 | ||
| 434 | dev_err_ratelimited(mmu->dev, | 439 | dev_err_ratelimited(mmu->dev, |
| @@ -448,7 +453,7 @@ static irqreturn_t ipmmu_irq(int irq, void *dev) | |||
| 448 | return IRQ_NONE; | 453 | return IRQ_NONE; |
| 449 | 454 | ||
| 450 | io_domain = mmu->mapping->domain; | 455 | io_domain = mmu->mapping->domain; |
| 451 | domain = io_domain->priv; | 456 | domain = to_vmsa_domain(io_domain); |
| 452 | 457 | ||
| 453 | return ipmmu_domain_irq(domain); | 458 | return ipmmu_domain_irq(domain); |
| 454 | } | 459 | } |
| @@ -457,25 +462,25 @@ static irqreturn_t ipmmu_irq(int irq, void *dev) | |||
| 457 | * IOMMU Operations | 462 | * IOMMU Operations |
| 458 | */ | 463 | */ |
| 459 | 464 | ||
| 460 | static int ipmmu_domain_init(struct iommu_domain *io_domain) | 465 | static struct iommu_domain *ipmmu_domain_alloc(unsigned type) |
| 461 | { | 466 | { |
| 462 | struct ipmmu_vmsa_domain *domain; | 467 | struct ipmmu_vmsa_domain *domain; |
| 463 | 468 | ||
| 469 | if (type != IOMMU_DOMAIN_UNMANAGED) | ||
| 470 | return NULL; | ||
| 471 | |||
| 464 | domain = kzalloc(sizeof(*domain), GFP_KERNEL); | 472 | domain = kzalloc(sizeof(*domain), GFP_KERNEL); |
| 465 | if (!domain) | 473 | if (!domain) |
| 466 | return -ENOMEM; | 474 | return NULL; |
| 467 | 475 | ||
| 468 | spin_lock_init(&domain->lock); | 476 | spin_lock_init(&domain->lock); |
| 469 | 477 | ||
| 470 | io_domain->priv = domain; | 478 | return &domain->io_domain; |
| 471 | domain->io_domain = io_domain; | ||
| 472 | |||
| 473 | return 0; | ||
| 474 | } | 479 | } |
| 475 | 480 | ||
| 476 | static void ipmmu_domain_destroy(struct iommu_domain *io_domain) | 481 | static void ipmmu_domain_free(struct iommu_domain *io_domain) |
| 477 | { | 482 | { |
| 478 | struct ipmmu_vmsa_domain *domain = io_domain->priv; | 483 | struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); |
| 479 | 484 | ||
| 480 | /* | 485 | /* |
| 481 | * Free the domain resources. We assume that all devices have already | 486 | * Free the domain resources. We assume that all devices have already |
| @@ -491,7 +496,7 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain, | |||
| 491 | { | 496 | { |
| 492 | struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu; | 497 | struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu; |
| 493 | struct ipmmu_vmsa_device *mmu = archdata->mmu; | 498 | struct ipmmu_vmsa_device *mmu = archdata->mmu; |
| 494 | struct ipmmu_vmsa_domain *domain = io_domain->priv; | 499 | struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); |
| 495 | unsigned long flags; | 500 | unsigned long flags; |
| 496 | unsigned int i; | 501 | unsigned int i; |
| 497 | int ret = 0; | 502 | int ret = 0; |
| @@ -532,7 +537,7 @@ static void ipmmu_detach_device(struct iommu_domain *io_domain, | |||
| 532 | struct device *dev) | 537 | struct device *dev) |
| 533 | { | 538 | { |
| 534 | struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu; | 539 | struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu; |
| 535 | struct ipmmu_vmsa_domain *domain = io_domain->priv; | 540 | struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); |
| 536 | unsigned int i; | 541 | unsigned int i; |
| 537 | 542 | ||
| 538 | for (i = 0; i < archdata->num_utlbs; ++i) | 543 | for (i = 0; i < archdata->num_utlbs; ++i) |
| @@ -546,7 +551,7 @@ static void ipmmu_detach_device(struct iommu_domain *io_domain, | |||
| 546 | static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova, | 551 | static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova, |
| 547 | phys_addr_t paddr, size_t size, int prot) | 552 | phys_addr_t paddr, size_t size, int prot) |
| 548 | { | 553 | { |
| 549 | struct ipmmu_vmsa_domain *domain = io_domain->priv; | 554 | struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); |
| 550 | 555 | ||
| 551 | if (!domain) | 556 | if (!domain) |
| 552 | return -ENODEV; | 557 | return -ENODEV; |
| @@ -557,7 +562,7 @@ static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova, | |||
| 557 | static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova, | 562 | static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova, |
| 558 | size_t size) | 563 | size_t size) |
| 559 | { | 564 | { |
| 560 | struct ipmmu_vmsa_domain *domain = io_domain->priv; | 565 | struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); |
| 561 | 566 | ||
| 562 | return domain->iop->unmap(domain->iop, iova, size); | 567 | return domain->iop->unmap(domain->iop, iova, size); |
| 563 | } | 568 | } |
| @@ -565,7 +570,7 @@ static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova, | |||
| 565 | static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain, | 570 | static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain, |
| 566 | dma_addr_t iova) | 571 | dma_addr_t iova) |
| 567 | { | 572 | { |
| 568 | struct ipmmu_vmsa_domain *domain = io_domain->priv; | 573 | struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); |
| 569 | 574 | ||
| 570 | /* TODO: Is locking needed ? */ | 575 | /* TODO: Is locking needed ? */ |
| 571 | 576 | ||
| @@ -737,8 +742,8 @@ static void ipmmu_remove_device(struct device *dev) | |||
| 737 | } | 742 | } |
| 738 | 743 | ||
| 739 | static const struct iommu_ops ipmmu_ops = { | 744 | static const struct iommu_ops ipmmu_ops = { |
| 740 | .domain_init = ipmmu_domain_init, | 745 | .domain_alloc = ipmmu_domain_alloc, |
| 741 | .domain_destroy = ipmmu_domain_destroy, | 746 | .domain_free = ipmmu_domain_free, |
| 742 | .attach_dev = ipmmu_attach_device, | 747 | .attach_dev = ipmmu_attach_device, |
| 743 | .detach_dev = ipmmu_detach_device, | 748 | .detach_dev = ipmmu_detach_device, |
| 744 | .map = ipmmu_map, | 749 | .map = ipmmu_map, |
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index e1b05379ca0e..15a2063812fa 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c | |||
| @@ -52,8 +52,14 @@ DEFINE_SPINLOCK(msm_iommu_lock); | |||
| 52 | struct msm_priv { | 52 | struct msm_priv { |
| 53 | unsigned long *pgtable; | 53 | unsigned long *pgtable; |
| 54 | struct list_head list_attached; | 54 | struct list_head list_attached; |
| 55 | struct iommu_domain domain; | ||
| 55 | }; | 56 | }; |
| 56 | 57 | ||
| 58 | static struct msm_priv *to_msm_priv(struct iommu_domain *dom) | ||
| 59 | { | ||
| 60 | return container_of(dom, struct msm_priv, domain); | ||
| 61 | } | ||
| 62 | |||
| 57 | static int __enable_clocks(struct msm_iommu_drvdata *drvdata) | 63 | static int __enable_clocks(struct msm_iommu_drvdata *drvdata) |
| 58 | { | 64 | { |
| 59 | int ret; | 65 | int ret; |
| @@ -79,7 +85,7 @@ static void __disable_clocks(struct msm_iommu_drvdata *drvdata) | |||
| 79 | 85 | ||
| 80 | static int __flush_iotlb(struct iommu_domain *domain) | 86 | static int __flush_iotlb(struct iommu_domain *domain) |
| 81 | { | 87 | { |
| 82 | struct msm_priv *priv = domain->priv; | 88 | struct msm_priv *priv = to_msm_priv(domain); |
| 83 | struct msm_iommu_drvdata *iommu_drvdata; | 89 | struct msm_iommu_drvdata *iommu_drvdata; |
| 84 | struct msm_iommu_ctx_drvdata *ctx_drvdata; | 90 | struct msm_iommu_ctx_drvdata *ctx_drvdata; |
| 85 | int ret = 0; | 91 | int ret = 0; |
| @@ -209,10 +215,14 @@ static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable) | |||
| 209 | SET_M(base, ctx, 1); | 215 | SET_M(base, ctx, 1); |
| 210 | } | 216 | } |
| 211 | 217 | ||
| 212 | static int msm_iommu_domain_init(struct iommu_domain *domain) | 218 | static struct iommu_domain *msm_iommu_domain_alloc(unsigned type) |
| 213 | { | 219 | { |
| 214 | struct msm_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL); | 220 | struct msm_priv *priv; |
| 215 | 221 | ||
| 222 | if (type != IOMMU_DOMAIN_UNMANAGED) | ||
| 223 | return NULL; | ||
| 224 | |||
| 225 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | ||
| 216 | if (!priv) | 226 | if (!priv) |
| 217 | goto fail_nomem; | 227 | goto fail_nomem; |
| 218 | 228 | ||
| @@ -224,20 +234,19 @@ static int msm_iommu_domain_init(struct iommu_domain *domain) | |||
| 224 | goto fail_nomem; | 234 | goto fail_nomem; |
| 225 | 235 | ||
| 226 | memset(priv->pgtable, 0, SZ_16K); | 236 | memset(priv->pgtable, 0, SZ_16K); |
| 227 | domain->priv = priv; | ||
| 228 | 237 | ||
| 229 | domain->geometry.aperture_start = 0; | 238 | priv->domain.geometry.aperture_start = 0; |
| 230 | domain->geometry.aperture_end = (1ULL << 32) - 1; | 239 | priv->domain.geometry.aperture_end = (1ULL << 32) - 1; |
| 231 | domain->geometry.force_aperture = true; | 240 | priv->domain.geometry.force_aperture = true; |
| 232 | 241 | ||
| 233 | return 0; | 242 | return &priv->domain; |
| 234 | 243 | ||
| 235 | fail_nomem: | 244 | fail_nomem: |
| 236 | kfree(priv); | 245 | kfree(priv); |
| 237 | return -ENOMEM; | 246 | return NULL; |
| 238 | } | 247 | } |
| 239 | 248 | ||
| 240 | static void msm_iommu_domain_destroy(struct iommu_domain *domain) | 249 | static void msm_iommu_domain_free(struct iommu_domain *domain) |
| 241 | { | 250 | { |
| 242 | struct msm_priv *priv; | 251 | struct msm_priv *priv; |
| 243 | unsigned long flags; | 252 | unsigned long flags; |
| @@ -245,20 +254,17 @@ static void msm_iommu_domain_destroy(struct iommu_domain *domain) | |||
| 245 | int i; | 254 | int i; |
| 246 | 255 | ||
| 247 | spin_lock_irqsave(&msm_iommu_lock, flags); | 256 | spin_lock_irqsave(&msm_iommu_lock, flags); |
| 248 | priv = domain->priv; | 257 | priv = to_msm_priv(domain); |
| 249 | domain->priv = NULL; | ||
| 250 | 258 | ||
| 251 | if (priv) { | 259 | fl_table = priv->pgtable; |
| 252 | fl_table = priv->pgtable; | ||
| 253 | 260 | ||
| 254 | for (i = 0; i < NUM_FL_PTE; i++) | 261 | for (i = 0; i < NUM_FL_PTE; i++) |
| 255 | if ((fl_table[i] & 0x03) == FL_TYPE_TABLE) | 262 | if ((fl_table[i] & 0x03) == FL_TYPE_TABLE) |
| 256 | free_page((unsigned long) __va(((fl_table[i]) & | 263 | free_page((unsigned long) __va(((fl_table[i]) & |
| 257 | FL_BASE_MASK))); | 264 | FL_BASE_MASK))); |
| 258 | 265 | ||
| 259 | free_pages((unsigned long)priv->pgtable, get_order(SZ_16K)); | 266 | free_pages((unsigned long)priv->pgtable, get_order(SZ_16K)); |
| 260 | priv->pgtable = NULL; | 267 | priv->pgtable = NULL; |
| 261 | } | ||
| 262 | 268 | ||
| 263 | kfree(priv); | 269 | kfree(priv); |
| 264 | spin_unlock_irqrestore(&msm_iommu_lock, flags); | 270 | spin_unlock_irqrestore(&msm_iommu_lock, flags); |
| @@ -276,9 +282,9 @@ static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) | |||
| 276 | 282 | ||
| 277 | spin_lock_irqsave(&msm_iommu_lock, flags); | 283 | spin_lock_irqsave(&msm_iommu_lock, flags); |
| 278 | 284 | ||
| 279 | priv = domain->priv; | 285 | priv = to_msm_priv(domain); |
| 280 | 286 | ||
| 281 | if (!priv || !dev) { | 287 | if (!dev) { |
| 282 | ret = -EINVAL; | 288 | ret = -EINVAL; |
| 283 | goto fail; | 289 | goto fail; |
| 284 | } | 290 | } |
| @@ -330,9 +336,9 @@ static void msm_iommu_detach_dev(struct iommu_domain *domain, | |||
| 330 | int ret; | 336 | int ret; |
| 331 | 337 | ||
| 332 | spin_lock_irqsave(&msm_iommu_lock, flags); | 338 | spin_lock_irqsave(&msm_iommu_lock, flags); |
| 333 | priv = domain->priv; | 339 | priv = to_msm_priv(domain); |
| 334 | 340 | ||
| 335 | if (!priv || !dev) | 341 | if (!dev) |
| 336 | goto fail; | 342 | goto fail; |
| 337 | 343 | ||
| 338 | iommu_drvdata = dev_get_drvdata(dev->parent); | 344 | iommu_drvdata = dev_get_drvdata(dev->parent); |
| @@ -382,11 +388,7 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long va, | |||
| 382 | goto fail; | 388 | goto fail; |
| 383 | } | 389 | } |
| 384 | 390 | ||
| 385 | priv = domain->priv; | 391 | priv = to_msm_priv(domain); |
| 386 | if (!priv) { | ||
| 387 | ret = -EINVAL; | ||
| 388 | goto fail; | ||
| 389 | } | ||
| 390 | 392 | ||
| 391 | fl_table = priv->pgtable; | 393 | fl_table = priv->pgtable; |
| 392 | 394 | ||
| @@ -484,10 +486,7 @@ static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va, | |||
| 484 | 486 | ||
| 485 | spin_lock_irqsave(&msm_iommu_lock, flags); | 487 | spin_lock_irqsave(&msm_iommu_lock, flags); |
| 486 | 488 | ||
| 487 | priv = domain->priv; | 489 | priv = to_msm_priv(domain); |
| 488 | |||
| 489 | if (!priv) | ||
| 490 | goto fail; | ||
| 491 | 490 | ||
| 492 | fl_table = priv->pgtable; | 491 | fl_table = priv->pgtable; |
| 493 | 492 | ||
| @@ -566,7 +565,7 @@ static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain, | |||
| 566 | 565 | ||
| 567 | spin_lock_irqsave(&msm_iommu_lock, flags); | 566 | spin_lock_irqsave(&msm_iommu_lock, flags); |
| 568 | 567 | ||
| 569 | priv = domain->priv; | 568 | priv = to_msm_priv(domain); |
| 570 | if (list_empty(&priv->list_attached)) | 569 | if (list_empty(&priv->list_attached)) |
| 571 | goto fail; | 570 | goto fail; |
| 572 | 571 | ||
| @@ -674,8 +673,8 @@ fail: | |||
| 674 | 673 | ||
| 675 | static const struct iommu_ops msm_iommu_ops = { | 674 | static const struct iommu_ops msm_iommu_ops = { |
| 676 | .capable = msm_iommu_capable, | 675 | .capable = msm_iommu_capable, |
| 677 | .domain_init = msm_iommu_domain_init, | 676 | .domain_alloc = msm_iommu_domain_alloc, |
| 678 | .domain_destroy = msm_iommu_domain_destroy, | 677 | .domain_free = msm_iommu_domain_free, |
| 679 | .attach_dev = msm_iommu_attach_dev, | 678 | .attach_dev = msm_iommu_attach_dev, |
| 680 | .detach_dev = msm_iommu_detach_dev, | 679 | .detach_dev = msm_iommu_detach_dev, |
| 681 | .map = msm_iommu_map, | 680 | .map = msm_iommu_map, |
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index a4ba851825c2..a22c33d6a486 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c | |||
| @@ -59,6 +59,7 @@ struct omap_iommu_domain { | |||
| 59 | struct omap_iommu *iommu_dev; | 59 | struct omap_iommu *iommu_dev; |
| 60 | struct device *dev; | 60 | struct device *dev; |
| 61 | spinlock_t lock; | 61 | spinlock_t lock; |
| 62 | struct iommu_domain domain; | ||
| 62 | }; | 63 | }; |
| 63 | 64 | ||
| 64 | #define MMU_LOCK_BASE_SHIFT 10 | 65 | #define MMU_LOCK_BASE_SHIFT 10 |
| @@ -80,6 +81,15 @@ static struct platform_driver omap_iommu_driver; | |||
| 80 | static struct kmem_cache *iopte_cachep; | 81 | static struct kmem_cache *iopte_cachep; |
| 81 | 82 | ||
| 82 | /** | 83 | /** |
| 84 | * to_omap_domain - Get struct omap_iommu_domain from generic iommu_domain | ||
| 85 | * @dom: generic iommu domain handle | ||
| 86 | **/ | ||
| 87 | static struct omap_iommu_domain *to_omap_domain(struct iommu_domain *dom) | ||
| 88 | { | ||
| 89 | return container_of(dom, struct omap_iommu_domain, domain); | ||
| 90 | } | ||
| 91 | |||
| 92 | /** | ||
| 83 | * omap_iommu_save_ctx - Save registers for pm off-mode support | 93 | * omap_iommu_save_ctx - Save registers for pm off-mode support |
| 84 | * @dev: client device | 94 | * @dev: client device |
| 85 | **/ | 95 | **/ |
| @@ -901,7 +911,7 @@ static irqreturn_t iommu_fault_handler(int irq, void *data) | |||
| 901 | u32 *iopgd, *iopte; | 911 | u32 *iopgd, *iopte; |
| 902 | struct omap_iommu *obj = data; | 912 | struct omap_iommu *obj = data; |
| 903 | struct iommu_domain *domain = obj->domain; | 913 | struct iommu_domain *domain = obj->domain; |
| 904 | struct omap_iommu_domain *omap_domain = domain->priv; | 914 | struct omap_iommu_domain *omap_domain = to_omap_domain(domain); |
| 905 | 915 | ||
| 906 | if (!omap_domain->iommu_dev) | 916 | if (!omap_domain->iommu_dev) |
| 907 | return IRQ_NONE; | 917 | return IRQ_NONE; |
| @@ -1113,7 +1123,7 @@ static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz) | |||
| 1113 | static int omap_iommu_map(struct iommu_domain *domain, unsigned long da, | 1123 | static int omap_iommu_map(struct iommu_domain *domain, unsigned long da, |
| 1114 | phys_addr_t pa, size_t bytes, int prot) | 1124 | phys_addr_t pa, size_t bytes, int prot) |
| 1115 | { | 1125 | { |
| 1116 | struct omap_iommu_domain *omap_domain = domain->priv; | 1126 | struct omap_iommu_domain *omap_domain = to_omap_domain(domain); |
| 1117 | struct omap_iommu *oiommu = omap_domain->iommu_dev; | 1127 | struct omap_iommu *oiommu = omap_domain->iommu_dev; |
| 1118 | struct device *dev = oiommu->dev; | 1128 | struct device *dev = oiommu->dev; |
| 1119 | struct iotlb_entry e; | 1129 | struct iotlb_entry e; |
| @@ -1140,7 +1150,7 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da, | |||
| 1140 | static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da, | 1150 | static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da, |
| 1141 | size_t size) | 1151 | size_t size) |
| 1142 | { | 1152 | { |
| 1143 | struct omap_iommu_domain *omap_domain = domain->priv; | 1153 | struct omap_iommu_domain *omap_domain = to_omap_domain(domain); |
| 1144 | struct omap_iommu *oiommu = omap_domain->iommu_dev; | 1154 | struct omap_iommu *oiommu = omap_domain->iommu_dev; |
| 1145 | struct device *dev = oiommu->dev; | 1155 | struct device *dev = oiommu->dev; |
| 1146 | 1156 | ||
| @@ -1152,7 +1162,7 @@ static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da, | |||
| 1152 | static int | 1162 | static int |
| 1153 | omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) | 1163 | omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) |
| 1154 | { | 1164 | { |
| 1155 | struct omap_iommu_domain *omap_domain = domain->priv; | 1165 | struct omap_iommu_domain *omap_domain = to_omap_domain(domain); |
| 1156 | struct omap_iommu *oiommu; | 1166 | struct omap_iommu *oiommu; |
| 1157 | struct omap_iommu_arch_data *arch_data = dev->archdata.iommu; | 1167 | struct omap_iommu_arch_data *arch_data = dev->archdata.iommu; |
| 1158 | int ret = 0; | 1168 | int ret = 0; |
| @@ -1212,17 +1222,20 @@ static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain, | |||
| 1212 | static void omap_iommu_detach_dev(struct iommu_domain *domain, | 1222 | static void omap_iommu_detach_dev(struct iommu_domain *domain, |
| 1213 | struct device *dev) | 1223 | struct device *dev) |
| 1214 | { | 1224 | { |
| 1215 | struct omap_iommu_domain *omap_domain = domain->priv; | 1225 | struct omap_iommu_domain *omap_domain = to_omap_domain(domain); |
| 1216 | 1226 | ||
| 1217 | spin_lock(&omap_domain->lock); | 1227 | spin_lock(&omap_domain->lock); |
| 1218 | _omap_iommu_detach_dev(omap_domain, dev); | 1228 | _omap_iommu_detach_dev(omap_domain, dev); |
| 1219 | spin_unlock(&omap_domain->lock); | 1229 | spin_unlock(&omap_domain->lock); |
| 1220 | } | 1230 | } |
| 1221 | 1231 | ||
| 1222 | static int omap_iommu_domain_init(struct iommu_domain *domain) | 1232 | static struct iommu_domain *omap_iommu_domain_alloc(unsigned type) |
| 1223 | { | 1233 | { |
| 1224 | struct omap_iommu_domain *omap_domain; | 1234 | struct omap_iommu_domain *omap_domain; |
| 1225 | 1235 | ||
| 1236 | if (type != IOMMU_DOMAIN_UNMANAGED) | ||
| 1237 | return NULL; | ||
| 1238 | |||
| 1226 | omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL); | 1239 | omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL); |
| 1227 | if (!omap_domain) { | 1240 | if (!omap_domain) { |
| 1228 | pr_err("kzalloc failed\n"); | 1241 | pr_err("kzalloc failed\n"); |
| @@ -1244,25 +1257,21 @@ static int omap_iommu_domain_init(struct iommu_domain *domain) | |||
| 1244 | clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE); | 1257 | clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE); |
| 1245 | spin_lock_init(&omap_domain->lock); | 1258 | spin_lock_init(&omap_domain->lock); |
| 1246 | 1259 | ||
| 1247 | domain->priv = omap_domain; | 1260 | omap_domain->domain.geometry.aperture_start = 0; |
| 1261 | omap_domain->domain.geometry.aperture_end = (1ULL << 32) - 1; | ||
| 1262 | omap_domain->domain.geometry.force_aperture = true; | ||
| 1248 | 1263 | ||
| 1249 | domain->geometry.aperture_start = 0; | 1264 | return &omap_domain->domain; |
| 1250 | domain->geometry.aperture_end = (1ULL << 32) - 1; | ||
| 1251 | domain->geometry.force_aperture = true; | ||
| 1252 | |||
| 1253 | return 0; | ||
| 1254 | 1265 | ||
| 1255 | fail_nomem: | 1266 | fail_nomem: |
| 1256 | kfree(omap_domain); | 1267 | kfree(omap_domain); |
| 1257 | out: | 1268 | out: |
| 1258 | return -ENOMEM; | 1269 | return NULL; |
| 1259 | } | 1270 | } |
| 1260 | 1271 | ||
| 1261 | static void omap_iommu_domain_destroy(struct iommu_domain *domain) | 1272 | static void omap_iommu_domain_free(struct iommu_domain *domain) |
| 1262 | { | 1273 | { |
| 1263 | struct omap_iommu_domain *omap_domain = domain->priv; | 1274 | struct omap_iommu_domain *omap_domain = to_omap_domain(domain); |
| 1264 | |||
| 1265 | domain->priv = NULL; | ||
| 1266 | 1275 | ||
| 1267 | /* | 1276 | /* |
| 1268 | * An iommu device is still attached | 1277 | * An iommu device is still attached |
| @@ -1278,7 +1287,7 @@ static void omap_iommu_domain_destroy(struct iommu_domain *domain) | |||
| 1278 | static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain, | 1287 | static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain, |
| 1279 | dma_addr_t da) | 1288 | dma_addr_t da) |
| 1280 | { | 1289 | { |
| 1281 | struct omap_iommu_domain *omap_domain = domain->priv; | 1290 | struct omap_iommu_domain *omap_domain = to_omap_domain(domain); |
| 1282 | struct omap_iommu *oiommu = omap_domain->iommu_dev; | 1291 | struct omap_iommu *oiommu = omap_domain->iommu_dev; |
| 1283 | struct device *dev = oiommu->dev; | 1292 | struct device *dev = oiommu->dev; |
| 1284 | u32 *pgd, *pte; | 1293 | u32 *pgd, *pte; |
| @@ -1358,8 +1367,8 @@ static void omap_iommu_remove_device(struct device *dev) | |||
| 1358 | } | 1367 | } |
| 1359 | 1368 | ||
| 1360 | static const struct iommu_ops omap_iommu_ops = { | 1369 | static const struct iommu_ops omap_iommu_ops = { |
| 1361 | .domain_init = omap_iommu_domain_init, | 1370 | .domain_alloc = omap_iommu_domain_alloc, |
| 1362 | .domain_destroy = omap_iommu_domain_destroy, | 1371 | .domain_free = omap_iommu_domain_free, |
| 1363 | .attach_dev = omap_iommu_attach_dev, | 1372 | .attach_dev = omap_iommu_attach_dev, |
| 1364 | .detach_dev = omap_iommu_detach_dev, | 1373 | .detach_dev = omap_iommu_detach_dev, |
| 1365 | .map = omap_iommu_map, | 1374 | .map = omap_iommu_map, |
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index 9f74fddcd304..4015560bf486 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c | |||
| @@ -80,6 +80,8 @@ struct rk_iommu_domain { | |||
| 80 | u32 *dt; /* page directory table */ | 80 | u32 *dt; /* page directory table */ |
| 81 | spinlock_t iommus_lock; /* lock for iommus list */ | 81 | spinlock_t iommus_lock; /* lock for iommus list */ |
| 82 | spinlock_t dt_lock; /* lock for modifying page directory table */ | 82 | spinlock_t dt_lock; /* lock for modifying page directory table */ |
| 83 | |||
| 84 | struct iommu_domain domain; | ||
| 83 | }; | 85 | }; |
| 84 | 86 | ||
| 85 | struct rk_iommu { | 87 | struct rk_iommu { |
| @@ -100,6 +102,11 @@ static inline void rk_table_flush(u32 *va, unsigned int count) | |||
| 100 | outer_flush_range(pa_start, pa_end); | 102 | outer_flush_range(pa_start, pa_end); |
| 101 | } | 103 | } |
| 102 | 104 | ||
| 105 | static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom) | ||
| 106 | { | ||
| 107 | return container_of(dom, struct rk_iommu_domain, domain); | ||
| 108 | } | ||
| 109 | |||
| 103 | /** | 110 | /** |
| 104 | * Inspired by _wait_for in intel_drv.h | 111 | * Inspired by _wait_for in intel_drv.h |
| 105 | * This is NOT safe for use in interrupt context. | 112 | * This is NOT safe for use in interrupt context. |
| @@ -503,7 +510,7 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id) | |||
| 503 | static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain, | 510 | static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain, |
| 504 | dma_addr_t iova) | 511 | dma_addr_t iova) |
| 505 | { | 512 | { |
| 506 | struct rk_iommu_domain *rk_domain = domain->priv; | 513 | struct rk_iommu_domain *rk_domain = to_rk_domain(domain); |
| 507 | unsigned long flags; | 514 | unsigned long flags; |
| 508 | phys_addr_t pt_phys, phys = 0; | 515 | phys_addr_t pt_phys, phys = 0; |
| 509 | u32 dte, pte; | 516 | u32 dte, pte; |
| @@ -639,7 +646,7 @@ unwind: | |||
| 639 | static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova, | 646 | static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova, |
| 640 | phys_addr_t paddr, size_t size, int prot) | 647 | phys_addr_t paddr, size_t size, int prot) |
| 641 | { | 648 | { |
| 642 | struct rk_iommu_domain *rk_domain = domain->priv; | 649 | struct rk_iommu_domain *rk_domain = to_rk_domain(domain); |
| 643 | unsigned long flags; | 650 | unsigned long flags; |
| 644 | dma_addr_t iova = (dma_addr_t)_iova; | 651 | dma_addr_t iova = (dma_addr_t)_iova; |
| 645 | u32 *page_table, *pte_addr; | 652 | u32 *page_table, *pte_addr; |
| @@ -670,7 +677,7 @@ static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova, | |||
| 670 | static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova, | 677 | static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova, |
| 671 | size_t size) | 678 | size_t size) |
| 672 | { | 679 | { |
| 673 | struct rk_iommu_domain *rk_domain = domain->priv; | 680 | struct rk_iommu_domain *rk_domain = to_rk_domain(domain); |
| 674 | unsigned long flags; | 681 | unsigned long flags; |
| 675 | dma_addr_t iova = (dma_addr_t)_iova; | 682 | dma_addr_t iova = (dma_addr_t)_iova; |
| 676 | phys_addr_t pt_phys; | 683 | phys_addr_t pt_phys; |
| @@ -726,7 +733,7 @@ static int rk_iommu_attach_device(struct iommu_domain *domain, | |||
| 726 | struct device *dev) | 733 | struct device *dev) |
| 727 | { | 734 | { |
| 728 | struct rk_iommu *iommu; | 735 | struct rk_iommu *iommu; |
| 729 | struct rk_iommu_domain *rk_domain = domain->priv; | 736 | struct rk_iommu_domain *rk_domain = to_rk_domain(domain); |
| 730 | unsigned long flags; | 737 | unsigned long flags; |
| 731 | int ret; | 738 | int ret; |
| 732 | phys_addr_t dte_addr; | 739 | phys_addr_t dte_addr; |
| @@ -778,7 +785,7 @@ static void rk_iommu_detach_device(struct iommu_domain *domain, | |||
| 778 | struct device *dev) | 785 | struct device *dev) |
| 779 | { | 786 | { |
| 780 | struct rk_iommu *iommu; | 787 | struct rk_iommu *iommu; |
| 781 | struct rk_iommu_domain *rk_domain = domain->priv; | 788 | struct rk_iommu_domain *rk_domain = to_rk_domain(domain); |
| 782 | unsigned long flags; | 789 | unsigned long flags; |
| 783 | 790 | ||
| 784 | /* Allow 'virtual devices' (eg drm) to detach from domain */ | 791 | /* Allow 'virtual devices' (eg drm) to detach from domain */ |
| @@ -804,13 +811,16 @@ static void rk_iommu_detach_device(struct iommu_domain *domain, | |||
| 804 | dev_info(dev, "Detached from iommu domain\n"); | 811 | dev_info(dev, "Detached from iommu domain\n"); |
| 805 | } | 812 | } |
| 806 | 813 | ||
| 807 | static int rk_iommu_domain_init(struct iommu_domain *domain) | 814 | static struct iommu_domain *rk_iommu_domain_alloc(unsigned type) |
| 808 | { | 815 | { |
| 809 | struct rk_iommu_domain *rk_domain; | 816 | struct rk_iommu_domain *rk_domain; |
| 810 | 817 | ||
| 818 | if (type != IOMMU_DOMAIN_UNMANAGED) | ||
| 819 | return NULL; | ||
| 820 | |||
| 811 | rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL); | 821 | rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL); |
| 812 | if (!rk_domain) | 822 | if (!rk_domain) |
| 813 | return -ENOMEM; | 823 | return NULL; |
| 814 | 824 | ||
| 815 | /* | 825 | /* |
| 816 | * rk32xx iommus use a 2 level pagetable. | 826 | * rk32xx iommus use a 2 level pagetable. |
| @@ -827,17 +837,16 @@ static int rk_iommu_domain_init(struct iommu_domain *domain) | |||
| 827 | spin_lock_init(&rk_domain->dt_lock); | 837 | spin_lock_init(&rk_domain->dt_lock); |
| 828 | INIT_LIST_HEAD(&rk_domain->iommus); | 838 | INIT_LIST_HEAD(&rk_domain->iommus); |
| 829 | 839 | ||
| 830 | domain->priv = rk_domain; | 840 | return &rk_domain->domain; |
| 831 | 841 | ||
| 832 | return 0; | ||
| 833 | err_dt: | 842 | err_dt: |
| 834 | kfree(rk_domain); | 843 | kfree(rk_domain); |
| 835 | return -ENOMEM; | 844 | return NULL; |
| 836 | } | 845 | } |
| 837 | 846 | ||
| 838 | static void rk_iommu_domain_destroy(struct iommu_domain *domain) | 847 | static void rk_iommu_domain_free(struct iommu_domain *domain) |
| 839 | { | 848 | { |
| 840 | struct rk_iommu_domain *rk_domain = domain->priv; | 849 | struct rk_iommu_domain *rk_domain = to_rk_domain(domain); |
| 841 | int i; | 850 | int i; |
| 842 | 851 | ||
| 843 | WARN_ON(!list_empty(&rk_domain->iommus)); | 852 | WARN_ON(!list_empty(&rk_domain->iommus)); |
| @@ -852,8 +861,7 @@ static void rk_iommu_domain_destroy(struct iommu_domain *domain) | |||
| 852 | } | 861 | } |
| 853 | 862 | ||
| 854 | free_page((unsigned long)rk_domain->dt); | 863 | free_page((unsigned long)rk_domain->dt); |
| 855 | kfree(domain->priv); | 864 | kfree(rk_domain); |
| 856 | domain->priv = NULL; | ||
| 857 | } | 865 | } |
| 858 | 866 | ||
| 859 | static bool rk_iommu_is_dev_iommu_master(struct device *dev) | 867 | static bool rk_iommu_is_dev_iommu_master(struct device *dev) |
| @@ -952,8 +960,8 @@ static void rk_iommu_remove_device(struct device *dev) | |||
| 952 | } | 960 | } |
| 953 | 961 | ||
| 954 | static const struct iommu_ops rk_iommu_ops = { | 962 | static const struct iommu_ops rk_iommu_ops = { |
| 955 | .domain_init = rk_iommu_domain_init, | 963 | .domain_alloc = rk_iommu_domain_alloc, |
| 956 | .domain_destroy = rk_iommu_domain_destroy, | 964 | .domain_free = rk_iommu_domain_free, |
| 957 | .attach_dev = rk_iommu_attach_device, | 965 | .attach_dev = rk_iommu_attach_device, |
| 958 | .detach_dev = rk_iommu_detach_device, | 966 | .detach_dev = rk_iommu_detach_device, |
| 959 | .map = rk_iommu_map, | 967 | .map = rk_iommu_map, |
diff --git a/drivers/iommu/shmobile-iommu.c b/drivers/iommu/shmobile-iommu.c index f1b00774e4de..a0287519a1d4 100644 --- a/drivers/iommu/shmobile-iommu.c +++ b/drivers/iommu/shmobile-iommu.c | |||
| @@ -42,11 +42,17 @@ struct shmobile_iommu_domain { | |||
| 42 | spinlock_t map_lock; | 42 | spinlock_t map_lock; |
| 43 | spinlock_t attached_list_lock; | 43 | spinlock_t attached_list_lock; |
| 44 | struct list_head attached_list; | 44 | struct list_head attached_list; |
| 45 | struct iommu_domain domain; | ||
| 45 | }; | 46 | }; |
| 46 | 47 | ||
| 47 | static struct shmobile_iommu_archdata *ipmmu_archdata; | 48 | static struct shmobile_iommu_archdata *ipmmu_archdata; |
| 48 | static struct kmem_cache *l1cache, *l2cache; | 49 | static struct kmem_cache *l1cache, *l2cache; |
| 49 | 50 | ||
| 51 | static struct shmobile_iommu_domain *to_sh_domain(struct iommu_domain *dom) | ||
| 52 | { | ||
| 53 | return container_of(dom, struct shmobile_iommu_domain, domain); | ||
| 54 | } | ||
| 55 | |||
| 50 | static int pgtable_alloc(struct shmobile_iommu_domain_pgtable *pgtable, | 56 | static int pgtable_alloc(struct shmobile_iommu_domain_pgtable *pgtable, |
| 51 | struct kmem_cache *cache, size_t size) | 57 | struct kmem_cache *cache, size_t size) |
| 52 | { | 58 | { |
| @@ -82,31 +88,33 @@ static void pgtable_write(struct shmobile_iommu_domain_pgtable *pgtable, | |||
| 82 | sizeof(val) * count, DMA_TO_DEVICE); | 88 | sizeof(val) * count, DMA_TO_DEVICE); |
| 83 | } | 89 | } |
| 84 | 90 | ||
| 85 | static int shmobile_iommu_domain_init(struct iommu_domain *domain) | 91 | static struct iommu_domain *shmobile_iommu_domain_alloc(unsigned type) |
| 86 | { | 92 | { |
| 87 | struct shmobile_iommu_domain *sh_domain; | 93 | struct shmobile_iommu_domain *sh_domain; |
| 88 | int i, ret; | 94 | int i, ret; |
| 89 | 95 | ||
| 90 | sh_domain = kmalloc(sizeof(*sh_domain), GFP_KERNEL); | 96 | if (type != IOMMU_DOMAIN_UNMANAGED) |
| 97 | return NULL; | ||
| 98 | |||
| 99 | sh_domain = kzalloc(sizeof(*sh_domain), GFP_KERNEL); | ||
| 91 | if (!sh_domain) | 100 | if (!sh_domain) |
| 92 | return -ENOMEM; | 101 | return NULL; |
| 93 | ret = pgtable_alloc(&sh_domain->l1, l1cache, L1_SIZE); | 102 | ret = pgtable_alloc(&sh_domain->l1, l1cache, L1_SIZE); |
| 94 | if (ret < 0) { | 103 | if (ret < 0) { |
| 95 | kfree(sh_domain); | 104 | kfree(sh_domain); |
| 96 | return ret; | 105 | return NULL; |
| 97 | } | 106 | } |
| 98 | for (i = 0; i < L1_LEN; i++) | 107 | for (i = 0; i < L1_LEN; i++) |
| 99 | sh_domain->l2[i].pgtable = NULL; | 108 | sh_domain->l2[i].pgtable = NULL; |
| 100 | spin_lock_init(&sh_domain->map_lock); | 109 | spin_lock_init(&sh_domain->map_lock); |
| 101 | spin_lock_init(&sh_domain->attached_list_lock); | 110 | spin_lock_init(&sh_domain->attached_list_lock); |
| 102 | INIT_LIST_HEAD(&sh_domain->attached_list); | 111 | INIT_LIST_HEAD(&sh_domain->attached_list); |
| 103 | domain->priv = sh_domain; | 112 | return &sh_domain->domain; |
| 104 | return 0; | ||
| 105 | } | 113 | } |
| 106 | 114 | ||
| 107 | static void shmobile_iommu_domain_destroy(struct iommu_domain *domain) | 115 | static void shmobile_iommu_domain_free(struct iommu_domain *domain) |
| 108 | { | 116 | { |
| 109 | struct shmobile_iommu_domain *sh_domain = domain->priv; | 117 | struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain); |
| 110 | int i; | 118 | int i; |
| 111 | 119 | ||
| 112 | for (i = 0; i < L1_LEN; i++) { | 120 | for (i = 0; i < L1_LEN; i++) { |
| @@ -115,14 +123,13 @@ static void shmobile_iommu_domain_destroy(struct iommu_domain *domain) | |||
| 115 | } | 123 | } |
| 116 | pgtable_free(&sh_domain->l1, l1cache, L1_SIZE); | 124 | pgtable_free(&sh_domain->l1, l1cache, L1_SIZE); |
| 117 | kfree(sh_domain); | 125 | kfree(sh_domain); |
| 118 | domain->priv = NULL; | ||
| 119 | } | 126 | } |
| 120 | 127 | ||
| 121 | static int shmobile_iommu_attach_device(struct iommu_domain *domain, | 128 | static int shmobile_iommu_attach_device(struct iommu_domain *domain, |
| 122 | struct device *dev) | 129 | struct device *dev) |
| 123 | { | 130 | { |
| 124 | struct shmobile_iommu_archdata *archdata = dev->archdata.iommu; | 131 | struct shmobile_iommu_archdata *archdata = dev->archdata.iommu; |
| 125 | struct shmobile_iommu_domain *sh_domain = domain->priv; | 132 | struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain); |
| 126 | int ret = -EBUSY; | 133 | int ret = -EBUSY; |
| 127 | 134 | ||
| 128 | if (!archdata) | 135 | if (!archdata) |
| @@ -151,7 +158,7 @@ static void shmobile_iommu_detach_device(struct iommu_domain *domain, | |||
| 151 | struct device *dev) | 158 | struct device *dev) |
| 152 | { | 159 | { |
| 153 | struct shmobile_iommu_archdata *archdata = dev->archdata.iommu; | 160 | struct shmobile_iommu_archdata *archdata = dev->archdata.iommu; |
| 154 | struct shmobile_iommu_domain *sh_domain = domain->priv; | 161 | struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain); |
| 155 | 162 | ||
| 156 | if (!archdata) | 163 | if (!archdata) |
| 157 | return; | 164 | return; |
| @@ -214,7 +221,7 @@ static int shmobile_iommu_map(struct iommu_domain *domain, unsigned long iova, | |||
| 214 | phys_addr_t paddr, size_t size, int prot) | 221 | phys_addr_t paddr, size_t size, int prot) |
| 215 | { | 222 | { |
| 216 | struct shmobile_iommu_domain_pgtable l2 = { .pgtable = NULL }; | 223 | struct shmobile_iommu_domain_pgtable l2 = { .pgtable = NULL }; |
| 217 | struct shmobile_iommu_domain *sh_domain = domain->priv; | 224 | struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain); |
| 218 | unsigned int l1index, l2index; | 225 | unsigned int l1index, l2index; |
| 219 | int ret; | 226 | int ret; |
| 220 | 227 | ||
| @@ -258,7 +265,7 @@ static size_t shmobile_iommu_unmap(struct iommu_domain *domain, | |||
| 258 | unsigned long iova, size_t size) | 265 | unsigned long iova, size_t size) |
| 259 | { | 266 | { |
| 260 | struct shmobile_iommu_domain_pgtable l2 = { .pgtable = NULL }; | 267 | struct shmobile_iommu_domain_pgtable l2 = { .pgtable = NULL }; |
| 261 | struct shmobile_iommu_domain *sh_domain = domain->priv; | 268 | struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain); |
| 262 | unsigned int l1index, l2index; | 269 | unsigned int l1index, l2index; |
| 263 | uint32_t l2entry = 0; | 270 | uint32_t l2entry = 0; |
| 264 | size_t ret = 0; | 271 | size_t ret = 0; |
| @@ -298,7 +305,7 @@ done: | |||
| 298 | static phys_addr_t shmobile_iommu_iova_to_phys(struct iommu_domain *domain, | 305 | static phys_addr_t shmobile_iommu_iova_to_phys(struct iommu_domain *domain, |
| 299 | dma_addr_t iova) | 306 | dma_addr_t iova) |
| 300 | { | 307 | { |
| 301 | struct shmobile_iommu_domain *sh_domain = domain->priv; | 308 | struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain); |
| 302 | uint32_t l1entry = 0, l2entry = 0; | 309 | uint32_t l1entry = 0, l2entry = 0; |
| 303 | unsigned int l1index, l2index; | 310 | unsigned int l1index, l2index; |
| 304 | 311 | ||
| @@ -355,8 +362,8 @@ static int shmobile_iommu_add_device(struct device *dev) | |||
| 355 | } | 362 | } |
| 356 | 363 | ||
| 357 | static const struct iommu_ops shmobile_iommu_ops = { | 364 | static const struct iommu_ops shmobile_iommu_ops = { |
| 358 | .domain_init = shmobile_iommu_domain_init, | 365 | .domain_alloc = shmobile_iommu_domain_alloc, |
| 359 | .domain_destroy = shmobile_iommu_domain_destroy, | 366 | .domain_free = shmobile_iommu_domain_free, |
| 360 | .attach_dev = shmobile_iommu_attach_device, | 367 | .attach_dev = shmobile_iommu_attach_device, |
| 361 | .detach_dev = shmobile_iommu_detach_device, | 368 | .detach_dev = shmobile_iommu_detach_device, |
| 362 | .map = shmobile_iommu_map, | 369 | .map = shmobile_iommu_map, |
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c index c48da057dbb1..37e708fdbb5a 100644 --- a/drivers/iommu/tegra-gart.c +++ b/drivers/iommu/tegra-gart.c | |||
| @@ -63,11 +63,21 @@ struct gart_device { | |||
| 63 | struct device *dev; | 63 | struct device *dev; |
| 64 | }; | 64 | }; |
| 65 | 65 | ||
| 66 | struct gart_domain { | ||
| 67 | struct iommu_domain domain; /* generic domain handle */ | ||
| 68 | struct gart_device *gart; /* link to gart device */ | ||
| 69 | }; | ||
| 70 | |||
| 66 | static struct gart_device *gart_handle; /* unique for a system */ | 71 | static struct gart_device *gart_handle; /* unique for a system */ |
| 67 | 72 | ||
| 68 | #define GART_PTE(_pfn) \ | 73 | #define GART_PTE(_pfn) \ |
| 69 | (GART_ENTRY_PHYS_ADDR_VALID | ((_pfn) << PAGE_SHIFT)) | 74 | (GART_ENTRY_PHYS_ADDR_VALID | ((_pfn) << PAGE_SHIFT)) |
| 70 | 75 | ||
| 76 | static struct gart_domain *to_gart_domain(struct iommu_domain *dom) | ||
| 77 | { | ||
| 78 | return container_of(dom, struct gart_domain, domain); | ||
| 79 | } | ||
| 80 | |||
| 71 | /* | 81 | /* |
| 72 | * Any interaction between any block on PPSB and a block on APB or AHB | 82 | * Any interaction between any block on PPSB and a block on APB or AHB |
| 73 | * must have these read-back to ensure the APB/AHB bus transaction is | 83 | * must have these read-back to ensure the APB/AHB bus transaction is |
| @@ -156,20 +166,11 @@ static inline bool gart_iova_range_valid(struct gart_device *gart, | |||
| 156 | static int gart_iommu_attach_dev(struct iommu_domain *domain, | 166 | static int gart_iommu_attach_dev(struct iommu_domain *domain, |
| 157 | struct device *dev) | 167 | struct device *dev) |
| 158 | { | 168 | { |
| 159 | struct gart_device *gart; | 169 | struct gart_domain *gart_domain = to_gart_domain(domain); |
| 170 | struct gart_device *gart = gart_domain->gart; | ||
| 160 | struct gart_client *client, *c; | 171 | struct gart_client *client, *c; |
| 161 | int err = 0; | 172 | int err = 0; |
| 162 | 173 | ||
| 163 | gart = gart_handle; | ||
| 164 | if (!gart) | ||
| 165 | return -EINVAL; | ||
| 166 | domain->priv = gart; | ||
| 167 | |||
| 168 | domain->geometry.aperture_start = gart->iovmm_base; | ||
| 169 | domain->geometry.aperture_end = gart->iovmm_base + | ||
| 170 | gart->page_count * GART_PAGE_SIZE - 1; | ||
| 171 | domain->geometry.force_aperture = true; | ||
| 172 | |||
| 173 | client = devm_kzalloc(gart->dev, sizeof(*c), GFP_KERNEL); | 174 | client = devm_kzalloc(gart->dev, sizeof(*c), GFP_KERNEL); |
| 174 | if (!client) | 175 | if (!client) |
| 175 | return -ENOMEM; | 176 | return -ENOMEM; |
| @@ -198,7 +199,8 @@ fail: | |||
| 198 | static void gart_iommu_detach_dev(struct iommu_domain *domain, | 199 | static void gart_iommu_detach_dev(struct iommu_domain *domain, |
| 199 | struct device *dev) | 200 | struct device *dev) |
| 200 | { | 201 | { |
| 201 | struct gart_device *gart = domain->priv; | 202 | struct gart_domain *gart_domain = to_gart_domain(domain); |
| 203 | struct gart_device *gart = gart_domain->gart; | ||
| 202 | struct gart_client *c; | 204 | struct gart_client *c; |
| 203 | 205 | ||
| 204 | spin_lock(&gart->client_lock); | 206 | spin_lock(&gart->client_lock); |
| @@ -216,33 +218,55 @@ out: | |||
| 216 | spin_unlock(&gart->client_lock); | 218 | spin_unlock(&gart->client_lock); |
| 217 | } | 219 | } |
| 218 | 220 | ||
| 219 | static int gart_iommu_domain_init(struct iommu_domain *domain) | 221 | static struct iommu_domain *gart_iommu_domain_alloc(unsigned type) |
| 220 | { | 222 | { |
| 221 | return 0; | 223 | struct gart_domain *gart_domain; |
| 222 | } | 224 | struct gart_device *gart; |
| 223 | 225 | ||
| 224 | static void gart_iommu_domain_destroy(struct iommu_domain *domain) | 226 | if (type != IOMMU_DOMAIN_UNMANAGED) |
| 225 | { | 227 | return NULL; |
| 226 | struct gart_device *gart = domain->priv; | ||
| 227 | 228 | ||
| 229 | gart = gart_handle; | ||
| 228 | if (!gart) | 230 | if (!gart) |
| 229 | return; | 231 | return NULL; |
| 230 | 232 | ||
| 231 | spin_lock(&gart->client_lock); | 233 | gart_domain = kzalloc(sizeof(*gart_domain), GFP_KERNEL); |
| 232 | if (!list_empty(&gart->client)) { | 234 | if (!gart_domain) |
| 233 | struct gart_client *c; | 235 | return NULL; |
| 236 | |||
| 237 | gart_domain->gart = gart; | ||
| 238 | gart_domain->domain.geometry.aperture_start = gart->iovmm_base; | ||
| 239 | gart_domain->domain.geometry.aperture_end = gart->iovmm_base + | ||
| 240 | gart->page_count * GART_PAGE_SIZE - 1; | ||
| 241 | gart_domain->domain.geometry.force_aperture = true; | ||
| 242 | |||
| 243 | return &gart_domain->domain; | ||
| 244 | } | ||
| 245 | |||
| 246 | static void gart_iommu_domain_free(struct iommu_domain *domain) | ||
| 247 | { | ||
| 248 | struct gart_domain *gart_domain = to_gart_domain(domain); | ||
| 249 | struct gart_device *gart = gart_domain->gart; | ||
| 250 | |||
| 251 | if (gart) { | ||
| 252 | spin_lock(&gart->client_lock); | ||
| 253 | if (!list_empty(&gart->client)) { | ||
| 254 | struct gart_client *c; | ||
| 234 | 255 | ||
| 235 | list_for_each_entry(c, &gart->client, list) | 256 | list_for_each_entry(c, &gart->client, list) |
| 236 | gart_iommu_detach_dev(domain, c->dev); | 257 | gart_iommu_detach_dev(domain, c->dev); |
| 258 | } | ||
| 259 | spin_unlock(&gart->client_lock); | ||
| 237 | } | 260 | } |
| 238 | spin_unlock(&gart->client_lock); | 261 | |
| 239 | domain->priv = NULL; | 262 | kfree(gart_domain); |
| 240 | } | 263 | } |
| 241 | 264 | ||
| 242 | static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova, | 265 | static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova, |
| 243 | phys_addr_t pa, size_t bytes, int prot) | 266 | phys_addr_t pa, size_t bytes, int prot) |
| 244 | { | 267 | { |
| 245 | struct gart_device *gart = domain->priv; | 268 | struct gart_domain *gart_domain = to_gart_domain(domain); |
| 269 | struct gart_device *gart = gart_domain->gart; | ||
| 246 | unsigned long flags; | 270 | unsigned long flags; |
| 247 | unsigned long pfn; | 271 | unsigned long pfn; |
| 248 | 272 | ||
| @@ -265,7 +289,8 @@ static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova, | |||
| 265 | static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova, | 289 | static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova, |
| 266 | size_t bytes) | 290 | size_t bytes) |
| 267 | { | 291 | { |
| 268 | struct gart_device *gart = domain->priv; | 292 | struct gart_domain *gart_domain = to_gart_domain(domain); |
| 293 | struct gart_device *gart = gart_domain->gart; | ||
| 269 | unsigned long flags; | 294 | unsigned long flags; |
| 270 | 295 | ||
| 271 | if (!gart_iova_range_valid(gart, iova, bytes)) | 296 | if (!gart_iova_range_valid(gart, iova, bytes)) |
| @@ -281,7 +306,8 @@ static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova, | |||
| 281 | static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain, | 306 | static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain, |
| 282 | dma_addr_t iova) | 307 | dma_addr_t iova) |
| 283 | { | 308 | { |
| 284 | struct gart_device *gart = domain->priv; | 309 | struct gart_domain *gart_domain = to_gart_domain(domain); |
| 310 | struct gart_device *gart = gart_domain->gart; | ||
| 285 | unsigned long pte; | 311 | unsigned long pte; |
| 286 | phys_addr_t pa; | 312 | phys_addr_t pa; |
| 287 | unsigned long flags; | 313 | unsigned long flags; |
| @@ -310,8 +336,8 @@ static bool gart_iommu_capable(enum iommu_cap cap) | |||
| 310 | 336 | ||
| 311 | static const struct iommu_ops gart_iommu_ops = { | 337 | static const struct iommu_ops gart_iommu_ops = { |
| 312 | .capable = gart_iommu_capable, | 338 | .capable = gart_iommu_capable, |
| 313 | .domain_init = gart_iommu_domain_init, | 339 | .domain_alloc = gart_iommu_domain_alloc, |
| 314 | .domain_destroy = gart_iommu_domain_destroy, | 340 | .domain_free = gart_iommu_domain_free, |
| 315 | .attach_dev = gart_iommu_attach_dev, | 341 | .attach_dev = gart_iommu_attach_dev, |
| 316 | .detach_dev = gart_iommu_detach_dev, | 342 | .detach_dev = gart_iommu_detach_dev, |
| 317 | .map = gart_iommu_map, | 343 | .map = gart_iommu_map, |
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index 6e134c7c227f..c845d99ecf6b 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c | |||
| @@ -6,6 +6,7 @@ | |||
| 6 | * published by the Free Software Foundation. | 6 | * published by the Free Software Foundation. |
| 7 | */ | 7 | */ |
| 8 | 8 | ||
| 9 | #include <linux/bitops.h> | ||
| 9 | #include <linux/err.h> | 10 | #include <linux/err.h> |
| 10 | #include <linux/iommu.h> | 11 | #include <linux/iommu.h> |
| 11 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
| @@ -24,6 +25,8 @@ struct tegra_smmu { | |||
| 24 | struct tegra_mc *mc; | 25 | struct tegra_mc *mc; |
| 25 | const struct tegra_smmu_soc *soc; | 26 | const struct tegra_smmu_soc *soc; |
| 26 | 27 | ||
| 28 | unsigned long pfn_mask; | ||
| 29 | |||
| 27 | unsigned long *asids; | 30 | unsigned long *asids; |
| 28 | struct mutex lock; | 31 | struct mutex lock; |
| 29 | 32 | ||
| @@ -31,7 +34,7 @@ struct tegra_smmu { | |||
| 31 | }; | 34 | }; |
| 32 | 35 | ||
| 33 | struct tegra_smmu_as { | 36 | struct tegra_smmu_as { |
| 34 | struct iommu_domain *domain; | 37 | struct iommu_domain domain; |
| 35 | struct tegra_smmu *smmu; | 38 | struct tegra_smmu *smmu; |
| 36 | unsigned int use_count; | 39 | unsigned int use_count; |
| 37 | struct page *count; | 40 | struct page *count; |
| @@ -40,6 +43,11 @@ struct tegra_smmu_as { | |||
| 40 | u32 attr; | 43 | u32 attr; |
| 41 | }; | 44 | }; |
| 42 | 45 | ||
| 46 | static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom) | ||
| 47 | { | ||
| 48 | return container_of(dom, struct tegra_smmu_as, domain); | ||
| 49 | } | ||
| 50 | |||
| 43 | static inline void smmu_writel(struct tegra_smmu *smmu, u32 value, | 51 | static inline void smmu_writel(struct tegra_smmu *smmu, u32 value, |
| 44 | unsigned long offset) | 52 | unsigned long offset) |
| 45 | { | 53 | { |
| @@ -105,8 +113,6 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset) | |||
| 105 | #define SMMU_PDE_SHIFT 22 | 113 | #define SMMU_PDE_SHIFT 22 |
| 106 | #define SMMU_PTE_SHIFT 12 | 114 | #define SMMU_PTE_SHIFT 12 |
| 107 | 115 | ||
| 108 | #define SMMU_PFN_MASK 0x000fffff | ||
| 109 | |||
| 110 | #define SMMU_PD_READABLE (1 << 31) | 116 | #define SMMU_PD_READABLE (1 << 31) |
| 111 | #define SMMU_PD_WRITABLE (1 << 30) | 117 | #define SMMU_PD_WRITABLE (1 << 30) |
| 112 | #define SMMU_PD_NONSECURE (1 << 29) | 118 | #define SMMU_PD_NONSECURE (1 << 29) |
| @@ -224,30 +230,32 @@ static bool tegra_smmu_capable(enum iommu_cap cap) | |||
| 224 | return false; | 230 | return false; |
| 225 | } | 231 | } |
| 226 | 232 | ||
| 227 | static int tegra_smmu_domain_init(struct iommu_domain *domain) | 233 | static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type) |
| 228 | { | 234 | { |
| 229 | struct tegra_smmu_as *as; | 235 | struct tegra_smmu_as *as; |
| 230 | unsigned int i; | 236 | unsigned int i; |
| 231 | uint32_t *pd; | 237 | uint32_t *pd; |
| 232 | 238 | ||
| 239 | if (type != IOMMU_DOMAIN_UNMANAGED) | ||
| 240 | return NULL; | ||
| 241 | |||
| 233 | as = kzalloc(sizeof(*as), GFP_KERNEL); | 242 | as = kzalloc(sizeof(*as), GFP_KERNEL); |
| 234 | if (!as) | 243 | if (!as) |
| 235 | return -ENOMEM; | 244 | return NULL; |
| 236 | 245 | ||
| 237 | as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE; | 246 | as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE; |
| 238 | as->domain = domain; | ||
| 239 | 247 | ||
| 240 | as->pd = alloc_page(GFP_KERNEL | __GFP_DMA); | 248 | as->pd = alloc_page(GFP_KERNEL | __GFP_DMA); |
| 241 | if (!as->pd) { | 249 | if (!as->pd) { |
| 242 | kfree(as); | 250 | kfree(as); |
| 243 | return -ENOMEM; | 251 | return NULL; |
| 244 | } | 252 | } |
| 245 | 253 | ||
| 246 | as->count = alloc_page(GFP_KERNEL); | 254 | as->count = alloc_page(GFP_KERNEL); |
| 247 | if (!as->count) { | 255 | if (!as->count) { |
| 248 | __free_page(as->pd); | 256 | __free_page(as->pd); |
| 249 | kfree(as); | 257 | kfree(as); |
| 250 | return -ENOMEM; | 258 | return NULL; |
| 251 | } | 259 | } |
| 252 | 260 | ||
| 253 | /* clear PDEs */ | 261 | /* clear PDEs */ |
| @@ -264,14 +272,17 @@ static int tegra_smmu_domain_init(struct iommu_domain *domain) | |||
| 264 | for (i = 0; i < SMMU_NUM_PDE; i++) | 272 | for (i = 0; i < SMMU_NUM_PDE; i++) |
| 265 | pd[i] = 0; | 273 | pd[i] = 0; |
| 266 | 274 | ||
| 267 | domain->priv = as; | 275 | /* setup aperture */ |
| 276 | as->domain.geometry.aperture_start = 0; | ||
| 277 | as->domain.geometry.aperture_end = 0xffffffff; | ||
| 278 | as->domain.geometry.force_aperture = true; | ||
| 268 | 279 | ||
| 269 | return 0; | 280 | return &as->domain; |
| 270 | } | 281 | } |
| 271 | 282 | ||
| 272 | static void tegra_smmu_domain_destroy(struct iommu_domain *domain) | 283 | static void tegra_smmu_domain_free(struct iommu_domain *domain) |
| 273 | { | 284 | { |
| 274 | struct tegra_smmu_as *as = domain->priv; | 285 | struct tegra_smmu_as *as = to_smmu_as(domain); |
| 275 | 286 | ||
| 276 | /* TODO: free page directory and page tables */ | 287 | /* TODO: free page directory and page tables */ |
| 277 | ClearPageReserved(as->pd); | 288 | ClearPageReserved(as->pd); |
| @@ -395,7 +406,7 @@ static int tegra_smmu_attach_dev(struct iommu_domain *domain, | |||
| 395 | struct device *dev) | 406 | struct device *dev) |
| 396 | { | 407 | { |
| 397 | struct tegra_smmu *smmu = dev->archdata.iommu; | 408 | struct tegra_smmu *smmu = dev->archdata.iommu; |
| 398 | struct tegra_smmu_as *as = domain->priv; | 409 | struct tegra_smmu_as *as = to_smmu_as(domain); |
| 399 | struct device_node *np = dev->of_node; | 410 | struct device_node *np = dev->of_node; |
| 400 | struct of_phandle_args args; | 411 | struct of_phandle_args args; |
| 401 | unsigned int index = 0; | 412 | unsigned int index = 0; |
| @@ -428,7 +439,7 @@ static int tegra_smmu_attach_dev(struct iommu_domain *domain, | |||
| 428 | 439 | ||
| 429 | static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev) | 440 | static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev) |
| 430 | { | 441 | { |
| 431 | struct tegra_smmu_as *as = domain->priv; | 442 | struct tegra_smmu_as *as = to_smmu_as(domain); |
| 432 | struct device_node *np = dev->of_node; | 443 | struct device_node *np = dev->of_node; |
| 433 | struct tegra_smmu *smmu = as->smmu; | 444 | struct tegra_smmu *smmu = as->smmu; |
| 434 | struct of_phandle_args args; | 445 | struct of_phandle_args args; |
| @@ -481,7 +492,7 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova, | |||
| 481 | smmu_flush_tlb_section(smmu, as->id, iova); | 492 | smmu_flush_tlb_section(smmu, as->id, iova); |
| 482 | smmu_flush(smmu); | 493 | smmu_flush(smmu); |
| 483 | } else { | 494 | } else { |
| 484 | page = pfn_to_page(pd[pde] & SMMU_PFN_MASK); | 495 | page = pfn_to_page(pd[pde] & smmu->pfn_mask); |
| 485 | pt = page_address(page); | 496 | pt = page_address(page); |
| 486 | } | 497 | } |
| 487 | 498 | ||
| @@ -503,7 +514,7 @@ static void as_put_pte(struct tegra_smmu_as *as, dma_addr_t iova) | |||
| 503 | u32 *pd = page_address(as->pd), *pt; | 514 | u32 *pd = page_address(as->pd), *pt; |
| 504 | struct page *page; | 515 | struct page *page; |
| 505 | 516 | ||
| 506 | page = pfn_to_page(pd[pde] & SMMU_PFN_MASK); | 517 | page = pfn_to_page(pd[pde] & as->smmu->pfn_mask); |
| 507 | pt = page_address(page); | 518 | pt = page_address(page); |
| 508 | 519 | ||
| 509 | /* | 520 | /* |
| @@ -524,7 +535,7 @@ static void as_put_pte(struct tegra_smmu_as *as, dma_addr_t iova) | |||
| 524 | static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova, | 535 | static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova, |
| 525 | phys_addr_t paddr, size_t size, int prot) | 536 | phys_addr_t paddr, size_t size, int prot) |
| 526 | { | 537 | { |
| 527 | struct tegra_smmu_as *as = domain->priv; | 538 | struct tegra_smmu_as *as = to_smmu_as(domain); |
| 528 | struct tegra_smmu *smmu = as->smmu; | 539 | struct tegra_smmu *smmu = as->smmu; |
| 529 | unsigned long offset; | 540 | unsigned long offset; |
| 530 | struct page *page; | 541 | struct page *page; |
| @@ -548,7 +559,7 @@ static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova, | |||
| 548 | static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova, | 559 | static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova, |
| 549 | size_t size) | 560 | size_t size) |
| 550 | { | 561 | { |
| 551 | struct tegra_smmu_as *as = domain->priv; | 562 | struct tegra_smmu_as *as = to_smmu_as(domain); |
| 552 | struct tegra_smmu *smmu = as->smmu; | 563 | struct tegra_smmu *smmu = as->smmu; |
| 553 | unsigned long offset; | 564 | unsigned long offset; |
| 554 | struct page *page; | 565 | struct page *page; |
| @@ -572,13 +583,13 @@ static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova, | |||
| 572 | static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain, | 583 | static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain, |
| 573 | dma_addr_t iova) | 584 | dma_addr_t iova) |
| 574 | { | 585 | { |
| 575 | struct tegra_smmu_as *as = domain->priv; | 586 | struct tegra_smmu_as *as = to_smmu_as(domain); |
| 576 | struct page *page; | 587 | struct page *page; |
| 577 | unsigned long pfn; | 588 | unsigned long pfn; |
| 578 | u32 *pte; | 589 | u32 *pte; |
| 579 | 590 | ||
| 580 | pte = as_get_pte(as, iova, &page); | 591 | pte = as_get_pte(as, iova, &page); |
| 581 | pfn = *pte & SMMU_PFN_MASK; | 592 | pfn = *pte & as->smmu->pfn_mask; |
| 582 | 593 | ||
| 583 | return PFN_PHYS(pfn); | 594 | return PFN_PHYS(pfn); |
| 584 | } | 595 | } |
| @@ -633,8 +644,8 @@ static void tegra_smmu_remove_device(struct device *dev) | |||
| 633 | 644 | ||
| 634 | static const struct iommu_ops tegra_smmu_ops = { | 645 | static const struct iommu_ops tegra_smmu_ops = { |
| 635 | .capable = tegra_smmu_capable, | 646 | .capable = tegra_smmu_capable, |
| 636 | .domain_init = tegra_smmu_domain_init, | 647 | .domain_alloc = tegra_smmu_domain_alloc, |
| 637 | .domain_destroy = tegra_smmu_domain_destroy, | 648 | .domain_free = tegra_smmu_domain_free, |
| 638 | .attach_dev = tegra_smmu_attach_dev, | 649 | .attach_dev = tegra_smmu_attach_dev, |
| 639 | .detach_dev = tegra_smmu_detach_dev, | 650 | .detach_dev = tegra_smmu_detach_dev, |
| 640 | .add_device = tegra_smmu_add_device, | 651 | .add_device = tegra_smmu_add_device, |
| @@ -702,6 +713,10 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev, | |||
| 702 | smmu->dev = dev; | 713 | smmu->dev = dev; |
| 703 | smmu->mc = mc; | 714 | smmu->mc = mc; |
| 704 | 715 | ||
| 716 | smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1; | ||
| 717 | dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n", | ||
| 718 | mc->soc->num_address_bits, smmu->pfn_mask); | ||
| 719 | |||
| 705 | value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f); | 720 | value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f); |
| 706 | 721 | ||
| 707 | if (soc->supports_request_limit) | 722 | if (soc->supports_request_limit) |
diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 38daa453f2e5..0546b8710ce3 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h | |||
| @@ -51,9 +51,33 @@ struct iommu_domain_geometry { | |||
| 51 | bool force_aperture; /* DMA only allowed in mappable range? */ | 51 | bool force_aperture; /* DMA only allowed in mappable range? */ |
| 52 | }; | 52 | }; |
| 53 | 53 | ||
| 54 | /* Domain feature flags */ | ||
| 55 | #define __IOMMU_DOMAIN_PAGING (1U << 0) /* Support for iommu_map/unmap */ | ||
| 56 | #define __IOMMU_DOMAIN_DMA_API (1U << 1) /* Domain for use in DMA-API | ||
| 57 | implementation */ | ||
| 58 | #define __IOMMU_DOMAIN_PT (1U << 2) /* Domain is identity mapped */ | ||
| 59 | |||
| 60 | /* | ||
| 61 | * This are the possible domain-types | ||
| 62 | * | ||
| 63 | * IOMMU_DOMAIN_BLOCKED - All DMA is blocked, can be used to isolate | ||
| 64 | * devices | ||
| 65 | * IOMMU_DOMAIN_IDENTITY - DMA addresses are system physical addresses | ||
| 66 | * IOMMU_DOMAIN_UNMANAGED - DMA mappings managed by IOMMU-API user, used | ||
| 67 | * for VMs | ||
| 68 | * IOMMU_DOMAIN_DMA - Internally used for DMA-API implementations. | ||
| 69 | * This flag allows IOMMU drivers to implement | ||
| 70 | * certain optimizations for these domains | ||
| 71 | */ | ||
| 72 | #define IOMMU_DOMAIN_BLOCKED (0U) | ||
| 73 | #define IOMMU_DOMAIN_IDENTITY (__IOMMU_DOMAIN_PT) | ||
| 74 | #define IOMMU_DOMAIN_UNMANAGED (__IOMMU_DOMAIN_PAGING) | ||
| 75 | #define IOMMU_DOMAIN_DMA (__IOMMU_DOMAIN_PAGING | \ | ||
| 76 | __IOMMU_DOMAIN_DMA_API) | ||
| 77 | |||
| 54 | struct iommu_domain { | 78 | struct iommu_domain { |
| 79 | unsigned type; | ||
| 55 | const struct iommu_ops *ops; | 80 | const struct iommu_ops *ops; |
| 56 | void *priv; | ||
| 57 | iommu_fault_handler_t handler; | 81 | iommu_fault_handler_t handler; |
| 58 | void *handler_token; | 82 | void *handler_token; |
| 59 | struct iommu_domain_geometry geometry; | 83 | struct iommu_domain_geometry geometry; |
| @@ -113,8 +137,11 @@ enum iommu_attr { | |||
| 113 | */ | 137 | */ |
| 114 | struct iommu_ops { | 138 | struct iommu_ops { |
| 115 | bool (*capable)(enum iommu_cap); | 139 | bool (*capable)(enum iommu_cap); |
| 116 | int (*domain_init)(struct iommu_domain *domain); | 140 | |
| 117 | void (*domain_destroy)(struct iommu_domain *domain); | 141 | /* Domain allocation and freeing by the iommu driver */ |
| 142 | struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type); | ||
| 143 | void (*domain_free)(struct iommu_domain *); | ||
| 144 | |||
| 118 | int (*attach_dev)(struct iommu_domain *domain, struct device *dev); | 145 | int (*attach_dev)(struct iommu_domain *domain, struct device *dev); |
| 119 | void (*detach_dev)(struct iommu_domain *domain, struct device *dev); | 146 | void (*detach_dev)(struct iommu_domain *domain, struct device *dev); |
| 120 | int (*map)(struct iommu_domain *domain, unsigned long iova, | 147 | int (*map)(struct iommu_domain *domain, unsigned long iova, |
