author     Linus Torvalds <torvalds@linux-foundation.org>  2013-07-10 17:46:40 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-07-10 17:46:40 -0400
commit     6664565681a1c0c95607ae2e30070352d9a563d0 (patch)
tree       fdd60bf602e1c26e87ab25ffd2cc370e0ab51eac
parent     496fd15bee6f2fd673ab992e5211c5f3c5bd6779 (diff)
parent     01ce784acfa69a171afe6ec3f85a959546f2d18a (diff)
Merge tag 'iommu-updates-v3.11' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull IOMMU updates from Joerg Roedel:
"A few updates this time, most important and exiciting (to me) is:
- The new ARM SMMU driver. This is a common IOMMU driver that will
hopefully be used in a lot of upcoming ARM chips. So the mess in
the past where every SOC had its own IOMMU will be over.
Besides that:
- Some important fixes in the IOMMU unmap path. There are fixes in
the common code and also in the AMD IOMMU driver.
- Other random fixes"
* tag 'iommu-updates-v3.11' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
MAINTAINERS: add entry for ARM system MMU driver
iommu/arm: Add support for ARM Ltd. System MMU architecture
documentation/iommu: Add description of ARM System MMU binding
iommu: Use %pa and %zx instead of casting
iommu/amd: Only unmap large pages from the first pte
iommu: Fix compiler warning on pr_debug
iommu/amd: Fix memory leak in free_pagetable
iommu: Split iommu_unmaps
iommu/{vt-d,amd}: Remove multifunction assumption around grouping
iommu/omap: fix checkpatch warnings in omap iommu code
iommu/omap: fix printk formats for dma_addr_t
iommu/vt-d: DMAR reporting table needs at least one DRHD
iommu/vt-d: Downgrade the warning if enabling irq remapping fails
-rw-r--r--  Documentation/devicetree/bindings/iommu/arm,smmu.txt |   70
-rw-r--r--  MAINTAINERS                                          |    6
-rw-r--r--  drivers/iommu/Kconfig                                |   13
-rw-r--r--  drivers/iommu/Makefile                               |    1
-rw-r--r--  drivers/iommu/amd_iommu.c                            |  104
-rw-r--r--  drivers/iommu/arm-smmu.c                             | 1969
-rw-r--r--  drivers/iommu/dmar.c                                 |    4
-rw-r--r--  drivers/iommu/intel-iommu.c                          |   25
-rw-r--r--  drivers/iommu/intel_irq_remapping.c                  |    3
-rw-r--r--  drivers/iommu/iommu.c                                |   86
-rw-r--r--  drivers/iommu/omap-iommu.c                           |   15
-rw-r--r--  drivers/iommu/omap-iopgtable.h                       |    2
-rw-r--r--  drivers/iommu/omap-iovmm.c                           |    4
13 files changed, 2212 insertions(+), 90 deletions(-)
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.txt b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
new file mode 100644
index 000000000000..e34c6cdd8ba8
--- /dev/null
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
@@ -0,0 +1,70 @@
1 | * ARM System MMU Architecture Implementation | ||
2 | |||
3 | ARM SoCs may contain an implementation of the ARM System Memory | ||
4 | Management Unit Architecture, which can be used to provide 1 or 2 stages | ||
5 | of address translation to bus masters external to the CPU. | ||
6 | |||
7 | The SMMU may also raise interrupts in response to various fault | ||
8 | conditions. | ||
9 | |||
10 | ** System MMU required properties: | ||
11 | |||
12 | - compatible : Should be one of: | ||
13 | |||
14 | "arm,smmu-v1" | ||
15 | "arm,smmu-v2" | ||
16 | "arm,mmu-400" | ||
17 | "arm,mmu-500" | ||
18 | |||
19 | depending on the particular implementation and/or the | ||
20 | version of the architecture implemented. | ||
21 | |||
22 | - reg : Base address and size of the SMMU. | ||
23 | |||
24 | - #global-interrupts : The number of global interrupts exposed by the | ||
25 | device. | ||
26 | |||
27 | - interrupts : Interrupt list, with the first #global-interrupts entries ||
28 | corresponding to the global interrupts and any | ||
29 | following entries corresponding to context interrupts, | ||
30 | specified in order of their indexing by the SMMU. | ||
31 | |||
32 | For SMMUv2 implementations, there must be exactly one | ||
33 | interrupt per context bank. In the case of a single, | ||
34 | combined interrupt, it must be listed multiple times. | ||
35 | |||
36 | - mmu-masters : A list of phandles to device nodes representing bus | ||
37 | masters for which the SMMU can provide a translation | ||
38 | and their corresponding StreamIDs (see example below). | ||
39 | Each device node linked from this list must have a | ||
40 | "#stream-id-cells" property, indicating the number of | ||
41 | StreamIDs associated with it. | ||
42 | |||
43 | ** System MMU optional properties: | ||
44 | |||
45 | - smmu-parent : When multiple SMMUs are chained together, this | ||
46 | property can be used to provide a phandle to the | ||
47 | parent SMMU (that is the next SMMU on the path going | ||
48 | from the mmu-masters towards memory) node for this | ||
49 | SMMU (see the chained example below). ||
50 | |||
51 | Example: | ||
52 | |||
53 | smmu { | ||
54 | compatible = "arm,smmu-v1"; | ||
55 | reg = <0xba5e0000 0x10000>; | ||
56 | #global-interrupts = <2>; | ||
57 | interrupts = <0 32 4>, | ||
58 | <0 33 4>, | ||
59 | <0 34 4>, /* This is the first context interrupt */ | ||
60 | <0 35 4>, | ||
61 | <0 36 4>, | ||
62 | <0 37 4>; | ||
63 | |||
64 | /* | ||
65 | * Two DMA controllers, the first with two StreamIDs (0xd01d | ||
66 | * and 0xd01e) and the second with only one (0xd11c). | ||
67 | */ | ||
68 | mmu-masters = <&dma0 0xd01d 0xd01e>, | ||
69 | <&dma1 0xd11c>; | ||
70 | }; | ||
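
The example above does not exercise smmu-parent. A hypothetical chained
topology is sketched below; every address, interrupt and StreamID here is
invented, and, matching the driver's assumption that translations do not
terminate early, the root SMMU (smmu2) repeats the master's StreamID in
its own mmu-masters list:

	smmu1: smmu@ba5e0000 {
		compatible = "arm,smmu-v1";
		reg = <0xba5e0000 0x10000>;
		#global-interrupts = <1>;
		interrupts = <0 32 4>, <0 33 4>;
		mmu-masters = <&dma0 0xd01d>;
		smmu-parent = <&smmu2>;
	};

	smmu2: smmu@ba5f0000 {
		compatible = "arm,smmu-v1";
		reg = <0xba5f0000 0x10000>;
		#global-interrupts = <1>;
		interrupts = <0 34 4>, <0 35 4>;
		mmu-masters = <&dma0 0xd01d>;
	};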
diff --git a/MAINTAINERS b/MAINTAINERS
index 37f9a71c744f..af4c3be44ac3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1333,6 +1333,12 @@ S: Supported
1333 | F: arch/arm/mach-zynq/ | 1333 | F: arch/arm/mach-zynq/ |
1334 | F: drivers/cpuidle/cpuidle-zynq.c | 1334 | F: drivers/cpuidle/cpuidle-zynq.c |
1335 | 1335 | ||
1336 | ARM SMMU DRIVER | ||
1337 | M: Will Deacon <will.deacon@arm.com> | ||
1338 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | ||
1339 | S: Maintained | ||
1340 | F: drivers/iommu/arm-smmu.c | ||
1341 | |||
1336 | ARM64 PORT (AARCH64 ARCHITECTURE) | 1342 | ARM64 PORT (AARCH64 ARCHITECTURE) |
1337 | M: Catalin Marinas <catalin.marinas@arm.com> | 1343 | M: Catalin Marinas <catalin.marinas@arm.com> |
1338 | M: Will Deacon <will.deacon@arm.com> | 1344 | M: Will Deacon <will.deacon@arm.com> |
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 01730b2b9954..820d85c4a4a0 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -269,4 +269,17 @@ config SPAPR_TCE_IOMMU
269 | Enables bits of IOMMU API required by VFIO. The iommu_ops | 269 | Enables bits of IOMMU API required by VFIO. The iommu_ops |
270 | is not implemented as it is not necessary for VFIO. | 270 | is not implemented as it is not necessary for VFIO. |
271 | 271 | ||
272 | config ARM_SMMU | ||
273 | bool "ARM Ltd. System MMU (SMMU) Support" | ||
274 | depends on ARM64 || (ARM_LPAE && OF) | ||
275 | select IOMMU_API | ||
276 | select ARM_DMA_USE_IOMMU if ARM | ||
277 | help | ||
278 | Support for implementations of the ARM System MMU architecture | ||
279 | versions 1 and 2. The driver supports both v7l and v8l table | ||
280 | formats with 4k and 64k page sizes. | ||
281 | |||
282 | Say Y here if your SoC includes an IOMMU device implementing | ||
283 | the ARM SMMU architecture. | ||
284 | |||
272 | endif # IOMMU_SUPPORT | 285 | endif # IOMMU_SUPPORT |
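
As the option is a bool rather than a tristate (built-in only, no module
support), enabling it leaves a line like this in .config:

	CONFIG_ARM_SMMU=y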
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index ef0e5207ad69..bbe7041212dd 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_OF_IOMMU) += of_iommu.o
3 | obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o | 3 | obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o |
4 | obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o | 4 | obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o |
5 | obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o | 5 | obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o |
6 | obj-$(CONFIG_ARM_SMMU) += arm-smmu.o | ||
6 | obj-$(CONFIG_DMAR_TABLE) += dmar.o | 7 | obj-$(CONFIG_DMAR_TABLE) += dmar.o |
7 | obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o | 8 | obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o |
8 | obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o | 9 | obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o |
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 21d02b0d907c..6dc659426a51 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -287,14 +287,27 @@ static struct pci_dev *get_isolation_root(struct pci_dev *pdev)
287 | 287 | ||
288 | /* | 288 | /* |
289 | * If it's a multifunction device that does not support our | 289 | * If it's a multifunction device that does not support our |
290 | * required ACS flags, add to the same group as function 0. | 290 | * required ACS flags, add to the same group as lowest numbered |
291 | * function that also does not support the required ACS flags. ||
291 | */ | 292 | */ |
292 | if (dma_pdev->multifunction && | 293 | if (dma_pdev->multifunction && |
293 | !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) | 294 | !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) { |
294 | swap_pci_ref(&dma_pdev, | 295 | u8 i, slot = PCI_SLOT(dma_pdev->devfn); |
295 | pci_get_slot(dma_pdev->bus, | 296 | |
296 | PCI_DEVFN(PCI_SLOT(dma_pdev->devfn), | 297 | for (i = 0; i < 8; i++) { |
297 | 0))); | 298 | struct pci_dev *tmp; |
299 | |||
300 | tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i)); | ||
301 | if (!tmp) | ||
302 | continue; | ||
303 | |||
304 | if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) { | ||
305 | swap_pci_ref(&dma_pdev, tmp); | ||
306 | break; | ||
307 | } | ||
308 | pci_dev_put(tmp); | ||
309 | } | ||
310 | } | ||
298 | 311 | ||
299 | /* | 312 | /* |
300 | * Devices on the root bus go through the iommu. If that's not us, | 313 | * Devices on the root bus go through the iommu. If that's not us, |
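
For reference, the devfn arithmetic the new loop leans on is defined in
include/uapi/linux/pci.h: a devfn packs a 5-bit slot number and a 3-bit
function number, so iterating i from 0 to 7 visits every possible function
in the slot:

	#define PCI_DEVFN(slot, func)	((((slot) & 0x1f) << 3) | ((func) & 0x07))
	#define PCI_SLOT(devfn)		(((devfn) >> 3) & 0x1f)
	#define PCI_FUNC(devfn)		((devfn) & 0x07)

pci_get_slot() returns its device with an elevated reference count, which
is why the loop drops the reference with pci_dev_put() whenever the
candidate function is not chosen as the group head.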
@@ -1484,6 +1497,10 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
1484 | 1497 | ||
1485 | /* Large PTE found which maps this address */ | 1498 | /* Large PTE found which maps this address */ |
1486 | unmap_size = PTE_PAGE_SIZE(*pte); | 1499 | unmap_size = PTE_PAGE_SIZE(*pte); |
1500 | |||
1501 | /* Only unmap from the first pte in the page */ | ||
1502 | if ((unmap_size - 1) & bus_addr) | ||
1503 | break; | ||
1487 | count = PAGE_SIZE_PTE_COUNT(unmap_size); | 1504 | count = PAGE_SIZE_PTE_COUNT(unmap_size); |
1488 | for (i = 0; i < count; i++) | 1505 | for (i = 0; i < count; i++) |
1489 | pte[i] = 0ULL; | 1506 | pte[i] = 0ULL; |
@@ -1493,7 +1510,7 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
1493 | unmapped += unmap_size; | 1510 | unmapped += unmap_size; |
1494 | } | 1511 | } |
1495 | 1512 | ||
1496 | BUG_ON(!is_power_of_2(unmapped)); | 1513 | BUG_ON(unmapped && !is_power_of_2(unmapped)); |
1497 | 1514 | ||
1498 | return unmapped; | 1515 | return unmapped; |
1499 | } | 1516 | } |
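
The added `(unmap_size - 1) & bus_addr` test is the usual power-of-two
alignment check: a large PTE of size S is S-aligned, so an address with
any low bits set relative to S points into the middle of the mapping
rather than at its first PTE. A minimal standalone sketch (the helper
name is hypothetical):

	#include <stdbool.h>
	#include <stdint.h>

	/* True only when bus_addr sits on a boundary of the large page. */
	static bool starts_large_pte(uint64_t bus_addr, uint64_t unmap_size)
	{
		return ((unmap_size - 1) & bus_addr) == 0;
	}

	/* starts_large_pte(0x200000, 0x200000) -> true  (2 MiB boundary)  */
	/* starts_large_pte(0x201000, 0x200000) -> false (inside the page) */

Because the loop can now break before clearing anything, unmapped may
legitimately be zero, which is why the BUG_ON above is relaxed to
tolerate that case.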
@@ -1893,34 +1910,59 @@ static void domain_id_free(int id)
1893 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); | 1910 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); |
1894 | } | 1911 | } |
1895 | 1912 | ||
1913 | #define DEFINE_FREE_PT_FN(LVL, FN) \ | ||
1914 | static void free_pt_##LVL (unsigned long __pt) \ | ||
1915 | { \ | ||
1916 | unsigned long p; \ | ||
1917 | u64 *pt; \ | ||
1918 | int i; \ | ||
1919 | \ | ||
1920 | pt = (u64 *)__pt; \ | ||
1921 | \ | ||
1922 | for (i = 0; i < 512; ++i) { \ | ||
1923 | if (!IOMMU_PTE_PRESENT(pt[i])) \ | ||
1924 | continue; \ | ||
1925 | \ | ||
1926 | p = (unsigned long)IOMMU_PTE_PAGE(pt[i]); \ | ||
1927 | FN(p); \ | ||
1928 | } \ | ||
1929 | free_page((unsigned long)pt); \ | ||
1930 | } | ||
1931 | |||
1932 | DEFINE_FREE_PT_FN(l2, free_page) | ||
1933 | DEFINE_FREE_PT_FN(l3, free_pt_l2) | ||
1934 | DEFINE_FREE_PT_FN(l4, free_pt_l3) | ||
1935 | DEFINE_FREE_PT_FN(l5, free_pt_l4) | ||
1936 | DEFINE_FREE_PT_FN(l6, free_pt_l5) | ||
1937 | |||
1896 | static void free_pagetable(struct protection_domain *domain) | 1938 | static void free_pagetable(struct protection_domain *domain) |
1897 | { | 1939 | { |
1898 | int i, j; | 1940 | unsigned long root = (unsigned long)domain->pt_root; |
1899 | u64 *p1, *p2, *p3; | ||
1900 | |||
1901 | p1 = domain->pt_root; | ||
1902 | |||
1903 | if (!p1) | ||
1904 | return; | ||
1905 | |||
1906 | for (i = 0; i < 512; ++i) { | ||
1907 | if (!IOMMU_PTE_PRESENT(p1[i])) | ||
1908 | continue; | ||
1909 | |||
1910 | p2 = IOMMU_PTE_PAGE(p1[i]); | ||
1911 | for (j = 0; j < 512; ++j) { | ||
1912 | if (!IOMMU_PTE_PRESENT(p2[j])) | ||
1913 | continue; | ||
1914 | p3 = IOMMU_PTE_PAGE(p2[j]); | ||
1915 | free_page((unsigned long)p3); | ||
1916 | } | ||
1917 | 1941 | ||
1918 | free_page((unsigned long)p2); | 1942 | switch (domain->mode) { |
1943 | case PAGE_MODE_NONE: | ||
1944 | break; | ||
1945 | case PAGE_MODE_1_LEVEL: | ||
1946 | free_page(root); | ||
1947 | break; | ||
1948 | case PAGE_MODE_2_LEVEL: | ||
1949 | free_pt_l2(root); | ||
1950 | break; | ||
1951 | case PAGE_MODE_3_LEVEL: | ||
1952 | free_pt_l3(root); | ||
1953 | break; | ||
1954 | case PAGE_MODE_4_LEVEL: | ||
1955 | free_pt_l4(root); | ||
1956 | break; | ||
1957 | case PAGE_MODE_5_LEVEL: | ||
1958 | free_pt_l5(root); | ||
1959 | break; | ||
1960 | case PAGE_MODE_6_LEVEL: | ||
1961 | free_pt_l6(root); | ||
1962 | break; | ||
1963 | default: | ||
1964 | BUG(); | ||
1919 | } | 1965 | } |
1920 | |||
1921 | free_page((unsigned long)p1); | ||
1922 | |||
1923 | domain->pt_root = NULL; | ||
1924 | } | 1966 | } |
1925 | 1967 | ||
1926 | static void free_gcr3_tbl_level1(u64 *tbl) | 1968 | static void free_gcr3_tbl_level1(u64 *tbl) |
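
The DEFINE_FREE_PT_FN chain generates one walker per page-table level,
each freeing its children before its own page. As a sketch,
DEFINE_FREE_PT_FN(l3, free_pt_l2) expands to:

	static void free_pt_l3(unsigned long __pt)
	{
		unsigned long p;
		u64 *pt;
		int i;

		pt = (u64 *)__pt;

		for (i = 0; i < 512; ++i) {
			if (!IOMMU_PTE_PRESENT(pt[i]))
				continue;

			p = (unsigned long)IOMMU_PTE_PAGE(pt[i]);
			free_pt_l2(p);	/* recurse one level down */
		}
		free_page((unsigned long)pt);
	}

Dispatching on domain->mode then releases exactly as many levels as were
allocated; the old code hard-coded a three-level walk and leaked the
deeper tables of 4-, 5- and 6-level domains, which is the memory leak
this pull fixes.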
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
new file mode 100644
index 000000000000..ebd0a4cff049
--- /dev/null
+++ b/drivers/iommu/arm-smmu.c
@@ -0,0 +1,1969 @@
1 | /* | ||
2 | * IOMMU API for ARM architected SMMU implementations. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License | ||
14 | * along with this program; if not, write to the Free Software | ||
15 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
16 | * | ||
17 | * Copyright (C) 2013 ARM Limited | ||
18 | * | ||
19 | * Author: Will Deacon <will.deacon@arm.com> | ||
20 | * | ||
21 | * This driver currently supports: | ||
22 | * - SMMUv1 and v2 implementations | ||
23 | * - Stream-matching and stream-indexing | ||
24 | * - v7/v8 long-descriptor format | ||
25 | * - Non-secure access to the SMMU | ||
26 | * - 4k and 64k pages, with contiguous pte hints. | ||
27 | * - Up to 39-bit addressing | ||
28 | * - Context fault reporting | ||
29 | */ | ||
30 | |||
31 | #define pr_fmt(fmt) "arm-smmu: " fmt | ||
32 | |||
33 | #include <linux/delay.h> | ||
34 | #include <linux/dma-mapping.h> | ||
35 | #include <linux/err.h> | ||
36 | #include <linux/interrupt.h> | ||
37 | #include <linux/io.h> | ||
38 | #include <linux/iommu.h> | ||
39 | #include <linux/mm.h> | ||
40 | #include <linux/module.h> | ||
41 | #include <linux/of.h> | ||
42 | #include <linux/platform_device.h> | ||
43 | #include <linux/slab.h> | ||
44 | #include <linux/spinlock.h> | ||
45 | |||
46 | #include <linux/amba/bus.h> | ||
47 | |||
48 | #include <asm/pgalloc.h> | ||
49 | |||
50 | /* Maximum number of stream IDs assigned to a single device */ | ||
51 | #define MAX_MASTER_STREAMIDS 8 | ||
52 | |||
53 | /* Maximum number of context banks per SMMU */ | ||
54 | #define ARM_SMMU_MAX_CBS 128 | ||
55 | |||
56 | /* Maximum number of mapping groups per SMMU */ | ||
57 | #define ARM_SMMU_MAX_SMRS 128 | ||
58 | |||
59 | /* Number of VMIDs per SMMU */ | ||
60 | #define ARM_SMMU_NUM_VMIDS 256 | ||
61 | |||
62 | /* SMMU global address space */ | ||
63 | #define ARM_SMMU_GR0(smmu) ((smmu)->base) | ||
64 | #define ARM_SMMU_GR1(smmu) ((smmu)->base + (smmu)->pagesize) | ||
65 | |||
66 | /* Page table bits */ | ||
67 | #define ARM_SMMU_PTE_PAGE (((pteval_t)3) << 0) | ||
68 | #define ARM_SMMU_PTE_CONT (((pteval_t)1) << 52) | ||
69 | #define ARM_SMMU_PTE_AF (((pteval_t)1) << 10) | ||
70 | #define ARM_SMMU_PTE_SH_NS (((pteval_t)0) << 8) | ||
71 | #define ARM_SMMU_PTE_SH_OS (((pteval_t)2) << 8) | ||
72 | #define ARM_SMMU_PTE_SH_IS (((pteval_t)3) << 8) | ||
73 | |||
74 | #if PAGE_SIZE == SZ_4K | ||
75 | #define ARM_SMMU_PTE_CONT_ENTRIES 16 | ||
76 | #elif PAGE_SIZE == SZ_64K | ||
77 | #define ARM_SMMU_PTE_CONT_ENTRIES 32 | ||
78 | #else | ||
79 | #define ARM_SMMU_PTE_CONT_ENTRIES 1 | ||
80 | #endif | ||
81 | |||
82 | #define ARM_SMMU_PTE_CONT_SIZE (PAGE_SIZE * ARM_SMMU_PTE_CONT_ENTRIES) | ||
83 | #define ARM_SMMU_PTE_CONT_MASK (~(ARM_SMMU_PTE_CONT_SIZE - 1)) | ||
84 | #define ARM_SMMU_PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(pte_t)) | ||
85 | |||
86 | /* Stage-1 PTE */ | ||
87 | #define ARM_SMMU_PTE_AP_UNPRIV (((pteval_t)1) << 6) | ||
88 | #define ARM_SMMU_PTE_AP_RDONLY (((pteval_t)2) << 6) | ||
89 | #define ARM_SMMU_PTE_ATTRINDX_SHIFT 2 | ||
90 | |||
91 | /* Stage-2 PTE */ | ||
92 | #define ARM_SMMU_PTE_HAP_FAULT (((pteval_t)0) << 6) | ||
93 | #define ARM_SMMU_PTE_HAP_READ (((pteval_t)1) << 6) | ||
94 | #define ARM_SMMU_PTE_HAP_WRITE (((pteval_t)2) << 6) | ||
95 | #define ARM_SMMU_PTE_MEMATTR_OIWB (((pteval_t)0xf) << 2) | ||
96 | #define ARM_SMMU_PTE_MEMATTR_NC (((pteval_t)0x5) << 2) | ||
97 | #define ARM_SMMU_PTE_MEMATTR_DEV (((pteval_t)0x1) << 2) | ||
98 | |||
99 | /* Configuration registers */ | ||
100 | #define ARM_SMMU_GR0_sCR0 0x0 | ||
101 | #define sCR0_CLIENTPD (1 << 0) | ||
102 | #define sCR0_GFRE (1 << 1) | ||
103 | #define sCR0_GFIE (1 << 2) | ||
104 | #define sCR0_GCFGFRE (1 << 4) | ||
105 | #define sCR0_GCFGFIE (1 << 5) | ||
106 | #define sCR0_USFCFG (1 << 10) | ||
107 | #define sCR0_VMIDPNE (1 << 11) | ||
108 | #define sCR0_PTM (1 << 12) | ||
109 | #define sCR0_FB (1 << 13) | ||
110 | #define sCR0_BSU_SHIFT 14 | ||
111 | #define sCR0_BSU_MASK 0x3 | ||
112 | |||
113 | /* Identification registers */ | ||
114 | #define ARM_SMMU_GR0_ID0 0x20 | ||
115 | #define ARM_SMMU_GR0_ID1 0x24 | ||
116 | #define ARM_SMMU_GR0_ID2 0x28 | ||
117 | #define ARM_SMMU_GR0_ID3 0x2c | ||
118 | #define ARM_SMMU_GR0_ID4 0x30 | ||
119 | #define ARM_SMMU_GR0_ID5 0x34 | ||
120 | #define ARM_SMMU_GR0_ID6 0x38 | ||
121 | #define ARM_SMMU_GR0_ID7 0x3c | ||
122 | #define ARM_SMMU_GR0_sGFSR 0x48 | ||
123 | #define ARM_SMMU_GR0_sGFSYNR0 0x50 | ||
124 | #define ARM_SMMU_GR0_sGFSYNR1 0x54 | ||
125 | #define ARM_SMMU_GR0_sGFSYNR2 0x58 | ||
126 | #define ARM_SMMU_GR0_PIDR0 0xfe0 | ||
127 | #define ARM_SMMU_GR0_PIDR1 0xfe4 | ||
128 | #define ARM_SMMU_GR0_PIDR2 0xfe8 | ||
129 | |||
130 | #define ID0_S1TS (1 << 30) | ||
131 | #define ID0_S2TS (1 << 29) | ||
132 | #define ID0_NTS (1 << 28) | ||
133 | #define ID0_SMS (1 << 27) | ||
134 | #define ID0_PTFS_SHIFT 24 | ||
135 | #define ID0_PTFS_MASK 0x2 | ||
136 | #define ID0_PTFS_V8_ONLY 0x2 | ||
137 | #define ID0_CTTW (1 << 14) | ||
138 | #define ID0_NUMIRPT_SHIFT 16 | ||
139 | #define ID0_NUMIRPT_MASK 0xff | ||
140 | #define ID0_NUMSMRG_SHIFT 0 | ||
141 | #define ID0_NUMSMRG_MASK 0xff | ||
142 | |||
143 | #define ID1_PAGESIZE (1 << 31) | ||
144 | #define ID1_NUMPAGENDXB_SHIFT 28 | ||
145 | #define ID1_NUMPAGENDXB_MASK 7 | ||
146 | #define ID1_NUMS2CB_SHIFT 16 | ||
147 | #define ID1_NUMS2CB_MASK 0xff | ||
148 | #define ID1_NUMCB_SHIFT 0 | ||
149 | #define ID1_NUMCB_MASK 0xff | ||
150 | |||
151 | #define ID2_OAS_SHIFT 4 | ||
152 | #define ID2_OAS_MASK 0xf | ||
153 | #define ID2_IAS_SHIFT 0 | ||
154 | #define ID2_IAS_MASK 0xf | ||
155 | #define ID2_UBS_SHIFT 8 | ||
156 | #define ID2_UBS_MASK 0xf | ||
157 | #define ID2_PTFS_4K (1 << 12) | ||
158 | #define ID2_PTFS_16K (1 << 13) | ||
159 | #define ID2_PTFS_64K (1 << 14) | ||
160 | |||
161 | #define PIDR2_ARCH_SHIFT 4 | ||
162 | #define PIDR2_ARCH_MASK 0xf | ||
163 | |||
164 | /* Global TLB invalidation */ | ||
165 | #define ARM_SMMU_GR0_STLBIALL 0x60 | ||
166 | #define ARM_SMMU_GR0_TLBIVMID 0x64 | ||
167 | #define ARM_SMMU_GR0_TLBIALLNSNH 0x68 | ||
168 | #define ARM_SMMU_GR0_TLBIALLH 0x6c | ||
169 | #define ARM_SMMU_GR0_sTLBGSYNC 0x70 | ||
170 | #define ARM_SMMU_GR0_sTLBGSTATUS 0x74 | ||
171 | #define sTLBGSTATUS_GSACTIVE (1 << 0) | ||
172 | #define TLB_LOOP_TIMEOUT 1000000 /* 1s! */ | ||
173 | |||
174 | /* Stream mapping registers */ | ||
175 | #define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2)) | ||
176 | #define SMR_VALID (1 << 31) | ||
177 | #define SMR_MASK_SHIFT 16 | ||
178 | #define SMR_MASK_MASK 0x7fff | ||
179 | #define SMR_ID_SHIFT 0 | ||
180 | #define SMR_ID_MASK 0x7fff | ||
181 | |||
182 | #define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2)) | ||
183 | #define S2CR_CBNDX_SHIFT 0 | ||
184 | #define S2CR_CBNDX_MASK 0xff | ||
185 | #define S2CR_TYPE_SHIFT 16 | ||
186 | #define S2CR_TYPE_MASK 0x3 | ||
187 | #define S2CR_TYPE_TRANS (0 << S2CR_TYPE_SHIFT) | ||
188 | #define S2CR_TYPE_BYPASS (1 << S2CR_TYPE_SHIFT) | ||
189 | #define S2CR_TYPE_FAULT (2 << S2CR_TYPE_SHIFT) | ||
190 | |||
191 | /* Context bank attribute registers */ | ||
192 | #define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2)) | ||
193 | #define CBAR_VMID_SHIFT 0 | ||
194 | #define CBAR_VMID_MASK 0xff | ||
195 | #define CBAR_S1_MEMATTR_SHIFT 12 | ||
196 | #define CBAR_S1_MEMATTR_MASK 0xf | ||
197 | #define CBAR_S1_MEMATTR_WB 0xf | ||
198 | #define CBAR_TYPE_SHIFT 16 | ||
199 | #define CBAR_TYPE_MASK 0x3 | ||
200 | #define CBAR_TYPE_S2_TRANS (0 << CBAR_TYPE_SHIFT) | ||
201 | #define CBAR_TYPE_S1_TRANS_S2_BYPASS (1 << CBAR_TYPE_SHIFT) | ||
202 | #define CBAR_TYPE_S1_TRANS_S2_FAULT (2 << CBAR_TYPE_SHIFT) | ||
203 | #define CBAR_TYPE_S1_TRANS_S2_TRANS (3 << CBAR_TYPE_SHIFT) | ||
204 | #define CBAR_IRPTNDX_SHIFT 24 | ||
205 | #define CBAR_IRPTNDX_MASK 0xff | ||
206 | |||
207 | #define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2)) | ||
208 | #define CBA2R_RW64_32BIT (0 << 0) | ||
209 | #define CBA2R_RW64_64BIT (1 << 0) | ||
210 | |||
211 | /* Translation context bank */ | ||
212 | #define ARM_SMMU_CB_BASE(smmu) ((smmu)->base + ((smmu)->size >> 1)) | ||
213 | #define ARM_SMMU_CB(smmu, n) ((n) * (smmu)->pagesize) | ||
214 | |||
215 | #define ARM_SMMU_CB_SCTLR 0x0 | ||
216 | #define ARM_SMMU_CB_RESUME 0x8 | ||
217 | #define ARM_SMMU_CB_TTBCR2 0x10 | ||
218 | #define ARM_SMMU_CB_TTBR0_LO 0x20 | ||
219 | #define ARM_SMMU_CB_TTBR0_HI 0x24 | ||
220 | #define ARM_SMMU_CB_TTBCR 0x30 | ||
221 | #define ARM_SMMU_CB_S1_MAIR0 0x38 | ||
222 | #define ARM_SMMU_CB_FSR 0x58 | ||
223 | #define ARM_SMMU_CB_FAR_LO 0x60 | ||
224 | #define ARM_SMMU_CB_FAR_HI 0x64 | ||
225 | #define ARM_SMMU_CB_FSYNR0 0x68 | ||
226 | |||
227 | #define SCTLR_S1_ASIDPNE (1 << 12) | ||
228 | #define SCTLR_CFCFG (1 << 7) | ||
229 | #define SCTLR_CFIE (1 << 6) | ||
230 | #define SCTLR_CFRE (1 << 5) | ||
231 | #define SCTLR_E (1 << 4) | ||
232 | #define SCTLR_AFE (1 << 2) | ||
233 | #define SCTLR_TRE (1 << 1) | ||
234 | #define SCTLR_M (1 << 0) | ||
235 | #define SCTLR_EAE_SBOP (SCTLR_AFE | SCTLR_TRE) | ||
236 | |||
237 | #define RESUME_RETRY (0 << 0) | ||
238 | #define RESUME_TERMINATE (1 << 0) | ||
239 | |||
240 | #define TTBCR_EAE (1 << 31) | ||
241 | |||
242 | #define TTBCR_PASIZE_SHIFT 16 | ||
243 | #define TTBCR_PASIZE_MASK 0x7 | ||
244 | |||
245 | #define TTBCR_TG0_4K (0 << 14) | ||
246 | #define TTBCR_TG0_64K (1 << 14) | ||
247 | |||
248 | #define TTBCR_SH0_SHIFT 12 | ||
249 | #define TTBCR_SH0_MASK 0x3 | ||
250 | #define TTBCR_SH_NS 0 | ||
251 | #define TTBCR_SH_OS 2 | ||
252 | #define TTBCR_SH_IS 3 | ||
253 | |||
254 | #define TTBCR_ORGN0_SHIFT 10 | ||
255 | #define TTBCR_IRGN0_SHIFT 8 | ||
256 | #define TTBCR_RGN_MASK 0x3 | ||
257 | #define TTBCR_RGN_NC 0 | ||
258 | #define TTBCR_RGN_WBWA 1 | ||
259 | #define TTBCR_RGN_WT 2 | ||
260 | #define TTBCR_RGN_WB 3 | ||
261 | |||
262 | #define TTBCR_SL0_SHIFT 6 | ||
263 | #define TTBCR_SL0_MASK 0x3 | ||
264 | #define TTBCR_SL0_LVL_2 0 | ||
265 | #define TTBCR_SL0_LVL_1 1 | ||
266 | |||
267 | #define TTBCR_T1SZ_SHIFT 16 | ||
268 | #define TTBCR_T0SZ_SHIFT 0 | ||
269 | #define TTBCR_SZ_MASK 0xf | ||
270 | |||
271 | #define TTBCR2_SEP_SHIFT 15 | ||
272 | #define TTBCR2_SEP_MASK 0x7 | ||
273 | |||
274 | #define TTBCR2_PASIZE_SHIFT 0 | ||
275 | #define TTBCR2_PASIZE_MASK 0x7 | ||
276 | |||
277 | /* Common definitions for PASize and SEP fields */ | ||
278 | #define TTBCR2_ADDR_32 0 | ||
279 | #define TTBCR2_ADDR_36 1 | ||
280 | #define TTBCR2_ADDR_40 2 | ||
281 | #define TTBCR2_ADDR_42 3 | ||
282 | #define TTBCR2_ADDR_44 4 | ||
283 | #define TTBCR2_ADDR_48 5 | ||
284 | |||
285 | #define MAIR_ATTR_SHIFT(n) ((n) << 3) | ||
286 | #define MAIR_ATTR_MASK 0xff | ||
287 | #define MAIR_ATTR_DEVICE 0x04 | ||
288 | #define MAIR_ATTR_NC 0x44 | ||
289 | #define MAIR_ATTR_WBRWA 0xff | ||
290 | #define MAIR_ATTR_IDX_NC 0 | ||
291 | #define MAIR_ATTR_IDX_CACHE 1 | ||
292 | #define MAIR_ATTR_IDX_DEV 2 | ||
293 | |||
294 | #define FSR_MULTI (1 << 31) | ||
295 | #define FSR_SS (1 << 30) | ||
296 | #define FSR_UUT (1 << 8) | ||
297 | #define FSR_ASF (1 << 7) | ||
298 | #define FSR_TLBLKF (1 << 6) | ||
299 | #define FSR_TLBMCF (1 << 5) | ||
300 | #define FSR_EF (1 << 4) | ||
301 | #define FSR_PF (1 << 3) | ||
302 | #define FSR_AFF (1 << 2) | ||
303 | #define FSR_TF (1 << 1) | ||
304 | |||
305 | #define FSR_IGN (FSR_AFF | FSR_ASF | FSR_TLBMCF | \ | ||
306 | FSR_TLBLKF) | ||
307 | #define FSR_FAULT (FSR_MULTI | FSR_SS | FSR_UUT | \ | ||
308 | FSR_EF | FSR_PF | FSR_TF) | ||
309 | |||
310 | #define FSYNR0_WNR (1 << 4) | ||
311 | |||
312 | struct arm_smmu_smr { | ||
313 | u8 idx; | ||
314 | u16 mask; | ||
315 | u16 id; | ||
316 | }; | ||
317 | |||
318 | struct arm_smmu_master { | ||
319 | struct device_node *of_node; | ||
320 | |||
321 | /* | ||
322 | * The following is specific to the master's position in the | ||
323 | * SMMU chain. | ||
324 | */ | ||
325 | struct rb_node node; | ||
326 | int num_streamids; | ||
327 | u16 streamids[MAX_MASTER_STREAMIDS]; | ||
328 | |||
329 | /* | ||
330 | * We only need to allocate these on the root SMMU, as we | ||
331 | * configure unmatched streams to bypass translation. | ||
332 | */ | ||
333 | struct arm_smmu_smr *smrs; | ||
334 | }; | ||
335 | |||
336 | struct arm_smmu_device { | ||
337 | struct device *dev; | ||
338 | struct device_node *parent_of_node; | ||
339 | |||
340 | void __iomem *base; | ||
341 | unsigned long size; | ||
342 | unsigned long pagesize; | ||
343 | |||
344 | #define ARM_SMMU_FEAT_COHERENT_WALK (1 << 0) | ||
345 | #define ARM_SMMU_FEAT_STREAM_MATCH (1 << 1) | ||
346 | #define ARM_SMMU_FEAT_TRANS_S1 (1 << 2) | ||
347 | #define ARM_SMMU_FEAT_TRANS_S2 (1 << 3) | ||
348 | #define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4) | ||
349 | u32 features; | ||
350 | int version; | ||
351 | |||
352 | u32 num_context_banks; | ||
353 | u32 num_s2_context_banks; | ||
354 | DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS); | ||
355 | atomic_t irptndx; | ||
356 | |||
357 | u32 num_mapping_groups; | ||
358 | DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS); | ||
359 | |||
360 | unsigned long input_size; | ||
361 | unsigned long s1_output_size; | ||
362 | unsigned long s2_output_size; | ||
363 | |||
364 | u32 num_global_irqs; | ||
365 | u32 num_context_irqs; | ||
366 | unsigned int *irqs; | ||
367 | |||
368 | DECLARE_BITMAP(vmid_map, ARM_SMMU_NUM_VMIDS); | ||
369 | |||
370 | struct list_head list; | ||
371 | struct rb_root masters; | ||
372 | }; | ||
373 | |||
374 | struct arm_smmu_cfg { | ||
375 | struct arm_smmu_device *smmu; | ||
376 | u8 vmid; | ||
377 | u8 cbndx; | ||
378 | u8 irptndx; | ||
379 | u32 cbar; | ||
380 | pgd_t *pgd; | ||
381 | }; | ||
382 | |||
383 | struct arm_smmu_domain { | ||
384 | /* | ||
385 | * A domain can span across multiple, chained SMMUs and requires | ||
386 | * all devices within the domain to follow the same translation | ||
387 | * path. | ||
388 | */ | ||
389 | struct arm_smmu_device *leaf_smmu; | ||
390 | struct arm_smmu_cfg root_cfg; | ||
391 | phys_addr_t output_mask; | ||
392 | |||
393 | spinlock_t lock; | ||
394 | }; | ||
395 | |||
396 | static DEFINE_SPINLOCK(arm_smmu_devices_lock); | ||
397 | static LIST_HEAD(arm_smmu_devices); | ||
398 | |||
399 | static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu, | ||
400 | struct device_node *dev_node) | ||
401 | { | ||
402 | struct rb_node *node = smmu->masters.rb_node; | ||
403 | |||
404 | while (node) { | ||
405 | struct arm_smmu_master *master; | ||
406 | master = container_of(node, struct arm_smmu_master, node); | ||
407 | |||
408 | if (dev_node < master->of_node) | ||
409 | node = node->rb_left; | ||
410 | else if (dev_node > master->of_node) | ||
411 | node = node->rb_right; | ||
412 | else | ||
413 | return master; | ||
414 | } | ||
415 | |||
416 | return NULL; | ||
417 | } | ||
418 | |||
419 | static int insert_smmu_master(struct arm_smmu_device *smmu, | ||
420 | struct arm_smmu_master *master) | ||
421 | { | ||
422 | struct rb_node **new, *parent; | ||
423 | |||
424 | new = &smmu->masters.rb_node; | ||
425 | parent = NULL; | ||
426 | while (*new) { | ||
427 | struct arm_smmu_master *this; | ||
428 | this = container_of(*new, struct arm_smmu_master, node); | ||
429 | |||
430 | parent = *new; | ||
431 | if (master->of_node < this->of_node) | ||
432 | new = &((*new)->rb_left); | ||
433 | else if (master->of_node > this->of_node) | ||
434 | new = &((*new)->rb_right); | ||
435 | else | ||
436 | return -EEXIST; | ||
437 | } | ||
438 | |||
439 | rb_link_node(&master->node, parent, new); | ||
440 | rb_insert_color(&master->node, &smmu->masters); | ||
441 | return 0; | ||
442 | } | ||
443 | |||
444 | static int register_smmu_master(struct arm_smmu_device *smmu, | ||
445 | struct device *dev, | ||
446 | struct of_phandle_args *masterspec) | ||
447 | { | ||
448 | int i; | ||
449 | struct arm_smmu_master *master; | ||
450 | |||
451 | master = find_smmu_master(smmu, masterspec->np); | ||
452 | if (master) { | ||
453 | dev_err(dev, | ||
454 | "rejecting multiple registrations for master device %s\n", | ||
455 | masterspec->np->name); | ||
456 | return -EBUSY; | ||
457 | } | ||
458 | |||
459 | if (masterspec->args_count > MAX_MASTER_STREAMIDS) { | ||
460 | dev_err(dev, | ||
461 | "reached maximum number (%d) of stream IDs for master device %s\n", | ||
462 | MAX_MASTER_STREAMIDS, masterspec->np->name); | ||
463 | return -ENOSPC; | ||
464 | } | ||
465 | |||
466 | master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL); | ||
467 | if (!master) | ||
468 | return -ENOMEM; | ||
469 | |||
470 | master->of_node = masterspec->np; | ||
471 | master->num_streamids = masterspec->args_count; | ||
472 | |||
473 | for (i = 0; i < master->num_streamids; ++i) | ||
474 | master->streamids[i] = masterspec->args[i]; | ||
475 | |||
476 | return insert_smmu_master(smmu, master); | ||
477 | } | ||
478 | |||
479 | static struct arm_smmu_device *find_parent_smmu(struct arm_smmu_device *smmu) | ||
480 | { | ||
481 | struct arm_smmu_device *parent; | ||
482 | |||
483 | if (!smmu->parent_of_node) | ||
484 | return NULL; | ||
485 | |||
486 | spin_lock(&arm_smmu_devices_lock); | ||
487 | list_for_each_entry(parent, &arm_smmu_devices, list) | ||
488 | if (parent->dev->of_node == smmu->parent_of_node) | ||
489 | goto out_unlock; | ||
490 | |||
491 | parent = NULL; | ||
492 | dev_warn(smmu->dev, | ||
493 | "Failed to find SMMU parent despite parent in DT\n"); | ||
494 | out_unlock: | ||
495 | spin_unlock(&arm_smmu_devices_lock); | ||
496 | return parent; | ||
497 | } | ||
498 | |||
499 | static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end) | ||
500 | { | ||
501 | int idx; | ||
502 | |||
503 | do { | ||
504 | idx = find_next_zero_bit(map, end, start); | ||
505 | if (idx == end) | ||
506 | return -ENOSPC; | ||
507 | } while (test_and_set_bit(idx, map)); | ||
508 | |||
509 | return idx; | ||
510 | } | ||
511 | |||
512 | static void __arm_smmu_free_bitmap(unsigned long *map, int idx) | ||
513 | { | ||
514 | clear_bit(idx, map); | ||
515 | } | ||
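
The allocator is lock-free: find_next_zero_bit() proposes a candidate
index and test_and_set_bit() atomically claims it, looping whenever
another CPU wins the race. A hypothetical caller, shaped like the
driver's own context-bank allocation later in this file:

	int cbndx = __arm_smmu_alloc_bitmap(smmu->context_map, 0,
					    smmu->num_context_banks);
	if (IS_ERR_VALUE(cbndx))	/* -ENOSPC once the map is full */
		return cbndx;
	/* ... program context bank cbndx ... */
	__arm_smmu_free_bitmap(smmu->context_map, cbndx);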
516 | |||
517 | /* Wait for any pending TLB invalidations to complete */ | ||
518 | static void arm_smmu_tlb_sync(struct arm_smmu_device *smmu) | ||
519 | { | ||
520 | int count = 0; | ||
521 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); | ||
522 | |||
523 | writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC); | ||
524 | while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS) | ||
525 | & sTLBGSTATUS_GSACTIVE) { | ||
526 | cpu_relax(); | ||
527 | if (++count == TLB_LOOP_TIMEOUT) { | ||
528 | dev_err_ratelimited(smmu->dev, | ||
529 | "TLB sync timed out -- SMMU may be deadlocked\n"); | ||
530 | return; | ||
531 | } | ||
532 | udelay(1); | ||
533 | } | ||
534 | } | ||
535 | |||
536 | static irqreturn_t arm_smmu_context_fault(int irq, void *dev) | ||
537 | { | ||
538 | int flags, ret; | ||
539 | u32 fsr, far, fsynr, resume; | ||
540 | unsigned long iova; | ||
541 | struct iommu_domain *domain = dev; | ||
542 | struct arm_smmu_domain *smmu_domain = domain->priv; | ||
543 | struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; | ||
544 | struct arm_smmu_device *smmu = root_cfg->smmu; | ||
545 | void __iomem *cb_base; | ||
546 | |||
547 | cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, root_cfg->cbndx); | ||
548 | fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR); | ||
549 | |||
550 | if (!(fsr & FSR_FAULT)) | ||
551 | return IRQ_NONE; | ||
552 | |||
553 | if (fsr & FSR_IGN) | ||
554 | dev_err_ratelimited(smmu->dev, | ||
555 | "Unexpected context fault (fsr 0x%x)\n", ||
556 | fsr); | ||
557 | |||
558 | fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0); | ||
559 | flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ; | ||
560 | |||
561 | far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_LO); | ||
562 | iova = far; | ||
563 | #ifdef CONFIG_64BIT | ||
564 | far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_HI); | ||
565 | iova |= ((unsigned long)far << 32); | ||
566 | #endif | ||
567 | |||
568 | if (!report_iommu_fault(domain, smmu->dev, iova, flags)) { | ||
569 | ret = IRQ_HANDLED; | ||
570 | resume = RESUME_RETRY; | ||
571 | } else { | ||
572 | ret = IRQ_NONE; | ||
573 | resume = RESUME_TERMINATE; | ||
574 | } | ||
575 | |||
576 | /* Clear the faulting FSR */ | ||
577 | writel(fsr, cb_base + ARM_SMMU_CB_FSR); | ||
578 | |||
579 | /* Retry or terminate any stalled transactions */ | ||
580 | if (fsr & FSR_SS) | ||
581 | writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME); | ||
582 | |||
583 | return ret; | ||
584 | } | ||
585 | |||
586 | static irqreturn_t arm_smmu_global_fault(int irq, void *dev) | ||
587 | { | ||
588 | u32 gfsr, gfsynr0, gfsynr1, gfsynr2; | ||
589 | struct arm_smmu_device *smmu = dev; | ||
590 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); | ||
591 | |||
592 | gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR); | ||
593 | gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0); | ||
594 | gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1); | ||
595 | gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2); | ||
596 | |||
597 | dev_err_ratelimited(smmu->dev, | ||
598 | "Unexpected global fault, this could be serious\n"); | ||
599 | dev_err_ratelimited(smmu->dev, | ||
600 | "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n", | ||
601 | gfsr, gfsynr0, gfsynr1, gfsynr2); | ||
602 | |||
603 | writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR); | ||
604 | return IRQ_NONE; | ||
605 | } | ||
606 | |||
607 | static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) | ||
608 | { | ||
609 | u32 reg; | ||
610 | bool stage1; | ||
611 | struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; | ||
612 | struct arm_smmu_device *smmu = root_cfg->smmu; | ||
613 | void __iomem *cb_base, *gr0_base, *gr1_base; | ||
614 | |||
615 | gr0_base = ARM_SMMU_GR0(smmu); | ||
616 | gr1_base = ARM_SMMU_GR1(smmu); | ||
617 | stage1 = root_cfg->cbar != CBAR_TYPE_S2_TRANS; | ||
618 | cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, root_cfg->cbndx); | ||
619 | |||
620 | /* CBAR */ | ||
621 | reg = root_cfg->cbar | | ||
622 | (root_cfg->vmid << CBAR_VMID_SHIFT); | ||
623 | if (smmu->version == 1) | ||
624 | reg |= root_cfg->irptndx << CBAR_IRPTNDX_SHIFT; | ||
625 | |||
626 | /* Use the weakest memory type, so it is overridden by the pte */ | ||
627 | if (stage1) | ||
628 | reg |= (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT); | ||
629 | writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(root_cfg->cbndx)); | ||
630 | |||
631 | if (smmu->version > 1) { | ||
632 | /* CBA2R */ | ||
633 | #ifdef CONFIG_64BIT | ||
634 | reg = CBA2R_RW64_64BIT; | ||
635 | #else | ||
636 | reg = CBA2R_RW64_32BIT; | ||
637 | #endif | ||
638 | writel_relaxed(reg, | ||
639 | gr1_base + ARM_SMMU_GR1_CBA2R(root_cfg->cbndx)); | ||
640 | |||
641 | /* TTBCR2 */ | ||
642 | switch (smmu->input_size) { | ||
643 | case 32: | ||
644 | reg = (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT); | ||
645 | break; | ||
646 | case 36: | ||
647 | reg = (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT); | ||
648 | break; | ||
649 | case 39: | ||
650 | reg = (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT); | ||
651 | break; | ||
652 | case 42: | ||
653 | reg = (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT); | ||
654 | break; | ||
655 | case 44: | ||
656 | reg = (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT); | ||
657 | break; | ||
658 | case 48: | ||
659 | reg = (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT); | ||
660 | break; | ||
661 | } | ||
662 | |||
663 | switch (smmu->s1_output_size) { | ||
664 | case 32: | ||
665 | reg |= (TTBCR2_ADDR_32 << TTBCR2_PASIZE_SHIFT); | ||
666 | break; | ||
667 | case 36: | ||
668 | reg |= (TTBCR2_ADDR_36 << TTBCR2_PASIZE_SHIFT); | ||
669 | break; | ||
670 | case 39: | ||
671 | reg |= (TTBCR2_ADDR_40 << TTBCR2_PASIZE_SHIFT); | ||
672 | break; | ||
673 | case 42: | ||
674 | reg |= (TTBCR2_ADDR_42 << TTBCR2_PASIZE_SHIFT); | ||
675 | break; | ||
676 | case 44: | ||
677 | reg |= (TTBCR2_ADDR_44 << TTBCR2_PASIZE_SHIFT); | ||
678 | break; | ||
679 | case 48: | ||
680 | reg |= (TTBCR2_ADDR_48 << TTBCR2_PASIZE_SHIFT); | ||
681 | break; | ||
682 | } | ||
683 | |||
684 | if (stage1) | ||
685 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2); | ||
686 | } | ||
687 | |||
688 | /* TTBR0 */ | ||
689 | reg = __pa(root_cfg->pgd); | ||
690 | #ifndef __BIG_ENDIAN | ||
691 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO); | ||
692 | reg = (phys_addr_t)__pa(root_cfg->pgd) >> 32; | ||
693 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI); | ||
694 | #else | ||
695 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI); | ||
696 | reg = (phys_addr_t)__pa(root_cfg->pgd) >> 32; | ||
697 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO); | ||
698 | #endif | ||
699 | |||
700 | /* | ||
701 | * TTBCR | ||
702 | * We use long descriptor, with inner-shareable WBWA tables in TTBR0. | ||
703 | */ | ||
704 | if (smmu->version > 1) { | ||
705 | if (PAGE_SIZE == SZ_4K) | ||
706 | reg = TTBCR_TG0_4K; | ||
707 | else | ||
708 | reg = TTBCR_TG0_64K; | ||
709 | |||
710 | if (!stage1) { | ||
711 | switch (smmu->s2_output_size) { | ||
712 | case 32: | ||
713 | reg |= (TTBCR2_ADDR_32 << TTBCR_PASIZE_SHIFT); | ||
714 | break; | ||
715 | case 36: | ||
716 | reg |= (TTBCR2_ADDR_36 << TTBCR_PASIZE_SHIFT); | ||
717 | break; | ||
718 | case 40: | ||
719 | reg |= (TTBCR2_ADDR_40 << TTBCR_PASIZE_SHIFT); | ||
720 | break; | ||
721 | case 42: | ||
722 | reg |= (TTBCR2_ADDR_42 << TTBCR_PASIZE_SHIFT); | ||
723 | break; | ||
724 | case 44: | ||
725 | reg |= (TTBCR2_ADDR_44 << TTBCR_PASIZE_SHIFT); | ||
726 | break; | ||
727 | case 48: | ||
728 | reg |= (TTBCR2_ADDR_48 << TTBCR_PASIZE_SHIFT); | ||
729 | break; | ||
730 | } | ||
731 | } else { | ||
732 | reg |= (64 - smmu->s1_output_size) << TTBCR_T0SZ_SHIFT; | ||
733 | } | ||
734 | } else { | ||
735 | reg = 0; | ||
736 | } | ||
737 | |||
738 | reg |= TTBCR_EAE | | ||
739 | (TTBCR_SH_IS << TTBCR_SH0_SHIFT) | | ||
740 | (TTBCR_RGN_WBWA << TTBCR_ORGN0_SHIFT) | | ||
741 | (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT) | | ||
742 | (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT); | ||
743 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR); | ||
744 | |||
745 | /* MAIR0 (stage-1 only) */ | ||
746 | if (stage1) { | ||
747 | reg = (MAIR_ATTR_NC << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_NC)) | | ||
748 | (MAIR_ATTR_WBRWA << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_CACHE)) | | ||
749 | (MAIR_ATTR_DEVICE << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_DEV)); | ||
750 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0); | ||
751 | } | ||
752 | |||
753 | /* Nuke the TLB */ | ||
754 | writel_relaxed(root_cfg->vmid, gr0_base + ARM_SMMU_GR0_TLBIVMID); | ||
755 | arm_smmu_tlb_sync(smmu); | ||
756 | |||
757 | /* SCTLR */ | ||
758 | reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP; | ||
759 | if (stage1) | ||
760 | reg |= SCTLR_S1_ASIDPNE; | ||
761 | #ifdef __BIG_ENDIAN | ||
762 | reg |= SCTLR_E; | ||
763 | #endif | ||
764 | writel(reg, cb_base + ARM_SMMU_CB_SCTLR); | ||
765 | } | ||
766 | |||
767 | static int arm_smmu_init_domain_context(struct iommu_domain *domain, | ||
768 | struct device *dev) | ||
769 | { | ||
770 | int irq, ret, start; | ||
771 | struct arm_smmu_domain *smmu_domain = domain->priv; | ||
772 | struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; | ||
773 | struct arm_smmu_device *smmu, *parent; | ||
774 | |||
775 | /* | ||
776 | * Walk the SMMU chain to find the root device for this chain. | ||
777 | * We assume that no masters have translations which terminate | ||
778 | * early, and therefore check that the root SMMU does indeed have | ||
779 | * a StreamID for the master in question. | ||
780 | */ | ||
781 | parent = dev->archdata.iommu; | ||
782 | smmu_domain->output_mask = -1; | ||
783 | do { | ||
784 | smmu = parent; | ||
785 | smmu_domain->output_mask &= (1ULL << smmu->s2_output_size) - 1; | ||
786 | } while ((parent = find_parent_smmu(smmu))); | ||
787 | |||
788 | if (!find_smmu_master(smmu, dev->of_node)) { | ||
789 | dev_err(dev, "unable to find root SMMU for device\n"); | ||
790 | return -ENODEV; | ||
791 | } | ||
792 | |||
793 | ret = __arm_smmu_alloc_bitmap(smmu->vmid_map, 0, ARM_SMMU_NUM_VMIDS); | ||
794 | if (IS_ERR_VALUE(ret)) | ||
795 | return ret; | ||
796 | |||
797 | root_cfg->vmid = ret; | ||
798 | if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) { | ||
799 | /* | ||
800 | * We will likely want to change this if/when KVM gets | ||
801 | * involved. | ||
802 | */ | ||
803 | root_cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; | ||
804 | start = smmu->num_s2_context_banks; | ||
805 | } else if (smmu->features & ARM_SMMU_FEAT_TRANS_S2) { | ||
806 | root_cfg->cbar = CBAR_TYPE_S2_TRANS; | ||
807 | start = 0; | ||
808 | } else { | ||
809 | root_cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; | ||
810 | start = smmu->num_s2_context_banks; | ||
811 | } | ||
812 | |||
813 | ret = __arm_smmu_alloc_bitmap(smmu->context_map, start, | ||
814 | smmu->num_context_banks); | ||
815 | if (IS_ERR_VALUE(ret)) | ||
816 | goto out_free_vmid; | ||
817 | |||
818 | root_cfg->cbndx = ret; | ||
819 | |||
820 | if (smmu->version == 1) { | ||
821 | root_cfg->irptndx = atomic_inc_return(&smmu->irptndx); | ||
822 | root_cfg->irptndx %= smmu->num_context_irqs; | ||
823 | } else { | ||
824 | root_cfg->irptndx = root_cfg->cbndx; | ||
825 | } | ||
826 | |||
827 | irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx]; | ||
828 | ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED, | ||
829 | "arm-smmu-context-fault", domain); | ||
830 | if (IS_ERR_VALUE(ret)) { | ||
831 | dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n", | ||
832 | root_cfg->irptndx, irq); | ||
833 | root_cfg->irptndx = -1; | ||
834 | goto out_free_context; | ||
835 | } | ||
836 | |||
837 | root_cfg->smmu = smmu; | ||
838 | arm_smmu_init_context_bank(smmu_domain); | ||
839 | return ret; | ||
840 | |||
841 | out_free_context: | ||
842 | __arm_smmu_free_bitmap(smmu->context_map, root_cfg->cbndx); | ||
843 | out_free_vmid: | ||
844 | __arm_smmu_free_bitmap(smmu->vmid_map, root_cfg->vmid); | ||
845 | return ret; | ||
846 | } | ||
847 | |||
848 | static void arm_smmu_destroy_domain_context(struct iommu_domain *domain) | ||
849 | { | ||
850 | struct arm_smmu_domain *smmu_domain = domain->priv; | ||
851 | struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; | ||
852 | struct arm_smmu_device *smmu = root_cfg->smmu; | ||
853 | int irq; | ||
854 | |||
855 | if (!smmu) | ||
856 | return; | ||
857 | |||
858 | if (root_cfg->irptndx != -1) { | ||
859 | irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx]; | ||
860 | free_irq(irq, domain); | ||
861 | } | ||
862 | |||
863 | __arm_smmu_free_bitmap(smmu->vmid_map, root_cfg->vmid); | ||
864 | __arm_smmu_free_bitmap(smmu->context_map, root_cfg->cbndx); | ||
865 | } | ||
866 | |||
867 | static int arm_smmu_domain_init(struct iommu_domain *domain) | ||
868 | { | ||
869 | struct arm_smmu_domain *smmu_domain; | ||
870 | pgd_t *pgd; | ||
871 | |||
872 | /* | ||
873 | * Allocate the domain and initialise some of its data structures. | ||
874 | * We can't really do anything meaningful until we've added a | ||
875 | * master. | ||
876 | */ | ||
877 | smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL); | ||
878 | if (!smmu_domain) | ||
879 | return -ENOMEM; | ||
880 | |||
881 | pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL); | ||
882 | if (!pgd) | ||
883 | goto out_free_domain; | ||
884 | smmu_domain->root_cfg.pgd = pgd; | ||
885 | |||
886 | spin_lock_init(&smmu_domain->lock); | ||
887 | domain->priv = smmu_domain; | ||
888 | return 0; | ||
889 | |||
890 | out_free_domain: | ||
891 | kfree(smmu_domain); | ||
892 | return -ENOMEM; | ||
893 | } | ||
894 | |||
895 | static void arm_smmu_free_ptes(pmd_t *pmd) | ||
896 | { | ||
897 | pgtable_t table = pmd_pgtable(*pmd); | ||
898 | pgtable_page_dtor(table); | ||
899 | __free_page(table); | ||
900 | } | ||
901 | |||
902 | static void arm_smmu_free_pmds(pud_t *pud) | ||
903 | { | ||
904 | int i; | ||
905 | pmd_t *pmd, *pmd_base = pmd_offset(pud, 0); | ||
906 | |||
907 | pmd = pmd_base; | ||
908 | for (i = 0; i < PTRS_PER_PMD; ++i) { | ||
909 | if (pmd_none(*pmd)) | ||
910 | continue; | ||
911 | |||
912 | arm_smmu_free_ptes(pmd); | ||
913 | pmd++; | ||
914 | } | ||
915 | |||
916 | pmd_free(NULL, pmd_base); | ||
917 | } | ||
918 | |||
919 | static void arm_smmu_free_puds(pgd_t *pgd) | ||
920 | { | ||
921 | int i; | ||
922 | pud_t *pud, *pud_base = pud_offset(pgd, 0); | ||
923 | |||
924 | pud = pud_base; | ||
925 | for (i = 0; i < PTRS_PER_PUD; ++i) { | ||
926 | if (pud_none(*pud)) | ||
927 | continue; | ||
928 | |||
929 | arm_smmu_free_pmds(pud); | ||
930 | pud++; | ||
931 | } | ||
932 | |||
933 | pud_free(NULL, pud_base); | ||
934 | } | ||
935 | |||
936 | static void arm_smmu_free_pgtables(struct arm_smmu_domain *smmu_domain) | ||
937 | { | ||
938 | int i; | ||
939 | struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; | ||
940 | pgd_t *pgd, *pgd_base = root_cfg->pgd; | ||
941 | |||
942 | /* | ||
943 | * Recursively free the page tables for this domain. We don't | ||
944 | * care about speculative TLB filling, because the TLB will be | ||
945 | * nuked next time this context bank is re-allocated and no devices | ||
946 | * currently map to these tables. | ||
947 | */ | ||
948 | pgd = pgd_base; | ||
949 | for (i = 0; i < PTRS_PER_PGD; ++i) { | ||
950 | if (pgd_none(*pgd)) | ||
951 | continue; | ||
952 | arm_smmu_free_puds(pgd); | ||
953 | pgd++; | ||
954 | } | ||
955 | |||
956 | kfree(pgd_base); | ||
957 | } | ||
958 | |||
959 | static void arm_smmu_domain_destroy(struct iommu_domain *domain) | ||
960 | { | ||
961 | struct arm_smmu_domain *smmu_domain = domain->priv; | ||
962 | arm_smmu_destroy_domain_context(domain); | ||
963 | arm_smmu_free_pgtables(smmu_domain); | ||
964 | kfree(smmu_domain); | ||
965 | } | ||
966 | |||
967 | static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu, | ||
968 | struct arm_smmu_master *master) | ||
969 | { | ||
970 | int i; | ||
971 | struct arm_smmu_smr *smrs; | ||
972 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); | ||
973 | |||
974 | if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH)) | ||
975 | return 0; | ||
976 | |||
977 | if (master->smrs) | ||
978 | return -EEXIST; | ||
979 | |||
980 | smrs = kmalloc(sizeof(*smrs) * master->num_streamids, GFP_KERNEL); | ||
981 | if (!smrs) { | ||
982 | dev_err(smmu->dev, "failed to allocate %d SMRs for master %s\n", | ||
983 | master->num_streamids, master->of_node->name); | ||
984 | return -ENOMEM; | ||
985 | } | ||
986 | |||
987 | /* Allocate the SMRs on the root SMMU */ | ||
988 | for (i = 0; i < master->num_streamids; ++i) { | ||
989 | int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0, | ||
990 | smmu->num_mapping_groups); | ||
991 | if (IS_ERR_VALUE(idx)) { | ||
992 | dev_err(smmu->dev, "failed to allocate free SMR\n"); | ||
993 | goto err_free_smrs; | ||
994 | } | ||
995 | |||
996 | smrs[i] = (struct arm_smmu_smr) { | ||
997 | .idx = idx, | ||
998 | .mask = 0, /* We don't currently share SMRs */ | ||
999 | .id = master->streamids[i], | ||
1000 | }; | ||
1001 | } | ||
1002 | |||
1003 | /* It worked! Now, poke the actual hardware */ | ||
1004 | for (i = 0; i < master->num_streamids; ++i) { | ||
1005 | u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT | | ||
1006 | smrs[i].mask << SMR_MASK_SHIFT; | ||
1007 | writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx)); | ||
1008 | } | ||
1009 | |||
1010 | master->smrs = smrs; | ||
1011 | return 0; | ||
1012 | |||
1013 | err_free_smrs: | ||
1014 | while (--i >= 0) | ||
1015 | __arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx); | ||
1016 | kfree(smrs); | ||
1017 | return -ENOSPC; | ||
1018 | } | ||
1019 | |||
1020 | static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu, | ||
1021 | struct arm_smmu_master *master) | ||
1022 | { | ||
1023 | int i; | ||
1024 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); | ||
1025 | struct arm_smmu_smr *smrs = master->smrs; | ||
1026 | |||
1027 | /* Invalidate the SMRs before freeing back to the allocator */ | ||
1028 | for (i = 0; i < master->num_streamids; ++i) { | ||
1029 | u8 idx = smrs[i].idx; | ||
1030 | writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx)); | ||
1031 | __arm_smmu_free_bitmap(smmu->smr_map, idx); | ||
1032 | } | ||
1033 | |||
1034 | master->smrs = NULL; | ||
1035 | kfree(smrs); | ||
1036 | } | ||
1037 | |||
1038 | static void arm_smmu_bypass_stream_mapping(struct arm_smmu_device *smmu, | ||
1039 | struct arm_smmu_master *master) | ||
1040 | { | ||
1041 | int i; | ||
1042 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); | ||
1043 | |||
1044 | for (i = 0; i < master->num_streamids; ++i) { | ||
1045 | u16 sid = master->streamids[i]; | ||
1046 | writel_relaxed(S2CR_TYPE_BYPASS, | ||
1047 | gr0_base + ARM_SMMU_GR0_S2CR(sid)); | ||
1048 | } | ||
1049 | } | ||
1050 | |||
1051 | static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain, | ||
1052 | struct arm_smmu_master *master) | ||
1053 | { | ||
1054 | int i, ret; | ||
1055 | struct arm_smmu_device *parent, *smmu = smmu_domain->root_cfg.smmu; | ||
1056 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); | ||
1057 | |||
1058 | ret = arm_smmu_master_configure_smrs(smmu, master); | ||
1059 | if (ret) | ||
1060 | return ret; | ||
1061 | |||
1062 | /* Bypass the leaves */ | ||
1063 | smmu = smmu_domain->leaf_smmu; | ||
1064 | while ((parent = find_parent_smmu(smmu))) { | ||
1065 | /* | ||
1066 | * We won't have a StreamID match for anything but the root | ||
1067 | * smmu, so we only need to worry about StreamID indexing, | ||
1068 | * where we must install bypass entries in the S2CRs. | ||
1069 | */ | ||
1070 | if (smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) | ||
1071 | continue; | ||
1072 | |||
1073 | arm_smmu_bypass_stream_mapping(smmu, master); | ||
1074 | smmu = parent; | ||
1075 | } | ||
1076 | |||
1077 | /* Now we're at the root, time to point at our context bank */ | ||
1078 | for (i = 0; i < master->num_streamids; ++i) { | ||
1079 | u32 idx, s2cr; | ||
1080 | idx = master->smrs ? master->smrs[i].idx : master->streamids[i]; | ||
1081 | s2cr = (S2CR_TYPE_TRANS << S2CR_TYPE_SHIFT) | | ||
1082 | (smmu_domain->root_cfg.cbndx << S2CR_CBNDX_SHIFT); | ||
1083 | writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx)); | ||
1084 | } | ||
1085 | |||
1086 | return 0; | ||
1087 | } | ||
1088 | |||
1089 | static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain, | ||
1090 | struct arm_smmu_master *master) | ||
1091 | { | ||
1092 | struct arm_smmu_device *smmu = smmu_domain->root_cfg.smmu; | ||
1093 | |||
1094 | /* | ||
1095 | * We *must* clear the S2CR first, because freeing the SMR means | ||
1096 | * that it can be re-allocated immediately. | ||
1097 | */ | ||
1098 | arm_smmu_bypass_stream_mapping(smmu, master); | ||
1099 | arm_smmu_master_free_smrs(smmu, master); | ||
1100 | } | ||
1101 | |||
1102 | static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) | ||
1103 | { | ||
1104 | int ret = -EINVAL; | ||
1105 | struct arm_smmu_domain *smmu_domain = domain->priv; | ||
1106 | struct arm_smmu_device *device_smmu = dev->archdata.iommu; | ||
1107 | struct arm_smmu_master *master; | ||
1108 | |||
1109 | if (!device_smmu) { | ||
1110 | dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n"); | ||
1111 | return -ENXIO; | ||
1112 | } | ||
1113 | |||
1114 | /* | ||
1115 | * Sanity check the domain. We don't currently support domains | ||
1116 | * that cross between different SMMU chains. | ||
1117 | */ | ||
1118 | spin_lock(&smmu_domain->lock); | ||
1119 | if (!smmu_domain->leaf_smmu) { | ||
1120 | /* Now that we have a master, we can finalise the domain */ | ||
1121 | ret = arm_smmu_init_domain_context(domain, dev); | ||
1122 | if (IS_ERR_VALUE(ret)) | ||
1123 | goto err_unlock; | ||
1124 | |||
1125 | smmu_domain->leaf_smmu = device_smmu; | ||
1126 | } else if (smmu_domain->leaf_smmu != device_smmu) { | ||
1127 | dev_err(dev, | ||
1128 | "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n", | ||
1129 | dev_name(smmu_domain->leaf_smmu->dev), | ||
1130 | dev_name(device_smmu->dev)); | ||
1131 | goto err_unlock; | ||
1132 | } | ||
1133 | spin_unlock(&smmu_domain->lock); | ||
1134 | |||
1135 | /* Looks ok, so add the device to the domain */ | ||
1136 | master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node); | ||
1137 | if (!master) | ||
1138 | return -ENODEV; | ||
1139 | |||
1140 | return arm_smmu_domain_add_master(smmu_domain, master); | ||
1141 | |||
1142 | err_unlock: | ||
1143 | spin_unlock(&smmu_domain->lock); | ||
1144 | return ret; | ||
1145 | } | ||
1146 | |||
1147 | static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev) | ||
1148 | { | ||
1149 | struct arm_smmu_domain *smmu_domain = domain->priv; | ||
1150 | struct arm_smmu_master *master; | ||
1151 | |||
1152 | master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node); | ||
1153 | if (master) | ||
1154 | arm_smmu_domain_remove_master(smmu_domain, master); | ||
1155 | } | ||
1156 | |||
1157 | static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr, | ||
1158 | size_t size) | ||
1159 | { | ||
1160 | unsigned long offset = (unsigned long)addr & ~PAGE_MASK; | ||
1161 | |||
1162 | /* | ||
1163 | * If the SMMU can't walk tables in the CPU caches, treat them | ||
1164 | * like non-coherent DMA since we need to flush the new entries | ||
1165 | * all the way out to memory. There's no possibility of recursion | ||
1166 | * here as the SMMU table walker will not be wired through another | ||
1167 | * SMMU. | ||
1168 | */ | ||
1169 | if (!(smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)) | ||
1170 | dma_map_page(smmu->dev, virt_to_page(addr), offset, size, | ||
1171 | DMA_TO_DEVICE); | ||
1172 | } | ||
1173 | |||
1174 | static bool arm_smmu_pte_is_contiguous_range(unsigned long addr, | ||
1175 | unsigned long end) | ||
1176 | { | ||
1177 | return !(addr & ~ARM_SMMU_PTE_CONT_MASK) && | ||
1178 | (addr + ARM_SMMU_PTE_CONT_SIZE <= end); | ||
1179 | } | ||
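
A worked example, assuming 4 KiB pages (so ARM_SMMU_PTE_CONT_ENTRIES is
16 and ARM_SMMU_PTE_CONT_SIZE is 64 KiB): the predicate accepts only
spans that start on a 64 KiB boundary and have at least 64 KiB left
before end:

	/* arm_smmu_pte_is_contiguous_range(0x10000, 0x20000) -> true      */
	/* arm_smmu_pte_is_contiguous_range(0x11000, 0x21000) -> false,
	 * since 0x11000 is not 64 KiB aligned                             */
	/* arm_smmu_pte_is_contiguous_range(0x10000, 0x18000) -> false,
	 * since only 32 KiB remain before end                             */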
1180 | |||
1181 | static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd, | ||
1182 | unsigned long addr, unsigned long end, | ||
1183 | unsigned long pfn, int flags, int stage) | ||
1184 | { | ||
1185 | pte_t *pte, *start; | ||
1186 | pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF; | ||
1187 | |||
1188 | if (pmd_none(*pmd)) { | ||
1189 | /* Allocate a new set of tables */ | ||
1190 | pgtable_t table = alloc_page(PGALLOC_GFP); | ||
1191 | if (!table) | ||
1192 | return -ENOMEM; | ||
1193 | |||
1194 | arm_smmu_flush_pgtable(smmu, page_address(table), | ||
1195 | ARM_SMMU_PTE_HWTABLE_SIZE); | ||
1196 | pgtable_page_ctor(table); | ||
1197 | pmd_populate(NULL, pmd, table); | ||
1198 | arm_smmu_flush_pgtable(smmu, pmd, sizeof(*pmd)); | ||
1199 | } | ||
1200 | |||
1201 | if (stage == 1) { | ||
1202 | pteval |= ARM_SMMU_PTE_AP_UNPRIV; | ||
1203 | if (!(flags & IOMMU_WRITE) && (flags & IOMMU_READ)) | ||
1204 | pteval |= ARM_SMMU_PTE_AP_RDONLY; | ||
1205 | |||
1206 | if (flags & IOMMU_CACHE) | ||
1207 | pteval |= (MAIR_ATTR_IDX_CACHE << | ||
1208 | ARM_SMMU_PTE_ATTRINDX_SHIFT); | ||
1209 | } else { | ||
1210 | pteval |= ARM_SMMU_PTE_HAP_FAULT; | ||
1211 | if (flags & IOMMU_READ) | ||
1212 | pteval |= ARM_SMMU_PTE_HAP_READ; | ||
1213 | if (flags & IOMMU_WRITE) | ||
1214 | pteval |= ARM_SMMU_PTE_HAP_WRITE; | ||
1215 | if (flags & IOMMU_CACHE) | ||
1216 | pteval |= ARM_SMMU_PTE_MEMATTR_OIWB; | ||
1217 | else | ||
1218 | pteval |= ARM_SMMU_PTE_MEMATTR_NC; | ||
1219 | } | ||
1220 | |||
1221 | /* If no access, create a faulting entry to avoid TLB fills */ | ||
1222 | if (!(flags & (IOMMU_READ | IOMMU_WRITE))) | ||
1223 | pteval &= ~ARM_SMMU_PTE_PAGE; | ||
1224 | |||
1225 | pteval |= ARM_SMMU_PTE_SH_IS; | ||
1226 | start = pmd_page_vaddr(*pmd) + pte_index(addr); | ||
1227 | pte = start; | ||
1228 | |||
1229 | /* | ||
1230 | * Install the page table entries. This is fairly complicated | ||
1231 | * since we attempt to make use of the contiguous hint in the | ||
1232 | * ptes where possible. The contiguous hint indicates a series | ||
1233 | * of ARM_SMMU_PTE_CONT_ENTRIES ptes mapping a physically | ||
1234 | * contiguous region with the following constraints: | ||
1235 | * | ||
1236 | * - The region start is aligned to ARM_SMMU_PTE_CONT_SIZE | ||
1237 | * - Each pte in the region has the contiguous hint bit set | ||
1238 | * | ||
1239 | * This complicates unmapping (also handled by this code, when | ||
1240 | * neither IOMMU_READ nor IOMMU_WRITE is set) because it is | ||
1241 | * possible, yet highly unlikely, that a client may unmap only | ||
1242 | * part of a contiguous range. This requires clearing of the | ||
1243 | * contiguous hint bits in the range before installing the new | ||
1244 | * faulting entries. | ||
1245 | * | ||
1246 | * Note that re-mapping an address range without first unmapping | ||
1247 | * it is not supported, so TLB invalidation is not required here | ||
1248 | * and is instead performed at unmap and domain-init time. | ||
1249 | */ | ||
1250 | do { | ||
1251 | int i = 1; | ||
1252 | pteval &= ~ARM_SMMU_PTE_CONT; | ||
1253 | |||
1254 | if (arm_smmu_pte_is_contiguous_range(addr, end)) { | ||
1255 | i = ARM_SMMU_PTE_CONT_ENTRIES; | ||
1256 | pteval |= ARM_SMMU_PTE_CONT; | ||
1257 | } else if (pte_val(*pte) & | ||
1258 | (ARM_SMMU_PTE_CONT | ARM_SMMU_PTE_PAGE)) { | ||
1259 | int j; | ||
1260 | pte_t *cont_start; | ||
1261 | unsigned long idx = pte_index(addr); | ||
1262 | |||
1263 | idx &= ~(ARM_SMMU_PTE_CONT_ENTRIES - 1); | ||
1264 | cont_start = pmd_page_vaddr(*pmd) + idx; | ||
1265 | for (j = 0; j < ARM_SMMU_PTE_CONT_ENTRIES; ++j) | ||
1266 | pte_val(*(cont_start + j)) &= ~ARM_SMMU_PTE_CONT; | ||
1267 | |||
1268 | arm_smmu_flush_pgtable(smmu, cont_start, | ||
1269 | sizeof(*pte) * | ||
1270 | ARM_SMMU_PTE_CONT_ENTRIES); | ||
1271 | } | ||
1272 | |||
1273 | do { | ||
1274 | *pte = pfn_pte(pfn, __pgprot(pteval)); | ||
1275 | } while (pte++, pfn++, addr += PAGE_SIZE, --i); | ||
1276 | } while (addr != end); | ||
1277 | |||
1278 | arm_smmu_flush_pgtable(smmu, start, sizeof(*pte) * (pte - start)); | ||
1279 | return 0; | ||
1280 | } | ||
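
To make the contiguous-hint arithmetic concrete, here is a small user-space model of the run-length decision the loop above makes on each iteration. The 16-entry/64K figures are an assumption for a 4K granule; the driver derives its real constants from PAGE_SIZE:

    #include <stdio.h>

    #define EX_PAGE_SIZE    4096UL
    #define EX_CONT_ENTRIES 16UL
    #define EX_CONT_SIZE    (EX_CONT_ENTRIES * EX_PAGE_SIZE) /* 64K */

    /* Mirror of arm_smmu_pte_is_contiguous_range(): the start must be
     * aligned to the run size and a whole run must still fit. */
    static int is_cont_range(unsigned long addr, unsigned long end)
    {
            return !(addr & (EX_CONT_SIZE - 1)) &&
                   (addr + EX_CONT_SIZE <= end);
    }

    int main(void)
    {
            unsigned long addr = 0x10000, end = 0x31000; /* 132K at 64K */

            while (addr != end) {
                    if (is_cont_range(addr, end)) {
                            printf("%#lx: 16-pte contiguous run\n", addr);
                            addr += EX_CONT_SIZE;
                    } else {
                            printf("%#lx: single pte\n", addr);
                            addr += EX_PAGE_SIZE;
                    }
            }
            return 0;
    }

The 132K range comes out as two 64K contiguous runs followed by a single 4K pte, which is exactly the shape the installation loop above produces.
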
1281 | |||
1282 | static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud, | ||
1283 | unsigned long addr, unsigned long end, | ||
1284 | phys_addr_t phys, int flags, int stage) | ||
1285 | { | ||
1286 | int ret; | ||
1287 | pmd_t *pmd; | ||
1288 | unsigned long next, pfn = __phys_to_pfn(phys); | ||
1289 | |||
1290 | #ifndef __PAGETABLE_PMD_FOLDED | ||
1291 | if (pud_none(*pud)) { | ||
1292 | pmd = pmd_alloc_one(NULL, addr); | ||
1293 | if (!pmd) | ||
1294 | return -ENOMEM; | ||
1295 | } else | ||
1296 | #endif | ||
1297 | pmd = pmd_offset(pud, addr); | ||
1298 | |||
1299 | do { | ||
1300 | next = pmd_addr_end(addr, end); | ||
1301 | ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, end, pfn, | ||
1302 | flags, stage); | ||
1303 | pud_populate(NULL, pud, pmd); | ||
1304 | arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud)); | ||
1305 | phys += next - addr; | ||
1306 | } while (pmd++, addr = next, addr < end); | ||
1307 | |||
1308 | return ret; | ||
1309 | } | ||
1310 | |||
1311 | static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd, | ||
1312 | unsigned long addr, unsigned long end, | ||
1313 | phys_addr_t phys, int flags, int stage) | ||
1314 | { | ||
1315 | int ret = 0; | ||
1316 | pud_t *pud; | ||
1317 | unsigned long next; | ||
1318 | |||
1319 | #ifndef __PAGETABLE_PUD_FOLDED | ||
1320 | if (pgd_none(*pgd)) { | ||
1321 | pud = pud_alloc_one(NULL, addr); | ||
1322 | if (!pud) | ||
1323 | return -ENOMEM; | ||
1324 | } else | ||
1325 | #endif | ||
1326 | pud = pud_offset(pgd, addr); | ||
1327 | |||
1328 | do { | ||
1329 | next = pud_addr_end(addr, end); | ||
1330 | ret = arm_smmu_alloc_init_pmd(smmu, pud, addr, next, phys, | ||
1331 | flags, stage); | ||
1332 | pgd_populate(NULL, pgd, pud); | ||
1333 | arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd)); | ||
1334 | phys += next - addr; | ||
1335 | } while (pud++, addr = next, addr < end); | ||
1336 | |||
1337 | return ret; | ||
1338 | } | ||
1339 | |||
1340 | static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain, | ||
1341 | unsigned long iova, phys_addr_t paddr, | ||
1342 | size_t size, int flags) | ||
1343 | { | ||
1344 | int ret, stage; | ||
1345 | unsigned long end; | ||
1346 | phys_addr_t input_mask, output_mask; | ||
1347 | struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; | ||
1348 | pgd_t *pgd = root_cfg->pgd; | ||
1349 | struct arm_smmu_device *smmu = root_cfg->smmu; | ||
1350 | |||
1351 | if (root_cfg->cbar == CBAR_TYPE_S2_TRANS) { | ||
1352 | stage = 2; | ||
1353 | output_mask = (1ULL << smmu->s2_output_size) - 1; | ||
1354 | } else { | ||
1355 | stage = 1; | ||
1356 | output_mask = (1ULL << smmu->s1_output_size) - 1; | ||
1357 | } | ||
1358 | |||
1359 | if (!pgd) | ||
1360 | return -EINVAL; | ||
1361 | |||
1362 | if (size & ~PAGE_MASK) | ||
1363 | return -EINVAL; | ||
1364 | |||
1365 | input_mask = (1ULL << smmu->input_size) - 1; | ||
1366 | if ((phys_addr_t)iova & ~input_mask) | ||
1367 | return -ERANGE; | ||
1368 | |||
1369 | if (paddr & ~output_mask) | ||
1370 | return -ERANGE; | ||
1371 | |||
1372 | spin_lock(&smmu_domain->lock); | ||
1373 | pgd += pgd_index(iova); | ||
1374 | end = iova + size; | ||
1375 | do { | ||
1376 | unsigned long next = pgd_addr_end(iova, end); | ||
1377 | |||
1378 | ret = arm_smmu_alloc_init_pud(smmu, pgd, iova, next, paddr, | ||
1379 | flags, stage); | ||
1380 | if (ret) | ||
1381 | goto out_unlock; | ||
1382 | |||
1383 | paddr += next - iova; | ||
1384 | iova = next; | ||
1385 | } while (pgd++, iova != end); | ||
1386 | |||
1387 | out_unlock: | ||
1388 | spin_unlock(&smmu_domain->lock); | ||
1389 | |||
1390 | /* Ensure new page tables are visible to the hardware walker */ | ||
1391 | if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) | ||
1392 | dsb(); | ||
1393 | |||
1394 | return ret; | ||
1395 | } | ||
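
The -ERANGE checks above are plain mask arithmetic: an n-bit address space accepts exactly those addresses with no bits set at or above bit n. A standalone sketch with an assumed 32-bit input size (the shift stays below 64 for the hardware's 32- to 48-bit range):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            unsigned int input_size = 32;           /* assumed width */
            uint64_t input_mask = (1ULL << input_size) - 1;
            uint64_t iova = 0x1ffff0000ULL;         /* needs 33 bits */

            if (iova & ~input_mask)
                    printf("iova %#llx exceeds %u-bit input space: -ERANGE\n",
                           (unsigned long long)iova, input_size);
            return 0;
    }
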
1396 | |||
1397 | static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, | ||
1398 | phys_addr_t paddr, size_t size, int flags) | ||
1399 | { | ||
1400 | struct arm_smmu_domain *smmu_domain = domain->priv; | ||
1401 | struct arm_smmu_device *smmu = smmu_domain ? smmu_domain->leaf_smmu : NULL; | ||
1402 | |||
1403 | if (!smmu_domain || !smmu) | ||
1404 | return -ENODEV; | ||
1405 | |||
1406 | /* Check for silent address truncation up the SMMU chain. */ | ||
1407 | if ((phys_addr_t)iova & ~smmu_domain->output_mask) | ||
1408 | return -ERANGE; | ||
1409 | |||
1410 | return arm_smmu_handle_mapping(smmu_domain, iova, paddr, size, flags); | ||
1411 | } | ||
1412 | |||
1413 | static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, | ||
1414 | size_t size) | ||
1415 | { | ||
1416 | int ret; | ||
1417 | struct arm_smmu_domain *smmu_domain = domain->priv; | ||
1418 | struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; | ||
1419 | struct arm_smmu_device *smmu = root_cfg->smmu; | ||
1420 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); | ||
1421 | |||
1422 | ret = arm_smmu_handle_mapping(smmu_domain, iova, 0, size, 0); | ||
1423 | writel_relaxed(root_cfg->vmid, gr0_base + ARM_SMMU_GR0_TLBIVMID); | ||
1424 | arm_smmu_tlb_sync(smmu); | ||
1425 | return ret ? ret : size; | ||
1426 | } | ||
1427 | |||
1428 | static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain, | ||
1429 | dma_addr_t iova) | ||
1430 | { | ||
1431 | pgd_t *pgd; | ||
1432 | pud_t *pud; | ||
1433 | pmd_t *pmd; | ||
1434 | pte_t *pte; | ||
1435 | struct arm_smmu_domain *smmu_domain = domain->priv; | ||
1436 | struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; | ||
1437 | struct arm_smmu_device *smmu = root_cfg->smmu; | ||
1438 | |||
1439 | spin_lock(&smmu_domain->lock); | ||
1440 | pgd = root_cfg->pgd; | ||
1441 | if (!pgd) | ||
1442 | goto err_unlock; | ||
1443 | |||
1444 | pgd += pgd_index(iova); | ||
1445 | if (pgd_none_or_clear_bad(pgd)) | ||
1446 | goto err_unlock; | ||
1447 | |||
1448 | pud = pud_offset(pgd, iova); | ||
1449 | if (pud_none_or_clear_bad(pud)) | ||
1450 | goto err_unlock; | ||
1451 | |||
1452 | pmd = pmd_offset(pud, iova); | ||
1453 | if (pmd_none_or_clear_bad(pmd)) | ||
1454 | goto err_unlock; | ||
1455 | |||
1456 | pte = pmd_page_vaddr(*pmd) + pte_index(iova); | ||
1457 | if (pte_none(*pte)) | ||
1458 | goto err_unlock; | ||
1459 | |||
1460 | spin_unlock(&smmu_domain->lock); | ||
1461 | return __pfn_to_phys(pte_pfn(*pte)) | (iova & ~PAGE_MASK); | ||
1462 | |||
1463 | err_unlock: | ||
1464 | spin_unlock(&smmu_domain->lock); | ||
1465 | dev_warn(smmu->dev, | ||
1466 | "invalid (corrupt?) page tables detected for iova 0x%llx\n", | ||
1467 | (unsigned long long)iova); | ||
1468 | return 0; | ||
1469 | } | ||
1470 | |||
1471 | static int arm_smmu_domain_has_cap(struct iommu_domain *domain, | ||
1472 | unsigned long cap) | ||
1473 | { | ||
1474 | unsigned long caps = 0; | ||
1475 | struct arm_smmu_domain *smmu_domain = domain->priv; | ||
1476 | |||
1477 | if (smmu_domain->root_cfg.smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) | ||
1478 | caps |= IOMMU_CAP_CACHE_COHERENCY; | ||
1479 | |||
1480 | return !!(cap & caps); | ||
1481 | } | ||
1482 | |||
1483 | static int arm_smmu_add_device(struct device *dev) | ||
1484 | { | ||
1485 | struct arm_smmu_device *child, *parent, *smmu; | ||
1486 | struct arm_smmu_master *master = NULL; | ||
1487 | |||
1488 | spin_lock(&arm_smmu_devices_lock); | ||
1489 | list_for_each_entry(parent, &arm_smmu_devices, list) { | ||
1490 | smmu = parent; | ||
1491 | |||
1492 | /* Try to find a child of the current SMMU. */ | ||
1493 | list_for_each_entry(child, &arm_smmu_devices, list) { | ||
1494 | if (child->parent_of_node == parent->dev->of_node) { | ||
1495 | /* Does the child sit above our master? */ | ||
1496 | master = find_smmu_master(child, dev->of_node); | ||
1497 | if (master) { | ||
1498 | smmu = NULL; | ||
1499 | break; | ||
1500 | } | ||
1501 | } | ||
1502 | } | ||
1503 | |||
1504 | /* We found some children, so keep searching. */ | ||
1505 | if (!smmu) { | ||
1506 | master = NULL; | ||
1507 | continue; | ||
1508 | } | ||
1509 | |||
1510 | master = find_smmu_master(smmu, dev->of_node); | ||
1511 | if (master) | ||
1512 | break; | ||
1513 | } | ||
1514 | spin_unlock(&arm_smmu_devices_lock); | ||
1515 | |||
1516 | if (!master) | ||
1517 | return -ENODEV; | ||
1518 | |||
1519 | dev->archdata.iommu = smmu; | ||
1520 | return 0; | ||
1521 | } | ||
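
The nested loop above selects, for a given master, the SMMU closest to it in a chain: a candidate is skipped whenever one of its children also claims the master, since the device must then attach through the child. A simplified user-space model of that selection, with an invented two-SMMU topology:

    #include <stdio.h>
    #include <string.h>

    struct smmu {
            const char *name;
            const char *parent;      /* NULL for a root SMMU */
            const char *masters[4];  /* devices wired directly into it */
    };

    static int claims(const struct smmu *s, const char *dev)
    {
            int i;

            for (i = 0; s->masters[i]; i++)
                    if (!strcmp(s->masters[i], dev))
                            return 1;
            return 0;
    }

    int main(void)
    {
            struct smmu smmus[] = {
                    { "smmu-root", NULL,        { "gpu" } },
                    { "smmu-leaf", "smmu-root", { "gpu" } },
            };
            const char *dev = "gpu";
            unsigned int i, j, n = 2;

            for (i = 0; i < n; i++) {
                    int shadowed = 0;

                    for (j = 0; j < n; j++)
                            if (smmus[j].parent &&
                                !strcmp(smmus[j].parent, smmus[i].name) &&
                                claims(&smmus[j], dev))
                                    shadowed = 1; /* a child sits closer */

                    if (!shadowed && claims(&smmus[i], dev)) {
                            printf("%s attaches via %s\n", dev,
                                   smmus[i].name);
                            break;
                    }
            }
            return 0;
    }

Here "gpu" attaches via smmu-leaf: the root is skipped because its child also claims the device.
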
1522 | |||
1523 | static void arm_smmu_remove_device(struct device *dev) | ||
1524 | { | ||
1525 | dev->archdata.iommu = NULL; | ||
1526 | } | ||
1527 | |||
1528 | static struct iommu_ops arm_smmu_ops = { | ||
1529 | .domain_init = arm_smmu_domain_init, | ||
1530 | .domain_destroy = arm_smmu_domain_destroy, | ||
1531 | .attach_dev = arm_smmu_attach_dev, | ||
1532 | .detach_dev = arm_smmu_detach_dev, | ||
1533 | .map = arm_smmu_map, | ||
1534 | .unmap = arm_smmu_unmap, | ||
1535 | .iova_to_phys = arm_smmu_iova_to_phys, | ||
1536 | .domain_has_cap = arm_smmu_domain_has_cap, | ||
1537 | .add_device = arm_smmu_add_device, | ||
1538 | .remove_device = arm_smmu_remove_device, | ||
1539 | .pgsize_bitmap = (SECTION_SIZE | | ||
1540 | ARM_SMMU_PTE_CONT_SIZE | | ||
1541 | PAGE_SIZE), | ||
1542 | }; | ||
1543 | |||
1544 | static void arm_smmu_device_reset(struct arm_smmu_device *smmu) | ||
1545 | { | ||
1546 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); | ||
1547 | int i = 0; | ||
1548 | u32 scr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sCR0); | ||
1549 | |||
1550 | /* Mark all SMRn as invalid and all S2CRn as bypass */ | ||
1551 | for (i = 0; i < smmu->num_mapping_groups; ++i) { | ||
1552 | writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(i)); | ||
1553 | writel_relaxed(S2CR_TYPE_BYPASS, gr0_base + ARM_SMMU_GR0_S2CR(i)); | ||
1554 | } | ||
1555 | |||
1556 | /* Invalidate the TLB, just in case */ | ||
1557 | writel_relaxed(0, gr0_base + ARM_SMMU_GR0_STLBIALL); | ||
1558 | writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH); | ||
1559 | writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH); | ||
1560 | |||
1561 | /* Enable fault reporting */ | ||
1562 | scr0 |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE); | ||
1563 | |||
1564 | /* Disable TLB broadcasting. */ | ||
1565 | scr0 |= (sCR0_VMIDPNE | sCR0_PTM); | ||
1566 | |||
1567 | /* Enable client access, but bypass when no mapping is found */ | ||
1568 | scr0 &= ~(sCR0_CLIENTPD | sCR0_USFCFG); | ||
1569 | |||
1570 | /* Disable forced broadcasting */ | ||
1571 | scr0 &= ~sCR0_FB; | ||
1572 | |||
1573 | /* Don't upgrade barriers */ | ||
1574 | scr0 &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT); | ||
1575 | |||
1576 | /* Push the button */ | ||
1577 | arm_smmu_tlb_sync(smmu); | ||
1578 | writel(scr0, gr0_base + ARM_SMMU_GR0_sCR0); | ||
1579 | } | ||
1580 | |||
1581 | static int arm_smmu_id_size_to_bits(int size) | ||
1582 | { | ||
1583 | switch (size) { | ||
1584 | case 0: | ||
1585 | return 32; | ||
1586 | case 1: | ||
1587 | return 36; | ||
1588 | case 2: | ||
1589 | return 40; | ||
1590 | case 3: | ||
1591 | return 42; | ||
1592 | case 4: | ||
1593 | return 44; | ||
1594 | case 5: | ||
1595 | default: | ||
1596 | return 48; | ||
1597 | } | ||
1598 | } | ||
1599 | |||
1600 | static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) | ||
1601 | { | ||
1602 | unsigned long size; | ||
1603 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); | ||
1604 | u32 id; | ||
1605 | |||
1606 | dev_notice(smmu->dev, "probing hardware configuration...\n"); | ||
1607 | |||
1608 | /* Primecell ID */ | ||
1609 | id = readl_relaxed(gr0_base + ARM_SMMU_GR0_PIDR2); | ||
1610 | smmu->version = ((id >> PIDR2_ARCH_SHIFT) & PIDR2_ARCH_MASK) + 1; | ||
1611 | dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version); | ||
1612 | |||
1613 | /* ID0 */ | ||
1614 | id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0); | ||
1615 | #ifndef CONFIG_64BIT | ||
1616 | if (((id >> ID0_PTFS_SHIFT) & ID0_PTFS_MASK) == ID0_PTFS_V8_ONLY) { | ||
1617 | dev_err(smmu->dev, "\tno v7 descriptor support!\n"); | ||
1618 | return -ENODEV; | ||
1619 | } | ||
1620 | #endif | ||
1621 | if (id & ID0_S1TS) { | ||
1622 | smmu->features |= ARM_SMMU_FEAT_TRANS_S1; | ||
1623 | dev_notice(smmu->dev, "\tstage 1 translation\n"); | ||
1624 | } | ||
1625 | |||
1626 | if (id & ID0_S2TS) { | ||
1627 | smmu->features |= ARM_SMMU_FEAT_TRANS_S2; | ||
1628 | dev_notice(smmu->dev, "\tstage 2 translation\n"); | ||
1629 | } | ||
1630 | |||
1631 | if (id & ID0_NTS) { | ||
1632 | smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED; | ||
1633 | dev_notice(smmu->dev, "\tnested translation\n"); | ||
1634 | } | ||
1635 | |||
1636 | if (!(smmu->features & | ||
1637 | (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2 | | ||
1638 | ARM_SMMU_FEAT_TRANS_NESTED))) { | ||
1639 | dev_err(smmu->dev, "\tno translation support!\n"); | ||
1640 | return -ENODEV; | ||
1641 | } | ||
1642 | |||
1643 | if (id & ID0_CTTW) { | ||
1644 | smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK; | ||
1645 | dev_notice(smmu->dev, "\tcoherent table walk\n"); | ||
1646 | } | ||
1647 | |||
1648 | if (id & ID0_SMS) { | ||
1649 | u32 smr, sid, mask; | ||
1650 | |||
1651 | smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH; | ||
1652 | smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) & | ||
1653 | ID0_NUMSMRG_MASK; | ||
1654 | if (smmu->num_mapping_groups == 0) { | ||
1655 | dev_err(smmu->dev, | ||
1656 | "stream-matching supported, but no SMRs present!\n"); | ||
1657 | return -ENODEV; | ||
1658 | } | ||
1659 | |||
1660 | smr = SMR_MASK_MASK << SMR_MASK_SHIFT; | ||
1661 | smr |= (SMR_ID_MASK << SMR_ID_SHIFT); | ||
1662 | writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0)); | ||
1663 | smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0)); | ||
1664 | |||
1665 | mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK; | ||
1666 | sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK; | ||
1667 | if ((mask & sid) != sid) { | ||
1668 | dev_err(smmu->dev, | ||
1669 | "SMR mask bits (0x%x) insufficient for ID field (0x%x)\n", | ||
1670 | mask, sid); | ||
1671 | return -ENODEV; | ||
1672 | } | ||
1673 | |||
1674 | dev_notice(smmu->dev, | ||
1675 | "\tstream matching with %u register groups, mask 0x%x", | ||
1676 | smmu->num_mapping_groups, mask); | ||
1677 | } | ||
1678 | |||
1679 | /* ID1 */ | ||
1680 | id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1); | ||
1681 | smmu->pagesize = (id & ID1_PAGESIZE) ? SZ_64K : SZ_4K; | ||
1682 | |||
1683 | /* Check that we ioremapped enough */ | ||
1684 | size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1); | ||
1685 | size *= (smmu->pagesize << 1); | ||
1686 | if (smmu->size < size) | ||
1687 | dev_warn(smmu->dev, | ||
1688 | "device is 0x%lx bytes but only mapped 0x%lx!\n", | ||
1689 | size, smmu->size); | ||
1690 | |||
1691 | smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & | ||
1692 | ID1_NUMS2CB_MASK; | ||
1693 | smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK; | ||
1694 | if (smmu->num_s2_context_banks > smmu->num_context_banks) { | ||
1695 | dev_err(smmu->dev, "impossible number of S2 context banks!\n"); | ||
1696 | return -ENODEV; | ||
1697 | } | ||
1698 | dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n", | ||
1699 | smmu->num_context_banks, smmu->num_s2_context_banks); | ||
1700 | |||
1701 | /* ID2 */ | ||
1702 | id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2); | ||
1703 | size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK); | ||
1704 | |||
1705 | /* | ||
1706 | * Stage-1 output limited by stage-2 input size due to pgd | ||
1707 | * allocation (PTRS_PER_PGD). | ||
1708 | */ | ||
1709 | #ifdef CONFIG_64BIT | ||
1710 | /* Current maximum output size of 39 bits */ | ||
1711 | smmu->s1_output_size = min(39UL, size); | ||
1712 | #else | ||
1713 | smmu->s1_output_size = min(32UL, size); | ||
1714 | #endif | ||
1715 | |||
1716 | /* The stage-2 output mask is also applied for bypass */ | ||
1717 | size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK); | ||
1718 | smmu->s2_output_size = min((unsigned long)PHYS_MASK_SHIFT, size); | ||
1719 | |||
1720 | if (smmu->version == 1) { | ||
1721 | smmu->input_size = 32; | ||
1722 | } else { | ||
1723 | #ifdef CONFIG_64BIT | ||
1724 | size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK; | ||
1725 | size = min(39, arm_smmu_id_size_to_bits(size)); | ||
1726 | #else | ||
1727 | size = 32; | ||
1728 | #endif | ||
1729 | smmu->input_size = size; | ||
1730 | |||
1731 | if ((PAGE_SIZE == SZ_4K && !(id & ID2_PTFS_4K)) || | ||
1732 | (PAGE_SIZE == SZ_64K && !(id & ID2_PTFS_64K)) || | ||
1733 | (PAGE_SIZE != SZ_4K && PAGE_SIZE != SZ_64K)) { | ||
1734 | dev_err(smmu->dev, "CPU page size 0x%lx unsupported\n", | ||
1735 | PAGE_SIZE); | ||
1736 | return -ENODEV; | ||
1737 | } | ||
1738 | } | ||
1739 | |||
1740 | dev_notice(smmu->dev, | ||
1741 | "\t%lu-bit VA, %lu-bit IPA, %lu-bit PA\n", | ||
1742 | smmu->input_size, smmu->s1_output_size, smmu->s2_output_size); | ||
1743 | return 0; | ||
1744 | } | ||
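
The SMR handling in the ID0 block above uses a classic discovery idiom: write all ones to a field, read it back, and the bits that stick are the bits the hardware implements. A generic sketch of the technique (the helper and the decision to restore the old value are assumptions; the driver itself can leave SMR(0) scribbled on because arm_smmu_device_reset() later marks every SMR invalid):

    #include <linux/io.h>
    #include <linux/types.h>

    /* Hypothetical helper: discover which bits of a register stick. */
    static u32 probe_writable_bits(void __iomem *reg)
    {
            u32 orig = readl_relaxed(reg);
            u32 impl;

            writel_relaxed(~0u, reg);
            impl = readl_relaxed(reg); /* implemented bits read back as 1 */
            writel_relaxed(orig, reg); /* restore the original contents */

            return impl;
    }
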
1745 | |||
1746 | static int arm_smmu_device_dt_probe(struct platform_device *pdev) | ||
1747 | { | ||
1748 | struct resource *res; | ||
1749 | struct arm_smmu_device *smmu; | ||
1750 | struct device_node *dev_node; | ||
1751 | struct device *dev = &pdev->dev; | ||
1752 | struct rb_node *node; | ||
1753 | struct of_phandle_args masterspec; | ||
1754 | int num_irqs, i, err; | ||
1755 | |||
1756 | smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL); | ||
1757 | if (!smmu) { | ||
1758 | dev_err(dev, "failed to allocate arm_smmu_device\n"); | ||
1759 | return -ENOMEM; | ||
1760 | } | ||
1761 | smmu->dev = dev; | ||
1762 | |||
1763 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1764 | if (!res) { | ||
1765 | dev_err(dev, "missing base address/size\n"); | ||
1766 | return -ENODEV; | ||
1767 | } | ||
1768 | |||
1769 | smmu->size = resource_size(res); | ||
1770 | smmu->base = devm_request_and_ioremap(dev, res); | ||
1771 | if (!smmu->base) | ||
1772 | return -EADDRNOTAVAIL; | ||
1773 | |||
1774 | if (of_property_read_u32(dev->of_node, "#global-interrupts", | ||
1775 | &smmu->num_global_irqs)) { | ||
1776 | dev_err(dev, "missing #global-interrupts property\n"); | ||
1777 | return -ENODEV; | ||
1778 | } | ||
1779 | |||
1780 | num_irqs = 0; | ||
1781 | while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) { | ||
1782 | num_irqs++; | ||
1783 | if (num_irqs > smmu->num_global_irqs) | ||
1784 | smmu->num_context_irqs++; | ||
1785 | } | ||
1786 | |||
1787 | if (num_irqs < smmu->num_global_irqs) { | ||
1788 | dev_warn(dev, "found %d interrupts but expected at least %d\n", | ||
1789 | num_irqs, smmu->num_global_irqs); | ||
1790 | smmu->num_global_irqs = num_irqs; | ||
1791 | } | ||
1792 | smmu->num_context_irqs = num_irqs - smmu->num_global_irqs; | ||
1793 | |||
1794 | smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs, | ||
1795 | GFP_KERNEL); | ||
1796 | if (!smmu->irqs) { | ||
1797 | dev_err(dev, "failed to allocate %d irqs\n", num_irqs); | ||
1798 | return -ENOMEM; | ||
1799 | } | ||
1800 | |||
1801 | for (i = 0; i < num_irqs; ++i) { | ||
1802 | int irq = platform_get_irq(pdev, i); | ||
1803 | if (irq < 0) { | ||
1804 | dev_err(dev, "failed to get irq index %d\n", i); | ||
1805 | return -ENODEV; | ||
1806 | } | ||
1807 | smmu->irqs[i] = irq; | ||
1808 | } | ||
1809 | |||
1810 | i = 0; | ||
1811 | smmu->masters = RB_ROOT; | ||
1812 | while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters", | ||
1813 | "#stream-id-cells", i, | ||
1814 | &masterspec)) { | ||
1815 | err = register_smmu_master(smmu, dev, &masterspec); | ||
1816 | if (err) { | ||
1817 | dev_err(dev, "failed to add master %s\n", | ||
1818 | masterspec.np->name); | ||
1819 | goto out_put_masters; | ||
1820 | } | ||
1821 | |||
1822 | i++; | ||
1823 | } | ||
1824 | dev_notice(dev, "registered %d master devices\n", i); | ||
1825 | |||
1826 | if ((dev_node = of_parse_phandle(dev->of_node, "smmu-parent", 0))) | ||
1827 | smmu->parent_of_node = dev_node; | ||
1828 | |||
1829 | err = arm_smmu_device_cfg_probe(smmu); | ||
1830 | if (err) | ||
1831 | goto out_put_parent; | ||
1832 | |||
1833 | if (smmu->version > 1 && | ||
1834 | smmu->num_context_banks != smmu->num_context_irqs) { | ||
1835 | dev_err(dev, | ||
1836 | "found only %d context interrupt(s) but %d required\n", | ||
1837 | smmu->num_context_irqs, smmu->num_context_banks); | ||
1838 | err = -ENODEV; goto out_put_parent; | ||
1839 | } | ||
1840 | |||
1841 | arm_smmu_device_reset(smmu); | ||
1842 | |||
1843 | for (i = 0; i < smmu->num_global_irqs; ++i) { | ||
1844 | err = request_irq(smmu->irqs[i], | ||
1845 | arm_smmu_global_fault, | ||
1846 | IRQF_SHARED, | ||
1847 | "arm-smmu global fault", | ||
1848 | smmu); | ||
1849 | if (err) { | ||
1850 | dev_err(dev, "failed to request global IRQ %d (%u)\n", | ||
1851 | i, smmu->irqs[i]); | ||
1852 | goto out_free_irqs; | ||
1853 | } | ||
1854 | } | ||
1855 | |||
1856 | INIT_LIST_HEAD(&smmu->list); | ||
1857 | spin_lock(&arm_smmu_devices_lock); | ||
1858 | list_add(&smmu->list, &arm_smmu_devices); | ||
1859 | spin_unlock(&arm_smmu_devices_lock); | ||
1860 | return 0; | ||
1861 | |||
1862 | out_free_irqs: | ||
1863 | while (i--) | ||
1864 | free_irq(smmu->irqs[i], smmu); | ||
1865 | |||
1866 | out_put_parent: | ||
1867 | if (smmu->parent_of_node) | ||
1868 | of_node_put(smmu->parent_of_node); | ||
1869 | |||
1870 | out_put_masters: | ||
1871 | for (node = rb_first(&smmu->masters); node; node = rb_next(node)) { | ||
1872 | struct arm_smmu_master *master; | ||
1873 | master = container_of(node, struct arm_smmu_master, node); | ||
1874 | of_node_put(master->of_node); | ||
1875 | } | ||
1876 | |||
1877 | return err; | ||
1878 | } | ||
1879 | |||
1880 | static int arm_smmu_device_remove(struct platform_device *pdev) | ||
1881 | { | ||
1882 | int i; | ||
1883 | struct device *dev = &pdev->dev; | ||
1884 | struct arm_smmu_device *curr, *smmu = NULL; | ||
1885 | struct rb_node *node; | ||
1886 | |||
1887 | spin_lock(&arm_smmu_devices_lock); | ||
1888 | list_for_each_entry(curr, &arm_smmu_devices, list) { | ||
1889 | if (curr->dev == dev) { | ||
1890 | smmu = curr; | ||
1891 | list_del(&smmu->list); | ||
1892 | break; | ||
1893 | } | ||
1894 | } | ||
1895 | spin_unlock(&arm_smmu_devices_lock); | ||
1896 | |||
1897 | if (!smmu) | ||
1898 | return -ENODEV; | ||
1899 | |||
1900 | if (smmu->parent_of_node) | ||
1901 | of_node_put(smmu->parent_of_node); | ||
1902 | |||
1903 | for (node = rb_first(&smmu->masters); node; node = rb_next(node)) { | ||
1904 | struct arm_smmu_master *master; | ||
1905 | master = container_of(node, struct arm_smmu_master, node); | ||
1906 | of_node_put(master->of_node); | ||
1907 | } | ||
1908 | |||
1909 | if (!bitmap_empty(smmu->vmid_map, ARM_SMMU_NUM_VMIDS)) | ||
1910 | dev_err(dev, "removing device with active domains!\n"); | ||
1911 | |||
1912 | for (i = 0; i < smmu->num_global_irqs; ++i) | ||
1913 | free_irq(smmu->irqs[i], smmu); | ||
1914 | |||
1915 | /* Turn the thing off */ | ||
1916 | writel(sCR0_CLIENTPD, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_sCR0); | ||
1917 | return 0; | ||
1918 | } | ||
1919 | |||
1920 | #ifdef CONFIG_OF | ||
1921 | static struct of_device_id arm_smmu_of_match[] = { | ||
1922 | { .compatible = "arm,smmu-v1", }, | ||
1923 | { .compatible = "arm,smmu-v2", }, | ||
1924 | { .compatible = "arm,mmu-400", }, | ||
1925 | { .compatible = "arm,mmu-500", }, | ||
1926 | { }, | ||
1927 | }; | ||
1928 | MODULE_DEVICE_TABLE(of, arm_smmu_of_match); | ||
1929 | #endif | ||
1930 | |||
1931 | static struct platform_driver arm_smmu_driver = { | ||
1932 | .driver = { | ||
1933 | .owner = THIS_MODULE, | ||
1934 | .name = "arm-smmu", | ||
1935 | .of_match_table = of_match_ptr(arm_smmu_of_match), | ||
1936 | }, | ||
1937 | .probe = arm_smmu_device_dt_probe, | ||
1938 | .remove = arm_smmu_device_remove, | ||
1939 | }; | ||
1940 | |||
1941 | static int __init arm_smmu_init(void) | ||
1942 | { | ||
1943 | int ret; | ||
1944 | |||
1945 | ret = platform_driver_register(&arm_smmu_driver); | ||
1946 | if (ret) | ||
1947 | return ret; | ||
1948 | |||
1949 | /* Oh, for a proper bus abstraction */ | ||
1950 | if (!iommu_present(&platform_bus_type)) | ||
1951 | bus_set_iommu(&platform_bus_type, &arm_smmu_ops); | ||
1952 | |||
1953 | if (!iommu_present(&amba_bustype)) | ||
1954 | bus_set_iommu(&amba_bustype, &arm_smmu_ops); | ||
1955 | |||
1956 | return 0; | ||
1957 | } | ||
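
Once bus_set_iommu() has run, a driver for any device behind the SMMU can use the generic IOMMU API without knowing anything about this file. A hedged usage sketch against the 3.11-era API (the function name and iova value are arbitrary, and error handling is trimmed):

    #include <linux/iommu.h>
    #include <linux/mm.h>
    #include <linux/platform_device.h>

    static int example_use_smmu(struct device *dev, phys_addr_t pa)
    {
            struct iommu_domain *domain;
            unsigned long iova = 0x10000000; /* arbitrary, page-aligned */
            int ret;

            domain = iommu_domain_alloc(&platform_bus_type);
            if (!domain)
                    return -ENOMEM;

            ret = iommu_attach_device(domain, dev);
            if (ret)
                    goto out_free;

            ret = iommu_map(domain, iova, pa, PAGE_SIZE,
                            IOMMU_READ | IOMMU_WRITE);
            if (!ret) {
                    /* round-trip through arm_smmu_iova_to_phys() */
                    WARN_ON(iommu_iova_to_phys(domain, iova) != pa);
                    iommu_unmap(domain, iova, PAGE_SIZE);
            }

            iommu_detach_device(domain, dev);
    out_free:
            iommu_domain_free(domain);
            return ret;
    }
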
1958 | |||
1959 | static void __exit arm_smmu_exit(void) | ||
1960 | { | ||
1961 | return platform_driver_unregister(&arm_smmu_driver); | ||
1962 | } | ||
1963 | |||
1964 | module_init(arm_smmu_init); | ||
1965 | module_exit(arm_smmu_exit); | ||
1966 | |||
1967 | MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations"); | ||
1968 | MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>"); | ||
1969 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index a7967ceb79e6..785675a56a10 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c | |||
@@ -309,6 +309,7 @@ parse_dmar_table(void) | |||
309 | struct acpi_table_dmar *dmar; | 309 | struct acpi_table_dmar *dmar; |
310 | struct acpi_dmar_header *entry_header; | 310 | struct acpi_dmar_header *entry_header; |
311 | int ret = 0; | 311 | int ret = 0; |
312 | int drhd_count = 0; | ||
312 | 313 | ||
313 | /* | 314 | /* |
314 | * Do it again, earlier dmar_tbl mapping could be mapped with | 315 | * Do it again, earlier dmar_tbl mapping could be mapped with |
@@ -347,6 +348,7 @@ parse_dmar_table(void) | |||
347 | 348 | ||
348 | switch (entry_header->type) { | 349 | switch (entry_header->type) { |
349 | case ACPI_DMAR_TYPE_HARDWARE_UNIT: | 350 | case ACPI_DMAR_TYPE_HARDWARE_UNIT: |
351 | drhd_count++; | ||
350 | ret = dmar_parse_one_drhd(entry_header); | 352 | ret = dmar_parse_one_drhd(entry_header); |
351 | break; | 353 | break; |
352 | case ACPI_DMAR_TYPE_RESERVED_MEMORY: | 354 | case ACPI_DMAR_TYPE_RESERVED_MEMORY: |
@@ -371,6 +373,8 @@ parse_dmar_table(void) | |||
371 | 373 | ||
372 | entry_header = ((void *)entry_header + entry_header->length); | 374 | entry_header = ((void *)entry_header + entry_header->length); |
373 | } | 375 | } |
376 | if (drhd_count == 0) | ||
377 | pr_warn(FW_BUG "No DRHD structure found in DMAR table\n"); | ||
374 | return ret; | 378 | return ret; |
375 | } | 379 | } |
376 | 380 | ||
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index b4f0e28dfa41..eec0d3e04bf5 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
@@ -4182,14 +4182,27 @@ static int intel_iommu_add_device(struct device *dev) | |||
4182 | 4182 | ||
4183 | /* | 4183 | /* |
4184 | * If it's a multifunction device that does not support our | 4184 | * If it's a multifunction device that does not support our |
4185 | * required ACS flags, add to the same group as function 0. | 4185 | * required ACS flags, add to the same group as lowest numbered |
4186 | * function that also does not support the required ACS flags. | ||
4186 | */ | 4187 | */ |
4187 | if (dma_pdev->multifunction && | 4188 | if (dma_pdev->multifunction && |
4188 | !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) | 4189 | !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) { |
4189 | swap_pci_ref(&dma_pdev, | 4190 | u8 i, slot = PCI_SLOT(dma_pdev->devfn); |
4190 | pci_get_slot(dma_pdev->bus, | 4191 | |
4191 | PCI_DEVFN(PCI_SLOT(dma_pdev->devfn), | 4192 | for (i = 0; i < 8; i++) { |
4192 | 0))); | 4193 | struct pci_dev *tmp; |
4194 | |||
4195 | tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i)); | ||
4196 | if (!tmp) | ||
4197 | continue; | ||
4198 | |||
4199 | if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) { | ||
4200 | swap_pci_ref(&dma_pdev, tmp); | ||
4201 | break; | ||
4202 | } | ||
4203 | pci_dev_put(tmp); | ||
4204 | } | ||
4205 | } | ||
4193 | 4206 | ||
4194 | /* | 4207 | /* |
4195 | * Devices on the root bus go through the iommu. If that's not us, | 4208 | * Devices on the root bus go through the iommu. If that's not us, |
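
The replacement loop probes all eight possible functions of the slot with PCI_DEVFN(). For reference, a user-space sketch of the devfn encoding those macros implement (definitions copied from <linux/pci.h> so the example stands alone):

    #include <stdio.h>

    #define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))
    #define PCI_SLOT(devfn)       (((devfn) >> 3) & 0x1f)
    #define PCI_FUNC(devfn)       ((devfn) & 0x07)

    int main(void)
    {
            unsigned int devfn = PCI_DEVFN(2, 1); /* device 02.1 */
            int i;

            /* walk the functions sharing the slot, as the fix does */
            for (i = 0; i < 8; i++)
                    printf("probe function %02x.%d\n", PCI_SLOT(devfn), i);
            return 0;
    }
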
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c index 5b19b2d6ec2d..f71673dbb23d 100644 --- a/drivers/iommu/intel_irq_remapping.c +++ b/drivers/iommu/intel_irq_remapping.c | |||
@@ -664,8 +664,7 @@ error: | |||
664 | */ | 664 | */ |
665 | 665 | ||
666 | if (x2apic_present) | 666 | if (x2apic_present) |
667 | WARN(1, KERN_WARNING | 667 | pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n"); |
668 | "Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n"); | ||
669 | 668 | ||
670 | return -1; | 669 | return -1; |
671 | } | 670 | } |
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index d8f98b14e2fe..fbe9ca734f8f 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c | |||
@@ -754,6 +754,38 @@ int iommu_domain_has_cap(struct iommu_domain *domain, | |||
754 | } | 754 | } |
755 | EXPORT_SYMBOL_GPL(iommu_domain_has_cap); | 755 | EXPORT_SYMBOL_GPL(iommu_domain_has_cap); |
756 | 756 | ||
757 | static size_t iommu_pgsize(struct iommu_domain *domain, | ||
758 | unsigned long addr_merge, size_t size) | ||
759 | { | ||
760 | unsigned int pgsize_idx; | ||
761 | size_t pgsize; | ||
762 | |||
763 | /* Max page size that still fits into 'size' */ | ||
764 | pgsize_idx = __fls(size); | ||
765 | |||
766 | /* need to consider alignment requirements ? */ | ||
767 | if (likely(addr_merge)) { | ||
768 | /* Max page size allowed by address */ | ||
769 | unsigned int align_pgsize_idx = __ffs(addr_merge); | ||
770 | pgsize_idx = min(pgsize_idx, align_pgsize_idx); | ||
771 | } | ||
772 | |||
773 | /* build a mask of acceptable page sizes */ | ||
774 | pgsize = (1UL << (pgsize_idx + 1)) - 1; | ||
775 | |||
776 | /* throw away page sizes not supported by the hardware */ | ||
777 | pgsize &= domain->ops->pgsize_bitmap; | ||
778 | |||
779 | /* make sure we're still sane */ | ||
780 | BUG_ON(!pgsize); | ||
781 | |||
782 | /* pick the biggest page */ | ||
783 | pgsize_idx = __fls(pgsize); | ||
784 | pgsize = 1UL << pgsize_idx; | ||
785 | |||
786 | return pgsize; | ||
787 | } | ||
788 | |||
757 | int iommu_map(struct iommu_domain *domain, unsigned long iova, | 789 | int iommu_map(struct iommu_domain *domain, unsigned long iova, |
758 | phys_addr_t paddr, size_t size, int prot) | 790 | phys_addr_t paddr, size_t size, int prot) |
759 | { | 791 | { |
@@ -775,45 +807,18 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova, | |||
775 | * size of the smallest page supported by the hardware | 807 | * size of the smallest page supported by the hardware |
776 | */ | 808 | */ |
777 | if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) { | 809 | if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) { |
778 | pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz " | 810 | pr_err("unaligned: iova 0x%lx pa 0x%pa size 0x%zx min_pagesz 0x%x\n", |
779 | "0x%x\n", iova, (unsigned long)paddr, | 811 | iova, &paddr, size, min_pagesz); |
780 | (unsigned long)size, min_pagesz); | ||
781 | return -EINVAL; | 812 | return -EINVAL; |
782 | } | 813 | } |
783 | 814 | ||
784 | pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova, | 815 | pr_debug("map: iova 0x%lx pa 0x%pa size 0x%zx\n", iova, &paddr, size); |
785 | (unsigned long)paddr, (unsigned long)size); | ||
786 | 816 | ||
787 | while (size) { | 817 | while (size) { |
788 | unsigned long pgsize, addr_merge = iova | paddr; | 818 | size_t pgsize = iommu_pgsize(domain, iova | paddr, size); |
789 | unsigned int pgsize_idx; | ||
790 | |||
791 | /* Max page size that still fits into 'size' */ | ||
792 | pgsize_idx = __fls(size); | ||
793 | |||
794 | /* need to consider alignment requirements ? */ | ||
795 | if (likely(addr_merge)) { | ||
796 | /* Max page size allowed by both iova and paddr */ | ||
797 | unsigned int align_pgsize_idx = __ffs(addr_merge); | ||
798 | |||
799 | pgsize_idx = min(pgsize_idx, align_pgsize_idx); | ||
800 | } | ||
801 | |||
802 | /* build a mask of acceptable page sizes */ | ||
803 | pgsize = (1UL << (pgsize_idx + 1)) - 1; | ||
804 | |||
805 | /* throw away page sizes not supported by the hardware */ | ||
806 | pgsize &= domain->ops->pgsize_bitmap; | ||
807 | |||
808 | /* make sure we're still sane */ | ||
809 | BUG_ON(!pgsize); | ||
810 | |||
811 | /* pick the biggest page */ | ||
812 | pgsize_idx = __fls(pgsize); | ||
813 | pgsize = 1UL << pgsize_idx; | ||
814 | 819 | ||
815 | pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova, | 820 | pr_debug("mapping: iova 0x%lx pa 0x%pa pgsize 0x%zx\n", |
816 | (unsigned long)paddr, pgsize); | 821 | iova, &paddr, pgsize); |
817 | 822 | ||
818 | ret = domain->ops->map(domain, iova, paddr, pgsize, prot); | 823 | ret = domain->ops->map(domain, iova, paddr, pgsize, prot); |
819 | if (ret) | 824 | if (ret) |
@@ -850,27 +855,26 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size) | |||
850 | * by the hardware | 855 | * by the hardware |
851 | */ | 856 | */ |
852 | if (!IS_ALIGNED(iova | size, min_pagesz)) { | 857 | if (!IS_ALIGNED(iova | size, min_pagesz)) { |
853 | pr_err("unaligned: iova 0x%lx size 0x%lx min_pagesz 0x%x\n", | 858 | pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n", |
854 | iova, (unsigned long)size, min_pagesz); | 859 | iova, size, min_pagesz); |
855 | return -EINVAL; | 860 | return -EINVAL; |
856 | } | 861 | } |
857 | 862 | ||
858 | pr_debug("unmap this: iova 0x%lx size 0x%lx\n", iova, | 863 | pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size); |
859 | (unsigned long)size); | ||
860 | 864 | ||
861 | /* | 865 | /* |
862 | * Keep iterating until we either unmap 'size' bytes (or more) | 866 | * Keep iterating until we either unmap 'size' bytes (or more) |
863 | * or we hit an area that isn't mapped. | 867 | * or we hit an area that isn't mapped. |
864 | */ | 868 | */ |
865 | while (unmapped < size) { | 869 | while (unmapped < size) { |
866 | size_t left = size - unmapped; | 870 | size_t pgsize = iommu_pgsize(domain, iova, size - unmapped); |
867 | 871 | ||
868 | unmapped_page = domain->ops->unmap(domain, iova, left); | 872 | unmapped_page = domain->ops->unmap(domain, iova, pgsize); |
869 | if (!unmapped_page) | 873 | if (!unmapped_page) |
870 | break; | 874 | break; |
871 | 875 | ||
872 | pr_debug("unmapped: iova 0x%lx size %lx\n", iova, | 876 | pr_debug("unmapped: iova 0x%lx size 0x%zx\n", |
873 | (unsigned long)unmapped_page); | 877 | iova, unmapped_page); |
874 | 878 | ||
875 | iova += unmapped_page; | 879 | iova += unmapped_page; |
876 | unmapped += unmapped_page; | 880 | unmapped += unmapped_page; |
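
The newly factored-out iommu_pgsize() is the heart of how the core now splits both map and unmap requests into hardware-supported chunks. A user-space model of the same selection, with a bitmap assumed (for illustration only) to resemble what the ARM SMMU advertises: 4K pages, 64K contiguous runs and 2M sections:

    #include <stdio.h>

    #define BITS          (8 * (unsigned int)sizeof(unsigned long))
    #define PGSIZE_BITMAP ((1UL << 12) | (1UL << 16) | (1UL << 21))

    static unsigned long pick_pgsize(unsigned long addr_merge,
                                     unsigned long size)
    {
            unsigned int idx = BITS - 1 - __builtin_clzl(size); /* __fls */
            unsigned long mask;

            if (addr_merge) {
                    unsigned int align = __builtin_ctzl(addr_merge);
                    if (align < idx)            /* __ffs(addr_merge) */
                            idx = align;
            }

            mask = (1UL << (idx + 1)) - 1; /* every size that fits... */
            mask &= PGSIZE_BITMAP;         /* ...and that hardware has */
            /* (the kernel BUG_ONs here if the mask came out empty) */
            return 1UL << (BITS - 1 - __builtin_clzl(mask));
    }

    int main(void)
    {
            unsigned long iova = 0x200000, pa = 0x40200000;
            unsigned long size = 0x220000; /* 2M + 128K */

            while (size) {
                    unsigned long pg = pick_pgsize(iova | pa, size);

                    printf("chunk: iova %#lx pa %#lx size %#lx\n",
                           iova, pa, pg);
                    iova += pg;
                    pa += pg;
                    size -= pg;
            }
            return 0;
    }

The 2M+128K request comes out as one 2M section followed by two 64K runs, which is exactly the chunking ops->map()/ops->unmap() now see.
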
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index e02e5d71745b..0ba3766240d5 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c | |||
@@ -833,16 +833,15 @@ static irqreturn_t iommu_fault_handler(int irq, void *data) | |||
833 | iopgd = iopgd_offset(obj, da); | 833 | iopgd = iopgd_offset(obj, da); |
834 | 834 | ||
835 | if (!iopgd_is_table(*iopgd)) { | 835 | if (!iopgd_is_table(*iopgd)) { |
836 | dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p " | 836 | dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:px%08x\n", |
837 | "*pgd:px%08x\n", obj->name, errs, da, iopgd, *iopgd); | 837 | obj->name, errs, da, iopgd, *iopgd); |
838 | return IRQ_NONE; | 838 | return IRQ_NONE; |
839 | } | 839 | } |
840 | 840 | ||
841 | iopte = iopte_offset(iopgd, da); | 841 | iopte = iopte_offset(iopgd, da); |
842 | 842 | ||
843 | dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x " | 843 | dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n", |
844 | "pte:0x%p *pte:0x%08x\n", obj->name, errs, da, iopgd, *iopgd, | 844 | obj->name, errs, da, iopgd, *iopgd, iopte, *iopte); |
845 | iopte, *iopte); | ||
846 | 845 | ||
847 | return IRQ_NONE; | 846 | return IRQ_NONE; |
848 | } | 847 | } |
@@ -1235,14 +1234,16 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain, | |||
1235 | else if (iopte_is_large(*pte)) | 1234 | else if (iopte_is_large(*pte)) |
1236 | ret = omap_iommu_translate(*pte, da, IOLARGE_MASK); | 1235 | ret = omap_iommu_translate(*pte, da, IOLARGE_MASK); |
1237 | else | 1236 | else |
1238 | dev_err(dev, "bogus pte 0x%x, da 0x%lx", *pte, da); | 1237 | dev_err(dev, "bogus pte 0x%x, da 0x%llx", *pte, |
1238 | (unsigned long long)da); | ||
1239 | } else { | 1239 | } else { |
1240 | if (iopgd_is_section(*pgd)) | 1240 | if (iopgd_is_section(*pgd)) |
1241 | ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK); | 1241 | ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK); |
1242 | else if (iopgd_is_super(*pgd)) | 1242 | else if (iopgd_is_super(*pgd)) |
1243 | ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK); | 1243 | ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK); |
1244 | else | 1244 | else |
1245 | dev_err(dev, "bogus pgd 0x%x, da 0x%lx", *pgd, da); | 1245 | dev_err(dev, "bogus pgd 0x%x, da 0x%llx", *pgd, |
1246 | (unsigned long long)da); | ||
1246 | } | 1247 | } |
1247 | 1248 | ||
1248 | return ret; | 1249 | return ret; |
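
Both omap hunks and the iommu.c hunk above fix the same class of bug: dma_addr_t and phys_addr_t change width with the kernel configuration, so they must not be fed to a bare %lx. The two portable options used in this series, sketched side by side (the function is illustrative):

    #include <linux/printk.h>
    #include <linux/types.h>

    static void example_print(phys_addr_t pa, dma_addr_t da)
    {
            pr_info("pa %pa\n", &pa); /* %pa takes a *pointer* to the value */
            pr_info("da 0x%llx\n", (unsigned long long)da); /* widen by cast */
    }
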
diff --git a/drivers/iommu/omap-iopgtable.h b/drivers/iommu/omap-iopgtable.h index cd4ae9e5b0c6..f4003d568a92 100644 --- a/drivers/iommu/omap-iopgtable.h +++ b/drivers/iommu/omap-iopgtable.h | |||
@@ -95,4 +95,4 @@ static inline phys_addr_t omap_iommu_translate(u32 d, u32 va, u32 mask) | |||
95 | #define iopte_offset(iopgd, da) (iopgd_page_vaddr(iopgd) + iopte_index(da)) | 95 | #define iopte_offset(iopgd, da) (iopgd_page_vaddr(iopgd) + iopte_index(da)) |
96 | 96 | ||
97 | #define to_iommu(dev) \ | 97 | #define to_iommu(dev) \ |
98 | (struct omap_iommu *)platform_get_drvdata(to_platform_device(dev)) | 98 | ((struct omap_iommu *)platform_get_drvdata(to_platform_device(dev))) |
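
The added parentheses matter because '->' binds tighter than a cast: without them, to_iommu(dev)->field would apply the member access before the cast and fail to compile. A standalone illustration of the precedence trap:

    #include <stdio.h>

    struct inner { int x; };
    struct outer { struct inner in; };

    #define AS_INNER_BAD(p)  (struct inner *)&(p)->in
    #define AS_INNER_GOOD(p) ((struct inner *)&(p)->in)

    int main(void)
    {
            struct outer o = { { 42 } };

            /* AS_INNER_BAD(&o)->x expands to
             *     (struct inner *)&(&o)->in->x
             * which applies '->x' to a non-pointer and will not compile. */
            printf("%d\n", AS_INNER_GOOD(&o)->x);
            return 0;
    }
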
diff --git a/drivers/iommu/omap-iovmm.c b/drivers/iommu/omap-iovmm.c index 46d875690739..d14725984153 100644 --- a/drivers/iommu/omap-iovmm.c +++ b/drivers/iommu/omap-iovmm.c | |||
@@ -102,8 +102,8 @@ static size_t sgtable_len(const struct sg_table *sgt) | |||
102 | } | 102 | } |
103 | 103 | ||
104 | if (i && sg->offset) { | 104 | if (i && sg->offset) { |
105 | pr_err("%s: sg[%d] offset not allowed in internal " | 105 | pr_err("%s: sg[%d] offset not allowed in internal entries\n", |
106 | "entries\n", __func__, i); | 106 | __func__, i); |
107 | return 0; | 107 | return 0; |
108 | } | 108 | } |
109 | 109 | ||