-rw-r--r--  Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt |  41
-rw-r--r--  MAINTAINERS                                                    |   1
-rw-r--r--  arch/arm64/Kconfig                                             |   1
-rw-r--r--  arch/powerpc/include/asm/fsl_pamu_stash.h                      |   4
-rw-r--r--  drivers/iommu/Kconfig                                          |  51
-rw-r--r--  drivers/iommu/Makefile                                         |   5
-rw-r--r--  drivers/iommu/amd_iommu.c                                      |  14
-rw-r--r--  drivers/iommu/amd_iommu_init.c                                 |   2
-rw-r--r--  drivers/iommu/amd_iommu_proto.h                                |   2
-rw-r--r--  drivers/iommu/amd_iommu_types.h                                |   2
-rw-r--r--  drivers/iommu/amd_iommu_v2.c                                   |  35
-rw-r--r--  drivers/iommu/arm-smmu.c                                       | 935
-rw-r--r--  drivers/iommu/fsl_pamu.c                                       | 216
-rw-r--r--  drivers/iommu/fsl_pamu.h                                       |  15
-rw-r--r--  drivers/iommu/fsl_pamu_domain.c                                | 173
-rw-r--r--  drivers/iommu/intel-iommu.c                                    |  45
-rw-r--r--  drivers/iommu/io-pgtable-arm.c                                 | 986
-rw-r--r--  drivers/iommu/io-pgtable.c                                     |  82
-rw-r--r--  drivers/iommu/io-pgtable.h                                     | 143
-rw-r--r--  drivers/iommu/iommu.c                                          |   7
-rw-r--r--  drivers/iommu/iova.c                                           |  53
-rw-r--r--  drivers/iommu/ipmmu-vmsa.c                                     | 674
-rw-r--r--  drivers/iommu/irq_remapping.h                                  |   2
-rw-r--r--  drivers/iommu/omap-iommu.c                                     |   2
-rw-r--r--  include/linux/iopoll.h                                         | 144
-rw-r--r--  include/linux/iova.h                                           |  41
-rw-r--r--  include/linux/platform_data/ipmmu-vmsa.h                       |  24
-rw-r--r--  include/trace/events/iommu.h                                   |  31
28 files changed, 2239 insertions(+), 1492 deletions(-)
diff --git a/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt b/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt
new file mode 100644
index 000000000000..cd29083e16ec
--- /dev/null
+++ b/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt
@@ -0,0 +1,41 @@
+* Renesas VMSA-Compatible IOMMU
+
+The IPMMU is an IOMMU implementation compatible with the ARM VMSA page tables.
+It provides address translation for bus masters outside of the CPU, each
+connected to the IPMMU through a port called micro-TLB.
+
+
+Required Properties:
+
+  - compatible: Must contain "renesas,ipmmu-vmsa".
+  - reg: Base address and size of the IPMMU registers.
+  - interrupts: Specifiers for the MMU fault interrupts. For instances that
+    support secure mode two interrupts must be specified, for non-secure and
+    secure mode, in that order. For instances that don't support secure mode a
+    single interrupt must be specified.
+
+  - #iommu-cells: Must be 1.
+
+Each bus master connected to an IPMMU must reference the IPMMU in its device
+node with the following property:
+
+  - iommus: A reference to the IPMMU in two cells. The first cell is a phandle
+    to the IPMMU and the second cell the number of the micro-TLB that the
+    device is connected to.
+
+
+Example: R8A7791 IPMMU-MX and VSP1-D0 bus master
+
+	ipmmu_mx: mmu@fe951000 {
30 compatible = "renasas,ipmmu-vmsa";
31 reg = <0 0xfe951000 0 0x1000>;
32 interrupts = <0 222 IRQ_TYPE_LEVEL_HIGH>,
33 <0 221 IRQ_TYPE_LEVEL_HIGH>;
34 #iommu-cells = <1>;
35 };
36
37 vsp1@fe928000 {
38 ...
39 iommus = <&ipmmu_mx 13>;
40 ...
41 };
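For illustration only (not part of the patch): a minimal sketch of how a bus
master driver could resolve the two-cell "iommus" specifier above. The helper
name ipmmu_get_utlb() is hypothetical; of_parse_phandle_with_args() is the
standard OF API, which returns the cells following the phandle in args.args[].

	#include <linux/of.h>

	/* Hypothetical helper: look up a device's micro-TLB number */
	static int ipmmu_get_utlb(struct device_node *np)
	{
		struct of_phandle_args args;
		int ret;

		/* "#iommu-cells" is 1, so one cell follows the phandle */
		ret = of_parse_phandle_with_args(np, "iommus", "#iommu-cells",
						 0, &args);
		if (ret)
			return ret;

		of_node_put(args.np);
		return args.args[0];	/* 13 for the vsp1 example above */
	}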
diff --git a/MAINTAINERS b/MAINTAINERS
index c4a8703ab493..22999654195a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1606,6 +1606,7 @@ M: Will Deacon <will.deacon@arm.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	drivers/iommu/arm-smmu.c
+F:	drivers/iommu/io-pgtable-arm.c
 
 ARM64 PORT (AARCH64 ARCHITECTURE)
 M:	Catalin Marinas <catalin.marinas@arm.com>
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 2814304cec04..676454a65af8 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -350,7 +350,6 @@ config ARM64_VA_BITS_42
 
 config ARM64_VA_BITS_48
 	bool "48-bit"
-	depends on !ARM_SMMU
 
 endchoice
 
diff --git a/arch/powerpc/include/asm/fsl_pamu_stash.h b/arch/powerpc/include/asm/fsl_pamu_stash.h
index caa1b21c25cd..38311c98eed9 100644
--- a/arch/powerpc/include/asm/fsl_pamu_stash.h
+++ b/arch/powerpc/include/asm/fsl_pamu_stash.h
@@ -32,8 +32,8 @@ enum pamu_stash_target {
  */
 
 struct pamu_stash_attribute {
 	u32	cpu;	/* cpu number */
 	u32	cache;	/* cache to stash to: L1,L2,L3 */
 };
 
 #endif /* __FSL_PAMU_STASH_H */
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 325188eef1c1..baa0d9786f50 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -4,6 +4,7 @@ config IOMMU_API
 
 menuconfig IOMMU_SUPPORT
 	bool "IOMMU Hardware Support"
+	depends on MMU
 	default y
 	---help---
 	  Say Y here if you want to compile device drivers for IO Memory
@@ -13,13 +14,43 @@ menuconfig IOMMU_SUPPORT
 
 if IOMMU_SUPPORT
 
+menu "Generic IOMMU Pagetable Support"
+
+# Selected by the actual pagetable implementations
+config IOMMU_IO_PGTABLE
+	bool
+
+config IOMMU_IO_PGTABLE_LPAE
+	bool "ARMv7/v8 Long Descriptor Format"
+	select IOMMU_IO_PGTABLE
+	help
+	  Enable support for the ARM long descriptor pagetable format.
+	  This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page
+	  sizes at both stage-1 and stage-2, as well as address spaces
+	  up to 48-bits in size.
+
+config IOMMU_IO_PGTABLE_LPAE_SELFTEST
+	bool "LPAE selftests"
+	depends on IOMMU_IO_PGTABLE_LPAE
+	help
+	  Enable self-tests for LPAE page table allocator. This performs
+	  a series of page-table consistency checks during boot.
+
+	  If unsure, say N here.
+
+endmenu
+
+config IOMMU_IOVA
+	bool
+
 config OF_IOMMU
 	def_bool y
 	depends on OF && IOMMU_API
 
 config FSL_PAMU
 	bool "Freescale IOMMU support"
-	depends on PPC_E500MC
+	depends on PPC32
+	depends on PPC_E500MC || COMPILE_TEST
 	select IOMMU_API
 	select GENERIC_ALLOCATOR
 	help
@@ -30,7 +61,8 @@ config FSL_PAMU
 # MSM IOMMU support
 config MSM_IOMMU
 	bool "MSM IOMMU Support"
-	depends on ARCH_MSM8X60 || ARCH_MSM8960
+	depends on ARM
+	depends on ARCH_MSM8X60 || ARCH_MSM8960 || COMPILE_TEST
 	select IOMMU_API
 	help
 	  Support for the IOMMUs found on certain Qualcomm SOCs.
@@ -91,6 +123,7 @@ config INTEL_IOMMU
91 bool "Support for Intel IOMMU using DMA Remapping Devices" 123 bool "Support for Intel IOMMU using DMA Remapping Devices"
92 depends on PCI_MSI && ACPI && (X86 || IA64_GENERIC) 124 depends on PCI_MSI && ACPI && (X86 || IA64_GENERIC)
93 select IOMMU_API 125 select IOMMU_API
126 select IOMMU_IOVA
94 select DMAR_TABLE 127 select DMAR_TABLE
95 help 128 help
96 DMA remapping (DMAR) devices support enables independent address 129 DMA remapping (DMAR) devices support enables independent address
@@ -140,7 +173,8 @@ config IRQ_REMAP
 # OMAP IOMMU support
 config OMAP_IOMMU
 	bool "OMAP IOMMU Support"
-	depends on ARCH_OMAP2PLUS
+	depends on ARM && MMU
+	depends on ARCH_OMAP2PLUS || COMPILE_TEST
 	select IOMMU_API
 
 config OMAP_IOMMU_DEBUG
@@ -187,7 +221,7 @@ config TEGRA_IOMMU_SMMU
 
 config EXYNOS_IOMMU
 	bool "Exynos IOMMU Support"
-	depends on ARCH_EXYNOS && ARM
+	depends on ARCH_EXYNOS && ARM && MMU
 	select IOMMU_API
 	select ARM_DMA_USE_IOMMU
 	help
@@ -216,7 +250,7 @@ config SHMOBILE_IPMMU_TLB
 config SHMOBILE_IOMMU
 	bool "IOMMU for Renesas IPMMU/IPMMUI"
 	default n
-	depends on ARM
+	depends on ARM && MMU
 	depends on ARCH_SHMOBILE || COMPILE_TEST
 	select IOMMU_API
 	select ARM_DMA_USE_IOMMU
@@ -287,6 +321,7 @@ config IPMMU_VMSA
 	depends on ARM_LPAE
 	depends on ARCH_SHMOBILE || COMPILE_TEST
 	select IOMMU_API
+	select IOMMU_IO_PGTABLE_LPAE
 	select ARM_DMA_USE_IOMMU
 	help
 	  Support for the Renesas VMSA-compatible IPMMU Renesas found in the
@@ -304,13 +339,13 @@ config SPAPR_TCE_IOMMU
 
 config ARM_SMMU
 	bool "ARM Ltd. System MMU (SMMU) Support"
-	depends on ARM64 || (ARM_LPAE && OF)
+	depends on (ARM64 || ARM) && MMU
 	select IOMMU_API
+	select IOMMU_IO_PGTABLE_LPAE
 	select ARM_DMA_USE_IOMMU if ARM
 	help
 	  Support for implementations of the ARM System MMU architecture
-	  versions 1 and 2. The driver supports both v7l and v8l table
-	  formats with 4k and 64k page sizes.
+	  versions 1 and 2.
 
 	  Say Y here if your SoC includes an IOMMU device implementing
 	  the ARM SMMU architecture.
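For illustration only (not part of the patch): the shape of the generic
io-pgtable interface that IPMMU_VMSA and ARM_SMMU now select. The callbacks
prefixed my_ are hypothetical stand-ins for a driver's TLB maintenance
routines; the types and functions themselves (struct io_pgtable_cfg,
struct iommu_gather_ops, alloc_io_pgtable_ops(), free_io_pgtable_ops())
all appear in the arm-smmu.c changes below.

	#include "io-pgtable.h"

	static struct iommu_gather_ops my_tlb_ops = {
		.tlb_flush_all	= my_tlb_flush_all,	/* hypothetical */
		.tlb_add_flush	= my_tlb_add_flush,	/* hypothetical */
		.tlb_sync	= my_tlb_sync,		/* hypothetical */
		.flush_pgtable	= my_flush_pgtable,	/* hypothetical */
	};

	static int my_domain_init(void *cookie)
	{
		struct io_pgtable_cfg cfg = {
			.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
			.ias		= 48,	/* input (IOVA) address bits */
			.oas		= 40,	/* output (PA) address bits */
			.tlb		= &my_tlb_ops,
		};
		struct io_pgtable_ops *ops;

		ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie);
		if (!ops)
			return -ENOMEM;

		/* map/unmap under a driver-provided lock, as arm-smmu does */
		ops->map(ops, 0x1000, 0x80001000, SZ_4K,
			 IOMMU_READ | IOMMU_WRITE);
		ops->unmap(ops, 0x1000, SZ_4K);

		free_io_pgtable_ops(ops);
		return 0;
	}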
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 7b976f294a69..080ffab4ed1c 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -1,13 +1,16 @@
 obj-$(CONFIG_IOMMU_API) += iommu.o
 obj-$(CONFIG_IOMMU_API) += iommu-traces.o
 obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o
+obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o
+obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
+obj-$(CONFIG_IOMMU_IOVA) += iova.o
 obj-$(CONFIG_OF_IOMMU)	+= of_iommu.o
 obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
 obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
 obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
 obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
 obj-$(CONFIG_DMAR_TABLE) += dmar.o
-obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o
+obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o
 obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o
 obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o
 obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 59de6364a910..48882c126245 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
- * Author: Joerg Roedel <joerg.roedel@amd.com>
+ * Author: Joerg Roedel <jroedel@suse.de>
  *         Leo Duran <leo.duran@amd.com>
  *
  * This program is free software; you can redistribute it and/or modify it
@@ -843,10 +843,10 @@ static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
 			  size_t size, u16 domid, int pde)
 {
 	u64 pages;
-	int s;
+	bool s;
 
 	pages = iommu_num_pages(address, size, PAGE_SIZE);
-	s = 0;
+	s = false;
 
 	if (pages > 1) {
 		/*
@@ -854,7 +854,7 @@ static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
 		 * TLB entries for this domain
 		 */
 		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
-		s = 1;
+		s = true;
 	}
 
 	address &= PAGE_MASK;
@@ -874,10 +874,10 @@ static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
 			  u64 address, size_t size)
 {
 	u64 pages;
-	int s;
+	bool s;
 
 	pages = iommu_num_pages(address, size, PAGE_SIZE);
-	s = 0;
+	s = false;
 
 	if (pages > 1) {
 		/*
@@ -885,7 +885,7 @@ static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
 		 * TLB entries for this domain
 		 */
 		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
-		s = 1;
+		s = true;
 	}
 
 	address &= PAGE_MASK;
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 9a20248e7068..450ef5001a65 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
- * Author: Joerg Roedel <joerg.roedel@amd.com>
+ * Author: Joerg Roedel <jroedel@suse.de>
  *         Leo Duran <leo.duran@amd.com>
  *
  * This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/iommu/amd_iommu_proto.h b/drivers/iommu/amd_iommu_proto.h
index 861af9d8338a..72b0fd455e24 100644
--- a/drivers/iommu/amd_iommu_proto.h
+++ b/drivers/iommu/amd_iommu_proto.h
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2009-2010 Advanced Micro Devices, Inc.
- * Author: Joerg Roedel <joerg.roedel@amd.com>
+ * Author: Joerg Roedel <jroedel@suse.de>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index cec51a8ba844..c4fffb710c58 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
- * Author: Joerg Roedel <joerg.roedel@amd.com>
+ * Author: Joerg Roedel <jroedel@suse.de>
  *         Leo Duran <leo.duran@amd.com>
  *
  * This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 90f70d0e1141..6d5a5c44453b 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2010-2012 Advanced Micro Devices, Inc.
- * Author: Joerg Roedel <joerg.roedel@amd.com>
+ * Author: Joerg Roedel <jroedel@suse.de>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -31,7 +31,7 @@
31#include "amd_iommu_proto.h" 31#include "amd_iommu_proto.h"
32 32
33MODULE_LICENSE("GPL v2"); 33MODULE_LICENSE("GPL v2");
34MODULE_AUTHOR("Joerg Roedel <joerg.roedel@amd.com>"); 34MODULE_AUTHOR("Joerg Roedel <jroedel@suse.de>");
35 35
36#define MAX_DEVICES 0x10000 36#define MAX_DEVICES 0x10000
37#define PRI_QUEUE_SIZE 512 37#define PRI_QUEUE_SIZE 512
@@ -151,18 +151,6 @@ static void put_device_state(struct device_state *dev_state)
151 wake_up(&dev_state->wq); 151 wake_up(&dev_state->wq);
152} 152}
153 153
154static void put_device_state_wait(struct device_state *dev_state)
155{
156 DEFINE_WAIT(wait);
157
158 prepare_to_wait(&dev_state->wq, &wait, TASK_UNINTERRUPTIBLE);
159 if (!atomic_dec_and_test(&dev_state->count))
160 schedule();
161 finish_wait(&dev_state->wq, &wait);
162
163 free_device_state(dev_state);
164}
165
166/* Must be called under dev_state->lock */ 154/* Must be called under dev_state->lock */
167static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state, 155static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
168 int pasid, bool alloc) 156 int pasid, bool alloc)
@@ -278,14 +266,7 @@ static void put_pasid_state(struct pasid_state *pasid_state)
 
 static void put_pasid_state_wait(struct pasid_state *pasid_state)
 {
-	DEFINE_WAIT(wait);
-
-	prepare_to_wait(&pasid_state->wq, &wait, TASK_UNINTERRUPTIBLE);
-
-	if (!atomic_dec_and_test(&pasid_state->count))
-		schedule();
-
-	finish_wait(&pasid_state->wq, &wait);
+	wait_event(pasid_state->wq, !atomic_read(&pasid_state->count));
 	free_pasid_state(pasid_state);
 }
 
@@ -851,7 +832,13 @@ void amd_iommu_free_device(struct pci_dev *pdev)
 	/* Get rid of any remaining pasid states */
 	free_pasid_states(dev_state);
 
-	put_device_state_wait(dev_state);
+	put_device_state(dev_state);
+	/*
+	 * Wait until the last reference is dropped before freeing
+	 * the device state.
+	 */
+	wait_event(dev_state->wq, !atomic_read(&dev_state->count));
+	free_device_state(dev_state);
 }
 EXPORT_SYMBOL(amd_iommu_free_device);
 
@@ -921,7 +908,7 @@ static int __init amd_iommu_v2_init(void)
 {
 	int ret;
 
-	pr_info("AMD IOMMUv2 driver by Joerg Roedel <joerg.roedel@amd.com>\n");
+	pr_info("AMD IOMMUv2 driver by Joerg Roedel <jroedel@suse.de>\n");
 
 	if (!amd_iommu_v2_supported()) {
 		pr_info("AMD IOMMUv2 functionality not available on this system\n");
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 6cd47b75286f..fc13dd56953e 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -23,8 +23,6 @@
  *	- Stream-matching and stream-indexing
  *	- v7/v8 long-descriptor format
  *	- Non-secure access to the SMMU
- *	- 4k and 64k pages, with contiguous pte hints.
- *	- Up to 48-bit addressing (dependent on VA_BITS)
  *	- Context fault reporting
  */
 
@@ -36,7 +34,7 @@
36#include <linux/interrupt.h> 34#include <linux/interrupt.h>
37#include <linux/io.h> 35#include <linux/io.h>
38#include <linux/iommu.h> 36#include <linux/iommu.h>
39#include <linux/mm.h> 37#include <linux/iopoll.h>
40#include <linux/module.h> 38#include <linux/module.h>
41#include <linux/of.h> 39#include <linux/of.h>
42#include <linux/pci.h> 40#include <linux/pci.h>
@@ -46,7 +44,7 @@
 
 #include <linux/amba/bus.h>
 
-#include <asm/pgalloc.h>
+#include "io-pgtable.h"
 
 /* Maximum number of stream IDs assigned to a single device */
 #define MAX_MASTER_STREAMIDS		MAX_PHANDLE_ARGS
@@ -71,40 +69,6 @@
71 ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \ 69 ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \
72 ? 0x400 : 0)) 70 ? 0x400 : 0))
73 71
74/* Page table bits */
75#define ARM_SMMU_PTE_XN (((pteval_t)3) << 53)
76#define ARM_SMMU_PTE_CONT (((pteval_t)1) << 52)
77#define ARM_SMMU_PTE_AF (((pteval_t)1) << 10)
78#define ARM_SMMU_PTE_SH_NS (((pteval_t)0) << 8)
79#define ARM_SMMU_PTE_SH_OS (((pteval_t)2) << 8)
80#define ARM_SMMU_PTE_SH_IS (((pteval_t)3) << 8)
81#define ARM_SMMU_PTE_PAGE (((pteval_t)3) << 0)
82
83#if PAGE_SIZE == SZ_4K
84#define ARM_SMMU_PTE_CONT_ENTRIES 16
85#elif PAGE_SIZE == SZ_64K
86#define ARM_SMMU_PTE_CONT_ENTRIES 32
87#else
88#define ARM_SMMU_PTE_CONT_ENTRIES 1
89#endif
90
91#define ARM_SMMU_PTE_CONT_SIZE (PAGE_SIZE * ARM_SMMU_PTE_CONT_ENTRIES)
92#define ARM_SMMU_PTE_CONT_MASK (~(ARM_SMMU_PTE_CONT_SIZE - 1))
93
94/* Stage-1 PTE */
95#define ARM_SMMU_PTE_AP_UNPRIV (((pteval_t)1) << 6)
96#define ARM_SMMU_PTE_AP_RDONLY (((pteval_t)2) << 6)
97#define ARM_SMMU_PTE_ATTRINDX_SHIFT 2
98#define ARM_SMMU_PTE_nG (((pteval_t)1) << 11)
99
100/* Stage-2 PTE */
101#define ARM_SMMU_PTE_HAP_FAULT (((pteval_t)0) << 6)
102#define ARM_SMMU_PTE_HAP_READ (((pteval_t)1) << 6)
103#define ARM_SMMU_PTE_HAP_WRITE (((pteval_t)2) << 6)
104#define ARM_SMMU_PTE_MEMATTR_OIWB (((pteval_t)0xf) << 2)
105#define ARM_SMMU_PTE_MEMATTR_NC (((pteval_t)0x5) << 2)
106#define ARM_SMMU_PTE_MEMATTR_DEV (((pteval_t)0x1) << 2)
107
108/* Configuration registers */ 72/* Configuration registers */
109#define ARM_SMMU_GR0_sCR0 0x0 73#define ARM_SMMU_GR0_sCR0 0x0
110#define sCR0_CLIENTPD (1 << 0) 74#define sCR0_CLIENTPD (1 << 0)
@@ -132,17 +96,12 @@
 #define ARM_SMMU_GR0_sGFSYNR0		0x50
 #define ARM_SMMU_GR0_sGFSYNR1		0x54
 #define ARM_SMMU_GR0_sGFSYNR2		0x58
-#define ARM_SMMU_GR0_PIDR0		0xfe0
-#define ARM_SMMU_GR0_PIDR1		0xfe4
-#define ARM_SMMU_GR0_PIDR2		0xfe8
 
 #define ID0_S1TS			(1 << 30)
 #define ID0_S2TS			(1 << 29)
 #define ID0_NTS				(1 << 28)
 #define ID0_SMS				(1 << 27)
-#define ID0_PTFS_SHIFT			24
-#define ID0_PTFS_MASK			0x2
-#define ID0_PTFS_V8_ONLY		0x2
+#define ID0_ATOSNS			(1 << 26)
 #define ID0_CTTW			(1 << 14)
 #define ID0_NUMIRPT_SHIFT		16
 #define ID0_NUMIRPT_MASK		0xff
@@ -169,11 +128,7 @@
 #define ID2_PTFS_16K			(1 << 13)
 #define ID2_PTFS_64K			(1 << 14)
 
-#define PIDR2_ARCH_SHIFT		4
-#define PIDR2_ARCH_MASK			0xf
-
 /* Global TLB invalidation */
-#define ARM_SMMU_GR0_STLBIALL		0x60
 #define ARM_SMMU_GR0_TLBIVMID		0x64
 #define ARM_SMMU_GR0_TLBIALLNSNH	0x68
 #define ARM_SMMU_GR0_TLBIALLH		0x6c
@@ -231,13 +186,25 @@
 #define ARM_SMMU_CB_TTBCR2		0x10
 #define ARM_SMMU_CB_TTBR0_LO		0x20
 #define ARM_SMMU_CB_TTBR0_HI		0x24
+#define ARM_SMMU_CB_TTBR1_LO		0x28
+#define ARM_SMMU_CB_TTBR1_HI		0x2c
 #define ARM_SMMU_CB_TTBCR		0x30
 #define ARM_SMMU_CB_S1_MAIR0		0x38
+#define ARM_SMMU_CB_S1_MAIR1		0x3c
+#define ARM_SMMU_CB_PAR_LO		0x50
+#define ARM_SMMU_CB_PAR_HI		0x54
 #define ARM_SMMU_CB_FSR			0x58
 #define ARM_SMMU_CB_FAR_LO		0x60
 #define ARM_SMMU_CB_FAR_HI		0x64
 #define ARM_SMMU_CB_FSYNR0		0x68
+#define ARM_SMMU_CB_S1_TLBIVA		0x600
 #define ARM_SMMU_CB_S1_TLBIASID		0x610
+#define ARM_SMMU_CB_S1_TLBIVAL		0x620
+#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
+#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
+#define ARM_SMMU_CB_ATS1PR_LO		0x800
+#define ARM_SMMU_CB_ATS1PR_HI		0x804
+#define ARM_SMMU_CB_ATSR		0x8f0
 
 #define SCTLR_S1_ASIDPNE		(1 << 12)
 #define SCTLR_CFCFG			(1 << 7)
@@ -249,47 +216,16 @@
 #define SCTLR_M				(1 << 0)
 #define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)
 
-#define RESUME_RETRY			(0 << 0)
-#define RESUME_TERMINATE		(1 << 0)
-
-#define TTBCR_EAE			(1 << 31)
+#define CB_PAR_F			(1 << 0)
 
-#define TTBCR_PASIZE_SHIFT		16
-#define TTBCR_PASIZE_MASK		0x7
+#define ATSR_ACTIVE			(1 << 0)
 
-#define TTBCR_TG0_4K			(0 << 14)
-#define TTBCR_TG0_64K			(1 << 14)
-
-#define TTBCR_SH0_SHIFT			12
-#define TTBCR_SH0_MASK			0x3
-#define TTBCR_SH_NS			0
-#define TTBCR_SH_OS			2
-#define TTBCR_SH_IS			3
-
-#define TTBCR_ORGN0_SHIFT		10
-#define TTBCR_IRGN0_SHIFT		8
-#define TTBCR_RGN_MASK			0x3
-#define TTBCR_RGN_NC			0
-#define TTBCR_RGN_WBWA			1
-#define TTBCR_RGN_WT			2
-#define TTBCR_RGN_WB			3
-
-#define TTBCR_SL0_SHIFT			6
-#define TTBCR_SL0_MASK			0x3
-#define TTBCR_SL0_LVL_2			0
-#define TTBCR_SL0_LVL_1			1
-
-#define TTBCR_T1SZ_SHIFT		16
-#define TTBCR_T0SZ_SHIFT		0
-#define TTBCR_SZ_MASK			0xf
+#define RESUME_RETRY			(0 << 0)
+#define RESUME_TERMINATE		(1 << 0)
 
 #define TTBCR2_SEP_SHIFT		15
 #define TTBCR2_SEP_MASK			0x7
 
-#define TTBCR2_PASIZE_SHIFT		0
-#define TTBCR2_PASIZE_MASK		0x7
-
-/* Common definitions for PASize and SEP fields */
 #define TTBCR2_ADDR_32			0
 #define TTBCR2_ADDR_36			1
 #define TTBCR2_ADDR_40			2
@@ -297,16 +233,7 @@
 #define TTBCR2_ADDR_44			4
 #define TTBCR2_ADDR_48			5
 
 #define TTBRn_HI_ASID_SHIFT		16
-
-#define MAIR_ATTR_SHIFT(n)		((n) << 3)
-#define MAIR_ATTR_MASK			0xff
-#define MAIR_ATTR_DEVICE		0x04
-#define MAIR_ATTR_NC			0x44
-#define MAIR_ATTR_WBRWA			0xff
-#define MAIR_ATTR_IDX_NC		0
-#define MAIR_ATTR_IDX_CACHE		1
-#define MAIR_ATTR_IDX_DEV		2
 
 #define FSR_MULTI			(1 << 31)
 #define FSR_SS				(1 << 30)
@@ -366,6 +293,7 @@ struct arm_smmu_device {
 #define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
 #define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
 #define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
+#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
 	u32 features;
 
 #define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
@@ -380,10 +308,9 @@ struct arm_smmu_device {
 	u32 num_mapping_groups;
 	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);
 
-	unsigned long s1_input_size;
-	unsigned long s1_output_size;
-	unsigned long s2_input_size;
-	unsigned long s2_output_size;
+	unsigned long va_size;
+	unsigned long ipa_size;
+	unsigned long pa_size;
 
 	u32 num_global_irqs;
 	u32 num_context_irqs;
@@ -397,7 +324,6 @@ struct arm_smmu_cfg {
 	u8 cbndx;
 	u8 irptndx;
 	u32 cbar;
-	pgd_t *pgd;
 };
 #define INVALID_IRPTNDX			0xff
 
@@ -412,11 +338,15 @@ enum arm_smmu_domain_stage {
 
 struct arm_smmu_domain {
 	struct arm_smmu_device *smmu;
+	struct io_pgtable_ops *pgtbl_ops;
+	spinlock_t pgtbl_lock;
 	struct arm_smmu_cfg cfg;
 	enum arm_smmu_domain_stage stage;
-	spinlock_t lock;
+	struct mutex init_mutex; /* Protects smmu pointer */
 };
 
+static struct iommu_ops arm_smmu_ops;
+
 static DEFINE_SPINLOCK(arm_smmu_devices_lock);
 static LIST_HEAD(arm_smmu_devices);
 
@@ -597,7 +527,7 @@ static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
 }
 
 /* Wait for any pending TLB invalidations to complete */
-static void arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
+static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
 {
 	int count = 0;
 	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
@@ -615,12 +545,19 @@ static void arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
 	}
 }
 
-static void arm_smmu_tlb_inv_context(struct arm_smmu_domain *smmu_domain)
+static void arm_smmu_tlb_sync(void *cookie)
 {
+	struct arm_smmu_domain *smmu_domain = cookie;
+	__arm_smmu_tlb_sync(smmu_domain->smmu);
+}
+
+static void arm_smmu_tlb_inv_context(void *cookie)
+{
+	struct arm_smmu_domain *smmu_domain = cookie;
 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
-	void __iomem *base = ARM_SMMU_GR0(smmu);
 	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
+	void __iomem *base;
 
 	if (stage1) {
 		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
@@ -632,9 +569,76 @@ static void arm_smmu_tlb_inv_context(struct arm_smmu_domain *smmu_domain)
 			       base + ARM_SMMU_GR0_TLBIVMID);
 	}
 
-	arm_smmu_tlb_sync(smmu);
+	__arm_smmu_tlb_sync(smmu);
+}
+
+static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
+					  bool leaf, void *cookie)
+{
+	struct arm_smmu_domain *smmu_domain = cookie;
+	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
+	void __iomem *reg;
+
+	if (stage1) {
+		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
+
+		if (!IS_ENABLED(CONFIG_64BIT) || smmu->version == ARM_SMMU_V1) {
+			iova &= ~12UL;
+			iova |= ARM_SMMU_CB_ASID(cfg);
+			writel_relaxed(iova, reg);
+#ifdef CONFIG_64BIT
+		} else {
+			iova >>= 12;
+			iova |= (u64)ARM_SMMU_CB_ASID(cfg) << 48;
+			writeq_relaxed(iova, reg);
+#endif
+		}
+#ifdef CONFIG_64BIT
+	} else if (smmu->version == ARM_SMMU_V2) {
+		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
+			      ARM_SMMU_CB_S2_TLBIIPAS2;
+		writeq_relaxed(iova >> 12, reg);
+#endif
+	} else {
+		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
+		writel_relaxed(ARM_SMMU_CB_VMID(cfg), reg);
+	}
+}
+
+static void arm_smmu_flush_pgtable(void *addr, size_t size, void *cookie)
+{
+	struct arm_smmu_domain *smmu_domain = cookie;
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
+
+
+	/* Ensure new page tables are visible to the hardware walker */
+	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) {
+		dsb(ishst);
+	} else {
+		/*
+		 * If the SMMU can't walk tables in the CPU caches, treat them
+		 * like non-coherent DMA since we need to flush the new entries
+		 * all the way out to memory. There's no possibility of
+		 * recursion here as the SMMU table walker will not be wired
+		 * through another SMMU.
+		 */
+		dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
+			     DMA_TO_DEVICE);
+	}
 }
 
+static struct iommu_gather_ops arm_smmu_gather_ops = {
+	.tlb_flush_all	= arm_smmu_tlb_inv_context,
+	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
+	.tlb_sync	= arm_smmu_tlb_sync,
+	.flush_pgtable	= arm_smmu_flush_pgtable,
+};
+
 static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 {
 	int flags, ret;
@@ -712,29 +716,8 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
 	return IRQ_HANDLED;
 }
 
-static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr,
-				   size_t size)
-{
-	unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
-
-
-	/* Ensure new page tables are visible to the hardware walker */
-	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) {
-		dsb(ishst);
-	} else {
-		/*
-		 * If the SMMU can't walk tables in the CPU caches, treat them
-		 * like non-coherent DMA since we need to flush the new entries
-		 * all the way out to memory. There's no possibility of
-		 * recursion here as the SMMU table walker will not be wired
-		 * through another SMMU.
-		 */
-		dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
-			     DMA_TO_DEVICE);
-	}
-}
-
-static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
+static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
+				       struct io_pgtable_cfg *pgtbl_cfg)
 {
 	u32 reg;
 	bool stage1;
@@ -771,124 +754,68 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
 #else
 		reg = CBA2R_RW64_32BIT;
 #endif
-		writel_relaxed(reg,
-			       gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
-
-		/* TTBCR2 */
-		switch (smmu->s1_input_size) {
-		case 32:
-			reg = (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT);
-			break;
-		case 36:
-			reg = (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT);
-			break;
-		case 39:
-		case 40:
-			reg = (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT);
-			break;
-		case 42:
-			reg = (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT);
-			break;
-		case 44:
-			reg = (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT);
-			break;
-		case 48:
-			reg = (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT);
-			break;
-		}
-
-		switch (smmu->s1_output_size) {
-		case 32:
-			reg |= (TTBCR2_ADDR_32 << TTBCR2_PASIZE_SHIFT);
-			break;
-		case 36:
-			reg |= (TTBCR2_ADDR_36 << TTBCR2_PASIZE_SHIFT);
-			break;
-		case 39:
-		case 40:
-			reg |= (TTBCR2_ADDR_40 << TTBCR2_PASIZE_SHIFT);
-			break;
-		case 42:
-			reg |= (TTBCR2_ADDR_42 << TTBCR2_PASIZE_SHIFT);
-			break;
-		case 44:
-			reg |= (TTBCR2_ADDR_44 << TTBCR2_PASIZE_SHIFT);
-			break;
-		case 48:
-			reg |= (TTBCR2_ADDR_48 << TTBCR2_PASIZE_SHIFT);
-			break;
-		}
-
-		if (stage1)
-			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
+		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
 	}
 
-	/* TTBR0 */
-	arm_smmu_flush_pgtable(smmu, cfg->pgd,
-			       PTRS_PER_PGD * sizeof(pgd_t));
-	reg = __pa(cfg->pgd);
-	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
-	reg = (phys_addr_t)__pa(cfg->pgd) >> 32;
-	if (stage1)
+	/* TTBRs */
+	if (stage1) {
+		reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
+		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
+		reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0] >> 32;
 		reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT;
 		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
-
-	/*
-	 * TTBCR
-	 * We use long descriptor, with inner-shareable WBWA tables in TTBR0.
-	 */
-	if (smmu->version > ARM_SMMU_V1) {
-		if (PAGE_SIZE == SZ_4K)
-			reg = TTBCR_TG0_4K;
-		else
-			reg = TTBCR_TG0_64K;
 
-		if (!stage1) {
-			reg |= (64 - smmu->s2_input_size) << TTBCR_T0SZ_SHIFT;
+		reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
+		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1_LO);
+		reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1] >> 32;
+		reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT;
+		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1_HI);
+	} else {
+		reg = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
+		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
+		reg = pgtbl_cfg->arm_lpae_s2_cfg.vttbr >> 32;
+		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
+	}
 
-		switch (smmu->s2_output_size) {
+	/* TTBCR */
+	if (stage1) {
+		reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
+		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
+		if (smmu->version > ARM_SMMU_V1) {
+			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
+			switch (smmu->va_size) {
 			case 32:
-				reg |= (TTBCR2_ADDR_32 << TTBCR_PASIZE_SHIFT);
+				reg |= (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT);
 				break;
 			case 36:
-				reg |= (TTBCR2_ADDR_36 << TTBCR_PASIZE_SHIFT);
+				reg |= (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT);
 				break;
 			case 40:
-				reg |= (TTBCR2_ADDR_40 << TTBCR_PASIZE_SHIFT);
+				reg |= (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT);
 				break;
 			case 42:
-				reg |= (TTBCR2_ADDR_42 << TTBCR_PASIZE_SHIFT);
+				reg |= (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT);
 				break;
 			case 44:
-				reg |= (TTBCR2_ADDR_44 << TTBCR_PASIZE_SHIFT);
+				reg |= (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT);
 				break;
 			case 48:
-				reg |= (TTBCR2_ADDR_48 << TTBCR_PASIZE_SHIFT);
+				reg |= (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT);
 				break;
 			}
-		} else {
-			reg |= (64 - smmu->s1_input_size) << TTBCR_T0SZ_SHIFT;
+			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
 		}
 	} else {
-		reg = 0;
+		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
+		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
 	}
 
-	reg |= TTBCR_EAE |
-	      (TTBCR_SH_IS << TTBCR_SH0_SHIFT) |
-	      (TTBCR_RGN_WBWA << TTBCR_ORGN0_SHIFT) |
-	      (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT);
-
-	if (!stage1)
-		reg |= (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT);
-
-	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
-
-	/* MAIR0 (stage-1 only) */
+	/* MAIRs (stage-1 only) */
 	if (stage1) {
-		reg = (MAIR_ATTR_NC << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_NC)) |
-		      (MAIR_ATTR_WBRWA << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_CACHE)) |
-		      (MAIR_ATTR_DEVICE << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_DEV));
+		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
 		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
+		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
+		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
 	}
 
 	/* SCTLR */
@@ -905,11 +832,14 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 				  struct arm_smmu_device *smmu)
 {
 	int irq, start, ret = 0;
-	unsigned long flags;
+	unsigned long ias, oas;
+	struct io_pgtable_ops *pgtbl_ops;
+	struct io_pgtable_cfg pgtbl_cfg;
+	enum io_pgtable_fmt fmt;
 	struct arm_smmu_domain *smmu_domain = domain->priv;
 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 
-	spin_lock_irqsave(&smmu_domain->lock, flags);
+	mutex_lock(&smmu_domain->init_mutex);
 	if (smmu_domain->smmu)
 		goto out_unlock;
 
@@ -940,6 +870,12 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 	case ARM_SMMU_DOMAIN_S1:
 		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
 		start = smmu->num_s2_context_banks;
+		ias = smmu->va_size;
+		oas = smmu->ipa_size;
+		if (IS_ENABLED(CONFIG_64BIT))
+			fmt = ARM_64_LPAE_S1;
+		else
+			fmt = ARM_32_LPAE_S1;
 		break;
 	case ARM_SMMU_DOMAIN_NESTED:
 		/*
@@ -949,6 +885,12 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 	case ARM_SMMU_DOMAIN_S2:
 		cfg->cbar = CBAR_TYPE_S2_TRANS;
 		start = 0;
+		ias = smmu->ipa_size;
+		oas = smmu->pa_size;
+		if (IS_ENABLED(CONFIG_64BIT))
+			fmt = ARM_64_LPAE_S2;
+		else
+			fmt = ARM_32_LPAE_S2;
 		break;
 	default:
 		ret = -EINVAL;
@@ -968,10 +910,30 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
968 cfg->irptndx = cfg->cbndx; 910 cfg->irptndx = cfg->cbndx;
969 } 911 }
970 912
971 ACCESS_ONCE(smmu_domain->smmu) = smmu; 913 pgtbl_cfg = (struct io_pgtable_cfg) {
972 arm_smmu_init_context_bank(smmu_domain); 914 .pgsize_bitmap = arm_smmu_ops.pgsize_bitmap,
973 spin_unlock_irqrestore(&smmu_domain->lock, flags); 915 .ias = ias,
916 .oas = oas,
917 .tlb = &arm_smmu_gather_ops,
918 };
919
920 smmu_domain->smmu = smmu;
921 pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
922 if (!pgtbl_ops) {
923 ret = -ENOMEM;
924 goto out_clear_smmu;
925 }
926
927 /* Update our support page sizes to reflect the page table format */
928 arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
974 929
930 /* Initialise the context bank with our page table cfg */
931 arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
932
933 /*
934 * Request context fault interrupt. Do this last to avoid the
935 * handler seeing a half-initialised domain state.
936 */
975 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; 937 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
976 ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED, 938 ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
977 "arm-smmu-context-fault", domain); 939 "arm-smmu-context-fault", domain);
@@ -981,10 +943,16 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
981 cfg->irptndx = INVALID_IRPTNDX; 943 cfg->irptndx = INVALID_IRPTNDX;
982 } 944 }
983 945
946 mutex_unlock(&smmu_domain->init_mutex);
947
948 /* Publish page table ops for map/unmap */
949 smmu_domain->pgtbl_ops = pgtbl_ops;
984 return 0; 950 return 0;
985 951
952out_clear_smmu:
953 smmu_domain->smmu = NULL;
986out_unlock: 954out_unlock:
987 spin_unlock_irqrestore(&smmu_domain->lock, flags); 955 mutex_unlock(&smmu_domain->init_mutex);
988 return ret; 956 return ret;
989} 957}
990 958
@@ -999,23 +967,27 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
 	if (!smmu)
 		return;
 
-	/* Disable the context bank and nuke the TLB before freeing it. */
+	/*
+	 * Disable the context bank and free the page tables before freeing
+	 * it.
+	 */
 	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
 	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
-	arm_smmu_tlb_inv_context(smmu_domain);
 
 	if (cfg->irptndx != INVALID_IRPTNDX) {
 		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
 		free_irq(irq, domain);
 	}
 
+	if (smmu_domain->pgtbl_ops)
+		free_io_pgtable_ops(smmu_domain->pgtbl_ops);
+
 	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
 }
 
 static int arm_smmu_domain_init(struct iommu_domain *domain)
 {
 	struct arm_smmu_domain *smmu_domain;
-	pgd_t *pgd;
 
 	/*
 	 * Allocate the domain and initialise some of its data structures.
@@ -1026,81 +998,10 @@ static int arm_smmu_domain_init(struct iommu_domain *domain)
 	if (!smmu_domain)
 		return -ENOMEM;
 
-	pgd = kcalloc(PTRS_PER_PGD, sizeof(pgd_t), GFP_KERNEL);
-	if (!pgd)
-		goto out_free_domain;
-	smmu_domain->cfg.pgd = pgd;
-
-	spin_lock_init(&smmu_domain->lock);
+	mutex_init(&smmu_domain->init_mutex);
+	spin_lock_init(&smmu_domain->pgtbl_lock);
 	domain->priv = smmu_domain;
 	return 0;
-
-out_free_domain:
-	kfree(smmu_domain);
-	return -ENOMEM;
-}
-
-static void arm_smmu_free_ptes(pmd_t *pmd)
-{
-	pgtable_t table = pmd_pgtable(*pmd);
-
-	__free_page(table);
-}
-
-static void arm_smmu_free_pmds(pud_t *pud)
-{
-	int i;
-	pmd_t *pmd, *pmd_base = pmd_offset(pud, 0);
-
-	pmd = pmd_base;
-	for (i = 0; i < PTRS_PER_PMD; ++i) {
-		if (pmd_none(*pmd))
-			continue;
-
-		arm_smmu_free_ptes(pmd);
-		pmd++;
-	}
-
-	pmd_free(NULL, pmd_base);
-}
-
-static void arm_smmu_free_puds(pgd_t *pgd)
-{
-	int i;
-	pud_t *pud, *pud_base = pud_offset(pgd, 0);
-
-	pud = pud_base;
-	for (i = 0; i < PTRS_PER_PUD; ++i) {
-		if (pud_none(*pud))
-			continue;
-
-		arm_smmu_free_pmds(pud);
-		pud++;
-	}
-
-	pud_free(NULL, pud_base);
-}
-
-static void arm_smmu_free_pgtables(struct arm_smmu_domain *smmu_domain)
-{
-	int i;
-	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
-	pgd_t *pgd, *pgd_base = cfg->pgd;
-
-	/*
-	 * Recursively free the page tables for this domain. We don't
-	 * care about speculative TLB filling because the tables should
-	 * not be active in any context bank at this point (SCTLR.M is 0).
-	 */
-	pgd = pgd_base;
-	for (i = 0; i < PTRS_PER_PGD; ++i) {
-		if (pgd_none(*pgd))
-			continue;
-		arm_smmu_free_puds(pgd);
-		pgd++;
-	}
-
-	kfree(pgd_base);
 }
 
 static void arm_smmu_domain_destroy(struct iommu_domain *domain)
@@ -1112,7 +1013,6 @@ static void arm_smmu_domain_destroy(struct iommu_domain *domain)
 	 * already been detached.
 	 */
 	arm_smmu_destroy_domain_context(domain);
-	arm_smmu_free_pgtables(smmu_domain);
 	kfree(smmu_domain);
 }
 
@@ -1244,7 +1144,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 {
 	int ret;
 	struct arm_smmu_domain *smmu_domain = domain->priv;
-	struct arm_smmu_device *smmu, *dom_smmu;
+	struct arm_smmu_device *smmu;
 	struct arm_smmu_master_cfg *cfg;
 
 	smmu = find_smmu_for_device(dev);
@@ -1258,21 +1158,16 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 		return -EEXIST;
 	}
 
+	/* Ensure that the domain is finalised */
+	ret = arm_smmu_init_domain_context(domain, smmu);
+	if (IS_ERR_VALUE(ret))
+		return ret;
+
 	/*
 	 * Sanity check the domain. We don't support domains across
 	 * different SMMUs.
 	 */
-	dom_smmu = ACCESS_ONCE(smmu_domain->smmu);
-	if (!dom_smmu) {
-		/* Now that we have a master, we can finalise the domain */
-		ret = arm_smmu_init_domain_context(domain, smmu);
-		if (IS_ERR_VALUE(ret))
-			return ret;
-
-		dom_smmu = smmu_domain->smmu;
-	}
-
-	if (dom_smmu != smmu) {
+	if (smmu_domain->smmu != smmu) {
 		dev_err(dev,
 			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
 			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
@@ -1303,293 +1198,103 @@ static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
1303 arm_smmu_domain_remove_master(smmu_domain, cfg); 1198 arm_smmu_domain_remove_master(smmu_domain, cfg);
1304} 1199}
1305 1200
1306static bool arm_smmu_pte_is_contiguous_range(unsigned long addr, 1201static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
1307 unsigned long end) 1202 phys_addr_t paddr, size_t size, int prot)
1308{
1309 return !(addr & ~ARM_SMMU_PTE_CONT_MASK) &&
1310 (addr + ARM_SMMU_PTE_CONT_SIZE <= end);
1311}
1312
1313static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
1314 unsigned long addr, unsigned long end,
1315 unsigned long pfn, int prot, int stage)
1316{
1317 pte_t *pte, *start;
1318 pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF;
1319
1320 if (pmd_none(*pmd)) {
1321 /* Allocate a new set of tables */
1322 pgtable_t table = alloc_page(GFP_ATOMIC|__GFP_ZERO);
1323
1324 if (!table)
1325 return -ENOMEM;
1326
1327 arm_smmu_flush_pgtable(smmu, page_address(table), PAGE_SIZE);
1328 pmd_populate(NULL, pmd, table);
1329 arm_smmu_flush_pgtable(smmu, pmd, sizeof(*pmd));
1330 }
1331
1332 if (stage == 1) {
1333 pteval |= ARM_SMMU_PTE_AP_UNPRIV | ARM_SMMU_PTE_nG;
1334 if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
1335 pteval |= ARM_SMMU_PTE_AP_RDONLY;
1336
1337 if (prot & IOMMU_CACHE)
1338 pteval |= (MAIR_ATTR_IDX_CACHE <<
1339 ARM_SMMU_PTE_ATTRINDX_SHIFT);
1340 } else {
1341 pteval |= ARM_SMMU_PTE_HAP_FAULT;
1342 if (prot & IOMMU_READ)
1343 pteval |= ARM_SMMU_PTE_HAP_READ;
1344 if (prot & IOMMU_WRITE)
1345 pteval |= ARM_SMMU_PTE_HAP_WRITE;
1346 if (prot & IOMMU_CACHE)
1347 pteval |= ARM_SMMU_PTE_MEMATTR_OIWB;
1348 else
1349 pteval |= ARM_SMMU_PTE_MEMATTR_NC;
1350 }
1351
1352 if (prot & IOMMU_NOEXEC)
1353 pteval |= ARM_SMMU_PTE_XN;
1354
1355 /* If no access, create a faulting entry to avoid TLB fills */
1356 if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
1357 pteval &= ~ARM_SMMU_PTE_PAGE;
1358
1359 pteval |= ARM_SMMU_PTE_SH_IS;
1360 start = pmd_page_vaddr(*pmd) + pte_index(addr);
1361 pte = start;
1362
1363 /*
1364 * Install the page table entries. This is fairly complicated
1365 * since we attempt to make use of the contiguous hint in the
1366 * ptes where possible. The contiguous hint indicates a series
1367 * of ARM_SMMU_PTE_CONT_ENTRIES ptes mapping a physically
1368 * contiguous region with the following constraints:
1369 *
1370 * - The region start is aligned to ARM_SMMU_PTE_CONT_SIZE
1371 * - Each pte in the region has the contiguous hint bit set
1372 *
1373 * This complicates unmapping (also handled by this code, when
1374 * neither IOMMU_READ or IOMMU_WRITE are set) because it is
1375 * possible, yet highly unlikely, that a client may unmap only
1376 * part of a contiguous range. This requires clearing of the
1377 * contiguous hint bits in the range before installing the new
1378 * faulting entries.
1379 *
1380 * Note that re-mapping an address range without first unmapping
1381 * it is not supported, so TLB invalidation is not required here
1382 * and is instead performed at unmap and domain-init time.
1383 */
1384 do {
1385 int i = 1;
1386
1387 pteval &= ~ARM_SMMU_PTE_CONT;
1388
1389 if (arm_smmu_pte_is_contiguous_range(addr, end)) {
1390 i = ARM_SMMU_PTE_CONT_ENTRIES;
1391 pteval |= ARM_SMMU_PTE_CONT;
1392 } else if (pte_val(*pte) &
1393 (ARM_SMMU_PTE_CONT | ARM_SMMU_PTE_PAGE)) {
1394 int j;
1395 pte_t *cont_start;
1396 unsigned long idx = pte_index(addr);
1397
1398 idx &= ~(ARM_SMMU_PTE_CONT_ENTRIES - 1);
1399 cont_start = pmd_page_vaddr(*pmd) + idx;
1400 for (j = 0; j < ARM_SMMU_PTE_CONT_ENTRIES; ++j)
1401 pte_val(*(cont_start + j)) &=
1402 ~ARM_SMMU_PTE_CONT;
1403
1404 arm_smmu_flush_pgtable(smmu, cont_start,
1405 sizeof(*pte) *
1406 ARM_SMMU_PTE_CONT_ENTRIES);
1407 }
1408
1409 do {
1410 *pte = pfn_pte(pfn, __pgprot(pteval));
1411 } while (pte++, pfn++, addr += PAGE_SIZE, --i);
1412 } while (addr != end);
1413
1414 arm_smmu_flush_pgtable(smmu, start, sizeof(*pte) * (pte - start));
1415 return 0;
1416}
1417
1418static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
1419 unsigned long addr, unsigned long end,
1420 phys_addr_t phys, int prot, int stage)
1421{ 1203{
1422 int ret; 1204 int ret;
1423 pmd_t *pmd; 1205 unsigned long flags;
1424 unsigned long next, pfn = __phys_to_pfn(phys); 1206 struct arm_smmu_domain *smmu_domain = domain->priv;
1425 1207 struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
1426#ifndef __PAGETABLE_PMD_FOLDED
1427 if (pud_none(*pud)) {
1428 pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
1429 if (!pmd)
1430 return -ENOMEM;
1431
1432 arm_smmu_flush_pgtable(smmu, pmd, PAGE_SIZE);
1433 pud_populate(NULL, pud, pmd);
1434 arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud));
1435
1436 pmd += pmd_index(addr);
1437 } else
1438#endif
1439 pmd = pmd_offset(pud, addr);
1440 1208
1441 do { 1209 if (!ops)
1442 next = pmd_addr_end(addr, end); 1210 return -ENODEV;
1443 ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, next, pfn,
1444 prot, stage);
1445 phys += next - addr;
1446 pfn = __phys_to_pfn(phys);
1447 } while (pmd++, addr = next, addr < end);
1448 1211
1212 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1213 ret = ops->map(ops, iova, paddr, size, prot);
1214 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1449 return ret; 1215 return ret;
1450} 1216}
1451 1217
1452static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd, 1218static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
1453 unsigned long addr, unsigned long end, 1219 size_t size)
1454 phys_addr_t phys, int prot, int stage)
1455{ 1220{
1456 int ret = 0; 1221 size_t ret;
1457 pud_t *pud; 1222 unsigned long flags;
1458 unsigned long next; 1223 struct arm_smmu_domain *smmu_domain = domain->priv;
1459 1224 struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
1460#ifndef __PAGETABLE_PUD_FOLDED
1461 if (pgd_none(*pgd)) {
1462 pud = (pud_t *)get_zeroed_page(GFP_ATOMIC);
1463 if (!pud)
1464 return -ENOMEM;
1465
1466 arm_smmu_flush_pgtable(smmu, pud, PAGE_SIZE);
1467 pgd_populate(NULL, pgd, pud);
1468 arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd));
1469
1470 pud += pud_index(addr);
1471 } else
1472#endif
1473 pud = pud_offset(pgd, addr);
1474 1225
1475 do { 1226 if (!ops)
1476 next = pud_addr_end(addr, end); 1227 return 0;
1477 ret = arm_smmu_alloc_init_pmd(smmu, pud, addr, next, phys,
1478 prot, stage);
1479 phys += next - addr;
1480 } while (pud++, addr = next, addr < end);
1481 1228
1229 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1230 ret = ops->unmap(ops, iova, size);
1231 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1482 return ret; 1232 return ret;
1483} 1233}
1484 1234
1485static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain, 1235static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
1486 unsigned long iova, phys_addr_t paddr, 1236 dma_addr_t iova)
1487 size_t size, int prot)
1488{ 1237{
1489 int ret, stage; 1238 struct arm_smmu_domain *smmu_domain = domain->priv;
1490 unsigned long end;
1491 phys_addr_t input_mask, output_mask;
1492 struct arm_smmu_device *smmu = smmu_domain->smmu; 1239 struct arm_smmu_device *smmu = smmu_domain->smmu;
1493 struct arm_smmu_cfg *cfg = &smmu_domain->cfg; 1240 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1494 pgd_t *pgd = cfg->pgd; 1241 struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
1495 unsigned long flags; 1242 struct device *dev = smmu->dev;
1243 void __iomem *cb_base;
1244 u32 tmp;
1245 u64 phys;
1496 1246
1497 if (cfg->cbar == CBAR_TYPE_S2_TRANS) { 1247 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1498 stage = 2; 1248
1499 input_mask = (1ULL << smmu->s2_input_size) - 1; 1249 if (smmu->version == 1) {
1500 output_mask = (1ULL << smmu->s2_output_size) - 1; 1250 u32 reg = iova & ~0xfff;
1251 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_LO);
1501 } else { 1252 } else {
1502 stage = 1; 1253 u32 reg = iova & ~0xfff;
1503 input_mask = (1ULL << smmu->s1_input_size) - 1; 1254 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_LO);
1504 output_mask = (1ULL << smmu->s1_output_size) - 1; 1255 reg = ((u64)iova & ~0xfff) >> 32;
1256 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_HI);
1505 } 1257 }
1506 1258
1507 if (!pgd) 1259 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
1508 return -EINVAL; 1260 !(tmp & ATSR_ACTIVE), 5, 50)) {
1509 1261 dev_err(dev,
1510 if (size & ~PAGE_MASK) 1262 "iova to phys timed out on 0x%pad. Falling back to software table walk.\n",
1511 return -EINVAL; 1263 &iova);
1512 1264 return ops->iova_to_phys(ops, iova);
1513 if ((phys_addr_t)iova & ~input_mask) 1265 }
1514 return -ERANGE;
1515
1516 if (paddr & ~output_mask)
1517 return -ERANGE;
1518
1519 spin_lock_irqsave(&smmu_domain->lock, flags);
1520 pgd += pgd_index(iova);
1521 end = iova + size;
1522 do {
1523 unsigned long next = pgd_addr_end(iova, end);
1524
1525 ret = arm_smmu_alloc_init_pud(smmu, pgd, iova, next, paddr,
1526 prot, stage);
1527 if (ret)
1528 goto out_unlock;
1529
1530 paddr += next - iova;
1531 iova = next;
1532 } while (pgd++, iova != end);
1533
1534out_unlock:
1535 spin_unlock_irqrestore(&smmu_domain->lock, flags);
1536
1537 return ret;
1538}
1539
1540static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
1541 phys_addr_t paddr, size_t size, int prot)
1542{
1543 struct arm_smmu_domain *smmu_domain = domain->priv;
1544
1545 if (!smmu_domain)
1546 return -ENODEV;
1547 1266
1548 return arm_smmu_handle_mapping(smmu_domain, iova, paddr, size, prot); 1267 phys = readl_relaxed(cb_base + ARM_SMMU_CB_PAR_LO);
1549} 1268 phys |= ((u64)readl_relaxed(cb_base + ARM_SMMU_CB_PAR_HI)) << 32;
1550 1269
1551static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, 1270 if (phys & CB_PAR_F) {
1552 size_t size) 1271 dev_err(dev, "translation fault!\n");
1553{ 1272 dev_err(dev, "PAR = 0x%llx\n", phys);
1554 int ret; 1273 return 0;
1555 struct arm_smmu_domain *smmu_domain = domain->priv; 1274 }
1556 1275
1557 ret = arm_smmu_handle_mapping(smmu_domain, iova, 0, size, 0); 1276 return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
1558 arm_smmu_tlb_inv_context(smmu_domain);
1559 return ret ? 0 : size;
1560} 1277}
1561 1278
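
Note: arm_smmu_iova_to_phys_hard() kicks off an ATS1PR translation and then spins on ATSR with the new readl_poll_timeout_atomic() helper. The polling idiom in isolation, against a made-up status register (DEV_STATUS/DEV_BUSY are assumptions for illustration, not SMMU registers):

#include <linux/iopoll.h>
#include <linux/bitops.h>

#define DEV_STATUS	0x04		/* hypothetical register offset */
#define DEV_BUSY	BIT(0)		/* hypothetical busy flag */

static int wait_until_idle(void __iomem *base)
{
	u32 val;

	/* sample every 5us, give up after 50us (the same budget the
	 * ATSR loop uses); returns -ETIMEDOUT on expiry */
	return readl_poll_timeout_atomic(base + DEV_STATUS, val,
					 !(val & DEV_BUSY), 5, 50);
}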
1562static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain, 1279static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
1563 dma_addr_t iova) 1280 dma_addr_t iova)
1564{ 1281{
1565 pgd_t *pgdp, pgd; 1282 phys_addr_t ret;
1566 pud_t pud; 1283 unsigned long flags;
1567 pmd_t pmd;
1568 pte_t pte;
1569 struct arm_smmu_domain *smmu_domain = domain->priv; 1284 struct arm_smmu_domain *smmu_domain = domain->priv;
1570 struct arm_smmu_cfg *cfg = &smmu_domain->cfg; 1285 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1571 1286
1572 pgdp = cfg->pgd; 1287 if (!ops)
1573 if (!pgdp)
1574 return 0; 1288 return 0;
1575 1289
1576 pgd = *(pgdp + pgd_index(iova)); 1290 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1577 if (pgd_none(pgd)) 1291 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS)
1578 return 0; 1292 ret = arm_smmu_iova_to_phys_hard(domain, iova);
1579 1293 else
1580 pud = *pud_offset(&pgd, iova); 1294 ret = ops->iova_to_phys(ops, iova);
1581 if (pud_none(pud)) 1295 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1582 return 0;
1583
1584 pmd = *pmd_offset(&pud, iova);
1585 if (pmd_none(pmd))
1586 return 0;
1587 1296
1588 pte = *(pmd_page_vaddr(pmd) + pte_index(iova)); 1297 return ret;
1589 if (pte_none(pte))
1590 return 0;
1591
1592 return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK);
1593} 1298}
1594 1299
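
Note: decoding the PAR result above is simple bit surgery: bits [39:12] carry the output frame, and the low 12 bits are copied straight from the untranslated page offset. Restated on its own (assuming the 40-bit output address and 4K pages used here):

#include <linux/bitops.h>

static phys_addr_t par_to_phys(u64 par, dma_addr_t iova)
{
	/* output frame from PAR, page offset from the input address */
	return (par & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}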
1595static bool arm_smmu_capable(enum iommu_cap cap) 1300static bool arm_smmu_capable(enum iommu_cap cap)
@@ -1698,24 +1403,34 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1698static int arm_smmu_domain_set_attr(struct iommu_domain *domain, 1403static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1699 enum iommu_attr attr, void *data) 1404 enum iommu_attr attr, void *data)
1700{ 1405{
1406 int ret = 0;
1701 struct arm_smmu_domain *smmu_domain = domain->priv; 1407 struct arm_smmu_domain *smmu_domain = domain->priv;
1702 1408
1409 mutex_lock(&smmu_domain->init_mutex);
1410
1703 switch (attr) { 1411 switch (attr) {
1704 case DOMAIN_ATTR_NESTING: 1412 case DOMAIN_ATTR_NESTING:
1705 if (smmu_domain->smmu) 1413 if (smmu_domain->smmu) {
1706 return -EPERM; 1414 ret = -EPERM;
1415 goto out_unlock;
1416 }
1417
1707 if (*(int *)data) 1418 if (*(int *)data)
1708 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED; 1419 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1709 else 1420 else
1710 smmu_domain->stage = ARM_SMMU_DOMAIN_S1; 1421 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1711 1422
1712 return 0; 1423 break;
1713 default: 1424 default:
1714 return -ENODEV; 1425 ret = -ENODEV;
1715 } 1426 }
1427
1428out_unlock:
1429 mutex_unlock(&smmu_domain->init_mutex);
1430 return ret;
1716} 1431}
1717 1432
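
Note: the attribute setter now serialises against attach via init_mutex and funnels every exit through a single unlock. The shape of that error path, boiled down to a skeleton (names hypothetical):

#include <linux/mutex.h>

static int set_attr_skeleton(struct mutex *lock, bool attached,
			     int *stage, int new_stage)
{
	int ret = 0;

	mutex_lock(lock);
	if (attached) {			/* too late to change the stage */
		ret = -EPERM;
		goto out_unlock;
	}
	*stage = new_stage;
out_unlock:
	mutex_unlock(lock);
	return ret;
}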
1718static const struct iommu_ops arm_smmu_ops = { 1433static struct iommu_ops arm_smmu_ops = {
1719 .capable = arm_smmu_capable, 1434 .capable = arm_smmu_capable,
1720 .domain_init = arm_smmu_domain_init, 1435 .domain_init = arm_smmu_domain_init,
1721 .domain_destroy = arm_smmu_domain_destroy, 1436 .domain_destroy = arm_smmu_domain_destroy,
@@ -1729,9 +1444,7 @@ static const struct iommu_ops arm_smmu_ops = {
1729 .remove_device = arm_smmu_remove_device, 1444 .remove_device = arm_smmu_remove_device,
1730 .domain_get_attr = arm_smmu_domain_get_attr, 1445 .domain_get_attr = arm_smmu_domain_get_attr,
1731 .domain_set_attr = arm_smmu_domain_set_attr, 1446 .domain_set_attr = arm_smmu_domain_set_attr,
1732 .pgsize_bitmap = (SECTION_SIZE | 1447 .pgsize_bitmap = -1UL, /* Restricted during device attach */
1733 ARM_SMMU_PTE_CONT_SIZE |
1734 PAGE_SIZE),
1735}; 1448};
1736 1449
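
Note: initialising pgsize_bitmap to all-ones and narrowing it with '&=' during probe lets one shared arm_smmu_ops describe SMMUs with different granules. For reference, a simplified picture of how a page size is then chosen from such a bitmap (modelled loosely on the IOMMU core's splitting logic, not code from this patch):

#include <linux/bitops.h>

static size_t pick_pgsize(unsigned long pgsize_bitmap, unsigned long iova,
			  size_t size)
{
	/* keep only candidates no larger than the remaining length */
	unsigned long mask = pgsize_bitmap & GENMASK(__fls(size), 0);

	while (mask) {
		size_t pgsize = 1UL << __fls(mask);

		if (!(iova & (pgsize - 1)))	/* alignment fits */
			return pgsize;
		mask &= ~pgsize;		/* try the next size down */
	}
	return 0;
}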
1737static void arm_smmu_device_reset(struct arm_smmu_device *smmu) 1450static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
@@ -1760,7 +1473,6 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1760 } 1473 }
1761 1474
1762 /* Invalidate the TLB, just in case */ 1475 /* Invalidate the TLB, just in case */
1763 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_STLBIALL);
1764 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH); 1476 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
1765 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH); 1477 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
1766 1478
@@ -1782,7 +1494,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1782 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT); 1494 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
1783 1495
1784 /* Push the button */ 1496 /* Push the button */
1785 arm_smmu_tlb_sync(smmu); 1497 __arm_smmu_tlb_sync(smmu);
1786 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); 1498 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
1787} 1499}
1788 1500
@@ -1816,12 +1528,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1816 1528
1817 /* ID0 */ 1529 /* ID0 */
1818 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0); 1530 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
1819#ifndef CONFIG_64BIT
1820 if (((id >> ID0_PTFS_SHIFT) & ID0_PTFS_MASK) == ID0_PTFS_V8_ONLY) {
1821 dev_err(smmu->dev, "\tno v7 descriptor support!\n");
1822 return -ENODEV;
1823 }
1824#endif
1825 1531
1826 /* Restrict available stages based on module parameter */ 1532 /* Restrict available stages based on module parameter */
1827 if (force_stage == 1) 1533 if (force_stage == 1)
@@ -1850,6 +1556,11 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1850 return -ENODEV; 1556 return -ENODEV;
1851 } 1557 }
1852 1558
1559 if (smmu->version == 1 || (!(id & ID0_ATOSNS) && (id & ID0_S1TS))) {
1560 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1561 dev_notice(smmu->dev, "\taddress translation ops\n");
1562 }
1563
1853 if (id & ID0_CTTW) { 1564 if (id & ID0_CTTW) {
1854 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK; 1565 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
1855 dev_notice(smmu->dev, "\tcoherent table walk\n"); 1566 dev_notice(smmu->dev, "\tcoherent table walk\n");
@@ -1894,16 +1605,14 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1894 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12; 1605 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
1895 1606
1896 /* Check for size mismatch of SMMU address space from mapped region */ 1607 /* Check for size mismatch of SMMU address space from mapped region */
1897 size = 1 << 1608 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
1898 (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
1899 size *= 2 << smmu->pgshift; 1609 size *= 2 << smmu->pgshift;
1900 if (smmu->size != size) 1610 if (smmu->size != size)
1901 dev_warn(smmu->dev, 1611 dev_warn(smmu->dev,
1902 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n", 1612 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
1903 size, smmu->size); 1613 size, smmu->size);
1904 1614
1905 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & 1615 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
1906 ID1_NUMS2CB_MASK;
1907 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK; 1616 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
1908 if (smmu->num_s2_context_banks > smmu->num_context_banks) { 1617 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1909 dev_err(smmu->dev, "impossible number of S2 context banks!\n"); 1618 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
@@ -1915,46 +1624,40 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1915 /* ID2 */ 1624 /* ID2 */
1916 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2); 1625 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
1917 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK); 1626 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
1918 smmu->s1_output_size = min_t(unsigned long, PHYS_MASK_SHIFT, size); 1627 smmu->ipa_size = size;
1919
1920 /* Stage-2 input size limited due to pgd allocation (PTRS_PER_PGD) */
1921#ifdef CONFIG_64BIT
1922 smmu->s2_input_size = min_t(unsigned long, VA_BITS, size);
1923#else
1924 smmu->s2_input_size = min(32UL, size);
1925#endif
1926 1628
1927 /* The stage-2 output mask is also applied for bypass */ 1629 /* The output mask is also applied for bypass */
1928 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK); 1630 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
1929 smmu->s2_output_size = min_t(unsigned long, PHYS_MASK_SHIFT, size); 1631 smmu->pa_size = size;
1930 1632
1931 if (smmu->version == ARM_SMMU_V1) { 1633 if (smmu->version == ARM_SMMU_V1) {
1932 smmu->s1_input_size = 32; 1634 smmu->va_size = smmu->ipa_size;
1635 size = SZ_4K | SZ_2M | SZ_1G;
1933 } else { 1636 } else {
1934#ifdef CONFIG_64BIT
1935 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK; 1637 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
1936 size = min(VA_BITS, arm_smmu_id_size_to_bits(size)); 1638 smmu->va_size = arm_smmu_id_size_to_bits(size);
1937#else 1639#ifndef CONFIG_64BIT
1938 size = 32; 1640 smmu->va_size = min(32UL, smmu->va_size);
1939#endif 1641#endif
1940 smmu->s1_input_size = size; 1642 size = 0;
1941 1643 if (id & ID2_PTFS_4K)
1942 if ((PAGE_SIZE == SZ_4K && !(id & ID2_PTFS_4K)) || 1644 size |= SZ_4K | SZ_2M | SZ_1G;
1943 (PAGE_SIZE == SZ_64K && !(id & ID2_PTFS_64K)) || 1645 if (id & ID2_PTFS_16K)
1944 (PAGE_SIZE != SZ_4K && PAGE_SIZE != SZ_64K)) { 1646 size |= SZ_16K | SZ_32M;
1945 dev_err(smmu->dev, "CPU page size 0x%lx unsupported\n", 1647 if (id & ID2_PTFS_64K)
1946 PAGE_SIZE); 1648 size |= SZ_64K | SZ_512M;
1947 return -ENODEV;
1948 }
1949 } 1649 }
1950 1650
1651 arm_smmu_ops.pgsize_bitmap &= size;
1652 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size);
1653
1951 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1) 1654 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
1952 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n", 1655 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
1953 smmu->s1_input_size, smmu->s1_output_size); 1656 smmu->va_size, smmu->ipa_size);
1954 1657
1955 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2) 1658 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
1956 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n", 1659 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
1957 smmu->s2_input_size, smmu->s2_output_size); 1660 smmu->ipa_size, smmu->pa_size);
1958 1661
1959 return 0; 1662 return 0;
1960} 1663}
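
Note: the ID2_PTFS decode above pairs each supported granule with its large-page sizes (4K with 2M/1G, 16K with 32M, 64K with 512M). The arithmetic behind those pairs: with 8-byte descriptors, a table of one granule resolves granule/8 entries, so a block at the next level up covers granule * (granule/8) bytes. A sanity-check helper, illustrative only:

static unsigned long long lpae_block_size(unsigned long long granule)
{
	/* 4K * 512 = 2M, 16K * 2048 = 32M, 64K * 8192 = 512M */
	return granule * (granule / 8);
}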
diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c
index 80ac68d884c5..abeedc9a78c2 100644
--- a/drivers/iommu/fsl_pamu.c
+++ b/drivers/iommu/fsl_pamu.c
@@ -18,22 +18,13 @@
18 18
19#define pr_fmt(fmt) "fsl-pamu: %s: " fmt, __func__ 19#define pr_fmt(fmt) "fsl-pamu: %s: " fmt, __func__
20 20
21#include <linux/init.h> 21#include "fsl_pamu.h"
22#include <linux/iommu.h> 22
23#include <linux/slab.h>
24#include <linux/module.h>
25#include <linux/types.h>
26#include <linux/mm.h>
27#include <linux/interrupt.h> 23#include <linux/interrupt.h>
28#include <linux/device.h>
29#include <linux/of_platform.h>
30#include <linux/bootmem.h>
31#include <linux/genalloc.h> 24#include <linux/genalloc.h>
32#include <asm/io.h>
33#include <asm/bitops.h>
34#include <asm/fsl_guts.h>
35 25
36#include "fsl_pamu.h" 26#include <asm/mpc85xx.h>
27#include <asm/fsl_guts.h>
37 28
38/* define indexes for each operation mapping scenario */ 29/* define indexes for each operation mapping scenario */
39#define OMI_QMAN 0x00 30#define OMI_QMAN 0x00
@@ -44,13 +35,13 @@
44#define make64(high, low) (((u64)(high) << 32) | (low)) 35#define make64(high, low) (((u64)(high) << 32) | (low))
45 36
46struct pamu_isr_data { 37struct pamu_isr_data {
47 void __iomem *pamu_reg_base; /* Base address of PAMU regs*/ 38 void __iomem *pamu_reg_base; /* Base address of PAMU regs */
48 unsigned int count; /* The number of PAMUs */ 39 unsigned int count; /* The number of PAMUs */
49}; 40};
50 41
51static struct paace *ppaact; 42static struct paace *ppaact;
52static struct paace *spaact; 43static struct paace *spaact;
53static struct ome *omt; 44static struct ome *omt __initdata;
54 45
55/* 46/*
56 * Table for matching compatible strings, for device tree 47 * Table for matching compatible strings, for device tree
@@ -58,14 +49,13 @@ static struct ome *omt;
58 * "fsl,qoriq-device-config-2.0" corresponds to T4 & B4 49 * "fsl,qoriq-device-config-2.0" corresponds to T4 & B4
59 * SOCs. For the older SOCs "fsl,qoriq-device-config-1.0" 50 * SOCs. For the older SOCs "fsl,qoriq-device-config-1.0"
60 * string would be used. 51 * string would be used.
61*/ 52 */
62static const struct of_device_id guts_device_ids[] = { 53static const struct of_device_id guts_device_ids[] __initconst = {
63 { .compatible = "fsl,qoriq-device-config-1.0", }, 54 { .compatible = "fsl,qoriq-device-config-1.0", },
64 { .compatible = "fsl,qoriq-device-config-2.0", }, 55 { .compatible = "fsl,qoriq-device-config-2.0", },
65 {} 56 {}
66}; 57};
67 58
68
69/* 59/*
70 * Table for matching compatible strings, for device tree 60 * Table for matching compatible strings, for device tree
71 * L3 cache controller node. 61 * L3 cache controller node.
@@ -73,7 +63,7 @@ static const struct of_device_id guts_device_ids[] = {
73 * "fsl,b4860-l3-cache-controller" corresponds to B4 & 63 * "fsl,b4860-l3-cache-controller" corresponds to B4 &
74 * "fsl,p4080-l3-cache-controller" corresponds to other, 64 * "fsl,p4080-l3-cache-controller" corresponds to other,
75 * SOCs. 65 * SOCs.
76*/ 66 */
77static const struct of_device_id l3_device_ids[] = { 67static const struct of_device_id l3_device_ids[] = {
78 { .compatible = "fsl,t4240-l3-cache-controller", }, 68 { .compatible = "fsl,t4240-l3-cache-controller", },
79 { .compatible = "fsl,b4860-l3-cache-controller", }, 69 { .compatible = "fsl,b4860-l3-cache-controller", },
@@ -85,7 +75,7 @@ static const struct of_device_id l3_device_ids[] = {
85static u32 max_subwindow_count; 75static u32 max_subwindow_count;
86 76
87/* Pool for fspi allocation */ 77/* Pool for fspi allocation */
88struct gen_pool *spaace_pool; 78static struct gen_pool *spaace_pool;
89 79
90/** 80/**
91 * pamu_get_max_subwin_cnt() - Return the maximum supported 81 * pamu_get_max_subwin_cnt() - Return the maximum supported
@@ -170,7 +160,7 @@ int pamu_disable_liodn(int liodn)
170static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size) 160static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size)
171{ 161{
172 /* Bug if not a power of 2 */ 162 /* Bug if not a power of 2 */
173 BUG_ON((addrspace_size & (addrspace_size - 1))); 163 BUG_ON(addrspace_size & (addrspace_size - 1));
174 164
175 /* window size is 2^(WSE+1) bytes */ 165 /* window size is 2^(WSE+1) bytes */
176 return fls64(addrspace_size) - 2; 166 return fls64(addrspace_size) - 2;
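
Note: since a PAMU window spans 2^(WSE+1) bytes, the encoding is WSE = log2(size) - 1; fls64() on a power of two returns log2(size) + 1, hence the '- 2'. A round-trip check under the power-of-two precondition the BUG_ON enforces:

#include <linux/bitops.h>

static bool wse_roundtrips(u64 size)
{
	unsigned int wse = fls64(size) - 2;

	/* e.g. size = 1MB: fls64 = 21, wse = 19, 2^(19+1) = 1MB */
	return (1ULL << (wse + 1)) == size;
}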
@@ -179,8 +169,8 @@ static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size)
179/* Derive the PAACE window count encoding for the subwindow count */ 169/* Derive the PAACE window count encoding for the subwindow count */
180static unsigned int map_subwindow_cnt_to_wce(u32 subwindow_cnt) 170static unsigned int map_subwindow_cnt_to_wce(u32 subwindow_cnt)
181{ 171{
182 /* window count is 2^(WCE+1) */ 172 /* window count is 2^(WCE+1) */
183 return __ffs(subwindow_cnt) - 1; 173 return __ffs(subwindow_cnt) - 1;
184} 174}
185 175
186/* 176/*
@@ -241,7 +231,7 @@ static struct paace *pamu_get_spaace(struct paace *paace, u32 wnum)
241 * If no SPAACE entry is available or the allocator can not reserve the required 231 * If no SPAACE entry is available or the allocator can not reserve the required
242 * number of contiguous entries function returns ULONG_MAX indicating a failure. 232 * number of contiguous entries function returns ULONG_MAX indicating a failure.
243 * 233 *
244*/ 234 */
245static unsigned long pamu_get_fspi_and_allocate(u32 subwin_cnt) 235static unsigned long pamu_get_fspi_and_allocate(u32 subwin_cnt)
246{ 236{
247 unsigned long spaace_addr; 237 unsigned long spaace_addr;
@@ -288,9 +278,8 @@ int pamu_update_paace_stash(int liodn, u32 subwin, u32 value)
288 } 278 }
289 if (subwin) { 279 if (subwin) {
290 paace = pamu_get_spaace(paace, subwin - 1); 280 paace = pamu_get_spaace(paace, subwin - 1);
291 if (!paace) { 281 if (!paace)
292 return -ENOENT; 282 return -ENOENT;
293 }
294 } 283 }
295 set_bf(paace->impl_attr, PAACE_IA_CID, value); 284 set_bf(paace->impl_attr, PAACE_IA_CID, value);
296 285
@@ -311,14 +300,12 @@ int pamu_disable_spaace(int liodn, u32 subwin)
311 } 300 }
312 if (subwin) { 301 if (subwin) {
313 paace = pamu_get_spaace(paace, subwin - 1); 302 paace = pamu_get_spaace(paace, subwin - 1);
314 if (!paace) { 303 if (!paace)
315 return -ENOENT; 304 return -ENOENT;
316 } 305 set_bf(paace->addr_bitfields, PAACE_AF_V, PAACE_V_INVALID);
317 set_bf(paace->addr_bitfields, PAACE_AF_V,
318 PAACE_V_INVALID);
319 } else { 306 } else {
320 set_bf(paace->addr_bitfields, PAACE_AF_AP, 307 set_bf(paace->addr_bitfields, PAACE_AF_AP,
321 PAACE_AP_PERMS_DENIED); 308 PAACE_AP_PERMS_DENIED);
322 } 309 }
323 310
324 mb(); 311 mb();
@@ -326,7 +313,6 @@ int pamu_disable_spaace(int liodn, u32 subwin)
326 return 0; 313 return 0;
327} 314}
328 315
329
330/** 316/**
331 * pamu_config_paace() - Sets up PPAACE entry for specified liodn 317 * pamu_config_paace() - Sets up PPAACE entry for specified liodn
332 * 318 *
@@ -352,7 +338,8 @@ int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size,
352 unsigned long fspi; 338 unsigned long fspi;
353 339
354 if ((win_size & (win_size - 1)) || win_size < PAMU_PAGE_SIZE) { 340 if ((win_size & (win_size - 1)) || win_size < PAMU_PAGE_SIZE) {
355 pr_debug("window size too small or not a power of two %llx\n", win_size); 341 pr_debug("window size too small or not a power of two %pa\n",
342 &win_size);
356 return -EINVAL; 343 return -EINVAL;
357 } 344 }
358 345
@@ -362,13 +349,12 @@ int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size,
362 } 349 }
363 350
364 ppaace = pamu_get_ppaace(liodn); 351 ppaace = pamu_get_ppaace(liodn);
365 if (!ppaace) { 352 if (!ppaace)
366 return -ENOENT; 353 return -ENOENT;
367 }
368 354
369 /* window size is 2^(WSE+1) bytes */ 355 /* window size is 2^(WSE+1) bytes */
370 set_bf(ppaace->addr_bitfields, PPAACE_AF_WSE, 356 set_bf(ppaace->addr_bitfields, PPAACE_AF_WSE,
371 map_addrspace_size_to_wse(win_size)); 357 map_addrspace_size_to_wse(win_size));
372 358
373 pamu_init_ppaace(ppaace); 359 pamu_init_ppaace(ppaace);
374 360
@@ -442,7 +428,6 @@ int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin,
442{ 428{
443 struct paace *paace; 429 struct paace *paace;
444 430
445
446 /* setup sub-windows */ 431 /* setup sub-windows */
447 if (!subwin_cnt) { 432 if (!subwin_cnt) {
448 pr_debug("Invalid subwindow count\n"); 433 pr_debug("Invalid subwindow count\n");
@@ -510,11 +495,11 @@ int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin,
510} 495}
511 496
512/** 497/**
513* get_ome_index() - Returns the index in the operation mapping table 498 * get_ome_index() - Returns the index in the operation mapping table
514* for device. 499 * for device.
515* @*omi_index: pointer for storing the index value 500 * @*omi_index: pointer for storing the index value
516* 501 *
517*/ 502 */
518void get_ome_index(u32 *omi_index, struct device *dev) 503void get_ome_index(u32 *omi_index, struct device *dev)
519{ 504{
520 if (of_device_is_compatible(dev->of_node, "fsl,qman-portal")) 505 if (of_device_is_compatible(dev->of_node, "fsl,qman-portal"))
@@ -544,9 +529,10 @@ u32 get_stash_id(u32 stash_dest_hint, u32 vcpu)
544 if (stash_dest_hint == PAMU_ATTR_CACHE_L3) { 529 if (stash_dest_hint == PAMU_ATTR_CACHE_L3) {
545 node = of_find_matching_node(NULL, l3_device_ids); 530 node = of_find_matching_node(NULL, l3_device_ids);
546 if (node) { 531 if (node) {
547 prop = of_get_property(node, "cache-stash-id", 0); 532 prop = of_get_property(node, "cache-stash-id", NULL);
548 if (!prop) { 533 if (!prop) {
549 pr_debug("missing cache-stash-id at %s\n", node->full_name); 534 pr_debug("missing cache-stash-id at %s\n",
535 node->full_name);
550 of_node_put(node); 536 of_node_put(node);
551 return ~(u32)0; 537 return ~(u32)0;
552 } 538 }
@@ -570,9 +556,10 @@ found_cpu_node:
570 /* find the hwnode that represents the cache */ 556 /* find the hwnode that represents the cache */
571 for (cache_level = PAMU_ATTR_CACHE_L1; (cache_level < PAMU_ATTR_CACHE_L3) && found; cache_level++) { 557 for (cache_level = PAMU_ATTR_CACHE_L1; (cache_level < PAMU_ATTR_CACHE_L3) && found; cache_level++) {
572 if (stash_dest_hint == cache_level) { 558 if (stash_dest_hint == cache_level) {
573 prop = of_get_property(node, "cache-stash-id", 0); 559 prop = of_get_property(node, "cache-stash-id", NULL);
574 if (!prop) { 560 if (!prop) {
575 pr_debug("missing cache-stash-id at %s\n", node->full_name); 561 pr_debug("missing cache-stash-id at %s\n",
562 node->full_name);
576 of_node_put(node); 563 of_node_put(node);
577 return ~(u32)0; 564 return ~(u32)0;
578 } 565 }
@@ -580,10 +567,10 @@ found_cpu_node:
580 return be32_to_cpup(prop); 567 return be32_to_cpup(prop);
581 } 568 }
582 569
583 prop = of_get_property(node, "next-level-cache", 0); 570 prop = of_get_property(node, "next-level-cache", NULL);
584 if (!prop) { 571 if (!prop) {
585 pr_debug("can't find next-level-cache at %s\n", 572 pr_debug("can't find next-level-cache at %s\n",
586 node->full_name); 573 node->full_name);
587 of_node_put(node); 574 of_node_put(node);
588 return ~(u32)0; /* can't traverse any further */ 575 return ~(u32)0; /* can't traverse any further */
589 } 576 }
@@ -598,7 +585,7 @@ found_cpu_node:
598 } 585 }
599 586
600 pr_debug("stash dest not found for %d on vcpu %d\n", 587 pr_debug("stash dest not found for %d on vcpu %d\n",
601 stash_dest_hint, vcpu); 588 stash_dest_hint, vcpu);
602 return ~(u32)0; 589 return ~(u32)0;
603} 590}
604 591
@@ -612,7 +599,7 @@ found_cpu_node:
612 * Memory accesses to QMAN and BMAN private memory need not be coherent, so 599 * Memory accesses to QMAN and BMAN private memory need not be coherent, so
613 * clear the PAACE entry coherency attribute for them. 600 * clear the PAACE entry coherency attribute for them.
614 */ 601 */
615static void setup_qbman_paace(struct paace *ppaace, int paace_type) 602static void __init setup_qbman_paace(struct paace *ppaace, int paace_type)
616{ 603{
617 switch (paace_type) { 604 switch (paace_type) {
618 case QMAN_PAACE: 605 case QMAN_PAACE:
@@ -626,7 +613,7 @@ static void setup_qbman_paace(struct paace *ppaace, int paace_type)
626 case QMAN_PORTAL_PAACE: 613 case QMAN_PORTAL_PAACE:
627 set_bf(ppaace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED); 614 set_bf(ppaace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
628 ppaace->op_encode.index_ot.omi = OMI_QMAN; 615 ppaace->op_encode.index_ot.omi = OMI_QMAN;
629 /*Set DQRR and Frame stashing for the L3 cache */ 616 /* Set DQRR and Frame stashing for the L3 cache */
630 set_bf(ppaace->impl_attr, PAACE_IA_CID, get_stash_id(PAMU_ATTR_CACHE_L3, 0)); 617 set_bf(ppaace->impl_attr, PAACE_IA_CID, get_stash_id(PAMU_ATTR_CACHE_L3, 0));
631 break; 618 break;
632 case BMAN_PAACE: 619 case BMAN_PAACE:
@@ -679,7 +666,7 @@ static void __init setup_omt(struct ome *omt)
679 * Get the maximum number of PAACT table entries 666 * Get the maximum number of PAACT table entries
680 * and subwindows supported by PAMU 667 * and subwindows supported by PAMU
681 */ 668 */
682static void get_pamu_cap_values(unsigned long pamu_reg_base) 669static void __init get_pamu_cap_values(unsigned long pamu_reg_base)
683{ 670{
684 u32 pc_val; 671 u32 pc_val;
685 672
@@ -689,9 +676,9 @@ static void get_pamu_cap_values(unsigned long pamu_reg_base)
689} 676}
690 677
691/* Setup PAMU registers pointing to PAACT, SPAACT and OMT */ 678/* Setup PAMU registers pointing to PAACT, SPAACT and OMT */
692int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size, 679static int __init setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
693 phys_addr_t ppaact_phys, phys_addr_t spaact_phys, 680 phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
694 phys_addr_t omt_phys) 681 phys_addr_t omt_phys)
695{ 682{
696 u32 *pc; 683 u32 *pc;
697 struct pamu_mmap_regs *pamu_regs; 684 struct pamu_mmap_regs *pamu_regs;
@@ -727,7 +714,7 @@ int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
727 */ 714 */
728 715
729 out_be32((u32 *)(pamu_reg_base + PAMU_PICS), 716 out_be32((u32 *)(pamu_reg_base + PAMU_PICS),
730 PAMU_ACCESS_VIOLATION_ENABLE); 717 PAMU_ACCESS_VIOLATION_ENABLE);
731 out_be32(pc, PAMU_PC_PE | PAMU_PC_OCE | PAMU_PC_SPCC | PAMU_PC_PPCC); 718 out_be32(pc, PAMU_PC_PE | PAMU_PC_OCE | PAMU_PC_SPCC | PAMU_PC_PPCC);
732 return 0; 719 return 0;
733} 720}
@@ -757,9 +744,9 @@ static void __init setup_liodns(void)
757 ppaace->wbah = 0; 744 ppaace->wbah = 0;
758 set_bf(ppaace->addr_bitfields, PPAACE_AF_WBAL, 0); 745 set_bf(ppaace->addr_bitfields, PPAACE_AF_WBAL, 0);
759 set_bf(ppaace->impl_attr, PAACE_IA_ATM, 746 set_bf(ppaace->impl_attr, PAACE_IA_ATM,
760 PAACE_ATM_NO_XLATE); 747 PAACE_ATM_NO_XLATE);
761 set_bf(ppaace->addr_bitfields, PAACE_AF_AP, 748 set_bf(ppaace->addr_bitfields, PAACE_AF_AP,
762 PAACE_AP_PERMS_ALL); 749 PAACE_AP_PERMS_ALL);
763 if (of_device_is_compatible(node, "fsl,qman-portal")) 750 if (of_device_is_compatible(node, "fsl,qman-portal"))
764 setup_qbman_paace(ppaace, QMAN_PORTAL_PAACE); 751 setup_qbman_paace(ppaace, QMAN_PORTAL_PAACE);
765 if (of_device_is_compatible(node, "fsl,qman")) 752 if (of_device_is_compatible(node, "fsl,qman"))
@@ -772,7 +759,7 @@ static void __init setup_liodns(void)
772 } 759 }
773} 760}
774 761
775irqreturn_t pamu_av_isr(int irq, void *arg) 762static irqreturn_t pamu_av_isr(int irq, void *arg)
776{ 763{
777 struct pamu_isr_data *data = arg; 764 struct pamu_isr_data *data = arg;
778 phys_addr_t phys; 765 phys_addr_t phys;
@@ -792,14 +779,16 @@ irqreturn_t pamu_av_isr(int irq, void *arg)
792 pr_emerg("POES2=%08x\n", in_be32(p + PAMU_POES2)); 779 pr_emerg("POES2=%08x\n", in_be32(p + PAMU_POES2));
793 pr_emerg("AVS1=%08x\n", avs1); 780 pr_emerg("AVS1=%08x\n", avs1);
794 pr_emerg("AVS2=%08x\n", in_be32(p + PAMU_AVS2)); 781 pr_emerg("AVS2=%08x\n", in_be32(p + PAMU_AVS2));
795 pr_emerg("AVA=%016llx\n", make64(in_be32(p + PAMU_AVAH), 782 pr_emerg("AVA=%016llx\n",
796 in_be32(p + PAMU_AVAL))); 783 make64(in_be32(p + PAMU_AVAH),
784 in_be32(p + PAMU_AVAL)));
797 pr_emerg("UDAD=%08x\n", in_be32(p + PAMU_UDAD)); 785 pr_emerg("UDAD=%08x\n", in_be32(p + PAMU_UDAD));
798 pr_emerg("POEA=%016llx\n", make64(in_be32(p + PAMU_POEAH), 786 pr_emerg("POEA=%016llx\n",
799 in_be32(p + PAMU_POEAL))); 787 make64(in_be32(p + PAMU_POEAH),
788 in_be32(p + PAMU_POEAL)));
800 789
801 phys = make64(in_be32(p + PAMU_POEAH), 790 phys = make64(in_be32(p + PAMU_POEAH),
802 in_be32(p + PAMU_POEAL)); 791 in_be32(p + PAMU_POEAL));
803 792
804 /* Assume that POEA points to a PAACE */ 793 /* Assume that POEA points to a PAACE */
805 if (phys) { 794 if (phys) {
@@ -807,11 +796,12 @@ irqreturn_t pamu_av_isr(int irq, void *arg)
807 796
808 /* Only the first four words are relevant */ 797 /* Only the first four words are relevant */
809 for (j = 0; j < 4; j++) 798 for (j = 0; j < 4; j++)
810 pr_emerg("PAACE[%u]=%08x\n", j, in_be32(paace + j)); 799 pr_emerg("PAACE[%u]=%08x\n",
800 j, in_be32(paace + j));
811 } 801 }
812 802
813 /* clear access violation condition */ 803 /* clear access violation condition */
814 out_be32((p + PAMU_AVS1), avs1 & PAMU_AV_MASK); 804 out_be32(p + PAMU_AVS1, avs1 & PAMU_AV_MASK);
815 paace = pamu_get_ppaace(avs1 >> PAMU_AVS1_LIODN_SHIFT); 805 paace = pamu_get_ppaace(avs1 >> PAMU_AVS1_LIODN_SHIFT);
816 BUG_ON(!paace); 806 BUG_ON(!paace);
817 /* check if we got a violation for a disabled LIODN */ 807 /* check if we got a violation for a disabled LIODN */
@@ -827,13 +817,13 @@ irqreturn_t pamu_av_isr(int irq, void *arg)
827 /* Disable the LIODN */ 817 /* Disable the LIODN */
828 ret = pamu_disable_liodn(avs1 >> PAMU_AVS1_LIODN_SHIFT); 818 ret = pamu_disable_liodn(avs1 >> PAMU_AVS1_LIODN_SHIFT);
829 BUG_ON(ret); 819 BUG_ON(ret);
830 pr_emerg("Disabling liodn %x\n", avs1 >> PAMU_AVS1_LIODN_SHIFT); 820 pr_emerg("Disabling liodn %x\n",
821 avs1 >> PAMU_AVS1_LIODN_SHIFT);
831 } 822 }
832 out_be32((p + PAMU_PICS), pics); 823 out_be32((p + PAMU_PICS), pics);
833 } 824 }
834 } 825 }
835 826
836
837 return IRQ_HANDLED; 827 return IRQ_HANDLED;
838} 828}
839 829
@@ -952,7 +942,7 @@ static int __init create_csd(phys_addr_t phys, size_t size, u32 csd_port_id)
952 } 942 }
953 943
954 if (i == 0 || i == num_laws) { 944 if (i == 0 || i == num_laws) {
955 /* This should never happen*/ 945 /* This should never happen */
956 ret = -ENOENT; 946 ret = -ENOENT;
957 goto error; 947 goto error;
958 } 948 }
@@ -998,26 +988,27 @@ error:
998static const struct { 988static const struct {
999 u32 svr; 989 u32 svr;
1000 u32 port_id; 990 u32 port_id;
1001} port_id_map[] = { 991} port_id_map[] __initconst = {
1002 {0x82100010, 0xFF000000}, /* P2040 1.0 */ 992 {(SVR_P2040 << 8) | 0x10, 0xFF000000}, /* P2040 1.0 */
1003 {0x82100011, 0xFF000000}, /* P2040 1.1 */ 993 {(SVR_P2040 << 8) | 0x11, 0xFF000000}, /* P2040 1.1 */
1004 {0x82100110, 0xFF000000}, /* P2041 1.0 */ 994 {(SVR_P2041 << 8) | 0x10, 0xFF000000}, /* P2041 1.0 */
1005 {0x82100111, 0xFF000000}, /* P2041 1.1 */ 995 {(SVR_P2041 << 8) | 0x11, 0xFF000000}, /* P2041 1.1 */
1006 {0x82110310, 0xFF000000}, /* P3041 1.0 */ 996 {(SVR_P3041 << 8) | 0x10, 0xFF000000}, /* P3041 1.0 */
1007 {0x82110311, 0xFF000000}, /* P3041 1.1 */ 997 {(SVR_P3041 << 8) | 0x11, 0xFF000000}, /* P3041 1.1 */
1008 {0x82010020, 0xFFF80000}, /* P4040 2.0 */ 998 {(SVR_P4040 << 8) | 0x20, 0xFFF80000}, /* P4040 2.0 */
1009 {0x82000020, 0xFFF80000}, /* P4080 2.0 */ 999 {(SVR_P4080 << 8) | 0x20, 0xFFF80000}, /* P4080 2.0 */
1010 {0x82210010, 0xFC000000}, /* P5010 1.0 */ 1000 {(SVR_P5010 << 8) | 0x10, 0xFC000000}, /* P5010 1.0 */
1011 {0x82210020, 0xFC000000}, /* P5010 2.0 */ 1001 {(SVR_P5010 << 8) | 0x20, 0xFC000000}, /* P5010 2.0 */
1012 {0x82200010, 0xFC000000}, /* P5020 1.0 */ 1002 {(SVR_P5020 << 8) | 0x10, 0xFC000000}, /* P5020 1.0 */
1013 {0x82050010, 0xFF800000}, /* P5021 1.0 */ 1003 {(SVR_P5021 << 8) | 0x10, 0xFF800000}, /* P5021 1.0 */
1014 {0x82040010, 0xFF800000}, /* P5040 1.0 */ 1004 {(SVR_P5040 << 8) | 0x10, 0xFF800000}, /* P5040 1.0 */
1015}; 1005};
1016 1006
1017#define SVR_SECURITY 0x80000 /* The Security (E) bit */ 1007#define SVR_SECURITY 0x80000 /* The Security (E) bit */
1018 1008
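
Note: rewriting the table in terms of '(SVR_xxx << 8) | rev' makes each entry read as part-plus-revision: the low byte of the SVR is the silicon revision, and the Security (E) bit is masked off before comparison in the probe loop below. That match, pulled out for clarity (an illustrative helper, not in the patch):

static bool svr_matches(u32 svr, u32 entry_svr)
{
	/* svr would come from mfspr(SPRN_SVR) on real hardware */
	return (svr & ~SVR_SECURITY) == entry_svr;
}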
1019static int __init fsl_pamu_probe(struct platform_device *pdev) 1009static int __init fsl_pamu_probe(struct platform_device *pdev)
1020{ 1010{
1011 struct device *dev = &pdev->dev;
1021 void __iomem *pamu_regs = NULL; 1012 void __iomem *pamu_regs = NULL;
1022 struct ccsr_guts __iomem *guts_regs = NULL; 1013 struct ccsr_guts __iomem *guts_regs = NULL;
1023 u32 pamubypenr, pamu_counter; 1014 u32 pamubypenr, pamu_counter;
@@ -1042,22 +1033,21 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
1042 * NOTE : All PAMUs share the same LIODN tables. 1033 * NOTE : All PAMUs share the same LIODN tables.
1043 */ 1034 */
1044 1035
1045 pamu_regs = of_iomap(pdev->dev.of_node, 0); 1036 pamu_regs = of_iomap(dev->of_node, 0);
1046 if (!pamu_regs) { 1037 if (!pamu_regs) {
1047 dev_err(&pdev->dev, "ioremap of PAMU node failed\n"); 1038 dev_err(dev, "ioremap of PAMU node failed\n");
1048 return -ENOMEM; 1039 return -ENOMEM;
1049 } 1040 }
1050 of_get_address(pdev->dev.of_node, 0, &size, NULL); 1041 of_get_address(dev->of_node, 0, &size, NULL);
1051 1042
1052 irq = irq_of_parse_and_map(pdev->dev.of_node, 0); 1043 irq = irq_of_parse_and_map(dev->of_node, 0);
1053 if (irq == NO_IRQ) { 1044 if (irq == NO_IRQ) {
1054 dev_warn(&pdev->dev, "no interrupts listed in PAMU node\n"); 1045 dev_warn(dev, "no interrupts listed in PAMU node\n");
1055 goto error; 1046 goto error;
1056 } 1047 }
1057 1048
1058 data = kzalloc(sizeof(struct pamu_isr_data), GFP_KERNEL); 1049 data = kzalloc(sizeof(*data), GFP_KERNEL);
1059 if (!data) { 1050 if (!data) {
1060 dev_err(&pdev->dev, "PAMU isr data memory allocation failed\n");
1061 ret = -ENOMEM; 1051 ret = -ENOMEM;
1062 goto error; 1052 goto error;
1063 } 1053 }
@@ -1067,15 +1057,14 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
1067 /* The ISR needs access to the regs, so we won't iounmap them */ 1057 /* The ISR needs access to the regs, so we won't iounmap them */
1068 ret = request_irq(irq, pamu_av_isr, 0, "pamu", data); 1058 ret = request_irq(irq, pamu_av_isr, 0, "pamu", data);
1069 if (ret < 0) { 1059 if (ret < 0) {
1070 dev_err(&pdev->dev, "error %i installing ISR for irq %i\n", 1060 dev_err(dev, "error %i installing ISR for irq %i\n", ret, irq);
1071 ret, irq);
1072 goto error; 1061 goto error;
1073 } 1062 }
1074 1063
1075 guts_node = of_find_matching_node(NULL, guts_device_ids); 1064 guts_node = of_find_matching_node(NULL, guts_device_ids);
1076 if (!guts_node) { 1065 if (!guts_node) {
1077 dev_err(&pdev->dev, "could not find GUTS node %s\n", 1066 dev_err(dev, "could not find GUTS node %s\n",
1078 pdev->dev.of_node->full_name); 1067 dev->of_node->full_name);
1079 ret = -ENODEV; 1068 ret = -ENODEV;
1080 goto error; 1069 goto error;
1081 } 1070 }
@@ -1083,7 +1072,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
1083 guts_regs = of_iomap(guts_node, 0); 1072 guts_regs = of_iomap(guts_node, 0);
1084 of_node_put(guts_node); 1073 of_node_put(guts_node);
1085 if (!guts_regs) { 1074 if (!guts_regs) {
1086 dev_err(&pdev->dev, "ioremap of GUTS node failed\n"); 1075 dev_err(dev, "ioremap of GUTS node failed\n");
1087 ret = -ENODEV; 1076 ret = -ENODEV;
1088 goto error; 1077 goto error;
1089 } 1078 }
@@ -1103,7 +1092,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
1103 1092
1104 p = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); 1093 p = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
1105 if (!p) { 1094 if (!p) {
1106 dev_err(&pdev->dev, "unable to allocate PAACT/SPAACT/OMT block\n"); 1095 dev_err(dev, "unable to allocate PAACT/SPAACT/OMT block\n");
1107 ret = -ENOMEM; 1096 ret = -ENOMEM;
1108 goto error; 1097 goto error;
1109 } 1098 }
@@ -1113,7 +1102,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
1113 1102
1114 /* Make sure the memory is naturally aligned */ 1103 /* Make sure the memory is naturally aligned */
1115 if (ppaact_phys & ((PAGE_SIZE << order) - 1)) { 1104 if (ppaact_phys & ((PAGE_SIZE << order) - 1)) {
1116 dev_err(&pdev->dev, "PAACT/OMT block is unaligned\n"); 1105 dev_err(dev, "PAACT/OMT block is unaligned\n");
1117 ret = -ENOMEM; 1106 ret = -ENOMEM;
1118 goto error; 1107 goto error;
1119 } 1108 }
@@ -1121,8 +1110,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
1121 spaact = (void *)ppaact + (PAGE_SIZE << get_order(PAACT_SIZE)); 1110 spaact = (void *)ppaact + (PAGE_SIZE << get_order(PAACT_SIZE));
1122 omt = (void *)spaact + (PAGE_SIZE << get_order(SPAACT_SIZE)); 1111 omt = (void *)spaact + (PAGE_SIZE << get_order(SPAACT_SIZE));
1123 1112
1124 dev_dbg(&pdev->dev, "ppaact virt=%p phys=0x%llx\n", ppaact, 1113 dev_dbg(dev, "ppaact virt=%p phys=%pa\n", ppaact, &ppaact_phys);
1125 (unsigned long long) ppaact_phys);
1126 1114
1127 /* Check to see if we need to implement the work-around on this SOC */ 1115 /* Check to see if we need to implement the work-around on this SOC */
1128 1116
@@ -1130,21 +1118,19 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
1130 for (i = 0; i < ARRAY_SIZE(port_id_map); i++) { 1118 for (i = 0; i < ARRAY_SIZE(port_id_map); i++) {
1131 if (port_id_map[i].svr == (mfspr(SPRN_SVR) & ~SVR_SECURITY)) { 1119 if (port_id_map[i].svr == (mfspr(SPRN_SVR) & ~SVR_SECURITY)) {
1132 csd_port_id = port_id_map[i].port_id; 1120 csd_port_id = port_id_map[i].port_id;
1133 dev_dbg(&pdev->dev, "found matching SVR %08x\n", 1121 dev_dbg(dev, "found matching SVR %08x\n",
1134 port_id_map[i].svr); 1122 port_id_map[i].svr);
1135 break; 1123 break;
1136 } 1124 }
1137 } 1125 }
1138 1126
1139 if (csd_port_id) { 1127 if (csd_port_id) {
1140 dev_dbg(&pdev->dev, "creating coherency subdomain at address " 1128 dev_dbg(dev, "creating coherency subdomain at address %pa, size %zu, port id 0x%08x",
1141 "0x%llx, size %zu, port id 0x%08x", ppaact_phys, 1129 &ppaact_phys, mem_size, csd_port_id);
1142 mem_size, csd_port_id);
1143 1130
1144 ret = create_csd(ppaact_phys, mem_size, csd_port_id); 1131 ret = create_csd(ppaact_phys, mem_size, csd_port_id);
1145 if (ret) { 1132 if (ret) {
1146 dev_err(&pdev->dev, "could not create coherence " 1133 dev_err(dev, "could not create coherence subdomain\n");
1147 "subdomain\n");
1148 return ret; 1134 return ret;
1149 } 1135 }
1150 } 1136 }
@@ -1155,7 +1141,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
1155 spaace_pool = gen_pool_create(ilog2(sizeof(struct paace)), -1); 1141 spaace_pool = gen_pool_create(ilog2(sizeof(struct paace)), -1);
1156 if (!spaace_pool) { 1142 if (!spaace_pool) {
1157 ret = -ENOMEM; 1143 ret = -ENOMEM;
1158 dev_err(&pdev->dev, "PAMU : failed to allocate spaace gen pool\n"); 1144 dev_err(dev, "Failed to allocate spaace gen pool\n");
1159 goto error; 1145 goto error;
1160 } 1146 }
1161 1147
@@ -1168,9 +1154,9 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
1168 for (pamu_reg_off = 0, pamu_counter = 0x80000000; pamu_reg_off < size; 1154 for (pamu_reg_off = 0, pamu_counter = 0x80000000; pamu_reg_off < size;
1169 pamu_reg_off += PAMU_OFFSET, pamu_counter >>= 1) { 1155 pamu_reg_off += PAMU_OFFSET, pamu_counter >>= 1) {
1170 1156
1171 pamu_reg_base = (unsigned long) pamu_regs + pamu_reg_off; 1157 pamu_reg_base = (unsigned long)pamu_regs + pamu_reg_off;
1172 setup_one_pamu(pamu_reg_base, pamu_reg_off, ppaact_phys, 1158 setup_one_pamu(pamu_reg_base, pamu_reg_off, ppaact_phys,
1173 spaact_phys, omt_phys); 1159 spaact_phys, omt_phys);
1174 /* Disable PAMU bypass for this PAMU */ 1160 /* Disable PAMU bypass for this PAMU */
1175 pamubypenr &= ~pamu_counter; 1161 pamubypenr &= ~pamu_counter;
1176 } 1162 }
@@ -1182,7 +1168,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
1182 1168
1183 iounmap(guts_regs); 1169 iounmap(guts_regs);
1184 1170
1185 /* Enable DMA for the LIODNs in the device tree*/ 1171 /* Enable DMA for the LIODNs in the device tree */
1186 1172
1187 setup_liodns(); 1173 setup_liodns();
1188 1174
@@ -1214,17 +1200,7 @@ error:
1214 return ret; 1200 return ret;
1215} 1201}
1216 1202
1217static const struct of_device_id fsl_of_pamu_ids[] = { 1203static struct platform_driver fsl_of_pamu_driver __initdata = {
1218 {
1219 .compatible = "fsl,p4080-pamu",
1220 },
1221 {
1222 .compatible = "fsl,pamu",
1223 },
1224 {},
1225};
1226
1227static struct platform_driver fsl_of_pamu_driver = {
1228 .driver = { 1204 .driver = {
1229 .name = "fsl-of-pamu", 1205 .name = "fsl-of-pamu",
1230 }, 1206 },
diff --git a/drivers/iommu/fsl_pamu.h b/drivers/iommu/fsl_pamu.h
index 8fc1a125b16e..aab723f91f12 100644
--- a/drivers/iommu/fsl_pamu.h
+++ b/drivers/iommu/fsl_pamu.h
@@ -19,13 +19,15 @@
19#ifndef __FSL_PAMU_H 19#ifndef __FSL_PAMU_H
20#define __FSL_PAMU_H 20#define __FSL_PAMU_H
21 21
22#include <linux/iommu.h>
23
22#include <asm/fsl_pamu_stash.h> 24#include <asm/fsl_pamu_stash.h>
23 25
24/* Bit Field macros 26/* Bit Field macros
25 * v = bit field variable; m = mask, m##_SHIFT = shift, x = value to load 27 * v = bit field variable; m = mask, m##_SHIFT = shift, x = value to load
26 */ 28 */
27#define set_bf(v, m, x) (v = ((v) & ~(m)) | (((x) << (m##_SHIFT)) & (m))) 29#define set_bf(v, m, x) (v = ((v) & ~(m)) | (((x) << m##_SHIFT) & (m)))
28#define get_bf(v, m) (((v) & (m)) >> (m##_SHIFT)) 30#define get_bf(v, m) (((v) & (m)) >> m##_SHIFT)
29 31
30/* PAMU CCSR space */ 32/* PAMU CCSR space */
31#define PAMU_PGC 0x00000000 /* Allows all peripheral accesses */ 33#define PAMU_PGC 0x00000000 /* Allows all peripheral accesses */
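
Note: set_bf()/get_bf() pack and extract a field described by a mask plus a companion <mask>_SHIFT constant. A tiny demonstration with a made-up field (MY_FIELD is an assumption, not a PAMU register field):

#define MY_FIELD	0x00F0
#define MY_FIELD_SHIFT	4

static u32 demo_bitfield(void)
{
	u32 v = 0;

	set_bf(v, MY_FIELD, 0x3);	/* v == 0x30 */
	return get_bf(v, MY_FIELD);	/* returns 0x3 */
}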
@@ -65,7 +67,7 @@ struct pamu_mmap_regs {
65#define PAMU_AVS1_GCV 0x2000 67#define PAMU_AVS1_GCV 0x2000
66#define PAMU_AVS1_PDV 0x4000 68#define PAMU_AVS1_PDV 0x4000
67#define PAMU_AV_MASK (PAMU_AVS1_AV | PAMU_AVS1_OTV | PAMU_AVS1_APV | PAMU_AVS1_WAV \ 69#define PAMU_AV_MASK (PAMU_AVS1_AV | PAMU_AVS1_OTV | PAMU_AVS1_APV | PAMU_AVS1_WAV \
68 | PAMU_AVS1_LAV | PAMU_AVS1_GCV | PAMU_AVS1_PDV) 70 | PAMU_AVS1_LAV | PAMU_AVS1_GCV | PAMU_AVS1_PDV)
69#define PAMU_AVS1_LIODN_SHIFT 16 71#define PAMU_AVS1_LIODN_SHIFT 16
70#define PAMU_LAV_LIODN_NOT_IN_PPAACT 0x400 72#define PAMU_LAV_LIODN_NOT_IN_PPAACT 0x400
71 73
@@ -198,8 +200,7 @@ struct pamu_mmap_regs {
198#define PAACE_ATM_NO_XLATE 0x00 200#define PAACE_ATM_NO_XLATE 0x00
199#define PAACE_ATM_WINDOW_XLATE 0x01 201#define PAACE_ATM_WINDOW_XLATE 0x01
200#define PAACE_ATM_PAGE_XLATE 0x02 202#define PAACE_ATM_PAGE_XLATE 0x02
201#define PAACE_ATM_WIN_PG_XLATE \ 203#define PAACE_ATM_WIN_PG_XLATE (PAACE_ATM_WINDOW_XLATE | PAACE_ATM_PAGE_XLATE)
202 (PAACE_ATM_WINDOW_XLATE | PAACE_ATM_PAGE_XLATE)
203#define PAACE_OTM_NO_XLATE 0x00 204#define PAACE_OTM_NO_XLATE 0x00
204#define PAACE_OTM_IMMEDIATE 0x01 205#define PAACE_OTM_IMMEDIATE 0x01
205#define PAACE_OTM_INDEXED 0x02 206#define PAACE_OTM_INDEXED 0x02
@@ -219,7 +220,7 @@ struct pamu_mmap_regs {
219#define PAACE_TCEF_FORMAT0_8B 0x00 220#define PAACE_TCEF_FORMAT0_8B 0x00
220#define PAACE_TCEF_FORMAT1_RSVD 0x01 221#define PAACE_TCEF_FORMAT1_RSVD 0x01
221/* 222/*
222 * Hard coded value for the PAACT size to accomodate 223 * Hard coded value for the PAACT size to accommodate
223 * maximum LIODN value generated by u-boot. 224 * maximum LIODN value generated by u-boot.
224 */ 225 */
225#define PAACE_NUMBER_ENTRIES 0x500 226#define PAACE_NUMBER_ENTRIES 0x500
@@ -332,7 +333,7 @@ struct paace {
332#define NUM_MOE 128 333#define NUM_MOE 128
333struct ome { 334struct ome {
334 u8 moe[NUM_MOE]; 335 u8 moe[NUM_MOE];
335} __attribute__((packed)); 336} __packed;
336 337
337#define PAACT_SIZE (sizeof(struct paace) * PAACE_NUMBER_ENTRIES) 338#define PAACT_SIZE (sizeof(struct paace) * PAACE_NUMBER_ENTRIES)
338#define SPAACT_SIZE (sizeof(struct paace) * SPAACE_NUMBER_ENTRIES) 339#define SPAACT_SIZE (sizeof(struct paace) * SPAACE_NUMBER_ENTRIES)
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index c828f80d48b0..ceebd287b660 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -19,26 +19,10 @@
19 19
20#define pr_fmt(fmt) "fsl-pamu-domain: %s: " fmt, __func__ 20#define pr_fmt(fmt) "fsl-pamu-domain: %s: " fmt, __func__
21 21
22#include <linux/init.h>
23#include <linux/iommu.h>
24#include <linux/notifier.h>
25#include <linux/slab.h>
26#include <linux/module.h>
27#include <linux/types.h>
28#include <linux/mm.h>
29#include <linux/interrupt.h>
30#include <linux/device.h>
31#include <linux/of_platform.h>
32#include <linux/bootmem.h>
33#include <linux/err.h>
34#include <asm/io.h>
35#include <asm/bitops.h>
36
37#include <asm/pci-bridge.h>
38#include <sysdev/fsl_pci.h>
39
40#include "fsl_pamu_domain.h" 22#include "fsl_pamu_domain.h"
41 23
24#include <sysdev/fsl_pci.h>
25
42/* 26/*
43 * Global spinlock that needs to be held while 27 * Global spinlock that needs to be held while
44 * configuring PAMU. 28 * configuring PAMU.
@@ -51,23 +35,21 @@ static DEFINE_SPINLOCK(device_domain_lock);
51 35
52static int __init iommu_init_mempool(void) 36static int __init iommu_init_mempool(void)
53{ 37{
54
55 fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain", 38 fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
56 sizeof(struct fsl_dma_domain), 39 sizeof(struct fsl_dma_domain),
57 0, 40 0,
58 SLAB_HWCACHE_ALIGN, 41 SLAB_HWCACHE_ALIGN,
59 42 NULL);
60 NULL);
61 if (!fsl_pamu_domain_cache) { 43 if (!fsl_pamu_domain_cache) {
62 pr_debug("Couldn't create fsl iommu_domain cache\n"); 44 pr_debug("Couldn't create fsl iommu_domain cache\n");
63 return -ENOMEM; 45 return -ENOMEM;
64 } 46 }
65 47
66 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo", 48 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
67 sizeof(struct device_domain_info), 49 sizeof(struct device_domain_info),
68 0, 50 0,
69 SLAB_HWCACHE_ALIGN, 51 SLAB_HWCACHE_ALIGN,
70 NULL); 52 NULL);
71 if (!iommu_devinfo_cache) { 53 if (!iommu_devinfo_cache) {
72 pr_debug("Couldn't create devinfo cache\n"); 54 pr_debug("Couldn't create devinfo cache\n");
73 kmem_cache_destroy(fsl_pamu_domain_cache); 55 kmem_cache_destroy(fsl_pamu_domain_cache);
@@ -80,8 +62,7 @@ static int __init iommu_init_mempool(void)
80static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t iova) 62static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t iova)
81{ 63{
82 u32 win_cnt = dma_domain->win_cnt; 64 u32 win_cnt = dma_domain->win_cnt;
83 struct dma_window *win_ptr = 65 struct dma_window *win_ptr = &dma_domain->win_arr[0];
84 &dma_domain->win_arr[0];
85 struct iommu_domain_geometry *geom; 66 struct iommu_domain_geometry *geom;
86 67
87 geom = &dma_domain->iommu_domain->geometry; 68 geom = &dma_domain->iommu_domain->geometry;
@@ -103,22 +84,20 @@ static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t i
103 } 84 }
104 85
105 if (win_ptr->valid) 86 if (win_ptr->valid)
106 return (win_ptr->paddr + (iova & (win_ptr->size - 1))); 87 return win_ptr->paddr + (iova & (win_ptr->size - 1));
107 88
108 return 0; 89 return 0;
109} 90}
110 91
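
Note: with power-of-two window sizes, 'iova & (size - 1)' isolates the offset into the window, so translation is just base plus offset. The same step for a single window (hypothetical helper):

static phys_addr_t window_xlate(phys_addr_t win_paddr, u64 win_size,
				dma_addr_t iova)
{
	/* assumes win_size is a power of two, as PAMU requires */
	return win_paddr + (iova & (win_size - 1));
}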
111static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain) 92static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain)
112{ 93{
113 struct dma_window *sub_win_ptr = 94 struct dma_window *sub_win_ptr = &dma_domain->win_arr[0];
114 &dma_domain->win_arr[0];
115 int i, ret; 95 int i, ret;
116 unsigned long rpn, flags; 96 unsigned long rpn, flags;
117 97
118 for (i = 0; i < dma_domain->win_cnt; i++) { 98 for (i = 0; i < dma_domain->win_cnt; i++) {
119 if (sub_win_ptr[i].valid) { 99 if (sub_win_ptr[i].valid) {
120 rpn = sub_win_ptr[i].paddr >> 100 rpn = sub_win_ptr[i].paddr >> PAMU_PAGE_SHIFT;
121 PAMU_PAGE_SHIFT;
122 spin_lock_irqsave(&iommu_lock, flags); 101 spin_lock_irqsave(&iommu_lock, flags);
123 ret = pamu_config_spaace(liodn, dma_domain->win_cnt, i, 102 ret = pamu_config_spaace(liodn, dma_domain->win_cnt, i,
124 sub_win_ptr[i].size, 103 sub_win_ptr[i].size,
@@ -130,7 +109,7 @@ static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain)
130 sub_win_ptr[i].prot); 109 sub_win_ptr[i].prot);
131 spin_unlock_irqrestore(&iommu_lock, flags); 110 spin_unlock_irqrestore(&iommu_lock, flags);
132 if (ret) { 111 if (ret) {
133 pr_debug("PAMU SPAACE configuration failed for liodn %d\n", 112 pr_debug("SPAACE configuration failed for liodn %d\n",
134 liodn); 113 liodn);
135 return ret; 114 return ret;
136 } 115 }
@@ -156,8 +135,7 @@ static int map_win(int liodn, struct fsl_dma_domain *dma_domain)
156 0, wnd->prot); 135 0, wnd->prot);
157 spin_unlock_irqrestore(&iommu_lock, flags); 136 spin_unlock_irqrestore(&iommu_lock, flags);
158 if (ret) 137 if (ret)
159 pr_debug("PAMU PAACE configuration failed for liodn %d\n", 138 pr_debug("PAACE configuration failed for liodn %d\n", liodn);
160 liodn);
161 139
162 return ret; 140 return ret;
163} 141}
@@ -169,7 +147,6 @@ static int map_liodn(int liodn, struct fsl_dma_domain *dma_domain)
169 return map_subwins(liodn, dma_domain); 147 return map_subwins(liodn, dma_domain);
170 else 148 else
171 return map_win(liodn, dma_domain); 149 return map_win(liodn, dma_domain);
172
173} 150}
174 151
175/* Update window/subwindow mapping for the LIODN */ 152/* Update window/subwindow mapping for the LIODN */
@@ -190,7 +167,8 @@ static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr
190 (wnd_nr > 0) ? 1 : 0, 167 (wnd_nr > 0) ? 1 : 0,
191 wnd->prot); 168 wnd->prot);
192 if (ret) 169 if (ret)
193 pr_debug("Subwindow reconfiguration failed for liodn %d\n", liodn); 170 pr_debug("Subwindow reconfiguration failed for liodn %d\n",
171 liodn);
194 } else { 172 } else {
195 phys_addr_t wnd_addr; 173 phys_addr_t wnd_addr;
196 174
@@ -200,10 +178,11 @@ static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr
200 wnd->size, 178 wnd->size,
201 ~(u32)0, 179 ~(u32)0,
202 wnd->paddr >> PAMU_PAGE_SHIFT, 180 wnd->paddr >> PAMU_PAGE_SHIFT,
203 dma_domain->snoop_id, dma_domain->stash_id, 181 dma_domain->snoop_id, dma_domain->stash_id,
204 0, wnd->prot); 182 0, wnd->prot);
205 if (ret) 183 if (ret)
206 pr_debug("Window reconfiguration failed for liodn %d\n", liodn); 184 pr_debug("Window reconfiguration failed for liodn %d\n",
185 liodn);
207 } 186 }
208 187
209 spin_unlock_irqrestore(&iommu_lock, flags); 188 spin_unlock_irqrestore(&iommu_lock, flags);
@@ -212,14 +191,15 @@ static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr
212} 191}
213 192
214static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain, 193static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
215 u32 val) 194 u32 val)
216{ 195{
217 int ret = 0, i; 196 int ret = 0, i;
218 unsigned long flags; 197 unsigned long flags;
219 198
220 spin_lock_irqsave(&iommu_lock, flags); 199 spin_lock_irqsave(&iommu_lock, flags);
221 if (!dma_domain->win_arr) { 200 if (!dma_domain->win_arr) {
222 pr_debug("Windows not configured, stash destination update failed for liodn %d\n", liodn); 201 pr_debug("Windows not configured, stash destination update failed for liodn %d\n",
202 liodn);
223 spin_unlock_irqrestore(&iommu_lock, flags); 203 spin_unlock_irqrestore(&iommu_lock, flags);
224 return -EINVAL; 204 return -EINVAL;
225 } 205 }
@@ -227,7 +207,8 @@ static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
227 for (i = 0; i < dma_domain->win_cnt; i++) { 207 for (i = 0; i < dma_domain->win_cnt; i++) {
228 ret = pamu_update_paace_stash(liodn, i, val); 208 ret = pamu_update_paace_stash(liodn, i, val);
229 if (ret) { 209 if (ret) {
230 pr_debug("Failed to update SPAACE %d field for liodn %d\n", i, liodn); 210 pr_debug("Failed to update SPAACE %d field for liodn %d\n",
211 i, liodn);
231 spin_unlock_irqrestore(&iommu_lock, flags); 212 spin_unlock_irqrestore(&iommu_lock, flags);
232 return ret; 213 return ret;
233 } 214 }
@@ -240,9 +221,9 @@ static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
240 221
241/* Set the geometry parameters for a LIODN */ 222/* Set the geometry parameters for a LIODN */
242static int pamu_set_liodn(int liodn, struct device *dev, 223static int pamu_set_liodn(int liodn, struct device *dev,
243 struct fsl_dma_domain *dma_domain, 224 struct fsl_dma_domain *dma_domain,
244 struct iommu_domain_geometry *geom_attr, 225 struct iommu_domain_geometry *geom_attr,
245 u32 win_cnt) 226 u32 win_cnt)
246{ 227{
247 phys_addr_t window_addr, window_size; 228 phys_addr_t window_addr, window_size;
248 phys_addr_t subwin_size; 229 phys_addr_t subwin_size;
@@ -268,7 +249,8 @@ static int pamu_set_liodn(int liodn, struct device *dev,
268 dma_domain->stash_id, win_cnt, 0); 249 dma_domain->stash_id, win_cnt, 0);
269 spin_unlock_irqrestore(&iommu_lock, flags); 250 spin_unlock_irqrestore(&iommu_lock, flags);
270 if (ret) { 251 if (ret) {
271 pr_debug("PAMU PAACE configuration failed for liodn %d, win_cnt =%d\n", liodn, win_cnt); 252 pr_debug("PAACE configuration failed for liodn %d, win_cnt =%d\n",
253 liodn, win_cnt);
272 return ret; 254 return ret;
273 } 255 }
274 256
@@ -285,7 +267,8 @@ static int pamu_set_liodn(int liodn, struct device *dev,
285 0, 0); 267 0, 0);
286 spin_unlock_irqrestore(&iommu_lock, flags); 268 spin_unlock_irqrestore(&iommu_lock, flags);
287 if (ret) { 269 if (ret) {
288 pr_debug("PAMU SPAACE configuration failed for liodn %d\n", liodn); 270 pr_debug("SPAACE configuration failed for liodn %d\n",
271 liodn);
289 return ret; 272 return ret;
290 } 273 }
291 } 274 }
@@ -301,13 +284,13 @@ static int check_size(u64 size, dma_addr_t iova)
301 * to PAMU page size. 284 * to PAMU page size.
302 */ 285 */
303 if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) { 286 if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) {
304 pr_debug("%s: size too small or not a power of two\n", __func__); 287 pr_debug("Size too small or not a power of two\n");
305 return -EINVAL; 288 return -EINVAL;
306 } 289 }
307 290
308 /* iova must be page size aligned*/ 291 /* iova must be page size aligned */
309 if (iova & (size - 1)) { 292 if (iova & (size - 1)) {
310 pr_debug("%s: address is not aligned with window size\n", __func__); 293 pr_debug("Address is not aligned with window size\n");
311 return -EINVAL; 294 return -EINVAL;
312 } 295 }
313 296
@@ -396,16 +379,15 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d
396 if (!dev->archdata.iommu_domain) 379 if (!dev->archdata.iommu_domain)
397 dev->archdata.iommu_domain = info; 380 dev->archdata.iommu_domain = info;
398 spin_unlock_irqrestore(&device_domain_lock, flags); 381 spin_unlock_irqrestore(&device_domain_lock, flags);
399
400} 382}
401 383
402static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain, 384static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
403 dma_addr_t iova) 385 dma_addr_t iova)
404{ 386{
405 struct fsl_dma_domain *dma_domain = domain->priv; 387 struct fsl_dma_domain *dma_domain = domain->priv;
406 388
407 if ((iova < domain->geometry.aperture_start) || 389 if (iova < domain->geometry.aperture_start ||
408 iova > (domain->geometry.aperture_end)) 390 iova > domain->geometry.aperture_end)
409 return 0; 391 return 0;
410 392
411 return get_phys_addr(dma_domain, iova); 393 return get_phys_addr(dma_domain, iova);
@@ -460,7 +442,7 @@ static int pamu_set_domain_geometry(struct fsl_dma_domain *dma_domain,
460 442
461 list_for_each_entry(info, &dma_domain->devices, link) { 443 list_for_each_entry(info, &dma_domain->devices, link) {
462 ret = pamu_set_liodn(info->liodn, info->dev, dma_domain, 444 ret = pamu_set_liodn(info->liodn, info->dev, dma_domain,
463 geom_attr, win_cnt); 445 geom_attr, win_cnt);
464 if (ret) 446 if (ret)
465 break; 447 break;
466 } 448 }
@@ -543,7 +525,6 @@ static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr)
543 } 525 }
544 526
545 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); 527 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
546
547} 528}
548 529
549static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr, 530static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr,
@@ -576,7 +557,7 @@ static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr,
576 557
577 win_size = dma_domain->geom_size >> ilog2(dma_domain->win_cnt); 558 win_size = dma_domain->geom_size >> ilog2(dma_domain->win_cnt);
578 if (size > win_size) { 559 if (size > win_size) {
579 pr_debug("Invalid window size \n"); 560 pr_debug("Invalid window size\n");
580 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); 561 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
581 return -EINVAL; 562 return -EINVAL;
582 } 563 }
@@ -622,8 +603,8 @@ static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr,
622 * and window mappings. 603 * and window mappings.
623 */ 604 */
624static int handle_attach_device(struct fsl_dma_domain *dma_domain, 605static int handle_attach_device(struct fsl_dma_domain *dma_domain,
625 struct device *dev, const u32 *liodn, 606 struct device *dev, const u32 *liodn,
626 int num) 607 int num)
627{ 608{
628 unsigned long flags; 609 unsigned long flags;
629 struct iommu_domain *domain = dma_domain->iommu_domain; 610 struct iommu_domain *domain = dma_domain->iommu_domain;
@@ -632,11 +613,10 @@ static int handle_attach_device(struct fsl_dma_domain *dma_domain,
632 613
633 spin_lock_irqsave(&dma_domain->domain_lock, flags); 614 spin_lock_irqsave(&dma_domain->domain_lock, flags);
634 for (i = 0; i < num; i++) { 615 for (i = 0; i < num; i++) {
635
636 /* Ensure that LIODN value is valid */ 616 /* Ensure that LIODN value is valid */
637 if (liodn[i] >= PAACE_NUMBER_ENTRIES) { 617 if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
638 pr_debug("Invalid liodn %d, attach device failed for %s\n", 618 pr_debug("Invalid liodn %d, attach device failed for %s\n",
639 liodn[i], dev->of_node->full_name); 619 liodn[i], dev->of_node->full_name);
640 ret = -EINVAL; 620 ret = -EINVAL;
641 break; 621 break;
642 } 622 }
@@ -649,9 +629,9 @@ static int handle_attach_device(struct fsl_dma_domain *dma_domain,
649 */ 629 */
650 if (dma_domain->win_arr) { 630 if (dma_domain->win_arr) {
651 u32 win_cnt = dma_domain->win_cnt > 1 ? dma_domain->win_cnt : 0; 631 u32 win_cnt = dma_domain->win_cnt > 1 ? dma_domain->win_cnt : 0;
632
652 ret = pamu_set_liodn(liodn[i], dev, dma_domain, 633 ret = pamu_set_liodn(liodn[i], dev, dma_domain,
653 &domain->geometry, 634 &domain->geometry, win_cnt);
654 win_cnt);
655 if (ret) 635 if (ret)
656 break; 636 break;
657 if (dma_domain->mapped) { 637 if (dma_domain->mapped) {
@@ -698,19 +678,18 @@ static int fsl_pamu_attach_device(struct iommu_domain *domain,
698 liodn = of_get_property(dev->of_node, "fsl,liodn", &len); 678 liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
699 if (liodn) { 679 if (liodn) {
700 liodn_cnt = len / sizeof(u32); 680 liodn_cnt = len / sizeof(u32);
701 ret = handle_attach_device(dma_domain, dev, 681 ret = handle_attach_device(dma_domain, dev, liodn, liodn_cnt);
702 liodn, liodn_cnt);
703 } else { 682 } else {
704 pr_debug("missing fsl,liodn property at %s\n", 683 pr_debug("missing fsl,liodn property at %s\n",
705 dev->of_node->full_name); 684 dev->of_node->full_name);
706 ret = -EINVAL; 685 ret = -EINVAL;
707 } 686 }
708 687
709 return ret; 688 return ret;
710} 689}
711 690
712static void fsl_pamu_detach_device(struct iommu_domain *domain, 691static void fsl_pamu_detach_device(struct iommu_domain *domain,
713 struct device *dev) 692 struct device *dev)
714{ 693{
715 struct fsl_dma_domain *dma_domain = domain->priv; 694 struct fsl_dma_domain *dma_domain = domain->priv;
716 const u32 *prop; 695 const u32 *prop;
@@ -738,7 +717,7 @@ static void fsl_pamu_detach_device(struct iommu_domain *domain,
738 detach_device(dev, dma_domain); 717 detach_device(dev, dma_domain);
739 else 718 else
740 pr_debug("missing fsl,liodn property at %s\n", 719 pr_debug("missing fsl,liodn property at %s\n",
741 dev->of_node->full_name); 720 dev->of_node->full_name);
742} 721}
743 722
744static int configure_domain_geometry(struct iommu_domain *domain, void *data) 723static int configure_domain_geometry(struct iommu_domain *domain, void *data)
@@ -754,10 +733,10 @@ static int configure_domain_geometry(struct iommu_domain *domain, void *data)
754 * DMA outside of the geometry. 733 * DMA outside of the geometry.
755 */ 734 */
756 if (check_size(geom_size, geom_attr->aperture_start) || 735 if (check_size(geom_size, geom_attr->aperture_start) ||
757 !geom_attr->force_aperture) { 736 !geom_attr->force_aperture) {
758 pr_debug("Invalid PAMU geometry attributes\n"); 737 pr_debug("Invalid PAMU geometry attributes\n");
759 return -EINVAL; 738 return -EINVAL;
760 } 739 }
761 740
762 spin_lock_irqsave(&dma_domain->domain_lock, flags); 741 spin_lock_irqsave(&dma_domain->domain_lock, flags);
763 if (dma_domain->enabled) { 742 if (dma_domain->enabled) {
@@ -786,7 +765,7 @@ static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data)
786 spin_lock_irqsave(&dma_domain->domain_lock, flags); 765 spin_lock_irqsave(&dma_domain->domain_lock, flags);
787 766
788 memcpy(&dma_domain->dma_stash, stash_attr, 767 memcpy(&dma_domain->dma_stash, stash_attr,
789 sizeof(struct pamu_stash_attribute)); 768 sizeof(struct pamu_stash_attribute));
790 769
791 dma_domain->stash_id = get_stash_id(stash_attr->cache, 770 dma_domain->stash_id = get_stash_id(stash_attr->cache,
792 stash_attr->cpu); 771 stash_attr->cpu);
@@ -803,7 +782,7 @@ static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data)
803 return ret; 782 return ret;
804} 783}
805 784
806/* Configure domain dma state i.e. enable/disable DMA*/ 785/* Configure domain dma state i.e. enable/disable DMA */
807static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool enable) 786static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool enable)
808{ 787{
809 struct device_domain_info *info; 788 struct device_domain_info *info;
@@ -819,8 +798,7 @@ static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool en
819 } 798 }
820 799
821 dma_domain->enabled = enable; 800 dma_domain->enabled = enable;
822 list_for_each_entry(info, &dma_domain->devices, 801 list_for_each_entry(info, &dma_domain->devices, link) {
823 link) {
824 ret = (enable) ? pamu_enable_liodn(info->liodn) : 802 ret = (enable) ? pamu_enable_liodn(info->liodn) :
825 pamu_disable_liodn(info->liodn); 803 pamu_disable_liodn(info->liodn);
826 if (ret) 804 if (ret)
@@ -833,12 +811,11 @@ static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool en
833} 811}
834 812
835static int fsl_pamu_set_domain_attr(struct iommu_domain *domain, 813static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
836 enum iommu_attr attr_type, void *data) 814 enum iommu_attr attr_type, void *data)
837{ 815{
838 struct fsl_dma_domain *dma_domain = domain->priv; 816 struct fsl_dma_domain *dma_domain = domain->priv;
839 int ret = 0; 817 int ret = 0;
840 818
841
842 switch (attr_type) { 819 switch (attr_type) {
843 case DOMAIN_ATTR_GEOMETRY: 820 case DOMAIN_ATTR_GEOMETRY:
844 ret = configure_domain_geometry(domain, data); 821 ret = configure_domain_geometry(domain, data);
@@ -853,22 +830,21 @@ static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
853 pr_debug("Unsupported attribute type\n"); 830 pr_debug("Unsupported attribute type\n");
854 ret = -EINVAL; 831 ret = -EINVAL;
855 break; 832 break;
856 }; 833 }
857 834
858 return ret; 835 return ret;
859} 836}
860 837
861static int fsl_pamu_get_domain_attr(struct iommu_domain *domain, 838static int fsl_pamu_get_domain_attr(struct iommu_domain *domain,
862 enum iommu_attr attr_type, void *data) 839 enum iommu_attr attr_type, void *data)
863{ 840{
864 struct fsl_dma_domain *dma_domain = domain->priv; 841 struct fsl_dma_domain *dma_domain = domain->priv;
865 int ret = 0; 842 int ret = 0;
866 843
867
868 switch (attr_type) { 844 switch (attr_type) {
869 case DOMAIN_ATTR_FSL_PAMU_STASH: 845 case DOMAIN_ATTR_FSL_PAMU_STASH:
870 memcpy((struct pamu_stash_attribute *) data, &dma_domain->dma_stash, 846 memcpy(data, &dma_domain->dma_stash,
871 sizeof(struct pamu_stash_attribute)); 847 sizeof(struct pamu_stash_attribute));
872 break; 848 break;
873 case DOMAIN_ATTR_FSL_PAMU_ENABLE: 849 case DOMAIN_ATTR_FSL_PAMU_ENABLE:
874 *(int *)data = dma_domain->enabled; 850 *(int *)data = dma_domain->enabled;
@@ -880,7 +856,7 @@ static int fsl_pamu_get_domain_attr(struct iommu_domain *domain,
880 pr_debug("Unsupported attribute type\n"); 856 pr_debug("Unsupported attribute type\n");
881 ret = -EINVAL; 857 ret = -EINVAL;
882 break; 858 break;
883 }; 859 }
884 860
885 return ret; 861 return ret;
886} 862}
@@ -903,11 +879,8 @@ static bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
903 /* Check the PCI controller version number by reading BRR1 register */ 879 /* Check the PCI controller version number by reading BRR1 register */
904 version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2)); 880 version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2));
905 version &= PCI_FSL_BRR1_VER; 881 version &= PCI_FSL_BRR1_VER;
906 /* If PCI controller version is >= 0x204 we can partition endpoints*/ 882 /* If PCI controller version is >= 0x204 we can partition endpoints */
907 if (version >= 0x204) 883 return version >= 0x204;
908 return 1;
909
910 return 0;
911} 884}
912 885
913/* Get iommu group information from peer devices or devices on the parent bus */ 886/* Get iommu group information from peer devices or devices on the parent bus */
@@ -968,8 +941,9 @@ static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
968 if (pci_ctl->parent->iommu_group) { 941 if (pci_ctl->parent->iommu_group) {
969 group = get_device_iommu_group(pci_ctl->parent); 942 group = get_device_iommu_group(pci_ctl->parent);
970 iommu_group_remove_device(pci_ctl->parent); 943 iommu_group_remove_device(pci_ctl->parent);
971 } else 944 } else {
972 group = get_shared_pci_device_group(pdev); 945 group = get_shared_pci_device_group(pdev);
946 }
973 } 947 }
974 948
975 if (!group) 949 if (!group)
@@ -1055,11 +1029,12 @@ static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
1055 } 1029 }
1056 1030
1057 ret = pamu_set_domain_geometry(dma_domain, &domain->geometry, 1031 ret = pamu_set_domain_geometry(dma_domain, &domain->geometry,
1058 ((w_count > 1) ? w_count : 0)); 1032 w_count > 1 ? w_count : 0);
1059 if (!ret) { 1033 if (!ret) {
1060 kfree(dma_domain->win_arr); 1034 kfree(dma_domain->win_arr);
1061 dma_domain->win_arr = kzalloc(sizeof(struct dma_window) * 1035 dma_domain->win_arr = kcalloc(w_count,
1062 w_count, GFP_ATOMIC); 1036 sizeof(*dma_domain->win_arr),
1037 GFP_ATOMIC);
1063 if (!dma_domain->win_arr) { 1038 if (!dma_domain->win_arr) {
1064 spin_unlock_irqrestore(&dma_domain->domain_lock, flags); 1039 spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
1065 return -ENOMEM; 1040 return -ENOMEM;
@@ -1095,7 +1070,7 @@ static const struct iommu_ops fsl_pamu_ops = {
1095 .remove_device = fsl_pamu_remove_device, 1070 .remove_device = fsl_pamu_remove_device,
1096}; 1071};
1097 1072
1098int pamu_domain_init(void) 1073int __init pamu_domain_init(void)
1099{ 1074{
1100 int ret = 0; 1075 int ret = 0;
1101 1076
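The final fsl_pamu_domain.c hunks above swap an open-coded kzalloc(sizeof(struct dma_window) * w_count, ...) for kcalloc(), which returns NULL if the multiplication would overflow rather than quietly allocating a short buffer, and tag pamu_domain_init() as __init. A minimal sketch of the allocation idiom, with a stand-in window type (demo_window and demo_alloc_windows are illustrative, not part of the patch; GFP_ATOMIC mirrors the spinlock-held context in fsl_pamu_set_windows()):

#include <linux/slab.h>
#include <linux/types.h>

struct demo_window {	/* stand-in for struct dma_window */
	u64 paddr;
	u64 size;
};

static struct demo_window *demo_alloc_windows(u32 count)
{
	/*
	 * kcalloc(n, size, flags) fails cleanly when n * size would
	 * overflow; the kzalloc(size * n, flags) form it replaces
	 * performs no such check.
	 */
	return kcalloc(count, sizeof(struct demo_window), GFP_ATOMIC);
}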
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 40dfbc0444c0..ae4c1a854e57 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -71,6 +71,9 @@
71 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1)) 71 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
72#define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT) 72#define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
73 73
74/* IO virtual address start page frame number */
75#define IOVA_START_PFN (1)
76
74#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT) 77#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
75#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32)) 78#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
76#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64)) 79#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
@@ -485,7 +488,6 @@ __setup("intel_iommu=", intel_iommu_setup);
485 488
486static struct kmem_cache *iommu_domain_cache; 489static struct kmem_cache *iommu_domain_cache;
487static struct kmem_cache *iommu_devinfo_cache; 490static struct kmem_cache *iommu_devinfo_cache;
488static struct kmem_cache *iommu_iova_cache;
489 491
490static inline void *alloc_pgtable_page(int node) 492static inline void *alloc_pgtable_page(int node)
491{ 493{
@@ -523,16 +525,6 @@ static inline void free_devinfo_mem(void *vaddr)
523 kmem_cache_free(iommu_devinfo_cache, vaddr); 525 kmem_cache_free(iommu_devinfo_cache, vaddr);
524} 526}
525 527
526struct iova *alloc_iova_mem(void)
527{
528 return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
529}
530
531void free_iova_mem(struct iova *iova)
532{
533 kmem_cache_free(iommu_iova_cache, iova);
534}
535
536static inline int domain_type_is_vm(struct dmar_domain *domain) 528static inline int domain_type_is_vm(struct dmar_domain *domain)
537{ 529{
538 return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE; 530 return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
@@ -1643,7 +1635,8 @@ static int dmar_init_reserved_ranges(void)
1643 struct iova *iova; 1635 struct iova *iova;
1644 int i; 1636 int i;
1645 1637
1646 init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN); 1638 init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
1639 DMA_32BIT_PFN);
1647 1640
1648 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock, 1641 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1649 &reserved_rbtree_key); 1642 &reserved_rbtree_key);
@@ -1701,7 +1694,8 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
1701 int adjust_width, agaw; 1694 int adjust_width, agaw;
1702 unsigned long sagaw; 1695 unsigned long sagaw;
1703 1696
1704 init_iova_domain(&domain->iovad, DMA_32BIT_PFN); 1697 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
1698 DMA_32BIT_PFN);
1705 domain_reserve_special_ranges(domain); 1699 domain_reserve_special_ranges(domain);
1706 1700
1707 /* calculate AGAW */ 1701 /* calculate AGAW */
@@ -3427,23 +3421,6 @@ static inline int iommu_devinfo_cache_init(void)
3427 return ret; 3421 return ret;
3428} 3422}
3429 3423
3430static inline int iommu_iova_cache_init(void)
3431{
3432 int ret = 0;
3433
3434 iommu_iova_cache = kmem_cache_create("iommu_iova",
3435 sizeof(struct iova),
3436 0,
3437 SLAB_HWCACHE_ALIGN,
3438 NULL);
3439 if (!iommu_iova_cache) {
3440 printk(KERN_ERR "Couldn't create iova cache\n");
3441 ret = -ENOMEM;
3442 }
3443
3444 return ret;
3445}
3446
3447static int __init iommu_init_mempool(void) 3424static int __init iommu_init_mempool(void)
3448{ 3425{
3449 int ret; 3426 int ret;
@@ -3461,7 +3438,7 @@ static int __init iommu_init_mempool(void)
3461 3438
3462 kmem_cache_destroy(iommu_domain_cache); 3439 kmem_cache_destroy(iommu_domain_cache);
3463domain_error: 3440domain_error:
3464 kmem_cache_destroy(iommu_iova_cache); 3441 iommu_iova_cache_destroy();
3465 3442
3466 return -ENOMEM; 3443 return -ENOMEM;
3467} 3444}
@@ -3470,8 +3447,7 @@ static void __init iommu_exit_mempool(void)
3470{ 3447{
3471 kmem_cache_destroy(iommu_devinfo_cache); 3448 kmem_cache_destroy(iommu_devinfo_cache);
3472 kmem_cache_destroy(iommu_domain_cache); 3449 kmem_cache_destroy(iommu_domain_cache);
3473 kmem_cache_destroy(iommu_iova_cache); 3450 iommu_iova_cache_destroy();
3474
3475} 3451}
3476 3452
3477static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev) 3453static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
@@ -4342,7 +4318,8 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
4342{ 4318{
4343 int adjust_width; 4319 int adjust_width;
4344 4320
4345 init_iova_domain(&domain->iovad, DMA_32BIT_PFN); 4321 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
4322 DMA_32BIT_PFN);
4346 domain_reserve_special_ranges(domain); 4323 domain_reserve_special_ranges(domain);
4347 4324
4348 /* calculate AGAW */ 4325 /* calculate AGAW */
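With the intel-iommu.c hunks above, the iova kmem_cache moves out of VT-d into the shared iova code (see the iova.c hunks below), and every init_iova_domain() caller now passes the page granule and start PFN explicitly. A hedged sketch of the resulting cache lifecycle in a driver's mempool setup, assuming the init/destroy declarations this series adds to include/linux/iova.h (demo_* names are illustrative; section attributes and driver-private caches trimmed):

#include <linux/iova.h>

static int demo_init_mempool(void)
{
	if (iommu_iova_cache_init())
		return -ENOMEM;

	/* ... create driver-private kmem_caches here ... */
	return 0;
}

static void demo_exit_mempool(void)
{
	/* ... destroy driver-private kmem_caches first ... */
	iommu_iova_cache_destroy();
}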
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
new file mode 100644
index 000000000000..5a500edf00cc
--- /dev/null
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -0,0 +1,986 @@
1/*
2 * CPU-agnostic ARM page table allocator.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 *
16 * Copyright (C) 2014 ARM Limited
17 *
18 * Author: Will Deacon <will.deacon@arm.com>
19 */
20
21#define pr_fmt(fmt) "arm-lpae io-pgtable: " fmt
22
23#include <linux/iommu.h>
24#include <linux/kernel.h>
25#include <linux/sizes.h>
26#include <linux/slab.h>
27#include <linux/types.h>
28
29#include "io-pgtable.h"
30
31#define ARM_LPAE_MAX_ADDR_BITS 48
32#define ARM_LPAE_S2_MAX_CONCAT_PAGES 16
33#define ARM_LPAE_MAX_LEVELS 4
34
35/* Struct accessors */
36#define io_pgtable_to_data(x) \
37 container_of((x), struct arm_lpae_io_pgtable, iop)
38
39#define io_pgtable_ops_to_pgtable(x) \
40 container_of((x), struct io_pgtable, ops)
41
42#define io_pgtable_ops_to_data(x) \
43 io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
44
45/*
46 * For consistency with the architecture, we always consider
47 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >=0
48 */
49#define ARM_LPAE_START_LVL(d) (ARM_LPAE_MAX_LEVELS - (d)->levels)
50
51/*
52 * Calculate the right shift amount to get to the portion describing level l
53 * in a virtual address mapped by the pagetable in d.
54 */
55#define ARM_LPAE_LVL_SHIFT(l,d) \
56 ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \
57 * (d)->bits_per_level) + (d)->pg_shift)
58
59#define ARM_LPAE_PAGES_PER_PGD(d) ((d)->pgd_size >> (d)->pg_shift)
60
61/*
62 * Calculate the index at level l used to map virtual address a using the
63 * pagetable in d.
64 */
65#define ARM_LPAE_PGD_IDX(l,d) \
66 ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)
67
68#define ARM_LPAE_LVL_IDX(a,l,d) \
69 (((a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \
70 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
71
72/* Calculate the block/page mapping size at level l for pagetable in d. */
73#define ARM_LPAE_BLOCK_SIZE(l,d) \
74 (1 << (ilog2(sizeof(arm_lpae_iopte)) + \
75 ((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
76
77/* Page table bits */
78#define ARM_LPAE_PTE_TYPE_SHIFT 0
79#define ARM_LPAE_PTE_TYPE_MASK 0x3
80
81#define ARM_LPAE_PTE_TYPE_BLOCK 1
82#define ARM_LPAE_PTE_TYPE_TABLE 3
83#define ARM_LPAE_PTE_TYPE_PAGE 3
84
85#define ARM_LPAE_PTE_NSTABLE (((arm_lpae_iopte)1) << 63)
86#define ARM_LPAE_PTE_XN (((arm_lpae_iopte)3) << 53)
87#define ARM_LPAE_PTE_AF (((arm_lpae_iopte)1) << 10)
88#define ARM_LPAE_PTE_SH_NS (((arm_lpae_iopte)0) << 8)
89#define ARM_LPAE_PTE_SH_OS (((arm_lpae_iopte)2) << 8)
90#define ARM_LPAE_PTE_SH_IS (((arm_lpae_iopte)3) << 8)
91#define ARM_LPAE_PTE_NS (((arm_lpae_iopte)1) << 5)
92#define ARM_LPAE_PTE_VALID (((arm_lpae_iopte)1) << 0)
93
94#define ARM_LPAE_PTE_ATTR_LO_MASK (((arm_lpae_iopte)0x3ff) << 2)
95/* Ignore the contiguous bit for block splitting */
96#define ARM_LPAE_PTE_ATTR_HI_MASK (((arm_lpae_iopte)6) << 52)
97#define ARM_LPAE_PTE_ATTR_MASK (ARM_LPAE_PTE_ATTR_LO_MASK | \
98 ARM_LPAE_PTE_ATTR_HI_MASK)
99
100/* Stage-1 PTE */
101#define ARM_LPAE_PTE_AP_UNPRIV (((arm_lpae_iopte)1) << 6)
102#define ARM_LPAE_PTE_AP_RDONLY (((arm_lpae_iopte)2) << 6)
103#define ARM_LPAE_PTE_ATTRINDX_SHIFT 2
104#define ARM_LPAE_PTE_nG (((arm_lpae_iopte)1) << 11)
105
106/* Stage-2 PTE */
107#define ARM_LPAE_PTE_HAP_FAULT (((arm_lpae_iopte)0) << 6)
108#define ARM_LPAE_PTE_HAP_READ (((arm_lpae_iopte)1) << 6)
109#define ARM_LPAE_PTE_HAP_WRITE (((arm_lpae_iopte)2) << 6)
110#define ARM_LPAE_PTE_MEMATTR_OIWB (((arm_lpae_iopte)0xf) << 2)
111#define ARM_LPAE_PTE_MEMATTR_NC (((arm_lpae_iopte)0x5) << 2)
112#define ARM_LPAE_PTE_MEMATTR_DEV (((arm_lpae_iopte)0x1) << 2)
113
114/* Register bits */
115#define ARM_32_LPAE_TCR_EAE (1 << 31)
116#define ARM_64_LPAE_S2_TCR_RES1 (1 << 31)
117
118#define ARM_LPAE_TCR_TG0_4K (0 << 14)
119#define ARM_LPAE_TCR_TG0_64K (1 << 14)
120#define ARM_LPAE_TCR_TG0_16K (2 << 14)
121
122#define ARM_LPAE_TCR_SH0_SHIFT 12
123#define ARM_LPAE_TCR_SH0_MASK 0x3
124#define ARM_LPAE_TCR_SH_NS 0
125#define ARM_LPAE_TCR_SH_OS 2
126#define ARM_LPAE_TCR_SH_IS 3
127
128#define ARM_LPAE_TCR_ORGN0_SHIFT 10
129#define ARM_LPAE_TCR_IRGN0_SHIFT 8
130#define ARM_LPAE_TCR_RGN_MASK 0x3
131#define ARM_LPAE_TCR_RGN_NC 0
132#define ARM_LPAE_TCR_RGN_WBWA 1
133#define ARM_LPAE_TCR_RGN_WT 2
134#define ARM_LPAE_TCR_RGN_WB 3
135
136#define ARM_LPAE_TCR_SL0_SHIFT 6
137#define ARM_LPAE_TCR_SL0_MASK 0x3
138
139#define ARM_LPAE_TCR_T0SZ_SHIFT 0
140#define ARM_LPAE_TCR_SZ_MASK 0xf
141
142#define ARM_LPAE_TCR_PS_SHIFT 16
143#define ARM_LPAE_TCR_PS_MASK 0x7
144
145#define ARM_LPAE_TCR_IPS_SHIFT 32
146#define ARM_LPAE_TCR_IPS_MASK 0x7
147
148#define ARM_LPAE_TCR_PS_32_BIT 0x0ULL
149#define ARM_LPAE_TCR_PS_36_BIT 0x1ULL
150#define ARM_LPAE_TCR_PS_40_BIT 0x2ULL
151#define ARM_LPAE_TCR_PS_42_BIT 0x3ULL
152#define ARM_LPAE_TCR_PS_44_BIT 0x4ULL
153#define ARM_LPAE_TCR_PS_48_BIT 0x5ULL
154
155#define ARM_LPAE_MAIR_ATTR_SHIFT(n) ((n) << 3)
156#define ARM_LPAE_MAIR_ATTR_MASK 0xff
157#define ARM_LPAE_MAIR_ATTR_DEVICE 0x04
158#define ARM_LPAE_MAIR_ATTR_NC 0x44
159#define ARM_LPAE_MAIR_ATTR_WBRWA 0xff
160#define ARM_LPAE_MAIR_ATTR_IDX_NC 0
161#define ARM_LPAE_MAIR_ATTR_IDX_CACHE 1
162#define ARM_LPAE_MAIR_ATTR_IDX_DEV 2
163
164/* IOPTE accessors */
165#define iopte_deref(pte,d) \
166 (__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1) \
167 & ~((1ULL << (d)->pg_shift) - 1)))
168
169#define iopte_type(pte,l) \
170 (((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)
171
172#define iopte_prot(pte) ((pte) & ARM_LPAE_PTE_ATTR_MASK)
173
174#define iopte_leaf(pte,l) \
175 (l == (ARM_LPAE_MAX_LEVELS - 1) ? \
176 (iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) : \
177 (iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK))
178
179#define iopte_to_pfn(pte,d) \
180 (((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift)
181
182#define pfn_to_iopte(pfn,d) \
183 (((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1))
184
185struct arm_lpae_io_pgtable {
186 struct io_pgtable iop;
187
188 int levels;
189 size_t pgd_size;
190 unsigned long pg_shift;
191 unsigned long bits_per_level;
192
193 void *pgd;
194};
195
196typedef u64 arm_lpae_iopte;
197
198static bool selftest_running = false;
199
200static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
201 unsigned long iova, phys_addr_t paddr,
202 arm_lpae_iopte prot, int lvl,
203 arm_lpae_iopte *ptep)
204{
205 arm_lpae_iopte pte = prot;
206
207 /* We require an unmap first */
208 if (iopte_leaf(*ptep, lvl)) {
209 WARN_ON(!selftest_running);
210 return -EEXIST;
211 }
212
213 if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
214 pte |= ARM_LPAE_PTE_NS;
215
216 if (lvl == ARM_LPAE_MAX_LEVELS - 1)
217 pte |= ARM_LPAE_PTE_TYPE_PAGE;
218 else
219 pte |= ARM_LPAE_PTE_TYPE_BLOCK;
220
221 pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
222 pte |= pfn_to_iopte(paddr >> data->pg_shift, data);
223
224 *ptep = pte;
225 data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), data->iop.cookie);
226 return 0;
227}
228
229static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
230 phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
231 int lvl, arm_lpae_iopte *ptep)
232{
233 arm_lpae_iopte *cptep, pte;
234 void *cookie = data->iop.cookie;
235 size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
236
237 /* Find our entry at the current level */
238 ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
239
240 /* If we can install a leaf entry at this level, then do so */
241 if (size == block_size && (size & data->iop.cfg.pgsize_bitmap))
242 return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);
243
244 /* We can't allocate tables at the final level */
245 if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
246 return -EINVAL;
247
248 /* Grab a pointer to the next level */
249 pte = *ptep;
250 if (!pte) {
251 cptep = alloc_pages_exact(1UL << data->pg_shift,
252 GFP_ATOMIC | __GFP_ZERO);
253 if (!cptep)
254 return -ENOMEM;
255
256 data->iop.cfg.tlb->flush_pgtable(cptep, 1UL << data->pg_shift,
257 cookie);
258 pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE;
259 if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
260 pte |= ARM_LPAE_PTE_NSTABLE;
261 *ptep = pte;
262 data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
263 } else {
264 cptep = iopte_deref(pte, data);
265 }
266
267 /* Rinse, repeat */
268 return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
269}
270
271static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
272 int prot)
273{
274 arm_lpae_iopte pte;
275
276 if (data->iop.fmt == ARM_64_LPAE_S1 ||
277 data->iop.fmt == ARM_32_LPAE_S1) {
278 pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG;
279
280 if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
281 pte |= ARM_LPAE_PTE_AP_RDONLY;
282
283 if (prot & IOMMU_CACHE)
284 pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
285 << ARM_LPAE_PTE_ATTRINDX_SHIFT);
286 } else {
287 pte = ARM_LPAE_PTE_HAP_FAULT;
288 if (prot & IOMMU_READ)
289 pte |= ARM_LPAE_PTE_HAP_READ;
290 if (prot & IOMMU_WRITE)
291 pte |= ARM_LPAE_PTE_HAP_WRITE;
292 if (prot & IOMMU_CACHE)
293 pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
294 else
295 pte |= ARM_LPAE_PTE_MEMATTR_NC;
296 }
297
298 if (prot & IOMMU_NOEXEC)
299 pte |= ARM_LPAE_PTE_XN;
300
301 return pte;
302}
303
304static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
305 phys_addr_t paddr, size_t size, int iommu_prot)
306{
307 struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
308 arm_lpae_iopte *ptep = data->pgd;
309 int lvl = ARM_LPAE_START_LVL(data);
310 arm_lpae_iopte prot;
311
312 /* If no access, then nothing to do */
313 if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
314 return 0;
315
316 prot = arm_lpae_prot_to_pte(data, iommu_prot);
317 return __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
318}
319
320static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
321 arm_lpae_iopte *ptep)
322{
323 arm_lpae_iopte *start, *end;
324 unsigned long table_size;
325
326 /* Only leaf entries at the last level */
327 if (lvl == ARM_LPAE_MAX_LEVELS - 1)
328 return;
329
330 if (lvl == ARM_LPAE_START_LVL(data))
331 table_size = data->pgd_size;
332 else
333 table_size = 1UL << data->pg_shift;
334
335 start = ptep;
336 end = (void *)ptep + table_size;
337
338 while (ptep != end) {
339 arm_lpae_iopte pte = *ptep++;
340
341 if (!pte || iopte_leaf(pte, lvl))
342 continue;
343
344 __arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
345 }
346
347 free_pages_exact(start, table_size);
348}
349
350static void arm_lpae_free_pgtable(struct io_pgtable *iop)
351{
352 struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
353
354 __arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
355 kfree(data);
356}
357
358static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
359 unsigned long iova, size_t size,
360 arm_lpae_iopte prot, int lvl,
361 arm_lpae_iopte *ptep, size_t blk_size)
362{
363 unsigned long blk_start, blk_end;
364 phys_addr_t blk_paddr;
365 arm_lpae_iopte table = 0;
366 void *cookie = data->iop.cookie;
367 const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
368
369 blk_start = iova & ~(blk_size - 1);
370 blk_end = blk_start + blk_size;
371 blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift;
372
373 for (; blk_start < blk_end; blk_start += size, blk_paddr += size) {
374 arm_lpae_iopte *tablep;
375
376 /* Unmap! */
377 if (blk_start == iova)
378 continue;
379
380 /* __arm_lpae_map expects a pointer to the start of the table */
381 tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data);
382 if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl,
383 tablep) < 0) {
384 if (table) {
385 /* Free the table we allocated */
386 tablep = iopte_deref(table, data);
387 __arm_lpae_free_pgtable(data, lvl + 1, tablep);
388 }
389 return 0; /* Bytes unmapped */
390 }
391 }
392
393 *ptep = table;
394 tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
395 iova &= ~(blk_size - 1);
396 tlb->tlb_add_flush(iova, blk_size, true, cookie);
397 return size;
398}
399
400static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
401 unsigned long iova, size_t size, int lvl,
402 arm_lpae_iopte *ptep)
403{
404 arm_lpae_iopte pte;
405 const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
406 void *cookie = data->iop.cookie;
407 size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
408
409 ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
410 pte = *ptep;
411
412 /* Something went horribly wrong and we ran out of page table */
413 if (WARN_ON(!pte || (lvl == ARM_LPAE_MAX_LEVELS)))
414 return 0;
415
416 /* If the size matches this level, we're in the right place */
417 if (size == blk_size) {
418 *ptep = 0;
419 tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
420
421 if (!iopte_leaf(pte, lvl)) {
422 /* Also flush any partial walks */
423 tlb->tlb_add_flush(iova, size, false, cookie);
424 tlb->tlb_sync(data->iop.cookie);
425 ptep = iopte_deref(pte, data);
426 __arm_lpae_free_pgtable(data, lvl + 1, ptep);
427 } else {
428 tlb->tlb_add_flush(iova, size, true, cookie);
429 }
430
431 return size;
432 } else if (iopte_leaf(pte, lvl)) {
433 /*
434 * Insert a table at the next level to map the old region,
435 * minus the part we want to unmap
436 */
437 return arm_lpae_split_blk_unmap(data, iova, size,
438 iopte_prot(pte), lvl, ptep,
439 blk_size);
440 }
441
442 /* Keep on walkin' */
443 ptep = iopte_deref(pte, data);
444 return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
445}
446
447static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
448 size_t size)
449{
450 size_t unmapped;
451 struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
452 struct io_pgtable *iop = &data->iop;
453 arm_lpae_iopte *ptep = data->pgd;
454 int lvl = ARM_LPAE_START_LVL(data);
455
456 unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
457 if (unmapped)
458 iop->cfg.tlb->tlb_sync(iop->cookie);
459
460 return unmapped;
461}
462
463static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
464 unsigned long iova)
465{
466 struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
467 arm_lpae_iopte pte, *ptep = data->pgd;
468 int lvl = ARM_LPAE_START_LVL(data);
469
470 do {
471 /* Valid IOPTE pointer? */
472 if (!ptep)
473 return 0;
474
475 /* Grab the IOPTE we're interested in */
476 pte = *(ptep + ARM_LPAE_LVL_IDX(iova, lvl, data));
477
478 /* Valid entry? */
479 if (!pte)
480 return 0;
481
482 /* Leaf entry? */
483 if (iopte_leaf(pte,lvl))
484 goto found_translation;
485
486 /* Take it to the next level */
487 ptep = iopte_deref(pte, data);
488 } while (++lvl < ARM_LPAE_MAX_LEVELS);
489
490 /* Ran out of page tables to walk */
491 return 0;
492
493found_translation:
494 iova &= ((1 << data->pg_shift) - 1);
495 return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova;
496}
497
498static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
499{
500 unsigned long granule;
501
502 /*
503 * We need to restrict the supported page sizes to match the
504 * translation regime for a particular granule. Aim to match
505 * the CPU page size if possible, otherwise prefer smaller sizes.
506 * While we're at it, restrict the block sizes to match the
507 * chosen granule.
508 */
509 if (cfg->pgsize_bitmap & PAGE_SIZE)
510 granule = PAGE_SIZE;
511 else if (cfg->pgsize_bitmap & ~PAGE_MASK)
512 granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
513 else if (cfg->pgsize_bitmap & PAGE_MASK)
514 granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
515 else
516 granule = 0;
517
518 switch (granule) {
519 case SZ_4K:
520 cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
521 break;
522 case SZ_16K:
523 cfg->pgsize_bitmap &= (SZ_16K | SZ_32M);
524 break;
525 case SZ_64K:
526 cfg->pgsize_bitmap &= (SZ_64K | SZ_512M);
527 break;
528 default:
529 cfg->pgsize_bitmap = 0;
530 }
531}
532
533static struct arm_lpae_io_pgtable *
534arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
535{
536 unsigned long va_bits, pgd_bits;
537 struct arm_lpae_io_pgtable *data;
538
539 arm_lpae_restrict_pgsizes(cfg);
540
541 if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
542 return NULL;
543
544 if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
545 return NULL;
546
547 if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
548 return NULL;
549
550 data = kmalloc(sizeof(*data), GFP_KERNEL);
551 if (!data)
552 return NULL;
553
554 data->pg_shift = __ffs(cfg->pgsize_bitmap);
555 data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));
556
557 va_bits = cfg->ias - data->pg_shift;
558 data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
559
560 /* Calculate the actual size of our pgd (without concatenation) */
561 pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
562 data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));
563
564 data->iop.ops = (struct io_pgtable_ops) {
565 .map = arm_lpae_map,
566 .unmap = arm_lpae_unmap,
567 .iova_to_phys = arm_lpae_iova_to_phys,
568 };
569
570 return data;
571}
572
573static struct io_pgtable *
574arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
575{
576 u64 reg;
577 struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);
578
579 if (!data)
580 return NULL;
581
582 /* TCR */
583 reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
584 (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
585 (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
586
587 switch (1 << data->pg_shift) {
588 case SZ_4K:
589 reg |= ARM_LPAE_TCR_TG0_4K;
590 break;
591 case SZ_16K:
592 reg |= ARM_LPAE_TCR_TG0_16K;
593 break;
594 case SZ_64K:
595 reg |= ARM_LPAE_TCR_TG0_64K;
596 break;
597 }
598
599 switch (cfg->oas) {
600 case 32:
601 reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
602 break;
603 case 36:
604 reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
605 break;
606 case 40:
607 reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
608 break;
609 case 42:
610 reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
611 break;
612 case 44:
613 reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
614 break;
615 case 48:
616 reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
617 break;
618 default:
619 goto out_free_data;
620 }
621
622 reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
623 cfg->arm_lpae_s1_cfg.tcr = reg;
624
625 /* MAIRs */
626 reg = (ARM_LPAE_MAIR_ATTR_NC
627 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
628 (ARM_LPAE_MAIR_ATTR_WBRWA
629 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
630 (ARM_LPAE_MAIR_ATTR_DEVICE
631 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));
632
633 cfg->arm_lpae_s1_cfg.mair[0] = reg;
634 cfg->arm_lpae_s1_cfg.mair[1] = 0;
635
636 /* Looking good; allocate a pgd */
637 data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO);
638 if (!data->pgd)
639 goto out_free_data;
640
641 cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);
642
643 /* TTBRs */
644 cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
645 cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
646 return &data->iop;
647
648out_free_data:
649 kfree(data);
650 return NULL;
651}
652
653static struct io_pgtable *
654arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
655{
656 u64 reg, sl;
657 struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);
658
659 if (!data)
660 return NULL;
661
662 /*
663 * Concatenate PGDs at level 1 if possible in order to reduce
664 * the depth of the stage-2 walk.
665 */
666 if (data->levels == ARM_LPAE_MAX_LEVELS) {
667 unsigned long pgd_pages;
668
669 pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
670 if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
671 data->pgd_size = pgd_pages << data->pg_shift;
672 data->levels--;
673 }
674 }
675
676 /* VTCR */
677 reg = ARM_64_LPAE_S2_TCR_RES1 |
678 (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
679 (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
680 (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
681
682 sl = ARM_LPAE_START_LVL(data);
683
684 switch (1 << data->pg_shift) {
685 case SZ_4K:
686 reg |= ARM_LPAE_TCR_TG0_4K;
687 sl++; /* SL0 format is different for 4K granule size */
688 break;
689 case SZ_16K:
690 reg |= ARM_LPAE_TCR_TG0_16K;
691 break;
692 case SZ_64K:
693 reg |= ARM_LPAE_TCR_TG0_64K;
694 break;
695 }
696
697 switch (cfg->oas) {
698 case 32:
699 reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
700 break;
701 case 36:
702 reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
703 break;
704 case 40:
705 reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
706 break;
707 case 42:
708 reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
709 break;
710 case 44:
711 reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
712 break;
713 case 48:
714 reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
715 break;
716 default:
717 goto out_free_data;
718 }
719
720 reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
721 reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
722 cfg->arm_lpae_s2_cfg.vtcr = reg;
723
724 /* Allocate pgd pages */
725 data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO);
726 if (!data->pgd)
727 goto out_free_data;
728
729 cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);
730
731 /* VTTBR */
732 cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
733 return &data->iop;
734
735out_free_data:
736 kfree(data);
737 return NULL;
738}
739
740static struct io_pgtable *
741arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
742{
743 struct io_pgtable *iop;
744
745 if (cfg->ias > 32 || cfg->oas > 40)
746 return NULL;
747
748 cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
749 iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
750 if (iop) {
751 cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
752 cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
753 }
754
755 return iop;
756}
757
758static struct io_pgtable *
759arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
760{
761 struct io_pgtable *iop;
762
763 if (cfg->ias > 40 || cfg->oas > 40)
764 return NULL;
765
766 cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
767 iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
768 if (iop)
769 cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;
770
771 return iop;
772}
773
774struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
775 .alloc = arm_64_lpae_alloc_pgtable_s1,
776 .free = arm_lpae_free_pgtable,
777};
778
779struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
780 .alloc = arm_64_lpae_alloc_pgtable_s2,
781 .free = arm_lpae_free_pgtable,
782};
783
784struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
785 .alloc = arm_32_lpae_alloc_pgtable_s1,
786 .free = arm_lpae_free_pgtable,
787};
788
789struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
790 .alloc = arm_32_lpae_alloc_pgtable_s2,
791 .free = arm_lpae_free_pgtable,
792};
793
794#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST
795
796static struct io_pgtable_cfg *cfg_cookie;
797
798static void dummy_tlb_flush_all(void *cookie)
799{
800 WARN_ON(cookie != cfg_cookie);
801}
802
803static void dummy_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
804 void *cookie)
805{
806 WARN_ON(cookie != cfg_cookie);
807 WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
808}
809
810static void dummy_tlb_sync(void *cookie)
811{
812 WARN_ON(cookie != cfg_cookie);
813}
814
815static void dummy_flush_pgtable(void *ptr, size_t size, void *cookie)
816{
817 WARN_ON(cookie != cfg_cookie);
818}
819
820static struct iommu_gather_ops dummy_tlb_ops __initdata = {
821 .tlb_flush_all = dummy_tlb_flush_all,
822 .tlb_add_flush = dummy_tlb_add_flush,
823 .tlb_sync = dummy_tlb_sync,
824 .flush_pgtable = dummy_flush_pgtable,
825};
826
827static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
828{
829 struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
830 struct io_pgtable_cfg *cfg = &data->iop.cfg;
831
832 pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
833 cfg->pgsize_bitmap, cfg->ias);
834 pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
835 data->levels, data->pgd_size, data->pg_shift,
836 data->bits_per_level, data->pgd);
837}
838
839#define __FAIL(ops, i) ({ \
840 WARN(1, "selftest: test failed for fmt idx %d\n", (i)); \
841 arm_lpae_dump_ops(ops); \
842 selftest_running = false; \
843 -EFAULT; \
844})
845
846static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
847{
848 static const enum io_pgtable_fmt fmts[] = {
849 ARM_64_LPAE_S1,
850 ARM_64_LPAE_S2,
851 };
852
853 int i, j;
854 unsigned long iova;
855 size_t size;
856 struct io_pgtable_ops *ops;
857
858 selftest_running = true;
859
860 for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
861 cfg_cookie = cfg;
862 ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
863 if (!ops) {
864 pr_err("selftest: failed to allocate io pgtable ops\n");
865 return -ENOMEM;
866 }
867
868 /*
869 * Initial sanity checks.
870 * Empty page tables shouldn't provide any translations.
871 */
872 if (ops->iova_to_phys(ops, 42))
873 return __FAIL(ops, i);
874
875 if (ops->iova_to_phys(ops, SZ_1G + 42))
876 return __FAIL(ops, i);
877
878 if (ops->iova_to_phys(ops, SZ_2G + 42))
879 return __FAIL(ops, i);
880
881 /*
882 * Distinct mappings of different granule sizes.
883 */
884 iova = 0;
885 j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
886 while (j != BITS_PER_LONG) {
887 size = 1UL << j;
888
889 if (ops->map(ops, iova, iova, size, IOMMU_READ |
890 IOMMU_WRITE |
891 IOMMU_NOEXEC |
892 IOMMU_CACHE))
893 return __FAIL(ops, i);
894
895 /* Overlapping mappings */
896 if (!ops->map(ops, iova, iova + size, size,
897 IOMMU_READ | IOMMU_NOEXEC))
898 return __FAIL(ops, i);
899
900 if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
901 return __FAIL(ops, i);
902
903 iova += SZ_1G;
904 j++;
905 j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
906 }
907
908 /* Partial unmap */
909 size = 1UL << __ffs(cfg->pgsize_bitmap);
910 if (ops->unmap(ops, SZ_1G + size, size) != size)
911 return __FAIL(ops, i);
912
913 /* Remap of partial unmap */
914 if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
915 return __FAIL(ops, i);
916
917 if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
918 return __FAIL(ops, i);
919
920 /* Full unmap */
921 iova = 0;
922 j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
923 while (j != BITS_PER_LONG) {
924 size = 1UL << j;
925
926 if (ops->unmap(ops, iova, size) != size)
927 return __FAIL(ops, i);
928
929 if (ops->iova_to_phys(ops, iova + 42))
930 return __FAIL(ops, i);
931
932 /* Remap full block */
933 if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
934 return __FAIL(ops, i);
935
936 if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
937 return __FAIL(ops, i);
938
939 iova += SZ_1G;
940 j++;
941 j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
942 }
943
944 free_io_pgtable_ops(ops);
945 }
946
947 selftest_running = false;
948 return 0;
949}
950
951static int __init arm_lpae_do_selftests(void)
952{
953 static const unsigned long pgsize[] = {
954 SZ_4K | SZ_2M | SZ_1G,
955 SZ_16K | SZ_32M,
956 SZ_64K | SZ_512M,
957 };
958
959 static const unsigned int ias[] = {
960 32, 36, 40, 42, 44, 48,
961 };
962
963 int i, j, pass = 0, fail = 0;
964 struct io_pgtable_cfg cfg = {
965 .tlb = &dummy_tlb_ops,
966 .oas = 48,
967 };
968
969 for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
970 for (j = 0; j < ARRAY_SIZE(ias); ++j) {
971 cfg.pgsize_bitmap = pgsize[i];
972 cfg.ias = ias[j];
973 pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
974 pgsize[i], ias[j]);
975 if (arm_lpae_run_tests(&cfg))
976 fail++;
977 else
978 pass++;
979 }
980 }
981
982 pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
983 return fail ? -EFAULT : 0;
984}
985subsys_initcall(arm_lpae_do_selftests);
986#endif
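To make the level arithmetic in arm_lpae_alloc_pgtable() concrete: with a 4K granule (pg_shift = 12) each table of 8-byte PTEs resolves 12 - 3 = 9 bits, so a 48-bit IAS leaves 36 VA bits above the page offset and needs ceil(36 / 9) = 4 levels, with a single-page pgd. A small userspace sketch of the same arithmetic (plain C stand-ins for the kernel's __ffs()/ilog2(); the chosen granule and IAS are assumptions for illustration):

#include <stdio.h>

int main(void)
{
	unsigned long pg_shift = 12;		/* 4K granule */
	unsigned long pte_shift = 3;		/* log2(sizeof(u64)) */
	unsigned long ias = 48;			/* input address bits */

	unsigned long bits_per_level = pg_shift - pte_shift;		/* 9 */
	unsigned long va_bits = ias - pg_shift;				/* 36 */
	unsigned long levels =
		(va_bits + bits_per_level - 1) / bits_per_level;	/* 4 */
	unsigned long pgd_bits =
		va_bits - bits_per_level * (levels - 1);		/* 9 */
	unsigned long pgd_size = 1UL << (pgd_bits + pte_shift);	/* 4096 */

	printf("levels=%lu pgd_size=%lu bytes\n", levels, pgd_size);
	return 0;
}

For the stage-2 path, the same numbers with ias = 40 give 4 levels and a 16-byte pgd; the concatenation logic in arm_64_lpae_alloc_pgtable_s2() then widens that to a two-page (8K) level-1 pgd and drops to a 3-level walk.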
diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c
new file mode 100644
index 000000000000..6436fe24bc2f
--- /dev/null
+++ b/drivers/iommu/io-pgtable.c
@@ -0,0 +1,82 @@
1/*
2 * Generic page table allocator for IOMMUs.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 *
16 * Copyright (C) 2014 ARM Limited
17 *
18 * Author: Will Deacon <will.deacon@arm.com>
19 */
20
21#include <linux/bug.h>
22#include <linux/kernel.h>
23#include <linux/types.h>
24
25#include "io-pgtable.h"
26
27extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
28extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
29extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
30extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
31
32static const struct io_pgtable_init_fns *
33io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] =
34{
35#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE
36 [ARM_32_LPAE_S1] = &io_pgtable_arm_32_lpae_s1_init_fns,
37 [ARM_32_LPAE_S2] = &io_pgtable_arm_32_lpae_s2_init_fns,
38 [ARM_64_LPAE_S1] = &io_pgtable_arm_64_lpae_s1_init_fns,
39 [ARM_64_LPAE_S2] = &io_pgtable_arm_64_lpae_s2_init_fns,
40#endif
41};
42
43struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
44 struct io_pgtable_cfg *cfg,
45 void *cookie)
46{
47 struct io_pgtable *iop;
48 const struct io_pgtable_init_fns *fns;
49
50 if (fmt >= IO_PGTABLE_NUM_FMTS)
51 return NULL;
52
53 fns = io_pgtable_init_table[fmt];
54 if (!fns)
55 return NULL;
56
57 iop = fns->alloc(cfg, cookie);
58 if (!iop)
59 return NULL;
60
61 iop->fmt = fmt;
62 iop->cookie = cookie;
63 iop->cfg = *cfg;
64
65 return &iop->ops;
66}
67
68/*
69 * It is the IOMMU driver's responsibility to ensure that the page table
70 * is no longer accessible to the walker by this point.
71 */
72void free_io_pgtable_ops(struct io_pgtable_ops *ops)
73{
74 struct io_pgtable *iop;
75
76 if (!ops)
77 return;
78
79 iop = container_of(ops, struct io_pgtable, ops);
80 iop->cfg.tlb->tlb_flush_all(iop->cookie);
81 io_pgtable_init_table[iop->fmt]->free(iop);
82}
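alloc_io_pgtable_ops() dispatches purely through io_pgtable_init_table, so a new format only needs an enum value and a table entry. A hedged sketch of the wiring (HYPOTHETICAL_FMT and the hypothetical_* symbols are invented for illustration, not part of this patch):

/* io-pgtable.h: add HYPOTHETICAL_FMT before IO_PGTABLE_NUM_FMTS. */

/* In the format's implementation file: */
struct io_pgtable_init_fns io_pgtable_hypothetical_init_fns = {
	.alloc	= hypothetical_alloc,	/* returns struct io_pgtable * */
	.free	= hypothetical_free,
};

/* io-pgtable.c: declare and register alongside the LPAE entries. */
extern struct io_pgtable_init_fns io_pgtable_hypothetical_init_fns;

static const struct io_pgtable_init_fns *
io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] = {
	/* ... existing LPAE entries ... */
	[HYPOTHETICAL_FMT] = &io_pgtable_hypothetical_init_fns,
};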
diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
new file mode 100644
index 000000000000..10e32f69c668
--- /dev/null
+++ b/drivers/iommu/io-pgtable.h
@@ -0,0 +1,143 @@
1#ifndef __IO_PGTABLE_H
2#define __IO_PGTABLE_H
3
4/*
5 * Public API for use by IOMMU drivers
6 */
7enum io_pgtable_fmt {
8 ARM_32_LPAE_S1,
9 ARM_32_LPAE_S2,
10 ARM_64_LPAE_S1,
11 ARM_64_LPAE_S2,
12 IO_PGTABLE_NUM_FMTS,
13};
14
15/**
16 * struct iommu_gather_ops - IOMMU callbacks for TLB and page table management.
17 *
18 * @tlb_flush_all: Synchronously invalidate the entire TLB context.
19 * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range.
20 * @tlb_sync: Ensure any queued TLB invalidation has taken effect.
21 * @flush_pgtable: Ensure page table updates are visible to the IOMMU.
22 *
23 * Note that these can all be called in atomic context and must therefore
24 * not block.
25 */
26struct iommu_gather_ops {
27 void (*tlb_flush_all)(void *cookie);
28 void (*tlb_add_flush)(unsigned long iova, size_t size, bool leaf,
29 void *cookie);
30 void (*tlb_sync)(void *cookie);
31 void (*flush_pgtable)(void *ptr, size_t size, void *cookie);
32};
33
34/**
35 * struct io_pgtable_cfg - Configuration data for a set of page tables.
36 *
37 * @quirks: A bitmap of hardware quirks that require some special
38 * action by the low-level page table allocator.
39 * @pgsize_bitmap: A bitmap of page sizes supported by this set of page
40 * tables.
41 * @ias: Input address (iova) size, in bits.
42 * @oas: Output address (paddr) size, in bits.
43 * @tlb: TLB management callbacks for this set of tables.
44 */
45struct io_pgtable_cfg {
46 #define IO_PGTABLE_QUIRK_ARM_NS (1 << 0) /* Set NS bit in PTEs */
47 int quirks;
48 unsigned long pgsize_bitmap;
49 unsigned int ias;
50 unsigned int oas;
51 const struct iommu_gather_ops *tlb;
52
53 /* Low-level data specific to the table format */
54 union {
55 struct {
56 u64 ttbr[2];
57 u64 tcr;
58 u64 mair[2];
59 } arm_lpae_s1_cfg;
60
61 struct {
62 u64 vttbr;
63 u64 vtcr;
64 } arm_lpae_s2_cfg;
65 };
66};
67
68/**
69 * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers.
70 *
71 * @map: Map a physically contiguous memory region.
72 * @unmap: Unmap a physically contiguous memory region.
73 * @iova_to_phys: Translate iova to physical address.
74 *
75 * These functions map directly onto the iommu_ops member functions with
76 * the same names.
77 */
78struct io_pgtable_ops {
79 int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
80 phys_addr_t paddr, size_t size, int prot);
81 int (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
82 size_t size);
83 phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
84 unsigned long iova);
85};
86
87/**
88 * alloc_io_pgtable_ops() - Allocate a page table allocator for use by an IOMMU.
89 *
90 * @fmt: The page table format.
91 * @cfg: The page table configuration. This will be modified to represent
92 * the configuration actually provided by the allocator (e.g. the
93 * pgsize_bitmap may be restricted).
94 * @cookie: An opaque token provided by the IOMMU driver and passed back to
95 * the callback routines in cfg->tlb.
96 */
97struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
98 struct io_pgtable_cfg *cfg,
99 void *cookie);
100
101/**
102 * free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller
103 * *must* ensure that the page table is no longer
104 * live, but the TLB can be dirty.
105 *
106 * @ops: The ops returned from alloc_io_pgtable_ops.
107 */
108void free_io_pgtable_ops(struct io_pgtable_ops *ops);
109
110
111/*
112 * Internal structures for page table allocator implementations.
113 */
114
115/**
116 * struct io_pgtable - Internal structure describing a set of page tables.
117 *
118 * @fmt: The page table format.
119 * @cookie: An opaque token provided by the IOMMU driver and passed back to
120 * any callback routines.
121 * @cfg: A copy of the page table configuration.
122 * @ops: The page table operations in use for this set of page tables.
123 */
124struct io_pgtable {
125 enum io_pgtable_fmt fmt;
126 void *cookie;
127 struct io_pgtable_cfg cfg;
128 struct io_pgtable_ops ops;
129};
130
131/**
132 * struct io_pgtable_init_fns - Alloc/free a set of page tables for a
133 * particular format.
134 *
135 * @alloc: Allocate a set of page tables described by cfg.
136 * @free: Free the page tables associated with iop.
137 */
138struct io_pgtable_init_fns {
139 struct io_pgtable *(*alloc)(struct io_pgtable_cfg *cfg, void *cookie);
140 void (*free)(struct io_pgtable *iop);
141};
142
143#endif /* __IO_PGTABLE_H */
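Putting the header's pieces together: a driver fills in an io_pgtable_cfg with its TLB callbacks and address sizes, allocates the ops, then maps and unmaps through them while programming the format-specific registers from cfg into the hardware. A minimal sketch under stated assumptions (the demo_* callbacks stand in for real TLB maintenance; needs linux/iommu.h and linux/sizes.h for the IOMMU_* and SZ_* constants; error handling trimmed):

static void demo_tlb_flush_all(void *cookie) { /* invalidate context */ }
static void demo_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
			       void *cookie) { /* queue invalidation */ }
static void demo_tlb_sync(void *cookie) { /* wait for completion */ }
static void demo_flush_pgtable(void *ptr, size_t size, void *cookie)
{ /* make PTE updates visible to the table walker */ }

static const struct iommu_gather_ops demo_tlb_ops = {
	.tlb_flush_all	= demo_tlb_flush_all,
	.tlb_add_flush	= demo_tlb_add_flush,
	.tlb_sync	= demo_tlb_sync,
	.flush_pgtable	= demo_flush_pgtable,
};

static int demo_use_pgtable(void)
{
	struct io_pgtable_cfg cfg = {
		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
		.ias		= 48,
		.oas		= 48,
		.tlb		= &demo_tlb_ops,
	};
	struct io_pgtable_ops *ops;
	int ret;

	ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, NULL);
	if (!ops)
		return -ENOMEM;

	/* Program cfg.arm_lpae_s1_cfg.ttbr/tcr/mair into the hardware here. */
	ret = ops->map(ops, SZ_4K, SZ_4K, SZ_4K, IOMMU_READ | IOMMU_WRITE);
	if (!ret)
		ops->unmap(ops, SZ_4K, SZ_4K);

	free_io_pgtable_ops(ops);
	return ret;
}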
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index f7718d73e984..72e683df0731 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc. 2 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
3 * Author: Joerg Roedel <joerg.roedel@amd.com> 3 * Author: Joerg Roedel <jroedel@suse.de>
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -1084,7 +1084,7 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
1084 if (ret) 1084 if (ret)
1085 iommu_unmap(domain, orig_iova, orig_size - size); 1085 iommu_unmap(domain, orig_iova, orig_size - size);
1086 else 1086 else
1087 trace_map(iova, paddr, size); 1087 trace_map(orig_iova, paddr, orig_size);
1088 1088
1089 return ret; 1089 return ret;
1090} 1090}
@@ -1094,6 +1094,7 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
1094{ 1094{
1095 size_t unmapped_page, unmapped = 0; 1095 size_t unmapped_page, unmapped = 0;
1096 unsigned int min_pagesz; 1096 unsigned int min_pagesz;
1097 unsigned long orig_iova = iova;
1097 1098
1098 if (unlikely(domain->ops->unmap == NULL || 1099 if (unlikely(domain->ops->unmap == NULL ||
1099 domain->ops->pgsize_bitmap == 0UL)) 1100 domain->ops->pgsize_bitmap == 0UL))
@@ -1133,7 +1134,7 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
1133 unmapped += unmapped_page; 1134 unmapped += unmapped_page;
1134 } 1135 }
1135 1136
1136 trace_unmap(iova, 0, size); 1137 trace_unmap(orig_iova, size, unmapped);
1137 return unmapped; 1138 return unmapped;
1138} 1139}
1139EXPORT_SYMBOL_GPL(iommu_unmap); 1140EXPORT_SYMBOL_GPL(iommu_unmap);
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index f6b17e6af2fb..9dd8208312c2 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -18,13 +18,58 @@
18 */ 18 */
19 19
20#include <linux/iova.h> 20#include <linux/iova.h>
21#include <linux/slab.h>
22
23static struct kmem_cache *iommu_iova_cache;
24
25int iommu_iova_cache_init(void)
26{
27 int ret = 0;
28
29 iommu_iova_cache = kmem_cache_create("iommu_iova",
30 sizeof(struct iova),
31 0,
32 SLAB_HWCACHE_ALIGN,
33 NULL);
34 if (!iommu_iova_cache) {
35 pr_err("Couldn't create iova cache\n");
36 ret = -ENOMEM;
37 }
38
39 return ret;
40}
41
42void iommu_iova_cache_destroy(void)
43{
44 kmem_cache_destroy(iommu_iova_cache);
45}
46
47struct iova *alloc_iova_mem(void)
48{
49 return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
50}
51
52void free_iova_mem(struct iova *iova)
53{
54 kmem_cache_free(iommu_iova_cache, iova);
55}
21 56
22void 57void
23init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit) 58init_iova_domain(struct iova_domain *iovad, unsigned long granule,
59 unsigned long start_pfn, unsigned long pfn_32bit)
24{ 60{
61 /*
62 * IOVA granularity will normally be equal to the smallest
63 * supported IOMMU page size; both *must* be capable of
64 * representing individual CPU pages exactly.
65 */
66 BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));
67
25 spin_lock_init(&iovad->iova_rbtree_lock); 68 spin_lock_init(&iovad->iova_rbtree_lock);
26 iovad->rbroot = RB_ROOT; 69 iovad->rbroot = RB_ROOT;
27 iovad->cached32_node = NULL; 70 iovad->cached32_node = NULL;
71 iovad->granule = granule;
72 iovad->start_pfn = start_pfn;
28 iovad->dma_32bit_pfn = pfn_32bit; 73 iovad->dma_32bit_pfn = pfn_32bit;
29} 74}
30 75
@@ -127,7 +172,7 @@ move_left:
127 if (!curr) { 172 if (!curr) {
128 if (size_aligned) 173 if (size_aligned)
129 pad_size = iova_get_pad_size(size, limit_pfn); 174 pad_size = iova_get_pad_size(size, limit_pfn);
130 if ((IOVA_START_PFN + size + pad_size) > limit_pfn) { 175 if ((iovad->start_pfn + size + pad_size) > limit_pfn) {
131 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); 176 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
132 return -ENOMEM; 177 return -ENOMEM;
133 } 178 }
@@ -202,8 +247,8 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova)
202 * @size: - size of page frames to allocate 247 * @size: - size of page frames to allocate
203 * @limit_pfn: - max limit address 248 * @limit_pfn: - max limit address
204 * @size_aligned: - set if size_aligned address range is required 249 * @size_aligned: - set if size_aligned address range is required
205 * This function allocates an iova in the range limit_pfn to IOVA_START_PFN 250 * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
206 * looking from limit_pfn instead from IOVA_START_PFN. If the size_aligned 251 * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
207 * flag is set then the allocated address iova->pfn_lo will be naturally 252 * flag is set then the allocated address iova->pfn_lo will be naturally
208 * aligned on roundup_power_of_two(size). 253 * aligned on roundup_power_of_two(size).
209 */ 254 */
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 748693192c20..10186cac7716 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -16,7 +16,7 @@
16#include <linux/io.h> 16#include <linux/io.h>
17#include <linux/iommu.h> 17#include <linux/iommu.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/platform_data/ipmmu-vmsa.h> 19#include <linux/of.h>
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/sizes.h> 21#include <linux/sizes.h>
22#include <linux/slab.h> 22#include <linux/slab.h>
@@ -24,12 +24,13 @@
24#include <asm/dma-iommu.h> 24#include <asm/dma-iommu.h>
25#include <asm/pgalloc.h> 25#include <asm/pgalloc.h>
26 26
27#include "io-pgtable.h"
28
27struct ipmmu_vmsa_device { 29struct ipmmu_vmsa_device {
28 struct device *dev; 30 struct device *dev;
29 void __iomem *base; 31 void __iomem *base;
30 struct list_head list; 32 struct list_head list;
31 33
32 const struct ipmmu_vmsa_platform_data *pdata;
33 unsigned int num_utlbs; 34 unsigned int num_utlbs;
34 35
35 struct dma_iommu_mapping *mapping; 36 struct dma_iommu_mapping *mapping;
@@ -39,14 +40,17 @@ struct ipmmu_vmsa_domain {
39 struct ipmmu_vmsa_device *mmu; 40 struct ipmmu_vmsa_device *mmu;
40 struct iommu_domain *io_domain; 41 struct iommu_domain *io_domain;
41 42
43 struct io_pgtable_cfg cfg;
44 struct io_pgtable_ops *iop;
45
42 unsigned int context_id; 46 unsigned int context_id;
43 spinlock_t lock; /* Protects mappings */ 47 spinlock_t lock; /* Protects mappings */
44 pgd_t *pgd;
45}; 48};
46 49
47struct ipmmu_vmsa_archdata { 50struct ipmmu_vmsa_archdata {
48 struct ipmmu_vmsa_device *mmu; 51 struct ipmmu_vmsa_device *mmu;
49 unsigned int utlb; 52 unsigned int *utlbs;
53 unsigned int num_utlbs;
50}; 54};
51 55
52static DEFINE_SPINLOCK(ipmmu_devices_lock); 56static DEFINE_SPINLOCK(ipmmu_devices_lock);
@@ -58,6 +62,8 @@ static LIST_HEAD(ipmmu_devices);
58 * Registers Definition 62 * Registers Definition
59 */ 63 */
60 64
65#define IM_NS_ALIAS_OFFSET 0x800
66
61#define IM_CTX_SIZE 0x40 67#define IM_CTX_SIZE 0x40
62 68
63#define IMCTR 0x0000 69#define IMCTR 0x0000
@@ -171,52 +177,6 @@ static LIST_HEAD(ipmmu_devices);
171#define IMUASID_ASID0_SHIFT 0 177#define IMUASID_ASID0_SHIFT 0
172 178
173/* ----------------------------------------------------------------------------- 179/* -----------------------------------------------------------------------------
174 * Page Table Bits
175 */
176
177/*
178 * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory access,
179 * Long-descriptor format" that the NStable bit being set in a table descriptor
180 * will result in the NStable and NS bits of all child entries being ignored and
181 * considered as being set. The IPMMU seems not to comply with this, as it
182 * generates a secure access page fault if any of the NStable and NS bits isn't
183 * set when running in non-secure mode.
184 */
185#ifndef PMD_NSTABLE
186#define PMD_NSTABLE (_AT(pmdval_t, 1) << 63)
187#endif
188
189#define ARM_VMSA_PTE_XN (((pteval_t)3) << 53)
190#define ARM_VMSA_PTE_CONT (((pteval_t)1) << 52)
191#define ARM_VMSA_PTE_AF (((pteval_t)1) << 10)
192#define ARM_VMSA_PTE_SH_NS (((pteval_t)0) << 8)
193#define ARM_VMSA_PTE_SH_OS (((pteval_t)2) << 8)
194#define ARM_VMSA_PTE_SH_IS (((pteval_t)3) << 8)
195#define ARM_VMSA_PTE_SH_MASK (((pteval_t)3) << 8)
196#define ARM_VMSA_PTE_NS (((pteval_t)1) << 5)
197#define ARM_VMSA_PTE_PAGE (((pteval_t)3) << 0)
198
199/* Stage-1 PTE */
200#define ARM_VMSA_PTE_nG (((pteval_t)1) << 11)
201#define ARM_VMSA_PTE_AP_UNPRIV (((pteval_t)1) << 6)
202#define ARM_VMSA_PTE_AP_RDONLY (((pteval_t)2) << 6)
203#define ARM_VMSA_PTE_AP_MASK (((pteval_t)3) << 6)
204#define ARM_VMSA_PTE_ATTRINDX_MASK (((pteval_t)3) << 2)
205#define ARM_VMSA_PTE_ATTRINDX_SHIFT 2
206
207#define ARM_VMSA_PTE_ATTRS_MASK \
208 (ARM_VMSA_PTE_XN | ARM_VMSA_PTE_CONT | ARM_VMSA_PTE_nG | \
209 ARM_VMSA_PTE_AF | ARM_VMSA_PTE_SH_MASK | ARM_VMSA_PTE_AP_MASK | \
210 ARM_VMSA_PTE_NS | ARM_VMSA_PTE_ATTRINDX_MASK)
211
212#define ARM_VMSA_PTE_CONT_ENTRIES 16
213#define ARM_VMSA_PTE_CONT_SIZE (PAGE_SIZE * ARM_VMSA_PTE_CONT_ENTRIES)
214
215#define IPMMU_PTRS_PER_PTE 512
216#define IPMMU_PTRS_PER_PMD 512
217#define IPMMU_PTRS_PER_PGD 4
218
219/* -----------------------------------------------------------------------------
220 * Read/Write Access 180 * Read/Write Access
221 */ 181 */
222 182
@@ -305,18 +265,39 @@ static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
305 ipmmu_write(mmu, IMUCTR(utlb), 0); 265 ipmmu_write(mmu, IMUCTR(utlb), 0);
306} 266}
307 267
308static void ipmmu_flush_pgtable(struct ipmmu_vmsa_device *mmu, void *addr, 268static void ipmmu_tlb_flush_all(void *cookie)
309 size_t size) 269{
270 struct ipmmu_vmsa_domain *domain = cookie;
271
272 ipmmu_tlb_invalidate(domain);
273}
274
275static void ipmmu_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
276 void *cookie)
310{ 277{
311 unsigned long offset = (unsigned long)addr & ~PAGE_MASK; 278 /* The hardware doesn't support selective TLB flush. */
279}
280
281static void ipmmu_flush_pgtable(void *ptr, size_t size, void *cookie)
282{
283 unsigned long offset = (unsigned long)ptr & ~PAGE_MASK;
284 struct ipmmu_vmsa_domain *domain = cookie;
312 285
313 /* 286 /*
314 * TODO: Add support for coherent walk through CCI with DVM and remove 287 * TODO: Add support for coherent walk through CCI with DVM and remove
315 * cache handling. 288 * cache handling.
316 */ 289 */
317 dma_map_page(mmu->dev, virt_to_page(addr), offset, size, DMA_TO_DEVICE); 290 dma_map_page(domain->mmu->dev, virt_to_page(ptr), offset, size,
291 DMA_TO_DEVICE);
318} 292}
319 293
294static struct iommu_gather_ops ipmmu_gather_ops = {
295 .tlb_flush_all = ipmmu_tlb_flush_all,
296 .tlb_add_flush = ipmmu_tlb_add_flush,
297 .tlb_sync = ipmmu_tlb_flush_all,
298 .flush_pgtable = ipmmu_flush_pgtable,
299};
300
320/* ----------------------------------------------------------------------------- 301/* -----------------------------------------------------------------------------
321 * Domain/Context Management 302 * Domain/Context Management
322 */ 303 */
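For orientation, a sketch of when the io-pgtable layer invokes the gather ops registered above (the contract as declared in io-pgtable.h; the exact call sites live in io-pgtable-arm.c):

    /*
     * ops->map()/ops->unmap() write a descriptor
     *     -> flush_pgtable(ptr, size, cookie)        make it visible to the walker
     * ops->unmap() removes a translation
     *     -> tlb_add_flush(iova, size, leaf, cookie) queue an invalidation
     *     -> tlb_sync(cookie)                        wait until it completes
     *
     * The IPMMU can only invalidate a whole context's TLB, so queuing a
     * per-range flush is pointless: tlb_add_flush() is a no-op and the
     * eventual tlb_sync() performs one full ipmmu_tlb_invalidate().
     */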
@@ -324,7 +305,28 @@ static void ipmmu_flush_pgtable(struct ipmmu_vmsa_device *mmu, void *addr,
324static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) 305static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
325{ 306{
326 phys_addr_t ttbr; 307 phys_addr_t ttbr;
327 u32 reg; 308
309 /*
310 * Allocate the page table operations.
311 *
312 * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
313 * access, Long-descriptor format" that the NStable bit being set in a
314 * table descriptor will result in the NStable and NS bits of all child
315 * entries being ignored and considered as being set. The IPMMU seems
316 * not to comply with this, as it generates a secure access page fault
317 * if any of the NStable and NS bits isn't set when running in
318 * non-secure mode.
319 */
320 domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
 321 domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
322 domain->cfg.ias = 32;
323 domain->cfg.oas = 40;
324 domain->cfg.tlb = &ipmmu_gather_ops;
325
326 domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
327 domain);
328 if (!domain->iop)
329 return -EINVAL;
328 330
329 /* 331 /*
330 * TODO: When adding support for multiple contexts, find an unused 332 * TODO: When adding support for multiple contexts, find an unused
@@ -333,9 +335,7 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
333 domain->context_id = 0; 335 domain->context_id = 0;
334 336
335 /* TTBR0 */ 337 /* TTBR0 */
336 ipmmu_flush_pgtable(domain->mmu, domain->pgd, 338 ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
337 IPMMU_PTRS_PER_PGD * sizeof(*domain->pgd));
338 ttbr = __pa(domain->pgd);
339 ipmmu_ctx_write(domain, IMTTLBR0, ttbr); 339 ipmmu_ctx_write(domain, IMTTLBR0, ttbr);
340 ipmmu_ctx_write(domain, IMTTUBR0, ttbr >> 32); 340 ipmmu_ctx_write(domain, IMTTUBR0, ttbr >> 32);
341 341
@@ -348,15 +348,8 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
348 IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA | 348 IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
349 IMTTBCR_IRGN0_WB_WA | IMTTBCR_SL0_LVL_1); 349 IMTTBCR_IRGN0_WB_WA | IMTTBCR_SL0_LVL_1);
350 350
351 /* 351 /* MAIR0 */
352 * MAIR0 352 ipmmu_ctx_write(domain, IMMAIR0, domain->cfg.arm_lpae_s1_cfg.mair[0]);
353 * We need three attributes only, non-cacheable, write-back read/write
354 * allocate and device memory.
355 */
356 reg = (IMMAIR_ATTR_NC << IMMAIR_ATTR_SHIFT(IMMAIR_ATTR_IDX_NC))
357 | (IMMAIR_ATTR_WBRWA << IMMAIR_ATTR_SHIFT(IMMAIR_ATTR_IDX_WBRWA))
358 | (IMMAIR_ATTR_DEVICE << IMMAIR_ATTR_SHIFT(IMMAIR_ATTR_IDX_DEV));
359 ipmmu_ctx_write(domain, IMMAIR0, reg);
360 353
361 /* IMBUSCR */ 354 /* IMBUSCR */
362 ipmmu_ctx_write(domain, IMBUSCR, 355 ipmmu_ctx_write(domain, IMBUSCR,
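Condensed, the lifecycle the driver is switching to in these hunks looks like this (a sketch based on the io-pgtable API used above, with error handling and the IMTTUBR0 upper-word write omitted):

    /* 1. Describe the hardware and allocate the tables plus ops. */
    domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg, domain);

    /* 2. Program the context registers from values the allocator computed. */
    ipmmu_ctx_write(domain, IMTTLBR0, domain->cfg.arm_lpae_s1_cfg.ttbr[0]);
    ipmmu_ctx_write(domain, IMMAIR0, domain->cfg.arm_lpae_s1_cfg.mair[0]);

    /* 3. Delegate all page-table manipulation from then on. */
    domain->iop->map(domain->iop, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
    domain->iop->unmap(domain->iop, iova, SZ_4K);

    /* 4. Tear down; this also frees the page tables themselves. */
    free_io_pgtable_ops(domain->iop);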
@@ -461,396 +454,6 @@ static irqreturn_t ipmmu_irq(int irq, void *dev)
461} 454}
462 455
463/* ----------------------------------------------------------------------------- 456/* -----------------------------------------------------------------------------
464 * Page Table Management
465 */
466
467#define pud_pgtable(pud) pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))
468
469static void ipmmu_free_ptes(pmd_t *pmd)
470{
471 pgtable_t table = pmd_pgtable(*pmd);
472 __free_page(table);
473}
474
475static void ipmmu_free_pmds(pud_t *pud)
476{
477 pmd_t *pmd = pmd_offset(pud, 0);
478 pgtable_t table;
479 unsigned int i;
480
481 for (i = 0; i < IPMMU_PTRS_PER_PMD; ++i) {
482 if (!pmd_table(*pmd))
483 continue;
484
485 ipmmu_free_ptes(pmd);
486 pmd++;
487 }
488
489 table = pud_pgtable(*pud);
490 __free_page(table);
491}
492
493static void ipmmu_free_pgtables(struct ipmmu_vmsa_domain *domain)
494{
495 pgd_t *pgd, *pgd_base = domain->pgd;
496 unsigned int i;
497
498 /*
499 * Recursively free the page tables for this domain. We don't care about
500 * speculative TLB filling, because the TLB will be nuked next time this
501 * context bank is re-allocated and no devices currently map to these
502 * tables.
503 */
504 pgd = pgd_base;
505 for (i = 0; i < IPMMU_PTRS_PER_PGD; ++i) {
506 if (pgd_none(*pgd))
507 continue;
508 ipmmu_free_pmds((pud_t *)pgd);
509 pgd++;
510 }
511
512 kfree(pgd_base);
513}
514
515/*
516 * We can't use the (pgd|pud|pmd|pte)_populate or the set_(pgd|pud|pmd|pte)
517 * functions as they would flush the CPU TLB.
518 */
519
520static pte_t *ipmmu_alloc_pte(struct ipmmu_vmsa_device *mmu, pmd_t *pmd,
521 unsigned long iova)
522{
523 pte_t *pte;
524
525 if (!pmd_none(*pmd))
526 return pte_offset_kernel(pmd, iova);
527
528 pte = (pte_t *)get_zeroed_page(GFP_ATOMIC);
529 if (!pte)
530 return NULL;
531
532 ipmmu_flush_pgtable(mmu, pte, PAGE_SIZE);
533 *pmd = __pmd(__pa(pte) | PMD_NSTABLE | PMD_TYPE_TABLE);
534 ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd));
535
536 return pte + pte_index(iova);
537}
538
539static pmd_t *ipmmu_alloc_pmd(struct ipmmu_vmsa_device *mmu, pgd_t *pgd,
540 unsigned long iova)
541{
542 pud_t *pud = (pud_t *)pgd;
543 pmd_t *pmd;
544
545 if (!pud_none(*pud))
546 return pmd_offset(pud, iova);
547
548 pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
549 if (!pmd)
550 return NULL;
551
552 ipmmu_flush_pgtable(mmu, pmd, PAGE_SIZE);
553 *pud = __pud(__pa(pmd) | PMD_NSTABLE | PMD_TYPE_TABLE);
554 ipmmu_flush_pgtable(mmu, pud, sizeof(*pud));
555
556 return pmd + pmd_index(iova);
557}
558
559static u64 ipmmu_page_prot(unsigned int prot, u64 type)
560{
561 u64 pgprot = ARM_VMSA_PTE_nG | ARM_VMSA_PTE_AF
562 | ARM_VMSA_PTE_SH_IS | ARM_VMSA_PTE_AP_UNPRIV
563 | ARM_VMSA_PTE_NS | type;
564
565 if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
566 pgprot |= ARM_VMSA_PTE_AP_RDONLY;
567
568 if (prot & IOMMU_CACHE)
569 pgprot |= IMMAIR_ATTR_IDX_WBRWA << ARM_VMSA_PTE_ATTRINDX_SHIFT;
570
571 if (prot & IOMMU_NOEXEC)
572 pgprot |= ARM_VMSA_PTE_XN;
573 else if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
574 /* If no access create a faulting entry to avoid TLB fills. */
575 pgprot &= ~ARM_VMSA_PTE_PAGE;
576
577 return pgprot;
578}
579
580static int ipmmu_alloc_init_pte(struct ipmmu_vmsa_device *mmu, pmd_t *pmd,
581 unsigned long iova, unsigned long pfn,
582 size_t size, int prot)
583{
584 pteval_t pteval = ipmmu_page_prot(prot, ARM_VMSA_PTE_PAGE);
585 unsigned int num_ptes = 1;
586 pte_t *pte, *start;
587 unsigned int i;
588
589 pte = ipmmu_alloc_pte(mmu, pmd, iova);
590 if (!pte)
591 return -ENOMEM;
592
593 start = pte;
594
595 /*
596 * Install the page table entries. We can be called both for a single
597 * page or for a block of 16 physically contiguous pages. In the latter
598 * case set the PTE contiguous hint.
599 */
600 if (size == SZ_64K) {
601 pteval |= ARM_VMSA_PTE_CONT;
602 num_ptes = ARM_VMSA_PTE_CONT_ENTRIES;
603 }
604
605 for (i = num_ptes; i; --i)
606 *pte++ = pfn_pte(pfn++, __pgprot(pteval));
607
608 ipmmu_flush_pgtable(mmu, start, sizeof(*pte) * num_ptes);
609
610 return 0;
611}
612
613static int ipmmu_alloc_init_pmd(struct ipmmu_vmsa_device *mmu, pmd_t *pmd,
614 unsigned long iova, unsigned long pfn,
615 int prot)
616{
617 pmdval_t pmdval = ipmmu_page_prot(prot, PMD_TYPE_SECT);
618
619 *pmd = pfn_pmd(pfn, __pgprot(pmdval));
620 ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd));
621
622 return 0;
623}
624
625static int ipmmu_create_mapping(struct ipmmu_vmsa_domain *domain,
626 unsigned long iova, phys_addr_t paddr,
627 size_t size, int prot)
628{
629 struct ipmmu_vmsa_device *mmu = domain->mmu;
630 pgd_t *pgd = domain->pgd;
631 unsigned long flags;
632 unsigned long pfn;
633 pmd_t *pmd;
634 int ret;
635
636 if (!pgd)
637 return -EINVAL;
638
639 if (size & ~PAGE_MASK)
640 return -EINVAL;
641
642 if (paddr & ~((1ULL << 40) - 1))
643 return -ERANGE;
644
645 pfn = __phys_to_pfn(paddr);
646 pgd += pgd_index(iova);
647
648 /* Update the page tables. */
649 spin_lock_irqsave(&domain->lock, flags);
650
651 pmd = ipmmu_alloc_pmd(mmu, pgd, iova);
652 if (!pmd) {
653 ret = -ENOMEM;
654 goto done;
655 }
656
657 switch (size) {
658 case SZ_2M:
659 ret = ipmmu_alloc_init_pmd(mmu, pmd, iova, pfn, prot);
660 break;
661 case SZ_64K:
662 case SZ_4K:
663 ret = ipmmu_alloc_init_pte(mmu, pmd, iova, pfn, size, prot);
664 break;
665 default:
666 ret = -EINVAL;
667 break;
668 }
669
670done:
671 spin_unlock_irqrestore(&domain->lock, flags);
672
673 if (!ret)
674 ipmmu_tlb_invalidate(domain);
675
676 return ret;
677}
678
679static void ipmmu_clear_pud(struct ipmmu_vmsa_device *mmu, pud_t *pud)
680{
681 /* Free the page table. */
682 pgtable_t table = pud_pgtable(*pud);
683 __free_page(table);
684
685 /* Clear the PUD. */
686 *pud = __pud(0);
687 ipmmu_flush_pgtable(mmu, pud, sizeof(*pud));
688}
689
690static void ipmmu_clear_pmd(struct ipmmu_vmsa_device *mmu, pud_t *pud,
691 pmd_t *pmd)
692{
693 unsigned int i;
694
695 /* Free the page table. */
696 if (pmd_table(*pmd)) {
697 pgtable_t table = pmd_pgtable(*pmd);
698 __free_page(table);
699 }
700
701 /* Clear the PMD. */
702 *pmd = __pmd(0);
703 ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd));
704
705 /* Check whether the PUD is still needed. */
706 pmd = pmd_offset(pud, 0);
707 for (i = 0; i < IPMMU_PTRS_PER_PMD; ++i) {
708 if (!pmd_none(pmd[i]))
709 return;
710 }
711
712 /* Clear the parent PUD. */
713 ipmmu_clear_pud(mmu, pud);
714}
715
716static void ipmmu_clear_pte(struct ipmmu_vmsa_device *mmu, pud_t *pud,
717 pmd_t *pmd, pte_t *pte, unsigned int num_ptes)
718{
719 unsigned int i;
720
721 /* Clear the PTE. */
722 for (i = num_ptes; i; --i)
723 pte[i-1] = __pte(0);
724
725 ipmmu_flush_pgtable(mmu, pte, sizeof(*pte) * num_ptes);
726
727 /* Check whether the PMD is still needed. */
728 pte = pte_offset_kernel(pmd, 0);
729 for (i = 0; i < IPMMU_PTRS_PER_PTE; ++i) {
730 if (!pte_none(pte[i]))
731 return;
732 }
733
734 /* Clear the parent PMD. */
735 ipmmu_clear_pmd(mmu, pud, pmd);
736}
737
738static int ipmmu_split_pmd(struct ipmmu_vmsa_device *mmu, pmd_t *pmd)
739{
740 pte_t *pte, *start;
741 pteval_t pteval;
742 unsigned long pfn;
743 unsigned int i;
744
745 pte = (pte_t *)get_zeroed_page(GFP_ATOMIC);
746 if (!pte)
747 return -ENOMEM;
748
749 /* Copy the PMD attributes. */
750 pteval = (pmd_val(*pmd) & ARM_VMSA_PTE_ATTRS_MASK)
751 | ARM_VMSA_PTE_CONT | ARM_VMSA_PTE_PAGE;
752
753 pfn = pmd_pfn(*pmd);
754 start = pte;
755
756 for (i = IPMMU_PTRS_PER_PTE; i; --i)
757 *pte++ = pfn_pte(pfn++, __pgprot(pteval));
758
759 ipmmu_flush_pgtable(mmu, start, PAGE_SIZE);
760 *pmd = __pmd(__pa(start) | PMD_NSTABLE | PMD_TYPE_TABLE);
761 ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd));
762
763 return 0;
764}
765
766static void ipmmu_split_pte(struct ipmmu_vmsa_device *mmu, pte_t *pte)
767{
768 unsigned int i;
769
770 for (i = ARM_VMSA_PTE_CONT_ENTRIES; i; --i)
771 pte[i-1] = __pte(pte_val(*pte) & ~ARM_VMSA_PTE_CONT);
772
773 ipmmu_flush_pgtable(mmu, pte, sizeof(*pte) * ARM_VMSA_PTE_CONT_ENTRIES);
774}
775
776static int ipmmu_clear_mapping(struct ipmmu_vmsa_domain *domain,
777 unsigned long iova, size_t size)
778{
779 struct ipmmu_vmsa_device *mmu = domain->mmu;
780 unsigned long flags;
781 pgd_t *pgd = domain->pgd;
782 pud_t *pud;
783 pmd_t *pmd;
784 pte_t *pte;
785 int ret = 0;
786
787 if (!pgd)
788 return -EINVAL;
789
790 if (size & ~PAGE_MASK)
791 return -EINVAL;
792
793 pgd += pgd_index(iova);
794 pud = (pud_t *)pgd;
795
796 spin_lock_irqsave(&domain->lock, flags);
797
798 /* If there's no PUD or PMD we're done. */
799 if (pud_none(*pud))
800 goto done;
801
802 pmd = pmd_offset(pud, iova);
803 if (pmd_none(*pmd))
804 goto done;
805
806 /*
807 * When freeing a 2MB block just clear the PMD. In the unlikely case the
808 * block is mapped as individual pages this will free the corresponding
809 * PTE page table.
810 */
811 if (size == SZ_2M) {
812 ipmmu_clear_pmd(mmu, pud, pmd);
813 goto done;
814 }
815
816 /*
817 * If the PMD has been mapped as a section remap it as pages to allow
818 * freeing individual pages.
819 */
820 if (pmd_sect(*pmd))
821 ipmmu_split_pmd(mmu, pmd);
822
823 pte = pte_offset_kernel(pmd, iova);
824
825 /*
826 * When freeing a 64kB block just clear the PTE entries. We don't have
827 * to care about the contiguous hint of the surrounding entries.
828 */
829 if (size == SZ_64K) {
830 ipmmu_clear_pte(mmu, pud, pmd, pte, ARM_VMSA_PTE_CONT_ENTRIES);
831 goto done;
832 }
833
834 /*
835 * If the PTE has been mapped with the contiguous hint set remap it and
836 * its surrounding PTEs to allow unmapping a single page.
837 */
838 if (pte_val(*pte) & ARM_VMSA_PTE_CONT)
839 ipmmu_split_pte(mmu, pte);
840
841 /* Clear the PTE. */
842 ipmmu_clear_pte(mmu, pud, pmd, pte, 1);
843
844done:
845 spin_unlock_irqrestore(&domain->lock, flags);
846
847 if (ret)
848 ipmmu_tlb_invalidate(domain);
849
850 return 0;
851}
852
853/* -----------------------------------------------------------------------------
854 * IOMMU Operations 457 * IOMMU Operations
855 */ 458 */
856 459
@@ -864,12 +467,6 @@ static int ipmmu_domain_init(struct iommu_domain *io_domain)
864 467
865 spin_lock_init(&domain->lock); 468 spin_lock_init(&domain->lock);
866 469
867 domain->pgd = kzalloc(IPMMU_PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
868 if (!domain->pgd) {
869 kfree(domain);
870 return -ENOMEM;
871 }
872
873 io_domain->priv = domain; 470 io_domain->priv = domain;
874 domain->io_domain = io_domain; 471 domain->io_domain = io_domain;
875 472
@@ -885,7 +482,7 @@ static void ipmmu_domain_destroy(struct iommu_domain *io_domain)
885 * been detached. 482 * been detached.
886 */ 483 */
887 ipmmu_domain_destroy_context(domain); 484 ipmmu_domain_destroy_context(domain);
888 ipmmu_free_pgtables(domain); 485 free_io_pgtable_ops(domain->iop);
889 kfree(domain); 486 kfree(domain);
890} 487}
891 488
@@ -896,6 +493,7 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
896 struct ipmmu_vmsa_device *mmu = archdata->mmu; 493 struct ipmmu_vmsa_device *mmu = archdata->mmu;
897 struct ipmmu_vmsa_domain *domain = io_domain->priv; 494 struct ipmmu_vmsa_domain *domain = io_domain->priv;
898 unsigned long flags; 495 unsigned long flags;
496 unsigned int i;
899 int ret = 0; 497 int ret = 0;
900 498
901 if (!mmu) { 499 if (!mmu) {
@@ -924,7 +522,8 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
924 if (ret < 0) 522 if (ret < 0)
925 return ret; 523 return ret;
926 524
927 ipmmu_utlb_enable(domain, archdata->utlb); 525 for (i = 0; i < archdata->num_utlbs; ++i)
526 ipmmu_utlb_enable(domain, archdata->utlbs[i]);
928 527
929 return 0; 528 return 0;
930} 529}
@@ -934,8 +533,10 @@ static void ipmmu_detach_device(struct iommu_domain *io_domain,
934{ 533{
935 struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu; 534 struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;
936 struct ipmmu_vmsa_domain *domain = io_domain->priv; 535 struct ipmmu_vmsa_domain *domain = io_domain->priv;
536 unsigned int i;
937 537
938 ipmmu_utlb_disable(domain, archdata->utlb); 538 for (i = 0; i < archdata->num_utlbs; ++i)
539 ipmmu_utlb_disable(domain, archdata->utlbs[i]);
939 540
940 /* 541 /*
941 * TODO: Optimize by disabling the context when no device is attached. 542 * TODO: Optimize by disabling the context when no device is attached.
@@ -950,76 +551,61 @@ static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
950 if (!domain) 551 if (!domain)
951 return -ENODEV; 552 return -ENODEV;
952 553
953 return ipmmu_create_mapping(domain, iova, paddr, size, prot); 554 return domain->iop->map(domain->iop, iova, paddr, size, prot);
954} 555}
955 556
956static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova, 557static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
957 size_t size) 558 size_t size)
958{ 559{
959 struct ipmmu_vmsa_domain *domain = io_domain->priv; 560 struct ipmmu_vmsa_domain *domain = io_domain->priv;
960 int ret;
961 561
962 ret = ipmmu_clear_mapping(domain, iova, size); 562 return domain->iop->unmap(domain->iop, iova, size);
963 return ret ? 0 : size;
964} 563}
965 564
966static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain, 565static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
967 dma_addr_t iova) 566 dma_addr_t iova)
968{ 567{
969 struct ipmmu_vmsa_domain *domain = io_domain->priv; 568 struct ipmmu_vmsa_domain *domain = io_domain->priv;
970 pgd_t pgd;
971 pud_t pud;
972 pmd_t pmd;
973 pte_t pte;
974 569
 975 /* TODO: Is locking needed? */ 570 /* TODO: Is locking needed? */
976 571
977 if (!domain->pgd) 572 return domain->iop->iova_to_phys(domain->iop, iova);
978 return 0; 573}
979
980 pgd = *(domain->pgd + pgd_index(iova));
981 if (pgd_none(pgd))
982 return 0;
983
984 pud = *pud_offset(&pgd, iova);
985 if (pud_none(pud))
986 return 0;
987 574
988 pmd = *pmd_offset(&pud, iova); 575static int ipmmu_find_utlbs(struct ipmmu_vmsa_device *mmu, struct device *dev,
989 if (pmd_none(pmd)) 576 unsigned int *utlbs, unsigned int num_utlbs)
990 return 0; 577{
578 unsigned int i;
991 579
992 if (pmd_sect(pmd)) 580 for (i = 0; i < num_utlbs; ++i) {
993 return __pfn_to_phys(pmd_pfn(pmd)) | (iova & ~PMD_MASK); 581 struct of_phandle_args args;
582 int ret;
994 583
995 pte = *(pmd_page_vaddr(pmd) + pte_index(iova)); 584 ret = of_parse_phandle_with_args(dev->of_node, "iommus",
996 if (pte_none(pte)) 585 "#iommu-cells", i, &args);
997 return 0; 586 if (ret < 0)
587 return ret;
998 588
999 return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK); 589 of_node_put(args.np);
1000}
1001 590
1002static int ipmmu_find_utlb(struct ipmmu_vmsa_device *mmu, struct device *dev) 591 if (args.np != mmu->dev->of_node || args.args_count != 1)
1003{ 592 return -EINVAL;
1004 const struct ipmmu_vmsa_master *master = mmu->pdata->masters;
1005 const char *devname = dev_name(dev);
1006 unsigned int i;
1007 593
1008 for (i = 0; i < mmu->pdata->num_masters; ++i, ++master) { 594 utlbs[i] = args.args[0];
1009 if (strcmp(master->name, devname) == 0)
1010 return master->utlb;
1011 } 595 }
1012 596
1013 return -1; 597 return 0;
1014} 598}
1015 599
1016static int ipmmu_add_device(struct device *dev) 600static int ipmmu_add_device(struct device *dev)
1017{ 601{
1018 struct ipmmu_vmsa_archdata *archdata; 602 struct ipmmu_vmsa_archdata *archdata;
1019 struct ipmmu_vmsa_device *mmu; 603 struct ipmmu_vmsa_device *mmu;
1020 struct iommu_group *group; 604 struct iommu_group *group = NULL;
1021 int utlb = -1; 605 unsigned int *utlbs;
1022 int ret; 606 unsigned int i;
607 int num_utlbs;
608 int ret = -ENODEV;
1023 609
1024 if (dev->archdata.iommu) { 610 if (dev->archdata.iommu) {
1025 dev_warn(dev, "IOMMU driver already assigned to device %s\n", 611 dev_warn(dev, "IOMMU driver already assigned to device %s\n",
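The new ipmmu_find_utlbs() helper leans on the generic OF phandle parser. A sketch of what it yields for a hypothetical master referencing two micro-TLBs (the node name and indices are made up; see the binding document for the real property layout):

    /* DT (hypothetical):  iommus = <&mmu_a 5>, <&mmu_a 6>;  */
    struct of_phandle_args args;
    int i, n;

    n = of_count_phandle_with_args(dev->of_node, "iommus", "#iommu-cells");
    /* n == 2: one entry per phandle+specifier pair */

    for (i = 0; i < n; i++) {
            if (of_parse_phandle_with_args(dev->of_node, "iommus",
                                           "#iommu-cells", i, &args) < 0)
                    break;
            /*
             * args.np         -> mmu_a's device node (refcounted, hence
             *                    the of_node_put() in the driver)
             * args.args_count -> 1, from mmu_a's #iommu-cells = <1>
             * args.args[0]    -> 5, then 6: the micro-TLB numbers
             */
            of_node_put(args.np);
    }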
@@ -1028,11 +614,21 @@ static int ipmmu_add_device(struct device *dev)
1028 } 614 }
1029 615
1030 /* Find the master corresponding to the device. */ 616 /* Find the master corresponding to the device. */
617
618 num_utlbs = of_count_phandle_with_args(dev->of_node, "iommus",
619 "#iommu-cells");
620 if (num_utlbs < 0)
621 return -ENODEV;
622
623 utlbs = kcalloc(num_utlbs, sizeof(*utlbs), GFP_KERNEL);
624 if (!utlbs)
625 return -ENOMEM;
626
1031 spin_lock(&ipmmu_devices_lock); 627 spin_lock(&ipmmu_devices_lock);
1032 628
1033 list_for_each_entry(mmu, &ipmmu_devices, list) { 629 list_for_each_entry(mmu, &ipmmu_devices, list) {
1034 utlb = ipmmu_find_utlb(mmu, dev); 630 ret = ipmmu_find_utlbs(mmu, dev, utlbs, num_utlbs);
1035 if (utlb >= 0) { 631 if (!ret) {
1036 /* 632 /*
1037 * TODO Take a reference to the MMU to protect 633 * TODO Take a reference to the MMU to protect
1038 * against device removal. 634 * against device removal.
@@ -1043,17 +639,22 @@ static int ipmmu_add_device(struct device *dev)
1043 639
1044 spin_unlock(&ipmmu_devices_lock); 640 spin_unlock(&ipmmu_devices_lock);
1045 641
1046 if (utlb < 0) 642 if (ret < 0)
1047 return -ENODEV; 643 return -ENODEV;
1048 644
1049 if (utlb >= mmu->num_utlbs) 645 for (i = 0; i < num_utlbs; ++i) {
1050 return -EINVAL; 646 if (utlbs[i] >= mmu->num_utlbs) {
647 ret = -EINVAL;
648 goto error;
649 }
650 }
1051 651
1052 /* Create a device group and add the device to it. */ 652 /* Create a device group and add the device to it. */
1053 group = iommu_group_alloc(); 653 group = iommu_group_alloc();
1054 if (IS_ERR(group)) { 654 if (IS_ERR(group)) {
1055 dev_err(dev, "Failed to allocate IOMMU group\n"); 655 dev_err(dev, "Failed to allocate IOMMU group\n");
1056 return PTR_ERR(group); 656 ret = PTR_ERR(group);
657 goto error;
1057 } 658 }
1058 659
1059 ret = iommu_group_add_device(group, dev); 660 ret = iommu_group_add_device(group, dev);
@@ -1061,7 +662,8 @@ static int ipmmu_add_device(struct device *dev)
1061 662
1062 if (ret < 0) { 663 if (ret < 0) {
1063 dev_err(dev, "Failed to add device to IPMMU group\n"); 664 dev_err(dev, "Failed to add device to IPMMU group\n");
1064 return ret; 665 group = NULL;
666 goto error;
1065 } 667 }
1066 668
1067 archdata = kzalloc(sizeof(*archdata), GFP_KERNEL); 669 archdata = kzalloc(sizeof(*archdata), GFP_KERNEL);
@@ -1071,7 +673,8 @@ static int ipmmu_add_device(struct device *dev)
1071 } 673 }
1072 674
1073 archdata->mmu = mmu; 675 archdata->mmu = mmu;
1074 archdata->utlb = utlb; 676 archdata->utlbs = utlbs;
677 archdata->num_utlbs = num_utlbs;
1075 dev->archdata.iommu = archdata; 678 dev->archdata.iommu = archdata;
1076 679
1077 /* 680 /*
@@ -1090,7 +693,8 @@ static int ipmmu_add_device(struct device *dev)
1090 SZ_1G, SZ_2G); 693 SZ_1G, SZ_2G);
1091 if (IS_ERR(mapping)) { 694 if (IS_ERR(mapping)) {
1092 dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n"); 695 dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
1093 return PTR_ERR(mapping); 696 ret = PTR_ERR(mapping);
697 goto error;
1094 } 698 }
1095 699
1096 mmu->mapping = mapping; 700 mmu->mapping = mapping;
@@ -1106,17 +710,29 @@ static int ipmmu_add_device(struct device *dev)
1106 return 0; 710 return 0;
1107 711
1108error: 712error:
713 arm_iommu_release_mapping(mmu->mapping);
714
1109 kfree(dev->archdata.iommu); 715 kfree(dev->archdata.iommu);
716 kfree(utlbs);
717
1110 dev->archdata.iommu = NULL; 718 dev->archdata.iommu = NULL;
1111 iommu_group_remove_device(dev); 719
720 if (!IS_ERR_OR_NULL(group))
721 iommu_group_remove_device(dev);
722
1112 return ret; 723 return ret;
1113} 724}
1114 725
1115static void ipmmu_remove_device(struct device *dev) 726static void ipmmu_remove_device(struct device *dev)
1116{ 727{
728 struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;
729
1117 arm_iommu_detach_device(dev); 730 arm_iommu_detach_device(dev);
1118 iommu_group_remove_device(dev); 731 iommu_group_remove_device(dev);
1119 kfree(dev->archdata.iommu); 732
733 kfree(archdata->utlbs);
734 kfree(archdata);
735
1120 dev->archdata.iommu = NULL; 736 dev->archdata.iommu = NULL;
1121} 737}
1122 738
@@ -1131,7 +747,7 @@ static const struct iommu_ops ipmmu_ops = {
1131 .iova_to_phys = ipmmu_iova_to_phys, 747 .iova_to_phys = ipmmu_iova_to_phys,
1132 .add_device = ipmmu_add_device, 748 .add_device = ipmmu_add_device,
1133 .remove_device = ipmmu_remove_device, 749 .remove_device = ipmmu_remove_device,
1134 .pgsize_bitmap = SZ_2M | SZ_64K | SZ_4K, 750 .pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
1135}; 751};
1136 752
1137/* ----------------------------------------------------------------------------- 753/* -----------------------------------------------------------------------------
@@ -1154,7 +770,7 @@ static int ipmmu_probe(struct platform_device *pdev)
1154 int irq; 770 int irq;
1155 int ret; 771 int ret;
1156 772
1157 if (!pdev->dev.platform_data) { 773 if (!IS_ENABLED(CONFIG_OF) && !pdev->dev.platform_data) {
1158 dev_err(&pdev->dev, "missing platform data\n"); 774 dev_err(&pdev->dev, "missing platform data\n");
1159 return -EINVAL; 775 return -EINVAL;
1160 } 776 }
@@ -1166,7 +782,6 @@ static int ipmmu_probe(struct platform_device *pdev)
1166 } 782 }
1167 783
1168 mmu->dev = &pdev->dev; 784 mmu->dev = &pdev->dev;
1169 mmu->pdata = pdev->dev.platform_data;
1170 mmu->num_utlbs = 32; 785 mmu->num_utlbs = 32;
1171 786
1172 /* Map I/O memory and request IRQ. */ 787 /* Map I/O memory and request IRQ. */
@@ -1175,6 +790,20 @@ static int ipmmu_probe(struct platform_device *pdev)
1175 if (IS_ERR(mmu->base)) 790 if (IS_ERR(mmu->base))
1176 return PTR_ERR(mmu->base); 791 return PTR_ERR(mmu->base);
1177 792
793 /*
794 * The IPMMU has two register banks, for secure and non-secure modes.
795 * The bank mapped at the beginning of the IPMMU address space
796 * corresponds to the running mode of the CPU. When running in secure
797 * mode the non-secure register bank is also available at an offset.
798 *
799 * Secure mode operation isn't clearly documented and is thus currently
800 * not implemented in the driver. Furthermore, preliminary tests of
801 * non-secure operation with the main register bank were not successful.
802 * Offset the registers base unconditionally to point to the non-secure
803 * alias space for now.
804 */
805 mmu->base += IM_NS_ALIAS_OFFSET;
806
1178 irq = platform_get_irq(pdev, 0); 807 irq = platform_get_irq(pdev, 0);
1179 if (irq < 0) { 808 if (irq < 0) {
1180 dev_err(&pdev->dev, "no IRQ found\n"); 809 dev_err(&pdev->dev, "no IRQ found\n");
@@ -1220,9 +849,15 @@ static int ipmmu_remove(struct platform_device *pdev)
 1220 return 0; 849 return 0;
 1221} 850}
 1222 851
 852static const struct of_device_id ipmmu_of_ids[] = {
 853 { .compatible = "renesas,ipmmu-vmsa", },
 854 { /* Sentinel: of_match_table arrays must be null-terminated. */ }
 855};
 856
 1223static struct platform_driver ipmmu_driver = { 857static struct platform_driver ipmmu_driver = {
 1224 .driver = { 858 .driver = {
 1225 .name = "ipmmu-vmsa", 859 .name = "ipmmu-vmsa",
 860 .of_match_table = of_match_ptr(ipmmu_of_ids),
 1226 }, 861 },
 1227 .probe = ipmmu_probe, 862 .probe = ipmmu_probe,
 1228 .remove = ipmmu_remove, 863 .remove = ipmmu_remove,
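Worth noting for the probe-time check earlier in the patch: with CONFIG_OF disabled, of_match_ptr() compiles the table reference away and the driver can only bind by platform-device name plus platform data. From include/linux/of.h (abridged):

    #ifdef CONFIG_OF
    #define of_match_ptr(_ptr)      (_ptr)
    #else
    #define of_match_ptr(_ptr)      NULL
    #endif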
diff --git a/drivers/iommu/irq_remapping.h b/drivers/iommu/irq_remapping.h
index c448eb48340a..7c70cc29ffe6 100644
--- a/drivers/iommu/irq_remapping.h
+++ b/drivers/iommu/irq_remapping.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2012 Advanced Micro Devices, Inc. 2 * Copyright (C) 2012 Advanced Micro Devices, Inc.
3 * Author: Joerg Roedel <joerg.roedel@amd.com> 3 * Author: Joerg Roedel <jroedel@suse.de>
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index bbb7dcef02d3..f59f857b702e 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1126,7 +1126,7 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
1126 return -EINVAL; 1126 return -EINVAL;
1127 } 1127 }
1128 1128
1129 dev_dbg(dev, "mapping da 0x%lx to pa 0x%x size 0x%x\n", da, pa, bytes); 1129 dev_dbg(dev, "mapping da 0x%lx to pa %pa size 0x%x\n", da, &pa, bytes);
1130 1130
1131 iotlb_init_entry(&e, da, pa, omap_pgsz); 1131 iotlb_init_entry(&e, da, pa, omap_pgsz);
1132 1132
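The omap-iommu change above is a correctness fix, not cosmetics: phys_addr_t is 32 or 64 bits wide depending on LPAE, so printing it with "0x%x" is wrong on LPAE kernels. The "%pa" specifier takes a pointer to the value and prints it at its native width. Sketch:

    phys_addr_t pa = 0x200000000ULL;        /* > 32 bits, possible with LPAE */

    dev_dbg(dev, "pa %pa\n", &pa);          /* note: pass &pa, not pa */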
diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h
new file mode 100644
index 000000000000..1c30014ed176
--- /dev/null
+++ b/include/linux/iopoll.h
@@ -0,0 +1,144 @@
1/*
2 * Copyright (c) 2012-2014 The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14
15#ifndef _LINUX_IOPOLL_H
16#define _LINUX_IOPOLL_H
17
18#include <linux/kernel.h>
19#include <linux/types.h>
20#include <linux/hrtimer.h>
21#include <linux/delay.h>
22#include <linux/errno.h>
23#include <linux/io.h>
24
25/**
26 * readx_poll_timeout - Periodically poll an address until a condition is met or a timeout occurs
27 * @op: accessor function (takes @addr as its only argument)
28 * @addr: Address to poll
29 * @val: Variable to read the value into
30 * @cond: Break condition (usually involving @val)
31 * @sleep_us: Maximum time to sleep between reads in us (0 means no
32 * sleep, i.e. a tight loop). Should be less than ~20ms since
33 * usleep_range is used (see Documentation/timers/timers-howto.txt).
34 * @timeout_us: Timeout in us, 0 means never timeout
35 *
36 * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
37 * case, the last read value at @addr is stored in @val. Must not
38 * be called from atomic context if sleep_us or timeout_us are used.
39 *
40 * When available, you'll probably want to use one of the specialized
41 * macros defined below rather than this macro directly.
42 */
43#define readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) \
44({ \
45 ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
46 might_sleep_if(sleep_us); \
47 for (;;) { \
48 (val) = op(addr); \
49 if (cond) \
50 break; \
51 if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
52 (val) = op(addr); \
53 break; \
54 } \
55 if (sleep_us) \
56 usleep_range((sleep_us >> 2) + 1, sleep_us); \
57 } \
58 (cond) ? 0 : -ETIMEDOUT; \
59})
60
61/**
62 * readx_poll_timeout_atomic - Periodically poll an address until a condition is met or a timeout occurs
63 * @op: accessor function (takes @addr as its only argument)
64 * @addr: Address to poll
65 * @val: Variable to read the value into
66 * @cond: Break condition (usually involving @val)
67 * @delay_us: Time to udelay between reads in us (0 means no delay,
68 * i.e. a tight loop). Should be less than ~10us since udelay
69 * is used (see Documentation/timers/timers-howto.txt).
70 * @timeout_us: Timeout in us, 0 means never timeout
71 *
72 * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
73 * case, the last read value at @addr is stored in @val.
74 *
75 * When available, you'll probably want to use one of the specialized
76 * macros defined below rather than this macro directly.
77 */
78#define readx_poll_timeout_atomic(op, addr, val, cond, delay_us, timeout_us) \
79({ \
80 ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
81 for (;;) { \
82 (val) = op(addr); \
83 if (cond) \
84 break; \
85 if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
86 (val) = op(addr); \
87 break; \
88 } \
89 if (delay_us) \
90 udelay(delay_us); \
91 } \
92 (cond) ? 0 : -ETIMEDOUT; \
93})
94
95
96#define readb_poll_timeout(addr, val, cond, delay_us, timeout_us) \
97 readx_poll_timeout(readb, addr, val, cond, delay_us, timeout_us)
98
99#define readb_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
100 readx_poll_timeout_atomic(readb, addr, val, cond, delay_us, timeout_us)
101
102#define readw_poll_timeout(addr, val, cond, delay_us, timeout_us) \
103 readx_poll_timeout(readw, addr, val, cond, delay_us, timeout_us)
104
105#define readw_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
106 readx_poll_timeout_atomic(readw, addr, val, cond, delay_us, timeout_us)
107
108#define readl_poll_timeout(addr, val, cond, delay_us, timeout_us) \
109 readx_poll_timeout(readl, addr, val, cond, delay_us, timeout_us)
110
111#define readl_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
112 readx_poll_timeout_atomic(readl, addr, val, cond, delay_us, timeout_us)
113
114#define readq_poll_timeout(addr, val, cond, delay_us, timeout_us) \
115 readx_poll_timeout(readq, addr, val, cond, delay_us, timeout_us)
116
117#define readq_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
118 readx_poll_timeout_atomic(readq, addr, val, cond, delay_us, timeout_us)
119
120#define readb_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \
121 readx_poll_timeout(readb_relaxed, addr, val, cond, delay_us, timeout_us)
122
123#define readb_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
124 readx_poll_timeout_atomic(readb_relaxed, addr, val, cond, delay_us, timeout_us)
125
126#define readw_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \
127 readx_poll_timeout(readw_relaxed, addr, val, cond, delay_us, timeout_us)
128
129#define readw_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
130 readx_poll_timeout_atomic(readw_relaxed, addr, val, cond, delay_us, timeout_us)
131
132#define readl_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \
133 readx_poll_timeout(readl_relaxed, addr, val, cond, delay_us, timeout_us)
134
135#define readl_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
136 readx_poll_timeout_atomic(readl_relaxed, addr, val, cond, delay_us, timeout_us)
137
138#define readq_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \
139 readx_poll_timeout(readq_relaxed, addr, val, cond, delay_us, timeout_us)
140
141#define readq_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
142 readx_poll_timeout_atomic(readq_relaxed, addr, val, cond, delay_us, timeout_us)
143
144#endif /* _LINUX_IOPOLL_H */
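A usage sketch for the new helpers; "base", STATUS_REG and STATUS_READY are hypothetical, and the 10/1000 microsecond figures are arbitrary but respect the kernel-doc constraints above. Note the deliberate re-read after the timeout check in the macro body: it avoids falsely returning -ETIMEDOUT when the condition became true during the final sleep.

    #include <linux/iopoll.h>

    u32 val;
    int ret;

    /* Sleep roughly 10us between reads, give up after 1000us. */
    ret = readl_poll_timeout(base + STATUS_REG, val,
                             val & STATUS_READY, 10, 1000);
    if (ret)        /* -ETIMEDOUT; val holds the last value read */
            dev_err(dev, "device never became ready (status 0x%08x)\n", val);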
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 19e81d5ccb6d..3920a19d8194 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -16,9 +16,6 @@
16#include <linux/rbtree.h> 16#include <linux/rbtree.h>
17#include <linux/dma-mapping.h> 17#include <linux/dma-mapping.h>
18 18
19/* IO virtual address start page frame number */
20#define IOVA_START_PFN (1)
21
22/* iova structure */ 19/* iova structure */
23struct iova { 20struct iova {
24 struct rb_node node; 21 struct rb_node node;
@@ -31,6 +28,8 @@ struct iova_domain {
31 spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */ 28 spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */
32 struct rb_root rbroot; /* iova domain rbtree root */ 29 struct rb_root rbroot; /* iova domain rbtree root */
33 struct rb_node *cached32_node; /* Save last alloced node */ 30 struct rb_node *cached32_node; /* Save last alloced node */
31 unsigned long granule; /* pfn granularity for this domain */
32 unsigned long start_pfn; /* Lower limit for this domain */
34 unsigned long dma_32bit_pfn; 33 unsigned long dma_32bit_pfn;
35}; 34};
36 35
@@ -39,6 +38,39 @@ static inline unsigned long iova_size(struct iova *iova)
39 return iova->pfn_hi - iova->pfn_lo + 1; 38 return iova->pfn_hi - iova->pfn_lo + 1;
40} 39}
41 40
41static inline unsigned long iova_shift(struct iova_domain *iovad)
42{
43 return __ffs(iovad->granule);
44}
45
46static inline unsigned long iova_mask(struct iova_domain *iovad)
47{
48 return iovad->granule - 1;
49}
50
51static inline size_t iova_offset(struct iova_domain *iovad, dma_addr_t iova)
52{
53 return iova & iova_mask(iovad);
54}
55
56static inline size_t iova_align(struct iova_domain *iovad, size_t size)
57{
58 return ALIGN(size, iovad->granule);
59}
60
61static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova)
62{
63 return (dma_addr_t)iova->pfn_lo << iova_shift(iovad);
64}
65
66static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
67{
68 return iova >> iova_shift(iovad);
69}
70
71int iommu_iova_cache_init(void);
72void iommu_iova_cache_destroy(void);
73
42struct iova *alloc_iova_mem(void); 74struct iova *alloc_iova_mem(void);
43void free_iova_mem(struct iova *iova); 75void free_iova_mem(struct iova *iova);
44void free_iova(struct iova_domain *iovad, unsigned long pfn); 76void free_iova(struct iova_domain *iovad, unsigned long pfn);
@@ -49,7 +81,8 @@ struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
49struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo, 81struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
50 unsigned long pfn_hi); 82 unsigned long pfn_hi);
51void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to); 83void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
52void init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit); 84void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
85 unsigned long start_pfn, unsigned long pfn_32bit);
53struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn); 86struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
54void put_iova_domain(struct iova_domain *iovad); 87void put_iova_domain(struct iova_domain *iovad);
55struct iova *split_and_remove_iova(struct iova_domain *iovad, 88struct iova *split_and_remove_iova(struct iova_domain *iovad,
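A worked example of the new inline helpers, assuming iovad->granule == SZ_4K (0x1000):

    iova_shift(iovad);                      /* == 12, i.e. __ffs(0x1000) */
    iova_mask(iovad);                       /* == 0xfff */
    iova_offset(iovad, 0x12345678);         /* == 0x678 */
    iova_align(iovad, 0x1001);              /* == 0x2000 */

    /* pfn <-> bus-address conversion round-trips through the shift: */
    dma_addr_t addr = iova_dma_addr(iovad, iova);   /* pfn_lo << 12 */
    unsigned long pfn = iova_pfn(iovad, addr);      /* addr >> 12 == pfn_lo */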
diff --git a/include/linux/platform_data/ipmmu-vmsa.h b/include/linux/platform_data/ipmmu-vmsa.h
deleted file mode 100644
index 5275b3ac6d37..000000000000
--- a/include/linux/platform_data/ipmmu-vmsa.h
+++ /dev/null
@@ -1,24 +0,0 @@
1/*
2 * IPMMU VMSA Platform Data
3 *
4 * Copyright (C) 2014 Renesas Electronics Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 */
10
11#ifndef __IPMMU_VMSA_H__
12#define __IPMMU_VMSA_H__
13
14struct ipmmu_vmsa_master {
15 const char *name;
16 unsigned int utlb;
17};
18
19struct ipmmu_vmsa_platform_data {
20 const struct ipmmu_vmsa_master *masters;
21 unsigned int num_masters;
22};
23
24#endif /* __IPMMU_VMSA_H__ */
diff --git a/include/trace/events/iommu.h b/include/trace/events/iommu.h
index a8f5c32d174b..2c7befb10f13 100644
--- a/include/trace/events/iommu.h
+++ b/include/trace/events/iommu.h
@@ -83,7 +83,7 @@ DEFINE_EVENT(iommu_device_event, detach_device_from_domain,
83 TP_ARGS(dev) 83 TP_ARGS(dev)
84); 84);
85 85
86DECLARE_EVENT_CLASS(iommu_map_unmap, 86TRACE_EVENT(map,
87 87
88 TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size), 88 TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
89 89
@@ -92,7 +92,7 @@ DECLARE_EVENT_CLASS(iommu_map_unmap,
92 TP_STRUCT__entry( 92 TP_STRUCT__entry(
93 __field(u64, iova) 93 __field(u64, iova)
94 __field(u64, paddr) 94 __field(u64, paddr)
95 __field(int, size) 95 __field(size_t, size)
96 ), 96 ),
97 97
98 TP_fast_assign( 98 TP_fast_assign(
@@ -101,26 +101,31 @@ DECLARE_EVENT_CLASS(iommu_map_unmap,
101 __entry->size = size; 101 __entry->size = size;
102 ), 102 ),
103 103
104 TP_printk("IOMMU: iova=0x%016llx paddr=0x%016llx size=0x%x", 104 TP_printk("IOMMU: iova=0x%016llx paddr=0x%016llx size=%zu",
105 __entry->iova, __entry->paddr, __entry->size 105 __entry->iova, __entry->paddr, __entry->size
106 ) 106 )
107); 107);
108 108
109DEFINE_EVENT(iommu_map_unmap, map, 109TRACE_EVENT(unmap,
110 110
111 TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size), 111 TP_PROTO(unsigned long iova, size_t size, size_t unmapped_size),
112
113 TP_ARGS(iova, paddr, size)
114);
115 112
116DEFINE_EVENT_PRINT(iommu_map_unmap, unmap, 113 TP_ARGS(iova, size, unmapped_size),
117 114
118 TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size), 115 TP_STRUCT__entry(
116 __field(u64, iova)
117 __field(size_t, size)
118 __field(size_t, unmapped_size)
119 ),
119 120
120 TP_ARGS(iova, paddr, size), 121 TP_fast_assign(
122 __entry->iova = iova;
123 __entry->size = size;
124 __entry->unmapped_size = unmapped_size;
125 ),
121 126
122 TP_printk("IOMMU: iova=0x%016llx size=0x%x", 127 TP_printk("IOMMU: iova=0x%016llx size=%zu unmapped_size=%zu",
123 __entry->iova, __entry->size 128 __entry->iova, __entry->size, __entry->unmapped_size
124 ) 129 )
125); 130);
126 131