author    Joerg Roedel <jroedel@suse.de>  2015-01-26 07:22:25 -0500
committer Joerg Roedel <jroedel@suse.de>  2015-01-26 07:22:25 -0500
commit    16753322983bcca0eca6d81f20d23277df0d6cf7
tree      8064489ae62d2fc8ea344b6736f0b204e3187a3b
parent    26bc420b59a38e4e6685a73345a0def461136dce
parent    859a732e4f713270152c78df6e09accbde006734

Merge branch 'for-joerg/arm-smmu/updates' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into arm/smmu
-rw-r--r--  MAINTAINERS                    |   1
-rw-r--r--  arch/arm64/Kconfig             |   1
-rw-r--r--  drivers/iommu/Kconfig          |  32
-rw-r--r--  drivers/iommu/Makefile         |   2
-rw-r--r--  drivers/iommu/arm-smmu.c       | 935
-rw-r--r--  drivers/iommu/io-pgtable-arm.c | 986
-rw-r--r--  drivers/iommu/io-pgtable.c     |  82
-rw-r--r--  drivers/iommu/io-pgtable.h     | 143
-rw-r--r--  include/linux/iopoll.h         | 144
9 files changed, 1706 insertions(+), 620 deletions(-)
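For orientation before the hunks below: this merge replaces the SMMU driver's homegrown, CPU-pagetable-based allocator with the generic io-pgtable framework. The following is a minimal, illustrative sketch (not part of the commit) of how a driver consumes that interface, using only names visible in the diff — the io_pgtable_cfg fields, the iommu_gather_ops callbacks and the alloc/free entry points. The stub_* helpers and example_map_one_page() are invented for illustration; real callbacks must perform the TLB maintenance they advertise, and locking/error handling are elided.

#include "io-pgtable.h"

/* Hypothetical no-op TLB callbacks; a real driver invalidates hardware TLBs here */
static void stub_tlb_flush_all(void *cookie) { }
static void stub_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
			       void *cookie) { }
static void stub_tlb_sync(void *cookie) { }
static void stub_flush_pgtable(void *addr, size_t size, void *cookie) { }

static struct iommu_gather_ops stub_gather_ops = {
	.tlb_flush_all	= stub_tlb_flush_all,
	.tlb_add_flush	= stub_tlb_add_flush,
	.tlb_sync	= stub_tlb_sync,
	.flush_pgtable	= stub_flush_pgtable,
};

static int example_map_one_page(void *cookie)
{
	struct io_pgtable_ops *ops;
	struct io_pgtable_cfg cfg = {
		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
		.ias		= 48,	/* input (virtual) address bits */
		.oas		= 48,	/* output (physical) address bits */
		.tlb		= &stub_gather_ops,
	};

	/* Allocate a stage-1 64-bit LPAE table, as the SMMU driver does below */
	ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie);
	if (!ops)
		return -ENOMEM;

	/* Map one 4K page read/write, then tear everything down again */
	ops->map(ops, 0x1000, 0x80001000, SZ_4K, IOMMU_READ | IOMMU_WRITE);
	ops->unmap(ops, 0x1000, SZ_4K);
	free_io_pgtable_ops(ops);
	return 0;
}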
diff --git a/MAINTAINERS b/MAINTAINERS
index 2ebb056cbe0a..c437f7e9e9a4 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1582,6 +1582,7 @@ M:	Will Deacon <will.deacon@arm.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	drivers/iommu/arm-smmu.c
+F:	drivers/iommu/io-pgtable-arm.c
 
 ARM64 PORT (AARCH64 ARCHITECTURE)
 M:	Catalin Marinas <catalin.marinas@arm.com>
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index b1f9a20a3677..528c3fd2d4c1 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -349,7 +349,6 @@ config ARM64_VA_BITS_42
 
 config ARM64_VA_BITS_48
 	bool "48-bit"
-	depends on !ARM_SMMU
 
 endchoice
 
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 325188eef1c1..87060ad6829d 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -13,6 +13,32 @@ menuconfig IOMMU_SUPPORT
 
 if IOMMU_SUPPORT
 
+menu "Generic IOMMU Pagetable Support"
+
+# Selected by the actual pagetable implementations
+config IOMMU_IO_PGTABLE
+	bool
+
+config IOMMU_IO_PGTABLE_LPAE
+	bool "ARMv7/v8 Long Descriptor Format"
+	select IOMMU_IO_PGTABLE
+	help
+	  Enable support for the ARM long descriptor pagetable format.
+	  This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page
+	  sizes at both stage-1 and stage-2, as well as address spaces
+	  up to 48-bits in size.
+
+config IOMMU_IO_PGTABLE_LPAE_SELFTEST
+	bool "LPAE selftests"
+	depends on IOMMU_IO_PGTABLE_LPAE
+	help
+	  Enable self-tests for LPAE page table allocator. This performs
+	  a series of page-table consistency checks during boot.
+
+	  If unsure, say N here.
+
+endmenu
+
 config OF_IOMMU
 	def_bool y
 	depends on OF && IOMMU_API
@@ -304,13 +330,13 @@ config SPAPR_TCE_IOMMU
 
 config ARM_SMMU
 	bool "ARM Ltd. System MMU (SMMU) Support"
-	depends on ARM64 || (ARM_LPAE && OF)
+	depends on ARM64 || ARM
 	select IOMMU_API
+	select IOMMU_IO_PGTABLE_LPAE
 	select ARM_DMA_USE_IOMMU if ARM
 	help
 	  Support for implementations of the ARM System MMU architecture
-	  versions 1 and 2. The driver supports both v7l and v8l table
-	  formats with 4k and 64k page sizes.
+	  versions 1 and 2.
 
 	  Say Y here if your SoC includes an IOMMU device implementing
 	  the ARM SMMU architecture.
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 7b976f294a69..d6889b487d55 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -1,6 +1,8 @@
 obj-$(CONFIG_IOMMU_API) += iommu.o
 obj-$(CONFIG_IOMMU_API) += iommu-traces.o
 obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o
+obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o
+obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
 obj-$(CONFIG_OF_IOMMU) += of_iommu.o
 obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
 obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 6cd47b75286f..1d6d43bb3395 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -23,8 +23,6 @@
  *	- Stream-matching and stream-indexing
  *	- v7/v8 long-descriptor format
  *	- Non-secure access to the SMMU
- *	- 4k and 64k pages, with contiguous pte hints.
- *	- Up to 48-bit addressing (dependent on VA_BITS)
  *	- Context fault reporting
  */
 
@@ -36,7 +34,7 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/iommu.h>
-#include <linux/mm.h>
+#include <linux/iopoll.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/pci.h>
@@ -46,7 +44,7 @@
 
 #include <linux/amba/bus.h>
 
-#include <asm/pgalloc.h>
+#include "io-pgtable.h"
 
 /* Maximum number of stream IDs assigned to a single device */
 #define MAX_MASTER_STREAMIDS		MAX_PHANDLE_ARGS
@@ -71,40 +69,6 @@
 	((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
 		? 0x400 : 0))
 
-/* Page table bits */
-#define ARM_SMMU_PTE_XN			(((pteval_t)3) << 53)
-#define ARM_SMMU_PTE_CONT		(((pteval_t)1) << 52)
-#define ARM_SMMU_PTE_AF			(((pteval_t)1) << 10)
-#define ARM_SMMU_PTE_SH_NS		(((pteval_t)0) << 8)
-#define ARM_SMMU_PTE_SH_OS		(((pteval_t)2) << 8)
-#define ARM_SMMU_PTE_SH_IS		(((pteval_t)3) << 8)
-#define ARM_SMMU_PTE_PAGE		(((pteval_t)3) << 0)
-
-#if PAGE_SIZE == SZ_4K
-#define ARM_SMMU_PTE_CONT_ENTRIES	16
-#elif PAGE_SIZE == SZ_64K
-#define ARM_SMMU_PTE_CONT_ENTRIES	32
-#else
-#define ARM_SMMU_PTE_CONT_ENTRIES	1
-#endif
-
-#define ARM_SMMU_PTE_CONT_SIZE		(PAGE_SIZE * ARM_SMMU_PTE_CONT_ENTRIES)
-#define ARM_SMMU_PTE_CONT_MASK		(~(ARM_SMMU_PTE_CONT_SIZE - 1))
-
-/* Stage-1 PTE */
-#define ARM_SMMU_PTE_AP_UNPRIV		(((pteval_t)1) << 6)
-#define ARM_SMMU_PTE_AP_RDONLY		(((pteval_t)2) << 6)
-#define ARM_SMMU_PTE_ATTRINDX_SHIFT	2
-#define ARM_SMMU_PTE_nG			(((pteval_t)1) << 11)
-
-/* Stage-2 PTE */
-#define ARM_SMMU_PTE_HAP_FAULT		(((pteval_t)0) << 6)
-#define ARM_SMMU_PTE_HAP_READ		(((pteval_t)1) << 6)
-#define ARM_SMMU_PTE_HAP_WRITE		(((pteval_t)2) << 6)
-#define ARM_SMMU_PTE_MEMATTR_OIWB	(((pteval_t)0xf) << 2)
-#define ARM_SMMU_PTE_MEMATTR_NC		(((pteval_t)0x5) << 2)
-#define ARM_SMMU_PTE_MEMATTR_DEV	(((pteval_t)0x1) << 2)
-
 /* Configuration registers */
 #define ARM_SMMU_GR0_sCR0		0x0
 #define sCR0_CLIENTPD			(1 << 0)
@@ -132,17 +96,12 @@
 #define ARM_SMMU_GR0_sGFSYNR0		0x50
 #define ARM_SMMU_GR0_sGFSYNR1		0x54
 #define ARM_SMMU_GR0_sGFSYNR2		0x58
-#define ARM_SMMU_GR0_PIDR0		0xfe0
-#define ARM_SMMU_GR0_PIDR1		0xfe4
-#define ARM_SMMU_GR0_PIDR2		0xfe8
 
 #define ID0_S1TS			(1 << 30)
 #define ID0_S2TS			(1 << 29)
 #define ID0_NTS				(1 << 28)
 #define ID0_SMS				(1 << 27)
-#define ID0_PTFS_SHIFT			24
-#define ID0_PTFS_MASK			0x2
-#define ID0_PTFS_V8_ONLY		0x2
+#define ID0_ATOSNS			(1 << 26)
 #define ID0_CTTW			(1 << 14)
 #define ID0_NUMIRPT_SHIFT		16
 #define ID0_NUMIRPT_MASK		0xff
@@ -169,11 +128,7 @@
 #define ID2_PTFS_16K			(1 << 13)
 #define ID2_PTFS_64K			(1 << 14)
 
-#define PIDR2_ARCH_SHIFT		4
-#define PIDR2_ARCH_MASK			0xf
-
 /* Global TLB invalidation */
-#define ARM_SMMU_GR0_STLBIALL		0x60
 #define ARM_SMMU_GR0_TLBIVMID		0x64
 #define ARM_SMMU_GR0_TLBIALLNSNH	0x68
 #define ARM_SMMU_GR0_TLBIALLH		0x6c
@@ -231,13 +186,25 @@
 #define ARM_SMMU_CB_TTBCR2		0x10
 #define ARM_SMMU_CB_TTBR0_LO		0x20
 #define ARM_SMMU_CB_TTBR0_HI		0x24
+#define ARM_SMMU_CB_TTBR1_LO		0x28
+#define ARM_SMMU_CB_TTBR1_HI		0x2c
 #define ARM_SMMU_CB_TTBCR		0x30
 #define ARM_SMMU_CB_S1_MAIR0		0x38
+#define ARM_SMMU_CB_S1_MAIR1		0x3c
+#define ARM_SMMU_CB_PAR_LO		0x50
+#define ARM_SMMU_CB_PAR_HI		0x54
 #define ARM_SMMU_CB_FSR			0x58
 #define ARM_SMMU_CB_FAR_LO		0x60
 #define ARM_SMMU_CB_FAR_HI		0x64
 #define ARM_SMMU_CB_FSYNR0		0x68
+#define ARM_SMMU_CB_S1_TLBIVA		0x600
 #define ARM_SMMU_CB_S1_TLBIASID		0x610
+#define ARM_SMMU_CB_S1_TLBIVAL		0x620
+#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
+#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
+#define ARM_SMMU_CB_ATS1PR_LO		0x800
+#define ARM_SMMU_CB_ATS1PR_HI		0x804
+#define ARM_SMMU_CB_ATSR		0x8f0
 
 #define SCTLR_S1_ASIDPNE		(1 << 12)
 #define SCTLR_CFCFG			(1 << 7)
@@ -249,47 +216,16 @@
 #define SCTLR_M				(1 << 0)
 #define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)
 
-#define RESUME_RETRY			(0 << 0)
-#define RESUME_TERMINATE		(1 << 0)
-
-#define TTBCR_EAE			(1 << 31)
+#define CB_PAR_F			(1 << 0)
 
-#define TTBCR_PASIZE_SHIFT		16
-#define TTBCR_PASIZE_MASK		0x7
+#define ATSR_ACTIVE			(1 << 0)
 
-#define TTBCR_TG0_4K			(0 << 14)
-#define TTBCR_TG0_64K			(1 << 14)
-
-#define TTBCR_SH0_SHIFT			12
-#define TTBCR_SH0_MASK			0x3
-#define TTBCR_SH_NS			0
-#define TTBCR_SH_OS			2
-#define TTBCR_SH_IS			3
-
-#define TTBCR_ORGN0_SHIFT		10
-#define TTBCR_IRGN0_SHIFT		8
-#define TTBCR_RGN_MASK			0x3
-#define TTBCR_RGN_NC			0
-#define TTBCR_RGN_WBWA			1
-#define TTBCR_RGN_WT			2
-#define TTBCR_RGN_WB			3
-
-#define TTBCR_SL0_SHIFT			6
-#define TTBCR_SL0_MASK			0x3
-#define TTBCR_SL0_LVL_2			0
-#define TTBCR_SL0_LVL_1			1
-
-#define TTBCR_T1SZ_SHIFT		16
-#define TTBCR_T0SZ_SHIFT		0
-#define TTBCR_SZ_MASK			0xf
+#define RESUME_RETRY			(0 << 0)
+#define RESUME_TERMINATE		(1 << 0)
 
 #define TTBCR2_SEP_SHIFT		15
 #define TTBCR2_SEP_MASK			0x7
 
-#define TTBCR2_PASIZE_SHIFT		0
-#define TTBCR2_PASIZE_MASK		0x7
-
-/* Common definitions for PASize and SEP fields */
 #define TTBCR2_ADDR_32			0
 #define TTBCR2_ADDR_36			1
 #define TTBCR2_ADDR_40			2
@@ -297,16 +233,7 @@
 #define TTBCR2_ADDR_44			4
 #define TTBCR2_ADDR_48			5
 
 #define TTBRn_HI_ASID_SHIFT		16
-
-#define MAIR_ATTR_SHIFT(n)		((n) << 3)
-#define MAIR_ATTR_MASK			0xff
-#define MAIR_ATTR_DEVICE		0x04
-#define MAIR_ATTR_NC			0x44
-#define MAIR_ATTR_WBRWA			0xff
-#define MAIR_ATTR_IDX_NC		0
-#define MAIR_ATTR_IDX_CACHE		1
-#define MAIR_ATTR_IDX_DEV		2
 
 #define FSR_MULTI			(1 << 31)
 #define FSR_SS				(1 << 30)
@@ -366,6 +293,7 @@ struct arm_smmu_device {
 #define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
 #define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
 #define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
+#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
 	u32				features;
 
 #define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
@@ -380,10 +308,9 @@ struct arm_smmu_device {
 	u32				num_mapping_groups;
 	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);
 
-	unsigned long			s1_input_size;
-	unsigned long			s1_output_size;
-	unsigned long			s2_input_size;
-	unsigned long			s2_output_size;
+	unsigned long			va_size;
+	unsigned long			ipa_size;
+	unsigned long			pa_size;
 
 	u32				num_global_irqs;
 	u32				num_context_irqs;
@@ -397,7 +324,6 @@ struct arm_smmu_cfg {
 	u8				cbndx;
 	u8				irptndx;
 	u32				cbar;
-	pgd_t				*pgd;
 };
 #define INVALID_IRPTNDX			0xff
 
@@ -412,11 +338,15 @@ enum arm_smmu_domain_stage {
 
 struct arm_smmu_domain {
 	struct arm_smmu_device		*smmu;
+	struct io_pgtable_ops		*pgtbl_ops;
+	spinlock_t			pgtbl_lock;
 	struct arm_smmu_cfg		cfg;
 	enum arm_smmu_domain_stage	stage;
-	spinlock_t			lock;
+	struct mutex			init_mutex; /* Protects smmu pointer */
 };
 
+static struct iommu_ops arm_smmu_ops;
+
 static DEFINE_SPINLOCK(arm_smmu_devices_lock);
 static LIST_HEAD(arm_smmu_devices);
 
@@ -597,7 +527,7 @@ static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
 }
 
 /* Wait for any pending TLB invalidations to complete */
-static void arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
+static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
 {
 	int count = 0;
 	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
@@ -615,12 +545,19 @@ static void arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
 	}
 }
 
-static void arm_smmu_tlb_inv_context(struct arm_smmu_domain *smmu_domain)
+static void arm_smmu_tlb_sync(void *cookie)
 {
+	struct arm_smmu_domain *smmu_domain = cookie;
+	__arm_smmu_tlb_sync(smmu_domain->smmu);
+}
+
+static void arm_smmu_tlb_inv_context(void *cookie)
+{
+	struct arm_smmu_domain *smmu_domain = cookie;
 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
-	void __iomem *base = ARM_SMMU_GR0(smmu);
 	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
+	void __iomem *base;
 
 	if (stage1) {
 		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
@@ -632,9 +569,76 @@ static void arm_smmu_tlb_inv_context(struct arm_smmu_domain *smmu_domain)
 			       base + ARM_SMMU_GR0_TLBIVMID);
 	}
 
-	arm_smmu_tlb_sync(smmu);
+	__arm_smmu_tlb_sync(smmu);
+}
+
+static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
+					  bool leaf, void *cookie)
+{
+	struct arm_smmu_domain *smmu_domain = cookie;
+	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
+	void __iomem *reg;
+
+	if (stage1) {
+		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
+
+		if (!IS_ENABLED(CONFIG_64BIT) || smmu->version == ARM_SMMU_V1) {
+			iova &= ~12UL;
+			iova |= ARM_SMMU_CB_ASID(cfg);
+			writel_relaxed(iova, reg);
+#ifdef CONFIG_64BIT
+		} else {
+			iova >>= 12;
+			iova |= (u64)ARM_SMMU_CB_ASID(cfg) << 48;
+			writeq_relaxed(iova, reg);
+#endif
+		}
+#ifdef CONFIG_64BIT
+	} else if (smmu->version == ARM_SMMU_V2) {
+		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
+			      ARM_SMMU_CB_S2_TLBIIPAS2;
+		writeq_relaxed(iova >> 12, reg);
+#endif
+	} else {
+		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
+		writel_relaxed(ARM_SMMU_CB_VMID(cfg), reg);
+	}
+}
+
+static void arm_smmu_flush_pgtable(void *addr, size_t size, void *cookie)
+{
+	struct arm_smmu_domain *smmu_domain = cookie;
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
+
+	/* Ensure new page tables are visible to the hardware walker */
+	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) {
+		dsb(ishst);
+	} else {
+		/*
+		 * If the SMMU can't walk tables in the CPU caches, treat them
+		 * like non-coherent DMA since we need to flush the new entries
+		 * all the way out to memory. There's no possibility of
+		 * recursion here as the SMMU table walker will not be wired
+		 * through another SMMU.
+		 */
+		dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
+			     DMA_TO_DEVICE);
+	}
 }
 
+static struct iommu_gather_ops arm_smmu_gather_ops = {
+	.tlb_flush_all	= arm_smmu_tlb_inv_context,
+	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
+	.tlb_sync	= arm_smmu_tlb_sync,
+	.flush_pgtable	= arm_smmu_flush_pgtable,
+};
+
 static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 {
 	int flags, ret;
@@ -712,29 +716,8 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
 	return IRQ_HANDLED;
 }
 
-static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr,
-				   size_t size)
-{
-	unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
-
-	/* Ensure new page tables are visible to the hardware walker */
-	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) {
-		dsb(ishst);
-	} else {
-		/*
-		 * If the SMMU can't walk tables in the CPU caches, treat them
-		 * like non-coherent DMA since we need to flush the new entries
-		 * all the way out to memory. There's no possibility of
-		 * recursion here as the SMMU table walker will not be wired
-		 * through another SMMU.
-		 */
-		dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
-			     DMA_TO_DEVICE);
-	}
-}
-
-static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
+static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
+				       struct io_pgtable_cfg *pgtbl_cfg)
 {
 	u32 reg;
 	bool stage1;
@@ -771,124 +754,68 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
 #else
 		reg = CBA2R_RW64_32BIT;
 #endif
-		writel_relaxed(reg,
-			       gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
-
-		/* TTBCR2 */
-		switch (smmu->s1_input_size) {
-		case 32:
-			reg = (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT);
-			break;
-		case 36:
-			reg = (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT);
-			break;
-		case 39:
-		case 40:
-			reg = (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT);
-			break;
-		case 42:
-			reg = (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT);
-			break;
-		case 44:
-			reg = (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT);
-			break;
-		case 48:
-			reg = (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT);
-			break;
-		}
-
-		switch (smmu->s1_output_size) {
-		case 32:
-			reg |= (TTBCR2_ADDR_32 << TTBCR2_PASIZE_SHIFT);
-			break;
-		case 36:
-			reg |= (TTBCR2_ADDR_36 << TTBCR2_PASIZE_SHIFT);
-			break;
-		case 39:
-		case 40:
-			reg |= (TTBCR2_ADDR_40 << TTBCR2_PASIZE_SHIFT);
-			break;
-		case 42:
-			reg |= (TTBCR2_ADDR_42 << TTBCR2_PASIZE_SHIFT);
-			break;
-		case 44:
-			reg |= (TTBCR2_ADDR_44 << TTBCR2_PASIZE_SHIFT);
-			break;
-		case 48:
-			reg |= (TTBCR2_ADDR_48 << TTBCR2_PASIZE_SHIFT);
-			break;
-		}
-
-		if (stage1)
-			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
+		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
 	}
 
-	/* TTBR0 */
-	arm_smmu_flush_pgtable(smmu, cfg->pgd,
-			       PTRS_PER_PGD * sizeof(pgd_t));
-	reg = __pa(cfg->pgd);
-	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
-	reg = (phys_addr_t)__pa(cfg->pgd) >> 32;
-	if (stage1)
-		reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT;
-	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
-
-	/*
-	 * TTBCR
-	 * We use long descriptor, with inner-shareable WBWA tables in TTBR0.
-	 */
-	if (smmu->version > ARM_SMMU_V1) {
-		if (PAGE_SIZE == SZ_4K)
-			reg = TTBCR_TG0_4K;
-		else
-			reg = TTBCR_TG0_64K;
-
-		if (!stage1) {
-			reg |= (64 - smmu->s2_input_size) << TTBCR_T0SZ_SHIFT;
-
-			switch (smmu->s2_output_size) {
-			case 32:
-				reg |= (TTBCR2_ADDR_32 << TTBCR_PASIZE_SHIFT);
-				break;
-			case 36:
-				reg |= (TTBCR2_ADDR_36 << TTBCR_PASIZE_SHIFT);
-				break;
-			case 40:
-				reg |= (TTBCR2_ADDR_40 << TTBCR_PASIZE_SHIFT);
-				break;
-			case 42:
-				reg |= (TTBCR2_ADDR_42 << TTBCR_PASIZE_SHIFT);
-				break;
-			case 44:
-				reg |= (TTBCR2_ADDR_44 << TTBCR_PASIZE_SHIFT);
-				break;
-			case 48:
-				reg |= (TTBCR2_ADDR_48 << TTBCR_PASIZE_SHIFT);
-				break;
-			}
-		} else {
-			reg |= (64 - smmu->s1_input_size) << TTBCR_T0SZ_SHIFT;
-		}
-	} else {
-		reg = 0;
-	}
-
-	reg |= TTBCR_EAE |
-	      (TTBCR_SH_IS << TTBCR_SH0_SHIFT) |
-	      (TTBCR_RGN_WBWA << TTBCR_ORGN0_SHIFT) |
-	      (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT);
-
-	if (!stage1)
-		reg |= (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT);
-
-	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
-
-	/* MAIR0 (stage-1 only) */
-	if (stage1) {
-		reg = (MAIR_ATTR_NC << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_NC)) |
-		      (MAIR_ATTR_WBRWA << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_CACHE)) |
-		      (MAIR_ATTR_DEVICE << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_DEV));
-		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
-	}
+	/* TTBRs */
+	if (stage1) {
+		reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
+		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
+		reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0] >> 32;
+		reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT;
+		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
+
+		reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
+		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1_LO);
+		reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1] >> 32;
+		reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT;
+		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1_HI);
+	} else {
+		reg = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
+		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
+		reg = pgtbl_cfg->arm_lpae_s2_cfg.vttbr >> 32;
+		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
+	}
+
+	/* TTBCR */
+	if (stage1) {
+		reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
+		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
+		if (smmu->version > ARM_SMMU_V1) {
+			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
+			switch (smmu->va_size) {
+			case 32:
+				reg |= (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT);
+				break;
+			case 36:
+				reg |= (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT);
+				break;
+			case 40:
+				reg |= (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT);
+				break;
+			case 42:
+				reg |= (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT);
+				break;
+			case 44:
+				reg |= (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT);
+				break;
+			case 48:
+				reg |= (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT);
+				break;
+			}
+			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
+		}
+	} else {
+		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
+		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
+	}
+
+	/* MAIRs (stage-1 only) */
+	if (stage1) {
+		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
+		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
+		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
+		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
+	}
 
 	/* SCTLR */
@@ -905,11 +832,14 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 					struct arm_smmu_device *smmu)
 {
 	int irq, start, ret = 0;
-	unsigned long flags;
+	unsigned long ias, oas;
+	struct io_pgtable_ops *pgtbl_ops;
+	struct io_pgtable_cfg pgtbl_cfg;
+	enum io_pgtable_fmt fmt;
 	struct arm_smmu_domain *smmu_domain = domain->priv;
 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 
-	spin_lock_irqsave(&smmu_domain->lock, flags);
+	mutex_lock(&smmu_domain->init_mutex);
 	if (smmu_domain->smmu)
 		goto out_unlock;
 
@@ -940,6 +870,12 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 	case ARM_SMMU_DOMAIN_S1:
 		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
 		start = smmu->num_s2_context_banks;
+		ias = smmu->va_size;
+		oas = smmu->ipa_size;
+		if (IS_ENABLED(CONFIG_64BIT))
+			fmt = ARM_64_LPAE_S1;
+		else
+			fmt = ARM_32_LPAE_S1;
 		break;
 	case ARM_SMMU_DOMAIN_NESTED:
 		/*
@@ -949,6 +885,12 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 	case ARM_SMMU_DOMAIN_S2:
 		cfg->cbar = CBAR_TYPE_S2_TRANS;
 		start = 0;
+		ias = smmu->ipa_size;
+		oas = smmu->pa_size;
+		if (IS_ENABLED(CONFIG_64BIT))
+			fmt = ARM_64_LPAE_S2;
+		else
+			fmt = ARM_32_LPAE_S2;
 		break;
 	default:
 		ret = -EINVAL;
@@ -968,10 +910,30 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 		cfg->irptndx = cfg->cbndx;
 	}
 
-	ACCESS_ONCE(smmu_domain->smmu) = smmu;
-	arm_smmu_init_context_bank(smmu_domain);
-	spin_unlock_irqrestore(&smmu_domain->lock, flags);
+	pgtbl_cfg = (struct io_pgtable_cfg) {
+		.pgsize_bitmap	= arm_smmu_ops.pgsize_bitmap,
+		.ias		= ias,
+		.oas		= oas,
+		.tlb		= &arm_smmu_gather_ops,
+	};
+
+	smmu_domain->smmu = smmu;
+	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
+	if (!pgtbl_ops) {
+		ret = -ENOMEM;
+		goto out_clear_smmu;
+	}
+
+	/* Update our support page sizes to reflect the page table format */
+	arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
 
+	/* Initialise the context bank with our page table cfg */
+	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
+
+	/*
+	 * Request context fault interrupt. Do this last to avoid the
+	 * handler seeing a half-initialised domain state.
+	 */
 	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
 	ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
 			  "arm-smmu-context-fault", domain);
@@ -981,10 +943,16 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 		cfg->irptndx = INVALID_IRPTNDX;
 	}
 
+	mutex_unlock(&smmu_domain->init_mutex);
+
+	/* Publish page table ops for map/unmap */
+	smmu_domain->pgtbl_ops = pgtbl_ops;
 	return 0;
 
+out_clear_smmu:
+	smmu_domain->smmu = NULL;
 out_unlock:
-	spin_unlock_irqrestore(&smmu_domain->lock, flags);
+	mutex_unlock(&smmu_domain->init_mutex);
 	return ret;
 }
 
@@ -999,23 +967,27 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
 	if (!smmu)
 		return;
 
-	/* Disable the context bank and nuke the TLB before freeing it. */
+	/*
+	 * Disable the context bank and free the page tables before freeing
+	 * it.
+	 */
 	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
 	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
-	arm_smmu_tlb_inv_context(smmu_domain);
 
 	if (cfg->irptndx != INVALID_IRPTNDX) {
 		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
 		free_irq(irq, domain);
 	}
 
+	if (smmu_domain->pgtbl_ops)
+		free_io_pgtable_ops(smmu_domain->pgtbl_ops);
+
 	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
 }
 
 static int arm_smmu_domain_init(struct iommu_domain *domain)
 {
 	struct arm_smmu_domain *smmu_domain;
-	pgd_t *pgd;
 
 	/*
 	 * Allocate the domain and initialise some of its data structures.
@@ -1026,81 +998,10 @@ static int arm_smmu_domain_init(struct iommu_domain *domain)
 	if (!smmu_domain)
 		return -ENOMEM;
 
-	pgd = kcalloc(PTRS_PER_PGD, sizeof(pgd_t), GFP_KERNEL);
-	if (!pgd)
-		goto out_free_domain;
-	smmu_domain->cfg.pgd = pgd;
-
-	spin_lock_init(&smmu_domain->lock);
+	mutex_init(&smmu_domain->init_mutex);
+	spin_lock_init(&smmu_domain->pgtbl_lock);
 	domain->priv = smmu_domain;
 	return 0;
-
-out_free_domain:
-	kfree(smmu_domain);
-	return -ENOMEM;
-}
-
-static void arm_smmu_free_ptes(pmd_t *pmd)
-{
-	pgtable_t table = pmd_pgtable(*pmd);
-
-	__free_page(table);
-}
-
-static void arm_smmu_free_pmds(pud_t *pud)
-{
-	int i;
-	pmd_t *pmd, *pmd_base = pmd_offset(pud, 0);
-
-	pmd = pmd_base;
-	for (i = 0; i < PTRS_PER_PMD; ++i) {
-		if (pmd_none(*pmd))
-			continue;
-
-		arm_smmu_free_ptes(pmd);
-		pmd++;
-	}
-
-	pmd_free(NULL, pmd_base);
-}
-
-static void arm_smmu_free_puds(pgd_t *pgd)
-{
-	int i;
-	pud_t *pud, *pud_base = pud_offset(pgd, 0);
-
-	pud = pud_base;
-	for (i = 0; i < PTRS_PER_PUD; ++i) {
-		if (pud_none(*pud))
-			continue;
-
-		arm_smmu_free_pmds(pud);
-		pud++;
-	}
-
-	pud_free(NULL, pud_base);
-}
-
-static void arm_smmu_free_pgtables(struct arm_smmu_domain *smmu_domain)
-{
-	int i;
-	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
-	pgd_t *pgd, *pgd_base = cfg->pgd;
-
-	/*
-	 * Recursively free the page tables for this domain. We don't
-	 * care about speculative TLB filling because the tables should
-	 * not be active in any context bank at this point (SCTLR.M is 0).
-	 */
-	pgd = pgd_base;
-	for (i = 0; i < PTRS_PER_PGD; ++i) {
-		if (pgd_none(*pgd))
-			continue;
-		arm_smmu_free_puds(pgd);
-		pgd++;
-	}
-
-	kfree(pgd_base);
-}
 
 static void arm_smmu_domain_destroy(struct iommu_domain *domain)
@@ -1112,7 +1013,6 @@ static void arm_smmu_domain_destroy(struct iommu_domain *domain)
 	 * already been detached.
 	 */
 	arm_smmu_destroy_domain_context(domain);
-	arm_smmu_free_pgtables(smmu_domain);
 	kfree(smmu_domain);
 }
 
@@ -1244,7 +1144,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 {
 	int ret;
 	struct arm_smmu_domain *smmu_domain = domain->priv;
-	struct arm_smmu_device *smmu, *dom_smmu;
+	struct arm_smmu_device *smmu;
 	struct arm_smmu_master_cfg *cfg;
 
 	smmu = find_smmu_for_device(dev);
@@ -1258,21 +1158,16 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 		return -EEXIST;
 	}
 
+	/* Ensure that the domain is finalised */
+	ret = arm_smmu_init_domain_context(domain, smmu);
+	if (IS_ERR_VALUE(ret))
+		return ret;
+
 	/*
 	 * Sanity check the domain. We don't support domains across
 	 * different SMMUs.
 	 */
-	dom_smmu = ACCESS_ONCE(smmu_domain->smmu);
-	if (!dom_smmu) {
-		/* Now that we have a master, we can finalise the domain */
-		ret = arm_smmu_init_domain_context(domain, smmu);
-		if (IS_ERR_VALUE(ret))
-			return ret;
-
-		dom_smmu = smmu_domain->smmu;
-	}
-
-	if (dom_smmu != smmu) {
+	if (smmu_domain->smmu != smmu) {
 		dev_err(dev,
 			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
 			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
@@ -1303,293 +1198,103 @@ static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
 	arm_smmu_domain_remove_master(smmu_domain, cfg);
 }
 
-static bool arm_smmu_pte_is_contiguous_range(unsigned long addr,
-					     unsigned long end)
-{
-	return !(addr & ~ARM_SMMU_PTE_CONT_MASK) &&
-		(addr + ARM_SMMU_PTE_CONT_SIZE <= end);
-}
-
-static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
-				   unsigned long addr, unsigned long end,
-				   unsigned long pfn, int prot, int stage)
-{
-	pte_t *pte, *start;
-	pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF;
-
-	if (pmd_none(*pmd)) {
-		/* Allocate a new set of tables */
-		pgtable_t table = alloc_page(GFP_ATOMIC|__GFP_ZERO);
-
-		if (!table)
-			return -ENOMEM;
-
-		arm_smmu_flush_pgtable(smmu, page_address(table), PAGE_SIZE);
-		pmd_populate(NULL, pmd, table);
-		arm_smmu_flush_pgtable(smmu, pmd, sizeof(*pmd));
-	}
-
-	if (stage == 1) {
-		pteval |= ARM_SMMU_PTE_AP_UNPRIV | ARM_SMMU_PTE_nG;
-		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
-			pteval |= ARM_SMMU_PTE_AP_RDONLY;
-
-		if (prot & IOMMU_CACHE)
-			pteval |= (MAIR_ATTR_IDX_CACHE <<
-				   ARM_SMMU_PTE_ATTRINDX_SHIFT);
-	} else {
-		pteval |= ARM_SMMU_PTE_HAP_FAULT;
-		if (prot & IOMMU_READ)
-			pteval |= ARM_SMMU_PTE_HAP_READ;
-		if (prot & IOMMU_WRITE)
-			pteval |= ARM_SMMU_PTE_HAP_WRITE;
-		if (prot & IOMMU_CACHE)
-			pteval |= ARM_SMMU_PTE_MEMATTR_OIWB;
-		else
-			pteval |= ARM_SMMU_PTE_MEMATTR_NC;
-	}
-
-	if (prot & IOMMU_NOEXEC)
-		pteval |= ARM_SMMU_PTE_XN;
-
-	/* If no access, create a faulting entry to avoid TLB fills */
-	if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
-		pteval &= ~ARM_SMMU_PTE_PAGE;
-
-	pteval |= ARM_SMMU_PTE_SH_IS;
-	start = pmd_page_vaddr(*pmd) + pte_index(addr);
-	pte = start;
-
-	/*
-	 * Install the page table entries. This is fairly complicated
-	 * since we attempt to make use of the contiguous hint in the
-	 * ptes where possible. The contiguous hint indicates a series
-	 * of ARM_SMMU_PTE_CONT_ENTRIES ptes mapping a physically
-	 * contiguous region with the following constraints:
-	 *
-	 *   - The region start is aligned to ARM_SMMU_PTE_CONT_SIZE
-	 *   - Each pte in the region has the contiguous hint bit set
-	 *
-	 * This complicates unmapping (also handled by this code, when
-	 * neither IOMMU_READ or IOMMU_WRITE are set) because it is
-	 * possible, yet highly unlikely, that a client may unmap only
-	 * part of a contiguous range. This requires clearing of the
-	 * contiguous hint bits in the range before installing the new
-	 * faulting entries.
-	 *
-	 * Note that re-mapping an address range without first unmapping
-	 * it is not supported, so TLB invalidation is not required here
-	 * and is instead performed at unmap and domain-init time.
-	 */
-	do {
-		int i = 1;
-
-		pteval &= ~ARM_SMMU_PTE_CONT;
-
-		if (arm_smmu_pte_is_contiguous_range(addr, end)) {
-			i = ARM_SMMU_PTE_CONT_ENTRIES;
-			pteval |= ARM_SMMU_PTE_CONT;
-		} else if (pte_val(*pte) &
-			   (ARM_SMMU_PTE_CONT | ARM_SMMU_PTE_PAGE)) {
-			int j;
-			pte_t *cont_start;
-			unsigned long idx = pte_index(addr);
-
-			idx &= ~(ARM_SMMU_PTE_CONT_ENTRIES - 1);
-			cont_start = pmd_page_vaddr(*pmd) + idx;
-			for (j = 0; j < ARM_SMMU_PTE_CONT_ENTRIES; ++j)
-				pte_val(*(cont_start + j)) &=
-					~ARM_SMMU_PTE_CONT;
-
-			arm_smmu_flush_pgtable(smmu, cont_start,
-					       sizeof(*pte) *
-					       ARM_SMMU_PTE_CONT_ENTRIES);
-		}
-
-		do {
-			*pte = pfn_pte(pfn, __pgprot(pteval));
-		} while (pte++, pfn++, addr += PAGE_SIZE, --i);
-	} while (addr != end);
-
-	arm_smmu_flush_pgtable(smmu, start, sizeof(*pte) * (pte - start));
-	return 0;
-}
-
-static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
-				   unsigned long addr, unsigned long end,
-				   phys_addr_t phys, int prot, int stage)
-{
-	int ret;
-	pmd_t *pmd;
-	unsigned long next, pfn = __phys_to_pfn(phys);
-
-#ifndef __PAGETABLE_PMD_FOLDED
-	if (pud_none(*pud)) {
-		pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
-		if (!pmd)
-			return -ENOMEM;
-
-		arm_smmu_flush_pgtable(smmu, pmd, PAGE_SIZE);
-		pud_populate(NULL, pud, pmd);
-		arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud));
-
-		pmd += pmd_index(addr);
-	} else
-#endif
-		pmd = pmd_offset(pud, addr);
-
-	do {
-		next = pmd_addr_end(addr, end);
-		ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, next, pfn,
-					      prot, stage);
-		phys += next - addr;
-		pfn = __phys_to_pfn(phys);
-	} while (pmd++, addr = next, addr < end);
-
-	return ret;
-}
-
-static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
-				   unsigned long addr, unsigned long end,
-				   phys_addr_t phys, int prot, int stage)
-{
-	int ret = 0;
-	pud_t *pud;
-	unsigned long next;
-
-#ifndef __PAGETABLE_PUD_FOLDED
-	if (pgd_none(*pgd)) {
-		pud = (pud_t *)get_zeroed_page(GFP_ATOMIC);
-		if (!pud)
-			return -ENOMEM;
-
-		arm_smmu_flush_pgtable(smmu, pud, PAGE_SIZE);
-		pgd_populate(NULL, pgd, pud);
-		arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd));
-
-		pud += pud_index(addr);
-	} else
-#endif
-		pud = pud_offset(pgd, addr);
-
-	do {
-		next = pud_addr_end(addr, end);
-		ret = arm_smmu_alloc_init_pmd(smmu, pud, addr, next, phys,
-					      prot, stage);
-		phys += next - addr;
-	} while (pud++, addr = next, addr < end);
-
-	return ret;
-}
-
-static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
-				   unsigned long iova, phys_addr_t paddr,
-				   size_t size, int prot)
-{
-	int ret, stage;
-	unsigned long end;
-	phys_addr_t input_mask, output_mask;
-	struct arm_smmu_device *smmu = smmu_domain->smmu;
-	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
-	pgd_t *pgd = cfg->pgd;
-	unsigned long flags;
-
-	if (cfg->cbar == CBAR_TYPE_S2_TRANS) {
-		stage = 2;
-		input_mask = (1ULL << smmu->s2_input_size) - 1;
-		output_mask = (1ULL << smmu->s2_output_size) - 1;
-	} else {
-		stage = 1;
-		input_mask = (1ULL << smmu->s1_input_size) - 1;
-		output_mask = (1ULL << smmu->s1_output_size) - 1;
-	}
-
-	if (!pgd)
-		return -EINVAL;
-
-	if (size & ~PAGE_MASK)
-		return -EINVAL;
-
-	if ((phys_addr_t)iova & ~input_mask)
-		return -ERANGE;
-
-	if (paddr & ~output_mask)
-		return -ERANGE;
-
-	spin_lock_irqsave(&smmu_domain->lock, flags);
-	pgd += pgd_index(iova);
-	end = iova + size;
-	do {
-		unsigned long next = pgd_addr_end(iova, end);
-
-		ret = arm_smmu_alloc_init_pud(smmu, pgd, iova, next, paddr,
-					      prot, stage);
-		if (ret)
-			goto out_unlock;
-
-		paddr += next - iova;
-		iova = next;
-	} while (pgd++, iova != end);
-
-out_unlock:
-	spin_unlock_irqrestore(&smmu_domain->lock, flags);
-
-	return ret;
-}
-
 static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
 			phys_addr_t paddr, size_t size, int prot)
 {
-	struct arm_smmu_domain *smmu_domain = domain->priv;
+	int ret;
+	unsigned long flags;
+	struct arm_smmu_domain *smmu_domain = domain->priv;
+	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
 
-	if (!smmu_domain)
+	if (!ops)
 		return -ENODEV;
 
-	return arm_smmu_handle_mapping(smmu_domain, iova, paddr, size, prot);
+	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
+	ret = ops->map(ops, iova, paddr, size, prot);
+	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
+	return ret;
 }
 
 static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
 			     size_t size)
 {
-	int ret;
-	struct arm_smmu_domain *smmu_domain = domain->priv;
+	size_t ret;
+	unsigned long flags;
+	struct arm_smmu_domain *smmu_domain = domain->priv;
+	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
 
-	ret = arm_smmu_handle_mapping(smmu_domain, iova, 0, size, 0);
-	arm_smmu_tlb_inv_context(smmu_domain);
-	return ret ? 0 : size;
+	if (!ops)
+		return 0;
+
+	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
+	ret = ops->unmap(ops, iova, size);
+	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
+	return ret;
+}
+
+static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
+					      dma_addr_t iova)
+{
+	struct arm_smmu_domain *smmu_domain = domain->priv;
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+	struct device *dev = smmu->dev;
+	void __iomem *cb_base;
+	u32 tmp;
+	u64 phys;
+
+	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+
+	if (smmu->version == 1) {
+		u32 reg = iova & ~0xfff;
+		writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_LO);
+	} else {
+		u32 reg = iova & ~0xfff;
+		writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_LO);
+		reg = (iova & ~0xfff) >> 32;
+		writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_HI);
+	}
+
+	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
+				      !(tmp & ATSR_ACTIVE), 5, 50)) {
+		dev_err(dev,
+			"iova to phys timed out on 0x%pad. Falling back to software table walk.\n",
+			&iova);
+		return ops->iova_to_phys(ops, iova);
+	}
+
+	phys = readl_relaxed(cb_base + ARM_SMMU_CB_PAR_LO);
+	phys |= ((u64)readl_relaxed(cb_base + ARM_SMMU_CB_PAR_HI)) << 32;
+
+	if (phys & CB_PAR_F) {
+		dev_err(dev, "translation fault!\n");
+		dev_err(dev, "PAR = 0x%llx\n", phys);
+		return 0;
+	}
+
+	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
 }
 
 static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
 					 dma_addr_t iova)
 {
-	pgd_t *pgdp, pgd;
-	pud_t pud;
-	pmd_t pmd;
-	pte_t pte;
+	phys_addr_t ret;
+	unsigned long flags;
 	struct arm_smmu_domain *smmu_domain = domain->priv;
-	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
 
-	pgdp = cfg->pgd;
-	if (!pgdp)
+	if (!ops)
 		return 0;
 
-	pgd = *(pgdp + pgd_index(iova));
-	if (pgd_none(pgd))
-		return 0;
-
-	pud = *pud_offset(&pgd, iova);
-	if (pud_none(pud))
-		return 0;
-
-	pmd = *pmd_offset(&pud, iova);
-	if (pmd_none(pmd))
-		return 0;
+	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
+	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS)
+		ret = arm_smmu_iova_to_phys_hard(domain, iova);
+	else
+		ret = ops->iova_to_phys(ops, iova);
+	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
 
-	pte = *(pmd_page_vaddr(pmd) + pte_index(iova));
-	if (pte_none(pte))
-		return 0;
-
-	return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK);
+	return ret;
 }
 
 static bool arm_smmu_capable(enum iommu_cap cap)
@@ -1698,24 +1403,34 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
 static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
 				    enum iommu_attr attr, void *data)
 {
+	int ret = 0;
 	struct arm_smmu_domain *smmu_domain = domain->priv;
 
+	mutex_lock(&smmu_domain->init_mutex);
+
 	switch (attr) {
 	case DOMAIN_ATTR_NESTING:
-		if (smmu_domain->smmu)
-			return -EPERM;
+		if (smmu_domain->smmu) {
+			ret = -EPERM;
+			goto out_unlock;
+		}
+
 		if (*(int *)data)
 			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
 		else
 			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
 
-		return 0;
+		break;
 	default:
-		return -ENODEV;
+		ret = -ENODEV;
 	}
+
+out_unlock:
+	mutex_unlock(&smmu_domain->init_mutex);
+	return ret;
 }
 
-static const struct iommu_ops arm_smmu_ops = {
+static struct iommu_ops arm_smmu_ops = {
 	.capable		= arm_smmu_capable,
 	.domain_init		= arm_smmu_domain_init,
 	.domain_destroy		= arm_smmu_domain_destroy,
@@ -1729,9 +1444,7 @@ static const struct iommu_ops arm_smmu_ops = {
 	.remove_device		= arm_smmu_remove_device,
 	.domain_get_attr	= arm_smmu_domain_get_attr,
 	.domain_set_attr	= arm_smmu_domain_set_attr,
-	.pgsize_bitmap		= (SECTION_SIZE |
-				   ARM_SMMU_PTE_CONT_SIZE |
-				   PAGE_SIZE),
+	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
 };
 
 static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
@@ -1760,7 +1473,6 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
 	}
 
 	/* Invalidate the TLB, just in case */
-	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_STLBIALL);
 	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
 	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
 
@@ -1782,7 +1494,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
 	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
 
 	/* Push the button */
-	arm_smmu_tlb_sync(smmu);
+	__arm_smmu_tlb_sync(smmu);
 	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
 }
 
@@ -1816,12 +1528,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 
 	/* ID0 */
 	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
-#ifndef CONFIG_64BIT
-	if (((id >> ID0_PTFS_SHIFT) & ID0_PTFS_MASK) == ID0_PTFS_V8_ONLY) {
-		dev_err(smmu->dev, "\tno v7 descriptor support!\n");
-		return -ENODEV;
-	}
-#endif
 
 	/* Restrict available stages based on module parameter */
 	if (force_stage == 1)
@@ -1850,6 +1556,11 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 		return -ENODEV;
 	}
 
+	if (smmu->version == 1 || (!(id & ID0_ATOSNS) && (id & ID0_S1TS))) {
+		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
+		dev_notice(smmu->dev, "\taddress translation ops\n");
+	}
+
 	if (id & ID0_CTTW) {
 		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
 		dev_notice(smmu->dev, "\tcoherent table walk\n");
@@ -1894,16 +1605,14 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 	smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
 
 	/* Check for size mismatch of SMMU address space from mapped region */
-	size = 1 <<
-		(((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
+	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
 	size *= 2 << smmu->pgshift;
 	if (smmu->size != size)
 		dev_warn(smmu->dev,
 			"SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
 			size, smmu->size);
 
-	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) &
-				     ID1_NUMS2CB_MASK;
+	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
 	smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
 	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
 		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
@@ -1915,46 +1624,40 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 	/* ID2 */
 	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
 	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
-	smmu->s1_output_size = min_t(unsigned long, PHYS_MASK_SHIFT, size);
-
-	/* Stage-2 input size limited due to pgd allocation (PTRS_PER_PGD) */
-#ifdef CONFIG_64BIT
-	smmu->s2_input_size = min_t(unsigned long, VA_BITS, size);
-#else
-	smmu->s2_input_size = min(32UL, size);
-#endif
+	smmu->ipa_size = size;
 
-	/* The stage-2 output mask is also applied for bypass */
+	/* The output mask is also applied for bypass */
 	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
-	smmu->s2_output_size = min_t(unsigned long, PHYS_MASK_SHIFT, size);
+	smmu->pa_size = size;
 
 	if (smmu->version == ARM_SMMU_V1) {
-		smmu->s1_input_size = 32;
+		smmu->va_size = smmu->ipa_size;
+		size = SZ_4K | SZ_2M | SZ_1G;
 	} else {
-#ifdef CONFIG_64BIT
 		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
-		size = min(VA_BITS, arm_smmu_id_size_to_bits(size));
-#else
-		size = 32;
+		smmu->va_size = arm_smmu_id_size_to_bits(size);
+#ifndef CONFIG_64BIT
+		smmu->va_size = min(32UL, smmu->va_size);
 #endif
-		smmu->s1_input_size = size;
-
-		if ((PAGE_SIZE == SZ_4K && !(id & ID2_PTFS_4K)) ||
-		    (PAGE_SIZE == SZ_64K && !(id & ID2_PTFS_64K)) ||
-		    (PAGE_SIZE != SZ_4K && PAGE_SIZE != SZ_64K)) {
-			dev_err(smmu->dev, "CPU page size 0x%lx unsupported\n",
-				PAGE_SIZE);
-			return -ENODEV;
-		}
+		size = 0;
+		if (id & ID2_PTFS_4K)
+			size |= SZ_4K | SZ_2M | SZ_1G;
+		if (id & ID2_PTFS_16K)
+			size |= SZ_16K | SZ_32M;
+		if (id & ID2_PTFS_64K)
+			size |= SZ_64K | SZ_512M;
 	}
 
+	arm_smmu_ops.pgsize_bitmap &= size;
+	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size);
+
 	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
 		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
-			   smmu->s1_input_size, smmu->s1_output_size);
+			   smmu->va_size, smmu->ipa_size);
 
 	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
 		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
-			   smmu->s2_input_size, smmu->s2_output_size);
+			   smmu->ipa_size, smmu->pa_size);
 
 	return 0;
 }
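A note on the hardware translation path above: arm_smmu_iova_to_phys_hard() polls the ARM_SMMU_CB_ATSR register with readl_poll_timeout_atomic() from the newly added <linux/iopoll.h> (visible in the diffstat), falling back to a software table walk on timeout. The following is an illustrative, simplified open-coded equivalent of that poll loop (not part of the patch); the helper function name wait_for_ats_idle() is invented, and the real macro's ktime-based timeout bookkeeping is reduced to a plain countdown:

static int wait_for_ats_idle(void __iomem *atsr)
{
	u32 tmp;
	int timeout_us = 50;

	for (;;) {
		tmp = readl(atsr);		/* read CB_ATSR */
		if (!(tmp & ATSR_ACTIVE))	/* translation finished? */
			return 0;
		if (timeout_us <= 0)
			return -ETIMEDOUT;	/* caller falls back to SW walk */
		udelay(5);			/* matches the 5us poll interval */
		timeout_us -= 5;
	}
}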
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
new file mode 100644
index 000000000000..5a500edf00cc
--- /dev/null
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -0,0 +1,986 @@
1/*
2 * CPU-agnostic ARM page table allocator.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 *
16 * Copyright (C) 2014 ARM Limited
17 *
18 * Author: Will Deacon <will.deacon@arm.com>
19 */
20
21#define pr_fmt(fmt) "arm-lpae io-pgtable: " fmt
22
23#include <linux/iommu.h>
24#include <linux/kernel.h>
25#include <linux/sizes.h>
26#include <linux/slab.h>
27#include <linux/types.h>
28
29#include "io-pgtable.h"
30
31#define ARM_LPAE_MAX_ADDR_BITS 48
32#define ARM_LPAE_S2_MAX_CONCAT_PAGES 16
33#define ARM_LPAE_MAX_LEVELS 4
34
35/* Struct accessors */
36#define io_pgtable_to_data(x) \
37 container_of((x), struct arm_lpae_io_pgtable, iop)
38
39#define io_pgtable_ops_to_pgtable(x) \
40 container_of((x), struct io_pgtable, ops)
41
42#define io_pgtable_ops_to_data(x) \
43 io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
44
45/*
46 * For consistency with the architecture, we always consider
 47 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >= 0
48 */
49#define ARM_LPAE_START_LVL(d) (ARM_LPAE_MAX_LEVELS - (d)->levels)
50
51/*
52 * Calculate the right shift amount to get to the portion describing level l
53 * in a virtual address mapped by the pagetable in d.
54 */
55#define ARM_LPAE_LVL_SHIFT(l,d) \
56 ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \
57 * (d)->bits_per_level) + (d)->pg_shift)
58
59#define ARM_LPAE_PAGES_PER_PGD(d) ((d)->pgd_size >> (d)->pg_shift)
60
61/*
62 * Calculate the index at level l used to map virtual address a using the
63 * pagetable in d.
64 */
65#define ARM_LPAE_PGD_IDX(l,d) \
66 ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)
67
68#define ARM_LPAE_LVL_IDX(a,l,d) \
69 (((a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \
70 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
71
72/* Calculate the block/page mapping size at level l for pagetable in d. */
73#define ARM_LPAE_BLOCK_SIZE(l,d) \
74 (1 << (ilog2(sizeof(arm_lpae_iopte)) + \
75 ((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
76
77/* Page table bits */
78#define ARM_LPAE_PTE_TYPE_SHIFT 0
79#define ARM_LPAE_PTE_TYPE_MASK 0x3
80
81#define ARM_LPAE_PTE_TYPE_BLOCK 1
82#define ARM_LPAE_PTE_TYPE_TABLE 3
83#define ARM_LPAE_PTE_TYPE_PAGE 3
84
85#define ARM_LPAE_PTE_NSTABLE (((arm_lpae_iopte)1) << 63)
86#define ARM_LPAE_PTE_XN (((arm_lpae_iopte)3) << 53)
87#define ARM_LPAE_PTE_AF (((arm_lpae_iopte)1) << 10)
88#define ARM_LPAE_PTE_SH_NS (((arm_lpae_iopte)0) << 8)
89#define ARM_LPAE_PTE_SH_OS (((arm_lpae_iopte)2) << 8)
90#define ARM_LPAE_PTE_SH_IS (((arm_lpae_iopte)3) << 8)
91#define ARM_LPAE_PTE_NS (((arm_lpae_iopte)1) << 5)
92#define ARM_LPAE_PTE_VALID (((arm_lpae_iopte)1) << 0)
93
94#define ARM_LPAE_PTE_ATTR_LO_MASK (((arm_lpae_iopte)0x3ff) << 2)
95/* Ignore the contiguous bit for block splitting */
96#define ARM_LPAE_PTE_ATTR_HI_MASK (((arm_lpae_iopte)6) << 52)
97#define ARM_LPAE_PTE_ATTR_MASK (ARM_LPAE_PTE_ATTR_LO_MASK | \
98 ARM_LPAE_PTE_ATTR_HI_MASK)
99
100/* Stage-1 PTE */
101#define ARM_LPAE_PTE_AP_UNPRIV (((arm_lpae_iopte)1) << 6)
102#define ARM_LPAE_PTE_AP_RDONLY (((arm_lpae_iopte)2) << 6)
103#define ARM_LPAE_PTE_ATTRINDX_SHIFT 2
104#define ARM_LPAE_PTE_nG (((arm_lpae_iopte)1) << 11)
105
106/* Stage-2 PTE */
107#define ARM_LPAE_PTE_HAP_FAULT (((arm_lpae_iopte)0) << 6)
108#define ARM_LPAE_PTE_HAP_READ (((arm_lpae_iopte)1) << 6)
109#define ARM_LPAE_PTE_HAP_WRITE (((arm_lpae_iopte)2) << 6)
110#define ARM_LPAE_PTE_MEMATTR_OIWB (((arm_lpae_iopte)0xf) << 2)
111#define ARM_LPAE_PTE_MEMATTR_NC (((arm_lpae_iopte)0x5) << 2)
112#define ARM_LPAE_PTE_MEMATTR_DEV (((arm_lpae_iopte)0x1) << 2)
113
114/* Register bits */
115#define ARM_32_LPAE_TCR_EAE (1 << 31)
116#define ARM_64_LPAE_S2_TCR_RES1 (1 << 31)
117
118#define ARM_LPAE_TCR_TG0_4K (0 << 14)
119#define ARM_LPAE_TCR_TG0_64K (1 << 14)
120#define ARM_LPAE_TCR_TG0_16K (2 << 14)
121
122#define ARM_LPAE_TCR_SH0_SHIFT 12
123#define ARM_LPAE_TCR_SH0_MASK 0x3
124#define ARM_LPAE_TCR_SH_NS 0
125#define ARM_LPAE_TCR_SH_OS 2
126#define ARM_LPAE_TCR_SH_IS 3
127
128#define ARM_LPAE_TCR_ORGN0_SHIFT 10
129#define ARM_LPAE_TCR_IRGN0_SHIFT 8
130#define ARM_LPAE_TCR_RGN_MASK 0x3
131#define ARM_LPAE_TCR_RGN_NC 0
132#define ARM_LPAE_TCR_RGN_WBWA 1
133#define ARM_LPAE_TCR_RGN_WT 2
134#define ARM_LPAE_TCR_RGN_WB 3
135
136#define ARM_LPAE_TCR_SL0_SHIFT 6
137#define ARM_LPAE_TCR_SL0_MASK 0x3
138
139#define ARM_LPAE_TCR_T0SZ_SHIFT 0
140#define ARM_LPAE_TCR_SZ_MASK 0xf
141
142#define ARM_LPAE_TCR_PS_SHIFT 16
143#define ARM_LPAE_TCR_PS_MASK 0x7
144
145#define ARM_LPAE_TCR_IPS_SHIFT 32
146#define ARM_LPAE_TCR_IPS_MASK 0x7
147
148#define ARM_LPAE_TCR_PS_32_BIT 0x0ULL
149#define ARM_LPAE_TCR_PS_36_BIT 0x1ULL
150#define ARM_LPAE_TCR_PS_40_BIT 0x2ULL
151#define ARM_LPAE_TCR_PS_42_BIT 0x3ULL
152#define ARM_LPAE_TCR_PS_44_BIT 0x4ULL
153#define ARM_LPAE_TCR_PS_48_BIT 0x5ULL
154
155#define ARM_LPAE_MAIR_ATTR_SHIFT(n) ((n) << 3)
156#define ARM_LPAE_MAIR_ATTR_MASK 0xff
157#define ARM_LPAE_MAIR_ATTR_DEVICE 0x04
158#define ARM_LPAE_MAIR_ATTR_NC 0x44
159#define ARM_LPAE_MAIR_ATTR_WBRWA 0xff
160#define ARM_LPAE_MAIR_ATTR_IDX_NC 0
161#define ARM_LPAE_MAIR_ATTR_IDX_CACHE 1
162#define ARM_LPAE_MAIR_ATTR_IDX_DEV 2
163
164/* IOPTE accessors */
165#define iopte_deref(pte,d) \
166 (__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1) \
167 & ~((1ULL << (d)->pg_shift) - 1)))
168
169#define iopte_type(pte,l) \
170 (((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)
171
172#define iopte_prot(pte) ((pte) & ARM_LPAE_PTE_ATTR_MASK)
173
174#define iopte_leaf(pte,l) \
175 (l == (ARM_LPAE_MAX_LEVELS - 1) ? \
176 (iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) : \
177 (iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK))
178
179#define iopte_to_pfn(pte,d) \
180 (((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift)
181
182#define pfn_to_iopte(pfn,d) \
183 (((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1))
184
185struct arm_lpae_io_pgtable {
186 struct io_pgtable iop;
187
188 int levels;
189 size_t pgd_size;
190 unsigned long pg_shift;
191 unsigned long bits_per_level;
192
193 void *pgd;
194};
195
196typedef u64 arm_lpae_iopte;
197
198static bool selftest_running = false;
199
200static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
201 unsigned long iova, phys_addr_t paddr,
202 arm_lpae_iopte prot, int lvl,
203 arm_lpae_iopte *ptep)
204{
205 arm_lpae_iopte pte = prot;
206
207 /* We require an unmap first */
208 if (iopte_leaf(*ptep, lvl)) {
209 WARN_ON(!selftest_running);
210 return -EEXIST;
211 }
212
213 if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
214 pte |= ARM_LPAE_PTE_NS;
215
216 if (lvl == ARM_LPAE_MAX_LEVELS - 1)
217 pte |= ARM_LPAE_PTE_TYPE_PAGE;
218 else
219 pte |= ARM_LPAE_PTE_TYPE_BLOCK;
220
221 pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
222 pte |= pfn_to_iopte(paddr >> data->pg_shift, data);
223
224 *ptep = pte;
225 data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), data->iop.cookie);
226 return 0;
227}
228
229static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
230 phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
231 int lvl, arm_lpae_iopte *ptep)
232{
233 arm_lpae_iopte *cptep, pte;
234 void *cookie = data->iop.cookie;
235 size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
236
237 /* Find our entry at the current level */
238 ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
239
240 /* If we can install a leaf entry at this level, then do so */
241 if (size == block_size && (size & data->iop.cfg.pgsize_bitmap))
242 return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);
243
244 /* We can't allocate tables at the final level */
245 if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
246 return -EINVAL;
247
248 /* Grab a pointer to the next level */
249 pte = *ptep;
250 if (!pte) {
251 cptep = alloc_pages_exact(1UL << data->pg_shift,
252 GFP_ATOMIC | __GFP_ZERO);
253 if (!cptep)
254 return -ENOMEM;
255
256 data->iop.cfg.tlb->flush_pgtable(cptep, 1UL << data->pg_shift,
257 cookie);
258 pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE;
259 if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
260 pte |= ARM_LPAE_PTE_NSTABLE;
261 *ptep = pte;
262 data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
263 } else {
264 cptep = iopte_deref(pte, data);
265 }
266
267 /* Rinse, repeat */
268 return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
269}
270
271static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
272 int prot)
273{
274 arm_lpae_iopte pte;
275
276 if (data->iop.fmt == ARM_64_LPAE_S1 ||
277 data->iop.fmt == ARM_32_LPAE_S1) {
278 pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG;
279
280 if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
281 pte |= ARM_LPAE_PTE_AP_RDONLY;
282
283 if (prot & IOMMU_CACHE)
284 pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
285 << ARM_LPAE_PTE_ATTRINDX_SHIFT);
286 } else {
287 pte = ARM_LPAE_PTE_HAP_FAULT;
288 if (prot & IOMMU_READ)
289 pte |= ARM_LPAE_PTE_HAP_READ;
290 if (prot & IOMMU_WRITE)
291 pte |= ARM_LPAE_PTE_HAP_WRITE;
292 if (prot & IOMMU_CACHE)
293 pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
294 else
295 pte |= ARM_LPAE_PTE_MEMATTR_NC;
296 }
297
298 if (prot & IOMMU_NOEXEC)
299 pte |= ARM_LPAE_PTE_XN;
300
301 return pte;
302}
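For instance (illustrative sketch): at stage-1, a request for IOMMU_READ | IOMMU_CACHE translates to

	pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG |
	      ARM_LPAE_PTE_AP_RDONLY |
	      (ARM_LPAE_MAIR_ATTR_IDX_CACHE << ARM_LPAE_PTE_ATTRINDX_SHIFT);

with the type, access flag and shareability bits ORed in later by arm_lpae_init_pte().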
303
304static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
305 phys_addr_t paddr, size_t size, int iommu_prot)
306{
307 struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
308 arm_lpae_iopte *ptep = data->pgd;
309 int lvl = ARM_LPAE_START_LVL(data);
310 arm_lpae_iopte prot;
311
312 /* If no access, then nothing to do */
313 if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
314 return 0;
315
316 prot = arm_lpae_prot_to_pte(data, iommu_prot);
317 return __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
318}
319
320static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
321 arm_lpae_iopte *ptep)
322{
323 arm_lpae_iopte *start, *end;
324 unsigned long table_size;
325
326 /* Only leaf entries at the last level */
327 if (lvl == ARM_LPAE_MAX_LEVELS - 1)
328 return;
329
330 if (lvl == ARM_LPAE_START_LVL(data))
331 table_size = data->pgd_size;
332 else
333 table_size = 1UL << data->pg_shift;
334
335 start = ptep;
336 end = (void *)ptep + table_size;
337
338 while (ptep != end) {
339 arm_lpae_iopte pte = *ptep++;
340
341 if (!pte || iopte_leaf(pte, lvl))
342 continue;
343
344 __arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
345 }
346
347 free_pages_exact(start, table_size);
348}
349
350static void arm_lpae_free_pgtable(struct io_pgtable *iop)
351{
352 struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
353
354 __arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
355 kfree(data);
356}
357
358static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
359 unsigned long iova, size_t size,
360 arm_lpae_iopte prot, int lvl,
361 arm_lpae_iopte *ptep, size_t blk_size)
362{
363 unsigned long blk_start, blk_end;
364 phys_addr_t blk_paddr;
365 arm_lpae_iopte table = 0;
366 void *cookie = data->iop.cookie;
367 const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
368
369 blk_start = iova & ~(blk_size - 1);
370 blk_end = blk_start + blk_size;
371 blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift;
372
373 for (; blk_start < blk_end; blk_start += size, blk_paddr += size) {
374 arm_lpae_iopte *tablep;
375
376 /* Unmap! */
377 if (blk_start == iova)
378 continue;
379
380 /* __arm_lpae_map expects a pointer to the start of the table */
381 tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data);
382 if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl,
383 tablep) < 0) {
384 if (table) {
385 /* Free the table we allocated */
386 tablep = iopte_deref(table, data);
387 __arm_lpae_free_pgtable(data, lvl + 1, tablep);
388 }
389 return 0; /* Bytes unmapped */
390 }
391 }
392
393 *ptep = table;
394 tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
395 iova &= ~(blk_size - 1);
396 tlb->tlb_add_flush(iova, blk_size, true, cookie);
397 return size;
398}
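Concretely (editorial example): unmapping a single 4K page from a 2M level-2 block walks the 512 page-sized chunks of the block, skips the one being unmapped, and remaps the remaining 511 into a freshly allocated level-3 table that then replaces the old block entry.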
399
400static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
401 unsigned long iova, size_t size, int lvl,
402 arm_lpae_iopte *ptep)
403{
404 arm_lpae_iopte pte;
405 const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
406 void *cookie = data->iop.cookie;
407 size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
408
409 ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
410 pte = *ptep;
411
412 /* Something went horribly wrong and we ran out of page table */
413 if (WARN_ON(!pte || (lvl == ARM_LPAE_MAX_LEVELS)))
414 return 0;
415
416 /* If the size matches this level, we're in the right place */
417 if (size == blk_size) {
418 *ptep = 0;
419 tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
420
421 if (!iopte_leaf(pte, lvl)) {
422 /* Also flush any partial walks */
423 tlb->tlb_add_flush(iova, size, false, cookie);
424 tlb->tlb_sync(data->iop.cookie);
425 ptep = iopte_deref(pte, data);
426 __arm_lpae_free_pgtable(data, lvl + 1, ptep);
427 } else {
428 tlb->tlb_add_flush(iova, size, true, cookie);
429 }
430
431 return size;
432 } else if (iopte_leaf(pte, lvl)) {
433 /*
434 * Insert a table at the next level to map the old region,
435 * minus the part we want to unmap
436 */
437 return arm_lpae_split_blk_unmap(data, iova, size,
438 iopte_prot(pte), lvl, ptep,
439 blk_size);
440 }
441
442 /* Keep on walkin' */
443 ptep = iopte_deref(pte, data);
444 return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
445}
446
447static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
448 size_t size)
449{
450 size_t unmapped;
451 struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
452 struct io_pgtable *iop = &data->iop;
453 arm_lpae_iopte *ptep = data->pgd;
454 int lvl = ARM_LPAE_START_LVL(data);
455
456 unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
457 if (unmapped)
458 iop->cfg.tlb->tlb_sync(iop->cookie);
459
460 return unmapped;
461}
462
463static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
464 unsigned long iova)
465{
466 struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
467 arm_lpae_iopte pte, *ptep = data->pgd;
468 int lvl = ARM_LPAE_START_LVL(data);
469
470 do {
471 /* Valid IOPTE pointer? */
472 if (!ptep)
473 return 0;
474
475 /* Grab the IOPTE we're interested in */
476 pte = *(ptep + ARM_LPAE_LVL_IDX(iova, lvl, data));
477
478 /* Valid entry? */
479 if (!pte)
480 return 0;
481
482 /* Leaf entry? */
483 if (iopte_leaf(pte,lvl))
484 goto found_translation;
485
486 /* Take it to the next level */
487 ptep = iopte_deref(pte, data);
488 } while (++lvl < ARM_LPAE_MAX_LEVELS);
489
490 /* Ran out of page tables to walk */
491 return 0;
492
493found_translation:
494 iova &= ((1 << data->pg_shift) - 1);
495 return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova;
496}
497
498static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
499{
500 unsigned long granule;
501
502 /*
503 * We need to restrict the supported page sizes to match the
504 * translation regime for a particular granule. Aim to match
505 * the CPU page size if possible, otherwise prefer smaller sizes.
506 * While we're at it, restrict the block sizes to match the
507 * chosen granule.
508 */
509 if (cfg->pgsize_bitmap & PAGE_SIZE)
510 granule = PAGE_SIZE;
511 else if (cfg->pgsize_bitmap & ~PAGE_MASK)
512 granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
513 else if (cfg->pgsize_bitmap & PAGE_MASK)
514 granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
515 else
516 granule = 0;
517
518 switch (granule) {
519 case SZ_4K:
520 cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
521 break;
522 case SZ_16K:
523 cfg->pgsize_bitmap &= (SZ_16K | SZ_32M);
524 break;
525 case SZ_64K:
526 cfg->pgsize_bitmap &= (SZ_64K | SZ_512M);
527 break;
528 default:
529 cfg->pgsize_bitmap = 0;
530 }
531}
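A minimal sketch of the effect, assuming a 4K CPU PAGE_SIZE: a caller-supplied bitmap of

	cfg->pgsize_bitmap = SZ_4K | SZ_64K | SZ_2M;

is restricted to SZ_4K | SZ_2M, since the 4K granule is preferred and 64K belongs to a different translation granule.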
532
533static struct arm_lpae_io_pgtable *
534arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
535{
536 unsigned long va_bits, pgd_bits;
537 struct arm_lpae_io_pgtable *data;
538
539 arm_lpae_restrict_pgsizes(cfg);
540
541 if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
542 return NULL;
543
544 if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
545 return NULL;
546
547 if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
548 return NULL;
549
550 data = kmalloc(sizeof(*data), GFP_KERNEL);
551 if (!data)
552 return NULL;
553
554 data->pg_shift = __ffs(cfg->pgsize_bitmap);
555 data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));
556
557 va_bits = cfg->ias - data->pg_shift;
558 data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
559
560 /* Calculate the actual size of our pgd (without concatenation) */
561 pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
562 data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));
563
564 data->iop.ops = (struct io_pgtable_ops) {
565 .map = arm_lpae_map,
566 .unmap = arm_lpae_unmap,
567 .iova_to_phys = arm_lpae_iova_to_phys,
568 };
569
570 return data;
571}
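Worked example (illustrative): for a 4K granule and cfg->ias = 48, pg_shift = 12 and bits_per_level = 9, so va_bits = 36, levels = DIV_ROUND_UP(36, 9) = 4, pgd_bits = 36 - 3 * 9 = 9 and pgd_size = 1 << (9 + 3) = 4K, i.e. a single-page level-0 table.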
572
573static struct io_pgtable *
574arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
575{
576 u64 reg;
577 struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);
578
579 if (!data)
580 return NULL;
581
582 /* TCR */
583 reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
584 (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
585 (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
586
587 switch (1 << data->pg_shift) {
588 case SZ_4K:
589 reg |= ARM_LPAE_TCR_TG0_4K;
590 break;
591 case SZ_16K:
592 reg |= ARM_LPAE_TCR_TG0_16K;
593 break;
594 case SZ_64K:
595 reg |= ARM_LPAE_TCR_TG0_64K;
596 break;
597 }
598
599 switch (cfg->oas) {
600 case 32:
601 reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
602 break;
603 case 36:
604 reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
605 break;
606 case 40:
607 reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
608 break;
609 case 42:
610 reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
611 break;
612 case 44:
613 reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
614 break;
615 case 48:
616 reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
617 break;
618 default:
619 goto out_free_data;
620 }
621
622 reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
623 cfg->arm_lpae_s1_cfg.tcr = reg;
624
625 /* MAIRs */
626 reg = (ARM_LPAE_MAIR_ATTR_NC
627 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
628 (ARM_LPAE_MAIR_ATTR_WBRWA
629 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
630 (ARM_LPAE_MAIR_ATTR_DEVICE
631 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));
632
633 cfg->arm_lpae_s1_cfg.mair[0] = reg;
634 cfg->arm_lpae_s1_cfg.mair[1] = 0;
635
636 /* Looking good; allocate a pgd */
637 data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO);
638 if (!data->pgd)
639 goto out_free_data;
640
641 cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);
642
643 /* TTBRs */
644 cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
645 cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
646 return &data->iop;
647
648out_free_data:
649 kfree(data);
650 return NULL;
651}
652
653static struct io_pgtable *
654arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
655{
656 u64 reg, sl;
657 struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);
658
659 if (!data)
660 return NULL;
661
662 /*
663 * Concatenate PGDs at level 1 if possible in order to reduce
664 * the depth of the stage-2 walk.
665 */
666 if (data->levels == ARM_LPAE_MAX_LEVELS) {
667 unsigned long pgd_pages;
668
669 pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
670 if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
671 data->pgd_size = pgd_pages << data->pg_shift;
672 data->levels--;
673 }
674 }
675
676 /* VTCR */
677 reg = ARM_64_LPAE_S2_TCR_RES1 |
678 (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
679 (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
680 (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
681
682 sl = ARM_LPAE_START_LVL(data);
683
684 switch (1 << data->pg_shift) {
685 case SZ_4K:
686 reg |= ARM_LPAE_TCR_TG0_4K;
687 sl++; /* SL0 format is different for 4K granule size */
688 break;
689 case SZ_16K:
690 reg |= ARM_LPAE_TCR_TG0_16K;
691 break;
692 case SZ_64K:
693 reg |= ARM_LPAE_TCR_TG0_64K;
694 break;
695 }
696
697 switch (cfg->oas) {
698 case 32:
699 reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
700 break;
701 case 36:
702 reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
703 break;
704 case 40:
705 reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
706 break;
707 case 42:
708 reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
709 break;
710 case 44:
711 reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
712 break;
713 case 48:
714 reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
715 break;
716 default:
717 goto out_free_data;
718 }
719
720 reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
721 reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
722 cfg->arm_lpae_s2_cfg.vtcr = reg;
723
724 /* Allocate pgd pages */
725 data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO);
726 if (!data->pgd)
727 goto out_free_data;
728
729 cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);
730
731 /* VTTBR */
732 cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
733 return &data->iop;
734
735out_free_data:
736 kfree(data);
737 return NULL;
738}
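Worked example of the concatenation above (illustrative): with a 4K granule and cfg->ias = 40, the generic allocator picks 4 levels with a 16-byte, two-entry pgd; since 2 pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES, the pgd grows to two concatenated 4K pages and the stage-2 walk is shortened to 3 levels.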
739
740static struct io_pgtable *
741arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
742{
743 struct io_pgtable *iop;
744
745 if (cfg->ias > 32 || cfg->oas > 40)
746 return NULL;
747
748 cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
749 iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
750 if (iop) {
751 cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
752 cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
753 }
754
755 return iop;
756}
757
758static struct io_pgtable *
759arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
760{
761 struct io_pgtable *iop;
762
763 if (cfg->ias > 40 || cfg->oas > 40)
764 return NULL;
765
766 cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
767 iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
768 if (iop)
769 cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;
770
771 return iop;
772}
773
774struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
775 .alloc = arm_64_lpae_alloc_pgtable_s1,
776 .free = arm_lpae_free_pgtable,
777};
778
779struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
780 .alloc = arm_64_lpae_alloc_pgtable_s2,
781 .free = arm_lpae_free_pgtable,
782};
783
784struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
785 .alloc = arm_32_lpae_alloc_pgtable_s1,
786 .free = arm_lpae_free_pgtable,
787};
788
789struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
790 .alloc = arm_32_lpae_alloc_pgtable_s2,
791 .free = arm_lpae_free_pgtable,
792};
793
794#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST
795
796static struct io_pgtable_cfg *cfg_cookie;
797
798static void dummy_tlb_flush_all(void *cookie)
799{
800 WARN_ON(cookie != cfg_cookie);
801}
802
803static void dummy_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
804 void *cookie)
805{
806 WARN_ON(cookie != cfg_cookie);
807 WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
808}
809
810static void dummy_tlb_sync(void *cookie)
811{
812 WARN_ON(cookie != cfg_cookie);
813}
814
815static void dummy_flush_pgtable(void *ptr, size_t size, void *cookie)
816{
817 WARN_ON(cookie != cfg_cookie);
818}
819
820static struct iommu_gather_ops dummy_tlb_ops __initdata = {
821 .tlb_flush_all = dummy_tlb_flush_all,
822 .tlb_add_flush = dummy_tlb_add_flush,
823 .tlb_sync = dummy_tlb_sync,
824 .flush_pgtable = dummy_flush_pgtable,
825};
826
827static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
828{
829 struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
830 struct io_pgtable_cfg *cfg = &data->iop.cfg;
831
832 pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
833 cfg->pgsize_bitmap, cfg->ias);
834 pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
835 data->levels, data->pgd_size, data->pg_shift,
836 data->bits_per_level, data->pgd);
837}
838
839#define __FAIL(ops, i) ({ \
840 WARN(1, "selftest: test failed for fmt idx %d\n", (i)); \
841 arm_lpae_dump_ops(ops); \
842 selftest_running = false; \
843 -EFAULT; \
844})
845
846static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
847{
848 static const enum io_pgtable_fmt fmts[] = {
849 ARM_64_LPAE_S1,
850 ARM_64_LPAE_S2,
851 };
852
853 int i, j;
854 unsigned long iova;
855 size_t size;
856 struct io_pgtable_ops *ops;
857
858 selftest_running = true;
859
860 for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
861 cfg_cookie = cfg;
862 ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
863 if (!ops) {
864 pr_err("selftest: failed to allocate io pgtable ops\n");
865 return -ENOMEM;
866 }
867
868 /*
869 * Initial sanity checks.
870 * Empty page tables shouldn't provide any translations.
871 */
872 if (ops->iova_to_phys(ops, 42))
873 return __FAIL(ops, i);
874
875 if (ops->iova_to_phys(ops, SZ_1G + 42))
876 return __FAIL(ops, i);
877
878 if (ops->iova_to_phys(ops, SZ_2G + 42))
879 return __FAIL(ops, i);
880
881 /*
882 * Distinct mappings of different granule sizes.
883 */
884 iova = 0;
885 j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
886 while (j != BITS_PER_LONG) {
887 size = 1UL << j;
888
889 if (ops->map(ops, iova, iova, size, IOMMU_READ |
890 IOMMU_WRITE |
891 IOMMU_NOEXEC |
892 IOMMU_CACHE))
893 return __FAIL(ops, i);
894
895 /* Overlapping mappings */
896 if (!ops->map(ops, iova, iova + size, size,
897 IOMMU_READ | IOMMU_NOEXEC))
898 return __FAIL(ops, i);
899
900 if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
901 return __FAIL(ops, i);
902
903 iova += SZ_1G;
904 j++;
905 j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
906 }
907
908 /* Partial unmap */
909 size = 1UL << __ffs(cfg->pgsize_bitmap);
910 if (ops->unmap(ops, SZ_1G + size, size) != size)
911 return __FAIL(ops, i);
912
913 /* Remap of partial unmap */
914 if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
915 return __FAIL(ops, i);
916
917 if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
918 return __FAIL(ops, i);
919
920 /* Full unmap */
921 iova = 0;
922 j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
923 while (j != BITS_PER_LONG) {
924 size = 1UL << j;
925
926 if (ops->unmap(ops, iova, size) != size)
927 return __FAIL(ops, i);
928
929 if (ops->iova_to_phys(ops, iova + 42))
930 return __FAIL(ops, i);
931
932 /* Remap full block */
933 if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
934 return __FAIL(ops, i);
935
936 if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
937 return __FAIL(ops, i);
938
939 iova += SZ_1G;
940 j++;
941 j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
942 }
943
944 free_io_pgtable_ops(ops);
945 }
946
947 selftest_running = false;
948 return 0;
949}
950
951static int __init arm_lpae_do_selftests(void)
952{
953 static const unsigned long pgsize[] = {
954 SZ_4K | SZ_2M | SZ_1G,
955 SZ_16K | SZ_32M,
956 SZ_64K | SZ_512M,
957 };
958
959 static const unsigned int ias[] = {
960 32, 36, 40, 42, 44, 48,
961 };
962
963 int i, j, pass = 0, fail = 0;
964 struct io_pgtable_cfg cfg = {
965 .tlb = &dummy_tlb_ops,
966 .oas = 48,
967 };
968
969 for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
970 for (j = 0; j < ARRAY_SIZE(ias); ++j) {
971 cfg.pgsize_bitmap = pgsize[i];
972 cfg.ias = ias[j];
973 pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
974 pgsize[i], ias[j]);
975 if (arm_lpae_run_tests(&cfg))
976 fail++;
977 else
978 pass++;
979 }
980 }
981
982 pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
983 return fail ? -EFAULT : 0;
984}
985subsys_initcall(arm_lpae_do_selftests);
986#endif
diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c
new file mode 100644
index 000000000000..6436fe24bc2f
--- /dev/null
+++ b/drivers/iommu/io-pgtable.c
@@ -0,0 +1,82 @@
1/*
2 * Generic page table allocator for IOMMUs.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 *
16 * Copyright (C) 2014 ARM Limited
17 *
18 * Author: Will Deacon <will.deacon@arm.com>
19 */
20
21#include <linux/bug.h>
22#include <linux/kernel.h>
23#include <linux/types.h>
24
25#include "io-pgtable.h"
26
27extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
28extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
29extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
30extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
31
32static const struct io_pgtable_init_fns *
33io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] =
34{
35#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE
36 [ARM_32_LPAE_S1] = &io_pgtable_arm_32_lpae_s1_init_fns,
37 [ARM_32_LPAE_S2] = &io_pgtable_arm_32_lpae_s2_init_fns,
38 [ARM_64_LPAE_S1] = &io_pgtable_arm_64_lpae_s1_init_fns,
39 [ARM_64_LPAE_S2] = &io_pgtable_arm_64_lpae_s2_init_fns,
40#endif
41};
42
43struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
44 struct io_pgtable_cfg *cfg,
45 void *cookie)
46{
47 struct io_pgtable *iop;
48 const struct io_pgtable_init_fns *fns;
49
50 if (fmt >= IO_PGTABLE_NUM_FMTS)
51 return NULL;
52
53 fns = io_pgtable_init_table[fmt];
54 if (!fns)
55 return NULL;
56
57 iop = fns->alloc(cfg, cookie);
58 if (!iop)
59 return NULL;
60
61 iop->fmt = fmt;
62 iop->cookie = cookie;
63 iop->cfg = *cfg;
64
65 return &iop->ops;
66}
67
68/*
69 * It is the IOMMU driver's responsibility to ensure that the page table
70 * is no longer accessible to the walker by this point.
71 */
72void free_io_pgtable_ops(struct io_pgtable_ops *ops)
73{
74 struct io_pgtable *iop;
75
76 if (!ops)
77 return;
78
79 iop = container_of(ops, struct io_pgtable, ops);
80 iop->cfg.tlb->tlb_flush_all(iop->cookie);
81 io_pgtable_init_table[iop->fmt]->free(iop);
82}
diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
new file mode 100644
index 000000000000..10e32f69c668
--- /dev/null
+++ b/drivers/iommu/io-pgtable.h
@@ -0,0 +1,143 @@
1#ifndef __IO_PGTABLE_H
2#define __IO_PGTABLE_H
3
4/*
5 * Public API for use by IOMMU drivers
6 */
7enum io_pgtable_fmt {
8 ARM_32_LPAE_S1,
9 ARM_32_LPAE_S2,
10 ARM_64_LPAE_S1,
11 ARM_64_LPAE_S2,
12 IO_PGTABLE_NUM_FMTS,
13};
14
15/**
16 * struct iommu_gather_ops - IOMMU callbacks for TLB and page table management.
17 *
18 * @tlb_flush_all: Synchronously invalidate the entire TLB context.
19 * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range.
 20 * @tlb_sync: Ensure any queued TLB invalidation has taken effect.
21 * @flush_pgtable: Ensure page table updates are visible to the IOMMU.
22 *
23 * Note that these can all be called in atomic context and must therefore
24 * not block.
25 */
26struct iommu_gather_ops {
27 void (*tlb_flush_all)(void *cookie);
28 void (*tlb_add_flush)(unsigned long iova, size_t size, bool leaf,
29 void *cookie);
30 void (*tlb_sync)(void *cookie);
31 void (*flush_pgtable)(void *ptr, size_t size, void *cookie);
32};
33
34/**
35 * struct io_pgtable_cfg - Configuration data for a set of page tables.
36 *
37 * @quirks: A bitmap of hardware quirks that require some special
38 * action by the low-level page table allocator.
39 * @pgsize_bitmap: A bitmap of page sizes supported by this set of page
40 * tables.
41 * @ias: Input address (iova) size, in bits.
42 * @oas: Output address (paddr) size, in bits.
43 * @tlb: TLB management callbacks for this set of tables.
44 */
45struct io_pgtable_cfg {
46 #define IO_PGTABLE_QUIRK_ARM_NS (1 << 0) /* Set NS bit in PTEs */
47 int quirks;
48 unsigned long pgsize_bitmap;
49 unsigned int ias;
50 unsigned int oas;
51 const struct iommu_gather_ops *tlb;
52
53 /* Low-level data specific to the table format */
54 union {
55 struct {
56 u64 ttbr[2];
57 u64 tcr;
58 u64 mair[2];
59 } arm_lpae_s1_cfg;
60
61 struct {
62 u64 vttbr;
63 u64 vtcr;
64 } arm_lpae_s2_cfg;
65 };
66};
67
68/**
69 * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers.
70 *
71 * @map: Map a physically contiguous memory region.
72 * @unmap: Unmap a physically contiguous memory region.
73 * @iova_to_phys: Translate iova to physical address.
74 *
75 * These functions map directly onto the iommu_ops member functions with
76 * the same names.
77 */
78struct io_pgtable_ops {
79 int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
80 phys_addr_t paddr, size_t size, int prot);
81 int (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
82 size_t size);
83 phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
84 unsigned long iova);
85};
86
87/**
88 * alloc_io_pgtable_ops() - Allocate a page table allocator for use by an IOMMU.
89 *
90 * @fmt: The page table format.
91 * @cfg: The page table configuration. This will be modified to represent
92 * the configuration actually provided by the allocator (e.g. the
93 * pgsize_bitmap may be restricted).
94 * @cookie: An opaque token provided by the IOMMU driver and passed back to
95 * the callback routines in cfg->tlb.
96 */
97struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
98 struct io_pgtable_cfg *cfg,
99 void *cookie);
100
101/**
102 * free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller
103 * *must* ensure that the page table is no longer
104 * live, but the TLB can be dirty.
105 *
106 * @ops: The ops returned from alloc_io_pgtable_ops.
107 */
108void free_io_pgtable_ops(struct io_pgtable_ops *ops);
109
110
111/*
112 * Internal structures for page table allocator implementations.
113 */
114
115/**
116 * struct io_pgtable - Internal structure describing a set of page tables.
117 *
118 * @fmt: The page table format.
119 * @cookie: An opaque token provided by the IOMMU driver and passed back to
120 * any callback routines.
121 * @cfg: A copy of the page table configuration.
122 * @ops: The page table operations in use for this set of page tables.
123 */
124struct io_pgtable {
125 enum io_pgtable_fmt fmt;
126 void *cookie;
127 struct io_pgtable_cfg cfg;
128 struct io_pgtable_ops ops;
129};
130
131/**
132 * struct io_pgtable_init_fns - Alloc/free a set of page tables for a
133 * particular format.
134 *
135 * @alloc: Allocate a set of page tables described by cfg.
136 * @free: Free the page tables associated with iop.
137 */
138struct io_pgtable_init_fns {
139 struct io_pgtable *(*alloc)(struct io_pgtable_cfg *cfg, void *cookie);
140 void (*free)(struct io_pgtable *iop);
141};
142
143#endif /* __IO_PGTABLE_H */
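For illustration, a minimal, hypothetical driver-side use of this API might look like the sketch below; the gather callbacks, cookie and register sizes are placeholders invented for the example, not part of the patch:

	static const struct iommu_gather_ops my_gather_ops = {
		.tlb_flush_all	= my_tlb_flush_all,	/* hypothetical callbacks */
		.tlb_add_flush	= my_tlb_add_flush,
		.tlb_sync	= my_tlb_sync,
		.flush_pgtable	= my_flush_pgtable,
	};

	struct io_pgtable_cfg cfg = {
		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
		.ias		= 48,
		.oas		= 48,
		.tlb		= &my_gather_ops,
	};
	struct io_pgtable_ops *ops;

	ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, my_domain);
	if (!ops)
		return -ENOMEM;

	/* cfg now reflects what the allocator granted (e.g. pgsize_bitmap) */
	ops->map(ops, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
	ops->unmap(ops, iova, SZ_4K);
	free_io_pgtable_ops(ops);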
diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h
new file mode 100644
index 000000000000..1c30014ed176
--- /dev/null
+++ b/include/linux/iopoll.h
@@ -0,0 +1,144 @@
1/*
2 * Copyright (c) 2012-2014 The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14
15#ifndef _LINUX_IOPOLL_H
16#define _LINUX_IOPOLL_H
17
18#include <linux/kernel.h>
19#include <linux/types.h>
20#include <linux/hrtimer.h>
21#include <linux/delay.h>
22#include <linux/errno.h>
23#include <linux/io.h>
24
25/**
26 * readx_poll_timeout - Periodically poll an address until a condition is met or a timeout occurs
27 * @op: accessor function (takes @addr as its only argument)
28 * @addr: Address to poll
29 * @val: Variable to read the value into
30 * @cond: Break condition (usually involving @val)
31 * @sleep_us: Maximum time to sleep between reads in us (0
32 * tight-loops). Should be less than ~20ms since usleep_range
33 * is used (see Documentation/timers/timers-howto.txt).
34 * @timeout_us: Timeout in us, 0 means never timeout
35 *
36 * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
37 * case, the last read value at @addr is stored in @val. Must not
38 * be called from atomic context if sleep_us or timeout_us are used.
39 *
40 * When available, you'll probably want to use one of the specialized
41 * macros defined below rather than this macro directly.
42 */
43#define readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) \
44({ \
45 ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
46 might_sleep_if(sleep_us); \
47 for (;;) { \
48 (val) = op(addr); \
49 if (cond) \
50 break; \
51 if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
52 (val) = op(addr); \
53 break; \
54 } \
55 if (sleep_us) \
56 usleep_range((sleep_us >> 2) + 1, sleep_us); \
57 } \
58 (cond) ? 0 : -ETIMEDOUT; \
59})
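A hypothetical usage, with the register offset and status bit invented for illustration:

	u32 val;
	int ret;

	/* Sleep up to 10us between reads; give up after 100us */
	ret = readx_poll_timeout(readl, base + MY_STATUS, val,
				 val & MY_STATUS_READY, 10, 100);
	if (ret)
		return ret;	/* -ETIMEDOUT; val holds the last value read */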
60
61/**
62 * readx_poll_timeout_atomic - Periodically poll an address until a condition is met or a timeout occurs
63 * @op: accessor function (takes @addr as its only argument)
64 * @addr: Address to poll
65 * @val: Variable to read the value into
66 * @cond: Break condition (usually involving @val)
67 * @delay_us: Time to udelay between reads in us (0 tight-loops). Should
68 * be less than ~10us since udelay is used (see
69 * Documentation/timers/timers-howto.txt).
70 * @timeout_us: Timeout in us, 0 means never timeout
71 *
72 * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
73 * case, the last read value at @addr is stored in @val.
74 *
75 * When available, you'll probably want to use one of the specialized
76 * macros defined below rather than this macro directly.
77 */
78#define readx_poll_timeout_atomic(op, addr, val, cond, delay_us, timeout_us) \
79({ \
80 ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
81 for (;;) { \
82 (val) = op(addr); \
83 if (cond) \
84 break; \
85 if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
86 (val) = op(addr); \
87 break; \
88 } \
89 if (delay_us) \
90 udelay(delay_us); \
91 } \
92 (cond) ? 0 : -ETIMEDOUT; \
93})
94
95
96#define readb_poll_timeout(addr, val, cond, delay_us, timeout_us) \
97 readx_poll_timeout(readb, addr, val, cond, delay_us, timeout_us)
98
99#define readb_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
100 readx_poll_timeout_atomic(readb, addr, val, cond, delay_us, timeout_us)
101
102#define readw_poll_timeout(addr, val, cond, delay_us, timeout_us) \
103 readx_poll_timeout(readw, addr, val, cond, delay_us, timeout_us)
104
105#define readw_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
106 readx_poll_timeout_atomic(readw, addr, val, cond, delay_us, timeout_us)
107
108#define readl_poll_timeout(addr, val, cond, delay_us, timeout_us) \
109 readx_poll_timeout(readl, addr, val, cond, delay_us, timeout_us)
110
111#define readl_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
112 readx_poll_timeout_atomic(readl, addr, val, cond, delay_us, timeout_us)
113
114#define readq_poll_timeout(addr, val, cond, delay_us, timeout_us) \
115 readx_poll_timeout(readq, addr, val, cond, delay_us, timeout_us)
116
117#define readq_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
118 readx_poll_timeout_atomic(readq, addr, val, cond, delay_us, timeout_us)
119
120#define readb_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \
121 readx_poll_timeout(readb_relaxed, addr, val, cond, delay_us, timeout_us)
122
123#define readb_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
124 readx_poll_timeout_atomic(readb_relaxed, addr, val, cond, delay_us, timeout_us)
125
126#define readw_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \
127 readx_poll_timeout(readw_relaxed, addr, val, cond, delay_us, timeout_us)
128
129#define readw_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
130 readx_poll_timeout_atomic(readw_relaxed, addr, val, cond, delay_us, timeout_us)
131
132#define readl_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \
133 readx_poll_timeout(readl_relaxed, addr, val, cond, delay_us, timeout_us)
134
135#define readl_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
136 readx_poll_timeout_atomic(readl_relaxed, addr, val, cond, delay_us, timeout_us)
137
138#define readq_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \
139 readx_poll_timeout(readq_relaxed, addr, val, cond, delay_us, timeout_us)
140
141#define readq_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
142 readx_poll_timeout_atomic(readq_relaxed, addr, val, cond, delay_us, timeout_us)
143
144#endif /* _LINUX_IOPOLL_H */