author		Catalin Marinas <catalin.marinas@arm.com>	2014-04-03 10:57:15 -0400
committer	Catalin Marinas <catalin.marinas@arm.com>	2014-05-09 10:53:37 -0400
commit		a501e32430d4232012ab708b8f0ce841f29e0f02 (patch)
tree		96ae526e57f5b5931025893b87419b5e4084fbb0 /arch/arm64
parent		bc07c2c6e9ed125d362af0214b6313dca180cb08 (diff)
arm64: Clean up the default pgprot setting
The primary aim of this patchset is to remove the pgprot_default and
prot_sect_default global variables and rely strictly on predefined
values. The original goal was to be able to run SMP kernels on UP
hardware by not setting the Shareability bit. However, it is unlikely
to see UP ARMv8 hardware and even if we do, the Shareability bit is no
longer assumed to disable cacheable accesses.

A side effect is that the device mappings now have the Shareability
attribute set. The hardware, however, should ignore it since Device
accesses are always Outer Shareable.

Following the removal of the two global variables, there is some PROT_*
macro reshuffling and cleanup, including the __PAGE_* macros (replaced
by PAGE_*).

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Will Deacon <will.deacon@arm.com>
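The mechanics, condensed from the pgtable.h hunk below: the base pgprot
is now chosen at build time instead of being fixed up at boot by
init_mem_pgprot(). The PTE_*/PMD_* bit macros come from
pgtable-hwdef.h; the snippet is copied from the patch itself:

	#ifdef CONFIG_SMP
	#define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
	#define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
	#else
	#define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF)
	#define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF)
	#endif

	/* every pgprot is then a constant expression, e.g.: */
	#define _PAGE_DEFAULT		(PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
	#define PAGE_KERNEL		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)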
Diffstat (limited to 'arch/arm64')
-rw-r--r--	arch/arm64/include/asm/io.h      |  8
-rw-r--r--	arch/arm64/include/asm/pgtable.h | 98
-rw-r--r--	arch/arm64/kernel/setup.c        |  1
-rw-r--r--	arch/arm64/mm/dma-mapping.c      |  2
-rw-r--r--	arch/arm64/mm/mmu.c              | 36
5 files changed, 50 insertions(+), 95 deletions(-)
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index a1bef78f0303..e0ecdcf6632d 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -230,19 +230,11 @@ extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot
 extern void __iounmap(volatile void __iomem *addr);
 extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
 
-#define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY)
-#define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
-#define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC))
-#define PROT_NORMAL		(PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
-
 #define ioremap(addr, size)		__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
 #define ioremap_nocache(addr, size)	__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
 #define ioremap_wc(addr, size)		__ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
 #define iounmap				__iounmap
 
-#define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF)
-#define PROT_SECT_DEVICE_nGnRE	(PROT_SECT_DEFAULT | PTE_PXN | PTE_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
-
 #define ARCH_HAS_IOREMAP_WC
 #include <asm-generic/iomap.h>
 
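The ioremap() helpers themselves are unchanged; only the PROT_*
definitions they expand to move into pgtable.h. A minimal usage sketch
-- UART_BASE, UART_SIZE and the 0x30 register offset are invented for
illustration:

	void __iomem *regs = ioremap(UART_BASE, UART_SIZE);	/* Device-nGnRE mapping */
	if (!regs)
		return -ENOMEM;
	writel(0x1, regs + 0x30);	/* MMIO access through the nGnRE mapping */
	iounmap(regs);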
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index e50bb3cbd8f2..752348dbb4f3 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -52,67 +52,60 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 #endif
 #define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))
 
-/*
- * The pgprot_* and protection_map entries will be fixed up at runtime to
- * include the cachable and bufferable bits based on memory policy, as well as
- * any architecture dependent bits like global/ASID and SMP shared mapping
- * bits.
- */
-#define _PAGE_DEFAULT		PTE_TYPE_PAGE | PTE_AF
+#ifdef CONFIG_SMP
+#define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
+#define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
+#else
+#define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF)
+#define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF)
+#endif
 
-extern pgprot_t pgprot_default;
+#define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
+#define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_NC))
+#define PROT_NORMAL		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL))
 
-#define __pgprot_modify(prot,mask,bits) \
-	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))
+#define PROT_SECT_DEVICE_nGnRE	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
+#define PROT_SECT_NORMAL	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
+#define PROT_SECT_NORMAL_EXEC	(PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
 
-#define _MOD_PROT(p, b)		__pgprot_modify(p, 0, b)
+#define _PAGE_DEFAULT		(PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
 
-#define PAGE_NONE		__pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE | PTE_PXN | PTE_UXN)
-#define PAGE_SHARED		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
-#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
-#define PAGE_COPY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
-#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
-#define PAGE_READONLY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
-#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
-#define PAGE_KERNEL		_MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
-#define PAGE_KERNEL_EXEC	_MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY | PTE_WRITE)
+#define PAGE_KERNEL		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
+#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
 
-#define PAGE_HYP		_MOD_PROT(pgprot_default, PTE_HYP)
+#define PAGE_HYP		__pgprot(_PAGE_DEFAULT | PTE_HYP)
 #define PAGE_HYP_DEVICE		__pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
 
-#define PAGE_S2			__pgprot_modify(pgprot_default, PTE_S2_MEMATTR_MASK, PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
+#define PAGE_S2			__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
 #define PAGE_S2_DEVICE		__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDWR | PTE_UXN)
 
-#define __PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
-#define __PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
-#define __PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
-#define __PAGE_COPY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
-#define __PAGE_COPY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
-#define __PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
-#define __PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
-#define __PAGE_EXECONLY		__pgprot(_PAGE_DEFAULT | PTE_NG | PTE_PXN)
+#define PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
+#define PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
+#define PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
+#define PAGE_COPY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define PAGE_COPY_EXEC		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
+#define PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
+#define PAGE_EXECONLY		__pgprot(_PAGE_DEFAULT | PTE_NG | PTE_PXN)
 
-#endif /* __ASSEMBLY__ */
-
-#define __P000  __PAGE_NONE
-#define __P001  __PAGE_READONLY
-#define __P010  __PAGE_COPY
-#define __P011  __PAGE_COPY
-#define __P100  __PAGE_EXECONLY
-#define __P101  __PAGE_READONLY_EXEC
-#define __P110  __PAGE_COPY_EXEC
-#define __P111  __PAGE_COPY_EXEC
-
-#define __S000  __PAGE_NONE
-#define __S001  __PAGE_READONLY
-#define __S010  __PAGE_SHARED
-#define __S011  __PAGE_SHARED
-#define __S100  __PAGE_EXECONLY
-#define __S101  __PAGE_READONLY_EXEC
-#define __S110  __PAGE_SHARED_EXEC
-#define __S111  __PAGE_SHARED_EXEC
+#define __P000  PAGE_NONE
+#define __P001  PAGE_READONLY
+#define __P010  PAGE_COPY
+#define __P011  PAGE_COPY
+#define __P100  PAGE_EXECONLY
+#define __P101  PAGE_READONLY_EXEC
+#define __P110  PAGE_COPY_EXEC
+#define __P111  PAGE_COPY_EXEC
+
+#define __S000  PAGE_NONE
+#define __S001  PAGE_READONLY
+#define __S010  PAGE_SHARED
+#define __S011  PAGE_SHARED
+#define __S100  PAGE_EXECONLY
+#define __S101  PAGE_READONLY_EXEC
+#define __S110  PAGE_SHARED_EXEC
+#define __S111  PAGE_SHARED_EXEC
 
-#ifndef __ASSEMBLY__
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
@@ -274,6 +267,9 @@ static inline int has_transparent_hugepage(void)
 	return 1;
 }
 
+#define __pgprot_modify(prot,mask,bits) \
+	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))
+
 /*
  * Mark the prot value as uncacheable and unbufferable.
  */
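__pgprot_modify() survives the cleanup because the pgprot_noncached()
family of helpers defined after this comment rewrite the memory
attributes of an existing pgprot rather than build one from scratch;
approximately (exact bits vary by tree version):

	#define pgprot_noncached(prot) \
		__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE))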
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 5b9e046d580e..7450c5802c3f 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -376,7 +376,6 @@ void __init setup_arch(char **cmdline_p)
 
 	*cmdline_p = boot_command_line;
 
-	init_mem_pgprot();
 	early_ioremap_init();
 
 	parse_early_param();
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index c851eb44dc50..4164c5ace9f8 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -115,7 +115,7 @@ static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
 	for (i = 0; i < (size >> PAGE_SHIFT); i++)
 		map[i] = page + i;
 	coherent_ptr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
-			    __get_dma_pgprot(attrs, pgprot_default, false));
+			    __get_dma_pgprot(attrs, __pgprot(PROT_NORMAL_NC), false));
 	kfree(map);
 	if (!coherent_ptr)
 		goto no_map;
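__get_dma_pgprot() previously derived the non-cacheable attributes from
the runtime pgprot_default; passing the constant __pgprot(PROT_NORMAL_NC)
keeps the vmap() remapping Normal Non-Cacheable for non-coherent
devices. Callers see no difference and still go through the generic
API, e.g. (dev and size being whatever the driver already holds):

	dma_addr_t dma_handle;
	void *buf = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
	/* ... use buf ... */
	dma_free_coherent(dev, size, buf, dma_handle);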
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 0a472c41a67f..2c0e1dda8163 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -43,11 +43,6 @@
 struct page *empty_zero_page;
 EXPORT_SYMBOL(empty_zero_page);
 
-pgprot_t pgprot_default;
-EXPORT_SYMBOL(pgprot_default);
-
-static pmdval_t prot_sect_kernel;
-
 struct cachepolicy {
 	const char policy[16];
 	u64 mair;
@@ -122,33 +117,6 @@ static int __init early_cachepolicy(char *p)
 }
 early_param("cachepolicy", early_cachepolicy);
 
-/*
- * Adjust the PMD section entries according to the CPU in use.
- */
-void __init init_mem_pgprot(void)
-{
-	pteval_t default_pgprot;
-	int i;
-
-	default_pgprot = PTE_ATTRINDX(MT_NORMAL);
-	prot_sect_kernel = PMD_TYPE_SECT | PMD_SECT_AF | PMD_ATTRINDX(MT_NORMAL);
-
-#ifdef CONFIG_SMP
-	/*
-	 * Mark memory with the "shared" attribute for SMP systems
-	 */
-	default_pgprot |= PTE_SHARED;
-	prot_sect_kernel |= PMD_SECT_S;
-#endif
-
-	for (i = 0; i < 16; i++) {
-		unsigned long v = pgprot_val(protection_map[i]);
-		protection_map[i] = __pgprot(v | default_pgprot);
-	}
-
-	pgprot_default = __pgprot(PTE_TYPE_PAGE | PTE_AF | default_pgprot);
-}
-
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 			      unsigned long size, pgprot_t vma_prot)
 {
@@ -205,7 +173,7 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
 		/* try section mapping first */
 		if (((addr | next | phys) & ~SECTION_MASK) == 0) {
 			pmd_t old_pmd =*pmd;
-			set_pmd(pmd, __pmd(phys | prot_sect_kernel));
+			set_pmd(pmd, __pmd(phys | PROT_SECT_NORMAL_EXEC));
 			/*
 			 * Check for previous table entries created during
 			 * boot (__create_page_tables) and flush them.
@@ -417,7 +385,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 		if (!p)
 			return -ENOMEM;
 
-		set_pmd(pmd, __pmd(__pa(p) | prot_sect_kernel));
+		set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
 	} else
 		vmemmap_verify((pte_t *)pmd, node, addr, next);
 	} while (addr = next, addr != end);