author     Linus Torvalds <torvalds@linux-foundation.org>    2014-06-06 13:43:28 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2014-06-06 13:43:28 -0400
commit     cc07aabc53978ae09a1d539237189f7c9841060a (patch)
tree       6f47580d19ab5ad85f319bdb260615e991a93399 /arch/arm64/mm/mmu.c
parent     9e47aaef0bd3a50a43626fa6b19e1f964ac173d6 (diff)
parent     9358d755bd5cba8965ea79f2a446e689323409f9 (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux into next
Pull arm64 updates from Catalin Marinas:
- Optimised assembly string/memory routines (based on the AArch64
  Cortex Strings library contributed to glibc but re-licensed under
  GPLv2)
- Optimised crypto algorithms making use of the ARMv8 crypto extensions
  (together with kernel API for using FPSIMD instructions in interrupt
  context)
- Ftrace support
- CPU topology parsing from DT
- ESR_EL1 (Exception Syndrome Register) exposed to user space signal
  handlers for SIGSEGV/SIGBUS (useful to emulation tools like Qemu)
- 1GB section linear mapping if applicable
- Barriers usage clean-up
- Default pgprot clean-up
Conflicts as per Catalin.
* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (57 commits)
arm64: kernel: initialize broadcast hrtimer based clock event device
arm64: ftrace: Add system call tracepoint
arm64: ftrace: Add CALLER_ADDRx macros
arm64: ftrace: Add dynamic ftrace support
arm64: Add ftrace support
ftrace: Add arm64 support to recordmcount
arm64: Add 'notrace' attribute to unwind_frame() for ftrace
arm64: add __ASSEMBLY__ in asm/insn.h
arm64: Fix linker script entry point
arm64: lib: Implement optimized string length routines
arm64: lib: Implement optimized string compare routines
arm64: lib: Implement optimized memcmp routine
arm64: lib: Implement optimized memset routine
arm64: lib: Implement optimized memmove routine
arm64: lib: Implement optimized memcpy routine
arm64: defconfig: enable a few more common/useful options in defconfig
ftrace: Make CALLER_ADDRx macros more generic
arm64: Fix deadlock scenario with smp_send_stop()
arm64: Fix machine_shutdown() definition
arm64: Support arch_irq_work_raise() via self IPIs
...
Diffstat (limited to 'arch/arm64/mm/mmu.c')
-rw-r--r--  arch/arm64/mm/mmu.c  67
1 file changed, 30 insertions(+), 37 deletions(-)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 4a829a210bb6..c43f1dd19489 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -43,11 +43,6 @@
 struct page *empty_zero_page;
 EXPORT_SYMBOL(empty_zero_page);
 
-pgprot_t pgprot_default;
-EXPORT_SYMBOL(pgprot_default);
-
-static pmdval_t prot_sect_kernel;
-
 struct cachepolicy {
         const char      policy[16];
         u64             mair;
@@ -122,33 +117,6 @@ static int __init early_cachepolicy(char *p)
 }
 early_param("cachepolicy", early_cachepolicy);
 
-/*
- * Adjust the PMD section entries according to the CPU in use.
- */
-void __init init_mem_pgprot(void)
-{
-        pteval_t default_pgprot;
-        int i;
-
-        default_pgprot = PTE_ATTRINDX(MT_NORMAL);
-        prot_sect_kernel = PMD_TYPE_SECT | PMD_SECT_AF | PMD_ATTRINDX(MT_NORMAL);
-
-#ifdef CONFIG_SMP
-        /*
-         * Mark memory with the "shared" attribute for SMP systems
-         */
-        default_pgprot |= PTE_SHARED;
-        prot_sect_kernel |= PMD_SECT_S;
-#endif
-
-        for (i = 0; i < 16; i++) {
-                unsigned long v = pgprot_val(protection_map[i]);
-                protection_map[i] = __pgprot(v | default_pgprot);
-        }
-
-        pgprot_default = __pgprot(PTE_TYPE_PAGE | PTE_AF | default_pgprot);
-}
-
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                               unsigned long size, pgprot_t vma_prot)
 {
@@ -196,11 +164,10 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
         pgprot_t prot_pte;
 
         if (map_io) {
-                prot_sect = PMD_TYPE_SECT | PMD_SECT_AF |
-                            PMD_ATTRINDX(MT_DEVICE_nGnRE);
+                prot_sect = PROT_SECT_DEVICE_nGnRE;
                 prot_pte = __pgprot(PROT_DEVICE_nGnRE);
         } else {
-                prot_sect = prot_sect_kernel;
+                prot_sect = PROT_SECT_NORMAL_EXEC;
                 prot_pte = PAGE_KERNEL_EXEC;
         }
 
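The hunk above, together with the removal of init_mem_pgprot() earlier, replaces a section-attribute value built at boot (prot_sect_kernel) with named compile-time constants (PROT_SECT_DEVICE_nGnRE, PROT_SECT_NORMAL_EXEC), which is presumably also why pgprot_default and its export can be dropped. A minimal, self-contained sketch of the composition pattern involved; the field shifts and the MT_NORMAL index below are illustrative stand-ins, not the definitions from the arm64 headers:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the fields of an ARMv8 block ("section")
     * descriptor; not taken from the arm64 kernel headers. */
    #define SECT_TYPE_BLOCK     (1ULL << 0)            /* valid block entry     */
    #define SECT_AF             (1ULL << 10)           /* access flag           */
    #define SECT_SHARED         (3ULL << 8)            /* inner shareable (SMP) */
    #define SECT_ATTRINDX(idx)  ((uint64_t)(idx) << 2) /* MAIR attribute index  */
    #define MT_NORMAL           4                      /* hypothetical index    */

    int main(void)
    {
            /* What init_mem_pgprot() used to assemble at boot time... */
            uint64_t prot_sect_kernel = SECT_TYPE_BLOCK | SECT_AF |
                                        SECT_ATTRINDX(MT_NORMAL);
    #ifdef CONFIG_SMP
            prot_sect_kernel |= SECT_SHARED;   /* "shared" attribute on SMP */
    #endif
            /* ...and what a compile-time PROT_SECT_*-style constant captures. */
            printf("section attributes: %#llx\n",
                   (unsigned long long)prot_sect_kernel);
            return 0;
    }

Since every term is a constant, the whole value can be folded at compile time, so there is nothing left for the boot-time loop over protection_map to compute for the section case.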
@@ -242,7 +209,30 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
 
         do {
                 next = pud_addr_end(addr, end);
-                alloc_init_pmd(pud, addr, next, phys, map_io);
+
+                /*
+                 * For 4K granule only, attempt to put down a 1GB block
+                 */
+                if (!map_io && (PAGE_SHIFT == 12) &&
+                    ((addr | next | phys) & ~PUD_MASK) == 0) {
+                        pud_t old_pud = *pud;
+                        set_pud(pud, __pud(phys | PROT_SECT_NORMAL_EXEC));
+
+                        /*
+                         * If we have an old value for a pud, it will
+                         * be pointing to a pmd table that we no longer
+                         * need (from swapper_pg_dir).
+                         *
+                         * Look up the old pmd table and free it.
+                         */
+                        if (!pud_none(old_pud)) {
+                                phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
+                                memblock_free(table, PAGE_SIZE);
+                                flush_tlb_all();
+                        }
+                } else {
+                        alloc_init_pmd(pud, addr, next, phys, map_io);
+                }
                 phys += next - addr;
         } while (pud++, addr = next, addr != end);
 }
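The gating test ((addr | next | phys) & ~PUD_MASK) == 0 passes only when the virtual start, the end of the range covered by this pud entry, and the physical address are all aligned to the pud block size (1GB with 4K pages); otherwise the code falls back to pmd mappings. A minimal userspace sketch of that check, with PUD_SHIFT/PUD_MASK redefined locally for illustration rather than taken from the kernel headers:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative values: with 4K pages and this configuration a pud entry
     * covers 1GB, i.e. PUD_SHIFT == 30. */
    #define PUD_SHIFT 30
    #define PUD_SIZE  (1ULL << PUD_SHIFT)
    #define PUD_MASK  (~(PUD_SIZE - 1))

    /* Non-zero when addr, next and phys are all PUD_SIZE-aligned, mirroring
     * the condition checked before set_pud() in the hunk above. */
    static int can_use_block(uint64_t addr, uint64_t next, uint64_t phys)
    {
            return ((addr | next | phys) & ~PUD_MASK) == 0;
    }

    int main(void)
    {
            /* All three 1GB-aligned: a block mapping is possible. */
            assert(can_use_block(0xffffffc000000000ULL, 0xffffffc040000000ULL,
                                 0x80000000ULL));
            /* Physical address only 2MB-aligned: fall back to pmd mappings. */
            assert(!can_use_block(0xffffffc000000000ULL, 0xffffffc040000000ULL,
                                  0x80200000ULL));
            printf("alignment checks behave as expected\n");
            return 0;
    }

OR-ing the three values before masking is just a compact way of requiring that each of them has zero low-order bits below the block size.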
@@ -399,6 +389,9 @@ int kern_addr_valid(unsigned long addr)
         if (pud_none(*pud))
                 return 0;
 
+        if (pud_sect(*pud))
+                return pfn_valid(pud_pfn(*pud));
+
         pmd = pmd_offset(pud, addr);
         if (pmd_none(*pmd))
                 return 0;
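With the linear map now able to contain 1GB pud blocks, kern_addr_valid() has to stop walking at the pud level when the entry is a block: there is no pmd table beneath it, and validity is decided by the block's own page frame number. A toy sketch of that early-return pattern; the descriptor layout, pud_block() and pfn_valid() here are invented for illustration and are not the kernel's definitions:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Toy descriptors: bit 0 = valid, bit 1 = "points to a next-level table"
     * (clear means the entry maps a block directly). */
    typedef struct { uint64_t val; } pud_t;
    typedef struct { uint64_t val; } pmd_t;

    #define DESC_VALID  0x1ULL
    #define DESC_TABLE  0x2ULL
    #define PAGE_SHIFT  12

    static bool pud_none(pud_t p)    { return !(p.val & DESC_VALID); }
    static bool pud_block(pud_t p)   { return (p.val & (DESC_VALID | DESC_TABLE)) == DESC_VALID; }
    static uint64_t pud_pfn(pud_t p) { return (p.val & ~0xfffULL) >> PAGE_SHIFT; }

    /* Stand-in for pfn_valid(): accept any pfn below an arbitrary limit. */
    static bool pfn_valid(uint64_t pfn) { return pfn < 0x100000; }

    /* The walk stops at the pud when it maps a block, mirroring the new
     * pud_sect()/pud_pfn() check added to kern_addr_valid() above. */
    static bool addr_valid(pud_t pud, const pmd_t *pmd_table, unsigned int idx)
    {
            if (pud_none(pud))
                    return false;
            if (pud_block(pud))                     /* 1GB block: no pmd level */
                    return pfn_valid(pud_pfn(pud));
            return pmd_table[idx].val & DESC_VALID; /* otherwise descend */
    }

    int main(void)
    {
            pud_t block = { .val = 0x80000000ULL | DESC_VALID }; /* block at 2GB */
            pmd_t empty[1] = { { .val = 0 } };
            printf("block-mapped address valid: %d\n",
                   addr_valid(block, empty, 0));
            return 0;
    }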
@@ -446,7 +439,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
                 if (!p)
                         return -ENOMEM;
 
-                set_pmd(pmd, __pmd(__pa(p) | prot_sect_kernel));
+                set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
         } else
                 vmemmap_verify((pte_t *)pmd, node, addr, next);
         } while (addr = next, addr != end);