author		Linus Torvalds <torvalds@linux-foundation.org>	2014-06-06 13:43:28 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-06 13:43:28 -0400
commit		cc07aabc53978ae09a1d539237189f7c9841060a (patch)
tree		6f47580d19ab5ad85f319bdb260615e991a93399 /arch/arm64/mm
parent		9e47aaef0bd3a50a43626fa6b19e1f964ac173d6 (diff)
parent		9358d755bd5cba8965ea79f2a446e689323409f9 (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux into next
Pull arm64 updates from Catalin Marinas:

 - Optimised assembly string/memory routines (based on the AArch64
   Cortex Strings library contributed to glibc but re-licensed under
   GPLv2)

 - Optimised crypto algorithms making use of the ARMv8 crypto extensions
   (together with kernel API for using FPSIMD instructions in interrupt
   context)

 - Ftrace support

 - CPU topology parsing from DT

 - ESR_EL1 (Exception Syndrome Register) exposed to user space signal
   handlers for SIGSEGV/SIGBUS (useful to emulation tools like Qemu)

 - 1GB section linear mapping if applicable

 - Barriers usage clean-up

 - Default pgprot clean-up

Conflicts as per Catalin.

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (57 commits)
  arm64: kernel: initialize broadcast hrtimer based clock event device
  arm64: ftrace: Add system call tracepoint
  arm64: ftrace: Add CALLER_ADDRx macros
  arm64: ftrace: Add dynamic ftrace support
  arm64: Add ftrace support
  ftrace: Add arm64 support to recordmcount
  arm64: Add 'notrace' attribute to unwind_frame() for ftrace
  arm64: add __ASSEMBLY__ in asm/insn.h
  arm64: Fix linker script entry point
  arm64: lib: Implement optimized string length routines
  arm64: lib: Implement optimized string compare routines
  arm64: lib: Implement optimized memcmp routine
  arm64: lib: Implement optimized memset routine
  arm64: lib: Implement optimized memmove routine
  arm64: lib: Implement optimized memcpy routine
  arm64: defconfig: enable a few more common/useful options in defconfig
  ftrace: Make CALLER_ADDRx macros more generic
  arm64: Fix deadlock scenario with smp_send_stop()
  arm64: Fix machine_shutdown() definition
  arm64: Support arch_irq_work_raise() via self IPIs
  ...
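The ESR_EL1 item above is backed by the fault.c change below, which records the syndrome in thread.fault_code so that it can be delivered with SIGSEGV/SIGBUS. The following user-space sketch shows how a handler might read it back; it assumes the esr_context record format (an ESR_MAGIC-tagged record in the sigcontext reserved area) that the series introduces, and the record-walking loop and field names are illustrative, not taken from this diff:

    #include <signal.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <ucontext.h>
    #include <unistd.h>

    /* Record header used in the arm64 sigcontext __reserved area (assumed ABI). */
    struct ctx_hdr { uint32_t magic; uint32_t size; };
    #define ESR_MAGIC 0x45535201

    static void segv_handler(int sig, siginfo_t *info, void *ucp)
    {
            ucontext_t *uc = ucp;
            uint8_t *p = (uint8_t *)uc->uc_mcontext.__reserved;
            struct ctx_hdr hdr;

            /* Walk the variable-length context records until the terminator. */
            for (;;) {
                    memcpy(&hdr, p, sizeof(hdr));
                    if (hdr.magic == 0 || hdr.size == 0)
                            break;
                    if (hdr.magic == ESR_MAGIC) {
                            uint64_t esr;
                            memcpy(&esr, p + sizeof(hdr), sizeof(esr));
                            /* WnR (bit 6) tells a write abort from a read abort. */
                            printf("fault at %p, esr=0x%llx (%s)\n", info->si_addr,
                                   (unsigned long long)esr,
                                   (esr & (1ULL << 6)) ? "write" : "read");
                            break;
                    }
                    p += hdr.size;
            }
            _exit(1);
    }

    /* Install with: struct sigaction sa = { .sa_sigaction = segv_handler,
     *                                       .sa_flags = SA_SIGINFO };
     *               sigaction(SIGSEGV, &sa, NULL);                        */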
Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--  arch/arm64/mm/Makefile       |   2
-rw-r--r--  arch/arm64/mm/cache.S        |   6
-rw-r--r--  arch/arm64/mm/dma-mapping.c  |   2
-rw-r--r--  arch/arm64/mm/fault.c        |   8
-rw-r--r--  arch/arm64/mm/mmu.c          |  67
-rw-r--r--  arch/arm64/mm/proc.S         |   2
-rw-r--r--  arch/arm64/mm/tlb.S          |  71
7 files changed, 40 insertions, 118 deletions
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
index b51d36401d83..3ecb56c624d3 100644
--- a/arch/arm64/mm/Makefile
+++ b/arch/arm64/mm/Makefile
@@ -1,5 +1,5 @@
 obj-y			:= dma-mapping.o extable.o fault.o init.o \
 			   cache.o copypage.o flush.o \
 			   ioremap.o mmap.o pgd.o mmu.o \
-			   context.o tlb.o proc.o
+			   context.o proc.o
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
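The dropped tlb.o corresponds to tlb.S, deleted at the end of this diff: the TLB range-invalidation helpers now live as inline C in <asm/tlbflush.h>. Only as a rough sketch of the shape of that replacement (the exact helper names and dsb option choices are not part of this diff), the user range flush becomes something like:

    static inline void flush_tlb_range(struct vm_area_struct *vma,
                                       unsigned long start, unsigned long end)
    {
            /* Same encoding as the deleted assembly: VA[...:12] in the low
             * bits, ASID in bits [63:48], one "tlbi vae1is" per 4K page. */
            unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48;
            unsigned long addr;

            start = asid | (start >> 12);
            end = asid | (end >> 12);

            dsb(ishst);                     /* order prior PTE updates */
            for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
                    asm("tlbi vae1is, %0" : : "r" (addr));
            dsb(ish);                       /* wait for completion     */
    }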
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index fda756875fa6..23663837acff 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -31,7 +31,7 @@
  * Corrupted registers: x0-x7, x9-x11
  */
 __flush_dcache_all:
-	dsb	sy				// ensure ordering with previous memory accesses
+	dmb	sy				// ensure ordering with previous memory accesses
 	mrs	x0, clidr_el1			// read clidr
 	and	x3, x0, #0x7000000		// extract loc from clidr
 	lsr	x3, x3, #23			// left align loc bit field
@@ -128,7 +128,7 @@ USER(9f, dc cvau, x4 ) // clean D line to PoU
 	add	x4, x4, x2
 	cmp	x4, x1
 	b.lo	1b
-	dsb	sy
+	dsb	ish
 
 	icache_line_size x2, x3
 	sub	x3, x2, #1
@@ -139,7 +139,7 @@ USER(9f, ic ivau, x4 ) // invalidate I line PoU
 	cmp	x4, x1
 	b.lo	1b
 9:						// ignore any faulting cache operation
-	dsb	sy
+	dsb	ish
 	isb
 	ret
 ENDPROC(flush_icache_range)
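The dsb sy → dsb ish changes here (and in proc.S below) are part of the barriers clean-up: cache and TLB maintenance that only other CPUs need to observe can complete within the inner-shareable domain instead of the full system. The barrier helpers are assumed to take the domain as an explicit option, along the lines of:

    /* Sketch of option-taking barrier macros (assumed <asm/barrier.h> style). */
    #define dsb(opt)	asm volatile("dsb " #opt : : : "memory")
    #define dmb(opt)	asm volatile("dmb " #opt : : : "memory")
    #define isb()	asm volatile("isb" : : : "memory")

    /* e.g. dsb(ish) waits for completion across all CPUs in the inner-shareable
     * domain, while dsb(sy) additionally covers devices in the full system.   */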
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index c851eb44dc50..4164c5ace9f8 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -115,7 +115,7 @@ static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
 	for (i = 0; i < (size >> PAGE_SHIFT); i++)
 		map[i] = page + i;
 	coherent_ptr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
-			    __get_dma_pgprot(attrs, pgprot_default, false));
+			    __get_dma_pgprot(attrs, __pgprot(PROT_NORMAL_NC), false));
 	kfree(map);
 	if (!coherent_ptr)
 		goto no_map;
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index c23751b06120..bcc965e2cce1 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -32,6 +32,7 @@
 
 #include <asm/exception.h>
 #include <asm/debug-monitors.h>
+#include <asm/esr.h>
 #include <asm/system_misc.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
@@ -123,6 +124,7 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
 	}
 
 	tsk->thread.fault_address = addr;
+	tsk->thread.fault_code = esr;
 	si.si_signo = sig;
 	si.si_errno = 0;
 	si.si_code = code;
@@ -148,8 +150,6 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re
 #define VM_FAULT_BADMAP		0x010000
 #define VM_FAULT_BADACCESS	0x020000
 
-#define ESR_WRITE		(1 << 6)
-#define ESR_CM			(1 << 8)
 #define ESR_LNX_EXEC		(1 << 24)
 
 static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
@@ -218,7 +218,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 
 	if (esr & ESR_LNX_EXEC) {
 		vm_flags = VM_EXEC;
-	} else if ((esr & ESR_WRITE) && !(esr & ESR_CM)) {
+	} else if ((esr & ESR_EL1_WRITE) && !(esr & ESR_EL1_CM)) {
 		vm_flags = VM_WRITE;
 		mm_flags |= FAULT_FLAG_WRITE;
 	}
@@ -525,7 +525,7 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
 	info.si_errno	= 0;
 	info.si_code	= inf->code;
 	info.si_addr	= (void __user *)addr;
-	arm64_notify_die("", regs, &info, esr);
+	arm64_notify_die("", regs, &info, 0);
 
 	return 0;
 }
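With the local ESR_WRITE/ESR_CM defines gone, fault.c now uses the shared ESR_EL1_* definitions from <asm/esr.h>; the classification logic itself is unchanged. As a self-contained illustration of that logic (bit positions taken from the defines removed above; ESR_LNX_EXEC remains a Linux-internal software flag):

    #include <stdbool.h>

    #define ESR_EL1_WRITE	(1u << 6)	/* WnR: abort was caused by a write  */
    #define ESR_EL1_CM		(1u << 8)	/* CM: cache maintenance instruction */

    /* A data abort only counts as a write fault when WnR is set and the access
     * was not a cache maintenance operation, which reports WnR=1 but must still
     * be allowed on mappings without write permission.                         */
    static bool esr_is_write_fault(unsigned int esr)
    {
            return (esr & ESR_EL1_WRITE) && !(esr & ESR_EL1_CM);
    }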
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 4a829a210bb6..c43f1dd19489 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -43,11 +43,6 @@
 struct page *empty_zero_page;
 EXPORT_SYMBOL(empty_zero_page);
 
-pgprot_t pgprot_default;
-EXPORT_SYMBOL(pgprot_default);
-
-static pmdval_t prot_sect_kernel;
-
 struct cachepolicy {
 	const char	policy[16];
 	u64		mair;
@@ -122,33 +117,6 @@ static int __init early_cachepolicy(char *p)
 }
 early_param("cachepolicy", early_cachepolicy);
 
-/*
- * Adjust the PMD section entries according to the CPU in use.
- */
-void __init init_mem_pgprot(void)
-{
-	pteval_t default_pgprot;
-	int i;
-
-	default_pgprot = PTE_ATTRINDX(MT_NORMAL);
-	prot_sect_kernel = PMD_TYPE_SECT | PMD_SECT_AF | PMD_ATTRINDX(MT_NORMAL);
-
-#ifdef CONFIG_SMP
-	/*
-	 * Mark memory with the "shared" attribute for SMP systems
-	 */
-	default_pgprot |= PTE_SHARED;
-	prot_sect_kernel |= PMD_SECT_S;
-#endif
-
-	for (i = 0; i < 16; i++) {
-		unsigned long v = pgprot_val(protection_map[i]);
-		protection_map[i] = __pgprot(v | default_pgprot);
-	}
-
-	pgprot_default = __pgprot(PTE_TYPE_PAGE | PTE_AF | default_pgprot);
-}
-
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 			      unsigned long size, pgprot_t vma_prot)
 {
@@ -196,11 +164,10 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
 	pgprot_t prot_pte;
 
 	if (map_io) {
-		prot_sect = PMD_TYPE_SECT | PMD_SECT_AF |
-			    PMD_ATTRINDX(MT_DEVICE_nGnRE);
+		prot_sect = PROT_SECT_DEVICE_nGnRE;
 		prot_pte = __pgprot(PROT_DEVICE_nGnRE);
 	} else {
-		prot_sect = prot_sect_kernel;
+		prot_sect = PROT_SECT_NORMAL_EXEC;
 		prot_pte = PAGE_KERNEL_EXEC;
 	}
 
@@ -242,7 +209,30 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
 
 	do {
 		next = pud_addr_end(addr, end);
-		alloc_init_pmd(pud, addr, next, phys, map_io);
+
+		/*
+		 * For 4K granule only, attempt to put down a 1GB block
+		 */
+		if (!map_io && (PAGE_SHIFT == 12) &&
+		    ((addr | next | phys) & ~PUD_MASK) == 0) {
+			pud_t old_pud = *pud;
+			set_pud(pud, __pud(phys | PROT_SECT_NORMAL_EXEC));
+
+			/*
+			 * If we have an old value for a pud, it will
+			 * be pointing to a pmd table that we no longer
+			 * need (from swapper_pg_dir).
+			 *
+			 * Look up the old pmd table and free it.
+			 */
+			if (!pud_none(old_pud)) {
+				phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
+				memblock_free(table, PAGE_SIZE);
+				flush_tlb_all();
+			}
+		} else {
+			alloc_init_pmd(pud, addr, next, phys, map_io);
+		}
 		phys += next - addr;
 	} while (pud++, addr = next, addr != end);
 }
@@ -399,6 +389,9 @@ int kern_addr_valid(unsigned long addr)
 	if (pud_none(*pud))
 		return 0;
 
+	if (pud_sect(*pud))
+		return pfn_valid(pud_pfn(*pud));
+
 	pmd = pmd_offset(pud, addr);
 	if (pmd_none(*pmd))
 		return 0;
@@ -446,7 +439,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 		if (!p)
 			return -ENOMEM;
 
-		set_pmd(pmd, __pmd(__pa(p) | prot_sect_kernel));
+		set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
 	} else
 		vmemmap_verify((pte_t *)pmd, node, addr, next);
 	} while (addr = next, addr != end);
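The new pud-level path above installs a 1GB block only when the virtual start, the virtual end of the range, and the physical address are all 1GB aligned; OR-ing the three values and masking with ~PUD_MASK checks that in a single expression (and kern_addr_valid() learns to stop at such a block). A standalone restatement of the check, assuming the 4K-granule PUD_SHIFT of 30:

    #include <stdbool.h>
    #include <stdint.h>

    #define PUD_SHIFT	30			/* 1GB blocks with 4K pages */
    #define PUD_SIZE	(1ULL << PUD_SHIFT)
    #define PUD_MASK	(~(PUD_SIZE - 1))

    /* Zero only if addr, next and phys are each PUD_SIZE-aligned, i.e. the
     * whole [addr, next) range and its physical backing can be mapped by a
     * single 1GB block entry.                                              */
    static bool can_use_1gb_block(uint64_t addr, uint64_t next, uint64_t phys)
    {
            return ((addr | next | phys) & ~PUD_MASK) == 0;
    }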
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 9042aff5e9e3..7736779c9809 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -182,7 +182,7 @@ ENDPROC(cpu_do_switch_mm)
 ENTRY(__cpu_setup)
 	ic	iallu				// I+BTB cache invalidate
 	tlbi	vmalle1is			// invalidate I + D TLBs
-	dsb	sy
+	dsb	ish
 
 	mov	x0, #3 << 20
 	msr	cpacr_el1, x0			// Enable FP/ASIMD
diff --git a/arch/arm64/mm/tlb.S b/arch/arm64/mm/tlb.S
deleted file mode 100644
index 19da91e0cd27..000000000000
--- a/arch/arm64/mm/tlb.S
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Based on arch/arm/mm/tlb.S
- *
- * Copyright (C) 1997-2002 Russell King
- * Copyright (C) 2012 ARM Ltd.
- * Written by Catalin Marinas <catalin.marinas@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-#include <asm/asm-offsets.h>
-#include <asm/page.h>
-#include <asm/tlbflush.h>
-#include "proc-macros.S"
-
-/*
- * __cpu_flush_user_tlb_range(start, end, vma)
- *
- * Invalidate a range of TLB entries in the specified address space.
- *
- * - start - start address (may not be aligned)
- * - end   - end address (exclusive, may not be aligned)
- * - vma   - vma_struct describing address range
- */
-ENTRY(__cpu_flush_user_tlb_range)
-	vma_vm_mm x3, x2			// get vma->vm_mm
-	mmid	w3, x3				// get vm_mm->context.id
-	dsb	sy
-	lsr	x0, x0, #12			// align address
-	lsr	x1, x1, #12
-	bfi	x0, x3, #48, #16		// start VA and ASID
-	bfi	x1, x3, #48, #16		// end VA and ASID
-1:	tlbi	vae1is, x0			// TLB invalidate by address and ASID
-	add	x0, x0, #1
-	cmp	x0, x1
-	b.lo	1b
-	dsb	sy
-	ret
-ENDPROC(__cpu_flush_user_tlb_range)
-
-/*
- * __cpu_flush_kern_tlb_range(start,end)
- *
- * Invalidate a range of kernel TLB entries.
- *
- * - start - start address (may not be aligned)
- * - end   - end address (exclusive, may not be aligned)
- */
-ENTRY(__cpu_flush_kern_tlb_range)
-	dsb	sy
-	lsr	x0, x0, #12			// align address
-	lsr	x1, x1, #12
-1:	tlbi	vaae1is, x0			// TLB invalidate by address
-	add	x0, x0, #1
-	cmp	x0, x1
-	b.lo	1b
-	dsb	sy
-	isb
-	ret
-ENDPROC(__cpu_flush_kern_tlb_range)