path: root/arch/arm64
Diffstat (limited to 'arch/arm64')
 -rw-r--r--  arch/arm64/Kconfig                        |   5
 -rw-r--r--  arch/arm64/boot/dts/apm-storm.dtsi        |   3
 -rw-r--r--  arch/arm64/include/asm/assembler.h        |  23
 -rw-r--r--  arch/arm64/include/asm/atomic.h           |   2
 -rw-r--r--  arch/arm64/include/asm/barrier.h          |  20
 -rw-r--r--  arch/arm64/include/asm/cache.h            |  13
 -rw-r--r--  arch/arm64/include/asm/cacheflush.h       |   4
 -rw-r--r--  arch/arm64/include/asm/cachetype.h        |  11
 -rw-r--r--  arch/arm64/include/asm/cmpxchg.h          |   7
 -rw-r--r--  arch/arm64/include/asm/compat.h           |   5
 -rw-r--r--  arch/arm64/include/asm/esr.h              |   6
 -rw-r--r--  arch/arm64/include/asm/io.h               |   8
 -rw-r--r--  arch/arm64/include/asm/mmu.h              |   3
 -rw-r--r--  arch/arm64/include/asm/pgtable-hwdef.h    |   2
 -rw-r--r--  arch/arm64/include/asm/pgtable.h          | 114
 -rw-r--r--  arch/arm64/include/asm/processor.h        |   1
 -rw-r--r--  arch/arm64/include/asm/ptrace.h           |   5
 -rw-r--r--  arch/arm64/include/asm/sigcontext.h       |  31
 -rw-r--r--  arch/arm64/include/asm/thread_info.h      |  13
 -rw-r--r--  arch/arm64/include/asm/tlb.h              |   6
 -rw-r--r--  arch/arm64/include/asm/tlbflush.h         |  44
 -rw-r--r--  arch/arm64/include/asm/unistd32.h         |   3
 -rw-r--r--  arch/arm64/include/uapi/asm/sigcontext.h  |   7
 -rw-r--r--  arch/arm64/kernel/debug-monitors.c        |   3
 -rw-r--r--  arch/arm64/kernel/early_printk.c          |   6
 -rw-r--r--  arch/arm64/kernel/entry.S                 |  88
 -rw-r--r--  arch/arm64/kernel/head.S                  |   8
 -rw-r--r--  arch/arm64/kernel/hw_breakpoint.c         |   2
 -rw-r--r--  arch/arm64/kernel/process.c               |   3
 -rw-r--r--  arch/arm64/kernel/ptrace.c                |  51
 -rw-r--r--  arch/arm64/kernel/setup.c                 |  19
 -rw-r--r--  arch/arm64/kernel/signal.c                |  39
 -rw-r--r--  arch/arm64/kernel/signal32.c              |   7
 -rw-r--r--  arch/arm64/kernel/smp_spin_table.c        |  39
 -rw-r--r--  arch/arm64/kernel/time.c                  |   2
 -rw-r--r--  arch/arm64/kernel/traps.c                 |   7
 -rw-r--r--  arch/arm64/kvm/hyp.S                      |  12
 -rw-r--r--  arch/arm64/kvm/sys_regs.c                 |   4
 -rw-r--r--  arch/arm64/mm/Makefile                    |   2
 -rw-r--r--  arch/arm64/mm/cache.S                     |   6
 -rw-r--r--  arch/arm64/mm/dma-mapping.c               |  37
 -rw-r--r--  arch/arm64/mm/fault.c                     |  13
 -rw-r--r--  arch/arm64/mm/mmu.c                       |  67
 -rw-r--r--  arch/arm64/mm/proc.S                      |   2
 -rw-r--r--  arch/arm64/mm/tlb.S                       |  71
 45 files changed, 415 insertions, 409 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 1cefc6fe969a..78b356d079dd 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -242,6 +242,9 @@ config ARCH_WANT_HUGE_PMD_SHARE
 config HAVE_ARCH_TRANSPARENT_HUGEPAGE
 	def_bool y
 
+config ARCH_HAS_CACHE_LINE_SIZE
+	def_bool y
+
 source "mm/Kconfig"
 
 config XEN_DOM0
@@ -323,8 +326,6 @@ menu "CPU Power Management"
 
 source "drivers/cpuidle/Kconfig"
 
-source "kernel/power/Kconfig"
-
 source "drivers/cpufreq/Kconfig"
 
 endmenu
diff --git a/arch/arm64/boot/dts/apm-storm.dtsi b/arch/arm64/boot/dts/apm-storm.dtsi
index 93f4b2dd9248..f8c40a66e65d 100644
--- a/arch/arm64/boot/dts/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm-storm.dtsi
@@ -307,6 +307,7 @@
 			       <0x0 0x1f21e000 0x0 0x1000>,
 			       <0x0 0x1f217000 0x0 0x1000>;
 			interrupts = <0x0 0x86 0x4>;
+			dma-coherent;
 			status = "disabled";
 			clocks = <&sata01clk 0>;
 			phys = <&phy1 0>;
@@ -321,6 +322,7 @@
 			       <0x0 0x1f22e000 0x0 0x1000>,
 			       <0x0 0x1f227000 0x0 0x1000>;
 			interrupts = <0x0 0x87 0x4>;
+			dma-coherent;
 			status = "ok";
 			clocks = <&sata23clk 0>;
 			phys = <&phy2 0>;
@@ -334,6 +336,7 @@
 			       <0x0 0x1f23d000 0x0 0x1000>,
 			       <0x0 0x1f23e000 0x0 0x1000>;
 			interrupts = <0x0 0x88 0x4>;
+			dma-coherent;
 			status = "ok";
 			clocks = <&sata45clk 0>;
 			phys = <&phy3 0>;
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index fd3e3924041b..5901480bfdca 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -21,6 +21,7 @@
 #endif
 
 #include <asm/ptrace.h>
+#include <asm/thread_info.h>
 
 /*
  * Stack pushing/popping (register pairs only). Equivalent to store decrement
@@ -68,23 +69,31 @@
 	msr	daifclr, #8
 	.endm
 
-	.macro	disable_step, tmp
+	.macro	disable_step_tsk, flgs, tmp
+	tbz	\flgs, #TIF_SINGLESTEP, 9990f
 	mrs	\tmp, mdscr_el1
 	bic	\tmp, \tmp, #1
 	msr	mdscr_el1, \tmp
+	isb	// Synchronise with enable_dbg
+9990:
 	.endm
 
-	.macro	enable_step, tmp
+	.macro	enable_step_tsk, flgs, tmp
+	tbz	\flgs, #TIF_SINGLESTEP, 9990f
+	disable_dbg
 	mrs	\tmp, mdscr_el1
 	orr	\tmp, \tmp, #1
 	msr	mdscr_el1, \tmp
+9990:
 	.endm
 
-	.macro	enable_dbg_if_not_stepping, tmp
-	mrs	\tmp, mdscr_el1
-	tbnz	\tmp, #0, 9990f
-	enable_dbg
-9990:
+/*
+ * Enable both debug exceptions and interrupts. This is likely to be
+ * faster than two daifclr operations, since writes to this register
+ * are self-synchronising.
+ */
+	.macro	enable_dbg_and_irq
+	msr	daifclr, #(8 | 2)
 	.endm
 
 /*
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 0237f0867e37..9c48d74652cb 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -162,7 +162,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
  */
 #define ATOMIC64_INIT(i) { (i) }
 
-#define atomic64_read(v)	(*(volatile long long *)&(v)->counter)
+#define atomic64_read(v)	(*(volatile long *)&(v)->counter)
 #define atomic64_set(v,i)	(((v)->counter) = (i))
 
 static inline void atomic64_add(u64 i, atomic64_t *v)
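The retyped cast relies on arm64 being LP64: long is 64-bit, so "volatile long *" still reads the whole counter that "volatile long long *" did. A compile-time sanity check (illustrative, not part of the patch):

    /* LP64 assumption behind the atomic64_read() cast above */
    _Static_assert(sizeof(long) == 8, "arm64 is LP64: long is 64-bit");
    _Static_assert(sizeof(long) == sizeof(long long), "both casts read 64 bits");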
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 66eb7648043b..709f1f6d6bbd 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -25,12 +25,12 @@
 #define wfi()		asm volatile("wfi" : : : "memory")
 
 #define isb()		asm volatile("isb" : : : "memory")
-#define dmb(opt)	asm volatile("dmb sy" : : : "memory")
-#define dsb(opt)	asm volatile("dsb sy" : : : "memory")
+#define dmb(opt)	asm volatile("dmb " #opt : : : "memory")
+#define dsb(opt)	asm volatile("dsb " #opt : : : "memory")
 
-#define mb()		dsb()
-#define rmb()		asm volatile("dsb ld" : : : "memory")
-#define wmb()		asm volatile("dsb st" : : : "memory")
+#define mb()		dsb(sy)
+#define rmb()		dsb(ld)
+#define wmb()		dsb(st)
 
 #ifndef CONFIG_SMP
 #define smp_mb()	barrier()
@@ -40,7 +40,7 @@
 #define smp_store_release(p, v)						\
 do {									\
 	compiletime_assert_atomic_type(*p);				\
-	smp_mb();							\
+	barrier();							\
 	ACCESS_ONCE(*p) = (v);						\
 } while (0)
 
@@ -48,15 +48,15 @@ do {									\
 ({									\
 	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\
-	smp_mb();							\
+	barrier();							\
 	___p1;								\
 })
 
 #else
 
-#define smp_mb()	asm volatile("dmb ish" : : : "memory")
-#define smp_rmb()	asm volatile("dmb ishld" : : : "memory")
-#define smp_wmb()	asm volatile("dmb ishst" : : : "memory")
+#define smp_mb()	dmb(ish)
+#define smp_rmb()	dmb(ishld)
+#define smp_wmb()	dmb(ishst)
 
 #define smp_store_release(p, v)						\
 do {									\
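The reworked dmb()/dsb() macros splice their argument into the instruction string with preprocessor stringisation (#opt), which is what lets mb(), rmb() and the smp_*() barriers collapse into one-liners. A minimal sketch of the expansion, outside the kernel:

    #define dsb(opt)	asm volatile("dsb " #opt : : : "memory")

    void example(void)
    {
    	dsb(ishst);	/* emits "dsb ishst": inner-shareable, stores only */
    	dsb(ish);	/* emits "dsb ish":   inner-shareable, all accesses */
    }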
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 390308a67f0d..88cc05b5f3ac 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -16,6 +16,8 @@
 #ifndef __ASM_CACHE_H
 #define __ASM_CACHE_H
 
+#include <asm/cachetype.h>
+
 #define L1_CACHE_SHIFT		6
 #define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
 
@@ -27,6 +29,15 @@
  * the CPU.
  */
 #define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
-#define ARCH_SLAB_MINALIGN	8
+
+#ifndef __ASSEMBLY__
+
+static inline int cache_line_size(void)
+{
+	u32 cwg = cache_type_cwg();
+	return cwg ? 4 << cwg : L1_CACHE_BYTES;
+}
+
+#endif	/* __ASSEMBLY__ */
 
 #endif
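CTR_EL0.CWG (Cache Writeback Granule) encodes the granule as 4 << CWG bytes, so the new cache_line_size() decodes it like this (worked example, assuming the register reports CWG = 4):

    u32 cwg = cache_type_cwg();	/* e.g. 4 */
    int cls = cwg ? 4 << cwg	/* 4 << 4 = 64 bytes */
    	      : L1_CACHE_BYTES;	/* CWG == 0: no information, use the default */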
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 4c60e64a801c..a5176cf32dad 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -123,7 +123,7 @@ extern void flush_dcache_page(struct page *);
 static inline void __flush_icache_all(void)
 {
 	asm("ic	ialluis");
-	dsb();
+	dsb(ish);
 }
 
 #define flush_dcache_mmap_lock(mapping) \
@@ -150,7 +150,7 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
 	 * set_pte_at() called from vmap_pte_range() does not
 	 * have a DSB after cleaning the cache line.
 	 */
	dsb(ish);
 }
 
 static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
diff --git a/arch/arm64/include/asm/cachetype.h b/arch/arm64/include/asm/cachetype.h
index 85f5f511352a..4b23e758d5e0 100644
--- a/arch/arm64/include/asm/cachetype.h
+++ b/arch/arm64/include/asm/cachetype.h
@@ -20,12 +20,16 @@
 
 #define CTR_L1IP_SHIFT		14
 #define CTR_L1IP_MASK		3
+#define CTR_CWG_SHIFT		24
+#define CTR_CWG_MASK		15
 
 #define ICACHE_POLICY_RESERVED	0
 #define ICACHE_POLICY_AIVIVT	1
 #define ICACHE_POLICY_VIPT	2
 #define ICACHE_POLICY_PIPT	3
 
+#ifndef __ASSEMBLY__
+
 static inline u32 icache_policy(void)
 {
 	return (read_cpuid_cachetype() >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK;
@@ -45,4 +49,11 @@ static inline int icache_is_aivivt(void)
 	return icache_policy() == ICACHE_POLICY_AIVIVT;
 }
 
+static inline u32 cache_type_cwg(void)
+{
+	return (read_cpuid_cachetype() >> CTR_CWG_SHIFT) & CTR_CWG_MASK;
+}
+
+#endif	/* __ASSEMBLY__ */
+
 #endif	/* __ASM_CACHETYPE_H */
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 57c0fa7bf711..ddb9d7830558 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -72,7 +72,12 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 }
 
 #define xchg(ptr,x) \
-	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+({ \
+	__typeof__(*(ptr)) __ret; \
+	__ret = (__typeof__(*(ptr))) \
+		__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \
+	__ret; \
+})
 
 static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 				      unsigned long new, int size)
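Rewriting xchg() as a GNU statement expression appears intended to let callers discard the return value cleanly; with the old single-expression form, a call whose result is unused, such as the hypothetical lines below, could trigger GCC's "value computed is not used" warning on the cast:

    xchg(&flag, 0);		/* result deliberately ignored: no warning now */
    old = xchg(&flag, 1);	/* result used, works as before */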
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index e71f81fe127a..253e33bc94fb 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -305,11 +305,6 @@ static inline int is_compat_thread(struct thread_info *thread)
 
 #else /* !CONFIG_COMPAT */
 
-static inline int is_compat_task(void)
-{
-	return 0;
-}
-
 static inline int is_compat_thread(struct thread_info *thread)
 {
 	return 0;
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index c4a7f940b387..72674f4c3871 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -18,9 +18,11 @@
 #ifndef __ASM_ESR_H
 #define __ASM_ESR_H
 
-#define ESR_EL1_EC_SHIFT	(26)
-#define ESR_EL1_IL		(1U << 25)
+#define ESR_EL1_WRITE		(1 << 6)
+#define ESR_EL1_CM		(1 << 8)
+#define ESR_EL1_IL		(1 << 25)
 
+#define ESR_EL1_EC_SHIFT	(26)
 #define ESR_EL1_EC_UNKNOWN	(0x00)
 #define ESR_EL1_EC_WFI		(0x01)
 #define ESR_EL1_EC_CP15_32	(0x03)
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index a1bef78f0303..e0ecdcf6632d 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -230,19 +230,11 @@ extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot
 extern void __iounmap(volatile void __iomem *addr);
 extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
 
-#define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY)
-#define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
-#define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC))
-#define PROT_NORMAL		(PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
-
 #define ioremap(addr, size)		__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
 #define ioremap_nocache(addr, size)	__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
 #define ioremap_wc(addr, size)		__ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
 #define iounmap				__iounmap
 
-#define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF)
-#define PROT_SECT_DEVICE_nGnRE	(PROT_SECT_DEFAULT | PTE_PXN | PTE_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
-
 #define ARCH_HAS_IOREMAP_WC
 #include <asm-generic/iomap.h>
 
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index f600d400c07d..aff0292c8f4d 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -22,6 +22,9 @@ typedef struct {
 	void *vdso;
 } mm_context_t;
 
+#define INIT_MM_CONTEXT(name) \
+	.context.id_lock = __RAW_SPIN_LOCK_UNLOCKED(name.context.id_lock),
+
 #define ASID(mm)	((mm)->context.id & 0xffff)
 
 extern void paging_init(void);
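INIT_MM_CONTEXT() is the standard hook that mm/init-mm.c appends to the static initialiser of init_mm when an architecture defines it; that is how the context's id_lock gets a valid spinlock before the first ASID allocation. Simplified sketch of the generic usage (abridged, from memory):

    struct mm_struct init_mm = {
    	.mm_rb		= RB_ROOT,
    	/* ... other generic fields ... */
    	INIT_MM_CONTEXT(init_mm)
    };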
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 5fc8a66c3924..955e8c5f0afb 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -29,6 +29,8 @@
 */
 
 #define PUD_TABLE_BIT		(_AT(pgdval_t, 1) << 1)
+#define PUD_TYPE_MASK		(_AT(pgdval_t, 3) << 0)
+#define PUD_TYPE_SECT		(_AT(pgdval_t, 1) << 0)
 
 /*
  * Level 2 descriptor (PMD).
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 90c811f05a2e..e4c60d6e18b8 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -52,66 +52,60 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 #endif
 #define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))
 
-/*
- * The pgprot_* and protection_map entries will be fixed up at runtime to
- * include the cachable and bufferable bits based on memory policy, as well as
- * any architecture dependent bits like global/ASID and SMP shared mapping
- * bits.
- */
-#define _PAGE_DEFAULT		PTE_TYPE_PAGE | PTE_AF
+#ifdef CONFIG_SMP
+#define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
+#define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
+#else
+#define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF)
+#define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF)
+#endif
 
-extern pgprot_t pgprot_default;
+#define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
+#define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_NC))
+#define PROT_NORMAL		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL))
 
-#define __pgprot_modify(prot,mask,bits) \
-	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))
+#define PROT_SECT_DEVICE_nGnRE	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
+#define PROT_SECT_NORMAL	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
+#define PROT_SECT_NORMAL_EXEC	(PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
 
-#define _MOD_PROT(p, b)		__pgprot_modify(p, 0, b)
+#define _PAGE_DEFAULT		(PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
 
-#define PAGE_NONE		__pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE | PTE_PXN | PTE_UXN)
-#define PAGE_SHARED		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
-#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
-#define PAGE_COPY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
-#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
-#define PAGE_READONLY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
-#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
-#define PAGE_KERNEL		_MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
-#define PAGE_KERNEL_EXEC	_MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY | PTE_WRITE)
+#define PAGE_KERNEL		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
+#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
 
-#define PAGE_HYP		_MOD_PROT(pgprot_default, PTE_HYP)
+#define PAGE_HYP		__pgprot(_PAGE_DEFAULT | PTE_HYP)
 #define PAGE_HYP_DEVICE		__pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
 
-#define PAGE_S2			__pgprot_modify(pgprot_default, PTE_S2_MEMATTR_MASK, PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
+#define PAGE_S2			__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
 #define PAGE_S2_DEVICE		__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDWR | PTE_UXN)
 
-#define __PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
-#define __PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
-#define __PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
-#define __PAGE_COPY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
-#define __PAGE_COPY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
-#define __PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
-#define __PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
-
-#endif /* __ASSEMBLY__ */
-
-#define __P000	__PAGE_NONE
-#define __P001	__PAGE_READONLY
-#define __P010	__PAGE_COPY
-#define __P011	__PAGE_COPY
-#define __P100	__PAGE_READONLY_EXEC
-#define __P101	__PAGE_READONLY_EXEC
-#define __P110	__PAGE_COPY_EXEC
-#define __P111	__PAGE_COPY_EXEC
-
-#define __S000	__PAGE_NONE
-#define __S001	__PAGE_READONLY
-#define __S010	__PAGE_SHARED
-#define __S011	__PAGE_SHARED
-#define __S100	__PAGE_READONLY_EXEC
-#define __S101	__PAGE_READONLY_EXEC
-#define __S110	__PAGE_SHARED_EXEC
-#define __S111	__PAGE_SHARED_EXEC
+#define PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
+#define PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
+#define PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
+#define PAGE_COPY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define PAGE_COPY_EXEC		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
+#define PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
+#define PAGE_EXECONLY		__pgprot(_PAGE_DEFAULT | PTE_NG | PTE_PXN)
+
+#define __P000	PAGE_NONE
+#define __P001	PAGE_READONLY
+#define __P010	PAGE_COPY
+#define __P011	PAGE_COPY
+#define __P100	PAGE_EXECONLY
+#define __P101	PAGE_READONLY_EXEC
+#define __P110	PAGE_COPY_EXEC
+#define __P111	PAGE_COPY_EXEC
+
+#define __S000	PAGE_NONE
+#define __S001	PAGE_READONLY
+#define __S010	PAGE_SHARED
+#define __S011	PAGE_SHARED
+#define __S100	PAGE_EXECONLY
+#define __S101	PAGE_READONLY_EXEC
+#define __S110	PAGE_SHARED_EXEC
+#define __S111	PAGE_SHARED_EXEC
 
-#ifndef __ASSEMBLY__
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
@@ -143,8 +137,8 @@ extern struct page *empty_zero_page;
 #define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
 #define pte_exec(pte)		(!(pte_val(pte) & PTE_UXN))
 
-#define pte_valid_user(pte) \
-	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
+#define pte_valid_ng(pte) \
+	((pte_val(pte) & (PTE_VALID | PTE_NG)) == (PTE_VALID | PTE_NG))
 
 static inline pte_t pte_wrprotect(pte_t pte)
 {
@@ -198,7 +192,7 @@ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep, pte_t pte)
 {
-	if (pte_valid_user(pte)) {
+	if (pte_valid_ng(pte)) {
 		if (!pte_special(pte) && pte_exec(pte))
 			__sync_icache_dcache(pte, addr);
 		if (pte_dirty(pte) && pte_write(pte))
@@ -265,6 +259,7 @@ static inline pmd_t pte_pmd(pte_t pte)
 #define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)
 
 #define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
+#define pud_pfn(pud)		(((pud_val(pud) & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
 
 #define set_pmd_at(mm, addr, pmdp, pmd)	set_pmd(pmdp, pmd)
 
@@ -273,6 +268,9 @@ static inline int has_transparent_hugepage(void)
 	return 1;
 }
 
+#define __pgprot_modify(prot,mask,bits) \
+	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))
+
 /*
  * Mark the prot value as uncacheable and unbufferable.
  */
@@ -295,11 +293,17 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 #define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
 				 PMD_TYPE_SECT)
 
+#ifdef ARM64_64K_PAGES
+#define pud_sect(pud)		(0)
+#else
+#define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
+				 PUD_TYPE_SECT)
+#endif
 
 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
 	*pmdp = pmd;
-	dsb();
+	dsb(ishst);
 }
 
 static inline void pmd_clear(pmd_t *pmdp)
@@ -329,7 +333,7 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 static inline void set_pud(pud_t *pudp, pud_t pud)
 {
 	*pudp = pud;
-	dsb();
+	dsb(ishst);
 }
 
 static inline void pud_clear(pud_t *pudp)
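With pgprot_default gone, the PAGE_* protections above are plain compile-time constants, and the relocated __pgprot_modify() remains the tool for deriving variants by masking and OR-ing attribute bits. An illustrative use in the style of this header's pgprot_noncached() (the exact flag set shown is an assumption, not quoted from the patch):

    #define pgprot_noncached(prot) \
    	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE))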
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 45b20cd6cbca..34de2a8f7d93 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -79,6 +79,7 @@ struct thread_struct {
 	unsigned long		tp_value;
 	struct fpsimd_state	fpsimd_state;
 	unsigned long		fault_address;	/* fault info */
+	unsigned long		fault_code;	/* ESR_EL1 value */
 	struct debug_info	debug;		/* debugging */
 };
 
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index c7ba261dd4b3..a429b5940be2 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -135,6 +135,11 @@ struct pt_regs {
 #define user_stack_pointer(regs) \
 	(!compat_user_mode(regs)) ? ((regs)->sp) : ((regs)->compat_sp)
 
+static inline unsigned long regs_return_value(struct pt_regs *regs)
+{
+	return regs->regs[0];
+}
+
 /*
  * Are the current registers suitable for user mode? (used to maintain
  * security in signal handlers)
diff --git a/arch/arm64/include/asm/sigcontext.h b/arch/arm64/include/asm/sigcontext.h
deleted file mode 100644
index dca1094acc74..000000000000
--- a/arch/arm64/include/asm/sigcontext.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef __ASM_SIGCONTEXT_H
-#define __ASM_SIGCONTEXT_H
-
-#include <uapi/asm/sigcontext.h>
-
-/*
- * Auxiliary context saved in the sigcontext.__reserved array. Not exported to
- * user space as it will change with the addition of new context. User space
- * should check the magic/size information.
- */
-struct aux_context {
-	struct fpsimd_context fpsimd;
-	/* additional context to be added before "end" */
-	struct _aarch64_ctx end;
-};
-#endif
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 4a1ca1cfb2f8..9c086c63f911 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -91,6 +91,9 @@ static inline struct thread_info *current_thread_info(void)
 /*
  * thread information flags:
  *  TIF_SYSCALL_TRACE	- syscall trace active
+ *  TIF_SYSCALL_TRACEPOINT - syscall tracepoint for ftrace
+ *  TIF_SYSCALL_AUDIT	- syscall auditing
+ *  TIF_SECOMP		- syscall secure computing
  *  TIF_SIGPENDING	- signal pending
  *  TIF_NEED_RESCHED	- rescheduling necessary
  *  TIF_NOTIFY_RESUME	- callback before returning to user
@@ -102,6 +105,9 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
 #define TIF_FOREIGN_FPSTATE	3	/* CPU's FP state is not current's */
 #define TIF_SYSCALL_TRACE	8
+#define TIF_SYSCALL_AUDIT	9
+#define TIF_SYSCALL_TRACEPOINT	10
+#define TIF_SECCOMP		11
 #define TIF_POLLING_NRFLAG	16
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
 #define TIF_FREEZE		19
@@ -114,10 +120,17 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
 #define _TIF_FOREIGN_FPSTATE	(1 << TIF_FOREIGN_FPSTATE)
+#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
+#define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
+#define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
+#define _TIF_SECCOMP		(1 << TIF_SECCOMP)
 #define _TIF_32BIT		(1 << TIF_32BIT)
 
 #define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
 				 _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE)
 
+#define _TIF_SYSCALL_WORK	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+				 _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_THREAD_INFO_H */
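_TIF_SYSCALL_WORK groups the four syscall hooks so the entry path can test them with one mask instead of one branch per flag; the C equivalent of the new entry.S check is roughly:

    /* rough C equivalent of "tst x16, #_TIF_SYSCALL_WORK; b.ne __sys_trace" */
    if (current_thread_info()->flags & _TIF_SYSCALL_WORK)
    	/* take the traced-syscall slow path */;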
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 72cadf52ca80..80e2c08900d6 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -19,6 +19,7 @@
 #ifndef __ASM_TLB_H
 #define __ASM_TLB_H
 
+#define  __tlb_remove_pmd_tlb_entry __tlb_remove_pmd_tlb_entry
 
 #include <asm-generic/tlb.h>
 
@@ -99,5 +100,10 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 }
 #endif
 
+static inline void __tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp,
+						unsigned long address)
+{
+	tlb_add_flush(tlb, address);
+}
 
 #endif
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 8b482035cfc2..b9349c4513ea 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -72,9 +72,9 @@ extern struct cpu_tlb_fns cpu_tlb;
 */
 static inline void flush_tlb_all(void)
 {
-	dsb();
+	dsb(ishst);
 	asm("tlbi	vmalle1is");
-	dsb();
+	dsb(ish);
 	isb();
 }
 
@@ -82,9 +82,9 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
 {
 	unsigned long asid = (unsigned long)ASID(mm) << 48;
 
-	dsb();
+	dsb(ishst);
 	asm("tlbi	aside1is, %0" : : "r" (asid));
-	dsb();
+	dsb(ish);
 }
 
 static inline void flush_tlb_page(struct vm_area_struct *vma,
@@ -93,16 +93,36 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
 	unsigned long addr = uaddr >> 12 |
 		((unsigned long)ASID(vma->vm_mm) << 48);
 
-	dsb();
+	dsb(ishst);
 	asm("tlbi	vae1is, %0" : : "r" (addr));
-	dsb();
+	dsb(ish);
 }
 
-/*
- * Convert calls to our calling convention.
- */
-#define flush_tlb_range(vma,start,end)	__cpu_flush_user_tlb_range(start,end,vma)
-#define flush_tlb_kernel_range(s,e)	__cpu_flush_kern_tlb_range(s,e)
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+					unsigned long start, unsigned long end)
+{
+	unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48;
+	unsigned long addr;
+	start = asid | (start >> 12);
+	end = asid | (end >> 12);
+
+	dsb(ishst);
+	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
+		asm("tlbi vae1is, %0" : : "r"(addr));
+	dsb(ish);
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	unsigned long addr;
+	start >>= 12;
+	end >>= 12;
+
+	dsb(ishst);
+	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
+		asm("tlbi vaae1is, %0" : : "r"(addr));
+	dsb(ish);
+}
 
 /*
  * On AArch64, the cache coherency is handled via the set_pte_at() function.
@@ -114,7 +134,7 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 	 * set_pte() does not have a DSB, so make sure that the page table
 	 * write is visible.
 	 */
-	dsb();
+	dsb(ishst);
 }
 
 #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
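The TLBI operand places the ASID in bits [63:48] and the page number (VA >> 12) in the low bits, so after the shift the start/end variables are page-granular cursors; the stride 1 << (PAGE_SHIFT - 12) is 1 with 4K pages and 16 with 64K pages, i.e. one TLBI per page either way. Operand encoding sketch (illustrative):

    unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48;
    unsigned long op   = asid | (vaddr >> 12);	/* tlbi vae1is, op */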
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index bb8eb8a78e67..c8d8fc17bd5a 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -403,8 +403,9 @@ __SYSCALL(378, sys_kcmp)
 __SYSCALL(379, sys_finit_module)
 __SYSCALL(380, sys_sched_setattr)
 __SYSCALL(381, sys_sched_getattr)
+__SYSCALL(382, sys_renameat2)
 
-#define __NR_compat_syscalls		379
+#define __NR_compat_syscalls		383
 
 /*
  * Compat syscall numbers used by the AArch64 kernel.
diff --git a/arch/arm64/include/uapi/asm/sigcontext.h b/arch/arm64/include/uapi/asm/sigcontext.h
index 690ad51cc901..b72cf405b3fe 100644
--- a/arch/arm64/include/uapi/asm/sigcontext.h
+++ b/arch/arm64/include/uapi/asm/sigcontext.h
@@ -53,5 +53,12 @@ struct fpsimd_context {
 	__uint128_t vregs[32];
 };
 
+/* ESR_EL1 context */
+#define ESR_MAGIC	0x45535201
+
+struct esr_context {
+	struct _aarch64_ctx head;
+	u64 esr;
+};
 
 #endif /* _UAPI__ASM_SIGCONTEXT_H */
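User space finds esr_context (and any future records) by walking uc_mcontext.__reserved as a chain of _aarch64_ctx headers terminated by a zero magic. A hypothetical user-space walker (the function and variable names are made up for illustration):

    static void walk_mcontext(struct sigcontext *sc)
    {
    	struct _aarch64_ctx *head = (struct _aarch64_ctx *)sc->__reserved;

    	while (head->magic) {			/* magic == 0 is the "end" record */
    		if (head->magic == ESR_MAGIC) {
    			struct esr_context *ctx = (struct esr_context *)head;
    			/* ctx->esr holds the ESR_EL1 value for the fault */
    		}
    		head = (struct _aarch64_ctx *)((char *)head + head->size);
    	}
    }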
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index ed3955a95747..a7fb874b595e 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -318,9 +318,6 @@ static int brk_handler(unsigned long addr, unsigned int esr,
 	if (call_break_hook(regs, esr) == DBG_HOOK_HANDLED)
 		return 0;
 
-	pr_warn("unexpected brk exception at %lx, esr=0x%x\n",
-		(long)instruction_pointer(regs), esr);
-
 	if (!user_mode(regs))
 		return -EFAULT;
 
diff --git a/arch/arm64/kernel/early_printk.c b/arch/arm64/kernel/early_printk.c
index ffbbdde7aba1..2dc36d00addf 100644
--- a/arch/arm64/kernel/early_printk.c
+++ b/arch/arm64/kernel/early_printk.c
@@ -143,10 +143,8 @@ static int __init setup_early_printk(char *buf)
 	}
 	/* no options parsing yet */
 
-	if (paddr) {
-		set_fixmap_io(FIX_EARLYCON_MEM_BASE, paddr);
-		early_base = (void __iomem *)fix_to_virt(FIX_EARLYCON_MEM_BASE);
-	}
+	if (paddr)
+		early_base = (void __iomem *)set_fixmap_offset_io(FIX_EARLYCON_MEM_BASE, paddr);
 
 	printch = match->printch;
 	early_console = &early_console_dev;
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 80464e2fb1a5..bf017f4ffb4f 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -60,6 +60,9 @@
 	push	x0, x1
 	.if	\el == 0
 	mrs	x21, sp_el0
+	get_thread_info tsk			// Ensure MDSCR_EL1.SS is clear,
+	ldr	x19, [tsk, #TI_FLAGS]		// since we can unmask debug
+	disable_step_tsk x19, x20		// exceptions when scheduling.
 	.else
 	add	x21, sp, #S_FRAME_SIZE
 	.endif
@@ -259,7 +262,7 @@ el1_da:
 	 * Data abort handling
 	 */
 	mrs	x0, far_el1
-	enable_dbg_if_not_stepping x2
+	enable_dbg
 	// re-enable interrupts if they were enabled in the aborted context
 	tbnz	x23, #7, 1f			// PSR_I_BIT
 	enable_irq
@@ -275,6 +278,7 @@ el1_sp_pc:
 	 * Stack or PC alignment exception handling
 	 */
 	mrs	x0, far_el1
+	enable_dbg
 	mov	x1, x25
 	mov	x2, sp
 	b	do_sp_pc_abort
@@ -282,6 +286,7 @@ el1_undef:
 	/*
	 * Undefined instruction
	 */
+	enable_dbg
 	mov	x0, sp
 	b	do_undefinstr
 el1_dbg:
@@ -294,10 +299,11 @@ el1_dbg:
 	mrs	x0, far_el1
 	mov	x2, sp				// struct pt_regs
 	bl	do_debug_exception
-
+	enable_dbg
 	kernel_exit 1
 el1_inv:
 	// TODO: add support for undefined instructions in kernel mode
+	enable_dbg
 	mov	x0, sp
 	mov	x1, #BAD_SYNC
 	mrs	x2, esr_el1
@@ -307,7 +313,7 @@ ENDPROC(el1_sync)
 	.align	6
 el1_irq:
 	kernel_entry 1
-	enable_dbg_if_not_stepping x0
+	enable_dbg
 #ifdef CONFIG_TRACE_IRQFLAGS
 	bl	trace_hardirqs_off
 #endif
@@ -332,8 +338,7 @@ ENDPROC(el1_irq)
 #ifdef CONFIG_PREEMPT
 el1_preempt:
 	mov	x24, lr
-1:	enable_dbg
-	bl	preempt_schedule_irq		// irq en/disable is done inside
+1:	bl	preempt_schedule_irq		// irq en/disable is done inside
 	ldr	x0, [tsk, #TI_FLAGS]		// get new tasks TI_FLAGS
 	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
 	ret	x24
@@ -349,7 +354,7 @@ el0_sync:
 	lsr	x24, x25, #ESR_EL1_EC_SHIFT	// exception class
 	cmp	x24, #ESR_EL1_EC_SVC64		// SVC in 64-bit state
 	b.eq	el0_svc
-	adr	lr, ret_from_exception
+	adr	lr, ret_to_user
 	cmp	x24, #ESR_EL1_EC_DABT_EL0	// data abort in EL0
 	b.eq	el0_da
 	cmp	x24, #ESR_EL1_EC_IABT_EL0	// instruction abort in EL0
@@ -378,7 +383,7 @@ el0_sync_compat:
 	lsr	x24, x25, #ESR_EL1_EC_SHIFT	// exception class
 	cmp	x24, #ESR_EL1_EC_SVC32		// SVC in 32-bit state
 	b.eq	el0_svc_compat
-	adr	lr, ret_from_exception
+	adr	lr, ret_to_user
 	cmp	x24, #ESR_EL1_EC_DABT_EL0	// data abort in EL0
 	b.eq	el0_da
 	cmp	x24, #ESR_EL1_EC_IABT_EL0	// instruction abort in EL0
@@ -423,11 +428,8 @@ el0_da:
 	 */
 	mrs	x0, far_el1
 	bic	x0, x0, #(0xff << 56)
-	disable_step x1
-	isb
-	enable_dbg
 	// enable interrupts before calling the main handler
-	enable_irq
+	enable_dbg_and_irq
 	mov	x1, x25
 	mov	x2, sp
 	b	do_mem_abort
@@ -436,11 +438,8 @@ el0_ia:
 	 * Instruction abort handling
 	 */
 	mrs	x0, far_el1
-	disable_step x1
-	isb
-	enable_dbg
 	// enable interrupts before calling the main handler
-	enable_irq
+	enable_dbg_and_irq
 	orr	x1, x25, #1 << 24		// use reserved ISS bit for instruction aborts
 	mov	x2, sp
 	b	do_mem_abort
@@ -448,6 +447,7 @@ el0_fpsimd_acc:
 	/*
	 * Floating Point or Advanced SIMD access
	 */
+	enable_dbg
 	mov	x0, x25
 	mov	x1, sp
 	b	do_fpsimd_acc
@@ -455,6 +455,7 @@ el0_fpsimd_exc:
 	/*
	 * Floating Point or Advanced SIMD exception
	 */
+	enable_dbg
 	mov	x0, x25
 	mov	x1, sp
 	b	do_fpsimd_exc
@@ -463,11 +464,8 @@ el0_sp_pc:
 	 * Stack or PC alignment exception handling
 	 */
 	mrs	x0, far_el1
-	disable_step x1
-	isb
-	enable_dbg
 	// enable interrupts before calling the main handler
-	enable_irq
+	enable_dbg_and_irq
 	mov	x1, x25
 	mov	x2, sp
 	b	do_sp_pc_abort
@@ -475,9 +473,9 @@ el0_undef:
 	/*
	 * Undefined instruction
	 */
-	mov	x0, sp
 	// enable interrupts before calling the main handler
-	enable_irq
+	enable_dbg_and_irq
+	mov	x0, sp
 	b	do_undefinstr
 el0_dbg:
 	/*
@@ -485,11 +483,13 @@ el0_dbg:
 	 */
 	tbnz	x24, #0, el0_inv		// EL0 only
 	mrs	x0, far_el1
-	disable_step x1
 	mov	x1, x25
 	mov	x2, sp
-	b	do_debug_exception
+	bl	do_debug_exception
+	enable_dbg
+	b	ret_to_user
 el0_inv:
+	enable_dbg
 	mov	x0, sp
 	mov	x1, #BAD_SYNC
 	mrs	x2, esr_el1
@@ -500,15 +500,12 @@ ENDPROC(el0_sync)
 el0_irq:
 	kernel_entry 0
 el0_irq_naked:
-	disable_step x1
-	isb
 	enable_dbg
 #ifdef CONFIG_TRACE_IRQFLAGS
 	bl	trace_hardirqs_off
 #endif
 
 	irq_handler
-	get_thread_info tsk
 
 #ifdef CONFIG_TRACE_IRQFLAGS
 	bl	trace_hardirqs_on
@@ -517,14 +514,6 @@ el0_irq_naked:
 ENDPROC(el0_irq)
 
 /*
- * This is the return code to user mode for abort handlers
- */
-ret_from_exception:
-	get_thread_info tsk
-	b	ret_to_user
-ENDPROC(ret_from_exception)
-
-/*
  * Register switch for AArch64. The callee-saved registers need to be saved
  * and restored. On entry:
  * x0 = previous task_struct (must be preserved across the switch)
@@ -563,10 +552,7 @@ ret_fast_syscall:
 	ldr	x1, [tsk, #TI_FLAGS]
 	and	x2, x1, #_TIF_WORK_MASK
 	cbnz	x2, fast_work_pending
-	tbz	x1, #TIF_SINGLESTEP, fast_exit
-	disable_dbg
-	enable_step x2
-fast_exit:
+	enable_step_tsk x1, x2
 	kernel_exit 0, ret = 1
 
 /*
@@ -585,7 +571,6 @@ work_pending:
 	bl	do_notify_resume
 	b	ret_to_user
 work_resched:
-	enable_dbg
 	bl	schedule
 
 /*
@@ -596,9 +581,7 @@ ret_to_user:
 	ldr	x1, [tsk, #TI_FLAGS]
 	and	x2, x1, #_TIF_WORK_MASK
 	cbnz	x2, work_pending
-	tbz	x1, #TIF_SINGLESTEP, no_work_pending
-	disable_dbg
-	enable_step x2
+	enable_step_tsk x1, x2
 no_work_pending:
 	kernel_exit 0, ret = 0
 ENDPROC(ret_to_user)
@@ -625,14 +608,11 @@ el0_svc:
 	mov	sc_nr, #__NR_syscalls
 el0_svc_naked:					// compat entry point
 	stp	x0, scno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
-	disable_step x16
-	isb
-	enable_dbg
-	enable_irq
+	enable_dbg_and_irq
 
-	get_thread_info tsk
-	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall tracing
-	tbnz	x16, #TIF_SYSCALL_TRACE, __sys_trace // are we tracing syscalls?
+	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall hooks
+	tst	x16, #_TIF_SYSCALL_WORK
+	b.ne	__sys_trace
 	adr	lr, ret_fast_syscall		// return address
 	cmp	scno, sc_nr			// check upper syscall limit
 	b.hs	ni_sys
@@ -648,9 +628,8 @@ ENDPROC(el0_svc)
  * switches, and waiting for our parent to respond.
 */
 __sys_trace:
-	mov	x1, sp
-	mov	w0, #0				// trace entry
-	bl	syscall_trace
+	mov	x0, sp
+	bl	syscall_trace_enter
 	adr	lr, __sys_trace_return		// return address
 	uxtw	scno, w0			// syscall number (possibly new)
 	mov	x1, sp				// pointer to regs
@@ -665,9 +644,8 @@ __sys_trace:
 
 __sys_trace_return:
 	str	x0, [sp]			// save returned x0
-	mov	x1, sp
-	mov	w0, #1				// trace exit
-	bl	syscall_trace
+	mov	x0, sp
+	bl	syscall_trace_exit
 	b	ret_to_user
 
 /*
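The immediate in enable_dbg_and_irq is a DAIF bit mask: for msr daifclr, bit 3 clears D (debug), bit 2 A (SError), bit 1 I (IRQ) and bit 0 F (FIQ), so #(8 | 2) unmasks debug exceptions and IRQs in a single self-synchronising write, consistent with enable_dbg (#8) and enable_irq (#2) elsewhere in this file. As C constants (names are illustrative, not kernel definitions):

    #define DAIF_DBG	(1 << 3)	/* D: debug exceptions */
    #define DAIF_SERR	(1 << 2)	/* A: SError */
    #define DAIF_IRQ	(1 << 1)	/* I: IRQ */
    #define DAIF_FIQ	(1 << 0)	/* F: FIQ */
    /* enable_dbg_and_irq == msr daifclr, #(DAIF_DBG | DAIF_IRQ) */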
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 0fd565000772..b96a732e4859 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -230,11 +230,9 @@ ENTRY(set_cpu_boot_mode_flag)
 	cmp	w20, #BOOT_CPU_MODE_EL2
 	b.ne	1f
 	add	x1, x1, #4
-1:	dc	cvac, x1			// Clean potentially dirty cache line
-	dsb	sy
-	str	w20, [x1]			// This CPU has booted in EL1
-	dc	civac, x1			// Clean&invalidate potentially stale cache line
-	dsb	sy
+1:	str	w20, [x1]			// This CPU has booted in EL1
+	dmb	sy
+	dc	ivac, x1			// Invalidate potentially stale cache line
 	ret
 ENDPROC(set_cpu_boot_mode_flag)
 
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index bee789757806..df1cf15377b4 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -20,6 +20,7 @@
 
 #define pr_fmt(fmt) "hw-breakpoint: " fmt
 
+#include <linux/compat.h>
 #include <linux/cpu_pm.h>
 #include <linux/errno.h>
 #include <linux/hw_breakpoint.h>
@@ -27,7 +28,6 @@
 #include <linux/ptrace.h>
 #include <linux/smp.h>
 
-#include <asm/compat.h>
 #include <asm/current.h>
 #include <asm/debug-monitors.h>
 #include <asm/hw_breakpoint.h>
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index c5693163408c..9f2d6020b6c2 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -20,6 +20,7 @@
 
 #include <stdarg.h>
 
+#include <linux/compat.h>
 #include <linux/export.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
@@ -300,7 +301,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	 * Complete any pending TLB or cache maintenance on this CPU in case
	 * the thread migrates to a different CPU.
	 */
-	dsb();
+	dsb(ish);
 
 	/* the actual thread switch */
 	last = cpu_switch_to(prev, next);
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index f8700eca24e7..32d52d3b079c 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -19,6 +19,7 @@
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
 
+#include <linux/compat.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
@@ -1060,35 +1061,43 @@ long arch_ptrace(struct task_struct *child, long request,
 	return ptrace_request(child, request, addr, data);
 }
 
-asmlinkage int syscall_trace(int dir, struct pt_regs *regs)
+enum ptrace_syscall_dir {
+	PTRACE_SYSCALL_ENTER = 0,
+	PTRACE_SYSCALL_EXIT,
+};
+
+static void tracehook_report_syscall(struct pt_regs *regs,
+				     enum ptrace_syscall_dir dir)
 {
+	int regno;
 	unsigned long saved_reg;
 
-	if (!test_thread_flag(TIF_SYSCALL_TRACE))
-		return regs->syscallno;
-
-	if (is_compat_task()) {
-		/* AArch32 uses ip (r12) for scratch */
-		saved_reg = regs->regs[12];
-		regs->regs[12] = dir;
-	} else {
-		/*
-		 * Save X7. X7 is used to denote syscall entry/exit:
-		 *   X7 = 0 -> entry, = 1 -> exit
-		 */
-		saved_reg = regs->regs[7];
-		regs->regs[7] = dir;
-	}
+	/*
+	 * A scratch register (ip(r12) on AArch32, x7 on AArch64) is
+	 * used to denote syscall entry/exit:
+	 */
+	regno = (is_compat_task() ? 12 : 7);
+	saved_reg = regs->regs[regno];
+	regs->regs[regno] = dir;
 
-	if (dir)
+	if (dir == PTRACE_SYSCALL_EXIT)
 		tracehook_report_syscall_exit(regs, 0);
 	else if (tracehook_report_syscall_entry(regs))
 		regs->syscallno = ~0UL;
 
-	if (is_compat_task())
-		regs->regs[12] = saved_reg;
-	else
-		regs->regs[7] = saved_reg;
+	regs->regs[regno] = saved_reg;
+}
+
+asmlinkage int syscall_trace_enter(struct pt_regs *regs)
+{
+	if (test_thread_flag(TIF_SYSCALL_TRACE))
+		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
 
 	return regs->syscallno;
 }
+
+asmlinkage void syscall_trace_exit(struct pt_regs *regs)
+{
+	if (test_thread_flag(TIF_SYSCALL_TRACE))
+		tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
+}
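Because tracehook_report_syscall() publishes the direction in x7 (r12 for compat tasks) only for the duration of the ptrace stop, a tracer can distinguish entry from exit without extra bookkeeping. Hypothetical tracer-side check, error handling omitted:

    #include <elf.h>
    #include <sys/ptrace.h>
    #include <sys/uio.h>
    #include <asm/ptrace.h>		/* struct user_pt_regs */

    struct user_pt_regs regs;
    struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };

    ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
    if (regs.regs[7] == 0)
    	/* stopped at syscall entry */;
    else
    	/* stopped at syscall exit */;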
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 720853f70b6b..7450c5802c3f 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -25,6 +25,7 @@
 #include <linux/utsname.h>
 #include <linux/initrd.h>
 #include <linux/console.h>
+#include <linux/cache.h>
 #include <linux/bootmem.h>
 #include <linux/seq_file.h>
 #include <linux/screen_info.h>
@@ -198,6 +199,8 @@ static void __init setup_processor(void)
 {
 	struct cpu_info *cpu_info;
 	u64 features, block;
+	u32 cwg;
+	int cls;
 
 	cpu_info = lookup_processor_type(read_cpuid_id());
 	if (!cpu_info) {
@@ -215,6 +218,18 @@ static void __init setup_processor(void)
 	elf_hwcap = 0;
 
 	/*
+	 * Check for sane CTR_EL0.CWG value.
+	 */
+	cwg = cache_type_cwg();
+	cls = cache_line_size();
+	if (!cwg)
+		pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
+			cls);
+	if (L1_CACHE_BYTES < cls)
+		pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
+			L1_CACHE_BYTES, cls);
+
+	/*
 	 * ID_AA64ISAR0_EL1 contains 4-bit wide signed feature blocks.
	 * The blocks we test below represent incremental functionality
	 * for non-negative values. Negative values are reserved.
@@ -361,7 +376,6 @@ void __init setup_arch(char **cmdline_p)
 
 	*cmdline_p = boot_command_line;
 
-	init_mem_pgprot();
 	early_ioremap_init();
 
 	parse_early_param();
@@ -393,11 +407,10 @@ void __init setup_arch(char **cmdline_p)
 
 static int __init arm64_device_init(void)
 {
-	of_clk_init(NULL);
 	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 	return 0;
 }
-arch_initcall(arm64_device_init);
+arch_initcall_sync(arm64_device_init);
 
 static DEFINE_PER_CPU(struct cpu, cpu_data);
 
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 882f01774365..6357b9c6c90e 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -17,6 +17,7 @@
17 * along with this program. If not, see <http://www.gnu.org/licenses/>. 17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */ 18 */
19 19
20#include <linux/compat.h>
20#include <linux/errno.h> 21#include <linux/errno.h>
21#include <linux/signal.h> 22#include <linux/signal.h>
22#include <linux/personality.h> 23#include <linux/personality.h>
@@ -25,7 +26,6 @@
25#include <linux/tracehook.h> 26#include <linux/tracehook.h>
26#include <linux/ratelimit.h> 27#include <linux/ratelimit.h>
27 28
28#include <asm/compat.h>
29#include <asm/debug-monitors.h> 29#include <asm/debug-monitors.h>
30#include <asm/elf.h> 30#include <asm/elf.h>
31#include <asm/cacheflush.h> 31#include <asm/cacheflush.h>
@@ -97,8 +97,7 @@ static int restore_sigframe(struct pt_regs *regs,
97{ 97{
98 sigset_t set; 98 sigset_t set;
99 int i, err; 99 int i, err;
100 struct aux_context __user *aux = 100 void *aux = sf->uc.uc_mcontext.__reserved;
101 (struct aux_context __user *)sf->uc.uc_mcontext.__reserved;
102 101
103 err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set)); 102 err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
104 if (err == 0) 103 if (err == 0)
@@ -118,8 +117,11 @@ static int restore_sigframe(struct pt_regs *regs,
118 117
119 err |= !valid_user_regs(&regs->user_regs); 118 err |= !valid_user_regs(&regs->user_regs);
120 119
121 if (err == 0) 120 if (err == 0) {
122 err |= restore_fpsimd_context(&aux->fpsimd); 121 struct fpsimd_context *fpsimd_ctx =
122 container_of(aux, struct fpsimd_context, head);
123 err |= restore_fpsimd_context(fpsimd_ctx);
124 }
123 125
124 return err; 126 return err;
125} 127}
@@ -164,8 +166,8 @@ static int setup_sigframe(struct rt_sigframe __user *sf,
164 struct pt_regs *regs, sigset_t *set) 166 struct pt_regs *regs, sigset_t *set)
165{ 167{
166 int i, err = 0; 168 int i, err = 0;
167 struct aux_context __user *aux = 169 void *aux = sf->uc.uc_mcontext.__reserved;
168 (struct aux_context __user *)sf->uc.uc_mcontext.__reserved; 170 struct _aarch64_ctx *end;
169 171
170 /* set up the stack frame for unwinding */ 172 /* set up the stack frame for unwinding */
171 __put_user_error(regs->regs[29], &sf->fp, err); 173 __put_user_error(regs->regs[29], &sf->fp, err);
@@ -182,12 +184,27 @@ static int setup_sigframe(struct rt_sigframe __user *sf,
182 184
183 err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set)); 185 err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
184 186
185 if (err == 0) 187 if (err == 0) {
186 err |= preserve_fpsimd_context(&aux->fpsimd); 188 struct fpsimd_context *fpsimd_ctx =
189 container_of(aux, struct fpsimd_context, head);
190 err |= preserve_fpsimd_context(fpsimd_ctx);
191 aux += sizeof(*fpsimd_ctx);
192 }
193
194 /* fault information, if valid */
195 if (current->thread.fault_code) {
196 struct esr_context *esr_ctx =
197 container_of(aux, struct esr_context, head);
198 __put_user_error(ESR_MAGIC, &esr_ctx->head.magic, err);
199 __put_user_error(sizeof(*esr_ctx), &esr_ctx->head.size, err);
200 __put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
201 aux += sizeof(*esr_ctx);
202 }
187 203
188 /* set the "end" magic */ 204 /* set the "end" magic */
189 __put_user_error(0, &aux->end.magic, err); 205 end = aux;
190 __put_user_error(0, &aux->end.size, err); 206 __put_user_error(0, &end->magic, err);
207 __put_user_error(0, &end->size, err);
191 208
192 return err; 209 return err;
193} 210}
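
Dropping struct aux_context turns the __reserved area into a self-describing stream of _aarch64_ctx records: each record starts with a magic/size header, the esr_context is appended only when fault_code is set, and a zeroed header terminates the stream. User space can walk it the same way; a sketch of a handler doing so, assuming the uapi sigcontext.h definitions (ESR_MAGIC and struct esr_context are new in this series, per the diffstat):

#include <signal.h>
#include <ucontext.h>
#include <asm/sigcontext.h>

static void handler(int sig, siginfo_t *info, void *ctx)
{
	ucontext_t *uc = ctx;
	unsigned char *p = uc->uc_mcontext.__reserved;

	for (;;) {
		struct _aarch64_ctx *head = (struct _aarch64_ctx *)p;

		if (!head->magic)	/* the zeroed "end" record */
			break;
		if (head->magic == ESR_MAGIC) {
			struct esr_context *esr_ctx =
				(struct esr_context *)head;
			/* esr_ctx->esr: syndrome of the faulting access */
		}
		p += head->size;	/* records are contiguous */
	}
}
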
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index ac7e237d0bda..3491c638f172 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -23,6 +23,7 @@
23#include <linux/syscalls.h> 23#include <linux/syscalls.h>
24#include <linux/ratelimit.h> 24#include <linux/ratelimit.h>
25 25
26#include <asm/esr.h>
26#include <asm/fpsimd.h> 27#include <asm/fpsimd.h>
27#include <asm/signal32.h> 28#include <asm/signal32.h>
28#include <asm/uaccess.h> 29#include <asm/uaccess.h>
@@ -81,6 +82,8 @@ struct compat_vfp_sigframe {
81#define VFP_MAGIC 0x56465001 82#define VFP_MAGIC 0x56465001
82#define VFP_STORAGE_SIZE sizeof(struct compat_vfp_sigframe) 83#define VFP_STORAGE_SIZE sizeof(struct compat_vfp_sigframe)
83 84
85#define FSR_WRITE_SHIFT (11)
86
84struct compat_aux_sigframe { 87struct compat_aux_sigframe {
85 struct compat_vfp_sigframe vfp; 88 struct compat_vfp_sigframe vfp;
86 89
@@ -497,7 +500,9 @@ static int compat_setup_sigframe(struct compat_sigframe __user *sf,
497 __put_user_error(regs->pstate, &sf->uc.uc_mcontext.arm_cpsr, err); 500 __put_user_error(regs->pstate, &sf->uc.uc_mcontext.arm_cpsr, err);
498 501
499 __put_user_error((compat_ulong_t)0, &sf->uc.uc_mcontext.trap_no, err); 502 __put_user_error((compat_ulong_t)0, &sf->uc.uc_mcontext.trap_no, err);
500 __put_user_error((compat_ulong_t)0, &sf->uc.uc_mcontext.error_code, err); 503 /* set the compat FSR WnR */
504 __put_user_error(!!(current->thread.fault_code & ESR_EL1_WRITE) <<
505 FSR_WRITE_SHIFT, &sf->uc.uc_mcontext.error_code, err);
501 __put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err); 506 __put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);
502 __put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err); 507 __put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);
503 508
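
Rather than storing a zero, the compat frame now reports the single FSR bit 32-bit userspace is known to inspect: WnR, bit 11, derived from ESR bit 6. The mapping is just a shift; spelled out with a made-up helper name:

/* Illustration only: ESR_EL1.WnR (bit 6) -> AArch32 FSR WnR (bit 11);
 * all other FSR bits stay zero. */
static compat_ulong_t compat_fsr_from_esr(unsigned long esr)
{
	return !!(esr & ESR_EL1_WRITE) << FSR_WRITE_SHIFT;
}
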
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index 7a530d2cc807..0347d38eea29 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -30,7 +30,6 @@ extern void secondary_holding_pen(void);
30volatile unsigned long secondary_holding_pen_release = INVALID_HWID; 30volatile unsigned long secondary_holding_pen_release = INVALID_HWID;
31 31
32static phys_addr_t cpu_release_addr[NR_CPUS]; 32static phys_addr_t cpu_release_addr[NR_CPUS];
33static DEFINE_RAW_SPINLOCK(boot_lock);
34 33
35/* 34/*
36 * Write secondary_holding_pen_release in a way that is guaranteed to be 35 * Write secondary_holding_pen_release in a way that is guaranteed to be
@@ -94,14 +93,6 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu)
94 93
95static int smp_spin_table_cpu_boot(unsigned int cpu) 94static int smp_spin_table_cpu_boot(unsigned int cpu)
96{ 95{
97 unsigned long timeout;
98
99 /*
100 * Set synchronisation state between this boot processor
101 * and the secondary one
102 */
103 raw_spin_lock(&boot_lock);
104
105 /* 96 /*
106 * Update the pen release flag. 97 * Update the pen release flag.
107 */ 98 */
@@ -112,34 +103,7 @@ static int smp_spin_table_cpu_boot(unsigned int cpu)
112 */ 103 */
113 sev(); 104 sev();
114 105
115 timeout = jiffies + (1 * HZ); 106 return 0;
116 while (time_before(jiffies, timeout)) {
117 if (secondary_holding_pen_release == INVALID_HWID)
118 break;
119 udelay(10);
120 }
121
122 /*
123 * Now the secondary core is starting up let it run its
124 * calibrations, then wait for it to finish
125 */
126 raw_spin_unlock(&boot_lock);
127
128 return secondary_holding_pen_release != INVALID_HWID ? -ENOSYS : 0;
129}
130
131static void smp_spin_table_cpu_postboot(void)
132{
133 /*
134 * Let the primary processor know we're out of the pen.
135 */
136 write_pen_release(INVALID_HWID);
137
138 /*
139 * Synchronise with the boot thread.
140 */
141 raw_spin_lock(&boot_lock);
142 raw_spin_unlock(&boot_lock);
143} 107}
144 108
145const struct cpu_operations smp_spin_table_ops = { 109const struct cpu_operations smp_spin_table_ops = {
@@ -147,5 +111,4 @@ const struct cpu_operations smp_spin_table_ops = {
147 .cpu_init = smp_spin_table_cpu_init, 111 .cpu_init = smp_spin_table_cpu_init,
148 .cpu_prepare = smp_spin_table_cpu_prepare, 112 .cpu_prepare = smp_spin_table_cpu_prepare,
149 .cpu_boot = smp_spin_table_cpu_boot, 113 .cpu_boot = smp_spin_table_cpu_boot,
150 .cpu_postboot = smp_spin_table_cpu_postboot,
151}; 114};
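
With the lock and the one-second poll gone, cpu_boot reduces to the bare pen-release handshake: publish the target CPU's MPIDR, sev(), return. The other half of the protocol runs in assembly in head.S; rendered as C for illustration (the function name here is made up), the secondary's side is:

/* Illustrative C version of the secondary_holding_pen loop. */
void secondary_holding_pen_loop(u64 my_mpidr)
{
	/* Parked until the boot CPU writes our MPIDR and issues sev(). */
	while (secondary_holding_pen_release != my_mpidr)
		wfe();
	/* fall through into secondary startup */
}

Removing the synchronous wait also means cpu_boot no longer reports -ENOSYS on timeout; a CPU that fails to come online is left to the generic bring-up path to detect.
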
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index 29c39d5d77e3..6815987b50f8 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -33,6 +33,7 @@
33#include <linux/irq.h> 33#include <linux/irq.h>
34#include <linux/delay.h> 34#include <linux/delay.h>
35#include <linux/clocksource.h> 35#include <linux/clocksource.h>
36#include <linux/clk-provider.h>
36 37
37#include <clocksource/arm_arch_timer.h> 38#include <clocksource/arm_arch_timer.h>
38 39
@@ -65,6 +66,7 @@ void __init time_init(void)
65{ 66{
66 u32 arch_timer_rate; 67 u32 arch_timer_rate;
67 68
69 of_clk_init(NULL);
68 clocksource_of_init(); 70 clocksource_of_init();
69 71
70 arch_timer_rate = arch_timer_get_rate(); 72 arch_timer_rate = arch_timer_get_rate();
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 7ffadddb645d..c43cfa9b8304 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -251,10 +251,13 @@ void die(const char *str, struct pt_regs *regs, int err)
251void arm64_notify_die(const char *str, struct pt_regs *regs, 251void arm64_notify_die(const char *str, struct pt_regs *regs,
252 struct siginfo *info, int err) 252 struct siginfo *info, int err)
253{ 253{
254 if (user_mode(regs)) 254 if (user_mode(regs)) {
255 current->thread.fault_address = 0;
256 current->thread.fault_code = err;
255 force_sig_info(info->si_signo, info, current); 257 force_sig_info(info->si_signo, info, current);
256 else 258 } else {
257 die(str, regs, err); 259 die(str, regs, err);
260 }
258} 261}
259 262
260asmlinkage void __exception do_undefinstr(struct pt_regs *regs) 263asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 2c56012cb2d2..b0d1512acf08 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -630,9 +630,15 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
630 * whole of Stage-1. Weep... 630 * whole of Stage-1. Weep...
631 */ 631 */
632 tlbi ipas2e1is, x1 632 tlbi ipas2e1is, x1
633 dsb sy 633 /*
634 * We have to ensure completion of the invalidation at Stage-2,
635 * since a table walk on another CPU could refill a TLB with a
636 * complete (S1 + S2) walk based on the old Stage-2 mapping if
637 * the Stage-1 invalidation happened first.
638 */
639 dsb ish
634 tlbi vmalle1is 640 tlbi vmalle1is
635 dsb sy 641 dsb ish
636 isb 642 isb
637 643
638 msr vttbr_el2, xzr 644 msr vttbr_el2, xzr
@@ -643,7 +649,7 @@ ENTRY(__kvm_flush_vm_context)
643 dsb ishst 649 dsb ishst
644 tlbi alle1is 650 tlbi alle1is
645 ic ialluis 651 ic ialluis
646 dsb sy 652 dsb ish
647 ret 653 ret
648ENDPROC(__kvm_flush_vm_context) 654ENDPROC(__kvm_flush_vm_context)
649 655
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 03244582bc55..c59a1bdab5eb 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -71,13 +71,13 @@ static u32 get_ccsidr(u32 csselr)
71static void do_dc_cisw(u32 val) 71static void do_dc_cisw(u32 val)
72{ 72{
73 asm volatile("dc cisw, %x0" : : "r" (val)); 73 asm volatile("dc cisw, %x0" : : "r" (val));
74 dsb(); 74 dsb(ish);
75} 75}
76 76
77static void do_dc_csw(u32 val) 77static void do_dc_csw(u32 val)
78{ 78{
79 asm volatile("dc csw, %x0" : : "r" (val)); 79 asm volatile("dc csw, %x0" : : "r" (val));
80 dsb(); 80 dsb(ish);
81} 81}
82 82
83/* See note at ARM ARM B1.14.4 */ 83/* See note at ARM ARM B1.14.4 */
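
dsb() taking an argument matches the asm/barrier.h rework in this series (visible in the diffstat); presumably the macro now pastes the option straight into the instruction, along these lines:

/* Assumed shape of the reworked macro in asm/barrier.h: */
#define dsb(opt)	asm volatile("dsb " #opt : : : "memory")

/* dsb(ish): completion within the inner-shareable domain (all CPUs)
 * dsb(sy):  full-system completion, including devices -- stronger,
 *           and typically slower, than set/way maintenance needs */
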
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
index b51d36401d83..3ecb56c624d3 100644
--- a/arch/arm64/mm/Makefile
+++ b/arch/arm64/mm/Makefile
@@ -1,5 +1,5 @@
1obj-y := dma-mapping.o extable.o fault.o init.o \ 1obj-y := dma-mapping.o extable.o fault.o init.o \
2 cache.o copypage.o flush.o \ 2 cache.o copypage.o flush.o \
3 ioremap.o mmap.o pgd.o mmu.o \ 3 ioremap.o mmap.o pgd.o mmu.o \
4 context.o tlb.o proc.o 4 context.o proc.o
5obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o 5obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index fda756875fa6..23663837acff 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -31,7 +31,7 @@
31 * Corrupted registers: x0-x7, x9-x11 31 * Corrupted registers: x0-x7, x9-x11
32 */ 32 */
33__flush_dcache_all: 33__flush_dcache_all:
34 dsb sy // ensure ordering with previous memory accesses 34 dmb sy // ensure ordering with previous memory accesses
35 mrs x0, clidr_el1 // read clidr 35 mrs x0, clidr_el1 // read clidr
36 and x3, x0, #0x7000000 // extract loc from clidr 36 and x3, x0, #0x7000000 // extract loc from clidr
37 lsr x3, x3, #23 // left align loc bit field 37 lsr x3, x3, #23 // left align loc bit field
@@ -128,7 +128,7 @@ USER(9f, dc cvau, x4 ) // clean D line to PoU
128 add x4, x4, x2 128 add x4, x4, x2
129 cmp x4, x1 129 cmp x4, x1
130 b.lo 1b 130 b.lo 1b
131 dsb sy 131 dsb ish
132 132
133 icache_line_size x2, x3 133 icache_line_size x2, x3
134 sub x3, x2, #1 134 sub x3, x2, #1
@@ -139,7 +139,7 @@ USER(9f, ic ivau, x4 ) // invalidate I line PoU
139 cmp x4, x1 139 cmp x4, x1
140 b.lo 1b 140 b.lo 1b
1419: // ignore any faulting cache operation 1419: // ignore any faulting cache operation
142 dsb sy 142 dsb ish
143 isb 143 isb
144 ret 144 ret
145ENDPROC(flush_icache_range) 145ENDPROC(flush_icache_range)
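
The same inner-shareable downgrade applies to flush_icache_range above: point-of-unification maintenance only needs to reach the other CPUs. For comparison, user-space JITs get the same clean-D/invalidate-I sequence from a compiler builtin, which on AArch64 expands to dc cvau / ic ivau loops plus barriers; a sketch, with write_jit_code a hypothetical stand-in:

extern void write_jit_code(char *buf, unsigned long len);	/* hypothetical */

void publish_code(char *buf, unsigned long len)
{
	write_jit_code(buf, len);
	/* Clean D-side to PoU, invalidate I-side, then barrier -- the
	 * user-space analogue of flush_icache_range. */
	__builtin___clear_cache(buf, buf + len);
}
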
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 0ba347e59f06..4164c5ace9f8 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -22,8 +22,11 @@
22#include <linux/slab.h> 22#include <linux/slab.h>
23#include <linux/dma-mapping.h> 23#include <linux/dma-mapping.h>
24#include <linux/dma-contiguous.h> 24#include <linux/dma-contiguous.h>
25#include <linux/of.h>
26#include <linux/platform_device.h>
25#include <linux/vmalloc.h> 27#include <linux/vmalloc.h>
26#include <linux/swiotlb.h> 28#include <linux/swiotlb.h>
29#include <linux/amba/bus.h>
27 30
28#include <asm/cacheflush.h> 31#include <asm/cacheflush.h>
29 32
@@ -112,7 +115,7 @@ static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
112 for (i = 0; i < (size >> PAGE_SHIFT); i++) 115 for (i = 0; i < (size >> PAGE_SHIFT); i++)
113 map[i] = page + i; 116 map[i] = page + i;
114 coherent_ptr = vmap(map, size >> PAGE_SHIFT, VM_MAP, 117 coherent_ptr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
115 __get_dma_pgprot(attrs, pgprot_default, false)); 118 __get_dma_pgprot(attrs, __pgprot(PROT_NORMAL_NC), false));
116 kfree(map); 119 kfree(map);
117 if (!coherent_ptr) 120 if (!coherent_ptr)
118 goto no_map; 121 goto no_map;
@@ -305,17 +308,45 @@ struct dma_map_ops coherent_swiotlb_dma_ops = {
305}; 308};
306EXPORT_SYMBOL(coherent_swiotlb_dma_ops); 309EXPORT_SYMBOL(coherent_swiotlb_dma_ops);
307 310
311static int dma_bus_notifier(struct notifier_block *nb,
312 unsigned long event, void *_dev)
313{
314 struct device *dev = _dev;
315
316 if (event != BUS_NOTIFY_ADD_DEVICE)
317 return NOTIFY_DONE;
318
319 if (of_property_read_bool(dev->of_node, "dma-coherent"))
320 set_dma_ops(dev, &coherent_swiotlb_dma_ops);
321
322 return NOTIFY_OK;
323}
324
325static struct notifier_block platform_bus_nb = {
326 .notifier_call = dma_bus_notifier,
327};
328
329static struct notifier_block amba_bus_nb = {
330 .notifier_call = dma_bus_notifier,
331};
332
308extern int swiotlb_late_init_with_default_size(size_t default_size); 333extern int swiotlb_late_init_with_default_size(size_t default_size);
309 334
310static int __init swiotlb_late_init(void) 335static int __init swiotlb_late_init(void)
311{ 336{
312 size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT); 337 size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);
313 338
314 dma_ops = &coherent_swiotlb_dma_ops; 339 /*
340 * These must be registered before of_platform_populate().
341 */
342 bus_register_notifier(&platform_bus_type, &platform_bus_nb);
343 bus_register_notifier(&amba_bustype, &amba_bus_nb);
344
345 dma_ops = &noncoherent_swiotlb_dma_ops;
315 346
316 return swiotlb_late_init_with_default_size(swiotlb_size); 347 return swiotlb_late_init_with_default_size(swiotlb_size);
317} 348}
318subsys_initcall(swiotlb_late_init); 349arch_initcall(swiotlb_late_init);
319 350
320#define PREALLOC_DMA_DEBUG_ENTRIES 4096 351#define PREALLOC_DMA_DEBUG_ENTRIES 4096
321 352
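
Taken together with the setup.c change above (arm64_device_init demoted to arch_initcall_sync), the ordering works out: swiotlb_late_init now runs at arch_initcall level, registers the notifiers, and only then does of_platform_populate() create the devices, so every device is classified before any driver binds. Drivers themselves are untouched; a hypothetical consumer:

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>

/* Hypothetical driver: by probe time the notifier above has already
 * picked coherent or noncoherent ops from the "dma-coherent" DT
 * property, so the usual DMA API does the right thing. */
static int example_probe(struct platform_device *pdev)
{
	dma_addr_t dma;
	void *buf = dma_alloc_coherent(&pdev->dev, SZ_4K, &dma, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	/* ... hand dma to the device ... */
	return 0;
}
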
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index c23751b06120..89c6763d5e7e 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -32,6 +32,7 @@
32 32
33#include <asm/exception.h> 33#include <asm/exception.h>
34#include <asm/debug-monitors.h> 34#include <asm/debug-monitors.h>
35#include <asm/esr.h>
35#include <asm/system_misc.h> 36#include <asm/system_misc.h>
36#include <asm/pgtable.h> 37#include <asm/pgtable.h>
37#include <asm/tlbflush.h> 38#include <asm/tlbflush.h>
@@ -123,6 +124,7 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
123 } 124 }
124 125
125 tsk->thread.fault_address = addr; 126 tsk->thread.fault_address = addr;
127 tsk->thread.fault_code = esr;
126 si.si_signo = sig; 128 si.si_signo = sig;
127 si.si_errno = 0; 129 si.si_errno = 0;
128 si.si_code = code; 130 si.si_code = code;
@@ -148,8 +150,6 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re
148#define VM_FAULT_BADMAP 0x010000 150#define VM_FAULT_BADMAP 0x010000
149#define VM_FAULT_BADACCESS 0x020000 151#define VM_FAULT_BADACCESS 0x020000
150 152
151#define ESR_WRITE (1 << 6)
152#define ESR_CM (1 << 8)
153#define ESR_LNX_EXEC (1 << 24) 153#define ESR_LNX_EXEC (1 << 24)
154 154
155static int __do_page_fault(struct mm_struct *mm, unsigned long addr, 155static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
@@ -173,8 +173,7 @@ static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
173good_area: 173good_area:
174 /* 174 /*
175 * Check that the permissions on the VMA allow for the fault which 175 * Check that the permissions on the VMA allow for the fault which
176 * occurred. If we encountered a write or exec fault, we must have 176 * occurred.
177 * appropriate permissions, otherwise we allow any permission.
178 */ 177 */
179 if (!(vma->vm_flags & vm_flags)) { 178 if (!(vma->vm_flags & vm_flags)) {
180 fault = VM_FAULT_BADACCESS; 179 fault = VM_FAULT_BADACCESS;
@@ -196,7 +195,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
196 struct task_struct *tsk; 195 struct task_struct *tsk;
197 struct mm_struct *mm; 196 struct mm_struct *mm;
198 int fault, sig, code; 197 int fault, sig, code;
199 unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC; 198 unsigned long vm_flags = VM_READ | VM_WRITE;
200 unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 199 unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
201 200
202 tsk = current; 201 tsk = current;
@@ -218,7 +217,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
218 217
219 if (esr & ESR_LNX_EXEC) { 218 if (esr & ESR_LNX_EXEC) {
220 vm_flags = VM_EXEC; 219 vm_flags = VM_EXEC;
221 } else if ((esr & ESR_WRITE) && !(esr & ESR_CM)) { 220 } else if ((esr & ESR_EL1_WRITE) && !(esr & ESR_EL1_CM)) {
222 vm_flags = VM_WRITE; 221 vm_flags = VM_WRITE;
223 mm_flags |= FAULT_FLAG_WRITE; 222 mm_flags |= FAULT_FLAG_WRITE;
224 } 223 }
@@ -525,7 +524,7 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
525 info.si_errno = 0; 524 info.si_errno = 0;
526 info.si_code = inf->code; 525 info.si_code = inf->code;
527 info.si_addr = (void __user *)addr; 526 info.si_addr = (void __user *)addr;
528 arm64_notify_die("", regs, &info, esr); 527 arm64_notify_die("", regs, &info, 0);
529 528
530 return 0; 529 return 0;
531} 530}
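
The local ESR_WRITE/ESR_CM definitions give way to shared ones from asm/esr.h. The subtlety they encode is worth spelling out: cache maintenance instructions report WnR=1 yet are architecturally permitted on read-only mappings, so a "write" fault only really needs write permission when the CM bit is clear. As a sketch (fault_is_write is a made-up name; the bit positions match the deleted local macros):

#define ESR_EL1_WRITE	(1 << 6)	/* WnR: abort reported as a write */
#define ESR_EL1_CM	(1 << 8)	/* caused by a cache maintenance op */

/* DC/IC ops set WnR but must not demand a writable VMA. */
static bool fault_is_write(unsigned int esr)
{
	return (esr & ESR_EL1_WRITE) && !(esr & ESR_EL1_CM);
}
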
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 6b7e89569a3a..3a729de96f15 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -43,11 +43,6 @@
43struct page *empty_zero_page; 43struct page *empty_zero_page;
44EXPORT_SYMBOL(empty_zero_page); 44EXPORT_SYMBOL(empty_zero_page);
45 45
46pgprot_t pgprot_default;
47EXPORT_SYMBOL(pgprot_default);
48
49static pmdval_t prot_sect_kernel;
50
51struct cachepolicy { 46struct cachepolicy {
52 const char policy[16]; 47 const char policy[16];
53 u64 mair; 48 u64 mair;
@@ -122,33 +117,6 @@ static int __init early_cachepolicy(char *p)
122} 117}
123early_param("cachepolicy", early_cachepolicy); 118early_param("cachepolicy", early_cachepolicy);
124 119
125/*
126 * Adjust the PMD section entries according to the CPU in use.
127 */
128void __init init_mem_pgprot(void)
129{
130 pteval_t default_pgprot;
131 int i;
132
133 default_pgprot = PTE_ATTRINDX(MT_NORMAL);
134 prot_sect_kernel = PMD_TYPE_SECT | PMD_SECT_AF | PMD_ATTRINDX(MT_NORMAL);
135
136#ifdef CONFIG_SMP
137 /*
138 * Mark memory with the "shared" attribute for SMP systems
139 */
140 default_pgprot |= PTE_SHARED;
141 prot_sect_kernel |= PMD_SECT_S;
142#endif
143
144 for (i = 0; i < 16; i++) {
145 unsigned long v = pgprot_val(protection_map[i]);
146 protection_map[i] = __pgprot(v | default_pgprot);
147 }
148
149 pgprot_default = __pgprot(PTE_TYPE_PAGE | PTE_AF | default_pgprot);
150}
151
152pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, 120pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
153 unsigned long size, pgprot_t vma_prot) 121 unsigned long size, pgprot_t vma_prot)
154{ 122{
@@ -205,7 +173,7 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
205 /* try section mapping first */ 173 /* try section mapping first */
206 if (((addr | next | phys) & ~SECTION_MASK) == 0) { 174 if (((addr | next | phys) & ~SECTION_MASK) == 0) {
207 pmd_t old_pmd = *pmd; 175 pmd_t old_pmd = *pmd;
208 set_pmd(pmd, __pmd(phys | prot_sect_kernel)); 176 set_pmd(pmd, __pmd(phys | PROT_SECT_NORMAL_EXEC));
209 /* 177 /*
210 * Check for previous table entries created during 178 * Check for previous table entries created during
211 * boot (__create_page_tables) and flush them. 179 * boot (__create_page_tables) and flush them.
@@ -227,7 +195,30 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
227 195
228 do { 196 do {
229 next = pud_addr_end(addr, end); 197 next = pud_addr_end(addr, end);
230 alloc_init_pmd(pud, addr, next, phys); 198
199 /*
200 * For 4K granule only, attempt to put down a 1GB block
201 */
202 if ((PAGE_SHIFT == 12) &&
203 ((addr | next | phys) & ~PUD_MASK) == 0) {
204 pud_t old_pud = *pud;
205 set_pud(pud, __pud(phys | PROT_SECT_NORMAL_EXEC));
206
207 /*
208 * If we have an old value for a pud, it will
209 * be pointing to a pmd table that we no longer
210 * need (from swapper_pg_dir).
211 *
212 * Look up the old pmd table and free it.
213 */
214 if (!pud_none(old_pud)) {
215 phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
216 memblock_free(table, PAGE_SIZE);
217 flush_tlb_all();
218 }
219 } else {
220 alloc_init_pmd(pud, addr, next, phys);
221 }
231 phys += next - addr; 222 phys += next - addr;
232 } while (pud++, addr = next, addr != end); 223 } while (pud++, addr = next, addr != end);
233} 224}
@@ -370,10 +361,16 @@ int kern_addr_valid(unsigned long addr)
370 if (pud_none(*pud)) 361 if (pud_none(*pud))
371 return 0; 362 return 0;
372 363
364 if (pud_sect(*pud))
365 return pfn_valid(pud_pfn(*pud));
366
373 pmd = pmd_offset(pud, addr); 367 pmd = pmd_offset(pud, addr);
374 if (pmd_none(*pmd)) 368 if (pmd_none(*pmd))
375 return 0; 369 return 0;
376 370
371 if (pmd_sect(*pmd))
372 return pfn_valid(pmd_pfn(*pmd));
373
377 pte = pte_offset_kernel(pmd, addr); 374 pte = pte_offset_kernel(pmd, addr);
378 if (pte_none(*pte)) 375 if (pte_none(*pte))
379 return 0; 376 return 0;
@@ -414,7 +411,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
414 if (!p) 411 if (!p)
415 return -ENOMEM; 412 return -ENOMEM;
416 413
417 set_pmd(pmd, __pmd(__pa(p) | prot_sect_kernel)); 414 set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
418 } else 415 } else
419 vmemmap_verify((pte_t *)pmd, node, addr, next); 416 vmemmap_verify((pte_t *)pmd, node, addr, next);
420 } while (addr = next, addr != end); 417 } while (addr = next, addr != end);
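
The new PUD-level path only fires when the virtual address, the end of the current step, and the physical address are all 1GB-aligned; the single OR-then-mask test checks all three at once. Assuming the 4K granule (where a pud entry spans 1GB), the arithmetic looks like:

#define PUD_SHIFT	30
#define PUD_SIZE	(1UL << PUD_SHIFT)	/* 1GB per pud entry */
#define PUD_MASK	(~(PUD_SIZE - 1))

/* Made-up helper name; mirrors the test in alloc_init_pud(). */
static bool can_use_pud_block(u64 addr, u64 next, u64 phys)
{
	/* Any low bit set in any of the three spoils the block. */
	return ((addr | next | phys) & ~PUD_MASK) == 0;
}

For example, addr = 0xffffffc000000000 with phys = 0x80000000 and a full 1GB step passes; nudge phys by 2MB and the OR carries the misalignment into the masked bits, falling back to alloc_init_pmd().
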
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 9042aff5e9e3..7736779c9809 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -182,7 +182,7 @@ ENDPROC(cpu_do_switch_mm)
182ENTRY(__cpu_setup) 182ENTRY(__cpu_setup)
183 ic iallu // I+BTB cache invalidate 183 ic iallu // I+BTB cache invalidate
184 tlbi vmalle1is // invalidate I + D TLBs 184 tlbi vmalle1is // invalidate I + D TLBs
185 dsb sy 185 dsb ish
186 186
187 mov x0, #3 << 20 187 mov x0, #3 << 20
188 msr cpacr_el1, x0 // Enable FP/ASIMD 188 msr cpacr_el1, x0 // Enable FP/ASIMD
diff --git a/arch/arm64/mm/tlb.S b/arch/arm64/mm/tlb.S
deleted file mode 100644
index 19da91e0cd27..000000000000
--- a/arch/arm64/mm/tlb.S
+++ /dev/null
@@ -1,71 +0,0 @@
1/*
2 * Based on arch/arm/mm/tlb.S
3 *
4 * Copyright (C) 1997-2002 Russell King
5 * Copyright (C) 2012 ARM Ltd.
6 * Written by Catalin Marinas <catalin.marinas@arm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20#include <linux/linkage.h>
21#include <asm/assembler.h>
22#include <asm/asm-offsets.h>
23#include <asm/page.h>
24#include <asm/tlbflush.h>
25#include "proc-macros.S"
26
27/*
28 * __cpu_flush_user_tlb_range(start, end, vma)
29 *
30 * Invalidate a range of TLB entries in the specified address space.
31 *
32 * - start - start address (may not be aligned)
33 * - end - end address (exclusive, may not be aligned)
34 * - vma - vma_struct describing address range
35 */
36ENTRY(__cpu_flush_user_tlb_range)
37 vma_vm_mm x3, x2 // get vma->vm_mm
38 mmid w3, x3 // get vm_mm->context.id
39 dsb sy
40 lsr x0, x0, #12 // align address
41 lsr x1, x1, #12
42 bfi x0, x3, #48, #16 // start VA and ASID
43 bfi x1, x3, #48, #16 // end VA and ASID
441: tlbi vae1is, x0 // TLB invalidate by address and ASID
45 add x0, x0, #1
46 cmp x0, x1
47 b.lo 1b
48 dsb sy
49 ret
50ENDPROC(__cpu_flush_user_tlb_range)
51
52/*
53 * __cpu_flush_kern_tlb_range(start,end)
54 *
55 * Invalidate a range of kernel TLB entries.
56 *
57 * - start - start address (may not be aligned)
58 * - end - end address (exclusive, may not be aligned)
59 */
60ENTRY(__cpu_flush_kern_tlb_range)
61 dsb sy
62 lsr x0, x0, #12 // align address
63 lsr x1, x1, #12
641: tlbi vaae1is, x0 // TLB invalidate by address
65 add x0, x0, #1
66 cmp x0, x1
67 b.lo 1b
68 dsb sy
69 isb
70 ret
71ENDPROC(__cpu_flush_kern_tlb_range)
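
With tlb.S deleted (and tlb.o dropped from the Makefile earlier in this diff), these routines presumably become inline C helpers in asm/tlbflush.h, which the diffstat shows growing by roughly the same amount. A sketch of the kernel-range flavour, reconstructed from the assembly above:

/* Assumed shape of the inline replacement for
 * __cpu_flush_kern_tlb_range(): TLBI takes the VA shifted right by 12,
 * and kernel mappings carry a zero ASID in bits [63:48]. */
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	unsigned long addr;

	start >>= 12;
	end >>= 12;

	dsb(ishst);		/* make prior PTE updates visible */
	for (addr = start; addr < end; addr++)
		asm("tlbi vaae1is, %0" : : "r" (addr));
	dsb(ish);		/* wait for the broadcast invalidation */
	isb();
}

Note the barriers follow the pattern established elsewhere in this series: ishst before (ordering the page-table stores) and ish after (completion within the inner-shareable domain), rather than the blanket dsb sy the deleted assembly used.
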