path: root/arch/arm/include/asm
author    Linus Torvalds <torvalds@linux-foundation.org>  2013-09-05 21:07:32 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-09-05 21:07:32 -0400
commit    2e032852245b3dcfe5461d7353e34eb6da095ccf (patch)
tree      69f9fdf03b54d76bb539096e0ec96e91ea8216b1 /arch/arm/include/asm
parent    356f9e74ffaafd11741589a9aa21d6c9d2721417 (diff)
parent    141b97433d77e39ac3ac111a7b3852192035259c (diff)
Merge branch 'for-linus' of git://git.linaro.org/people/rmk/linux-arm
Pull ARM updates from Russell King:
 "This set includes adding support for Neon acceleration of RAID6 XOR
  code from Ard Biesheuvel, cache flushing and barrier updates from
  Will Deacon, and a cleanup to the ARM debug code which reduces the
  amount of code by about 500 lines.

  A few other cleanups as well, such as constifying the machine
  descriptors (which already shouldn't be written to) and cleaning up
  the printing of the L2 cache size"

* 'for-linus' of git://git.linaro.org/people/rmk/linux-arm: (55 commits)
  ARM: 7826/1: debug: support debug ll on hisilicon soc
  ARM: 7830/1: delay: don't bother reporting bogomips in /proc/cpuinfo
  ARM: 7829/1: Add ".text.unlikely" and ".text.hot" to arm unwind tables
  ARM: 7828/1: ARMv7-M: implement restart routine common to all v7-M machines
  ARM: 7827/1: highbank: fix debug uart virtual address for LPAE
  ARM: 7823/1: errata: workaround Cortex-A15 erratum 773022
  ARM: 7806/1: allow DEBUG_UNCOMPRESS for Tegra
  ARM: 7793/1: debug: use generic option for ep93xx PL10x debug port
  ARM: debug: move SPEAr debug to generic PL01x code
  ARM: debug: move davinci debug to generic 8250 code
  ARM: debug: move keystone debug to generic 8250 code
  ARM: debug: remove DEBUG_ROCKCHIP_UART
  ARM: debug: provide generic option choices for 8250 and PL01x ports
  ARM: debug: move PL01X debug include into arch/arm/include/debug/
  ARM: debug: provide PL01x debug uart phys/virt address configuration options
  ARM: debug: add support for word accesses to debug/8250.S
  ARM: debug: move 8250 debug include into arch/arm/include/debug/
  ARM: debug: provide 8250 debug uart phys/virt address configuration options
  ARM: debug: provide 8250 debug uart register shift configuration option
  ARM: debug: provide 8250 debug uart flow control configuration option
  ...
Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r--  arch/arm/include/asm/assembler.h             |   4
-rw-r--r--  arch/arm/include/asm/barrier.h               |  32
-rw-r--r--  arch/arm/include/asm/cacheflush.h            |   5
-rw-r--r--  arch/arm/include/asm/hardware/debug-8250.S   |  29
-rw-r--r--  arch/arm/include/asm/hardware/debug-pl01x.S  |  29
-rw-r--r--  arch/arm/include/asm/mach/arch.h             |   4
-rw-r--r--  arch/arm/include/asm/memblock.h              |   3
-rw-r--r--  arch/arm/include/asm/module.h                |   2
-rw-r--r--  arch/arm/include/asm/neon.h                  |  36
-rw-r--r--  arch/arm/include/asm/pgtable.h               |   2
-rw-r--r--  arch/arm/include/asm/prom.h                  |   4
-rw-r--r--  arch/arm/include/asm/spinlock.h              |   2
-rw-r--r--  arch/arm/include/asm/switch_to.h             |  10
-rw-r--r--  arch/arm/include/asm/thread_info.h           |  11
-rw-r--r--  arch/arm/include/asm/tlbflush.h              | 181
-rw-r--r--  arch/arm/include/asm/types.h                 |  40
-rw-r--r--  arch/arm/include/asm/v7m.h                   |  12
-rw-r--r--  arch/arm/include/asm/xor.h                   |  73
18 files changed, 360 insertions(+), 119 deletions(-)
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index a5fef710af32..fcc1b5bf6979 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -220,9 +220,9 @@
 #ifdef CONFIG_SMP
 #if __LINUX_ARM_ARCH__ >= 7
 	.ifeqs "\mode","arm"
-	ALT_SMP(dmb)
+	ALT_SMP(dmb	ish)
 	.else
-	ALT_SMP(W(dmb))
+	ALT_SMP(W(dmb)	ish)
 	.endif
 #elif __LINUX_ARM_ARCH__ == 6
 	ALT_SMP(mcr	p15, 0, r0, c7, c10, 5)	@ dmb
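
The smp_dmb macro patched above sits behind the kernel's SMP-on-UP alternatives: ALT_SMP() emits the instruction used on SMP systems, and a neighbouring ALT_UP() supplies the replacement patched in when the kernel boots on a uniprocessor. A minimal sketch of the resulting ARM-mode sequence, assuming the usual ALT_UP(nop) pairing from this same file:

	ALT_SMP(dmb	ish)	@ SMP: data memory barrier, inner-shareable domain
	ALT_UP(nop)		@ UP: patched to a nop, no barrier needed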
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index 8dcd9c702d90..60f15e274e6d 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -14,27 +14,27 @@
 #endif
 
 #if __LINUX_ARM_ARCH__ >= 7
-#define isb() __asm__ __volatile__ ("isb" : : : "memory")
-#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
-#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
+#define isb(option) __asm__ __volatile__ ("isb " #option : : : "memory")
+#define dsb(option) __asm__ __volatile__ ("dsb " #option : : : "memory")
+#define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory")
 #elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
-#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
+#define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
				    : : "r" (0) : "memory")
-#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
+#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
-#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
+#define dmb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
				    : : "r" (0) : "memory")
 #elif defined(CONFIG_CPU_FA526)
-#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
+#define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
				    : : "r" (0) : "memory")
-#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
+#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
-#define dmb() __asm__ __volatile__ ("" : : : "memory")
+#define dmb(x) __asm__ __volatile__ ("" : : : "memory")
 #else
-#define isb() __asm__ __volatile__ ("" : : : "memory")
-#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
+#define isb(x) __asm__ __volatile__ ("" : : : "memory")
+#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
-#define dmb() __asm__ __volatile__ ("" : : : "memory")
+#define dmb(x) __asm__ __volatile__ ("" : : : "memory")
 #endif
 
 #ifdef CONFIG_ARCH_HAS_BARRIERS
@@ -42,7 +42,7 @@
 #elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
 #define mb()		do { dsb(); outer_sync(); } while (0)
 #define rmb()		dsb()
-#define wmb()		mb()
+#define wmb()		do { dsb(st); outer_sync(); } while (0)
 #else
 #define mb()		barrier()
 #define rmb()		barrier()
@@ -54,9 +54,9 @@
 #define smp_rmb()	barrier()
 #define smp_wmb()	barrier()
 #else
-#define smp_mb()	dmb()
-#define smp_rmb()	dmb()
-#define smp_wmb()	dmb()
+#define smp_mb()	dmb(ish)
+#define smp_rmb()	smp_mb()
+#define smp_wmb()	dmb(ishst)
 #endif
 
 #define read_barrier_depends()		do { } while(0)
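
With barrier options in place, the smp_* macros above can scope their barriers to the inner-shareable domain rather than the full system. A short sketch of what the v7 definitions now expand to (comments paraphrase the architectural semantics):

	smp_mb();	/* dmb(ish)   -> "dmb ish":   all accesses, inner-shareable */
	smp_wmb();	/* dmb(ishst) -> "dmb ishst": stores only, inner-shareable  */
	dsb(nshst);	/* "dsb nshst": stores only, non-shareable (this CPU only)  */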
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 17d0ae8672fa..15f2d5bf8875 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -268,8 +268,7 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
  * Harvard caches are synchronised for the user space address range.
  * This is used for the ARM private sys_cacheflush system call.
  */
-#define flush_cache_user_range(start,end) \
-	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
+#define flush_cache_user_range(s,e)	__cpuc_coherent_user_range(s,e)
 
 /*
  * Perform necessary cache operations to ensure that data previously
@@ -352,7 +351,7 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
 	 * set_pte_at() called from vmap_pte_range() does not
 	 * have a DSB after cleaning the cache line.
 	 */
-	dsb();
+	dsb(ishst);
 }
 
 static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
diff --git a/arch/arm/include/asm/hardware/debug-8250.S b/arch/arm/include/asm/hardware/debug-8250.S
deleted file mode 100644
index 22c689255e6e..000000000000
--- a/arch/arm/include/asm/hardware/debug-8250.S
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * arch/arm/include/asm/hardware/debug-8250.S
- *
- * Copyright (C) 1994-1999 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/serial_reg.h>
-
-		.macro	senduart,rd,rx
-		strb	\rd, [\rx, #UART_TX << UART_SHIFT]
-		.endm
-
-		.macro	busyuart,rd,rx
-1002:		ldrb	\rd, [\rx, #UART_LSR << UART_SHIFT]
-		and	\rd, \rd, #UART_LSR_TEMT | UART_LSR_THRE
-		teq	\rd, #UART_LSR_TEMT | UART_LSR_THRE
-		bne	1002b
-		.endm
-
-		.macro	waituart,rd,rx
-#ifdef FLOW_CONTROL
-1001:		ldrb	\rd, [\rx, #UART_MSR << UART_SHIFT]
-		tst	\rd, #UART_MSR_CTS
-		beq	1001b
-#endif
-		.endm
diff --git a/arch/arm/include/asm/hardware/debug-pl01x.S b/arch/arm/include/asm/hardware/debug-pl01x.S
deleted file mode 100644
index f9fd083eff63..000000000000
--- a/arch/arm/include/asm/hardware/debug-pl01x.S
+++ /dev/null
@@ -1,29 +0,0 @@
-/* arch/arm/include/asm/hardware/debug-pl01x.S
- *
- * Debugging macro include header
- *
- * Copyright (C) 1994-1999 Russell King
- * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
-*/
-#include <linux/amba/serial.h>
-
-		.macro	senduart,rd,rx
-		strb	\rd, [\rx, #UART01x_DR]
-		.endm
-
-		.macro	waituart,rd,rx
-1001:		ldr	\rd, [\rx, #UART01x_FR]
-		tst	\rd, #UART01x_FR_TXFF
-		bne	1001b
-		.endm
-
-		.macro	busyuart,rd,rx
-1001:		ldr	\rd, [\rx, #UART01x_FR]
-		tst	\rd, #UART01x_FR_BUSY
-		bne	1001b
-		.endm
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 441efc491b50..69b879ac0289 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -65,12 +65,12 @@ struct machine_desc {
 /*
  * Current machine - only accessible during boot.
  */
-extern struct machine_desc *machine_desc;
+extern const struct machine_desc *machine_desc;
 
 /*
  * Machine type table - also only accessible during boot
  */
-extern struct machine_desc __arch_info_begin[], __arch_info_end[];
+extern const struct machine_desc __arch_info_begin[], __arch_info_end[];
 #define for_each_machine_desc(p)			\
 	for (p = __arch_info_begin; p < __arch_info_end; p++)
 
diff --git a/arch/arm/include/asm/memblock.h b/arch/arm/include/asm/memblock.h
index 00ca5f92648e..c2f5102ae659 100644
--- a/arch/arm/include/asm/memblock.h
+++ b/arch/arm/include/asm/memblock.h
@@ -4,8 +4,7 @@
 struct meminfo;
 struct machine_desc;
 
-extern void arm_memblock_init(struct meminfo *, struct machine_desc *);
-
+void arm_memblock_init(struct meminfo *, const struct machine_desc *);
 phys_addr_t arm_memblock_steal(phys_addr_t size, phys_addr_t align);
 
 #endif
diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h
index 0d3a28dbc8e5..ed690c49ef93 100644
--- a/arch/arm/include/asm/module.h
+++ b/arch/arm/include/asm/module.h
@@ -12,6 +12,8 @@ enum {
 	ARM_SEC_CORE,
 	ARM_SEC_EXIT,
 	ARM_SEC_DEVEXIT,
+	ARM_SEC_HOT,
+	ARM_SEC_UNLIKELY,
 	ARM_SEC_MAX,
 };
 
diff --git a/arch/arm/include/asm/neon.h b/arch/arm/include/asm/neon.h
new file mode 100644
index 000000000000..8f730fe70093
--- /dev/null
+++ b/arch/arm/include/asm/neon.h
@@ -0,0 +1,36 @@
+/*
+ * linux/arch/arm/include/asm/neon.h
+ *
+ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/hwcap.h>
+
+#define cpu_has_neon()		(!!(elf_hwcap & HWCAP_NEON))
+
+#ifdef __ARM_NEON__
+
+/*
+ * If you are affected by the BUILD_BUG below, it probably means that you are
+ * using NEON code /and/ calling the kernel_neon_begin() function from the same
+ * compilation unit. To prevent issues that may arise from GCC reordering or
+ * generating(1) NEON instructions outside of these begin/end functions, the
+ * only supported way of using NEON code in the kernel is by isolating it in a
+ * separate compilation unit, and calling it from another unit from inside a
+ * kernel_neon_begin/kernel_neon_end pair.
+ *
+ * (1) Current GCC (4.7) might generate NEON instructions at O3 level if
+ * -mfpu=neon is set.
+ */
+
+#define kernel_neon_begin() \
+	BUILD_BUG_ON_MSG(1, "kernel_neon_begin() called from NEON code")
+
+#else
+void kernel_neon_begin(void);
+#endif
+void kernel_neon_end(void);
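
A minimal sketch of the usage pattern the comment above mandates, with hypothetical file and function names; only the inner file is compiled with -mfpu=neon:

	/* crunch_neon.c -- built with -mfpu=neon; only the NEON worker lives here */
	void crunch_inner(u32 *dst, const u32 *src, int n);

	/* crunch_glue.c -- built without NEON; brackets every call into the worker */
	void crunch(u32 *dst, const u32 *src, int n)
	{
		kernel_neon_begin();	/* makes the NEON unit safe to use here */
		crunch_inner(dst, src, n);
		kernel_neon_end();
	}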
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 04aeb02d2e11..be956dbf6bae 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -100,7 +100,7 @@ extern pgprot_t pgprot_s2_device;
 #define PAGE_HYP		_MOD_PROT(pgprot_kernel, L_PTE_HYP)
 #define PAGE_HYP_DEVICE		_MOD_PROT(pgprot_hyp_device, L_PTE_HYP)
 #define PAGE_S2			_MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY)
-#define PAGE_S2_DEVICE		_MOD_PROT(pgprot_s2_device, L_PTE_USER | L_PTE_S2_RDONLY)
+#define PAGE_S2_DEVICE		_MOD_PROT(pgprot_s2_device, L_PTE_S2_RDWR)
 
 #define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
 #define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
diff --git a/arch/arm/include/asm/prom.h b/arch/arm/include/asm/prom.h
index a219227c3e43..4a2985e21969 100644
--- a/arch/arm/include/asm/prom.h
+++ b/arch/arm/include/asm/prom.h
@@ -15,13 +15,13 @@
 
 #ifdef CONFIG_OF
 
-extern struct machine_desc *setup_machine_fdt(unsigned int dt_phys);
+extern const struct machine_desc *setup_machine_fdt(unsigned int dt_phys);
 extern void arm_dt_memblock_reserve(void);
 extern void __init arm_dt_init_cpu_maps(void);
 
 #else /* CONFIG_OF */
 
-static inline struct machine_desc *setup_machine_fdt(unsigned int dt_phys)
+static inline const struct machine_desc *setup_machine_fdt(unsigned int dt_phys)
 {
 	return NULL;
 }
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index b07c09e5a0ac..4f2c28060c9a 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -46,7 +46,7 @@ static inline void dsb_sev(void)
 {
 #if __LINUX_ARM_ARCH__ >= 7
 	__asm__ __volatile__ (
-		"dsb\n"
+		"dsb ishst\n"
 		SEV
 	);
 #else
diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h
index fa09e6b49bf1..c99e259469f7 100644
--- a/arch/arm/include/asm/switch_to.h
+++ b/arch/arm/include/asm/switch_to.h
@@ -4,6 +4,16 @@
 #include <linux/thread_info.h>
 
 /*
+ * For v7 SMP cores running a preemptible kernel we may be pre-empted
+ * during a TLB maintenance operation, so execute an inner-shareable dsb
+ * to ensure that the maintenance completes in case we migrate to another
+ * CPU.
+ */
+#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7)
+#define finish_arch_switch(prev)	dsb(ish)
+#endif
+
+/*
  * switch_to(prev, next) should switch from task `prev' to `next'
  * `prev' will never be the same as `next'.  schedule() itself
  * contains the memory barrier to tell GCC not to cache `current'.
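
The hunk above closes a migration window. A sketch of the race, assuming a preemptible v7 SMP kernel (pseudo-timeline, not kernel code):

	/*
	 * CPU0: local_flush_tlb_page()   TLBIMVA issued, completing dsb not yet run
	 * CPU0: <preempted; task migrates to CPU1>
	 * CPU1: task resumes             stale translation may still be in use
	 *
	 * finish_arch_switch() executes dsb(ish) on every context switch, so any
	 * outstanding maintenance is complete before the task runs elsewhere.
	 */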
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 2b8114fcba09..df5e13d64f2c 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -43,6 +43,16 @@ struct cpu_context_save {
 	__u32	extra[2];		/* Xscale 'acc' register, etc */
 };
 
+struct arm_restart_block {
+	union {
+		/* For user cache flushing */
+		struct {
+			unsigned long start;
+			unsigned long end;
+		} cache;
+	};
+};
+
 /*
  * low level task data that entry.S needs immediate access to.
  * __switch_to() assumes cpu_context follows immediately after cpu_domain.
@@ -68,6 +78,7 @@ struct thread_info {
 	unsigned long		thumbee_state;	/* ThumbEE Handler Base register */
 #endif
 	struct restart_block	restart_block;
+	struct arm_restart_block	arm_restart_block;
 };
 
 #define INIT_THREAD_INFO(tsk)						\
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index f467e9b3f8d5..38960264040c 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -319,67 +319,110 @@ extern struct cpu_tlb_fns cpu_tlb;
 #define tlb_op(f, regs, arg)	__tlb_op(f, "p15, 0, %0, " regs, arg)
 #define tlb_l2_op(f, regs, arg)	__tlb_op(f, "p15, 1, %0, " regs, arg)
 
-static inline void local_flush_tlb_all(void)
+static inline void __local_flush_tlb_all(void)
 {
 	const int zero = 0;
 	const unsigned int __tlb_flag = __cpu_tlb_flags;
 
-	if (tlb_flag(TLB_WB))
-		dsb();
-
 	tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero);
 	tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero);
 	tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero);
-	tlb_op(TLB_V7_UIS_FULL, "c8, c3, 0", zero);
+}
+
+static inline void local_flush_tlb_all(void)
+{
+	const int zero = 0;
+	const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+	if (tlb_flag(TLB_WB))
+		dsb(nshst);
+
+	__local_flush_tlb_all();
+	tlb_op(TLB_V7_UIS_FULL, "c8, c7, 0", zero);
 
 	if (tlb_flag(TLB_BARRIER)) {
-		dsb();
+		dsb(nsh);
 		isb();
 	}
 }
 
-static inline void local_flush_tlb_mm(struct mm_struct *mm)
+static inline void __flush_tlb_all(void)
 {
 	const int zero = 0;
-	const int asid = ASID(mm);
 	const unsigned int __tlb_flag = __cpu_tlb_flags;
 
 	if (tlb_flag(TLB_WB))
-		dsb();
+		dsb(ishst);
+
+	__local_flush_tlb_all();
+	tlb_op(TLB_V7_UIS_FULL, "c8, c3, 0", zero);
+
+	if (tlb_flag(TLB_BARRIER)) {
+		dsb(ish);
+		isb();
+	}
+}
+
+static inline void __local_flush_tlb_mm(struct mm_struct *mm)
+{
+	const int zero = 0;
+	const int asid = ASID(mm);
+	const unsigned int __tlb_flag = __cpu_tlb_flags;
 
 	if (possible_tlb_flags & (TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {
-		if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) {
+		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
 			tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero);
 			tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero);
 			tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero);
 		}
-		put_cpu();
 	}
 
 	tlb_op(TLB_V6_U_ASID, "c8, c7, 2", asid);
 	tlb_op(TLB_V6_D_ASID, "c8, c6, 2", asid);
 	tlb_op(TLB_V6_I_ASID, "c8, c5, 2", asid);
+}
+
+static inline void local_flush_tlb_mm(struct mm_struct *mm)
+{
+	const int asid = ASID(mm);
+	const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+	if (tlb_flag(TLB_WB))
+		dsb(nshst);
+
+	__local_flush_tlb_mm(mm);
+	tlb_op(TLB_V7_UIS_ASID, "c8, c7, 2", asid);
+
+	if (tlb_flag(TLB_BARRIER))
+		dsb(nsh);
+}
+
+static inline void __flush_tlb_mm(struct mm_struct *mm)
+{
+	const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+	if (tlb_flag(TLB_WB))
+		dsb(ishst);
+
+	__local_flush_tlb_mm(mm);
 #ifdef CONFIG_ARM_ERRATA_720789
-	tlb_op(TLB_V7_UIS_ASID, "c8, c3, 0", zero);
+	tlb_op(TLB_V7_UIS_ASID, "c8, c3, 0", 0);
 #else
-	tlb_op(TLB_V7_UIS_ASID, "c8, c3, 2", asid);
+	tlb_op(TLB_V7_UIS_ASID, "c8, c3, 2", ASID(mm));
 #endif
 
 	if (tlb_flag(TLB_BARRIER))
-		dsb();
+		dsb(ish);
 }
 
 static inline void
-local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
+__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 {
 	const int zero = 0;
 	const unsigned int __tlb_flag = __cpu_tlb_flags;
 
 	uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);
 
-	if (tlb_flag(TLB_WB))
-		dsb();
-
 	if (possible_tlb_flags & (TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) &&
 	    cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
 		tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr);
@@ -392,6 +435,36 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 		tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", uaddr);
 		tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", uaddr);
 		tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", uaddr);
+}
+
+static inline void
+local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
+{
+	const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+	uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);
+
+	if (tlb_flag(TLB_WB))
+		dsb(nshst);
+
+	__local_flush_tlb_page(vma, uaddr);
+	tlb_op(TLB_V7_UIS_PAGE, "c8, c7, 1", uaddr);
+
+	if (tlb_flag(TLB_BARRIER))
+		dsb(nsh);
+}
+
+static inline void
+__flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
+{
+	const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+	uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);
+
+	if (tlb_flag(TLB_WB))
+		dsb(ishst);
+
+	__local_flush_tlb_page(vma, uaddr);
 #ifdef CONFIG_ARM_ERRATA_720789
 	tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 3", uaddr & PAGE_MASK);
 #else
@@ -399,19 +472,14 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 #endif
 
 	if (tlb_flag(TLB_BARRIER))
-		dsb();
+		dsb(ish);
 }
 
-static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
+static inline void __local_flush_tlb_kernel_page(unsigned long kaddr)
 {
 	const int zero = 0;
 	const unsigned int __tlb_flag = __cpu_tlb_flags;
 
-	kaddr &= PAGE_MASK;
-
-	if (tlb_flag(TLB_WB))
-		dsb();
-
 	tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr);
 	tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr);
 	tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr);
@@ -421,26 +489,75 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
 	tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", kaddr);
 	tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", kaddr);
 	tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", kaddr);
+}
+
+static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
+{
+	const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+	kaddr &= PAGE_MASK;
+
+	if (tlb_flag(TLB_WB))
+		dsb(nshst);
+
+	__local_flush_tlb_kernel_page(kaddr);
+	tlb_op(TLB_V7_UIS_PAGE, "c8, c7, 1", kaddr);
+
+	if (tlb_flag(TLB_BARRIER)) {
+		dsb(nsh);
+		isb();
+	}
+}
+
+static inline void __flush_tlb_kernel_page(unsigned long kaddr)
+{
+	const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+	kaddr &= PAGE_MASK;
+
+	if (tlb_flag(TLB_WB))
+		dsb(ishst);
+
+	__local_flush_tlb_kernel_page(kaddr);
 	tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", kaddr);
 
 	if (tlb_flag(TLB_BARRIER)) {
-		dsb();
+		dsb(ish);
 		isb();
 	}
 }
 
+/*
+ * Branch predictor maintenance is paired with full TLB invalidation, so
+ * there is no need for any barriers here.
+ */
+static inline void __local_flush_bp_all(void)
+{
+	const int zero = 0;
+	const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+	if (tlb_flag(TLB_V6_BP))
+		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero));
+}
+
 static inline void local_flush_bp_all(void)
 {
 	const int zero = 0;
 	const unsigned int __tlb_flag = __cpu_tlb_flags;
 
+	__local_flush_bp_all();
 	if (tlb_flag(TLB_V7_UIS_BP))
-		asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero));
-	else if (tlb_flag(TLB_V6_BP))
 		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero));
+}
 
-	if (tlb_flag(TLB_BARRIER))
-		isb();
+static inline void __flush_bp_all(void)
+{
+	const int zero = 0;
+	const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+	__local_flush_bp_all();
+	if (tlb_flag(TLB_V7_UIS_BP))
+		asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero));
 }
 
 #include <asm/cputype.h>
@@ -461,7 +578,7 @@ static inline void dummy_flush_tlb_a15_erratum(void)
 	 * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0.
 	 */
 	asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
-	dsb();
+	dsb(ish);
 }
 #else
 static inline int erratum_a15_798181(void)
@@ -495,7 +612,7 @@ static inline void flush_pmd_entry(void *pmd)
 		tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1  @ L2 flush_pmd", pmd);
 
 	if (tlb_flag(TLB_WB))
-		dsb();
+		dsb(ishst);
 }
 
 static inline void clean_pmd_entry(void *pmd)
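
The hunks above consistently split each flush into a local flavour fenced by non-shareable barriers and a broadcast flavour fenced by inner-shareable ones. A condensed sketch of the two pairings (MCR encodings taken from the tlb_op lines above):

	/* local flush: affects this CPU only, non-shareable barriers suffice */
	dsb(nshst);					/* drain page-table stores   */
	asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (va));	/* TLBIMVA, unified, local   */
	dsb(nsh);
	isb();

	/* broadcast flush: must reach every CPU in the inner-shareable domain */
	dsb(ishst);
	asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (va));	/* TLBIMVAIS, inner-shareable */
	dsb(ish);
	isb();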
diff --git a/arch/arm/include/asm/types.h b/arch/arm/include/asm/types.h
new file mode 100644
index 000000000000..a53cdb8f068c
--- /dev/null
+++ b/arch/arm/include/asm/types.h
@@ -0,0 +1,40 @@
+#ifndef _ASM_TYPES_H
+#define _ASM_TYPES_H
+
+#include <asm-generic/int-ll64.h>
+
+/*
+ * The C99 types uintXX_t that are usually defined in 'stdint.h' are not as
+ * unambiguous on ARM as you would expect. For the types below, there is a
+ * difference on ARM between GCC built for bare metal ARM, GCC built for glibc
+ * and the kernel itself, which results in build errors if you try to build
+ * with -ffreestanding and include 'stdint.h' (such as when you include
+ * 'arm_neon.h' in order to use NEON intrinsics)
+ *
+ * As the typedefs for these types in 'stdint.h' are based on builtin defines
+ * supplied by GCC, we can tweak these to align with the kernel's idea of those
+ * types, so 'linux/types.h' and 'stdint.h' can be safely included from the same
+ * source file (provided that -ffreestanding is used).
+ *
+ *                    int32_t        uint32_t        uintptr_t
+ * bare metal GCC     long           unsigned long   unsigned int
+ * glibc GCC          int            unsigned int    unsigned int
+ * kernel             int            unsigned int    unsigned long
+ */
+
+#ifdef __INT32_TYPE__
+#undef __INT32_TYPE__
+#define __INT32_TYPE__		int
+#endif
+
+#ifdef __UINT32_TYPE__
+#undef __UINT32_TYPE__
+#define __UINT32_TYPE__	unsigned int
+#endif
+
+#ifdef __UINTPTR_TYPE__
+#undef __UINTPTR_TYPE__
+#define __UINTPTR_TYPE__	unsigned long
+#endif
+
+#endif /* _ASM_TYPES_H */
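
The payoff is that a -ffreestanding kernel source file can now mix both families of types; a minimal sketch with a hypothetical function:

	#include <linux/types.h>
	#include <arm_neon.h>	/* indirectly pulls in stdint.h */

	/* With the overrides above, uint32_t and u32 are both 'unsigned int',
	 * so mixing them raises no conflicting-type or pointer warnings. */
	static uint32_t checksum(const u32 *buf, int n)
	{
		uint32_t sum = 0;

		while (n--)
			sum += *buf++;
		return sum;
	}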
diff --git a/arch/arm/include/asm/v7m.h b/arch/arm/include/asm/v7m.h
index fa88d09fa3d9..615781c61627 100644
--- a/arch/arm/include/asm/v7m.h
+++ b/arch/arm/include/asm/v7m.h
@@ -15,6 +15,10 @@
 
 #define V7M_SCB_VTOR			0x08
 
+#define V7M_SCB_AIRCR			0x0c
+#define V7M_SCB_AIRCR_VECTKEY		(0x05fa << 16)
+#define V7M_SCB_AIRCR_SYSRESETREQ	(1 << 2)
+
 #define V7M_SCB_SCR			0x10
 #define V7M_SCB_SCR_SLEEPDEEP		(1 << 2)
 
@@ -42,3 +46,11 @@
  */
 #define EXC_RET_STACK_MASK			0x00000004
 #define EXC_RET_THREADMODE_PROCESSSTACK	0xfffffffd
+
+#ifndef __ASSEMBLY__
+
+enum reboot_mode;
+
+void armv7m_restart(enum reboot_mode mode, const char *cmd);
+
+#endif /* __ASSEMBLY__ */
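
The new AIRCR constants are what a v7-M restart routine needs: writes to AIRCR only take effect when the VECTKEY field carries the 0x05fa key, and setting SYSRESETREQ asks the system for a reset. A hedged sketch of such a routine (the SCB base mapping and accessor are platform details, not taken from this patch):

	/* sketch only -- not the armv7m_restart() implementation itself */
	static void v7m_reset_sketch(void __iomem *scb)	/* assumes <linux/io.h> */
	{
		writel(V7M_SCB_AIRCR_VECTKEY | V7M_SCB_AIRCR_SYSRESETREQ,
		       scb + V7M_SCB_AIRCR);
		while (1)
			;	/* the request is asynchronous; spin until reset hits */
	}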
diff --git a/arch/arm/include/asm/xor.h b/arch/arm/include/asm/xor.h
index 7604673dc427..4ffb26d4cad8 100644
--- a/arch/arm/include/asm/xor.h
+++ b/arch/arm/include/asm/xor.h
@@ -7,7 +7,10 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#include <linux/hardirq.h>
 #include <asm-generic/xor.h>
+#include <asm/hwcap.h>
+#include <asm/neon.h>
 
 #define __XOR(a1, a2) a1 ^= a2
 
@@ -138,4 +141,74 @@ static struct xor_block_template xor_block_arm4regs = {
 		xor_speed(&xor_block_arm4regs);	\
 		xor_speed(&xor_block_8regs);	\
 		xor_speed(&xor_block_32regs);	\
+		NEON_TEMPLATES;			\
 	} while (0)
+
+#ifdef CONFIG_KERNEL_MODE_NEON
+
+extern struct xor_block_template const xor_block_neon_inner;
+
+static void
+xor_neon_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+	if (in_interrupt()) {
+		xor_arm4regs_2(bytes, p1, p2);
+	} else {
+		kernel_neon_begin();
+		xor_block_neon_inner.do_2(bytes, p1, p2);
+		kernel_neon_end();
+	}
+}
+
+static void
+xor_neon_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+	unsigned long *p3)
+{
+	if (in_interrupt()) {
+		xor_arm4regs_3(bytes, p1, p2, p3);
+	} else {
+		kernel_neon_begin();
+		xor_block_neon_inner.do_3(bytes, p1, p2, p3);
+		kernel_neon_end();
+	}
+}
+
+static void
+xor_neon_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+	unsigned long *p3, unsigned long *p4)
+{
+	if (in_interrupt()) {
+		xor_arm4regs_4(bytes, p1, p2, p3, p4);
+	} else {
+		kernel_neon_begin();
+		xor_block_neon_inner.do_4(bytes, p1, p2, p3, p4);
+		kernel_neon_end();
+	}
+}
+
+static void
+xor_neon_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+	unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+	if (in_interrupt()) {
+		xor_arm4regs_5(bytes, p1, p2, p3, p4, p5);
+	} else {
+		kernel_neon_begin();
+		xor_block_neon_inner.do_5(bytes, p1, p2, p3, p4, p5);
+		kernel_neon_end();
+	}
+}
+
+static struct xor_block_template xor_block_neon = {
+	.name	= "neon",
+	.do_2	= xor_neon_2,
+	.do_3	= xor_neon_3,
+	.do_4	= xor_neon_4,
+	.do_5	= xor_neon_5
+};
+
+#define NEON_TEMPLATES	\
+	do { if (cpu_has_neon()) xor_speed(&xor_block_neon); } while (0)
+#else
+#define NEON_TEMPLATES
+#endif
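
For context on how these templates get picked: the generic XOR layer (crypto/xor.c) invokes XOR_TRY_TEMPLATES at boot, benchmarking each registered template via xor_speed() and keeping the fastest, so the NEON variant is only selected when it actually wins on the running CPU. The in_interrupt() fallback exists because kernel_neon_begin() may not be used from interrupt context; atomic callers silently get the scalar xor_arm4regs path instead. A hypothetical direct call, for illustration only (real users go through xor_blocks()):

	static void xor_one_page(unsigned long *dst, unsigned long *src)
	{
		/* dst[i] ^= src[i] over PAGE_SIZE bytes, NEON-accelerated if allowed */
		xor_block_neon.do_2(PAGE_SIZE, dst, src);
	}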