diff options
29 files changed, 855 insertions, 173 deletions
diff --git a/Documentation/arm64/memory.txt b/Documentation/arm64/memory.txt index d758702fc03c..5f583af0a6e1 100644 --- a/Documentation/arm64/memory.txt +++ b/Documentation/arm64/memory.txt | |||
@@ -35,6 +35,8 @@ ffffffbc00000000 ffffffbdffffffff 8GB vmemmap | |||
35 | 35 | ||
36 | ffffffbe00000000 ffffffbffbbfffff ~8GB [guard, future vmmemap] | 36 | ffffffbe00000000 ffffffbffbbfffff ~8GB [guard, future vmmemap] |
37 | 37 | ||
38 | ffffffbffbc00000 ffffffbffbdfffff 2MB earlyprintk device | ||
39 | |||
38 | ffffffbffbe00000 ffffffbffbe0ffff 64KB PCI I/O space | 40 | ffffffbffbe00000 ffffffbffbe0ffff 64KB PCI I/O space |
39 | 41 | ||
40 | ffffffbbffff0000 ffffffbcffffffff ~2MB [guard] | 42 | ffffffbbffff0000 ffffffbcffffffff ~2MB [guard] |
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 75e915b72471..7c43569e3141 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig | |||
@@ -2,6 +2,7 @@ config ARM64 | |||
2 | def_bool y | 2 | def_bool y |
3 | select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE | 3 | select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE |
4 | select ARCH_WANT_COMPAT_IPC_PARSE_VERSION | 4 | select ARCH_WANT_COMPAT_IPC_PARSE_VERSION |
5 | select ARCH_WANT_FRAME_POINTERS | ||
5 | select ARM_AMBA | 6 | select ARM_AMBA |
6 | select CLONE_BACKWARDS | 7 | select CLONE_BACKWARDS |
7 | select COMMON_CLK | 8 | select COMMON_CLK |
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug index d7553f2bda66..51493430f142 100644 --- a/arch/arm64/Kconfig.debug +++ b/arch/arm64/Kconfig.debug | |||
@@ -24,4 +24,21 @@ config DEBUG_STACK_USAGE | |||
24 | Enables the display of the minimum amount of free stack which each | 24 | Enables the display of the minimum amount of free stack which each |
25 | task has ever had available in the sysrq-T output. | 25 | task has ever had available in the sysrq-T output. |
26 | 26 | ||
27 | config EARLY_PRINTK | ||
28 | bool "Early printk support" | ||
29 | default y | ||
30 | help | ||
31 | Say Y here if you want to have an early console using the | ||
32 | earlyprintk=<name>[,<addr>][,<options>] kernel parameter. It | ||
33 | is assumed that the early console device has been initialised | ||
34 | by the boot loader prior to starting the Linux kernel. | ||
35 | |||
36 | config PID_IN_CONTEXTIDR | ||
37 | bool "Write the current PID to the CONTEXTIDR register" | ||
38 | help | ||
39 | Enabling this option causes the kernel to write the current PID to | ||
40 | the CONTEXTIDR register, at the expense of some additional | ||
41 | instructions during context switch. Say Y here only if you are | ||
42 | planning to use hardware trace tools with this kernel. | ||
43 | |||
27 | endmenu | 44 | endmenu |
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild index 14a9d5a2b85b..e5fe4f99fe10 100644 --- a/arch/arm64/include/asm/Kbuild +++ b/arch/arm64/include/asm/Kbuild | |||
@@ -19,6 +19,7 @@ generic-y += ipcbuf.h | |||
19 | generic-y += irq_regs.h | 19 | generic-y += irq_regs.h |
20 | generic-y += kdebug.h | 20 | generic-y += kdebug.h |
21 | generic-y += kmap_types.h | 21 | generic-y += kmap_types.h |
22 | generic-y += kvm_para.h | ||
22 | generic-y += local.h | 23 | generic-y += local.h |
23 | generic-y += local64.h | 24 | generic-y += local64.h |
24 | generic-y += mman.h | 25 | generic-y += mman.h |
@@ -48,3 +49,4 @@ generic-y += trace_clock.h | |||
48 | generic-y += types.h | 49 | generic-y += types.h |
49 | generic-y += unaligned.h | 50 | generic-y += unaligned.h |
50 | generic-y += user.h | 51 | generic-y += user.h |
52 | generic-y += xor.h | ||
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h index 407717ba060e..836364468571 100644 --- a/arch/arm64/include/asm/atomic.h +++ b/arch/arm64/include/asm/atomic.h | |||
@@ -49,12 +49,12 @@ static inline void atomic_add(int i, atomic_t *v) | |||
49 | int result; | 49 | int result; |
50 | 50 | ||
51 | asm volatile("// atomic_add\n" | 51 | asm volatile("// atomic_add\n" |
52 | "1: ldxr %w0, [%3]\n" | 52 | "1: ldxr %w0, %2\n" |
53 | " add %w0, %w0, %w4\n" | 53 | " add %w0, %w0, %w3\n" |
54 | " stxr %w1, %w0, [%3]\n" | 54 | " stxr %w1, %w0, %2\n" |
55 | " cbnz %w1, 1b" | 55 | " cbnz %w1, 1b" |
56 | : "=&r" (result), "=&r" (tmp), "+o" (v->counter) | 56 | : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) |
57 | : "r" (&v->counter), "Ir" (i) | 57 | : "Ir" (i) |
58 | : "cc"); | 58 | : "cc"); |
59 | } | 59 | } |
60 | 60 | ||
@@ -64,13 +64,13 @@ static inline int atomic_add_return(int i, atomic_t *v) | |||
64 | int result; | 64 | int result; |
65 | 65 | ||
66 | asm volatile("// atomic_add_return\n" | 66 | asm volatile("// atomic_add_return\n" |
67 | "1: ldaxr %w0, [%3]\n" | 67 | "1: ldaxr %w0, %2\n" |
68 | " add %w0, %w0, %w4\n" | 68 | " add %w0, %w0, %w3\n" |
69 | " stlxr %w1, %w0, [%3]\n" | 69 | " stlxr %w1, %w0, %2\n" |
70 | " cbnz %w1, 1b" | 70 | " cbnz %w1, 1b" |
71 | : "=&r" (result), "=&r" (tmp), "+o" (v->counter) | 71 | : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) |
72 | : "r" (&v->counter), "Ir" (i) | 72 | : "Ir" (i) |
73 | : "cc"); | 73 | : "cc", "memory"); |
74 | 74 | ||
75 | return result; | 75 | return result; |
76 | } | 76 | } |
@@ -81,12 +81,12 @@ static inline void atomic_sub(int i, atomic_t *v) | |||
81 | int result; | 81 | int result; |
82 | 82 | ||
83 | asm volatile("// atomic_sub\n" | 83 | asm volatile("// atomic_sub\n" |
84 | "1: ldxr %w0, [%3]\n" | 84 | "1: ldxr %w0, %2\n" |
85 | " sub %w0, %w0, %w4\n" | 85 | " sub %w0, %w0, %w3\n" |
86 | " stxr %w1, %w0, [%3]\n" | 86 | " stxr %w1, %w0, %2\n" |
87 | " cbnz %w1, 1b" | 87 | " cbnz %w1, 1b" |
88 | : "=&r" (result), "=&r" (tmp), "+o" (v->counter) | 88 | : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) |
89 | : "r" (&v->counter), "Ir" (i) | 89 | : "Ir" (i) |
90 | : "cc"); | 90 | : "cc"); |
91 | } | 91 | } |
92 | 92 | ||
@@ -96,13 +96,13 @@ static inline int atomic_sub_return(int i, atomic_t *v) | |||
96 | int result; | 96 | int result; |
97 | 97 | ||
98 | asm volatile("// atomic_sub_return\n" | 98 | asm volatile("// atomic_sub_return\n" |
99 | "1: ldaxr %w0, [%3]\n" | 99 | "1: ldaxr %w0, %2\n" |
100 | " sub %w0, %w0, %w4\n" | 100 | " sub %w0, %w0, %w3\n" |
101 | " stlxr %w1, %w0, [%3]\n" | 101 | " stlxr %w1, %w0, %2\n" |
102 | " cbnz %w1, 1b" | 102 | " cbnz %w1, 1b" |
103 | : "=&r" (result), "=&r" (tmp), "+o" (v->counter) | 103 | : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) |
104 | : "r" (&v->counter), "Ir" (i) | 104 | : "Ir" (i) |
105 | : "cc"); | 105 | : "cc", "memory"); |
106 | 106 | ||
107 | return result; | 107 | return result; |
108 | } | 108 | } |
@@ -113,15 +113,15 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) | |||
113 | int oldval; | 113 | int oldval; |
114 | 114 | ||
115 | asm volatile("// atomic_cmpxchg\n" | 115 | asm volatile("// atomic_cmpxchg\n" |
116 | "1: ldaxr %w1, [%3]\n" | 116 | "1: ldaxr %w1, %2\n" |
117 | " cmp %w1, %w4\n" | 117 | " cmp %w1, %w3\n" |
118 | " b.ne 2f\n" | 118 | " b.ne 2f\n" |
119 | " stlxr %w0, %w5, [%3]\n" | 119 | " stlxr %w0, %w4, %2\n" |
120 | " cbnz %w0, 1b\n" | 120 | " cbnz %w0, 1b\n" |
121 | "2:" | 121 | "2:" |
122 | : "=&r" (tmp), "=&r" (oldval), "+o" (ptr->counter) | 122 | : "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter) |
123 | : "r" (&ptr->counter), "Ir" (old), "r" (new) | 123 | : "Ir" (old), "r" (new) |
124 | : "cc"); | 124 | : "cc", "memory"); |
125 | 125 | ||
126 | return oldval; | 126 | return oldval; |
127 | } | 127 | } |
@@ -131,12 +131,12 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) | |||
131 | unsigned long tmp, tmp2; | 131 | unsigned long tmp, tmp2; |
132 | 132 | ||
133 | asm volatile("// atomic_clear_mask\n" | 133 | asm volatile("// atomic_clear_mask\n" |
134 | "1: ldxr %0, [%3]\n" | 134 | "1: ldxr %0, %2\n" |
135 | " bic %0, %0, %4\n" | 135 | " bic %0, %0, %3\n" |
136 | " stxr %w1, %0, [%3]\n" | 136 | " stxr %w1, %0, %2\n" |
137 | " cbnz %w1, 1b" | 137 | " cbnz %w1, 1b" |
138 | : "=&r" (tmp), "=&r" (tmp2), "+o" (*addr) | 138 | : "=&r" (tmp), "=&r" (tmp2), "+Q" (*addr) |
139 | : "r" (addr), "Ir" (mask) | 139 | : "Ir" (mask) |
140 | : "cc"); | 140 | : "cc"); |
141 | } | 141 | } |
142 | 142 | ||
@@ -182,12 +182,12 @@ static inline void atomic64_add(u64 i, atomic64_t *v) | |||
182 | unsigned long tmp; | 182 | unsigned long tmp; |
183 | 183 | ||
184 | asm volatile("// atomic64_add\n" | 184 | asm volatile("// atomic64_add\n" |
185 | "1: ldxr %0, [%3]\n" | 185 | "1: ldxr %0, %2\n" |
186 | " add %0, %0, %4\n" | 186 | " add %0, %0, %3\n" |
187 | " stxr %w1, %0, [%3]\n" | 187 | " stxr %w1, %0, %2\n" |
188 | " cbnz %w1, 1b" | 188 | " cbnz %w1, 1b" |
189 | : "=&r" (result), "=&r" (tmp), "+o" (v->counter) | 189 | : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) |
190 | : "r" (&v->counter), "Ir" (i) | 190 | : "Ir" (i) |
191 | : "cc"); | 191 | : "cc"); |
192 | } | 192 | } |
193 | 193 | ||
@@ -197,13 +197,13 @@ static inline long atomic64_add_return(long i, atomic64_t *v) | |||
197 | unsigned long tmp; | 197 | unsigned long tmp; |
198 | 198 | ||
199 | asm volatile("// atomic64_add_return\n" | 199 | asm volatile("// atomic64_add_return\n" |
200 | "1: ldaxr %0, [%3]\n" | 200 | "1: ldaxr %0, %2\n" |
201 | " add %0, %0, %4\n" | 201 | " add %0, %0, %3\n" |
202 | " stlxr %w1, %0, [%3]\n" | 202 | " stlxr %w1, %0, %2\n" |
203 | " cbnz %w1, 1b" | 203 | " cbnz %w1, 1b" |
204 | : "=&r" (result), "=&r" (tmp), "+o" (v->counter) | 204 | : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) |
205 | : "r" (&v->counter), "Ir" (i) | 205 | : "Ir" (i) |
206 | : "cc"); | 206 | : "cc", "memory"); |
207 | 207 | ||
208 | return result; | 208 | return result; |
209 | } | 209 | } |
@@ -214,12 +214,12 @@ static inline void atomic64_sub(u64 i, atomic64_t *v) | |||
214 | unsigned long tmp; | 214 | unsigned long tmp; |
215 | 215 | ||
216 | asm volatile("// atomic64_sub\n" | 216 | asm volatile("// atomic64_sub\n" |
217 | "1: ldxr %0, [%3]\n" | 217 | "1: ldxr %0, %2\n" |
218 | " sub %0, %0, %4\n" | 218 | " sub %0, %0, %3\n" |
219 | " stxr %w1, %0, [%3]\n" | 219 | " stxr %w1, %0, %2\n" |
220 | " cbnz %w1, 1b" | 220 | " cbnz %w1, 1b" |
221 | : "=&r" (result), "=&r" (tmp), "+o" (v->counter) | 221 | : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) |
222 | : "r" (&v->counter), "Ir" (i) | 222 | : "Ir" (i) |
223 | : "cc"); | 223 | : "cc"); |
224 | } | 224 | } |
225 | 225 | ||
@@ -229,13 +229,13 @@ static inline long atomic64_sub_return(long i, atomic64_t *v) | |||
229 | unsigned long tmp; | 229 | unsigned long tmp; |
230 | 230 | ||
231 | asm volatile("// atomic64_sub_return\n" | 231 | asm volatile("// atomic64_sub_return\n" |
232 | "1: ldaxr %0, [%3]\n" | 232 | "1: ldaxr %0, %2\n" |
233 | " sub %0, %0, %4\n" | 233 | " sub %0, %0, %3\n" |
234 | " stlxr %w1, %0, [%3]\n" | 234 | " stlxr %w1, %0, %2\n" |
235 | " cbnz %w1, 1b" | 235 | " cbnz %w1, 1b" |
236 | : "=&r" (result), "=&r" (tmp), "+o" (v->counter) | 236 | : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) |
237 | : "r" (&v->counter), "Ir" (i) | 237 | : "Ir" (i) |
238 | : "cc"); | 238 | : "cc", "memory"); |
239 | 239 | ||
240 | return result; | 240 | return result; |
241 | } | 241 | } |
@@ -246,15 +246,15 @@ static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new) | |||
246 | unsigned long res; | 246 | unsigned long res; |
247 | 247 | ||
248 | asm volatile("// atomic64_cmpxchg\n" | 248 | asm volatile("// atomic64_cmpxchg\n" |
249 | "1: ldaxr %1, [%3]\n" | 249 | "1: ldaxr %1, %2\n" |
250 | " cmp %1, %4\n" | 250 | " cmp %1, %3\n" |
251 | " b.ne 2f\n" | 251 | " b.ne 2f\n" |
252 | " stlxr %w0, %5, [%3]\n" | 252 | " stlxr %w0, %4, %2\n" |
253 | " cbnz %w0, 1b\n" | 253 | " cbnz %w0, 1b\n" |
254 | "2:" | 254 | "2:" |
255 | : "=&r" (res), "=&r" (oldval), "+o" (ptr->counter) | 255 | : "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter) |
256 | : "r" (&ptr->counter), "Ir" (old), "r" (new) | 256 | : "Ir" (old), "r" (new) |
257 | : "cc"); | 257 | : "cc", "memory"); |
258 | 258 | ||
259 | return oldval; | 259 | return oldval; |
260 | } | 260 | } |
@@ -267,15 +267,15 @@ static inline long atomic64_dec_if_positive(atomic64_t *v) | |||
267 | unsigned long tmp; | 267 | unsigned long tmp; |
268 | 268 | ||
269 | asm volatile("// atomic64_dec_if_positive\n" | 269 | asm volatile("// atomic64_dec_if_positive\n" |
270 | "1: ldaxr %0, [%3]\n" | 270 | "1: ldaxr %0, %2\n" |
271 | " subs %0, %0, #1\n" | 271 | " subs %0, %0, #1\n" |
272 | " b.mi 2f\n" | 272 | " b.mi 2f\n" |
273 | " stlxr %w1, %0, [%3]\n" | 273 | " stlxr %w1, %0, %2\n" |
274 | " cbnz %w1, 1b\n" | 274 | " cbnz %w1, 1b\n" |
275 | "2:" | 275 | "2:" |
276 | : "=&r" (result), "=&r" (tmp), "+o" (v->counter) | 276 | : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) |
277 | : "r" (&v->counter) | 277 | : |
278 | : "cc"); | 278 | : "cc", "memory"); |
279 | 279 | ||
280 | return result; | 280 | return result; |
281 | } | 281 | } |
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h index e0e65b069d9e..968b5cbfc260 100644 --- a/arch/arm64/include/asm/cmpxchg.h +++ b/arch/arm64/include/asm/cmpxchg.h | |||
@@ -29,39 +29,39 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size | |||
29 | switch (size) { | 29 | switch (size) { |
30 | case 1: | 30 | case 1: |
31 | asm volatile("// __xchg1\n" | 31 | asm volatile("// __xchg1\n" |
32 | "1: ldaxrb %w0, [%3]\n" | 32 | "1: ldaxrb %w0, %2\n" |
33 | " stlxrb %w1, %w2, [%3]\n" | 33 | " stlxrb %w1, %w3, %2\n" |
34 | " cbnz %w1, 1b\n" | 34 | " cbnz %w1, 1b\n" |
35 | : "=&r" (ret), "=&r" (tmp) | 35 | : "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr) |
36 | : "r" (x), "r" (ptr) | 36 | : "r" (x) |
37 | : "memory", "cc"); | 37 | : "cc", "memory"); |
38 | break; | 38 | break; |
39 | case 2: | 39 | case 2: |
40 | asm volatile("// __xchg2\n" | 40 | asm volatile("// __xchg2\n" |
41 | "1: ldaxrh %w0, [%3]\n" | 41 | "1: ldaxrh %w0, %2\n" |
42 | " stlxrh %w1, %w2, [%3]\n" | 42 | " stlxrh %w1, %w3, %2\n" |
43 | " cbnz %w1, 1b\n" | 43 | " cbnz %w1, 1b\n" |
44 | : "=&r" (ret), "=&r" (tmp) | 44 | : "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr) |
45 | : "r" (x), "r" (ptr) | 45 | : "r" (x) |
46 | : "memory", "cc"); | 46 | : "cc", "memory"); |
47 | break; | 47 | break; |
48 | case 4: | 48 | case 4: |
49 | asm volatile("// __xchg4\n" | 49 | asm volatile("// __xchg4\n" |
50 | "1: ldaxr %w0, [%3]\n" | 50 | "1: ldaxr %w0, %2\n" |
51 | " stlxr %w1, %w2, [%3]\n" | 51 | " stlxr %w1, %w3, %2\n" |
52 | " cbnz %w1, 1b\n" | 52 | " cbnz %w1, 1b\n" |
53 | : "=&r" (ret), "=&r" (tmp) | 53 | : "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr) |
54 | : "r" (x), "r" (ptr) | 54 | : "r" (x) |
55 | : "memory", "cc"); | 55 | : "cc", "memory"); |
56 | break; | 56 | break; |
57 | case 8: | 57 | case 8: |
58 | asm volatile("// __xchg8\n" | 58 | asm volatile("// __xchg8\n" |
59 | "1: ldaxr %0, [%3]\n" | 59 | "1: ldaxr %0, %2\n" |
60 | " stlxr %w1, %2, [%3]\n" | 60 | " stlxr %w1, %3, %2\n" |
61 | " cbnz %w1, 1b\n" | 61 | " cbnz %w1, 1b\n" |
62 | : "=&r" (ret), "=&r" (tmp) | 62 | : "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr) |
63 | : "r" (x), "r" (ptr) | 63 | : "r" (x) |
64 | : "memory", "cc"); | 64 | : "cc", "memory"); |
65 | break; | 65 | break; |
66 | default: | 66 | default: |
67 | BUILD_BUG(); | 67 | BUILD_BUG(); |
@@ -82,14 +82,14 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, | |||
82 | case 1: | 82 | case 1: |
83 | do { | 83 | do { |
84 | asm volatile("// __cmpxchg1\n" | 84 | asm volatile("// __cmpxchg1\n" |
85 | " ldxrb %w1, [%2]\n" | 85 | " ldxrb %w1, %2\n" |
86 | " mov %w0, #0\n" | 86 | " mov %w0, #0\n" |
87 | " cmp %w1, %w3\n" | 87 | " cmp %w1, %w3\n" |
88 | " b.ne 1f\n" | 88 | " b.ne 1f\n" |
89 | " stxrb %w0, %w4, [%2]\n" | 89 | " stxrb %w0, %w4, %2\n" |
90 | "1:\n" | 90 | "1:\n" |
91 | : "=&r" (res), "=&r" (oldval) | 91 | : "=&r" (res), "=&r" (oldval), "+Q" (*(u8 *)ptr) |
92 | : "r" (ptr), "Ir" (old), "r" (new) | 92 | : "Ir" (old), "r" (new) |
93 | : "cc"); | 93 | : "cc"); |
94 | } while (res); | 94 | } while (res); |
95 | break; | 95 | break; |
@@ -97,29 +97,29 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, | |||
97 | case 2: | 97 | case 2: |
98 | do { | 98 | do { |
99 | asm volatile("// __cmpxchg2\n" | 99 | asm volatile("// __cmpxchg2\n" |
100 | " ldxrh %w1, [%2]\n" | 100 | " ldxrh %w1, %2\n" |
101 | " mov %w0, #0\n" | 101 | " mov %w0, #0\n" |
102 | " cmp %w1, %w3\n" | 102 | " cmp %w1, %w3\n" |
103 | " b.ne 1f\n" | 103 | " b.ne 1f\n" |
104 | " stxrh %w0, %w4, [%2]\n" | 104 | " stxrh %w0, %w4, %2\n" |
105 | "1:\n" | 105 | "1:\n" |
106 | : "=&r" (res), "=&r" (oldval) | 106 | : "=&r" (res), "=&r" (oldval), "+Q" (*(u16 *)ptr) |
107 | : "r" (ptr), "Ir" (old), "r" (new) | 107 | : "Ir" (old), "r" (new) |
108 | : "memory", "cc"); | 108 | : "cc"); |
109 | } while (res); | 109 | } while (res); |
110 | break; | 110 | break; |
111 | 111 | ||
112 | case 4: | 112 | case 4: |
113 | do { | 113 | do { |
114 | asm volatile("// __cmpxchg4\n" | 114 | asm volatile("// __cmpxchg4\n" |
115 | " ldxr %w1, [%2]\n" | 115 | " ldxr %w1, %2\n" |
116 | " mov %w0, #0\n" | 116 | " mov %w0, #0\n" |
117 | " cmp %w1, %w3\n" | 117 | " cmp %w1, %w3\n" |
118 | " b.ne 1f\n" | 118 | " b.ne 1f\n" |
119 | " stxr %w0, %w4, [%2]\n" | 119 | " stxr %w0, %w4, %2\n" |
120 | "1:\n" | 120 | "1:\n" |
121 | : "=&r" (res), "=&r" (oldval) | 121 | : "=&r" (res), "=&r" (oldval), "+Q" (*(u32 *)ptr) |
122 | : "r" (ptr), "Ir" (old), "r" (new) | 122 | : "Ir" (old), "r" (new) |
123 | : "cc"); | 123 | : "cc"); |
124 | } while (res); | 124 | } while (res); |
125 | break; | 125 | break; |
@@ -127,14 +127,14 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, | |||
127 | case 8: | 127 | case 8: |
128 | do { | 128 | do { |
129 | asm volatile("// __cmpxchg8\n" | 129 | asm volatile("// __cmpxchg8\n" |
130 | " ldxr %1, [%2]\n" | 130 | " ldxr %1, %2\n" |
131 | " mov %w0, #0\n" | 131 | " mov %w0, #0\n" |
132 | " cmp %1, %3\n" | 132 | " cmp %1, %3\n" |
133 | " b.ne 1f\n" | 133 | " b.ne 1f\n" |
134 | " stxr %w0, %4, [%2]\n" | 134 | " stxr %w0, %4, %2\n" |
135 | "1:\n" | 135 | "1:\n" |
136 | : "=&r" (res), "=&r" (oldval) | 136 | : "=&r" (res), "=&r" (oldval), "+Q" (*(u64 *)ptr) |
137 | : "r" (ptr), "Ir" (old), "r" (new) | 137 | : "Ir" (old), "r" (new) |
138 | : "cc"); | 138 | : "cc"); |
139 | } while (res); | 139 | } while (res); |
140 | break; | 140 | break; |
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h index 3468ae8439fa..c582fa316366 100644 --- a/arch/arm64/include/asm/futex.h +++ b/arch/arm64/include/asm/futex.h | |||
@@ -39,7 +39,7 @@ | |||
39 | " .popsection\n" \ | 39 | " .popsection\n" \ |
40 | : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \ | 40 | : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \ |
41 | : "r" (oparg), "Ir" (-EFAULT) \ | 41 | : "r" (oparg), "Ir" (-EFAULT) \ |
42 | : "cc") | 42 | : "cc", "memory") |
43 | 43 | ||
44 | static inline int | 44 | static inline int |
45 | futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) | 45 | futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) |
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index d2f05a608274..57f12c991de2 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h | |||
@@ -230,6 +230,9 @@ extern void __iounmap(volatile void __iomem *addr); | |||
230 | #define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC)) | 230 | #define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC)) |
231 | #define iounmap __iounmap | 231 | #define iounmap __iounmap |
232 | 232 | ||
233 | #define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF) | ||
234 | #define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PTE_PXN | PTE_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE)) | ||
235 | |||
233 | #define ARCH_HAS_IOREMAP_WC | 236 | #define ARCH_HAS_IOREMAP_WC |
234 | #include <asm-generic/iomap.h> | 237 | #include <asm-generic/iomap.h> |
235 | 238 | ||
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 1cac16a001cb..381f556b664e 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h | |||
@@ -43,6 +43,7 @@ | |||
43 | #define PAGE_OFFSET UL(0xffffffc000000000) | 43 | #define PAGE_OFFSET UL(0xffffffc000000000) |
44 | #define MODULES_END (PAGE_OFFSET) | 44 | #define MODULES_END (PAGE_OFFSET) |
45 | #define MODULES_VADDR (MODULES_END - SZ_64M) | 45 | #define MODULES_VADDR (MODULES_END - SZ_64M) |
46 | #define EARLYCON_IOBASE (MODULES_VADDR - SZ_4M) | ||
46 | #define VA_BITS (39) | 47 | #define VA_BITS (39) |
47 | #define TASK_SIZE_64 (UL(1) << VA_BITS) | 48 | #define TASK_SIZE_64 (UL(1) << VA_BITS) |
48 | 49 | ||
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index d4f7fd5b9e33..2494fc01896a 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h | |||
@@ -26,5 +26,6 @@ typedef struct { | |||
26 | 26 | ||
27 | extern void paging_init(void); | 27 | extern void paging_init(void); |
28 | extern void setup_mm_for_reboot(void); | 28 | extern void setup_mm_for_reboot(void); |
29 | extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt); | ||
29 | 30 | ||
30 | #endif | 31 | #endif |
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index f68465dee026..e2bc385adb6b 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h | |||
@@ -35,6 +35,21 @@ extern unsigned int cpu_last_asid; | |||
35 | void __init_new_context(struct task_struct *tsk, struct mm_struct *mm); | 35 | void __init_new_context(struct task_struct *tsk, struct mm_struct *mm); |
36 | void __new_context(struct mm_struct *mm); | 36 | void __new_context(struct mm_struct *mm); |
37 | 37 | ||
38 | #ifdef CONFIG_PID_IN_CONTEXTIDR | ||
39 | static inline void contextidr_thread_switch(struct task_struct *next) | ||
40 | { | ||
41 | asm( | ||
42 | " msr contextidr_el1, %0\n" | ||
43 | " isb" | ||
44 | : | ||
45 | : "r" (task_pid_nr(next))); | ||
46 | } | ||
47 | #else | ||
48 | static inline void contextidr_thread_switch(struct task_struct *next) | ||
49 | { | ||
50 | } | ||
51 | #endif | ||
52 | |||
38 | /* | 53 | /* |
39 | * Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0. | 54 | * Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0. |
40 | */ | 55 | */ |
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h index a6fffd511c5e..d26d1d53c0d7 100644 --- a/arch/arm64/include/asm/perf_event.h +++ b/arch/arm64/include/asm/perf_event.h | |||
@@ -17,6 +17,11 @@ | |||
17 | #ifndef __ASM_PERF_EVENT_H | 17 | #ifndef __ASM_PERF_EVENT_H |
18 | #define __ASM_PERF_EVENT_H | 18 | #define __ASM_PERF_EVENT_H |
19 | 19 | ||
20 | /* It's quiet around here... */ | 20 | #ifdef CONFIG_HW_PERF_EVENTS |
21 | struct pt_regs; | ||
22 | extern unsigned long perf_instruction_pointer(struct pt_regs *regs); | ||
23 | extern unsigned long perf_misc_flags(struct pt_regs *regs); | ||
24 | #define perf_misc_flags(regs) perf_misc_flags(regs) | ||
25 | #endif | ||
21 | 26 | ||
22 | #endif | 27 | #endif |
diff --git a/arch/arm64/include/asm/psci.h b/arch/arm64/include/asm/psci.h new file mode 100644 index 000000000000..0604237ecd99 --- /dev/null +++ b/arch/arm64/include/asm/psci.h | |||
@@ -0,0 +1,38 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License version 2 as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * Copyright (C) 2013 ARM Limited | ||
12 | */ | ||
13 | |||
14 | #ifndef __ASM_PSCI_H | ||
15 | #define __ASM_PSCI_H | ||
16 | |||
17 | #define PSCI_POWER_STATE_TYPE_STANDBY 0 | ||
18 | #define PSCI_POWER_STATE_TYPE_POWER_DOWN 1 | ||
19 | |||
20 | struct psci_power_state { | ||
21 | u16 id; | ||
22 | u8 type; | ||
23 | u8 affinity_level; | ||
24 | }; | ||
25 | |||
26 | struct psci_operations { | ||
27 | int (*cpu_suspend)(struct psci_power_state state, | ||
28 | unsigned long entry_point); | ||
29 | int (*cpu_off)(struct psci_power_state state); | ||
30 | int (*cpu_on)(unsigned long cpuid, unsigned long entry_point); | ||
31 | int (*migrate)(unsigned long cpuid); | ||
32 | }; | ||
33 | |||
34 | extern struct psci_operations psci_ops; | ||
35 | |||
36 | int psci_init(void); | ||
37 | |||
38 | #endif /* __ASM_PSCI_H */ | ||
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index 4ce845f8ee1c..41a71ee4c3df 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h | |||
@@ -42,6 +42,16 @@ | |||
42 | #define COMPAT_PSR_MODE_UND 0x0000001b | 42 | #define COMPAT_PSR_MODE_UND 0x0000001b |
43 | #define COMPAT_PSR_MODE_SYS 0x0000001f | 43 | #define COMPAT_PSR_MODE_SYS 0x0000001f |
44 | #define COMPAT_PSR_T_BIT 0x00000020 | 44 | #define COMPAT_PSR_T_BIT 0x00000020 |
45 | #define COMPAT_PSR_F_BIT 0x00000040 | ||
46 | #define COMPAT_PSR_I_BIT 0x00000080 | ||
47 | #define COMPAT_PSR_A_BIT 0x00000100 | ||
48 | #define COMPAT_PSR_E_BIT 0x00000200 | ||
49 | #define COMPAT_PSR_J_BIT 0x01000000 | ||
50 | #define COMPAT_PSR_Q_BIT 0x08000000 | ||
51 | #define COMPAT_PSR_V_BIT 0x10000000 | ||
52 | #define COMPAT_PSR_C_BIT 0x20000000 | ||
53 | #define COMPAT_PSR_Z_BIT 0x40000000 | ||
54 | #define COMPAT_PSR_N_BIT 0x80000000 | ||
45 | #define COMPAT_PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */ | 55 | #define COMPAT_PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */ |
46 | /* | 56 | /* |
47 | * These are 'magic' values for PTRACE_PEEKUSR that return info about where a | 57 | * These are 'magic' values for PTRACE_PEEKUSR that return info about where a |
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h index 7e34295f78e3..4b8023c5d146 100644 --- a/arch/arm64/include/asm/smp.h +++ b/arch/arm64/include/asm/smp.h | |||
@@ -66,4 +66,15 @@ extern volatile unsigned long secondary_holding_pen_release; | |||
66 | extern void arch_send_call_function_single_ipi(int cpu); | 66 | extern void arch_send_call_function_single_ipi(int cpu); |
67 | extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); | 67 | extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); |
68 | 68 | ||
69 | struct device_node; | ||
70 | |||
71 | struct smp_enable_ops { | ||
72 | const char *name; | ||
73 | int (*init_cpu)(struct device_node *, int); | ||
74 | int (*prepare_cpu)(int); | ||
75 | }; | ||
76 | |||
77 | extern const struct smp_enable_ops smp_spin_table_ops; | ||
78 | extern const struct smp_enable_ops smp_psci_ops; | ||
79 | |||
69 | #endif /* ifndef __ASM_SMP_H */ | 80 | #endif /* ifndef __ASM_SMP_H */ |
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h index 41112fe2f8b1..7065e920149d 100644 --- a/arch/arm64/include/asm/spinlock.h +++ b/arch/arm64/include/asm/spinlock.h | |||
@@ -45,13 +45,13 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) | |||
45 | asm volatile( | 45 | asm volatile( |
46 | " sevl\n" | 46 | " sevl\n" |
47 | "1: wfe\n" | 47 | "1: wfe\n" |
48 | "2: ldaxr %w0, [%1]\n" | 48 | "2: ldaxr %w0, %1\n" |
49 | " cbnz %w0, 1b\n" | 49 | " cbnz %w0, 1b\n" |
50 | " stxr %w0, %w2, [%1]\n" | 50 | " stxr %w0, %w2, %1\n" |
51 | " cbnz %w0, 2b\n" | 51 | " cbnz %w0, 2b\n" |
52 | : "=&r" (tmp) | 52 | : "=&r" (tmp), "+Q" (lock->lock) |
53 | : "r" (&lock->lock), "r" (1) | 53 | : "r" (1) |
54 | : "memory"); | 54 | : "cc", "memory"); |
55 | } | 55 | } |
56 | 56 | ||
57 | static inline int arch_spin_trylock(arch_spinlock_t *lock) | 57 | static inline int arch_spin_trylock(arch_spinlock_t *lock) |
@@ -59,13 +59,13 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) | |||
59 | unsigned int tmp; | 59 | unsigned int tmp; |
60 | 60 | ||
61 | asm volatile( | 61 | asm volatile( |
62 | " ldaxr %w0, [%1]\n" | 62 | " ldaxr %w0, %1\n" |
63 | " cbnz %w0, 1f\n" | 63 | " cbnz %w0, 1f\n" |
64 | " stxr %w0, %w2, [%1]\n" | 64 | " stxr %w0, %w2, %1\n" |
65 | "1:\n" | 65 | "1:\n" |
66 | : "=&r" (tmp) | 66 | : "=&r" (tmp), "+Q" (lock->lock) |
67 | : "r" (&lock->lock), "r" (1) | 67 | : "r" (1) |
68 | : "memory"); | 68 | : "cc", "memory"); |
69 | 69 | ||
70 | return !tmp; | 70 | return !tmp; |
71 | } | 71 | } |
@@ -73,8 +73,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) | |||
73 | static inline void arch_spin_unlock(arch_spinlock_t *lock) | 73 | static inline void arch_spin_unlock(arch_spinlock_t *lock) |
74 | { | 74 | { |
75 | asm volatile( | 75 | asm volatile( |
76 | " stlr %w1, [%0]\n" | 76 | " stlr %w1, %0\n" |
77 | : : "r" (&lock->lock), "r" (0) : "memory"); | 77 | : "=Q" (lock->lock) : "r" (0) : "memory"); |
78 | } | 78 | } |
79 | 79 | ||
80 | /* | 80 | /* |
@@ -94,13 +94,13 @@ static inline void arch_write_lock(arch_rwlock_t *rw) | |||
94 | asm volatile( | 94 | asm volatile( |
95 | " sevl\n" | 95 | " sevl\n" |
96 | "1: wfe\n" | 96 | "1: wfe\n" |
97 | "2: ldaxr %w0, [%1]\n" | 97 | "2: ldaxr %w0, %1\n" |
98 | " cbnz %w0, 1b\n" | 98 | " cbnz %w0, 1b\n" |
99 | " stxr %w0, %w2, [%1]\n" | 99 | " stxr %w0, %w2, %1\n" |
100 | " cbnz %w0, 2b\n" | 100 | " cbnz %w0, 2b\n" |
101 | : "=&r" (tmp) | 101 | : "=&r" (tmp), "+Q" (rw->lock) |
102 | : "r" (&rw->lock), "r" (0x80000000) | 102 | : "r" (0x80000000) |
103 | : "memory"); | 103 | : "cc", "memory"); |
104 | } | 104 | } |
105 | 105 | ||
106 | static inline int arch_write_trylock(arch_rwlock_t *rw) | 106 | static inline int arch_write_trylock(arch_rwlock_t *rw) |
@@ -108,13 +108,13 @@ static inline int arch_write_trylock(arch_rwlock_t *rw) | |||
108 | unsigned int tmp; | 108 | unsigned int tmp; |
109 | 109 | ||
110 | asm volatile( | 110 | asm volatile( |
111 | " ldaxr %w0, [%1]\n" | 111 | " ldaxr %w0, %1\n" |
112 | " cbnz %w0, 1f\n" | 112 | " cbnz %w0, 1f\n" |
113 | " stxr %w0, %w2, [%1]\n" | 113 | " stxr %w0, %w2, %1\n" |
114 | "1:\n" | 114 | "1:\n" |
115 | : "=&r" (tmp) | 115 | : "=&r" (tmp), "+Q" (rw->lock) |
116 | : "r" (&rw->lock), "r" (0x80000000) | 116 | : "r" (0x80000000) |
117 | : "memory"); | 117 | : "cc", "memory"); |
118 | 118 | ||
119 | return !tmp; | 119 | return !tmp; |
120 | } | 120 | } |
@@ -122,8 +122,8 @@ static inline int arch_write_trylock(arch_rwlock_t *rw) | |||
122 | static inline void arch_write_unlock(arch_rwlock_t *rw) | 122 | static inline void arch_write_unlock(arch_rwlock_t *rw) |
123 | { | 123 | { |
124 | asm volatile( | 124 | asm volatile( |
125 | " stlr %w1, [%0]\n" | 125 | " stlr %w1, %0\n" |
126 | : : "r" (&rw->lock), "r" (0) : "memory"); | 126 | : "=Q" (rw->lock) : "r" (0) : "memory"); |
127 | } | 127 | } |
128 | 128 | ||
129 | /* write_can_lock - would write_trylock() succeed? */ | 129 | /* write_can_lock - would write_trylock() succeed? */ |
@@ -148,14 +148,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw) | |||
148 | asm volatile( | 148 | asm volatile( |
149 | " sevl\n" | 149 | " sevl\n" |
150 | "1: wfe\n" | 150 | "1: wfe\n" |
151 | "2: ldaxr %w0, [%2]\n" | 151 | "2: ldaxr %w0, %2\n" |
152 | " add %w0, %w0, #1\n" | 152 | " add %w0, %w0, #1\n" |
153 | " tbnz %w0, #31, 1b\n" | 153 | " tbnz %w0, #31, 1b\n" |
154 | " stxr %w1, %w0, [%2]\n" | 154 | " stxr %w1, %w0, %2\n" |
155 | " cbnz %w1, 2b\n" | 155 | " cbnz %w1, 2b\n" |
156 | : "=&r" (tmp), "=&r" (tmp2) | 156 | : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock) |
157 | : "r" (&rw->lock) | 157 | : |
158 | : "memory"); | 158 | : "cc", "memory"); |
159 | } | 159 | } |
160 | 160 | ||
161 | static inline void arch_read_unlock(arch_rwlock_t *rw) | 161 | static inline void arch_read_unlock(arch_rwlock_t *rw) |
@@ -163,13 +163,13 @@ static inline void arch_read_unlock(arch_rwlock_t *rw) | |||
163 | unsigned int tmp, tmp2; | 163 | unsigned int tmp, tmp2; |
164 | 164 | ||
165 | asm volatile( | 165 | asm volatile( |
166 | "1: ldxr %w0, [%2]\n" | 166 | "1: ldxr %w0, %2\n" |
167 | " sub %w0, %w0, #1\n" | 167 | " sub %w0, %w0, #1\n" |
168 | " stlxr %w1, %w0, [%2]\n" | 168 | " stlxr %w1, %w0, %2\n" |
169 | " cbnz %w1, 1b\n" | 169 | " cbnz %w1, 1b\n" |
170 | : "=&r" (tmp), "=&r" (tmp2) | 170 | : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock) |
171 | : "r" (&rw->lock) | 171 | : |
172 | : "memory"); | 172 | : "cc", "memory"); |
173 | } | 173 | } |
174 | 174 | ||
175 | static inline int arch_read_trylock(arch_rwlock_t *rw) | 175 | static inline int arch_read_trylock(arch_rwlock_t *rw) |
@@ -177,14 +177,14 @@ static inline int arch_read_trylock(arch_rwlock_t *rw) | |||
177 | unsigned int tmp, tmp2 = 1; | 177 | unsigned int tmp, tmp2 = 1; |
178 | 178 | ||
179 | asm volatile( | 179 | asm volatile( |
180 | " ldaxr %w0, [%2]\n" | 180 | " ldaxr %w0, %2\n" |
181 | " add %w0, %w0, #1\n" | 181 | " add %w0, %w0, #1\n" |
182 | " tbnz %w0, #31, 1f\n" | 182 | " tbnz %w0, #31, 1f\n" |
183 | " stxr %w1, %w0, [%2]\n" | 183 | " stxr %w1, %w0, %2\n" |
184 | "1:\n" | 184 | "1:\n" |
185 | : "=&r" (tmp), "+r" (tmp2) | 185 | : "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock) |
186 | : "r" (&rw->lock) | 186 | : |
187 | : "memory"); | 187 | : "cc", "memory"); |
188 | 188 | ||
189 | return !tmp2; | 189 | return !tmp2; |
190 | } | 190 | } |
diff --git a/arch/arm64/include/uapi/asm/Kbuild b/arch/arm64/include/uapi/asm/Kbuild index ca5b65f75c7b..e4b78bdca19e 100644 --- a/arch/arm64/include/uapi/asm/Kbuild +++ b/arch/arm64/include/uapi/asm/Kbuild | |||
@@ -1,11 +1,14 @@ | |||
1 | # UAPI Header export list | 1 | # UAPI Header export list |
2 | include include/uapi/asm-generic/Kbuild.asm | 2 | include include/uapi/asm-generic/Kbuild.asm |
3 | 3 | ||
4 | generic-y += kvm_para.h | ||
5 | |||
4 | header-y += auxvec.h | 6 | header-y += auxvec.h |
5 | header-y += bitsperlong.h | 7 | header-y += bitsperlong.h |
6 | header-y += byteorder.h | 8 | header-y += byteorder.h |
7 | header-y += fcntl.h | 9 | header-y += fcntl.h |
8 | header-y += hwcap.h | 10 | header-y += hwcap.h |
11 | header-y += kvm_para.h | ||
9 | header-y += param.h | 12 | header-y += param.h |
10 | header-y += ptrace.h | 13 | header-y += ptrace.h |
11 | header-y += setup.h | 14 | header-y += setup.h |
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 74239c31e25a..7b4b564961d4 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile | |||
@@ -9,14 +9,15 @@ AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET) | |||
9 | arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \ | 9 | arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \ |
10 | entry-fpsimd.o process.o ptrace.o setup.o signal.o \ | 10 | entry-fpsimd.o process.o ptrace.o setup.o signal.o \ |
11 | sys.o stacktrace.o time.o traps.o io.o vdso.o \ | 11 | sys.o stacktrace.o time.o traps.o io.o vdso.o \ |
12 | hyp-stub.o | 12 | hyp-stub.o psci.o |
13 | 13 | ||
14 | arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \ | 14 | arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \ |
15 | sys_compat.o | 15 | sys_compat.o |
16 | arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o | 16 | arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o |
17 | arm64-obj-$(CONFIG_SMP) += smp.o | 17 | arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o smp_psci.o |
18 | arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o | 18 | arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o |
19 | arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)+= hw_breakpoint.o | 19 | arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)+= hw_breakpoint.o |
20 | arm64-obj-$(CONFIG_EARLY_PRINTK) += early_printk.o | ||
20 | 21 | ||
21 | obj-y += $(arm64-obj-y) vdso/ | 22 | obj-y += $(arm64-obj-y) vdso/ |
22 | obj-m += $(arm64-obj-m) | 23 | obj-m += $(arm64-obj-m) |
diff --git a/arch/arm64/kernel/early_printk.c b/arch/arm64/kernel/early_printk.c new file mode 100644 index 000000000000..7e320a2edb9b --- /dev/null +++ b/arch/arm64/kernel/early_printk.c | |||
@@ -0,0 +1,118 @@ | |||
1 | /* | ||
2 | * Earlyprintk support. | ||
3 | * | ||
4 | * Copyright (C) 2012 ARM Ltd. | ||
5 | * Author: Catalin Marinas <catalin.marinas@arm.com> | ||
6 | * | ||
7 | * This program is free software: you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
18 | */ | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/console.h> | ||
21 | #include <linux/init.h> | ||
22 | #include <linux/string.h> | ||
23 | #include <linux/mm.h> | ||
24 | #include <linux/io.h> | ||
25 | |||
26 | #include <linux/amba/serial.h> | ||
27 | |||
28 | static void __iomem *early_base; | ||
29 | static void (*printch)(char ch); | ||
30 | |||
31 | /* | ||
32 | * PL011 single character TX. | ||
33 | */ | ||
34 | static void pl011_printch(char ch) | ||
35 | { | ||
36 | while (readl_relaxed(early_base + UART01x_FR) & UART01x_FR_TXFF) | ||
37 | ; | ||
38 | writeb_relaxed(ch, early_base + UART01x_DR); | ||
39 | while (readl_relaxed(early_base + UART01x_FR) & UART01x_FR_BUSY) | ||
40 | ; | ||
41 | } | ||
42 | |||
43 | struct earlycon_match { | ||
44 | const char *name; | ||
45 | void (*printch)(char ch); | ||
46 | }; | ||
47 | |||
48 | static const struct earlycon_match earlycon_match[] __initconst = { | ||
49 | { .name = "pl011", .printch = pl011_printch, }, | ||
50 | {} | ||
51 | }; | ||
52 | |||
53 | static void early_write(struct console *con, const char *s, unsigned n) | ||
54 | { | ||
55 | while (n-- > 0) { | ||
56 | if (*s == '\n') | ||
57 | printch('\r'); | ||
58 | printch(*s); | ||
59 | s++; | ||
60 | } | ||
61 | } | ||
62 | |||
63 | static struct console early_console = { | ||
64 | .name = "earlycon", | ||
65 | .write = early_write, | ||
66 | .flags = CON_PRINTBUFFER | CON_BOOT, | ||
67 | .index = -1, | ||
68 | }; | ||
69 | |||
70 | /* | ||
71 | * Parse earlyprintk=... parameter in the format: | ||
72 | * | ||
73 | * <name>[,<addr>][,<options>] | ||
74 | * | ||
75 | * and register the early console. It is assumed that the UART has been | ||
76 | * initialised by the bootloader already. | ||
77 | */ | ||
78 | static int __init setup_early_printk(char *buf) | ||
79 | { | ||
80 | const struct earlycon_match *match = earlycon_match; | ||
81 | phys_addr_t paddr = 0; | ||
82 | |||
83 | if (!buf) { | ||
84 | pr_warning("No earlyprintk arguments passed.\n"); | ||
85 | return 0; | ||
86 | } | ||
87 | |||
88 | while (match->name) { | ||
89 | size_t len = strlen(match->name); | ||
90 | if (!strncmp(buf, match->name, len)) { | ||
91 | buf += len; | ||
92 | break; | ||
93 | } | ||
94 | match++; | ||
95 | } | ||
96 | if (!match->name) { | ||
97 | pr_warning("Unknown earlyprintk arguments: %s\n", buf); | ||
98 | return 0; | ||
99 | } | ||
100 | |||
101 | /* I/O address */ | ||
102 | if (!strncmp(buf, ",0x", 3)) { | ||
103 | char *e; | ||
104 | paddr = simple_strtoul(buf + 1, &e, 16); | ||
105 | buf = e; | ||
106 | } | ||
107 | /* no options parsing yet */ | ||
108 | |||
109 | if (paddr) | ||
110 | early_base = early_io_map(paddr, EARLYCON_IOBASE); | ||
111 | |||
112 | printch = match->printch; | ||
113 | register_console(&early_console); | ||
114 | |||
115 | return 0; | ||
116 | } | ||
117 | |||
118 | early_param("earlyprintk", setup_early_printk); | ||
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 368ad1f7c36c..0a0a49756826 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S | |||
@@ -82,10 +82,8 @@ | |||
82 | 82 | ||
83 | #ifdef CONFIG_ARM64_64K_PAGES | 83 | #ifdef CONFIG_ARM64_64K_PAGES |
84 | #define MM_MMUFLAGS PTE_ATTRINDX(MT_NORMAL) | PTE_FLAGS | 84 | #define MM_MMUFLAGS PTE_ATTRINDX(MT_NORMAL) | PTE_FLAGS |
85 | #define IO_MMUFLAGS PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_XN | PTE_FLAGS | ||
86 | #else | 85 | #else |
87 | #define MM_MMUFLAGS PMD_ATTRINDX(MT_NORMAL) | PMD_FLAGS | 86 | #define MM_MMUFLAGS PMD_ATTRINDX(MT_NORMAL) | PMD_FLAGS |
88 | #define IO_MMUFLAGS PMD_ATTRINDX(MT_DEVICE_nGnRE) | PMD_SECT_XN | PMD_FLAGS | ||
89 | #endif | 87 | #endif |
90 | 88 | ||
91 | /* | 89 | /* |
@@ -368,6 +366,7 @@ ENDPROC(__calc_phys_offset) | |||
368 | * - identity mapping to enable the MMU (low address, TTBR0) | 366 | * - identity mapping to enable the MMU (low address, TTBR0) |
369 | * - first few MB of the kernel linear mapping to jump to once the MMU has | 367 | * - first few MB of the kernel linear mapping to jump to once the MMU has |
370 | * been enabled, including the FDT blob (TTBR1) | 368 | * been enabled, including the FDT blob (TTBR1) |
369 | * - UART mapping if CONFIG_EARLY_PRINTK is enabled (TTBR1) | ||
371 | */ | 370 | */ |
372 | __create_page_tables: | 371 | __create_page_tables: |
373 | pgtbl x25, x26, x24 // idmap_pg_dir and swapper_pg_dir addresses | 372 | pgtbl x25, x26, x24 // idmap_pg_dir and swapper_pg_dir addresses |
@@ -420,6 +419,15 @@ __create_page_tables: | |||
420 | sub x6, x6, #1 // inclusive range | 419 | sub x6, x6, #1 // inclusive range |
421 | create_block_map x0, x7, x3, x5, x6 | 420 | create_block_map x0, x7, x3, x5, x6 |
422 | 1: | 421 | 1: |
422 | #ifdef CONFIG_EARLY_PRINTK | ||
423 | /* | ||
424 | * Create the pgd entry for the UART mapping. The full mapping is done | ||
425 | * later based earlyprintk kernel parameter. | ||
426 | */ | ||
427 | ldr x5, =EARLYCON_IOBASE // UART virtual address | ||
428 | add x0, x26, #2 * PAGE_SIZE // section table address | ||
429 | create_pgd_entry x26, x0, x5, x6, x7 | ||
430 | #endif | ||
423 | ret | 431 | ret |
424 | ENDPROC(__create_page_tables) | 432 | ENDPROC(__create_page_tables) |
425 | .ltorg | 433 | .ltorg |
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index f7073c7b1ca9..1e49e5eb81e9 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c | |||
@@ -1331,6 +1331,11 @@ void perf_callchain_user(struct perf_callchain_entry *entry, | |||
1331 | { | 1331 | { |
1332 | struct frame_tail __user *tail; | 1332 | struct frame_tail __user *tail; |
1333 | 1333 | ||
1334 | if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { | ||
1335 | /* We don't support guest os callchain now */ | ||
1336 | return; | ||
1337 | } | ||
1338 | |||
1334 | tail = (struct frame_tail __user *)regs->regs[29]; | 1339 | tail = (struct frame_tail __user *)regs->regs[29]; |
1335 | 1340 | ||
1336 | while (entry->nr < PERF_MAX_STACK_DEPTH && | 1341 | while (entry->nr < PERF_MAX_STACK_DEPTH && |
@@ -1355,8 +1360,40 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry, | |||
1355 | { | 1360 | { |
1356 | struct stackframe frame; | 1361 | struct stackframe frame; |
1357 | 1362 | ||
1363 | if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { | ||
1364 | /* We don't support guest os callchain now */ | ||
1365 | return; | ||
1366 | } | ||
1367 | |||
1358 | frame.fp = regs->regs[29]; | 1368 | frame.fp = regs->regs[29]; |
1359 | frame.sp = regs->sp; | 1369 | frame.sp = regs->sp; |
1360 | frame.pc = regs->pc; | 1370 | frame.pc = regs->pc; |
1361 | walk_stackframe(&frame, callchain_trace, entry); | 1371 | walk_stackframe(&frame, callchain_trace, entry); |
1362 | } | 1372 | } |
1373 | |||
1374 | unsigned long perf_instruction_pointer(struct pt_regs *regs) | ||
1375 | { | ||
1376 | if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) | ||
1377 | return perf_guest_cbs->get_guest_ip(); | ||
1378 | |||
1379 | return instruction_pointer(regs); | ||
1380 | } | ||
1381 | |||
1382 | unsigned long perf_misc_flags(struct pt_regs *regs) | ||
1383 | { | ||
1384 | int misc = 0; | ||
1385 | |||
1386 | if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { | ||
1387 | if (perf_guest_cbs->is_user_mode()) | ||
1388 | misc |= PERF_RECORD_MISC_GUEST_USER; | ||
1389 | else | ||
1390 | misc |= PERF_RECORD_MISC_GUEST_KERNEL; | ||
1391 | } else { | ||
1392 | if (user_mode(regs)) | ||
1393 | misc |= PERF_RECORD_MISC_USER; | ||
1394 | else | ||
1395 | misc |= PERF_RECORD_MISC_KERNEL; | ||
1396 | } | ||
1397 | |||
1398 | return misc; | ||
1399 | } | ||
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index c7002d40a9b0..0337cdb0667b 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c | |||
@@ -45,9 +45,10 @@ | |||
45 | 45 | ||
46 | #include <asm/compat.h> | 46 | #include <asm/compat.h> |
47 | #include <asm/cacheflush.h> | 47 | #include <asm/cacheflush.h> |
48 | #include <asm/fpsimd.h> | ||
49 | #include <asm/mmu_context.h> | ||
48 | #include <asm/processor.h> | 50 | #include <asm/processor.h> |
49 | #include <asm/stacktrace.h> | 51 | #include <asm/stacktrace.h> |
50 | #include <asm/fpsimd.h> | ||
51 | 52 | ||
52 | static void setup_restart(void) | 53 | static void setup_restart(void) |
53 | { | 54 | { |
@@ -314,6 +315,7 @@ struct task_struct *__switch_to(struct task_struct *prev, | |||
314 | /* the actual thread switch */ | 315 | /* the actual thread switch */ |
315 | last = cpu_switch_to(prev, next); | 316 | last = cpu_switch_to(prev, next); |
316 | 317 | ||
318 | contextidr_thread_switch(next); | ||
317 | return last; | 319 | return last; |
318 | } | 320 | } |
319 | 321 | ||
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c new file mode 100644 index 000000000000..14f73c445ff5 --- /dev/null +++ b/arch/arm64/kernel/psci.c | |||
@@ -0,0 +1,211 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License version 2 as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * Copyright (C) 2013 ARM Limited | ||
12 | * | ||
13 | * Author: Will Deacon <will.deacon@arm.com> | ||
14 | */ | ||
15 | |||
16 | #define pr_fmt(fmt) "psci: " fmt | ||
17 | |||
18 | #include <linux/init.h> | ||
19 | #include <linux/of.h> | ||
20 | |||
21 | #include <asm/compiler.h> | ||
22 | #include <asm/errno.h> | ||
23 | #include <asm/psci.h> | ||
24 | |||
25 | struct psci_operations psci_ops; | ||
26 | |||
27 | static int (*invoke_psci_fn)(u64, u64, u64, u64); | ||
28 | |||
29 | enum psci_function { | ||
30 | PSCI_FN_CPU_SUSPEND, | ||
31 | PSCI_FN_CPU_ON, | ||
32 | PSCI_FN_CPU_OFF, | ||
33 | PSCI_FN_MIGRATE, | ||
34 | PSCI_FN_MAX, | ||
35 | }; | ||
36 | |||
37 | static u32 psci_function_id[PSCI_FN_MAX]; | ||
38 | |||
39 | #define PSCI_RET_SUCCESS 0 | ||
40 | #define PSCI_RET_EOPNOTSUPP -1 | ||
41 | #define PSCI_RET_EINVAL -2 | ||
42 | #define PSCI_RET_EPERM -3 | ||
43 | |||
44 | static int psci_to_linux_errno(int errno) | ||
45 | { | ||
46 | switch (errno) { | ||
47 | case PSCI_RET_SUCCESS: | ||
48 | return 0; | ||
49 | case PSCI_RET_EOPNOTSUPP: | ||
50 | return -EOPNOTSUPP; | ||
51 | case PSCI_RET_EINVAL: | ||
52 | return -EINVAL; | ||
53 | case PSCI_RET_EPERM: | ||
54 | return -EPERM; | ||
55 | }; | ||
56 | |||
57 | return -EINVAL; | ||
58 | } | ||
59 | |||
60 | #define PSCI_POWER_STATE_ID_MASK 0xffff | ||
61 | #define PSCI_POWER_STATE_ID_SHIFT 0 | ||
62 | #define PSCI_POWER_STATE_TYPE_MASK 0x1 | ||
63 | #define PSCI_POWER_STATE_TYPE_SHIFT 16 | ||
64 | #define PSCI_POWER_STATE_AFFL_MASK 0x3 | ||
65 | #define PSCI_POWER_STATE_AFFL_SHIFT 24 | ||
66 | |||
67 | static u32 psci_power_state_pack(struct psci_power_state state) | ||
68 | { | ||
69 | return ((state.id & PSCI_POWER_STATE_ID_MASK) | ||
70 | << PSCI_POWER_STATE_ID_SHIFT) | | ||
71 | ((state.type & PSCI_POWER_STATE_TYPE_MASK) | ||
72 | << PSCI_POWER_STATE_TYPE_SHIFT) | | ||
73 | ((state.affinity_level & PSCI_POWER_STATE_AFFL_MASK) | ||
74 | << PSCI_POWER_STATE_AFFL_SHIFT); | ||
75 | } | ||
76 | |||
77 | /* | ||
78 | * The following two functions are invoked via the invoke_psci_fn pointer | ||
79 | * and will not be inlined, allowing us to piggyback on the AAPCS. | ||
80 | */ | ||
81 | static noinline int __invoke_psci_fn_hvc(u64 function_id, u64 arg0, u64 arg1, | ||
82 | u64 arg2) | ||
83 | { | ||
84 | asm volatile( | ||
85 | __asmeq("%0", "x0") | ||
86 | __asmeq("%1", "x1") | ||
87 | __asmeq("%2", "x2") | ||
88 | __asmeq("%3", "x3") | ||
89 | "hvc #0\n" | ||
90 | : "+r" (function_id) | ||
91 | : "r" (arg0), "r" (arg1), "r" (arg2)); | ||
92 | |||
93 | return function_id; | ||
94 | } | ||
95 | |||
96 | static noinline int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1, | ||
97 | u64 arg2) | ||
98 | { | ||
99 | asm volatile( | ||
100 | __asmeq("%0", "x0") | ||
101 | __asmeq("%1", "x1") | ||
102 | __asmeq("%2", "x2") | ||
103 | __asmeq("%3", "x3") | ||
104 | "smc #0\n" | ||
105 | : "+r" (function_id) | ||
106 | : "r" (arg0), "r" (arg1), "r" (arg2)); | ||
107 | |||
108 | return function_id; | ||
109 | } | ||
110 | |||
111 | static int psci_cpu_suspend(struct psci_power_state state, | ||
112 | unsigned long entry_point) | ||
113 | { | ||
114 | int err; | ||
115 | u32 fn, power_state; | ||
116 | |||
117 | fn = psci_function_id[PSCI_FN_CPU_SUSPEND]; | ||
118 | power_state = psci_power_state_pack(state); | ||
119 | err = invoke_psci_fn(fn, power_state, entry_point, 0); | ||
120 | return psci_to_linux_errno(err); | ||
121 | } | ||
122 | |||
123 | static int psci_cpu_off(struct psci_power_state state) | ||
124 | { | ||
125 | int err; | ||
126 | u32 fn, power_state; | ||
127 | |||
128 | fn = psci_function_id[PSCI_FN_CPU_OFF]; | ||
129 | power_state = psci_power_state_pack(state); | ||
130 | err = invoke_psci_fn(fn, power_state, 0, 0); | ||
131 | return psci_to_linux_errno(err); | ||
132 | } | ||
133 | |||
134 | static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point) | ||
135 | { | ||
136 | int err; | ||
137 | u32 fn; | ||
138 | |||
139 | fn = psci_function_id[PSCI_FN_CPU_ON]; | ||
140 | err = invoke_psci_fn(fn, cpuid, entry_point, 0); | ||
141 | return psci_to_linux_errno(err); | ||
142 | } | ||
143 | |||
144 | static int psci_migrate(unsigned long cpuid) | ||
145 | { | ||
146 | int err; | ||
147 | u32 fn; | ||
148 | |||
149 | fn = psci_function_id[PSCI_FN_MIGRATE]; | ||
150 | err = invoke_psci_fn(fn, cpuid, 0, 0); | ||
151 | return psci_to_linux_errno(err); | ||
152 | } | ||
153 | |||
154 | static const struct of_device_id psci_of_match[] __initconst = { | ||
155 | { .compatible = "arm,psci", }, | ||
156 | {}, | ||
157 | }; | ||
158 | |||
159 | int __init psci_init(void) | ||
160 | { | ||
161 | struct device_node *np; | ||
162 | const char *method; | ||
163 | u32 id; | ||
164 | int err = 0; | ||
165 | |||
166 | np = of_find_matching_node(NULL, psci_of_match); | ||
167 | if (!np) | ||
168 | return -ENODEV; | ||
169 | |||
170 | pr_info("probing function IDs from device-tree\n"); | ||
171 | |||
172 | if (of_property_read_string(np, "method", &method)) { | ||
173 | pr_warning("missing \"method\" property\n"); | ||
174 | err = -ENXIO; | ||
175 | goto out_put_node; | ||
176 | } | ||
177 | |||
178 | if (!strcmp("hvc", method)) { | ||
179 | invoke_psci_fn = __invoke_psci_fn_hvc; | ||
180 | } else if (!strcmp("smc", method)) { | ||
181 | invoke_psci_fn = __invoke_psci_fn_smc; | ||
182 | } else { | ||
183 | pr_warning("invalid \"method\" property: %s\n", method); | ||
184 | err = -EINVAL; | ||
185 | goto out_put_node; | ||
186 | } | ||
187 | |||
188 | if (!of_property_read_u32(np, "cpu_suspend", &id)) { | ||
189 | psci_function_id[PSCI_FN_CPU_SUSPEND] = id; | ||
190 | psci_ops.cpu_suspend = psci_cpu_suspend; | ||
191 | } | ||
192 | |||
193 | if (!of_property_read_u32(np, "cpu_off", &id)) { | ||
194 | psci_function_id[PSCI_FN_CPU_OFF] = id; | ||
195 | psci_ops.cpu_off = psci_cpu_off; | ||
196 | } | ||
197 | |||
198 | if (!of_property_read_u32(np, "cpu_on", &id)) { | ||
199 | psci_function_id[PSCI_FN_CPU_ON] = id; | ||
200 | psci_ops.cpu_on = psci_cpu_on; | ||
201 | } | ||
202 | |||
203 | if (!of_property_read_u32(np, "migrate", &id)) { | ||
204 | psci_function_id[PSCI_FN_MIGRATE] = id; | ||
205 | psci_ops.migrate = psci_migrate; | ||
206 | } | ||
207 | |||
208 | out_put_node: | ||
209 | of_node_put(np); | ||
210 | return err; | ||
211 | } | ||
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 7665a9bfdb1e..113db863f832 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <linux/proc_fs.h> | 39 | #include <linux/proc_fs.h> |
40 | #include <linux/memblock.h> | 40 | #include <linux/memblock.h> |
41 | #include <linux/of_fdt.h> | 41 | #include <linux/of_fdt.h> |
42 | #include <linux/of_platform.h> | ||
42 | 43 | ||
43 | #include <asm/cputype.h> | 44 | #include <asm/cputype.h> |
44 | #include <asm/elf.h> | 45 | #include <asm/elf.h> |
@@ -49,6 +50,7 @@ | |||
49 | #include <asm/tlbflush.h> | 50 | #include <asm/tlbflush.h> |
50 | #include <asm/traps.h> | 51 | #include <asm/traps.h> |
51 | #include <asm/memblock.h> | 52 | #include <asm/memblock.h> |
53 | #include <asm/psci.h> | ||
52 | 54 | ||
53 | unsigned int processor_id; | 55 | unsigned int processor_id; |
54 | EXPORT_SYMBOL(processor_id); | 56 | EXPORT_SYMBOL(processor_id); |
@@ -260,6 +262,8 @@ void __init setup_arch(char **cmdline_p) | |||
260 | 262 | ||
261 | unflatten_device_tree(); | 263 | unflatten_device_tree(); |
262 | 264 | ||
265 | psci_init(); | ||
266 | |||
263 | #ifdef CONFIG_SMP | 267 | #ifdef CONFIG_SMP |
264 | smp_init_cpus(); | 268 | smp_init_cpus(); |
265 | #endif | 269 | #endif |
@@ -289,6 +293,13 @@ static int __init topology_init(void) | |||
289 | } | 293 | } |
290 | subsys_initcall(topology_init); | 294 | subsys_initcall(topology_init); |
291 | 295 | ||
296 | static int __init arm64_device_probe(void) | ||
297 | { | ||
298 | of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); | ||
299 | return 0; | ||
300 | } | ||
301 | device_initcall(arm64_device_probe); | ||
302 | |||
292 | static const char *hwcap_str[] = { | 303 | static const char *hwcap_str[] = { |
293 | "fp", | 304 | "fp", |
294 | "asimd", | 305 | "asimd", |
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c index a4db3d22aac4..41db148a7eb9 100644 --- a/arch/arm64/kernel/signal32.c +++ b/arch/arm64/kernel/signal32.c | |||
@@ -76,7 +76,7 @@ struct compat_sigcontext { | |||
76 | 76 | ||
77 | struct compat_ucontext { | 77 | struct compat_ucontext { |
78 | compat_ulong_t uc_flags; | 78 | compat_ulong_t uc_flags; |
79 | struct compat_ucontext *uc_link; | 79 | compat_uptr_t uc_link; |
80 | compat_stack_t uc_stack; | 80 | compat_stack_t uc_stack; |
81 | struct compat_sigcontext uc_mcontext; | 81 | struct compat_sigcontext uc_mcontext; |
82 | compat_sigset_t uc_sigmask; | 82 | compat_sigset_t uc_sigmask; |
@@ -703,7 +703,7 @@ int compat_setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info, | |||
703 | err |= copy_siginfo_to_user32(&frame->info, info); | 703 | err |= copy_siginfo_to_user32(&frame->info, info); |
704 | 704 | ||
705 | __put_user_error(0, &frame->sig.uc.uc_flags, err); | 705 | __put_user_error(0, &frame->sig.uc.uc_flags, err); |
706 | __put_user_error(NULL, &frame->sig.uc.uc_link, err); | 706 | __put_user_error(0, &frame->sig.uc.uc_link, err); |
707 | 707 | ||
708 | memset(&stack, 0, sizeof(stack)); | 708 | memset(&stack, 0, sizeof(stack)); |
709 | stack.ss_sp = (compat_uptr_t)current->sas_ss_sp; | 709 | stack.ss_sp = (compat_uptr_t)current->sas_ss_sp; |
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 538300f2273d..bdd34597254b 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c | |||
@@ -233,7 +233,28 @@ void __init smp_prepare_boot_cpu(void) | |||
233 | } | 233 | } |
234 | 234 | ||
235 | static void (*smp_cross_call)(const struct cpumask *, unsigned int); | 235 | static void (*smp_cross_call)(const struct cpumask *, unsigned int); |
236 | static phys_addr_t cpu_release_addr[NR_CPUS]; | 236 | |
237 | static const struct smp_enable_ops *enable_ops[] __initconst = { | ||
238 | &smp_spin_table_ops, | ||
239 | &smp_psci_ops, | ||
240 | NULL, | ||
241 | }; | ||
242 | |||
243 | static const struct smp_enable_ops *smp_enable_ops[NR_CPUS]; | ||
244 | |||
245 | static const struct smp_enable_ops * __init smp_get_enable_ops(const char *name) | ||
246 | { | ||
247 | const struct smp_enable_ops *ops = enable_ops[0]; | ||
248 | |||
249 | while (ops) { | ||
250 | if (!strcmp(name, ops->name)) | ||
251 | return ops; | ||
252 | |||
253 | ops++; | ||
254 | } | ||
255 | |||
256 | return NULL; | ||
257 | } | ||
237 | 258 | ||
238 | /* | 259 | /* |
239 | * Enumerate the possible CPU set from the device tree. | 260 | * Enumerate the possible CPU set from the device tree. |
@@ -252,22 +273,22 @@ void __init smp_init_cpus(void) | |||
252 | * We currently support only the "spin-table" enable-method. | 273 | * We currently support only the "spin-table" enable-method. |
253 | */ | 274 | */ |
254 | enable_method = of_get_property(dn, "enable-method", NULL); | 275 | enable_method = of_get_property(dn, "enable-method", NULL); |
255 | if (!enable_method || strcmp(enable_method, "spin-table")) { | 276 | if (!enable_method) { |
256 | pr_err("CPU %d: missing or invalid enable-method property: %s\n", | 277 | pr_err("CPU %d: missing enable-method property\n", cpu); |
257 | cpu, enable_method); | ||
258 | goto next; | 278 | goto next; |
259 | } | 279 | } |
260 | 280 | ||
261 | /* | 281 | smp_enable_ops[cpu] = smp_get_enable_ops(enable_method); |
262 | * Determine the address from which the CPU is polling. | 282 | |
263 | */ | 283 | if (!smp_enable_ops[cpu]) { |
264 | if (of_property_read_u64(dn, "cpu-release-addr", | 284 | pr_err("CPU %d: invalid enable-method property: %s\n", |
265 | &cpu_release_addr[cpu])) { | 285 | cpu, enable_method); |
266 | pr_err("CPU %d: missing or invalid cpu-release-addr property\n", | ||
267 | cpu); | ||
268 | goto next; | 286 | goto next; |
269 | } | 287 | } |
270 | 288 | ||
289 | if (smp_enable_ops[cpu]->init_cpu(dn, cpu)) | ||
290 | goto next; | ||
291 | |||
271 | set_cpu_possible(cpu, true); | 292 | set_cpu_possible(cpu, true); |
272 | next: | 293 | next: |
273 | cpu++; | 294 | cpu++; |
@@ -281,8 +302,7 @@ next: | |||
281 | 302 | ||
282 | void __init smp_prepare_cpus(unsigned int max_cpus) | 303 | void __init smp_prepare_cpus(unsigned int max_cpus) |
283 | { | 304 | { |
284 | int cpu; | 305 | int cpu, err; |
285 | void **release_addr; | ||
286 | unsigned int ncores = num_possible_cpus(); | 306 | unsigned int ncores = num_possible_cpus(); |
287 | 307 | ||
288 | /* | 308 | /* |
@@ -291,30 +311,35 @@ void __init smp_prepare_cpus(unsigned int max_cpus) | |||
291 | if (max_cpus > ncores) | 311 | if (max_cpus > ncores) |
292 | max_cpus = ncores; | 312 | max_cpus = ncores; |
293 | 313 | ||
314 | /* Don't bother if we're effectively UP */ | ||
315 | if (max_cpus <= 1) | ||
316 | return; | ||
317 | |||
294 | /* | 318 | /* |
295 | * Initialise the present map (which describes the set of CPUs | 319 | * Initialise the present map (which describes the set of CPUs |
296 | * actually populated at the present time) and release the | 320 | * actually populated at the present time) and release the |
297 | * secondaries from the bootloader. | 321 | * secondaries from the bootloader. |
322 | * | ||
323 | * Make sure we online at most (max_cpus - 1) additional CPUs. | ||
298 | */ | 324 | */ |
325 | max_cpus--; | ||
299 | for_each_possible_cpu(cpu) { | 326 | for_each_possible_cpu(cpu) { |
300 | if (max_cpus == 0) | 327 | if (max_cpus == 0) |
301 | break; | 328 | break; |
302 | 329 | ||
303 | if (!cpu_release_addr[cpu]) | 330 | if (cpu == smp_processor_id()) |
331 | continue; | ||
332 | |||
333 | if (!smp_enable_ops[cpu]) | ||
304 | continue; | 334 | continue; |
305 | 335 | ||
306 | release_addr = __va(cpu_release_addr[cpu]); | 336 | err = smp_enable_ops[cpu]->prepare_cpu(cpu); |
307 | release_addr[0] = (void *)__pa(secondary_holding_pen); | 337 | if (err) |
308 | __flush_dcache_area(release_addr, sizeof(release_addr[0])); | 338 | continue; |
309 | 339 | ||
310 | set_cpu_present(cpu, true); | 340 | set_cpu_present(cpu, true); |
311 | max_cpus--; | 341 | max_cpus--; |
312 | } | 342 | } |
313 | |||
314 | /* | ||
315 | * Send an event to wake up the secondaries. | ||
316 | */ | ||
317 | sev(); | ||
318 | } | 343 | } |
319 | 344 | ||
320 | 345 | ||
diff --git a/arch/arm64/kernel/smp_psci.c b/arch/arm64/kernel/smp_psci.c new file mode 100644 index 000000000000..112091684c22 --- /dev/null +++ b/arch/arm64/kernel/smp_psci.c | |||
@@ -0,0 +1,52 @@ | |||
1 | /* | ||
2 | * PSCI SMP initialisation | ||
3 | * | ||
4 | * Copyright (C) 2013 ARM Ltd. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | |||
19 | #include <linux/init.h> | ||
20 | #include <linux/of.h> | ||
21 | #include <linux/smp.h> | ||
22 | |||
23 | #include <asm/psci.h> | ||
24 | |||
25 | static int __init smp_psci_init_cpu(struct device_node *dn, int cpu) | ||
26 | { | ||
27 | return 0; | ||
28 | } | ||
29 | |||
30 | static int __init smp_psci_prepare_cpu(int cpu) | ||
31 | { | ||
32 | int err; | ||
33 | |||
34 | if (!psci_ops.cpu_on) { | ||
35 | pr_err("psci: no cpu_on method, not booting CPU%d\n", cpu); | ||
36 | return -ENODEV; | ||
37 | } | ||
38 | |||
39 | err = psci_ops.cpu_on(cpu, __pa(secondary_holding_pen)); | ||
40 | if (err) { | ||
41 | pr_err("psci: failed to boot CPU%d (%d)\n", cpu, err); | ||
42 | return err; | ||
43 | } | ||
44 | |||
45 | return 0; | ||
46 | } | ||
47 | |||
48 | const struct smp_enable_ops smp_psci_ops __initconst = { | ||
49 | .name = "psci", | ||
50 | .init_cpu = smp_psci_init_cpu, | ||
51 | .prepare_cpu = smp_psci_prepare_cpu, | ||
52 | }; | ||
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c new file mode 100644 index 000000000000..7c35fa682f76 --- /dev/null +++ b/arch/arm64/kernel/smp_spin_table.c | |||
@@ -0,0 +1,66 @@ | |||
1 | /* | ||
2 | * Spin Table SMP initialisation | ||
3 | * | ||
4 | * Copyright (C) 2013 ARM Ltd. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | |||
19 | #include <linux/init.h> | ||
20 | #include <linux/of.h> | ||
21 | #include <linux/smp.h> | ||
22 | |||
23 | #include <asm/cacheflush.h> | ||
24 | |||
25 | static phys_addr_t cpu_release_addr[NR_CPUS]; | ||
26 | |||
27 | static int __init smp_spin_table_init_cpu(struct device_node *dn, int cpu) | ||
28 | { | ||
29 | /* | ||
30 | * Determine the address from which the CPU is polling. | ||
31 | */ | ||
32 | if (of_property_read_u64(dn, "cpu-release-addr", | ||
33 | &cpu_release_addr[cpu])) { | ||
34 | pr_err("CPU %d: missing or invalid cpu-release-addr property\n", | ||
35 | cpu); | ||
36 | |||
37 | return -1; | ||
38 | } | ||
39 | |||
40 | return 0; | ||
41 | } | ||
42 | |||
43 | static int __init smp_spin_table_prepare_cpu(int cpu) | ||
44 | { | ||
45 | void **release_addr; | ||
46 | |||
47 | if (!cpu_release_addr[cpu]) | ||
48 | return -ENODEV; | ||
49 | |||
50 | release_addr = __va(cpu_release_addr[cpu]); | ||
51 | release_addr[0] = (void *)__pa(secondary_holding_pen); | ||
52 | __flush_dcache_area(release_addr, sizeof(release_addr[0])); | ||
53 | |||
54 | /* | ||
55 | * Send an event to wake up the secondary CPU. | ||
56 | */ | ||
57 | sev(); | ||
58 | |||
59 | return 0; | ||
60 | } | ||
61 | |||
62 | const struct smp_enable_ops smp_spin_table_ops __initconst = { | ||
63 | .name = "spin-table", | ||
64 | .init_cpu = smp_spin_table_init_cpu, | ||
65 | .prepare_cpu = smp_spin_table_prepare_cpu, | ||
66 | }; | ||
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index a6885d896ab6..f4dd585898c5 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/nodemask.h> | 25 | #include <linux/nodemask.h> |
26 | #include <linux/memblock.h> | 26 | #include <linux/memblock.h> |
27 | #include <linux/fs.h> | 27 | #include <linux/fs.h> |
28 | #include <linux/io.h> | ||
28 | 29 | ||
29 | #include <asm/cputype.h> | 30 | #include <asm/cputype.h> |
30 | #include <asm/sections.h> | 31 | #include <asm/sections.h> |
@@ -251,6 +252,47 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt, | |||
251 | } while (pgd++, addr = next, addr != end); | 252 | } while (pgd++, addr = next, addr != end); |
252 | } | 253 | } |
253 | 254 | ||
255 | #ifdef CONFIG_EARLY_PRINTK | ||
256 | /* | ||
257 | * Create an early I/O mapping using the pgd/pmd entries already populated | ||
258 | * in head.S as this function is called too early to allocate any memory. The | ||
259 | * mapping size is 2MB with 4KB pages or 64KB with 64KB pages. | ||
260 | */ | ||
261 | void __iomem * __init early_io_map(phys_addr_t phys, unsigned long virt) | ||
262 | { | ||
263 | unsigned long size, mask; | ||
264 | bool page64k = IS_ENABLED(ARM64_64K_PAGES); | ||
265 | pgd_t *pgd; | ||
266 | pud_t *pud; | ||
267 | pmd_t *pmd; | ||
268 | pte_t *pte; | ||
269 | |||
270 | /* | ||
271 | * No early pte entries with !ARM64_64K_PAGES configuration, so using | ||
272 | * sections (pmd). | ||
273 | */ | ||
274 | size = page64k ? PAGE_SIZE : SECTION_SIZE; | ||
275 | mask = ~(size - 1); | ||
276 | |||
277 | pgd = pgd_offset_k(virt); | ||
278 | pud = pud_offset(pgd, virt); | ||
279 | if (pud_none(*pud)) | ||
280 | return NULL; | ||
281 | pmd = pmd_offset(pud, virt); | ||
282 | |||
283 | if (page64k) { | ||
284 | if (pmd_none(*pmd)) | ||
285 | return NULL; | ||
286 | pte = pte_offset_kernel(pmd, virt); | ||
287 | set_pte(pte, __pte((phys & mask) | PROT_DEVICE_nGnRE)); | ||
288 | } else { | ||
289 | set_pmd(pmd, __pmd((phys & mask) | PROT_SECT_DEVICE_nGnRE)); | ||
290 | } | ||
291 | |||
292 | return (void __iomem *)((virt & mask) + (phys & ~mask)); | ||
293 | } | ||
294 | #endif | ||
295 | |||
254 | static void __init map_mem(void) | 296 | static void __init map_mem(void) |
255 | { | 297 | { |
256 | struct memblock_region *reg; | 298 | struct memblock_region *reg; |