Diffstat (limited to 'arch/arm64/include/asm')
-rw-r--r--   arch/arm64/include/asm/Kbuild           2
-rw-r--r--   arch/arm64/include/asm/atomic.h       132
-rw-r--r--   arch/arm64/include/asm/cmpxchg.h       74
-rw-r--r--   arch/arm64/include/asm/futex.h          2
-rw-r--r--   arch/arm64/include/asm/io.h             3
-rw-r--r--   arch/arm64/include/asm/memory.h         1
-rw-r--r--   arch/arm64/include/asm/mmu.h            1
-rw-r--r--   arch/arm64/include/asm/mmu_context.h   15
-rw-r--r--   arch/arm64/include/asm/perf_event.h     7
-rw-r--r--   arch/arm64/include/asm/psci.h          38
-rw-r--r--   arch/arm64/include/asm/ptrace.h        10
-rw-r--r--   arch/arm64/include/asm/smp.h           11
-rw-r--r--   arch/arm64/include/asm/spinlock.h      78
13 files changed, 230 insertions, 144 deletions
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 14a9d5a2b85b..e5fe4f99fe10 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -19,6 +19,7 @@ generic-y += ipcbuf.h
 generic-y += irq_regs.h
 generic-y += kdebug.h
 generic-y += kmap_types.h
+generic-y += kvm_para.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mman.h
@@ -48,3 +49,4 @@ generic-y += trace_clock.h
 generic-y += types.h
 generic-y += unaligned.h
 generic-y += user.h
+generic-y += xor.h
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 407717ba060e..836364468571 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -49,12 +49,12 @@ static inline void atomic_add(int i, atomic_t *v)
 	int result;
 
 	asm volatile("// atomic_add\n"
-"1:	ldxr	%w0, [%3]\n"
-"	add	%w0, %w0, %w4\n"
-"	stxr	%w1, %w0, [%3]\n"
+"1:	ldxr	%w0, %2\n"
+"	add	%w0, %w0, %w3\n"
+"	stxr	%w1, %w0, %2\n"
 "	cbnz	%w1, 1b"
-	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
-	: "r" (&v->counter), "Ir" (i)
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+	: "Ir" (i)
 	: "cc");
 }
 
@@ -64,13 +64,13 @@ static inline int atomic_add_return(int i, atomic_t *v)
 	int result;
 
 	asm volatile("// atomic_add_return\n"
-"1:	ldaxr	%w0, [%3]\n"
-"	add	%w0, %w0, %w4\n"
-"	stlxr	%w1, %w0, [%3]\n"
+"1:	ldaxr	%w0, %2\n"
+"	add	%w0, %w0, %w3\n"
+"	stlxr	%w1, %w0, %2\n"
 "	cbnz	%w1, 1b"
-	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
-	: "r" (&v->counter), "Ir" (i)
-	: "cc");
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+	: "Ir" (i)
+	: "cc", "memory");
 
 	return result;
 }
@@ -81,12 +81,12 @@ static inline void atomic_sub(int i, atomic_t *v)
 	int result;
 
 	asm volatile("// atomic_sub\n"
-"1:	ldxr	%w0, [%3]\n"
-"	sub	%w0, %w0, %w4\n"
-"	stxr	%w1, %w0, [%3]\n"
+"1:	ldxr	%w0, %2\n"
+"	sub	%w0, %w0, %w3\n"
+"	stxr	%w1, %w0, %2\n"
 "	cbnz	%w1, 1b"
-	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
-	: "r" (&v->counter), "Ir" (i)
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+	: "Ir" (i)
 	: "cc");
 }
 
@@ -96,13 +96,13 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 	int result;
 
 	asm volatile("// atomic_sub_return\n"
-"1:	ldaxr	%w0, [%3]\n"
-"	sub	%w0, %w0, %w4\n"
-"	stlxr	%w1, %w0, [%3]\n"
+"1:	ldaxr	%w0, %2\n"
+"	sub	%w0, %w0, %w3\n"
+"	stlxr	%w1, %w0, %2\n"
 "	cbnz	%w1, 1b"
-	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
-	: "r" (&v->counter), "Ir" (i)
-	: "cc");
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+	: "Ir" (i)
+	: "cc", "memory");
 
 	return result;
 }
@@ -113,15 +113,15 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 	int oldval;
 
 	asm volatile("// atomic_cmpxchg\n"
-"1:	ldaxr	%w1, [%3]\n"
-"	cmp	%w1, %w4\n"
+"1:	ldaxr	%w1, %2\n"
+"	cmp	%w1, %w3\n"
 "	b.ne	2f\n"
-"	stlxr	%w0, %w5, [%3]\n"
+"	stlxr	%w0, %w4, %2\n"
 "	cbnz	%w0, 1b\n"
 "2:"
-	: "=&r" (tmp), "=&r" (oldval), "+o" (ptr->counter)
-	: "r" (&ptr->counter), "Ir" (old), "r" (new)
-	: "cc");
+	: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
+	: "Ir" (old), "r" (new)
+	: "cc", "memory");
 
 	return oldval;
 }
@@ -131,12 +131,12 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 	unsigned long tmp, tmp2;
 
 	asm volatile("// atomic_clear_mask\n"
-"1:	ldxr	%0, [%3]\n"
-"	bic	%0, %0, %4\n"
-"	stxr	%w1, %0, [%3]\n"
+"1:	ldxr	%0, %2\n"
+"	bic	%0, %0, %3\n"
+"	stxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b"
-	: "=&r" (tmp), "=&r" (tmp2), "+o" (*addr)
-	: "r" (addr), "Ir" (mask)
+	: "=&r" (tmp), "=&r" (tmp2), "+Q" (*addr)
+	: "Ir" (mask)
 	: "cc");
 }
 
@@ -182,12 +182,12 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
 	unsigned long tmp;
 
 	asm volatile("// atomic64_add\n"
-"1:	ldxr	%0, [%3]\n"
-"	add	%0, %0, %4\n"
-"	stxr	%w1, %0, [%3]\n"
+"1:	ldxr	%0, %2\n"
+"	add	%0, %0, %3\n"
+"	stxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b"
-	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
-	: "r" (&v->counter), "Ir" (i)
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+	: "Ir" (i)
 	: "cc");
 }
 
@@ -197,13 +197,13 @@ static inline long atomic64_add_return(long i, atomic64_t *v)
 	unsigned long tmp;
 
 	asm volatile("// atomic64_add_return\n"
-"1:	ldaxr	%0, [%3]\n"
-"	add	%0, %0, %4\n"
-"	stlxr	%w1, %0, [%3]\n"
+"1:	ldaxr	%0, %2\n"
+"	add	%0, %0, %3\n"
+"	stlxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b"
-	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
-	: "r" (&v->counter), "Ir" (i)
-	: "cc");
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+	: "Ir" (i)
+	: "cc", "memory");
 
 	return result;
 }
@@ -214,12 +214,12 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
 	unsigned long tmp;
 
 	asm volatile("// atomic64_sub\n"
-"1:	ldxr	%0, [%3]\n"
-"	sub	%0, %0, %4\n"
-"	stxr	%w1, %0, [%3]\n"
+"1:	ldxr	%0, %2\n"
+"	sub	%0, %0, %3\n"
+"	stxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b"
-	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
-	: "r" (&v->counter), "Ir" (i)
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+	: "Ir" (i)
 	: "cc");
 }
 
@@ -229,13 +229,13 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
 	unsigned long tmp;
 
 	asm volatile("// atomic64_sub_return\n"
-"1:	ldaxr	%0, [%3]\n"
-"	sub	%0, %0, %4\n"
-"	stlxr	%w1, %0, [%3]\n"
+"1:	ldaxr	%0, %2\n"
+"	sub	%0, %0, %3\n"
+"	stlxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b"
-	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
-	: "r" (&v->counter), "Ir" (i)
-	: "cc");
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+	: "Ir" (i)
+	: "cc", "memory");
 
 	return result;
 }
@@ -246,15 +246,15 @@ static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
 	unsigned long res;
 
 	asm volatile("// atomic64_cmpxchg\n"
-"1:	ldaxr	%1, [%3]\n"
-"	cmp	%1, %4\n"
+"1:	ldaxr	%1, %2\n"
+"	cmp	%1, %3\n"
 "	b.ne	2f\n"
-"	stlxr	%w0, %5, [%3]\n"
+"	stlxr	%w0, %4, %2\n"
 "	cbnz	%w0, 1b\n"
 "2:"
-	: "=&r" (res), "=&r" (oldval), "+o" (ptr->counter)
-	: "r" (&ptr->counter), "Ir" (old), "r" (new)
-	: "cc");
+	: "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
+	: "Ir" (old), "r" (new)
+	: "cc", "memory");
 
 	return oldval;
 }
@@ -267,15 +267,15 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
 	unsigned long tmp;
 
 	asm volatile("// atomic64_dec_if_positive\n"
-"1:	ldaxr	%0, [%3]\n"
+"1:	ldaxr	%0, %2\n"
 "	subs	%0, %0, #1\n"
 "	b.mi	2f\n"
-"	stlxr	%w1, %0, [%3]\n"
+"	stlxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b\n"
 "2:"
-	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
-	: "r" (&v->counter)
-	: "cc");
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+	:
+	: "cc", "memory");
 
 	return result;
 }
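For reference, the constraint change in the atomic.h hunks above is self-contained enough to try outside the kernel. The sketch below is illustrative only (demo_atomic_add is not part of this patch); it uses the same "+Q" memory operand so the compiler emits the base-register-only addressing that ldxr/stxr require and knows the asm reads and writes *counter.

/*
 * Minimal standalone sketch of the "+Q" pattern adopted above.
 * Build with an AArch64 GCC; mirrors the post-patch atomic_add().
 */
static inline void demo_atomic_add(int i, int *counter)
{
	unsigned long tmp;
	int result;

	asm volatile("// demo_atomic_add\n"
"1:	ldxr	%w0, %2\n"			/* exclusive load of *counter */
"	add	%w0, %w0, %w3\n"
"	stxr	%w1, %w0, %2\n"			/* store back, %w1 = failure flag */
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (*counter)
	: "Ir" (i)
	: "cc");
}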
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index e0e65b069d9e..968b5cbfc260 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -29,39 +29,39 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 	switch (size) {
 	case 1:
 		asm volatile("// __xchg1\n"
-		"1:	ldaxrb	%w0, [%3]\n"
-		"	stlxrb	%w1, %w2, [%3]\n"
+		"1:	ldaxrb	%w0, %2\n"
+		"	stlxrb	%w1, %w3, %2\n"
 		"	cbnz	%w1, 1b\n"
-			: "=&r" (ret), "=&r" (tmp)
-			: "r" (x), "r" (ptr)
-			: "memory", "cc");
+			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
+			: "r" (x)
+			: "cc", "memory");
 		break;
 	case 2:
 		asm volatile("// __xchg2\n"
-		"1:	ldaxrh	%w0, [%3]\n"
-		"	stlxrh	%w1, %w2, [%3]\n"
+		"1:	ldaxrh	%w0, %2\n"
+		"	stlxrh	%w1, %w3, %2\n"
 		"	cbnz	%w1, 1b\n"
-			: "=&r" (ret), "=&r" (tmp)
-			: "r" (x), "r" (ptr)
-			: "memory", "cc");
+			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
+			: "r" (x)
+			: "cc", "memory");
 		break;
 	case 4:
 		asm volatile("// __xchg4\n"
-		"1:	ldaxr	%w0, [%3]\n"
-		"	stlxr	%w1, %w2, [%3]\n"
+		"1:	ldaxr	%w0, %2\n"
+		"	stlxr	%w1, %w3, %2\n"
 		"	cbnz	%w1, 1b\n"
-			: "=&r" (ret), "=&r" (tmp)
-			: "r" (x), "r" (ptr)
-			: "memory", "cc");
+			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
+			: "r" (x)
+			: "cc", "memory");
 		break;
 	case 8:
 		asm volatile("// __xchg8\n"
-		"1:	ldaxr	%0, [%3]\n"
-		"	stlxr	%w1, %2, [%3]\n"
+		"1:	ldaxr	%0, %2\n"
+		"	stlxr	%w1, %3, %2\n"
 		"	cbnz	%w1, 1b\n"
-			: "=&r" (ret), "=&r" (tmp)
-			: "r" (x), "r" (ptr)
-			: "memory", "cc");
+			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
+			: "r" (x)
+			: "cc", "memory");
 		break;
 	default:
 		BUILD_BUG();
@@ -82,14 +82,14 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 	case 1:
 		do {
 			asm volatile("// __cmpxchg1\n"
-			"	ldxrb	%w1, [%2]\n"
+			"	ldxrb	%w1, %2\n"
 			"	mov	%w0, #0\n"
 			"	cmp	%w1, %w3\n"
 			"	b.ne	1f\n"
-			"	stxrb	%w0, %w4, [%2]\n"
+			"	stxrb	%w0, %w4, %2\n"
 			"1:\n"
-				: "=&r" (res), "=&r" (oldval)
-				: "r" (ptr), "Ir" (old), "r" (new)
+				: "=&r" (res), "=&r" (oldval), "+Q" (*(u8 *)ptr)
+				: "Ir" (old), "r" (new)
 				: "cc");
 		} while (res);
 		break;
@@ -97,29 +97,29 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 	case 2:
 		do {
 			asm volatile("// __cmpxchg2\n"
-			"	ldxrh	%w1, [%2]\n"
+			"	ldxrh	%w1, %2\n"
 			"	mov	%w0, #0\n"
 			"	cmp	%w1, %w3\n"
 			"	b.ne	1f\n"
-			"	stxrh	%w0, %w4, [%2]\n"
+			"	stxrh	%w0, %w4, %2\n"
 			"1:\n"
-				: "=&r" (res), "=&r" (oldval)
-				: "r" (ptr), "Ir" (old), "r" (new)
-				: "memory", "cc");
+				: "=&r" (res), "=&r" (oldval), "+Q" (*(u16 *)ptr)
+				: "Ir" (old), "r" (new)
+				: "cc");
 		} while (res);
 		break;
 
 	case 4:
 		do {
 			asm volatile("// __cmpxchg4\n"
-			"	ldxr	%w1, [%2]\n"
+			"	ldxr	%w1, %2\n"
 			"	mov	%w0, #0\n"
 			"	cmp	%w1, %w3\n"
 			"	b.ne	1f\n"
-			"	stxr	%w0, %w4, [%2]\n"
+			"	stxr	%w0, %w4, %2\n"
 			"1:\n"
-				: "=&r" (res), "=&r" (oldval)
-				: "r" (ptr), "Ir" (old), "r" (new)
+				: "=&r" (res), "=&r" (oldval), "+Q" (*(u32 *)ptr)
+				: "Ir" (old), "r" (new)
 				: "cc");
 		} while (res);
 		break;
@@ -127,14 +127,14 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 	case 8:
 		do {
 			asm volatile("// __cmpxchg8\n"
-			"	ldxr	%1, [%2]\n"
+			"	ldxr	%1, %2\n"
 			"	mov	%w0, #0\n"
 			"	cmp	%1, %3\n"
 			"	b.ne	1f\n"
-			"	stxr	%w0, %4, [%2]\n"
+			"	stxr	%w0, %4, %2\n"
 			"1:\n"
-				: "=&r" (res), "=&r" (oldval)
-				: "r" (ptr), "Ir" (old), "r" (new)
+				: "=&r" (res), "=&r" (oldval), "+Q" (*(u64 *)ptr)
+				: "Ir" (old), "r" (new)
 				: "cc");
 		} while (res);
 		break;
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index 3468ae8439fa..c582fa316366 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -39,7 +39,7 @@
 "	.popsection\n"							\
 	: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp)	\
 	: "r" (oparg), "Ir" (-EFAULT)					\
-	: "cc")
+	: "cc", "memory")
 
 static inline int
 futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index d2f05a608274..57f12c991de2 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -230,6 +230,9 @@ extern void __iounmap(volatile void __iomem *addr);
 #define ioremap_wc(addr, size)	__ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
 #define iounmap				__iounmap
 
+#define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF)
+#define PROT_SECT_DEVICE_nGnRE	(PROT_SECT_DEFAULT | PTE_PXN | PTE_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
+
 #define ARCH_HAS_IOREMAP_WC
 #include <asm-generic/iomap.h>
 
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 1cac16a001cb..381f556b664e 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -43,6 +43,7 @@
 #define PAGE_OFFSET		UL(0xffffffc000000000)
 #define MODULES_END		(PAGE_OFFSET)
 #define MODULES_VADDR		(MODULES_END - SZ_64M)
+#define EARLYCON_IOBASE		(MODULES_VADDR - SZ_4M)
 #define VA_BITS			(39)
 #define TASK_SIZE_64		(UL(1) << VA_BITS)
 
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index d4f7fd5b9e33..2494fc01896a 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -26,5 +26,6 @@ typedef struct {
 
 extern void paging_init(void);
 extern void setup_mm_for_reboot(void);
+extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
 
 #endif
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index f68465dee026..e2bc385adb6b 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -35,6 +35,21 @@ extern unsigned int cpu_last_asid;
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void __new_context(struct mm_struct *mm);
 
+#ifdef CONFIG_PID_IN_CONTEXTIDR
+static inline void contextidr_thread_switch(struct task_struct *next)
+{
+	asm(
+	"	msr	contextidr_el1, %0\n"
+	"	isb"
+	:
+	: "r" (task_pid_nr(next)));
+}
+#else
+static inline void contextidr_thread_switch(struct task_struct *next)
+{
+}
+#endif
+
 /*
  * Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0.
  */
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
index a6fffd511c5e..d26d1d53c0d7 100644
--- a/arch/arm64/include/asm/perf_event.h
+++ b/arch/arm64/include/asm/perf_event.h
@@ -17,6 +17,11 @@
 #ifndef __ASM_PERF_EVENT_H
 #define __ASM_PERF_EVENT_H
 
-/* It's quiet around here... */
+#ifdef CONFIG_HW_PERF_EVENTS
+struct pt_regs;
+extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
+extern unsigned long perf_misc_flags(struct pt_regs *regs);
+#define perf_misc_flags(regs)	perf_misc_flags(regs)
+#endif
 
 #endif
diff --git a/arch/arm64/include/asm/psci.h b/arch/arm64/include/asm/psci.h
new file mode 100644
index 000000000000..0604237ecd99
--- /dev/null
+++ b/arch/arm64/include/asm/psci.h
@@ -0,0 +1,38 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2013 ARM Limited
+ */
+
+#ifndef __ASM_PSCI_H
+#define __ASM_PSCI_H
+
+#define PSCI_POWER_STATE_TYPE_STANDBY		0
+#define PSCI_POWER_STATE_TYPE_POWER_DOWN	1
+
+struct psci_power_state {
+	u16	id;
+	u8	type;
+	u8	affinity_level;
+};
+
+struct psci_operations {
+	int (*cpu_suspend)(struct psci_power_state state,
+			   unsigned long entry_point);
+	int (*cpu_off)(struct psci_power_state state);
+	int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
+	int (*migrate)(unsigned long cpuid);
+};
+
+extern struct psci_operations psci_ops;
+
+int psci_init(void);
+
+#endif /* __ASM_PSCI_H */
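The psci_operations table added above is filled in by psci_init() and consumed by CPU bring-up code. A hypothetical caller might look like the sketch below (boot_cpu_via_psci is an illustrative name, not part of this patch):

#include <linux/errno.h>
#include <asm/psci.h>

/* Hypothetical caller sketch: start a secondary CPU through the PSCI
 * cpu_on hook, if the firmware provided one. */
static int boot_cpu_via_psci(unsigned long cpu, unsigned long entry_point)
{
	if (psci_ops.cpu_on)
		return psci_ops.cpu_on(cpu, entry_point);

	return -ENODEV;
}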
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 4ce845f8ee1c..41a71ee4c3df 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -42,6 +42,16 @@
 #define COMPAT_PSR_MODE_UND	0x0000001b
 #define COMPAT_PSR_MODE_SYS	0x0000001f
 #define COMPAT_PSR_T_BIT	0x00000020
+#define COMPAT_PSR_F_BIT	0x00000040
+#define COMPAT_PSR_I_BIT	0x00000080
+#define COMPAT_PSR_A_BIT	0x00000100
+#define COMPAT_PSR_E_BIT	0x00000200
+#define COMPAT_PSR_J_BIT	0x01000000
+#define COMPAT_PSR_Q_BIT	0x08000000
+#define COMPAT_PSR_V_BIT	0x10000000
+#define COMPAT_PSR_C_BIT	0x20000000
+#define COMPAT_PSR_Z_BIT	0x40000000
+#define COMPAT_PSR_N_BIT	0x80000000
 #define COMPAT_PSR_IT_MASK	0x0600fc00	/* If-Then execution state mask */
 /*
  * These are 'magic' values for PTRACE_PEEKUSR that return info about where a
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 7e34295f78e3..4b8023c5d146 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -66,4 +66,15 @@ extern volatile unsigned long secondary_holding_pen_release;
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
+struct device_node;
+
+struct smp_enable_ops {
+	const char	*name;
+	int		(*init_cpu)(struct device_node *, int);
+	int		(*prepare_cpu)(int);
+};
+
+extern const struct smp_enable_ops smp_spin_table_ops;
+extern const struct smp_enable_ops smp_psci_ops;
+
 #endif /* ifndef __ASM_SMP_H */
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 41112fe2f8b1..7065e920149d 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -45,13 +45,13 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 	asm volatile(
 	"	sevl\n"
 	"1:	wfe\n"
-	"2:	ldaxr	%w0, [%1]\n"
+	"2:	ldaxr	%w0, %1\n"
 	"	cbnz	%w0, 1b\n"
-	"	stxr	%w0, %w2, [%1]\n"
+	"	stxr	%w0, %w2, %1\n"
 	"	cbnz	%w0, 2b\n"
-	: "=&r" (tmp)
-	: "r" (&lock->lock), "r" (1)
-	: "memory");
+	: "=&r" (tmp), "+Q" (lock->lock)
+	: "r" (1)
+	: "cc", "memory");
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *lock)
@@ -59,13 +59,13 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 	unsigned int tmp;
 
 	asm volatile(
-	"	ldaxr	%w0, [%1]\n"
+	"	ldaxr	%w0, %1\n"
 	"	cbnz	%w0, 1f\n"
-	"	stxr	%w0, %w2, [%1]\n"
+	"	stxr	%w0, %w2, %1\n"
 	"1:\n"
-	: "=&r" (tmp)
-	: "r" (&lock->lock), "r" (1)
-	: "memory");
+	: "=&r" (tmp), "+Q" (lock->lock)
+	: "r" (1)
+	: "cc", "memory");
 
 	return !tmp;
 }
@@ -73,8 +73,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	asm volatile(
-	"	stlr	%w1, [%0]\n"
-	: : "r" (&lock->lock), "r" (0) : "memory");
+	"	stlr	%w1, %0\n"
+	: "=Q" (lock->lock) : "r" (0) : "memory");
 }
 
 /*
@@ -94,13 +94,13 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 	asm volatile(
 	"	sevl\n"
 	"1:	wfe\n"
-	"2:	ldaxr	%w0, [%1]\n"
+	"2:	ldaxr	%w0, %1\n"
 	"	cbnz	%w0, 1b\n"
-	"	stxr	%w0, %w2, [%1]\n"
+	"	stxr	%w0, %w2, %1\n"
 	"	cbnz	%w0, 2b\n"
-	: "=&r" (tmp)
-	: "r" (&rw->lock), "r" (0x80000000)
-	: "memory");
+	: "=&r" (tmp), "+Q" (rw->lock)
+	: "r" (0x80000000)
+	: "cc", "memory");
 }
 
 static inline int arch_write_trylock(arch_rwlock_t *rw)
@@ -108,13 +108,13 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
 	unsigned int tmp;
 
 	asm volatile(
-	"	ldaxr	%w0, [%1]\n"
+	"	ldaxr	%w0, %1\n"
 	"	cbnz	%w0, 1f\n"
-	"	stxr	%w0, %w2, [%1]\n"
+	"	stxr	%w0, %w2, %1\n"
 	"1:\n"
-	: "=&r" (tmp)
-	: "r" (&rw->lock), "r" (0x80000000)
-	: "memory");
+	: "=&r" (tmp), "+Q" (rw->lock)
+	: "r" (0x80000000)
+	: "cc", "memory");
 
 	return !tmp;
 }
@@ -122,8 +122,8 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
 static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	asm volatile(
-	"	stlr	%w1, [%0]\n"
-	: : "r" (&rw->lock), "r" (0) : "memory");
+	"	stlr	%w1, %0\n"
+	: "=Q" (rw->lock) : "r" (0) : "memory");
 }
 
 /* write_can_lock - would write_trylock() succeed? */
@@ -148,14 +148,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
 	asm volatile(
 	"	sevl\n"
 	"1:	wfe\n"
-	"2:	ldaxr	%w0, [%2]\n"
+	"2:	ldaxr	%w0, %2\n"
 	"	add	%w0, %w0, #1\n"
 	"	tbnz	%w0, #31, 1b\n"
-	"	stxr	%w1, %w0, [%2]\n"
+	"	stxr	%w1, %w0, %2\n"
 	"	cbnz	%w1, 2b\n"
-	: "=&r" (tmp), "=&r" (tmp2)
-	: "r" (&rw->lock)
-	: "memory");
+	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
+	:
+	: "cc", "memory");
 }
 
 static inline void arch_read_unlock(arch_rwlock_t *rw)
@@ -163,13 +163,13 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 	unsigned int tmp, tmp2;
 
 	asm volatile(
-	"1:	ldxr	%w0, [%2]\n"
+	"1:	ldxr	%w0, %2\n"
 	"	sub	%w0, %w0, #1\n"
-	"	stlxr	%w1, %w0, [%2]\n"
+	"	stlxr	%w1, %w0, %2\n"
 	"	cbnz	%w1, 1b\n"
-	: "=&r" (tmp), "=&r" (tmp2)
-	: "r" (&rw->lock)
-	: "memory");
+	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
+	:
+	: "cc", "memory");
 }
 
 static inline int arch_read_trylock(arch_rwlock_t *rw)
@@ -177,14 +177,14 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 	unsigned int tmp, tmp2 = 1;
 
 	asm volatile(
-	"	ldaxr	%w0, [%2]\n"
+	"	ldaxr	%w0, %2\n"
 	"	add	%w0, %w0, #1\n"
 	"	tbnz	%w0, #31, 1f\n"
-	"	stxr	%w1, %w0, [%2]\n"
+	"	stxr	%w1, %w0, %2\n"
 	"1:\n"
-	: "=&r" (tmp), "+r" (tmp2)
-	: "r" (&rw->lock)
-	: "memory");
+	: "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock)
+	:
+	: "cc", "memory");
 
 	return !tmp2;
 }
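The locking hunks follow the same pattern as the atomics: the lock word becomes a "+Q" read-write memory operand on the acquire side, while the store-release unlock only needs a write-only "=Q" operand. A compilable illustration is sketched below; the demo_* names are illustrative and not part of this patch.

/* Minimal standalone sketch of the acquire/release pairing used above. */
typedef struct {
	volatile unsigned int lock;
} demo_spinlock_t;

static inline void demo_spin_lock(demo_spinlock_t *l)
{
	unsigned int tmp;

	asm volatile(
	"	sevl\n"
	"1:	wfe\n"
	"2:	ldaxr	%w0, %1\n"		/* load-acquire the lock word */
	"	cbnz	%w0, 1b\n"
	"	stxr	%w0, %w2, %1\n"		/* try to claim it with 1 */
	"	cbnz	%w0, 2b\n"
	: "=&r" (tmp), "+Q" (l->lock)
	: "r" (1)
	: "memory");
}

static inline void demo_spin_unlock(demo_spinlock_t *l)
{
	asm volatile(
	"	stlr	%w1, %0\n"		/* store-release of 0 drops the lock */
	: "=Q" (l->lock)
	: "r" (0)
	: "memory");
}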