Diffstat (limited to 'arch/arm64/include')
-rw-r--r--  arch/arm64/include/asm/atomic.h      | 53
-rw-r--r--  arch/arm64/include/asm/barrier.h     |  2
-rw-r--r--  arch/arm64/include/asm/cacheflush.h  |  1
-rw-r--r--  arch/arm64/include/asm/cmpxchg.h     | 17
-rw-r--r--  arch/arm64/include/asm/esr.h         |  2
-rw-r--r--  arch/arm64/include/asm/futex.h       | 10
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h     |  2
-rw-r--r--  arch/arm64/include/asm/percpu.h      |  8
-rw-r--r--  arch/arm64/include/asm/pgtable.h     | 10
-rw-r--r--  arch/arm64/include/asm/spinlock.h    | 10
-rw-r--r--  arch/arm64/include/asm/unistd32.h    |  5
-rw-r--r--  arch/arm64/include/uapi/asm/kvm.h    |  9
12 files changed, 80 insertions(+), 49 deletions(-)
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 01de5aaa3edc..0237f0867e37 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -54,8 +54,7 @@ static inline void atomic_add(int i, atomic_t *v)
54" stxr %w1, %w0, %2\n" 54" stxr %w1, %w0, %2\n"
55" cbnz %w1, 1b" 55" cbnz %w1, 1b"
56 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) 56 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
57 : "Ir" (i) 57 : "Ir" (i));
58 : "cc");
59} 58}
60 59
61static inline int atomic_add_return(int i, atomic_t *v) 60static inline int atomic_add_return(int i, atomic_t *v)
@@ -64,14 +63,15 @@ static inline int atomic_add_return(int i, atomic_t *v)
 	int result;
 
 	asm volatile("// atomic_add_return\n"
-"1:	ldaxr	%w0, %2\n"
+"1:	ldxr	%w0, %2\n"
 "	add	%w0, %w0, %w3\n"
 "	stlxr	%w1, %w0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 	: "Ir" (i)
-	: "cc", "memory");
+	: "memory");
 
+	smp_mb();
 	return result;
 }
 
@@ -86,8 +86,7 @@ static inline void atomic_sub(int i, atomic_t *v)
86" stxr %w1, %w0, %2\n" 86" stxr %w1, %w0, %2\n"
87" cbnz %w1, 1b" 87" cbnz %w1, 1b"
88 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) 88 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
89 : "Ir" (i) 89 : "Ir" (i));
90 : "cc");
91} 90}
92 91
93static inline int atomic_sub_return(int i, atomic_t *v) 92static inline int atomic_sub_return(int i, atomic_t *v)
@@ -96,14 +95,15 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 	int result;
 
 	asm volatile("// atomic_sub_return\n"
-"1:	ldaxr	%w0, %2\n"
+"1:	ldxr	%w0, %2\n"
 "	sub	%w0, %w0, %w3\n"
 "	stlxr	%w1, %w0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 	: "Ir" (i)
-	: "cc", "memory");
+	: "memory");
 
+	smp_mb();
 	return result;
 }
 
@@ -112,17 +112,20 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 	unsigned long tmp;
 	int oldval;
 
+	smp_mb();
+
 	asm volatile("// atomic_cmpxchg\n"
-"1:	ldaxr	%w1, %2\n"
+"1:	ldxr	%w1, %2\n"
 "	cmp	%w1, %w3\n"
 "	b.ne	2f\n"
-"	stlxr	%w0, %w4, %2\n"
+"	stxr	%w0, %w4, %2\n"
 "	cbnz	%w0, 1b\n"
 "2:"
 	: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
 	: "Ir" (old), "r" (new)
-	: "cc", "memory");
+	: "cc");
 
+	smp_mb();
 	return oldval;
 }
 
@@ -173,8 +176,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
173" stxr %w1, %0, %2\n" 176" stxr %w1, %0, %2\n"
174" cbnz %w1, 1b" 177" cbnz %w1, 1b"
175 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) 178 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
176 : "Ir" (i) 179 : "Ir" (i));
177 : "cc");
178} 180}
179 181
180static inline long atomic64_add_return(long i, atomic64_t *v) 182static inline long atomic64_add_return(long i, atomic64_t *v)
@@ -183,14 +185,15 @@ static inline long atomic64_add_return(long i, atomic64_t *v)
 	unsigned long tmp;
 
 	asm volatile("// atomic64_add_return\n"
-"1:	ldaxr	%0, %2\n"
+"1:	ldxr	%0, %2\n"
 "	add	%0, %0, %3\n"
 "	stlxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 	: "Ir" (i)
-	: "cc", "memory");
+	: "memory");
 
+	smp_mb();
 	return result;
 }
 
@@ -205,8 +208,7 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
205" stxr %w1, %0, %2\n" 208" stxr %w1, %0, %2\n"
206" cbnz %w1, 1b" 209" cbnz %w1, 1b"
207 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) 210 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
208 : "Ir" (i) 211 : "Ir" (i));
209 : "cc");
210} 212}
211 213
212static inline long atomic64_sub_return(long i, atomic64_t *v) 214static inline long atomic64_sub_return(long i, atomic64_t *v)
@@ -215,14 +217,15 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
 	unsigned long tmp;
 
 	asm volatile("// atomic64_sub_return\n"
-"1:	ldaxr	%0, %2\n"
+"1:	ldxr	%0, %2\n"
 "	sub	%0, %0, %3\n"
 "	stlxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 	: "Ir" (i)
-	: "cc", "memory");
+	: "memory");
 
+	smp_mb();
 	return result;
 }
 
@@ -231,17 +234,20 @@ static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
 	long oldval;
 	unsigned long res;
 
+	smp_mb();
+
 	asm volatile("// atomic64_cmpxchg\n"
-"1:	ldaxr	%1, %2\n"
+"1:	ldxr	%1, %2\n"
 "	cmp	%1, %3\n"
 "	b.ne	2f\n"
-"	stlxr	%w0, %4, %2\n"
+"	stxr	%w0, %4, %2\n"
 "	cbnz	%w0, 1b\n"
 "2:"
 	: "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
 	: "Ir" (old), "r" (new)
-	: "cc", "memory");
+	: "cc");
 
+	smp_mb();
 	return oldval;
 }
 
@@ -253,11 +259,12 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
 	unsigned long tmp;
 
 	asm volatile("// atomic64_dec_if_positive\n"
-"1:	ldaxr	%0, %2\n"
+"1:	ldxr	%0, %2\n"
 "	subs	%0, %0, #1\n"
 "	b.mi	2f\n"
 "	stlxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b\n"
+"	dmb	ish\n"
 "2:"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 	:
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 78e20ba8806b..409ca370cfe2 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -25,7 +25,7 @@
 #define wfi()		asm volatile("wfi" : : : "memory")
 
 #define isb()		asm volatile("isb" : : : "memory")
-#define dsb()		asm volatile("dsb sy" : : : "memory")
+#define dsb(opt)	asm volatile("dsb sy" : : : "memory")
 
 #define mb()		dsb()
 #define rmb()		asm volatile("dsb ld" : : : "memory")
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index fea9ee327206..889324981aa4 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -116,6 +116,7 @@ extern void flush_dcache_page(struct page *);
 static inline void __flush_icache_all(void)
 {
 	asm("ic	ialluis");
+	dsb();
 }
 
 #define flush_dcache_mmap_lock(mapping) \
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 56166d7f4a25..57c0fa7bf711 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -29,44 +29,45 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 	switch (size) {
 	case 1:
 		asm volatile("// __xchg1\n"
-		"1:	ldaxrb	%w0, %2\n"
+		"1:	ldxrb	%w0, %2\n"
 		"	stlxrb	%w1, %w3, %2\n"
 		"	cbnz	%w1, 1b\n"
 			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
 			: "r" (x)
-			: "cc", "memory");
+			: "memory");
 		break;
 	case 2:
 		asm volatile("// __xchg2\n"
-		"1:	ldaxrh	%w0, %2\n"
+		"1:	ldxrh	%w0, %2\n"
 		"	stlxrh	%w1, %w3, %2\n"
 		"	cbnz	%w1, 1b\n"
 			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
 			: "r" (x)
-			: "cc", "memory");
+			: "memory");
 		break;
 	case 4:
 		asm volatile("// __xchg4\n"
-		"1:	ldaxr	%w0, %2\n"
+		"1:	ldxr	%w0, %2\n"
 		"	stlxr	%w1, %w3, %2\n"
 		"	cbnz	%w1, 1b\n"
 			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
 			: "r" (x)
-			: "cc", "memory");
+			: "memory");
 		break;
 	case 8:
 		asm volatile("// __xchg8\n"
-		"1:	ldaxr	%0, %2\n"
+		"1:	ldxr	%0, %2\n"
 		"	stlxr	%w1, %3, %2\n"
 		"	cbnz	%w1, 1b\n"
 			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
 			: "r" (x)
-			: "cc", "memory");
+			: "memory");
 		break;
 	default:
 		BUILD_BUG();
 	}
 
+	smp_mb();
 	return ret;
 }
 
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 78834123a32e..c4a7f940b387 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -42,7 +42,7 @@
 #define ESR_EL1_EC_SP_ALIGN	(0x26)
 #define ESR_EL1_EC_FP_EXC32	(0x28)
 #define ESR_EL1_EC_FP_EXC64	(0x2C)
-#define ESR_EL1_EC_SERRROR	(0x2F)
+#define ESR_EL1_EC_SERROR	(0x2F)
 #define ESR_EL1_EC_BREAKPT_EL0	(0x30)
 #define ESR_EL1_EC_BREAKPT_EL1	(0x31)
 #define ESR_EL1_EC_SOFTSTP_EL0	(0x32)
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index 78cc3aba5d69..5f750dc96e0f 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -24,10 +24,11 @@
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg)	\
 	asm volatile(							\
-"1:	ldaxr	%w1, %2\n"						\
+"1:	ldxr	%w1, %2\n"						\
 	insn "\n"							\
 "2:	stlxr	%w3, %w0, %2\n"						\
 "	cbnz	%w3, 1b\n"						\
+"	dmb	ish\n"							\
 "3:\n"									\
 "	.pushsection .fixup,\"ax\"\n"					\
 "	.align	2\n"							\
@@ -40,7 +41,7 @@
40" .popsection\n" \ 41" .popsection\n" \
41 : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \ 42 : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \
42 : "r" (oparg), "Ir" (-EFAULT) \ 43 : "r" (oparg), "Ir" (-EFAULT) \
43 : "cc", "memory") 44 : "memory")
44 45
45static inline int 46static inline int
46futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) 47futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
@@ -111,11 +112,12 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 		return -EFAULT;
 
 	asm volatile("// futex_atomic_cmpxchg_inatomic\n"
-"1:	ldaxr	%w1, %2\n"
+"1:	ldxr	%w1, %2\n"
 "	sub	%w3, %w1, %w4\n"
 "	cbnz	%w3, 3f\n"
 "2:	stlxr	%w3, %w5, %2\n"
 "	cbnz	%w3, 1b\n"
+"	dmb	ish\n"
 "3:\n"
 "	.pushsection .fixup,\"ax\"\n"
 "4:	mov	%w0, %w6\n"
@@ -127,7 +129,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
127" .popsection\n" 129" .popsection\n"
128 : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp) 130 : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp)
129 : "r" (oldval), "r" (newval), "Ir" (-EFAULT) 131 : "r" (oldval), "r" (newval), "Ir" (-EFAULT)
130 : "cc", "memory"); 132 : "memory");
131 133
132 *uval = val; 134 *uval = val;
133 return ret; 135 return ret;
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index c98ef4771c73..0eb398655378 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -231,7 +231,7 @@
 #define ESR_EL2_EC_SP_ALIGN	(0x26)
 #define ESR_EL2_EC_FP_EXC32	(0x28)
 #define ESR_EL2_EC_FP_EXC64	(0x2C)
-#define ESR_EL2_EC_SERRROR	(0x2F)
+#define ESR_EL2_EC_SERROR	(0x2F)
 #define ESR_EL2_EC_BREAKPT	(0x30)
 #define ESR_EL2_EC_BREAKPT_HYP	(0x31)
 #define ESR_EL2_EC_SOFTSTP	(0x32)
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 13fb0b3efc5f..453a179469a3 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -16,6 +16,8 @@
 #ifndef __ASM_PERCPU_H
 #define __ASM_PERCPU_H
 
+#ifdef CONFIG_SMP
+
 static inline void set_my_cpu_offset(unsigned long off)
 {
 	asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
@@ -36,6 +38,12 @@ static inline unsigned long __my_cpu_offset(void)
 }
 #define __my_cpu_offset __my_cpu_offset()
 
+#else	/* !CONFIG_SMP */
+
+#define set_my_cpu_offset(x)	do { } while (0)
+
+#endif /* CONFIG_SMP */
+
 #include <asm-generic/percpu.h>
 
 #endif /* __ASM_PERCPU_H */
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index b524dcd17243..aa3917c8b623 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -136,11 +136,11 @@ extern struct page *empty_zero_page;
 /*
  * The following only work if pte_present(). Undefined behaviour otherwise.
  */
-#define pte_present(pte)	(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE))
-#define pte_dirty(pte)		(pte_val(pte) & PTE_DIRTY)
-#define pte_young(pte)		(pte_val(pte) & PTE_AF)
-#define pte_special(pte)	(pte_val(pte) & PTE_SPECIAL)
-#define pte_write(pte)		(pte_val(pte) & PTE_WRITE)
+#define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
+#define pte_dirty(pte)		(!!(pte_val(pte) & PTE_DIRTY))
+#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
+#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
+#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
 #define pte_exec(pte)		(!(pte_val(pte) & PTE_UXN))
 
 #define pte_valid_user(pte) \
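
Note: the pgtable.h hunk above matters because some of the bits being tested sit in the upper half of the 64-bit entry, so callers that store the macro's result in an int would truncate it to zero; the double logical invert normalises the result to 0 or 1 first. A standalone sketch of the failure mode, assuming (for illustration only) a write-permission bit at position 57:

	#include <stdio.h>
	#include <stdint.h>

	#define EXAMPLE_PTE_WRITE	(1ULL << 57)	/* assumed high software bit */

	int main(void)
	{
		uint64_t pte = EXAMPLE_PTE_WRITE;		/* only the high bit is set */

		int truncated  = pte & EXAMPLE_PTE_WRITE;	/* old style: high bit lost, 0 */
		int normalised = !!(pte & EXAMPLE_PTE_WRITE);	/* new style: yields 1 */

		printf("truncated=%d normalised=%d\n", truncated, normalised);
		return 0;
	}
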
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 3d5cf064d7a1..c45b7b1b7197 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -132,7 +132,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
132 " cbnz %w0, 2b\n" 132 " cbnz %w0, 2b\n"
133 : "=&r" (tmp), "+Q" (rw->lock) 133 : "=&r" (tmp), "+Q" (rw->lock)
134 : "r" (0x80000000) 134 : "r" (0x80000000)
135 : "cc", "memory"); 135 : "memory");
136} 136}
137 137
138static inline int arch_write_trylock(arch_rwlock_t *rw) 138static inline int arch_write_trylock(arch_rwlock_t *rw)
@@ -146,7 +146,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
146 "1:\n" 146 "1:\n"
147 : "=&r" (tmp), "+Q" (rw->lock) 147 : "=&r" (tmp), "+Q" (rw->lock)
148 : "r" (0x80000000) 148 : "r" (0x80000000)
149 : "cc", "memory"); 149 : "memory");
150 150
151 return !tmp; 151 return !tmp;
152} 152}
@@ -187,7 +187,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
187 " cbnz %w1, 2b\n" 187 " cbnz %w1, 2b\n"
188 : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock) 188 : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
189 : 189 :
190 : "cc", "memory"); 190 : "memory");
191} 191}
192 192
193static inline void arch_read_unlock(arch_rwlock_t *rw) 193static inline void arch_read_unlock(arch_rwlock_t *rw)
@@ -201,7 +201,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
201 " cbnz %w1, 1b\n" 201 " cbnz %w1, 1b\n"
202 : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock) 202 : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
203 : 203 :
204 : "cc", "memory"); 204 : "memory");
205} 205}
206 206
207static inline int arch_read_trylock(arch_rwlock_t *rw) 207static inline int arch_read_trylock(arch_rwlock_t *rw)
@@ -216,7 +216,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
216 "1:\n" 216 "1:\n"
217 : "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock) 217 : "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock)
218 : 218 :
219 : "cc", "memory"); 219 : "memory");
220 220
221 return !tmp2; 221 return !tmp2;
222} 222}
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 58125bf008d3..bb8eb8a78e67 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -399,7 +399,10 @@ __SYSCALL(374, compat_sys_sendmmsg)
 __SYSCALL(375, sys_setns)
 __SYSCALL(376, compat_sys_process_vm_readv)
 __SYSCALL(377, compat_sys_process_vm_writev)
-__SYSCALL(378, sys_ni_syscall)			/* 378 for kcmp */
+__SYSCALL(378, sys_kcmp)
+__SYSCALL(379, sys_finit_module)
+__SYSCALL(380, sys_sched_setattr)
+__SYSCALL(381, sys_sched_getattr)
 
 #define __NR_compat_syscalls		379
 
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 495ab6f84a61..eaf54a30bedc 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -148,6 +148,15 @@ struct kvm_arch_memory_slot {
 #define KVM_REG_ARM_TIMER_CNT	ARM64_SYS_REG(3, 3, 14, 3, 2)
 #define KVM_REG_ARM_TIMER_CVAL	ARM64_SYS_REG(3, 3, 14, 0, 2)
 
+/* Device Control API: ARM VGIC */
+#define KVM_DEV_ARM_VGIC_GRP_ADDR	0
+#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS	1
+#define KVM_DEV_ARM_VGIC_GRP_CPU_REGS	2
+#define   KVM_DEV_ARM_VGIC_CPUID_SHIFT	32
+#define   KVM_DEV_ARM_VGIC_CPUID_MASK	(0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT)
+#define   KVM_DEV_ARM_VGIC_OFFSET_SHIFT	0
+#define   KVM_DEV_ARM_VGIC_OFFSET_MASK	(0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
+
 /* KVM_IRQ_LINE irq field index values */
 #define KVM_ARM_IRQ_TYPE_SHIFT		24
 #define KVM_ARM_IRQ_TYPE_MASK		0xff