Diffstat (limited to 'arch/arm64')

 arch/arm64/Kconfig                    |  1
 arch/arm64/configs/defconfig          | 18
 arch/arm64/include/asm/atomic.h       | 53
 arch/arm64/include/asm/barrier.h      |  2
 arch/arm64/include/asm/cacheflush.h   |  1
 arch/arm64/include/asm/cmpxchg.h      | 17
 arch/arm64/include/asm/esr.h          |  2
 arch/arm64/include/asm/futex.h        | 10
 arch/arm64/include/asm/kvm_arm.h      |  2
 arch/arm64/include/asm/percpu.h       |  8
 arch/arm64/include/asm/pgtable.h      | 10
 arch/arm64/include/asm/spinlock.h     | 10
 arch/arm64/include/asm/unistd32.h     |  5
 arch/arm64/include/uapi/asm/kvm.h     |  9
 arch/arm64/kernel/kuser32.S           |  6
 arch/arm64/kernel/stacktrace.c        |  6
 arch/arm64/kernel/vdso.c              |  4
 arch/arm64/kernel/vdso/Makefile       |  2
 arch/arm64/kernel/vdso/gettimeofday.S |  7
 arch/arm64/kvm/hyp.S                  | 27
 arch/arm64/lib/bitops.S               |  3
 arch/arm64/mm/dma-mapping.c           |  1
 arch/arm64/mm/mmu.c                   | 12
 arch/arm64/mm/pgd.c                   | 11
 24 files changed, 153 insertions(+), 74 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index dd4327f09ba4..27bbcfc7202a 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -36,6 +36,7 @@ config ARM64
 	select HAVE_GENERIC_DMA_COHERENT
 	select HAVE_HW_BREAKPOINT if PERF_EVENTS
 	select HAVE_MEMBLOCK
+	select HAVE_PATA_PLATFORM
 	select HAVE_PERF_EVENTS
 	select IRQ_DOMAIN
 	select MODULES_USE_ELF_RELA
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 84139be62ae6..7959dd0ca5d5 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
@@ -19,6 +18,7 @@ CONFIG_BLK_DEV_INITRD=y
 CONFIG_KALLSYMS_ALL=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
+CONFIG_JUMP_LABEL=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
@@ -27,6 +27,7 @@ CONFIG_ARCH_VEXPRESS=y
 CONFIG_ARCH_XGENE=y
 CONFIG_SMP=y
 CONFIG_PREEMPT=y
+CONFIG_CMA=y
 CONFIG_CMDLINE="console=ttyAMA0"
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_COMPAT=y
@@ -42,14 +43,17 @@ CONFIG_IP_PNP_BOOTP=y
 # CONFIG_WIRELESS is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_DEVTMPFS=y
-CONFIG_BLK_DEV=y
+CONFIG_DMA_CMA=y
 CONFIG_SCSI=y
 # CONFIG_SCSI_PROC_FS is not set
 CONFIG_BLK_DEV_SD=y
 # CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_ATA=y
+CONFIG_PATA_PLATFORM=y
+CONFIG_PATA_OF_PLATFORM=y
 CONFIG_NETDEVICES=y
-CONFIG_MII=y
 CONFIG_SMC91X=y
+CONFIG_SMSC911X=y
 # CONFIG_WLAN is not set
 CONFIG_INPUT_EVDEV=y
 # CONFIG_SERIO_I8042 is not set
@@ -62,13 +66,19 @@ CONFIG_SERIAL_AMBA_PL011=y
 CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
 # CONFIG_HWMON is not set
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_FB=y
 # CONFIG_VGA_CONSOLE is not set
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_LOGO=y
 # CONFIG_LOGO_LINUX_MONO is not set
 # CONFIG_LOGO_LINUX_VGA16 is not set
-# CONFIG_USB_SUPPORT is not set
+CONFIG_USB=y
+CONFIG_USB_ISP1760_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_MMC=y
+CONFIG_MMC_ARMMMCI=y
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 01de5aaa3edc..0237f0867e37 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -54,8 +54,7 @@ static inline void atomic_add(int i, atomic_t *v)
54" stxr %w1, %w0, %2\n" 54" stxr %w1, %w0, %2\n"
55" cbnz %w1, 1b" 55" cbnz %w1, 1b"
56 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) 56 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
57 : "Ir" (i) 57 : "Ir" (i));
58 : "cc");
59} 58}
60 59
61static inline int atomic_add_return(int i, atomic_t *v) 60static inline int atomic_add_return(int i, atomic_t *v)
@@ -64,14 +63,15 @@ static inline int atomic_add_return(int i, atomic_t *v)
 	int result;
 
 	asm volatile("// atomic_add_return\n"
-"1:	ldaxr	%w0, %2\n"
+"1:	ldxr	%w0, %2\n"
 "	add	%w0, %w0, %w3\n"
 "	stlxr	%w1, %w0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 	: "Ir" (i)
-	: "cc", "memory");
+	: "memory");
 
+	smp_mb();
 	return result;
 }
 
@@ -86,8 +86,7 @@ static inline void atomic_sub(int i, atomic_t *v)
86" stxr %w1, %w0, %2\n" 86" stxr %w1, %w0, %2\n"
87" cbnz %w1, 1b" 87" cbnz %w1, 1b"
88 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) 88 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
89 : "Ir" (i) 89 : "Ir" (i));
90 : "cc");
91} 90}
92 91
93static inline int atomic_sub_return(int i, atomic_t *v) 92static inline int atomic_sub_return(int i, atomic_t *v)
@@ -96,14 +95,15 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 	int result;
 
 	asm volatile("// atomic_sub_return\n"
-"1:	ldaxr	%w0, %2\n"
+"1:	ldxr	%w0, %2\n"
 "	sub	%w0, %w0, %w3\n"
 "	stlxr	%w1, %w0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 	: "Ir" (i)
-	: "cc", "memory");
+	: "memory");
 
+	smp_mb();
 	return result;
 }
 
@@ -112,17 +112,20 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 	unsigned long tmp;
 	int oldval;
 
+	smp_mb();
+
 	asm volatile("// atomic_cmpxchg\n"
-"1:	ldaxr	%w1, %2\n"
+"1:	ldxr	%w1, %2\n"
 "	cmp	%w1, %w3\n"
 "	b.ne	2f\n"
-"	stlxr	%w0, %w4, %2\n"
+"	stxr	%w0, %w4, %2\n"
 "	cbnz	%w0, 1b\n"
 "2:"
 	: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
 	: "Ir" (old), "r" (new)
-	: "cc", "memory");
+	: "cc");
 
+	smp_mb();
 	return oldval;
 }
 
@@ -173,8 +176,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
173" stxr %w1, %0, %2\n" 176" stxr %w1, %0, %2\n"
174" cbnz %w1, 1b" 177" cbnz %w1, 1b"
175 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) 178 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
176 : "Ir" (i) 179 : "Ir" (i));
177 : "cc");
178} 180}
179 181
180static inline long atomic64_add_return(long i, atomic64_t *v) 182static inline long atomic64_add_return(long i, atomic64_t *v)
@@ -183,14 +185,15 @@ static inline long atomic64_add_return(long i, atomic64_t *v)
 	unsigned long tmp;
 
 	asm volatile("// atomic64_add_return\n"
-"1:	ldaxr	%0, %2\n"
+"1:	ldxr	%0, %2\n"
 "	add	%0, %0, %3\n"
 "	stlxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 	: "Ir" (i)
-	: "cc", "memory");
+	: "memory");
 
+	smp_mb();
 	return result;
 }
 
@@ -205,8 +208,7 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
205" stxr %w1, %0, %2\n" 208" stxr %w1, %0, %2\n"
206" cbnz %w1, 1b" 209" cbnz %w1, 1b"
207 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) 210 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
208 : "Ir" (i) 211 : "Ir" (i));
209 : "cc");
210} 212}
211 213
212static inline long atomic64_sub_return(long i, atomic64_t *v) 214static inline long atomic64_sub_return(long i, atomic64_t *v)
@@ -215,14 +217,15 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
 	unsigned long tmp;
 
 	asm volatile("// atomic64_sub_return\n"
-"1:	ldaxr	%0, %2\n"
+"1:	ldxr	%0, %2\n"
 "	sub	%0, %0, %3\n"
 "	stlxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 	: "Ir" (i)
-	: "cc", "memory");
+	: "memory");
 
+	smp_mb();
 	return result;
 }
 
@@ -231,17 +234,20 @@ static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
 	long oldval;
 	unsigned long res;
 
+	smp_mb();
+
 	asm volatile("// atomic64_cmpxchg\n"
-"1:	ldaxr	%1, %2\n"
+"1:	ldxr	%1, %2\n"
 "	cmp	%1, %3\n"
 "	b.ne	2f\n"
-"	stlxr	%w0, %4, %2\n"
+"	stxr	%w0, %4, %2\n"
 "	cbnz	%w0, 1b\n"
 "2:"
 	: "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
 	: "Ir" (old), "r" (new)
-	: "cc", "memory");
+	: "cc");
 
+	smp_mb();
 	return oldval;
 }
 
@@ -253,11 +259,12 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
 	unsigned long tmp;
 
 	asm volatile("// atomic64_dec_if_positive\n"
-"1:	ldaxr	%0, %2\n"
+"1:	ldxr	%0, %2\n"
 "	subs	%0, %0, #1\n"
 "	b.mi	2f\n"
 "	stlxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b\n"
+"	dmb	ish\n"
 "2:"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 	:
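
The pattern above recurs throughout this series: acquire semantics on the load-exclusive are dropped (ldaxr becomes ldxr) and an explicit smp_mb() after the loop restores full ordering for the value-returning atomics, so the LL/SC retry path no longer pays for acquire on every iteration. A minimal portable sketch of the resulting ordering, using C11 atomics rather than the kernel's primitives (the function name is illustrative, not from the patch):

```c
#include <stdatomic.h>

/* Hypothetical stand-in for atomic_add_return() after this patch:
 * release semantics on the read-modify-write itself, then a full
 * fence (the smp_mb() in the diff) so the operation as a whole is
 * fully ordered. */
static inline int atomic_add_return_sketch(int i, _Atomic int *v)
{
	int result = atomic_fetch_add_explicit(v, i, memory_order_release) + i;
	atomic_thread_fence(memory_order_seq_cst);	/* ~ smp_mb() */
	return result;
}
```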
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 78e20ba8806b..409ca370cfe2 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -25,7 +25,7 @@
 #define wfi()		asm volatile("wfi" : : : "memory")
 
 #define isb()		asm volatile("isb" : : : "memory")
-#define dsb()		asm volatile("dsb sy" : : : "memory")
+#define dsb(opt)	asm volatile("dsb sy" : : : "memory")
 
 #define mb()		dsb()
 #define rmb()		asm volatile("dsb ld" : : : "memory")
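
Note that dsb() grows an option parameter here while still emitting the strongest barrier, "dsb sy", so existing callers such as mb() keep compiling while call sites can start naming the domain they actually need. A hedged sketch of the presumed end state (an assumption, not part of this patch), which stringifies the option:

```c
/* Assumed follow-up form (not in this diff): emit the requested
 * barrier domain, e.g. dsb(ish) -> "dsb ish". Callers like mb()
 * would then need an explicit domain, e.g. dsb(sy). */
#define dsb(opt)	asm volatile("dsb " #opt : : : "memory")
```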
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index fea9ee327206..889324981aa4 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -116,6 +116,7 @@ extern void flush_dcache_page(struct page *);
 static inline void __flush_icache_all(void)
 {
 	asm("ic	ialluis");
+	dsb();
 }
 
 #define flush_dcache_mmap_lock(mapping) \
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 56166d7f4a25..57c0fa7bf711 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -29,44 +29,45 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 	switch (size) {
 	case 1:
 		asm volatile("// __xchg1\n"
-		"1:	ldaxrb	%w0, %2\n"
+		"1:	ldxrb	%w0, %2\n"
 		"	stlxrb	%w1, %w3, %2\n"
 		"	cbnz	%w1, 1b\n"
 			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
 			: "r" (x)
-			: "cc", "memory");
+			: "memory");
 		break;
 	case 2:
 		asm volatile("// __xchg2\n"
-		"1:	ldaxrh	%w0, %2\n"
+		"1:	ldxrh	%w0, %2\n"
 		"	stlxrh	%w1, %w3, %2\n"
 		"	cbnz	%w1, 1b\n"
 			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
 			: "r" (x)
-			: "cc", "memory");
+			: "memory");
 		break;
 	case 4:
 		asm volatile("// __xchg4\n"
-		"1:	ldaxr	%w0, %2\n"
+		"1:	ldxr	%w0, %2\n"
 		"	stlxr	%w1, %w3, %2\n"
 		"	cbnz	%w1, 1b\n"
 			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
 			: "r" (x)
-			: "cc", "memory");
+			: "memory");
 		break;
 	case 8:
 		asm volatile("// __xchg8\n"
-		"1:	ldaxr	%0, %2\n"
+		"1:	ldxr	%0, %2\n"
 		"	stlxr	%w1, %3, %2\n"
 		"	cbnz	%w1, 1b\n"
 			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
 			: "r" (x)
-			: "cc", "memory");
+			: "memory");
 		break;
 	default:
 		BUILD_BUG();
 	}
 
+	smp_mb();
 	return ret;
 }
 
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 78834123a32e..c4a7f940b387 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -42,7 +42,7 @@
 #define ESR_EL1_EC_SP_ALIGN	(0x26)
 #define ESR_EL1_EC_FP_EXC32	(0x28)
 #define ESR_EL1_EC_FP_EXC64	(0x2C)
-#define ESR_EL1_EC_SERRROR	(0x2F)
+#define ESR_EL1_EC_SERROR	(0x2F)
 #define ESR_EL1_EC_BREAKPT_EL0	(0x30)
 #define ESR_EL1_EC_BREAKPT_EL1	(0x31)
 #define ESR_EL1_EC_SOFTSTP_EL0	(0x32)
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index 78cc3aba5d69..5f750dc96e0f 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -24,10 +24,11 @@
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg)	\
 	asm volatile(							\
-"1:	ldaxr	%w1, %2\n"						\
+"1:	ldxr	%w1, %2\n"						\
 	insn "\n"							\
 "2:	stlxr	%w3, %w0, %2\n"						\
 "	cbnz	%w3, 1b\n"						\
+"	dmb	ish\n"							\
 "3:\n"									\
 "	.pushsection .fixup,\"ax\"\n"					\
 "	.align	2\n"							\
@@ -40,7 +41,7 @@
40" .popsection\n" \ 41" .popsection\n" \
41 : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \ 42 : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \
42 : "r" (oparg), "Ir" (-EFAULT) \ 43 : "r" (oparg), "Ir" (-EFAULT) \
43 : "cc", "memory") 44 : "memory")
44 45
45static inline int 46static inline int
46futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) 47futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
@@ -111,11 +112,12 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 		return -EFAULT;
 
 	asm volatile("// futex_atomic_cmpxchg_inatomic\n"
-"1:	ldaxr	%w1, %2\n"
+"1:	ldxr	%w1, %2\n"
 "	sub	%w3, %w1, %w4\n"
 "	cbnz	%w3, 3f\n"
 "2:	stlxr	%w3, %w5, %2\n"
 "	cbnz	%w3, 1b\n"
+"	dmb	ish\n"
 "3:\n"
 "	.pushsection .fixup,\"ax\"\n"
 "4:	mov	%w0, %w6\n"
@@ -127,7 +129,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
127" .popsection\n" 129" .popsection\n"
128 : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp) 130 : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp)
129 : "r" (oldval), "r" (newval), "Ir" (-EFAULT) 131 : "r" (oldval), "r" (newval), "Ir" (-EFAULT)
130 : "cc", "memory"); 132 : "memory");
131 133
132 *uval = val; 134 *uval = val;
133 return ret; 135 return ret;
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index c98ef4771c73..0eb398655378 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -231,7 +231,7 @@
 #define ESR_EL2_EC_SP_ALIGN	(0x26)
 #define ESR_EL2_EC_FP_EXC32	(0x28)
 #define ESR_EL2_EC_FP_EXC64	(0x2C)
-#define ESR_EL2_EC_SERRROR	(0x2F)
+#define ESR_EL2_EC_SERROR	(0x2F)
 #define ESR_EL2_EC_BREAKPT	(0x30)
 #define ESR_EL2_EC_BREAKPT_HYP	(0x31)
 #define ESR_EL2_EC_SOFTSTP	(0x32)
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 13fb0b3efc5f..453a179469a3 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -16,6 +16,8 @@
 #ifndef __ASM_PERCPU_H
 #define __ASM_PERCPU_H
 
+#ifdef CONFIG_SMP
+
 static inline void set_my_cpu_offset(unsigned long off)
 {
 	asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
@@ -36,6 +38,12 @@ static inline unsigned long __my_cpu_offset(void)
 }
 #define __my_cpu_offset __my_cpu_offset()
 
+#else	/* !CONFIG_SMP */
+
+#define set_my_cpu_offset(x)	do { } while (0)
+
+#endif /* CONFIG_SMP */
+
 #include <asm-generic/percpu.h>
 
 #endif /* __ASM_PERCPU_H */
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index b524dcd17243..aa3917c8b623 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -136,11 +136,11 @@ extern struct page *empty_zero_page;
 /*
  * The following only work if pte_present(). Undefined behaviour otherwise.
  */
-#define pte_present(pte)	(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE))
-#define pte_dirty(pte)		(pte_val(pte) & PTE_DIRTY)
-#define pte_young(pte)		(pte_val(pte) & PTE_AF)
-#define pte_special(pte)	(pte_val(pte) & PTE_SPECIAL)
-#define pte_write(pte)		(pte_val(pte) & PTE_WRITE)
+#define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
+#define pte_dirty(pte)		(!!(pte_val(pte) & PTE_DIRTY))
+#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
+#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
+#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
 #define pte_exec(pte)		(!(pte_val(pte) & PTE_UXN))
 
 #define pte_valid_user(pte) \
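
The !! normalisation matters because several of these flags live in the high bits of a 64-bit pte_val, so a caller that truncates the result to int would silently read the bit as clear. A standalone illustration (the bit position is an assumption for the sketch, in the spirit of arm64's high software bits, not copied from the headers):

```c
#include <stdint.h>
#include <stdio.h>

#define PTE_WRITE	(UINT64_C(1) << 57)	/* illustrative high bit */

int main(void)
{
	uint64_t pte = PTE_WRITE;

	int truncated  = (int)(pte & PTE_WRITE);	/* bit 57 lost: 0 */
	int normalised = !!(pte & PTE_WRITE);		/* boolean: 1 */

	printf("%d %d\n", truncated, normalised);	/* prints "0 1" */
	return 0;
}
```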
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 3d5cf064d7a1..c45b7b1b7197 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -132,7 +132,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
132 " cbnz %w0, 2b\n" 132 " cbnz %w0, 2b\n"
133 : "=&r" (tmp), "+Q" (rw->lock) 133 : "=&r" (tmp), "+Q" (rw->lock)
134 : "r" (0x80000000) 134 : "r" (0x80000000)
135 : "cc", "memory"); 135 : "memory");
136} 136}
137 137
138static inline int arch_write_trylock(arch_rwlock_t *rw) 138static inline int arch_write_trylock(arch_rwlock_t *rw)
@@ -146,7 +146,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
146 "1:\n" 146 "1:\n"
147 : "=&r" (tmp), "+Q" (rw->lock) 147 : "=&r" (tmp), "+Q" (rw->lock)
148 : "r" (0x80000000) 148 : "r" (0x80000000)
149 : "cc", "memory"); 149 : "memory");
150 150
151 return !tmp; 151 return !tmp;
152} 152}
@@ -187,7 +187,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
187 " cbnz %w1, 2b\n" 187 " cbnz %w1, 2b\n"
188 : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock) 188 : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
189 : 189 :
190 : "cc", "memory"); 190 : "memory");
191} 191}
192 192
193static inline void arch_read_unlock(arch_rwlock_t *rw) 193static inline void arch_read_unlock(arch_rwlock_t *rw)
@@ -201,7 +201,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
201 " cbnz %w1, 1b\n" 201 " cbnz %w1, 1b\n"
202 : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock) 202 : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
203 : 203 :
204 : "cc", "memory"); 204 : "memory");
205} 205}
206 206
207static inline int arch_read_trylock(arch_rwlock_t *rw) 207static inline int arch_read_trylock(arch_rwlock_t *rw)
@@ -216,7 +216,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
216 "1:\n" 216 "1:\n"
217 : "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock) 217 : "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock)
218 : 218 :
219 : "cc", "memory"); 219 : "memory");
220 220
221 return !tmp2; 221 return !tmp2;
222} 222}
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 58125bf008d3..bb8eb8a78e67 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -399,7 +399,10 @@ __SYSCALL(374, compat_sys_sendmmsg)
 __SYSCALL(375, sys_setns)
 __SYSCALL(376, compat_sys_process_vm_readv)
 __SYSCALL(377, compat_sys_process_vm_writev)
-__SYSCALL(378, sys_ni_syscall)			/* 378 for kcmp */
+__SYSCALL(378, sys_kcmp)
+__SYSCALL(379, sys_finit_module)
+__SYSCALL(380, sys_sched_setattr)
+__SYSCALL(381, sys_sched_getattr)
 
 #define __NR_compat_syscalls		379
 
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 495ab6f84a61..eaf54a30bedc 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -148,6 +148,15 @@ struct kvm_arch_memory_slot {
 #define KVM_REG_ARM_TIMER_CNT	ARM64_SYS_REG(3, 3, 14, 3, 2)
 #define KVM_REG_ARM_TIMER_CVAL	ARM64_SYS_REG(3, 3, 14, 0, 2)
 
+/* Device Control API: ARM VGIC */
+#define KVM_DEV_ARM_VGIC_GRP_ADDR	0
+#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS	1
+#define KVM_DEV_ARM_VGIC_GRP_CPU_REGS	2
+#define   KVM_DEV_ARM_VGIC_CPUID_SHIFT	32
+#define   KVM_DEV_ARM_VGIC_CPUID_MASK	(0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT)
+#define   KVM_DEV_ARM_VGIC_OFFSET_SHIFT	0
+#define   KVM_DEV_ARM_VGIC_OFFSET_MASK	(0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
+
 /* KVM_IRQ_LINE irq field index values */
 #define KVM_ARM_IRQ_TYPE_SHIFT		24
 #define KVM_ARM_IRQ_TYPE_MASK		0xff
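
The new VGIC device-control constants split one 64-bit attribute value into a CPU id (masked into bits above bit 32) and a register offset (bits 31:0). A hedged helper composing such an attribute, assuming the definitions above are in scope (the function name is ours, not part of the UAPI):

```c
#include <stdint.h>

/* Hypothetical helper: pack a CPU id and a register offset into one
 * KVM_DEV_ARM_VGIC_GRP_*_REGS attr using the masks defined above. */
static inline uint64_t vgic_cpu_reg_attr(uint32_t cpuid, uint32_t offset)
{
	return (((uint64_t)cpuid << KVM_DEV_ARM_VGIC_CPUID_SHIFT) &
		KVM_DEV_ARM_VGIC_CPUID_MASK) |
	       (((uint64_t)offset << KVM_DEV_ARM_VGIC_OFFSET_SHIFT) &
		KVM_DEV_ARM_VGIC_OFFSET_MASK);
}
```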
diff --git a/arch/arm64/kernel/kuser32.S b/arch/arm64/kernel/kuser32.S
index 63c48ffdf230..7787208e8cc6 100644
--- a/arch/arm64/kernel/kuser32.S
+++ b/arch/arm64/kernel/kuser32.S
@@ -38,12 +38,13 @@ __kuser_cmpxchg64: // 0xffff0f60
 	.inst	0xe92d00f0		//	push	{r4, r5, r6, r7}
 	.inst	0xe1c040d0		//	ldrd	r4, r5, [r0]
 	.inst	0xe1c160d0		//	ldrd	r6, r7, [r1]
-	.inst	0xe1b20e9f		// 1:	ldaexd	r0, r1, [r2]
+	.inst	0xe1b20f9f		// 1:	ldrexd	r0, r1, [r2]
 	.inst	0xe0303004		//	eors	r3, r0, r4
 	.inst	0x00313005		//	eoreqs	r3, r1, r5
 	.inst	0x01a23e96		//	stlexdeq r3, r6, [r2]
 	.inst	0x03330001		//	teqeq	r3, #1
 	.inst	0x0afffff9		//	beq	1b
+	.inst	0xf57ff05b		//	dmb	ish
 	.inst	0xe2730000		//	rsbs	r0, r3, #0
 	.inst	0xe8bd00f0		//	pop	{r4, r5, r6, r7}
 	.inst	0xe12fff1e		//	bx	lr
@@ -55,11 +56,12 @@ __kuser_memory_barrier: // 0xffff0fa0
 
 	.align	5
 __kuser_cmpxchg:			// 0xffff0fc0
-	.inst	0xe1923e9f		// 1:	ldaex	r3, [r2]
+	.inst	0xe1923f9f		// 1:	ldrex	r3, [r2]
 	.inst	0xe0533000		//	subs	r3, r3, r0
 	.inst	0x01823e91		//	stlexeq	r3, r1, [r2]
 	.inst	0x03330001		//	teqeq	r3, #1
 	.inst	0x0afffffa		//	beq	1b
+	.inst	0xf57ff05b		//	dmb	ish
 	.inst	0xe2730000		//	rsbs	r0, r3, #0
 	.inst	0xe12fff1e		//	bx	lr
 
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index c3b6c63ea5fb..38f0558f0c0a 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -48,7 +48,11 @@ int unwind_frame(struct stackframe *frame)
 
 	frame->sp = fp + 0x10;
 	frame->fp = *(unsigned long *)(fp);
-	frame->pc = *(unsigned long *)(fp + 8);
+	/*
+	 * -4 here because we care about the PC at time of bl,
+	 * not where the return will go.
+	 */
+	frame->pc = *(unsigned long *)(fp + 8) - 4;
 
 	return 0;
 }
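
For context, a sketch of the AAPCS64 frame record this walker consumes: each frame stores the caller's frame pointer at fp and the return address at fp + 8, and since every AArch64 instruction is 4 bytes, return address minus 4 is the bl itself (the struct name is illustrative, not from the kernel):

```c
/* Hypothetical view of the two-word frame record at fp. */
struct frame_record {
	unsigned long fp;	/* caller's frame record (fp + 0) */
	unsigned long lr;	/* return address; lr - 4 is the bl */
};
```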
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 65d40cf6945a..a7149cae1615 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -238,6 +238,8 @@ void update_vsyscall(struct timekeeper *tk)
 	vdso_data->use_syscall			= use_syscall;
 	vdso_data->xtime_coarse_sec		= xtime_coarse.tv_sec;
 	vdso_data->xtime_coarse_nsec		= xtime_coarse.tv_nsec;
+	vdso_data->wtm_clock_sec		= tk->wall_to_monotonic.tv_sec;
+	vdso_data->wtm_clock_nsec		= tk->wall_to_monotonic.tv_nsec;
 
 	if (!use_syscall) {
 		vdso_data->cs_cycle_last	= tk->clock->cycle_last;
@@ -245,8 +247,6 @@ void update_vsyscall(struct timekeeper *tk)
 		vdso_data->xtime_clock_nsec	= tk->xtime_nsec;
 		vdso_data->cs_mult		= tk->mult;
 		vdso_data->cs_shift		= tk->shift;
-		vdso_data->wtm_clock_sec	= tk->wall_to_monotonic.tv_sec;
-		vdso_data->wtm_clock_nsec	= tk->wall_to_monotonic.tv_nsec;
 	}
 
 	smp_wmb();
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index d8064af42e62..6d20b7d162d8 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -48,7 +48,7 @@ $(obj-vdso): %.o: %.S
 
 # Actual build commands
 quiet_cmd_vdsold = VDSOL $@
-      cmd_vdsold = $(CC) $(c_flags) -Wl,-T $^ -o $@
+      cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@
 quiet_cmd_vdsoas = VDSOA $@
       cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $<
 
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
index f0a6d10b5211..fe652ffd34c2 100644
--- a/arch/arm64/kernel/vdso/gettimeofday.S
+++ b/arch/arm64/kernel/vdso/gettimeofday.S
@@ -103,6 +103,8 @@ ENTRY(__kernel_clock_gettime)
 	bl	__do_get_tspec
 	seqcnt_check w9, 1b
 
+	mov	x30, x2
+
 	cmp	w0, #CLOCK_MONOTONIC
 	b.ne	6f
 
@@ -118,6 +120,9 @@ ENTRY(__kernel_clock_gettime)
 	ccmp	w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
 	b.ne	8f
 
+	/* xtime_coarse_nsec is already right-shifted */
+	mov	x12, #0
+
 	/* Get coarse timespec. */
 	adr	vdso_data, _vdso_data
 3:	seqcnt_acquire
@@ -156,7 +161,7 @@ ENTRY(__kernel_clock_gettime)
 	lsr	x11, x11, x12
 	stp	x10, x11, [x1, #TSPEC_TV_SEC]
 	mov	x0, xzr
-	ret	x2
+	ret
 7:
 	mov	x30, x2
 8:	/* Syscall fallback. */
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 3b47c36e10ff..2c56012cb2d2 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -694,6 +694,24 @@ __hyp_panic_str:
 
 	.align	2
 
+/*
+ * u64 kvm_call_hyp(void *hypfn, ...);
+ *
+ * This is not really a variadic function in the classic C-way and care must
+ * be taken when calling this to ensure parameters are passed in registers
+ * only, since the stack will change between the caller and the callee.
+ *
+ * Call the function with the first argument containing a pointer to the
+ * function you wish to call in Hyp mode, and subsequent arguments will be
+ * passed as x0, x1, and x2 (a maximum of 3 arguments in addition to the
+ * function pointer can be passed).  The function being called must be mapped
+ * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c).  Return values are
+ * passed in r0 and r1.
+ *
+ * A function pointer with a value of 0 has a special meaning, and is
+ * used to implement __hyp_get_vectors in the same way as in
+ * arch/arm64/kernel/hyp_stub.S.
+ */
 ENTRY(kvm_call_hyp)
 	hvc	#0
 	ret
@@ -737,7 +755,12 @@ el1_sync: // Guest trapped into EL2
 	pop	x2, x3
 	pop	x0, x1
 
-	push	lr, xzr
+	/* Check for __hyp_get_vectors */
+	cbnz	x0, 1f
+	mrs	x0, vbar_el2
+	b	2f
+
+1:	push	lr, xzr
 
 	/*
 	 * Compute the function address in EL2, and shuffle the parameters.
@@ -750,7 +773,7 @@ el1_sync: // Guest trapped into EL2
 	blr	lr
 
 	pop	lr, xzr
-	eret
+2:	eret
 
 el1_trap:
 	/*
diff --git a/arch/arm64/lib/bitops.S b/arch/arm64/lib/bitops.S
index e5db797790d3..7dac371cc9a2 100644
--- a/arch/arm64/lib/bitops.S
+++ b/arch/arm64/lib/bitops.S
@@ -46,11 +46,12 @@ ENTRY( \name )
 	mov	x2, #1
 	add	x1, x1, x0, lsr #3	// Get word offset
 	lsl	x4, x2, x3		// Create mask
-1:	ldaxr	x2, [x1]
+1:	ldxr	x2, [x1]
 	lsr	x0, x2, x3		// Save old value of bit
 	\instr	x2, x2, x4		// toggle bit
 	stlxr	w5, x2, [x1]
 	cbnz	w5, 1b
+	dmb	ish
 	and	x0, x0, #1
 3:	ret
 ENDPROC(\name	)
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 45b5ab54c9ee..fbd76785c5db 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -45,6 +45,7 @@ static void *arm64_swiotlb_alloc_coherent(struct device *dev, size_t size,
 	if (IS_ENABLED(CONFIG_DMA_CMA)) {
 		struct page *page;
 
+		size = PAGE_ALIGN(size);
 		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
 							get_order(size));
 		if (!page)
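
The added PAGE_ALIGN() matters because size >> PAGE_SHIFT rounds down: a sub-page request would ask CMA for zero pages. A small standalone check (4 KiB pages assumed; the macros are re-derived here for illustration):

```c
#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT	12			/* 4 KiB pages assumed */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	size_t size = 100;	/* a sub-page coherent allocation */

	printf("%zu\n", size >> PAGE_SHIFT);		  /* 0 pages: broken */
	printf("%zu\n", PAGE_ALIGN(size) >> PAGE_SHIFT);  /* 1 page: fixed */
	return 0;
}
```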
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index f557ebbe7013..f8dc7e8fce6f 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -203,10 +203,18 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
 	do {
 		next = pmd_addr_end(addr, end);
 		/* try section mapping first */
-		if (((addr | next | phys) & ~SECTION_MASK) == 0)
+		if (((addr | next | phys) & ~SECTION_MASK) == 0) {
+			pmd_t old_pmd =*pmd;
 			set_pmd(pmd, __pmd(phys | prot_sect_kernel));
-		else
+			/*
+			 * Check for previous table entries created during
+			 * boot (__create_page_tables) and flush them.
+			 */
+			if (!pmd_none(old_pmd))
+				flush_tlb_all();
+		} else {
 			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys));
+		}
 		phys += next - addr;
 	} while (pmd++, addr = next, addr != end);
 }
diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c
index 7083cdada657..62c6101df260 100644
--- a/arch/arm64/mm/pgd.c
+++ b/arch/arm64/mm/pgd.c
@@ -32,17 +32,10 @@
 
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	pgd_t *new_pgd;
-
 	if (PGD_SIZE == PAGE_SIZE)
-		new_pgd = (pgd_t *)get_zeroed_page(GFP_KERNEL);
+		return (pgd_t *)get_zeroed_page(GFP_KERNEL);
 	else
-		new_pgd = kzalloc(PGD_SIZE, GFP_KERNEL);
-
-	if (!new_pgd)
-		return NULL;
-
-	return new_pgd;
+		return kzalloc(PGD_SIZE, GFP_KERNEL);
 }
 
 void pgd_free(struct mm_struct *mm, pgd_t *pgd)