author    Thomas Gleixner <tglx@linutronix.de>  2016-09-01 12:33:46 -0400
committer Thomas Gleixner <tglx@linutronix.de>  2016-09-01 12:33:46 -0400
commit    0cb7bf61b1e9f05027de58c80f9b46a714d24e35 (patch)
tree      41fb55cf62d07b425122f9a8b96412c0d8eb99c5 /arch/arm64
parent    aa877175e7a9982233ed8f10cb4bfddd78d82741 (diff)
parent    3eab887a55424fc2c27553b7bfe32330df83f7b8 (diff)
Merge branch 'linus' into smp/hotplug
Apply upstream changes to avoid conflicts with pending patches.
Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/Kconfig.platforms                    |  4
-rw-r--r--  arch/arm64/boot/dts/exynos/exynos7-espresso.dts |  3
-rw-r--r--  arch/arm64/configs/defconfig                    | 53
-rw-r--r--  arch/arm64/include/asm/kprobes.h                |  2
-rw-r--r--  arch/arm64/kernel/entry.S                       |  7
-rw-r--r--  arch/arm64/kernel/head.S                        |  3
-rw-r--r--  arch/arm64/kernel/hibernate.c                   | 82
-rw-r--r--  arch/arm64/kernel/probes/kprobes.c              | 31
-rw-r--r--  arch/arm64/kernel/sleep.S                       | 10
-rw-r--r--  arch/arm64/kernel/smp.c                         |  8
-rw-r--r--  arch/arm64/kvm/hyp/switch.c                     |  2
-rw-r--r--  arch/arm64/kvm/sys_regs.c                       | 10
-rw-r--r--  arch/arm64/mm/dump.c                            |  6
-rw-r--r--  arch/arm64/mm/fault.c                           | 14
-rw-r--r--  arch/arm64/mm/numa.c                            |  2
15 files changed, 147 insertions(+), 90 deletions(-)
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index bb2616b16157..be5d824ebdba 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -8,7 +8,7 @@ config ARCH_SUNXI
 
 config ARCH_ALPINE
 	bool "Annapurna Labs Alpine platform"
-	select ALPINE_MSI
+	select ALPINE_MSI if PCI
 	help
 	  This enables support for the Annapurna Labs Alpine
 	  Soc family.
@@ -66,7 +66,7 @@ config ARCH_LG1K
 config ARCH_HISI
 	bool "Hisilicon SoC Family"
 	select ARM_TIMER_SP804
-	select HISILICON_IRQ_MBIGEN
+	select HISILICON_IRQ_MBIGEN if PCI
 	help
 	  This enables support for Hisilicon ARMv8 SoC family
 
diff --git a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts
index 299f3ce969ab..c528dd52ba2d 100644
--- a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts
+++ b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts
@@ -12,6 +12,7 @@
 /dts-v1/;
 #include "exynos7.dtsi"
 #include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/clock/samsung,s2mps11.h>
 
 / {
 	model = "Samsung Exynos7 Espresso board based on EXYNOS7";
@@ -43,6 +44,8 @@
 
 &rtc {
 	status = "okay";
+	clocks = <&clock_ccore PCLK_RTC>, <&s2mps15_osc S2MPS11_CLK_AP>;
+	clock-names = "rtc", "rtc_src";
 };
 
 &watchdog {
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 0555b7caaf2c..eadf4855ad2d 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -1,4 +1,3 @@
-# CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_AUDIT=y
@@ -15,10 +14,14 @@ CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
 CONFIG_CGROUP_HUGETLB=y
-# CONFIG_UTS_NS is not set
-# CONFIG_IPC_NS is not set
-# CONFIG_NET_NS is not set
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_USER_NS=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_KALLSYMS_ALL=y
@@ -71,6 +74,7 @@ CONFIG_PREEMPT=y
 CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
 CONFIG_CMA=y
+CONFIG_SECCOMP=y
 CONFIG_XEN=y
 CONFIG_KEXEC=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
@@ -84,10 +88,37 @@ CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
-# CONFIG_IPV6 is not set
+CONFIG_IPV6=m
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_NAT=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_BRIDGE=m
+CONFIG_BRIDGE_VLAN_FILTERING=y
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_VLAN_8021Q_MVRP=y
 CONFIG_BPF_JIT=y
 CONFIG_CFG80211=m
 CONFIG_MAC80211=m
@@ -103,6 +134,7 @@ CONFIG_MTD=y
 CONFIG_MTD_M25P80=y
 CONFIG_MTD_SPI_NOR=y
 CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_NBD=m
 CONFIG_VIRTIO_BLK=y
 CONFIG_SRAM=y
 # CONFIG_SCSI_PROC_FS is not set
@@ -120,7 +152,10 @@ CONFIG_SATA_SIL24=y
 CONFIG_PATA_PLATFORM=y
 CONFIG_PATA_OF_PLATFORM=y
 CONFIG_NETDEVICES=y
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
 CONFIG_TUN=y
+CONFIG_VETH=m
 CONFIG_VIRTIO_NET=y
 CONFIG_AMD_XGBE=y
 CONFIG_NET_XGENE=y
@@ -350,12 +385,16 @@ CONFIG_EXYNOS_ADC=y
 CONFIG_PWM_SAMSUNG=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
 CONFIG_FANOTIFY=y
 CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
 CONFIG_QUOTA=y
 CONFIG_AUTOFS4_FS=y
-CONFIG_FUSE_FS=y
-CONFIG_CUSE=y
+CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
 CONFIG_HUGETLBFS=y
diff --git a/arch/arm64/include/asm/kprobes.h b/arch/arm64/include/asm/kprobes.h
index 61b49150dfa3..1737aecfcc5e 100644
--- a/arch/arm64/include/asm/kprobes.h
+++ b/arch/arm64/include/asm/kprobes.h
@@ -22,7 +22,6 @@
 
 #define __ARCH_WANT_KPROBES_INSN_SLOT
 #define MAX_INSN_SIZE			1
-#define MAX_STACK_SIZE			128
 
 #define flush_insn_slot(p)		do { } while (0)
 #define kretprobe_blacklist_size	0
@@ -47,7 +46,6 @@ struct kprobe_ctlblk {
 	struct prev_kprobe prev_kprobe;
 	struct kprobe_step_ctx ss_ctx;
 	struct pt_regs jprobe_saved_regs;
-	char jprobes_stack[MAX_STACK_SIZE];
 };
 
 void arch_remove_kprobe(struct kprobe *);
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 96e4a2b64cc1..441420ca7d08 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -353,6 +353,8 @@ el1_sync:
 	lsr	x24, x1, #ESR_ELx_EC_SHIFT	// exception class
 	cmp	x24, #ESR_ELx_EC_DABT_CUR	// data abort in EL1
 	b.eq	el1_da
+	cmp	x24, #ESR_ELx_EC_IABT_CUR	// instruction abort in EL1
+	b.eq	el1_ia
 	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
 	b.eq	el1_undef
 	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
@@ -364,6 +366,11 @@ el1_sync:
 	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
 	b.ge	el1_dbg
 	b	el1_inv
+
+el1_ia:
+	/*
+	 * Fall through to the Data abort case
+	 */
 el1_da:
 	/*
 	 * Data abort handling
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index b77f58355da1..3e7b050e99dc 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -757,6 +757,9 @@ ENTRY(__enable_mmu)
 	isb
 	bl	__create_page_tables		// recreate kernel mapping
 
+	tlbi	vmalle1				// Remove any stale TLB entries
+	dsb	nsh
+
 	msr	sctlr_el1, x19			// re-enable the MMU
 	isb
 	ic	iallu				// flush instructions fetched
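
The two added instructions discard TLB entries left over from the MMU having already been enabled once on this path, before SCTLR_EL1 is rewritten. As a rough C-level sketch of the same local invalidation sequence (mirroring the kernel's local_flush_tlb_all(); the helper name below is illustrative and not part of this diff):

#include <asm/barrier.h>

/*
 * Sketch: invalidate all EL1 TLB entries on the local CPU only.
 * "vmalle1" with dsb(nsh) is CPU-local; a broadcast flush would use
 * "vmalle1is" with dsb(ish) instead.
 */
static inline void local_tlb_invalidate_sketch(void)
{
	dsb(nshst);			/* drain pending stores first */
	asm volatile("tlbi vmalle1");	/* invalidate all EL1 entries */
	dsb(nsh);			/* wait for completion on this CPU */
	isb();				/* resynchronise the pipeline */
}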
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 21ab5df9fa76..65d81f965e74 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -35,6 +35,7 @@
 #include <asm/sections.h>
 #include <asm/smp.h>
 #include <asm/suspend.h>
+#include <asm/sysreg.h>
 #include <asm/virt.h>
 
 /*
@@ -217,12 +218,22 @@ static int create_safe_exec_page(void *src_start, size_t length,
 	set_pte(pte, __pte(virt_to_phys((void *)dst) |
 		 pgprot_val(PAGE_KERNEL_EXEC)));
 
-	/* Load our new page tables */
-	asm volatile("msr	ttbr0_el1, %0;"
-		     "isb;"
-		     "tlbi	vmalle1is;"
-		     "dsb	ish;"
-		     "isb" : : "r"(virt_to_phys(pgd)));
+	/*
+	 * Load our new page tables. A strict BBM approach requires that we
+	 * ensure that TLBs are free of any entries that may overlap with the
+	 * global mappings we are about to install.
+	 *
+	 * For a real hibernate/resume cycle TTBR0 currently points to a zero
+	 * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI
+	 * runtime services), while for a userspace-driven test_resume cycle it
+	 * points to userspace page tables (and we must point it at a zero page
+	 * ourselves). Elsewhere we only (un)install the idmap with preemption
+	 * disabled, so T0SZ should be as required regardless.
+	 */
+	cpu_set_reserved_ttbr0();
+	local_flush_tlb_all();
+	write_sysreg(virt_to_phys(pgd), ttbr0_el1);
+	isb();
 
 	*phys_dst_addr = virt_to_phys((void *)dst);
 
@@ -394,6 +405,38 @@ int swsusp_arch_resume(void)
 				  void *, phys_addr_t, phys_addr_t);
 
 	/*
+	 * Restoring the memory image will overwrite the ttbr1 page tables.
+	 * Create a second copy of just the linear map, and use this when
+	 * restoring.
+	 */
+	tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
+	if (!tmp_pg_dir) {
+		pr_err("Failed to allocate memory for temporary page tables.");
+		rc = -ENOMEM;
+		goto out;
+	}
+	rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
+	if (rc)
+		goto out;
+
+	/*
+	 * Since we only copied the linear map, we need to find restore_pblist's
+	 * linear map address.
+	 */
+	lm_restore_pblist = LMADDR(restore_pblist);
+
+	/*
+	 * We need a zero page that is zero before & after resume in order to
+	 * to break before make on the ttbr1 page tables.
+	 */
+	zero_page = (void *)get_safe_page(GFP_ATOMIC);
+	if (!zero_page) {
+		pr_err("Failed to allocate zero page.");
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	/*
 	 * Locate the exit code in the bottom-but-one page, so that *NULL
 	 * still has disastrous affects.
 	 */
@@ -419,27 +462,6 @@ int swsusp_arch_resume(void)
 	__flush_dcache_area(hibernate_exit, exit_size);
 
 	/*
-	 * Restoring the memory image will overwrite the ttbr1 page tables.
-	 * Create a second copy of just the linear map, and use this when
-	 * restoring.
-	 */
-	tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
-	if (!tmp_pg_dir) {
-		pr_err("Failed to allocate memory for temporary page tables.");
-		rc = -ENOMEM;
-		goto out;
-	}
-	rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
-	if (rc)
-		goto out;
-
-	/*
-	 * Since we only copied the linear map, we need to find restore_pblist's
-	 * linear map address.
-	 */
-	lm_restore_pblist = LMADDR(restore_pblist);
-
-	/*
 	 * KASLR will cause the el2 vectors to be in a different location in
 	 * the resumed kernel. Load hibernate's temporary copy into el2.
 	 *
@@ -453,12 +475,6 @@ int swsusp_arch_resume(void)
 		__hyp_set_vectors(el2_vectors);
 	}
 
-	/*
-	 * We need a zero page that is zero before & after resume in order to
-	 * to break before make on the ttbr1 page tables.
-	 */
-	zero_page = (void *)get_safe_page(GFP_ATOMIC);
-
 	hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
 		       resume_hdr.reenter_kernel, lm_restore_pblist,
 		       resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));
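
The new comments lean on the architecture's break-before-make (BBM) rule: a translation table entry that may be live in a TLB must be invalidated, and the TLB cleaned, before a conflicting entry is written. A minimal sketch of the idea for a single kernel mapping, with an illustrative helper name (the hibernate code applies the same rule to whole tables by parking ttbr0_el1 on a zero page first):

#include <asm/pgtable.h>
#include <asm/tlbflush.h>

/* Illustrative only: break-before-make for one kernel PTE. */
static void bbm_update_pte_sketch(pte_t *ptep, pte_t new, unsigned long addr)
{
	set_pte(ptep, __pte(0));			/* break: clear the old entry */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);	/* clean stale TLB entries */
	set_pte(ptep, new);				/* make: install the new entry */
}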
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index bf9768588288..c6b0f40620d8 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -41,18 +41,6 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 static void __kprobes
 post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
 
-static inline unsigned long min_stack_size(unsigned long addr)
-{
-	unsigned long size;
-
-	if (on_irq_stack(addr, raw_smp_processor_id()))
-		size = IRQ_STACK_PTR(raw_smp_processor_id()) - addr;
-	else
-		size = (unsigned long)current_thread_info() + THREAD_START_SP - addr;
-
-	return min(size, FIELD_SIZEOF(struct kprobe_ctlblk, jprobes_stack));
-}
-
 static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
 {
 	/* prepare insn slot */
@@ -489,20 +477,15 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct jprobe *jp = container_of(p, struct jprobe, kp);
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-	long stack_ptr = kernel_stack_pointer(regs);
 
 	kcb->jprobe_saved_regs = *regs;
 	/*
-	 * As Linus pointed out, gcc assumes that the callee
-	 * owns the argument space and could overwrite it, e.g.
-	 * tailcall optimization. So, to be absolutely safe
-	 * we also save and restore enough stack bytes to cover
-	 * the argument area.
+	 * Since we can't be sure where in the stack frame "stacked"
+	 * pass-by-value arguments are stored we just don't try to
+	 * duplicate any of the stack. Do not use jprobes on functions that
+	 * use more than 64 bytes (after padding each to an 8 byte boundary)
+	 * of arguments, or pass individual arguments larger than 16 bytes.
 	 */
-	kasan_disable_current();
-	memcpy(kcb->jprobes_stack, (void *)stack_ptr,
-	       min_stack_size(stack_ptr));
-	kasan_enable_current();
 
 	instruction_pointer_set(regs, (unsigned long) jp->entry);
 	preempt_disable();
@@ -554,10 +537,6 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 	}
 	unpause_graph_tracing();
 	*regs = kcb->jprobe_saved_regs;
-	kasan_disable_current();
-	memcpy((void *)stack_addr, kcb->jprobes_stack,
-	       min_stack_size(stack_addr));
-	kasan_enable_current();
 	preempt_enable_no_resched();
 	return 1;
 }
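
The rewritten comment replaces a runtime stack save/restore with a usage contract on jprobe callers. As a hedged illustration of a conforming jprobe of that era (the probed symbol and prototype are examples only, not taken from this diff):

#include <linux/kprobes.h>
#include <linux/printk.h>

/*
 * Handler sharing the probed function's prototype. Five 8-byte
 * arguments total 40 bytes, within the 64-byte limit above, and no
 * single argument exceeds 16 bytes.
 */
static long jdo_fork_sketch(unsigned long clone_flags, unsigned long stack_start,
			    unsigned long stack_size, int __user *parent_tidptr,
			    int __user *child_tidptr)
{
	pr_info("jprobe: clone_flags=0x%lx\n", clone_flags);
	jprobe_return();	/* required: unwinds back to the probed function */
	return 0;		/* never reached */
}

static struct jprobe my_jprobe = {
	.entry		= jdo_fork_sketch,
	.kp.symbol_name	= "_do_fork",	/* example target */
};

/* register_jprobe(&my_jprobe) at module init; unregister_jprobe() on exit. */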
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index 9a3aec97ac09..ccf79d849e0a 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -101,12 +101,20 @@ ENTRY(cpu_resume)
 	bl	el2_setup		// if in EL2 drop to EL1 cleanly
 	/* enable the MMU early - so we can access sleep_save_stash by va */
 	adr_l	lr, __enable_mmu	/* __cpu_setup will return here */
-	ldr	x27, =_cpu_resume	/* __enable_mmu will branch here */
+	adr_l	x27, _resume_switched	/* __enable_mmu will branch here */
 	adrp	x25, idmap_pg_dir
 	adrp	x26, swapper_pg_dir
 	b	__cpu_setup
 ENDPROC(cpu_resume)
 
+	.pushsection	".idmap.text", "ax"
+_resume_switched:
+	ldr	x8, =_cpu_resume
+	br	x8
+ENDPROC(_resume_switched)
+	.ltorg
+	.popsection
+
 ENTRY(_cpu_resume)
 	mrs	x1, mpidr_el1
 	adrp	x8, mpidr_hash
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 76a6d9263908..d93d43352504 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -661,9 +661,9 @@ void __init smp_init_cpus(void)
 	acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
 				      acpi_parse_gic_cpu_interface, 0);
 
-	if (cpu_count > NR_CPUS)
-		pr_warn("no. of cores (%d) greater than configured maximum of %d - clipping\n",
-			cpu_count, NR_CPUS);
+	if (cpu_count > nr_cpu_ids)
+		pr_warn("Number of cores (%d) exceeds configured maximum of %d - clipping\n",
+			cpu_count, nr_cpu_ids);
 
 	if (!bootcpu_valid) {
 		pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
@@ -677,7 +677,7 @@ void __init smp_init_cpus(void)
 	 * with entries in cpu_logical_map while initializing the cpus.
 	 * If the cpu set-up fails, invalidate the cpu_logical_map entry.
 	 */
-	for (i = 1; i < NR_CPUS; i++) {
+	for (i = 1; i < nr_cpu_ids; i++) {
 		if (cpu_logical_map(i) != INVALID_HWID) {
 			if (smp_cpu_setup(i))
 				cpu_logical_map(i) = INVALID_HWID;
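
nr_cpu_ids is the runtime ceiling (possibly lowered from the compile-time NR_CPUS, e.g. by the nr_cpus= boot parameter), so both the warning and the map walk now stay within entries that can actually be initialised. A small sketch of the distinction, using the real nr_cpu_ids symbol from linux/cpumask.h (the helper name is illustrative):

#include <linux/cpumask.h>
#include <linux/threads.h>	/* NR_CPUS */

/* Sketch: clamp an enumerated CPU count the way smp_init_cpus() warns about. */
static int clamp_cpu_count_sketch(int cpu_count)
{
	/* nr_cpu_ids <= NR_CPUS; entries beyond it are never set up */
	if (cpu_count > nr_cpu_ids)
		cpu_count = nr_cpu_ids;
	return cpu_count;
}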
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index ae7855f16ec2..5a84b4562603 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -256,7 +256,7 @@ static int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
 
 	/*
 	 * We must restore the 32-bit state before the sysregs, thanks
-	 * to Cortex-A57 erratum #852523.
+	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
 	 */
 	__sysreg32_restore_state(vcpu);
 	__sysreg_restore_guest_state(guest_ctxt);
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index b0b225ceca18..e51367d159d0 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -823,14 +823,6 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
  * Architected system registers.
  * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
  *
- * We could trap ID_DFR0 and tell the guest we don't support performance
- * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
- * NAKed, so it will read the PMCR anyway.
- *
- * Therefore we tell the guest we have 0 counters. Unfortunately, we
- * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
- * all PM registers, which doesn't crash the guest kernel at least.
- *
  * Debug handling: We do trap most, if not all debug related system
  * registers. The implementation is good enough to ensure that a guest
  * can use these with minimal performance degradation. The drawback is
@@ -1360,7 +1352,7 @@ static const struct sys_reg_desc cp15_regs[] = {
 	{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
 
 	/* ICC_SRE */
-	{ Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi },
+	{ Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },
 
 	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
 
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index f94b80eb295d..9c3e75df2180 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -242,7 +242,7 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
 
 static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
 {
-	pte_t *pte = pte_offset_kernel(pmd, 0);
+	pte_t *pte = pte_offset_kernel(pmd, 0UL);
 	unsigned long addr;
 	unsigned i;
 
@@ -254,7 +254,7 @@ static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
 
 static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
 {
-	pmd_t *pmd = pmd_offset(pud, 0);
+	pmd_t *pmd = pmd_offset(pud, 0UL);
 	unsigned long addr;
 	unsigned i;
 
@@ -271,7 +271,7 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
 
 static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
 {
-	pud_t *pud = pud_offset(pgd, 0);
+	pud_t *pud = pud_offset(pgd, 0UL);
 	unsigned long addr;
 	unsigned i;
 
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index c8beaa0da7df..05d2bd776c69 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -153,6 +153,11 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
 }
 #endif
 
+static bool is_el1_instruction_abort(unsigned int esr)
+{
+	return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR;
+}
+
 /*
  * The kernel tried to access some page that wasn't present.
  */
@@ -161,8 +166,9 @@ static void __do_kernel_fault(struct mm_struct *mm, unsigned long addr,
 {
 	/*
 	 * Are we prepared to handle this kernel fault?
+	 * We are almost certainly not prepared to handle instruction faults.
 	 */
-	if (fixup_exception(regs))
+	if (!is_el1_instruction_abort(esr) && fixup_exception(regs))
 		return;
 
 	/*
@@ -267,7 +273,8 @@ static inline bool is_permission_fault(unsigned int esr)
 	unsigned int ec       = ESR_ELx_EC(esr);
 	unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;
 
-	return (ec == ESR_ELx_EC_DABT_CUR && fsc_type == ESR_ELx_FSC_PERM);
+	return (ec == ESR_ELx_EC_DABT_CUR && fsc_type == ESR_ELx_FSC_PERM) ||
+	       (ec == ESR_ELx_EC_IABT_CUR && fsc_type == ESR_ELx_FSC_PERM);
 }
 
 static bool is_el0_instruction_abort(unsigned int esr)
@@ -312,6 +319,9 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 		if (regs->orig_addr_limit == KERNEL_DS)
 			die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
 
+		if (is_el1_instruction_abort(esr))
+			die("Attempting to execute userspace memory", regs, esr);
+
 		if (!search_exception_tables(regs->pc))
 			die("Accessing user space memory outside uaccess.h routines", regs, esr);
 	}
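
All three hunks classify the fault by the exception-class (EC) field of the ESR_ELx syndrome register. For reference, a sketch of the decoding that ESR_ELx_EC() and the _CUR constants perform (values as in asm/esr.h of this period; EC occupies bits [31:26]):

/* Sketch of the ESR_ELx EC decoding used above. */
#define ESR_ELx_EC_SHIFT	26
#define ESR_ELx_EC_MASK		(0x3FUL << ESR_ELx_EC_SHIFT)
#define ESR_ELx_EC(esr)		(((esr) & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT)

#define ESR_ELx_EC_IABT_CUR	0x21	/* instruction abort taken at the current EL */
#define ESR_ELx_EC_DABT_CUR	0x25	/* data abort taken at the current EL */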
diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c
index c7fe3ec70774..5bb15eab6f00 100644
--- a/arch/arm64/mm/numa.c
+++ b/arch/arm64/mm/numa.c
@@ -23,6 +23,8 @@
 #include <linux/module.h>
 #include <linux/of.h>
 
+#include <asm/acpi.h>
+
 struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
 EXPORT_SYMBOL(node_data);
 nodemask_t numa_nodes_parsed __initdata;