author     Thomas Gleixner <tglx@linutronix.de>  2016-09-01 12:33:46 -0400
committer  Thomas Gleixner <tglx@linutronix.de>  2016-09-01 12:33:46 -0400
commit     0cb7bf61b1e9f05027de58c80f9b46a714d24e35 (patch)
tree       41fb55cf62d07b425122f9a8b96412c0d8eb99c5 /arch/arm64
parent     aa877175e7a9982233ed8f10cb4bfddd78d82741 (diff)
parent     3eab887a55424fc2c27553b7bfe32330df83f7b8 (diff)
Merge branch 'linus' into smp/hotplug
Apply upstream changes to avoid conflicts with pending patches.
Diffstat (limited to 'arch/arm64')
-rw-r--r-- | arch/arm64/Kconfig.platforms                    |  4
-rw-r--r-- | arch/arm64/boot/dts/exynos/exynos7-espresso.dts |  3
-rw-r--r-- | arch/arm64/configs/defconfig                    | 53
-rw-r--r-- | arch/arm64/include/asm/kprobes.h                |  2
-rw-r--r-- | arch/arm64/kernel/entry.S                       |  7
-rw-r--r-- | arch/arm64/kernel/head.S                        |  3
-rw-r--r-- | arch/arm64/kernel/hibernate.c                   | 82
-rw-r--r-- | arch/arm64/kernel/probes/kprobes.c              | 31
-rw-r--r-- | arch/arm64/kernel/sleep.S                       | 10
-rw-r--r-- | arch/arm64/kernel/smp.c                         |  8
-rw-r--r-- | arch/arm64/kvm/hyp/switch.c                     |  2
-rw-r--r-- | arch/arm64/kvm/sys_regs.c                       | 10
-rw-r--r-- | arch/arm64/mm/dump.c                            |  6
-rw-r--r-- | arch/arm64/mm/fault.c                           | 14
-rw-r--r-- | arch/arm64/mm/numa.c                            |  2
15 files changed, 147 insertions, 90 deletions
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index bb2616b16157..be5d824ebdba 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -8,7 +8,7 @@ config ARCH_SUNXI
 
 config ARCH_ALPINE
         bool "Annapurna Labs Alpine platform"
-        select ALPINE_MSI
+        select ALPINE_MSI if PCI
         help
           This enables support for the Annapurna Labs Alpine
           Soc family.
@@ -66,7 +66,7 @@ config ARCH_LG1K
 config ARCH_HISI
         bool "Hisilicon SoC Family"
         select ARM_TIMER_SP804
-        select HISILICON_IRQ_MBIGEN
+        select HISILICON_IRQ_MBIGEN if PCI
         help
           This enables support for Hisilicon ARMv8 SoC family
 
diff --git a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts
index 299f3ce969ab..c528dd52ba2d 100644
--- a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts
+++ b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts
@@ -12,6 +12,7 @@
 /dts-v1/;
 #include "exynos7.dtsi"
 #include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/clock/samsung,s2mps11.h>
 
 / {
         model = "Samsung Exynos7 Espresso board based on EXYNOS7";
@@ -43,6 +44,8 @@
 
 &rtc {
         status = "okay";
+        clocks = <&clock_ccore PCLK_RTC>, <&s2mps15_osc S2MPS11_CLK_AP>;
+        clock-names = "rtc", "rtc_src";
 };
 
 &watchdog {
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 0555b7caaf2c..eadf4855ad2d 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -1,4 +1,3 @@
-# CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_AUDIT=y
@@ -15,10 +14,14 @@ CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
 CONFIG_CGROUP_HUGETLB=y
-# CONFIG_UTS_NS is not set
-# CONFIG_IPC_NS is not set
-# CONFIG_NET_NS is not set
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_USER_NS=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_KALLSYMS_ALL=y
@@ -71,6 +74,7 @@ CONFIG_PREEMPT=y
 CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
 CONFIG_CMA=y
+CONFIG_SECCOMP=y
 CONFIG_XEN=y
 CONFIG_KEXEC=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
@@ -84,10 +88,37 @@ CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
-# CONFIG_IPV6 is not set
+CONFIG_IPV6=m
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_NAT=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_BRIDGE=m
+CONFIG_BRIDGE_VLAN_FILTERING=y
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_VLAN_8021Q_MVRP=y
 CONFIG_BPF_JIT=y
 CONFIG_CFG80211=m
 CONFIG_MAC80211=m
@@ -103,6 +134,7 @@ CONFIG_MTD=y
 CONFIG_MTD_M25P80=y
 CONFIG_MTD_SPI_NOR=y
 CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_NBD=m
 CONFIG_VIRTIO_BLK=y
 CONFIG_SRAM=y
 # CONFIG_SCSI_PROC_FS is not set
@@ -120,7 +152,10 @@ CONFIG_SATA_SIL24=y
 CONFIG_PATA_PLATFORM=y
 CONFIG_PATA_OF_PLATFORM=y
 CONFIG_NETDEVICES=y
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
 CONFIG_TUN=y
+CONFIG_VETH=m
 CONFIG_VIRTIO_NET=y
 CONFIG_AMD_XGBE=y
 CONFIG_NET_XGENE=y
@@ -350,12 +385,16 @@ CONFIG_EXYNOS_ADC=y
 CONFIG_PWM_SAMSUNG=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
 CONFIG_FANOTIFY=y
 CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
 CONFIG_QUOTA=y
 CONFIG_AUTOFS4_FS=y
-CONFIG_FUSE_FS=y
-CONFIG_CUSE=y
+CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
 CONFIG_HUGETLBFS=y
diff --git a/arch/arm64/include/asm/kprobes.h b/arch/arm64/include/asm/kprobes.h
index 61b49150dfa3..1737aecfcc5e 100644
--- a/arch/arm64/include/asm/kprobes.h
+++ b/arch/arm64/include/asm/kprobes.h
@@ -22,7 +22,6 @@
 
 #define __ARCH_WANT_KPROBES_INSN_SLOT
 #define MAX_INSN_SIZE                   1
-#define MAX_STACK_SIZE                  128
 
 #define flush_insn_slot(p)              do { } while (0)
 #define kretprobe_blacklist_size        0
@@ -47,7 +46,6 @@ struct kprobe_ctlblk {
         struct prev_kprobe prev_kprobe;
         struct kprobe_step_ctx ss_ctx;
         struct pt_regs jprobe_saved_regs;
-        char jprobes_stack[MAX_STACK_SIZE];
 };
 
 void arch_remove_kprobe(struct kprobe *);
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 96e4a2b64cc1..441420ca7d08 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -353,6 +353,8 @@ el1_sync:
         lsr     x24, x1, #ESR_ELx_EC_SHIFT      // exception class
         cmp     x24, #ESR_ELx_EC_DABT_CUR       // data abort in EL1
         b.eq    el1_da
+        cmp     x24, #ESR_ELx_EC_IABT_CUR       // instruction abort in EL1
+        b.eq    el1_ia
         cmp     x24, #ESR_ELx_EC_SYS64          // configurable trap
         b.eq    el1_undef
         cmp     x24, #ESR_ELx_EC_SP_ALIGN       // stack alignment exception
@@ -364,6 +366,11 @@ el1_sync:
         cmp     x24, #ESR_ELx_EC_BREAKPT_CUR    // debug exception in EL1
         b.ge    el1_dbg
         b       el1_inv
+
+el1_ia:
+        /*
+         * Fall through to the Data abort case
+         */
 el1_da:
         /*
          * Data abort handling
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index b77f58355da1..3e7b050e99dc 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -757,6 +757,9 @@ ENTRY(__enable_mmu)
         isb
         bl      __create_page_tables            // recreate kernel mapping
 
+        tlbi    vmalle1                         // Remove any stale TLB entries
+        dsb     nsh
+
         msr     sctlr_el1, x19                  // re-enable the MMU
         isb
         ic      iallu                           // flush instructions fetched
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 21ab5df9fa76..65d81f965e74 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -35,6 +35,7 @@
 #include <asm/sections.h>
 #include <asm/smp.h>
 #include <asm/suspend.h>
+#include <asm/sysreg.h>
 #include <asm/virt.h>
 
 /*
@@ -217,12 +218,22 @@ static int create_safe_exec_page(void *src_start, size_t length,
         set_pte(pte, __pte(virt_to_phys((void *)dst) |
                          pgprot_val(PAGE_KERNEL_EXEC)));
 
-        /* Load our new page tables */
-        asm volatile("msr       ttbr0_el1, %0;"
-                     "isb;"
-                     "tlbi      vmalle1is;"
-                     "dsb       ish;"
-                     "isb" : : "r"(virt_to_phys(pgd)));
+        /*
+         * Load our new page tables. A strict BBM approach requires that we
+         * ensure that TLBs are free of any entries that may overlap with the
+         * global mappings we are about to install.
+         *
+         * For a real hibernate/resume cycle TTBR0 currently points to a zero
+         * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI
+         * runtime services), while for a userspace-driven test_resume cycle it
+         * points to userspace page tables (and we must point it at a zero page
+         * ourselves). Elsewhere we only (un)install the idmap with preemption
+         * disabled, so T0SZ should be as required regardless.
+         */
+        cpu_set_reserved_ttbr0();
+        local_flush_tlb_all();
+        write_sysreg(virt_to_phys(pgd), ttbr0_el1);
+        isb();
 
         *phys_dst_addr = virt_to_phys((void *)dst);
 
@@ -394,6 +405,38 @@ int swsusp_arch_resume(void)
                           void *, phys_addr_t, phys_addr_t);
 
         /*
+         * Restoring the memory image will overwrite the ttbr1 page tables.
+         * Create a second copy of just the linear map, and use this when
+         * restoring.
+         */
+        tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
+        if (!tmp_pg_dir) {
+                pr_err("Failed to allocate memory for temporary page tables.");
+                rc = -ENOMEM;
+                goto out;
+        }
+        rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
+        if (rc)
+                goto out;
+
+        /*
+         * Since we only copied the linear map, we need to find restore_pblist's
+         * linear map address.
+         */
+        lm_restore_pblist = LMADDR(restore_pblist);
+
+        /*
+         * We need a zero page that is zero before & after resume in order to
+         * to break before make on the ttbr1 page tables.
+         */
+        zero_page = (void *)get_safe_page(GFP_ATOMIC);
+        if (!zero_page) {
+                pr_err("Failed to allocate zero page.");
+                rc = -ENOMEM;
+                goto out;
+        }
+
+        /*
          * Locate the exit code in the bottom-but-one page, so that *NULL
          * still has disastrous affects.
          */
@@ -419,27 +462,6 @@ int swsusp_arch_resume(void)
         __flush_dcache_area(hibernate_exit, exit_size);
 
         /*
-         * Restoring the memory image will overwrite the ttbr1 page tables.
-         * Create a second copy of just the linear map, and use this when
-         * restoring.
-         */
-        tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
-        if (!tmp_pg_dir) {
-                pr_err("Failed to allocate memory for temporary page tables.");
-                rc = -ENOMEM;
-                goto out;
-        }
-        rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
-        if (rc)
-                goto out;
-
-        /*
-         * Since we only copied the linear map, we need to find restore_pblist's
-         * linear map address.
-         */
-        lm_restore_pblist = LMADDR(restore_pblist);
-
-        /*
          * KASLR will cause the el2 vectors to be in a different location in
          * the resumed kernel. Load hibernate's temporary copy into el2.
          *
@@ -453,12 +475,6 @@ int swsusp_arch_resume(void)
                 __hyp_set_vectors(el2_vectors);
         }
 
-        /*
-         * We need a zero page that is zero before & after resume in order to
-         * to break before make on the ttbr1 page tables.
-         */
-        zero_page = (void *)get_safe_page(GFP_ATOMIC);
-
         hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
                        resume_hdr.reenter_kernel, lm_restore_pblist,
                        resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index bf9768588288..c6b0f40620d8 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -41,18 +41,6 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 static void __kprobes
 post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
 
-static inline unsigned long min_stack_size(unsigned long addr)
-{
-        unsigned long size;
-
-        if (on_irq_stack(addr, raw_smp_processor_id()))
-                size = IRQ_STACK_PTR(raw_smp_processor_id()) - addr;
-        else
-                size = (unsigned long)current_thread_info() + THREAD_START_SP - addr;
-
-        return min(size, FIELD_SIZEOF(struct kprobe_ctlblk, jprobes_stack));
-}
-
 static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
 {
         /* prepare insn slot */
@@ -489,20 +477,15 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
         struct jprobe *jp = container_of(p, struct jprobe, kp);
         struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-        long stack_ptr = kernel_stack_pointer(regs);
 
         kcb->jprobe_saved_regs = *regs;
         /*
-         * As Linus pointed out, gcc assumes that the callee
-         * owns the argument space and could overwrite it, e.g.
-         * tailcall optimization. So, to be absolutely safe
-         * we also save and restore enough stack bytes to cover
-         * the argument area.
+         * Since we can't be sure where in the stack frame "stacked"
+         * pass-by-value arguments are stored we just don't try to
+         * duplicate any of the stack. Do not use jprobes on functions that
+         * use more than 64 bytes (after padding each to an 8 byte boundary)
+         * of arguments, or pass individual arguments larger than 16 bytes.
          */
-        kasan_disable_current();
-        memcpy(kcb->jprobes_stack, (void *)stack_ptr,
-               min_stack_size(stack_ptr));
-        kasan_enable_current();
 
         instruction_pointer_set(regs, (unsigned long) jp->entry);
         preempt_disable();
@@ -554,10 +537,6 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
         }
         unpause_graph_tracing();
         *regs = kcb->jprobe_saved_regs;
-        kasan_disable_current();
-        memcpy((void *)stack_addr, kcb->jprobes_stack,
-               min_stack_size(stack_addr));
-        kasan_enable_current();
         preempt_enable_no_resched();
         return 1;
 }
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index 9a3aec97ac09..ccf79d849e0a 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -101,12 +101,20 @@ ENTRY(cpu_resume)
         bl      el2_setup               // if in EL2 drop to EL1 cleanly
         /* enable the MMU early - so we can access sleep_save_stash by va */
         adr_l   lr, __enable_mmu        /* __cpu_setup will return here */
-        ldr     x27, =_cpu_resume       /* __enable_mmu will branch here */
+        adr_l   x27, _resume_switched   /* __enable_mmu will branch here */
         adrp    x25, idmap_pg_dir
         adrp    x26, swapper_pg_dir
         b       __cpu_setup
 ENDPROC(cpu_resume)
 
+        .pushsection    ".idmap.text", "ax"
+_resume_switched:
+        ldr     x8, =_cpu_resume
+        br      x8
+ENDPROC(_resume_switched)
+        .ltorg
+        .popsection
+
 ENTRY(_cpu_resume)
         mrs     x1, mpidr_el1
         adrp    x8, mpidr_hash
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 76a6d9263908..d93d43352504 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -661,9 +661,9 @@ void __init smp_init_cpus(void)
                 acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
                                       acpi_parse_gic_cpu_interface, 0);
 
-        if (cpu_count > NR_CPUS)
-                pr_warn("no. of cores (%d) greater than configured maximum of %d - clipping\n",
-                        cpu_count, NR_CPUS);
+        if (cpu_count > nr_cpu_ids)
+                pr_warn("Number of cores (%d) exceeds configured maximum of %d - clipping\n",
+                        cpu_count, nr_cpu_ids);
 
         if (!bootcpu_valid) {
                 pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
@@ -677,7 +677,7 @@ void __init smp_init_cpus(void)
          * with entries in cpu_logical_map while initializing the cpus.
          * If the cpu set-up fails, invalidate the cpu_logical_map entry.
          */
-        for (i = 1; i < NR_CPUS; i++) {
+        for (i = 1; i < nr_cpu_ids; i++) {
                 if (cpu_logical_map(i) != INVALID_HWID) {
                         if (smp_cpu_setup(i))
                                 cpu_logical_map(i) = INVALID_HWID;
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index ae7855f16ec2..5a84b4562603 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -256,7 +256,7 @@ static int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
 
         /*
          * We must restore the 32-bit state before the sysregs, thanks
-         * to Cortex-A57 erratum #852523.
+         * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
          */
         __sysreg32_restore_state(vcpu);
         __sysreg_restore_guest_state(guest_ctxt);
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index b0b225ceca18..e51367d159d0 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -823,14 +823,6 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
  * Architected system registers.
  * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
  *
- * We could trap ID_DFR0 and tell the guest we don't support performance
- * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
- * NAKed, so it will read the PMCR anyway.
- *
- * Therefore we tell the guest we have 0 counters. Unfortunately, we
- * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
- * all PM registers, which doesn't crash the guest kernel at least.
- *
  * Debug handling: We do trap most, if not all debug related system
  * registers. The implementation is good enough to ensure that a guest
  * can use these with minimal performance degradation. The drawback is
@@ -1360,7 +1352,7 @@ static const struct sys_reg_desc cp15_regs[] = {
         { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
 
         /* ICC_SRE */
-        { Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi },
+        { Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },
 
         { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
 
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index f94b80eb295d..9c3e75df2180 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -242,7 +242,7 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
 
 static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
 {
-        pte_t *pte = pte_offset_kernel(pmd, 0);
+        pte_t *pte = pte_offset_kernel(pmd, 0UL);
         unsigned long addr;
         unsigned i;
 
@@ -254,7 +254,7 @@ static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
 
 static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
 {
-        pmd_t *pmd = pmd_offset(pud, 0);
+        pmd_t *pmd = pmd_offset(pud, 0UL);
         unsigned long addr;
         unsigned i;
 
@@ -271,7 +271,7 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
 
 static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
 {
-        pud_t *pud = pud_offset(pgd, 0);
+        pud_t *pud = pud_offset(pgd, 0UL);
         unsigned long addr;
         unsigned i;
 
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index c8beaa0da7df..05d2bd776c69 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -153,6 +153,11 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
 }
 #endif
 
+static bool is_el1_instruction_abort(unsigned int esr)
+{
+        return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR;
+}
+
 /*
  * The kernel tried to access some page that wasn't present.
  */
@@ -161,8 +166,9 @@ static void __do_kernel_fault(struct mm_struct *mm, unsigned long addr,
 {
         /*
          * Are we prepared to handle this kernel fault?
+         * We are almost certainly not prepared to handle instruction faults.
          */
-        if (fixup_exception(regs))
+        if (!is_el1_instruction_abort(esr) && fixup_exception(regs))
                 return;
 
         /*
@@ -267,7 +273,8 @@ static inline bool is_permission_fault(unsigned int esr)
         unsigned int ec       = ESR_ELx_EC(esr);
         unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;
 
-        return (ec == ESR_ELx_EC_DABT_CUR && fsc_type == ESR_ELx_FSC_PERM);
+        return (ec == ESR_ELx_EC_DABT_CUR && fsc_type == ESR_ELx_FSC_PERM) ||
+               (ec == ESR_ELx_EC_IABT_CUR && fsc_type == ESR_ELx_FSC_PERM);
 }
 
 static bool is_el0_instruction_abort(unsigned int esr)
@@ -312,6 +319,9 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
                 if (regs->orig_addr_limit == KERNEL_DS)
                         die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
 
+                if (is_el1_instruction_abort(esr))
+                        die("Attempting to execute userspace memory", regs, esr);
+
                 if (!search_exception_tables(regs->pc))
                         die("Accessing user space memory outside uaccess.h routines", regs, esr);
         }
diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c
index c7fe3ec70774..5bb15eab6f00 100644
--- a/arch/arm64/mm/numa.c
+++ b/arch/arm64/mm/numa.c
@@ -23,6 +23,8 @@
 #include <linux/module.h>
 #include <linux/of.h>
 
+#include <asm/acpi.h>
+
 struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
 EXPORT_SYMBOL(node_data);
 nodemask_t numa_nodes_parsed __initdata;