author     Linus Torvalds <torvalds@linux-foundation.org>  2016-08-13 22:29:46 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-08-13 22:29:46 -0400
commit     120c54751b1cecaa18b4e2f247f242af6ee87fd9 (patch)
tree       aaca81706ab56519e812f1fd8ab6d25baf9b3f94
parent     329f4152911c276b074bec75a0443f88821afdb7 (diff)
parent     53fb45d3df6fb64eff6c314b3fa2e279a2496e5b (diff)
Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Catalin Marinas:

 - support for the nr_cpus= command line argument (maxcpus was
   previously changed to allow secondary CPUs to be hot-plugged)
 - ARM PMU interrupt handling fix
 - fix potential TLB conflict in the hibernate code
 - improved handling of EL1 instruction aborts (better error reporting)
 - removal of useless jprobes code for stack saving/restoring
 - defconfig updates

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: defconfig: enable CONFIG_LOCALVERSION_AUTO
  arm64: defconfig: add options for virtualization and containers
  arm64: hibernate: handle allocation failures
  arm64: hibernate: avoid potential TLB conflict
  arm64: Handle el1 synchronous instruction aborts cleanly
  arm64: Remove stack duplicating code from jprobes
  drivers/perf: arm-pmu: Fix handling of SPI lacking "interrupt-affinity" property
  drivers/perf: arm-pmu: convert arm_pmu_mutex to spinlock
  arm64: Support hard limit of cpu count by nr_cpus
 arch/arm64/configs/defconfig       | 53
 arch/arm64/include/asm/kprobes.h   |  2
 arch/arm64/kernel/entry.S          |  7
 arch/arm64/kernel/hibernate.c      | 82
 arch/arm64/kernel/probes/kprobes.c | 31
 arch/arm64/kernel/smp.c            |  8
 arch/arm64/mm/fault.c              | 14
 drivers/perf/arm_pmu.c             | 25
 8 files changed, 136 insertions(+), 86 deletions(-)
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 0555b7caaf2c..eadf4855ad2d 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -1,4 +1,3 @@
-# CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_AUDIT=y
@@ -15,10 +14,14 @@ CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
 CONFIG_CGROUP_HUGETLB=y
-# CONFIG_UTS_NS is not set
-# CONFIG_IPC_NS is not set
-# CONFIG_NET_NS is not set
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_USER_NS=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_KALLSYMS_ALL=y
@@ -71,6 +74,7 @@ CONFIG_PREEMPT=y
 CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
 CONFIG_CMA=y
+CONFIG_SECCOMP=y
 CONFIG_XEN=y
 CONFIG_KEXEC=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
@@ -84,10 +88,37 @@ CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
-# CONFIG_IPV6 is not set
+CONFIG_IPV6=m
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_NAT=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_BRIDGE=m
+CONFIG_BRIDGE_VLAN_FILTERING=y
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_VLAN_8021Q_MVRP=y
 CONFIG_BPF_JIT=y
 CONFIG_CFG80211=m
 CONFIG_MAC80211=m
@@ -103,6 +134,7 @@ CONFIG_MTD=y
 CONFIG_MTD_M25P80=y
 CONFIG_MTD_SPI_NOR=y
 CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_NBD=m
 CONFIG_VIRTIO_BLK=y
 CONFIG_SRAM=y
 # CONFIG_SCSI_PROC_FS is not set
@@ -120,7 +152,10 @@ CONFIG_SATA_SIL24=y
 CONFIG_PATA_PLATFORM=y
 CONFIG_PATA_OF_PLATFORM=y
 CONFIG_NETDEVICES=y
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
 CONFIG_TUN=y
+CONFIG_VETH=m
 CONFIG_VIRTIO_NET=y
 CONFIG_AMD_XGBE=y
 CONFIG_NET_XGENE=y
@@ -350,12 +385,16 @@ CONFIG_EXYNOS_ADC=y
 CONFIG_PWM_SAMSUNG=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
 CONFIG_FANOTIFY=y
 CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
 CONFIG_QUOTA=y
 CONFIG_AUTOFS4_FS=y
-CONFIG_FUSE_FS=y
-CONFIG_CUSE=y
+CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
 CONFIG_HUGETLBFS=y
diff --git a/arch/arm64/include/asm/kprobes.h b/arch/arm64/include/asm/kprobes.h
index 61b49150dfa3..1737aecfcc5e 100644
--- a/arch/arm64/include/asm/kprobes.h
+++ b/arch/arm64/include/asm/kprobes.h
@@ -22,7 +22,6 @@
 
 #define __ARCH_WANT_KPROBES_INSN_SLOT
 #define MAX_INSN_SIZE		1
-#define MAX_STACK_SIZE		128
 
 #define flush_insn_slot(p)	do { } while (0)
 #define kretprobe_blacklist_size	0
@@ -47,7 +46,6 @@ struct kprobe_ctlblk {
 	struct prev_kprobe prev_kprobe;
 	struct kprobe_step_ctx ss_ctx;
 	struct pt_regs jprobe_saved_regs;
-	char jprobes_stack[MAX_STACK_SIZE];
 };
 
 void arch_remove_kprobe(struct kprobe *);
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 96e4a2b64cc1..441420ca7d08 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -353,6 +353,8 @@ el1_sync:
 	lsr	x24, x1, #ESR_ELx_EC_SHIFT	// exception class
 	cmp	x24, #ESR_ELx_EC_DABT_CUR	// data abort in EL1
 	b.eq	el1_da
+	cmp	x24, #ESR_ELx_EC_IABT_CUR	// instruction abort in EL1
+	b.eq	el1_ia
 	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
 	b.eq	el1_undef
 	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
@@ -364,6 +366,11 @@ el1_sync:
 	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
 	b.ge	el1_dbg
 	b	el1_inv
+
+el1_ia:
+	/*
+	 * Fall through to the Data abort case
+	 */
 el1_da:
 	/*
 	 * Data abort handling
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 21ab5df9fa76..65d81f965e74 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -35,6 +35,7 @@
 #include <asm/sections.h>
 #include <asm/smp.h>
 #include <asm/suspend.h>
+#include <asm/sysreg.h>
 #include <asm/virt.h>
 
 /*
@@ -217,12 +218,22 @@ static int create_safe_exec_page(void *src_start, size_t length,
 	set_pte(pte, __pte(virt_to_phys((void *)dst) |
 			pgprot_val(PAGE_KERNEL_EXEC)));
 
-	/* Load our new page tables */
-	asm volatile("msr	ttbr0_el1, %0;"
-		     "isb;"
-		     "tlbi	vmalle1is;"
-		     "dsb	ish;"
-		     "isb" : : "r"(virt_to_phys(pgd)));
+	/*
+	 * Load our new page tables. A strict BBM approach requires that we
+	 * ensure that TLBs are free of any entries that may overlap with the
+	 * global mappings we are about to install.
+	 *
+	 * For a real hibernate/resume cycle TTBR0 currently points to a zero
+	 * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI
+	 * runtime services), while for a userspace-driven test_resume cycle it
+	 * points to userspace page tables (and we must point it at a zero page
+	 * ourselves). Elsewhere we only (un)install the idmap with preemption
+	 * disabled, so T0SZ should be as required regardless.
+	 */
+	cpu_set_reserved_ttbr0();
+	local_flush_tlb_all();
+	write_sysreg(virt_to_phys(pgd), ttbr0_el1);
+	isb();
 
 	*phys_dst_addr = virt_to_phys((void *)dst);
 
@@ -394,6 +405,38 @@ int swsusp_arch_resume(void)
 		  void *, phys_addr_t, phys_addr_t);
 
 	/*
+	 * Restoring the memory image will overwrite the ttbr1 page tables.
+	 * Create a second copy of just the linear map, and use this when
+	 * restoring.
+	 */
+	tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
+	if (!tmp_pg_dir) {
+		pr_err("Failed to allocate memory for temporary page tables.");
+		rc = -ENOMEM;
+		goto out;
+	}
+	rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
+	if (rc)
+		goto out;
+
+	/*
+	 * Since we only copied the linear map, we need to find restore_pblist's
+	 * linear map address.
+	 */
+	lm_restore_pblist = LMADDR(restore_pblist);
+
+	/*
+	 * We need a zero page that is zero before & after resume in order to
+	 * to break before make on the ttbr1 page tables.
+	 */
+	zero_page = (void *)get_safe_page(GFP_ATOMIC);
+	if (!zero_page) {
+		pr_err("Failed to allocate zero page.");
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	/*
 	 * Locate the exit code in the bottom-but-one page, so that *NULL
 	 * still has disastrous affects.
 	 */
@@ -419,27 +462,6 @@ int swsusp_arch_resume(void)
 	__flush_dcache_area(hibernate_exit, exit_size);
 
 	/*
-	 * Restoring the memory image will overwrite the ttbr1 page tables.
-	 * Create a second copy of just the linear map, and use this when
-	 * restoring.
-	 */
-	tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
-	if (!tmp_pg_dir) {
-		pr_err("Failed to allocate memory for temporary page tables.");
-		rc = -ENOMEM;
-		goto out;
-	}
-	rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
-	if (rc)
-		goto out;
-
-	/*
-	 * Since we only copied the linear map, we need to find restore_pblist's
-	 * linear map address.
-	 */
-	lm_restore_pblist = LMADDR(restore_pblist);
-
-	/*
 	 * KASLR will cause the el2 vectors to be in a different location in
 	 * the resumed kernel. Load hibernate's temporary copy into el2.
 	 *
@@ -453,12 +475,6 @@ int swsusp_arch_resume(void)
 		__hyp_set_vectors(el2_vectors);
 	}
 
-	/*
-	 * We need a zero page that is zero before & after resume in order to
-	 * to break before make on the ttbr1 page tables.
-	 */
-	zero_page = (void *)get_safe_page(GFP_ATOMIC);
-
 	hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
 		       resume_hdr.reenter_kernel, lm_restore_pblist,
 		       resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));
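The hibernate.c change above does two things: it rewrites the TTBR0 switch as a strict break-before-make sequence, and it moves both get_safe_page() allocations ahead of the point of no return so a failure can still be reported and unwound. A minimal userspace sketch of that allocate-then-check pattern follows; the harness (prepare_resume(), a calloc-backed get_safe_page()) is invented for illustration and is not kernel code.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for get_safe_page(): returns a zeroed page or NULL. */
static void *get_safe_page(void)
{
    return calloc(1, 4096);
}

/* Allocate everything up front, before the point of no return. */
static int prepare_resume(void **tmp_pg_dir, void **zero_page)
{
    int rc = 0;

    *tmp_pg_dir = get_safe_page();
    if (!*tmp_pg_dir) {
        fprintf(stderr, "Failed to allocate memory for temporary page tables.\n");
        rc = -1;
        goto out;
    }

    *zero_page = get_safe_page();
    if (!*zero_page) {
        fprintf(stderr, "Failed to allocate zero page.\n");
        rc = -1;
        goto out;
    }
out:
    return rc;
}

int main(void)
{
    void *tmp_pg_dir = NULL, *zero_page = NULL;

    if (prepare_resume(&tmp_pg_dir, &zero_page) == 0)
        puts("all allocations succeeded; safe to start restoring");
    free(zero_page);    /* free(NULL) is a no-op */
    free(tmp_pg_dir);
    return 0;
}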
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index bf9768588288..c6b0f40620d8 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -41,18 +41,6 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 static void __kprobes
 post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
 
-static inline unsigned long min_stack_size(unsigned long addr)
-{
-	unsigned long size;
-
-	if (on_irq_stack(addr, raw_smp_processor_id()))
-		size = IRQ_STACK_PTR(raw_smp_processor_id()) - addr;
-	else
-		size = (unsigned long)current_thread_info() + THREAD_START_SP - addr;
-
-	return min(size, FIELD_SIZEOF(struct kprobe_ctlblk, jprobes_stack));
-}
-
 static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
 {
 	/* prepare insn slot */
@@ -489,20 +477,15 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct jprobe *jp = container_of(p, struct jprobe, kp);
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-	long stack_ptr = kernel_stack_pointer(regs);
 
 	kcb->jprobe_saved_regs = *regs;
 	/*
-	 * As Linus pointed out, gcc assumes that the callee
-	 * owns the argument space and could overwrite it, e.g.
-	 * tailcall optimization. So, to be absolutely safe
-	 * we also save and restore enough stack bytes to cover
-	 * the argument area.
+	 * Since we can't be sure where in the stack frame "stacked"
+	 * pass-by-value arguments are stored we just don't try to
+	 * duplicate any of the stack. Do not use jprobes on functions that
+	 * use more than 64 bytes (after padding each to an 8 byte boundary)
+	 * of arguments, or pass individual arguments larger than 16 bytes.
 	 */
-	kasan_disable_current();
-	memcpy(kcb->jprobes_stack, (void *)stack_ptr,
-	       min_stack_size(stack_ptr));
-	kasan_enable_current();
 
 	instruction_pointer_set(regs, (unsigned long) jp->entry);
 	preempt_disable();
@@ -554,10 +537,6 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 	}
 	unpause_graph_tracing();
 	*regs = kcb->jprobe_saved_regs;
-	kasan_disable_current();
-	memcpy((void *)stack_addr, kcb->jprobes_stack,
-	       min_stack_size(stack_addr));
-	kasan_enable_current();
 	preempt_enable_no_resched();
 	return 1;
 }
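The limits in the replacement comment follow from the AAPCS64 procedure-call standard: the first eight integer or pointer arguments travel in registers x0-x7 (64 bytes in total), only the overflow is passed on the stack, and aggregates larger than 16 bytes are passed by reference. A hedged illustration with hypothetical functions, not taken from the kernel:

#include <stdio.h>

/*
 * Under AAPCS64 the first eight integer arguments travel in registers
 * x0-x7 (8 bytes each, 64 bytes total), so nothing is passed on the
 * stack: such functions stay within the new jprobes restriction.
 */
static long fits_in_registers(long a, long b, long c, long d,
                              long e, long f, long g, long h)
{
    return a + b + c + d + e + f + g + h;
}

/*
 * A ninth argument spills to the caller's stack frame, which this
 * jprobes implementation no longer duplicates.
 */
static long spills_to_stack(long a, long b, long c, long d,
                            long e, long f, long g, long h, long i)
{
    return a + b + c + d + e + f + g + h + i;
}

int main(void)
{
    printf("%ld %ld\n",
           fits_in_registers(1, 2, 3, 4, 5, 6, 7, 8),
           spills_to_stack(1, 2, 3, 4, 5, 6, 7, 8, 9));
    return 0;
}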
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 76a6d9263908..d93d43352504 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -661,9 +661,9 @@ void __init smp_init_cpus(void)
 	acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
 			      acpi_parse_gic_cpu_interface, 0);
 
-	if (cpu_count > NR_CPUS)
-		pr_warn("no. of cores (%d) greater than configured maximum of %d - clipping\n",
-			cpu_count, NR_CPUS);
+	if (cpu_count > nr_cpu_ids)
+		pr_warn("Number of cores (%d) exceeds configured maximum of %d - clipping\n",
+			cpu_count, nr_cpu_ids);
 
 	if (!bootcpu_valid) {
 		pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
@@ -677,7 +677,7 @@ void __init smp_init_cpus(void)
 	 * with entries in cpu_logical_map while initializing the cpus.
 	 * If the cpu set-up fails, invalidate the cpu_logical_map entry.
 	 */
-	for (i = 1; i < NR_CPUS; i++) {
+	for (i = 1; i < nr_cpu_ids; i++) {
 		if (cpu_logical_map(i) != INVALID_HWID) {
 			if (smp_cpu_setup(i))
 				cpu_logical_map(i) = INVALID_HWID;
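For the smp.c hunks it helps to keep the two limits apart: NR_CPUS is the compile-time ceiling, while nr_cpu_ids is the runtime limit that generic kernel code lowers when booting with nr_cpus=N, so clamping against nr_cpu_ids is what gives the new parameter its effect. A minimal userspace sketch of the distinction, with made-up values and an illustrative parse_nr_cpus() stand-in for the real early_param handler:

#include <stdio.h>

#define NR_CPUS 64                         /* compile-time ceiling */

static unsigned int nr_cpu_ids = NR_CPUS;  /* runtime limit, may be lowered */

/* Stand-in for the generic nr_cpus= boot-parameter handler. */
static void parse_nr_cpus(unsigned int n)
{
    if (n >= 1 && n <= NR_CPUS)
        nr_cpu_ids = n;
}

int main(void)
{
    unsigned int cpu_count = 8;    /* CPUs described by firmware */
    unsigned int i;

    parse_nr_cpus(2);              /* as if booted with nr_cpus=2 */

    if (cpu_count > nr_cpu_ids)
        printf("Number of cores (%u) exceeds configured maximum of %u - clipping\n",
               cpu_count, nr_cpu_ids);

    for (i = 1; i < nr_cpu_ids; i++)
        printf("would set up secondary CPU %u\n", i);
    return 0;
}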
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index c8beaa0da7df..05d2bd776c69 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -153,6 +153,11 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
 }
 #endif
 
+static bool is_el1_instruction_abort(unsigned int esr)
+{
+	return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR;
+}
+
 /*
  * The kernel tried to access some page that wasn't present.
  */
@@ -161,8 +166,9 @@ static void __do_kernel_fault(struct mm_struct *mm, unsigned long addr,
 {
 	/*
 	 * Are we prepared to handle this kernel fault?
+	 * We are almost certainly not prepared to handle instruction faults.
 	 */
-	if (fixup_exception(regs))
+	if (!is_el1_instruction_abort(esr) && fixup_exception(regs))
 		return;
 
 	/*
@@ -267,7 +273,8 @@ static inline bool is_permission_fault(unsigned int esr)
 	unsigned int ec = ESR_ELx_EC(esr);
 	unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;
 
-	return (ec == ESR_ELx_EC_DABT_CUR && fsc_type == ESR_ELx_FSC_PERM);
+	return (ec == ESR_ELx_EC_DABT_CUR && fsc_type == ESR_ELx_FSC_PERM) ||
+	       (ec == ESR_ELx_EC_IABT_CUR && fsc_type == ESR_ELx_FSC_PERM);
 }
 
 static bool is_el0_instruction_abort(unsigned int esr)
@@ -312,6 +319,9 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 		if (regs->orig_addr_limit == KERNEL_DS)
 			die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
 
+		if (is_el1_instruction_abort(esr))
+			die("Attempting to execute userspace memory", regs, esr);
+
 		if (!search_exception_tables(regs->pc))
 			die("Accessing user space memory outside uaccess.h routines", regs, esr);
 	}
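The new is_el1_instruction_abort() helper keys off the exception class (EC) field in bits [31:26] of ESR_EL1. A self-contained sketch of that decode: the shift, mask and EC values below follow the ARMv8 encodings the kernel uses, but the test harness itself is invented.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ESR_ELx_EC_SHIFT    26
#define ESR_ELx_EC_MASK     (0x3FUL << ESR_ELx_EC_SHIFT)
#define ESR_ELx_EC(esr)     (((esr) & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT)

#define ESR_ELx_EC_IABT_CUR 0x21    /* instruction abort taken at the current EL */
#define ESR_ELx_EC_DABT_CUR 0x25    /* data abort taken at the current EL */

static bool is_el1_instruction_abort(uint64_t esr)
{
    return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR;
}

int main(void)
{
    /* Fabricate an ESR value whose EC field says "EL1 instruction abort". */
    uint64_t esr = (uint64_t)ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT;

    printf("EC=0x%02llx -> %s\n", (unsigned long long)ESR_ELx_EC(esr),
           is_el1_instruction_abort(esr) ? "EL1 instruction abort" : "other");
    return 0;
}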
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 6ccb994bdfcb..c494613c1909 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -688,7 +688,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
 	return 0;
 }
 
-static DEFINE_MUTEX(arm_pmu_mutex);
+static DEFINE_SPINLOCK(arm_pmu_lock);
 static LIST_HEAD(arm_pmu_list);
 
 /*
@@ -701,7 +701,7 @@ static int arm_perf_starting_cpu(unsigned int cpu)
 {
 	struct arm_pmu *pmu;
 
-	mutex_lock(&arm_pmu_mutex);
+	spin_lock(&arm_pmu_lock);
 	list_for_each_entry(pmu, &arm_pmu_list, entry) {
 
 		if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
@@ -709,7 +709,7 @@ static int arm_perf_starting_cpu(unsigned int cpu)
 		if (pmu->reset)
 			pmu->reset(pmu);
 	}
-	mutex_unlock(&arm_pmu_mutex);
+	spin_unlock(&arm_pmu_lock);
 	return 0;
 }
 
@@ -821,9 +821,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
 	if (!cpu_hw_events)
 		return -ENOMEM;
 
-	mutex_lock(&arm_pmu_mutex);
+	spin_lock(&arm_pmu_lock);
 	list_add_tail(&cpu_pmu->entry, &arm_pmu_list);
-	mutex_unlock(&arm_pmu_mutex);
+	spin_unlock(&arm_pmu_lock);
 
 	err = cpu_pm_pmu_register(cpu_pmu);
 	if (err)
@@ -859,9 +859,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
 	return 0;
 
 out_unregister:
-	mutex_lock(&arm_pmu_mutex);
+	spin_lock(&arm_pmu_lock);
 	list_del(&cpu_pmu->entry);
-	mutex_unlock(&arm_pmu_mutex);
+	spin_unlock(&arm_pmu_lock);
 	free_percpu(cpu_hw_events);
 	return err;
 }
@@ -869,9 +869,9 @@ out_unregister:
 static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
 {
 	cpu_pm_pmu_unregister(cpu_pmu);
-	mutex_lock(&arm_pmu_mutex);
+	spin_lock(&arm_pmu_lock);
 	list_del(&cpu_pmu->entry);
-	mutex_unlock(&arm_pmu_mutex);
+	spin_unlock(&arm_pmu_lock);
 	free_percpu(cpu_pmu->hw_events);
 }
 
@@ -967,11 +967,12 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
 
 	/* If we didn't manage to parse anything, try the interrupt affinity */
 	if (cpumask_weight(&pmu->supported_cpus) == 0) {
-		if (!using_spi) {
+		int irq = platform_get_irq(pdev, 0);
+
+		if (irq_is_percpu(irq)) {
 			/* If using PPIs, check the affinity of the partition */
-			int ret, irq;
+			int ret;
 
-			irq = platform_get_irq(pdev, 0);
 			ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus);
 			if (ret) {
 				kfree(irqs);
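Why the mutex had to become a spinlock: arm_perf_starting_cpu() runs from the CPU hotplug STARTING phase, on the incoming CPU with interrupts disabled, where sleeping locks such as mutexes are not allowed; a spinlock busy-waits instead of sleeping. A rough userspace analogue of the resulting lock-protected list, with a pthread spinlock standing in for the kernel's spinlock_t and names borrowed from the driver for readability:

#include <pthread.h>
#include <stdio.h>

/* Stand-in for the kernel's arm_pmu list protected by arm_pmu_lock. */
struct arm_pmu {
    const char *name;
    struct arm_pmu *next;
};

static pthread_spinlock_t arm_pmu_lock;
static struct arm_pmu *arm_pmu_list;

static void arm_pmu_register(struct arm_pmu *pmu)
{
    pthread_spin_lock(&arm_pmu_lock);    /* busy-waits; never sleeps */
    pmu->next = arm_pmu_list;
    arm_pmu_list = pmu;
    pthread_spin_unlock(&arm_pmu_lock);
}

/* Walk the list the way arm_perf_starting_cpu() does, without sleeping. */
static void arm_perf_starting_cpu(void)
{
    pthread_spin_lock(&arm_pmu_lock);
    for (struct arm_pmu *pmu = arm_pmu_list; pmu; pmu = pmu->next)
        printf("reset %s\n", pmu->name);
    pthread_spin_unlock(&arm_pmu_lock);
}

int main(void)
{
    struct arm_pmu pmu = { .name = "armv8_pmuv3" };

    pthread_spin_init(&arm_pmu_lock, PTHREAD_PROCESS_PRIVATE);
    arm_pmu_register(&pmu);
    arm_perf_starting_cpu();
    pthread_spin_destroy(&arm_pmu_lock);
    return 0;
}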