author    Linus Torvalds <torvalds@linux-foundation.org> 2016-08-13 22:29:46 -0400
committer Linus Torvalds <torvalds@linux-foundation.org> 2016-08-13 22:29:46 -0400
commit 120c54751b1cecaa18b4e2f247f242af6ee87fd9 (patch)
tree   aaca81706ab56519e812f1fd8ab6d25baf9b3f94
parent 329f4152911c276b074bec75a0443f88821afdb7 (diff)
parent 53fb45d3df6fb64eff6c314b3fa2e279a2496e5b (diff)
Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 fixes from Catalin Marinas:

 - support for nr_cpus= command line argument (maxcpus was previously
   changed to allow secondary CPUs to be hot-plugged)

 - ARM PMU interrupt handling fix

 - fix potential TLB conflict in the hibernate code

 - improved handling of EL1 instruction aborts (better error reporting)

 - removal of useless jprobes code for stack saving/restoring

 - defconfig updates

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: defconfig: enable CONFIG_LOCALVERSION_AUTO
  arm64: defconfig: add options for virtualization and containers
  arm64: hibernate: handle allocation failures
  arm64: hibernate: avoid potential TLB conflict
  arm64: Handle el1 synchronous instruction aborts cleanly
  arm64: Remove stack duplicating code from jprobes
  drivers/perf: arm-pmu: Fix handling of SPI lacking "interrupt-affinity" property
  drivers/perf: arm-pmu: convert arm_pmu_mutex to spinlock
  arm64: Support hard limit of cpu count by nr_cpus
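As an illustration of the first item: booting with "nr_cpus=4" on the kernel
command line now hard-limits arm64 to four possible CPUs, whereas "maxcpus=4"
only restricts how many are brought up during boot (the remainder can still be
hot-plugged later).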
 arch/arm64/configs/defconfig       | 53 ++++++++++++++++++++++++-----
 arch/arm64/include/asm/kprobes.h   |  2 --
 arch/arm64/kernel/entry.S          |  7 ++++
 arch/arm64/kernel/hibernate.c      | 82 +++++++++++++++++++-----------------
 arch/arm64/kernel/probes/kprobes.c | 31 +++------------
 arch/arm64/kernel/smp.c            |  8 ++--
 arch/arm64/mm/fault.c              | 14 +++++--
 drivers/perf/arm_pmu.c             | 25 ++++++------
 8 files changed, 136 insertions(+), 86 deletions(-)
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 0555b7caaf2c..eadf4855ad2d 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -1,4 +1,3 @@
-# CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_AUDIT=y
@@ -15,10 +14,14 @@ CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
 CONFIG_CGROUP_HUGETLB=y
-# CONFIG_UTS_NS is not set
-# CONFIG_IPC_NS is not set
-# CONFIG_NET_NS is not set
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_USER_NS=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_KALLSYMS_ALL=y
@@ -71,6 +74,7 @@ CONFIG_PREEMPT=y
 CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
 CONFIG_CMA=y
+CONFIG_SECCOMP=y
 CONFIG_XEN=y
 CONFIG_KEXEC=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
@@ -84,10 +88,37 @@ CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
-# CONFIG_IPV6 is not set
+CONFIG_IPV6=m
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_NAT=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_BRIDGE=m
+CONFIG_BRIDGE_VLAN_FILTERING=y
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_VLAN_8021Q_MVRP=y
 CONFIG_BPF_JIT=y
 CONFIG_CFG80211=m
 CONFIG_MAC80211=m
@@ -103,6 +134,7 @@ CONFIG_MTD=y
 CONFIG_MTD_M25P80=y
 CONFIG_MTD_SPI_NOR=y
 CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_NBD=m
 CONFIG_VIRTIO_BLK=y
 CONFIG_SRAM=y
 # CONFIG_SCSI_PROC_FS is not set
@@ -120,7 +152,10 @@ CONFIG_SATA_SIL24=y
 CONFIG_PATA_PLATFORM=y
 CONFIG_PATA_OF_PLATFORM=y
 CONFIG_NETDEVICES=y
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
 CONFIG_TUN=y
+CONFIG_VETH=m
 CONFIG_VIRTIO_NET=y
 CONFIG_AMD_XGBE=y
 CONFIG_NET_XGENE=y
@@ -350,12 +385,16 @@ CONFIG_EXYNOS_ADC=y
 CONFIG_PWM_SAMSUNG=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
 CONFIG_FANOTIFY=y
 CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
 CONFIG_QUOTA=y
 CONFIG_AUTOFS4_FS=y
-CONFIG_FUSE_FS=y
-CONFIG_CUSE=y
+CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
 CONFIG_HUGETLBFS=y
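Taken together, the new defconfig options (cgroup controllers, user
namespaces, netfilter/NAT, bridging, VLANs, macvlan/macvtap/veth, NBD, btrfs
and overlayfs) cover what container runtimes and virtualization stacks
typically expect from a stock kernel, per the "add options for virtualization
and containers" patch above.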
diff --git a/arch/arm64/include/asm/kprobes.h b/arch/arm64/include/asm/kprobes.h
index 61b49150dfa3..1737aecfcc5e 100644
--- a/arch/arm64/include/asm/kprobes.h
+++ b/arch/arm64/include/asm/kprobes.h
@@ -22,7 +22,6 @@
 
 #define __ARCH_WANT_KPROBES_INSN_SLOT
 #define MAX_INSN_SIZE			1
-#define MAX_STACK_SIZE			128
 
 #define flush_insn_slot(p)		do { } while (0)
 #define kretprobe_blacklist_size	0
@@ -47,7 +46,6 @@ struct kprobe_ctlblk {
 	struct prev_kprobe prev_kprobe;
 	struct kprobe_step_ctx ss_ctx;
 	struct pt_regs jprobe_saved_regs;
-	char jprobes_stack[MAX_STACK_SIZE];
 };
 
 void arch_remove_kprobe(struct kprobe *);
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 96e4a2b64cc1..441420ca7d08 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -353,6 +353,8 @@ el1_sync:
 	lsr	x24, x1, #ESR_ELx_EC_SHIFT	// exception class
 	cmp	x24, #ESR_ELx_EC_DABT_CUR	// data abort in EL1
 	b.eq	el1_da
+	cmp	x24, #ESR_ELx_EC_IABT_CUR	// instruction abort in EL1
+	b.eq	el1_ia
 	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
 	b.eq	el1_undef
 	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
@@ -364,6 +366,11 @@ el1_sync:
 	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
 	b.ge	el1_dbg
 	b	el1_inv
+
+el1_ia:
+	/*
+	 * Fall through to the Data abort case
+	 */
 el1_da:
 	/*
 	 * Data abort handling
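The new el1_ia label deliberately falls through into el1_da: both EL1 abort
classes end up in do_mem_abort(), and the C fault-handling code (see the
arch/arm64/mm/fault.c hunks below) distinguishes them by decoding the ESR
exception class.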
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 21ab5df9fa76..65d81f965e74 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -35,6 +35,7 @@
 #include <asm/sections.h>
 #include <asm/smp.h>
 #include <asm/suspend.h>
+#include <asm/sysreg.h>
 #include <asm/virt.h>
 
 /*
@@ -217,12 +218,22 @@ static int create_safe_exec_page(void *src_start, size_t length,
 	set_pte(pte, __pte(virt_to_phys((void *)dst) |
 			 pgprot_val(PAGE_KERNEL_EXEC)));
 
-	/* Load our new page tables */
-	asm volatile("msr	ttbr0_el1, %0;"
-		     "isb;"
-		     "tlbi	vmalle1is;"
-		     "dsb	ish;"
-		     "isb" : : "r"(virt_to_phys(pgd)));
+	/*
+	 * Load our new page tables. A strict BBM approach requires that we
+	 * ensure that TLBs are free of any entries that may overlap with the
+	 * global mappings we are about to install.
+	 *
+	 * For a real hibernate/resume cycle TTBR0 currently points to a zero
+	 * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI
+	 * runtime services), while for a userspace-driven test_resume cycle it
+	 * points to userspace page tables (and we must point it at a zero page
+	 * ourselves). Elsewhere we only (un)install the idmap with preemption
+	 * disabled, so T0SZ should be as required regardless.
+	 */
+	cpu_set_reserved_ttbr0();
+	local_flush_tlb_all();
+	write_sysreg(virt_to_phys(pgd), ttbr0_el1);
+	isb();
 
 	*phys_dst_addr = virt_to_phys((void *)dst);
 
@@ -394,6 +405,38 @@ int swsusp_arch_resume(void)
 		      void *, phys_addr_t, phys_addr_t);
 
 	/*
+	 * Restoring the memory image will overwrite the ttbr1 page tables.
+	 * Create a second copy of just the linear map, and use this when
+	 * restoring.
+	 */
+	tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
+	if (!tmp_pg_dir) {
+		pr_err("Failed to allocate memory for temporary page tables.");
+		rc = -ENOMEM;
+		goto out;
+	}
+	rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
+	if (rc)
+		goto out;
+
+	/*
+	 * Since we only copied the linear map, we need to find restore_pblist's
+	 * linear map address.
+	 */
+	lm_restore_pblist = LMADDR(restore_pblist);
+
+	/*
+	 * We need a zero page that is zero before & after resume in order to
+	 * to break before make on the ttbr1 page tables.
+	 */
+	zero_page = (void *)get_safe_page(GFP_ATOMIC);
+	if (!zero_page) {
+		pr_err("Failed to allocate zero page.");
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	/*
 	 * Locate the exit code in the bottom-but-one page, so that *NULL
 	 * still has disastrous affects.
 	 */
@@ -419,27 +462,6 @@ int swsusp_arch_resume(void)
 	__flush_dcache_area(hibernate_exit, exit_size);
 
 	/*
-	 * Restoring the memory image will overwrite the ttbr1 page tables.
-	 * Create a second copy of just the linear map, and use this when
-	 * restoring.
-	 */
-	tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
-	if (!tmp_pg_dir) {
-		pr_err("Failed to allocate memory for temporary page tables.");
-		rc = -ENOMEM;
-		goto out;
-	}
-	rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
-	if (rc)
-		goto out;
-
-	/*
-	 * Since we only copied the linear map, we need to find restore_pblist's
-	 * linear map address.
-	 */
-	lm_restore_pblist = LMADDR(restore_pblist);
-
-	/*
 	 * KASLR will cause the el2 vectors to be in a different location in
 	 * the resumed kernel. Load hibernate's temporary copy into el2.
 	 *
@@ -453,12 +475,6 @@ int swsusp_arch_resume(void)
 		__hyp_set_vectors(el2_vectors);
 	}
 
-	/*
-	 * We need a zero page that is zero before & after resume in order to
-	 * to break before make on the ttbr1 page tables.
-	 */
-	zero_page = (void *)get_safe_page(GFP_ATOMIC);
-
 	hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
 		       resume_hdr.reenter_kernel, lm_restore_pblist,
 		       resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));
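For readers unfamiliar with the idiom, the create_safe_exec_page() change
follows the architecture's break-before-make (BBM) requirement when switching
translation tables. An annotated restatement of the new sequence (comments
are editorial):

	cpu_set_reserved_ttbr0();	/* "break": point TTBR0 at a reserved zero page */
	local_flush_tlb_all();		/* drop stale, possibly ASID-tagged TLB entries */
	write_sysreg(virt_to_phys(pgd), ttbr0_el1);	/* "make": install the new tables */
	isb();				/* synchronize the context-changing operation */

The allocation-failure hunks simply hoist the get_safe_page() calls ahead of
the point of no return, so a failed allocation can still take the error path
via goto out.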
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index bf9768588288..c6b0f40620d8 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -41,18 +41,6 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 static void __kprobes
 post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
 
-static inline unsigned long min_stack_size(unsigned long addr)
-{
-	unsigned long size;
-
-	if (on_irq_stack(addr, raw_smp_processor_id()))
-		size = IRQ_STACK_PTR(raw_smp_processor_id()) - addr;
-	else
-		size = (unsigned long)current_thread_info() + THREAD_START_SP - addr;
-
-	return min(size, FIELD_SIZEOF(struct kprobe_ctlblk, jprobes_stack));
-}
-
 static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
 {
 	/* prepare insn slot */
@@ -489,20 +477,15 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct jprobe *jp = container_of(p, struct jprobe, kp);
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-	long stack_ptr = kernel_stack_pointer(regs);
 
 	kcb->jprobe_saved_regs = *regs;
 	/*
-	 * As Linus pointed out, gcc assumes that the callee
-	 * owns the argument space and could overwrite it, e.g.
-	 * tailcall optimization. So, to be absolutely safe
-	 * we also save and restore enough stack bytes to cover
-	 * the argument area.
+	 * Since we can't be sure where in the stack frame "stacked"
+	 * pass-by-value arguments are stored we just don't try to
+	 * duplicate any of the stack. Do not use jprobes on functions that
+	 * use more than 64 bytes (after padding each to an 8 byte boundary)
+	 * of arguments, or pass individual arguments larger than 16 bytes.
 	 */
-	kasan_disable_current();
-	memcpy(kcb->jprobes_stack, (void *)stack_ptr,
-	       min_stack_size(stack_ptr));
-	kasan_enable_current();
 
 	instruction_pointer_set(regs, (unsigned long) jp->entry);
 	preempt_disable();
@@ -554,10 +537,6 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 	}
 	unpause_graph_tracing();
 	*regs = kcb->jprobe_saved_regs;
-	kasan_disable_current();
-	memcpy((void *)stack_addr, kcb->jprobes_stack,
-	       min_stack_size(stack_addr));
-	kasan_enable_current();
 	preempt_enable_no_resched();
 	return 1;
 }
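The rewritten setjmp_pre_handler() comment encodes the AArch64 procedure-call
standard: with the stack no longer duplicated, only arguments that arrive in
registers x0-x7 are reliably visible to a jprobe handler. A hypothetical
example, modeled on samples/kprobes/jprobe_example.c (names illustrative, not
part of this patch), that stays within those limits:

	#include <linux/kernel.h>
	#include <linux/module.h>
	#include <linux/kprobes.h>

	/* Five register-sized arguments: well under the 64-byte ceiling. */
	static long jdo_fork(unsigned long clone_flags, unsigned long stack_start,
			     unsigned long stack_size, int __user *parent_tidptr,
			     int __user *child_tidptr)
	{
		pr_info("jprobe: clone_flags=0x%lx\n", clone_flags);
		jprobe_return();	/* mandatory: a jprobe handler never returns normally */
		return 0;
	}

	static struct jprobe my_jprobe = {
		.entry	= jdo_fork,
		.kp	= { .symbol_name = "_do_fork" },
	};

Registering it with register_jprobe(&my_jprobe) then traces fork calls without
touching the probed function's stack frame.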
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 76a6d9263908..d93d43352504 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -661,9 +661,9 @@ void __init smp_init_cpus(void)
 	acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
 			      acpi_parse_gic_cpu_interface, 0);
 
-	if (cpu_count > NR_CPUS)
-		pr_warn("no. of cores (%d) greater than configured maximum of %d - clipping\n",
-			cpu_count, NR_CPUS);
+	if (cpu_count > nr_cpu_ids)
+		pr_warn("Number of cores (%d) exceeds configured maximum of %d - clipping\n",
+			cpu_count, nr_cpu_ids);
 
 	if (!bootcpu_valid) {
 		pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
@@ -677,7 +677,7 @@ void __init smp_init_cpus(void)
 	 * with entries in cpu_logical_map while initializing the cpus.
 	 * If the cpu set-up fails, invalidate the cpu_logical_map entry.
 	 */
-	for (i = 1; i < NR_CPUS; i++) {
+	for (i = 1; i < nr_cpu_ids; i++) {
 		if (cpu_logical_map(i) != INVALID_HWID) {
 			if (smp_cpu_setup(i))
 				cpu_logical_map(i) = INVALID_HWID;
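For context, nr_cpu_ids is the runtime counterpart of the compile-time
NR_CPUS ceiling: the generic nr_cpus= parameter lowers it early in boot,
roughly as follows (paraphrased from kernel/smp.c; details vary by kernel
version):

	static int __init nrcpus(char *str)
	{
		int nr_cpus;

		/* Clamp the number of possible CPU ids before they are used. */
		if (get_option(&str, &nr_cpus) && nr_cpus > 0 && nr_cpus < nr_cpu_ids)
			nr_cpu_ids = nr_cpus;

		return 0;
	}
	early_param("nr_cpus", nrcpus);

Iterating up to nr_cpu_ids instead of NR_CPUS is therefore what makes the
command-line hard limit actually stick.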
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index c8beaa0da7df..05d2bd776c69 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -153,6 +153,11 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
 }
 #endif
 
+static bool is_el1_instruction_abort(unsigned int esr)
+{
+	return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR;
+}
+
 /*
  * The kernel tried to access some page that wasn't present.
  */
@@ -161,8 +166,9 @@ static void __do_kernel_fault(struct mm_struct *mm, unsigned long addr,
 {
 	/*
 	 * Are we prepared to handle this kernel fault?
+	 * We are almost certainly not prepared to handle instruction faults.
 	 */
-	if (fixup_exception(regs))
+	if (!is_el1_instruction_abort(esr) && fixup_exception(regs))
 		return;
 
 	/*
@@ -267,7 +273,8 @@ static inline bool is_permission_fault(unsigned int esr)
 	unsigned int ec = ESR_ELx_EC(esr);
 	unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;
 
-	return (ec == ESR_ELx_EC_DABT_CUR && fsc_type == ESR_ELx_FSC_PERM);
+	return (ec == ESR_ELx_EC_DABT_CUR && fsc_type == ESR_ELx_FSC_PERM) ||
+	       (ec == ESR_ELx_EC_IABT_CUR && fsc_type == ESR_ELx_FSC_PERM);
 }
 
 static bool is_el0_instruction_abort(unsigned int esr)
@@ -312,6 +319,9 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 		if (regs->orig_addr_limit == KERNEL_DS)
 			die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
 
+		if (is_el1_instruction_abort(esr))
+			die("Attempting to execute userspace memory", regs, esr);
+
 		if (!search_exception_tables(regs->pc))
 			die("Accessing user space memory outside uaccess.h routines", regs, esr);
 	}
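is_el1_instruction_abort() is a straight field compare: the exception class
(EC) occupies ESR_ELx[31:26]. For reference, paraphrased from
arch/arm64/include/asm/esr.h (exact spelling may differ between versions):

	#define ESR_ELx_EC_SHIFT	(26)
	#define ESR_ELx_EC_MASK		(UL(0x3F) << ESR_ELx_EC_SHIFT)
	#define ESR_ELx_EC(esr)		(((esr) & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT)

With these hunks, an EL1 instruction abort with a permission fault code now
counts as a permission fault, and an attempt to execute userspace memory dies
immediately instead of being masked by an exception-table fixup.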
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 6ccb994bdfcb..c494613c1909 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -688,7 +688,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
 	return 0;
 }
 
-static DEFINE_MUTEX(arm_pmu_mutex);
+static DEFINE_SPINLOCK(arm_pmu_lock);
 static LIST_HEAD(arm_pmu_list);
 
 /*
@@ -701,7 +701,7 @@ static int arm_perf_starting_cpu(unsigned int cpu)
 {
 	struct arm_pmu *pmu;
 
-	mutex_lock(&arm_pmu_mutex);
+	spin_lock(&arm_pmu_lock);
 	list_for_each_entry(pmu, &arm_pmu_list, entry) {
 
 		if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
@@ -709,7 +709,7 @@ static int arm_perf_starting_cpu(unsigned int cpu)
 		if (pmu->reset)
 			pmu->reset(pmu);
 	}
-	mutex_unlock(&arm_pmu_mutex);
+	spin_unlock(&arm_pmu_lock);
 	return 0;
 }
 
@@ -821,9 +821,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
 	if (!cpu_hw_events)
 		return -ENOMEM;
 
-	mutex_lock(&arm_pmu_mutex);
+	spin_lock(&arm_pmu_lock);
 	list_add_tail(&cpu_pmu->entry, &arm_pmu_list);
-	mutex_unlock(&arm_pmu_mutex);
+	spin_unlock(&arm_pmu_lock);
 
 	err = cpu_pm_pmu_register(cpu_pmu);
 	if (err)
@@ -859,9 +859,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
 	return 0;
 
 out_unregister:
-	mutex_lock(&arm_pmu_mutex);
+	spin_lock(&arm_pmu_lock);
 	list_del(&cpu_pmu->entry);
-	mutex_unlock(&arm_pmu_mutex);
+	spin_unlock(&arm_pmu_lock);
 	free_percpu(cpu_hw_events);
 	return err;
 }
@@ -869,9 +869,9 @@ out_unregister:
 static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
 {
 	cpu_pm_pmu_unregister(cpu_pmu);
-	mutex_lock(&arm_pmu_mutex);
+	spin_lock(&arm_pmu_lock);
 	list_del(&cpu_pmu->entry);
-	mutex_unlock(&arm_pmu_mutex);
+	spin_unlock(&arm_pmu_lock);
 	free_percpu(cpu_pmu->hw_events);
 }
 
@@ -967,11 +967,12 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
 
 	/* If we didn't manage to parse anything, try the interrupt affinity */
 	if (cpumask_weight(&pmu->supported_cpus) == 0) {
-		if (!using_spi) {
+		int irq = platform_get_irq(pdev, 0);
+
+		if (irq_is_percpu(irq)) {
 			/* If using PPIs, check the affinity of the partition */
-			int ret, irq;
+			int ret;
 
-			irq = platform_get_irq(pdev, 0);
 			ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus);
 			if (ret) {
 				kfree(irqs);
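The mutex-to-spinlock conversion is required because arm_perf_starting_cpu()
runs from a CPU-hotplug "starting" callback, i.e. on the incoming CPU with
interrupts disabled, where taking a sleeping mutex is illegal. A sketch of
how such a callback is registered under the 4.8-era hotplug API (the exact
call site in arm_pmu.c may differ):

	/* Invoked on each CPU as it comes online, in atomic context. */
	ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_STARTING,
					"AP_PERF_ARM_STARTING",
					arm_perf_starting_cpu, NULL);

The of_pmu_irq_cfg() hunk is the SPI fix: instead of keying off using_spi, it
asks the IRQ core directly via irq_is_percpu(), so an SPI lacking an
"interrupt-affinity" property no longer falls into the PPI partition path.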