author    Linus Torvalds <torvalds@linux-foundation.org>  2017-11-03 12:25:53 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-11-03 12:25:53 -0400
commit    866ba84ea30f94838251f74becf3cfe3c2d5c0f9 (patch)
tree      f9639eb4b6772d5e24a538976be6ffc82aa2ae4d
parent    3f46540ee7015ad2e3665e68b0b22c48ce9f99e5 (diff)
parent    7ecb37f62fe58e3e4d9b03443b92d213b2c108ce (diff)
Merge tag 'powerpc-4.14-6' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc fixes from Michael Ellerman:
 "Some more powerpc fixes for 4.14.

  This is bigger than I like to send at rc7, but that's at least partly
  because I didn't send any fixes last week. If it wasn't for the IMC
  driver, which is new and getting heavy testing, the diffstat would
  look a bit better. I've also added ftrace on big endian to my test
  suite, so we shouldn't break that again in future.

   - A fix to the handling of misaligned paste instructions (P9 only),
     where a change to a #define has caused the check for the
     instruction to always fail.

   - The preempt handling was unbalanced in the radix THP flush (P9
     only). Though we don't generally use preempt we want to keep it
     working as much as possible.

   - Two fixes for IMC (P9 only), one when booting with restricted
     number of CPUs and one in the error handling when initialisation
     fails due to firmware etc.

   - A revert to fix function_graph on big endian machines, and then a
     rework of the reverted patch to fix kprobes blacklist handling on
     big endian machines.

  Thanks to: Anju T Sudhakar, Guilherme G. Piccoli, Madhavan Srinivasan,
  Naveen N. Rao, Nicholas Piggin, Paul Mackerras"

* tag 'powerpc-4.14-6' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/perf: Fix core-imc hotplug callback failure during imc initialization
  powerpc/kprobes: Dereference function pointers only if the address does not belong to kernel text
  Revert "powerpc64/elfv1: Only dereference function descriptor for non-text symbols"
  powerpc/64s/radix: Fix preempt imbalance in TLB flush
  powerpc: Fix check for copy/paste instructions in alignment handler
  powerpc/perf: Fix IMC allocation routine
-rw-r--r--  arch/powerpc/include/asm/code-patching.h | 10
-rw-r--r--  arch/powerpc/kernel/align.c              |  2
-rw-r--r--  arch/powerpc/kernel/kprobes.c            |  7
-rw-r--r--  arch/powerpc/mm/tlb-radix.c              |  2
-rw-r--r--  arch/powerpc/perf/imc-pmu.c              | 18
5 files changed, 26 insertions(+), 13 deletions(-)
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index 5482928eea1b..abef812de7f8 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -83,16 +83,8 @@ static inline unsigned long ppc_function_entry(void *func)
          * On PPC64 ABIv1 the function pointer actually points to the
          * function's descriptor. The first entry in the descriptor is the
          * address of the function text.
-         *
-         * However, we may also receive pointer to an assembly symbol. To
-         * detect that, we first check if the function pointer we receive
-         * already points to kernel/module text and we only dereference it
-         * if it doesn't.
          */
-        if (kernel_text_address((unsigned long)func))
-                return (unsigned long)func;
-        else
-                return ((func_descr_t *)func)->entry;
+        return ((func_descr_t *)func)->entry;
 #else
         return (unsigned long)func;
 #endif
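
The revert restores the unconditional descriptor dereference for PPC64 ELFv1. As background, here is a self-contained userspace sketch of the convention this code models: on ELFv1 a function pointer is the address of a descriptor whose first field holds the real code address. The struct below mirrors the shape of the kernel's func_descr_t; the addresses are made-up demo values.

    #include <stdio.h>

    /* Mirrors the shape of the kernel's func_descr_t on PPC64 ELFv1. */
    typedef struct {
        unsigned long entry;   /* address of the function's code */
        unsigned long toc;     /* TOC base used by the function */
        unsigned long env;     /* environment pointer (unused by C) */
    } func_descr_t;

    /* On ELFv1 a C "function pointer" is really the address of such a
     * descriptor, so the code address is found by reading its first field. */
    static unsigned long function_entry(void *func)
    {
        return ((func_descr_t *)func)->entry;
    }

    int main(void)
    {
        func_descr_t desc = { .entry = 0x1000, .toc = 0x2000, .env = 0 };

        printf("entry = 0x%lx\n", function_entry(&desc)); /* prints 0x1000 */
        return 0;
    }
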
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index 43ef25156480..3e6c0744c174 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -332,7 +332,7 @@ int fix_alignment(struct pt_regs *regs)
          * when pasting to a co-processor. Furthermore, paste_last is the
          * synchronisation point for preceding copy/paste sequences.
          */
-        if ((instr & 0xfc0006fe) == PPC_INST_COPY)
+        if ((instr & 0xfc0006fe) == (PPC_INST_COPY & 0xfc0006fe))
                 return -EIO;
 
         r = analyse_instr(&op, regs, instr);
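
The old check masked the instruction word but compared the result against the unmasked PPC_INST_COPY constant; once that constant carries a bit which the mask clears, the comparison can never be true, so the copy/paste case was never caught. A standalone illustration of the arithmetic (the mask is the one from the hunk above; the opcode constant is an illustrative stand-in, not guaranteed to match the real PPC definition):

    #include <stdio.h>

    /* OPCODE carries a bit (0x00200000) that MASK clears, so comparing the
     * masked word against the raw constant can never succeed. */
    #define MASK   0xfc0006feu
    #define OPCODE 0x7c20060cu   /* assumed value, for illustration only */

    int main(void)
    {
        unsigned int instr = OPCODE;   /* an instruction word that should match */

        /* Broken check: masked word vs. unmasked constant -> never equal. */
        printf("broken check matches: %d\n", (instr & MASK) == OPCODE);
        /* Fixed check: mask both sides before comparing -> matches. */
        printf("fixed check matches:  %d\n", (instr & MASK) == (OPCODE & MASK));
        return 0;
    }
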
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 367494dc67d9..bebc3007a793 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -600,7 +600,12 @@ NOKPROBE_SYMBOL(kprobe_fault_handler);
 
 unsigned long arch_deref_entry_point(void *entry)
 {
-        return ppc_global_function_entry(entry);
+#ifdef PPC64_ELF_ABI_v1
+        if (!kernel_text_address((unsigned long)entry))
+                return ppc_global_function_entry(entry);
+        else
+#endif
+                return (unsigned long)entry;
 }
 NOKPROBE_SYMBOL(arch_deref_entry_point);
 
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index b3e849c4886e..d304028641a2 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -360,12 +360,14 @@ void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
 
 
         pid = mm ? mm->context.id : 0;
+        preempt_disable();
         if (unlikely(pid == MMU_NO_CONTEXT))
                 goto no_context;
 
         /* 4k page size, just blow the world */
         if (PAGE_SIZE == 0x1000) {
                 radix__flush_all_mm(mm);
+                preempt_enable();
                 return;
         }
 
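
The flush now runs between preempt_disable() and preempt_enable(), and the early return in the 4k-page branch needs its own enable to keep the count balanced. A userspace sketch of the pattern, with a toy counter standing in for the kernel's preempt count; function and variable names here are illustrative, not the kernel's.

    #include <assert.h>
    #include <stdio.h>

    /* Toy stand-in for the kernel's preempt count. */
    static int preempt_count;
    static void preempt_disable(void) { preempt_count++; }
    static void preempt_enable(void)  { preempt_count--; }

    /* Every exit taken after preempt_disable() must re-enable preemption:
     * the early return gets its own enable, the error path falls through
     * to the one at the end of the function. */
    static void flush_collapsed(int pid, int blow_the_world)
    {
        preempt_disable();
        if (pid == -1)
            goto no_context;

        if (blow_the_world) {
            preempt_enable();   /* balance before the early return */
            return;
        }

        /* ... per-entry flush would go here ... */
    no_context:
        preempt_enable();
    }

    int main(void)
    {
        flush_collapsed(1, 1);
        flush_collapsed(-1, 0);
        flush_collapsed(1, 0);
        assert(preempt_count == 0);   /* balanced after every path */
        printf("preempt count after all paths: %d\n", preempt_count);
        return 0;
    }
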
diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
index 88126245881b..36344117c680 100644
--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -607,6 +607,20 @@ static int ppc_core_imc_cpu_offline(unsigned int cpu)
         if (!cpumask_test_and_clear_cpu(cpu, &core_imc_cpumask))
                 return 0;
 
+        /*
+         * Check whether core_imc is registered. We could end up here
+         * if the cpuhotplug callback registration fails, i.e. the callback
+         * invokes the offline path for all successfully registered cpus.
+         * At this stage, core_imc pmu will not be registered and we
+         * should return here.
+         *
+         * We return with a zero since this is not an offline failure.
+         * And cpuhp_setup_state() returns the actual failure reason
+         * to the caller, which in turn will call the cleanup routine.
+         */
+        if (!core_imc_pmu->pmu.event_init)
+                return 0;
+
         /* Find any online cpu in that core except the current "cpu" */
         ncpu = cpumask_any_but(cpu_sibling_mask(cpu), cpu);
 
@@ -1104,7 +1118,7 @@ static int init_nest_pmu_ref(void)
 
 static void cleanup_all_core_imc_memory(void)
 {
-        int i, nr_cores = num_present_cpus() / threads_per_core;
+        int i, nr_cores = DIV_ROUND_UP(num_present_cpus(), threads_per_core);
         struct imc_mem_info *ptr = core_imc_pmu->mem_info;
         int size = core_imc_pmu->counter_mem_size;
 
@@ -1212,7 +1226,7 @@ static int imc_mem_init(struct imc_pmu *pmu_ptr, struct device_node *parent,
         if (!pmu_ptr->pmu.name)
                 return -ENOMEM;
 
-        nr_cores = num_present_cpus() / threads_per_core;
+        nr_cores = DIV_ROUND_UP(num_present_cpus(), threads_per_core);
         pmu_ptr->mem_info = kcalloc(nr_cores, sizeof(struct imc_mem_info),
                         GFP_KERNEL);
 
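
Both nr_cores computations move from truncating division to DIV_ROUND_UP, so a partially populated last core is still counted when the per-core memory is allocated and freed. A small standalone check of the difference; the macro is written out to match the kernel's definition, and the CPU counts are example values.

    #include <stdio.h>

    /* Same definition as the kernel's DIV_ROUND_UP macro. */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        int threads_per_core = 8;
        int present_cpus = 4;   /* e.g. booted with a restricted CPU count */

        /* Truncating division counts zero cores... */
        printf("truncated:  %d\n", present_cpus / threads_per_core);
        /* ...while rounding up still reserves memory for the partial core. */
        printf("rounded up: %d\n", DIV_ROUND_UP(present_cpus, threads_per_core));
        return 0;
    }
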