-rw-r--r--  arch/x86/kernel/kprobes/core.c  | 14
-rw-r--r--  kernel/events/hw_breakpoint.c   |  6
2 files changed, 13 insertions, 7 deletions
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 9895a9a41380..211bce445522 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -365,10 +365,14 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
 	return insn.length;
 }
 
-static void __kprobes arch_copy_kprobe(struct kprobe *p)
+static int __kprobes arch_copy_kprobe(struct kprobe *p)
 {
+	int ret;
+
 	/* Copy an instruction with recovering if other optprobe modifies it.*/
-	__copy_instruction(p->ainsn.insn, p->addr);
+	ret = __copy_instruction(p->ainsn.insn, p->addr);
+	if (!ret)
+		return -EINVAL;
 
 	/*
 	 * __copy_instruction can modify the displacement of the instruction,
@@ -384,6 +388,8 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
 
 	/* Also, displacement change doesn't affect the first byte */
 	p->opcode = p->ainsn.insn[0];
+
+	return 0;
 }
 
 int __kprobes arch_prepare_kprobe(struct kprobe *p)
@@ -397,8 +403,8 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 	p->ainsn.insn = get_insn_slot();
 	if (!p->ainsn.insn)
 		return -ENOMEM;
-	arch_copy_kprobe(p);
-	return 0;
+
+	return arch_copy_kprobe(p);
 }
 
 void __kprobes arch_arm_kprobe(struct kprobe *p)
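
With this change a failed instruction copy is no longer ignored: arch_copy_kprobe() returns -EINVAL, arch_prepare_kprobe() passes it up, and registration of the probe fails. A minimal sketch of where a caller now sees that error, assuming an out-of-tree module built against a patched kernel (the probed symbol, names and messages are illustrative only, not part of this patch):

/* kprobe_err_demo.c - hedged sketch, not part of this patch. */
#include <linux/module.h>
#include <linux/kprobes.h>

static struct kprobe kp = {
	.symbol_name	= "do_fork",	/* illustrative probe target */
};

static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("kprobe hit at %p\n", p->addr);
	return 0;
}

static int __init demo_init(void)
{
	int ret;

	kp.pre_handler = handler_pre;
	ret = register_kprobe(&kp);
	if (ret < 0) {
		/*
		 * With this patch an instruction copy failure surfaces
		 * here (e.g. as -EINVAL) instead of silently arming a
		 * probe with a bad copy of the original instruction.
		 */
		pr_err("register_kprobe failed: %d\n", ret);
		return ret;
	}
	return 0;
}

static void __exit demo_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
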
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index a64f8aeb5c1f..20185ea64aa6 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -120,7 +120,7 @@ static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
 	list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
 		if (iter->hw.bp_target == tsk &&
 		    find_slot_idx(iter) == type &&
-		    cpu == iter->cpu)
+		    (iter->cpu < 0 || cpu == iter->cpu))
 			count += hw_breakpoint_weight(iter);
 	}
 
@@ -149,7 +149,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
 		return;
 	}
 
-	for_each_online_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		unsigned int nr;
 
 		nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
@@ -235,7 +235,7 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
 	if (cpu >= 0) {
 		toggle_bp_task_slot(bp, cpu, enable, type, weight);
 	} else {
-		for_each_online_cpu(cpu)
+		for_each_possible_cpu(cpu)
 			toggle_bp_task_slot(bp, cpu, enable, type, weight);
 	}
 
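
The hw_breakpoint change matters for per-task breakpoints opened with cpu == -1: such an event follows the task to whichever CPU it runs on, so task_bp_pinned() has to count it against every CPU's slots, and the constraint/toggle paths have to walk possible CPUs rather than only the ones currently online. A small userspace sketch of that case, assuming the usual raw perf_event_open() syscall wrapper (the file name and watched variable are illustrative only):

/* wp_demo.c - hedged sketch of a per-task, all-CPU hardware watchpoint. */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

static volatile int watched;		/* illustrative variable to watch */

int main(void)
{
	struct perf_event_attr attr;
	long fd;

	memset(&attr, 0, sizeof(attr));
	attr.type	= PERF_TYPE_BREAKPOINT;
	attr.size	= sizeof(attr);
	attr.bp_type	= HW_BREAKPOINT_W;
	attr.bp_addr	= (unsigned long)&watched;
	attr.bp_len	= HW_BREAKPOINT_LEN_4;

	/*
	 * pid = 0 (this task), cpu = -1 (any CPU): the per-task breakpoint
	 * whose slot accounting the patch above corrects.
	 */
	fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	watched = 1;	/* write access that the breakpoint monitors */
	close(fd);
	return 0;
}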