 arch/sh/include/asm/hw_breakpoint.h  |  5 +++++
 arch/x86/include/asm/hw_breakpoint.h |  5 +++++
 include/linux/hw_breakpoint.h        | 10 ++++++++++
 kernel/hw_breakpoint.c               | 53 +++++++++++++++++++++++++--------
 kernel/trace/trace_ksym.c            | 26 +++++---------------
 5 files changed, 68 insertions(+), 31 deletions(-)
diff --git a/arch/sh/include/asm/hw_breakpoint.h b/arch/sh/include/asm/hw_breakpoint.h
index 382bad937dcc..e14cad96798f 100644
--- a/arch/sh/include/asm/hw_breakpoint.h
+++ b/arch/sh/include/asm/hw_breakpoint.h
@@ -46,6 +46,11 @@ struct pmu;
 /* Maximum number of UBC channels */
 #define HBP_NUM		2
 
+static inline int hw_breakpoint_slots(int type)
+{
+	return HBP_NUM;
+}
+
 /* arch/sh/kernel/hw_breakpoint.c */
 extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
 extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
diff --git a/arch/x86/include/asm/hw_breakpoint.h b/arch/x86/include/asm/hw_breakpoint.h
index c77a5a6fab9d..942255310e6a 100644
--- a/arch/x86/include/asm/hw_breakpoint.h
+++ b/arch/x86/include/asm/hw_breakpoint.h
@@ -41,6 +41,11 @@ struct arch_hw_breakpoint {
 /* Total number of available HW breakpoint registers */
 #define HBP_NUM 4
 
+static inline int hw_breakpoint_slots(int type)
+{
+	return HBP_NUM;
+}
+
 struct perf_event;
 struct pmu;
 
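
Both stubs above ignore the type argument: SH and x86 select CONFIG_HAVE_MIXED_BREAKPOINTS_REGS, so a single register file backs both instruction and data breakpoints and HBP_NUM is the answer for every type. As a purely hypothetical sketch (the counts and the arch are made up, not part of this patch), an architecture with split register files would instead key the answer on the bp_type_idx value passed in:

/*
 * Hypothetical arch stub, for illustration only: separate
 * instruction and data register files, reported per type.
 */
static inline int hw_breakpoint_slots(int type)
{
	if (type == TYPE_INST)
		return 8;	/* e.g. 8 instruction address registers */
	return 4;		/* e.g. 4 data address registers */
}
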
diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h
index 7e8899093098..a2d6ea49ec56 100644
--- a/include/linux/hw_breakpoint.h
+++ b/include/linux/hw_breakpoint.h
@@ -17,6 +17,16 @@ enum {
 	HW_BREAKPOINT_INVALID   = HW_BREAKPOINT_RW | HW_BREAKPOINT_X,
 };
 
+enum bp_type_idx {
+	TYPE_INST	= 0,
+#ifdef CONFIG_HAVE_MIXED_BREAKPOINTS_REGS
+	TYPE_DATA	= 0,
+#else
+	TYPE_DATA	= 1,
+#endif
+	TYPE_MAX
+};
+
 #ifdef __KERNEL__
 
 #include <linux/perf_event.h>
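
Moving bp_type_idx into the shared header lets the arch stubs above take it as their type argument. On CONFIG_HAVE_MIXED_BREAKPOINTS_REGS architectures, TYPE_INST and TYPE_DATA both collapse to index 0, so instruction and data breakpoints share one constraint table; otherwise they get separate tables and TYPE_MAX is 2. A sketch of the mapping the core performs when it files a breakpoint under one of these indexes (the helper name here is illustrative):

static enum bp_type_idx find_slot_idx(struct perf_event *bp)
{
	/* Any read/write component makes it a data breakpoint */
	if (bp->attr.bp_type & HW_BREAKPOINT_RW)
		return TYPE_DATA;

	return TYPE_INST;
}
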
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index 974498b858fc..684b710cbb91 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -40,20 +40,12 @@
 #include <linux/percpu.h>
 #include <linux/sched.h>
 #include <linux/init.h>
+#include <linux/slab.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
 
 #include <linux/hw_breakpoint.h>
 
-enum bp_type_idx {
-	TYPE_INST	= 0,
-#ifdef CONFIG_HAVE_MIXED_BREAKPOINTS_REGS
-	TYPE_DATA	= 0,
-#else
-	TYPE_DATA	= 1,
-#endif
-	TYPE_MAX
-};
 
 /*
  * Constraints data
@@ -63,11 +55,15 @@ enum bp_type_idx {
 static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned[TYPE_MAX]);
 
 /* Number of pinned task breakpoints in a cpu */
-static DEFINE_PER_CPU(unsigned int, nr_task_bp_pinned[TYPE_MAX][HBP_NUM]);
+static DEFINE_PER_CPU(unsigned int, *nr_task_bp_pinned[TYPE_MAX]);
 
 /* Number of non-pinned cpu/task breakpoints in a cpu */
 static DEFINE_PER_CPU(unsigned int, nr_bp_flexible[TYPE_MAX]);
 
+static int nr_slots[TYPE_MAX];
+
+static int constraints_initialized;
+
 /* Gather the number of total pinned and un-pinned bp in a cpuset */
 struct bp_busy_slots {
 	unsigned int pinned;
@@ -99,7 +95,7 @@ static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
 	int i;
 	unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
 
-	for (i = HBP_NUM - 1; i >= 0; i--) {
+	for (i = nr_slots[type] - 1; i >= 0; i--) {
 		if (tsk_pinned[i] > 0)
 			return i + 1;
 	}
@@ -292,6 +288,10 @@ static int __reserve_bp_slot(struct perf_event *bp)
 	enum bp_type_idx type;
 	int weight;
 
+	/* We couldn't initialize breakpoint constraints on boot */
+	if (!constraints_initialized)
+		return -ENOMEM;
+
 	/* Basic checks */
 	if (bp->attr.bp_type == HW_BREAKPOINT_EMPTY ||
 	    bp->attr.bp_type == HW_BREAKPOINT_INVALID)
@@ -304,7 +304,7 @@ static int __reserve_bp_slot(struct perf_event *bp)
 	fetch_this_slot(&slots, weight);
 
 	/* Flexible counters need to keep at least one slot */
-	if (slots.pinned + (!!slots.flexible) > HBP_NUM)
+	if (slots.pinned + (!!slots.flexible) > nr_slots[type])
 		return -ENOSPC;
 
 	toggle_bp_slot(bp, true, type, weight);
@@ -551,7 +551,36 @@ static struct notifier_block hw_breakpoint_exceptions_nb = {
 
 static int __init init_hw_breakpoint(void)
 {
+	unsigned int **task_bp_pinned;
+	int cpu, err_cpu;
+	int i;
+
+	for (i = 0; i < TYPE_MAX; i++)
+		nr_slots[i] = hw_breakpoint_slots(i);
+
+	for_each_possible_cpu(cpu) {
+		for (i = 0; i < TYPE_MAX; i++) {
+			task_bp_pinned = &per_cpu(nr_task_bp_pinned[i], cpu);
+			*task_bp_pinned = kzalloc(sizeof(int) * nr_slots[i],
+						  GFP_KERNEL);
+			if (!*task_bp_pinned)
+				goto err_alloc;
+		}
+	}
+
+	constraints_initialized = 1;
+
 	return register_die_notifier(&hw_breakpoint_exceptions_nb);
+
+ err_alloc:
+	for_each_possible_cpu(err_cpu) {
+		for (i = 0; i < TYPE_MAX; i++)
+			kfree(per_cpu(nr_task_bp_pinned[i], err_cpu));
+		if (err_cpu == cpu)
+			break;
+	}
+
+	return -ENOMEM;
 }
 core_initcall(init_hw_breakpoint);
 
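
The core change: nr_task_bp_pinned goes from a fixed [TYPE_MAX][HBP_NUM] per-CPU array to per-CPU pointers that init_hw_breakpoint() sizes at boot from hw_breakpoint_slots(), and every HBP_NUM comparison becomes a per-type nr_slots[] lookup. If any allocation fails, constraints_initialized stays 0 and __reserve_bp_slot() refuses all requests with -ENOMEM. A minimal user-space model of the reservation rule itself, for illustration only (single CPU, no locking, made-up slot counts):

#include <stdio.h>

enum bp_type_idx { TYPE_INST = 0, TYPE_DATA = 1, TYPE_MAX };

static int nr_slots[TYPE_MAX] = { 4, 4 };	/* made-up counts */
static int pinned[TYPE_MAX];
static int flexible[TYPE_MAX];

/*
 * Succeeds while the pinned count plus one slot reserved for
 * flexible counters still fits in nr_slots[type].
 */
static int reserve_bp_slot(enum bp_type_idx type, int is_pinned)
{
	int need = pinned[type] + is_pinned
		   + !!(flexible[type] + !is_pinned);

	if (need > nr_slots[type])
		return -1;	/* the kernel returns -ENOSPC here */

	if (is_pinned)
		pinned[type]++;
	else
		flexible[type]++;
	return 0;
}

int main(void)
{
	int i, ok = 0;

	for (i = 0; i < 6; i++)
		ok += !reserve_bp_slot(TYPE_DATA, 1);

	/* Prints 4: the fifth and sixth requests exceed nr_slots */
	printf("%d of 6 pinned data breakpoints accepted\n", ok);
	return 0;
}
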
diff --git a/kernel/trace/trace_ksym.c b/kernel/trace/trace_ksym.c
index d59cd6879477..8eaf00749b65 100644
--- a/kernel/trace/trace_ksym.c
+++ b/kernel/trace/trace_ksym.c
@@ -34,12 +34,6 @@
 
 #include <asm/atomic.h>
 
-/*
- * For now, let us restrict the no. of symbols traced simultaneously to number
- * of available hardware breakpoint registers.
- */
-#define KSYM_TRACER_MAX HBP_NUM
-
 #define KSYM_TRACER_OP_LEN 3 /* rw- */
 
 struct trace_ksym {
@@ -53,7 +47,6 @@ struct trace_ksym {
 
 static struct trace_array *ksym_trace_array;
 
-static unsigned int ksym_filter_entry_count;
 static unsigned int ksym_tracing_enabled;
 
 static HLIST_HEAD(ksym_filter_head);
@@ -181,13 +174,6 @@ int process_new_ksym_entry(char *ksymname, int op, unsigned long addr)
 	struct trace_ksym *entry;
 	int ret = -ENOMEM;
 
-	if (ksym_filter_entry_count >= KSYM_TRACER_MAX) {
-		printk(KERN_ERR "ksym_tracer: Maximum limit:(%d) reached. No"
-		" new requests for tracing can be accepted now.\n",
-			KSYM_TRACER_MAX);
-		return -ENOSPC;
-	}
-
 	entry = kzalloc(sizeof(struct trace_ksym), GFP_KERNEL);
 	if (!entry)
 		return -ENOMEM;
@@ -203,13 +189,17 @@ int process_new_ksym_entry(char *ksymname, int op, unsigned long addr)
 
 	if (IS_ERR(entry->ksym_hbp)) {
 		ret = PTR_ERR(entry->ksym_hbp);
-		printk(KERN_INFO "ksym_tracer request failed. Try again"
-		" later!!\n");
+		if (ret == -ENOSPC) {
+			printk(KERN_ERR "ksym_tracer: Maximum limit reached."
+			" No new requests for tracing can be accepted now.\n");
+		} else {
+			printk(KERN_INFO "ksym_tracer request failed. Try again"
+			" later!!\n");
+		}
 		goto err;
 	}
 
 	hlist_add_head_rcu(&(entry->ksym_hlist), &ksym_filter_head);
-	ksym_filter_entry_count++;
 
 	return 0;
 
@@ -265,7 +255,6 @@ static void __ksym_trace_reset(void)
 	hlist_for_each_entry_safe(entry, node, node1, &ksym_filter_head,
 				  ksym_hlist) {
 		unregister_wide_hw_breakpoint(entry->ksym_hbp);
-		ksym_filter_entry_count--;
 		hlist_del_rcu(&(entry->ksym_hlist));
 		synchronize_rcu();
 		kfree(entry);
@@ -338,7 +327,6 @@ static ssize_t ksym_trace_filter_write(struct file *file,
 		goto out_unlock;
 	}
 	/* Error or "symbol:---" case: drop it */
-	ksym_filter_entry_count--;
 	hlist_del_rcu(&(entry->ksym_hlist));
 	synchronize_rcu();
 	kfree(entry);
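
With the compile-time KSYM_TRACER_MAX cap gone, ksym_tracer no longer second-guesses how many slots exist: it simply attempts the registration and reports the core's -ENOSPC as "maximum limit reached". A sketch of the resulting caller-side pattern, with the tracer's bookkeeping elided (addr, handler, and hbp stand in for the tracer's own variables):

	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);
	attr.bp_addr = addr;
	attr.bp_len = HW_BREAKPOINT_LEN_4;
	attr.bp_type = HW_BREAKPOINT_RW;

	hbp = register_wide_hw_breakpoint(&attr, handler);
	if (IS_ERR(hbp))
		return PTR_ERR(hbp);	/* -ENOSPC: no slot free on some CPU */
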