diff options
author | Frederic Weisbecker <fweisbec@gmail.com> | 2010-04-22 23:59:55 -0400 |
---|---|---|
committer | Frederic Weisbecker <fweisbec@gmail.com> | 2010-04-30 22:32:14 -0400 |
commit | feef47d0cb530e8419dfa0b48141b538b89b1b1a (patch) | |
tree | ad40b07e8b240eca134770120b5c644ec0062ce2 /kernel/hw_breakpoint.c | |
parent | f93a20541134fa767e8dc4eb32e956d30b9f6b92 (diff) |
hw-breakpoints: Get the number of available registers on boot dynamically
The breakpoint generic layer assumes that archs always know in advance
the static number of address registers available to host breakpoints
through the HBP_NUM macro.
However this is not true for every arch. For example, ARM needs to get
this information dynamically to handle the compatibility between
different versions.
To solve this, this patch proposes to drop the static HBP_NUM macro
and let the arch provide the number of available slots through a
new hw_breakpoint_slots() function. For archs that have
CONFIG_HAVE_MIXED_BREAKPOINTS_REGS selected, it will be called once,
as the same pool of registers serves instruction and data breakpoints
together.
For the others it will be called first to get the number of
instruction breakpoint registers and another time to get the
data breakpoint registers, the targeted type is given as a
parameter of hw_breakpoint_slots().
Reported-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Paul Mundt <lethal@linux-sh.org>
Cc: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
Cc: K. Prasad <prasad@linux.vnet.ibm.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Jason Wessel <jason.wessel@windriver.com>
Cc: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/hw_breakpoint.c')
-rw-r--r-- | kernel/hw_breakpoint.c | 53 |
1 file changed, 41 insertions, 12 deletions
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c index 974498b858fc..684b710cbb91 100644 --- a/kernel/hw_breakpoint.c +++ b/kernel/hw_breakpoint.c | |||
@@ -40,20 +40,12 @@ | |||
40 | #include <linux/percpu.h> | 40 | #include <linux/percpu.h> |
41 | #include <linux/sched.h> | 41 | #include <linux/sched.h> |
42 | #include <linux/init.h> | 42 | #include <linux/init.h> |
43 | #include <linux/slab.h> | ||
43 | #include <linux/cpu.h> | 44 | #include <linux/cpu.h> |
44 | #include <linux/smp.h> | 45 | #include <linux/smp.h> |
45 | 46 | ||
46 | #include <linux/hw_breakpoint.h> | 47 | #include <linux/hw_breakpoint.h> |
47 | 48 | ||
48 | enum bp_type_idx { | ||
49 | TYPE_INST = 0, | ||
50 | #ifdef CONFIG_HAVE_MIXED_BREAKPOINTS_REGS | ||
51 | TYPE_DATA = 0, | ||
52 | #else | ||
53 | TYPE_DATA = 1, | ||
54 | #endif | ||
55 | TYPE_MAX | ||
56 | }; | ||
57 | 49 | ||
58 | /* | 50 | /* |
59 | * Constraints data | 51 | * Constraints data |
@@ -63,11 +55,15 @@ enum bp_type_idx { | |||
63 | static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned[TYPE_MAX]); | 55 | static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned[TYPE_MAX]); |
64 | 56 | ||
65 | /* Number of pinned task breakpoints in a cpu */ | 57 | /* Number of pinned task breakpoints in a cpu */ |
66 | static DEFINE_PER_CPU(unsigned int, nr_task_bp_pinned[TYPE_MAX][HBP_NUM]); | 58 | static DEFINE_PER_CPU(unsigned int, *nr_task_bp_pinned[TYPE_MAX]); |
67 | 59 | ||
68 | /* Number of non-pinned cpu/task breakpoints in a cpu */ | 60 | /* Number of non-pinned cpu/task breakpoints in a cpu */ |
69 | static DEFINE_PER_CPU(unsigned int, nr_bp_flexible[TYPE_MAX]); | 61 | static DEFINE_PER_CPU(unsigned int, nr_bp_flexible[TYPE_MAX]); |
70 | 62 | ||
63 | static int nr_slots[TYPE_MAX]; | ||
64 | |||
65 | static int constraints_initialized; | ||
66 | |||
71 | /* Gather the number of total pinned and un-pinned bp in a cpuset */ | 67 | /* Gather the number of total pinned and un-pinned bp in a cpuset */ |
72 | struct bp_busy_slots { | 68 | struct bp_busy_slots { |
73 | unsigned int pinned; | 69 | unsigned int pinned; |
@@ -99,7 +95,7 @@ static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type) | |||
99 | int i; | 95 | int i; |
100 | unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu); | 96 | unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu); |
101 | 97 | ||
102 | for (i = HBP_NUM -1; i >= 0; i--) { | 98 | for (i = nr_slots[type] - 1; i >= 0; i--) { |
103 | if (tsk_pinned[i] > 0) | 99 | if (tsk_pinned[i] > 0) |
104 | return i + 1; | 100 | return i + 1; |
105 | } | 101 | } |
@@ -292,6 +288,10 @@ static int __reserve_bp_slot(struct perf_event *bp) | |||
292 | enum bp_type_idx type; | 288 | enum bp_type_idx type; |
293 | int weight; | 289 | int weight; |
294 | 290 | ||
291 | /* We couldn't initialize breakpoint constraints on boot */ | ||
292 | if (!constraints_initialized) | ||
293 | return -ENOMEM; | ||
294 | |||
295 | /* Basic checks */ | 295 | /* Basic checks */ |
296 | if (bp->attr.bp_type == HW_BREAKPOINT_EMPTY || | 296 | if (bp->attr.bp_type == HW_BREAKPOINT_EMPTY || |
297 | bp->attr.bp_type == HW_BREAKPOINT_INVALID) | 297 | bp->attr.bp_type == HW_BREAKPOINT_INVALID) |
@@ -304,7 +304,7 @@ static int __reserve_bp_slot(struct perf_event *bp) | |||
304 | fetch_this_slot(&slots, weight); | 304 | fetch_this_slot(&slots, weight); |
305 | 305 | ||
306 | /* Flexible counters need to keep at least one slot */ | 306 | /* Flexible counters need to keep at least one slot */ |
307 | if (slots.pinned + (!!slots.flexible) > HBP_NUM) | 307 | if (slots.pinned + (!!slots.flexible) > nr_slots[type]) |
308 | return -ENOSPC; | 308 | return -ENOSPC; |
309 | 309 | ||
310 | toggle_bp_slot(bp, true, type, weight); | 310 | toggle_bp_slot(bp, true, type, weight); |
@@ -551,7 +551,36 @@ static struct notifier_block hw_breakpoint_exceptions_nb = { | |||
551 | 551 | ||
552 | static int __init init_hw_breakpoint(void) | 552 | static int __init init_hw_breakpoint(void) |
553 | { | 553 | { |
554 | unsigned int **task_bp_pinned; | ||
555 | int cpu, err_cpu; | ||
556 | int i; | ||
557 | |||
558 | for (i = 0; i < TYPE_MAX; i++) | ||
559 | nr_slots[i] = hw_breakpoint_slots(i); | ||
560 | |||
561 | for_each_possible_cpu(cpu) { | ||
562 | for (i = 0; i < TYPE_MAX; i++) { | ||
563 | task_bp_pinned = &per_cpu(nr_task_bp_pinned[i], cpu); | ||
564 | *task_bp_pinned = kzalloc(sizeof(int) * nr_slots[i], | ||
565 | GFP_KERNEL); | ||
566 | if (!*task_bp_pinned) | ||
567 | goto err_alloc; | ||
568 | } | ||
569 | } | ||
570 | |||
571 | constraints_initialized = 1; | ||
572 | |||
554 | return register_die_notifier(&hw_breakpoint_exceptions_nb); | 573 | return register_die_notifier(&hw_breakpoint_exceptions_nb); |
574 | |||
575 | err_alloc: | ||
576 | for_each_possible_cpu(err_cpu) { | ||
577 | if (err_cpu == cpu) | ||
578 | break; | ||
579 | for (i = 0; i < TYPE_MAX; i++) | ||
580 | kfree(per_cpu(nr_task_bp_pinned[i], cpu)); | ||
581 | } | ||
582 | |||
583 | return -ENOMEM; | ||
555 | } | 584 | } |
556 | core_initcall(init_hw_breakpoint); | 585 | core_initcall(init_hw_breakpoint); |
557 | 586 | ||