| field | value | date |
|---|---|---|
| author | Ingo Molnar <mingo@elte.hu> | 2010-08-31 03:45:21 -0400 |
| committer | Ingo Molnar <mingo@elte.hu> | 2010-08-31 03:45:46 -0400 |
| commit | daab7fc734a53fdeaf844b7c03053118ad1769da (patch) | |
| tree | 575deb3cdcc6dda562acaed6f7c29bc81ae01cf2 /kernel/hw_breakpoint.c | |
| parent | 774ea0bcb27f57b6fd521b3b6c43237782fed4b9 (diff) | |
| parent | 2bfc96a127bc1cc94d26bfaa40159966064f9c8c (diff) | |
Merge commit 'v2.6.36-rc3' into x86/memblock
Conflicts:
	arch/x86/kernel/trampoline.c
	mm/memblock.c
Merge reason: Resolve the conflicts, update to latest upstream.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/hw_breakpoint.c')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | kernel/hw_breakpoint.c | 90 |

1 file changed, 53 insertions(+), 37 deletions(-)
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index 7a56b22e0602..d71a987fd2bf 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -41,6 +41,7 @@
 #include <linux/sched.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <linux/list.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
 
@@ -62,6 +63,9 @@ static DEFINE_PER_CPU(unsigned int, nr_bp_flexible[TYPE_MAX]);
 
 static int nr_slots[TYPE_MAX];
 
+/* Keep track of the breakpoints attached to tasks */
+static LIST_HEAD(bp_task_head);
+
 static int constraints_initialized;
 
 /* Gather the number of total pinned and un-pinned bp in a cpuset */
@@ -103,33 +107,21 @@ static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
 	return 0;
 }
 
-static int task_bp_pinned(struct task_struct *tsk, enum bp_type_idx type)
+/*
+ * Count the number of breakpoints of the same type and same task.
+ * The given event must be not on the list.
+ */
+static int task_bp_pinned(struct perf_event *bp, enum bp_type_idx type)
 {
-	struct perf_event_context *ctx = tsk->perf_event_ctxp;
-	struct list_head *list;
-	struct perf_event *bp;
-	unsigned long flags;
+	struct perf_event_context *ctx = bp->ctx;
+	struct perf_event *iter;
 	int count = 0;
 
-	if (WARN_ONCE(!ctx, "No perf context for this task"))
-		return 0;
-
-	list = &ctx->event_list;
-
-	raw_spin_lock_irqsave(&ctx->lock, flags);
-
-	/*
-	 * The current breakpoint counter is not included in the list
-	 * at the open() callback time
-	 */
-	list_for_each_entry(bp, list, event_entry) {
-		if (bp->attr.type == PERF_TYPE_BREAKPOINT)
-			if (find_slot_idx(bp) == type)
-				count += hw_breakpoint_weight(bp);
+	list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
+		if (iter->ctx == ctx && find_slot_idx(iter) == type)
+			count += hw_breakpoint_weight(iter);
 	}
 
-	raw_spin_unlock_irqrestore(&ctx->lock, flags);
-
 	return count;
 }
 
@@ -149,7 +141,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
 		if (!tsk)
 			slots->pinned += max_task_bp_pinned(cpu, type);
 		else
-			slots->pinned += task_bp_pinned(tsk, type);
+			slots->pinned += task_bp_pinned(bp, type);
 		slots->flexible = per_cpu(nr_bp_flexible[type], cpu);
 
 		return;
@@ -162,7 +154,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
 		if (!tsk)
 			nr += max_task_bp_pinned(cpu, type);
 		else
-			nr += task_bp_pinned(tsk, type);
+			nr += task_bp_pinned(bp, type);
 
 		if (nr > slots->pinned)
 			slots->pinned = nr;
@@ -188,7 +180,7 @@ fetch_this_slot(struct bp_busy_slots *slots, int weight)
 /*
  * Add a pinned breakpoint for the given task in our constraint table
  */
-static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable,
+static void toggle_bp_task_slot(struct perf_event *bp, int cpu, bool enable,
 				enum bp_type_idx type, int weight)
 {
 	unsigned int *tsk_pinned;
@@ -196,10 +188,11 @@ static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable,
 	int old_idx = 0;
 	int idx = 0;
 
-	old_count = task_bp_pinned(tsk, type);
+	old_count = task_bp_pinned(bp, type);
 	old_idx = old_count - 1;
 	idx = old_idx + weight;
 
+	/* tsk_pinned[n] is the number of tasks having n breakpoints */
 	tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
 	if (enable) {
 		tsk_pinned[idx]++;
@@ -222,23 +215,41 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
 	int cpu = bp->cpu;
 	struct task_struct *tsk = bp->ctx->task;
 
+	/* Pinned counter cpu profiling */
+	if (!tsk) {
+
+		if (enable)
+			per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight;
+		else
+			per_cpu(nr_cpu_bp_pinned[type], bp->cpu) -= weight;
+		return;
+	}
+
 	/* Pinned counter task profiling */
-	if (tsk) {
-		if (cpu >= 0) {
-			toggle_bp_task_slot(tsk, cpu, enable, type, weight);
-			return;
-		}
 
+	if (!enable)
+		list_del(&bp->hw.bp_list);
+
+	if (cpu >= 0) {
+		toggle_bp_task_slot(bp, cpu, enable, type, weight);
+	} else {
 		for_each_online_cpu(cpu)
-			toggle_bp_task_slot(tsk, cpu, enable, type, weight);
-		return;
+			toggle_bp_task_slot(bp, cpu, enable, type, weight);
 	}
 
-	/* Pinned counter cpu profiling */
 	if (enable)
-		per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight;
-	else
-		per_cpu(nr_cpu_bp_pinned[type], bp->cpu) -= weight;
+		list_add_tail(&bp->hw.bp_list, &bp_task_head);
+}
+
+/*
+ * Function to perform processor-specific cleanup during unregistration
+ */
+__weak void arch_unregister_hw_breakpoint(struct perf_event *bp)
+{
+	/*
+	 * A weak stub function here for those archs that don't define
+	 * it inside arch/.../kernel/hw_breakpoint.c
+	 */
 }
 
 /*
@@ -301,6 +312,10 @@ static int __reserve_bp_slot(struct perf_event *bp)
 	weight = hw_breakpoint_weight(bp);
 
 	fetch_bp_busy_slots(&slots, bp, type);
+	/*
+	 * Simulate the addition of this breakpoint to the constraints
+	 * and see the result.
+	 */
 	fetch_this_slot(&slots, weight);
 
 	/* Flexible counters need to keep at least one slot */
@@ -339,6 +354,7 @@ void release_bp_slot(struct perf_event *bp)
 {
 	mutex_lock(&nr_bp_mutex);
 
+	arch_unregister_hw_breakpoint(bp);
 	__release_bp_slot(bp);
 
 	mutex_unlock(&nr_bp_mutex);
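The central change merged here for kernel/hw_breakpoint.c is that task_bp_pinned() no longer walks the task's perf context event list under ctx->lock; every task-bound breakpoint is instead kept on one global list, bp_task_head, linked through hw.bp_list, and the function counts same-context, same-type entries while the event currently being scheduled stays off the list. The following is a minimal userspace sketch of that accounting idea, not kernel code: the event structure, the list, and the weight handling are simplified stand-ins, and the ctx pointer is reduced to an integer task id.

```c
#include <stdio.h>

enum bp_type_idx { TYPE_INST, TYPE_DATA, TYPE_MAX };

/* Simplified stand-in for a task-bound breakpoint event. */
struct bp_event {
	int ctx;                 /* stand-in for bp->ctx: which task owns it */
	enum bp_type_idx type;   /* stand-in for find_slot_idx(bp) */
	int weight;              /* stand-in for hw_breakpoint_weight(bp) */
	struct bp_event *next;   /* stand-in for the hw.bp_list linkage */
};

/* Global list of task-bound breakpoints, like bp_task_head in the patch. */
static struct bp_event *bp_task_head;

/*
 * Count breakpoints of the same type and same task.
 * The event passed in must not be on the list yet.
 */
static int task_bp_pinned(const struct bp_event *bp)
{
	int count = 0;

	for (const struct bp_event *iter = bp_task_head; iter; iter = iter->next)
		if (iter->ctx == bp->ctx && iter->type == bp->type)
			count += iter->weight;

	return count;
}

/* Rough analogue of toggle_bp_slot(..., enable): count first, then link. */
static void add_bp(struct bp_event *bp)
{
	printf("task %d already pins %d slot(s) of type %d\n",
	       bp->ctx, task_bp_pinned(bp), (int)bp->type);

	bp->next = bp_task_head;	/* list_add_tail() stand-in */
	bp_task_head = bp;
}

int main(void)
{
	struct bp_event a = { .ctx = 1, .type = TYPE_DATA, .weight = 1 };
	struct bp_event b = { .ctx = 1, .type = TYPE_DATA, .weight = 1 };
	struct bp_event c = { .ctx = 2, .type = TYPE_INST, .weight = 1 };

	add_bp(&a);	/* task 1 already pins 0 */
	add_bp(&b);	/* task 1 already pins 1 */
	add_bp(&c);	/* task 2 already pins 0 */

	return 0;
}
```

Running it prints how many slots a task already pins before a new breakpoint of the same type is linked, which is the quantity fetch_bp_busy_slots() needs when it checks the constraint table.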
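The diff also adds arch_unregister_hw_breakpoint() as a __weak default, called from release_bp_slot(); an architecture that needs extra cleanup can provide a strong definition in its own hw_breakpoint.c and it overrides the stub at link time. Below is a small, hedged illustration of that weak-symbol mechanism outside the kernel: the signature is simplified to take no arguments, and __attribute__((weak)) is the GCC/Clang spelling behind the kernel's __weak macro.

```c
#include <stdio.h>

/*
 * Weak default, analogous to the __weak arch_unregister_hw_breakpoint()
 * stub added above (signature simplified for the illustration).
 * It survives linking only if no other object file provides a strong
 * definition of the same symbol.
 */
__attribute__((weak)) void arch_unregister_hw_breakpoint(void)
{
	printf("generic no-op cleanup\n");
}

int main(void)
{
	/*
	 * With only this file linked, the weak default runs.  Linking in
	 * another object that defines a non-weak function of the same name
	 * replaces it at link time -- that is the per-arch override path.
	 */
	arch_unregister_hw_breakpoint();
	return 0;
}
```

Compile this file alone and the default runs; link it with an object that defines a non-weak arch_unregister_hw_breakpoint() and that definition silently wins.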
