path: root/kernel/hw_breakpoint.c
author     Jason Wessel <jason.wessel@windriver.com>    2010-01-28 18:04:43 -0500
committer  Ingo Molnar <mingo@elte.hu>    2010-01-30 02:42:21 -0500
commit     5352ae638e2d7d5c9b2e4d528676bbf2af6fd6f3 (patch)
tree       95bab4d28f7c91bc5b7e79b3e1c879dfe96c52b9 /kernel/hw_breakpoint.c
parent     cc0967490c1c3824bc5b75718b6ca8a51d9f2617 (diff)
perf, hw_breakpoint, kgdb: Do not take mutex for kernel debugger
This patch fixes the regression in functionality where the kernel debugger and the perf API do not nicely share hw breakpoint reservations. The kernel debugger cannot use any mutex_lock() calls because it can start the kernel running from an invalid context.

A mutex-free version of the reservation API needed to be created for the kernel debugger to safely update hw breakpoint reservations.

The possibility of a breakpoint reservation being concurrently processed at the time kgdb interrupts the system is improbable. Should this corner case occur, the end user is warned and the kernel debugger refuses to update the hardware breakpoint reservations.

Any time the kernel debugger reserves a hardware breakpoint, it will be a system-wide reservation.

Signed-off-by: Jason Wessel <jason.wessel@windriver.com>
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: kgdb-bugreport@lists.sourceforge.net
Cc: K.Prasad <prasad@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Alan Stern <stern@rowland.harvard.edu>
Cc: torvalds@linux-foundation.org
LKML-Reference: <1264719883-7285-3-git-send-email-jason.wessel@windriver.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/hw_breakpoint.c')
-rw-r--r--    kernel/hw_breakpoint.c    52
1 file changed, 42 insertions, 10 deletions
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index c030ae657f2..8a5c7d55ac9 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -243,38 +243,70 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
  * ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
  *   + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
  */
-int reserve_bp_slot(struct perf_event *bp)
+static int __reserve_bp_slot(struct perf_event *bp)
 {
 	struct bp_busy_slots slots = {0};
-	int ret = 0;
-
-	mutex_lock(&nr_bp_mutex);
 
 	fetch_bp_busy_slots(&slots, bp);
 
 	/* Flexible counters need to keep at least one slot */
-	if (slots.pinned + (!!slots.flexible) == HBP_NUM) {
-		ret = -ENOSPC;
-		goto end;
-	}
+	if (slots.pinned + (!!slots.flexible) == HBP_NUM)
+		return -ENOSPC;
 
 	toggle_bp_slot(bp, true);
 
-end:
+	return 0;
+}
+
+int reserve_bp_slot(struct perf_event *bp)
+{
+	int ret;
+
+	mutex_lock(&nr_bp_mutex);
+
+	ret = __reserve_bp_slot(bp);
+
 	mutex_unlock(&nr_bp_mutex);
 
 	return ret;
 }
 
+static void __release_bp_slot(struct perf_event *bp)
+{
+	toggle_bp_slot(bp, false);
+}
+
 void release_bp_slot(struct perf_event *bp)
 {
 	mutex_lock(&nr_bp_mutex);
 
-	toggle_bp_slot(bp, false);
+	__release_bp_slot(bp);
 
 	mutex_unlock(&nr_bp_mutex);
 }
 
+/*
+ * Allow the kernel debugger to reserve breakpoint slots without
+ * taking a lock using the dbg_* variant of for the reserve and
+ * release breakpoint slots.
+ */
+int dbg_reserve_bp_slot(struct perf_event *bp)
+{
+	if (mutex_is_locked(&nr_bp_mutex))
+		return -1;
+
+	return __reserve_bp_slot(bp);
+}
+
+int dbg_release_bp_slot(struct perf_event *bp)
+{
+	if (mutex_is_locked(&nr_bp_mutex))
+		return -1;
+
+	__release_bp_slot(bp);
+
+	return 0;
+}
 
 int register_perf_hw_breakpoint(struct perf_event *bp)
 {
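
For context, the debugger-side caller is not part of this diff. The fragment below is a minimal sketch of how arch-level kgdb code might use the new mutex-free calls; the helper name kgdb_try_reserve_hw_break() and the warning text are illustrative assumptions, not code from the kgdb tree.

#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>

/*
 * Sketch only (hypothetical helper): attempt a system-wide hw breakpoint
 * reservation from debugger context without sleeping.
 */
static int kgdb_try_reserve_hw_break(struct perf_event *bp)
{
	int ret;

	/*
	 * dbg_reserve_bp_slot() refuses the request instead of sleeping
	 * when nr_bp_mutex is already held by the interrupted context.
	 */
	ret = dbg_reserve_bp_slot(bp);
	if (ret < 0) {
		printk(KERN_WARNING "kgdb: hw breakpoint slot bookkeeping is busy, reservation refused\n");
		return ret;
	}

	return 0;
}

The mutex_is_locked() test in the diff is only a best-effort guard, which is why the commit message treats a concurrent reservation at kgdb entry as an improbable corner case that ends in a warning rather than a deadlock.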