-rw-r--r--  include/linux/lockdep.h  | 10
-rw-r--r--  kernel/locking/lockdep.c | 80
2 files changed, 90 insertions(+), 0 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 066ba4157541..c5b6b5830acf 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -255,6 +255,7 @@ struct held_lock {
 	unsigned int check:1;		/* see lock_acquire() comment */
 	unsigned int hardirqs_off:1;
 	unsigned int references:12;	/* 32 bits */
+	unsigned int pin_count;
 };
 
 /*
@@ -354,6 +355,9 @@ extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
 extern void lockdep_clear_current_reclaim_state(void);
 extern void lockdep_trace_alloc(gfp_t mask);
 
+extern void lock_pin_lock(struct lockdep_map *lock);
+extern void lock_unpin_lock(struct lockdep_map *lock);
+
 # define INIT_LOCKDEP	.lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
 
 #define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)
@@ -368,6 +372,9 @@ extern void lockdep_trace_alloc(gfp_t mask);
 
 #define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)
 
+#define lockdep_pin_lock(l)	lock_pin_lock(&(l)->dep_map)
+#define lockdep_unpin_lock(l)	lock_unpin_lock(&(l)->dep_map)
+
 #else /* !CONFIG_LOCKDEP */
 
 static inline void lockdep_off(void)
@@ -420,6 +427,9 @@ struct lock_class_key { };
 
 #define lockdep_recursing(tsk)	(0)
 
+#define lockdep_pin_lock(l)	do { (void)(l); } while (0)
+#define lockdep_unpin_lock(l)	do { (void)(l); } while (0)
+
 #endif /* !LOCKDEP */
 
 #ifdef CONFIG_LOCK_STAT
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index a266d5165b63..18f9f434d17e 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3157,6 +3157,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	hlock->waittime_stamp = 0;
 	hlock->holdtime_stamp = lockstat_clock();
 #endif
+	hlock->pin_count = 0;
 
 	if (check && !mark_irqflags(curr, hlock))
 		return 0;
@@ -3403,6 +3404,8 @@ found_it:
 	if (hlock->instance == lock)
 		lock_release_holdtime(hlock);
 
+	WARN(hlock->pin_count, "releasing a pinned lock\n");
+
 	if (hlock->references) {
 		hlock->references--;
 		if (hlock->references) {
@@ -3459,6 +3462,49 @@ static int __lock_is_held(struct lockdep_map *lock)
 	return 0;
 }
 
+static void __lock_pin_lock(struct lockdep_map *lock)
+{
+	struct task_struct *curr = current;
+	int i;
+
+	if (unlikely(!debug_locks))
+		return;
+
+	for (i = 0; i < curr->lockdep_depth; i++) {
+		struct held_lock *hlock = curr->held_locks + i;
+
+		if (match_held_lock(hlock, lock)) {
+			hlock->pin_count++;
+			return;
+		}
+	}
+
+	WARN(1, "pinning an unheld lock\n");
+}
+
+static void __lock_unpin_lock(struct lockdep_map *lock)
+{
+	struct task_struct *curr = current;
+	int i;
+
+	if (unlikely(!debug_locks))
+		return;
+
+	for (i = 0; i < curr->lockdep_depth; i++) {
+		struct held_lock *hlock = curr->held_locks + i;
+
+		if (match_held_lock(hlock, lock)) {
+			if (WARN(!hlock->pin_count, "unpinning an unpinned lock\n"))
+				return;
+
+			hlock->pin_count--;
+			return;
+		}
+	}
+
+	WARN(1, "unpinning an unheld lock\n");
+}
+
 /*
  * Check whether we follow the irq-flags state precisely:
  */
@@ -3582,6 +3628,40 @@ int lock_is_held(struct lockdep_map *lock)
 }
 EXPORT_SYMBOL_GPL(lock_is_held);
 
+void lock_pin_lock(struct lockdep_map *lock)
+{
+	unsigned long flags;
+
+	if (unlikely(current->lockdep_recursion))
+		return;
+
+	raw_local_irq_save(flags);
+	check_flags(flags);
+
+	current->lockdep_recursion = 1;
+	__lock_pin_lock(lock);
+	current->lockdep_recursion = 0;
+	raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(lock_pin_lock);
+
+void lock_unpin_lock(struct lockdep_map *lock)
+{
+	unsigned long flags;
+
+	if (unlikely(current->lockdep_recursion))
+		return;
+
+	raw_local_irq_save(flags);
+	check_flags(flags);
+
+	current->lockdep_recursion = 1;
+	__lock_unpin_lock(lock);
+	current->lockdep_recursion = 0;
+	raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(lock_unpin_lock);
+
 void lockdep_set_current_reclaim_state(gfp_t gfp_mask)
 {
 	current->lockdep_reclaim_gfp = gfp_mask;
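
For context, the pair added above is intended to be used through the lockdep_pin_lock()/lockdep_unpin_lock() macros on a lock that embeds a dep_map (for example a raw_spinlock_t or a mutex). The sketch below is illustrative only and not part of the patch; struct engine, engine_run() and do_callbacks() are hypothetical names. The caller pins the lock before handing control to code that must keep it held; lock_release() then WARNs ("releasing a pinned lock") if that code drops the lock, and pinning or unpinning a lock the task does not actually hold also WARNs.

/*
 * Hypothetical usage sketch (not part of the patch): pin a held lock
 * across a region that must not release it.  The checks need
 * CONFIG_LOCKDEP; without it the macros compile to no-ops.
 */
struct engine {
	raw_spinlock_t	lock;	/* embeds a dep_map under lockdep */
	/* ... */
};

static void engine_run(struct engine *e)
{
	raw_spin_lock(&e->lock);

	/*
	 * Callees may be handed 'e' from here on, but must not drop
	 * e->lock behind our back.  While pinned, lockdep warns if the
	 * lock is released.
	 */
	lockdep_pin_lock(&e->lock);

	do_callbacks(e);	/* hypothetical; must keep e->lock held */

	lockdep_unpin_lock(&e->lock);
	raw_spin_unlock(&e->lock);
}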