commit     f20786ff4da51e56b1956acf30be2552be266746
tree       f6d0a9ed84ca476ca19fe7131d842699881756c4 /kernel
parent     8e18257d29238311e82085152741f0c3aa18b74d
author     Peter Zijlstra <a.p.zijlstra@chello.nl>  2007-07-19 04:48:56 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-07-19 13:04:49 -0400

lockstat: core infrastructure
Introduce the core lock statistics code.
Lock statistics provide lock wait-time and hold-time (as well as the counts
of the corresponding contention and acquisition events). Also, the first few
call-sites that encounter contention are tracked.
Lock wait-time is the time spent waiting on the lock. This provides insight
into the locking scheme; that is, a heavily contended lock is indicative of
too coarse a locking scheme.
Lock hold-time is the duration the lock was held; it provides a reference for
the wait-time numbers, so they can be put into perspective.

  1)
    lock
  2)
    ... do stuff ..
    unlock
  3)

The time between 1 and 2 is the wait-time. The time between 2 and 3 is the
hold-time.
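
In terms of timestamps (an illustrative sketch only; the t1/t2/t3 variables
are hypothetical, though sched_clock() is what the patch itself uses for its
stamps):

  u64 t1, t2, t3;

  t1 = sched_clock();     /* 1) we start waiting for the lock */
  _lock();
  t2 = sched_clock();     /* 2) we acquired the lock          */
  /* ... do stuff ... */
  _unlock();
  t3 = sched_clock();     /* 3) we released the lock          */

  /* wait-time = t2 - t1, hold-time = t3 - t2 */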
The lockdep held-lock tracking code is reused, because it already collects locks
into meaningful groups (classes), and because it is an existing infrastructure
for lock instrumentation.
Currently lockdep tracks lock acquisition with two hooks:

  lock()
    lock_acquire()
    _lock()

  ... code protected by lock ...

  unlock()
    lock_release()
    _unlock()
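
As a concrete (hypothetical) illustration, a lock type wired into these two
hooks could look as follows; the my_* names are made up, but the
lock_acquire()/lock_release() argument lists match their prototypes at the
time of this patch:

  struct my_lock {
          struct my_raw_lock      raw;        /* the platform primitive  */
          struct lockdep_map      dep_map;    /* lockdep's hook-in point */
  };

  static inline void my_lock(struct my_lock *l)
  {
          /* subclass=0, trylock=0, read=0, check=2 (full validation) */
          lock_acquire(&l->dep_map, 0, 0, 0, 2, _RET_IP_);
          my_raw_lock(&l->raw);                   /* the real _lock()   */
  }

  static inline void my_unlock(struct my_lock *l)
  {
          lock_release(&l->dep_map, 0, _RET_IP_); /* nested=0           */
          my_raw_unlock(&l->raw);                 /* the real _unlock() */
  }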
We need to extend this with two more hooks, in order to measure contention:

  lock_contended() - used to measure contention events
  lock_acquired()  - completion of the contention
These are then placed the following way:

  lock()
    lock_acquire()
    if (!_try_lock())
      lock_contended()
      _lock()
    lock_acquired()

  ... do locked stuff ...

  unlock()
    lock_release()
    _unlock()
(Note: the try_lock() 'trick' is used to avoid instrumenting all of the
platform-dependent lock primitive implementations.)
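
A generic wrapper implementing this placement might look like the sketch
below; the actual helper lives on the include/ side of the patch series,
outside the kernel/-limited diff shown here, so treat this as a
reconstruction:

  /*
   * 'try' and 'lock' are the platform primitives (_try_lock()/_lock());
   * only this one wrapper needs to know about the two new hooks.
   */
  #define LOCK_CONTENDED(_lock, try, lock)                      \
  do {                                                          \
          if (!try(_lock)) {                                    \
                  lock_contended(&(_lock)->dep_map, _RET_IP_);  \
                  lock(_lock);                                  \
          }                                                     \
          lock_acquired(&(_lock)->dep_map);                     \
  } while (0)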
It is also possible to toggle the two lockdep features at runtime using:

  /proc/sys/kernel/prove_locking
  /proc/sys/kernel/lock_stat

(e.g. echo 0 > /proc/sys/kernel/prove_locking; especially turning off the
O(n^2) prove_locking functionality can help)
[akpm@linux-foundation.org: build fixes]
[akpm@linux-foundation.org: nuke unneeded ifdefs]
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Jason Baron <jbaron@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel')

 kernel/lockdep.c | 247 +++++++++++++++++++++++++++++++++++++++++++++++++++
 kernel/sysctl.c  |  22 ++
 2 files changed, 269 insertions(+), 0 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 87ac36425070..70ca4db28aff 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -42,6 +42,20 @@
 
 #include "lockdep_internals.h"
 
+#ifdef CONFIG_PROVE_LOCKING
+int prove_locking = 1;
+module_param(prove_locking, int, 0644);
+#else
+#define prove_locking 0
+#endif
+
+#ifdef CONFIG_LOCK_STAT
+int lock_stat = 1;
+module_param(lock_stat, int, 0644);
+#else
+#define lock_stat 0
+#endif
+
 /*
  * lockdep_lock: protects the lockdep graph, the hashes and the
  * class/list/hash allocators.
@@ -104,6 +118,70 @@ static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
 unsigned long nr_lock_classes;
 static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
 
+#ifdef CONFIG_LOCK_STAT
+static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
+
+static int lock_contention_point(struct lock_class *class, unsigned long ip)
+{
+        int i;
+
+        for (i = 0; i < ARRAY_SIZE(class->contention_point); i++) {
+                if (class->contention_point[i] == 0) {
+                        class->contention_point[i] = ip;
+                        break;
+                }
+                if (class->contention_point[i] == ip)
+                        break;
+        }
+
+        return i;
+}
+
+static void lock_time_inc(struct lock_time *lt, s64 time)
+{
+        if (time > lt->max)
+                lt->max = time;
+
+        if (time < lt->min || !lt->min)
+                lt->min = time;
+
+        lt->total += time;
+        lt->nr++;
+}
+
+static struct lock_class_stats *get_lock_stats(struct lock_class *class)
+{
+        return &get_cpu_var(lock_stats)[class - lock_classes];
+}
+
+static void put_lock_stats(struct lock_class_stats *stats)
+{
+        put_cpu_var(lock_stats);
+}
+
+static void lock_release_holdtime(struct held_lock *hlock)
+{
+        struct lock_class_stats *stats;
+        s64 holdtime;
+
+        if (!lock_stat)
+                return;
+
+        holdtime = sched_clock() - hlock->holdtime_stamp;
+
+        stats = get_lock_stats(hlock->class);
+        if (hlock->read)
+                lock_time_inc(&stats->read_holdtime, holdtime);
+        else
+                lock_time_inc(&stats->write_holdtime, holdtime);
+        put_lock_stats(stats);
+}
+#else
+static inline void lock_release_holdtime(struct held_lock *hlock)
+{
+}
+#endif
+
 /*
  * We keep a global list of all lock classes. The list only grows,
  * never shrinks. The list is only accessed with the lockdep
@@ -2221,6 +2299,9 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
         int chain_head = 0;
         u64 chain_key;
 
+        if (!prove_locking)
+                check = 1;
+
         if (unlikely(!debug_locks))
                 return 0;
 
@@ -2271,6 +2352,10 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
         hlock->read = read;
         hlock->check = check;
         hlock->hardirqs_off = hardirqs_off;
+#ifdef CONFIG_LOCK_STAT
+        hlock->waittime_stamp = 0;
+        hlock->holdtime_stamp = sched_clock();
+#endif
 
         if (check == 2 && !mark_irqflags(curr, hlock))
                 return 0;
@@ -2411,6 +2496,8 @@ lock_release_non_nested(struct task_struct *curr,
         return print_unlock_inbalance_bug(curr, lock, ip);
 
 found_it:
+        lock_release_holdtime(hlock);
+
         /*
          * We have the right lock to unlock, 'hlock' points to it.
          * Now we remove it from the stack, and add back the other
@@ -2463,6 +2550,8 @@ static int lock_release_nested(struct task_struct *curr,
 
         curr->curr_chain_key = hlock->prev_chain_key;
 
+        lock_release_holdtime(hlock);
+
 #ifdef CONFIG_DEBUG_LOCKDEP
         hlock->prev_chain_key = 0;
         hlock->class = NULL;
@@ -2537,6 +2626,9 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 {
         unsigned long flags;
 
+        if (unlikely(!lock_stat && !prove_locking))
+                return;
+
         if (unlikely(current->lockdep_recursion))
                 return;
 
@@ -2556,6 +2648,9 @@ void lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
 {
         unsigned long flags;
 
+        if (unlikely(!lock_stat && !prove_locking))
+                return;
+
         if (unlikely(current->lockdep_recursion))
                 return;
 
@@ -2569,6 +2664,158 @@ void lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
 
 EXPORT_SYMBOL_GPL(lock_release);
 
+#ifdef CONFIG_LOCK_STAT
+static int
+print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
+                           unsigned long ip)
+{
+        if (!debug_locks_off())
+                return 0;
+        if (debug_locks_silent)
+                return 0;
+
+        printk("\n=================================\n");
+        printk(  "[ BUG: bad contention detected! ]\n");
+        printk(  "---------------------------------\n");
+        printk("%s/%d is trying to contend lock (",
+                curr->comm, curr->pid);
+        print_lockdep_cache(lock);
+        printk(") at:\n");
+        print_ip_sym(ip);
+        printk("but there are no locks held!\n");
+        printk("\nother info that might help us debug this:\n");
+        lockdep_print_held_locks(curr);
+
+        printk("\nstack backtrace:\n");
+        dump_stack();
+
+        return 0;
+}
+
+static void
+__lock_contended(struct lockdep_map *lock, unsigned long ip)
+{
+        struct task_struct *curr = current;
+        struct held_lock *hlock, *prev_hlock;
+        struct lock_class_stats *stats;
+        unsigned int depth;
+        int i, point;
+
+        depth = curr->lockdep_depth;
+        if (DEBUG_LOCKS_WARN_ON(!depth))
+                return;
+
+        prev_hlock = NULL;
+        for (i = depth-1; i >= 0; i--) {
+                hlock = curr->held_locks + i;
+                /*
+                 * We must not cross into another context:
+                 */
+                if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
+                        break;
+                if (hlock->instance == lock)
+                        goto found_it;
+                prev_hlock = hlock;
+        }
+        print_lock_contention_bug(curr, lock, ip);
+        return;
+
+found_it:
+        hlock->waittime_stamp = sched_clock();
+
+        point = lock_contention_point(hlock->class, ip);
+
+        stats = get_lock_stats(hlock->class);
+        if (point < ARRAY_SIZE(stats->contention_point))
+                stats->contention_point[point]++;
+        put_lock_stats(stats);
+}
+
+static void
+__lock_acquired(struct lockdep_map *lock)
+{
+        struct task_struct *curr = current;
+        struct held_lock *hlock, *prev_hlock;
+        struct lock_class_stats *stats;
+        unsigned int depth;
+        u64 now;
+        s64 waittime;
+        int i;
+
+        depth = curr->lockdep_depth;
+        if (DEBUG_LOCKS_WARN_ON(!depth))
+                return;
+
+        prev_hlock = NULL;
+        for (i = depth-1; i >= 0; i--) {
+                hlock = curr->held_locks + i;
+                /*
+                 * We must not cross into another context:
+                 */
+                if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
+                        break;
+                if (hlock->instance == lock)
+                        goto found_it;
+                prev_hlock = hlock;
+        }
+        print_lock_contention_bug(curr, lock, _RET_IP_);
+        return;
+
+found_it:
+        if (!hlock->waittime_stamp)
+                return;
+
+        now = sched_clock();
+        waittime = now - hlock->waittime_stamp;
+        hlock->holdtime_stamp = now;
+
+        stats = get_lock_stats(hlock->class);
+        if (hlock->read)
+                lock_time_inc(&stats->read_waittime, waittime);
+        else
+                lock_time_inc(&stats->write_waittime, waittime);
+        put_lock_stats(stats);
+}
+
+void lock_contended(struct lockdep_map *lock, unsigned long ip)
+{
+        unsigned long flags;
+
+        if (unlikely(!lock_stat))
+                return;
+
+        if (unlikely(current->lockdep_recursion))
+                return;
+
+        raw_local_irq_save(flags);
+        check_flags(flags);
+        current->lockdep_recursion = 1;
+        __lock_contended(lock, ip);
+        current->lockdep_recursion = 0;
+        raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(lock_contended);
+
+void lock_acquired(struct lockdep_map *lock)
+{
+        unsigned long flags;
+
+        if (unlikely(!lock_stat))
+                return;
+
+        if (unlikely(current->lockdep_recursion))
+                return;
+
+        raw_local_irq_save(flags);
+        check_flags(flags);
+        current->lockdep_recursion = 1;
+        __lock_acquired(lock);
+        current->lockdep_recursion = 0;
+        raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(lock_acquired);
+#endif
+
 /*
  * Used by the testsuite, sanitize the validator state
  * after a simulated failure:
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 2aaa3f98185d..e69179b1809c 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -161,6 +161,8 @@ extern ctl_table inotify_table[];
 int sysctl_legacy_va_layout;
 #endif
 
+extern int prove_locking;
+extern int lock_stat;
 
 /* The default sysctl tables: */
 
@@ -282,6 +284,26 @@ static ctl_table kern_table[] = {
                 .mode           = 0644,
                 .proc_handler   = &proc_dointvec,
         },
+#ifdef CONFIG_PROVE_LOCKING
+        {
+                .ctl_name       = CTL_UNNUMBERED,
+                .procname       = "prove_locking",
+                .data           = &prove_locking,
+                .maxlen         = sizeof(int),
+                .mode           = 0644,
+                .proc_handler   = &proc_dointvec,
+        },
+#endif
+#ifdef CONFIG_LOCK_STAT
+        {
+                .ctl_name       = CTL_UNNUMBERED,
+                .procname       = "lock_stat",
+                .data           = &lock_stat,
+                .maxlen         = sizeof(int),
+                .mode           = 0644,
+                .proc_handler   = &proc_dointvec,
+        },
+#endif
         {
                 .ctl_name       = CTL_UNNUMBERED,
                 .procname       = "sched_features",