author	Ming Lei <tom.leiming@gmail.com>	2009-07-16 09:44:29 -0400
committer	Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-07-24 04:49:44 -0400
commit	c94aa5ca3088018d2a7a9bd3258aefffe29df265 (patch)
tree	29c81673e37315087ee3087180fae043085e6343 /kernel/lockdep_internals.h
parent	4be3bd7849165e7efa6b0b35a23d6a3598d97465 (diff)
lockdep: Print the shortest dependency chain if finding a circle
Currently lockdep prints the first circle it detects, if one exists, when acquiring a new (next) lock.

This patch instead prints the shortest path from the next lock to be acquired to the previously held lock when a circle is found. The patch still uses the current method to detect the circle; once it is found, a breadth-first search (BFS) is used to compute the shortest path from the next lock to the previous lock in the forward lock dependency graph. Printing the shortest path shortens the reported dependency chain and makes troubleshooting of possible circular locking easier.

Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1246201486-7308-2-git-send-email-tom.leiming@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
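As a rough illustration of the approach described above (not part of the patch itself), here is a minimal user-space C sketch of a breadth-first search over a small directed dependency graph, using a fixed-size ring buffer as the BFS queue in the spirit of the circular_queue added below. Every name in it (adj, parent, bfs_shortest, the sizes) is invented for the example.

/*
 * Hedged sketch, not kernel code: BFS over a small directed
 * "lock dependency" graph, with a fixed-size ring buffer as the
 * queue.  The real patch implements the queue in
 * kernel/lockdep_internals.h and the search in kernel/lockdep.c.
 */
#include <stdio.h>

#define NR_NODES  8
#define QUEUE_SZ 16			/* power of two keeps the wrap cheap */

static int adj[NR_NODES][NR_NODES];	/* adj[a][b]: edge a -> b */
static int parent[NR_NODES];		/* BFS tree, -1 = not visited */

/* Return the shortest path length from src to dst, or -1 if unreachable. */
static int bfs_shortest(int src, int dst)
{
	int queue[QUEUE_SZ];
	unsigned int front = 0, rear = 0;
	int i, n;

	for (i = 0; i < NR_NODES; i++)
		parent[i] = -1;

	parent[src] = src;
	queue[rear++ % QUEUE_SZ] = src;

	while (front != rear) {
		n = queue[front++ % QUEUE_SZ];
		if (n == dst) {
			int depth = 0;

			while (n != src) {	/* walk the BFS tree back */
				n = parent[n];
				depth++;
			}
			return depth;
		}
		for (i = 0; i < NR_NODES; i++) {
			if (adj[n][i] && parent[i] < 0) {
				parent[i] = n;
				queue[rear++ % QUEUE_SZ] = i;
			}
		}
	}
	return -1;
}

int main(void)
{
	/* A 0 -> 1 -> 2 -> 0 circle plus a longer 0 -> 3 -> 2 detour. */
	adj[0][1] = adj[1][2] = adj[2][0] = 1;
	adj[0][3] = adj[3][2] = 1;

	printf("shortest path 0 -> 2: %d edges\n", bfs_shortest(0, 2));
	return 0;
}

Because BFS explores nodes in order of distance, the first time the target is dequeued the recorded parent chain is already a shortest path, which is exactly the property the patch relies on when it prints the dependency chain.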
Diffstat (limited to 'kernel/lockdep_internals.h')
-rw-r--r--	kernel/lockdep_internals.h	| 83
1 file changed, 83 insertions(+), 0 deletions(-)
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index 699a2ac3a0d7..6f48d37d5be2 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -136,3 +136,86 @@ extern atomic_t nr_find_usage_backwards_recursions;
 # define debug_atomic_dec(ptr)		do { } while (0)
 # define debug_atomic_read(ptr)	0
 #endif
+
+/*
+ * The circular_queue and helpers are used to implement the
+ * breadth-first search (BFS) algorithm, by which we can build
+ * the shortest path from the next lock to be acquired to the
+ * previously held lock if there is a circular dependency between them.
+ */
+#define MAX_CIRCULAR_QUE_SIZE		4096UL
+struct circular_queue {
+	unsigned long element[MAX_CIRCULAR_QUE_SIZE];
+	unsigned int front, rear;
+};
+
+#define LOCK_ACCESSED		1UL
+#define LOCK_ACCESSED_MASK	(~LOCK_ACCESSED)
+
+static inline void __cq_init(struct circular_queue *cq)
+{
+	cq->front = cq->rear = 0;
+}
+
+static inline int __cq_empty(struct circular_queue *cq)
+{
+	return (cq->front == cq->rear);
+}
+
+static inline int __cq_full(struct circular_queue *cq)
+{
+	return ((cq->rear + 1) % MAX_CIRCULAR_QUE_SIZE) == cq->front;
+}
+
+static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
+{
+	if (__cq_full(cq))
+		return -1;
+
+	cq->element[cq->rear] = elem;
+	cq->rear = (cq->rear + 1) % MAX_CIRCULAR_QUE_SIZE;
+	return 0;
+}
+
+static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
+{
+	if (__cq_empty(cq))
+		return -1;
+
+	*elem = cq->element[cq->front];
+	cq->front = (cq->front + 1) % MAX_CIRCULAR_QUE_SIZE;
+	return 0;
+}
+
+static inline int __cq_get_elem_count(struct circular_queue *cq)
+{
+	return (cq->rear - cq->front) % MAX_CIRCULAR_QUE_SIZE;
+}
+
+static inline void mark_lock_accessed(struct lock_list *lock,
+				      struct lock_list *parent)
+{
+	lock->parent = (void *)parent + LOCK_ACCESSED;
+}
+
+static inline unsigned long lock_accessed(struct lock_list *lock)
+{
+	return (unsigned long)lock->parent & LOCK_ACCESSED;
+}
+
+static inline struct lock_list *get_lock_parent(struct lock_list *child)
+{
+	return (struct lock_list *)
+		((unsigned long)child->parent & LOCK_ACCESSED_MASK);
+}
+
+static inline unsigned long get_lock_depth(struct lock_list *child)
+{
+	unsigned long depth = 0;
+	struct lock_list *parent;
+
+	while ((parent = get_lock_parent(child))) {
+		child = parent;
+		depth++;
+	}
+	return depth;
+}
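The mark_lock_accessed()/get_lock_parent() helpers above rely on struct lock_list pointers being at least two-byte aligned, so the low bit of ->parent can double as a "visited during this search" flag, and masking it off recovers the real parent when walking the shortest path back to the root. Below is a hedged user-space sketch of that pointer-tagging trick with an invented node type; it sets the flag with a bitwise OR, which for aligned pointers is equivalent to the pointer addition used in the patch.

/*
 * Hedged sketch, not kernel code: tagging a "visited" flag into the
 * low bit of a parent pointer and masking it off again to walk the
 * recorded path.  The node type and all names are invented here.
 */
#include <stdio.h>
#include <stdint.h>

#define NODE_ACCESSED		((uintptr_t)1)
#define NODE_ACCESSED_MASK	(~NODE_ACCESSED)

struct node {
	const char *name;
	struct node *parent;	/* low bit doubles as the accessed flag */
};

static void mark_accessed(struct node *n, struct node *parent)
{
	n->parent = (struct node *)((uintptr_t)parent | NODE_ACCESSED);
}

static int accessed(struct node *n)
{
	return (uintptr_t)n->parent & NODE_ACCESSED;
}

static struct node *get_parent(struct node *n)
{
	return (struct node *)((uintptr_t)n->parent & NODE_ACCESSED_MASK);
}

int main(void)
{
	struct node a = { "lock_a", NULL };
	struct node b = { "lock_b", NULL };
	struct node c = { "lock_c", NULL };

	/* Pretend BFS discovered a -> b -> c; record the tree edges. */
	mark_accessed(&b, &a);
	mark_accessed(&c, &b);

	/* Walk back from c to the root, printing the recorded chain. */
	for (struct node *n = &c; n; n = get_parent(n))
		printf("%s%s\n", n->name, accessed(n) ? "" : " (root)");

	return 0;
}

Counting the steps of that same parent walk, rather than printing them, is essentially what get_lock_depth() in the patch does.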