path: root/kernel/lockdep_internals.h
author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-07-16 09:44:29 -0400
committer	Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-07-24 04:53:29 -0400
commit	af012961450949ea297b209e091bd1a3805b8a0a (patch)
tree	d092a182ad014b4508079bacf416deb1e0650c01 /kernel/lockdep_internals.h
parent	12f3dfd022d7e616757a94f0538d3d525d806a16 (diff)
lockdep: BFS cleanup
Some cleanups of the lockdep code after the BFS series:

 - Remove the last traces of the generation id
 - Fixup comment style
 - Move the bfs routines into lockdep.c
 - Cleanup the bfs routines

[ tom.leiming@gmail.com: Fix crash ]

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1246201486-7308-11-git-send-email-tom.leiming@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/lockdep_internals.h')
-rw-r--r--	kernel/lockdep_internals.h	97
1 file changed, 2 insertions(+), 95 deletions(-)
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index 6baa8807efdd..a2ee95ad1313 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -91,6 +91,8 @@ extern unsigned int nr_process_chains;
 extern unsigned int max_lockdep_depth;
 extern unsigned int max_recursion_depth;
 
+extern unsigned int max_bfs_queue_depth;
+
 #ifdef CONFIG_PROVE_LOCKING
 extern unsigned long lockdep_count_forward_deps(struct lock_class *);
 extern unsigned long lockdep_count_backward_deps(struct lock_class *);
@@ -136,98 +138,3 @@ extern atomic_t nr_find_usage_backwards_recursions;
 # define debug_atomic_dec(ptr) do { } while (0)
 # define debug_atomic_read(ptr) 0
 #endif
-
-
-extern unsigned int max_bfs_queue_depth;
-extern unsigned long nr_list_entries;
-extern struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
-extern unsigned long bfs_accessed[];
-
-/*For good efficiency of modular, we use power of 2*/
-#define MAX_CIRCULAR_QUE_SIZE 4096UL
-
-/* The circular_queue and helpers is used to implement the
- * breadth-first search(BFS)algorithem, by which we can build
- * the shortest path from the next lock to be acquired to the
- * previous held lock if there is a circular between them.
- * */
-struct circular_queue{
-	unsigned long element[MAX_CIRCULAR_QUE_SIZE];
-	unsigned int front, rear;
-};
-
-static inline void __cq_init(struct circular_queue *cq)
-{
-	cq->front = cq->rear = 0;
-	bitmap_zero(bfs_accessed, MAX_LOCKDEP_ENTRIES);
-}
-
-static inline int __cq_empty(struct circular_queue *cq)
-{
-	return (cq->front == cq->rear);
-}
-
-static inline int __cq_full(struct circular_queue *cq)
-{
-	return ((cq->rear + 1)&(MAX_CIRCULAR_QUE_SIZE-1)) == cq->front;
-}
-
-static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
-{
-	if (__cq_full(cq))
-		return -1;
-
-	cq->element[cq->rear] = elem;
-	cq->rear = (cq->rear + 1)&(MAX_CIRCULAR_QUE_SIZE-1);
-	return 0;
-}
-
-static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
-{
-	if (__cq_empty(cq))
-		return -1;
-
-	*elem = cq->element[cq->front];
-	cq->front = (cq->front + 1)&(MAX_CIRCULAR_QUE_SIZE-1);
-	return 0;
-}
-
-static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
-{
-	return (cq->rear - cq->front)&(MAX_CIRCULAR_QUE_SIZE-1);
-}
-
-static inline void mark_lock_accessed(struct lock_list *lock,
-					struct lock_list *parent)
-{
-	unsigned long nr;
-	nr = lock - list_entries;
-	WARN_ON(nr >= nr_list_entries);
-	lock->parent = parent;
-	set_bit(nr, bfs_accessed);
-}
-
-static inline unsigned long lock_accessed(struct lock_list *lock)
-{
-	unsigned long nr;
-	nr = lock - list_entries;
-	WARN_ON(nr >= nr_list_entries);
-	return test_bit(nr, bfs_accessed);
-}
-
-static inline struct lock_list *get_lock_parent(struct lock_list *child)
-{
-	return child->parent;
-}
-
-static inline int get_lock_depth(struct lock_list *child)
-{
-	int depth = 0;
-	struct lock_list *parent;
-
-	while ((parent = get_lock_parent(child))) {
-		child = parent;
-		depth++;
-	}
-	return depth;
-}
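
The helpers removed above combine two small techniques worth seeing in isolation: a ring buffer whose size is a power of two, so that "index % size" reduces to the cheaper "index & (size - 1)" (the point of the "we use power of 2" comment), and a parent-pointer chain that lets BFS recover the path depth from any visited node back to the root, as get_lock_depth() does. Below is a minimal, self-contained userspace sketch of both; every name in it (demo_queue, demo_node, and so on) is invented for illustration, and it is not the kernel code that this commit moves into kernel/lockdep.c.

/*
 * Userspace sketch of the removed lockdep BFS helpers: a power-of-two
 * ring buffer plus a parent-pointer depth walk. Illustrative only.
 */
#include <stdio.h>

#define DEMO_QUEUE_SIZE 8u	/* must be a power of two for the mask trick */

struct demo_queue {
	unsigned long element[DEMO_QUEUE_SIZE];
	unsigned int front, rear;
};

static int demo_enqueue(struct demo_queue *q, unsigned long elem)
{
	/* One slot stays unused so "full" is distinguishable from
	 * "empty" (front == rear), exactly as in __cq_full(). */
	if (((q->rear + 1) & (DEMO_QUEUE_SIZE - 1)) == q->front)
		return -1;
	q->element[q->rear] = elem;
	q->rear = (q->rear + 1) & (DEMO_QUEUE_SIZE - 1);	/* wrap via AND, not % */
	return 0;
}

static int demo_dequeue(struct demo_queue *q, unsigned long *elem)
{
	if (q->front == q->rear)	/* empty */
		return -1;
	*elem = q->element[q->front];
	q->front = (q->front + 1) & (DEMO_QUEUE_SIZE - 1);
	return 0;
}

/* Parent-pointer chain, as in mark_lock_accessed()/get_lock_depth():
 * each visited node remembers who enqueued it, so the path length
 * falls out of a simple walk toward the root. */
struct demo_node {
	struct demo_node *parent;
};

static int demo_depth(struct demo_node *child)
{
	int depth = 0;

	while (child->parent) {
		child = child->parent;
		depth++;
	}
	return depth;
}

int main(void)
{
	struct demo_queue q = { .front = 0, .rear = 0 };
	struct demo_node a = { NULL }, b = { &a }, c = { &b };
	unsigned long v;

	for (unsigned long i = 0; i < 10; i++)
		if (demo_enqueue(&q, i))
			printf("full at %lu\n", i);	/* fires for i = 7, 8, 9 */

	while (demo_dequeue(&q, &v) == 0)
		printf("dequeued %lu\n", v);		/* FIFO order: 0..6 */

	printf("depth of c: %d\n", demo_depth(&c));	/* prints 2 */
	return 0;
}

The one-slot sacrifice in demo_enqueue() is the usual price of this design: with front == rear reserved to mean "empty", a queue of size N holds at most N - 1 elements, but no separate element counter is needed and both wrap-arounds stay a single AND.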