author		Yuyang Du <duyuyang@gmail.com>		2019-05-06 04:19:29 -0400
committer	Ingo Molnar <mingo@kernel.org>		2019-06-03 05:55:46 -0400
commit		c1661325597f68bc9e632c4fa9c86983d56fba4f (patch)
tree		8f2b14e7b23a90373d7f2764b02c16b6783aa5e3 /kernel/locking
parent		aa4807719e076bfb2dee9c96adf2c648e47d472f (diff)
locking/lockdep: Change the return type of __cq_dequeue()
Change __cq_dequeue() to return the dequeued element (or NULL when the queue is
empty) instead of an error code through an output parameter. With this change,
the BFS search can drive its loop directly from the dequeue result, which
slightly simplifies the code. No functional change.
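
As a standalone illustration of the pattern (a hypothetical userspace sketch,
not the lockdep code; names such as cq_dequeue() and struct node are made up
here), a dequeue that returns a pointer, NULL when empty, lets the caller loop
on the dequeue result directly:

/*
 * Sketch only: fixed-size circular queue whose dequeue returns a pointer,
 * NULL when empty, so the caller loops on the dequeue result.
 */
#include <stdio.h>
#include <stddef.h>

#define CQ_SIZE 8			/* must be a power of two */
#define CQ_MASK (CQ_SIZE - 1)

struct node {
	int val;
};

struct circular_queue {
	struct node *element[CQ_SIZE];
	unsigned int front, rear;
};

static int cq_empty(struct circular_queue *cq)
{
	return cq->front == cq->rear;
}

static void cq_enqueue(struct circular_queue *cq, struct node *n)
{
	cq->element[cq->rear] = n;
	cq->rear = (cq->rear + 1) & CQ_MASK;
}

/* Return the front element, or NULL if the queue is empty. */
static struct node *cq_dequeue(struct circular_queue *cq)
{
	struct node *n;

	if (cq_empty(cq))
		return NULL;

	n = cq->element[cq->front];
	cq->front = (cq->front + 1) & CQ_MASK;

	return n;
}

int main(void)
{
	struct circular_queue cq = { .front = 0, .rear = 0 };
	struct node a = { 1 }, b = { 2 };
	struct node *n;

	cq_enqueue(&cq, &a);
	cq_enqueue(&cq, &b);

	/* The dequeue result drives the loop, as in the patched __bfs(). */
	while ((n = cq_dequeue(&cq)))
		printf("%d\n", n->val);

	return 0;
}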
Signed-off-by: Yuyang Du <duyuyang@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: bvanassche@acm.org
Cc: frederic@kernel.org
Cc: ming.lei@redhat.com
Cc: will.deacon@arm.com
Link: https://lkml.kernel.org/r/20190506081939.74287-14-duyuyang@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/locking')
-rw-r--r--	kernel/locking/lockdep.c	21
1 file changed, 13 insertions(+), 8 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index d467ba825dca..d23dcb47389e 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1308,14 +1308,21 @@ static inline int __cq_enqueue(struct circular_queue *cq, struct lock_list *elem
 	return 0;
 }
 
-static inline int __cq_dequeue(struct circular_queue *cq, struct lock_list **elem)
+/*
+ * Dequeue an element from the circular_queue, return a lock_list if
+ * the queue is not empty, or NULL if otherwise.
+ */
+static inline struct lock_list * __cq_dequeue(struct circular_queue *cq)
 {
+	struct lock_list * lock;
+
 	if (__cq_empty(cq))
-		return -1;
+		return NULL;
 
-	*elem = cq->element[cq->front];
+	lock = cq->element[cq->front];
 	cq->front = (cq->front + 1) & CQ_MASK;
-	return 0;
+
+	return lock;
 }
 
 static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
@@ -1367,6 +1374,7 @@ static int __bfs(struct lock_list *source_entry,
 		 int forward)
 {
 	struct lock_list *entry;
+	struct lock_list *lock;
 	struct list_head *head;
 	struct circular_queue *cq = &lock_cq;
 	int ret = 1;
@@ -1388,10 +1396,7 @@ static int __bfs(struct lock_list *source_entry,
 	__cq_init(cq);
 	__cq_enqueue(cq, source_entry);
 
-	while (!__cq_empty(cq)) {
-		struct lock_list *lock;
-
-		__cq_dequeue(cq, &lock);
+	while ((lock = __cq_dequeue(cq))) {
 
 		if (!lock->class) {
 			ret = -2;