aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2017-02-17 16:19:39 -0500
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>2017-02-26 05:10:51 -0500
commitc2219da51664451149350e47321aa0fcf72a8b8f (patch)
tree5bf97e0165b25e4baefd065d7b4df69556486441
parentf23fd87e154f2c0fbac2e33c999e388f9baf2714 (diff)
irda: Fix lockdep annotations in hashbin_delete().
[ Upstream commit 4c03b862b12f980456f9de92db6d508a4999b788 ] A nested lock depth was added to the hashbin_delete() code but it doesn't actually work so well and results in tons of lockdep splats. Fix the code instead to properly drop the lock around the operation and just keep peeking the head of the hashbin queue. Reported-by: Dmitry Vyukov <dvyukov@google.com> Tested-by: Dmitry Vyukov <dvyukov@google.com> Signed-off-by: David S. Miller <davem@davemloft.net> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--net/irda/irqueue.c34
1 file changed, 16 insertions, 18 deletions
diff --git a/net/irda/irqueue.c b/net/irda/irqueue.c
index acbe61c7e683..160dc89335e2 100644
--- a/net/irda/irqueue.c
+++ b/net/irda/irqueue.c
@@ -383,9 +383,6 @@ EXPORT_SYMBOL(hashbin_new);
383 * for deallocating this structure if it's complex. If not the user can 383 * for deallocating this structure if it's complex. If not the user can
384 * just supply kfree, which should take care of the job. 384 * just supply kfree, which should take care of the job.
385 */ 385 */
386#ifdef CONFIG_LOCKDEP
387static int hashbin_lock_depth = 0;
388#endif
389int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func) 386int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
390{ 387{
391 irda_queue_t* queue; 388 irda_queue_t* queue;
@@ -396,22 +393,27 @@ int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
396 IRDA_ASSERT(hashbin->magic == HB_MAGIC, return -1;); 393 IRDA_ASSERT(hashbin->magic == HB_MAGIC, return -1;);
397 394
398 /* Synchronize */ 395 /* Synchronize */
399 if ( hashbin->hb_type & HB_LOCK ) { 396 if (hashbin->hb_type & HB_LOCK)
400 spin_lock_irqsave_nested(&hashbin->hb_spinlock, flags, 397 spin_lock_irqsave(&hashbin->hb_spinlock, flags);
401 hashbin_lock_depth++);
402 }
403 398
404 /* 399 /*
405 * Free the entries in the hashbin, TODO: use hashbin_clear when 400 * Free the entries in the hashbin, TODO: use hashbin_clear when
406 * it has been shown to work 401 * it has been shown to work
407 */ 402 */
408 for (i = 0; i < HASHBIN_SIZE; i ++ ) { 403 for (i = 0; i < HASHBIN_SIZE; i ++ ) {
409 queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]); 404 while (1) {
410 while (queue ) { 405 queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]);
411 if (free_func) 406
412 (*free_func)(queue); 407 if (!queue)
413 queue = dequeue_first( 408 break;
414 (irda_queue_t**) &hashbin->hb_queue[i]); 409
410 if (free_func) {
411 if (hashbin->hb_type & HB_LOCK)
412 spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
413 free_func(queue);
414 if (hashbin->hb_type & HB_LOCK)
415 spin_lock_irqsave(&hashbin->hb_spinlock, flags);
416 }
415 } 417 }
416 } 418 }
417 419
@@ -420,12 +422,8 @@ int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
420 hashbin->magic = ~HB_MAGIC; 422 hashbin->magic = ~HB_MAGIC;
421 423
422 /* Release lock */ 424 /* Release lock */
423 if ( hashbin->hb_type & HB_LOCK) { 425 if (hashbin->hb_type & HB_LOCK)
424 spin_unlock_irqrestore(&hashbin->hb_spinlock, flags); 426 spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
425#ifdef CONFIG_LOCKDEP
426 hashbin_lock_depth--;
427#endif
428 }
429 427
430 /* 428 /*
431 * Free the hashbin structure 429 * Free the hashbin structure