diff options
| author | Peter Zijlstra <peterz@infradead.org> | 2017-03-03 07:57:56 -0500 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2017-03-16 04:57:09 -0400 |
| commit | bf7b3ac2e36ac054f93e5dd8d85dfd754b5e1c09 (patch) | |
| tree | 17010ad6d0694cb065462dd0110c43f2eee0d39e /kernel/locking | |
| parent | 383776fa7527745224446337f2dcfb0f0d1b8b56 (diff) | |
locking/ww_mutex: Improve test to cover acquire context changes
Currently each thread starts an acquire context only once, and
performs all its loop iterations under it.
This means that the Wound/Wait relations between threads are fixed.
To make things a little more realistic and cover more of the
functionality with the test, open a new acquire context for each loop iteration.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/locking')
| -rw-r--r-- | kernel/locking/test-ww_mutex.c | 15 |
1 file changed, 7 insertions, 8 deletions
diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c index 6b7abb334ca6..90d8d8879969 100644 --- a/kernel/locking/test-ww_mutex.c +++ b/kernel/locking/test-ww_mutex.c | |||
| @@ -398,12 +398,11 @@ static void stress_inorder_work(struct work_struct *work) | |||
| 398 | if (!order) | 398 | if (!order) |
| 399 | return; | 399 | return; |
| 400 | 400 | ||
| 401 | ww_acquire_init(&ctx, &ww_class); | ||
| 402 | |||
| 403 | do { | 401 | do { |
| 404 | int contended = -1; | 402 | int contended = -1; |
| 405 | int n, err; | 403 | int n, err; |
| 406 | 404 | ||
| 405 | ww_acquire_init(&ctx, &ww_class); | ||
| 407 | retry: | 406 | retry: |
| 408 | err = 0; | 407 | err = 0; |
| 409 | for (n = 0; n < nlocks; n++) { | 408 | for (n = 0; n < nlocks; n++) { |
| @@ -433,9 +432,9 @@ retry: | |||
| 433 | __func__, err); | 432 | __func__, err); |
| 434 | break; | 433 | break; |
| 435 | } | 434 | } |
| 436 | } while (--stress->nloops); | ||
| 437 | 435 | ||
| 438 | ww_acquire_fini(&ctx); | 436 | ww_acquire_fini(&ctx); |
| 437 | } while (--stress->nloops); | ||
| 439 | 438 | ||
| 440 | kfree(order); | 439 | kfree(order); |
| 441 | kfree(stress); | 440 | kfree(stress); |
| @@ -470,9 +469,9 @@ static void stress_reorder_work(struct work_struct *work) | |||
| 470 | kfree(order); | 469 | kfree(order); |
| 471 | order = NULL; | 470 | order = NULL; |
| 472 | 471 | ||
| 473 | ww_acquire_init(&ctx, &ww_class); | ||
| 474 | |||
| 475 | do { | 472 | do { |
| 473 | ww_acquire_init(&ctx, &ww_class); | ||
| 474 | |||
| 476 | list_for_each_entry(ll, &locks, link) { | 475 | list_for_each_entry(ll, &locks, link) { |
| 477 | err = ww_mutex_lock(ll->lock, &ctx); | 476 | err = ww_mutex_lock(ll->lock, &ctx); |
| 478 | if (!err) | 477 | if (!err) |
| @@ -495,9 +494,9 @@ static void stress_reorder_work(struct work_struct *work) | |||
| 495 | dummy_load(stress); | 494 | dummy_load(stress); |
| 496 | list_for_each_entry(ll, &locks, link) | 495 | list_for_each_entry(ll, &locks, link) |
| 497 | ww_mutex_unlock(ll->lock); | 496 | ww_mutex_unlock(ll->lock); |
| 498 | } while (--stress->nloops); | ||
| 499 | 497 | ||
| 500 | ww_acquire_fini(&ctx); | 498 | ww_acquire_fini(&ctx); |
| 499 | } while (--stress->nloops); | ||
| 501 | 500 | ||
| 502 | out: | 501 | out: |
| 503 | list_for_each_entry_safe(ll, ln, &locks, link) | 502 | list_for_each_entry_safe(ll, ln, &locks, link) |
