| author | Arnd Bergmann <arnd@arndb.de> | 2018-12-16 14:48:21 -0500 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2018-12-18 18:12:53 -0500 |
| commit | 809c67059162e7ba85c61a83ad7547b4ffbb1e6e (patch) | |
| tree | 4fbd0d0a6421a0940ef7a9bbedb1726a40e17b64 | |
| parent | fa7b28c11bbf389617327ad4dd69bbbbbc16a8b4 (diff) | |
test_rhashtable: remove semaphore usage
This is one of only two files that initialize a semaphore to a negative
value. We don't really need the two semaphores here at all, but can do
the same thing in a more conventional and more efficient way, by using a
single waitqueue and an atomic thread counter.
This gets us a little bit closer to eliminating classic semaphores from
the kernel. It also fixes a corner case where we fail to continue after
one of the threads fails to start up.
An alternative would be to use a split kthread_create()+wake_up_process()
and completely eliminate the separate synchronization.
Acked-by: Phil Sutter <phil@nwl.cc>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | lib/test_rhashtable.c | 32 |
1 file changed, 19 insertions, 13 deletions
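For context before the hunks below, the startup rendezvous the patch switches to can be read in isolation: each worker thread decrements a shared counter, the last one to arrive wakes the loader, and all workers then block until the loader drops the counter to -1 and wakes them together. The following is a minimal, self-contained restatement of that pattern as a kernel-module-style sketch; the module, demo_thread(), and NTHREADS are illustrative names, not part of the patch:

```c
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/wait.h>

#define NTHREADS 4	/* illustrative thread count */

static atomic_t startup_count;			/* threads still starting up */
static DECLARE_WAIT_QUEUE_HEAD(startup_wait);

static int demo_thread(void *data)
{
	/* Last thread to arrive drops the count to 0 and wakes the loader. */
	if (atomic_dec_and_test(&startup_count))
		wake_up(&startup_wait);

	/* All threads then sleep until the loader sets the count to -1. */
	if (wait_event_interruptible(startup_wait,
				     atomic_read(&startup_count) == -1))
		return -EINTR;

	/* ... concurrent work starts here, all threads released at once ... */
	return 0;
}

static int __init demo_init(void)
{
	struct task_struct *tsk;
	int i;

	atomic_set(&startup_count, NTHREADS);
	for (i = 0; i < NTHREADS; i++) {
		tsk = kthread_run(demo_thread, NULL, "demo_thread[%d]", i);
		if (IS_ERR(tsk))
			/* don't wait for a thread that never started */
			atomic_dec(&startup_count);
	}

	/* Wait for every successfully started thread to check in ... */
	if (wait_event_interruptible(startup_wait,
				     atomic_read(&startup_count) == 0))
		return -EINTR;

	/* ... then release them all together: count goes from 0 to -1. */
	atomic_dec(&startup_count);
	wake_up_all(&startup_wait);
	return 0;
}
module_init(demo_init);
MODULE_LICENSE("GPL");
```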
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 82ac39ce5310..6a8ac7626797 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -20,11 +20,11 @@
 #include <linux/module.h>
 #include <linux/rcupdate.h>
 #include <linux/rhashtable.h>
-#include <linux/semaphore.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <linux/random.h>
 #include <linux/vmalloc.h>
+#include <linux/wait.h>
 
 #define MAX_ENTRIES	1000000
 #define TEST_INSERT_FAIL INT_MAX
@@ -112,8 +112,8 @@ static struct rhashtable_params test_rht_params_dup = {
 	.automatic_shrinking = false,
 };
 
-static struct semaphore prestart_sem;
-static struct semaphore startup_sem = __SEMAPHORE_INITIALIZER(startup_sem, 0);
+static atomic_t startup_count;
+static DECLARE_WAIT_QUEUE_HEAD(startup_wait);
 
 static int insert_retry(struct rhashtable *ht, struct test_obj *obj,
 			const struct rhashtable_params params)
@@ -634,9 +634,12 @@ static int threadfunc(void *data)
 	int i, step, err = 0, insert_retries = 0;
 	struct thread_data *tdata = data;
 
-	up(&prestart_sem);
-	if (down_interruptible(&startup_sem))
-		pr_err(" thread[%d]: down_interruptible failed\n", tdata->id);
+	if (atomic_dec_and_test(&startup_count))
+		wake_up(&startup_wait);
+	if (wait_event_interruptible(startup_wait, atomic_read(&startup_count) == -1)) {
+		pr_err(" thread[%d]: interrupted\n", tdata->id);
+		goto out;
+	}
 
 	for (i = 0; i < tdata->entries; i++) {
 		tdata->objs[i].value.id = i;
@@ -755,7 +758,7 @@ static int __init test_rht_init(void)
 
 	pr_info("Testing concurrent rhashtable access from %d threads\n",
 		tcount);
-	sema_init(&prestart_sem, 1 - tcount);
+	atomic_set(&startup_count, tcount);
 	tdata = vzalloc(array_size(tcount, sizeof(struct thread_data)));
 	if (!tdata)
 		return -ENOMEM;
@@ -781,15 +784,18 @@ static int __init test_rht_init(void)
 		tdata[i].objs = objs + i * entries;
 		tdata[i].task = kthread_run(threadfunc, &tdata[i],
 					    "rhashtable_thrad[%d]", i);
-		if (IS_ERR(tdata[i].task))
+		if (IS_ERR(tdata[i].task)) {
 			pr_err(" kthread_run failed for thread %d\n", i);
-		else
+			atomic_dec(&startup_count);
+		} else {
 			started_threads++;
+		}
 	}
-	if (down_interruptible(&prestart_sem))
-		pr_err(" down interruptible failed\n");
-	for (i = 0; i < tcount; i++)
-		up(&startup_sem);
+	if (wait_event_interruptible(startup_wait, atomic_read(&startup_count) == 0))
+		pr_err(" wait_event interruptible failed\n");
+	/* count is 0 now, set it to -1 and wake up all threads together */
+	atomic_dec(&startup_count);
+	wake_up_all(&startup_wait);
 	for (i = 0; i < tcount; i++) {
 		if (IS_ERR(tdata[i].task))
 			continue;
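The commit message also mentions an alternative: splitting kthread_run() into kthread_create() plus a later wake_up_process(), which would remove the startup counter and waitqueue entirely, since threads created with kthread_create() do not run until explicitly woken. A rough sketch of that idea, reusing the illustrative demo_thread()/NTHREADS names from the sketch above; this is not what the patch implements:

```c
/* Hypothetical variant: create the threads stopped, finish all setup,
 * then start them together, so no startup rendezvous is needed. */
static int __init demo_init_alternative(void)
{
	struct task_struct *tasks[NTHREADS];
	int i;

	for (i = 0; i < NTHREADS; i++) {
		tasks[i] = kthread_create(demo_thread, NULL, "demo_thread[%d]", i);
		if (IS_ERR(tasks[i]))
			pr_err("kthread_create failed for thread %d\n", i);
	}

	/* Everything is set up; only now do the threads begin to run. */
	for (i = 0; i < NTHREADS; i++)
		if (!IS_ERR(tasks[i]))
			wake_up_process(tasks[i]);

	return 0;
}
```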