author     Davidlohr Bueso <dave@stgolabs.net>             2014-09-12 00:41:30 -0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>   2014-09-16 16:41:36 -0400
commit     4a3b427f0b27c7e15edfa607524ff012a155337a (patch)
tree       7277b03eb68e8d552c6698ff186c4a5d2c0aa687 /kernel/locking/locktorture.c
parent     4f6332c1dce9c64ef6bf93842067250dd850e482 (diff)
locktorture: Support rwsems
Now that the torture framework has reader lock support, we can easily
torture rwsems as well. The delays are an arbitrary design default:
readers get higher (5x) critical-region latencies than writers,
50 ms versus 10 ms.
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
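For context, those numbers fall out of the delay callbacks added below: in the
common case writers spin for longdelay_ms / 10 = 10 ms and readers for
longdelay_ms / 2 = 50 ms, while roughly one call in
nrealwriters_stress * 2000 * longdelay_ms takes the long path instead
(1000 ms for writers, 200 ms for readers) to force contention. The new type is
selected through locktorture's torture_type module parameter; the invocation
below is only an illustrative sketch, assuming the existing nwriters_stress
and nreaders_stress parameters and arbitrary thread counts:

    # illustrative example only; values are arbitrary, parameter names per the locktorture module
    modprobe locktorture torture_type=rwsem_lock nwriters_stress=4 nreaders_stress=8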
Diffstat (limited to 'kernel/locking/locktorture.c')
-rw-r--r--   kernel/locking/locktorture.c | 68
1 file changed, 67 insertions(+), 1 deletion(-)
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index c1073d79e440..8480118c0ca8 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -265,6 +265,71 @@ static struct lock_torture_ops mutex_lock_ops = {
         .name           = "mutex_lock"
 };
 
+static DECLARE_RWSEM(torture_rwsem);
+static int torture_rwsem_down_write(void) __acquires(torture_rwsem)
+{
+        down_write(&torture_rwsem);
+        return 0;
+}
+
+static void torture_rwsem_write_delay(struct torture_random_state *trsp)
+{
+        const unsigned long longdelay_ms = 100;
+
+        /* We want a long delay occasionally to force massive contention. */
+        if (!(torture_random(trsp) %
+              (nrealwriters_stress * 2000 * longdelay_ms)))
+                mdelay(longdelay_ms * 10);
+        else
+                mdelay(longdelay_ms / 10);
+#ifdef CONFIG_PREEMPT
+        if (!(torture_random(trsp) % (nrealwriters_stress * 20000)))
+                preempt_schedule();  /* Allow test to be preempted. */
+#endif
+}
+
+static void torture_rwsem_up_write(void) __releases(torture_rwsem)
+{
+        up_write(&torture_rwsem);
+}
+
+static int torture_rwsem_down_read(void) __acquires(torture_rwsem)
+{
+        down_read(&torture_rwsem);
+        return 0;
+}
+
+static void torture_rwsem_read_delay(struct torture_random_state *trsp)
+{
+        const unsigned long longdelay_ms = 100;
+
+        /* We want a long delay occasionally to force massive contention. */
+        if (!(torture_random(trsp) %
+              (nrealwriters_stress * 2000 * longdelay_ms)))
+                mdelay(longdelay_ms * 2);
+        else
+                mdelay(longdelay_ms / 2);
+#ifdef CONFIG_PREEMPT
+        if (!(torture_random(trsp) % (nrealreaders_stress * 20000)))
+                preempt_schedule();  /* Allow test to be preempted. */
+#endif
+}
+
+static void torture_rwsem_up_read(void) __releases(torture_rwsem)
+{
+        up_read(&torture_rwsem);
+}
+
+static struct lock_torture_ops rwsem_lock_ops = {
+        .writelock      = torture_rwsem_down_write,
+        .write_delay    = torture_rwsem_write_delay,
+        .writeunlock    = torture_rwsem_up_write,
+        .readlock       = torture_rwsem_down_read,
+        .read_delay     = torture_rwsem_read_delay,
+        .readunlock     = torture_rwsem_up_read,
+        .name           = "rwsem_lock"
+};
+
 /*
  * Lock torture writer kthread.  Repeatedly acquires and releases
  * the lock, checking for duplicate acquisitions.
@@ -467,7 +532,8 @@ static int __init lock_torture_init(void)
         int i, j;
         int firsterr = 0;
         static struct lock_torture_ops *torture_ops[] = {
-                &lock_busted_ops, &spin_lock_ops, &spin_lock_irq_ops, &mutex_lock_ops,
+                &lock_busted_ops, &spin_lock_ops, &spin_lock_irq_ops,
+                &mutex_lock_ops, &rwsem_lock_ops,
         };
 
         if (!torture_init_begin(torture_type, verbose, &torture_runnable))