author		Davidlohr Bueso <dave@stgolabs.net>	2014-09-29 09:14:23 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2014-09-30 03:10:00 -0400
commit		e34191fad8e5d9fe4e76f6d03b5e29e3eae7535a (patch)
tree		bdf4b88a473d2a7c416cf44dc826cb8393cb0b4a /kernel/locking
parent		dd56af42bd829c6e770ed69812bd65a04eaeb1e4 (diff)
locktorture: Support rwlocks
Add a "rw_lock" torture test to stress kernel rwlocks and their irq variant. Reader critical regions are 5x longer than writers'; as such, a similar ratio of lock acquisitions is seen in the statistics. In the case of massive contention, both readers and writers hold the lock for 1/10 of a second.

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
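The 1/10-second hold is triggered probabilistically: torture_rwlock_write_delay() in the diff below takes the 100 ms mdelay() path only when torture_random() returns an exact multiple of cxt.nrealwriters_stress * 2000 * longdelay_ms, and otherwise spins for just 2 us (readers spin for 10 us, which is where the 5x-longer reader critical regions come from). A minimal userspace sketch of that selection logic, assuming a writer count of 4 and using rand() as a stand-in for the kernel's torture_random():

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const unsigned long longdelay_ms = 100;
	const unsigned long nwriters = 4;	/* assumed; locktorture derives the real count from nwriters_stress */
	const unsigned long iters = 10000000;
	unsigned long longs = 0, i;

	srand(1);
	for (i = 0; i < iters; i++) {
		/* Same test as torture_rwlock_write_delay(): take the
		 * long-delay path only when the random draw is an exact
		 * multiple of nwriters * 2000 * longdelay_ms.  Assumes
		 * RAND_MAX is large relative to the modulus. */
		if (!((unsigned long)rand() %
		      (nwriters * 2000 * longdelay_ms)))
			longs++;
	}
	printf("long delays: %lu of %lu calls (expect ~1 in %lu)\n",
	       longs, iters, nwriters * 2000 * longdelay_ms);
	return 0;
}

With four writers the long path therefore fires roughly once per 800,000 delay calls: rare enough to emulate likely code, frequent enough that a long torture run is guaranteed to see massive contention.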
Diffstat (limited to 'kernel/locking')
-rw-r--r--	kernel/locking/locktorture.c	115
1 file changed, 112 insertions(+), 3 deletions(-)
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index 540d5dfe1112..0762b25b4110 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -27,6 +27,7 @@
 #include <linux/kthread.h>
 #include <linux/err.h>
 #include <linux/spinlock.h>
+#include <linux/rwlock.h>
 #include <linux/mutex.h>
 #include <linux/smp.h>
 #include <linux/interrupt.h>
@@ -229,6 +230,110 @@ static struct lock_torture_ops spin_lock_irq_ops = {
 	.name		= "spin_lock_irq"
 };
 
+static DEFINE_RWLOCK(torture_rwlock);
+
+static int torture_rwlock_write_lock(void) __acquires(torture_rwlock)
+{
+	write_lock(&torture_rwlock);
+	return 0;
+}
+
+static void torture_rwlock_write_delay(struct torture_random_state *trsp)
+{
+	const unsigned long shortdelay_us = 2;
+	const unsigned long longdelay_ms = 100;
+
+	/* We want a short delay mostly to emulate likely code, and
+	 * we want a long delay occasionally to force massive contention.
+	 */
+	if (!(torture_random(trsp) %
+	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
+		mdelay(longdelay_ms);
+	else
+		udelay(shortdelay_us);
+}
+
+static void torture_rwlock_write_unlock(void) __releases(torture_rwlock)
+{
+	write_unlock(&torture_rwlock);
+}
+
+static int torture_rwlock_read_lock(void) __acquires(torture_rwlock)
+{
+	read_lock(&torture_rwlock);
+	return 0;
+}
+
+static void torture_rwlock_read_delay(struct torture_random_state *trsp)
+{
+	const unsigned long shortdelay_us = 10;
+	const unsigned long longdelay_ms = 100;
+
+	/* We want a short delay mostly to emulate likely code, and
+	 * we want a long delay occasionally to force massive contention.
+	 */
+	if (!(torture_random(trsp) %
+	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
+		mdelay(longdelay_ms);
+	else
+		udelay(shortdelay_us);
+}
+
+static void torture_rwlock_read_unlock(void) __releases(torture_rwlock)
+{
+	read_unlock(&torture_rwlock);
+}
+
+static struct lock_torture_ops rw_lock_ops = {
+	.writelock	= torture_rwlock_write_lock,
+	.write_delay	= torture_rwlock_write_delay,
+	.writeunlock	= torture_rwlock_write_unlock,
+	.readlock	= torture_rwlock_read_lock,
+	.read_delay	= torture_rwlock_read_delay,
+	.readunlock	= torture_rwlock_read_unlock,
+	.name		= "rw_lock"
+};
+
+static int torture_rwlock_write_lock_irq(void) __acquires(torture_rwlock)
+{
+	unsigned long flags;
+
+	write_lock_irqsave(&torture_rwlock, flags);
+	cxt.cur_ops->flags = flags;
+	return 0;
+}
+
+static void torture_rwlock_write_unlock_irq(void)
+__releases(torture_rwlock)
+{
+	write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
+}
+
+static int torture_rwlock_read_lock_irq(void) __acquires(torture_rwlock)
+{
+	unsigned long flags;
+
+	read_lock_irqsave(&torture_rwlock, flags);
+	cxt.cur_ops->flags = flags;
+	return 0;
+}
+
+static void torture_rwlock_read_unlock_irq(void)
+__releases(torture_rwlock)
+{
+	read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
+}
+
+static struct lock_torture_ops rw_lock_irq_ops = {
+	.writelock	= torture_rwlock_write_lock_irq,
+	.write_delay	= torture_rwlock_write_delay,
+	.writeunlock	= torture_rwlock_write_unlock_irq,
+	.readlock	= torture_rwlock_read_lock_irq,
+	.read_delay	= torture_rwlock_read_delay,
+	.readunlock	= torture_rwlock_read_unlock_irq,
+	.name		= "rw_lock_irq"
+};
+
 static DEFINE_MUTEX(torture_mutex);
 
 static int torture_mutex_lock(void) __acquires(torture_mutex)
@@ -535,8 +640,11 @@ static int __init lock_torture_init(void)
 	int i, j;
 	int firsterr = 0;
 	static struct lock_torture_ops *torture_ops[] = {
-		&lock_busted_ops, &spin_lock_ops, &spin_lock_irq_ops,
-		&mutex_lock_ops, &rwsem_lock_ops,
+		&lock_busted_ops,
+		&spin_lock_ops, &spin_lock_irq_ops,
+		&rw_lock_ops, &rw_lock_irq_ops,
+		&mutex_lock_ops,
+		&rwsem_lock_ops,
 	};
 
 	if (!torture_init_begin(torture_type, verbose, &torture_runnable))
@@ -571,7 +679,8 @@ static int __init lock_torture_init(void)
 	cxt.debug_lock = true;
 #endif
 #ifdef CONFIG_DEBUG_SPINLOCK
-	if (strncmp(torture_type, "spin", 4) == 0)
+	if ((strncmp(torture_type, "spin", 4) == 0) ||
+	    (strncmp(torture_type, "rw_lock", 7) == 0))
 		cxt.debug_lock = true;
 #endif
 
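For reference, the torture type is chosen at module load time, e.g. "modprobe locktorture torture_type=rw_lock" (or rw_lock_irq). The patch follows locktorture's usual extension recipe: define a static lock, wrap its acquire/release and delay operations in callbacks, collect them in a lock_torture_ops structure, and add that structure to torture_ops[] in lock_torture_init(). A minimal sketch of the same recipe for a hypothetical raw-spinlock type (the torture_raw_lock and raw_lock_ops names are invented for illustration and are not part of this commit):

static DEFINE_RAW_SPINLOCK(torture_raw_lock);	/* hypothetical lock */

static int torture_raw_lock_write_lock(void) __acquires(torture_raw_lock)
{
	raw_spin_lock(&torture_raw_lock);
	return 0;
}

static void torture_raw_lock_write_unlock(void) __releases(torture_raw_lock)
{
	raw_spin_unlock(&torture_raw_lock);
}

static struct lock_torture_ops raw_lock_ops = {
	.writelock	= torture_raw_lock_write_lock,
	.write_delay	= torture_spin_lock_write_delay, /* reuse the spinlock delay */
	.writeunlock	= torture_raw_lock_write_unlock,
	/* readlock left NULL: locktorture treats this as a write-only type */
	.name		= "raw_lock"
};

&raw_lock_ops would then be appended to torture_ops[] so that torture_type=raw_lock can select it.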