diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-05-16 16:17:56 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-05-16 16:17:56 -0400 |
| commit | 1c19b68a279c58d6da4379bf8b6d679a300a1daf (patch) | |
| tree | d3907c0aa9119477dfd961bb127468d58233ac4d /kernel/locking | |
| parent | 49817c33433a3cd6f320b13699e6746cc39b453b (diff) | |
| parent | a1cc5bcfcfca0b99f009b117785142dbdc3b87a3 (diff) | |
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking changes from Ingo Molnar:
"The main changes in this cycle were:
- pvqspinlock statistics fixes (Davidlohr Bueso)
- flip atomic_fetch_or() arguments (Peter Zijlstra)
- locktorture simplification (Paul E. McKenney)
- documentation updates (SeongJae Park, David Howells, Davidlohr
Bueso, Paul E. McKenney, Peter Zijlstra, Will Deacon)
- various fixes"
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
locking/atomics: Flip atomic_fetch_or() arguments
locking/pvqspinlock: Robustify init_qspinlock_stat()
locking/pvqspinlock: Avoid double resetting of stats
locking/locktorture: Simplify the torture_runnable computation
locking/Documentation: Clarify that ACQUIRE applies to loads, RELEASE applies to stores
locking/Documentation: State purpose of memory-barriers.txt
locking/Documentation: Add disclaimer
locking/Documentation/lockdep: Fix spelling mistakes
locking/lockdep: Deinline register_lock_class(), save 2328 bytes
locking/locktorture: Fix NULL pointer dereference for cleanup paths
locking/locktorture: Fix deboosting NULL pointer dereference
locking/Documentation: Mention smp_cond_acquire()
locking/Documentation: Insert white spaces consistently
locking/Documentation: Fix formatting inconsistencies
locking/Documentation: Add missed subsection in TOC
locking/Documentation: Fix missed s/lock/acquire renames
locking/Documentation: Clarify relationship of barrier() to control dependencies
Diffstat (limited to 'kernel/locking')
| -rw-r--r-- | kernel/locking/lockdep.c | 2 | ||||
| -rw-r--r-- | kernel/locking/locktorture.c | 25 | ||||
| -rw-r--r-- | kernel/locking/qspinlock_stat.h | 24 |
3 files changed, 31 insertions, 20 deletions
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 78c1c0ee6dc1..874d53eaf389 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c | |||
| @@ -708,7 +708,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass) | |||
| 708 | * yet. Otherwise we look it up. We cache the result in the lock object | 708 | * yet. Otherwise we look it up. We cache the result in the lock object |
| 709 | * itself, so actual lookup of the hash should be once per lock object. | 709 | * itself, so actual lookup of the hash should be once per lock object. |
| 710 | */ | 710 | */ |
| 711 | static inline struct lock_class * | 711 | static struct lock_class * |
| 712 | register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) | 712 | register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) |
| 713 | { | 713 | { |
| 714 | struct lockdep_subclass_key *key; | 714 | struct lockdep_subclass_key *key; |
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c index 8ef1919d63b2..f8c5af52a131 100644 --- a/kernel/locking/locktorture.c +++ b/kernel/locking/locktorture.c | |||
| @@ -75,12 +75,7 @@ struct lock_stress_stats { | |||
| 75 | long n_lock_acquired; | 75 | long n_lock_acquired; |
| 76 | }; | 76 | }; |
| 77 | 77 | ||
| 78 | #if defined(MODULE) | 78 | int torture_runnable = IS_ENABLED(MODULE); |
| 79 | #define LOCKTORTURE_RUNNABLE_INIT 1 | ||
| 80 | #else | ||
| 81 | #define LOCKTORTURE_RUNNABLE_INIT 0 | ||
| 82 | #endif | ||
| 83 | int torture_runnable = LOCKTORTURE_RUNNABLE_INIT; | ||
| 84 | module_param(torture_runnable, int, 0444); | 79 | module_param(torture_runnable, int, 0444); |
| 85 | MODULE_PARM_DESC(torture_runnable, "Start locktorture at module init"); | 80 | MODULE_PARM_DESC(torture_runnable, "Start locktorture at module init"); |
| 86 | 81 | ||
| @@ -394,12 +389,12 @@ static void torture_rtmutex_boost(struct torture_random_state *trsp) | |||
| 394 | 389 | ||
| 395 | if (!rt_task(current)) { | 390 | if (!rt_task(current)) { |
| 396 | /* | 391 | /* |
| 397 | * (1) Boost priority once every ~50k operations. When the | 392 | * Boost priority once every ~50k operations. When the |
| 398 | * task tries to take the lock, the rtmutex it will account | 393 | * task tries to take the lock, the rtmutex it will account |
| 399 | * for the new priority, and do any corresponding pi-dance. | 394 | * for the new priority, and do any corresponding pi-dance. |
| 400 | */ | 395 | */ |
| 401 | if (!(torture_random(trsp) % | 396 | if (trsp && !(torture_random(trsp) % |
| 402 | (cxt.nrealwriters_stress * factor))) { | 397 | (cxt.nrealwriters_stress * factor))) { |
| 403 | policy = SCHED_FIFO; | 398 | policy = SCHED_FIFO; |
| 404 | param.sched_priority = MAX_RT_PRIO - 1; | 399 | param.sched_priority = MAX_RT_PRIO - 1; |
| 405 | } else /* common case, do nothing */ | 400 | } else /* common case, do nothing */ |
| @@ -748,6 +743,15 @@ static void lock_torture_cleanup(void) | |||
| 748 | if (torture_cleanup_begin()) | 743 | if (torture_cleanup_begin()) |
| 749 | return; | 744 | return; |
| 750 | 745 | ||
| 746 | /* | ||
| 747 | * Indicates early cleanup, meaning that the test has not run, | ||
| 748 | * such as when passing bogus args when loading the module. As | ||
| 749 | * such, only perform the underlying torture-specific cleanups, | ||
| 750 | * and avoid anything related to locktorture. | ||
| 751 | */ | ||
| 752 | if (!cxt.lwsa) | ||
| 753 | goto end; | ||
| 754 | |||
| 751 | if (writer_tasks) { | 755 | if (writer_tasks) { |
| 752 | for (i = 0; i < cxt.nrealwriters_stress; i++) | 756 | for (i = 0; i < cxt.nrealwriters_stress; i++) |
| 753 | torture_stop_kthread(lock_torture_writer, | 757 | torture_stop_kthread(lock_torture_writer, |
| @@ -776,6 +780,7 @@ static void lock_torture_cleanup(void) | |||
| 776 | else | 780 | else |
| 777 | lock_torture_print_module_parms(cxt.cur_ops, | 781 | lock_torture_print_module_parms(cxt.cur_ops, |
| 778 | "End of test: SUCCESS"); | 782 | "End of test: SUCCESS"); |
| 783 | end: | ||
| 779 | torture_cleanup_end(); | 784 | torture_cleanup_end(); |
| 780 | } | 785 | } |
| 781 | 786 | ||
| @@ -870,6 +875,7 @@ static int __init lock_torture_init(void) | |||
| 870 | VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory"); | 875 | VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory"); |
| 871 | firsterr = -ENOMEM; | 876 | firsterr = -ENOMEM; |
| 872 | kfree(cxt.lwsa); | 877 | kfree(cxt.lwsa); |
| 878 | cxt.lwsa = NULL; | ||
| 873 | goto unwind; | 879 | goto unwind; |
| 874 | } | 880 | } |
| 875 | 881 | ||
| @@ -878,6 +884,7 @@ static int __init lock_torture_init(void) | |||
| 878 | cxt.lrsa[i].n_lock_acquired = 0; | 884 | cxt.lrsa[i].n_lock_acquired = 0; |
| 879 | } | 885 | } |
| 880 | } | 886 | } |
| 887 | |||
| 881 | lock_torture_print_module_parms(cxt.cur_ops, "Start of test"); | 888 | lock_torture_print_module_parms(cxt.cur_ops, "Start of test"); |
| 882 | 889 | ||
| 883 | /* Prepare torture context. */ | 890 | /* Prepare torture context. */ |
diff --git a/kernel/locking/qspinlock_stat.h b/kernel/locking/qspinlock_stat.h index d734b7502001..22e025309845 100644 --- a/kernel/locking/qspinlock_stat.h +++ b/kernel/locking/qspinlock_stat.h | |||
| @@ -191,8 +191,6 @@ static ssize_t qstat_write(struct file *file, const char __user *user_buf, | |||
| 191 | 191 | ||
| 192 | for (i = 0 ; i < qstat_num; i++) | 192 | for (i = 0 ; i < qstat_num; i++) |
| 193 | WRITE_ONCE(ptr[i], 0); | 193 | WRITE_ONCE(ptr[i], 0); |
| 194 | for (i = 0 ; i < qstat_num; i++) | ||
| 195 | WRITE_ONCE(ptr[i], 0); | ||
| 196 | } | 194 | } |
| 197 | return count; | 195 | return count; |
| 198 | } | 196 | } |
| @@ -214,10 +212,8 @@ static int __init init_qspinlock_stat(void) | |||
| 214 | struct dentry *d_qstat = debugfs_create_dir("qlockstat", NULL); | 212 | struct dentry *d_qstat = debugfs_create_dir("qlockstat", NULL); |
| 215 | int i; | 213 | int i; |
| 216 | 214 | ||
| 217 | if (!d_qstat) { | 215 | if (!d_qstat) |
| 218 | pr_warn("Could not create 'qlockstat' debugfs directory\n"); | 216 | goto out; |
| 219 | return 0; | ||
| 220 | } | ||
| 221 | 217 | ||
| 222 | /* | 218 | /* |
| 223 | * Create the debugfs files | 219 | * Create the debugfs files |
| @@ -227,12 +223,20 @@ static int __init init_qspinlock_stat(void) | |||
| 227 | * performance. | 223 | * performance. |
| 228 | */ | 224 | */ |
| 229 | for (i = 0; i < qstat_num; i++) | 225 | for (i = 0; i < qstat_num; i++) |
| 230 | debugfs_create_file(qstat_names[i], 0400, d_qstat, | 226 | if (!debugfs_create_file(qstat_names[i], 0400, d_qstat, |
| 231 | (void *)(long)i, &fops_qstat); | 227 | (void *)(long)i, &fops_qstat)) |
| 228 | goto fail_undo; | ||
| 229 | |||
| 230 | if (!debugfs_create_file(qstat_names[qstat_reset_cnts], 0200, d_qstat, | ||
| 231 | (void *)(long)qstat_reset_cnts, &fops_qstat)) | ||
| 232 | goto fail_undo; | ||
| 232 | 233 | ||
| 233 | debugfs_create_file(qstat_names[qstat_reset_cnts], 0200, d_qstat, | ||
| 234 | (void *)(long)qstat_reset_cnts, &fops_qstat); | ||
| 235 | return 0; | 234 | return 0; |
| 235 | fail_undo: | ||
| 236 | debugfs_remove_recursive(d_qstat); | ||
| 237 | out: | ||
| 238 | pr_warn("Could not create 'qlockstat' debugfs entries\n"); | ||
| 239 | return -ENOMEM; | ||
| 236 | } | 240 | } |
| 237 | fs_initcall(init_qspinlock_stat); | 241 | fs_initcall(init_qspinlock_stat); |
| 238 | 242 | ||
