author	Linus Torvalds <torvalds@linux-foundation.org>	2017-11-13 15:38:26 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-11-13 15:38:26 -0500
commit	8e9a2dba8686187d8c8179e5b86640e653963889 (patch)
tree	a4ba543649219cbb28d91aab65b785d763f5d069 /security
parent	6098850e7e6978f95a958f79a645a653228d0002 (diff)
parent	450cbdd0125cfa5d7bbf9e2a6b6961cc48d29730 (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull core locking updates from Ingo Molnar:
 "The main changes in this cycle are:

   - Another attempt at enabling cross-release lockdep dependency
     tracking (automatically part of CONFIG_PROVE_LOCKING=y), this time
     with better performance and fewer false positives. (Byungchul Park)

   - Introduce lockdep_assert_irqs_enabled()/disabled() and convert
     open-coded equivalents to lockdep variants. (Frederic Weisbecker)

   - Add down_read_killable() and use it in the VFS's iterate_dir()
     method. (Kirill Tkhai)

   - Convert remaining uses of ACCESS_ONCE() to READ_ONCE()/WRITE_ONCE().
     Most of the conversion was Coccinelle driven. (Mark Rutland,
     Paul E. McKenney)

   - Get rid of lockless_dereference(), by strengthening Alpha atomics,
     strengthening READ_ONCE() with smp_read_barrier_depends() and thus
     being able to convert users of lockless_dereference() to
     READ_ONCE(). (Will Deacon)

   - Various micro-optimizations:

        - better PV qspinlocks (Waiman Long),
        - better x86 barriers (Michael S. Tsirkin)
        - better x86 refcounts (Kees Cook)

   - ... plus other fixes and enhancements. (Borislav Petkov,
     Juergen Gross, Miguel Bernal Marin)"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (70 commits)
  locking/x86: Use LOCK ADD for smp_mb() instead of MFENCE
  rcu: Use lockdep to assert IRQs are disabled/enabled
  netpoll: Use lockdep to assert IRQs are disabled/enabled
  timers/posix-cpu-timers: Use lockdep to assert IRQs are disabled/enabled
  sched/clock, sched/cputime: Use lockdep to assert IRQs are disabled/enabled
  irq_work: Use lockdep to assert IRQs are disabled/enabled
  irq/timings: Use lockdep to assert IRQs are disabled/enabled
  perf/core: Use lockdep to assert IRQs are disabled/enabled
  x86: Use lockdep to assert IRQs are disabled/enabled
  smp/core: Use lockdep to assert IRQs are disabled/enabled
  timers/hrtimer: Use lockdep to assert IRQs are disabled/enabled
  timers/nohz: Use lockdep to assert IRQs are disabled/enabled
  workqueue: Use lockdep to assert IRQs are disabled/enabled
  irq/softirqs: Use lockdep to assert IRQs are disabled/enabled
  locking/lockdep: Add IRQs disabled/enabled assertion APIs: lockdep_assert_irqs_enabled()/disabled()
  locking/pvqspinlock: Implement hybrid PV queued/unfair locks
  locking/rwlocks: Fix comments
  x86/paravirt: Set up the virt_spin_lock_key after static keys get initialized
  block, locking/lockdep: Assign a lock_class per gendisk used for wait_for_completion()
  workqueue: Remove now redundant lock acquisitions wrt. workqueue flushes
  ...
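Several of the commits listed above perform the lockdep IRQ-assertion conversion mentioned in the summary. As a minimal, hypothetical sketch of what such a conversion looks like (check_irq_ctx() is a made-up name, not code from this merge), an open-coded WARN_ON_ONCE(!irqs_disabled()) becomes a lockdep helper that is effectively a no-op unless lockdep/CONFIG_PROVE_LOCKING is enabled:

  #include <linux/bug.h>
  #include <linux/irqflags.h>
  #include <linux/lockdep.h>

  static void check_irq_ctx(void)
  {
          /* before: open-coded check, always compiled in and evaluated */
          WARN_ON_ONCE(!irqs_disabled());

          /* after: debug-only assertion, compiles away without lockdep */
          lockdep_assert_irqs_disabled();
  }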
Diffstat (limited to 'security')
-rw-r--r--  security/apparmor/include/lib.h | 11 -----------
-rw-r--r--  security/apparmor/label.c       |  8 ++++----
2 files changed, 4 insertions(+), 15 deletions(-)
diff --git a/security/apparmor/include/lib.h b/security/apparmor/include/lib.h
index 436b3a722357..f546707a2bbb 100644
--- a/security/apparmor/include/lib.h
+++ b/security/apparmor/include/lib.h
@@ -19,17 +19,6 @@
 
 #include "match.h"
 
-/* Provide our own test for whether a write lock is held for asserts
- * this is because on none SMP systems write_can_lock will always
- * resolve to true, which is what you want for code making decisions
- * based on it, but wrong for asserts checking that the lock is held
- */
-#ifdef CONFIG_SMP
-#define write_is_locked(X) !write_can_lock(X)
-#else
-#define write_is_locked(X) (1)
-#endif /* CONFIG_SMP */
-
 /*
  * DEBUG remains global (no per profile flag) since it is mostly used in sysctl
  * which is not related to profile accesses.
diff --git a/security/apparmor/label.c b/security/apparmor/label.c
index c5b99b954580..ad28e03a6f30 100644
--- a/security/apparmor/label.c
+++ b/security/apparmor/label.c
@@ -80,7 +80,7 @@ void __aa_proxy_redirect(struct aa_label *orig, struct aa_label *new)
 
 	AA_BUG(!orig);
 	AA_BUG(!new);
-	AA_BUG(!write_is_locked(&labels_set(orig)->lock));
+	lockdep_assert_held_exclusive(&labels_set(orig)->lock);
 
 	tmp = rcu_dereference_protected(orig->proxy->label,
 					&labels_ns(orig)->lock);
@@ -571,7 +571,7 @@ static bool __label_remove(struct aa_label *label, struct aa_label *new)
 
 	AA_BUG(!ls);
 	AA_BUG(!label);
-	AA_BUG(!write_is_locked(&ls->lock));
+	lockdep_assert_held_exclusive(&ls->lock);
 
 	if (new)
 		__aa_proxy_redirect(label, new);
@@ -608,7 +608,7 @@ static bool __label_replace(struct aa_label *old, struct aa_label *new)
 	AA_BUG(!ls);
 	AA_BUG(!old);
 	AA_BUG(!new);
-	AA_BUG(!write_is_locked(&ls->lock));
+	lockdep_assert_held_exclusive(&ls->lock);
 	AA_BUG(new->flags & FLAG_IN_TREE);
 
 	if (!label_is_stale(old))
@@ -645,7 +645,7 @@ static struct aa_label *__label_insert(struct aa_labelset *ls,
 	AA_BUG(!ls);
 	AA_BUG(!label);
 	AA_BUG(labels_set(label) != ls);
-	AA_BUG(!write_is_locked(&ls->lock));
+	lockdep_assert_held_exclusive(&ls->lock);
 	AA_BUG(label->flags & FLAG_IN_TREE);
 
 	/* Figure out where to put new node */
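The write_is_locked() macro removed from lib.h above approximated "the write lock is held" with !write_can_lock(), and on !SMP builds had to fall back to a constant 1, so the AA_BUG() checks silently became no-ops there. lockdep_assert_held_exclusive() instead asks lockdep whether the current task holds the lock for write, independent of SMP. A minimal sketch of the resulting pattern (hypothetical names, not the actual AppArmor code):

  #include <linux/spinlock.h>
  #include <linux/lockdep.h>

  static DEFINE_RWLOCK(example_lock);    /* stand-in for an aa_labelset lock */

  static void example_update(void)
  {
          /* warns under lockdep if the caller does not hold the write lock */
          lockdep_assert_held_exclusive(&example_lock);
          /* ... modify data protected by example_lock ... */
  }

  static void example_caller(void)
  {
          write_lock(&example_lock);
          example_update();
          write_unlock(&example_lock);
  }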