about summary refs log tree commit diff stats
path: root/security/selinux
diff options
context:
space:
mode:
authorPaul E. McKenney <paulmck@linux.vnet.ibm.com>2008-04-21 21:12:33 -0400
committerJames Morris <jmorris@namei.org>2008-04-22 01:37:23 -0400
commit618442509128fe4514be94de70ce54075cd9a706 (patch)
tree9aea814978791abd8c4f9a5c60de879b2811c063 /security/selinux
parent0f5e64200f20fc8f5b759c4010082f577ab0af3f (diff)
SELinux fixups needed for preemptable RCU from -rt
The attached patch needs to move from -rt to mainline given preemptable RCU. This patch fixes SELinux code that implicitly assumes that disabling preemption prevents an RCU grace period from completing, an assumption that is valid for Classic RCU, but not necessarily for preemptable RCU. Explicit rcu_read_lock() calls are thus added.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Acked-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: James Morris <jmorris@namei.org>
Diffstat (limited to 'security/selinux')
-rw-r--r--  security/selinux/avc.c    9
-rw-r--r--  security/selinux/netif.c  2
2 files changed, 11 insertions, 0 deletions
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 1d69f6649bff..95a8ef4a5073 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -312,6 +312,7 @@ static inline int avc_reclaim_node(void)
 		if (!spin_trylock_irqsave(&avc_cache.slots_lock[hvalue], flags))
 			continue;
 
+		rcu_read_lock();
 		list_for_each_entry(node, &avc_cache.slots[hvalue], list) {
 			if (atomic_dec_and_test(&node->ae.used)) {
 				/* Recently Unused */
@@ -319,11 +320,13 @@ static inline int avc_reclaim_node(void)
 				avc_cache_stats_incr(reclaims);
 				ecx++;
 				if (ecx >= AVC_CACHE_RECLAIM) {
+					rcu_read_unlock();
 					spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flags);
 					goto out;
 				}
 			}
 		}
+		rcu_read_unlock();
 		spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flags);
 	}
 out:
@@ -821,8 +824,14 @@ int avc_ss_reset(u32 seqno)
 
 	for (i = 0; i < AVC_CACHE_SLOTS; i++) {
 		spin_lock_irqsave(&avc_cache.slots_lock[i], flag);
+		/*
+		 * With preemptable RCU, the outer spinlock does not
+		 * prevent RCU grace periods from ending.
+		 */
+		rcu_read_lock();
 		list_for_each_entry(node, &avc_cache.slots[i], list)
 			avc_node_delete(node);
+		rcu_read_unlock();
 		spin_unlock_irqrestore(&avc_cache.slots_lock[i], flag);
 	}
 
diff --git a/security/selinux/netif.c b/security/selinux/netif.c
index c658b84c3196..b4e14bc0bf32 100644
--- a/security/selinux/netif.c
+++ b/security/selinux/netif.c
@@ -239,11 +239,13 @@ static void sel_netif_kill(int ifindex)
 {
 	struct sel_netif *netif;
 
+	rcu_read_lock();
 	spin_lock_bh(&sel_netif_lock);
 	netif = sel_netif_find(ifindex);
 	if (netif)
 		sel_netif_destroy(netif);
 	spin_unlock_bh(&sel_netif_lock);
+	rcu_read_unlock();
 }
 
 /**