author    paul.moore@hp.com <paul.moore@hp.com>    2006-10-04 11:46:31 -0400
committer David S. Miller <davem@sunset.davemloft.net>    2006-10-12 02:59:29 -0400
commit    ffb733c65000ee701294f7b80c4eca2a5f335637 (patch)
tree      edda8e25792fe4a7bf0c619787949291276b9ed7 /security/selinux/ss
parent    c25d5180441e344a3368d100c57f0a481c6944f7 (diff)
NetLabel: fix a cache race condition
Testing revealed a problem with the NetLabel cache where a cached entry could be freed while still in use by the LSM layer, causing an oops and other problems. This patch fixes that problem by introducing a reference counter to the cache entry so that it is only freed when it is no longer in use.

Signed-off-by: Paul Moore <paul.moore@hp.com>
Signed-off-by: James Morris <jmorris@namei.org>
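For context: the reference counter itself lives on the NetLabel side of the patch, outside this diffstat (which is limited to security/selinux/ss). The following is a minimal sketch of what the new cache object and its allocator plausibly look like, inferred from how the SELinux hunks below use secattr.cache and netlbl_secattr_cache_alloc(); the field names and allocator body are assumptions, not a copy of the actual NetLabel change.

#include <linux/slab.h>
#include <asm/atomic.h>

/*
 * Sketch (assumed layout) of the reference-counted cache object that the
 * SELinux code below attaches its private data to.
 */
struct netlbl_lsm_cache {
	atomic_t refcount;              /* entry is freed only when this hits zero */
	void (*free)(const void *data); /* LSM-supplied destructor, e.g.
	                                 * selinux_netlbl_cache_free() below */
	void *data;                     /* LSM-private cache payload */
};

static inline struct netlbl_lsm_cache *netlbl_secattr_cache_alloc(gfp_t flags)
{
	struct netlbl_lsm_cache *cache;

	/* Assumed body: zeroed allocation with the caller holding the first reference. */
	cache = kzalloc(sizeof(*cache), flags);
	if (cache != NULL)
		atomic_set(&cache->refcount, 1);
	return cache;
}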
Diffstat (limited to 'security/selinux/ss')
-rw-r--r--    security/selinux/ss/services.c    37
1 file changed, 21 insertions, 16 deletions
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 0c219a1b3243..bb2d2bc869ba 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -2172,7 +2172,12 @@ struct netlbl_cache {
  */
 static void selinux_netlbl_cache_free(const void *data)
 {
-	struct netlbl_cache *cache = NETLBL_CACHE(data);
+	struct netlbl_cache *cache;
+
+	if (data == NULL)
+		return;
+
+	cache = NETLBL_CACHE(data);
 	switch (cache->type) {
 	case NETLBL_CACHE_T_MLS:
 		ebitmap_destroy(&cache->data.mls_label.level[0].cat);
@@ -2197,17 +2202,20 @@ static void selinux_netlbl_cache_add(struct sk_buff *skb, struct context *ctx)
 	struct netlbl_lsm_secattr secattr;
 
 	netlbl_secattr_init(&secattr);
+	secattr.cache = netlbl_secattr_cache_alloc(GFP_ATOMIC);
+	if (secattr.cache == NULL)
+		goto netlbl_cache_add_return;
 
 	cache = kzalloc(sizeof(*cache), GFP_ATOMIC);
 	if (cache == NULL)
-		goto netlbl_cache_add_failure;
-	secattr.cache.free = selinux_netlbl_cache_free;
-	secattr.cache.data = (void *)cache;
+		goto netlbl_cache_add_return;
+	secattr.cache->free = selinux_netlbl_cache_free;
+	secattr.cache->data = (void *)cache;
 
 	cache->type = NETLBL_CACHE_T_MLS;
 	if (ebitmap_cpy(&cache->data.mls_label.level[0].cat,
 			&ctx->range.level[0].cat) != 0)
-		goto netlbl_cache_add_failure;
+		goto netlbl_cache_add_return;
 	cache->data.mls_label.level[1].cat.highbit =
 		cache->data.mls_label.level[0].cat.highbit;
 	cache->data.mls_label.level[1].cat.node =
@@ -2215,13 +2223,10 @@ static void selinux_netlbl_cache_add(struct sk_buff *skb, struct context *ctx)
 	cache->data.mls_label.level[0].sens = ctx->range.level[0].sens;
 	cache->data.mls_label.level[1].sens = ctx->range.level[0].sens;
 
-	if (netlbl_cache_add(skb, &secattr) != 0)
-		goto netlbl_cache_add_failure;
-
-	return;
+	netlbl_cache_add(skb, &secattr);
 
-netlbl_cache_add_failure:
-	netlbl_secattr_destroy(&secattr, 1);
+netlbl_cache_add_return:
+	netlbl_secattr_destroy(&secattr);
 }
 
 /**
@@ -2263,8 +2268,8 @@ static int selinux_netlbl_secattr_to_sid(struct sk_buff *skb,
 
 	POLICY_RDLOCK;
 
-	if (secattr->cache.data) {
-		cache = NETLBL_CACHE(secattr->cache.data);
+	if (secattr->cache) {
+		cache = NETLBL_CACHE(secattr->cache->data);
 		switch (cache->type) {
 		case NETLBL_CACHE_T_SID:
 			*sid = cache->data.sid;
@@ -2369,7 +2374,7 @@ static int selinux_netlbl_skbuff_getsid(struct sk_buff *skb,
 						  &secattr,
 						  base_sid,
 						  sid);
-	netlbl_secattr_destroy(&secattr, 0);
+	netlbl_secattr_destroy(&secattr);
 
 	return rc;
 }
@@ -2415,7 +2420,7 @@ static int selinux_netlbl_socket_setsid(struct socket *sock, u32 sid)
 	if (rc == 0)
 		sksec->nlbl_state = NLBL_LABELED;
 
-	netlbl_secattr_destroy(&secattr, 0);
+	netlbl_secattr_destroy(&secattr);
 
 netlbl_socket_setsid_return:
 	POLICY_RDUNLOCK;
@@ -2517,7 +2522,7 @@ void selinux_netlbl_sock_graft(struct sock *sk, struct socket *sock)
 				       sksec->sid,
 				       &nlbl_peer_sid) == 0)
 		sksec->peer_sid = nlbl_peer_sid;
-	netlbl_secattr_destroy(&secattr, 0);
+	netlbl_secattr_destroy(&secattr);
 
 	sksec->nlbl_state = NLBL_REQUIRE;
 
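A matching sketch of the release side, again an approximation of the NetLabel half of the patch rather than the real code: the LSM-supplied free callback (selinux_netlbl_cache_free() above) runs only once the last reference is dropped, which is why netlbl_secattr_destroy() no longer takes a "free the cache data" flag and why the callback must now tolerate a NULL data pointer.

/*
 * Sketch (assumed helper names and bodies) of how the reference is dropped
 * when a secattr is destroyed.
 */
static inline void netlbl_secattr_cache_free(struct netlbl_lsm_cache *cache)
{
	if (!atomic_dec_and_test(&cache->refcount))
		return;                         /* another holder still references the entry */
	if (cache->free)
		cache->free(cache->data);       /* e.g. selinux_netlbl_cache_free() */
	kfree(cache);
}

static inline void netlbl_secattr_destroy(struct netlbl_lsm_secattr *secattr)
{
	if (secattr->cache != NULL)
		netlbl_secattr_cache_free(secattr->cache);
	/* ... release any remaining secattr fields here ... */
}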