path: root/net/ipv4/cipso_ipv4.c
author	Paul Moore <paul.moore@hp.com>	2006-09-25 18:52:37 -0400
committer	David S. Miller <davem@davemloft.net>	2006-09-25 18:52:37 -0400
commit	609c92feea5652809319bb77f19d24a44615687d (patch)
tree	01c7523782233356d0a373f775f21fa52099cd23 /net/ipv4/cipso_ipv4.c
parent	14a72f53fb1bb5d5c2bdd8cf172219519664729a (diff)
[NetLabel]: make the CIPSOv4 cache spinlocks bottom half safe
The CIPSOv4 cache traversal routines are triggered both by userspace events (cache invalidation due to DOI removal or an updated SELinux policy) and by network packet processing events. This is a problem for the existing CIPSOv4 cache spinlocks, which are not bottom-half/softirq safe. This patch converts the CIPSOv4 cache spin_[un]lock() calls into spin_[un]lock_bh() calls to address this problem.

Signed-off-by: Paul Moore <paul.moore@hp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
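For background on the deadlock being avoided: spin_lock() does not disable softirq processing on the local CPU, so if a softirq (here, the packet receive path) interrupts a process-context holder of the lock and then tries to acquire that same lock, the CPU spins on itself forever. spin_lock_bh() disables local bottom halves before taking the lock. The following is a minimal sketch of the pattern with hypothetical names (demo_bucket, demo_invalidate, demo_lookup are illustrative, not the actual CIPSOv4 structures):

#include <linux/spinlock.h>
#include <linux/list.h>

/* Hypothetical bucket guarding data reached from both process
 * context (userspace-triggered invalidation) and softirq context
 * (per-packet cache lookups); names are illustrative only. */
struct demo_bucket {
	spinlock_t lock;
	struct list_head list;
};

static struct demo_bucket demo_bucket = {
	.lock = __SPIN_LOCK_UNLOCKED(demo_bucket.lock),
	.list = LIST_HEAD_INIT(demo_bucket.list),
};

/* Called from process context, e.g. on a userspace request.  The
 * _bh variant disables local bottom halves so a softirq cannot
 * interrupt us and spin forever on the lock we already hold. */
static void demo_invalidate(void)
{
	spin_lock_bh(&demo_bucket.lock);
	/* ... walk the list and free entries ... */
	spin_unlock_bh(&demo_bucket.lock);
}

/* Called from softirq context in the receive path.  Using the
 * _bh variant here too is safe (the bottom-half disable count
 * simply nests) and keeps every path to the lock consistent,
 * which is what this patch does for the CIPSOv4 cache. */
static void demo_lookup(void)
{
	spin_lock_bh(&demo_bucket.lock);
	/* ... search the list ... */
	spin_unlock_bh(&demo_bucket.lock);
}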
Diffstat (limited to 'net/ipv4/cipso_ipv4.c')
net/ipv4/cipso_ipv4.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index a3bae2ca8acc..87e71563335d 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -259,7 +259,7 @@ void cipso_v4_cache_invalidate(void)
 	u32 iter;
 
 	for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) {
-		spin_lock(&cipso_v4_cache[iter].lock);
+		spin_lock_bh(&cipso_v4_cache[iter].lock);
 		list_for_each_entry_safe(entry,
 					 tmp_entry,
 					 &cipso_v4_cache[iter].list, list) {
@@ -267,7 +267,7 @@ void cipso_v4_cache_invalidate(void)
 			cipso_v4_cache_entry_free(entry);
 		}
 		cipso_v4_cache[iter].size = 0;
-		spin_unlock(&cipso_v4_cache[iter].lock);
+		spin_unlock_bh(&cipso_v4_cache[iter].lock);
 	}
 
 	return;
@@ -309,7 +309,7 @@ static int cipso_v4_cache_check(const unsigned char *key,
 
 	hash = cipso_v4_map_cache_hash(key, key_len);
 	bkt = hash & (CIPSO_V4_CACHE_BUCKETBITS - 1);
-	spin_lock(&cipso_v4_cache[bkt].lock);
+	spin_lock_bh(&cipso_v4_cache[bkt].lock);
 	list_for_each_entry(entry, &cipso_v4_cache[bkt].list, list) {
 		if (entry->hash == hash &&
 		    entry->key_len == key_len &&
@@ -318,7 +318,7 @@ static int cipso_v4_cache_check(const unsigned char *key,
 			secattr->cache.free = entry->lsm_data.free;
 			secattr->cache.data = entry->lsm_data.data;
 			if (prev_entry == NULL) {
-				spin_unlock(&cipso_v4_cache[bkt].lock);
+				spin_unlock_bh(&cipso_v4_cache[bkt].lock);
 				return 0;
 			}
 
@@ -333,12 +333,12 @@ static int cipso_v4_cache_check(const unsigned char *key,
 					  &prev_entry->list);
 			}
 
-			spin_unlock(&cipso_v4_cache[bkt].lock);
+			spin_unlock_bh(&cipso_v4_cache[bkt].lock);
 			return 0;
 		}
 		prev_entry = entry;
 	}
-	spin_unlock(&cipso_v4_cache[bkt].lock);
+	spin_unlock_bh(&cipso_v4_cache[bkt].lock);
 
 	return -ENOENT;
 }
@@ -387,7 +387,7 @@ int cipso_v4_cache_add(const struct sk_buff *skb,
 	entry->lsm_data.data = secattr->cache.data;
 
 	bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETBITS - 1);
-	spin_lock(&cipso_v4_cache[bkt].lock);
+	spin_lock_bh(&cipso_v4_cache[bkt].lock);
 	if (cipso_v4_cache[bkt].size < cipso_v4_cache_bucketsize) {
 		list_add(&entry->list, &cipso_v4_cache[bkt].list);
 		cipso_v4_cache[bkt].size += 1;
@@ -398,7 +398,7 @@ int cipso_v4_cache_add(const struct sk_buff *skb,
 		list_add(&entry->list, &cipso_v4_cache[bkt].list);
 		cipso_v4_cache_entry_free(old_entry);
 	}
-	spin_unlock(&cipso_v4_cache[bkt].lock);
+	spin_unlock_bh(&cipso_v4_cache[bkt].lock);
 
 	return 0;
 