author	Eric Paris <eparis@redhat.com>	2009-02-12 14:50:59 -0500
committer	James Morris <jmorris@namei.org>	2009-02-13 17:23:45 -0500
commit	edf3d1aecd0d608acbd561b0c527e1d41abcb657 (patch)
tree	49d88ec27a59f602784b47e2f951934d245f7de8 /security/selinux
parent	f1c6381a6e337adcecf84be2a838bd9e610e2365 (diff)
SELinux: code readability with avc_cache
The code making use of struct avc_cache was not easy to read, thanks to liberal use of &avc_cache.{slots_lock,slots}[hvalue] throughout. This patch simply creates local pointers and uses those instead of the long global names.

Signed-off-by: Eric Paris <eparis@redhat.com>
Signed-off-by: James Morris <jmorris@namei.org>
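For readers outside the kernel tree, here is a minimal userspace sketch of the same pattern. All names below (struct cache, CACHE_SLOTS, insert(), etc.) are hypothetical stand-ins, and a pthread mutex stands in for the kernel's per-slot spinlock; this is not the AVC code itself, just the hoist-a-local-pointer refactor shown in isolation. Build with: cc -pthread sketch.c

	/* sketch.c - hypothetical illustration, not kernel code */
	#include <pthread.h>
	#include <stdio.h>

	#define CACHE_SLOTS 512

	struct node {
		struct node *next;
		int key;
	};

	struct cache {
		struct node *slots[CACHE_SLOTS];         /* per-slot chain heads */
		pthread_mutex_t slots_lock[CACHE_SLOTS]; /* per-slot write locks */
	};

	static struct cache cache;

	static void insert(struct node *n, int hvalue)
	{
		/*
		 * Without the refactor, every line spells out the full
		 * global expression:
		 *
		 *	pthread_mutex_lock(&cache.slots_lock[hvalue]);
		 *	n->next = cache.slots[hvalue];
		 *	cache.slots[hvalue] = n;
		 *
		 * With it, local pointers are hoisted once and the short
		 * names carry the intent.
		 */
		struct node **head = &cache.slots[hvalue];
		pthread_mutex_t *lock = &cache.slots_lock[hvalue];

		pthread_mutex_lock(lock);
		n->next = *head;
		*head = n;
		pthread_mutex_unlock(lock);
	}

	int main(void)
	{
		static struct node a = { .next = NULL, .key = 42 };
		int i, hvalue = 42 & (CACHE_SLOTS - 1);

		for (i = 0; i < CACHE_SLOTS; i++)
			pthread_mutex_init(&cache.slots_lock[i], NULL);

		insert(&a, hvalue);
		printf("slot %d head key: %d\n", hvalue, cache.slots[hvalue]->key);
		return 0;
	}

A compiler will typically generate the same code either way; the win is purely that the locking and list-walking logic inside each function reads at a glance.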
Diffstat (limited to 'security/selinux')
-rw-r--r--	security/selinux/avc.c	63
1 file changed, 44 insertions(+), 19 deletions(-)
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 326aa78bd421..9dd5c506a826 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -92,12 +92,12 @@ struct avc_entry {
 
 struct avc_node {
 	struct avc_entry	ae;
-	struct list_head	list;
+	struct list_head	list; /* anchored in avc_cache->slots[i] */
 	struct rcu_head		rhead;
 };
 
 struct avc_cache {
-	struct list_head	slots[AVC_CACHE_SLOTS];
+	struct list_head	slots[AVC_CACHE_SLOTS]; /* head for avc_node->list */
 	spinlock_t		slots_lock[AVC_CACHE_SLOTS]; /* lock for writes */
 	atomic_t		lru_hint;	/* LRU hint for reclaim scan */
 	atomic_t		active_nodes;
@@ -249,16 +249,18 @@ int avc_get_hash_stats(char *page)
 {
 	int i, chain_len, max_chain_len, slots_used;
 	struct avc_node *node;
+	struct list_head *head;
 
 	rcu_read_lock();
 
 	slots_used = 0;
 	max_chain_len = 0;
 	for (i = 0; i < AVC_CACHE_SLOTS; i++) {
-		if (!list_empty(&avc_cache.slots[i])) {
+		head = &avc_cache.slots[i];
+		if (!list_empty(head)) {
 			slots_used++;
 			chain_len = 0;
-			list_for_each_entry_rcu(node, &avc_cache.slots[i], list)
+			list_for_each_entry_rcu(node, head, list)
 				chain_len++;
 			if (chain_len > max_chain_len)
 				max_chain_len = chain_len;
@@ -306,26 +308,30 @@ static inline int avc_reclaim_node(void)
 	struct avc_node *node;
 	int hvalue, try, ecx;
 	unsigned long flags;
+	struct list_head *head;
+	spinlock_t *lock;
 
 	for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++) {
 		hvalue = atomic_inc_return(&avc_cache.lru_hint) & (AVC_CACHE_SLOTS - 1);
+		head = &avc_cache.slots[hvalue];
+		lock = &avc_cache.slots_lock[hvalue];
 
-		if (!spin_trylock_irqsave(&avc_cache.slots_lock[hvalue], flags))
+		if (!spin_trylock_irqsave(lock, flags))
 			continue;
 
 		rcu_read_lock();
-		list_for_each_entry(node, &avc_cache.slots[hvalue], list) {
+		list_for_each_entry(node, head, list) {
 			avc_node_delete(node);
 			avc_cache_stats_incr(reclaims);
 			ecx++;
 			if (ecx >= AVC_CACHE_RECLAIM) {
 				rcu_read_unlock();
-				spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flags);
+				spin_unlock_irqrestore(lock, flags);
 				goto out;
 			}
 		}
 		rcu_read_unlock();
-		spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flags);
+		spin_unlock_irqrestore(lock, flags);
 	}
 out:
 	return ecx;
@@ -362,9 +368,11 @@ static inline struct avc_node *avc_search_node(u32 ssid, u32 tsid, u16 tclass)
 {
 	struct avc_node *node, *ret = NULL;
 	int hvalue;
+	struct list_head *head;
 
 	hvalue = avc_hash(ssid, tsid, tclass);
-	list_for_each_entry_rcu(node, &avc_cache.slots[hvalue], list) {
+	head = &avc_cache.slots[hvalue];
+	list_for_each_entry_rcu(node, head, list) {
 		if (ssid == node->ae.ssid &&
 		    tclass == node->ae.tclass &&
 		    tsid == node->ae.tsid) {
@@ -453,11 +461,17 @@ static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct av_dec
 
 	node = avc_alloc_node();
 	if (node) {
+		struct list_head *head;
+		spinlock_t *lock;
+
 		hvalue = avc_hash(ssid, tsid, tclass);
 		avc_node_populate(node, ssid, tsid, tclass, avd);
 
-		spin_lock_irqsave(&avc_cache.slots_lock[hvalue], flag);
-		list_for_each_entry(pos, &avc_cache.slots[hvalue], list) {
+		head = &avc_cache.slots[hvalue];
+		lock = &avc_cache.slots_lock[hvalue];
+
+		spin_lock_irqsave(lock, flag);
+		list_for_each_entry(pos, head, list) {
 			if (pos->ae.ssid == ssid &&
 			    pos->ae.tsid == tsid &&
 			    pos->ae.tclass == tclass) {
@@ -465,9 +479,9 @@ static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct av_dec
 				goto found;
 			}
 		}
-		list_add_rcu(&node->list, &avc_cache.slots[hvalue]);
+		list_add_rcu(&node->list, head);
 found:
-		spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flag);
+		spin_unlock_irqrestore(lock, flag);
 	}
 out:
 	return node;
@@ -736,6 +750,8 @@ static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass,
 	int hvalue, rc = 0;
 	unsigned long flag;
 	struct avc_node *pos, *node, *orig = NULL;
+	struct list_head *head;
+	spinlock_t *lock;
 
 	node = avc_alloc_node();
 	if (!node) {
@@ -745,9 +761,13 @@ static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass,
 
 	/* Lock the target slot */
 	hvalue = avc_hash(ssid, tsid, tclass);
-	spin_lock_irqsave(&avc_cache.slots_lock[hvalue], flag);
 
-	list_for_each_entry(pos, &avc_cache.slots[hvalue], list) {
+	head = &avc_cache.slots[hvalue];
+	lock = &avc_cache.slots_lock[hvalue];
+
+	spin_lock_irqsave(lock, flag);
+
+	list_for_each_entry(pos, head, list) {
 		if (ssid == pos->ae.ssid &&
 		    tsid == pos->ae.tsid &&
 		    tclass == pos->ae.tclass &&
@@ -792,7 +812,7 @@ static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass,
 	}
 	avc_node_replace(node, orig);
 out_unlock:
-	spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flag);
+	spin_unlock_irqrestore(lock, flag);
 out:
 	return rc;
 }
@@ -807,18 +827,23 @@ int avc_ss_reset(u32 seqno)
 	int i, rc = 0, tmprc;
 	unsigned long flag;
 	struct avc_node *node;
+	struct list_head *head;
+	spinlock_t *lock;
 
 	for (i = 0; i < AVC_CACHE_SLOTS; i++) {
-		spin_lock_irqsave(&avc_cache.slots_lock[i], flag);
+		head = &avc_cache.slots[i];
+		lock = &avc_cache.slots_lock[i];
+
+		spin_lock_irqsave(lock, flag);
 		/*
 		 * With preemptable RCU, the outer spinlock does not
 		 * prevent RCU grace periods from ending.
 		 */
 		rcu_read_lock();
-		list_for_each_entry(node, &avc_cache.slots[i], list)
+		list_for_each_entry(node, head, list)
 			avc_node_delete(node);
 		rcu_read_unlock();
-		spin_unlock_irqrestore(&avc_cache.slots_lock[i], flag);
+		spin_unlock_irqrestore(lock, flag);
 	}
 
 	for (c = avc_callbacks; c; c = c->next) {