author	Eric Paris <eparis@redhat.com>	2009-02-12 14:51:04 -0500
committer	James Morris <jmorris@namei.org>	2009-02-13 17:23:48 -0500
commit	26036651c562609d1f52d181f9d2cccbf89929b1 (patch)
tree	db68ab98d574d6763f562ac87cc7810385496f22 /security/selinux
parent	edf3d1aecd0d608acbd561b0c527e1d41abcb657 (diff)
SELinux: convert the avc cache hash list to an hlist
We do not need O(1) access to the tail of the avc cache lists, so using struct list_head instead of struct hlist_head wastes space. This patch converts the avc cache to hlists, whose heads hold a single pointer, saving about 4k of global memory.

The change resulted in roughly a 1.5% decrease in time spent in avc_has_perm_noaudit based on oprofile sampling of tbench, although that is likely within the noise.

Signed-off-by: Eric Paris <eparis@redhat.com>
Reviewed-by: Paul Moore <paul.moore@hp.com>
Signed-off-by: James Morris <jmorris@namei.org>
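For context on where the saving comes from (this sketch is not part of the patch): a struct list_head bucket head carries two pointers, while a struct hlist_head carries only one, so across the 512 AVC_CACHE_SLOTS buckets defined in avc.c the array of heads shrinks by one pointer per slot, which is roughly 4k on a 64-bit build.

/* Illustrative sketch of the kernel list types involved; not part of the patch. */
struct list_head {
	struct list_head *next, *prev;		/* doubly linked: 2 pointers per head */
};

struct hlist_head {
	struct hlist_node *first;		/* hash-list head: 1 pointer per head */
};

struct hlist_node {
	struct hlist_node *next, **pprev;	/* nodes themselves still use 2 words */
};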
Diffstat (limited to 'security/selinux')
-rw-r--r--	security/selinux/avc.c	47
1 file changed, 27 insertions, 20 deletions
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 9dd5c506a826..7f9b5fac8779 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -92,12 +92,12 @@ struct avc_entry {
 
 struct avc_node {
 	struct avc_entry	ae;
-	struct list_head	list; /* anchored in avc_cache->slots[i] */
+	struct hlist_node	list; /* anchored in avc_cache->slots[i] */
 	struct rcu_head		rhead;
 };
 
 struct avc_cache {
-	struct list_head	slots[AVC_CACHE_SLOTS]; /* head for avc_node->list */
+	struct hlist_head	slots[AVC_CACHE_SLOTS]; /* head for avc_node->list */
 	spinlock_t		slots_lock[AVC_CACHE_SLOTS]; /* lock for writes */
 	atomic_t		lru_hint;	/* LRU hint for reclaim scan */
 	atomic_t		active_nodes;
@@ -233,7 +233,7 @@ void __init avc_init(void)
 	int i;
 
 	for (i = 0; i < AVC_CACHE_SLOTS; i++) {
-		INIT_LIST_HEAD(&avc_cache.slots[i]);
+		INIT_HLIST_HEAD(&avc_cache.slots[i]);
 		spin_lock_init(&avc_cache.slots_lock[i]);
 	}
 	atomic_set(&avc_cache.active_nodes, 0);
@@ -249,7 +249,7 @@ int avc_get_hash_stats(char *page)
 {
 	int i, chain_len, max_chain_len, slots_used;
 	struct avc_node *node;
-	struct list_head *head;
+	struct hlist_head *head;
 
 	rcu_read_lock();
 
@@ -257,10 +257,12 @@ int avc_get_hash_stats(char *page)
 	max_chain_len = 0;
 	for (i = 0; i < AVC_CACHE_SLOTS; i++) {
 		head = &avc_cache.slots[i];
-		if (!list_empty(head)) {
+		if (!hlist_empty(head)) {
+			struct hlist_node *next;
+
 			slots_used++;
 			chain_len = 0;
-			list_for_each_entry_rcu(node, head, list)
+			hlist_for_each_entry_rcu(node, next, head, list)
 				chain_len++;
 			if (chain_len > max_chain_len)
 				max_chain_len = chain_len;
@@ -284,7 +286,7 @@ static void avc_node_free(struct rcu_head *rhead)
 
 static void avc_node_delete(struct avc_node *node)
 {
-	list_del_rcu(&node->list);
+	hlist_del_rcu(&node->list);
 	call_rcu(&node->rhead, avc_node_free);
 	atomic_dec(&avc_cache.active_nodes);
 }
@@ -298,7 +300,7 @@ static void avc_node_kill(struct avc_node *node)
 
 static void avc_node_replace(struct avc_node *new, struct avc_node *old)
 {
-	list_replace_rcu(&old->list, &new->list);
+	hlist_replace_rcu(&old->list, &new->list);
 	call_rcu(&old->rhead, avc_node_free);
 	atomic_dec(&avc_cache.active_nodes);
 }
@@ -308,7 +310,8 @@ static inline int avc_reclaim_node(void)
 	struct avc_node *node;
 	int hvalue, try, ecx;
 	unsigned long flags;
-	struct list_head *head;
+	struct hlist_head *head;
+	struct hlist_node *next;
 	spinlock_t *lock;
 
 	for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++) {
@@ -320,7 +323,7 @@ static inline int avc_reclaim_node(void)
 			continue;
 
 		rcu_read_lock();
-		list_for_each_entry(node, head, list) {
+		hlist_for_each_entry(node, next, head, list) {
 			avc_node_delete(node);
 			avc_cache_stats_incr(reclaims);
 			ecx++;
@@ -346,7 +349,7 @@ static struct avc_node *avc_alloc_node(void)
 		goto out;
 
 	INIT_RCU_HEAD(&node->rhead);
-	INIT_LIST_HEAD(&node->list);
+	INIT_HLIST_NODE(&node->list);
 	avc_cache_stats_incr(allocations);
 
 	if (atomic_inc_return(&avc_cache.active_nodes) > avc_cache_threshold)
@@ -368,11 +371,12 @@ static inline struct avc_node *avc_search_node(u32 ssid, u32 tsid, u16 tclass)
 {
 	struct avc_node *node, *ret = NULL;
 	int hvalue;
-	struct list_head *head;
+	struct hlist_head *head;
+	struct hlist_node *next;
 
 	hvalue = avc_hash(ssid, tsid, tclass);
 	head = &avc_cache.slots[hvalue];
-	list_for_each_entry_rcu(node, head, list) {
+	hlist_for_each_entry_rcu(node, next, head, list) {
 		if (ssid == node->ae.ssid &&
 		    tclass == node->ae.tclass &&
 		    tsid == node->ae.tsid) {
@@ -461,7 +465,8 @@ static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct av_dec
 
 	node = avc_alloc_node();
 	if (node) {
-		struct list_head *head;
+		struct hlist_head *head;
+		struct hlist_node *next;
 		spinlock_t *lock;
 
 		hvalue = avc_hash(ssid, tsid, tclass);
@@ -471,7 +476,7 @@ static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct av_dec
 		lock = &avc_cache.slots_lock[hvalue];
 
 		spin_lock_irqsave(lock, flag);
-		list_for_each_entry(pos, head, list) {
+		hlist_for_each_entry(pos, next, head, list) {
 			if (pos->ae.ssid == ssid &&
 			    pos->ae.tsid == tsid &&
 			    pos->ae.tclass == tclass) {
@@ -479,7 +484,7 @@ static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct av_dec
 				goto found;
 			}
 		}
-		list_add_rcu(&node->list, head);
+		hlist_add_head_rcu(&node->list, head);
 found:
 		spin_unlock_irqrestore(lock, flag);
 	}
@@ -750,7 +755,8 @@ static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass,
 	int hvalue, rc = 0;
 	unsigned long flag;
 	struct avc_node *pos, *node, *orig = NULL;
-	struct list_head *head;
+	struct hlist_head *head;
+	struct hlist_node *next;
 	spinlock_t *lock;
 
 	node = avc_alloc_node();
@@ -767,7 +773,7 @@ static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass,
 
 	spin_lock_irqsave(lock, flag);
 
-	list_for_each_entry(pos, head, list) {
+	hlist_for_each_entry(pos, next, head, list) {
 		if (ssid == pos->ae.ssid &&
 		    tsid == pos->ae.tsid &&
 		    tclass == pos->ae.tclass &&
@@ -827,7 +833,8 @@ int avc_ss_reset(u32 seqno)
 	int i, rc = 0, tmprc;
 	unsigned long flag;
 	struct avc_node *node;
-	struct list_head *head;
+	struct hlist_head *head;
+	struct hlist_node *next;
 	spinlock_t *lock;
 
 	for (i = 0; i < AVC_CACHE_SLOTS; i++) {
@@ -840,7 +847,7 @@ int avc_ss_reset(u32 seqno)
 		 * prevent RCU grace periods from ending.
 		 */
 		rcu_read_lock();
-		list_for_each_entry(node, head, list)
+		hlist_for_each_entry(node, next, head, list)
 			avc_node_delete(node);
 		rcu_read_unlock();
 		spin_unlock_irqrestore(lock, flag);
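A note on the struct hlist_node *next variables added to each converted loop: the hlist iterators of this era took an explicit struct hlist_node cursor alongside the containing-object pointer (the cursor argument was dropped from these macros in later kernels). Below is a minimal sketch, not part of the patch, of the post-patch lookup pattern, assuming the same avc_cache, avc_hash() and struct avc_node layout as in avc.c; the helper name is hypothetical.

/* Sketch only; mirrors the converted lookup loop in avc_search_node(). */
static struct avc_node *avc_lookup_sketch(u32 ssid, u32 tsid, u16 tclass)
{
	struct avc_node *node;
	struct hlist_node *next;	/* raw cursor required by the old iterator signature */
	struct hlist_head *head = &avc_cache.slots[avc_hash(ssid, tsid, tclass)];

	/* caller is expected to hold rcu_read_lock(), as the avc.c callers do */
	hlist_for_each_entry_rcu(node, next, head, list) {
		if (node->ae.ssid == ssid &&
		    node->ae.tsid == tsid &&
		    node->ae.tclass == tclass)
			return node;
	}
	return NULL;
}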