diff options
Diffstat (limited to 'security/selinux/avc.c')
-rw-r--r-- | security/selinux/avc.c | 169 |
1 file changed, 95 insertions, 74 deletions
diff --git a/security/selinux/avc.c b/security/selinux/avc.c index eb41f43e2772..7f9b5fac8779 100644 --- a/security/selinux/avc.c +++ b/security/selinux/avc.c | |||
@@ -88,17 +88,16 @@ struct avc_entry { | |||
88 | u32 tsid; | 88 | u32 tsid; |
89 | u16 tclass; | 89 | u16 tclass; |
90 | struct av_decision avd; | 90 | struct av_decision avd; |
91 | atomic_t used; /* used recently */ | ||
92 | }; | 91 | }; |
93 | 92 | ||
94 | struct avc_node { | 93 | struct avc_node { |
95 | struct avc_entry ae; | 94 | struct avc_entry ae; |
96 | struct list_head list; | 95 | struct hlist_node list; /* anchored in avc_cache->slots[i] */ |
97 | struct rcu_head rhead; | 96 | struct rcu_head rhead; |
98 | }; | 97 | }; |
99 | 98 | ||
100 | struct avc_cache { | 99 | struct avc_cache { |
101 | struct list_head slots[AVC_CACHE_SLOTS]; | 100 | struct hlist_head slots[AVC_CACHE_SLOTS]; /* head for avc_node->list */ |
102 | spinlock_t slots_lock[AVC_CACHE_SLOTS]; /* lock for writes */ | 101 | spinlock_t slots_lock[AVC_CACHE_SLOTS]; /* lock for writes */ |
103 | atomic_t lru_hint; /* LRU hint for reclaim scan */ | 102 | atomic_t lru_hint; /* LRU hint for reclaim scan */ |
104 | atomic_t active_nodes; | 103 | atomic_t active_nodes; |
@@ -234,7 +233,7 @@ void __init avc_init(void) | |||
234 | int i; | 233 | int i; |
235 | 234 | ||
236 | for (i = 0; i < AVC_CACHE_SLOTS; i++) { | 235 | for (i = 0; i < AVC_CACHE_SLOTS; i++) { |
237 | INIT_LIST_HEAD(&avc_cache.slots[i]); | 236 | INIT_HLIST_HEAD(&avc_cache.slots[i]); |
238 | spin_lock_init(&avc_cache.slots_lock[i]); | 237 | spin_lock_init(&avc_cache.slots_lock[i]); |
239 | } | 238 | } |
240 | atomic_set(&avc_cache.active_nodes, 0); | 239 | atomic_set(&avc_cache.active_nodes, 0); |
@@ -250,16 +249,20 @@ int avc_get_hash_stats(char *page) | |||
250 | { | 249 | { |
251 | int i, chain_len, max_chain_len, slots_used; | 250 | int i, chain_len, max_chain_len, slots_used; |
252 | struct avc_node *node; | 251 | struct avc_node *node; |
252 | struct hlist_head *head; | ||
253 | 253 | ||
254 | rcu_read_lock(); | 254 | rcu_read_lock(); |
255 | 255 | ||
256 | slots_used = 0; | 256 | slots_used = 0; |
257 | max_chain_len = 0; | 257 | max_chain_len = 0; |
258 | for (i = 0; i < AVC_CACHE_SLOTS; i++) { | 258 | for (i = 0; i < AVC_CACHE_SLOTS; i++) { |
259 | if (!list_empty(&avc_cache.slots[i])) { | 259 | head = &avc_cache.slots[i]; |
260 | if (!hlist_empty(head)) { | ||
261 | struct hlist_node *next; | ||
262 | |||
260 | slots_used++; | 263 | slots_used++; |
261 | chain_len = 0; | 264 | chain_len = 0; |
262 | list_for_each_entry_rcu(node, &avc_cache.slots[i], list) | 265 | hlist_for_each_entry_rcu(node, next, head, list) |
263 | chain_len++; | 266 | chain_len++; |
264 | if (chain_len > max_chain_len) | 267 | if (chain_len > max_chain_len) |
265 | max_chain_len = chain_len; | 268 | max_chain_len = chain_len; |
@@ -283,7 +286,7 @@ static void avc_node_free(struct rcu_head *rhead) | |||
283 | 286 | ||
284 | static void avc_node_delete(struct avc_node *node) | 287 | static void avc_node_delete(struct avc_node *node) |
285 | { | 288 | { |
286 | list_del_rcu(&node->list); | 289 | hlist_del_rcu(&node->list); |
287 | call_rcu(&node->rhead, avc_node_free); | 290 | call_rcu(&node->rhead, avc_node_free); |
288 | atomic_dec(&avc_cache.active_nodes); | 291 | atomic_dec(&avc_cache.active_nodes); |
289 | } | 292 | } |
@@ -297,7 +300,7 @@ static void avc_node_kill(struct avc_node *node) | |||
297 | 300 | ||
298 | static void avc_node_replace(struct avc_node *new, struct avc_node *old) | 301 | static void avc_node_replace(struct avc_node *new, struct avc_node *old) |
299 | { | 302 | { |
300 | list_replace_rcu(&old->list, &new->list); | 303 | hlist_replace_rcu(&old->list, &new->list); |
301 | call_rcu(&old->rhead, avc_node_free); | 304 | call_rcu(&old->rhead, avc_node_free); |
302 | atomic_dec(&avc_cache.active_nodes); | 305 | atomic_dec(&avc_cache.active_nodes); |
303 | } | 306 | } |
@@ -307,29 +310,31 @@ static inline int avc_reclaim_node(void) | |||
307 | struct avc_node *node; | 310 | struct avc_node *node; |
308 | int hvalue, try, ecx; | 311 | int hvalue, try, ecx; |
309 | unsigned long flags; | 312 | unsigned long flags; |
313 | struct hlist_head *head; | ||
314 | struct hlist_node *next; | ||
315 | spinlock_t *lock; | ||
310 | 316 | ||
311 | for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++) { | 317 | for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++) { |
312 | hvalue = atomic_inc_return(&avc_cache.lru_hint) & (AVC_CACHE_SLOTS - 1); | 318 | hvalue = atomic_inc_return(&avc_cache.lru_hint) & (AVC_CACHE_SLOTS - 1); |
319 | head = &avc_cache.slots[hvalue]; | ||
320 | lock = &avc_cache.slots_lock[hvalue]; | ||
313 | 321 | ||
314 | if (!spin_trylock_irqsave(&avc_cache.slots_lock[hvalue], flags)) | 322 | if (!spin_trylock_irqsave(lock, flags)) |
315 | continue; | 323 | continue; |
316 | 324 | ||
317 | rcu_read_lock(); | 325 | rcu_read_lock(); |
318 | list_for_each_entry(node, &avc_cache.slots[hvalue], list) { | 326 | hlist_for_each_entry(node, next, head, list) { |
319 | if (atomic_dec_and_test(&node->ae.used)) { | 327 | avc_node_delete(node); |
320 | /* Recently Unused */ | 328 | avc_cache_stats_incr(reclaims); |
321 | avc_node_delete(node); | 329 | ecx++; |
322 | avc_cache_stats_incr(reclaims); | 330 | if (ecx >= AVC_CACHE_RECLAIM) { |
323 | ecx++; | 331 | rcu_read_unlock(); |
324 | if (ecx >= AVC_CACHE_RECLAIM) { | 332 | spin_unlock_irqrestore(lock, flags); |
325 | rcu_read_unlock(); | 333 | goto out; |
326 | spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flags); | ||
327 | goto out; | ||
328 | } | ||
329 | } | 334 | } |
330 | } | 335 | } |
331 | rcu_read_unlock(); | 336 | rcu_read_unlock(); |
332 | spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flags); | 337 | spin_unlock_irqrestore(lock, flags); |
333 | } | 338 | } |
334 | out: | 339 | out: |
335 | return ecx; | 340 | return ecx; |
@@ -344,8 +349,7 @@ static struct avc_node *avc_alloc_node(void) | |||
344 | goto out; | 349 | goto out; |
345 | 350 | ||
346 | INIT_RCU_HEAD(&node->rhead); | 351 | INIT_RCU_HEAD(&node->rhead); |
347 | INIT_LIST_HEAD(&node->list); | 352 | INIT_HLIST_NODE(&node->list); |
348 | atomic_set(&node->ae.used, 1); | ||
349 | avc_cache_stats_incr(allocations); | 353 | avc_cache_stats_incr(allocations); |
350 | 354 | ||
351 | if (atomic_inc_return(&avc_cache.active_nodes) > avc_cache_threshold) | 355 | if (atomic_inc_return(&avc_cache.active_nodes) > avc_cache_threshold) |
@@ -355,21 +359,24 @@ out: | |||
355 | return node; | 359 | return node; |
356 | } | 360 | } |
357 | 361 | ||
358 | static void avc_node_populate(struct avc_node *node, u32 ssid, u32 tsid, u16 tclass, struct avc_entry *ae) | 362 | static void avc_node_populate(struct avc_node *node, u32 ssid, u32 tsid, u16 tclass, struct av_decision *avd) |
359 | { | 363 | { |
360 | node->ae.ssid = ssid; | 364 | node->ae.ssid = ssid; |
361 | node->ae.tsid = tsid; | 365 | node->ae.tsid = tsid; |
362 | node->ae.tclass = tclass; | 366 | node->ae.tclass = tclass; |
363 | memcpy(&node->ae.avd, &ae->avd, sizeof(node->ae.avd)); | 367 | memcpy(&node->ae.avd, avd, sizeof(node->ae.avd)); |
364 | } | 368 | } |
365 | 369 | ||
366 | static inline struct avc_node *avc_search_node(u32 ssid, u32 tsid, u16 tclass) | 370 | static inline struct avc_node *avc_search_node(u32 ssid, u32 tsid, u16 tclass) |
367 | { | 371 | { |
368 | struct avc_node *node, *ret = NULL; | 372 | struct avc_node *node, *ret = NULL; |
369 | int hvalue; | 373 | int hvalue; |
374 | struct hlist_head *head; | ||
375 | struct hlist_node *next; | ||
370 | 376 | ||
371 | hvalue = avc_hash(ssid, tsid, tclass); | 377 | hvalue = avc_hash(ssid, tsid, tclass); |
372 | list_for_each_entry_rcu(node, &avc_cache.slots[hvalue], list) { | 378 | head = &avc_cache.slots[hvalue]; |
379 | hlist_for_each_entry_rcu(node, next, head, list) { | ||
373 | if (ssid == node->ae.ssid && | 380 | if (ssid == node->ae.ssid && |
374 | tclass == node->ae.tclass && | 381 | tclass == node->ae.tclass && |
375 | tsid == node->ae.tsid) { | 382 | tsid == node->ae.tsid) { |
@@ -378,15 +385,6 @@ static inline struct avc_node *avc_search_node(u32 ssid, u32 tsid, u16 tclass) | |||
378 | } | 385 | } |
379 | } | 386 | } |
380 | 387 | ||
381 | if (ret == NULL) { | ||
382 | /* cache miss */ | ||
383 | goto out; | ||
384 | } | ||
385 | |||
386 | /* cache hit */ | ||
387 | if (atomic_read(&ret->ae.used) != 1) | ||
388 | atomic_set(&ret->ae.used, 1); | ||
389 | out: | ||
390 | return ret; | 388 | return ret; |
391 | } | 389 | } |
392 | 390 | ||
@@ -395,30 +393,25 @@ out: | |||
395 | * @ssid: source security identifier | 393 | * @ssid: source security identifier |
396 | * @tsid: target security identifier | 394 | * @tsid: target security identifier |
397 | * @tclass: target security class | 395 | * @tclass: target security class |
398 | * @requested: requested permissions, interpreted based on @tclass | ||
399 | * | 396 | * |
400 | * Look up an AVC entry that is valid for the | 397 | * Look up an AVC entry that is valid for the |
401 | * @requested permissions between the SID pair | ||
402 | * (@ssid, @tsid), interpreting the permissions | 398 | * (@ssid, @tsid), interpreting the permissions |
403 | * based on @tclass. If a valid AVC entry exists, | 399 | * based on @tclass. If a valid AVC entry exists, |
404 | * then this function returns the avc_node. | 400 | * then this function returns the avc_node. |
405 | * Otherwise, this function returns NULL. | 401 | * Otherwise, this function returns NULL. |
406 | */ | 402 | */ |
407 | static struct avc_node *avc_lookup(u32 ssid, u32 tsid, u16 tclass, u32 requested) | 403 | static struct avc_node *avc_lookup(u32 ssid, u32 tsid, u16 tclass) |
408 | { | 404 | { |
409 | struct avc_node *node; | 405 | struct avc_node *node; |
410 | 406 | ||
411 | avc_cache_stats_incr(lookups); | 407 | avc_cache_stats_incr(lookups); |
412 | node = avc_search_node(ssid, tsid, tclass); | 408 | node = avc_search_node(ssid, tsid, tclass); |
413 | 409 | ||
414 | if (node && ((node->ae.avd.decided & requested) == requested)) { | 410 | if (node) |
415 | avc_cache_stats_incr(hits); | 411 | avc_cache_stats_incr(hits); |
416 | goto out; | 412 | else |
417 | } | 413 | avc_cache_stats_incr(misses); |
418 | 414 | ||
419 | node = NULL; | ||
420 | avc_cache_stats_incr(misses); | ||
421 | out: | ||
422 | return node; | 415 | return node; |
423 | } | 416 | } |
424 | 417 | ||
@@ -449,34 +442,41 @@ static int avc_latest_notif_update(int seqno, int is_insert) | |||
449 | * @ssid: source security identifier | 442 | * @ssid: source security identifier |
450 | * @tsid: target security identifier | 443 | * @tsid: target security identifier |
451 | * @tclass: target security class | 444 | * @tclass: target security class |
452 | * @ae: AVC entry | 445 | * @avd: resulting av decision |
453 | * | 446 | * |
454 | * Insert an AVC entry for the SID pair | 447 | * Insert an AVC entry for the SID pair |
455 | * (@ssid, @tsid) and class @tclass. | 448 | * (@ssid, @tsid) and class @tclass. |
456 | * The access vectors and the sequence number are | 449 | * The access vectors and the sequence number are |
457 | * normally provided by the security server in | 450 | * normally provided by the security server in |
458 | * response to a security_compute_av() call. If the | 451 | * response to a security_compute_av() call. If the |
459 | * sequence number @ae->avd.seqno is not less than the latest | 452 | * sequence number @avd->seqno is not less than the latest |
460 | * revocation notification, then the function copies | 453 | * revocation notification, then the function copies |
461 | * the access vectors into a cache entry, returns | 454 | * the access vectors into a cache entry, returns |
462 | * avc_node inserted. Otherwise, this function returns NULL. | 455 | * avc_node inserted. Otherwise, this function returns NULL. |
463 | */ | 456 | */ |
464 | static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct avc_entry *ae) | 457 | static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct av_decision *avd) |
465 | { | 458 | { |
466 | struct avc_node *pos, *node = NULL; | 459 | struct avc_node *pos, *node = NULL; |
467 | int hvalue; | 460 | int hvalue; |
468 | unsigned long flag; | 461 | unsigned long flag; |
469 | 462 | ||
470 | if (avc_latest_notif_update(ae->avd.seqno, 1)) | 463 | if (avc_latest_notif_update(avd->seqno, 1)) |
471 | goto out; | 464 | goto out; |
472 | 465 | ||
473 | node = avc_alloc_node(); | 466 | node = avc_alloc_node(); |
474 | if (node) { | 467 | if (node) { |
468 | struct hlist_head *head; | ||
469 | struct hlist_node *next; | ||
470 | spinlock_t *lock; | ||
471 | |||
475 | hvalue = avc_hash(ssid, tsid, tclass); | 472 | hvalue = avc_hash(ssid, tsid, tclass); |
476 | avc_node_populate(node, ssid, tsid, tclass, ae); | 473 | avc_node_populate(node, ssid, tsid, tclass, avd); |
474 | |||
475 | head = &avc_cache.slots[hvalue]; | ||
476 | lock = &avc_cache.slots_lock[hvalue]; | ||
477 | 477 | ||
478 | spin_lock_irqsave(&avc_cache.slots_lock[hvalue], flag); | 478 | spin_lock_irqsave(lock, flag); |
479 | list_for_each_entry(pos, &avc_cache.slots[hvalue], list) { | 479 | hlist_for_each_entry(pos, next, head, list) { |
480 | if (pos->ae.ssid == ssid && | 480 | if (pos->ae.ssid == ssid && |
481 | pos->ae.tsid == tsid && | 481 | pos->ae.tsid == tsid && |
482 | pos->ae.tclass == tclass) { | 482 | pos->ae.tclass == tclass) { |
@@ -484,9 +484,9 @@ static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct avc_en | |||
484 | goto found; | 484 | goto found; |
485 | } | 485 | } |
486 | } | 486 | } |
487 | list_add_rcu(&node->list, &avc_cache.slots[hvalue]); | 487 | hlist_add_head_rcu(&node->list, head); |
488 | found: | 488 | found: |
489 | spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flag); | 489 | spin_unlock_irqrestore(lock, flag); |
490 | } | 490 | } |
491 | out: | 491 | out: |
492 | return node; | 492 | return node; |
@@ -742,17 +742,22 @@ static inline int avc_sidcmp(u32 x, u32 y) | |||
742 | * @event : Updating event | 742 | * @event : Updating event |
743 | * @perms : Permission mask bits | 743 | * @perms : Permission mask bits |
744 | * @ssid,@tsid,@tclass : identifier of an AVC entry | 744 | * @ssid,@tsid,@tclass : identifier of an AVC entry |
745 | * @seqno : sequence number when decision was made | ||
745 | * | 746 | * |
746 | * if a valid AVC entry doesn't exist, this function returns -ENOENT. | 747 | * if a valid AVC entry doesn't exist, this function returns -ENOENT. |
747 | * if kmalloc() called internally returns NULL, this function returns -ENOMEM. | 748 | * if kmalloc() called internally returns NULL, this function returns -ENOMEM. |
748 | * otherwise, this function updates the AVC entry. The original AVC-entry object | 749 | * otherwise, this function updates the AVC entry. The original AVC-entry object |
749 | * will be released later by RCU. | 750 | * will be released later by RCU. |
750 | */ | 751 | */ |
751 | static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass) | 752 | static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass, |
753 | u32 seqno) | ||
752 | { | 754 | { |
753 | int hvalue, rc = 0; | 755 | int hvalue, rc = 0; |
754 | unsigned long flag; | 756 | unsigned long flag; |
755 | struct avc_node *pos, *node, *orig = NULL; | 757 | struct avc_node *pos, *node, *orig = NULL; |
758 | struct hlist_head *head; | ||
759 | struct hlist_node *next; | ||
760 | spinlock_t *lock; | ||
756 | 761 | ||
757 | node = avc_alloc_node(); | 762 | node = avc_alloc_node(); |
758 | if (!node) { | 763 | if (!node) { |
@@ -762,12 +767,17 @@ static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass) | |||
762 | 767 | ||
763 | /* Lock the target slot */ | 768 | /* Lock the target slot */ |
764 | hvalue = avc_hash(ssid, tsid, tclass); | 769 | hvalue = avc_hash(ssid, tsid, tclass); |
765 | spin_lock_irqsave(&avc_cache.slots_lock[hvalue], flag); | ||
766 | 770 | ||
767 | list_for_each_entry(pos, &avc_cache.slots[hvalue], list) { | 771 | head = &avc_cache.slots[hvalue]; |
772 | lock = &avc_cache.slots_lock[hvalue]; | ||
773 | |||
774 | spin_lock_irqsave(lock, flag); | ||
775 | |||
776 | hlist_for_each_entry(pos, next, head, list) { | ||
768 | if (ssid == pos->ae.ssid && | 777 | if (ssid == pos->ae.ssid && |
769 | tsid == pos->ae.tsid && | 778 | tsid == pos->ae.tsid && |
770 | tclass == pos->ae.tclass){ | 779 | tclass == pos->ae.tclass && |
780 | seqno == pos->ae.avd.seqno){ | ||
771 | orig = pos; | 781 | orig = pos; |
772 | break; | 782 | break; |
773 | } | 783 | } |
@@ -783,7 +793,7 @@ static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass) | |||
783 | * Copy and replace original node. | 793 | * Copy and replace original node. |
784 | */ | 794 | */ |
785 | 795 | ||
786 | avc_node_populate(node, ssid, tsid, tclass, &orig->ae); | 796 | avc_node_populate(node, ssid, tsid, tclass, &orig->ae.avd); |
787 | 797 | ||
788 | switch (event) { | 798 | switch (event) { |
789 | case AVC_CALLBACK_GRANT: | 799 | case AVC_CALLBACK_GRANT: |
@@ -808,7 +818,7 @@ static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass) | |||
808 | } | 818 | } |
809 | avc_node_replace(node, orig); | 819 | avc_node_replace(node, orig); |
810 | out_unlock: | 820 | out_unlock: |
811 | spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flag); | 821 | spin_unlock_irqrestore(lock, flag); |
812 | out: | 822 | out: |
813 | return rc; | 823 | return rc; |
814 | } | 824 | } |
@@ -823,18 +833,24 @@ int avc_ss_reset(u32 seqno) | |||
823 | int i, rc = 0, tmprc; | 833 | int i, rc = 0, tmprc; |
824 | unsigned long flag; | 834 | unsigned long flag; |
825 | struct avc_node *node; | 835 | struct avc_node *node; |
836 | struct hlist_head *head; | ||
837 | struct hlist_node *next; | ||
838 | spinlock_t *lock; | ||
826 | 839 | ||
827 | for (i = 0; i < AVC_CACHE_SLOTS; i++) { | 840 | for (i = 0; i < AVC_CACHE_SLOTS; i++) { |
828 | spin_lock_irqsave(&avc_cache.slots_lock[i], flag); | 841 | head = &avc_cache.slots[i]; |
842 | lock = &avc_cache.slots_lock[i]; | ||
843 | |||
844 | spin_lock_irqsave(lock, flag); | ||
829 | /* | 845 | /* |
830 | * With preemptable RCU, the outer spinlock does not | 846 | * With preemptable RCU, the outer spinlock does not |
831 | * prevent RCU grace periods from ending. | 847 | * prevent RCU grace periods from ending. |
832 | */ | 848 | */ |
833 | rcu_read_lock(); | 849 | rcu_read_lock(); |
834 | list_for_each_entry(node, &avc_cache.slots[i], list) | 850 | hlist_for_each_entry(node, next, head, list) |
835 | avc_node_delete(node); | 851 | avc_node_delete(node); |
836 | rcu_read_unlock(); | 852 | rcu_read_unlock(); |
837 | spin_unlock_irqrestore(&avc_cache.slots_lock[i], flag); | 853 | spin_unlock_irqrestore(lock, flag); |
838 | } | 854 | } |
839 | 855 | ||
840 | for (c = avc_callbacks; c; c = c->next) { | 856 | for (c = avc_callbacks; c; c = c->next) { |
@@ -875,10 +891,10 @@ int avc_ss_reset(u32 seqno) | |||
875 | int avc_has_perm_noaudit(u32 ssid, u32 tsid, | 891 | int avc_has_perm_noaudit(u32 ssid, u32 tsid, |
876 | u16 tclass, u32 requested, | 892 | u16 tclass, u32 requested, |
877 | unsigned flags, | 893 | unsigned flags, |
878 | struct av_decision *avd) | 894 | struct av_decision *in_avd) |
879 | { | 895 | { |
880 | struct avc_node *node; | 896 | struct avc_node *node; |
881 | struct avc_entry entry, *p_ae; | 897 | struct av_decision avd_entry, *avd; |
882 | int rc = 0; | 898 | int rc = 0; |
883 | u32 denied; | 899 | u32 denied; |
884 | 900 | ||
@@ -886,29 +902,34 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid, | |||
886 | 902 | ||
887 | rcu_read_lock(); | 903 | rcu_read_lock(); |
888 | 904 | ||
889 | node = avc_lookup(ssid, tsid, tclass, requested); | 905 | node = avc_lookup(ssid, tsid, tclass); |
890 | if (!node) { | 906 | if (!node) { |
891 | rcu_read_unlock(); | 907 | rcu_read_unlock(); |
892 | rc = security_compute_av(ssid, tsid, tclass, requested, &entry.avd); | 908 | |
909 | if (in_avd) | ||
910 | avd = in_avd; | ||
911 | else | ||
912 | avd = &avd_entry; | ||
913 | |||
914 | rc = security_compute_av(ssid, tsid, tclass, requested, avd); | ||
893 | if (rc) | 915 | if (rc) |
894 | goto out; | 916 | goto out; |
895 | rcu_read_lock(); | 917 | rcu_read_lock(); |
896 | node = avc_insert(ssid, tsid, tclass, &entry); | 918 | node = avc_insert(ssid, tsid, tclass, avd); |
919 | } else { | ||
920 | if (in_avd) | ||
921 | memcpy(in_avd, &node->ae.avd, sizeof(*in_avd)); | ||
922 | avd = &node->ae.avd; | ||
897 | } | 923 | } |
898 | 924 | ||
899 | p_ae = node ? &node->ae : &entry; | 925 | denied = requested & ~(avd->allowed); |
900 | |||
901 | if (avd) | ||
902 | memcpy(avd, &p_ae->avd, sizeof(*avd)); | ||
903 | |||
904 | denied = requested & ~(p_ae->avd.allowed); | ||
905 | 926 | ||
906 | if (denied) { | 927 | if (denied) { |
907 | if (flags & AVC_STRICT) | 928 | if (flags & AVC_STRICT) |
908 | rc = -EACCES; | 929 | rc = -EACCES; |
909 | else if (!selinux_enforcing || security_permissive_sid(ssid)) | 930 | else if (!selinux_enforcing || security_permissive_sid(ssid)) |
910 | avc_update_node(AVC_CALLBACK_GRANT, requested, ssid, | 931 | avc_update_node(AVC_CALLBACK_GRANT, requested, ssid, |
911 | tsid, tclass); | 932 | tsid, tclass, avd->seqno); |
912 | else | 933 | else |
913 | rc = -EACCES; | 934 | rc = -EACCES; |
914 | } | 935 | } |