 kernel/audit.c | 40 ++++++++++++++++++++--------------------
 1 file changed, 20 insertions(+), 20 deletions(-)
diff --git a/kernel/audit.c b/kernel/audit.c
index 801247a6c9e5..6ac1df116c0b 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -138,9 +138,9 @@ static DEFINE_SPINLOCK(audit_freelist_lock);
 static int audit_freelist_count;
 static LIST_HEAD(audit_freelist);
 
-static struct sk_buff_head audit_skb_queue;
+static struct sk_buff_head audit_queue;
 /* queue of skbs to send to auditd when/if it comes back */
-static struct sk_buff_head audit_skb_hold_queue;
+static struct sk_buff_head audit_hold_queue;
 static struct task_struct *kauditd_task;
 static DECLARE_WAIT_QUEUE_HEAD(kauditd_wait);
 static DECLARE_WAIT_QUEUE_HEAD(audit_backlog_wait);
@@ -377,8 +377,8 @@ static void audit_hold_skb(struct sk_buff *skb)
 {
 	if (audit_default &&
 	    (!audit_backlog_limit ||
-	     skb_queue_len(&audit_skb_hold_queue) < audit_backlog_limit))
-		skb_queue_tail(&audit_skb_hold_queue, skb);
+	     skb_queue_len(&audit_hold_queue) < audit_backlog_limit))
+		skb_queue_tail(&audit_hold_queue, skb);
 	else
 		kfree_skb(skb);
 }
@@ -387,7 +387,7 @@ static void audit_hold_skb(struct sk_buff *skb)
  * For one reason or another this nlh isn't getting delivered to the userspace
  * audit daemon, just send it to printk.
  */
-static void audit_printk_skb(struct sk_buff *skb)
+static void kauditd_printk_skb(struct sk_buff *skb)
 {
 	struct nlmsghdr *nlh = nlmsg_hdr(skb);
 	char *data = nlmsg_data(nlh);
@@ -402,7 +402,7 @@ static void audit_printk_skb(struct sk_buff *skb)
 		audit_hold_skb(skb);
 }
 
-static void kauditd_send_skb(struct sk_buff *skb)
+static void kauditd_send_unicast_skb(struct sk_buff *skb)
 {
 	int err;
 	int attempts = 0;
@@ -493,13 +493,13 @@ static void flush_hold_queue(void)
 	if (!audit_default || !audit_pid)
 		return;
 
-	skb = skb_dequeue(&audit_skb_hold_queue);
+	skb = skb_dequeue(&audit_hold_queue);
 	if (likely(!skb))
 		return;
 
 	while (skb && audit_pid) {
-		kauditd_send_skb(skb);
-		skb = skb_dequeue(&audit_skb_hold_queue);
+		kauditd_send_unicast_skb(skb);
+		skb = skb_dequeue(&audit_hold_queue);
 	}
 
 	/*
@@ -518,7 +518,7 @@ static int kauditd_thread(void *dummy)
 	while (!kthread_should_stop()) {
 		flush_hold_queue();
 
-		skb = skb_dequeue(&audit_skb_queue);
+		skb = skb_dequeue(&audit_queue);
 		if (skb) {
 			nlh = nlmsg_hdr(skb);
 
@@ -540,16 +540,16 @@ static int kauditd_thread(void *dummy)
 			}
 
 			if (audit_pid)
-				kauditd_send_skb(skb);
+				kauditd_send_unicast_skb(skb);
 			else
-				audit_printk_skb(skb);
+				kauditd_printk_skb(skb);
 		} else {
 			/* we have flushed the backlog so wake everyone up who
 			 * is blocked and go to sleep until we have something
 			 * in the backlog again */
 			wake_up(&audit_backlog_wait);
 			wait_event_freezable(kauditd_wait,
-					     skb_queue_len(&audit_skb_queue));
+					     skb_queue_len(&audit_queue));
 		}
 	}
 	return 0;
@@ -865,7 +865,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 		s.rate_limit = audit_rate_limit;
 		s.backlog_limit = audit_backlog_limit;
 		s.lost = atomic_read(&audit_lost);
-		s.backlog = skb_queue_len(&audit_skb_queue);
+		s.backlog = skb_queue_len(&audit_queue);
 		s.feature_bitmap = AUDIT_FEATURE_BITMAP_ALL;
 		s.backlog_wait_time = audit_backlog_wait_time_master;
 		audit_send_reply(skb, seq, AUDIT_GET, 0, 0, &s, sizeof(s));
@@ -1200,8 +1200,8 @@ static int __init audit_init(void)
 		audit_default ? "enabled" : "disabled");
 	register_pernet_subsys(&audit_net_ops);
 
-	skb_queue_head_init(&audit_skb_queue);
-	skb_queue_head_init(&audit_skb_hold_queue);
+	skb_queue_head_init(&audit_queue);
+	skb_queue_head_init(&audit_hold_queue);
 	audit_initialized = AUDIT_INITIALIZED;
 	audit_enabled = audit_default;
 	audit_ever_enabled |= !!audit_default;
@@ -1357,7 +1357,7 @@ static long wait_for_auditd(long sleep_time)
 	DECLARE_WAITQUEUE(wait, current);
 
 	if (audit_backlog_limit &&
-	    skb_queue_len(&audit_skb_queue) > audit_backlog_limit) {
+	    skb_queue_len(&audit_queue) > audit_backlog_limit) {
 		add_wait_queue_exclusive(&audit_backlog_wait, &wait);
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		sleep_time = schedule_timeout(sleep_time);
@@ -1406,7 +1406,7 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
 	}
 
 	while (audit_backlog_limit
-	       && skb_queue_len(&audit_skb_queue) > audit_backlog_limit + reserve) {
+	       && skb_queue_len(&audit_queue) > audit_backlog_limit + reserve) {
 		if (gfp_mask & __GFP_DIRECT_RECLAIM && audit_backlog_wait_time) {
 			long sleep_time;
 
@@ -1419,7 +1419,7 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
 		}
 		if (audit_rate_check() && printk_ratelimit())
 			pr_warn("audit_backlog=%d > audit_backlog_limit=%d\n",
-				skb_queue_len(&audit_skb_queue),
+				skb_queue_len(&audit_queue),
 				audit_backlog_limit);
 		audit_log_lost("backlog limit exceeded");
 		audit_backlog_wait_time = 0;
@@ -2001,7 +2001,7 @@ void audit_log_end(struct audit_buffer *ab)
 	if (!audit_rate_check()) {
 		audit_log_lost("rate limit exceeded");
 	} else {
-		skb_queue_tail(&audit_skb_queue, ab->skb);
+		skb_queue_tail(&audit_queue, ab->skb);
 		wake_up_interruptible(&kauditd_wait);
 		ab->skb = NULL;
 	}