path: root/kernel/audit.c
author     Herbert Xu <herbert@gondor.apana.org.au>  2005-05-03 17:55:09 -0400
committer  David S. Miller <davem@davemloft.net>     2005-05-03 17:55:09 -0400
commit     2a0a6ebee1d68552152ae8d4aeda91d806995dec (patch)
tree       a0b77861b3395b4728e75f2b2f92755e0a4777d3 /kernel/audit.c
parent     96c36023434b7b6824b1da72a6b7b1ca61d7310c (diff)
[NETLINK]: Synchronous message processing.
Let's recap the problem. The current asynchronous netlink kernel message processing is vulnerable to these attacks:

1) Hit and run: Attacker sends one or more messages and then exits before they're processed. This may confuse/disable the next netlink user that gets the netlink address of the attacker since it may receive the responses to the attacker's messages.

   Proposed solutions:

   a) Synchronous processing.
   b) Stream mode socket.
   c) Restrict/prohibit binding.

2) Starvation: Because various netlink rcv functions were written to not return until all messages have been processed on a socket, it is possible for these functions to execute for an arbitrarily long period of time. If this is successfully exploited it could also be used to hold rtnl forever.

   Proposed solutions:

   a) Synchronous processing.
   b) Stream mode socket.

Firstly let's cross off solution c). It only solves the first problem and it has user-visible impacts. In particular, it'll break user space applications that expect to bind or communicate with specific netlink addresses (pids).

So we're left with a choice of synchronous processing versus SOCK_STREAM for netlink.

For the moment I'm sticking with the synchronous approach as suggested by Alexey since it's simpler and I'd rather spend my time working on other things. However, it does have a number of deficiencies compared to the stream mode solution:

1) User-space to user-space netlink communication is still vulnerable.

2) Inefficient use of resources. This is especially true for rtnetlink since the lock is shared with other users such as networking drivers. The latter could hold the rtnl while communicating with hardware, which causes the rtnetlink user to wait when it could be doing other things.

3) It is still possible to DoS all netlink users by flooding the kernel netlink receive queue. The attacker simply fills the receive socket with a single netlink message that fills up the entire queue. The attacker then continues to call sendmsg with the same message in a loop.

Point 3) can be countered by retransmissions in user-space code; however, it is pretty messy.

In light of these problems (in particular, point 3), we should implement stream mode netlink at some point. In the meantime, here is a patch that implements synchronous processing.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
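The heart of the kernel/audit.c change below is the bounded drain in audit_receive(). A minimal annotated sketch of that pattern follows; the code mirrors the final hunk of the diff, and the comments are explanatory additions, not part of the patch:

static void audit_receive(struct sock *sk, int length)
{
	struct sk_buff *skb;
	unsigned int qlen;

	/* Messages are now processed synchronously from the sender's
	 * context, so it is safe to sleep on the semaphore instead of
	 * bailing out with down_trylock(). */
	down(&audit_netlink_sem);

	/* Sample the queue length once: only the skbs already queued
	 * at entry are handled here, so a sender that keeps queueing
	 * new messages cannot pin this loop (or the semaphore). */
	for (qlen = skb_queue_len(&sk->sk_receive_queue); qlen; qlen--) {
		skb = skb_dequeue(&sk->sk_receive_queue);
		audit_receive_skb(skb);
		kfree_skb(skb);		/* always consumed; never requeued */
	}

	up(&audit_netlink_sem);
}

Anything queued while the semaphore is held is simply picked up by the next invocation, which is why the FIXME about starvation can go away.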
Diffstat (limited to 'kernel/audit.c')
-rw-r--r--  kernel/audit.c  19
1 file changed, 8 insertions, 11 deletions
diff --git a/kernel/audit.c b/kernel/audit.c
index 0f84dd7af2c8..ac26d4d960d3 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -427,7 +427,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 /* Get message from skb (based on rtnetlink_rcv_skb). Each message is
  * processed by audit_receive_msg. Malformed skbs with wrong length are
  * discarded silently. */
-static int audit_receive_skb(struct sk_buff *skb)
+static void audit_receive_skb(struct sk_buff *skb)
 {
 	int err;
 	struct nlmsghdr *nlh;
@@ -436,7 +436,7 @@ static int audit_receive_skb(struct sk_buff *skb)
 	while (skb->len >= NLMSG_SPACE(0)) {
 		nlh = (struct nlmsghdr *)skb->data;
 		if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len)
-			return 0;
+			return;
 		rlen = NLMSG_ALIGN(nlh->nlmsg_len);
 		if (rlen > skb->len)
 			rlen = skb->len;
@@ -446,23 +446,20 @@ static int audit_receive_skb(struct sk_buff *skb)
 			netlink_ack(skb, nlh, 0);
 		skb_pull(skb, rlen);
 	}
-	return 0;
 }
 
 /* Receive messages from netlink socket. */
 static void audit_receive(struct sock *sk, int length)
 {
 	struct sk_buff *skb;
+	unsigned int qlen;
 
-	if (down_trylock(&audit_netlink_sem))
-		return;
+	down(&audit_netlink_sem);
 
-	/* FIXME: this must not cause starvation */
-	while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
-		if (audit_receive_skb(skb) && skb->len)
-			skb_queue_head(&sk->sk_receive_queue, skb);
-		else
-			kfree_skb(skb);
+	for (qlen = skb_queue_len(&sk->sk_receive_queue); qlen; qlen--) {
+		skb = skb_dequeue(&sk->sk_receive_queue);
+		audit_receive_skb(skb);
+		kfree_skb(skb);
 	}
 	up(&audit_netlink_sem);
 }
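Read together, the first two hunks leave audit_receive_skb() looking roughly like the sketch below. It is assembled from the hunks above only: the dispatch and ack code between them is elided by the diff and merely summarised in a comment, and the declaration of rlen (outside the shown context) is assumed here to be an unsigned integer.

static void audit_receive_skb(struct sk_buff *skb)
{
	struct nlmsghdr *nlh;
	unsigned int rlen;	/* assumed type; declared outside the shown hunks */

	while (skb->len >= NLMSG_SPACE(0)) {
		nlh = (struct nlmsghdr *)skb->data;
		/* Malformed or truncated header: discard silently.  A
		 * void return suffices now that the caller never
		 * requeues a partially processed skb. */
		if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len)
			return;
		/* Advance by the aligned message length, clamped to
		 * what the skb actually contains. */
		rlen = NLMSG_ALIGN(nlh->nlmsg_len);
		if (rlen > skb->len)
			rlen = skb->len;
		/* ... hand the message to audit_receive_msg() and ack
		 * the sender as needed, e.g. netlink_ack(skb, nlh, 0)
		 * (context elided by the diff) ... */
		skb_pull(skb, rlen);
	}
}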