author     Herbert Xu <herbert@gondor.apana.org.au>   2005-05-03 17:55:09 -0400
committer  David S. Miller <davem@davemloft.net>      2005-05-03 17:55:09 -0400
commit     2a0a6ebee1d68552152ae8d4aeda91d806995dec (patch)
tree       a0b77861b3395b4728e75f2b2f92755e0a4777d3 /net/core
parent     96c36023434b7b6824b1da72a6b7b1ca61d7310c (diff)
[NETLINK]: Synchronous message processing.
Let's recap the problem. The current asynchronous netlink kernel message processing is vulnerable to these attacks:

1) Hit and run: The attacker sends one or more messages and then exits before they're processed. This may confuse/disable the next netlink user that gets the netlink address of the attacker, since it may receive the responses to the attacker's messages.

   Proposed solutions:

   a) Synchronous processing.
   b) Stream mode socket.
   c) Restrict/prohibit binding.

2) Starvation: Because various netlink rcv functions were written to not return until all messages have been processed on a socket, it is possible for these functions to execute for an arbitrarily long period of time. If this is successfully exploited it could also be used to hold rtnl forever.

   Proposed solutions:

   a) Synchronous processing.
   b) Stream mode socket.

Firstly, let's cross off solution c). It only solves the first problem, and it has user-visible impacts. In particular, it'll break user-space applications that expect to bind or communicate with specific netlink addresses (pids).

So we're left with a choice of synchronous processing versus SOCK_STREAM for netlink.

For the moment I'm sticking with the synchronous approach as suggested by Alexey, since it's simpler and I'd rather spend my time working on other things. However, it does have a number of deficiencies compared to the stream-mode solution:

1) User-space to user-space netlink communication is still vulnerable.

2) Inefficient use of resources. This is especially true for rtnetlink, since the lock is shared with other users such as networking drivers. The latter could hold the rtnl while communicating with hardware, which causes the rtnetlink user to wait when it could be doing other things.

3) It is still possible to DoS all netlink users by flooding the kernel netlink receive queue. The attacker simply fills the receive socket with a single netlink message that fills up the entire queue, then continues to call sendmsg with the same message in a loop (a sketch of this pattern follows below).

Point 3) can be countered by retransmissions in user-space code; however, it is pretty messy. In light of these problems (in particular, point 3), we should implement stream-mode netlink at some point. In the meantime, here is a patch that implements synchronous processing.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
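[Editorial sketch] For concreteness, here is a minimal user-space illustration of the flooding pattern described in point 3). It is a hypothetical example, not part of the patch: the choice of RTM_GETLINK and the message layout are assumptions; the essential part is only the unbounded sendto() loop with no corresponding reads.

    /* Hypothetical flooding sketch (illustrative only, not in the patch). */
    #include <string.h>
    #include <sys/socket.h>
    #include <linux/netlink.h>
    #include <linux/rtnetlink.h>

    int main(void)
    {
            int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
            struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
            struct {
                    struct nlmsghdr nlh;
                    struct rtgenmsg gen;
            } req;

            memset(&req, 0, sizeof(req));
            req.nlh.nlmsg_len    = NLMSG_LENGTH(sizeof(req.gen));
            req.nlh.nlmsg_type   = RTM_GETLINK;
            req.nlh.nlmsg_flags  = NLM_F_REQUEST | NLM_F_DUMP;
            req.gen.rtgen_family = AF_UNSPEC;

            /* Resend the same message forever and never read a reply,
             * keeping the kernel-side receive queue permanently full. */
            for (;;)
                    sendto(fd, &req, req.nlh.nlmsg_len, 0,
                           (struct sockaddr *)&kernel, sizeof(kernel));
    }

Under the old asynchronous scheme, rtnetlink_rcv() looped until the queue was empty, so a sender like this could keep the handler, and with it the rtnl semaphore, busy indefinitely; the patch below bounds each pass instead.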
Diffstat (limited to 'net/core')
-rw-r--r--  net/core/rtnetlink.c  23
1 file changed, 14 insertions(+), 9 deletions(-)
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 5fb70cfa1085..6e1ab1e34b2e 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -609,26 +609,31 @@ static inline int rtnetlink_rcv_skb(struct sk_buff *skb)
 
 /*
  * rtnetlink input queue processing routine:
- *  - try to acquire shared lock. If it is failed, defer processing.
+ *  - process as much as there was in the queue upon entry.
  *  - feed skbs to rtnetlink_rcv_skb, until it refuse a message,
- *    that will occur, when a dump started and/or acquisition of
- *    exclusive lock failed.
+ *    that will occur, when a dump started.
  */
 
 static void rtnetlink_rcv(struct sock *sk, int len)
 {
+	unsigned int qlen = skb_queue_len(&sk->sk_receive_queue);
+
 	do {
 		struct sk_buff *skb;
 
-		if (rtnl_shlock_nowait())
-			return;
+		rtnl_lock();
+
+		if (qlen > skb_queue_len(&sk->sk_receive_queue))
+			qlen = skb_queue_len(&sk->sk_receive_queue);
 
-		while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
+		while (qlen--) {
+			skb = skb_dequeue(&sk->sk_receive_queue);
 			if (rtnetlink_rcv_skb(skb)) {
-				if (skb->len)
+				if (skb->len) {
 					skb_queue_head(&sk->sk_receive_queue,
 						       skb);
-				else
+					qlen++;
+				} else
 					kfree_skb(skb);
 				break;
 			}
@@ -638,7 +643,7 @@ static void rtnetlink_rcv(struct sock *sk, int len)
 		up(&rtnl_sem);
 
 		netdev_run_todo();
-	} while (rtnl && rtnl->sk_receive_queue.qlen);
+	} while (qlen);
 }
 
 static struct rtnetlink_link link_rtnetlink_table[RTM_NR_MSGTYPES] =
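
[Editorial sketch] The heart of the fix is a snapshot-and-drain pattern: capture the queue length once on entry and process at most that many messages per pass, only ever re-clamping qlen downwards. Below is a self-contained toy version, with a hypothetical ring-buffer queue standing in for sk->sk_receive_queue, showing why a sender who keeps enqueuing can no longer pin the receiver in its loop.

    /* Toy model of the patch's bounded drain (hypothetical types,
     * not kernel code). */
    #include <stdio.h>

    #define QSIZE 16

    struct queue {
            int items[QSIZE];
            unsigned int head, tail;
    };

    static unsigned int queue_len(const struct queue *q)
    {
            return q->tail - q->head;
    }

    static void enqueue(struct queue *q, int v)
    {
            q->items[q->tail++ % QSIZE] = v;
    }

    static int dequeue(struct queue *q)
    {
            return q->items[q->head++ % QSIZE];
    }

    /* Mirror of rtnetlink_rcv()'s logic: process at most the number of
     * messages that were queued on entry, so new arrivals wait for the
     * next invocation instead of extending this one. */
    static void drain(struct queue *q)
    {
            unsigned int qlen = queue_len(q);

            while (qlen--) {
                    int msg = dequeue(q);

                    printf("processed %d\n", msg);
                    enqueue(q, msg + 100); /* a hostile sender refilling the queue */
            }
            /* Terminates after the initial qlen items despite the refills. */
    }

    int main(void)
    {
            struct queue q = { {0}, 0, 0 };

            enqueue(&q, 1);
            enqueue(&q, 2);
            drain(&q); /* prints 1 and 2; leaves 101 and 102 queued */
            return 0;
    }

The real patch additionally re-queues a deferred message and bumps qlen when rtnetlink_rcv_skb() refuses it mid-dump; the outer do/while then repeats while qlen is non-zero, taking and releasing the rtnl semaphore around each pass.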