author		Patrick McHardy <kaber@trash.net>	2013-04-17 02:46:56 -0400
committer	David S. Miller <davem@davemloft.net>	2013-04-19 14:57:56 -0400
commit		cd967e05715489c5d1059d8d3012c747e5cfb1c4 (patch)
tree		0410a1012f4129c4255c93ab78b5370fe59d8b46 /net/netlink
parent		447b816fe03898c4dad19b254ca3dd05bae46ec3 (diff)
netlink: add symbolic value for congested state
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/netlink')
-rw-r--r--	net/netlink/af_netlink.c	18
1 file changed, 11 insertions, 7 deletions
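
In the diff below, NETLINK_CONGESTED is a bit index passed to the atomic bitops (test_bit(), test_and_set_bit(), clear_bit()) that operate on the unsigned long nlk->state word, while NETLINK_KERNEL_SOCKET and the other values under the new /* flags */ comment are bit masks OR-ed into nlk->flags. That is why the new constant is 0x0 even though the flag values start at 0x1. A minimal sketch contrasting the two idioms (illustrative only, not part of the commit; the helper name and parameters are made up):

#include <linux/bitops.h>
#include <linux/printk.h>

#define NETLINK_CONGESTED	0x0	/* bit index into an unsigned long state word */
#define NETLINK_KERNEL_SOCKET	0x1	/* bit mask within a flags word */

/* Illustrative helper, not from af_netlink.c. */
static void bit_index_vs_flag_mask(unsigned long *state, unsigned int *flags)
{
	/* bit-index style: the atomic helpers take the bit number */
	if (!test_and_set_bit(NETLINK_CONGESTED, state))
		pr_debug("congested bit was clear, now set\n");
	clear_bit(NETLINK_CONGESTED, state);

	/* bit-mask style: plain bitwise arithmetic on the flags word */
	*flags |= NETLINK_KERNEL_SOCKET;
	*flags &= ~NETLINK_KERNEL_SOCKET;
}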
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index ce2e0064e7f6..f20a81005177 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -68,6 +68,10 @@ struct listeners {
 	unsigned long masks[0];
 };
 
+/* state bits */
+#define NETLINK_CONGESTED	0x0
+
+/* flags */
 #define NETLINK_KERNEL_SOCKET	0x1
 #define NETLINK_RECV_PKTINFO	0x2
 #define NETLINK_BROADCAST_SEND_ERROR	0x4
@@ -727,7 +731,7 @@ static void netlink_overrun(struct sock *sk)
 	struct netlink_sock *nlk = nlk_sk(sk);
 
 	if (!(nlk->flags & NETLINK_RECV_NO_ENOBUFS)) {
-		if (!test_and_set_bit(0, &nlk_sk(sk)->state)) {
+		if (!test_and_set_bit(NETLINK_CONGESTED, &nlk_sk(sk)->state)) {
 			sk->sk_err = ENOBUFS;
 			sk->sk_error_report(sk);
 		}
@@ -788,7 +792,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
 	nlk = nlk_sk(sk);
 
 	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
-	    test_bit(0, &nlk->state)) {
+	    test_bit(NETLINK_CONGESTED, &nlk->state)) {
 		DECLARE_WAITQUEUE(wait, current);
 		if (!*timeo) {
 			if (!ssk || netlink_is_kernel(ssk))
@@ -802,7 +806,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
 		add_wait_queue(&nlk->wait, &wait);
 
 		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
-		     test_bit(0, &nlk->state)) &&
+		     test_bit(NETLINK_CONGESTED, &nlk->state)) &&
 		    !sock_flag(sk, SOCK_DEAD))
 			*timeo = schedule_timeout(*timeo);
 
@@ -872,8 +876,8 @@ static void netlink_rcv_wake(struct sock *sk)
 	struct netlink_sock *nlk = nlk_sk(sk);
 
 	if (skb_queue_empty(&sk->sk_receive_queue))
-		clear_bit(0, &nlk->state);
-	if (!test_bit(0, &nlk->state))
+		clear_bit(NETLINK_CONGESTED, &nlk->state);
+	if (!test_bit(NETLINK_CONGESTED, &nlk->state))
 		wake_up_interruptible(&nlk->wait);
 }
 
@@ -957,7 +961,7 @@ static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
 	struct netlink_sock *nlk = nlk_sk(sk);
 
 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
-	    !test_bit(0, &nlk->state)) {
+	    !test_bit(NETLINK_CONGESTED, &nlk->state)) {
 		skb_set_owner_r(skb, sk);
 		__netlink_sendskb(sk, skb);
 		return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
@@ -1235,7 +1239,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
 	case NETLINK_NO_ENOBUFS:
 		if (val) {
 			nlk->flags |= NETLINK_RECV_NO_ENOBUFS;
-			clear_bit(0, &nlk->state);
+			clear_bit(NETLINK_CONGESTED, &nlk->state);
 			wake_up_interruptible(&nlk->wait);
 		} else {
 			nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS;
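
The final hunk is the kernel-side handler for the existing NETLINK_NO_ENOBUFS socket option: when userspace enables it, the handler clears the congested bit and wakes any waiters, and netlink_overrun() stops raising ENOBUFS. A userspace sketch of enabling that option (the wrapper name and error handling are illustrative; SOL_NETLINK and NETLINK_NO_ENOBUFS themselves are the existing API):

#include <stdio.h>
#include <sys/socket.h>
#include <linux/netlink.h>

#ifndef SOL_NETLINK
#define SOL_NETLINK 270	/* some older libcs do not expose this in <sys/socket.h> */
#endif

/* Ask the kernel to stop reporting ENOBUFS overruns on a netlink socket. */
static int netlink_disable_enobufs(int nl_fd)
{
	int one = 1;

	if (setsockopt(nl_fd, SOL_NETLINK, NETLINK_NO_ENOBUFS,
		       &one, sizeof(one)) < 0) {
		perror("setsockopt(NETLINK_NO_ENOBUFS)");
		return -1;
	}
	return 0;
}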