author     Patrick McHardy <kaber@trash.net>             2007-11-07 05:42:09 -0500
committer  David S. Miller <davem@sunset.davemloft.net>  2007-11-07 07:15:12 -0500
commit     c3d8d1e30cace31fed6186a4b8c6b1401836d89c
tree       7122fccf27aa337438123071f3cb07999429de9e /net/netlink
parent     230140cffa7feae90ad50bf259db1fa07674f3a7
[NETLINK]: Fix unicast timeouts
Commit ed6dcf4a in the history.git tree broke netlink_unicast() timeouts
by moving the schedule_timeout() call into a new function that does not
propagate the remaining timeout back to the caller. As a result, each
retry starts over with the full timeout instead of the time actually left.
ipc/mqueue.c seems to actually want to wait indefinitely, so this
behaviour is retained.
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
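To make the failure mode concrete, here is a minimal userspace sketch of the pattern this patch changes; it is not the kernel code itself, and the helper names wait_for_room() and sleep_remaining() are hypothetical stand-ins for netlink_attachskb() and schedule_timeout(). When the timeout is passed by value, the remaining budget computed inside the helper is lost, so every retry starts with the full timeout again; passing it by pointer lets the caller's budget shrink across retries, which is exactly what the diff below does with 'long *timeo'.

	/* Userspace sketch of the bug, assuming hypothetical helpers. */
	#include <stdio.h>

	/* Stand-in for schedule_timeout(): consume up to 10 units of the
	 * budget and return how much of it is left. */
	static long sleep_remaining(long timeo)
	{
		long consumed = timeo < 10 ? timeo : 10;
		return timeo - consumed;
	}

	/* Buggy shape: 'timeo' is passed by value, so the remaining
	 * budget computed here never reaches the caller. */
	static int wait_for_room_buggy(long timeo)
	{
		timeo = sleep_remaining(timeo);
		return 1; /* 1 means "retry the lookup", as in the patch */
	}

	/* Fixed shape: the remaining budget is propagated back through
	 * a pointer, so repeated retries eventually exhaust it. */
	static int wait_for_room_fixed(long *timeo)
	{
		*timeo = sleep_remaining(*timeo);
		return *timeo ? 1 : 0;
	}

	int main(void)
	{
		long timeo = 30;
		int retries = 0;

		/* Buggy variant: only the helper's copy shrinks. */
		wait_for_room_buggy(timeo);
		printf("buggy: caller still sees %ld, so every retry "
		       "restarts with the full timeout\n", timeo);

		/* Fixed variant: the budget shrinks and reaches zero. */
		while (wait_for_room_fixed(&timeo))
			retries++;
		printf("fixed: %d retries, %ld of the budget left\n",
		       retries, timeo);
		return 0;
	}

With the fixed shape, a caller looping on the retry return value is bounded by its original timeout, mirroring how netlink_unicast() passes &timeo below; a caller that wants to wait forever (as ipc/mqueue.c does) simply keeps an effectively infinite budget.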
Diffstat (limited to 'net/netlink')
 net/netlink/af_netlink.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 260171255576..415c97236f63 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -752,7 +752,7 @@ struct sock *netlink_getsockbyfilp(struct file *filp)
  * 1: repeat lookup - reference dropped while waiting for socket memory.
  */
 int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
-		      long timeo, struct sock *ssk)
+		      long *timeo, struct sock *ssk)
 {
 	struct netlink_sock *nlk;
 
@@ -761,7 +761,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
 	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
 	    test_bit(0, &nlk->state)) {
 		DECLARE_WAITQUEUE(wait, current);
-		if (!timeo) {
+		if (!*timeo) {
 			if (!ssk || netlink_is_kernel(ssk))
 				netlink_overrun(sk);
 			sock_put(sk);
@@ -775,7 +775,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
 		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
 		     test_bit(0, &nlk->state)) &&
 		    !sock_flag(sk, SOCK_DEAD))
-			timeo = schedule_timeout(timeo);
+			*timeo = schedule_timeout(*timeo);
 
 		__set_current_state(TASK_RUNNING);
 		remove_wait_queue(&nlk->wait, &wait);
@@ -783,7 +783,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
 
 	if (signal_pending(current)) {
 		kfree_skb(skb);
-		return sock_intr_errno(timeo);
+		return sock_intr_errno(*timeo);
 	}
 	return 1;
 }
@@ -877,7 +877,7 @@ retry:
 	if (netlink_is_kernel(sk))
 		return netlink_unicast_kernel(sk, skb);
 
-	err = netlink_attachskb(sk, skb, nonblock, timeo, ssk);
+	err = netlink_attachskb(sk, skb, nonblock, &timeo, ssk);
 	if (err == 1)
 		goto retry;
 	if (err)