From 4243cac1e76228f7ba916d5df9e75a219352160a Mon Sep 17 00:00:00 2001
From: Vladislav Yasevich
Date: Mon, 13 Jun 2005 15:10:49 -0700
Subject: [SCTP]: Fix bug in restart of peeled-off associations.

Signed-off-by: Vladislav Yasevich
Signed-off-by: Sridhar Samudrala
Signed-off-by: David S. Miller
---
 net/sctp/socket.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 0b338eca6dc0..2a3c0e08a090 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4686,6 +4686,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
 	struct sctp_endpoint *newep = newsp->ep;
 	struct sk_buff *skb, *tmp;
 	struct sctp_ulpevent *event;
+	int flags = 0;
 
 	/* Migrate socket buffer sizes and all the socket level options to the
 	 * new socket.
@@ -4707,6 +4708,17 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
 	sctp_sk(newsk)->bind_hash = pp;
 	inet_sk(newsk)->num = inet_sk(oldsk)->num;
 
+	/* Copy the bind_addr list from the original endpoint to the new
+	 * endpoint so that we can handle restarts properly
+	 */
+	if (assoc->peer.ipv4_address)
+		flags |= SCTP_ADDR4_PEERSUPP;
+	if (assoc->peer.ipv6_address)
+		flags |= SCTP_ADDR6_PEERSUPP;
+	sctp_bind_addr_copy(&newsp->ep->base.bind_addr,
+			    &oldsp->ep->base.bind_addr,
+			    SCTP_SCOPE_GLOBAL, GFP_KERNEL, flags);
+
 	/* Move any messages in the old socket's receive queue that are for the
 	 * peeled off association to the new socket's receive queue.
 	 */
--
cgit v1.2.2

From 1e061ab2e5aa50a84d68ca654773632f9c425bb6 Mon Sep 17 00:00:00 2001
From: Herbert Xu
Date: Sat, 18 Jun 2005 22:56:42 -0700
Subject: [SCTP]: Replace spin_lock_irqsave with spin_lock_bh

This patch replaces the spin_lock_irqsave call on the receive queue
lock in SCTP with spin_lock_bh.

Despite the proliferation of spin_lock_irqsave calls in this stack,
SCTP is only entered from the IPv4/IPv6 stack and from user space;
it is never entered from hardirq context.  The call in question is
made only from recvmsg, so IRQs are not disabled at that point.  It
is therefore safe to replace it with spin_lock_bh.

Signed-off-by: Herbert Xu
Signed-off-by: David S. Miller
---
 net/sctp/socket.c | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 2a3c0e08a090..e6926cb19420 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4368,15 +4368,11 @@ static struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
 	 * However, this function was corrent in any case. 8)
 	 */
 	if (flags & MSG_PEEK) {
-		unsigned long cpu_flags;
-
-		sctp_spin_lock_irqsave(&sk->sk_receive_queue.lock,
-				       cpu_flags);
+		spin_lock_bh(&sk->sk_receive_queue.lock);
 		skb = skb_peek(&sk->sk_receive_queue);
 		if (skb)
 			atomic_inc(&skb->users);
-		sctp_spin_unlock_irqrestore(&sk->sk_receive_queue.lock,
-					    cpu_flags);
+		spin_unlock_bh(&sk->sk_receive_queue.lock);
 	} else {
 		skb = skb_dequeue(&sk->sk_receive_queue);
 	}
--
cgit v1.2.2
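
For context on the first patch: "peeling off" branches a single association
off a one-to-many SCTP socket onto its own one-to-one socket, and that branch
is what runs sctp_sock_migrate() in the kernel. The sketch below shows the
user-space side of the operation using the sctp_recvmsg() and sctp_peeloff()
helpers from lksctp-tools; the function name peel_first_association() and the
minimal error handling are illustrative assumptions, not part of the patch.

#include <netinet/in.h>
#include <netinet/sctp.h>      /* lksctp-tools; link with -lsctp */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

/* Receive one message on a one-to-many (SOCK_SEQPACKET) SCTP socket to
 * learn an association id, then peel that association off onto its own
 * one-to-one socket.  Returns the new socket descriptor, or -1 on error.
 */
int peel_first_association(int listen_fd)
{
        char buf[1024];
        struct sockaddr_in from;
        socklen_t fromlen = sizeof(from);
        struct sctp_sndrcvinfo sinfo;
        int msg_flags = 0;

        memset(&sinfo, 0, sizeof(sinfo));
        if (sctp_recvmsg(listen_fd, buf, sizeof(buf),
                         (struct sockaddr *)&from, &fromlen,
                         &sinfo, &msg_flags) < 0) {
                perror("sctp_recvmsg");
                return -1;
        }

        /* This call is the path that migrates the association to a new
         * socket; the patch makes that migration copy the endpoint's
         * bound address list so a later restart is handled correctly.
         */
        return sctp_peeloff(listen_fd, sinfo.sinfo_assoc_id);
}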
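
The second patch rests on a standard locking rule worth restating:
sk_receive_queue.lock is taken from softirq context (the IPv4/IPv6 receive
path) and from process context (recvmsg), but never from hardirq context, so
the process-context side only needs to block bottom halves, not hard IRQs.
Below is a minimal kernel-style sketch of that pattern, assuming the era's
atomic_t skb->users refcount; rx_enqueue() and rx_peek() are illustrative
names, not actual kernel symbols.

#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <net/sock.h>

/* Softirq side (packet input): a plain spin_lock() suffices here because
 * every process-context user of this lock disables bottom halves first,
 * so this softirq can never interrupt a lock holder on the same CPU.
 */
static void rx_enqueue(struct sock *sk, struct sk_buff *skb)
{
        spin_lock(&sk->sk_receive_queue.lock);
        __skb_queue_tail(&sk->sk_receive_queue, skb);
        spin_unlock(&sk->sk_receive_queue.lock);
}

/* Process side (recvmsg with MSG_PEEK): spin_lock_bh() keeps the softirq
 * above from running on this CPU while the lock is held.  Disabling hard
 * IRQs (spin_lock_irqsave) would be needed only if the lock were also
 * taken from hardirq context, which it is not.
 */
static struct sk_buff *rx_peek(struct sock *sk)
{
        struct sk_buff *skb;

        spin_lock_bh(&sk->sk_receive_queue.lock);
        skb = skb_peek(&sk->sk_receive_queue);
        if (skb)
                atomic_inc(&skb->users);   /* hold a reference across the peek */
        spin_unlock_bh(&sk->sk_receive_queue.lock);
        return skb;
}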