Diffstat (limited to 'net')
-rw-r--r--  net/bluetooth/cmtp/core.c    |   6
-rw-r--r--  net/bluetooth/hidp/core.c    |   5
-rw-r--r--  net/bluetooth/rfcomm/sock.c  |   7
-rw-r--r--  net/bluetooth/rfcomm/tty.c   |   2
-rw-r--r--  net/core/dev.c               |   2
-rw-r--r--  net/core/skbuff.c            |  17
-rw-r--r--  net/core/sock.c              |  11
-rw-r--r--  net/decnet/af_decnet.c       |  10
-rw-r--r--  net/decnet/dn_nsp_out.c      |   3
-rw-r--r--  net/ipv4/icmp.c              |   3
-rw-r--r--  net/ipv4/igmp.c              |  96
-rw-r--r--  net/ipv4/ip_sockglue.c       |   6
-rw-r--r--  net/ipv4/tcp.c               |   8
-rw-r--r--  net/ipv4/tcp_input.c         |  11
-rw-r--r--  net/ipv4/tcp_output.c        |   2
-rw-r--r--  net/ipv4/tcp_timer.c         |   5
-rw-r--r--  net/ipv6/mcast.c             |  29
-rw-r--r--  net/irda/irlap.c             |   3
-rw-r--r--  net/irda/irlap_event.c       |  14
-rw-r--r--  net/irda/irlap_frame.c       |   8
-rw-r--r--  net/irda/irttp.c             |   2
-rw-r--r--  net/llc/llc_c_ev.c           |   2
-rw-r--r--  net/netlink/af_netlink.c     |   2
-rw-r--r--  net/sched/sch_red.c          |   2
-rw-r--r--  net/sctp/associola.c         |   2
-rw-r--r--  net/sctp/input.c             |  26
-rw-r--r--  net/sctp/inqueue.c           |  18
-rw-r--r--  net/sctp/output.c            |  22
-rw-r--r--  net/sctp/outqueue.c          |  50
-rw-r--r--  net/sctp/sm_make_chunk.c     |  12
-rw-r--r--  net/sctp/socket.c            |   2
-rw-r--r--  net/sunrpc/xprt.c            |   2
-rw-r--r--  net/unix/af_unix.c           |   4
33 files changed, 224 insertions, 170 deletions
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 2e341de3e763..901eff7ebe74 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -213,7 +213,7 @@ static int cmtp_send_frame(struct cmtp_session *session, unsigned char *data, in
 	return kernel_sendmsg(sock, &msg, &iv, 1, len);
 }
 
-static int cmtp_process_transmit(struct cmtp_session *session)
+static void cmtp_process_transmit(struct cmtp_session *session)
 {
 	struct sk_buff *skb, *nskb;
 	unsigned char *hdr;
@@ -223,7 +223,7 @@ static int cmtp_process_transmit(struct cmtp_session *session)
 
 	if (!(nskb = alloc_skb(session->mtu, GFP_ATOMIC))) {
 		BT_ERR("Can't allocate memory for new frame");
-		return -ENOMEM;
+		return;
 	}
 
 	while ((skb = skb_dequeue(&session->transmit))) {
@@ -275,8 +275,6 @@ static int cmtp_process_transmit(struct cmtp_session *session)
 	cmtp_send_frame(session, nskb->data, nskb->len);
 
 	kfree_skb(nskb);
-
-	return skb_queue_len(&session->transmit);
 }
 
 static int cmtp_session(void *arg)
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index affbc55462e8..de8af5f42394 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -428,7 +428,7 @@ static int hidp_send_frame(struct socket *sock, unsigned char *data, int len)
 	return kernel_sendmsg(sock, &msg, &iv, 1, len);
 }
 
-static int hidp_process_transmit(struct hidp_session *session)
+static void hidp_process_transmit(struct hidp_session *session)
 {
 	struct sk_buff *skb;
 
@@ -453,9 +453,6 @@ static int hidp_process_transmit(struct hidp_session *session)
 		hidp_set_timer(session);
 		kfree_skb(skb);
 	}
-
-	return skb_queue_len(&session->ctrl_transmit) +
-				skb_queue_len(&session->intr_transmit);
 }
 
 static int hidp_session(void *arg)
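
Both Bluetooth hunks above change the process_transmit helpers from int to void. The queue length they used to return was sampled after the transmit queue had already been unlocked, so it could be stale by the time the session thread looked at it, and the callers ignore it anyway. A condensed sketch of the calling pattern (hypothetical names, not the exact kernel loop):

    /* Sketch: the session kthread drains the queue and sleeps; newly
     * queued data re-wakes it via the socket callbacks, so a "packets
     * left" return from the drain step carries no reliable information. */
    static int example_session_thread(void *arg)
    {
            struct example_session *session = arg;

            while (!atomic_read(&session->terminate)) {
                    example_process_transmit(session);  /* now returns void */
                    schedule();
            }
            return 0;
    }
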
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index f3f6355a2786..63a123c5c41b 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -590,8 +590,11 @@ static long rfcomm_sock_data_wait(struct sock *sk, long timeo)
 	for (;;) {
 		set_current_state(TASK_INTERRUPTIBLE);
 
-		if (skb_queue_len(&sk->sk_receive_queue) || sk->sk_err || (sk->sk_shutdown & RCV_SHUTDOWN) ||
-				signal_pending(current) || !timeo)
+		if (!skb_queue_empty(&sk->sk_receive_queue) ||
+		    sk->sk_err ||
+		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
+		    signal_pending(current) ||
+		    !timeo)
 			break;
 
 		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 6d689200bcf3..6304590fd36a 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -781,7 +781,7 @@ static int rfcomm_tty_chars_in_buffer(struct tty_struct *tty)
 
 	BT_DBG("tty %p dev %p", tty, dev);
 
-	if (skb_queue_len(&dlc->tx_queue))
+	if (!skb_queue_empty(&dlc->tx_queue))
 		return dlc->mtu;
 
 	return 0;
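
This hunk and most of the ones that follow replace boolean uses of skb_queue_len() with skb_queue_empty(). The two are functionally equivalent here; the emptiness test states the intent directly and compiles to a single pointer comparison instead of a counter read. Roughly, from include/linux/skbuff.h of this era:

    /* An sk_buff_head is circular: an empty queue points at itself. */
    static inline int skb_queue_empty(const struct sk_buff_head *list)
    {
            return list->next == (struct sk_buff *)list;
    }

    static inline __u32 skb_queue_len(const struct sk_buff_head *list)
    {
            return list->qlen;
    }
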
diff --git a/net/core/dev.c b/net/core/dev.c
index 7f5f62c65115..ff9dc029233a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1127,7 +1127,7 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
 extern void skb_release_data(struct sk_buff *);
 
 /* Keep head the same: replace data */
-int __skb_linearize(struct sk_buff *skb, int gfp_mask)
+int __skb_linearize(struct sk_buff *skb, unsigned int __nocast gfp_mask)
 {
 	unsigned int size;
 	u8 *data;
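
Here and in the skbuff.c, sock.c and tcp_output.c hunks below, allocator arguments gain an `unsigned int __nocast gfp_mask` type. __nocast is a sparse annotation: when the code is checked with sparse, implicit conversions into the annotated type are flagged, catching callers that pass some unrelated integer where an allocation mask belongs. Under a plain compiler it expands to nothing, along these lines (sketch of the include/linux/compiler.h definition; these annotations were later superseded by the dedicated gfp_t type):

    #ifdef __CHECKER__                      /* defined when sparse runs */
    # define __nocast       __attribute__((nocast))
    #else
    # define __nocast
    #endif

    /* sparse now warns if a caller mixes a plain int into gfp_mask */
    struct sk_buff *alloc_skb(unsigned int size, unsigned int __nocast gfp_mask);
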
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 733deee24b9f..d9f7b06fe886 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -129,7 +129,7 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
  *	Buffers may only be allocated from interrupts using a @gfp_mask of
  *	%GFP_ATOMIC.
  */
-struct sk_buff *alloc_skb(unsigned int size, int gfp_mask)
+struct sk_buff *alloc_skb(unsigned int size, unsigned int __nocast gfp_mask)
 {
 	struct sk_buff *skb;
 	u8 *data;
@@ -182,7 +182,8 @@ nodata:
  *	%GFP_ATOMIC.
  */
 struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
-				     unsigned int size, int gfp_mask)
+				     unsigned int size,
+				     unsigned int __nocast gfp_mask)
 {
 	struct sk_buff *skb;
 	u8 *data;
@@ -322,7 +323,7 @@ void __kfree_skb(struct sk_buff *skb)
  *	%GFP_ATOMIC.
  */
 
-struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask)
+struct sk_buff *skb_clone(struct sk_buff *skb, unsigned int __nocast gfp_mask)
 {
 	struct sk_buff *n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
 
@@ -460,7 +461,7 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
  *	header is going to be modified. Use pskb_copy() instead.
  */
 
-struct sk_buff *skb_copy(const struct sk_buff *skb, int gfp_mask)
+struct sk_buff *skb_copy(const struct sk_buff *skb, unsigned int __nocast gfp_mask)
 {
 	int headerlen = skb->data - skb->head;
 	/*
@@ -499,7 +500,7 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, int gfp_mask)
  *	The returned buffer has a reference count of 1.
  */
 
-struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask)
+struct sk_buff *pskb_copy(struct sk_buff *skb, unsigned int __nocast gfp_mask)
 {
 	/*
 	 *	Allocate the copy buffer
@@ -557,7 +558,8 @@ out:
  *	reloaded after call to this function.
  */
 
-int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask)
+int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
+		     unsigned int __nocast gfp_mask)
 {
 	int i;
 	u8 *data;
@@ -647,7 +649,8 @@ struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
  *	only by netfilter in the cases when checksum is recalculated? --ANK
  */
 struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
-				int newheadroom, int newtailroom, int gfp_mask)
+				int newheadroom, int newtailroom,
+				unsigned int __nocast gfp_mask)
 {
 	/*
 	 *	Allocate the copy buffer
diff --git a/net/core/sock.c b/net/core/sock.c
index a6ec3ada7f9e..8b35ccdc2b3b 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -622,7 +622,8 @@ lenout:
  *	@prot: struct proto associated with this new sock instance
  *	@zero_it: if we should zero the newly allocated sock
  */
-struct sock *sk_alloc(int family, int priority, struct proto *prot, int zero_it)
+struct sock *sk_alloc(int family, unsigned int __nocast priority,
+		      struct proto *prot, int zero_it)
 {
 	struct sock *sk = NULL;
 	kmem_cache_t *slab = prot->slab;
@@ -750,7 +751,8 @@ unsigned long sock_i_ino(struct sock *sk)
 /*
  * Allocate a skb from the socket's send buffer.
  */
-struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, int priority)
+struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
+			     unsigned int __nocast priority)
 {
 	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
 		struct sk_buff * skb = alloc_skb(size, priority);
@@ -765,7 +767,8 @@ struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, int
 /*
  * Allocate a skb from the socket's receive buffer.
  */
-struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, int priority)
+struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
+			     unsigned int __nocast priority)
 {
 	if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
 		struct sk_buff *skb = alloc_skb(size, priority);
@@ -780,7 +783,7 @@ struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, int
 /*
  * Allocate a memory block from the socket's option memory buffer.
  */
-void *sock_kmalloc(struct sock *sk, int size, int priority)
+void *sock_kmalloc(struct sock *sk, int size, unsigned int __nocast priority)
 {
 	if ((unsigned)size <= sysctl_optmem_max &&
 	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 29bb3cd21965..96a02800cd28 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -536,7 +536,7 @@ static void dn_keepalive(struct sock *sk)
 	 * we are double checking that we are not sending too
 	 * many of these keepalive frames.
 	 */
-	if (skb_queue_len(&scp->other_xmit_queue) == 0)
+	if (skb_queue_empty(&scp->other_xmit_queue))
 		dn_nsp_send_link(sk, DN_NOCHANGE, 0);
 }
 
@@ -1191,7 +1191,7 @@ static unsigned int dn_poll(struct file *file, struct socket *sock, poll_table
 	struct dn_scp *scp = DN_SK(sk);
 	int mask = datagram_poll(file, sock, wait);
 
-	if (skb_queue_len(&scp->other_receive_queue))
+	if (!skb_queue_empty(&scp->other_receive_queue))
 		mask |= POLLRDBAND;
 
 	return mask;
@@ -1214,7 +1214,7 @@ static int dn_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 
 	case SIOCATMARK:
 		lock_sock(sk);
-		val = (skb_queue_len(&scp->other_receive_queue) != 0);
+		val = !skb_queue_empty(&scp->other_receive_queue);
 		if (scp->state != DN_RUN)
 			val = -ENOTCONN;
 		release_sock(sk);
@@ -1630,7 +1630,7 @@ static int dn_data_ready(struct sock *sk, struct sk_buff_head *q, int flags, int
 	int len = 0;
 
 	if (flags & MSG_OOB)
-		return skb_queue_len(q) ? 1 : 0;
+		return !skb_queue_empty(q) ? 1 : 0;
 
 	while(skb != (struct sk_buff *)q) {
 		struct dn_skb_cb *cb = DN_SKB_CB(skb);
@@ -1707,7 +1707,7 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
 	if (sk->sk_err)
 		goto out;
 
-	if (skb_queue_len(&scp->other_receive_queue)) {
+	if (!skb_queue_empty(&scp->other_receive_queue)) {
 		if (!(flags & MSG_OOB)) {
 			msg->msg_flags |= MSG_OOB;
 			if (!scp->other_report) {
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c
index 42abbf3f524f..8cce1fdbda90 100644
--- a/net/decnet/dn_nsp_out.c
+++ b/net/decnet/dn_nsp_out.c
@@ -342,7 +342,8 @@ int dn_nsp_xmit_timeout(struct sock *sk)
 
 	dn_nsp_output(sk);
 
-	if (skb_queue_len(&scp->data_xmit_queue) || skb_queue_len(&scp->other_xmit_queue))
+	if (!skb_queue_empty(&scp->data_xmit_queue) ||
+	    !skb_queue_empty(&scp->other_xmit_queue))
 		scp->persist = dn_nsp_persist(sk);
 
 	return 0;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index cb759484979d..279f57abfecb 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -970,7 +970,8 @@ int icmp_rcv(struct sk_buff *skb)
 	 *	RFC 1122: 3.2.2.8 An ICMP_TIMESTAMP MAY be silently
 	 *	discarded if to broadcast/multicast.
 	 */
-	if (icmph->type == ICMP_ECHO &&
+	if ((icmph->type == ICMP_ECHO ||
+	     icmph->type == ICMP_TIMESTAMP) &&
 	    sysctl_icmp_echo_ignore_broadcasts) {
 		goto error;
 	}
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 1f3183168a90..5088f90835ae 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1615,9 +1615,10 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
 {
 	int err;
 	u32 addr = imr->imr_multiaddr.s_addr;
-	struct ip_mc_socklist *iml, *i;
+	struct ip_mc_socklist *iml=NULL, *i;
 	struct in_device *in_dev;
 	struct inet_sock *inet = inet_sk(sk);
+	int ifindex;
 	int count = 0;
 
 	if (!MULTICAST(addr))
@@ -1633,37 +1634,30 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
 		goto done;
 	}
 
-	iml = (struct ip_mc_socklist *)sock_kmalloc(sk, sizeof(*iml), GFP_KERNEL);
-
 	err = -EADDRINUSE;
+	ifindex = imr->imr_ifindex;
 	for (i = inet->mc_list; i; i = i->next) {
-		if (memcmp(&i->multi, imr, sizeof(*imr)) == 0) {
-			/* New style additions are reference counted */
-			if (imr->imr_address.s_addr == 0) {
-				i->count++;
-				err = 0;
-			}
+		if (i->multi.imr_multiaddr.s_addr == addr &&
+		    i->multi.imr_ifindex == ifindex)
 			goto done;
-		}
 		count++;
 	}
 	err = -ENOBUFS;
-	if (iml == NULL || count >= sysctl_igmp_max_memberships)
+	if (count >= sysctl_igmp_max_memberships)
+		goto done;
+	iml = (struct ip_mc_socklist *)sock_kmalloc(sk,sizeof(*iml),GFP_KERNEL);
+	if (iml == NULL)
 		goto done;
+
 	memcpy(&iml->multi, imr, sizeof(*imr));
 	iml->next = inet->mc_list;
-	iml->count = 1;
 	iml->sflist = NULL;
 	iml->sfmode = MCAST_EXCLUDE;
 	inet->mc_list = iml;
 	ip_mc_inc_group(in_dev, addr);
-	iml = NULL;
 	err = 0;
-
 done:
 	rtnl_shunlock();
-	if (iml)
-		sock_kfree_s(sk, iml, sizeof(*iml));
 	return err;
 }
 
@@ -1693,30 +1687,25 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
 {
 	struct inet_sock *inet = inet_sk(sk);
 	struct ip_mc_socklist *iml, **imlp;
+	struct in_device *in_dev;
+	u32 group = imr->imr_multiaddr.s_addr;
+	u32 ifindex;
 
 	rtnl_lock();
+	in_dev = ip_mc_find_dev(imr);
+	if (!in_dev) {
+		rtnl_unlock();
+		return -ENODEV;
+	}
+	ifindex = imr->imr_ifindex;
 	for (imlp = &inet->mc_list; (iml = *imlp) != NULL; imlp = &iml->next) {
-		if (iml->multi.imr_multiaddr.s_addr==imr->imr_multiaddr.s_addr &&
-		    iml->multi.imr_address.s_addr==imr->imr_address.s_addr &&
-		    (!imr->imr_ifindex || iml->multi.imr_ifindex==imr->imr_ifindex)) {
-			struct in_device *in_dev;
-
-			in_dev = inetdev_by_index(iml->multi.imr_ifindex);
-			if (in_dev)
-				(void) ip_mc_leave_src(sk, iml, in_dev);
-			if (--iml->count) {
-				rtnl_unlock();
-				if (in_dev)
-					in_dev_put(in_dev);
-				return 0;
-			}
+		if (iml->multi.imr_multiaddr.s_addr == group &&
+		    iml->multi.imr_ifindex == ifindex) {
+			(void) ip_mc_leave_src(sk, iml, in_dev);
 
 			*imlp = iml->next;
 
-			if (in_dev) {
-				ip_mc_dec_group(in_dev, imr->imr_multiaddr.s_addr);
-				in_dev_put(in_dev);
-			}
+			ip_mc_dec_group(in_dev, group);
 			rtnl_unlock();
 			sock_kfree_s(sk, iml, sizeof(*iml));
 			return 0;
@@ -1736,6 +1725,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
 	struct in_device *in_dev = NULL;
 	struct inet_sock *inet = inet_sk(sk);
 	struct ip_sf_socklist *psl;
+	int leavegroup = 0;
 	int i, j, rv;
 
 	if (!MULTICAST(addr))
@@ -1755,15 +1745,20 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
 	err = -EADDRNOTAVAIL;
 
 	for (pmc=inet->mc_list; pmc; pmc=pmc->next) {
-		if (memcmp(&pmc->multi, mreqs, 2*sizeof(__u32)) == 0)
+		if (pmc->multi.imr_multiaddr.s_addr == imr.imr_multiaddr.s_addr
+		    && pmc->multi.imr_ifindex == imr.imr_ifindex)
 			break;
 	}
-	if (!pmc)		/* must have a prior join */
+	if (!pmc) {		/* must have a prior join */
+		err = -EINVAL;
 		goto done;
+	}
 	/* if a source filter was set, must be the same mode as before */
 	if (pmc->sflist) {
-		if (pmc->sfmode != omode)
+		if (pmc->sfmode != omode) {
+			err = -EINVAL;
 			goto done;
+		}
 	} else if (pmc->sfmode != omode) {
 		/* allow mode switches for empty-set filters */
 		ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 0, NULL, 0);
@@ -1775,7 +1770,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
 	psl = pmc->sflist;
 	if (!add) {
 		if (!psl)
-			goto done;
+			goto done;	/* err = -EADDRNOTAVAIL */
 		rv = !0;
 		for (i=0; i<psl->sl_count; i++) {
 			rv = memcmp(&psl->sl_addr[i], &mreqs->imr_sourceaddr,
@@ -1784,7 +1779,13 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
 				break;
 		}
 		if (rv)	/* source not found */
+			goto done;	/* err = -EADDRNOTAVAIL */
+
+		/* special case - (INCLUDE, empty) == LEAVE_GROUP */
+		if (psl->sl_count == 1 && omode == MCAST_INCLUDE) {
+			leavegroup = 1;
 			goto done;
+		}
 
 		/* update the interface filter */
 		ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, omode, 1,
@@ -1842,18 +1843,21 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
 			&mreqs->imr_sourceaddr, 1);
 done:
 	rtnl_shunlock();
+	if (leavegroup)
+		return ip_mc_leave_group(sk, &imr);
 	return err;
 }
 
 int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
 {
-	int err;
+	int err = 0;
 	struct ip_mreqn imr;
 	u32 addr = msf->imsf_multiaddr;
 	struct ip_mc_socklist *pmc;
 	struct in_device *in_dev;
 	struct inet_sock *inet = inet_sk(sk);
 	struct ip_sf_socklist *newpsl, *psl;
+	int leavegroup = 0;
 
 	if (!MULTICAST(addr))
 		return -EINVAL;
@@ -1872,15 +1876,22 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
 		err = -ENODEV;
 		goto done;
 	}
-	err = -EADDRNOTAVAIL;
+
+	/* special case - (INCLUDE, empty) == LEAVE_GROUP */
+	if (msf->imsf_fmode == MCAST_INCLUDE && msf->imsf_numsrc == 0) {
+		leavegroup = 1;
+		goto done;
+	}
 
 	for (pmc=inet->mc_list; pmc; pmc=pmc->next) {
 		if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
 		    pmc->multi.imr_ifindex == imr.imr_ifindex)
 			break;
 	}
-	if (!pmc)		/* must have a prior join */
+	if (!pmc) {		/* must have a prior join */
+		err = -EINVAL;
 		goto done;
+	}
 	if (msf->imsf_numsrc) {
 		newpsl = (struct ip_sf_socklist *)sock_kmalloc(sk,
 				IP_SFLSIZE(msf->imsf_numsrc), GFP_KERNEL);
@@ -1909,8 +1920,11 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
 			0, NULL, 0);
 	pmc->sflist = newpsl;
 	pmc->sfmode = msf->imsf_fmode;
+	err = 0;
 done:
 	rtnl_shunlock();
+	if (leavegroup)
+		err = ip_mc_leave_group(sk, &imr);
 	return err;
 }
 
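
The leavegroup handling added in the two igmp.c functions above implements the RFC 3376 rule that an INCLUDE-mode source filter with an empty source list is indistinguishable from having left the group, so the kernel now converts that state into an actual leave. Seen from user space through the IP_MSFILTER option, a minimal sketch (error handling elided, names hypothetical):

    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <string.h>

    /* Installing an empty INCLUDE filter now behaves as a group leave. */
    static void leave_via_empty_filter(int fd, struct in_addr group,
                                       struct in_addr iface)
    {
            struct ip_msfilter msf;

            memset(&msf, 0, sizeof(msf));
            msf.imsf_multiaddr = group;
            msf.imsf_interface = iface;
            msf.imsf_fmode = MCAST_INCLUDE;
            msf.imsf_numsrc = 0;    /* (INCLUDE, empty) == LEAVE_GROUP */
            setsockopt(fd, IPPROTO_IP, IP_MSFILTER, &msf,
                       IP_MSFILTER_SIZE(0));
    }
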
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index f8b172f89811..fc7c481d0d79 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -677,11 +677,11 @@ int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
 			mreq.imr_address.s_addr = mreqs.imr_interface;
 			mreq.imr_ifindex = 0;
 			err = ip_mc_join_group(sk, &mreq);
-			if (err)
+			if (err && err != -EADDRINUSE)
 				break;
 			omode = MCAST_INCLUDE;
 			add = 1;
-		} else /*IP_DROP_SOURCE_MEMBERSHIP */ {
+		} else /* IP_DROP_SOURCE_MEMBERSHIP */ {
 			omode = MCAST_INCLUDE;
 			add = 0;
 		}
@@ -754,7 +754,7 @@ int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
 			mreq.imr_address.s_addr = 0;
 			mreq.imr_ifindex = greqs.gsr_interface;
 			err = ip_mc_join_group(sk, &mreq);
-			if (err)
+			if (err && err != -EADDRINUSE)
 				break;
 			greqs.gsr_interface = mreq.imr_ifindex;
 			omode = MCAST_INCLUDE;
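
The two -EADDRINUSE exemptions above exist because the source-specific join options (IP_ADD_SOURCE_MEMBERSHIP, MCAST_JOIN_SOURCE_GROUP) implicitly join the group before adding the source. With the stricter ip_mc_join_group() above, adding a second source to the same group makes that implicit join report -EADDRINUSE, which must not abort adding the new source. A user-space illustration (hypothetical addresses, error handling elided):

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <string.h>

    /* Add two sources for one group: the implicit group join behind the
     * second call hits the existing membership (-EADDRINUSE inside the
     * kernel) and is now ignored, so both sources land in the filter. */
    static void join_two_sources(int fd)
    {
            struct ip_mreq_source mreqs;

            memset(&mreqs, 0, sizeof(mreqs));
            inet_pton(AF_INET, "232.1.1.1", &mreqs.imr_multiaddr);
            inet_pton(AF_INET, "192.0.2.1", &mreqs.imr_sourceaddr);
            setsockopt(fd, IPPROTO_IP, IP_ADD_SOURCE_MEMBERSHIP,
                       &mreqs, sizeof(mreqs));

            inet_pton(AF_INET, "192.0.2.2", &mreqs.imr_sourceaddr);
            setsockopt(fd, IPPROTO_IP, IP_ADD_SOURCE_MEMBERSHIP,
                       &mreqs, sizeof(mreqs));
    }
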
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 29894c749163..ddb6ce4ecff2 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1105,7 +1105,7 @@ static void tcp_prequeue_process(struct sock *sk)
 	struct sk_buff *skb;
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	NET_ADD_STATS_USER(LINUX_MIB_TCPPREQUEUED, skb_queue_len(&tp->ucopy.prequeue));
+	NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);
 
 	/* RX process wants to run with disabled BHs, though it is not
 	 * necessary */
@@ -1369,7 +1369,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 			 * is not empty. It is more elegant, but eats cycles,
 			 * unfortunately.
 			 */
-			if (skb_queue_len(&tp->ucopy.prequeue))
+			if (!skb_queue_empty(&tp->ucopy.prequeue))
 				goto do_prequeue;
 
 			/* __ Set realtime policy in scheduler __ */
@@ -1394,7 +1394,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		}
 
 		if (tp->rcv_nxt == tp->copied_seq &&
-		    skb_queue_len(&tp->ucopy.prequeue)) {
+		    !skb_queue_empty(&tp->ucopy.prequeue)) {
 do_prequeue:
 			tcp_prequeue_process(sk);
 
@@ -1476,7 +1476,7 @@ skip_copy:
 	} while (len > 0);
 
 	if (user_recv) {
-		if (skb_queue_len(&tp->ucopy.prequeue)) {
+		if (!skb_queue_empty(&tp->ucopy.prequeue)) {
 			int chunk;
 
 			tp->ucopy.len = copied > 0 ? len : 0;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 8de2f1071c2b..53a8a5399f1e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2802,7 +2802,7 @@ static void tcp_sack_remove(struct tcp_sock *tp)
 	int this_sack;
 
 	/* Empty ofo queue, hence, all the SACKs are eaten. Clear. */
-	if (skb_queue_len(&tp->out_of_order_queue) == 0) {
+	if (skb_queue_empty(&tp->out_of_order_queue)) {
 		tp->rx_opt.num_sacks = 0;
 		tp->rx_opt.eff_sacks = tp->rx_opt.dsack;
 		return;
@@ -2935,13 +2935,13 @@ queue_and_out:
 		if(th->fin)
 			tcp_fin(skb, sk, th);
 
-		if (skb_queue_len(&tp->out_of_order_queue)) {
+		if (!skb_queue_empty(&tp->out_of_order_queue)) {
 			tcp_ofo_queue(sk);
 
 			/* RFC2581. 4.2. SHOULD send immediate ACK, when
 			 * gap in queue is filled.
 			 */
-			if (!skb_queue_len(&tp->out_of_order_queue))
+			if (skb_queue_empty(&tp->out_of_order_queue))
 				tp->ack.pingpong = 0;
 		}
 
@@ -3249,9 +3249,8 @@ static int tcp_prune_queue(struct sock *sk)
 	 * This must not ever occur. */
 
 	/* First, purge the out_of_order queue. */
-	if (skb_queue_len(&tp->out_of_order_queue)) {
-		NET_ADD_STATS_BH(LINUX_MIB_OFOPRUNED,
-				 skb_queue_len(&tp->out_of_order_queue));
+	if (!skb_queue_empty(&tp->out_of_order_queue)) {
+		NET_INC_STATS_BH(LINUX_MIB_OFOPRUNED);
 		__skb_queue_purge(&tp->out_of_order_queue);
 
 		/* Reset SACK state.  A conforming SACK implementation will
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index e041d057ec86..e3f8ea1bfa9c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1613,7 +1613,7 @@ void tcp_send_fin(struct sock *sk)
  * was unread data in the receive queue.  This behavior is recommended
  * by draft-ietf-tcpimpl-prob-03.txt section 3.10.  -DaveM
  */
-void tcp_send_active_reset(struct sock *sk, int priority)
+void tcp_send_active_reset(struct sock *sk, unsigned int __nocast priority)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index b127b4498565..0084227438c2 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -231,11 +231,10 @@ static void tcp_delack_timer(unsigned long data)
 	}
 	tp->ack.pending &= ~TCP_ACK_TIMER;
 
-	if (skb_queue_len(&tp->ucopy.prequeue)) {
+	if (!skb_queue_empty(&tp->ucopy.prequeue)) {
 		struct sk_buff *skb;
 
-		NET_ADD_STATS_BH(LINUX_MIB_TCPSCHEDULERFAILED,
-				 skb_queue_len(&tp->ucopy.prequeue));
+		NET_INC_STATS_BH(LINUX_MIB_TCPSCHEDULERFAILED);
 
 		while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
 			sk->sk_backlog_rcv(sk, skb);
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 562fcd14fdea..29fed6e58d0a 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -281,7 +281,7 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, struct in6_addr *addr)
 	}
 	write_unlock_bh(&ipv6_sk_mc_lock);
 
-	return -ENOENT;
+	return -EADDRNOTAVAIL;
 }
 
 static struct inet6_dev *ip6_mc_find_dev(struct in6_addr *group, int ifindex)
@@ -386,12 +386,16 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
 		if (ipv6_addr_equal(&pmc->addr, group))
 			break;
 	}
-	if (!pmc)		/* must have a prior join */
+	if (!pmc) {		/* must have a prior join */
+		err = -EINVAL;
 		goto done;
+	}
 	/* if a source filter was set, must be the same mode as before */
 	if (pmc->sflist) {
-		if (pmc->sfmode != omode)
+		if (pmc->sfmode != omode) {
+			err = -EINVAL;
 			goto done;
+		}
 	} else if (pmc->sfmode != omode) {
 		/* allow mode switches for empty-set filters */
 		ip6_mc_add_src(idev, group, omode, 0, NULL, 0);
@@ -402,7 +406,7 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
 	psl = pmc->sflist;
 	if (!add) {
 		if (!psl)
-			goto done;
+			goto done;	/* err = -EADDRNOTAVAIL */
 		rv = !0;
 		for (i=0; i<psl->sl_count; i++) {
 			rv = memcmp(&psl->sl_addr[i], source,
@@ -411,7 +415,7 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
 				break;
 		}
 		if (rv)		/* source not found */
-			goto done;
+			goto done;	/* err = -EADDRNOTAVAIL */
 
 		/* special case - (INCLUDE, empty) == LEAVE_GROUP */
 		if (psl->sl_count == 1 && omode == MCAST_INCLUDE) {
@@ -488,6 +492,7 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
 	struct inet6_dev *idev;
 	struct ipv6_pinfo *inet6 = inet6_sk(sk);
 	struct ip6_sf_socklist *newpsl, *psl;
+	int leavegroup = 0;
 	int i, err;
 
 	group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;
@@ -503,7 +508,12 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
 	if (!idev)
 		return -ENODEV;
 	dev = idev->dev;
-	err = -EADDRNOTAVAIL;
+
+	err = 0;
+	if (gsf->gf_fmode == MCAST_INCLUDE && gsf->gf_numsrc == 0) {
+		leavegroup = 1;
+		goto done;
+	}
 
 	for (pmc=inet6->ipv6_mc_list; pmc; pmc=pmc->next) {
 		if (pmc->ifindex != gsf->gf_interface)
@@ -511,8 +521,10 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
 		if (ipv6_addr_equal(&pmc->addr, group))
 			break;
 	}
-	if (!pmc)		/* must have a prior join */
+	if (!pmc) {		/* must have a prior join */
+		err = -EINVAL;
 		goto done;
+	}
 	if (gsf->gf_numsrc) {
 		newpsl = (struct ip6_sf_socklist *)sock_kmalloc(sk,
 				IP6_SFLSIZE(gsf->gf_numsrc), GFP_ATOMIC);
@@ -544,10 +556,13 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
 	(void) ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
 	pmc->sflist = newpsl;
 	pmc->sfmode = gsf->gf_fmode;
+	err = 0;
done:
 	read_unlock_bh(&idev->lock);
 	in6_dev_put(idev);
 	dev_put(dev);
+	if (leavegroup)
+		err = ipv6_sock_mc_drop(sk, gsf->gf_interface, group);
 	return err;
 }
 
diff --git a/net/irda/irlap.c b/net/irda/irlap.c
index 046ad0750e48..7029618f5719 100644
--- a/net/irda/irlap.c
+++ b/net/irda/irlap.c
@@ -445,9 +445,8 @@ void irlap_disconnect_request(struct irlap_cb *self)
 	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 
 	/* Don't disconnect until all data frames are successfully sent */
-	if (skb_queue_len(&self->txq) > 0) {
+	if (!skb_queue_empty(&self->txq)) {
 		self->disconnect_pending = TRUE;
-
 		return;
 	}
 
diff --git a/net/irda/irlap_event.c b/net/irda/irlap_event.c
index 1cd89f5f3b75..a505b5457608 100644
--- a/net/irda/irlap_event.c
+++ b/net/irda/irlap_event.c
@@ -191,7 +191,7 @@ static void irlap_start_poll_timer(struct irlap_cb *self, int timeout)
 	 * Send out the RR frames faster if our own transmit queue is empty, or
 	 * if the peer is busy. The effect is a much faster conversation
 	 */
-	if ((skb_queue_len(&self->txq) == 0) || (self->remote_busy)) {
+	if (skb_queue_empty(&self->txq) || self->remote_busy) {
 		if (self->fast_RR == TRUE) {
 			/*
 			 *  Assert that the fast poll timer has not reached the
@@ -263,7 +263,7 @@ void irlap_do_event(struct irlap_cb *self, IRLAP_EVENT event,
 	IRDA_DEBUG(2, "%s() : queue len = %d\n", __FUNCTION__,
 		   skb_queue_len(&self->txq));
 
-	if (skb_queue_len(&self->txq)) {
+	if (!skb_queue_empty(&self->txq)) {
 		/* Prevent race conditions with irlap_data_request() */
 		self->local_busy = TRUE;
 
@@ -1074,7 +1074,7 @@ static int irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event,
 #else /* CONFIG_IRDA_DYNAMIC_WINDOW */
 			/* Window has been adjusted for the max packet
 			 * size, so much simpler... - Jean II */
-			nextfit = (skb_queue_len(&self->txq) > 0);
+			nextfit = !skb_queue_empty(&self->txq);
 #endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
 			/*
 			 * Send data with poll bit cleared only if window > 1
@@ -1814,7 +1814,7 @@ static int irlap_state_xmit_s(struct irlap_cb *self, IRLAP_EVENT event,
 #else /* CONFIG_IRDA_DYNAMIC_WINDOW */
 			/* Window has been adjusted for the max packet
 			 * size, so much simpler... - Jean II */
-			nextfit = (skb_queue_len(&self->txq) > 0);
+			nextfit = !skb_queue_empty(&self->txq);
 #endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
 			/*
 			 * Send data with final bit cleared only if window > 1
@@ -1937,7 +1937,7 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
 			irlap_data_indication(self, skb, FALSE);
 
 			/* Any pending data requests? */
-			if ((skb_queue_len(&self->txq) > 0) &&
+			if (!skb_queue_empty(&self->txq) &&
 			    (self->window > 0))
 			{
 				self->ack_required = TRUE;
@@ -2038,7 +2038,7 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
 			/*
 			 *  Any pending data requests?
 			 */
-			if ((skb_queue_len(&self->txq) > 0) &&
+			if (!skb_queue_empty(&self->txq) &&
 			    (self->window > 0) && !self->remote_busy)
 			{
 				irlap_data_indication(self, skb, TRUE);
@@ -2069,7 +2069,7 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
 			 */
 			nr_status = irlap_validate_nr_received(self, info->nr);
 			if (nr_status == NR_EXPECTED) {
-				if ((skb_queue_len( &self->txq) > 0) &&
+				if (!skb_queue_empty(&self->txq) &&
 				    (self->window > 0)) {
 					self->remote_busy = FALSE;
 
diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c
index 040abe714aa3..6dafbb43b529 100644
--- a/net/irda/irlap_frame.c
+++ b/net/irda/irlap_frame.c
@@ -1018,11 +1018,10 @@ void irlap_resend_rejected_frames(struct irlap_cb *self, int command)
 	/*
 	 *  We can now fill the window with additional data frames
 	 */
-	while (skb_queue_len( &self->txq) > 0) {
+	while (!skb_queue_empty(&self->txq)) {
 
 		IRDA_DEBUG(0, "%s(), sending additional frames!\n", __FUNCTION__);
-		if ((skb_queue_len( &self->txq) > 0) &&
-		    (self->window > 0)) {
+		if (self->window > 0) {
 			skb = skb_dequeue( &self->txq);
 			IRDA_ASSERT(skb != NULL, return;);
 
@@ -1031,8 +1030,7 @@ void irlap_resend_rejected_frames(struct irlap_cb *self, int command)
 			 *  bit cleared
 			 */
 			if ((self->window > 1) &&
-			    skb_queue_len(&self->txq) > 0)
-			{
+			    !skb_queue_empty(&self->txq)) {
 				irlap_send_data_primary(self, skb);
 			} else {
 				irlap_send_data_primary_poll(self, skb);
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index d091ccf773b3..6602d901f8b1 100644
--- a/net/irda/irttp.c
+++ b/net/irda/irttp.c
@@ -1513,7 +1513,7 @@ int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata,
 	/*
 	 *  Check if there is still data segments in the transmit queue
 	 */
-	if (skb_queue_len(&self->tx_queue) > 0) {
+	if (!skb_queue_empty(&self->tx_queue)) {
 		if (priority == P_HIGH) {
 			/*
 			 *  No need to send the queued data, if we are
diff --git a/net/llc/llc_c_ev.c b/net/llc/llc_c_ev.c
index cd130c3b72bc..d5bdb53a348f 100644
--- a/net/llc/llc_c_ev.c
+++ b/net/llc/llc_c_ev.c
@@ -84,7 +84,7 @@ static u16 llc_util_nr_inside_tx_window(struct sock *sk, u8 nr)
 	if (llc->dev->flags & IFF_LOOPBACK)
 		goto out;
 	rc = 1;
-	if (!skb_queue_len(&llc->pdu_unack_q))
+	if (skb_queue_empty(&llc->pdu_unack_q))
 		goto out;
 	skb = skb_peek(&llc->pdu_unack_q);
 	pdu = llc_pdu_sn_hdr(skb);
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index fc456a7aaec3..3405fdf41b93 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -858,7 +858,7 @@ static inline void netlink_rcv_wake(struct sock *sk)
 {
 	struct netlink_sock *nlk = nlk_sk(sk);
 
-	if (!skb_queue_len(&sk->sk_receive_queue))
+	if (skb_queue_empty(&sk->sk_receive_queue))
 		clear_bit(0, &nlk->state);
 	if (!test_bit(0, &nlk->state))
 		wake_up_interruptible(&nlk->wait);
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 664d0e47374f..7845d045eec4 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -385,7 +385,7 @@ static int red_change(struct Qdisc *sch, struct rtattr *opt)
 	memcpy(q->Stab, RTA_DATA(tb[TCA_RED_STAB-1]), 256);
 
 	q->qcount = -1;
-	if (skb_queue_len(&sch->q) == 0)
+	if (skb_queue_empty(&sch->q))
 		PSCHED_SET_PASTPERFECT(q->qidlestart);
 	sch_tree_unlock(sch);
 	return 0;
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 7ae6aa772dab..4b47dd6f2485 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -203,7 +203,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
 	 */
 	asoc->addip_serial = asoc->c.initial_tsn;
 
-	skb_queue_head_init(&asoc->addip_chunks);
+	INIT_LIST_HEAD(&asoc->addip_chunk_list);
 
 	/* Make an empty list of remote transport addresses.  */
 	INIT_LIST_HEAD(&asoc->peer.transport_addr_list);
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 339f7acfdb64..5e085e041a6e 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -115,6 +115,17 @@ static void sctp_rcv_set_owner_r(struct sk_buff *skb, struct sock *sk)
 	atomic_add(sizeof(struct sctp_chunk),&sk->sk_rmem_alloc);
 }
 
+struct sctp_input_cb {
+	union {
+		struct inet_skb_parm	h4;
+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+		struct inet6_skb_parm	h6;
+#endif
+	} header;
+	struct sctp_chunk *chunk;
+};
+#define SCTP_INPUT_CB(__skb)	((struct sctp_input_cb *)&((__skb)->cb[0]))
+
 /*
  * This is the routine which IP calls when receiving an SCTP packet.
  */
@@ -243,6 +254,7 @@ int sctp_rcv(struct sk_buff *skb)
 		ret = -ENOMEM;
 		goto discard_release;
 	}
+	SCTP_INPUT_CB(skb)->chunk = chunk;
 
 	sctp_rcv_set_owner_r(skb,sk);
 
@@ -265,9 +277,9 @@ int sctp_rcv(struct sk_buff *skb)
 	sctp_bh_lock_sock(sk);
 
 	if (sock_owned_by_user(sk))
-		sk_add_backlog(sk, (struct sk_buff *) chunk);
+		sk_add_backlog(sk, skb);
 	else
-		sctp_backlog_rcv(sk, (struct sk_buff *) chunk);
+		sctp_backlog_rcv(sk, skb);
 
 	/* Release the sock and any reference counts we took in the
 	 * lookup calls.
@@ -302,14 +314,8 @@ discard_release:
  */
 int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 {
-	struct sctp_chunk *chunk;
-	struct sctp_inq *inqueue;
-
-	/* One day chunk will live inside the skb, but for
-	 * now this works.
-	 */
-	chunk = (struct sctp_chunk *) skb;
-	inqueue = &chunk->rcvr->inqueue;
+	struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
+	struct sctp_inq *inqueue = &chunk->rcvr->inqueue;
 
 	sctp_inq_push(inqueue, chunk);
 	return 0;
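
Storing the chunk pointer in skb->cb[] instead of casting the chunk itself to a struct sk_buff follows the standard control-block idiom (TCP does the same with TCP_SKB_CB): cb is scratch space owned by whichever layer currently holds the skb, accessed through a casting macro. Reduced to its essentials (illustrative names, not kernel code):

    /* A layer defines a struct that must fit in skb->cb and reads it
     * back through a cast; the size check guards against overflow. */
    struct example_cb {
            u32 sequence;
            void *owner;
    };
    #define EXAMPLE_CB(skb) ((struct example_cb *)&((skb)->cb[0]))

    static void example_mark(struct sk_buff *skb, u32 seq)
    {
            BUILD_BUG_ON(sizeof(struct example_cb) > sizeof(skb->cb));
            EXAMPLE_CB(skb)->sequence = seq;
    }
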
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index cedf4351556c..2d33922c044b 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -50,7 +50,7 @@
 /* Initialize an SCTP inqueue.  */
 void sctp_inq_init(struct sctp_inq *queue)
 {
-	skb_queue_head_init(&queue->in);
+	INIT_LIST_HEAD(&queue->in_chunk_list);
 	queue->in_progress = NULL;
 
 	/* Create a task for delivering data.  */
@@ -62,11 +62,13 @@ void sctp_inq_init(struct sctp_inq *queue)
 /* Release the memory associated with an SCTP inqueue.  */
 void sctp_inq_free(struct sctp_inq *queue)
 {
-	struct sctp_chunk *chunk;
+	struct sctp_chunk *chunk, *tmp;
 
 	/* Empty the queue.  */
-	while ((chunk = (struct sctp_chunk *) skb_dequeue(&queue->in)) != NULL)
+	list_for_each_entry_safe(chunk, tmp, &queue->in_chunk_list, list) {
+		list_del_init(&chunk->list);
 		sctp_chunk_free(chunk);
+	}
 
 	/* If there is a packet which is currently being worked on,
 	 * free it as well.
@@ -92,7 +94,7 @@ void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *packet)
 	 * Eventually, we should clean up inqueue to not rely
 	 * on the BH related data structures.
 	 */
-	skb_queue_tail(&(q->in), (struct sk_buff *) packet);
+	list_add_tail(&packet->list, &q->in_chunk_list);
 	q->immediate.func(q->immediate.data);
 }
 
@@ -131,12 +133,16 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
 
 	/* Do we need to take the next packet out of the queue to process? */
 	if (!chunk) {
+		struct list_head *entry;
+
 		/* Is the queue empty?  */
-		if (skb_queue_empty(&queue->in))
+		if (list_empty(&queue->in_chunk_list))
 			return NULL;
 
+		entry = queue->in_chunk_list.next;
 		chunk = queue->in_progress =
-			(struct sctp_chunk *) skb_dequeue(&queue->in);
+			list_entry(entry, struct sctp_chunk, list);
+		list_del_init(entry);
 
 		/* This is the first chunk in the packet.  */
 		chunk->singleton = 1;
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 84b5b370b09d..931371633464 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -108,7 +108,7 @@ struct sctp_packet *sctp_packet_init(struct sctp_packet *packet,
 	packet->transport = transport;
 	packet->source_port = sport;
 	packet->destination_port = dport;
-	skb_queue_head_init(&packet->chunks);
+	INIT_LIST_HEAD(&packet->chunk_list);
 	if (asoc) {
 		struct sctp_sock *sp = sctp_sk(asoc->base.sk);
 		overhead = sp->pf->af->net_header_len;
@@ -129,12 +129,14 @@ struct sctp_packet *sctp_packet_init(struct sctp_packet *packet,
 /* Free a packet.  */
 void sctp_packet_free(struct sctp_packet *packet)
 {
-	struct sctp_chunk *chunk;
+	struct sctp_chunk *chunk, *tmp;
 
 	SCTP_DEBUG_PRINTK("%s: packet:%p\n", __FUNCTION__, packet);
 
-	while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks)) != NULL)
+	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
+		list_del_init(&chunk->list);
 		sctp_chunk_free(chunk);
+	}
 
 	if (packet->malloced)
 		kfree(packet);
@@ -276,7 +278,7 @@ append:
 	packet->has_sack = 1;
 
 	/* It is OK to send this chunk.  */
-	__skb_queue_tail(&packet->chunks, (struct sk_buff *)chunk);
+	list_add_tail(&chunk->list, &packet->chunk_list);
 	packet->size += chunk_len;
 	chunk->transport = packet->transport;
finish:
@@ -295,7 +297,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
 	struct sctphdr *sh;
 	__u32 crc32;
 	struct sk_buff *nskb;
-	struct sctp_chunk *chunk;
+	struct sctp_chunk *chunk, *tmp;
 	struct sock *sk;
 	int err = 0;
 	int padding;		/* How much padding do we need?  */
@@ -305,11 +307,11 @@ int sctp_packet_transmit(struct sctp_packet *packet)
 	SCTP_DEBUG_PRINTK("%s: packet:%p\n", __FUNCTION__, packet);
 
 	/* Do NOT generate a chunkless packet. */
-	chunk = (struct sctp_chunk *)skb_peek(&packet->chunks);
-	if (unlikely(!chunk))
+	if (list_empty(&packet->chunk_list))
 		return err;
 
 	/* Set up convenience variables... */
+	chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list);
 	sk = chunk->skb->sk;
 
 	/* Allocate the new skb.  */
@@ -370,7 +372,8 @@ int sctp_packet_transmit(struct sctp_packet *packet)
 	 * [This whole comment explains WORD_ROUND() below.]
 	 */
 	SCTP_DEBUG_PRINTK("***sctp_transmit_packet***\n");
-	while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks)) != NULL) {
+	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
+		list_del_init(&chunk->list);
 		if (sctp_chunk_is_data(chunk)) {
 
 			if (!chunk->has_tsn) {
@@ -511,7 +514,8 @@ err:
 	 * will get resent or dropped later.
 	 */
 
-	while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks)) != NULL) {
+	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
+		list_del_init(&chunk->list);
 		if (!sctp_chunk_is_data(chunk))
 			sctp_chunk_free(chunk);
 	}
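
The sctp conversions in this series all follow one pattern: the chunk queues stop abusing sk_buff_head (which forced casts between struct sctp_chunk and struct sk_buff) and use a list_head embedded in the chunk instead. The _safe iteration variant is what keeps the drain loops above correct: it fetches the successor before the loop body runs, so the body may unlink and free the current entry. The bare shape of the idiom:

    /* Drain an embedded-list queue; 'tmp' holds the successor, so
     * deleting and freeing 'pos' mid-walk cannot break the traversal. */
    struct item {
            struct list_head list;
            /* payload ... */
    };

    static void drain(struct list_head *head)
    {
            struct item *pos, *tmp;

            list_for_each_entry_safe(pos, tmp, head, list) {
                    list_del_init(&pos->list);
                    kfree(pos);
            }
    }
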
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 4eb81a1407b7..efb72faba20c 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -75,7 +75,7 @@ static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);
75static inline void sctp_outq_head_data(struct sctp_outq *q, 75static inline void sctp_outq_head_data(struct sctp_outq *q,
76 struct sctp_chunk *ch) 76 struct sctp_chunk *ch)
77{ 77{
78 __skb_queue_head(&q->out, (struct sk_buff *)ch); 78 list_add(&ch->list, &q->out_chunk_list);
79 q->out_qlen += ch->skb->len; 79 q->out_qlen += ch->skb->len;
80 return; 80 return;
81} 81}
@@ -83,17 +83,22 @@ static inline void sctp_outq_head_data(struct sctp_outq *q,
83/* Take data from the front of the queue. */ 83/* Take data from the front of the queue. */
84static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q) 84static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q)
85{ 85{
86 struct sctp_chunk *ch; 86 struct sctp_chunk *ch = NULL;
87 ch = (struct sctp_chunk *)__skb_dequeue(&q->out); 87
88 if (ch) 88 if (!list_empty(&q->out_chunk_list)) {
89 struct list_head *entry = q->out_chunk_list.next;
90
91 ch = list_entry(entry, struct sctp_chunk, list);
92 list_del_init(entry);
89 q->out_qlen -= ch->skb->len; 93 q->out_qlen -= ch->skb->len;
94 }
90 return ch; 95 return ch;
91} 96}
92/* Add data chunk to the end of the queue. */ 97/* Add data chunk to the end of the queue. */
93static inline void sctp_outq_tail_data(struct sctp_outq *q, 98static inline void sctp_outq_tail_data(struct sctp_outq *q,
94 struct sctp_chunk *ch) 99 struct sctp_chunk *ch)
95{ 100{
96 __skb_queue_tail(&q->out, (struct sk_buff *)ch); 101 list_add_tail(&ch->list, &q->out_chunk_list);
97 q->out_qlen += ch->skb->len; 102 q->out_qlen += ch->skb->len;
98 return; 103 return;
99} 104}
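Note: the rewritten sctp_outq_dequeue_data() open-codes "pop front" (the list API of this vintage has no first-entry helper). q->out_qlen was always a hand-maintained byte count, since sk_buff_head's own qlen counted skbs rather than bytes, so only the unlink mechanics change here. The pop itself could be factored out; a hypothetical helper:

    /* Hypothetical: detach and return the first chunk, or NULL. */
    static inline struct sctp_chunk *chunk_list_pop(struct list_head *head)
    {
            struct list_head *entry;

            if (list_empty(head))
                    return NULL;
            entry = head->next;
            list_del_init(entry);
            return list_entry(entry, struct sctp_chunk, list);
    }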
@@ -197,8 +202,8 @@ static inline int sctp_cacc_skip(struct sctp_transport *primary,
197void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q) 202void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
198{ 203{
199 q->asoc = asoc; 204 q->asoc = asoc;
200 skb_queue_head_init(&q->out); 205 INIT_LIST_HEAD(&q->out_chunk_list);
201 skb_queue_head_init(&q->control); 206 INIT_LIST_HEAD(&q->control_chunk_list);
202 INIT_LIST_HEAD(&q->retransmit); 207 INIT_LIST_HEAD(&q->retransmit);
203 INIT_LIST_HEAD(&q->sacked); 208 INIT_LIST_HEAD(&q->sacked);
204 INIT_LIST_HEAD(&q->abandoned); 209 INIT_LIST_HEAD(&q->abandoned);
@@ -217,7 +222,7 @@ void sctp_outq_teardown(struct sctp_outq *q)
217{ 222{
218 struct sctp_transport *transport; 223 struct sctp_transport *transport;
219 struct list_head *lchunk, *pos, *temp; 224 struct list_head *lchunk, *pos, *temp;
220 struct sctp_chunk *chunk; 225 struct sctp_chunk *chunk, *tmp;
221 226
222 /* Throw away unacknowledged chunks. */ 227 /* Throw away unacknowledged chunks. */
223 list_for_each(pos, &q->asoc->peer.transport_addr_list) { 228 list_for_each(pos, &q->asoc->peer.transport_addr_list) {
@@ -269,8 +274,10 @@ void sctp_outq_teardown(struct sctp_outq *q)
269 q->error = 0; 274 q->error = 0;
270 275
271 /* Throw away any leftover control chunks. */ 276 /* Throw away any leftover control chunks. */
272 while ((chunk = (struct sctp_chunk *) skb_dequeue(&q->control)) != NULL) 277 list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
278 list_del_init(&chunk->list);
273 sctp_chunk_free(chunk); 279 sctp_chunk_free(chunk);
280 }
274} 281}
275 282
276/* Free the outqueue structure and any related pending chunks. */ 283/* Free the outqueue structure and any related pending chunks. */
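Note: the teardown drain above is equivalent to a while-pop loop. The patch standardizes on list_for_each_entry_safe() throughout, but either shape is correct so long as each node is unlinked before sctp_chunk_free() sees it:

    /* Equivalent drain, written as a while-pop: */
    while (!list_empty(&q->control_chunk_list)) {
            chunk = list_entry(q->control_chunk_list.next,
                               struct sctp_chunk, list);
            list_del_init(&chunk->list);
            sctp_chunk_free(chunk);
    }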
@@ -333,7 +340,7 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
333 break; 340 break;
334 }; 341 };
335 } else { 342 } else {
336 __skb_queue_tail(&q->control, (struct sk_buff *) chunk); 343 list_add_tail(&chunk->list, &q->control_chunk_list);
337 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); 344 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
338 } 345 }
339 346
@@ -650,10 +657,9 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
650 __u16 sport = asoc->base.bind_addr.port; 657 __u16 sport = asoc->base.bind_addr.port;
651 __u16 dport = asoc->peer.port; 658 __u16 dport = asoc->peer.port;
652 __u32 vtag = asoc->peer.i.init_tag; 659 __u32 vtag = asoc->peer.i.init_tag;
653 struct sk_buff_head *queue;
654 struct sctp_transport *transport = NULL; 660 struct sctp_transport *transport = NULL;
655 struct sctp_transport *new_transport; 661 struct sctp_transport *new_transport;
656 struct sctp_chunk *chunk; 662 struct sctp_chunk *chunk, *tmp;
657 sctp_xmit_t status; 663 sctp_xmit_t status;
658 int error = 0; 664 int error = 0;
659 int start_timer = 0; 665 int start_timer = 0;
@@ -675,8 +681,9 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
675 * ... 681 * ...
676 */ 682 */
677 683
678 queue = &q->control; 684 list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
679 while ((chunk = (struct sctp_chunk *)skb_dequeue(queue)) != NULL) { 685 list_del_init(&chunk->list);
686
680 /* Pick the right transport to use. */ 687 /* Pick the right transport to use. */
681 new_transport = chunk->transport; 688 new_transport = chunk->transport;
682 689
@@ -814,8 +821,6 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
814 821
815 /* Finally, transmit new packets. */ 822 /* Finally, transmit new packets. */
816 start_timer = 0; 823 start_timer = 0;
817 queue = &q->out;
818
819 while ((chunk = sctp_outq_dequeue_data(q)) != NULL) { 824 while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
820 /* RFC 2960 6.5 Every DATA chunk MUST carry a valid 825 /* RFC 2960 6.5 Every DATA chunk MUST carry a valid
821 * stream identifier. 826 * stream identifier.
@@ -1149,8 +1154,9 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
1149 /* See if all chunks are acked. 1154 /* See if all chunks are acked.
1150 * Make sure the empty queue handler will get run later. 1155 * Make sure the empty queue handler will get run later.
1151 */ 1156 */
1152 q->empty = skb_queue_empty(&q->out) && skb_queue_empty(&q->control) && 1157 q->empty = (list_empty(&q->out_chunk_list) &&
1153 list_empty(&q->retransmit); 1158 list_empty(&q->control_chunk_list) &&
1159 list_empty(&q->retransmit));
1154 if (!q->empty) 1160 if (!q->empty)
1155 goto finish; 1161 goto finish;
1156 1162
@@ -1679,9 +1685,9 @@ static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
1679 if (TSN_lte(tsn, ctsn)) { 1685 if (TSN_lte(tsn, ctsn)) {
1680 list_del_init(lchunk); 1686 list_del_init(lchunk);
1681 if (!chunk->tsn_gap_acked) { 1687 if (!chunk->tsn_gap_acked) {
1682 chunk->transport->flight_size -= 1688 chunk->transport->flight_size -=
1683 sctp_data_size(chunk); 1689 sctp_data_size(chunk);
1684 q->outstanding_bytes -= sctp_data_size(chunk); 1690 q->outstanding_bytes -= sctp_data_size(chunk);
1685 } 1691 }
1686 sctp_chunk_free(chunk); 1692 sctp_chunk_free(chunk);
1687 } else { 1693 } else {
@@ -1729,7 +1735,7 @@ static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
1729 nskips, &ftsn_skip_arr[0]); 1735 nskips, &ftsn_skip_arr[0]);
1730 1736
1731 if (ftsn_chunk) { 1737 if (ftsn_chunk) {
1732 __skb_queue_tail(&q->control, (struct sk_buff *)ftsn_chunk); 1738 list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
1733 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); 1739 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
1734 } 1740 }
1735} 1741}
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 5baed9bb7de5..773cd93fa3d0 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1003,6 +1003,7 @@ struct sctp_chunk *sctp_chunkify(struct sk_buff *skb,
1003 SCTP_DEBUG_PRINTK("chunkifying skb %p w/o an sk\n", skb); 1003 SCTP_DEBUG_PRINTK("chunkifying skb %p w/o an sk\n", skb);
1004 } 1004 }
1005 1005
1006 INIT_LIST_HEAD(&retval->list);
1006 retval->skb = skb; 1007 retval->skb = skb;
1007 retval->asoc = (struct sctp_association *)asoc; 1008 retval->asoc = (struct sctp_association *)asoc;
1008 retval->resent = 0; 1009 retval->resent = 0;
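Note: the INIT_LIST_HEAD(&retval->list) added at construction is what makes the new free-time assertion (next hunk) hold even for chunks that are never queued anywhere: an initialized node links to itself, and list_empty() on a self-linked node is true. Without it, chunk->list would hold uninitialized garbage and the BUG_ON below could fire spuriously.

    /* Sketch of the invariant being set up: */
    INIT_LIST_HEAD(&retval->list);       /* node points at itself  */
    BUG_ON(!list_empty(&retval->list));  /* hence this never fires */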
@@ -1116,8 +1117,7 @@ static void sctp_chunk_destroy(struct sctp_chunk *chunk)
1116/* Possibly, free the chunk. */ 1117/* Possibly, free the chunk. */
1117void sctp_chunk_free(struct sctp_chunk *chunk) 1118void sctp_chunk_free(struct sctp_chunk *chunk)
1118{ 1119{
1119 /* Make sure that we are not on any list. */ 1120 BUG_ON(!list_empty(&chunk->list));
1120 skb_unlink((struct sk_buff *) chunk);
1121 list_del_init(&chunk->transmitted_list); 1121 list_del_init(&chunk->transmitted_list);
1122 1122
1123 /* Release our reference on the message tracker. */ 1123 /* Release our reference on the message tracker. */
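Note: the semantic shift in sctp_chunk_free() is worth flagging. skb_unlink() used to quietly remove the chunk from whatever queue still held it; BUG_ON(!list_empty(&chunk->list)) instead demands that every caller has already unlinked it. That converts a silent fix-up into a loud invariant, and it is why every dequeue site in this patch uses list_del_init(), which leaves the node self-linked, rather than list_del(), which poisons the pointers and would trip the assert. The caller-side contract, as a hypothetical wrapper:

    static void sctp_chunk_unlink_and_free(struct sctp_chunk *chunk)
    {
            list_del_init(&chunk->list);    /* leaves node self-linked */
            sctp_chunk_free(chunk);         /* BUG_ON stays quiet      */
    }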
@@ -2739,8 +2739,12 @@ int sctp_process_asconf_ack(struct sctp_association *asoc,
2739 asoc->addip_last_asconf = NULL; 2739 asoc->addip_last_asconf = NULL;
2740 2740
2741 /* Send the next asconf chunk from the addip chunk queue. */ 2741 /* Send the next asconf chunk from the addip chunk queue. */
2742 asconf = (struct sctp_chunk *)__skb_dequeue(&asoc->addip_chunks); 2742 if (!list_empty(&asoc->addip_chunk_list)) {
2743 if (asconf) { 2743 struct list_head *entry = asoc->addip_chunk_list.next;
2744 asconf = list_entry(entry, struct sctp_chunk, list);
2745
2746 list_del_init(entry);
2747
2744 /* Hold the chunk until an ASCONF_ACK is received. */ 2748 /* Hold the chunk until an ASCONF_ACK is received. */
2745 sctp_chunk_hold(asconf); 2749 sctp_chunk_hold(asconf);
2746 if (sctp_primitive_ASCONF(asoc, asconf)) 2750 if (sctp_primitive_ASCONF(asoc, asconf))
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index aad55dc3792b..091a66f06a35 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -406,7 +406,7 @@ static int sctp_send_asconf(struct sctp_association *asoc,
406 * transmission. 406 * transmission.
407 */ 407 */
408 if (asoc->addip_last_asconf) { 408 if (asoc->addip_last_asconf) {
409 __skb_queue_tail(&asoc->addip_chunks, (struct sk_buff *)chunk); 409 list_add_tail(&chunk->list, &asoc->addip_chunk_list);
410 goto out; 410 goto out;
411 } 411 }
412 412
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 269f217918a3..3c654e06b084 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -145,8 +145,6 @@ __xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
145 if (test_and_set_bit(XPRT_LOCKED, &xprt->sockstate)) { 145 if (test_and_set_bit(XPRT_LOCKED, &xprt->sockstate)) {
146 if (task == xprt->snd_task) 146 if (task == xprt->snd_task)
147 return 1; 147 return 1;
148 if (task == NULL)
149 return 0;
150 goto out_sleep; 148 goto out_sleep;
151 } 149 }
152 if (xprt->nocong || __xprt_get_cong(xprt, task)) { 150 if (xprt->nocong || __xprt_get_cong(xprt, task)) {
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index c420eba4876b..d403e34088ad 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -302,7 +302,7 @@ static void unix_write_space(struct sock *sk)
302 * may receive messages only from that peer. */ 302 * may receive messages only from that peer. */
303static void unix_dgram_disconnected(struct sock *sk, struct sock *other) 303static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
304{ 304{
305 if (skb_queue_len(&sk->sk_receive_queue)) { 305 if (!skb_queue_empty(&sk->sk_receive_queue)) {
306 skb_queue_purge(&sk->sk_receive_queue); 306 skb_queue_purge(&sk->sk_receive_queue);
307 wake_up_interruptible_all(&unix_sk(sk)->peer_wait); 307 wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
308 308
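Note: the af_unix hunks swap truth-tests of skb_queue_len() for skb_queue_empty(). For a pure "anything queued?" check the two are equivalent; the empty form compares the head pointers rather than reading the qlen counter, and states the intent directly instead of implying the exact count matters. A sketch (the wrapper is hypothetical):

    #include <linux/skbuff.h>

    static int rx_pending(struct sock *sk)
    {
            /* Equivalent to skb_queue_len(&sk->sk_receive_queue) != 0,
             * but asks "is anything queued?" rather than "how many?". */
            return !skb_queue_empty(&sk->sk_receive_queue);
    }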
@@ -1619,7 +1619,7 @@ static long unix_stream_data_wait(struct sock * sk, long timeo)
1619 for (;;) { 1619 for (;;) {
1620 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 1620 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
1621 1621
1622 if (skb_queue_len(&sk->sk_receive_queue) || 1622 if (!skb_queue_empty(&sk->sk_receive_queue) ||
1623 sk->sk_err || 1623 sk->sk_err ||
1624 (sk->sk_shutdown & RCV_SHUTDOWN) || 1624 (sk->sk_shutdown & RCV_SHUTDOWN) ||
1625 signal_pending(current) || 1625 signal_pending(current) ||