aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2008-06-04 20:39:33 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2008-06-04 20:39:33 -0400
commit3e387fcdc485d94fe2c4b52e7c30c0c4cd1fe364 (patch)
tree577de7b1aed18106b30e3e1d517b165f137e52ac
parent9489a0625854cd7482bb0e8b37de4406cdcd49e0 (diff)
parent24b95685ffcdb3dc28f64b9e8af6ea3e8360fbc5 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (56 commits) l2tp: Fix possible oops if transmitting or receiving when tunnel goes down tcp: Fix for race due to temporary drop of the socket lock in skb_splice_bits. tcp: Increment OUTRSTS in tcp_send_active_reset() raw: Raw socket leak. lt2p: Fix possible WARN_ON from socket code when UDP socket is closed USB ID for Philips CPWUA054/00 Wireless USB Adapter 11g ssb: Fix context assertion in ssb_pcicore_dev_irqvecs_enable libertas: fix command size for CMD_802_11_SUBSCRIBE_EVENT ipw2200: expire and use oldest BSS on adhoc create airo warning fix b43legacy: Fix controller restart crash sctp: Fix ECN markings for IPv6 sctp: Flush the queue only once during fast retransmit. sctp: Start T3-RTX timer when fast retransmitting lowest TSN sctp: Correctly implement Fast Recovery cwnd manipulations. sctp: Move sctp_v4_dst_saddr out of loop sctp: retran_path update bug fix tcp: fix skb vs fack_count out-of-sync condition sunhme: Cleanup use of deprecated calls to save_and_cli and restore_flags. xfrm: xfrm_algo: correct usage of RIPEMD-160 ...
-rw-r--r--Documentation/networking/bridge.txt2
-rw-r--r--MAINTAINERS2
-rw-r--r--drivers/net/atlx/atl1.c1
-rw-r--r--drivers/net/cs89x0.c10
-rw-r--r--drivers/net/myri10ge/myri10ge.c2
-rw-r--r--drivers/net/pppol2tp.c111
-rw-r--r--drivers/net/sc92031.c2
-rw-r--r--drivers/net/sfc/falcon_xmac.c2
-rw-r--r--drivers/net/sunhme.c4
-rw-r--r--drivers/net/tulip/tulip_core.c10
-rw-r--r--drivers/net/ucc_geth_ethtool.c3
-rw-r--r--drivers/net/virtio_net.c36
-rw-r--r--drivers/net/wireless/airo.c2
-rw-r--r--drivers/net/wireless/b43legacy/main.c17
-rw-r--r--drivers/net/wireless/ipw2200.c27
-rw-r--r--drivers/net/wireless/libertas/debugfs.c4
-rw-r--r--drivers/net/wireless/p54/p54usb.c1
-rw-r--r--drivers/ssb/driver_pcicore.c4
-rw-r--r--include/linux/in_route.h12
-rw-r--r--include/linux/inetdevice.h1
-rw-r--r--include/linux/rtnetlink.h4
-rw-r--r--include/net/addrconf.h22
-rw-r--r--include/net/genetlink.h4
-rw-r--r--include/net/netlink.h20
-rw-r--r--include/net/sctp/structs.h17
-rw-r--r--include/net/transp_v6.h3
-rw-r--r--include/net/udp.h1
-rw-r--r--net/ax25/ax25_subr.c11
-rw-r--r--net/bluetooth/rfcomm/tty.c13
-rw-r--r--net/core/neighbour.c9
-rw-r--r--net/core/rtnetlink.c3
-rw-r--r--net/core/skbuff.c5
-rw-r--r--net/core/user_dma.c2
-rw-r--r--net/ipv4/devinet.c9
-rw-r--r--net/ipv4/fib_frontend.c1
-rw-r--r--net/ipv4/raw.c9
-rw-r--r--net/ipv4/route.c2
-rw-r--r--net/ipv4/tcp.c9
-rw-r--r--net/ipv4/tcp_input.c35
-rw-r--r--net/ipv4/tcp_output.c2
-rw-r--r--net/ipv4/tunnel4.c2
-rw-r--r--net/ipv4/udp.c3
-rw-r--r--net/ipv6/addrconf.c107
-rw-r--r--net/ipv6/datagram.c45
-rw-r--r--net/ipv6/ip6_flowlabel.c2
-rw-r--r--net/ipv6/ipv6_sockglue.c21
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c8
-rw-r--r--net/ipv6/raw.c11
-rw-r--r--net/ipv6/route.c12
-rw-r--r--net/ipv6/tunnel6.c2
-rw-r--r--net/ipv6/udp.c8
-rw-r--r--net/irda/af_irda.c12
-rw-r--r--net/netfilter/xt_connlimit.c3
-rw-r--r--net/netlink/attr.c12
-rw-r--r--net/netlink/genetlink.c6
-rw-r--r--net/sched/sch_dsmark.c6
-rw-r--r--net/sched/sch_gred.c3
-rw-r--r--net/sched/sch_hfsc.c2
-rw-r--r--net/sched/sch_red.c3
-rw-r--r--net/sctp/associola.c21
-rw-r--r--net/sctp/ipv6.c11
-rw-r--r--net/sctp/output.c2
-rw-r--r--net/sctp/outqueue.c120
-rw-r--r--net/sctp/protocol.c11
-rw-r--r--net/sctp/transport.c50
-rw-r--r--net/wireless/nl80211.c12
-rw-r--r--net/xfrm/xfrm_algo.c4
67 files changed, 618 insertions, 315 deletions
diff --git a/Documentation/networking/bridge.txt b/Documentation/networking/bridge.txt
index bdae2db4119c..bec69a8a1697 100644
--- a/Documentation/networking/bridge.txt
+++ b/Documentation/networking/bridge.txt
@@ -1,6 +1,6 @@
1In order to use the Ethernet bridging functionality, you'll need the 1In order to use the Ethernet bridging functionality, you'll need the
2userspace tools. These programs and documentation are available 2userspace tools. These programs and documentation are available
3at http://bridge.sourceforge.net. The download page is 3at http://www.linux-foundation.org/en/Net:Bridge. The download page is
4http://prdownloads.sourceforge.net/bridge. 4http://prdownloads.sourceforge.net/bridge.
5 5
6If you still have questions, don't hesitate to post to the mailing list 6If you still have questions, don't hesitate to post to the mailing list
diff --git a/MAINTAINERS b/MAINTAINERS
index 0a6d2ca03cea..46fa1797707a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1611,7 +1611,7 @@ ETHERNET BRIDGE
1611P: Stephen Hemminger 1611P: Stephen Hemminger
1612M: shemminger@linux-foundation.org 1612M: shemminger@linux-foundation.org
1613L: bridge@lists.linux-foundation.org 1613L: bridge@lists.linux-foundation.org
1614W: http://bridge.sourceforge.net/ 1614W: http://www.linux-foundation.org/en/Net:Bridge
1615S: Maintained 1615S: Maintained
1616 1616
1617ETHERTEAM 16I DRIVER 1617ETHERTEAM 16I DRIVER
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 6e4c80d41b08..6ddc911e7d15 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -2023,6 +2023,7 @@ rrd_ok:
2023 /* Good Receive */ 2023 /* Good Receive */
2024 pci_unmap_page(adapter->pdev, buffer_info->dma, 2024 pci_unmap_page(adapter->pdev, buffer_info->dma,
2025 buffer_info->length, PCI_DMA_FROMDEVICE); 2025 buffer_info->length, PCI_DMA_FROMDEVICE);
2026 buffer_info->dma = 0;
2026 skb = buffer_info->skb; 2027 skb = buffer_info->skb;
2027 length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size); 2028 length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size);
2028 2029
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index 348371fda597..fba87abe78ee 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -1394,7 +1394,11 @@ net_open(struct net_device *dev)
1394#endif 1394#endif
1395 if (!result) { 1395 if (!result) {
1396 printk(KERN_ERR "%s: EEPROM is configured for unavailable media\n", dev->name); 1396 printk(KERN_ERR "%s: EEPROM is configured for unavailable media\n", dev->name);
1397 release_irq: 1397release_dma:
1398#if ALLOW_DMA
1399 free_dma(dev->dma);
1400#endif
1401release_irq:
1398#if ALLOW_DMA 1402#if ALLOW_DMA
1399 release_dma_buff(lp); 1403 release_dma_buff(lp);
1400#endif 1404#endif
@@ -1442,12 +1446,12 @@ net_open(struct net_device *dev)
1442 if ((result = detect_bnc(dev)) != DETECTED_NONE) 1446 if ((result = detect_bnc(dev)) != DETECTED_NONE)
1443 break; 1447 break;
1444 printk(KERN_ERR "%s: no media detected\n", dev->name); 1448 printk(KERN_ERR "%s: no media detected\n", dev->name);
1445 goto release_irq; 1449 goto release_dma;
1446 } 1450 }
1447 switch(result) { 1451 switch(result) {
1448 case DETECTED_NONE: 1452 case DETECTED_NONE:
1449 printk(KERN_ERR "%s: no network cable attached to configured media\n", dev->name); 1453 printk(KERN_ERR "%s: no network cable attached to configured media\n", dev->name);
1450 goto release_irq; 1454 goto release_dma;
1451 case DETECTED_RJ45H: 1455 case DETECTED_RJ45H:
1452 printk(KERN_INFO "%s: using half-duplex 10Base-T (RJ-45)\n", dev->name); 1456 printk(KERN_INFO "%s: using half-duplex 10Base-T (RJ-45)\n", dev->name);
1453 break; 1457 break;
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 36be6efc6398..e0d76c75aea0 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -75,7 +75,7 @@
75#include "myri10ge_mcp.h" 75#include "myri10ge_mcp.h"
76#include "myri10ge_mcp_gen_header.h" 76#include "myri10ge_mcp_gen_header.h"
77 77
78#define MYRI10GE_VERSION_STR "1.3.2-1.287" 78#define MYRI10GE_VERSION_STR "1.3.99-1.347"
79 79
80MODULE_DESCRIPTION("Myricom 10G driver (10GbE)"); 80MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
81MODULE_AUTHOR("Maintainer: help@myri.com"); 81MODULE_AUTHOR("Maintainer: help@myri.com");
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index 8db342f2fdc9..70cfdb46aa27 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -240,12 +240,15 @@ static inline struct pppol2tp_session *pppol2tp_sock_to_session(struct sock *sk)
240 if (sk == NULL) 240 if (sk == NULL)
241 return NULL; 241 return NULL;
242 242
243 sock_hold(sk);
243 session = (struct pppol2tp_session *)(sk->sk_user_data); 244 session = (struct pppol2tp_session *)(sk->sk_user_data);
244 if (session == NULL) 245 if (session == NULL) {
245 return NULL; 246 sock_put(sk);
247 goto out;
248 }
246 249
247 BUG_ON(session->magic != L2TP_SESSION_MAGIC); 250 BUG_ON(session->magic != L2TP_SESSION_MAGIC);
248 251out:
249 return session; 252 return session;
250} 253}
251 254
@@ -256,12 +259,15 @@ static inline struct pppol2tp_tunnel *pppol2tp_sock_to_tunnel(struct sock *sk)
256 if (sk == NULL) 259 if (sk == NULL)
257 return NULL; 260 return NULL;
258 261
262 sock_hold(sk);
259 tunnel = (struct pppol2tp_tunnel *)(sk->sk_user_data); 263 tunnel = (struct pppol2tp_tunnel *)(sk->sk_user_data);
260 if (tunnel == NULL) 264 if (tunnel == NULL) {
261 return NULL; 265 sock_put(sk);
266 goto out;
267 }
262 268
263 BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC); 269 BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
264 270out:
265 return tunnel; 271 return tunnel;
266} 272}
267 273
@@ -716,12 +722,14 @@ discard:
716 session->stats.rx_errors++; 722 session->stats.rx_errors++;
717 kfree_skb(skb); 723 kfree_skb(skb);
718 sock_put(session->sock); 724 sock_put(session->sock);
725 sock_put(sock);
719 726
720 return 0; 727 return 0;
721 728
722error: 729error:
723 /* Put UDP header back */ 730 /* Put UDP header back */
724 __skb_push(skb, sizeof(struct udphdr)); 731 __skb_push(skb, sizeof(struct udphdr));
732 sock_put(sock);
725 733
726no_tunnel: 734no_tunnel:
727 return 1; 735 return 1;
@@ -745,10 +753,13 @@ static int pppol2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
745 "%s: received %d bytes\n", tunnel->name, skb->len); 753 "%s: received %d bytes\n", tunnel->name, skb->len);
746 754
747 if (pppol2tp_recv_core(sk, skb)) 755 if (pppol2tp_recv_core(sk, skb))
748 goto pass_up; 756 goto pass_up_put;
749 757
758 sock_put(sk);
750 return 0; 759 return 0;
751 760
761pass_up_put:
762 sock_put(sk);
752pass_up: 763pass_up:
753 return 1; 764 return 1;
754} 765}
@@ -858,7 +869,7 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
858 869
859 tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock); 870 tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
860 if (tunnel == NULL) 871 if (tunnel == NULL)
861 goto error; 872 goto error_put_sess;
862 873
863 /* What header length is configured for this session? */ 874 /* What header length is configured for this session? */
864 hdr_len = pppol2tp_l2tp_header_len(session); 875 hdr_len = pppol2tp_l2tp_header_len(session);
@@ -870,7 +881,7 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
870 sizeof(ppph) + total_len, 881 sizeof(ppph) + total_len,
871 0, GFP_KERNEL); 882 0, GFP_KERNEL);
872 if (!skb) 883 if (!skb)
873 goto error; 884 goto error_put_sess_tun;
874 885
875 /* Reserve space for headers. */ 886 /* Reserve space for headers. */
876 skb_reserve(skb, NET_SKB_PAD); 887 skb_reserve(skb, NET_SKB_PAD);
@@ -900,7 +911,7 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
900 error = memcpy_fromiovec(skb->data, m->msg_iov, total_len); 911 error = memcpy_fromiovec(skb->data, m->msg_iov, total_len);
901 if (error < 0) { 912 if (error < 0) {
902 kfree_skb(skb); 913 kfree_skb(skb);
903 goto error; 914 goto error_put_sess_tun;
904 } 915 }
905 skb_put(skb, total_len); 916 skb_put(skb, total_len);
906 917
@@ -947,10 +958,33 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
947 session->stats.tx_errors++; 958 session->stats.tx_errors++;
948 } 959 }
949 960
961 return error;
962
963error_put_sess_tun:
964 sock_put(session->tunnel_sock);
965error_put_sess:
966 sock_put(sk);
950error: 967error:
951 return error; 968 return error;
952} 969}
953 970
971/* Automatically called when the skb is freed.
972 */
973static void pppol2tp_sock_wfree(struct sk_buff *skb)
974{
975 sock_put(skb->sk);
976}
977
978/* For data skbs that we transmit, we associate with the tunnel socket
979 * but don't do accounting.
980 */
981static inline void pppol2tp_skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
982{
983 sock_hold(sk);
984 skb->sk = sk;
985 skb->destructor = pppol2tp_sock_wfree;
986}
987
954/* Transmit function called by generic PPP driver. Sends PPP frame 988/* Transmit function called by generic PPP driver. Sends PPP frame
955 * over PPPoL2TP socket. 989 * over PPPoL2TP socket.
956 * 990 *
@@ -993,10 +1027,10 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
993 1027
994 sk_tun = session->tunnel_sock; 1028 sk_tun = session->tunnel_sock;
995 if (sk_tun == NULL) 1029 if (sk_tun == NULL)
996 goto abort; 1030 goto abort_put_sess;
997 tunnel = pppol2tp_sock_to_tunnel(sk_tun); 1031 tunnel = pppol2tp_sock_to_tunnel(sk_tun);
998 if (tunnel == NULL) 1032 if (tunnel == NULL)
999 goto abort; 1033 goto abort_put_sess;
1000 1034
1001 /* What header length is configured for this session? */ 1035 /* What header length is configured for this session? */
1002 hdr_len = pppol2tp_l2tp_header_len(session); 1036 hdr_len = pppol2tp_l2tp_header_len(session);
@@ -1009,7 +1043,7 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
1009 sizeof(struct udphdr) + hdr_len + sizeof(ppph); 1043 sizeof(struct udphdr) + hdr_len + sizeof(ppph);
1010 old_headroom = skb_headroom(skb); 1044 old_headroom = skb_headroom(skb);
1011 if (skb_cow_head(skb, headroom)) 1045 if (skb_cow_head(skb, headroom))
1012 goto abort; 1046 goto abort_put_sess_tun;
1013 1047
1014 new_headroom = skb_headroom(skb); 1048 new_headroom = skb_headroom(skb);
1015 skb_orphan(skb); 1049 skb_orphan(skb);
@@ -1069,7 +1103,7 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
1069 /* Get routing info from the tunnel socket */ 1103 /* Get routing info from the tunnel socket */
1070 dst_release(skb->dst); 1104 dst_release(skb->dst);
1071 skb->dst = dst_clone(__sk_dst_get(sk_tun)); 1105 skb->dst = dst_clone(__sk_dst_get(sk_tun));
1072 skb->sk = sk_tun; 1106 pppol2tp_skb_set_owner_w(skb, sk_tun);
1073 1107
1074 /* Queue the packet to IP for output */ 1108 /* Queue the packet to IP for output */
1075 len = skb->len; 1109 len = skb->len;
@@ -1086,8 +1120,14 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
1086 session->stats.tx_errors++; 1120 session->stats.tx_errors++;
1087 } 1121 }
1088 1122
1123 sock_put(sk_tun);
1124 sock_put(sk);
1089 return 1; 1125 return 1;
1090 1126
1127abort_put_sess_tun:
1128 sock_put(sk_tun);
1129abort_put_sess:
1130 sock_put(sk);
1091abort: 1131abort:
1092 /* Free the original skb */ 1132 /* Free the original skb */
1093 kfree_skb(skb); 1133 kfree_skb(skb);
@@ -1191,7 +1231,7 @@ static void pppol2tp_tunnel_destruct(struct sock *sk)
1191{ 1231{
1192 struct pppol2tp_tunnel *tunnel; 1232 struct pppol2tp_tunnel *tunnel;
1193 1233
1194 tunnel = pppol2tp_sock_to_tunnel(sk); 1234 tunnel = sk->sk_user_data;
1195 if (tunnel == NULL) 1235 if (tunnel == NULL)
1196 goto end; 1236 goto end;
1197 1237
@@ -1230,10 +1270,12 @@ static void pppol2tp_session_destruct(struct sock *sk)
1230 if (sk->sk_user_data != NULL) { 1270 if (sk->sk_user_data != NULL) {
1231 struct pppol2tp_tunnel *tunnel; 1271 struct pppol2tp_tunnel *tunnel;
1232 1272
1233 session = pppol2tp_sock_to_session(sk); 1273 session = sk->sk_user_data;
1234 if (session == NULL) 1274 if (session == NULL)
1235 goto out; 1275 goto out;
1236 1276
1277 BUG_ON(session->magic != L2TP_SESSION_MAGIC);
1278
1237 /* Don't use pppol2tp_sock_to_tunnel() here to 1279 /* Don't use pppol2tp_sock_to_tunnel() here to
1238 * get the tunnel context because the tunnel 1280 * get the tunnel context because the tunnel
1239 * socket might have already been closed (its 1281 * socket might have already been closed (its
@@ -1279,6 +1321,7 @@ out:
1279static int pppol2tp_release(struct socket *sock) 1321static int pppol2tp_release(struct socket *sock)
1280{ 1322{
1281 struct sock *sk = sock->sk; 1323 struct sock *sk = sock->sk;
1324 struct pppol2tp_session *session;
1282 int error; 1325 int error;
1283 1326
1284 if (!sk) 1327 if (!sk)
@@ -1296,9 +1339,18 @@ static int pppol2tp_release(struct socket *sock)
1296 sock_orphan(sk); 1339 sock_orphan(sk);
1297 sock->sk = NULL; 1340 sock->sk = NULL;
1298 1341
1342 session = pppol2tp_sock_to_session(sk);
1343
1299 /* Purge any queued data */ 1344 /* Purge any queued data */
1300 skb_queue_purge(&sk->sk_receive_queue); 1345 skb_queue_purge(&sk->sk_receive_queue);
1301 skb_queue_purge(&sk->sk_write_queue); 1346 skb_queue_purge(&sk->sk_write_queue);
1347 if (session != NULL) {
1348 struct sk_buff *skb;
1349 while ((skb = skb_dequeue(&session->reorder_q))) {
1350 kfree_skb(skb);
1351 sock_put(sk);
1352 }
1353 }
1302 1354
1303 release_sock(sk); 1355 release_sock(sk);
1304 1356
@@ -1601,7 +1653,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
1601 1653
1602 error = ppp_register_channel(&po->chan); 1654 error = ppp_register_channel(&po->chan);
1603 if (error) 1655 if (error)
1604 goto end; 1656 goto end_put_tun;
1605 1657
1606 /* This is how we get the session context from the socket. */ 1658 /* This is how we get the session context from the socket. */
1607 sk->sk_user_data = session; 1659 sk->sk_user_data = session;
@@ -1621,6 +1673,8 @@ out_no_ppp:
1621 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, 1673 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1622 "%s: created\n", session->name); 1674 "%s: created\n", session->name);
1623 1675
1676end_put_tun:
1677 sock_put(tunnel_sock);
1624end: 1678end:
1625 release_sock(sk); 1679 release_sock(sk);
1626 1680
@@ -1668,6 +1722,7 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
1668 *usockaddr_len = len; 1722 *usockaddr_len = len;
1669 1723
1670 error = 0; 1724 error = 0;
1725 sock_put(sock->sk);
1671 1726
1672end: 1727end:
1673 return error; 1728 return error;
@@ -1906,14 +1961,17 @@ static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd,
1906 err = -EBADF; 1961 err = -EBADF;
1907 tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock); 1962 tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
1908 if (tunnel == NULL) 1963 if (tunnel == NULL)
1909 goto end; 1964 goto end_put_sess;
1910 1965
1911 err = pppol2tp_tunnel_ioctl(tunnel, cmd, arg); 1966 err = pppol2tp_tunnel_ioctl(tunnel, cmd, arg);
1912 goto end; 1967 sock_put(session->tunnel_sock);
1968 goto end_put_sess;
1913 } 1969 }
1914 1970
1915 err = pppol2tp_session_ioctl(session, cmd, arg); 1971 err = pppol2tp_session_ioctl(session, cmd, arg);
1916 1972
1973end_put_sess:
1974 sock_put(sk);
1917end: 1975end:
1918 return err; 1976 return err;
1919} 1977}
@@ -2059,14 +2117,17 @@ static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
2059 err = -EBADF; 2117 err = -EBADF;
2060 tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock); 2118 tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
2061 if (tunnel == NULL) 2119 if (tunnel == NULL)
2062 goto end; 2120 goto end_put_sess;
2063 2121
2064 err = pppol2tp_tunnel_setsockopt(sk, tunnel, optname, val); 2122 err = pppol2tp_tunnel_setsockopt(sk, tunnel, optname, val);
2123 sock_put(session->tunnel_sock);
2065 } else 2124 } else
2066 err = pppol2tp_session_setsockopt(sk, session, optname, val); 2125 err = pppol2tp_session_setsockopt(sk, session, optname, val);
2067 2126
2068 err = 0; 2127 err = 0;
2069 2128
2129end_put_sess:
2130 sock_put(sk);
2070end: 2131end:
2071 return err; 2132 return err;
2072} 2133}
@@ -2181,20 +2242,24 @@ static int pppol2tp_getsockopt(struct socket *sock, int level,
2181 err = -EBADF; 2242 err = -EBADF;
2182 tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock); 2243 tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
2183 if (tunnel == NULL) 2244 if (tunnel == NULL)
2184 goto end; 2245 goto end_put_sess;
2185 2246
2186 err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val); 2247 err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val);
2248 sock_put(session->tunnel_sock);
2187 } else 2249 } else
2188 err = pppol2tp_session_getsockopt(sk, session, optname, &val); 2250 err = pppol2tp_session_getsockopt(sk, session, optname, &val);
2189 2251
2190 err = -EFAULT; 2252 err = -EFAULT;
2191 if (put_user(len, (int __user *) optlen)) 2253 if (put_user(len, (int __user *) optlen))
2192 goto end; 2254 goto end_put_sess;
2193 2255
2194 if (copy_to_user((void __user *) optval, &val, len)) 2256 if (copy_to_user((void __user *) optval, &val, len))
2195 goto end; 2257 goto end_put_sess;
2196 2258
2197 err = 0; 2259 err = 0;
2260
2261end_put_sess:
2262 sock_put(sk);
2198end: 2263end:
2199 return err; 2264 return err;
2200} 2265}
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index b4b63805ee8f..61955f8d8011 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -972,7 +972,7 @@ static int sc92031_start_xmit(struct sk_buff *skb, struct net_device *dev)
972 skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE); 972 skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE);
973 973
974 len = skb->len; 974 len = skb->len;
975 if (unlikely(len < ETH_ZLEN)) { 975 if (len < ETH_ZLEN) {
976 memset(priv->tx_bufs + entry * TX_BUF_SIZE + len, 976 memset(priv->tx_bufs + entry * TX_BUF_SIZE + len,
977 0, ETH_ZLEN - len); 977 0, ETH_ZLEN - len);
978 len = ETH_ZLEN; 978 len = ETH_ZLEN;
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index dbdcee4b0f8d..55c0d9760be8 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -459,7 +459,7 @@ static int falcon_check_xaui_link_up(struct efx_nic *efx)
459 tries--; 459 tries--;
460 } 460 }
461 461
462 EFX_ERR(efx, "Failed to bring XAUI link back up in %d tries!\n", 462 EFX_LOG(efx, "Failed to bring XAUI link back up in %d tries!\n",
463 max_tries); 463 max_tries);
464 return 0; 464 return 0;
465} 465}
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index b4e7f30ea4e8..1aa425be3067 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -111,7 +111,7 @@ static __inline__ void tx_add_log(struct happy_meal *hp, unsigned int a, unsigne
111 struct hme_tx_logent *tlp; 111 struct hme_tx_logent *tlp;
112 unsigned long flags; 112 unsigned long flags;
113 113
114 save_and_cli(flags); 114 local_irq_save(flags);
115 tlp = &tx_log[txlog_cur_entry]; 115 tlp = &tx_log[txlog_cur_entry];
116 tlp->tstamp = (unsigned int)jiffies; 116 tlp->tstamp = (unsigned int)jiffies;
117 tlp->tx_new = hp->tx_new; 117 tlp->tx_new = hp->tx_new;
@@ -119,7 +119,7 @@ static __inline__ void tx_add_log(struct happy_meal *hp, unsigned int a, unsigne
119 tlp->action = a; 119 tlp->action = a;
120 tlp->status = s; 120 tlp->status = s;
121 txlog_cur_entry = (txlog_cur_entry + 1) & (TX_LOG_LEN - 1); 121 txlog_cur_entry = (txlog_cur_entry + 1) & (TX_LOG_LEN - 1);
122 restore_flags(flags); 122 local_irq_restore(flags);
123} 123}
124static __inline__ void tx_dump_log(void) 124static __inline__ void tx_dump_log(void)
125{ 125{
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index f9d13fa05d64..55670b5eb611 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -1729,12 +1729,15 @@ static int tulip_suspend (struct pci_dev *pdev, pm_message_t state)
1729 if (!dev) 1729 if (!dev)
1730 return -EINVAL; 1730 return -EINVAL;
1731 1731
1732 if (netif_running(dev)) 1732 if (!netif_running(dev))
1733 tulip_down(dev); 1733 goto save_state;
1734
1735 tulip_down(dev);
1734 1736
1735 netif_device_detach(dev); 1737 netif_device_detach(dev);
1736 free_irq(dev->irq, dev); 1738 free_irq(dev->irq, dev);
1737 1739
1740save_state:
1738 pci_save_state(pdev); 1741 pci_save_state(pdev);
1739 pci_disable_device(pdev); 1742 pci_disable_device(pdev);
1740 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 1743 pci_set_power_state(pdev, pci_choose_state(pdev, state));
@@ -1754,6 +1757,9 @@ static int tulip_resume(struct pci_dev *pdev)
1754 pci_set_power_state(pdev, PCI_D0); 1757 pci_set_power_state(pdev, PCI_D0);
1755 pci_restore_state(pdev); 1758 pci_restore_state(pdev);
1756 1759
1760 if (!netif_running(dev))
1761 return 0;
1762
1757 if ((retval = pci_enable_device(pdev))) { 1763 if ((retval = pci_enable_device(pdev))) {
1758 printk (KERN_ERR "tulip: pci_enable_device failed in resume\n"); 1764 printk (KERN_ERR "tulip: pci_enable_device failed in resume\n");
1759 return retval; 1765 return retval;
diff --git a/drivers/net/ucc_geth_ethtool.c b/drivers/net/ucc_geth_ethtool.c
index 299b7f176950..f5839c4a5cbd 100644
--- a/drivers/net/ucc_geth_ethtool.c
+++ b/drivers/net/ucc_geth_ethtool.c
@@ -73,6 +73,7 @@ static char tx_fw_stat_gstrings[][ETH_GSTRING_LEN] = {
73 "tx-frames-ok", 73 "tx-frames-ok",
74 "tx-excessive-differ-frames", 74 "tx-excessive-differ-frames",
75 "tx-256-511-frames", 75 "tx-256-511-frames",
76 "tx-512-1023-frames",
76 "tx-1024-1518-frames", 77 "tx-1024-1518-frames",
77 "tx-jumbo-frames", 78 "tx-jumbo-frames",
78}; 79};
@@ -308,7 +309,7 @@ static void uec_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
308 buf += UEC_TX_FW_STATS_LEN * ETH_GSTRING_LEN; 309 buf += UEC_TX_FW_STATS_LEN * ETH_GSTRING_LEN;
309 } 310 }
310 if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) 311 if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX)
311 memcpy(buf, tx_fw_stat_gstrings, UEC_RX_FW_STATS_LEN * 312 memcpy(buf, rx_fw_stat_gstrings, UEC_RX_FW_STATS_LEN *
312 ETH_GSTRING_LEN); 313 ETH_GSTRING_LEN);
313} 314}
314 315
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index fe7cdf2a2a23..5450eac9e263 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -47,6 +47,9 @@ struct virtnet_info
47 /* Number of input buffers, and max we've ever had. */ 47 /* Number of input buffers, and max we've ever had. */
48 unsigned int num, max; 48 unsigned int num, max;
49 49
50 /* For cleaning up after transmission. */
51 struct tasklet_struct tasklet;
52
50 /* Receive & send queues. */ 53 /* Receive & send queues. */
51 struct sk_buff_head recv; 54 struct sk_buff_head recv;
52 struct sk_buff_head send; 55 struct sk_buff_head send;
@@ -68,8 +71,13 @@ static void skb_xmit_done(struct virtqueue *svq)
68 71
69 /* Suppress further interrupts. */ 72 /* Suppress further interrupts. */
70 svq->vq_ops->disable_cb(svq); 73 svq->vq_ops->disable_cb(svq);
74
71 /* We were waiting for more output buffers. */ 75 /* We were waiting for more output buffers. */
72 netif_wake_queue(vi->dev); 76 netif_wake_queue(vi->dev);
77
78 /* Make sure we re-xmit last_xmit_skb: if there are no more packets
79 * queued, start_xmit won't be called. */
80 tasklet_schedule(&vi->tasklet);
73} 81}
74 82
75static void receive_skb(struct net_device *dev, struct sk_buff *skb, 83static void receive_skb(struct net_device *dev, struct sk_buff *skb,
@@ -278,6 +286,18 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
278 return vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb); 286 return vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
279} 287}
280 288
289static void xmit_tasklet(unsigned long data)
290{
291 struct virtnet_info *vi = (void *)data;
292
293 netif_tx_lock_bh(vi->dev);
294 if (vi->last_xmit_skb && xmit_skb(vi, vi->last_xmit_skb) == 0) {
295 vi->svq->vq_ops->kick(vi->svq);
296 vi->last_xmit_skb = NULL;
297 }
298 netif_tx_unlock_bh(vi->dev);
299}
300
281static int start_xmit(struct sk_buff *skb, struct net_device *dev) 301static int start_xmit(struct sk_buff *skb, struct net_device *dev)
282{ 302{
283 struct virtnet_info *vi = netdev_priv(dev); 303 struct virtnet_info *vi = netdev_priv(dev);
@@ -287,21 +307,25 @@ again:
287 free_old_xmit_skbs(vi); 307 free_old_xmit_skbs(vi);
288 308
289 /* If we has a buffer left over from last time, send it now. */ 309 /* If we has a buffer left over from last time, send it now. */
290 if (vi->last_xmit_skb) { 310 if (unlikely(vi->last_xmit_skb)) {
291 if (xmit_skb(vi, vi->last_xmit_skb) != 0) { 311 if (xmit_skb(vi, vi->last_xmit_skb) != 0) {
292 /* Drop this skb: we only queue one. */ 312 /* Drop this skb: we only queue one. */
293 vi->dev->stats.tx_dropped++; 313 vi->dev->stats.tx_dropped++;
294 kfree_skb(skb); 314 kfree_skb(skb);
315 skb = NULL;
295 goto stop_queue; 316 goto stop_queue;
296 } 317 }
297 vi->last_xmit_skb = NULL; 318 vi->last_xmit_skb = NULL;
298 } 319 }
299 320
300 /* Put new one in send queue and do transmit */ 321 /* Put new one in send queue and do transmit */
301 __skb_queue_head(&vi->send, skb); 322 if (likely(skb)) {
302 if (xmit_skb(vi, skb) != 0) { 323 __skb_queue_head(&vi->send, skb);
303 vi->last_xmit_skb = skb; 324 if (xmit_skb(vi, skb) != 0) {
304 goto stop_queue; 325 vi->last_xmit_skb = skb;
326 skb = NULL;
327 goto stop_queue;
328 }
305 } 329 }
306done: 330done:
307 vi->svq->vq_ops->kick(vi->svq); 331 vi->svq->vq_ops->kick(vi->svq);
@@ -428,6 +452,8 @@ static int virtnet_probe(struct virtio_device *vdev)
428 skb_queue_head_init(&vi->recv); 452 skb_queue_head_init(&vi->recv);
429 skb_queue_head_init(&vi->send); 453 skb_queue_head_init(&vi->send);
430 454
455 tasklet_init(&vi->tasklet, xmit_tasklet, (unsigned long)vi);
456
431 err = register_netdev(dev); 457 err = register_netdev(dev);
432 if (err) { 458 if (err) {
433 pr_debug("virtio_net: registering device failed\n"); 459 pr_debug("virtio_net: registering device failed\n");
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 4e1c690ff45f..32019fb878d8 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -2905,7 +2905,7 @@ EXPORT_SYMBOL(init_airo_card);
2905 2905
2906static int waitbusy (struct airo_info *ai) { 2906static int waitbusy (struct airo_info *ai) {
2907 int delay = 0; 2907 int delay = 0;
2908 while ((IN4500 (ai, COMMAND) & COMMAND_BUSY) && (delay < 10000)) { 2908 while ((IN4500(ai, COMMAND) & COMMAND_BUSY) && (delay < 10000)) {
2909 udelay (10); 2909 udelay (10);
2910 if ((++delay % 20) == 0) 2910 if ((++delay % 20) == 0)
2911 OUT4500(ai, EVACK, EV_CLEARCOMMANDBUSY); 2911 OUT4500(ai, EVACK, EV_CLEARCOMMANDBUSY);
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 14a5eea2573e..204077c13870 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -3039,7 +3039,6 @@ static void b43legacy_set_pretbtt(struct b43legacy_wldev *dev)
3039/* Locking: wl->mutex */ 3039/* Locking: wl->mutex */
3040static void b43legacy_wireless_core_exit(struct b43legacy_wldev *dev) 3040static void b43legacy_wireless_core_exit(struct b43legacy_wldev *dev)
3041{ 3041{
3042 struct b43legacy_wl *wl = dev->wl;
3043 struct b43legacy_phy *phy = &dev->phy; 3042 struct b43legacy_phy *phy = &dev->phy;
3044 u32 macctl; 3043 u32 macctl;
3045 3044
@@ -3054,12 +3053,6 @@ static void b43legacy_wireless_core_exit(struct b43legacy_wldev *dev)
3054 macctl |= B43legacy_MACCTL_PSM_JMP0; 3053 macctl |= B43legacy_MACCTL_PSM_JMP0;
3055 b43legacy_write32(dev, B43legacy_MMIO_MACCTL, macctl); 3054 b43legacy_write32(dev, B43legacy_MMIO_MACCTL, macctl);
3056 3055
3057 mutex_unlock(&wl->mutex);
3058 /* Must unlock as it would otherwise deadlock. No races here.
3059 * Cancel possibly pending workqueues. */
3060 cancel_work_sync(&dev->restart_work);
3061 mutex_lock(&wl->mutex);
3062
3063 b43legacy_leds_exit(dev); 3056 b43legacy_leds_exit(dev);
3064 b43legacy_rng_exit(dev->wl); 3057 b43legacy_rng_exit(dev->wl);
3065 b43legacy_pio_free(dev); 3058 b43legacy_pio_free(dev);
@@ -3486,6 +3479,8 @@ static void b43legacy_chip_reset(struct work_struct *work)
3486 } 3479 }
3487 } 3480 }
3488out: 3481out:
3482 if (err)
3483 wl->current_dev = NULL; /* Failed to init the dev. */
3489 mutex_unlock(&wl->mutex); 3484 mutex_unlock(&wl->mutex);
3490 if (err) 3485 if (err)
3491 b43legacyerr(wl, "Controller restart FAILED\n"); 3486 b43legacyerr(wl, "Controller restart FAILED\n");
@@ -3618,9 +3613,11 @@ static void b43legacy_one_core_detach(struct ssb_device *dev)
3618 struct b43legacy_wldev *wldev; 3613 struct b43legacy_wldev *wldev;
3619 struct b43legacy_wl *wl; 3614 struct b43legacy_wl *wl;
3620 3615
3616 /* Do not cancel ieee80211-workqueue based work here.
3617 * See comment in b43legacy_remove(). */
3618
3621 wldev = ssb_get_drvdata(dev); 3619 wldev = ssb_get_drvdata(dev);
3622 wl = wldev->wl; 3620 wl = wldev->wl;
3623 cancel_work_sync(&wldev->restart_work);
3624 b43legacy_debugfs_remove_device(wldev); 3621 b43legacy_debugfs_remove_device(wldev);
3625 b43legacy_wireless_core_detach(wldev); 3622 b43legacy_wireless_core_detach(wldev);
3626 list_del(&wldev->list); 3623 list_del(&wldev->list);
@@ -3789,6 +3786,10 @@ static void b43legacy_remove(struct ssb_device *dev)
3789 struct b43legacy_wl *wl = ssb_get_devtypedata(dev); 3786 struct b43legacy_wl *wl = ssb_get_devtypedata(dev);
3790 struct b43legacy_wldev *wldev = ssb_get_drvdata(dev); 3787 struct b43legacy_wldev *wldev = ssb_get_drvdata(dev);
3791 3788
3789 /* We must cancel any work here before unregistering from ieee80211,
3790 * as the ieee80211 unreg will destroy the workqueue. */
3791 cancel_work_sync(&wldev->restart_work);
3792
3792 B43legacy_WARN_ON(!wl); 3793 B43legacy_WARN_ON(!wl);
3793 if (wl->current_dev == wldev) 3794 if (wl->current_dev == wldev)
3794 ieee80211_unregister_hw(wl->hw); 3795 ieee80211_unregister_hw(wl->hw);
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index d74c061994ae..729336774828 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -7558,8 +7558,31 @@ static int ipw_associate(void *data)
7558 priv->ieee->iw_mode == IW_MODE_ADHOC && 7558 priv->ieee->iw_mode == IW_MODE_ADHOC &&
7559 priv->config & CFG_ADHOC_CREATE && 7559 priv->config & CFG_ADHOC_CREATE &&
7560 priv->config & CFG_STATIC_ESSID && 7560 priv->config & CFG_STATIC_ESSID &&
7561 priv->config & CFG_STATIC_CHANNEL && 7561 priv->config & CFG_STATIC_CHANNEL) {
7562 !list_empty(&priv->ieee->network_free_list)) { 7562 /* Use oldest network if the free list is empty */
7563 if (list_empty(&priv->ieee->network_free_list)) {
7564 struct ieee80211_network *oldest = NULL;
7565 struct ieee80211_network *target;
7566 DECLARE_MAC_BUF(mac);
7567
7568 list_for_each_entry(target, &priv->ieee->network_list, list) {
7569 if ((oldest == NULL) ||
7570 (target->last_scanned < oldest->last_scanned))
7571 oldest = target;
7572 }
7573
7574 /* If there are no more slots, expire the oldest */
7575 list_del(&oldest->list);
7576 target = oldest;
7577 IPW_DEBUG_ASSOC("Expired '%s' (%s) from "
7578 "network list.\n",
7579 escape_essid(target->ssid,
7580 target->ssid_len),
7581 print_mac(mac, target->bssid));
7582 list_add_tail(&target->list,
7583 &priv->ieee->network_free_list);
7584 }
7585
7563 element = priv->ieee->network_free_list.next; 7586 element = priv->ieee->network_free_list.next;
7564 network = list_entry(element, struct ieee80211_network, list); 7587 network = list_entry(element, struct ieee80211_network, list);
7565 ipw_adhoc_create(priv, network); 7588 ipw_adhoc_create(priv, network);
diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
index ad2fabca9116..0aa0ce3b2c42 100644
--- a/drivers/net/wireless/libertas/debugfs.c
+++ b/drivers/net/wireless/libertas/debugfs.c
@@ -312,8 +312,8 @@ static ssize_t lbs_threshold_write(uint16_t tlv_type, uint16_t event_mask,
312 if (tlv_type != TLV_TYPE_BCNMISS) 312 if (tlv_type != TLV_TYPE_BCNMISS)
313 tlv->freq = freq; 313 tlv->freq = freq;
314 314
315 /* The command header, the event mask, and the one TLV */ 315 /* The command header, the action, the event mask, and one TLV */
316 events->hdr.size = cpu_to_le16(sizeof(events->hdr) + 2 + sizeof(*tlv)); 316 events->hdr.size = cpu_to_le16(sizeof(events->hdr) + 4 + sizeof(*tlv));
317 317
318 ret = lbs_cmd_with_response(priv, CMD_802_11_SUBSCRIBE_EVENT, events); 318 ret = lbs_cmd_with_response(priv, CMD_802_11_SUBSCRIBE_EVENT, events);
319 319
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 98ddbb3b3273..1610a7308c1d 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -49,6 +49,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
49 {USB_DEVICE(0x5041, 0x2235)}, /* Linksys WUSB54G Portable */ 49 {USB_DEVICE(0x5041, 0x2235)}, /* Linksys WUSB54G Portable */
50 50
51 /* Version 2 devices (3887) */ 51 /* Version 2 devices (3887) */
52 {USB_DEVICE(0x0471, 0x1230)}, /* Philips CPWUA054/00 */
52 {USB_DEVICE(0x050d, 0x7050)}, /* Belkin F5D7050 ver 1000 */ 53 {USB_DEVICE(0x050d, 0x7050)}, /* Belkin F5D7050 ver 1000 */
53 {USB_DEVICE(0x0572, 0x2000)}, /* Cohiba Proto board */ 54 {USB_DEVICE(0x0572, 0x2000)}, /* Cohiba Proto board */
54 {USB_DEVICE(0x0572, 0x2002)}, /* Cohiba Proto board */ 55 {USB_DEVICE(0x0572, 0x2002)}, /* Cohiba Proto board */
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
index 75def13e797d..d28c53868093 100644
--- a/drivers/ssb/driver_pcicore.c
+++ b/drivers/ssb/driver_pcicore.c
@@ -537,12 +537,12 @@ int ssb_pcicore_dev_irqvecs_enable(struct ssb_pcicore *pc,
537 int err = 0; 537 int err = 0;
538 u32 tmp; 538 u32 tmp;
539 539
540 might_sleep();
541
542 if (!pdev) 540 if (!pdev)
543 goto out; 541 goto out;
544 bus = pdev->bus; 542 bus = pdev->bus;
545 543
544 might_sleep_if(pdev->id.coreid != SSB_DEV_PCI);
545
546 /* Enable interrupts for this device. */ 546 /* Enable interrupts for this device. */
547 if (bus->host_pci && 547 if (bus->host_pci &&
548 ((pdev->id.revision >= 6) || (pdev->id.coreid == SSB_DEV_PCIE))) { 548 ((pdev->id.revision >= 6) || (pdev->id.coreid == SSB_DEV_PCIE))) {
diff --git a/include/linux/in_route.h b/include/linux/in_route.h
index 61f25c30a2a0..b261b8c915f0 100644
--- a/include/linux/in_route.h
+++ b/include/linux/in_route.h
@@ -10,19 +10,19 @@
10#define RTCF_NOPMTUDISC RTM_F_NOPMTUDISC 10#define RTCF_NOPMTUDISC RTM_F_NOPMTUDISC
11 11
12#define RTCF_NOTIFY 0x00010000 12#define RTCF_NOTIFY 0x00010000
13#define RTCF_DIRECTDST 0x00020000 13#define RTCF_DIRECTDST 0x00020000 /* unused */
14#define RTCF_REDIRECTED 0x00040000 14#define RTCF_REDIRECTED 0x00040000
15#define RTCF_TPROXY 0x00080000 15#define RTCF_TPROXY 0x00080000 /* unused */
16 16
17#define RTCF_FAST 0x00200000 17#define RTCF_FAST 0x00200000 /* unused */
18#define RTCF_MASQ 0x00400000 18#define RTCF_MASQ 0x00400000 /* unused */
19#define RTCF_SNAT 0x00800000 19#define RTCF_SNAT 0x00800000 /* unused */
20#define RTCF_DOREDIRECT 0x01000000 20#define RTCF_DOREDIRECT 0x01000000
21#define RTCF_DIRECTSRC 0x04000000 21#define RTCF_DIRECTSRC 0x04000000
22#define RTCF_DNAT 0x08000000 22#define RTCF_DNAT 0x08000000
23#define RTCF_BROADCAST 0x10000000 23#define RTCF_BROADCAST 0x10000000
24#define RTCF_MULTICAST 0x20000000 24#define RTCF_MULTICAST 0x20000000
25#define RTCF_REJECT 0x40000000 25#define RTCF_REJECT 0x40000000 /* unused */
26#define RTCF_LOCAL 0x80000000 26#define RTCF_LOCAL 0x80000000
27 27
28#define RTCF_NAT (RTCF_DNAT|RTCF_SNAT) 28#define RTCF_NAT (RTCF_DNAT|RTCF_SNAT)
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index 7009b0cdd06f..c6f51ad52d5b 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -117,7 +117,6 @@ struct in_ifaddr
117 __be32 ifa_address; 117 __be32 ifa_address;
118 __be32 ifa_mask; 118 __be32 ifa_mask;
119 __be32 ifa_broadcast; 119 __be32 ifa_broadcast;
120 __be32 ifa_anycast;
121 unsigned char ifa_scope; 120 unsigned char ifa_scope;
122 unsigned char ifa_flags; 121 unsigned char ifa_flags;
123 unsigned char ifa_prefixlen; 122 unsigned char ifa_prefixlen;
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 44c81c744538..a2aec2c0cfb5 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -267,10 +267,10 @@ enum rtattr_type_t
267 RTA_PREFSRC, 267 RTA_PREFSRC,
268 RTA_METRICS, 268 RTA_METRICS,
269 RTA_MULTIPATH, 269 RTA_MULTIPATH,
270 RTA_PROTOINFO, 270 RTA_PROTOINFO, /* no longer used */
271 RTA_FLOW, 271 RTA_FLOW,
272 RTA_CACHEINFO, 272 RTA_CACHEINFO,
273 RTA_SESSION, 273 RTA_SESSION, /* no longer used */
274 RTA_MP_ALGO, /* no longer used */ 274 RTA_MP_ALGO, /* no longer used */
275 RTA_TABLE, 275 RTA_TABLE,
276 __RTA_MAX 276 __RTA_MAX
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 0a2f0372df31..bbd3d583c6e6 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -94,6 +94,28 @@ extern void addrconf_join_solict(struct net_device *dev,
94extern void addrconf_leave_solict(struct inet6_dev *idev, 94extern void addrconf_leave_solict(struct inet6_dev *idev,
95 struct in6_addr *addr); 95 struct in6_addr *addr);
96 96
97static inline unsigned long addrconf_timeout_fixup(u32 timeout,
98 unsigned unit)
99{
100 if (timeout == 0xffffffff)
101 return ~0UL;
102
103 /*
104 * Avoid arithmetic overflow.
105 * Assuming unit is constant and non-zero, this "if" statement
106 * will go away on 64bit archs.
107 */
108 if (0xfffffffe > LONG_MAX / unit && timeout > LONG_MAX / unit)
109 return LONG_MAX / unit;
110
111 return timeout;
112}
113
114static inline int addrconf_finite_timeout(unsigned long timeout)
115{
116 return ~timeout;
117}
118
97/* 119/*
98 * IPv6 Address Label subsystem (addrlabel.c) 120 * IPv6 Address Label subsystem (addrlabel.c)
99 */ 121 */
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index decdda546829..747c255d1df0 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -162,9 +162,9 @@ static inline int genlmsg_end(struct sk_buff *skb, void *hdr)
162 * @skb: socket buffer the message is stored in 162 * @skb: socket buffer the message is stored in
163 * @hdr: generic netlink message header 163 * @hdr: generic netlink message header
164 */ 164 */
165static inline int genlmsg_cancel(struct sk_buff *skb, void *hdr) 165static inline void genlmsg_cancel(struct sk_buff *skb, void *hdr)
166{ 166{
167 return nlmsg_cancel(skb, hdr - GENL_HDRLEN - NLMSG_HDRLEN); 167 nlmsg_cancel(skb, hdr - GENL_HDRLEN - NLMSG_HDRLEN);
168} 168}
169 169
170/** 170/**
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 112dcdf7e34e..dfc3701dfcc3 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -556,14 +556,12 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
556 * @skb: socket buffer the message is stored in 556 * @skb: socket buffer the message is stored in
557 * @mark: mark to trim to 557 * @mark: mark to trim to
558 * 558 *
559 * Trims the message to the provided mark. Returns -1. 559 * Trims the message to the provided mark.
560 */ 560 */
561static inline int nlmsg_trim(struct sk_buff *skb, const void *mark) 561static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
562{ 562{
563 if (mark) 563 if (mark)
564 skb_trim(skb, (unsigned char *) mark - skb->data); 564 skb_trim(skb, (unsigned char *) mark - skb->data);
565
566 return -1;
567} 565}
568 566
569/** 567/**
@@ -572,11 +570,11 @@ static inline int nlmsg_trim(struct sk_buff *skb, const void *mark)
572 * @nlh: netlink message header 570 * @nlh: netlink message header
573 * 571 *
574 * Removes the complete netlink message including all 572 * Removes the complete netlink message including all
575 * attributes from the socket buffer again. Returns -1. 573 * attributes from the socket buffer again.
576 */ 574 */
577static inline int nlmsg_cancel(struct sk_buff *skb, struct nlmsghdr *nlh) 575static inline void nlmsg_cancel(struct sk_buff *skb, struct nlmsghdr *nlh)
578{ 576{
579 return nlmsg_trim(skb, nlh); 577 nlmsg_trim(skb, nlh);
580} 578}
581 579
582/** 580/**
@@ -775,7 +773,7 @@ static inline int __nla_parse_nested_compat(struct nlattr *tb[], int maxtype,
775 int nested_len = nla_len(nla) - NLA_ALIGN(len); 773 int nested_len = nla_len(nla) - NLA_ALIGN(len);
776 774
777 if (nested_len < 0) 775 if (nested_len < 0)
778 return -1; 776 return -EINVAL;
779 if (nested_len >= nla_attr_size(0)) 777 if (nested_len >= nla_attr_size(0))
780 return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len), 778 return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
781 nested_len, policy); 779 nested_len, policy);
@@ -1080,11 +1078,11 @@ static inline int nla_nest_compat_end(struct sk_buff *skb, struct nlattr *start)
1080 * @start: container attribute 1078 * @start: container attribute
1081 * 1079 *
1082 * Removes the container attribute and including all nested 1080 * Removes the container attribute and including all nested
1083 * attributes. Returns -1. 1081 * attributes. Returns -EMSGSIZE
1084 */ 1082 */
1085static inline int nla_nest_cancel(struct sk_buff *skb, struct nlattr *start) 1083static inline void nla_nest_cancel(struct sk_buff *skb, struct nlattr *start)
1086{ 1084{
1087 return nlmsg_trim(skb, start); 1085 nlmsg_trim(skb, start);
1088} 1086}
1089 1087
1090/** 1088/**
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 0ce0443c5b79..7f25195f9855 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -548,7 +548,8 @@ struct sctp_af {
548 struct dst_entry *(*get_dst) (struct sctp_association *asoc, 548 struct dst_entry *(*get_dst) (struct sctp_association *asoc,
549 union sctp_addr *daddr, 549 union sctp_addr *daddr,
550 union sctp_addr *saddr); 550 union sctp_addr *saddr);
551 void (*get_saddr) (struct sctp_association *asoc, 551 void (*get_saddr) (struct sctp_sock *sk,
552 struct sctp_association *asoc,
552 struct dst_entry *dst, 553 struct dst_entry *dst,
553 union sctp_addr *daddr, 554 union sctp_addr *daddr,
554 union sctp_addr *saddr); 555 union sctp_addr *saddr);
@@ -587,6 +588,7 @@ struct sctp_af {
587 int (*is_ce) (const struct sk_buff *sk); 588 int (*is_ce) (const struct sk_buff *sk);
588 void (*seq_dump_addr)(struct seq_file *seq, 589 void (*seq_dump_addr)(struct seq_file *seq,
589 union sctp_addr *addr); 590 union sctp_addr *addr);
591 void (*ecn_capable)(struct sock *sk);
590 __u16 net_header_len; 592 __u16 net_header_len;
591 int sockaddr_len; 593 int sockaddr_len;
592 sa_family_t sa_family; 594 sa_family_t sa_family;
@@ -901,7 +903,10 @@ struct sctp_transport {
901 * calculation completes (i.e. the DATA chunk 903 * calculation completes (i.e. the DATA chunk
902 * is SACK'd) clear this flag. 904 * is SACK'd) clear this flag.
903 */ 905 */
904 int rto_pending; 906 __u8 rto_pending;
907
908 /* Flag to track the current fast recovery state */
909 __u8 fast_recovery;
905 910
906 /* 911 /*
907 * These are the congestion stats. 912 * These are the congestion stats.
@@ -920,6 +925,9 @@ struct sctp_transport {
920 /* Data that has been sent, but not acknowledged. */ 925 /* Data that has been sent, but not acknowledged. */
921 __u32 flight_size; 926 __u32 flight_size;
922 927
928 /* TSN marking the fast recovery exit point */
929 __u32 fast_recovery_exit;
930
923 /* Destination */ 931 /* Destination */
924 struct dst_entry *dst; 932 struct dst_entry *dst;
925 /* Source address. */ 933 /* Source address. */
@@ -1044,7 +1052,7 @@ void sctp_transport_route(struct sctp_transport *, union sctp_addr *,
1044 struct sctp_sock *); 1052 struct sctp_sock *);
1045void sctp_transport_pmtu(struct sctp_transport *); 1053void sctp_transport_pmtu(struct sctp_transport *);
1046void sctp_transport_free(struct sctp_transport *); 1054void sctp_transport_free(struct sctp_transport *);
1047void sctp_transport_reset_timers(struct sctp_transport *); 1055void sctp_transport_reset_timers(struct sctp_transport *, int);
1048void sctp_transport_hold(struct sctp_transport *); 1056void sctp_transport_hold(struct sctp_transport *);
1049void sctp_transport_put(struct sctp_transport *); 1057void sctp_transport_put(struct sctp_transport *);
1050void sctp_transport_update_rto(struct sctp_transport *, __u32); 1058void sctp_transport_update_rto(struct sctp_transport *, __u32);
@@ -1134,6 +1142,9 @@ struct sctp_outq {
1134 /* How many unackd bytes do we have in-flight? */ 1142 /* How many unackd bytes do we have in-flight? */
1135 __u32 outstanding_bytes; 1143 __u32 outstanding_bytes;
1136 1144
1145 /* Are we doing fast-rtx on this queue */
1146 char fast_rtx;
1147
1137 /* Corked? */ 1148 /* Corked? */
1138 char cork; 1149 char cork;
1139 1150
diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h
index 27394e0447d8..112934a3288d 100644
--- a/include/net/transp_v6.h
+++ b/include/net/transp_v6.h
@@ -40,7 +40,8 @@ extern int datagram_recv_ctl(struct sock *sk,
40 struct msghdr *msg, 40 struct msghdr *msg,
41 struct sk_buff *skb); 41 struct sk_buff *skb);
42 42
43extern int datagram_send_ctl(struct msghdr *msg, 43extern int datagram_send_ctl(struct net *net,
44 struct msghdr *msg,
44 struct flowi *fl, 45 struct flowi *fl,
45 struct ipv6_txoptions *opt, 46 struct ipv6_txoptions *opt,
46 int *hlimit, int *tclass); 47 int *hlimit, int *tclass);
diff --git a/include/net/udp.h b/include/net/udp.h
index 3e55a99b0ba3..ccce83707046 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -135,6 +135,7 @@ extern void udp_err(struct sk_buff *, u32);
135 135
136extern int udp_sendmsg(struct kiocb *iocb, struct sock *sk, 136extern int udp_sendmsg(struct kiocb *iocb, struct sock *sk,
137 struct msghdr *msg, size_t len); 137 struct msghdr *msg, size_t len);
138extern void udp_flush_pending_frames(struct sock *sk);
138 139
139extern int udp_rcv(struct sk_buff *skb); 140extern int udp_rcv(struct sk_buff *skb);
140extern int udp_ioctl(struct sock *sk, int cmd, unsigned long arg); 141extern int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c
index d8f215733175..034aa10a5198 100644
--- a/net/ax25/ax25_subr.c
+++ b/net/ax25/ax25_subr.c
@@ -64,20 +64,15 @@ void ax25_frames_acked(ax25_cb *ax25, unsigned short nr)
64 64
65void ax25_requeue_frames(ax25_cb *ax25) 65void ax25_requeue_frames(ax25_cb *ax25)
66{ 66{
67 struct sk_buff *skb, *skb_prev = NULL; 67 struct sk_buff *skb;
68 68
69 /* 69 /*
70 * Requeue all the un-ack-ed frames on the output queue to be picked 70 * Requeue all the un-ack-ed frames on the output queue to be picked
71 * up by ax25_kick called from the timer. This arrangement handles the 71 * up by ax25_kick called from the timer. This arrangement handles the
72 * possibility of an empty output queue. 72 * possibility of an empty output queue.
73 */ 73 */
74 while ((skb = skb_dequeue(&ax25->ack_queue)) != NULL) { 74 while ((skb = skb_dequeue_tail(&ax25->ack_queue)) != NULL)
75 if (skb_prev == NULL) 75 skb_queue_head(&ax25->write_queue, skb);
76 skb_queue_head(&ax25->write_queue, skb);
77 else
78 skb_append(skb_prev, skb, &ax25->write_queue);
79 skb_prev = skb;
80 }
81} 76}
82 77
83/* 78/*
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index c3f749abb2d0..c9191871c1e0 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -566,11 +566,22 @@ static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err)
566 if (dlc->state == BT_CLOSED) { 566 if (dlc->state == BT_CLOSED) {
567 if (!dev->tty) { 567 if (!dev->tty) {
568 if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags)) { 568 if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags)) {
569 if (rfcomm_dev_get(dev->id) == NULL) 569 /* Drop DLC lock here to avoid deadlock
570 * 1. rfcomm_dev_get will take rfcomm_dev_lock
571 * but in rfcomm_dev_add there's lock order:
572 * rfcomm_dev_lock -> dlc lock
573 * 2. rfcomm_dev_put will deadlock if it's
574 * the last reference
575 */
576 rfcomm_dlc_unlock(dlc);
577 if (rfcomm_dev_get(dev->id) == NULL) {
578 rfcomm_dlc_lock(dlc);
570 return; 579 return;
580 }
571 581
572 rfcomm_dev_del(dev); 582 rfcomm_dev_del(dev);
573 rfcomm_dev_put(dev); 583 rfcomm_dev_put(dev);
584 rfcomm_dlc_lock(dlc);
574 } 585 }
575 } else 586 } else
576 tty_hangup(dev->tty); 587 tty_hangup(dev->tty);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 5d9d7130bd6e..65f01f71b3f3 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1714,7 +1714,8 @@ static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1714 return nla_nest_end(skb, nest); 1714 return nla_nest_end(skb, nest);
1715 1715
1716nla_put_failure: 1716nla_put_failure:
1717 return nla_nest_cancel(skb, nest); 1717 nla_nest_cancel(skb, nest);
1718 return -EMSGSIZE;
1718} 1719}
1719 1720
1720static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl, 1721static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
@@ -2057,9 +2058,9 @@ static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2057 goto nla_put_failure; 2058 goto nla_put_failure;
2058 } 2059 }
2059 2060
2060 ci.ndm_used = now - neigh->used; 2061 ci.ndm_used = jiffies_to_clock_t(now - neigh->used);
2061 ci.ndm_confirmed = now - neigh->confirmed; 2062 ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2062 ci.ndm_updated = now - neigh->updated; 2063 ci.ndm_updated = jiffies_to_clock_t(now - neigh->updated);
2063 ci.ndm_refcnt = atomic_read(&neigh->refcnt) - 1; 2064 ci.ndm_refcnt = atomic_read(&neigh->refcnt) - 1;
2064 read_unlock_bh(&neigh->lock); 2065 read_unlock_bh(&neigh->lock);
2065 2066
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index cf857c4dc7b1..a9a77216310e 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -498,7 +498,8 @@ int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
498 return nla_nest_end(skb, mx); 498 return nla_nest_end(skb, mx);
499 499
500nla_put_failure: 500nla_put_failure:
501 return nla_nest_cancel(skb, mx); 501 nla_nest_cancel(skb, mx);
502 return -EMSGSIZE;
502} 503}
503 504
504int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id, 505int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 5c459f2b7985..1e556d312117 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1445,6 +1445,7 @@ done:
1445 1445
1446 if (spd.nr_pages) { 1446 if (spd.nr_pages) {
1447 int ret; 1447 int ret;
1448 struct sock *sk = __skb->sk;
1448 1449
1449 /* 1450 /*
1450 * Drop the socket lock, otherwise we have reverse 1451 * Drop the socket lock, otherwise we have reverse
@@ -1455,9 +1456,9 @@ done:
1455 * we call into ->sendpage() with the i_mutex lock held 1456 * we call into ->sendpage() with the i_mutex lock held
1456 * and networking will grab the socket lock. 1457 * and networking will grab the socket lock.
1457 */ 1458 */
1458 release_sock(__skb->sk); 1459 release_sock(sk);
1459 ret = splice_to_pipe(pipe, &spd); 1460 ret = splice_to_pipe(pipe, &spd);
1460 lock_sock(__skb->sk); 1461 lock_sock(sk);
1461 return ret; 1462 return ret;
1462 } 1463 }
1463 1464
diff --git a/net/core/user_dma.c b/net/core/user_dma.c
index 0ad1cd57bc39..c77aff9c6eb3 100644
--- a/net/core/user_dma.c
+++ b/net/core/user_dma.c
@@ -75,7 +75,7 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
75 75
76 end = start + skb_shinfo(skb)->frags[i].size; 76 end = start + skb_shinfo(skb)->frags[i].size;
77 copy = end - offset; 77 copy = end - offset;
78 if ((copy = end - offset) > 0) { 78 if (copy > 0) {
79 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 79 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
80 struct page *page = frag->page; 80 struct page *page = frag->page;
81 81
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 6848e4760f34..79a7ef6209ff 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -90,7 +90,6 @@ static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = {
90 [IFA_LOCAL] = { .type = NLA_U32 }, 90 [IFA_LOCAL] = { .type = NLA_U32 },
91 [IFA_ADDRESS] = { .type = NLA_U32 }, 91 [IFA_ADDRESS] = { .type = NLA_U32 },
92 [IFA_BROADCAST] = { .type = NLA_U32 }, 92 [IFA_BROADCAST] = { .type = NLA_U32 },
93 [IFA_ANYCAST] = { .type = NLA_U32 },
94 [IFA_LABEL] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, 93 [IFA_LABEL] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
95}; 94};
96 95
@@ -536,9 +535,6 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh)
536 if (tb[IFA_BROADCAST]) 535 if (tb[IFA_BROADCAST])
537 ifa->ifa_broadcast = nla_get_be32(tb[IFA_BROADCAST]); 536 ifa->ifa_broadcast = nla_get_be32(tb[IFA_BROADCAST]);
538 537
539 if (tb[IFA_ANYCAST])
540 ifa->ifa_anycast = nla_get_be32(tb[IFA_ANYCAST]);
541
542 if (tb[IFA_LABEL]) 538 if (tb[IFA_LABEL])
543 nla_strlcpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ); 539 nla_strlcpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ);
544 else 540 else
@@ -745,7 +741,6 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
745 break; 741 break;
746 inet_del_ifa(in_dev, ifap, 0); 742 inet_del_ifa(in_dev, ifap, 0);
747 ifa->ifa_broadcast = 0; 743 ifa->ifa_broadcast = 0;
748 ifa->ifa_anycast = 0;
749 ifa->ifa_scope = 0; 744 ifa->ifa_scope = 0;
750 } 745 }
751 746
@@ -1113,7 +1108,6 @@ static inline size_t inet_nlmsg_size(void)
1113 + nla_total_size(4) /* IFA_ADDRESS */ 1108 + nla_total_size(4) /* IFA_ADDRESS */
1114 + nla_total_size(4) /* IFA_LOCAL */ 1109 + nla_total_size(4) /* IFA_LOCAL */
1115 + nla_total_size(4) /* IFA_BROADCAST */ 1110 + nla_total_size(4) /* IFA_BROADCAST */
1116 + nla_total_size(4) /* IFA_ANYCAST */
1117 + nla_total_size(IFNAMSIZ); /* IFA_LABEL */ 1111 + nla_total_size(IFNAMSIZ); /* IFA_LABEL */
1118} 1112}
1119 1113
@@ -1143,9 +1137,6 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
1143 if (ifa->ifa_broadcast) 1137 if (ifa->ifa_broadcast)
1144 NLA_PUT_BE32(skb, IFA_BROADCAST, ifa->ifa_broadcast); 1138 NLA_PUT_BE32(skb, IFA_BROADCAST, ifa->ifa_broadcast);
1145 1139
1146 if (ifa->ifa_anycast)
1147 NLA_PUT_BE32(skb, IFA_ANYCAST, ifa->ifa_anycast);
1148
1149 if (ifa->ifa_label[0]) 1140 if (ifa->ifa_label[0])
1150 NLA_PUT_STRING(skb, IFA_LABEL, ifa->ifa_label); 1141 NLA_PUT_STRING(skb, IFA_LABEL, ifa->ifa_label);
1151 1142
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 0f1557a4ac7a..0b2ac6a3d903 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -506,7 +506,6 @@ const struct nla_policy rtm_ipv4_policy[RTA_MAX+1] = {
506 [RTA_PREFSRC] = { .type = NLA_U32 }, 506 [RTA_PREFSRC] = { .type = NLA_U32 },
507 [RTA_METRICS] = { .type = NLA_NESTED }, 507 [RTA_METRICS] = { .type = NLA_NESTED },
508 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) }, 508 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
509 [RTA_PROTOINFO] = { .type = NLA_U32 },
510 [RTA_FLOW] = { .type = NLA_U32 }, 509 [RTA_FLOW] = { .type = NLA_U32 },
511}; 510};
512 511
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index fead049daf43..e7e091d365ff 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -608,6 +608,14 @@ static void raw_close(struct sock *sk, long timeout)
608 sk_common_release(sk); 608 sk_common_release(sk);
609} 609}
610 610
611static int raw_destroy(struct sock *sk)
612{
613 lock_sock(sk);
614 ip_flush_pending_frames(sk);
615 release_sock(sk);
616 return 0;
617}
618
611/* This gets rid of all the nasties in af_inet. -DaveM */ 619/* This gets rid of all the nasties in af_inet. -DaveM */
612static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) 620static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
613{ 621{
@@ -820,6 +828,7 @@ struct proto raw_prot = {
820 .name = "RAW", 828 .name = "RAW",
821 .owner = THIS_MODULE, 829 .owner = THIS_MODULE,
822 .close = raw_close, 830 .close = raw_close,
831 .destroy = raw_destroy,
823 .connect = ip4_datagram_connect, 832 .connect = ip4_datagram_connect,
824 .disconnect = udp_disconnect, 833 .disconnect = udp_disconnect,
825 .ioctl = raw_ioctl, 834 .ioctl = raw_ioctl,
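raw_destroy() is the "Raw socket leak" fix: a datagram corked with MSG_MORE sits on the socket's pending queue holding dst and device references, and nothing flushed it if the socket was closed before the datagram was completed. A userspace sketch of the sequence the new .destroy hook covers; it needs CAP_NET_RAW, and the destination and payload are arbitrary.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/ip_icmp.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);
	struct sockaddr_in dst = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
	};
	struct icmphdr icmp = { .type = ICMP_ECHO };

	if (fd < 0)
		return 1;

	/* MSG_MORE corks the frame inside the kernel ... */
	sendto(fd, &icmp, sizeof(icmp), MSG_MORE,
	       (struct sockaddr *)&dst, sizeof(dst));

	/* ... and closing without completing it used to leave the queued
	 * frame (and its dst reference) behind; raw_destroy() now flushes
	 * it when the socket is torn down.
	 */
	close(fd);
	return 0;
}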
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index df41026b60db..96be336064fb 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1792,7 +1792,7 @@ static int __mkroute_input(struct sk_buff *skb,
1792 if (err) 1792 if (err)
1793 flags |= RTCF_DIRECTSRC; 1793 flags |= RTCF_DIRECTSRC;
1794 1794
1795 if (out_dev == in_dev && err && !(flags & RTCF_MASQ) && 1795 if (out_dev == in_dev && err &&
1796 (IN_DEV_SHARED_MEDIA(out_dev) || 1796 (IN_DEV_SHARED_MEDIA(out_dev) ||
1797 inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res)))) 1797 inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
1798 flags |= RTCF_DOREDIRECT; 1798 flags |= RTCF_DOREDIRECT;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index f88653138621..ab66683b8043 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1227,7 +1227,14 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1227 copied += used; 1227 copied += used;
1228 offset += used; 1228 offset += used;
1229 } 1229 }
1230 if (offset != skb->len) 1230 /*
1231 * If recv_actor drops the lock (e.g. TCP splice
1232 * receive) the skb pointer might be invalid when
1233 * getting here: tcp_collapse might have deleted it
1234 * while aggregating skbs from the socket queue.
1235 */
1236 skb = tcp_recv_skb(sk, seq-1, &offset);
1237 if (!skb || (offset+1 != skb->len))
1231 break; 1238 break;
1232 } 1239 }
1233 if (tcp_hdr(skb)->fin) { 1240 if (tcp_hdr(skb)->fin) {
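The tcp_read_sock() hunk guards against recv_actor dropping the socket lock, as the TCP splice receive path does: while the lock is gone, tcp_collapse() may merge or free the skb the loop was holding, so the code re-derives the current skb from the sequence number with tcp_recv_skb() instead of trusting the cached pointer. The same defensive shape in miniature; queue_lookup() and consume() are hypothetical stand-ins, not kernel functions.

static void drain_by_key(struct queue *q, u32 *seq)
{
	struct item *it;

	/* Re-look-up the element by its stable key after every callback
	 * that may have dropped the lock protecting the queue.
	 */
	while ((it = queue_lookup(q, *seq)) != NULL) {
		size_t used = consume(it);	/* may unlock/relock q */

		if (used == 0)
			break;
		*seq += used;
		/* 'it' may be stale here; the next iteration looks it up
		 * again by *seq rather than reusing the pointer.
		 */
	}
}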
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b54d9d37b636..eba873e9b560 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1392,9 +1392,9 @@ static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb,
1392 1392
1393 if (before(next_dup->start_seq, skip_to_seq)) { 1393 if (before(next_dup->start_seq, skip_to_seq)) {
1394 skb = tcp_sacktag_skip(skb, sk, next_dup->start_seq, fack_count); 1394 skb = tcp_sacktag_skip(skb, sk, next_dup->start_seq, fack_count);
1395 tcp_sacktag_walk(skb, sk, NULL, 1395 skb = tcp_sacktag_walk(skb, sk, NULL,
1396 next_dup->start_seq, next_dup->end_seq, 1396 next_dup->start_seq, next_dup->end_seq,
1397 1, fack_count, reord, flag); 1397 1, fack_count, reord, flag);
1398 } 1398 }
1399 1399
1400 return skb; 1400 return skb;
@@ -2483,6 +2483,20 @@ static inline void tcp_complete_cwr(struct sock *sk)
2483 tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR); 2483 tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
2484} 2484}
2485 2485
2486static void tcp_try_keep_open(struct sock *sk)
2487{
2488 struct tcp_sock *tp = tcp_sk(sk);
2489 int state = TCP_CA_Open;
2490
2491 if (tcp_left_out(tp) || tp->retrans_out || tp->undo_marker)
2492 state = TCP_CA_Disorder;
2493
2494 if (inet_csk(sk)->icsk_ca_state != state) {
2495 tcp_set_ca_state(sk, state);
2496 tp->high_seq = tp->snd_nxt;
2497 }
2498}
2499
2486static void tcp_try_to_open(struct sock *sk, int flag) 2500static void tcp_try_to_open(struct sock *sk, int flag)
2487{ 2501{
2488 struct tcp_sock *tp = tcp_sk(sk); 2502 struct tcp_sock *tp = tcp_sk(sk);
@@ -2496,15 +2510,7 @@ static void tcp_try_to_open(struct sock *sk, int flag)
2496 tcp_enter_cwr(sk, 1); 2510 tcp_enter_cwr(sk, 1);
2497 2511
2498 if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) { 2512 if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
2499 int state = TCP_CA_Open; 2513 tcp_try_keep_open(sk);
2500
2501 if (tcp_left_out(tp) || tp->retrans_out || tp->undo_marker)
2502 state = TCP_CA_Disorder;
2503
2504 if (inet_csk(sk)->icsk_ca_state != state) {
2505 tcp_set_ca_state(sk, state);
2506 tp->high_seq = tp->snd_nxt;
2507 }
2508 tcp_moderate_cwnd(tp); 2514 tcp_moderate_cwnd(tp);
2509 } else { 2515 } else {
2510 tcp_cwnd_down(sk, flag); 2516 tcp_cwnd_down(sk, flag);
@@ -3310,8 +3316,11 @@ no_queue:
3310 return 1; 3316 return 1;
3311 3317
3312old_ack: 3318old_ack:
3313 if (TCP_SKB_CB(skb)->sacked) 3319 if (TCP_SKB_CB(skb)->sacked) {
3314 tcp_sacktag_write_queue(sk, skb, prior_snd_una); 3320 tcp_sacktag_write_queue(sk, skb, prior_snd_una);
3321 if (icsk->icsk_ca_state == TCP_CA_Open)
3322 tcp_try_keep_open(sk);
3323 }
3315 3324
3316uninteresting_ack: 3325uninteresting_ack:
3317 SOCK_DEBUG(sk, "Ack %u out of %u:%u\n", ack, tp->snd_una, tp->snd_nxt); 3326 SOCK_DEBUG(sk, "Ack %u out of %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index e399bde7813a..ad993ecb4810 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2131,6 +2131,8 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
2131 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2131 TCP_SKB_CB(skb)->when = tcp_time_stamp;
2132 if (tcp_transmit_skb(sk, skb, 0, priority)) 2132 if (tcp_transmit_skb(sk, skb, 0, priority))
2133 NET_INC_STATS(LINUX_MIB_TCPABORTFAILED); 2133 NET_INC_STATS(LINUX_MIB_TCPABORTFAILED);
2134
2135 TCP_INC_STATS(TCP_MIB_OUTRSTS);
2134} 2136}
2135 2137
2136/* WARNING: This routine must only be called when we have already sent 2138/* WARNING: This routine must only be called when we have already sent
diff --git a/net/ipv4/tunnel4.c b/net/ipv4/tunnel4.c
index d3b709a6f264..cb1f0e83830b 100644
--- a/net/ipv4/tunnel4.c
+++ b/net/ipv4/tunnel4.c
@@ -97,7 +97,7 @@ static int tunnel64_rcv(struct sk_buff *skb)
97{ 97{
98 struct xfrm_tunnel *handler; 98 struct xfrm_tunnel *handler;
99 99
100 if (!pskb_may_pull(skb, sizeof(struct iphdr))) 100 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
101 goto drop; 101 goto drop;
102 102
103 for (handler = tunnel64_handlers; handler; handler = handler->next) 103 for (handler = tunnel64_handlers; handler; handler = handler->next)
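tunnel64_rcv() is the receive path for IPv6-in-IPv4 (protocol 41), so the header that must be made linear before the handlers look at it is the inner IPv6 header; the change above simply asks pskb_may_pull() for sizeof(struct ipv6hdr) instead of an IPv4 header's worth. The guard idiom, sketched with an illustrative handler body:

static int ipv6_in_ipv4_rcv(struct sk_buff *skb)
{
	/* Ensure the full inner IPv6 header is in the linear area before
	 * any handler dereferences it.
	 */
	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
		goto drop;

	/* ... walk the registered tunnel handlers, as tunnel64_rcv() does ... */
	return 0;

drop:
	kfree_skb(skb);
	return 0;
}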
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index db1cb7c96d63..56fcda3694ba 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -420,7 +420,7 @@ void udp_err(struct sk_buff *skb, u32 info)
420/* 420/*
421 * Throw away all pending data and cancel the corking. Socket is locked. 421 * Throw away all pending data and cancel the corking. Socket is locked.
422 */ 422 */
423static void udp_flush_pending_frames(struct sock *sk) 423void udp_flush_pending_frames(struct sock *sk)
424{ 424{
425 struct udp_sock *up = udp_sk(sk); 425 struct udp_sock *up = udp_sk(sk);
426 426
@@ -430,6 +430,7 @@ static void udp_flush_pending_frames(struct sock *sk)
430 ip_flush_pending_frames(sk); 430 ip_flush_pending_frames(sk);
431 } 431 }
432} 432}
433EXPORT_SYMBOL(udp_flush_pending_frames);
433 434
434/** 435/**
435 * udp4_hwcsum_outgoing - handle outgoing HW checksumming 436 * udp4_hwcsum_outgoing - handle outgoing HW checksumming
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 3a835578fd1c..147588f4c7c0 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -731,8 +731,13 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
731 onlink = -1; 731 onlink = -1;
732 732
733 spin_lock(&ifa->lock); 733 spin_lock(&ifa->lock);
734 lifetime = min_t(unsigned long, 734
735 ifa->valid_lft, 0x7fffffffUL/HZ); 735 lifetime = addrconf_timeout_fixup(ifa->valid_lft, HZ);
736 /*
737 * Note: Because this address is
738 * not permanent, lifetime <
739 * LONG_MAX / HZ here.
740 */
736 if (time_before(expires, 741 if (time_before(expires,
737 ifa->tstamp + lifetime * HZ)) 742 ifa->tstamp + lifetime * HZ))
738 expires = ifa->tstamp + lifetime * HZ; 743 expires = ifa->tstamp + lifetime * HZ;
@@ -1722,7 +1727,6 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
1722 __u32 valid_lft; 1727 __u32 valid_lft;
1723 __u32 prefered_lft; 1728 __u32 prefered_lft;
1724 int addr_type; 1729 int addr_type;
1725 unsigned long rt_expires;
1726 struct inet6_dev *in6_dev; 1730 struct inet6_dev *in6_dev;
1727 1731
1728 pinfo = (struct prefix_info *) opt; 1732 pinfo = (struct prefix_info *) opt;
@@ -1764,28 +1768,23 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
1764 * 2) Configure prefixes with the auto flag set 1768 * 2) Configure prefixes with the auto flag set
1765 */ 1769 */
1766 1770
1767 if (valid_lft == INFINITY_LIFE_TIME) 1771 if (pinfo->onlink) {
1768 rt_expires = ~0UL; 1772 struct rt6_info *rt;
1769 else if (valid_lft >= 0x7FFFFFFF/HZ) { 1773 unsigned long rt_expires;
1774
1770 /* Avoid arithmetic overflow. Really, we could 1775 /* Avoid arithmetic overflow. Really, we could
1771 * save rt_expires in seconds, likely valid_lft, 1776 * save rt_expires in seconds, likely valid_lft,
1772 * but it would require division in fib gc, that it 1777 * but it would require division in fib gc, that it
1773 * not good. 1778 * not good.
1774 */ 1779 */
1775 rt_expires = 0x7FFFFFFF - (0x7FFFFFFF % HZ); 1780 if (HZ > USER_HZ)
1776 } else 1781 rt_expires = addrconf_timeout_fixup(valid_lft, HZ);
1777 rt_expires = valid_lft * HZ; 1782 else
1783 rt_expires = addrconf_timeout_fixup(valid_lft, USER_HZ);
1778 1784
1779 /* 1785 if (addrconf_finite_timeout(rt_expires))
1780 * We convert this (in jiffies) to clock_t later. 1786 rt_expires *= HZ;
1781 * Avoid arithmetic overflow there as well.
1782 * Overflow can happen only if HZ < USER_HZ.
1783 */
1784 if (HZ < USER_HZ && ~rt_expires && rt_expires > 0x7FFFFFFF / USER_HZ)
1785 rt_expires = 0x7FFFFFFF / USER_HZ;
1786 1787
1787 if (pinfo->onlink) {
1788 struct rt6_info *rt;
1789 rt = rt6_lookup(dev_net(dev), &pinfo->prefix, NULL, 1788 rt = rt6_lookup(dev_net(dev), &pinfo->prefix, NULL,
1790 dev->ifindex, 1); 1789 dev->ifindex, 1);
1791 1790
@@ -1794,7 +1793,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
1794 if (valid_lft == 0) { 1793 if (valid_lft == 0) {
1795 ip6_del_rt(rt); 1794 ip6_del_rt(rt);
1796 rt = NULL; 1795 rt = NULL;
1797 } else if (~rt_expires) { 1796 } else if (addrconf_finite_timeout(rt_expires)) {
1798 /* not infinity */ 1797 /* not infinity */
1799 rt->rt6i_expires = jiffies + rt_expires; 1798 rt->rt6i_expires = jiffies + rt_expires;
1800 rt->rt6i_flags |= RTF_EXPIRES; 1799 rt->rt6i_flags |= RTF_EXPIRES;
@@ -1803,9 +1802,9 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
1803 rt->rt6i_expires = 0; 1802 rt->rt6i_expires = 0;
1804 } 1803 }
1805 } else if (valid_lft) { 1804 } else if (valid_lft) {
1806 int flags = RTF_ADDRCONF | RTF_PREFIX_RT;
1807 clock_t expires = 0; 1805 clock_t expires = 0;
1808 if (~rt_expires) { 1806 int flags = RTF_ADDRCONF | RTF_PREFIX_RT;
1807 if (addrconf_finite_timeout(rt_expires)) {
1809 /* not infinity */ 1808 /* not infinity */
1810 flags |= RTF_EXPIRES; 1809 flags |= RTF_EXPIRES;
1811 expires = jiffies_to_clock_t(rt_expires); 1810 expires = jiffies_to_clock_t(rt_expires);
@@ -2027,7 +2026,7 @@ err_exit:
2027 * Manual configuration of address on an interface 2026 * Manual configuration of address on an interface
2028 */ 2027 */
2029static int inet6_addr_add(struct net *net, int ifindex, struct in6_addr *pfx, 2028static int inet6_addr_add(struct net *net, int ifindex, struct in6_addr *pfx,
2030 int plen, __u8 ifa_flags, __u32 prefered_lft, 2029 unsigned int plen, __u8 ifa_flags, __u32 prefered_lft,
2031 __u32 valid_lft) 2030 __u32 valid_lft)
2032{ 2031{
2033 struct inet6_ifaddr *ifp; 2032 struct inet6_ifaddr *ifp;
@@ -2036,9 +2035,13 @@ static int inet6_addr_add(struct net *net, int ifindex, struct in6_addr *pfx,
2036 int scope; 2035 int scope;
2037 u32 flags; 2036 u32 flags;
2038 clock_t expires; 2037 clock_t expires;
2038 unsigned long timeout;
2039 2039
2040 ASSERT_RTNL(); 2040 ASSERT_RTNL();
2041 2041
2042 if (plen > 128)
2043 return -EINVAL;
2044
2042 /* check the lifetime */ 2045 /* check the lifetime */
2043 if (!valid_lft || prefered_lft > valid_lft) 2046 if (!valid_lft || prefered_lft > valid_lft)
2044 return -EINVAL; 2047 return -EINVAL;
@@ -2052,22 +2055,23 @@ static int inet6_addr_add(struct net *net, int ifindex, struct in6_addr *pfx,
2052 2055
2053 scope = ipv6_addr_scope(pfx); 2056 scope = ipv6_addr_scope(pfx);
2054 2057
2055 if (valid_lft == INFINITY_LIFE_TIME) { 2058 timeout = addrconf_timeout_fixup(valid_lft, HZ);
2056 ifa_flags |= IFA_F_PERMANENT; 2059 if (addrconf_finite_timeout(timeout)) {
2057 flags = 0; 2060 expires = jiffies_to_clock_t(timeout * HZ);
2058 expires = 0; 2061 valid_lft = timeout;
2059 } else {
2060 if (valid_lft >= 0x7FFFFFFF/HZ)
2061 valid_lft = 0x7FFFFFFF/HZ;
2062 flags = RTF_EXPIRES; 2062 flags = RTF_EXPIRES;
2063 expires = jiffies_to_clock_t(valid_lft * HZ); 2063 } else {
2064 expires = 0;
2065 flags = 0;
2066 ifa_flags |= IFA_F_PERMANENT;
2064 } 2067 }
2065 2068
2066 if (prefered_lft == 0) 2069 timeout = addrconf_timeout_fixup(prefered_lft, HZ);
2067 ifa_flags |= IFA_F_DEPRECATED; 2070 if (addrconf_finite_timeout(timeout)) {
2068 else if ((prefered_lft >= 0x7FFFFFFF/HZ) && 2071 if (timeout == 0)
2069 (prefered_lft != INFINITY_LIFE_TIME)) 2072 ifa_flags |= IFA_F_DEPRECATED;
2070 prefered_lft = 0x7FFFFFFF/HZ; 2073 prefered_lft = timeout;
2074 }
2071 2075
2072 ifp = ipv6_add_addr(idev, pfx, plen, scope, ifa_flags); 2076 ifp = ipv6_add_addr(idev, pfx, plen, scope, ifa_flags);
2073 2077
@@ -2095,12 +2099,15 @@ static int inet6_addr_add(struct net *net, int ifindex, struct in6_addr *pfx,
2095} 2099}
2096 2100
2097static int inet6_addr_del(struct net *net, int ifindex, struct in6_addr *pfx, 2101static int inet6_addr_del(struct net *net, int ifindex, struct in6_addr *pfx,
2098 int plen) 2102 unsigned int plen)
2099{ 2103{
2100 struct inet6_ifaddr *ifp; 2104 struct inet6_ifaddr *ifp;
2101 struct inet6_dev *idev; 2105 struct inet6_dev *idev;
2102 struct net_device *dev; 2106 struct net_device *dev;
2103 2107
2108 if (plen > 128)
2109 return -EINVAL;
2110
2104 dev = __dev_get_by_index(net, ifindex); 2111 dev = __dev_get_by_index(net, ifindex);
2105 if (!dev) 2112 if (!dev)
2106 return -ENODEV; 2113 return -ENODEV;
@@ -3169,26 +3176,28 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, u8 ifa_flags,
3169{ 3176{
3170 u32 flags; 3177 u32 flags;
3171 clock_t expires; 3178 clock_t expires;
3179 unsigned long timeout;
3172 3180
3173 if (!valid_lft || (prefered_lft > valid_lft)) 3181 if (!valid_lft || (prefered_lft > valid_lft))
3174 return -EINVAL; 3182 return -EINVAL;
3175 3183
3176 if (valid_lft == INFINITY_LIFE_TIME) { 3184 timeout = addrconf_timeout_fixup(valid_lft, HZ);
3177 ifa_flags |= IFA_F_PERMANENT; 3185 if (addrconf_finite_timeout(timeout)) {
3178 flags = 0; 3186 expires = jiffies_to_clock_t(timeout * HZ);
3179 expires = 0; 3187 valid_lft = timeout;
3180 } else {
3181 if (valid_lft >= 0x7FFFFFFF/HZ)
3182 valid_lft = 0x7FFFFFFF/HZ;
3183 flags = RTF_EXPIRES; 3188 flags = RTF_EXPIRES;
3184 expires = jiffies_to_clock_t(valid_lft * HZ); 3189 } else {
3190 expires = 0;
3191 flags = 0;
3192 ifa_flags |= IFA_F_PERMANENT;
3185 } 3193 }
3186 3194
3187 if (prefered_lft == 0) 3195 timeout = addrconf_timeout_fixup(prefered_lft, HZ);
3188 ifa_flags |= IFA_F_DEPRECATED; 3196 if (addrconf_finite_timeout(timeout)) {
3189 else if ((prefered_lft >= 0x7FFFFFFF/HZ) && 3197 if (timeout == 0)
3190 (prefered_lft != INFINITY_LIFE_TIME)) 3198 ifa_flags |= IFA_F_DEPRECATED;
3191 prefered_lft = 0x7FFFFFFF/HZ; 3199 prefered_lft = timeout;
3200 }
3192 3201
3193 spin_lock_bh(&ifp->lock); 3202 spin_lock_bh(&ifp->lock);
3194 ifp->flags = (ifp->flags & ~(IFA_F_DEPRECATED | IFA_F_PERMANENT | IFA_F_NODAD | IFA_F_HOMEADDRESS)) | ifa_flags; 3203 ifp->flags = (ifp->flags & ~(IFA_F_DEPRECATED | IFA_F_PERMANENT | IFA_F_NODAD | IFA_F_HOMEADDRESS)) | ifa_flags;
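Every lifetime conversion in this file (and in net/ipv6/route.c below) now goes through two helpers added to include/net/addrconf.h; the header hunk is in the diffstat but not shown in this excerpt. Judging from the call sites, addrconf_timeout_fixup() keeps 0xffffffff as the "infinite" sentinel (mapped to ~0UL) and clamps finite values so that multiplying by the given unit (HZ or USER_HZ) cannot overflow a long, while addrconf_finite_timeout() just tests for the infinite sentinel. A sketch consistent with that usage, to be read as an approximation rather than the actual header content:

/* Approximation of the new helpers, inferred from how the callers use them. */
static inline unsigned long addrconf_timeout_fixup(u32 timeout, unsigned int unit)
{
	if (timeout == 0xffffffff)
		return ~0UL;			/* infinite lifetime */

	if (timeout > LONG_MAX / unit)		/* keep timeout * unit <= LONG_MAX */
		return LONG_MAX / unit;

	return timeout;
}

static inline int addrconf_finite_timeout(unsigned long timeout)
{
	return ~timeout != 0;			/* false only for the infinite value */
}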
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 94fa6ae77cfe..b9c2de84a8a2 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -496,7 +496,8 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
496 return 0; 496 return 0;
497} 497}
498 498
499int datagram_send_ctl(struct msghdr *msg, struct flowi *fl, 499int datagram_send_ctl(struct net *net,
500 struct msghdr *msg, struct flowi *fl,
500 struct ipv6_txoptions *opt, 501 struct ipv6_txoptions *opt,
501 int *hlimit, int *tclass) 502 int *hlimit, int *tclass)
502{ 503{
@@ -509,7 +510,6 @@ int datagram_send_ctl(struct msghdr *msg, struct flowi *fl,
509 510
510 for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) { 511 for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
511 int addr_type; 512 int addr_type;
512 struct net_device *dev = NULL;
513 513
514 if (!CMSG_OK(msg, cmsg)) { 514 if (!CMSG_OK(msg, cmsg)) {
515 err = -EINVAL; 515 err = -EINVAL;
@@ -522,6 +522,9 @@ int datagram_send_ctl(struct msghdr *msg, struct flowi *fl,
522 switch (cmsg->cmsg_type) { 522 switch (cmsg->cmsg_type) {
523 case IPV6_PKTINFO: 523 case IPV6_PKTINFO:
524 case IPV6_2292PKTINFO: 524 case IPV6_2292PKTINFO:
525 {
526 struct net_device *dev = NULL;
527
525 if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct in6_pktinfo))) { 528 if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct in6_pktinfo))) {
526 err = -EINVAL; 529 err = -EINVAL;
527 goto exit_f; 530 goto exit_f;
@@ -535,32 +538,32 @@ int datagram_send_ctl(struct msghdr *msg, struct flowi *fl,
535 fl->oif = src_info->ipi6_ifindex; 538 fl->oif = src_info->ipi6_ifindex;
536 } 539 }
537 540
538 addr_type = ipv6_addr_type(&src_info->ipi6_addr); 541 addr_type = __ipv6_addr_type(&src_info->ipi6_addr);
539 542
540 if (addr_type == IPV6_ADDR_ANY) 543 if (fl->oif) {
541 break; 544 dev = dev_get_by_index(net, fl->oif);
545 if (!dev)
546 return -ENODEV;
547 } else if (addr_type & IPV6_ADDR_LINKLOCAL)
548 return -EINVAL;
542 549
543 if (addr_type & IPV6_ADDR_LINKLOCAL) { 550 if (addr_type != IPV6_ADDR_ANY) {
544 if (!src_info->ipi6_ifindex) 551 int strict = __ipv6_addr_src_scope(addr_type) <= IPV6_ADDR_SCOPE_LINKLOCAL;
545 return -EINVAL; 552 if (!ipv6_chk_addr(net, &src_info->ipi6_addr,
546 else { 553 strict ? dev : NULL, 0))
547 dev = dev_get_by_index(&init_net, src_info->ipi6_ifindex); 554 err = -EINVAL;
548 if (!dev) 555 else
549 return -ENODEV; 556 ipv6_addr_copy(&fl->fl6_src, &src_info->ipi6_addr);
550 }
551 }
552 if (!ipv6_chk_addr(&init_net, &src_info->ipi6_addr,
553 dev, 0)) {
554 if (dev)
555 dev_put(dev);
556 err = -EINVAL;
557 goto exit_f;
558 } 557 }
558
559 if (dev) 559 if (dev)
560 dev_put(dev); 560 dev_put(dev);
561 561
562 ipv6_addr_copy(&fl->fl6_src, &src_info->ipi6_addr); 562 if (err)
563 goto exit_f;
564
563 break; 565 break;
566 }
564 567
565 case IPV6_FLOWINFO: 568 case IPV6_FLOWINFO:
566 if (cmsg->cmsg_len < CMSG_LEN(4)) { 569 if (cmsg->cmsg_len < CMSG_LEN(4)) {
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index eb7a940310f4..37a4e777e347 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -354,7 +354,7 @@ fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval,
354 msg.msg_control = (void*)(fl->opt+1); 354 msg.msg_control = (void*)(fl->opt+1);
355 flowi.oif = 0; 355 flowi.oif = 0;
356 356
357 err = datagram_send_ctl(&msg, &flowi, fl->opt, &junk, &junk); 357 err = datagram_send_ctl(net, &msg, &flowi, fl->opt, &junk, &junk);
358 if (err) 358 if (err)
359 goto done; 359 goto done;
360 err = -EINVAL; 360 err = -EINVAL;
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 56d55fecf8ec..26b83e512a09 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -161,9 +161,17 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
161 struct ipv6_txoptions *opt; 161 struct ipv6_txoptions *opt;
162 struct sk_buff *pktopt; 162 struct sk_buff *pktopt;
163 163
164 if (sk->sk_protocol != IPPROTO_UDP && 164 if (sk->sk_type == SOCK_RAW)
165 sk->sk_protocol != IPPROTO_UDPLITE && 165 break;
166 sk->sk_protocol != IPPROTO_TCP) 166
167 if (sk->sk_protocol == IPPROTO_UDP ||
168 sk->sk_protocol == IPPROTO_UDPLITE) {
169 struct udp_sock *up = udp_sk(sk);
170 if (up->pending == AF_INET6) {
171 retv = -EBUSY;
172 break;
173 }
174 } else if (sk->sk_protocol != IPPROTO_TCP)
167 break; 175 break;
168 176
169 if (sk->sk_state != TCP_ESTABLISHED) { 177 if (sk->sk_state != TCP_ESTABLISHED) {
@@ -416,7 +424,7 @@ sticky_done:
416 msg.msg_controllen = optlen; 424 msg.msg_controllen = optlen;
417 msg.msg_control = (void*)(opt+1); 425 msg.msg_control = (void*)(opt+1);
418 426
419 retv = datagram_send_ctl(&msg, &fl, opt, &junk, &junk); 427 retv = datagram_send_ctl(net, &msg, &fl, opt, &junk, &junk);
420 if (retv) 428 if (retv)
421 goto done; 429 goto done;
422update: 430update:
@@ -832,7 +840,7 @@ static int ipv6_getsockopt_sticky(struct sock *sk, struct ipv6_txoptions *opt,
832 len = min_t(unsigned int, len, ipv6_optlen(hdr)); 840 len = min_t(unsigned int, len, ipv6_optlen(hdr));
833 if (copy_to_user(optval, hdr, len)) 841 if (copy_to_user(optval, hdr, len))
834 return -EFAULT; 842 return -EFAULT;
835 return ipv6_optlen(hdr); 843 return len;
836} 844}
837 845
838static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, 846static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
@@ -975,6 +983,9 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
975 len = ipv6_getsockopt_sticky(sk, np->opt, 983 len = ipv6_getsockopt_sticky(sk, np->opt,
976 optname, optval, len); 984 optname, optval, len);
977 release_sock(sk); 985 release_sock(sk);
986 /* check if ipv6_getsockopt_sticky() returns err code */
987 if (len < 0)
988 return len;
978 return put_user(len, optlen); 989 return put_user(len, optlen);
979 } 990 }
980 991
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 2dccad48058c..e65e26e210ee 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -209,7 +209,9 @@ fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst)
209 arg.dst = dst; 209 arg.dst = dst;
210 hash = ip6qhashfn(id, src, dst); 210 hash = ip6qhashfn(id, src, dst);
211 211
212 local_bh_disable();
212 q = inet_frag_find(&nf_init_frags, &nf_frags, &arg, hash); 213 q = inet_frag_find(&nf_init_frags, &nf_frags, &arg, hash);
214 local_bh_enable();
213 if (q == NULL) 215 if (q == NULL)
214 goto oom; 216 goto oom;
215 217
@@ -638,10 +640,10 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
638 goto ret_orig; 640 goto ret_orig;
639 } 641 }
640 642
641 spin_lock(&fq->q.lock); 643 spin_lock_bh(&fq->q.lock);
642 644
643 if (nf_ct_frag6_queue(fq, clone, fhdr, nhoff) < 0) { 645 if (nf_ct_frag6_queue(fq, clone, fhdr, nhoff) < 0) {
644 spin_unlock(&fq->q.lock); 646 spin_unlock_bh(&fq->q.lock);
645 pr_debug("Can't insert skb to queue\n"); 647 pr_debug("Can't insert skb to queue\n");
646 fq_put(fq); 648 fq_put(fq);
647 goto ret_orig; 649 goto ret_orig;
@@ -653,7 +655,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
653 if (ret_skb == NULL) 655 if (ret_skb == NULL)
654 pr_debug("Can't reassemble fragmented packets\n"); 656 pr_debug("Can't reassemble fragmented packets\n");
655 } 657 }
656 spin_unlock(&fq->q.lock); 658 spin_unlock_bh(&fq->q.lock);
657 659
658 fq_put(fq); 660 fq_put(fq);
659 return ret_skb; 661 return ret_skb;
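The conntrack reassembly hunks switch the fragment-queue lock to the _bh variants and bracket inet_frag_find() with local_bh_disable(): the same queues are touched from softirq context on packet receive, so a process-context path taking the plain spinlock can deadlock against a softirq on the same CPU. The rule in isolation, with an illustrative data structure:

struct frag_bucket {
	spinlock_t lock;		/* also taken from softirq context */
	struct hlist_head chain;
};

static void insert_from_process_context(struct frag_bucket *b,
					struct hlist_node *n)
{
	spin_lock_bh(&b->lock);		/* disable softirqs on this CPU first */
	hlist_add_head(n, &b->chain);
	spin_unlock_bh(&b->lock);
}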
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 232e0dc45bf5..8fee9a15b2d3 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -813,7 +813,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
813 memset(opt, 0, sizeof(struct ipv6_txoptions)); 813 memset(opt, 0, sizeof(struct ipv6_txoptions));
814 opt->tot_len = sizeof(struct ipv6_txoptions); 814 opt->tot_len = sizeof(struct ipv6_txoptions);
815 815
816 err = datagram_send_ctl(msg, &fl, opt, &hlimit, &tclass); 816 err = datagram_send_ctl(sock_net(sk), msg, &fl, opt, &hlimit, &tclass);
817 if (err < 0) { 817 if (err < 0) {
818 fl6_sock_release(flowlabel); 818 fl6_sock_release(flowlabel);
819 return err; 819 return err;
@@ -1164,6 +1164,14 @@ static void rawv6_close(struct sock *sk, long timeout)
1164 sk_common_release(sk); 1164 sk_common_release(sk);
1165} 1165}
1166 1166
1167static int raw6_destroy(struct sock *sk)
1168{
1169 lock_sock(sk);
1170 ip6_flush_pending_frames(sk);
1171 release_sock(sk);
1172 return 0;
1173}
1174
1167static int rawv6_init_sk(struct sock *sk) 1175static int rawv6_init_sk(struct sock *sk)
1168{ 1176{
1169 struct raw6_sock *rp = raw6_sk(sk); 1177 struct raw6_sock *rp = raw6_sk(sk);
@@ -1187,6 +1195,7 @@ struct proto rawv6_prot = {
1187 .name = "RAWv6", 1195 .name = "RAWv6",
1188 .owner = THIS_MODULE, 1196 .owner = THIS_MODULE,
1189 .close = rawv6_close, 1197 .close = rawv6_close,
1198 .destroy = raw6_destroy,
1190 .connect = ip6_datagram_connect, 1199 .connect = ip6_datagram_connect,
1191 .disconnect = udp_disconnect, 1200 .disconnect = udp_disconnect,
1192 .ioctl = rawv6_ioctl, 1201 .ioctl = rawv6_ioctl,
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 48534c6c0735..220cffe9e63b 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -446,7 +446,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
446 struct route_info *rinfo = (struct route_info *) opt; 446 struct route_info *rinfo = (struct route_info *) opt;
447 struct in6_addr prefix_buf, *prefix; 447 struct in6_addr prefix_buf, *prefix;
448 unsigned int pref; 448 unsigned int pref;
449 u32 lifetime; 449 unsigned long lifetime;
450 struct rt6_info *rt; 450 struct rt6_info *rt;
451 451
452 if (len < sizeof(struct route_info)) { 452 if (len < sizeof(struct route_info)) {
@@ -472,13 +472,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
472 if (pref == ICMPV6_ROUTER_PREF_INVALID) 472 if (pref == ICMPV6_ROUTER_PREF_INVALID)
473 pref = ICMPV6_ROUTER_PREF_MEDIUM; 473 pref = ICMPV6_ROUTER_PREF_MEDIUM;
474 474
475 lifetime = ntohl(rinfo->lifetime); 475 lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);
476 if (lifetime == 0xffffffff) {
477 /* infinity */
478 } else if (lifetime > 0x7fffffff/HZ - 1) {
479 /* Avoid arithmetic overflow */
480 lifetime = 0x7fffffff/HZ - 1;
481 }
482 476
483 if (rinfo->length == 3) 477 if (rinfo->length == 3)
484 prefix = (struct in6_addr *)rinfo->prefix; 478 prefix = (struct in6_addr *)rinfo->prefix;
@@ -506,7 +500,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
506 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref); 500 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
507 501
508 if (rt) { 502 if (rt) {
509 if (lifetime == 0xffffffff) { 503 if (!addrconf_finite_timeout(lifetime)) {
510 rt->rt6i_flags &= ~RTF_EXPIRES; 504 rt->rt6i_flags &= ~RTF_EXPIRES;
511 } else { 505 } else {
512 rt->rt6i_expires = jiffies + HZ * lifetime; 506 rt->rt6i_expires = jiffies + HZ * lifetime;
diff --git a/net/ipv6/tunnel6.c b/net/ipv6/tunnel6.c
index 6323921b40be..669f280989c3 100644
--- a/net/ipv6/tunnel6.c
+++ b/net/ipv6/tunnel6.c
@@ -109,7 +109,7 @@ static int tunnel46_rcv(struct sk_buff *skb)
109{ 109{
110 struct xfrm6_tunnel *handler; 110 struct xfrm6_tunnel *handler;
111 111
112 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) 112 if (!pskb_may_pull(skb, sizeof(struct iphdr)))
113 goto drop; 113 goto drop;
114 114
115 for (handler = tunnel46_handlers; handler; handler = handler->next) 115 for (handler = tunnel46_handlers; handler; handler = handler->next)
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 1fd784f3e2ec..dd309626ae9a 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -534,7 +534,9 @@ static void udp_v6_flush_pending_frames(struct sock *sk)
534{ 534{
535 struct udp_sock *up = udp_sk(sk); 535 struct udp_sock *up = udp_sk(sk);
536 536
537 if (up->pending) { 537 if (up->pending == AF_INET)
538 udp_flush_pending_frames(sk);
539 else if (up->pending) {
538 up->len = 0; 540 up->len = 0;
539 up->pending = 0; 541 up->pending = 0;
540 ip6_flush_pending_frames(sk); 542 ip6_flush_pending_frames(sk);
@@ -731,7 +733,7 @@ do_udp_sendmsg:
731 memset(opt, 0, sizeof(struct ipv6_txoptions)); 733 memset(opt, 0, sizeof(struct ipv6_txoptions));
732 opt->tot_len = sizeof(*opt); 734 opt->tot_len = sizeof(*opt);
733 735
734 err = datagram_send_ctl(msg, &fl, opt, &hlimit, &tclass); 736 err = datagram_send_ctl(sock_net(sk), msg, &fl, opt, &hlimit, &tclass);
735 if (err < 0) { 737 if (err < 0) {
736 fl6_sock_release(flowlabel); 738 fl6_sock_release(flowlabel);
737 return err; 739 return err;
@@ -848,12 +850,14 @@ do_append_data:
848 } else { 850 } else {
849 dst_release(dst); 851 dst_release(dst);
850 } 852 }
853 dst = NULL;
851 } 854 }
852 855
853 if (err > 0) 856 if (err > 0)
854 err = np->recverr ? net_xmit_errno(err) : 0; 857 err = np->recverr ? net_xmit_errno(err) : 0;
855 release_sock(sk); 858 release_sock(sk);
856out: 859out:
860 dst_release(dst);
857 fl6_sock_release(flowlabel); 861 fl6_sock_release(flowlabel);
858 if (!err) 862 if (!err)
859 return len; 863 return len;
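Besides routing flushes of IPv4-corked data to udp_flush_pending_frames(), the udpv6_sendmsg() hunk plugs a dst reference leak: the local dst pointer is cleared whenever ownership of the reference has moved on, and a single dst_release() at the out: label drops whatever is still held on every other exit; dst_release(NULL) is a no-op, which is what makes the unconditional call safe. The same cleanup shape reduced to a sketch; tx_ctx and the helper functions are made-up names.

static int send_one(struct tx_ctx *ctx)
{
	struct dst_entry *dst = NULL;
	int err;

	err = route_lookup(ctx, &dst);		/* takes a dst reference */
	if (err)
		goto out;

	if (cache_route(ctx, dst))		/* consumed the reference? */
		dst = NULL;			/* then stop tracking it here */

	err = transmit(ctx);
out:
	dst_release(dst);			/* no-op when dst == NULL */
	return err;
}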
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index ae54b20d0470..3eb5bcc75f99 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -1093,11 +1093,6 @@ static int irda_create(struct net *net, struct socket *sock, int protocol)
1093 1093
1094 init_waitqueue_head(&self->query_wait); 1094 init_waitqueue_head(&self->query_wait);
1095 1095
1096 /* Initialise networking socket struct */
1097 sock_init_data(sock, sk); /* Note : set sk->sk_refcnt to 1 */
1098 sk->sk_family = PF_IRDA;
1099 sk->sk_protocol = protocol;
1100
1101 switch (sock->type) { 1096 switch (sock->type) {
1102 case SOCK_STREAM: 1097 case SOCK_STREAM:
1103 sock->ops = &irda_stream_ops; 1098 sock->ops = &irda_stream_ops;
@@ -1124,13 +1119,20 @@ static int irda_create(struct net *net, struct socket *sock, int protocol)
1124 self->max_sdu_size_rx = TTP_SAR_UNBOUND; 1119 self->max_sdu_size_rx = TTP_SAR_UNBOUND;
1125 break; 1120 break;
1126 default: 1121 default:
1122 sk_free(sk);
1127 return -ESOCKTNOSUPPORT; 1123 return -ESOCKTNOSUPPORT;
1128 } 1124 }
1129 break; 1125 break;
1130 default: 1126 default:
1127 sk_free(sk);
1131 return -ESOCKTNOSUPPORT; 1128 return -ESOCKTNOSUPPORT;
1132 } 1129 }
1133 1130
1131 /* Initialise networking socket struct */
1132 sock_init_data(sock, sk); /* Note : set sk->sk_refcnt to 1 */
1133 sk->sk_family = PF_IRDA;
1134 sk->sk_protocol = protocol;
1135
1134 /* Register as a client with IrLMP */ 1136 /* Register as a client with IrLMP */
1135 self->ckey = irlmp_register_client(0, NULL, NULL, NULL); 1137 self->ckey = irlmp_register_client(0, NULL, NULL, NULL);
1136 self->mask.word = 0xffff; 1138 self->mask.word = 0xffff;
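The af_irda change is about ordering and error paths in irda_create(): socket-type validation now happens before sock_init_data(), and the rejection branches call sk_free() so the freshly allocated sock is not leaked when -ESOCKTNOSUPPORT is returned. The general rule it restores, sketched with illustrative names:

static int proto_create(struct socket *sock, struct sock *sk,
			int type, int protocol)
{
	/* Validate cheap parameters before initialising/publishing the
	 * object, and free it on every early rejection.
	 */
	switch (type) {
	case SOCK_STREAM:
	case SOCK_SEQPACKET:
	case SOCK_DGRAM:
		break;
	default:
		sk_free(sk);			/* do not leak the allocation */
		return -ESOCKTNOSUPPORT;
	}

	sock_init_data(sock, sk);		/* only after validation */
	sk->sk_protocol = protocol;
	return 0;
}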
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index 2e89a00df92c..70907f6baac3 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -73,7 +73,8 @@ connlimit_iphash6(const union nf_inet_addr *addr,
73static inline bool already_closed(const struct nf_conn *conn) 73static inline bool already_closed(const struct nf_conn *conn)
74{ 74{
75 if (nf_ct_protonum(conn) == IPPROTO_TCP) 75 if (nf_ct_protonum(conn) == IPPROTO_TCP)
76 return conn->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT; 76 return conn->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT ||
77 conn->proto.tcp.state == TCP_CONNTRACK_CLOSE;
77 else 78 else
78 return 0; 79 return 0;
79} 80}
diff --git a/net/netlink/attr.c b/net/netlink/attr.c
index feb326f4a752..47bbf45ae5d7 100644
--- a/net/netlink/attr.c
+++ b/net/netlink/attr.c
@@ -400,13 +400,13 @@ void __nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data)
400 * @attrlen: length of attribute payload 400 * @attrlen: length of attribute payload
401 * @data: head of attribute payload 401 * @data: head of attribute payload
402 * 402 *
403 * Returns -1 if the tailroom of the skb is insufficient to store 403 * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
404 * the attribute header and payload. 404 * the attribute header and payload.
405 */ 405 */
406int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data) 406int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data)
407{ 407{
408 if (unlikely(skb_tailroom(skb) < nla_total_size(attrlen))) 408 if (unlikely(skb_tailroom(skb) < nla_total_size(attrlen)))
409 return -1; 409 return -EMSGSIZE;
410 410
411 __nla_put(skb, attrtype, attrlen, data); 411 __nla_put(skb, attrtype, attrlen, data);
412 return 0; 412 return 0;
@@ -418,13 +418,13 @@ int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data)
418 * @attrlen: length of attribute payload 418 * @attrlen: length of attribute payload
419 * @data: head of attribute payload 419 * @data: head of attribute payload
420 * 420 *
421 * Returns -1 if the tailroom of the skb is insufficient to store 421 * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
422 * the attribute payload. 422 * the attribute payload.
423 */ 423 */
424int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data) 424int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data)
425{ 425{
426 if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen))) 426 if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
427 return -1; 427 return -EMSGSIZE;
428 428
429 __nla_put_nohdr(skb, attrlen, data); 429 __nla_put_nohdr(skb, attrlen, data);
430 return 0; 430 return 0;
@@ -436,13 +436,13 @@ int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data)
436 * @attrlen: length of attribute payload 436 * @attrlen: length of attribute payload
437 * @data: head of attribute payload 437 * @data: head of attribute payload
438 * 438 *
439 * Returns -1 if the tailroom of the skb is insufficient to store 439 * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
440 * the attribute payload. 440 * the attribute payload.
441 */ 441 */
442int nla_append(struct sk_buff *skb, int attrlen, const void *data) 442int nla_append(struct sk_buff *skb, int attrlen, const void *data)
443{ 443{
444 if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen))) 444 if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
445 return -1; 445 return -EMSGSIZE;
446 446
447 memcpy(skb_put(skb, attrlen), data, attrlen); 447 memcpy(skb_put(skb, attrlen), data, attrlen);
448 return 0; 448 return 0;
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index d16929c9b4bc..f5aa23c3e886 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -554,7 +554,8 @@ static int ctrl_fill_info(struct genl_family *family, u32 pid, u32 seq,
554 return genlmsg_end(skb, hdr); 554 return genlmsg_end(skb, hdr);
555 555
556nla_put_failure: 556nla_put_failure:
557 return genlmsg_cancel(skb, hdr); 557 genlmsg_cancel(skb, hdr);
558 return -EMSGSIZE;
558} 559}
559 560
560static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 pid, 561static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 pid,
@@ -590,7 +591,8 @@ static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 pid,
590 return genlmsg_end(skb, hdr); 591 return genlmsg_end(skb, hdr);
591 592
592nla_put_failure: 593nla_put_failure:
593 return genlmsg_cancel(skb, hdr); 594 genlmsg_cancel(skb, hdr);
595 return -EMSGSIZE;
594} 596}
595 597
596static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb) 598static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 0df911fd67b1..64465bacbe79 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -444,7 +444,8 @@ static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl,
444 return nla_nest_end(skb, opts); 444 return nla_nest_end(skb, opts);
445 445
446nla_put_failure: 446nla_put_failure:
447 return nla_nest_cancel(skb, opts); 447 nla_nest_cancel(skb, opts);
448 return -EMSGSIZE;
448} 449}
449 450
450static int dsmark_dump(struct Qdisc *sch, struct sk_buff *skb) 451static int dsmark_dump(struct Qdisc *sch, struct sk_buff *skb)
@@ -466,7 +467,8 @@ static int dsmark_dump(struct Qdisc *sch, struct sk_buff *skb)
466 return nla_nest_end(skb, opts); 467 return nla_nest_end(skb, opts);
467 468
468nla_put_failure: 469nla_put_failure:
469 return nla_nest_cancel(skb, opts); 470 nla_nest_cancel(skb, opts);
471 return -EMSGSIZE;
470} 472}
471 473
472static const struct Qdisc_class_ops dsmark_class_ops = { 474static const struct Qdisc_class_ops dsmark_class_ops = {
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 3a9d226ff1e4..c89fba56db56 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -582,7 +582,8 @@ append_opt:
582 return nla_nest_end(skb, opts); 582 return nla_nest_end(skb, opts);
583 583
584nla_put_failure: 584nla_put_failure:
585 return nla_nest_cancel(skb, opts); 585 nla_nest_cancel(skb, opts);
586 return -EMSGSIZE;
586} 587}
587 588
588static void gred_destroy(struct Qdisc *sch) 589static void gred_destroy(struct Qdisc *sch)
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 87293d0db1d7..fdfaa3fcc16d 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1360,7 +1360,7 @@ hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
1360 1360
1361 nla_put_failure: 1361 nla_put_failure:
1362 nla_nest_cancel(skb, nest); 1362 nla_nest_cancel(skb, nest);
1363 return -1; 1363 return -EMSGSIZE;
1364} 1364}
1365 1365
1366static int 1366static int
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 3dcd493f4f4a..5c569853b9c0 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -281,7 +281,8 @@ static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
281 return nla_nest_end(skb, opts); 281 return nla_nest_end(skb, opts);
282 282
283nla_put_failure: 283nla_put_failure:
284 return nla_nest_cancel(skb, opts); 284 nla_nest_cancel(skb, opts);
285 return -EMSGSIZE;
285} 286}
286 287
287static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d) 288static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index b4cd2b71953f..532634861db1 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1203,6 +1203,9 @@ void sctp_assoc_update_retran_path(struct sctp_association *asoc)
1203 struct list_head *head = &asoc->peer.transport_addr_list; 1203 struct list_head *head = &asoc->peer.transport_addr_list;
1204 struct list_head *pos; 1204 struct list_head *pos;
1205 1205
1206 if (asoc->peer.transport_count == 1)
1207 return;
1208
1206 /* Find the next transport in a round-robin fashion. */ 1209 /* Find the next transport in a round-robin fashion. */
1207 t = asoc->peer.retran_path; 1210 t = asoc->peer.retran_path;
1208 pos = &t->transports; 1211 pos = &t->transports;
@@ -1217,6 +1220,15 @@ void sctp_assoc_update_retran_path(struct sctp_association *asoc)
1217 1220
1218 t = list_entry(pos, struct sctp_transport, transports); 1221 t = list_entry(pos, struct sctp_transport, transports);
1219 1222
1223 /* We have exhausted the list, but didn't find any
1224 * other active transports. If so, use the next
1225 * transport.
1226 */
1227 if (t == asoc->peer.retran_path) {
1228 t = next;
1229 break;
1230 }
1231
1220 /* Try to find an active transport. */ 1232 /* Try to find an active transport. */
1221 1233
1222 if ((t->state == SCTP_ACTIVE) || 1234 if ((t->state == SCTP_ACTIVE) ||
@@ -1229,15 +1241,6 @@ void sctp_assoc_update_retran_path(struct sctp_association *asoc)
1229 if (!next) 1241 if (!next)
1230 next = t; 1242 next = t;
1231 } 1243 }
1232
1233 /* We have exhausted the list, but didn't find any
1234 * other active transports. If so, use the next
1235 * transport.
1236 */
1237 if (t == asoc->peer.retran_path) {
1238 t = next;
1239 break;
1240 }
1241 } 1244 }
1242 1245
1243 asoc->peer.retran_path = t; 1246 asoc->peer.retran_path = t;
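Two things changed in sctp_assoc_update_retran_path(): it returns immediately when the peer has only one transport (there is nothing to rotate to), and the "have we wrapped back to where we started?" test now runs before the candidate is examined, so a full lap with no active transport falls back to the remembered next candidate instead of re-selecting the failed path. Reduced to a plain circular scan, the corrected ordering looks like this; the types and helpers are illustrative, not SCTP API.

static struct peer *pick_retran_path(struct peer *start, int count)
{
	struct peer *p = start, *fallback = NULL;

	if (count == 1)
		return start;			/* only one path: keep it */

	for (;;) {
		p = next_peer(p);		/* circular iterator */

		if (p == start)			/* wrapped: nothing active */
			return fallback ? fallback : start;

		if (peer_is_active(p))
			return p;

		if (!fallback)
			fallback = p;		/* first non-active candidate */
	}
}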
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index e45e44c60635..a2f4d4d51593 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -299,7 +299,8 @@ static inline int sctp_v6_addr_match_len(union sctp_addr *s1,
299/* Fills in the source address(saddr) based on the destination address(daddr) 299/* Fills in the source address(saddr) based on the destination address(daddr)
300 * and asoc's bind address list. 300 * and asoc's bind address list.
301 */ 301 */
302static void sctp_v6_get_saddr(struct sctp_association *asoc, 302static void sctp_v6_get_saddr(struct sctp_sock *sk,
303 struct sctp_association *asoc,
303 struct dst_entry *dst, 304 struct dst_entry *dst,
304 union sctp_addr *daddr, 305 union sctp_addr *daddr,
305 union sctp_addr *saddr) 306 union sctp_addr *saddr)
@@ -318,7 +319,7 @@ static void sctp_v6_get_saddr(struct sctp_association *asoc,
318 if (!asoc) { 319 if (!asoc) {
319 ipv6_dev_get_saddr(dst ? ip6_dst_idev(dst)->dev : NULL, 320 ipv6_dev_get_saddr(dst ? ip6_dst_idev(dst)->dev : NULL,
320 &daddr->v6.sin6_addr, 321 &daddr->v6.sin6_addr,
321 inet6_sk(asoc->base.sk)->srcprefs, 322 inet6_sk(&sk->inet.sk)->srcprefs,
322 &saddr->v6.sin6_addr); 323 &saddr->v6.sin6_addr);
323 SCTP_DEBUG_PRINTK("saddr from ipv6_get_saddr: " NIP6_FMT "\n", 324 SCTP_DEBUG_PRINTK("saddr from ipv6_get_saddr: " NIP6_FMT "\n",
324 NIP6(saddr->v6.sin6_addr)); 325 NIP6(saddr->v6.sin6_addr));
@@ -726,6 +727,11 @@ static void sctp_v6_seq_dump_addr(struct seq_file *seq, union sctp_addr *addr)
726 seq_printf(seq, NIP6_FMT " ", NIP6(addr->v6.sin6_addr)); 727 seq_printf(seq, NIP6_FMT " ", NIP6(addr->v6.sin6_addr));
727} 728}
728 729
730static void sctp_v6_ecn_capable(struct sock *sk)
731{
732 inet6_sk(sk)->tclass |= INET_ECN_ECT_0;
733}
734
729/* Initialize a PF_INET6 socket msg_name. */ 735/* Initialize a PF_INET6 socket msg_name. */
730static void sctp_inet6_msgname(char *msgname, int *addr_len) 736static void sctp_inet6_msgname(char *msgname, int *addr_len)
731{ 737{
@@ -996,6 +1002,7 @@ static struct sctp_af sctp_af_inet6 = {
996 .skb_iif = sctp_v6_skb_iif, 1002 .skb_iif = sctp_v6_skb_iif,
997 .is_ce = sctp_v6_is_ce, 1003 .is_ce = sctp_v6_is_ce,
998 .seq_dump_addr = sctp_v6_seq_dump_addr, 1004 .seq_dump_addr = sctp_v6_seq_dump_addr,
1005 .ecn_capable = sctp_v6_ecn_capable,
999 .net_header_len = sizeof(struct ipv6hdr), 1006 .net_header_len = sizeof(struct ipv6hdr),
1000 .sockaddr_len = sizeof(struct sockaddr_in6), 1007 .sockaddr_len = sizeof(struct sockaddr_in6),
1001#ifdef CONFIG_COMPAT 1008#ifdef CONFIG_COMPAT
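sctp_v6_ecn_capable() and the .ecn_capable slot added to sctp_af_inet6 (with an IPv4 counterpart in net/sctp/protocol.c below) are what let sctp_packet_transmit() stop hard-coding the IPv4-only INET_ECN_xmit(); the transmit path in net/sctp/output.c now calls the hook through the transport's address-family ops, which is the "Fix ECN markings for IPv6" item in the merge summary. The dispatch pattern in isolation; struct af_ops is an illustrative stand-in for struct sctp_af.

struct af_ops {
	void (*ecn_capable)(struct sock *sk);
	/* ... other per-family hooks ... */
};

static void v4_ecn_capable(struct sock *sk)
{
	INET_ECN_xmit(sk);			/* sets ECT(0) in inet_sk(sk)->tos */
}

static void v6_ecn_capable(struct sock *sk)
{
	inet6_sk(sk)->tclass |= INET_ECN_ECT_0;	/* ECT(0) in the IPv6 traffic class */
}

static void mark_ecn(const struct af_ops *af, struct sock *sk)
{
	af->ecn_capable(sk);			/* family-correct marking */
}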
diff --git a/net/sctp/output.c b/net/sctp/output.c
index cf4f9fb6819d..6d45bae93b46 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -548,7 +548,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
548 * Note: The works for IPv6 layer checks this bit too later 548 * Note: The works for IPv6 layer checks this bit too later
549 * in transmission. See IP6_ECN_flow_xmit(). 549 * in transmission. See IP6_ECN_flow_xmit().
550 */ 550 */
551 INET_ECN_xmit(nskb->sk); 551 (*tp->af_specific->ecn_capable)(nskb->sk);
552 552
553 /* Set up the IP options. */ 553 /* Set up the IP options. */
554 /* BUG: not implemented 554 /* BUG: not implemented
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 59edfd25a19c..ace6770e9048 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -208,6 +208,7 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
208 INIT_LIST_HEAD(&q->sacked); 208 INIT_LIST_HEAD(&q->sacked);
209 INIT_LIST_HEAD(&q->abandoned); 209 INIT_LIST_HEAD(&q->abandoned);
210 210
211 q->fast_rtx = 0;
211 q->outstanding_bytes = 0; 212 q->outstanding_bytes = 0;
212 q->empty = 1; 213 q->empty = 1;
213 q->cork = 0; 214 q->cork = 0;
@@ -500,6 +501,7 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
500 case SCTP_RTXR_FAST_RTX: 501 case SCTP_RTXR_FAST_RTX:
501 SCTP_INC_STATS(SCTP_MIB_FAST_RETRANSMITS); 502 SCTP_INC_STATS(SCTP_MIB_FAST_RETRANSMITS);
502 sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX); 503 sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
504 q->fast_rtx = 1;
503 break; 505 break;
504 case SCTP_RTXR_PMTUD: 506 case SCTP_RTXR_PMTUD:
505 SCTP_INC_STATS(SCTP_MIB_PMTUD_RETRANSMITS); 507 SCTP_INC_STATS(SCTP_MIB_PMTUD_RETRANSMITS);
@@ -518,9 +520,15 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
518 * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by 520 * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by
519 * following the procedures outlined in C1 - C5. 521 * following the procedures outlined in C1 - C5.
520 */ 522 */
521 sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point); 523 if (reason == SCTP_RTXR_T3_RTX)
524 sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point);
522 525
523 error = sctp_outq_flush(q, /* rtx_timeout */ 1); 526 /* Flush the queues only on timeout, since fast_rtx is only
527 * triggered during sack processing and the queue
528 * will be flushed at the end.
529 */
530 if (reason != SCTP_RTXR_FAST_RTX)
531 error = sctp_outq_flush(q, /* rtx_timeout */ 1);
524 532
525 if (error) 533 if (error)
526 q->asoc->base.sk->sk_err = -error; 534 q->asoc->base.sk->sk_err = -error;
@@ -538,17 +546,23 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
538 int rtx_timeout, int *start_timer) 546 int rtx_timeout, int *start_timer)
539{ 547{
540 struct list_head *lqueue; 548 struct list_head *lqueue;
541 struct list_head *lchunk;
542 struct sctp_transport *transport = pkt->transport; 549 struct sctp_transport *transport = pkt->transport;
543 sctp_xmit_t status; 550 sctp_xmit_t status;
544 struct sctp_chunk *chunk, *chunk1; 551 struct sctp_chunk *chunk, *chunk1;
545 struct sctp_association *asoc; 552 struct sctp_association *asoc;
553 int fast_rtx;
546 int error = 0; 554 int error = 0;
555 int timer = 0;
556 int done = 0;
547 557
548 asoc = q->asoc; 558 asoc = q->asoc;
549 lqueue = &q->retransmit; 559 lqueue = &q->retransmit;
560 fast_rtx = q->fast_rtx;
550 561
551 /* RFC 2960 6.3.3 Handle T3-rtx Expiration 562 /* This loop handles time-out retransmissions, fast retransmissions,
563 * and retransmissions due to opening of window.
564 *
565 * RFC 2960 6.3.3 Handle T3-rtx Expiration
552 * 566 *
553 * E3) Determine how many of the earliest (i.e., lowest TSN) 567 * E3) Determine how many of the earliest (i.e., lowest TSN)
554 * outstanding DATA chunks for the address for which the 568 * outstanding DATA chunks for the address for which the
@@ -563,12 +577,12 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
563 * [Just to be painfully clear, if we are retransmitting 577 * [Just to be painfully clear, if we are retransmitting
564 * because a timeout just happened, we should send only ONE 578 * because a timeout just happened, we should send only ONE
565 * packet of retransmitted data.] 579 * packet of retransmitted data.]
580 *
581 * For fast retransmissions we also send only ONE packet. However,
582 * if we are just flushing the queue due to open window, we'll
583 * try to send as much as possible.
566 */ 584 */
567 lchunk = sctp_list_dequeue(lqueue); 585 list_for_each_entry_safe(chunk, chunk1, lqueue, transmitted_list) {
568
569 while (lchunk) {
570 chunk = list_entry(lchunk, struct sctp_chunk,
571 transmitted_list);
572 586
573 /* Make sure that Gap Acked TSNs are not retransmitted. A 587 /* Make sure that Gap Acked TSNs are not retransmitted. A
574 * simple approach is just to move such TSNs out of the 588 * simple approach is just to move such TSNs out of the
@@ -576,58 +590,60 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
576 * next chunk. 590 * next chunk.
577 */ 591 */
578 if (chunk->tsn_gap_acked) { 592 if (chunk->tsn_gap_acked) {
579 list_add_tail(lchunk, &transport->transmitted); 593 list_del(&chunk->transmitted_list);
580 lchunk = sctp_list_dequeue(lqueue); 594 list_add_tail(&chunk->transmitted_list,
595 &transport->transmitted);
581 continue; 596 continue;
582 } 597 }
583 598
599 /* If we are doing fast retransmit, ignore non-fast_rtransmit
600 * chunks
601 */
602 if (fast_rtx && !chunk->fast_retransmit)
603 continue;
604
584 /* Attempt to append this chunk to the packet. */ 605 /* Attempt to append this chunk to the packet. */
585 status = sctp_packet_append_chunk(pkt, chunk); 606 status = sctp_packet_append_chunk(pkt, chunk);
586 607
587 switch (status) { 608 switch (status) {
588 case SCTP_XMIT_PMTU_FULL: 609 case SCTP_XMIT_PMTU_FULL:
589 /* Send this packet. */ 610 /* Send this packet. */
590 if ((error = sctp_packet_transmit(pkt)) == 0) 611 error = sctp_packet_transmit(pkt);
591 *start_timer = 1;
592 612
593 /* If we are retransmitting, we should only 613 /* If we are retransmitting, we should only
594 * send a single packet. 614 * send a single packet.
595 */ 615 */
596 if (rtx_timeout) { 616 if (rtx_timeout || fast_rtx)
597 list_add(lchunk, lqueue); 617 done = 1;
598 lchunk = NULL;
599 }
600 618
601 /* Bundle lchunk in the next round. */ 619 /* Bundle next chunk in the next round. */
602 break; 620 break;
603 621
604 case SCTP_XMIT_RWND_FULL: 622 case SCTP_XMIT_RWND_FULL:
605 /* Send this packet. */ 623 /* Send this packet. */
606 if ((error = sctp_packet_transmit(pkt)) == 0) 624 error = sctp_packet_transmit(pkt);
607 *start_timer = 1;
608 625
609 /* Stop sending DATA as there is no more room 626 /* Stop sending DATA as there is no more room
610 * at the receiver. 627 * at the receiver.
611 */ 628 */
612 list_add(lchunk, lqueue); 629 done = 1;
613 lchunk = NULL;
614 break; 630 break;
615 631
616 case SCTP_XMIT_NAGLE_DELAY: 632 case SCTP_XMIT_NAGLE_DELAY:
617 /* Send this packet. */ 633 /* Send this packet. */
618 if ((error = sctp_packet_transmit(pkt)) == 0) 634 error = sctp_packet_transmit(pkt);
619 *start_timer = 1;
620 635
621 /* Stop sending DATA because of nagle delay. */ 636 /* Stop sending DATA because of nagle delay. */
622 list_add(lchunk, lqueue); 637 done = 1;
623 lchunk = NULL;
624 break; 638 break;
625 639
626 default: 640 default:
627 /* The append was successful, so add this chunk to 641 /* The append was successful, so add this chunk to
628 * the transmitted list. 642 * the transmitted list.
629 */ 643 */
630 list_add_tail(lchunk, &transport->transmitted); 644 list_del(&chunk->transmitted_list);
645 list_add_tail(&chunk->transmitted_list,
646 &transport->transmitted);
631 647
632 /* Mark the chunk as ineligible for fast retransmit 648 /* Mark the chunk as ineligible for fast retransmit
633 * after it is retransmitted. 649 * after it is retransmitted.
@@ -635,27 +651,44 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
635 if (chunk->fast_retransmit > 0) 651 if (chunk->fast_retransmit > 0)
636 chunk->fast_retransmit = -1; 652 chunk->fast_retransmit = -1;
637 653
638 *start_timer = 1; 654 /* Force start T3-rtx timer when fast retransmitting
639 q->empty = 0; 655 * the earliest outstanding TSN
656 */
657 if (!timer && fast_rtx &&
658 ntohl(chunk->subh.data_hdr->tsn) ==
659 asoc->ctsn_ack_point + 1)
660 timer = 2;
640 661
641 /* Retrieve a new chunk to bundle. */ 662 q->empty = 0;
642 lchunk = sctp_list_dequeue(lqueue);
643 break; 663 break;
644 } 664 }
645 665
646 /* If we are here due to a retransmit timeout or a fast 666 /* Set the timer if there were no errors */
647 * retransmit and if there are any chunks left in the retransmit 667 if (!error && !timer)
648 * queue that could not fit in the PMTU sized packet, they need 668 timer = 1;
649 * to be marked as ineligible for a subsequent fast retransmit. 669
650 */ 670 if (done)
651 if (rtx_timeout && !lchunk) { 671 break;
652 list_for_each_entry(chunk1, lqueue, transmitted_list) { 672 }
653 if (chunk1->fast_retransmit > 0) 673
654 chunk1->fast_retransmit = -1; 674 /* If we are here due to a retransmit timeout or a fast
655 } 675 * retransmit and if there are any chunks left in the retransmit
676 * queue that could not fit in the PMTU sized packet, they need
677 * to be marked as ineligible for a subsequent fast retransmit.
678 */
679 if (rtx_timeout || fast_rtx) {
680 list_for_each_entry(chunk1, lqueue, transmitted_list) {
681 if (chunk1->fast_retransmit > 0)
682 chunk1->fast_retransmit = -1;
656 } 683 }
657 } 684 }
658 685
686 *start_timer = timer;
687
688 /* Clear fast retransmit hint */
689 if (fast_rtx)
690 q->fast_rtx = 0;
691
659 return error; 692 return error;
660} 693}
661 694
@@ -862,7 +895,8 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
862 rtx_timeout, &start_timer); 895 rtx_timeout, &start_timer);
863 896
864 if (start_timer) 897 if (start_timer)
865 sctp_transport_reset_timers(transport); 898 sctp_transport_reset_timers(transport,
899 start_timer-1);
866 900
867 /* This can happen on COOKIE-ECHO resend. Only 901 /* This can happen on COOKIE-ECHO resend. Only
868 * one chunk can get bundled with a COOKIE-ECHO. 902 * one chunk can get bundled with a COOKIE-ECHO.
@@ -977,7 +1011,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
977 list_add_tail(&chunk->transmitted_list, 1011 list_add_tail(&chunk->transmitted_list,
978 &transport->transmitted); 1012 &transport->transmitted);
979 1013
980 sctp_transport_reset_timers(transport); 1014 sctp_transport_reset_timers(transport, start_timer-1);
981 1015
982 q->empty = 0; 1016 q->empty = 0;
983 1017
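A detail worth spelling out in the outqueue rework: sctp_outq_flush_rtx() now reports its timer decision as an integer, where 0 means leave the T3-rtx timer alone, 1 means start it only if it is not already pending, and 2 means force a restart (used when the lowest outstanding TSN is being fast-retransmitted). The callers above pass start_timer - 1 as the new force argument of sctp_transport_reset_timers(), so 1 maps to force == 0 and 2 maps to force == 1. Condensed to the hand-off itself:

/* start_timer: 0 = untouched, 1 = start if idle, 2 = force restart */
static void apply_timer_decision(struct sctp_transport *t, int start_timer)
{
	if (start_timer)
		sctp_transport_reset_timers(t, start_timer - 1);
}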
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 0ec234b762c2..b435a193c5df 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -470,11 +470,11 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
470 /* Walk through the bind address list and look for a bind 470 /* Walk through the bind address list and look for a bind
471 * address that matches the source address of the returned dst. 471 * address that matches the source address of the returned dst.
472 */ 472 */
473 sctp_v4_dst_saddr(&dst_saddr, dst, htons(bp->port));
473 rcu_read_lock(); 474 rcu_read_lock();
474 list_for_each_entry_rcu(laddr, &bp->address_list, list) { 475 list_for_each_entry_rcu(laddr, &bp->address_list, list) {
475 if (!laddr->valid || (laddr->state != SCTP_ADDR_SRC)) 476 if (!laddr->valid || (laddr->state != SCTP_ADDR_SRC))
476 continue; 477 continue;
477 sctp_v4_dst_saddr(&dst_saddr, dst, htons(bp->port));
478 if (sctp_v4_cmp_addr(&dst_saddr, &laddr->a)) 478 if (sctp_v4_cmp_addr(&dst_saddr, &laddr->a))
479 goto out_unlock; 479 goto out_unlock;
480 } 480 }
@@ -519,7 +519,8 @@ out:
 /* For v4, the source address is cached in the route entry(dst). So no need
  * to cache it separately and hence this is an empty routine.
  */
-static void sctp_v4_get_saddr(struct sctp_association *asoc,
+static void sctp_v4_get_saddr(struct sctp_sock *sk,
+			      struct sctp_association *asoc,
 			      struct dst_entry *dst,
 			      union sctp_addr *daddr,
 			      union sctp_addr *saddr)
@@ -616,6 +617,11 @@ static void sctp_v4_seq_dump_addr(struct seq_file *seq, union sctp_addr *addr)
 	seq_printf(seq, "%d.%d.%d.%d ", NIPQUAD(addr->v4.sin_addr));
 }
 
+static void sctp_v4_ecn_capable(struct sock *sk)
+{
+	INET_ECN_xmit(sk);
+}
+
 /* Event handler for inet address addition/deletion events.
  * The sctp_local_addr_list needs to be protocted by a spin lock since
  * multiple notifiers (say IPv4 and IPv6) may be running at the same
@@ -934,6 +940,7 @@ static struct sctp_af sctp_af_inet = {
 	.skb_iif = sctp_v4_skb_iif,
 	.is_ce = sctp_v4_is_ce,
 	.seq_dump_addr = sctp_v4_seq_dump_addr,
+	.ecn_capable = sctp_v4_ecn_capable,
 	.net_header_len = sizeof(struct iphdr),
 	.sockaddr_len = sizeof(struct sockaddr_in),
 #ifdef CONFIG_COMPAT
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index f4938f6c5abe..3f34f61221ec 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -79,6 +79,7 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
 	peer->rttvar = 0;
 	peer->srtt = 0;
 	peer->rto_pending = 0;
+	peer->fast_recovery = 0;
 
 	peer->last_time_heard = jiffies;
 	peer->last_time_used = jiffies;
@@ -190,7 +191,7 @@ static void sctp_transport_destroy(struct sctp_transport *transport)
 /* Start T3_rtx timer if it is not already running and update the heartbeat
  * timer. This routine is called every time a DATA chunk is sent.
  */
-void sctp_transport_reset_timers(struct sctp_transport *transport)
+void sctp_transport_reset_timers(struct sctp_transport *transport, int force)
 {
 	/* RFC 2960 6.3.2 Retransmission Timer Rules
 	 *
@@ -200,7 +201,7 @@ void sctp_transport_reset_timers(struct sctp_transport *transport)
 	 * address.
 	 */
 
-	if (!timer_pending(&transport->T3_rtx_timer))
+	if (force || !timer_pending(&transport->T3_rtx_timer))
 		if (!mod_timer(&transport->T3_rtx_timer,
 			       jiffies + transport->rto))
 			sctp_transport_hold(transport);
@@ -291,7 +292,7 @@ void sctp_transport_route(struct sctp_transport *transport,
 	if (saddr)
 		memcpy(&transport->saddr, saddr, sizeof(union sctp_addr));
 	else
-		af->get_saddr(asoc, dst, daddr, &transport->saddr);
+		af->get_saddr(opt, asoc, dst, daddr, &transport->saddr);
 
 	transport->dst = dst;
 	if ((transport->param_flags & SPP_PMTUD_DISABLE) && transport->pathmtu) {
@@ -403,11 +404,16 @@ void sctp_transport_raise_cwnd(struct sctp_transport *transport,
 	cwnd = transport->cwnd;
 	flight_size = transport->flight_size;
 
+	/* See if we need to exit Fast Recovery first */
+	if (transport->fast_recovery &&
+	    TSN_lte(transport->fast_recovery_exit, sack_ctsn))
+		transport->fast_recovery = 0;
+
 	/* The appropriate cwnd increase algorithm is performed if, and only
-	 * if the cumulative TSN has advanced and the congestion window is
+	 * if the cumulative TSN whould advanced and the congestion window is
 	 * being fully utilized.
 	 */
-	if ((transport->asoc->ctsn_ack_point >= sack_ctsn) ||
+	if (TSN_lte(sack_ctsn, transport->asoc->ctsn_ack_point) ||
 	    (flight_size < cwnd))
 		return;
 
@@ -416,17 +422,23 @@ void sctp_transport_raise_cwnd(struct sctp_transport *transport,
 	pmtu = transport->asoc->pathmtu;
 
 	if (cwnd <= ssthresh) {
-		/* RFC 2960 7.2.1, sctpimpguide-05 2.14.2 When cwnd is less
-		 * than or equal to ssthresh an SCTP endpoint MUST use the
-		 * slow start algorithm to increase cwnd only if the current
-		 * congestion window is being fully utilized and an incoming
-		 * SACK advances the Cumulative TSN Ack Point. Only when these
-		 * two conditions are met can the cwnd be increased otherwise
-		 * the cwnd MUST not be increased. If these conditions are met
-		 * then cwnd MUST be increased by at most the lesser of
-		 * 1) the total size of the previously outstanding DATA
-		 * chunk(s) acknowledged, and 2) the destination's path MTU.
+		/* RFC 4960 7.2.1
+		 * o  When cwnd is less than or equal to ssthresh, an SCTP
+		 *    endpoint MUST use the slow-start algorithm to increase
+		 *    cwnd only if the current congestion window is being fully
+		 *    utilized, an incoming SACK advances the Cumulative TSN
+		 *    Ack Point, and the data sender is not in Fast Recovery.
+		 *    Only when these three conditions are met can the cwnd be
+		 *    increased; otherwise, the cwnd MUST not be increased.
+		 *    If these conditions are met, then cwnd MUST be increased
+		 *    by, at most, the lesser of 1) the total size of the
+		 *    previously outstanding DATA chunk(s) acknowledged, and
+		 *    2) the destination's path MTU.  This upper bound protects
+		 *    against the ACK-Splitting attack outlined in [SAVAGE99].
 		 */
+		if (transport->fast_recovery)
+			return;
+
 		if (bytes_acked > pmtu)
 			cwnd += pmtu;
 		else
@@ -502,6 +514,13 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
 	 * cwnd = ssthresh
 	 * partial_bytes_acked = 0
 	 */
+	if (transport->fast_recovery)
+		return;
+
+	/* Mark Fast recovery */
+	transport->fast_recovery = 1;
+	transport->fast_recovery_exit = transport->asoc->next_tsn - 1;
+
 	transport->ssthresh = max(transport->cwnd/2,
 				  4*transport->asoc->pathmtu);
 	transport->cwnd = transport->ssthresh;
@@ -586,6 +605,7 @@ void sctp_transport_reset(struct sctp_transport *t)
 	t->flight_size = 0;
 	t->error_count = 0;
 	t->rto_pending = 0;
+	t->fast_recovery = 0;
 
 	/* Initialize the state information for SFR-CACC */
 	t->cacc.changeover_active = 0;
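The transport.c hunks above give each transport a fast_recovery flag and an exit TSN: while Fast Recovery is active the congestion window is neither raised (sctp_transport_raise_cwnd bails out) nor cut again (sctp_transport_lower_cwnd returns early), and the state clears once the cumulative TSN reaches the recorded exit point. The stand-alone sketch below only illustrates those rules; it omits the flight-size and partial_bytes_acked handling, and every name in it is illustrative rather than kernel code.

/* Simplified model of the Fast Recovery cwnd rules added by the patch.
 * Not kernel code; only the enter/exit/suppress logic is shown.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_transport {
	uint32_t cwnd, ssthresh, pathmtu;
	bool fast_recovery;
	uint32_t fast_recovery_exit;
};

static bool tsn_lte(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) <= 0;		/* serial-number compare, like TSN_lte() */
}

static void raise_cwnd(struct toy_transport *t, uint32_t sack_ctsn, uint32_t bytes_acked)
{
	if (t->fast_recovery && tsn_lte(t->fast_recovery_exit, sack_ctsn))
		t->fast_recovery = false;	/* exit Fast Recovery first */

	if (t->fast_recovery)
		return;				/* no growth while recovering */

	if (t->cwnd <= t->ssthresh)		/* slow start, capped at one path MTU */
		t->cwnd += bytes_acked > t->pathmtu ? t->pathmtu : bytes_acked;
}

static void enter_fast_recovery(struct toy_transport *t, uint32_t next_tsn)
{
	if (t->fast_recovery)
		return;				/* only one reduction per recovery episode */
	t->fast_recovery = true;
	t->fast_recovery_exit = next_tsn - 1;
	t->ssthresh = t->cwnd / 2 > 4 * t->pathmtu ? t->cwnd / 2 : 4 * t->pathmtu;
	t->cwnd = t->ssthresh;
}

int main(void)
{
	struct toy_transport t = { .cwnd = 12000, .ssthresh = 60000, .pathmtu = 1500 };

	enter_fast_recovery(&t, 100);	/* first fast retransmit: cwnd halved once */
	enter_fast_recovery(&t, 105);	/* further loss in the same episode: no extra cut */
	raise_cwnd(&t, 98, 3000);	/* still recovering: no growth */
	raise_cwnd(&t, 101, 3000);	/* past the exit point: growth resumes */
	printf("cwnd=%u ssthresh=%u\n", t.cwnd, t.ssthresh);
	return 0;
}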
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 2bdd4dddc0e1..fb75f265b39c 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -187,7 +187,8 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
 	return genlmsg_end(msg, hdr);
 
  nla_put_failure:
-	return genlmsg_cancel(msg, hdr);
+	genlmsg_cancel(msg, hdr);
+	return -EMSGSIZE;
 }
 
 static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
@@ -273,7 +274,8 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 pid, u32 seq, int flags,
 	return genlmsg_end(msg, hdr);
 
  nla_put_failure:
-	return genlmsg_cancel(msg, hdr);
+	genlmsg_cancel(msg, hdr);
+	return -EMSGSIZE;
 }
 
 static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *cb)
@@ -928,7 +930,8 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
 	return genlmsg_end(msg, hdr);
 
  nla_put_failure:
-	return genlmsg_cancel(msg, hdr);
+	genlmsg_cancel(msg, hdr);
+	return -EMSGSIZE;
 }
 
 static int nl80211_dump_station(struct sk_buff *skb,
@@ -1267,7 +1270,8 @@ static int nl80211_send_mpath(struct sk_buff *msg, u32 pid, u32 seq,
 	return genlmsg_end(msg, hdr);
 
  nla_put_failure:
-	return genlmsg_cancel(msg, hdr);
+	genlmsg_cancel(msg, hdr);
+	return -EMSGSIZE;
 }
 
 static int nl80211_dump_mpath(struct sk_buff *skb,
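All four nl80211 hunks apply the same error-path pattern: a failed attribute put used to return whatever genlmsg_cancel() returned, which gives the caller nothing useful to act on; now the partially built message is cancelled and -EMSGSIZE is returned explicitly. A user-space sketch of that pattern follows; the helpers in it are invented stand-ins, not the real netlink API.

/* Toy version of the "cancel, then return a real errno" error path.
 * put_attr(), cancel() and the msg struct are stand-ins, not netlink calls.
 */
#include <errno.h>
#include <stdio.h>

struct msg { int len; };

static int put_attr(struct msg *m, int size)
{
	if (m->len + size > 32)
		return -1;		/* stands in for nla_put() running out of room */
	m->len += size;
	return 0;
}

static void cancel(struct msg *m, int mark)
{
	m->len = mark;			/* stands in for genlmsg_cancel(): roll back */
}

static int fill_msg(struct msg *m)
{
	int hdr = m->len;

	if (put_attr(m, 16) || put_attr(m, 24))
		goto put_failure;
	return m->len;			/* success: like genlmsg_end(), bytes used */

put_failure:
	cancel(m, hdr);			/* undo the partial message... */
	return -EMSGSIZE;		/* ...and report a proper error code */
}

int main(void)
{
	struct msg m = { 0 };
	printf("fill_msg() -> %d\n", fill_msg(&m));
	return 0;
}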
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index ac765dd9c7f5..23a2cc04b8cd 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -200,8 +200,8 @@ static struct xfrm_algo_desc aalg_list[] = {
 	}
 },
 {
-	.name = "hmac(ripemd160)",
-	.compat = "ripemd160",
+	.name = "hmac(rmd160)",
+	.compat = "rmd160",
 
 	.uinfo = {
 		.auth = {