aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
Diffstat (limited to 'net')
-rw-r--r--net/atm/br2684.c11
-rw-r--r--net/atm/lec.c10
-rw-r--r--net/compat.c11
-rw-r--r--net/core/dev.c21
-rw-r--r--net/core/rtnetlink.c6
-rw-r--r--net/dccp/ipv4.c2
-rw-r--r--net/dccp/ipv6.c4
-rw-r--r--net/ipv4/inet_hashtables.c24
-rw-r--r--net/ipv4/inet_timewait_sock.c61
-rw-r--r--net/ipv4/syncookies.c27
-rw-r--r--net/ipv4/tcp.c5
-rw-r--r--net/ipv4/tcp_input.c59
-rw-r--r--net/ipv4/tcp_ipv4.c23
-rw-r--r--net/ipv4/tcp_minisocks.c10
-rw-r--r--net/ipv4/tcp_output.c18
-rw-r--r--net/ipv4/tcp_timer.c29
-rw-r--r--net/ipv4/udp.c7
-rw-r--r--net/ipv6/inet6_hashtables.c8
-rw-r--r--net/ipv6/syncookies.c28
-rw-r--r--net/ipv6/tcp_ipv6.c7
-rw-r--r--net/key/af_key.c1
-rw-r--r--net/mac80211/cfg.c3
-rw-r--r--net/mac80211/ieee80211_i.h1
-rw-r--r--net/mac80211/mesh.c2
-rw-r--r--net/mac80211/mesh.h5
-rw-r--r--net/mac80211/mesh_hwmp.c2
-rw-r--r--net/mac80211/mlme.c10
-rw-r--r--net/mac80211/rx.c1
-rw-r--r--net/mac80211/scan.c20
-rw-r--r--net/mac80211/util.c2
-rw-r--r--net/rfkill/core.c4
-rw-r--r--net/wireless/reg.c75
-rw-r--r--net/wireless/wext-compat.c1
33 files changed, 285 insertions, 213 deletions
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 26a646d4eb32..c9230c398697 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -554,6 +554,12 @@ static const struct net_device_ops br2684_netdev_ops = {
554 .ndo_validate_addr = eth_validate_addr, 554 .ndo_validate_addr = eth_validate_addr,
555}; 555};
556 556
557static const struct net_device_ops br2684_netdev_ops_routed = {
558 .ndo_start_xmit = br2684_start_xmit,
559 .ndo_set_mac_address = br2684_mac_addr,
560 .ndo_change_mtu = eth_change_mtu
561};
562
557static void br2684_setup(struct net_device *netdev) 563static void br2684_setup(struct net_device *netdev)
558{ 564{
559 struct br2684_dev *brdev = BRPRIV(netdev); 565 struct br2684_dev *brdev = BRPRIV(netdev);
@@ -569,11 +575,10 @@ static void br2684_setup(struct net_device *netdev)
569static void br2684_setup_routed(struct net_device *netdev) 575static void br2684_setup_routed(struct net_device *netdev)
570{ 576{
571 struct br2684_dev *brdev = BRPRIV(netdev); 577 struct br2684_dev *brdev = BRPRIV(netdev);
572 brdev->net_dev = netdev;
573 578
579 brdev->net_dev = netdev;
574 netdev->hard_header_len = 0; 580 netdev->hard_header_len = 0;
575 581 netdev->netdev_ops = &br2684_netdev_ops_routed;
576 netdev->netdev_ops = &br2684_netdev_ops;
577 netdev->addr_len = 0; 582 netdev->addr_len = 0;
578 netdev->mtu = 1500; 583 netdev->mtu = 1500;
579 netdev->type = ARPHRD_PPP; 584 netdev->type = ARPHRD_PPP;
diff --git a/net/atm/lec.c b/net/atm/lec.c
index b2d644560323..42749b7b917c 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -62,7 +62,6 @@ static int lec_open(struct net_device *dev);
62static netdev_tx_t lec_start_xmit(struct sk_buff *skb, 62static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
63 struct net_device *dev); 63 struct net_device *dev);
64static int lec_close(struct net_device *dev); 64static int lec_close(struct net_device *dev);
65static void lec_init(struct net_device *dev);
66static struct lec_arp_table *lec_arp_find(struct lec_priv *priv, 65static struct lec_arp_table *lec_arp_find(struct lec_priv *priv,
67 const unsigned char *mac_addr); 66 const unsigned char *mac_addr);
68static int lec_arp_remove(struct lec_priv *priv, 67static int lec_arp_remove(struct lec_priv *priv,
@@ -670,13 +669,6 @@ static const struct net_device_ops lec_netdev_ops = {
670 .ndo_set_multicast_list = lec_set_multicast_list, 669 .ndo_set_multicast_list = lec_set_multicast_list,
671}; 670};
672 671
673
674static void lec_init(struct net_device *dev)
675{
676 dev->netdev_ops = &lec_netdev_ops;
677 printk("%s: Initialized!\n", dev->name);
678}
679
680static const unsigned char lec_ctrl_magic[] = { 672static const unsigned char lec_ctrl_magic[] = {
681 0xff, 673 0xff,
682 0x00, 674 0x00,
@@ -893,6 +885,7 @@ static int lecd_attach(struct atm_vcc *vcc, int arg)
893 dev_lec[i] = alloc_etherdev(size); 885 dev_lec[i] = alloc_etherdev(size);
894 if (!dev_lec[i]) 886 if (!dev_lec[i])
895 return -ENOMEM; 887 return -ENOMEM;
888 dev_lec[i]->netdev_ops = &lec_netdev_ops;
896 snprintf(dev_lec[i]->name, IFNAMSIZ, "lec%d", i); 889 snprintf(dev_lec[i]->name, IFNAMSIZ, "lec%d", i);
897 if (register_netdev(dev_lec[i])) { 890 if (register_netdev(dev_lec[i])) {
898 free_netdev(dev_lec[i]); 891 free_netdev(dev_lec[i]);
@@ -901,7 +894,6 @@ static int lecd_attach(struct atm_vcc *vcc, int arg)
901 894
902 priv = netdev_priv(dev_lec[i]); 895 priv = netdev_priv(dev_lec[i]);
903 priv->is_trdev = is_trdev; 896 priv->is_trdev = is_trdev;
904 lec_init(dev_lec[i]);
905 } else { 897 } else {
906 priv = netdev_priv(dev_lec[i]); 898 priv = netdev_priv(dev_lec[i]);
907 if (priv->lecd) 899 if (priv->lecd)
diff --git a/net/compat.c b/net/compat.c
index e1a56ade803b..a1fb1b079a82 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -754,26 +754,21 @@ asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, size_t len,
754 754
755asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg, 755asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
756 unsigned vlen, unsigned int flags, 756 unsigned vlen, unsigned int flags,
757 struct timespec __user *timeout) 757 struct compat_timespec __user *timeout)
758{ 758{
759 int datagrams; 759 int datagrams;
760 struct timespec ktspec; 760 struct timespec ktspec;
761 struct compat_timespec __user *utspec;
762 761
763 if (timeout == NULL) 762 if (timeout == NULL)
764 return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen, 763 return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
765 flags | MSG_CMSG_COMPAT, NULL); 764 flags | MSG_CMSG_COMPAT, NULL);
766 765
767 utspec = (struct compat_timespec __user *)timeout; 766 if (get_compat_timespec(&ktspec, timeout))
768 if (get_user(ktspec.tv_sec, &utspec->tv_sec) ||
769 get_user(ktspec.tv_nsec, &utspec->tv_nsec))
770 return -EFAULT; 767 return -EFAULT;
771 768
772 datagrams = __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen, 769 datagrams = __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
773 flags | MSG_CMSG_COMPAT, &ktspec); 770 flags | MSG_CMSG_COMPAT, &ktspec);
774 if (datagrams > 0 && 771 if (datagrams > 0 && put_compat_timespec(&ktspec, timeout))
775 (put_user(ktspec.tv_sec, &utspec->tv_sec) ||
776 put_user(ktspec.tv_nsec, &utspec->tv_nsec)))
777 datagrams = -EFAULT; 772 datagrams = -EFAULT;
778 773
779 return datagrams; 774 return datagrams;
diff --git a/net/core/dev.c b/net/core/dev.c
index c36a17aafcf3..be9924f60ec3 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4771,21 +4771,23 @@ static void net_set_todo(struct net_device *dev)
4771 4771
4772static void rollback_registered_many(struct list_head *head) 4772static void rollback_registered_many(struct list_head *head)
4773{ 4773{
4774 struct net_device *dev; 4774 struct net_device *dev, *tmp;
4775 4775
4776 BUG_ON(dev_boot_phase); 4776 BUG_ON(dev_boot_phase);
4777 ASSERT_RTNL(); 4777 ASSERT_RTNL();
4778 4778
4779 list_for_each_entry(dev, head, unreg_list) { 4779 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
4780 /* Some devices call without registering 4780 /* Some devices call without registering
4781 * for initialization unwind. 4781 * for initialization unwind. Remove those
4782 * devices and proceed with the remaining.
4782 */ 4783 */
4783 if (dev->reg_state == NETREG_UNINITIALIZED) { 4784 if (dev->reg_state == NETREG_UNINITIALIZED) {
4784 pr_debug("unregister_netdevice: device %s/%p never " 4785 pr_debug("unregister_netdevice: device %s/%p never "
4785 "was registered\n", dev->name, dev); 4786 "was registered\n", dev->name, dev);
4786 4787
4787 WARN_ON(1); 4788 WARN_ON(1);
4788 return; 4789 list_del(&dev->unreg_list);
4790 continue;
4789 } 4791 }
4790 4792
4791 BUG_ON(dev->reg_state != NETREG_REGISTERED); 4793 BUG_ON(dev->reg_state != NETREG_REGISTERED);
@@ -5033,6 +5035,11 @@ int register_netdevice(struct net_device *dev)
5033 rollback_registered(dev); 5035 rollback_registered(dev);
5034 dev->reg_state = NETREG_UNREGISTERED; 5036 dev->reg_state = NETREG_UNREGISTERED;
5035 } 5037 }
5038 /*
5039 * Prevent userspace races by waiting until the network
5040 * device is fully setup before sending notifications.
5041 */
5042 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
5036 5043
5037out: 5044out:
5038 return ret; 5045 return ret;
@@ -5595,6 +5602,12 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
5595 /* Notify protocols, that a new device appeared. */ 5602 /* Notify protocols, that a new device appeared. */
5596 call_netdevice_notifiers(NETDEV_REGISTER, dev); 5603 call_netdevice_notifiers(NETDEV_REGISTER, dev);
5597 5604
5605 /*
5606 * Prevent userspace races by waiting until the network
5607 * device is fully setup before sending notifications.
5608 */
5609 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
5610
5598 synchronize_net(); 5611 synchronize_net();
5599 err = 0; 5612 err = 0;
5600out: 5613out:
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 33148a568199..794bcb897ff0 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1364,15 +1364,15 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
1364 case NETDEV_UNREGISTER: 1364 case NETDEV_UNREGISTER:
1365 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U); 1365 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
1366 break; 1366 break;
1367 case NETDEV_REGISTER:
1368 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
1369 break;
1370 case NETDEV_UP: 1367 case NETDEV_UP:
1371 case NETDEV_DOWN: 1368 case NETDEV_DOWN:
1372 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING); 1369 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1373 break; 1370 break;
1371 case NETDEV_POST_INIT:
1372 case NETDEV_REGISTER:
1374 case NETDEV_CHANGE: 1373 case NETDEV_CHANGE:
1375 case NETDEV_GOING_DOWN: 1374 case NETDEV_GOING_DOWN:
1375 case NETDEV_UNREGISTER_BATCH:
1376 break; 1376 break;
1377 default: 1377 default:
1378 rtmsg_ifinfo(RTM_NEWLINK, dev, 0); 1378 rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index efbcfdc12796..dad7bc4878e0 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -408,7 +408,7 @@ struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb,
408 408
409 dccp_sync_mss(newsk, dst_mtu(dst)); 409 dccp_sync_mss(newsk, dst_mtu(dst));
410 410
411 __inet_hash_nolisten(newsk); 411 __inet_hash_nolisten(newsk, NULL);
412 __inet_inherit_port(sk, newsk); 412 __inet_inherit_port(sk, newsk);
413 413
414 return newsk; 414 return newsk;
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 6574215a1f51..baf05cf43c28 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -46,7 +46,7 @@ static void dccp_v6_hash(struct sock *sk)
46 return; 46 return;
47 } 47 }
48 local_bh_disable(); 48 local_bh_disable();
49 __inet6_hash(sk); 49 __inet6_hash(sk, NULL);
50 local_bh_enable(); 50 local_bh_enable();
51 } 51 }
52} 52}
@@ -644,7 +644,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
644 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6; 644 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
645 newinet->inet_rcv_saddr = LOOPBACK4_IPV6; 645 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
646 646
647 __inet6_hash(newsk); 647 __inet6_hash(newsk, NULL);
648 __inet_inherit_port(sk, newsk); 648 __inet_inherit_port(sk, newsk);
649 649
650 return newsk; 650 return newsk;
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 21e5e32d8c60..2b79377b468d 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -351,12 +351,13 @@ static inline u32 inet_sk_port_offset(const struct sock *sk)
351 inet->inet_dport); 351 inet->inet_dport);
352} 352}
353 353
354void __inet_hash_nolisten(struct sock *sk) 354int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw)
355{ 355{
356 struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; 356 struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
357 struct hlist_nulls_head *list; 357 struct hlist_nulls_head *list;
358 spinlock_t *lock; 358 spinlock_t *lock;
359 struct inet_ehash_bucket *head; 359 struct inet_ehash_bucket *head;
360 int twrefcnt = 0;
360 361
361 WARN_ON(!sk_unhashed(sk)); 362 WARN_ON(!sk_unhashed(sk));
362 363
@@ -367,8 +368,13 @@ void __inet_hash_nolisten(struct sock *sk)
367 368
368 spin_lock(lock); 369 spin_lock(lock);
369 __sk_nulls_add_node_rcu(sk, list); 370 __sk_nulls_add_node_rcu(sk, list);
371 if (tw) {
372 WARN_ON(sk->sk_hash != tw->tw_hash);
373 twrefcnt = inet_twsk_unhash(tw);
374 }
370 spin_unlock(lock); 375 spin_unlock(lock);
371 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); 376 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
377 return twrefcnt;
372} 378}
373EXPORT_SYMBOL_GPL(__inet_hash_nolisten); 379EXPORT_SYMBOL_GPL(__inet_hash_nolisten);
374 380
@@ -378,7 +384,7 @@ static void __inet_hash(struct sock *sk)
378 struct inet_listen_hashbucket *ilb; 384 struct inet_listen_hashbucket *ilb;
379 385
380 if (sk->sk_state != TCP_LISTEN) { 386 if (sk->sk_state != TCP_LISTEN) {
381 __inet_hash_nolisten(sk); 387 __inet_hash_nolisten(sk, NULL);
382 return; 388 return;
383 } 389 }
384 390
@@ -427,7 +433,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
427 struct sock *sk, u32 port_offset, 433 struct sock *sk, u32 port_offset,
428 int (*check_established)(struct inet_timewait_death_row *, 434 int (*check_established)(struct inet_timewait_death_row *,
429 struct sock *, __u16, struct inet_timewait_sock **), 435 struct sock *, __u16, struct inet_timewait_sock **),
430 void (*hash)(struct sock *sk)) 436 int (*hash)(struct sock *sk, struct inet_timewait_sock *twp))
431{ 437{
432 struct inet_hashinfo *hinfo = death_row->hashinfo; 438 struct inet_hashinfo *hinfo = death_row->hashinfo;
433 const unsigned short snum = inet_sk(sk)->inet_num; 439 const unsigned short snum = inet_sk(sk)->inet_num;
@@ -435,6 +441,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
435 struct inet_bind_bucket *tb; 441 struct inet_bind_bucket *tb;
436 int ret; 442 int ret;
437 struct net *net = sock_net(sk); 443 struct net *net = sock_net(sk);
444 int twrefcnt = 1;
438 445
439 if (!snum) { 446 if (!snum) {
440 int i, remaining, low, high, port; 447 int i, remaining, low, high, port;
@@ -493,13 +500,18 @@ ok:
493 inet_bind_hash(sk, tb, port); 500 inet_bind_hash(sk, tb, port);
494 if (sk_unhashed(sk)) { 501 if (sk_unhashed(sk)) {
495 inet_sk(sk)->inet_sport = htons(port); 502 inet_sk(sk)->inet_sport = htons(port);
496 hash(sk); 503 twrefcnt += hash(sk, tw);
497 } 504 }
505 if (tw)
506 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
498 spin_unlock(&head->lock); 507 spin_unlock(&head->lock);
499 508
500 if (tw) { 509 if (tw) {
501 inet_twsk_deschedule(tw, death_row); 510 inet_twsk_deschedule(tw, death_row);
502 inet_twsk_put(tw); 511 while (twrefcnt) {
512 twrefcnt--;
513 inet_twsk_put(tw);
514 }
503 } 515 }
504 516
505 ret = 0; 517 ret = 0;
@@ -510,7 +522,7 @@ ok:
510 tb = inet_csk(sk)->icsk_bind_hash; 522 tb = inet_csk(sk)->icsk_bind_hash;
511 spin_lock_bh(&head->lock); 523 spin_lock_bh(&head->lock);
512 if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) { 524 if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
513 hash(sk); 525 hash(sk, NULL);
514 spin_unlock_bh(&head->lock); 526 spin_unlock_bh(&head->lock);
515 return 0; 527 return 0;
516 } else { 528 } else {
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 0fdf45e4c90c..cc94cc2d8b2d 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -15,9 +15,13 @@
15#include <net/ip.h> 15#include <net/ip.h>
16 16
17 17
18/* 18/**
19 * unhash a timewait socket from established hash 19 * inet_twsk_unhash - unhash a timewait socket from established hash
20 * lock must be hold by caller 20 * @tw: timewait socket
21 *
22 * unhash a timewait socket from established hash, if hashed.
23 * ehash lock must be held by caller.
24 * Returns 1 if caller should call inet_twsk_put() after lock release.
21 */ 25 */
22int inet_twsk_unhash(struct inet_timewait_sock *tw) 26int inet_twsk_unhash(struct inet_timewait_sock *tw)
23{ 27{
@@ -26,6 +30,37 @@ int inet_twsk_unhash(struct inet_timewait_sock *tw)
26 30
27 hlist_nulls_del_rcu(&tw->tw_node); 31 hlist_nulls_del_rcu(&tw->tw_node);
28 sk_nulls_node_init(&tw->tw_node); 32 sk_nulls_node_init(&tw->tw_node);
33 /*
34 * We cannot call inet_twsk_put() ourself under lock,
35 * caller must call it for us.
36 */
37 return 1;
38}
39
40/**
41 * inet_twsk_bind_unhash - unhash a timewait socket from bind hash
42 * @tw: timewait socket
43 * @hashinfo: hashinfo pointer
44 *
45 * unhash a timewait socket from bind hash, if hashed.
46 * bind hash lock must be held by caller.
47 * Returns 1 if caller should call inet_twsk_put() after lock release.
48 */
49int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
50 struct inet_hashinfo *hashinfo)
51{
52 struct inet_bind_bucket *tb = tw->tw_tb;
53
54 if (!tb)
55 return 0;
56
57 __hlist_del(&tw->tw_bind_node);
58 tw->tw_tb = NULL;
59 inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
60 /*
61 * We cannot call inet_twsk_put() ourself under lock,
62 * caller must call it for us.
63 */
29 return 1; 64 return 1;
30} 65}
31 66
@@ -34,7 +69,6 @@ static void __inet_twsk_kill(struct inet_timewait_sock *tw,
34 struct inet_hashinfo *hashinfo) 69 struct inet_hashinfo *hashinfo)
35{ 70{
36 struct inet_bind_hashbucket *bhead; 71 struct inet_bind_hashbucket *bhead;
37 struct inet_bind_bucket *tb;
38 int refcnt; 72 int refcnt;
39 /* Unlink from established hashes. */ 73 /* Unlink from established hashes. */
40 spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash); 74 spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);
@@ -46,15 +80,11 @@ static void __inet_twsk_kill(struct inet_timewait_sock *tw,
46 /* Disassociate with bind bucket. */ 80 /* Disassociate with bind bucket. */
47 bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num, 81 bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
48 hashinfo->bhash_size)]; 82 hashinfo->bhash_size)];
83
49 spin_lock(&bhead->lock); 84 spin_lock(&bhead->lock);
50 tb = tw->tw_tb; 85 refcnt += inet_twsk_bind_unhash(tw, hashinfo);
51 if (tb) {
52 __hlist_del(&tw->tw_bind_node);
53 tw->tw_tb = NULL;
54 inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
55 refcnt++;
56 }
57 spin_unlock(&bhead->lock); 86 spin_unlock(&bhead->lock);
87
58#ifdef SOCK_REFCNT_DEBUG 88#ifdef SOCK_REFCNT_DEBUG
59 if (atomic_read(&tw->tw_refcnt) != 1) { 89 if (atomic_read(&tw->tw_refcnt) != 1) {
60 printk(KERN_DEBUG "%s timewait_sock %p refcnt=%d\n", 90 printk(KERN_DEBUG "%s timewait_sock %p refcnt=%d\n",
@@ -126,7 +156,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
126 156
127 /* 157 /*
128 * Notes : 158 * Notes :
129 * - We initially set tw_refcnt to 0 in inet_twsk_alloc() 159 * - We initially set tw_refcnt to 0 in inet_twsk_alloc()
130 * - We add one reference for the bhash link 160 * - We add one reference for the bhash link
131 * - We add one reference for the ehash link 161 * - We add one reference for the ehash link
132 * - We want this refcnt update done before allowing other 162 * - We want this refcnt update done before allowing other
@@ -136,7 +166,6 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
136 166
137 spin_unlock(lock); 167 spin_unlock(lock);
138} 168}
139
140EXPORT_SYMBOL_GPL(__inet_twsk_hashdance); 169EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);
141 170
142struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int state) 171struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int state)
@@ -177,7 +206,6 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat
177 206
178 return tw; 207 return tw;
179} 208}
180
181EXPORT_SYMBOL_GPL(inet_twsk_alloc); 209EXPORT_SYMBOL_GPL(inet_twsk_alloc);
182 210
183/* Returns non-zero if quota exceeded. */ 211/* Returns non-zero if quota exceeded. */
@@ -256,7 +284,6 @@ void inet_twdr_hangman(unsigned long data)
256out: 284out:
257 spin_unlock(&twdr->death_lock); 285 spin_unlock(&twdr->death_lock);
258} 286}
259
260EXPORT_SYMBOL_GPL(inet_twdr_hangman); 287EXPORT_SYMBOL_GPL(inet_twdr_hangman);
261 288
262void inet_twdr_twkill_work(struct work_struct *work) 289void inet_twdr_twkill_work(struct work_struct *work)
@@ -287,7 +314,6 @@ void inet_twdr_twkill_work(struct work_struct *work)
287 spin_unlock_bh(&twdr->death_lock); 314 spin_unlock_bh(&twdr->death_lock);
288 } 315 }
289} 316}
290
291EXPORT_SYMBOL_GPL(inet_twdr_twkill_work); 317EXPORT_SYMBOL_GPL(inet_twdr_twkill_work);
292 318
293/* These are always called from BH context. See callers in 319/* These are always called from BH context. See callers in
@@ -307,7 +333,6 @@ void inet_twsk_deschedule(struct inet_timewait_sock *tw,
307 spin_unlock(&twdr->death_lock); 333 spin_unlock(&twdr->death_lock);
308 __inet_twsk_kill(tw, twdr->hashinfo); 334 __inet_twsk_kill(tw, twdr->hashinfo);
309} 335}
310
311EXPORT_SYMBOL(inet_twsk_deschedule); 336EXPORT_SYMBOL(inet_twsk_deschedule);
312 337
313void inet_twsk_schedule(struct inet_timewait_sock *tw, 338void inet_twsk_schedule(struct inet_timewait_sock *tw,
@@ -388,7 +413,6 @@ void inet_twsk_schedule(struct inet_timewait_sock *tw,
388 mod_timer(&twdr->tw_timer, jiffies + twdr->period); 413 mod_timer(&twdr->tw_timer, jiffies + twdr->period);
389 spin_unlock(&twdr->death_lock); 414 spin_unlock(&twdr->death_lock);
390} 415}
391
392EXPORT_SYMBOL_GPL(inet_twsk_schedule); 416EXPORT_SYMBOL_GPL(inet_twsk_schedule);
393 417
394void inet_twdr_twcal_tick(unsigned long data) 418void inet_twdr_twcal_tick(unsigned long data)
@@ -449,7 +473,6 @@ out:
449#endif 473#endif
450 spin_unlock(&twdr->death_lock); 474 spin_unlock(&twdr->death_lock);
451} 475}
452
453EXPORT_SYMBOL_GPL(inet_twdr_twcal_tick); 476EXPORT_SYMBOL_GPL(inet_twdr_twcal_tick);
454 477
455void inet_twsk_purge(struct inet_hashinfo *hashinfo, 478void inet_twsk_purge(struct inet_hashinfo *hashinfo,
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 26399ad2a289..66fd80ef2473 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -277,6 +277,13 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
277 277
278 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV); 278 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
279 279
280 /* check for timestamp cookie support */
281 memset(&tcp_opt, 0, sizeof(tcp_opt));
282 tcp_parse_options(skb, &tcp_opt, &hash_location, 0);
283
284 if (tcp_opt.saw_tstamp)
285 cookie_check_timestamp(&tcp_opt);
286
280 ret = NULL; 287 ret = NULL;
281 req = inet_reqsk_alloc(&tcp_request_sock_ops); /* for safety */ 288 req = inet_reqsk_alloc(&tcp_request_sock_ops); /* for safety */
282 if (!req) 289 if (!req)
@@ -292,6 +299,12 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
292 ireq->loc_addr = ip_hdr(skb)->daddr; 299 ireq->loc_addr = ip_hdr(skb)->daddr;
293 ireq->rmt_addr = ip_hdr(skb)->saddr; 300 ireq->rmt_addr = ip_hdr(skb)->saddr;
294 ireq->ecn_ok = 0; 301 ireq->ecn_ok = 0;
302 ireq->snd_wscale = tcp_opt.snd_wscale;
303 ireq->rcv_wscale = tcp_opt.rcv_wscale;
304 ireq->sack_ok = tcp_opt.sack_ok;
305 ireq->wscale_ok = tcp_opt.wscale_ok;
306 ireq->tstamp_ok = tcp_opt.saw_tstamp;
307 req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
295 308
296 /* We threw away the options of the initial SYN, so we hope 309
297 * the ACK carries the same options again (see RFC1122 4.2.3.8) 310 * the ACK carries the same options again (see RFC1122 4.2.3.8)
@@ -340,20 +353,6 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
340 } 353 }
341 } 354 }
342 355
343 /* check for timestamp cookie support */
344 memset(&tcp_opt, 0, sizeof(tcp_opt));
345 tcp_parse_options(skb, &tcp_opt, &hash_location, 0, &rt->u.dst);
346
347 if (tcp_opt.saw_tstamp)
348 cookie_check_timestamp(&tcp_opt);
349
350 ireq->snd_wscale = tcp_opt.snd_wscale;
351 ireq->rcv_wscale = tcp_opt.rcv_wscale;
352 ireq->sack_ok = tcp_opt.sack_ok;
353 ireq->wscale_ok = tcp_opt.wscale_ok;
354 ireq->tstamp_ok = tcp_opt.saw_tstamp;
355 req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
356
357 /* Try to redo what tcp_v4_send_synack did. */ 356 /* Try to redo what tcp_v4_send_synack did. */
358 req->window_clamp = tp->window_clamp ? :dst_metric(&rt->u.dst, RTAX_WINDOW); 357 req->window_clamp = tp->window_clamp ? :dst_metric(&rt->u.dst, RTAX_WINDOW);
359 358
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index c8666b70cde0..b0a26bb25e2e 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2540,11 +2540,6 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
2540 ctd.tcpct_cookie_desired = cvp->cookie_desired; 2540 ctd.tcpct_cookie_desired = cvp->cookie_desired;
2541 ctd.tcpct_s_data_desired = cvp->s_data_desired; 2541 ctd.tcpct_s_data_desired = cvp->s_data_desired;
2542 2542
2543 /* Cookie(s) saved, return as nonce */
2544 if (sizeof(ctd.tcpct_value) < cvp->cookie_pair_size) {
2545 /* impossible? */
2546 return -EINVAL;
2547 }
2548 memcpy(&ctd.tcpct_value[0], &cvp->cookie_pair[0], 2543 memcpy(&ctd.tcpct_value[0], &cvp->cookie_pair[0],
2549 cvp->cookie_pair_size); 2544 cvp->cookie_pair_size);
2550 ctd.tcpct_used = cvp->cookie_pair_size; 2545 ctd.tcpct_used = cvp->cookie_pair_size;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 57ae96a04220..28e029632493 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2717,6 +2717,35 @@ static void tcp_try_undo_dsack(struct sock *sk)
2717 } 2717 }
2718} 2718}
2719 2719
2720/* We can clear retrans_stamp when there are no retransmissions in the
2721 * window. It would seem that it is trivially available for us in
2722 * tp->retrans_out, however, that kind of assumptions doesn't consider
2723 * what will happen if errors occur when sending retransmission for the
2724 * second time. ...It could be that such a segment has only
2725 * TCPCB_EVER_RETRANS set at the present time. It seems that checking
2726 * the head skb is enough except for some reneging corner cases that
2727 * are not worth the effort.
2728 *
2729 * Main reason for all this complexity is the fact that connection dying
2730 * time now depends on the validity of the retrans_stamp, in particular,
2731 * that successive retransmissions of a segment must not advance
2732 * retrans_stamp under any conditions.
2733 */
2734static int tcp_any_retrans_done(struct sock *sk)
2735{
2736 struct tcp_sock *tp = tcp_sk(sk);
2737 struct sk_buff *skb;
2738
2739 if (tp->retrans_out)
2740 return 1;
2741
2742 skb = tcp_write_queue_head(sk);
2743 if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
2744 return 1;
2745
2746 return 0;
2747}
2748
2720/* Undo during fast recovery after partial ACK. */ 2749/* Undo during fast recovery after partial ACK. */
2721 2750
2722static int tcp_try_undo_partial(struct sock *sk, int acked) 2751static int tcp_try_undo_partial(struct sock *sk, int acked)
@@ -2729,7 +2758,7 @@ static int tcp_try_undo_partial(struct sock *sk, int acked)
2729 /* Plain luck! Hole is filled with delayed 2758 /* Plain luck! Hole is filled with delayed
2730 * packet, rather than with a retransmit. 2759 * packet, rather than with a retransmit.
2731 */ 2760 */
2732 if (tp->retrans_out == 0) 2761 if (!tcp_any_retrans_done(sk))
2733 tp->retrans_stamp = 0; 2762 tp->retrans_stamp = 0;
2734 2763
2735 tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1); 2764 tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1);
@@ -2788,7 +2817,7 @@ static void tcp_try_keep_open(struct sock *sk)
2788 struct tcp_sock *tp = tcp_sk(sk); 2817 struct tcp_sock *tp = tcp_sk(sk);
2789 int state = TCP_CA_Open; 2818 int state = TCP_CA_Open;
2790 2819
2791 if (tcp_left_out(tp) || tp->retrans_out || tp->undo_marker) 2820 if (tcp_left_out(tp) || tcp_any_retrans_done(sk) || tp->undo_marker)
2792 state = TCP_CA_Disorder; 2821 state = TCP_CA_Disorder;
2793 2822
2794 if (inet_csk(sk)->icsk_ca_state != state) { 2823 if (inet_csk(sk)->icsk_ca_state != state) {
@@ -2803,7 +2832,7 @@ static void tcp_try_to_open(struct sock *sk, int flag)
2803 2832
2804 tcp_verify_left_out(tp); 2833 tcp_verify_left_out(tp);
2805 2834
2806 if (!tp->frto_counter && tp->retrans_out == 0) 2835 if (!tp->frto_counter && !tcp_any_retrans_done(sk))
2807 tp->retrans_stamp = 0; 2836 tp->retrans_stamp = 0;
2808 2837
2809 if (flag & FLAG_ECE) 2838 if (flag & FLAG_ECE)
@@ -3698,7 +3727,7 @@ old_ack:
3698 * the fast version below fails. 3727 * the fast version below fails.
3699 */ 3728 */
3700void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, 3729void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
3701 u8 **hvpp, int estab, struct dst_entry *dst) 3730 u8 **hvpp, int estab)
3702{ 3731{
3703 unsigned char *ptr; 3732 unsigned char *ptr;
3704 struct tcphdr *th = tcp_hdr(skb); 3733 struct tcphdr *th = tcp_hdr(skb);
@@ -3737,8 +3766,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
3737 break; 3766 break;
3738 case TCPOPT_WINDOW: 3767 case TCPOPT_WINDOW:
3739 if (opsize == TCPOLEN_WINDOW && th->syn && 3768 if (opsize == TCPOLEN_WINDOW && th->syn &&
3740 !estab && sysctl_tcp_window_scaling && 3769 !estab && sysctl_tcp_window_scaling) {
3741 !dst_feature(dst, RTAX_FEATURE_NO_WSCALE)) {
3742 __u8 snd_wscale = *(__u8 *)ptr; 3770 __u8 snd_wscale = *(__u8 *)ptr;
3743 opt_rx->wscale_ok = 1; 3771 opt_rx->wscale_ok = 1;
3744 if (snd_wscale > 14) { 3772 if (snd_wscale > 14) {
@@ -3754,8 +3782,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
3754 case TCPOPT_TIMESTAMP: 3782 case TCPOPT_TIMESTAMP:
3755 if ((opsize == TCPOLEN_TIMESTAMP) && 3783 if ((opsize == TCPOLEN_TIMESTAMP) &&
3756 ((estab && opt_rx->tstamp_ok) || 3784 ((estab && opt_rx->tstamp_ok) ||
3757 (!estab && sysctl_tcp_timestamps && 3785 (!estab && sysctl_tcp_timestamps))) {
3758 !dst_feature(dst, RTAX_FEATURE_NO_TSTAMP)))) {
3759 opt_rx->saw_tstamp = 1; 3786 opt_rx->saw_tstamp = 1;
3760 opt_rx->rcv_tsval = get_unaligned_be32(ptr); 3787 opt_rx->rcv_tsval = get_unaligned_be32(ptr);
3761 opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4); 3788 opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4);
@@ -3763,8 +3790,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
3763 break; 3790 break;
3764 case TCPOPT_SACK_PERM: 3791 case TCPOPT_SACK_PERM:
3765 if (opsize == TCPOLEN_SACK_PERM && th->syn && 3792 if (opsize == TCPOLEN_SACK_PERM && th->syn &&
3766 !estab && sysctl_tcp_sack && 3793 !estab && sysctl_tcp_sack) {
3767 !dst_feature(dst, RTAX_FEATURE_NO_SACK)) {
3768 opt_rx->sack_ok = 1; 3794 opt_rx->sack_ok = 1;
3769 tcp_sack_reset(opt_rx); 3795 tcp_sack_reset(opt_rx);
3770 } 3796 }
@@ -3849,7 +3875,7 @@ static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
3849 if (tcp_parse_aligned_timestamp(tp, th)) 3875 if (tcp_parse_aligned_timestamp(tp, th))
3850 return 1; 3876 return 1;
3851 } 3877 }
3852 tcp_parse_options(skb, &tp->rx_opt, hvpp, 1, NULL); 3878 tcp_parse_options(skb, &tp->rx_opt, hvpp, 1);
3853 return 1; 3879 return 1;
3854} 3880}
3855 3881
@@ -4104,10 +4130,8 @@ static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
4104static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq) 4130static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
4105{ 4131{
4106 struct tcp_sock *tp = tcp_sk(sk); 4132 struct tcp_sock *tp = tcp_sk(sk);
4107 struct dst_entry *dst = __sk_dst_get(sk);
4108 4133
4109 if (tcp_is_sack(tp) && sysctl_tcp_dsack && 4134 if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
4110 !dst_feature(dst, RTAX_FEATURE_NO_DSACK)) {
4111 int mib_idx; 4135 int mib_idx;
4112 4136
4113 if (before(seq, tp->rcv_nxt)) 4137 if (before(seq, tp->rcv_nxt))
@@ -4136,15 +4160,13 @@ static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq)
4136static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb) 4160static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb)
4137{ 4161{
4138 struct tcp_sock *tp = tcp_sk(sk); 4162 struct tcp_sock *tp = tcp_sk(sk);
4139 struct dst_entry *dst = __sk_dst_get(sk);
4140 4163
4141 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && 4164 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
4142 before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { 4165 before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
4143 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); 4166 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
4144 tcp_enter_quickack_mode(sk); 4167 tcp_enter_quickack_mode(sk);
4145 4168
4146 if (tcp_is_sack(tp) && sysctl_tcp_dsack && 4169 if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
4147 !dst_feature(dst, RTAX_FEATURE_NO_DSACK)) {
4148 u32 end_seq = TCP_SKB_CB(skb)->end_seq; 4170 u32 end_seq = TCP_SKB_CB(skb)->end_seq;
4149 4171
4150 if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) 4172 if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))
@@ -5399,11 +5421,10 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
5399 u8 *hash_location; 5421 u8 *hash_location;
5400 struct inet_connection_sock *icsk = inet_csk(sk); 5422 struct inet_connection_sock *icsk = inet_csk(sk);
5401 struct tcp_sock *tp = tcp_sk(sk); 5423 struct tcp_sock *tp = tcp_sk(sk);
5402 struct dst_entry *dst = __sk_dst_get(sk);
5403 struct tcp_cookie_values *cvp = tp->cookie_values; 5424 struct tcp_cookie_values *cvp = tp->cookie_values;
5404 int saved_clamp = tp->rx_opt.mss_clamp; 5425 int saved_clamp = tp->rx_opt.mss_clamp;
5405 5426
5406 tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0, dst); 5427 tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0);
5407 5428
5408 if (th->ack) { 5429 if (th->ack) {
5409 /* rfc793: 5430 /* rfc793:
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 29002ab26e0d..65b8ebfd078a 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1262,20 +1262,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1262 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops; 1262 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
1263#endif 1263#endif
1264 1264
1265 ireq = inet_rsk(req);
1266 ireq->loc_addr = daddr;
1267 ireq->rmt_addr = saddr;
1268 ireq->no_srccheck = inet_sk(sk)->transparent;
1269 ireq->opt = tcp_v4_save_options(sk, skb);
1270
1271 dst = inet_csk_route_req(sk, req);
1272 if(!dst)
1273 goto drop_and_free;
1274
1275 tcp_clear_options(&tmp_opt); 1265 tcp_clear_options(&tmp_opt);
1276 tmp_opt.mss_clamp = TCP_MSS_DEFAULT; 1266 tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
1277 tmp_opt.user_mss = tp->rx_opt.user_mss; 1267 tmp_opt.user_mss = tp->rx_opt.user_mss;
1278 tcp_parse_options(skb, &tmp_opt, &hash_location, 0, dst); 1268 tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
1279 1269
1280 if (tmp_opt.cookie_plus > 0 && 1270 if (tmp_opt.cookie_plus > 0 &&
1281 tmp_opt.saw_tstamp && 1271 tmp_opt.saw_tstamp &&
@@ -1319,8 +1309,14 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1319 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp; 1309 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1320 tcp_openreq_init(req, &tmp_opt, skb); 1310 tcp_openreq_init(req, &tmp_opt, skb);
1321 1311
1312 ireq = inet_rsk(req);
1313 ireq->loc_addr = daddr;
1314 ireq->rmt_addr = saddr;
1315 ireq->no_srccheck = inet_sk(sk)->transparent;
1316 ireq->opt = tcp_v4_save_options(sk, skb);
1317
1322 if (security_inet_conn_request(sk, skb, req)) 1318 if (security_inet_conn_request(sk, skb, req))
1323 goto drop_and_release; 1319 goto drop_and_free;
1324 1320
1325 if (!want_cookie) 1321 if (!want_cookie)
1326 TCP_ECN_create_request(req, tcp_hdr(skb)); 1322 TCP_ECN_create_request(req, tcp_hdr(skb));
@@ -1345,6 +1341,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1345 */ 1341 */
1346 if (tmp_opt.saw_tstamp && 1342 if (tmp_opt.saw_tstamp &&
1347 tcp_death_row.sysctl_tw_recycle && 1343 tcp_death_row.sysctl_tw_recycle &&
1344 (dst = inet_csk_route_req(sk, req)) != NULL &&
1348 (peer = rt_get_peer((struct rtable *)dst)) != NULL && 1345 (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
1349 peer->v4daddr == saddr) { 1346 peer->v4daddr == saddr) {
1350 if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL && 1347 if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
@@ -1464,7 +1461,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1464 } 1461 }
1465#endif 1462#endif
1466 1463
1467 __inet_hash_nolisten(newsk); 1464 __inet_hash_nolisten(newsk, NULL);
1468 __inet_inherit_port(sk, newsk); 1465 __inet_inherit_port(sk, newsk);
1469 1466
1470 return newsk; 1467 return newsk;
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 87accec8d097..f206ee5dda80 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -95,9 +95,9 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
95 struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); 95 struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
96 int paws_reject = 0; 96 int paws_reject = 0;
97 97
98 tmp_opt.saw_tstamp = 0;
98 if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) { 99 if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
99 tmp_opt.tstamp_ok = 1; 100 tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
100 tcp_parse_options(skb, &tmp_opt, &hash_location, 1, NULL);
101 101
102 if (tmp_opt.saw_tstamp) { 102 if (tmp_opt.saw_tstamp) {
103 tmp_opt.ts_recent = tcptw->tw_ts_recent; 103 tmp_opt.ts_recent = tcptw->tw_ts_recent;
@@ -526,9 +526,9 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
526 __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK); 526 __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
527 int paws_reject = 0; 527 int paws_reject = 0;
528 528
529 if ((th->doff > (sizeof(*th) >> 2)) && (req->ts_recent)) { 529 tmp_opt.saw_tstamp = 0;
530 tmp_opt.tstamp_ok = 1; 530 if (th->doff > (sizeof(struct tcphdr)>>2)) {
531 tcp_parse_options(skb, &tmp_opt, &hash_location, 1, NULL); 531 tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
532 532
533 if (tmp_opt.saw_tstamp) { 533 if (tmp_opt.saw_tstamp) {
534 tmp_opt.ts_recent = req->ts_recent; 534 tmp_opt.ts_recent = req->ts_recent;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 93316a96d820..383ce237640f 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -553,7 +553,6 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
553 struct tcp_md5sig_key **md5) { 553 struct tcp_md5sig_key **md5) {
554 struct tcp_sock *tp = tcp_sk(sk); 554 struct tcp_sock *tp = tcp_sk(sk);
555 struct tcp_cookie_values *cvp = tp->cookie_values; 555 struct tcp_cookie_values *cvp = tp->cookie_values;
556 struct dst_entry *dst = __sk_dst_get(sk);
557 unsigned remaining = MAX_TCP_OPTION_SPACE; 556 unsigned remaining = MAX_TCP_OPTION_SPACE;
558 u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ? 557 u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ?
559 tcp_cookie_size_check(cvp->cookie_desired) : 558 tcp_cookie_size_check(cvp->cookie_desired) :
@@ -581,22 +580,18 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
581 opts->mss = tcp_advertise_mss(sk); 580 opts->mss = tcp_advertise_mss(sk);
582 remaining -= TCPOLEN_MSS_ALIGNED; 581 remaining -= TCPOLEN_MSS_ALIGNED;
583 582
584 if (likely(sysctl_tcp_timestamps && 583 if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
585 !dst_feature(dst, RTAX_FEATURE_NO_TSTAMP) &&
586 *md5 == NULL)) {
587 opts->options |= OPTION_TS; 584 opts->options |= OPTION_TS;
588 opts->tsval = TCP_SKB_CB(skb)->when; 585 opts->tsval = TCP_SKB_CB(skb)->when;
589 opts->tsecr = tp->rx_opt.ts_recent; 586 opts->tsecr = tp->rx_opt.ts_recent;
590 remaining -= TCPOLEN_TSTAMP_ALIGNED; 587 remaining -= TCPOLEN_TSTAMP_ALIGNED;
591 } 588 }
592 if (likely(sysctl_tcp_window_scaling && 589 if (likely(sysctl_tcp_window_scaling)) {
593 !dst_feature(dst, RTAX_FEATURE_NO_WSCALE))) {
594 opts->ws = tp->rx_opt.rcv_wscale; 590 opts->ws = tp->rx_opt.rcv_wscale;
595 opts->options |= OPTION_WSCALE; 591 opts->options |= OPTION_WSCALE;
596 remaining -= TCPOLEN_WSCALE_ALIGNED; 592 remaining -= TCPOLEN_WSCALE_ALIGNED;
597 } 593 }
598 if (likely(sysctl_tcp_sack && 594 if (likely(sysctl_tcp_sack)) {
599 !dst_feature(dst, RTAX_FEATURE_NO_SACK))) {
600 opts->options |= OPTION_SACK_ADVERTISE; 595 opts->options |= OPTION_SACK_ADVERTISE;
601 if (unlikely(!(OPTION_TS & opts->options))) 596 if (unlikely(!(OPTION_TS & opts->options)))
602 remaining -= TCPOLEN_SACKPERM_ALIGNED; 597 remaining -= TCPOLEN_SACKPERM_ALIGNED;
@@ -2527,9 +2522,7 @@ static void tcp_connect_init(struct sock *sk)
2527 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT. 2522 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
2528 */ 2523 */
2529 tp->tcp_header_len = sizeof(struct tcphdr) + 2524 tp->tcp_header_len = sizeof(struct tcphdr) +
2530 (sysctl_tcp_timestamps && 2525 (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
2531 (!dst_feature(dst, RTAX_FEATURE_NO_TSTAMP) ?
2532 TCPOLEN_TSTAMP_ALIGNED : 0));
2533 2526
2534#ifdef CONFIG_TCP_MD5SIG 2527#ifdef CONFIG_TCP_MD5SIG
2535 if (tp->af_specific->md5_lookup(sk, sk) != NULL) 2528 if (tp->af_specific->md5_lookup(sk, sk) != NULL)
@@ -2555,8 +2548,7 @@ static void tcp_connect_init(struct sock *sk)
2555 tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0), 2548 tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
2556 &tp->rcv_wnd, 2549 &tp->rcv_wnd,
2557 &tp->window_clamp, 2550 &tp->window_clamp,
2558 (sysctl_tcp_window_scaling && 2551 sysctl_tcp_window_scaling,
2559 !dst_feature(dst, RTAX_FEATURE_NO_WSCALE)),
2560 &rcv_wscale); 2552 &rcv_wscale);
2561 2553
2562 tp->rx_opt.rcv_wscale = rcv_wscale; 2554 tp->rx_opt.rcv_wscale = rcv_wscale;
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 8353a538cd4c..8816a20c2597 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -132,6 +132,35 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
132 } 132 }
133} 133}
134 134
135/* This function calculates a "timeout" which is equivalent to the timeout of a
136 * TCP connection after "boundary" unsucessful, exponentially backed-off
137 * retransmissions with an initial RTO of TCP_RTO_MIN.
138 */
139static bool retransmits_timed_out(struct sock *sk,
140 unsigned int boundary)
141{
142 unsigned int timeout, linear_backoff_thresh;
143 unsigned int start_ts;
144
145 if (!inet_csk(sk)->icsk_retransmits)
146 return false;
147
148 if (unlikely(!tcp_sk(sk)->retrans_stamp))
149 start_ts = TCP_SKB_CB(tcp_write_queue_head(sk))->when;
150 else
151 start_ts = tcp_sk(sk)->retrans_stamp;
152
153 linear_backoff_thresh = ilog2(TCP_RTO_MAX/TCP_RTO_MIN);
154
155 if (boundary <= linear_backoff_thresh)
156 timeout = ((2 << boundary) - 1) * TCP_RTO_MIN;
157 else
158 timeout = ((2 << linear_backoff_thresh) - 1) * TCP_RTO_MIN +
159 (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
160
161 return (tcp_time_stamp - start_ts) >= timeout;
162}
163
135/* A write timeout has occurred. Process the after effects. */ 164/* A write timeout has occurred. Process the after effects. */
136static int tcp_write_timeout(struct sock *sk) 165static int tcp_write_timeout(struct sock *sk)
137{ 166{
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 1f9534846ca9..f0126fdd7e04 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -216,9 +216,8 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
216 * force rand to be an odd multiple of UDP_HTABLE_SIZE 216 * force rand to be an odd multiple of UDP_HTABLE_SIZE
217 */ 217 */
218 rand = (rand | 1) * (udptable->mask + 1); 218 rand = (rand | 1) * (udptable->mask + 1);
219 for (last = first + udptable->mask + 1; 219 last = first + udptable->mask + 1;
220 first != last; 220 do {
221 first++) {
222 hslot = udp_hashslot(udptable, net, first); 221 hslot = udp_hashslot(udptable, net, first);
223 bitmap_zero(bitmap, PORTS_PER_CHAIN); 222 bitmap_zero(bitmap, PORTS_PER_CHAIN);
224 spin_lock_bh(&hslot->lock); 223 spin_lock_bh(&hslot->lock);
@@ -238,7 +237,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
238 snum += rand; 237 snum += rand;
239 } while (snum != first); 238 } while (snum != first);
240 spin_unlock_bh(&hslot->lock); 239 spin_unlock_bh(&hslot->lock);
241 } 240 } while (++first != last);
242 goto fail; 241 goto fail;
243 } else { 242 } else {
244 hslot = udp_hashslot(udptable, net, snum); 243 hslot = udp_hashslot(udptable, net, snum);
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index c813e294ec0c..633a6c266136 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -22,9 +22,10 @@
22#include <net/inet6_hashtables.h> 22#include <net/inet6_hashtables.h>
23#include <net/ip.h> 23#include <net/ip.h>
24 24
25void __inet6_hash(struct sock *sk) 25int __inet6_hash(struct sock *sk, struct inet_timewait_sock *tw)
26{ 26{
27 struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; 27 struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
28 int twrefcnt = 0;
28 29
29 WARN_ON(!sk_unhashed(sk)); 30 WARN_ON(!sk_unhashed(sk));
30 31
@@ -45,10 +46,15 @@ void __inet6_hash(struct sock *sk)
45 lock = inet_ehash_lockp(hashinfo, hash); 46 lock = inet_ehash_lockp(hashinfo, hash);
46 spin_lock(lock); 47 spin_lock(lock);
47 __sk_nulls_add_node_rcu(sk, list); 48 __sk_nulls_add_node_rcu(sk, list);
49 if (tw) {
50 WARN_ON(sk->sk_hash != tw->tw_hash);
51 twrefcnt = inet_twsk_unhash(tw);
52 }
48 spin_unlock(lock); 53 spin_unlock(lock);
49 } 54 }
50 55
51 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); 56 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
57 return twrefcnt;
52} 58}
53EXPORT_SYMBOL(__inet6_hash); 59EXPORT_SYMBOL(__inet6_hash);
54 60
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 5b9af508b8f2..7208a06576c6 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -185,6 +185,13 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
185 185
186 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV); 186 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
187 187
188 /* check for timestamp cookie support */
189 memset(&tcp_opt, 0, sizeof(tcp_opt));
190 tcp_parse_options(skb, &tcp_opt, &hash_location, 0);
191
192 if (tcp_opt.saw_tstamp)
193 cookie_check_timestamp(&tcp_opt);
194
188 ret = NULL; 195 ret = NULL;
189 req = inet6_reqsk_alloc(&tcp6_request_sock_ops); 196 req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
190 if (!req) 197 if (!req)
@@ -218,6 +225,12 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
218 req->expires = 0UL; 225 req->expires = 0UL;
219 req->retrans = 0; 226 req->retrans = 0;
220 ireq->ecn_ok = 0; 227 ireq->ecn_ok = 0;
228 ireq->snd_wscale = tcp_opt.snd_wscale;
229 ireq->rcv_wscale = tcp_opt.rcv_wscale;
230 ireq->sack_ok = tcp_opt.sack_ok;
231 ireq->wscale_ok = tcp_opt.wscale_ok;
232 ireq->tstamp_ok = tcp_opt.saw_tstamp;
233 req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
221 treq->rcv_isn = ntohl(th->seq) - 1; 234 treq->rcv_isn = ntohl(th->seq) - 1;
222 treq->snt_isn = cookie; 235 treq->snt_isn = cookie;
223 236
@@ -253,21 +266,6 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
253 goto out_free; 266 goto out_free;
254 } 267 }
255 268
256 /* check for timestamp cookie support */
257 memset(&tcp_opt, 0, sizeof(tcp_opt));
258 tcp_parse_options(skb, &tcp_opt, &hash_location, 0, dst);
259
260 if (tcp_opt.saw_tstamp)
261 cookie_check_timestamp(&tcp_opt);
262
263 req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
264
265 ireq->snd_wscale = tcp_opt.snd_wscale;
266 ireq->rcv_wscale = tcp_opt.rcv_wscale;
267 ireq->sack_ok = tcp_opt.sack_ok;
268 ireq->wscale_ok = tcp_opt.wscale_ok;
269 ireq->tstamp_ok = tcp_opt.saw_tstamp;
270
271 req->window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW); 269 req->window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW);
272 tcp_select_initial_window(tcp_full_space(sk), req->mss, 270 tcp_select_initial_window(tcp_full_space(sk), req->mss,
273 &req->rcv_wnd, &req->window_clamp, 271 &req->rcv_wnd, &req->window_clamp,
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index aadd7cef73b3..febfd595a40d 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -96,7 +96,7 @@ static void tcp_v6_hash(struct sock *sk)
96 return; 96 return;
97 } 97 }
98 local_bh_disable(); 98 local_bh_disable();
99 __inet6_hash(sk); 99 __inet6_hash(sk, NULL);
100 local_bh_enable(); 100 local_bh_enable();
101 } 101 }
102} 102}
@@ -1169,7 +1169,6 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1169 struct inet6_request_sock *treq; 1169 struct inet6_request_sock *treq;
1170 struct ipv6_pinfo *np = inet6_sk(sk); 1170 struct ipv6_pinfo *np = inet6_sk(sk);
1171 struct tcp_sock *tp = tcp_sk(sk); 1171 struct tcp_sock *tp = tcp_sk(sk);
1172 struct dst_entry *dst = __sk_dst_get(sk);
1173 __u32 isn = TCP_SKB_CB(skb)->when; 1172 __u32 isn = TCP_SKB_CB(skb)->when;
1174#ifdef CONFIG_SYN_COOKIES 1173#ifdef CONFIG_SYN_COOKIES
1175 int want_cookie = 0; 1174 int want_cookie = 0;
@@ -1208,7 +1207,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1208 tcp_clear_options(&tmp_opt); 1207 tcp_clear_options(&tmp_opt);
1209 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); 1208 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
1210 tmp_opt.user_mss = tp->rx_opt.user_mss; 1209 tmp_opt.user_mss = tp->rx_opt.user_mss;
1211 tcp_parse_options(skb, &tmp_opt, &hash_location, 0, dst); 1210 tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
1212 1211
1213 if (tmp_opt.cookie_plus > 0 && 1212 if (tmp_opt.cookie_plus > 0 &&
1214 tmp_opt.saw_tstamp && 1213 tmp_opt.saw_tstamp &&
@@ -1496,7 +1495,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1496 } 1495 }
1497#endif 1496#endif
1498 1497
1499 __inet6_hash(newsk); 1498 __inet6_hash(newsk, NULL);
1500 __inet_inherit_port(sk, newsk); 1499 __inet_inherit_port(sk, newsk);
1501 1500
1502 return newsk; 1501 return newsk;
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 84209fbbeb17..76fa6fef6473 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1193,6 +1193,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
1193 x->aalg->alg_key_len = key->sadb_key_bits; 1193 x->aalg->alg_key_len = key->sadb_key_bits;
1194 memcpy(x->aalg->alg_key, key+1, keysize); 1194 memcpy(x->aalg->alg_key, key+1, keysize);
1195 } 1195 }
1196 x->aalg->alg_trunc_len = a->uinfo.auth.icv_truncbits;
1196 x->props.aalgo = sa->sadb_sa_auth; 1197 x->props.aalgo = sa->sadb_sa_auth;
1197 /* x->algo.flags = sa->sadb_sa_flags; */ 1198 /* x->algo.flags = sa->sadb_sa_flags; */
1198 } 1199 }
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 93ee1fd5c08d..6dc3579c0ac5 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -354,7 +354,8 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
354 sinfo->rx_packets = sta->rx_packets; 354 sinfo->rx_packets = sta->rx_packets;
355 sinfo->tx_packets = sta->tx_packets; 355 sinfo->tx_packets = sta->tx_packets;
356 356
357 if (sta->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) { 357 if ((sta->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) ||
358 (sta->local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)) {
358 sinfo->filled |= STATION_INFO_SIGNAL; 359 sinfo->filled |= STATION_INFO_SIGNAL;
359 sinfo->signal = (s8)sta->last_signal; 360 sinfo->signal = (s8)sta->last_signal;
360 } 361 }
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 419f186cfcf0..91dc8636d644 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -746,6 +746,7 @@ struct ieee80211_local {
746 unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */ 746 unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */
747 747
748 bool pspolling; 748 bool pspolling;
749 bool scan_ps_enabled;
749 /* 750 /*
750 * PS can only be enabled when we have exactly one managed 751 * PS can only be enabled when we have exactly one managed
751 * interface (and monitors) in PS, this then points there. 752 * interface (and monitors) in PS, this then points there.
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index c0fe46493f71..6a4331429598 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -427,7 +427,7 @@ int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr,
427 char *addr5, char *addr6) 427 char *addr5, char *addr6)
428{ 428{
429 int aelen = 0; 429 int aelen = 0;
430 memset(meshhdr, 0, sizeof(meshhdr)); 430 memset(meshhdr, 0, sizeof(*meshhdr));
431 meshhdr->ttl = sdata->u.mesh.mshcfg.dot11MeshTTL; 431 meshhdr->ttl = sdata->u.mesh.mshcfg.dot11MeshTTL;
432 put_unaligned(cpu_to_le32(sdata->u.mesh.mesh_seqnum), &meshhdr->seqnum); 432 put_unaligned(cpu_to_le32(sdata->u.mesh.mesh_seqnum), &meshhdr->seqnum);
433 sdata->u.mesh.mesh_seqnum++; 433 sdata->u.mesh.mesh_seqnum++;
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 31e102541869..85562c59d7d6 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -188,8 +188,9 @@ struct mesh_rmc {
188 */ 188 */
189#define MESH_PREQ_MIN_INT 10 189#define MESH_PREQ_MIN_INT 10
190#define MESH_DIAM_TRAVERSAL_TIME 50 190#define MESH_DIAM_TRAVERSAL_TIME 50
191/* Paths will be refreshed if they are closer than PATH_REFRESH_TIME to their 191/* A path will be refreshed if it is used PATH_REFRESH_TIME milliseconds before
192 * expiration 192 * timing out. This way it will remain ACTIVE and no data frames will be
193 * unnecesarily held in the pending queue.
193 */ 194 */
194#define MESH_PATH_REFRESH_TIME 1000 195#define MESH_PATH_REFRESH_TIME 1000
195#define MESH_MIN_DISCOVERY_TIMEOUT (2 * MESH_DIAM_TRAVERSAL_TIME) 196#define MESH_MIN_DISCOVERY_TIMEOUT (2 * MESH_DIAM_TRAVERSAL_TIME)
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 833b2f3670c5..d28acb6b1f81 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -937,7 +937,7 @@ int mesh_nexthop_lookup(struct sk_buff *skb,
937 937
938 if (mpath->flags & MESH_PATH_ACTIVE) { 938 if (mpath->flags & MESH_PATH_ACTIVE) {
939 if (time_after(jiffies, 939 if (time_after(jiffies,
940 mpath->exp_time + 940 mpath->exp_time -
941 msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) && 941 msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) &&
942 !memcmp(sdata->dev->dev_addr, hdr->addr4, ETH_ALEN) && 942 !memcmp(sdata->dev->dev_addr, hdr->addr4, ETH_ALEN) &&
943 !(mpath->flags & MESH_PATH_RESOLVING) && 943 !(mpath->flags & MESH_PATH_RESOLVING) &&
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 6dc7b5ad9a41..d8d50fb5e823 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1083,8 +1083,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1083 1083
1084 ieee80211_set_wmm_default(sdata); 1084 ieee80211_set_wmm_default(sdata);
1085 1085
1086 ieee80211_recalc_idle(local);
1087
1088 /* channel(_type) changes are handled by ieee80211_hw_config */ 1086 /* channel(_type) changes are handled by ieee80211_hw_config */
1089 local->oper_channel_type = NL80211_CHAN_NO_HT; 1087 local->oper_channel_type = NL80211_CHAN_NO_HT;
1090 1088
@@ -1370,6 +1368,7 @@ ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
1370 1368
1371 if (!wk) { 1369 if (!wk) {
1372 ieee80211_set_disassoc(sdata, true); 1370 ieee80211_set_disassoc(sdata, true);
1371 ieee80211_recalc_idle(sdata->local);
1373 } else { 1372 } else {
1374 list_del(&wk->list); 1373 list_del(&wk->list);
1375 kfree(wk); 1374 kfree(wk);
@@ -1403,6 +1402,7 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
1403 sdata->dev->name, mgmt->sa, reason_code); 1402 sdata->dev->name, mgmt->sa, reason_code);
1404 1403
1405 ieee80211_set_disassoc(sdata, false); 1404 ieee80211_set_disassoc(sdata, false);
1405 ieee80211_recalc_idle(sdata->local);
1406 return RX_MGMT_CFG80211_DISASSOC; 1406 return RX_MGMT_CFG80211_DISASSOC;
1407} 1407}
1408 1408
@@ -2117,6 +2117,7 @@ static void ieee80211_sta_work(struct work_struct *work)
2117 " after %dms, disconnecting.\n", 2117 " after %dms, disconnecting.\n",
2118 bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ); 2118 bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ);
2119 ieee80211_set_disassoc(sdata, true); 2119 ieee80211_set_disassoc(sdata, true);
2120 ieee80211_recalc_idle(local);
2120 mutex_unlock(&ifmgd->mtx); 2121 mutex_unlock(&ifmgd->mtx);
2121 /* 2122 /*
2122 * must be outside lock due to cfg80211, 2123 * must be outside lock due to cfg80211,
@@ -2560,6 +2561,8 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
2560 IEEE80211_STYPE_DEAUTH, req->reason_code, 2561 IEEE80211_STYPE_DEAUTH, req->reason_code,
2561 cookie); 2562 cookie);
2562 2563
2564 ieee80211_recalc_idle(sdata->local);
2565
2563 return 0; 2566 return 0;
2564} 2567}
2565 2568
@@ -2592,5 +2595,8 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
2592 ieee80211_send_deauth_disassoc(sdata, req->bss->bssid, 2595 ieee80211_send_deauth_disassoc(sdata, req->bss->bssid,
2593 IEEE80211_STYPE_DISASSOC, req->reason_code, 2596 IEEE80211_STYPE_DISASSOC, req->reason_code,
2594 cookie); 2597 cookie);
2598
2599 ieee80211_recalc_idle(sdata->local);
2600
2595 return 0; 2601 return 0;
2596} 2602}
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index f237df408378..9f2807aeaf52 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1712,7 +1712,6 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1712 mpp_path_add(proxied_addr, mpp_addr, sdata); 1712 mpp_path_add(proxied_addr, mpp_addr, sdata);
1713 } else { 1713 } else {
1714 spin_lock_bh(&mppath->state_lock); 1714 spin_lock_bh(&mppath->state_lock);
1715 mppath->exp_time = jiffies;
1716 if (compare_ether_addr(mppath->mpp, mpp_addr) != 0) 1715 if (compare_ether_addr(mppath->mpp, mpp_addr) != 0)
1717 memcpy(mppath->mpp, mpp_addr, ETH_ALEN); 1716 memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
1718 spin_unlock_bh(&mppath->state_lock); 1717 spin_unlock_bh(&mppath->state_lock);
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 4cf387c944bf..f1a4c7160300 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -227,7 +227,8 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
227static void ieee80211_scan_ps_enable(struct ieee80211_sub_if_data *sdata) 227static void ieee80211_scan_ps_enable(struct ieee80211_sub_if_data *sdata)
228{ 228{
229 struct ieee80211_local *local = sdata->local; 229 struct ieee80211_local *local = sdata->local;
230 bool ps = false; 230
231 local->scan_ps_enabled = false;
231 232
232 /* FIXME: what to do when local->pspolling is true? */ 233 /* FIXME: what to do when local->pspolling is true? */
233 234
@@ -235,12 +236,13 @@ static void ieee80211_scan_ps_enable(struct ieee80211_sub_if_data *sdata)
235 cancel_work_sync(&local->dynamic_ps_enable_work); 236 cancel_work_sync(&local->dynamic_ps_enable_work);
236 237
237 if (local->hw.conf.flags & IEEE80211_CONF_PS) { 238 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
238 ps = true; 239 local->scan_ps_enabled = true;
239 local->hw.conf.flags &= ~IEEE80211_CONF_PS; 240 local->hw.conf.flags &= ~IEEE80211_CONF_PS;
240 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); 241 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
241 } 242 }
242 243
243 if (!ps || !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)) 244 if (!(local->scan_ps_enabled) ||
245 !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK))
244 /* 246 /*
245 * If power save was enabled, no need to send a nullfunc 247 * If power save was enabled, no need to send a nullfunc
246 * frame because AP knows that we are sleeping. But if the 248 * frame because AP knows that we are sleeping. But if the
@@ -261,7 +263,7 @@ static void ieee80211_scan_ps_disable(struct ieee80211_sub_if_data *sdata)
261 263
262 if (!local->ps_sdata) 264 if (!local->ps_sdata)
263 ieee80211_send_nullfunc(local, sdata, 0); 265 ieee80211_send_nullfunc(local, sdata, 0);
264 else { 266 else if (local->scan_ps_enabled) {
265 /* 267 /*
266 * In !IEEE80211_HW_PS_NULLFUNC_STACK case the hardware 268 * In !IEEE80211_HW_PS_NULLFUNC_STACK case the hardware
267 * will send a nullfunc frame with the powersave bit set 269 * will send a nullfunc frame with the powersave bit set
@@ -277,6 +279,16 @@ static void ieee80211_scan_ps_disable(struct ieee80211_sub_if_data *sdata)
277 */ 279 */
278 local->hw.conf.flags |= IEEE80211_CONF_PS; 280 local->hw.conf.flags |= IEEE80211_CONF_PS;
279 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); 281 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
282 } else if (local->hw.conf.dynamic_ps_timeout > 0) {
283 /*
284 * If IEEE80211_CONF_PS was not set and the dynamic_ps_timer
285 * had been running before leaving the operating channel,
286 * restart the timer now and send a nullfunc frame to inform
287 * the AP that we are awake.
288 */
289 ieee80211_send_nullfunc(local, sdata, 0);
290 mod_timer(&local->dynamic_ps_timer, jiffies +
291 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
280 } 292 }
281} 293}
282 294
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index d09f78bb2442..78a6e924c7e1 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -579,7 +579,7 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
579 if (elen > left) 579 if (elen > left)
580 break; 580 break;
581 581
582 if (calc_crc && id < 64 && (filter & BIT(id))) 582 if (calc_crc && id < 64 && (filter & (1ULL << id)))
583 crc = crc32_be(crc, pos - 2, elen + 2); 583 crc = crc32_be(crc, pos - 2, elen + 2);
584 584
585 switch (id) { 585 switch (id) {
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 448e5a0fcc2e..c218e07e5caf 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -579,6 +579,8 @@ static ssize_t rfkill_name_show(struct device *dev,
579 579
580static const char *rfkill_get_type_str(enum rfkill_type type) 580static const char *rfkill_get_type_str(enum rfkill_type type)
581{ 581{
582 BUILD_BUG_ON(NUM_RFKILL_TYPES != RFKILL_TYPE_FM + 1);
583
582 switch (type) { 584 switch (type) {
583 case RFKILL_TYPE_WLAN: 585 case RFKILL_TYPE_WLAN:
584 return "wlan"; 586 return "wlan";
@@ -597,8 +599,6 @@ static const char *rfkill_get_type_str(enum rfkill_type type)
597 default: 599 default:
598 BUG(); 600 BUG();
599 } 601 }
600
601 BUILD_BUG_ON(NUM_RFKILL_TYPES != RFKILL_TYPE_FM + 1);
602} 602}
603 603
604static ssize_t rfkill_type_show(struct device *dev, 604static ssize_t rfkill_type_show(struct device *dev,
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index c01470e7de15..baa898add287 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -141,62 +141,35 @@ static const struct ieee80211_regdomain us_regdom = {
141 .reg_rules = { 141 .reg_rules = {
142 /* IEEE 802.11b/g, channels 1..11 */ 142 /* IEEE 802.11b/g, channels 1..11 */
143 REG_RULE(2412-10, 2462+10, 40, 6, 27, 0), 143 REG_RULE(2412-10, 2462+10, 40, 6, 27, 0),
144 /* IEEE 802.11a, channel 36 */ 144 /* IEEE 802.11a, channel 36..48 */
145 REG_RULE(5180-10, 5180+10, 40, 6, 23, 0), 145 REG_RULE(5180-10, 5240+10, 40, 6, 17, 0),
146 /* IEEE 802.11a, channel 40 */
147 REG_RULE(5200-10, 5200+10, 40, 6, 23, 0),
148 /* IEEE 802.11a, channel 44 */
149 REG_RULE(5220-10, 5220+10, 40, 6, 23, 0),
150 /* IEEE 802.11a, channels 48..64 */ 146 /* IEEE 802.11a, channels 48..64 */
151 REG_RULE(5240-10, 5320+10, 40, 6, 23, 0), 147 REG_RULE(5260-10, 5320+10, 40, 6, 20, NL80211_RRF_DFS),
148 /* IEEE 802.11a, channels 100..124 */
149 REG_RULE(5500-10, 5590+10, 40, 6, 20, NL80211_RRF_DFS),
150 /* IEEE 802.11a, channels 132..144 */
151 REG_RULE(5660-10, 5700+10, 40, 6, 20, NL80211_RRF_DFS),
152 /* IEEE 802.11a, channels 149..165, outdoor */ 152 /* IEEE 802.11a, channels 149..165, outdoor */
153 REG_RULE(5745-10, 5825+10, 40, 6, 30, 0), 153 REG_RULE(5745-10, 5825+10, 40, 6, 30, 0),
154 } 154 }
155}; 155};
156 156
157static const struct ieee80211_regdomain jp_regdom = { 157static const struct ieee80211_regdomain jp_regdom = {
158 .n_reg_rules = 3, 158 .n_reg_rules = 6,
159 .alpha2 = "JP", 159 .alpha2 = "JP",
160 .reg_rules = { 160 .reg_rules = {
161 /* IEEE 802.11b/g, channels 1..14 */ 161 /* IEEE 802.11b/g, channels 1..11 */
162 REG_RULE(2412-10, 2484+10, 40, 6, 20, 0), 162 REG_RULE(2412-10, 2462+10, 40, 6, 20, 0),
163 /* IEEE 802.11a, channels 34..48 */ 163 /* IEEE 802.11b/g, channels 12..13 */
164 REG_RULE(5170-10, 5240+10, 40, 6, 20, 164 REG_RULE(2467-10, 2472+10, 20, 6, 20, 0),
165 NL80211_RRF_PASSIVE_SCAN), 165 /* IEEE 802.11b/g, channel 14 */
166 REG_RULE(2484-10, 2484+10, 20, 6, 20, NL80211_RRF_NO_OFDM),
167 /* IEEE 802.11a, channels 36..48 */
168 REG_RULE(5180-10, 5240+10, 40, 6, 20, 0),
166 /* IEEE 802.11a, channels 52..64 */ 169 /* IEEE 802.11a, channels 52..64 */
167 REG_RULE(5260-10, 5320+10, 40, 6, 20, 170 REG_RULE(5260-10, 5320+10, 40, 6, 20, NL80211_RRF_DFS),
168 NL80211_RRF_NO_IBSS | 171 /* IEEE 802.11a, channels 100..144 */
169 NL80211_RRF_DFS), 172 REG_RULE(5500-10, 5700+10, 40, 6, 23, NL80211_RRF_DFS),
170 }
171};
172
173static const struct ieee80211_regdomain eu_regdom = {
174 .n_reg_rules = 6,
175 /*
176 * This alpha2 is bogus, we leave it here just for stupid
177 * backward compatibility
178 */
179 .alpha2 = "EU",
180 .reg_rules = {
181 /* IEEE 802.11b/g, channels 1..13 */
182 REG_RULE(2412-10, 2472+10, 40, 6, 20, 0),
183 /* IEEE 802.11a, channel 36 */
184 REG_RULE(5180-10, 5180+10, 40, 6, 23,
185 NL80211_RRF_PASSIVE_SCAN),
186 /* IEEE 802.11a, channel 40 */
187 REG_RULE(5200-10, 5200+10, 40, 6, 23,
188 NL80211_RRF_PASSIVE_SCAN),
189 /* IEEE 802.11a, channel 44 */
190 REG_RULE(5220-10, 5220+10, 40, 6, 23,
191 NL80211_RRF_PASSIVE_SCAN),
192 /* IEEE 802.11a, channels 48..64 */
193 REG_RULE(5240-10, 5320+10, 40, 6, 20,
194 NL80211_RRF_NO_IBSS |
195 NL80211_RRF_DFS),
196 /* IEEE 802.11a, channels 100..140 */
197 REG_RULE(5500-10, 5700+10, 40, 6, 30,
198 NL80211_RRF_NO_IBSS |
199 NL80211_RRF_DFS),
200 } 173 }
201}; 174};
202 175
@@ -206,15 +179,17 @@ static const struct ieee80211_regdomain *static_regdom(char *alpha2)
206 return &us_regdom; 179 return &us_regdom;
207 if (alpha2[0] == 'J' && alpha2[1] == 'P') 180 if (alpha2[0] == 'J' && alpha2[1] == 'P')
208 return &jp_regdom; 181 return &jp_regdom;
182 /* Use world roaming rules for "EU", since it was a pseudo
183 domain anyway... */
209 if (alpha2[0] == 'E' && alpha2[1] == 'U') 184 if (alpha2[0] == 'E' && alpha2[1] == 'U')
210 return &eu_regdom; 185 return &world_regdom;
211 /* Default, as per the old rules */ 186 /* Default, world roaming rules */
212 return &us_regdom; 187 return &world_regdom;
213} 188}
214 189
215static bool is_old_static_regdom(const struct ieee80211_regdomain *rd) 190static bool is_old_static_regdom(const struct ieee80211_regdomain *rd)
216{ 191{
217 if (rd == &us_regdom || rd == &jp_regdom || rd == &eu_regdom) 192 if (rd == &us_regdom || rd == &jp_regdom || rd == &world_regdom)
218 return true; 193 return true;
219 return false; 194 return false;
220} 195}
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 584eb4826e02..54face3d4424 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -479,6 +479,7 @@ static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
479 } 479 }
480 err = rdev->ops->del_key(&rdev->wiphy, dev, idx, addr); 480 err = rdev->ops->del_key(&rdev->wiphy, dev, idx, addr);
481 } 481 }
482 wdev->wext.connect.privacy = false;
482 /* 483 /*
483 * Applications using wireless extensions expect to be 484 * Applications using wireless extensions expect to be
484 * able to delete keys that don't exist, so allow that. 485 * able to delete keys that don't exist, so allow that.