Diffstat (limited to 'net')
 net/802/fddi.c                       |   2
 net/802/tr.c                         |   3
 net/8021q/vlan.c                     |   2
 net/8021q/vlan_core.c                |   4
 net/8021q/vlan_dev.c                 |   8
 net/atm/br2684.c                     |   1
 net/ax25/ax25_uid.c                  |  12
 net/bluetooth/hci_conn.c             |  12
 net/bluetooth/hci_event.c            |  74
 net/bluetooth/hci_sysfs.c            |  87
 net/bluetooth/rfcomm/core.c          |   2
 net/can/af_can.c                     |   4
 net/core/datagram.c                  |  14
 net/core/dev.c                       |  31
 net/core/skbuff.c                    |  27
 net/ipv4/netfilter/arp_tables.c      | 125
 net/ipv4/netfilter/ip_tables.c       | 126
 net/ipv4/route.c                     |   2
 net/ipv4/tcp.c                       |   6
 net/ipv4/tcp_input.c                 |  13
 net/ipv4/tcp_output.c                |   2
 net/ipv4/udp.c                       |   3
 net/ipv6/ipv6_sockglue.c             |   4
 net/ipv6/netfilter/ip6_tables.c      | 123
 net/ipv6/udp.c                       |   6
 net/ipv6/xfrm6_output.c              |   1
 net/iucv/af_iucv.c                   |  24
 net/mac80211/Kconfig                 |   7
 net/mac80211/main.c                  |  22
 net/mac80211/mlme.c                  |  38
 net/mac80211/pm.c                    |  15
 net/mac80211/rc80211_minstrel.c      |   2
 net/mac80211/rc80211_pid_algo.c      |  73
 net/mac80211/rx.c                    |  15
 net/mac80211/tx.c                    |   2
 net/mac80211/wext.c                  |  43
 net/netfilter/Kconfig                |   1
 net/netfilter/nf_conntrack_netlink.c |   4
 net/netfilter/x_tables.c             |  53
 net/netlabel/netlabel_addrlist.c     |  26
 net/netrom/af_netrom.c               |   6
 net/packet/af_packet.c               |   5
 net/rose/af_rose.c                   |  10
 net/sched/cls_api.c                  |   2
 net/sched/em_meta.c                  |   6
 net/sched/sch_netem.c                |   8
 net/socket.c                         |   6
 net/sunrpc/svc.c                     |   3
 net/sunrpc/svc_xprt.c                | 127
 net/sunrpc/svcsock.c                 |  35
 net/wireless/core.h                  |   2
 net/wireless/nl80211.c               |   4
 net/wireless/reg.c                   |  22
 net/wireless/scan.c                  |  41
 net/xfrm/xfrm_state.c                |   6
 55 files changed, 716 insertions(+), 586 deletions(-)
diff --git a/net/802/fddi.c b/net/802/fddi.c
index f1611a1e06a7..539e6064e6d4 100644
--- a/net/802/fddi.c
+++ b/net/802/fddi.c
@@ -215,3 +215,5 @@ struct net_device *alloc_fddidev(int sizeof_priv)
         return alloc_netdev(sizeof_priv, "fddi%d", fddi_setup);
 }
 EXPORT_SYMBOL(alloc_fddidev);
+
+MODULE_LICENSE("GPL");
diff --git a/net/802/tr.c b/net/802/tr.c
index e7eb13084d71..e874447ad144 100644
--- a/net/802/tr.c
+++ b/net/802/tr.c
@@ -561,6 +561,9 @@ static int rif_seq_show(struct seq_file *seq, void *v)
                         }
                         seq_putc(seq, '\n');
                 }
+
+                if (dev)
+                        dev_put(dev);
         }
         return 0;
 }
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 2b7390e377b3..d1e10546eb85 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -492,6 +492,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
                                 continue;
 
                         dev_change_flags(vlandev, flgs & ~IFF_UP);
+                        vlan_transfer_operstate(dev, vlandev);
                 }
                 break;
 
@@ -507,6 +508,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
                                 continue;
 
                         dev_change_flags(vlandev, flgs | IFF_UP);
+                        vlan_transfer_operstate(dev, vlandev);
                 }
                 break;
 
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 654e45f5719d..c67fe6f75653 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -121,8 +121,10 @@ int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
         if (!skb)
                 return NET_RX_DROP;
 
-        if (netpoll_rx_on(skb))
+        if (netpoll_rx_on(skb)) {
+                skb->protocol = eth_type_trans(skb, skb->dev);
                 return vlan_hwaccel_receive_skb(skb, grp, vlan_tci);
+        }
 
         return napi_frags_finish(napi, skb,
                                  vlan_gro_common(napi, grp, vlan_tci, skb));
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 1b34135cf990..b4b9068e55a7 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -462,6 +462,7 @@ static int vlan_dev_open(struct net_device *dev)
         if (vlan->flags & VLAN_FLAG_GVRP)
                 vlan_gvrp_request_join(dev);
 
+        netif_carrier_on(dev);
         return 0;
 
 clear_allmulti:
@@ -471,6 +472,7 @@ del_unicast:
         if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
                 dev_unicast_delete(real_dev, dev->dev_addr, ETH_ALEN);
 out:
+        netif_carrier_off(dev);
         return err;
 }
 
@@ -492,6 +494,7 @@ static int vlan_dev_stop(struct net_device *dev)
         if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
                 dev_unicast_delete(real_dev, dev->dev_addr, dev->addr_len);
 
+        netif_carrier_off(dev);
         return 0;
 }
 
@@ -612,6 +615,8 @@ static int vlan_dev_init(struct net_device *dev)
         struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
         int subclass = 0;
 
+        netif_carrier_off(dev);
+
         /* IFF_BROADCAST|IFF_MULTICAST; ??? */
         dev->flags = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI);
         dev->iflink = real_dev->ifindex;
@@ -668,7 +673,8 @@ static int vlan_ethtool_get_settings(struct net_device *dev,
         const struct vlan_dev_info *vlan = vlan_dev_info(dev);
         struct net_device *real_dev = vlan->real_dev;
 
-        if (!real_dev->ethtool_ops->get_settings)
+        if (!real_dev->ethtool_ops ||
+            !real_dev->ethtool_ops->get_settings)
                 return -EOPNOTSUPP;
 
         return real_dev->ethtool_ops->get_settings(real_dev, cmd);
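Note on the vlan_dev.c hunks: they adopt the usual netdev carrier convention, where a device is born with carrier off and only asserts it once it is actually usable. A minimal sketch of that convention, with example_hw_start()/example_hw_stop() as hypothetical driver helpers (not from this patch):

static int example_open(struct net_device *dev)
{
        if (example_hw_start(dev))      /* hypothetical helper */
                return -EIO;            /* carrier stays off on failure */

        netif_carrier_on(dev);          /* operstate goes UP, traffic may flow */
        return 0;
}

static int example_stop(struct net_device *dev)
{
        netif_carrier_off(dev);         /* operstate goes DOWN */
        example_hw_stop(dev);           /* hypothetical helper */
        return 0;
}

vlan_dev_init() additionally calls netif_carrier_off() up front, and the vlan_transfer_operstate() calls added in vlan.c keep the vlan device's state tracking the real device across down/up events.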
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 334fcd4a4ea4..3100a8940afc 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -549,6 +549,7 @@ static void br2684_setup(struct net_device *netdev)
         struct br2684_dev *brdev = BRPRIV(netdev);
 
         ether_setup(netdev);
+        brdev->net_dev = netdev;
 
         netdev->netdev_ops = &br2684_netdev_ops;
 
diff --git a/net/ax25/ax25_uid.c b/net/ax25/ax25_uid.c
index 57aeba729bae..832bcf092a01 100644
--- a/net/ax25/ax25_uid.c
+++ b/net/ax25/ax25_uid.c
@@ -148,9 +148,13 @@ static void *ax25_uid_seq_start(struct seq_file *seq, loff_t *pos)
 {
         struct ax25_uid_assoc *pt;
         struct hlist_node *node;
-        int i = 0;
+        int i = 1;
 
         read_lock(&ax25_uid_lock);
+
+        if (*pos == 0)
+                return SEQ_START_TOKEN;
+
         ax25_uid_for_each(pt, node, &ax25_uid_list) {
                 if (i == *pos)
                         return pt;
@@ -162,8 +166,10 @@ static void *ax25_uid_seq_start(struct seq_file *seq, loff_t *pos)
 static void *ax25_uid_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
         ++*pos;
-
-        return hlist_entry(((ax25_uid_assoc *)v)->uid_node.next,
+        if (v == SEQ_START_TOKEN)
+                return ax25_uid_list.first;
+        else
+                return hlist_entry(((ax25_uid_assoc *)v)->uid_node.next,
                            ax25_uid_assoc, uid_node);
 }
 
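The ax25_uid change adopts the standard seq_file SEQ_START_TOKEN idiom, which lets ->show() emit a header row before the first real entry. A minimal sketch of the idiom; my_item_at()/my_item_after() and the header text are illustrative placeholders, not from the patch:

static void *my_seq_start(struct seq_file *seq, loff_t *pos)
{
        if (*pos == 0)
                return SEQ_START_TOKEN;         /* ->show() prints the header */
        return my_item_at(*pos - 1);            /* hypothetical lookup helper */
}

static void *my_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        ++*pos;
        if (v == SEQ_START_TOKEN)
                return my_item_at(0);           /* first real entry */
        return my_item_after(v);                /* hypothetical successor helper */
}

static int my_seq_show(struct seq_file *seq, void *v)
{
        if (v == SEQ_START_TOKEN)
                seq_puts(seq, "header line\n");
        /* else: format one real entry */
        return 0;
}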
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 1181db08d9de..61309b26f271 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -215,6 +215,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
         conn->state = BT_OPEN;
 
         conn->power_save = 1;
+        conn->disc_timeout = HCI_DISCONN_TIMEOUT;
 
         switch (type) {
         case ACL_LINK:
@@ -247,6 +248,8 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
         if (hdev->notify)
                 hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
 
+        hci_conn_init_sysfs(conn);
+
         tasklet_enable(&hdev->tx_task);
 
         return conn;
@@ -424,12 +427,9 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
         if (sec_level == BT_SECURITY_SDP)
                 return 1;
 
-        if (sec_level == BT_SECURITY_LOW) {
-                if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0)
-                        return hci_conn_auth(conn, sec_level, auth_type);
-                else
-                        return 1;
-        }
+        if (sec_level == BT_SECURITY_LOW &&
+            (!conn->ssp_mode || !conn->hdev->ssp_mode))
+                return 1;
 
         if (conn->link_mode & HCI_LM_ENCRYPT)
                 return hci_conn_auth(conn, sec_level, auth_type);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 55534244c3a0..4e7cb88e5da9 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -866,8 +866,16 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
         hci_dev_lock(hdev);
 
         conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
-        if (!conn)
-                goto unlock;
+        if (!conn) {
+                if (ev->link_type != SCO_LINK)
+                        goto unlock;
+
+                conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
+                if (!conn)
+                        goto unlock;
+
+                conn->type = SCO_LINK;
+        }
 
         if (!ev->status) {
                 conn->handle = __le16_to_cpu(ev->handle);
@@ -875,6 +883,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
                 if (conn->type == ACL_LINK) {
                         conn->state = BT_CONFIG;
                         hci_conn_hold(conn);
+                        conn->disc_timeout = HCI_DISCONN_TIMEOUT;
                 } else
                         conn->state = BT_CONNECTED;
 
@@ -1055,9 +1064,14 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
                         hci_proto_connect_cfm(conn, ev->status);
                         hci_conn_put(conn);
                 }
-        } else
+        } else {
                 hci_auth_cfm(conn, ev->status);
 
+                hci_conn_hold(conn);
+                conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+                hci_conn_put(conn);
+        }
+
         if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
                 if (!ev->status) {
                         struct hci_cp_set_conn_encrypt cp;
@@ -1471,7 +1485,21 @@ static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb
 
 static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
+        struct hci_ev_pin_code_req *ev = (void *) skb->data;
+        struct hci_conn *conn;
+
         BT_DBG("%s", hdev->name);
+
+        hci_dev_lock(hdev);
+
+        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+        if (conn) {
+                hci_conn_hold(conn);
+                conn->disc_timeout = HCI_PAIRING_TIMEOUT;
+                hci_conn_put(conn);
+        }
+
+        hci_dev_unlock(hdev);
 }
 
 static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1481,7 +1509,21 @@ static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff
 
 static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
+        struct hci_ev_link_key_notify *ev = (void *) skb->data;
+        struct hci_conn *conn;
+
         BT_DBG("%s", hdev->name);
+
+        hci_dev_lock(hdev);
+
+        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+        if (conn) {
+                hci_conn_hold(conn);
+                conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+                hci_conn_put(conn);
+        }
+
+        hci_dev_unlock(hdev);
 }
 
 static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1646,20 +1688,28 @@ static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_bu
                 conn->type = SCO_LINK;
         }
 
-        if (conn->out && ev->status == 0x1c && conn->attempt < 2) {
-                conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
-                                 (hdev->esco_type & EDR_ESCO_MASK);
-                hci_setup_sync(conn, conn->link->handle);
-                goto unlock;
-        }
-
-        if (!ev->status) {
+        switch (ev->status) {
+        case 0x00:
                 conn->handle = __le16_to_cpu(ev->handle);
                 conn->state = BT_CONNECTED;
 
                 hci_conn_add_sysfs(conn);
-        } else
+                break;
+
+        case 0x1c:        /* SCO interval rejected */
+        case 0x1f:        /* Unspecified error */
+                if (conn->out && conn->attempt < 2) {
+                        conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
+                                         (hdev->esco_type & EDR_ESCO_MASK);
+                        hci_setup_sync(conn, conn->link->handle);
+                        goto unlock;
+                }
+                /* fall through */
+
+        default:
                 conn->state = BT_CLOSED;
+                break;
+        }
 
         hci_proto_connect_cfm(conn, ev->status);
         if (ev->status)
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index ed82796d4a0f..582d8877078c 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -9,8 +9,7 @@
 struct class *bt_class = NULL;
 EXPORT_SYMBOL_GPL(bt_class);
 
-static struct workqueue_struct *btaddconn;
-static struct workqueue_struct *btdelconn;
+static struct workqueue_struct *bt_workq;
 
 static inline char *link_typetostr(int type)
 {
@@ -88,9 +87,10 @@ static struct device_type bt_link = {
 
 static void add_conn(struct work_struct *work)
 {
-        struct hci_conn *conn = container_of(work, struct hci_conn, work);
+        struct hci_conn *conn = container_of(work, struct hci_conn, work_add);
 
-        flush_workqueue(btdelconn);
+        /* ensure previous del is complete */
+        flush_work(&conn->work_del);
 
         if (device_add(&conn->dev) < 0) {
                 BT_ERR("Failed to register connection device");
@@ -98,27 +98,6 @@ static void add_conn(struct work_struct *work)
         }
 }
 
-void hci_conn_add_sysfs(struct hci_conn *conn)
-{
-        struct hci_dev *hdev = conn->hdev;
-
-        BT_DBG("conn %p", conn);
-
-        conn->dev.type = &bt_link;
-        conn->dev.class = bt_class;
-        conn->dev.parent = &hdev->dev;
-
-        dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
-
-        dev_set_drvdata(&conn->dev, conn);
-
-        device_initialize(&conn->dev);
-
-        INIT_WORK(&conn->work, add_conn);
-
-        queue_work(btaddconn, &conn->work);
-}
-
 /*
  * The rfcomm tty device will possibly retain even when conn
  * is down, and sysfs doesn't support move zombie device,
@@ -131,9 +110,15 @@ static int __match_tty(struct device *dev, void *data)
 
 static void del_conn(struct work_struct *work)
 {
-        struct hci_conn *conn = container_of(work, struct hci_conn, work);
+        struct hci_conn *conn = container_of(work, struct hci_conn, work_del);
         struct hci_dev *hdev = conn->hdev;
 
+        /* ensure previous add is complete */
+        flush_work(&conn->work_add);
+
+        if (!device_is_registered(&conn->dev))
+                return;
+
         while (1) {
                 struct device *dev;
 
@@ -149,16 +134,40 @@ static void del_conn(struct work_struct *work)
         hci_dev_put(hdev);
 }
 
-void hci_conn_del_sysfs(struct hci_conn *conn)
+void hci_conn_init_sysfs(struct hci_conn *conn)
 {
+        struct hci_dev *hdev = conn->hdev;
+
         BT_DBG("conn %p", conn);
 
-        if (!device_is_registered(&conn->dev))
-                return;
+        conn->dev.type = &bt_link;
+        conn->dev.class = bt_class;
+        conn->dev.parent = &hdev->dev;
+
+        dev_set_drvdata(&conn->dev, conn);
+
+        device_initialize(&conn->dev);
+
+        INIT_WORK(&conn->work_add, add_conn);
+        INIT_WORK(&conn->work_del, del_conn);
+}
+
+void hci_conn_add_sysfs(struct hci_conn *conn)
+{
+        struct hci_dev *hdev = conn->hdev;
+
+        BT_DBG("conn %p", conn);
 
-        INIT_WORK(&conn->work, del_conn);
+        dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
 
-        queue_work(btdelconn, &conn->work);
+        queue_work(bt_workq, &conn->work_add);
+}
+
+void hci_conn_del_sysfs(struct hci_conn *conn)
+{
+        BT_DBG("conn %p", conn);
+
+        queue_work(bt_workq, &conn->work_del);
 }
 
 static inline char *host_typetostr(int type)
@@ -435,20 +444,13 @@ void hci_unregister_sysfs(struct hci_dev *hdev)
 
 int __init bt_sysfs_init(void)
 {
-        btaddconn = create_singlethread_workqueue("btaddconn");
-        if (!btaddconn)
+        bt_workq = create_singlethread_workqueue("bluetooth");
+        if (!bt_workq)
                 return -ENOMEM;
 
-        btdelconn = create_singlethread_workqueue("btdelconn");
-        if (!btdelconn) {
-                destroy_workqueue(btaddconn);
-                return -ENOMEM;
-        }
-
         bt_class = class_create(THIS_MODULE, "bluetooth");
         if (IS_ERR(bt_class)) {
-                destroy_workqueue(btdelconn);
-                destroy_workqueue(btaddconn);
+                destroy_workqueue(bt_workq);
                 return PTR_ERR(bt_class);
         }
 
@@ -457,8 +459,7 @@ int __init bt_sysfs_init(void)
 
 void bt_sysfs_cleanup(void)
 {
-        destroy_workqueue(btaddconn);
-        destroy_workqueue(btdelconn);
+        destroy_workqueue(bt_workq);
 
         class_destroy(bt_class);
 }
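The sysfs rework above replaces the btaddconn/btdelconn workqueue pair with one queue and two work items per connection, each handler flushing its counterpart before touching the device. A sketch of that ordering idiom; struct my_obj and the handler bodies are illustrative, not the patch's types:

struct my_obj {
        struct work_struct work_add;
        struct work_struct work_del;
};

static void my_add(struct work_struct *work)
{
        struct my_obj *o = container_of(work, struct my_obj, work_add);

        flush_work(&o->work_del);       /* "ensure previous del is complete" */
        /* ... device_add() and friends ... */
}

static void my_del(struct work_struct *work)
{
        struct my_obj *o = container_of(work, struct my_obj, work_del);

        flush_work(&o->work_add);       /* "ensure previous add is complete" */
        /* ... device_del() and friends ... */
}

The flush_work() calls express the per-object "wait for my counterpart" intent that the old code approximated, much more coarsely, with flush_workqueue() across all connections.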
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 1d0fb0f23c63..374536e050aa 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -1194,6 +1194,8 @@ void rfcomm_dlc_accept(struct rfcomm_dlc *d)
 
         rfcomm_send_ua(d->session, d->dlci);
 
+        rfcomm_dlc_clear_timer(d);
+
         rfcomm_dlc_lock(d);
         d->state = BT_CONNECTED;
         d->state_change(d, 0);
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 547bafc79e28..10f0528c3bf5 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -674,8 +674,8 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev,
 
         rcu_read_unlock();
 
-        /* free the skbuff allocated by the netdevice driver */
-        kfree_skb(skb);
+        /* consume the skbuff allocated by the netdevice driver */
+        consume_skb(skb);
 
         if (matches > 0) {
                 can_stats.matches++;
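consume_skb() and kfree_skb() both free the buffer; they differ only in drop accounting. kfree_skb() is meant for packets being discarded (and is what drop-monitoring hooks watch), while consume_skb() marks normal end-of-life, which is what can_rcv() wants here after successful delivery. A hedged sketch of the convention (the delivered flag is a hypothetical example):

static void example_free_skb(struct sk_buff *skb, bool delivered)
{
        if (delivered)
                consume_skb(skb);       /* normal end-of-life: not a drop */
        else
                kfree_skb(skb);         /* a real drop: visible to drop monitors */
}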
diff --git a/net/core/datagram.c b/net/core/datagram.c
index d0de644b378d..b01a76abe1d2 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -64,13 +64,25 @@ static inline int connection_based(struct sock *sk)
         return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
 }
 
+static int receiver_wake_function(wait_queue_t *wait, unsigned mode, int sync,
+                                  void *key)
+{
+        unsigned long bits = (unsigned long)key;
+
+        /*
+         * Avoid a wakeup if event not interesting for us
+         */
+        if (bits && !(bits & (POLLIN | POLLERR)))
+                return 0;
+        return autoremove_wake_function(wait, mode, sync, key);
+}
 /*
  * Wait for a packet..
  */
 static int wait_for_packet(struct sock *sk, int *err, long *timeo_p)
 {
         int error;
-        DEFINE_WAIT(wait);
+        DEFINE_WAIT_FUNC(wait, receiver_wake_function);
 
         prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
 
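DEFINE_WAIT_FUNC() lets the sleeper substitute its own callback for autoremove_wake_function(); the key a waker passes through the keyed wake-up helpers arrives in that callback, which is what receiver_wake_function() filters on. A hedged sketch of the matching waker side, assuming the wake_up_interruptible_poll() helper from this same kernel generation (illustrative, not part of this patch):

static void example_write_space(struct sock *sk)
{
        /* POLLOUT-type bits travel as the wake-up key: datagram receivers
         * sleeping in wait_for_packet() see no POLLIN/POLLERR bit set and
         * simply stay asleep instead of taking a spurious wakeup. */
        wake_up_interruptible_poll(sk->sk_sleep,
                                   POLLOUT | POLLWRNORM | POLLWRBAND);
}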
diff --git a/net/core/dev.c b/net/core/dev.c
index 91d792d17e09..e2e9e4af3ace 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1336,7 +1336,12 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 {
         struct packet_type *ptype;
 
+#ifdef CONFIG_NET_CLS_ACT
+        if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
+                net_timestamp(skb);
+#else
         net_timestamp(skb);
+#endif
 
         rcu_read_lock();
         list_for_each_entry_rcu(ptype, &ptype_all, list) {
@@ -1430,7 +1435,7 @@ void netif_device_detach(struct net_device *dev)
 {
         if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
             netif_running(dev)) {
-                netif_stop_queue(dev);
+                netif_tx_stop_all_queues(dev);
         }
 }
 EXPORT_SYMBOL(netif_device_detach);
@@ -1445,7 +1450,7 @@ void netif_device_attach(struct net_device *dev)
 {
         if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
             netif_running(dev)) {
-                netif_wake_queue(dev);
+                netif_tx_wake_all_queues(dev);
                 __netdev_watchdog_up(dev);
         }
 }
@@ -1730,11 +1735,12 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
 {
         u32 hash;
 
-        if (skb_rx_queue_recorded(skb)) {
-                hash = skb_get_rx_queue(skb);
-        } else if (skb->sk && skb->sk->sk_hash) {
+        if (skb_rx_queue_recorded(skb))
+                return skb_get_rx_queue(skb) % dev->real_num_tx_queues;
+
+        if (skb->sk && skb->sk->sk_hash)
                 hash = skb->sk->sk_hash;
-        } else
+        else
                 hash = skb->protocol;
 
         hash = jhash_1word(hash, skb_tx_hashrnd);
@@ -2328,8 +2334,10 @@ static int napi_gro_complete(struct sk_buff *skb)
         struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
         int err = -ENOENT;
 
-        if (NAPI_GRO_CB(skb)->count == 1)
+        if (NAPI_GRO_CB(skb)->count == 1) {
+                skb_shinfo(skb)->gso_size = 0;
                 goto out;
+        }
 
         rcu_read_lock();
         list_for_each_entry_rcu(ptype, head, list) {
@@ -2348,7 +2356,6 @@ static int napi_gro_complete(struct sk_buff *skb)
         }
 
 out:
-        skb_shinfo(skb)->gso_size = 0;
         return netif_receive_skb(skb);
 }
 
@@ -2539,9 +2546,9 @@ struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi,
         }
 
         BUG_ON(info->nr_frags > MAX_SKB_FRAGS);
-        frag = &info->frags[info->nr_frags - 1];
+        frag = info->frags;
 
-        for (i = skb_shinfo(skb)->nr_frags; i < info->nr_frags; i++) {
+        for (i = 0; i < info->nr_frags; i++) {
                 skb_fill_page_desc(skb, i, frag->page, frag->page_offset,
                                    frag->size);
                 frag++;
@@ -4399,7 +4406,7 @@ int register_netdevice(struct net_device *dev)
         dev->iflink = -1;
 
 #ifdef CONFIG_COMPAT_NET_DEV_OPS
-        /* Netdevice_ops API compatiability support.
+        /* Netdevice_ops API compatibility support.
          * This is temporary until all network devices are converted.
          */
         if (dev->netdev_ops) {
@@ -4410,7 +4417,7 @@ int register_netdevice(struct net_device *dev)
                        dev->name, netdev_drivername(dev, drivername, 64));
 
                 /* This works only because net_device_ops and the
-                   compatiablity structure are the same. */
+                   compatibility structure are the same. */
                 dev->netdev_ops = (void *) &(dev->init);
         }
 #endif
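After the skb_tx_hash() change above, a recorded rx queue is mapped straight onto a tx queue (modulo the real queue count) instead of being fed through the jhash. A worked, standalone restatement of the selection logic; pick_tx_queue() is an illustrative reimplementation, not the kernel symbol:

static u16 pick_tx_queue(bool rx_recorded, u16 rx_queue,
                         u32 sk_hash, u32 protocol,
                         u16 real_num_tx_queues, u32 hashrnd)
{
        u32 hash;

        if (rx_recorded)        /* keep the flow on the paired rx/tx queue */
                return rx_queue % real_num_tx_queues;

        hash = sk_hash ? sk_hash : protocol;    /* flow-hash fallback */
        hash = jhash_1word(hash, hashrnd);      /* linux/jhash.h */

        /* scale the 32-bit hash onto [0, real_num_tx_queues) */
        return (u16) (((u64) hash * real_num_tx_queues) >> 32);
}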
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index ce6356cd9f71..f091a5a845c1 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1365,9 +1365,8 @@ static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
 
 static inline struct page *linear_to_page(struct page *page, unsigned int *len,
                                           unsigned int *offset,
-                                          struct sk_buff *skb)
+                                          struct sk_buff *skb, struct sock *sk)
 {
-        struct sock *sk = skb->sk;
         struct page *p = sk->sk_sndmsg_page;
         unsigned int off;
 
@@ -1405,13 +1404,14 @@ new_page:
  */
 static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page,
                                 unsigned int *len, unsigned int offset,
-                                struct sk_buff *skb, int linear)
+                                struct sk_buff *skb, int linear,
+                                struct sock *sk)
 {
         if (unlikely(spd->nr_pages == PIPE_BUFFERS))
                 return 1;
 
         if (linear) {
-                page = linear_to_page(page, len, &offset, skb);
+                page = linear_to_page(page, len, &offset, skb, sk);
                 if (!page)
                         return 1;
         } else
@@ -1442,7 +1442,8 @@ static inline void __segment_seek(struct page **page, unsigned int *poff,
 static inline int __splice_segment(struct page *page, unsigned int poff,
                                    unsigned int plen, unsigned int *off,
                                    unsigned int *len, struct sk_buff *skb,
-                                   struct splice_pipe_desc *spd, int linear)
+                                   struct splice_pipe_desc *spd, int linear,
+                                   struct sock *sk)
 {
         if (!*len)
                 return 1;
@@ -1465,7 +1466,7 @@ static inline int __splice_segment(struct page *page, unsigned int poff,
                 /* the linear region may spread across several pages */
                 flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
 
-                if (spd_fill_page(spd, page, &flen, poff, skb, linear))
+                if (spd_fill_page(spd, page, &flen, poff, skb, linear, sk))
                         return 1;
 
                 __segment_seek(&page, &poff, &plen, flen);
@@ -1481,8 +1482,8 @@ static inline int __splice_segment(struct page *page, unsigned int poff,
  * pipe is full or if we already spliced the requested length.
  */
 static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
-                             unsigned int *len,
-                             struct splice_pipe_desc *spd)
+                             unsigned int *len, struct splice_pipe_desc *spd,
+                             struct sock *sk)
 {
         int seg;
 
@@ -1492,7 +1493,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
         if (__splice_segment(virt_to_page(skb->data),
                              (unsigned long) skb->data & (PAGE_SIZE - 1),
                              skb_headlen(skb),
-                             offset, len, skb, spd, 1))
+                             offset, len, skb, spd, 1, sk))
                 return 1;
 
         /*
@@ -1502,7 +1503,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
                 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
 
                 if (__splice_segment(f->page, f->page_offset, f->size,
-                                     offset, len, skb, spd, 0))
+                                     offset, len, skb, spd, 0, sk))
                         return 1;
         }
 
@@ -1528,12 +1529,13 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
                 .ops = &sock_pipe_buf_ops,
                 .spd_release = sock_spd_release,
         };
+        struct sock *sk = skb->sk;
 
         /*
          * __skb_splice_bits() only fails if the output has no room left,
          * so no point in going over the frag_list for the error case.
          */
-        if (__skb_splice_bits(skb, &offset, &tlen, &spd))
+        if (__skb_splice_bits(skb, &offset, &tlen, &spd, sk))
                 goto done;
         else if (!tlen)
                 goto done;
@@ -1545,14 +1547,13 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
                 struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
                 for (; list && tlen; list = list->next) {
-                        if (__skb_splice_bits(list, &offset, &tlen, &spd))
+                        if (__skb_splice_bits(list, &offset, &tlen, &spd, sk))
                                 break;
                 }
         }
 
 done:
         if (spd.nr_pages) {
-                struct sock *sk = skb->sk;
                 int ret;
 
                 /*
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 5ba533d234db..831fe1879dc0 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -253,9 +253,9 @@ unsigned int arpt_do_table(struct sk_buff *skb,
         indev = in ? in->name : nulldevname;
         outdev = out ? out->name : nulldevname;
 
-        rcu_read_lock_bh();
-        private = rcu_dereference(table->private);
-        table_base = rcu_dereference(private->entries[smp_processor_id()]);
+        xt_info_rdlock_bh();
+        private = table->private;
+        table_base = private->entries[smp_processor_id()];
 
         e = get_entry(table_base, private->hook_entry[hook]);
         back = get_entry(table_base, private->underflow[hook]);
@@ -273,6 +273,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
 
                         hdr_len = sizeof(*arp) + (2 * sizeof(struct in_addr)) +
                                 (2 * skb->dev->addr_len);
+
                         ADD_COUNTER(e->counters, hdr_len, 1);
 
                         t = arpt_get_target(e);
@@ -328,8 +329,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
                         e = (void *)e + e->next_offset;
                 }
         } while (!hotdrop);
-
-        rcu_read_unlock_bh();
+        xt_info_rdunlock_bh();
 
         if (hotdrop)
                 return NF_DROP;
@@ -711,9 +711,12 @@ static void get_counters(const struct xt_table_info *t,
         /* Instead of clearing (by a previous call to memset())
          * the counters and using adds, we set the counters
          * with data used by 'current' CPU
-         * We dont care about preemption here.
+         *
+         * Bottom half has to be disabled to prevent deadlock
+         * if new softirq were to run and call ipt_do_table
          */
-        curcpu = raw_smp_processor_id();
+        local_bh_disable();
+        curcpu = smp_processor_id();
 
         i = 0;
         ARPT_ENTRY_ITERATE(t->entries[curcpu],
@@ -726,73 +729,22 @@ static void get_counters(const struct xt_table_info *t,
                 if (cpu == curcpu)
                         continue;
                 i = 0;
+                xt_info_wrlock(cpu);
                 ARPT_ENTRY_ITERATE(t->entries[cpu],
                                    t->size,
                                    add_entry_to_counter,
                                    counters,
                                    &i);
+                xt_info_wrunlock(cpu);
         }
-}
-
-
-/* We're lazy, and add to the first CPU; overflow works its fey magic
- * and everything is OK. */
-static int
-add_counter_to_entry(struct arpt_entry *e,
-                     const struct xt_counters addme[],
-                     unsigned int *i)
-{
-        ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
-
-        (*i)++;
-        return 0;
-}
-
-/* Take values from counters and add them back onto the current cpu */
-static void put_counters(struct xt_table_info *t,
-                         const struct xt_counters counters[])
-{
-        unsigned int i, cpu;
-
-        local_bh_disable();
-        cpu = smp_processor_id();
-        i = 0;
-        ARPT_ENTRY_ITERATE(t->entries[cpu],
-                           t->size,
-                           add_counter_to_entry,
-                           counters,
-                           &i);
         local_bh_enable();
 }
 
-static inline int
-zero_entry_counter(struct arpt_entry *e, void *arg)
-{
-        e->counters.bcnt = 0;
-        e->counters.pcnt = 0;
-        return 0;
-}
-
-static void
-clone_counters(struct xt_table_info *newinfo, const struct xt_table_info *info)
-{
-        unsigned int cpu;
-        const void *loc_cpu_entry = info->entries[raw_smp_processor_id()];
-
-        memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
-        for_each_possible_cpu(cpu) {
-                memcpy(newinfo->entries[cpu], loc_cpu_entry, info->size);
-                ARPT_ENTRY_ITERATE(newinfo->entries[cpu], newinfo->size,
-                                   zero_entry_counter, NULL);
-        }
-}
-
 static struct xt_counters *alloc_counters(struct xt_table *table)
 {
         unsigned int countersize;
         struct xt_counters *counters;
         struct xt_table_info *private = table->private;
-        struct xt_table_info *info;
 
         /* We need atomic snapshot of counters: rest doesn't change
          * (other than comefrom, which userspace doesn't care
@@ -802,30 +754,11 @@ static struct xt_counters *alloc_counters(struct xt_table *table)
         counters = vmalloc_node(countersize, numa_node_id());
 
         if (counters == NULL)
-                goto nomem;
-
-        info = xt_alloc_table_info(private->size);
-        if (!info)
-                goto free_counters;
-
-        clone_counters(info, private);
-
-        mutex_lock(&table->lock);
-        xt_table_entry_swap_rcu(private, info);
-        synchronize_net(); /* Wait until smoke has cleared */
+                return ERR_PTR(-ENOMEM);
 
-        get_counters(info, counters);
-        put_counters(private, counters);
-        mutex_unlock(&table->lock);
-
-        xt_free_table_info(info);
+        get_counters(private, counters);
 
         return counters;
-
- free_counters:
-        vfree(counters);
- nomem:
-        return ERR_PTR(-ENOMEM);
 }
 
 static int copy_entries_to_user(unsigned int total_size,
@@ -1094,8 +1027,9 @@ static int __do_replace(struct net *net, const char *name,
             (newinfo->number <= oldinfo->initial_entries))
                 module_put(t->me);
 
-        /* Get the old counters. */
+        /* Get the old counters, and synchronize with replace */
         get_counters(oldinfo, counters);
+
         /* Decrease module usage counts and free resource */
         loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
         ARPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
@@ -1165,10 +1099,23 @@ static int do_replace(struct net *net, void __user *user, unsigned int len)
         return ret;
 }
 
+/* We're lazy, and add to the first CPU; overflow works its fey magic
+ * and everything is OK. */
+static int
+add_counter_to_entry(struct arpt_entry *e,
+                     const struct xt_counters addme[],
+                     unsigned int *i)
+{
+        ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
+
+        (*i)++;
+        return 0;
+}
+
 static int do_add_counters(struct net *net, void __user *user, unsigned int len,
                            int compat)
 {
-        unsigned int i;
+        unsigned int i, curcpu;
         struct xt_counters_info tmp;
         struct xt_counters *paddc;
         unsigned int num_counters;
@@ -1224,26 +1171,26 @@ static int do_add_counters(struct net *net, void __user *user, unsigned int len,
                 goto free;
         }
 
-        mutex_lock(&t->lock);
+        local_bh_disable();
         private = t->private;
         if (private->number != num_counters) {
                 ret = -EINVAL;
                 goto unlock_up_free;
         }
 
-        preempt_disable();
         i = 0;
         /* Choose the copy that is on our node */
-        loc_cpu_entry = private->entries[smp_processor_id()];
+        curcpu = smp_processor_id();
+        loc_cpu_entry = private->entries[curcpu];
+        xt_info_wrlock(curcpu);
         ARPT_ENTRY_ITERATE(loc_cpu_entry,
                            private->size,
                            add_counter_to_entry,
                            paddc,
                            &i);
-        preempt_enable();
+        xt_info_wrunlock(curcpu);
 unlock_up_free:
-        mutex_unlock(&t->lock);
-
+        local_bh_enable();
         xt_table_unlock(t);
         module_put(t->me);
 free:
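The arptables/iptables/ip6tables hunks in this series all make the same conversion: the old RCU snapshot-and-swap machinery (clone_counters()/xt_table_entry_swap_rcu()/put_counters()) goes away in favour of per-CPU locks, where the packet path takes a cheap recursive read lock on its own CPU and counter readers briefly write-lock one foreign CPU at a time. A hedged sketch of the aggregation side, mirroring get_counters() above (the iteration bodies are elided as comments):

static void snapshot_counters(const struct xt_table_info *t,
                              struct xt_counters *counters)
{
        unsigned int cpu, curcpu;

        local_bh_disable();             /* our own packet path cannot run */
        curcpu = smp_processor_id();
        /* ... SET_COUNTER() over t->entries[curcpu] seeds the snapshot ... */

        for_each_possible_cpu(cpu) {
                if (cpu == curcpu)
                        continue;
                xt_info_wrlock(cpu);    /* freeze that CPU's rule traversal */
                /* ... ADD_COUNTER() over t->entries[cpu] into counters ... */
                xt_info_wrunlock(cpu);
        }
        local_bh_enable();
}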
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 810c0b62c7d4..2ec8d7290c40 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -338,10 +338,9 @@ ipt_do_table(struct sk_buff *skb,
         tgpar.hooknum = hook;
 
         IP_NF_ASSERT(table->valid_hooks & (1 << hook));
-
-        rcu_read_lock_bh();
-        private = rcu_dereference(table->private);
-        table_base = rcu_dereference(private->entries[smp_processor_id()]);
+        xt_info_rdlock_bh();
+        private = table->private;
+        table_base = private->entries[smp_processor_id()];
 
         e = get_entry(table_base, private->hook_entry[hook]);
 
@@ -436,8 +435,7 @@ ipt_do_table(struct sk_buff *skb,
                         e = (void *)e + e->next_offset;
                 }
         } while (!hotdrop);
-
-        rcu_read_unlock_bh();
+        xt_info_rdunlock_bh();
 
 #ifdef DEBUG_ALLOW_ALL
         return NF_ACCEPT;
@@ -896,10 +894,13 @@ get_counters(const struct xt_table_info *t,
 
         /* Instead of clearing (by a previous call to memset())
          * the counters and using adds, we set the counters
-         * with data used by 'current' CPU
-         * We dont care about preemption here.
+         * with data used by 'current' CPU.
+         *
+         * Bottom half has to be disabled to prevent deadlock
+         * if new softirq were to run and call ipt_do_table
          */
-        curcpu = raw_smp_processor_id();
+        local_bh_disable();
+        curcpu = smp_processor_id();
 
         i = 0;
         IPT_ENTRY_ITERATE(t->entries[curcpu],
@@ -912,74 +913,22 @@ get_counters(const struct xt_table_info *t,
                 if (cpu == curcpu)
                         continue;
                 i = 0;
+                xt_info_wrlock(cpu);
                 IPT_ENTRY_ITERATE(t->entries[cpu],
                                   t->size,
                                   add_entry_to_counter,
                                   counters,
                                   &i);
+                xt_info_wrunlock(cpu);
         }
-
-}
-
-/* We're lazy, and add to the first CPU; overflow works its fey magic
- * and everything is OK. */
-static int
-add_counter_to_entry(struct ipt_entry *e,
-                     const struct xt_counters addme[],
-                     unsigned int *i)
-{
-        ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
-
-        (*i)++;
-        return 0;
-}
-
-/* Take values from counters and add them back onto the current cpu */
-static void put_counters(struct xt_table_info *t,
-                         const struct xt_counters counters[])
-{
-        unsigned int i, cpu;
-
-        local_bh_disable();
-        cpu = smp_processor_id();
-        i = 0;
-        IPT_ENTRY_ITERATE(t->entries[cpu],
-                          t->size,
-                          add_counter_to_entry,
-                          counters,
-                          &i);
         local_bh_enable();
 }
 
-
-static inline int
-zero_entry_counter(struct ipt_entry *e, void *arg)
-{
-        e->counters.bcnt = 0;
-        e->counters.pcnt = 0;
-        return 0;
-}
-
-static void
-clone_counters(struct xt_table_info *newinfo, const struct xt_table_info *info)
-{
-        unsigned int cpu;
-        const void *loc_cpu_entry = info->entries[raw_smp_processor_id()];
-
-        memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
-        for_each_possible_cpu(cpu) {
-                memcpy(newinfo->entries[cpu], loc_cpu_entry, info->size);
-                IPT_ENTRY_ITERATE(newinfo->entries[cpu], newinfo->size,
-                                  zero_entry_counter, NULL);
-        }
-}
-
 static struct xt_counters * alloc_counters(struct xt_table *table)
 {
         unsigned int countersize;
         struct xt_counters *counters;
         struct xt_table_info *private = table->private;
-        struct xt_table_info *info;
 
         /* We need atomic snapshot of counters: rest doesn't change
            (other than comefrom, which userspace doesn't care
@@ -988,30 +937,11 @@ static struct xt_counters * alloc_counters(struct xt_table *table)
         counters = vmalloc_node(countersize, numa_node_id());
 
         if (counters == NULL)
-                goto nomem;
+                return ERR_PTR(-ENOMEM);
 
-        info = xt_alloc_table_info(private->size);
-        if (!info)
-                goto free_counters;
-
-        clone_counters(info, private);
-
-        mutex_lock(&table->lock);
-        xt_table_entry_swap_rcu(private, info);
-        synchronize_net(); /* Wait until smoke has cleared */
-
-        get_counters(info, counters);
-        put_counters(private, counters);
-        mutex_unlock(&table->lock);
-
-        xt_free_table_info(info);
+        get_counters(private, counters);
 
         return counters;
-
- free_counters:
-        vfree(counters);
- nomem:
-        return ERR_PTR(-ENOMEM);
 }
 
 static int
@@ -1306,8 +1236,9 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
             (newinfo->number <= oldinfo->initial_entries))
                 module_put(t->me);
 
-        /* Get the old counters. */
+        /* Get the old counters, and synchronize with replace */
         get_counters(oldinfo, counters);
+
         /* Decrease module usage counts and free resource */
         loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
         IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
@@ -1377,11 +1308,23 @@ do_replace(struct net *net, void __user *user, unsigned int len)
         return ret;
 }
 
+/* We're lazy, and add to the first CPU; overflow works its fey magic
+ * and everything is OK. */
+static int
+add_counter_to_entry(struct ipt_entry *e,
+                     const struct xt_counters addme[],
+                     unsigned int *i)
+{
+        ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
+
+        (*i)++;
+        return 0;
+}
 
 static int
 do_add_counters(struct net *net, void __user *user, unsigned int len, int compat)
 {
-        unsigned int i;
+        unsigned int i, curcpu;
         struct xt_counters_info tmp;
         struct xt_counters *paddc;
         unsigned int num_counters;
@@ -1437,25 +1380,26 @@ do_add_counters(struct net *net, void __user *user, unsigned int len, int compat
                 goto free;
         }
 
-        mutex_lock(&t->lock);
+        local_bh_disable();
         private = t->private;
         if (private->number != num_counters) {
                 ret = -EINVAL;
                 goto unlock_up_free;
         }
 
-        preempt_disable();
         i = 0;
         /* Choose the copy that is on our node */
-        loc_cpu_entry = private->entries[raw_smp_processor_id()];
+        curcpu = smp_processor_id();
+        loc_cpu_entry = private->entries[curcpu];
+        xt_info_wrlock(curcpu);
         IPT_ENTRY_ITERATE(loc_cpu_entry,
                           private->size,
                           add_counter_to_entry,
                           paddc,
                           &i);
-        preempt_enable();
+        xt_info_wrunlock(curcpu);
 unlock_up_free:
-        mutex_unlock(&t->lock);
+        local_bh_enable();
         xt_table_unlock(t);
         module_put(t->me);
 free:
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index c40debe51b38..c4c60e9f068a 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -3397,7 +3397,7 @@ int __init ip_rt_init(void)
                                         0,
                                         &rt_hash_log,
                                         &rt_hash_mask,
-                                        0);
+                                        rhash_entries ? 0 : 512 * 1024);
         memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
         rt_hash_lock_init();
 
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index fafbec8b073e..1d7f49c6f0ca 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2511,6 +2511,7 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
         struct sk_buff *p;
         struct tcphdr *th;
         struct tcphdr *th2;
+        unsigned int len;
         unsigned int thlen;
         unsigned int flags;
         unsigned int mss = 1;
@@ -2531,6 +2532,7 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 
         skb_gro_pull(skb, thlen);
 
+        len = skb_gro_len(skb);
         flags = tcp_flag_word(th);
 
         for (; (p = *head); head = &p->next) {
@@ -2561,7 +2563,7 @@ found:
 
         mss = skb_shinfo(p)->gso_size;
 
-        flush |= (skb_gro_len(skb) > mss) | !skb_gro_len(skb);
+        flush |= (len > mss) | !len;
         flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
 
         if (flush || skb_gro_receive(head, skb)) {
@@ -2574,7 +2576,7 @@ found:
         tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);
 
 out_check_final:
-        flush = skb_gro_len(skb) < mss;
+        flush = len < mss;
         flush |= flags & (TCP_FLAG_URG | TCP_FLAG_PSH | TCP_FLAG_RST |
                           TCP_FLAG_SYN | TCP_FLAG_FIN);
 
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2bc8e27a163d..eec3e6f9956c 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -597,16 +597,6 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
                 tcp_grow_window(sk, skb);
 }
 
-static u32 tcp_rto_min(struct sock *sk)
-{
-        struct dst_entry *dst = __sk_dst_get(sk);
-        u32 rto_min = TCP_RTO_MIN;
-
-        if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
-                rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
-        return rto_min;
-}
-
 /* Called to compute a smoothed rtt estimate. The data fed to this
  * routine either comes from timestamps, or from segments that were
  * known _not_ to have been retransmitted [see Karn/Partridge
@@ -928,6 +918,8 @@ static void tcp_init_metrics(struct sock *sk)
         tcp_set_rto(sk);
         if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp)
                 goto reset;
+
+cwnd:
         tp->snd_cwnd = tcp_init_cwnd(tp, dst);
         tp->snd_cwnd_stamp = tcp_time_stamp;
         return;
@@ -942,6 +934,7 @@ reset:
                 tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT;
                 inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
         }
+        goto cwnd;
 }
 
 static void tcp_update_reordering(struct sock *sk, const int metric,
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 53300fa2359f..59aec609cec6 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -778,7 +778,7 @@ static void tcp_adjust_pcount(struct sock *sk, struct sk_buff *skb, int decr)
 
         if (tp->lost_skb_hint &&
             before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
-            (tcp_is_fack(tp) || TCP_SKB_CB(skb)->sacked))
+            (tcp_is_fack(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)))
                 tp->lost_cnt_hint -= decr;
 
         tcp_verify_left_out(tp);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index bda08a09357d..7a1d1ce22e66 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -222,7 +222,7 @@ fail:
         return error;
 }
 
-int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
+static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
 {
         struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);
 
@@ -1823,7 +1823,6 @@ EXPORT_SYMBOL(udp_lib_getsockopt);
 EXPORT_SYMBOL(udp_lib_setsockopt);
 EXPORT_SYMBOL(udp_poll);
 EXPORT_SYMBOL(udp_lib_get_port);
-EXPORT_SYMBOL(ipv4_rcv_saddr_equal);
 
 #ifdef CONFIG_PROC_FS
 EXPORT_SYMBOL(udp_proc_register);
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index d31df0f4bc9a..a7fdf9a27f15 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -380,10 +380,6 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
                         default:
                                 goto sticky_done;
                         }
-
-                        if ((rthdr->hdrlen & 1) ||
-                            (rthdr->hdrlen >> 1) != rthdr->segments_left)
-                                goto sticky_done;
                 }
 
                 retv = 0;
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 800ae8542471..219e165aea10 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -365,9 +365,9 @@ ip6t_do_table(struct sk_buff *skb,
 
         IP_NF_ASSERT(table->valid_hooks & (1 << hook));
 
-        rcu_read_lock_bh();
-        private = rcu_dereference(table->private);
-        table_base = rcu_dereference(private->entries[smp_processor_id()]);
+        xt_info_rdlock_bh();
+        private = table->private;
+        table_base = private->entries[smp_processor_id()];
 
         e = get_entry(table_base, private->hook_entry[hook]);
 
@@ -466,7 +466,7 @@ ip6t_do_table(struct sk_buff *skb,
 #ifdef CONFIG_NETFILTER_DEBUG
         ((struct ip6t_entry *)table_base)->comefrom = NETFILTER_LINK_POISON;
 #endif
-        rcu_read_unlock_bh();
+        xt_info_rdunlock_bh();
 
 #ifdef DEBUG_ALLOW_ALL
         return NF_ACCEPT;
@@ -926,9 +926,12 @@ get_counters(const struct xt_table_info *t,
         /* Instead of clearing (by a previous call to memset())
          * the counters and using adds, we set the counters
          * with data used by 'current' CPU
-         * We dont care about preemption here.
+         *
+         * Bottom half has to be disabled to prevent deadlock
+         * if new softirq were to run and call ipt_do_table
          */
-        curcpu = raw_smp_processor_id();
+        local_bh_disable();
+        curcpu = smp_processor_id();
 
         i = 0;
         IP6T_ENTRY_ITERATE(t->entries[curcpu],
@@ -941,72 +944,22 @@ get_counters(const struct xt_table_info *t,
                 if (cpu == curcpu)
                         continue;
                 i = 0;
+                xt_info_wrlock(cpu);
                 IP6T_ENTRY_ITERATE(t->entries[cpu],
                                    t->size,
                                    add_entry_to_counter,
                                    counters,
                                    &i);
+                xt_info_wrunlock(cpu);
         }
-}
-
-/* We're lazy, and add to the first CPU; overflow works its fey magic
- * and everything is OK. */
-static int
-add_counter_to_entry(struct ip6t_entry *e,
-                     const struct xt_counters addme[],
-                     unsigned int *i)
-{
-        ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
-
-        (*i)++;
-        return 0;
-}
-
-/* Take values from counters and add them back onto the current cpu */
-static void put_counters(struct xt_table_info *t,
-                         const struct xt_counters counters[])
-{
-        unsigned int i, cpu;
-
-        local_bh_disable();
-        cpu = smp_processor_id();
-        i = 0;
-        IP6T_ENTRY_ITERATE(t->entries[cpu],
-                           t->size,
-                           add_counter_to_entry,
-                           counters,
-                           &i);
         local_bh_enable();
 }
 
-static inline int
-zero_entry_counter(struct ip6t_entry *e, void *arg)
-{
-        e->counters.bcnt = 0;
-        e->counters.pcnt = 0;
-        return 0;
-}
-
-static void
-clone_counters(struct xt_table_info *newinfo, const struct xt_table_info *info)
-{
-        unsigned int cpu;
-        const void *loc_cpu_entry = info->entries[raw_smp_processor_id()];
-
-        memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
-        for_each_possible_cpu(cpu) {
-                memcpy(newinfo->entries[cpu], loc_cpu_entry, info->size);
999 IP6T_ENTRY_ITERATE(newinfo->entries[cpu], newinfo->size,
1000 zero_entry_counter, NULL);
1001 }
1002}
1003
1004static struct xt_counters *alloc_counters(struct xt_table *table) 958static struct xt_counters *alloc_counters(struct xt_table *table)
1005{ 959{
1006 unsigned int countersize; 960 unsigned int countersize;
1007 struct xt_counters *counters; 961 struct xt_counters *counters;
1008 struct xt_table_info *private = table->private; 962 struct xt_table_info *private = table->private;
1009 struct xt_table_info *info;
1010 963
1011 /* We need atomic snapshot of counters: rest doesn't change 964 /* We need atomic snapshot of counters: rest doesn't change
1012 (other than comefrom, which userspace doesn't care 965 (other than comefrom, which userspace doesn't care
@@ -1015,30 +968,11 @@ static struct xt_counters *alloc_counters(struct xt_table *table)
1015 counters = vmalloc_node(countersize, numa_node_id()); 968 counters = vmalloc_node(countersize, numa_node_id());
1016 969
1017 if (counters == NULL) 970 if (counters == NULL)
1018 goto nomem; 971 return ERR_PTR(-ENOMEM);
1019 972
1020 info = xt_alloc_table_info(private->size); 973 get_counters(private, counters);
1021 if (!info)
1022 goto free_counters;
1023
1024 clone_counters(info, private);
1025
1026 mutex_lock(&table->lock);
1027 xt_table_entry_swap_rcu(private, info);
1028 synchronize_net(); /* Wait until smoke has cleared */
1029
1030 get_counters(info, counters);
1031 put_counters(private, counters);
1032 mutex_unlock(&table->lock);
1033
1034 xt_free_table_info(info);
1035 974
1036 return counters; 975 return counters;
1037
1038 free_counters:
1039 vfree(counters);
1040 nomem:
1041 return ERR_PTR(-ENOMEM);
1042} 976}
1043 977
1044static int 978static int
@@ -1334,8 +1268,9 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1334 (newinfo->number <= oldinfo->initial_entries)) 1268 (newinfo->number <= oldinfo->initial_entries))
1335 module_put(t->me); 1269 module_put(t->me);
1336 1270
1337 /* Get the old counters. */ 1271 /* Get the old counters, and synchronize with replace */
1338 get_counters(oldinfo, counters); 1272 get_counters(oldinfo, counters);
1273
1339 /* Decrease module usage counts and free resource */ 1274 /* Decrease module usage counts and free resource */
1340 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()]; 1275 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1341 IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry, 1276 IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
@@ -1405,11 +1340,24 @@ do_replace(struct net *net, void __user *user, unsigned int len)
1405 return ret; 1340 return ret;
1406} 1341}
1407 1342
1343/* We're lazy, and add to the first CPU; overflow works its fey magic
1344 * and everything is OK. */
1345static int
1346add_counter_to_entry(struct ip6t_entry *e,
1347 const struct xt_counters addme[],
1348 unsigned int *i)
1349{
1350 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1351
1352 (*i)++;
1353 return 0;
1354}
1355
1408static int 1356static int
1409do_add_counters(struct net *net, void __user *user, unsigned int len, 1357do_add_counters(struct net *net, void __user *user, unsigned int len,
1410 int compat) 1358 int compat)
1411{ 1359{
1412 unsigned int i; 1360 unsigned int i, curcpu;
1413 struct xt_counters_info tmp; 1361 struct xt_counters_info tmp;
1414 struct xt_counters *paddc; 1362 struct xt_counters *paddc;
1415 unsigned int num_counters; 1363 unsigned int num_counters;
@@ -1465,25 +1413,28 @@ do_add_counters(struct net *net, void __user *user, unsigned int len,
1465 goto free; 1413 goto free;
1466 } 1414 }
1467 1415
1468 mutex_lock(&t->lock); 1416
1417 local_bh_disable();
1469 private = t->private; 1418 private = t->private;
1470 if (private->number != num_counters) { 1419 if (private->number != num_counters) {
1471 ret = -EINVAL; 1420 ret = -EINVAL;
1472 goto unlock_up_free; 1421 goto unlock_up_free;
1473 } 1422 }
1474 1423
1475 preempt_disable();
1476 i = 0; 1424 i = 0;
1477 /* Choose the copy that is on our node */ 1425 /* Choose the copy that is on our node */
1478 loc_cpu_entry = private->entries[raw_smp_processor_id()]; 1426 curcpu = smp_processor_id();
1427 xt_info_wrlock(curcpu);
1428 loc_cpu_entry = private->entries[curcpu];
1479 IP6T_ENTRY_ITERATE(loc_cpu_entry, 1429 IP6T_ENTRY_ITERATE(loc_cpu_entry,
1480 private->size, 1430 private->size,
1481 add_counter_to_entry, 1431 add_counter_to_entry,
1482 paddc, 1432 paddc,
1483 &i); 1433 &i);
1484 preempt_enable(); 1434 xt_info_wrunlock(curcpu);
1435
1485 unlock_up_free: 1436 unlock_up_free:
1486 mutex_unlock(&t->lock); 1437 local_bh_enable();
1487 xt_table_unlock(t); 1438 xt_table_unlock(t);
1488 module_put(t->me); 1439 module_put(t->me);
1489 free: 1440 free:
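With the RCU/table-clone scheme gone from ip6_tables.c, alloc_counters() is reduced to a plain snapshot and all consistency comes from the per-cpu locks. A condensed view of the new get_counters() flow, assuming the xt_info_wrlock()/xt_info_wrunlock() helpers from the matching x_tables change (sketched after the net/netfilter/x_tables.c hunks below):

	static void get_counters_sketch(const struct xt_table_info *t,
					struct xt_counters counters[])
	{
		unsigned int cpu, curcpu;

		local_bh_disable();	/* keep softirqs from re-entering on this CPU */
		curcpu = smp_processor_id();
		/* seed counters[] from the local CPU's entries (set_entry_to_counter) */
		for_each_possible_cpu(cpu) {
			if (cpu == curcpu)
				continue;
			xt_info_wrlock(cpu);	/* exclude that CPU's packet path */
			/* fold cpu's entries into counters[] (add_entry_to_counter) */
			xt_info_wrunlock(cpu);
		}
		local_bh_enable();
	}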
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 6842dd2edd5b..8905712cfbb8 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -53,6 +53,8 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
53{ 53{
54 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr; 54 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
55 const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2); 55 const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
56 __be32 sk_rcv_saddr = inet_sk(sk)->rcv_saddr;
57 __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
56 int sk_ipv6only = ipv6_only_sock(sk); 58 int sk_ipv6only = ipv6_only_sock(sk);
57 int sk2_ipv6only = inet_v6_ipv6only(sk2); 59 int sk2_ipv6only = inet_v6_ipv6only(sk2);
58 int addr_type = ipv6_addr_type(sk_rcv_saddr6); 60 int addr_type = ipv6_addr_type(sk_rcv_saddr6);
@@ -60,7 +62,9 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
60 62
61 /* if both are mapped, treat as IPv4 */ 63 /* if both are mapped, treat as IPv4 */
62 if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) 64 if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED)
63 return ipv4_rcv_saddr_equal(sk, sk2); 65 return (!sk2_ipv6only &&
66 (!sk_rcv_saddr || !sk2_rcv_saddr ||
67 sk_rcv_saddr == sk2_rcv_saddr));
64 68
65 if (addr_type2 == IPV6_ADDR_ANY && 69 if (addr_type2 == IPV6_ADDR_ANY &&
66 !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED)) 70 !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
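Because ipv4_rcv_saddr_equal() is now static to net/ipv4/udp.c (see the udp.c hunk above), the mapped-address case here open-codes the IPv4 rule. The rule itself is small: an unbound (INADDR_ANY) address conflicts with everything, otherwise the two bound addresses must match, and an IPv6-only socket never conflicts with a v4-mapped one. As an illustrative helper (the names are mine, not the kernel's):

	static int mapped_saddr_conflict(__be32 a, __be32 b, int b_ipv6only)
	{
		/* 0 here is INADDR_ANY, the wildcard bind */
		return !b_ipv6only && (!a || !b || a == b);
	}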
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 0af823cf7f1f..5ee5a031bc93 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -72,6 +72,7 @@ int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
72#endif 72#endif
73 73
74 skb->protocol = htons(ETH_P_IPV6); 74 skb->protocol = htons(ETH_P_IPV6);
75 skb->local_df = 1;
75 76
76 return x->outer_mode->output2(x, skb); 77 return x->outer_mode->output2(x, skb);
77} 78}
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 49e786535dc8..b51c9187c347 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -172,6 +172,7 @@ static void iucv_sock_close(struct sock *sk)
172 err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo); 172 err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo);
173 } 173 }
174 174
175 case IUCV_CLOSING: /* fall through */
175 sk->sk_state = IUCV_CLOSED; 176 sk->sk_state = IUCV_CLOSED;
176 sk->sk_state_change(sk); 177 sk->sk_state_change(sk);
177 178
@@ -224,6 +225,8 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
224 spin_lock_init(&iucv_sk(sk)->message_q.lock); 225 spin_lock_init(&iucv_sk(sk)->message_q.lock);
225 skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q); 226 skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
226 iucv_sk(sk)->send_tag = 0; 227 iucv_sk(sk)->send_tag = 0;
228 iucv_sk(sk)->path = NULL;
229 memset(&iucv_sk(sk)->src_user_id , 0, 32);
227 230
228 sk->sk_destruct = iucv_sock_destruct; 231 sk->sk_destruct = iucv_sock_destruct;
229 sk->sk_sndtimeo = IUCV_CONN_TIMEOUT; 232 sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
@@ -811,6 +814,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
811 814
812 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); 815 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
813 816
817 /* receive/dequeue next skb:
818 * the function understands MSG_PEEK and, thus, does not dequeue skb */
814 skb = skb_recv_datagram(sk, flags, noblock, &err); 819 skb = skb_recv_datagram(sk, flags, noblock, &err);
815 if (!skb) { 820 if (!skb) {
816 if (sk->sk_shutdown & RCV_SHUTDOWN) 821 if (sk->sk_shutdown & RCV_SHUTDOWN)
@@ -858,9 +863,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
858 iucv_process_message_q(sk); 863 iucv_process_message_q(sk);
859 spin_unlock_bh(&iucv->message_q.lock); 864 spin_unlock_bh(&iucv->message_q.lock);
860 } 865 }
861 866 }
862 } else
863 skb_queue_head(&sk->sk_receive_queue, skb);
864 867
865done: 868done:
866 return err ? : copied; 869 return err ? : copied;
@@ -934,6 +937,9 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
934 937
935 lock_sock(sk); 938 lock_sock(sk);
936 switch (sk->sk_state) { 939 switch (sk->sk_state) {
940 case IUCV_DISCONN:
941 case IUCV_CLOSING:
942 case IUCV_SEVERED:
937 case IUCV_CLOSED: 943 case IUCV_CLOSED:
938 err = -ENOTCONN; 944 err = -ENOTCONN;
939 goto fail; 945 goto fail;
@@ -1113,8 +1119,12 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
1113 struct sock_msg_q *save_msg; 1119 struct sock_msg_q *save_msg;
1114 int len; 1120 int len;
1115 1121
1116 if (sk->sk_shutdown & RCV_SHUTDOWN) 1122 if (sk->sk_shutdown & RCV_SHUTDOWN) {
1123 iucv_message_reject(path, msg);
1117 return; 1124 return;
1125 }
1126
1127 spin_lock(&iucv->message_q.lock);
1118 1128
1119 if (!list_empty(&iucv->message_q.list) || 1129 if (!list_empty(&iucv->message_q.list) ||
1120 !skb_queue_empty(&iucv->backlog_skb_q)) 1130 !skb_queue_empty(&iucv->backlog_skb_q))
@@ -1129,9 +1139,8 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
1129 if (!skb) 1139 if (!skb)
1130 goto save_message; 1140 goto save_message;
1131 1141
1132 spin_lock(&iucv->message_q.lock);
1133 iucv_process_message(sk, skb, path, msg); 1142 iucv_process_message(sk, skb, path, msg);
1134 spin_unlock(&iucv->message_q.lock); 1143 goto out_unlock;
1135 1144
1136 return; 1145 return;
1137 1146
@@ -1142,8 +1151,9 @@ save_message:
1142 save_msg->path = path; 1151 save_msg->path = path;
1143 save_msg->msg = *msg; 1152 save_msg->msg = *msg;
1144 1153
1145 spin_lock(&iucv->message_q.lock);
1146 list_add_tail(&save_msg->list, &iucv->message_q.list); 1154 list_add_tail(&save_msg->list, &iucv->message_q.list);
1155
1156out_unlock:
1147 spin_unlock(&iucv->message_q.lock); 1157 spin_unlock(&iucv->message_q.lock);
1148} 1158}
1149 1159
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index f3d9ae350fb6..ecc3faf9f11a 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -202,10 +202,3 @@ config MAC80211_DEBUG_COUNTERS
202 and show them in debugfs. 202 and show them in debugfs.
203 203
204 If unsure, say N. 204 If unsure, say N.
205
206config MAC80211_VERBOSE_SPECT_MGMT_DEBUG
207 bool "Verbose Spectrum Management (IEEE 802.11h)debugging"
208 depends on MAC80211_DEBUG_MENU
209 ---help---
210 Say Y here to print out verbose Spectrum Management (IEEE 802.11h)
211 debug messages.
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index a6f1d8a869bc..14134193cd17 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -258,7 +258,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
258 (chan->max_power - local->power_constr_level) : 258 (chan->max_power - local->power_constr_level) :
259 chan->max_power; 259 chan->max_power;
260 260
261 if (local->user_power_level) 261 if (local->user_power_level >= 0)
262 power = min(power, local->user_power_level); 262 power = min(power, local->user_power_level);
263 263
264 if (local->hw.conf.power_level != power) { 264 if (local->hw.conf.power_level != power) {
@@ -757,6 +757,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
757 local->hw.conf.long_frame_max_tx_count = 4; 757 local->hw.conf.long_frame_max_tx_count = 4;
758 local->hw.conf.short_frame_max_tx_count = 7; 758 local->hw.conf.short_frame_max_tx_count = 7;
759 local->hw.conf.radio_enabled = true; 759 local->hw.conf.radio_enabled = true;
760 local->user_power_level = -1;
760 761
761 INIT_LIST_HEAD(&local->interfaces); 762 INIT_LIST_HEAD(&local->interfaces);
762 mutex_init(&local->iflist_mtx); 763 mutex_init(&local->iflist_mtx);
@@ -909,6 +910,13 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
909 if (result < 0) 910 if (result < 0)
910 goto fail_sta_info; 911 goto fail_sta_info;
911 912
913 result = ieee80211_wep_init(local);
914 if (result < 0) {
915 printk(KERN_DEBUG "%s: Failed to initialize wep: %d\n",
916 wiphy_name(local->hw.wiphy), result);
917 goto fail_wep;
918 }
919
912 rtnl_lock(); 920 rtnl_lock();
913 result = dev_alloc_name(local->mdev, local->mdev->name); 921 result = dev_alloc_name(local->mdev, local->mdev->name);
914 if (result < 0) 922 if (result < 0)
@@ -930,14 +938,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
930 goto fail_rate; 938 goto fail_rate;
931 } 939 }
932 940
933 result = ieee80211_wep_init(local);
934
935 if (result < 0) {
936 printk(KERN_DEBUG "%s: Failed to initialize wep: %d\n",
937 wiphy_name(local->hw.wiphy), result);
938 goto fail_wep;
939 }
940
941 /* add one default STA interface if supported */ 941 /* add one default STA interface if supported */
942 if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_STATION)) { 942 if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_STATION)) {
943 result = ieee80211_if_add(local, "wlan%d", NULL, 943 result = ieee80211_if_add(local, "wlan%d", NULL,
@@ -967,13 +967,13 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
967 967
968 return 0; 968 return 0;
969 969
970fail_wep:
971 rate_control_deinitialize(local);
972fail_rate: 970fail_rate:
973 unregister_netdevice(local->mdev); 971 unregister_netdevice(local->mdev);
974 local->mdev = NULL; 972 local->mdev = NULL;
975fail_dev: 973fail_dev:
976 rtnl_unlock(); 974 rtnl_unlock();
975 ieee80211_wep_free(local);
976fail_wep:
977 sta_info_stop(local); 977 sta_info_stop(local);
978fail_sta_info: 978fail_sta_info:
979 debugfs_hw_del(local); 979 debugfs_hw_del(local);
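Moving ieee80211_wep_init() ahead of device registration forces the whole unwind ladder to be relabelled: each fail_* label must undo exactly the steps that completed before the jump, in reverse order, which is why fail_wep migrates below fail_dev. The idiom in miniature (the stubs stand in for the real init/teardown steps):

	static int step_a(void) { return 0; }	/* e.g. sta_info_start()     */
	static int step_b(void) { return 0; }	/* e.g. ieee80211_wep_init() */
	static int step_c(void) { return 0; }	/* e.g. registration, rates  */
	static void undo_a(void) { }
	static void undo_b(void) { }		/* e.g. ieee80211_wep_free() */

	static int setup_example(void)
	{
		int err;

		err = step_a();
		if (err)
			goto fail_a;
		err = step_b();
		if (err)
			goto fail_b;
		err = step_c();
		if (err)
			goto fail_c;
		return 0;

	fail_c:
		undo_b();
	fail_b:
		undo_a();
	fail_a:
		return err;
	}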
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 7ecda9d59d8a..132938b073dc 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -441,6 +441,9 @@ static bool ieee80211_check_tim(struct ieee802_11_elems *elems, u16 aid)
441 u8 index, indexn1, indexn2; 441 u8 index, indexn1, indexn2;
442 struct ieee80211_tim_ie *tim = (struct ieee80211_tim_ie *) elems->tim; 442 struct ieee80211_tim_ie *tim = (struct ieee80211_tim_ie *) elems->tim;
443 443
444 if (unlikely(!tim || elems->tim_len < 4))
445 return false;
446
444 aid &= 0x3fff; 447 aid &= 0x3fff;
445 index = aid / 8; 448 index = aid / 8;
446 mask = 1 << (aid & 7); 449 mask = 1 << (aid & 7);
@@ -945,9 +948,13 @@ void ieee80211_beacon_loss_work(struct work_struct *work)
945 u.mgd.beacon_loss_work); 948 u.mgd.beacon_loss_work);
946 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 949 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
947 950
948 printk(KERN_DEBUG "%s: driver reports beacon loss from AP %pM " 951#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
949 "- sending probe request\n", sdata->dev->name, 952 if (net_ratelimit()) {
950 sdata->u.mgd.bssid); 953 printk(KERN_DEBUG "%s: driver reports beacon loss from AP %pM "
954 "- sending probe request\n", sdata->dev->name,
955 sdata->u.mgd.bssid);
956 }
957#endif
951 958
952 ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL; 959 ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL;
953 ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid, 960 ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid,
@@ -1007,9 +1014,13 @@ static void ieee80211_associated(struct ieee80211_sub_if_data *sdata)
1007 (local->hw.conf.flags & IEEE80211_CONF_PS)) && 1014 (local->hw.conf.flags & IEEE80211_CONF_PS)) &&
1008 time_after(jiffies, 1015 time_after(jiffies,
1009 ifmgd->last_beacon + IEEE80211_MONITORING_INTERVAL)) { 1016 ifmgd->last_beacon + IEEE80211_MONITORING_INTERVAL)) {
1010 printk(KERN_DEBUG "%s: beacon loss from AP %pM " 1017#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1011 "- sending probe request\n", 1018 if (net_ratelimit()) {
1012 sdata->dev->name, ifmgd->bssid); 1019 printk(KERN_DEBUG "%s: beacon loss from AP %pM "
1020 "- sending probe request\n",
1021 sdata->dev->name, ifmgd->bssid);
1022 }
1023#endif
1013 ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL; 1024 ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL;
1014 ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid, 1025 ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid,
1015 ifmgd->ssid_len, NULL, 0); 1026 ifmgd->ssid_len, NULL, 0);
@@ -1355,7 +1366,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
1355 1366
1356 for (i = 0; i < elems.ext_supp_rates_len; i++) { 1367 for (i = 0; i < elems.ext_supp_rates_len; i++) {
1357 int rate = (elems.ext_supp_rates[i] & 0x7f) * 5; 1368 int rate = (elems.ext_supp_rates[i] & 0x7f) * 5;
1358 bool is_basic = !!(elems.supp_rates[i] & 0x80); 1369 bool is_basic = !!(elems.ext_supp_rates[i] & 0x80);
1359 1370
1360 if (rate > 110) 1371 if (rate > 110)
1361 have_higher_than_11mbit = true; 1372 have_higher_than_11mbit = true;
@@ -1902,9 +1913,17 @@ static void ieee80211_sta_work(struct work_struct *work)
1902 1913
1903static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata) 1914static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
1904{ 1915{
1905 if (sdata->vif.type == NL80211_IFTYPE_STATION) 1916 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
1917 /*
1918 * Need to update last_beacon to avoid beacon loss
1919 * test to trigger.
1920 */
1921 sdata->u.mgd.last_beacon = jiffies;
1922
1923
1906 queue_work(sdata->local->hw.workqueue, 1924 queue_work(sdata->local->hw.workqueue,
1907 &sdata->u.mgd.work); 1925 &sdata->u.mgd.work);
1926 }
1908} 1927}
1909 1928
1910/* interface setup */ 1929/* interface setup */
@@ -2105,12 +2124,13 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
2105 struct ieee80211_local *local = 2124 struct ieee80211_local *local =
2106 container_of(work, struct ieee80211_local, 2125 container_of(work, struct ieee80211_local,
2107 dynamic_ps_enable_work); 2126 dynamic_ps_enable_work);
2127 /* XXX: using scan_sdata is completely broken! */
2108 struct ieee80211_sub_if_data *sdata = local->scan_sdata; 2128 struct ieee80211_sub_if_data *sdata = local->scan_sdata;
2109 2129
2110 if (local->hw.conf.flags & IEEE80211_CONF_PS) 2130 if (local->hw.conf.flags & IEEE80211_CONF_PS)
2111 return; 2131 return;
2112 2132
2113 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) 2133 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK && sdata)
2114 ieee80211_send_nullfunc(local, sdata, 1); 2134 ieee80211_send_nullfunc(local, sdata, 1);
2115 2135
2116 local->hw.conf.flags |= IEEE80211_CONF_PS; 2136 local->hw.conf.flags |= IEEE80211_CONF_PS;
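The guard added to ieee80211_check_tim() is a bounds check: the TIM element has three fixed octets plus at least one partial-virtual-bitmap octet, so an element shorter than 4 bytes cannot be indexed safely. The layout it protects, per include/linux/ieee80211.h of this era (quoted from memory, so treat as a sketch):

	struct ieee80211_tim_ie {
		u8 dtim_count;
		u8 dtim_period;
		u8 bitmap_ctrl;
		/* variable size: 1 - 251 bytes */
		u8 virtual_map[0];
	} __attribute__ ((packed));

The two beacon-loss printks in the same file move behind CONFIG_MAC80211_VERBOSE_DEBUG and net_ratelimit() because a flapping link can emit them at beacon rate.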
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 027302326498..81985d27cbda 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -156,8 +156,19 @@ int __ieee80211_resume(struct ieee80211_hw *hw)
156 case NL80211_IFTYPE_ADHOC: 156 case NL80211_IFTYPE_ADHOC:
157 case NL80211_IFTYPE_AP: 157 case NL80211_IFTYPE_AP:
158 case NL80211_IFTYPE_MESH_POINT: 158 case NL80211_IFTYPE_MESH_POINT:
159 WARN_ON(ieee80211_if_config(sdata, changed)); 159 /*
160 ieee80211_bss_info_change_notify(sdata, ~0); 160 * Driver's config_interface can fail if rfkill is
161 * enabled. Accommodate this return code.
162 * FIXME: When mac80211 has knowledge of rfkill
163 * state the code below can change back to:
164 * WARN(ieee80211_if_config(sdata, changed));
165 * ieee80211_bss_info_change_notify(sdata, ~0);
166 */
167 if (ieee80211_if_config(sdata, changed))
168 printk(KERN_DEBUG "%s: failed to configure interface during resume\n",
169 sdata->dev->name);
170 else
171 ieee80211_bss_info_change_notify(sdata, ~0);
161 break; 172 break;
162 case NL80211_IFTYPE_WDS: 173 case NL80211_IFTYPE_WDS:
163 break; 174 break;
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 3824990d340b..70df3dcc3cf6 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -476,7 +476,7 @@ minstrel_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
476 return NULL; 476 return NULL;
477 477
478 for (i = 0; i < IEEE80211_NUM_BANDS; i++) { 478 for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
479 sband = hw->wiphy->bands[hw->conf.channel->band]; 479 sband = hw->wiphy->bands[i];
480 if (sband->n_bitrates > max_rates) 480 if (sband->n_bitrates > max_rates)
481 max_rates = sband->n_bitrates; 481 max_rates = sband->n_bitrates;
482 } 482 }
diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c
index b16801cde06f..01d59a8e334c 100644
--- a/net/mac80211/rc80211_pid_algo.c
+++ b/net/mac80211/rc80211_pid_algo.c
@@ -317,13 +317,44 @@ rate_control_pid_rate_init(void *priv, struct ieee80211_supported_band *sband,
317 struct ieee80211_sta *sta, void *priv_sta) 317 struct ieee80211_sta *sta, void *priv_sta)
318{ 318{
319 struct rc_pid_sta_info *spinfo = priv_sta; 319 struct rc_pid_sta_info *spinfo = priv_sta;
320 struct rc_pid_info *pinfo = priv;
321 struct rc_pid_rateinfo *rinfo = pinfo->rinfo;
320 struct sta_info *si; 322 struct sta_info *si;
323 int i, j, tmp;
324 bool s;
321 325
322 /* TODO: This routine should consider using RSSI from previous packets 326 /* TODO: This routine should consider using RSSI from previous packets
323 * as we need to have IEEE 802.1X auth succeed immediately after assoc.. 327 * as we need to have IEEE 802.1X auth succeed immediately after assoc..
324 * Until that method is implemented, we will use the lowest supported 328 * Until that method is implemented, we will use the lowest supported
325 * rate as a workaround. */ 329 * rate as a workaround. */
326 330
331 /* Sort the rates. This is optimized for the most common case (i.e.
332 * almost-sorted CCK+OFDM rates). Kind of bubble-sort with reversed
333 * mapping too. */
334 for (i = 0; i < sband->n_bitrates; i++) {
335 rinfo[i].index = i;
336 rinfo[i].rev_index = i;
337 if (RC_PID_FAST_START)
338 rinfo[i].diff = 0;
339 else
340 rinfo[i].diff = i * pinfo->norm_offset;
341 }
342 for (i = 1; i < sband->n_bitrates; i++) {
343 s = 0;
344 for (j = 0; j < sband->n_bitrates - i; j++)
345 if (unlikely(sband->bitrates[rinfo[j].index].bitrate >
346 sband->bitrates[rinfo[j + 1].index].bitrate)) {
347 tmp = rinfo[j].index;
348 rinfo[j].index = rinfo[j + 1].index;
349 rinfo[j + 1].index = tmp;
350 rinfo[rinfo[j].index].rev_index = j;
351 rinfo[rinfo[j + 1].index].rev_index = j + 1;
352 s = 1;
353 }
354 if (!s)
355 break;
356 }
357
327 spinfo->txrate_idx = rate_lowest_index(sband, sta); 358 spinfo->txrate_idx = rate_lowest_index(sband, sta);
328 /* HACK */ 359 /* HACK */
329 si = container_of(sta, struct sta_info, sta); 360 si = container_of(sta, struct sta_info, sta);
@@ -336,21 +367,22 @@ static void *rate_control_pid_alloc(struct ieee80211_hw *hw,
336 struct rc_pid_info *pinfo; 367 struct rc_pid_info *pinfo;
337 struct rc_pid_rateinfo *rinfo; 368 struct rc_pid_rateinfo *rinfo;
338 struct ieee80211_supported_band *sband; 369 struct ieee80211_supported_band *sband;
339 int i, j, tmp; 370 int i, max_rates = 0;
340 bool s;
341#ifdef CONFIG_MAC80211_DEBUGFS 371#ifdef CONFIG_MAC80211_DEBUGFS
342 struct rc_pid_debugfs_entries *de; 372 struct rc_pid_debugfs_entries *de;
343#endif 373#endif
344 374
345 sband = hw->wiphy->bands[hw->conf.channel->band];
346
347 pinfo = kmalloc(sizeof(*pinfo), GFP_ATOMIC); 375 pinfo = kmalloc(sizeof(*pinfo), GFP_ATOMIC);
348 if (!pinfo) 376 if (!pinfo)
349 return NULL; 377 return NULL;
350 378
351 /* We can safely assume that sband won't change unless we get 379 for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
352 * reinitialized. */ 380 sband = hw->wiphy->bands[i];
353 rinfo = kmalloc(sizeof(*rinfo) * sband->n_bitrates, GFP_ATOMIC); 381 if (sband->n_bitrates > max_rates)
382 max_rates = sband->n_bitrates;
383 }
384
385 rinfo = kmalloc(sizeof(*rinfo) * max_rates, GFP_ATOMIC);
354 if (!rinfo) { 386 if (!rinfo) {
355 kfree(pinfo); 387 kfree(pinfo);
356 return NULL; 388 return NULL;
@@ -368,33 +400,6 @@ static void *rate_control_pid_alloc(struct ieee80211_hw *hw,
368 pinfo->rinfo = rinfo; 400 pinfo->rinfo = rinfo;
369 pinfo->oldrate = 0; 401 pinfo->oldrate = 0;
370 402
371 /* Sort the rates. This is optimized for the most common case (i.e.
372 * almost-sorted CCK+OFDM rates). Kind of bubble-sort with reversed
373 * mapping too. */
374 for (i = 0; i < sband->n_bitrates; i++) {
375 rinfo[i].index = i;
376 rinfo[i].rev_index = i;
377 if (RC_PID_FAST_START)
378 rinfo[i].diff = 0;
379 else
380 rinfo[i].diff = i * pinfo->norm_offset;
381 }
382 for (i = 1; i < sband->n_bitrates; i++) {
383 s = 0;
384 for (j = 0; j < sband->n_bitrates - i; j++)
385 if (unlikely(sband->bitrates[rinfo[j].index].bitrate >
386 sband->bitrates[rinfo[j + 1].index].bitrate)) {
387 tmp = rinfo[j].index;
388 rinfo[j].index = rinfo[j + 1].index;
389 rinfo[j + 1].index = tmp;
390 rinfo[rinfo[j].index].rev_index = j;
391 rinfo[rinfo[j + 1].index].rev_index = j + 1;
392 s = 1;
393 }
394 if (!s)
395 break;
396 }
397
398#ifdef CONFIG_MAC80211_DEBUGFS 403#ifdef CONFIG_MAC80211_DEBUGFS
399 de = &pinfo->dentries; 404 de = &pinfo->dentries;
400 de->target = debugfs_create_u32("target_pf", S_IRUSR | S_IWUSR, 405 de->target = debugfs_create_u32("target_pf", S_IRUSR | S_IWUSR,
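This hunk and the minstrel one above fix the same bad assumption: at rate_control_*_alloc() time there may be no current channel, so the rate array has to be sized for the largest band and any per-band work (the bubble sort) deferred to rate_init(), where sband is known. A defensive version of the sizing loop (the NULL test is my addition, since bands a device does not support are NULL pointers):

	static int max_bitrates(struct ieee80211_hw *hw)
	{
		struct ieee80211_supported_band *sband;
		int i, max_rates = 0;

		for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
			sband = hw->wiphy->bands[i];
			if (sband && sband->n_bitrates > max_rates)
				max_rates = sband->n_bitrates;
		}
		return max_rates;
	}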
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 64ebe664effc..9776f73c51ad 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -29,6 +29,7 @@
29static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw, 29static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
30 struct tid_ampdu_rx *tid_agg_rx, 30 struct tid_ampdu_rx *tid_agg_rx,
31 struct sk_buff *skb, 31 struct sk_buff *skb,
32 struct ieee80211_rx_status *status,
32 u16 mpdu_seq_num, 33 u16 mpdu_seq_num,
33 int bar_req); 34 int bar_req);
34/* 35/*
@@ -1396,7 +1397,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1396 * mac80211. That also explains the __skb_push() 1397 * mac80211. That also explains the __skb_push()
1397 * below. 1398 * below.
1398 */ 1399 */
1399 align = (unsigned long)skb->data & 4; 1400 align = (unsigned long)skb->data & 3;
1400 if (align) { 1401 if (align) {
1401 if (WARN_ON(skb_headroom(skb) < 3)) { 1402 if (WARN_ON(skb_headroom(skb) < 3)) {
1402 dev_kfree_skb(skb); 1403 dev_kfree_skb(skb);
@@ -1688,7 +1689,7 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
1688 /* manage reordering buffer according to requested */ 1689 /* manage reordering buffer according to requested */
1689 /* sequence number */ 1690 /* sequence number */
1690 rcu_read_lock(); 1691 rcu_read_lock();
1691 ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, NULL, 1692 ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, NULL, NULL,
1692 start_seq_num, 1); 1693 start_seq_num, 1);
1693 rcu_read_unlock(); 1694 rcu_read_unlock();
1694 return RX_DROP_UNUSABLE; 1695 return RX_DROP_UNUSABLE;
@@ -2293,6 +2294,7 @@ static inline u16 seq_sub(u16 sq1, u16 sq2)
2293static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw, 2294static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
2294 struct tid_ampdu_rx *tid_agg_rx, 2295 struct tid_ampdu_rx *tid_agg_rx,
2295 struct sk_buff *skb, 2296 struct sk_buff *skb,
2297 struct ieee80211_rx_status *rxstatus,
2296 u16 mpdu_seq_num, 2298 u16 mpdu_seq_num,
2297 int bar_req) 2299 int bar_req)
2298{ 2300{
@@ -2374,6 +2376,8 @@ static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
2374 2376
2375 /* put the frame in the reordering buffer */ 2377 /* put the frame in the reordering buffer */
2376 tid_agg_rx->reorder_buf[index] = skb; 2378 tid_agg_rx->reorder_buf[index] = skb;
2379 memcpy(tid_agg_rx->reorder_buf[index]->cb, rxstatus,
2380 sizeof(*rxstatus));
2377 tid_agg_rx->stored_mpdu_num++; 2381 tid_agg_rx->stored_mpdu_num++;
2378 /* release the buffer until next missing frame */ 2382 /* release the buffer until next missing frame */
2379 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) 2383 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn)
@@ -2399,7 +2403,8 @@ static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
2399} 2403}
2400 2404
2401static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local, 2405static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
2402 struct sk_buff *skb) 2406 struct sk_buff *skb,
2407 struct ieee80211_rx_status *status)
2403{ 2408{
2404 struct ieee80211_hw *hw = &local->hw; 2409 struct ieee80211_hw *hw = &local->hw;
2405 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 2410 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -2448,7 +2453,7 @@ static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
2448 2453
2449 /* according to mpdu sequence number deal with reordering buffer */ 2454 /* according to mpdu sequence number deal with reordering buffer */
2450 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4; 2455 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
2451 ret = ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, 2456 ret = ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, status,
2452 mpdu_seq_num, 0); 2457 mpdu_seq_num, 0);
2453 end_reorder: 2458 end_reorder:
2454 return ret; 2459 return ret;
@@ -2512,7 +2517,7 @@ void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
2512 return; 2517 return;
2513 } 2518 }
2514 2519
2515 if (!ieee80211_rx_reorder_ampdu(local, skb)) 2520 if (!ieee80211_rx_reorder_ampdu(local, skb, status))
2516 __ieee80211_rx_handle_packet(hw, skb, status, rate); 2521 __ieee80211_rx_handle_packet(hw, skb, status, rate);
2517 2522
2518 rcu_read_unlock(); 2523 rcu_read_unlock();
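Frames parked in the A-MPDU reorder buffer outlive the rx-status argument that arrived with them, so the status is copied into skb->cb, the 48-byte scratch area that travels with the skb, and recovered when the frame is finally released. Illustrative helpers for the stash/retrieve pair (the names are mine):

	static void stash_rx_status(struct sk_buff *skb,
				    const struct ieee80211_rx_status *status)
	{
		/* fails the build if the status ever outgrows cb */
		BUILD_BUG_ON(sizeof(*status) > sizeof(skb->cb));
		memcpy(skb->cb, status, sizeof(*status));
	}

	static struct ieee80211_rx_status *stashed_rx_status(struct sk_buff *skb)
	{
		return (struct ieee80211_rx_status *)skb->cb;
	}

The align fix in ieee80211_deliver_skb() is separate: '& 4' tested a single bit and so never saw 1-, 2- or 3-byte misalignment, while '& 3' extracts the offset within a 4-byte boundary.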
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 3fb04a86444d..63656266d567 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -772,7 +772,7 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
772 hdrlen = ieee80211_hdrlen(hdr->frame_control); 772 hdrlen = ieee80211_hdrlen(hdr->frame_control);
773 773
774 /* internal error, why is TX_FRAGMENTED set? */ 774 /* internal error, why is TX_FRAGMENTED set? */
775 if (WARN_ON(skb->len <= frag_threshold)) 775 if (WARN_ON(skb->len + FCS_LEN <= frag_threshold))
776 return TX_DROP; 776 return TX_DROP;
777 777
778 /* 778 /*
diff --git a/net/mac80211/wext.c b/net/mac80211/wext.c
index deb4ecec122a..959aa8379ccf 100644
--- a/net/mac80211/wext.c
+++ b/net/mac80211/wext.c
@@ -417,6 +417,7 @@ static int ieee80211_ioctl_siwtxpower(struct net_device *dev,
417{ 417{
418 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 418 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
419 struct ieee80211_channel* chan = local->hw.conf.channel; 419 struct ieee80211_channel* chan = local->hw.conf.channel;
420 bool reconf = false;
420 u32 reconf_flags = 0; 421 u32 reconf_flags = 0;
421 int new_power_level; 422 int new_power_level;
422 423
@@ -427,14 +428,38 @@ static int ieee80211_ioctl_siwtxpower(struct net_device *dev,
427 if (!chan) 428 if (!chan)
428 return -EINVAL; 429 return -EINVAL;
429 430
430 if (data->txpower.fixed) 431 /* only change when not disabling */
431 new_power_level = min(data->txpower.value, chan->max_power); 432 if (!data->txpower.disabled) {
432 else /* Automatic power level setting */ 433 if (data->txpower.fixed) {
433 new_power_level = chan->max_power; 434 if (data->txpower.value < 0)
435 return -EINVAL;
436 new_power_level = data->txpower.value;
437 /*
438 * Debatable, but we cannot do a fixed power
439 * level above the regulatory constraint.
440 * Use "iwconfig wlan0 txpower 15dBm" instead.
441 */
442 if (new_power_level > chan->max_power)
443 return -EINVAL;
444 } else {
445 /*
446 * Automatic power level setting, max being the value
447 * passed in from userland.
448 */
449 if (data->txpower.value < 0)
450 new_power_level = -1;
451 else
452 new_power_level = data->txpower.value;
453 }
454
455 reconf = true;
434 456
435 local->user_power_level = new_power_level; 457 /*
436 if (local->hw.conf.power_level != new_power_level) 458 * ieee80211_hw_config() will limit to the channel's
437 reconf_flags |= IEEE80211_CONF_CHANGE_POWER; 459 * max power and possibly power constraint from AP.
460 */
461 local->user_power_level = new_power_level;
462 }
438 463
439 if (local->hw.conf.radio_enabled != !(data->txpower.disabled)) { 464 if (local->hw.conf.radio_enabled != !(data->txpower.disabled)) {
440 local->hw.conf.radio_enabled = !(data->txpower.disabled); 465 local->hw.conf.radio_enabled = !(data->txpower.disabled);
@@ -442,7 +467,7 @@ static int ieee80211_ioctl_siwtxpower(struct net_device *dev,
442 ieee80211_led_radio(local, local->hw.conf.radio_enabled); 467 ieee80211_led_radio(local, local->hw.conf.radio_enabled);
443 } 468 }
444 469
445 if (reconf_flags) 470 if (reconf || reconf_flags)
446 ieee80211_hw_config(local, reconf_flags); 471 ieee80211_hw_config(local, reconf_flags);
447 472
448 return 0; 473 return 0;
@@ -530,7 +555,7 @@ static int ieee80211_ioctl_giwfrag(struct net_device *dev,
530 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 555 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
531 556
532 frag->value = local->fragmentation_threshold; 557 frag->value = local->fragmentation_threshold;
533 frag->disabled = (frag->value >= IEEE80211_MAX_RTS_THRESHOLD); 558 frag->disabled = (frag->value >= IEEE80211_MAX_FRAG_THRESHOLD);
534 frag->fixed = 1; 559 frag->fixed = 1;
535 560
536 return 0; 561 return 0;
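The txpower rework pairs with the main.c hunks above: user_power_level now uses -1 as the "no user limit" sentinel (it is initialized to -1 and tested with >= 0), so a user may legitimately request 0 dBm. On the consumer side the clamp reduces to roughly:

	int power = chan->max_power;	/* possibly lowered by the 802.11h constraint */

	if (local->user_power_level >= 0)	/* was: if (local->user_power_level) */
		power = min(power, local->user_power_level);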
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 881203c4a142..cb3ad741ebf8 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -837,6 +837,7 @@ config NETFILTER_XT_MATCH_SOCKET
837 depends on NETFILTER_TPROXY 837 depends on NETFILTER_TPROXY
838 depends on NETFILTER_XTABLES 838 depends on NETFILTER_XTABLES
839 depends on NETFILTER_ADVANCED 839 depends on NETFILTER_ADVANCED
840 depends on !NF_CONNTRACK || NF_CONNTRACK
840 select NF_DEFRAG_IPV4 841 select NF_DEFRAG_IPV4
841 help 842 help
842 This option adds a `socket' match, which can be used to match 843 This option adds a `socket' match, which can be used to match
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index fd77619256a8..c523f0b8cee5 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -988,7 +988,7 @@ ctnetlink_change_helper(struct nf_conn *ct, struct nlattr *cda[])
988{ 988{
989 struct nf_conntrack_helper *helper; 989 struct nf_conntrack_helper *helper;
990 struct nf_conn_help *help = nfct_help(ct); 990 struct nf_conn_help *help = nfct_help(ct);
991 char *helpname; 991 char *helpname = NULL;
992 int err; 992 int err;
993 993
994 /* don't change helper of sibling connections */ 994 /* don't change helper of sibling connections */
@@ -1209,7 +1209,7 @@ ctnetlink_create_conntrack(struct nlattr *cda[],
1209 1209
1210 rcu_read_lock(); 1210 rcu_read_lock();
1211 if (cda[CTA_HELP]) { 1211 if (cda[CTA_HELP]) {
1212 char *helpname; 1212 char *helpname = NULL;
1213 1213
1214 err = ctnetlink_parse_help(cda[CTA_HELP], &helpname); 1214 err = ctnetlink_parse_help(cda[CTA_HELP], &helpname);
1215 if (err < 0) 1215 if (err < 0)
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 509a95621f9f..150e5cf62f85 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -625,20 +625,6 @@ void xt_free_table_info(struct xt_table_info *info)
625} 625}
626EXPORT_SYMBOL(xt_free_table_info); 626EXPORT_SYMBOL(xt_free_table_info);
627 627
628void xt_table_entry_swap_rcu(struct xt_table_info *oldinfo,
629 struct xt_table_info *newinfo)
630{
631 unsigned int cpu;
632
633 for_each_possible_cpu(cpu) {
634 void *p = oldinfo->entries[cpu];
635 rcu_assign_pointer(oldinfo->entries[cpu], newinfo->entries[cpu]);
636 newinfo->entries[cpu] = p;
637 }
638
639}
640EXPORT_SYMBOL_GPL(xt_table_entry_swap_rcu);
641
642/* Find table by name, grabs mutex & ref. Returns ERR_PTR() on error. */ 628/* Find table by name, grabs mutex & ref. Returns ERR_PTR() on error. */
643struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af, 629struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
644 const char *name) 630 const char *name)
@@ -676,32 +662,43 @@ void xt_compat_unlock(u_int8_t af)
676EXPORT_SYMBOL_GPL(xt_compat_unlock); 662EXPORT_SYMBOL_GPL(xt_compat_unlock);
677#endif 663#endif
678 664
665DEFINE_PER_CPU(struct xt_info_lock, xt_info_locks);
666EXPORT_PER_CPU_SYMBOL_GPL(xt_info_locks);
667
668
679struct xt_table_info * 669struct xt_table_info *
680xt_replace_table(struct xt_table *table, 670xt_replace_table(struct xt_table *table,
681 unsigned int num_counters, 671 unsigned int num_counters,
682 struct xt_table_info *newinfo, 672 struct xt_table_info *newinfo,
683 int *error) 673 int *error)
684{ 674{
685 struct xt_table_info *oldinfo, *private; 675 struct xt_table_info *private;
686 676
687 /* Do the substitution. */ 677 /* Do the substitution. */
688 mutex_lock(&table->lock); 678 local_bh_disable();
689 private = table->private; 679 private = table->private;
680
690 /* Check inside lock: is the old number correct? */ 681 /* Check inside lock: is the old number correct? */
691 if (num_counters != private->number) { 682 if (num_counters != private->number) {
692 duprintf("num_counters != table->private->number (%u/%u)\n", 683 duprintf("num_counters != table->private->number (%u/%u)\n",
693 num_counters, private->number); 684 num_counters, private->number);
694 mutex_unlock(&table->lock); 685 local_bh_enable();
695 *error = -EAGAIN; 686 *error = -EAGAIN;
696 return NULL; 687 return NULL;
697 } 688 }
698 oldinfo = private;
699 rcu_assign_pointer(table->private, newinfo);
700 newinfo->initial_entries = oldinfo->initial_entries;
701 mutex_unlock(&table->lock);
702 689
703 synchronize_net(); 690 table->private = newinfo;
704 return oldinfo; 691 newinfo->initial_entries = private->initial_entries;
692
693 /*
694 * Even though table entries have now been swapped, other CPU's
695 * may still be using the old entries. This is okay, because
696 * resynchronization happens because of the locking done
697 * during the get_counters() routine.
698 */
699 local_bh_enable();
700
701 return private;
705} 702}
706EXPORT_SYMBOL_GPL(xt_replace_table); 703EXPORT_SYMBOL_GPL(xt_replace_table);
707 704
@@ -734,7 +731,6 @@ struct xt_table *xt_register_table(struct net *net, struct xt_table *table,
734 731
735 /* Simplifies replace_table code. */ 732 /* Simplifies replace_table code. */
736 table->private = bootstrap; 733 table->private = bootstrap;
737 mutex_init(&table->lock);
738 734
739 if (!xt_replace_table(table, 0, newinfo, &ret)) 735 if (!xt_replace_table(table, 0, newinfo, &ret))
740 goto unlock; 736 goto unlock;
@@ -1147,7 +1143,14 @@ static struct pernet_operations xt_net_ops = {
1147 1143
1148static int __init xt_init(void) 1144static int __init xt_init(void)
1149{ 1145{
1150 int i, rv; 1146 unsigned int i;
1147 int rv;
1148
1149 for_each_possible_cpu(i) {
1150 struct xt_info_lock *lock = &per_cpu(xt_info_locks, i);
1151 spin_lock_init(&lock->lock);
1152 lock->readers = 0;
1153 }
1151 1154
1152 xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL); 1155 xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL);
1153 if (!xt) 1156 if (!xt)
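xt_info_locks, defined and initialized above, is only half of the new scheme; the inline lock helpers live in include/linux/netfilter/x_tables.h, which is outside this net/ diffstat. A sketch of those helpers as introduced by the same series: the readers count makes the lock recursion-safe on its own CPU (nested ipt_do_table calls), while writers such as get_counters() take a specific CPU's spinlock outright:

	struct xt_info_lock {
		spinlock_t lock;
		unsigned char readers;
	};
	DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks);

	static inline void xt_info_rdlock_bh(void)
	{
		struct xt_info_lock *lock;

		local_bh_disable();
		lock = &__get_cpu_var(xt_info_locks);
		if (likely(!lock->readers++))	/* first entry on this CPU */
			spin_lock(&lock->lock);
	}

	static inline void xt_info_rdunlock_bh(void)
	{
		struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks);

		if (likely(!--lock->readers))	/* last exit on this CPU */
			spin_unlock(&lock->lock);
		local_bh_enable();
	}

	static inline void xt_info_wrlock(unsigned int cpu)
	{
		spin_lock(&per_cpu(xt_info_locks, cpu).lock);
	}

	static inline void xt_info_wrunlock(unsigned int cpu)
	{
		spin_unlock(&per_cpu(xt_info_locks, cpu).lock);
	}

This is also why xt_replace_table() above only needs local_bh_disable(): swapping table->private is safe once no reader on this CPU can be interrupted mid-walk, and stale readers on other CPUs are flushed out by get_counters() taking every CPU's lock in turn.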
diff --git a/net/netlabel/netlabel_addrlist.c b/net/netlabel/netlabel_addrlist.c
index 834c6eb7f484..c0519139679e 100644
--- a/net/netlabel/netlabel_addrlist.c
+++ b/net/netlabel/netlabel_addrlist.c
@@ -256,13 +256,11 @@ struct netlbl_af4list *netlbl_af4list_remove(__be32 addr, __be32 mask,
256{ 256{
257 struct netlbl_af4list *entry; 257 struct netlbl_af4list *entry;
258 258
259 entry = netlbl_af4list_search(addr, head); 259 entry = netlbl_af4list_search_exact(addr, mask, head);
260 if (entry != NULL && entry->addr == addr && entry->mask == mask) { 260 if (entry == NULL)
261 netlbl_af4list_remove_entry(entry); 261 return NULL;
262 return entry; 262 netlbl_af4list_remove_entry(entry);
263 } 263 return entry;
264
265 return NULL;
266} 264}
267 265
268#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 266#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -299,15 +297,11 @@ struct netlbl_af6list *netlbl_af6list_remove(const struct in6_addr *addr,
299{ 297{
300 struct netlbl_af6list *entry; 298 struct netlbl_af6list *entry;
301 299
302 entry = netlbl_af6list_search(addr, head); 300 entry = netlbl_af6list_search_exact(addr, mask, head);
303 if (entry != NULL && 301 if (entry == NULL)
304 ipv6_addr_equal(&entry->addr, addr) && 302 return NULL;
305 ipv6_addr_equal(&entry->mask, mask)) { 303 netlbl_af6list_remove_entry(entry);
306 netlbl_af6list_remove_entry(entry); 304 return entry;
307 return entry;
308 }
309
310 return NULL;
311} 305}
312#endif /* IPv6 */ 306#endif /* IPv6 */
313 307
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 4e705f87969f..3be0e016ab7d 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1084,8 +1084,10 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
1084 1084
1085 /* Build a packet - the conventional user limit is 236 bytes. We can 1085 /* Build a packet - the conventional user limit is 236 bytes. We can
1086 do ludicrously large NetROM frames but must not overflow */ 1086 do ludicrously large NetROM frames but must not overflow */
1087 if (len > 65536) 1087 if (len > 65536) {
1088 return -EMSGSIZE; 1088 err = -EMSGSIZE;
1089 goto out;
1090 }
1089 1091
1090 SOCK_DEBUG(sk, "NET/ROM: sendto: building packet.\n"); 1092 SOCK_DEBUG(sk, "NET/ROM: sendto: building packet.\n");
1091 size = len + NR_NETWORK_LEN + NR_TRANSPORT_LEN; 1093 size = len + NR_NETWORK_LEN + NR_TRANSPORT_LEN;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 74776de523ec..f546e81acc45 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1758,8 +1758,9 @@ static void free_pg_vec(char **pg_vec, unsigned int order, unsigned int len)
1758 1758
1759static inline char *alloc_one_pg_vec_page(unsigned long order) 1759static inline char *alloc_one_pg_vec_page(unsigned long order)
1760{ 1760{
1761 return (char *) __get_free_pages(GFP_KERNEL | __GFP_COMP | __GFP_ZERO, 1761 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO | __GFP_NOWARN;
1762 order); 1762
1763 return (char *) __get_free_pages(gfp_flags, order);
1763} 1764}
1764 1765
1765static char **alloc_pg_vec(struct tpacket_req *req, int order) 1766static char **alloc_pg_vec(struct tpacket_req *req, int order)
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 0f36e8d59b29..877a7f65f707 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1072,10 +1072,6 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
1072 unsigned char *asmptr; 1072 unsigned char *asmptr;
1073 int n, size, qbit = 0; 1073 int n, size, qbit = 0;
1074 1074
1075 /* ROSE empty frame has no meaning : don't send */
1076 if (len == 0)
1077 return 0;
1078
1079 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT)) 1075 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
1080 return -EINVAL; 1076 return -EINVAL;
1081 1077
@@ -1273,12 +1269,6 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
1273 skb_reset_transport_header(skb); 1269 skb_reset_transport_header(skb);
1274 copied = skb->len; 1270 copied = skb->len;
1275 1271
1276 /* ROSE empty frame has no meaning : ignore it */
1277 if (copied == 0) {
1278 skb_free_datagram(sk, skb);
1279 return copied;
1280 }
1281
1282 if (copied > size) { 1272 if (copied > size) {
1283 copied = size; 1273 copied = size;
1284 msg->msg_flags |= MSG_TRUNC; 1274 msg->msg_flags |= MSG_TRUNC;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 173fcc4b050d..0759f32e9dca 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -254,7 +254,7 @@ replay:
254 } 254 }
255 tp->ops = tp_ops; 255 tp->ops = tp_ops;
256 tp->protocol = protocol; 256 tp->protocol = protocol;
257 tp->prio = nprio ? : tcf_auto_prio(*back); 257 tp->prio = nprio ? : TC_H_MAJ(tcf_auto_prio(*back));
258 tp->q = q; 258 tp->q = q;
259 tp->classify = tp_ops->classify; 259 tp->classify = tp_ops->classify;
260 tp->classid = parent; 260 tp->classid = parent;
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 72cf86e3c090..fad596bf32d7 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -176,8 +176,10 @@ META_COLLECTOR(var_dev)
176 176
177META_COLLECTOR(int_vlan_tag) 177META_COLLECTOR(int_vlan_tag)
178{ 178{
179 unsigned short uninitialized_var(tag); 179 unsigned short tag;
180 if (vlan_get_tag(skb, &tag) < 0) 180
181 tag = vlan_tx_tag_get(skb);
182 if (!tag && __vlan_get_tag(skb, &tag))
181 *err = -1; 183 *err = -1;
182 else 184 else
183 dst->value = tag; 185 dst->value = tag;
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index d876b8734848..2b88295cb7b7 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -280,6 +280,14 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
280 if (unlikely(!skb)) 280 if (unlikely(!skb))
281 return NULL; 281 return NULL;
282 282
283#ifdef CONFIG_NET_CLS_ACT
284 /*
285 * If it's at ingress let's pretend the delay is
286 * from the network (tstamp will be updated).
287 */
288 if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
289 skb->tstamp.tv64 = 0;
290#endif
283 pr_debug("netem_dequeue: return skb=%p\n", skb); 291 pr_debug("netem_dequeue: return skb=%p\n", skb);
284 sch->q.qlen--; 292 sch->q.qlen--;
285 return skb; 293 return skb;
diff --git a/net/socket.c b/net/socket.c
index 91d0c0254ffe..791d71a36a93 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -493,8 +493,7 @@ static struct socket *sock_alloc(void)
493 inode->i_uid = current_fsuid(); 493 inode->i_uid = current_fsuid();
494 inode->i_gid = current_fsgid(); 494 inode->i_gid = current_fsgid();
495 495
496 get_cpu_var(sockets_in_use)++; 496 percpu_add(sockets_in_use, 1);
497 put_cpu_var(sockets_in_use);
498 return sock; 497 return sock;
499} 498}
500 499
@@ -536,8 +535,7 @@ void sock_release(struct socket *sock)
536 if (sock->fasync_list) 535 if (sock->fasync_list)
537 printk(KERN_ERR "sock_release: fasync list not empty!\n"); 536 printk(KERN_ERR "sock_release: fasync list not empty!\n");
538 537
539 get_cpu_var(sockets_in_use)--; 538 percpu_sub(sockets_in_use, 1);
540 put_cpu_var(sockets_in_use);
541 if (!sock->file) { 539 if (!sock->file) {
542 iput(SOCK_INODE(sock)); 540 iput(SOCK_INODE(sock));
543 return; 541 return;
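percpu_add()/percpu_sub() collapse the counter update into one per-cpu operation (a single instruction on x86), where the old form had to bracket the read-modify-write with the preemption-disabling pair. Side by side, inside a function body:

	get_cpu_var(sockets_in_use)++;	/* preempt_disable(); __get_cpu_var()++; */
	put_cpu_var(sockets_in_use);	/* preempt_enable(); */

	percpu_add(sockets_in_use, 1);	/* same effect, no explicit bracket */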
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 9b49a6ab8ded..8847add6ca16 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1008,6 +1008,8 @@ svc_process(struct svc_rqst *rqstp)
1008 rqstp->rq_res.tail[0].iov_len = 0; 1008 rqstp->rq_res.tail[0].iov_len = 0;
1009 /* Will be turned off only in gss privacy case: */ 1009 /* Will be turned off only in gss privacy case: */
1010 rqstp->rq_splice_ok = 1; 1010 rqstp->rq_splice_ok = 1;
1011 /* Will be turned off only when NFSv4 Sessions are used */
1012 rqstp->rq_usedeferral = 1;
1011 1013
1012 /* Setup reply header */ 1014 /* Setup reply header */
1013 rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp); 1015 rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);
@@ -1078,7 +1080,6 @@ svc_process(struct svc_rqst *rqstp)
1078 procp = versp->vs_proc + proc; 1080 procp = versp->vs_proc + proc;
1079 if (proc >= versp->vs_nproc || !procp->pc_func) 1081 if (proc >= versp->vs_nproc || !procp->pc_func)
1080 goto err_bad_proc; 1082 goto err_bad_proc;
1081 rqstp->rq_server = serv;
1082 rqstp->rq_procinfo = procp; 1083 rqstp->rq_procinfo = procp;
1083 1084
1084 /* Syntactic check complete */ 1085 /* Syntactic check complete */
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 2819ee093f36..c200d92e57e4 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -14,6 +14,8 @@
14 14
15#define RPCDBG_FACILITY RPCDBG_SVCXPRT 15#define RPCDBG_FACILITY RPCDBG_SVCXPRT
16 16
17#define SVC_MAX_WAKING 5
18
17static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt); 19static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
18static int svc_deferred_recv(struct svc_rqst *rqstp); 20static int svc_deferred_recv(struct svc_rqst *rqstp);
19static struct cache_deferred_req *svc_defer(struct cache_req *req); 21static struct cache_deferred_req *svc_defer(struct cache_req *req);
@@ -301,6 +303,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
301 struct svc_pool *pool; 303 struct svc_pool *pool;
302 struct svc_rqst *rqstp; 304 struct svc_rqst *rqstp;
303 int cpu; 305 int cpu;
306 int thread_avail;
304 307
305 if (!(xprt->xpt_flags & 308 if (!(xprt->xpt_flags &
306 ((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED)))) 309 ((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED))))
@@ -312,18 +315,14 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
312 315
313 spin_lock_bh(&pool->sp_lock); 316 spin_lock_bh(&pool->sp_lock);
314 317
315 if (!list_empty(&pool->sp_threads) &&
316 !list_empty(&pool->sp_sockets))
317 printk(KERN_ERR
318 "svc_xprt_enqueue: "
319 "threads and transports both waiting??\n");
320
321 if (test_bit(XPT_DEAD, &xprt->xpt_flags)) { 318 if (test_bit(XPT_DEAD, &xprt->xpt_flags)) {
322 /* Don't enqueue dead transports */ 319 /* Don't enqueue dead transports */
323 dprintk("svc: transport %p is dead, not enqueued\n", xprt); 320 dprintk("svc: transport %p is dead, not enqueued\n", xprt);
324 goto out_unlock; 321 goto out_unlock;
325 } 322 }
326 323
324 pool->sp_stats.packets++;
325
327 /* Mark transport as busy. It will remain in this state until 326 /* Mark transport as busy. It will remain in this state until
328 * the provider calls svc_xprt_received. We update XPT_BUSY 327 * the provider calls svc_xprt_received. We update XPT_BUSY
329 * atomically because it also guards against trying to enqueue 328 * atomically because it also guards against trying to enqueue
@@ -356,7 +355,15 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
356 } 355 }
357 356
358 process: 357 process:
359 if (!list_empty(&pool->sp_threads)) { 358 /* Work out whether threads are available */
359 thread_avail = !list_empty(&pool->sp_threads); /* threads are asleep */
360 if (pool->sp_nwaking >= SVC_MAX_WAKING) {
361 /* too many threads are runnable and trying to wake up */
362 thread_avail = 0;
363 pool->sp_stats.overloads_avoided++;
364 }
365
366 if (thread_avail) {
360 rqstp = list_entry(pool->sp_threads.next, 367 rqstp = list_entry(pool->sp_threads.next,
361 struct svc_rqst, 368 struct svc_rqst,
362 rq_list); 369 rq_list);
@@ -371,11 +378,15 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
371 svc_xprt_get(xprt); 378 svc_xprt_get(xprt);
372 rqstp->rq_reserved = serv->sv_max_mesg; 379 rqstp->rq_reserved = serv->sv_max_mesg;
373 atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); 380 atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
381 rqstp->rq_waking = 1;
382 pool->sp_nwaking++;
383 pool->sp_stats.threads_woken++;
374 BUG_ON(xprt->xpt_pool != pool); 384 BUG_ON(xprt->xpt_pool != pool);
375 wake_up(&rqstp->rq_wait); 385 wake_up(&rqstp->rq_wait);
376 } else { 386 } else {
377 dprintk("svc: transport %p put into queue\n", xprt); 387 dprintk("svc: transport %p put into queue\n", xprt);
378 list_add_tail(&xprt->xpt_ready, &pool->sp_sockets); 388 list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
389 pool->sp_stats.sockets_queued++;
379 BUG_ON(xprt->xpt_pool != pool); 390 BUG_ON(xprt->xpt_pool != pool);
380 } 391 }
381 392
@@ -588,6 +599,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
588 int pages; 599 int pages;
589 struct xdr_buf *arg; 600 struct xdr_buf *arg;
590 DECLARE_WAITQUEUE(wait, current); 601 DECLARE_WAITQUEUE(wait, current);
602 long time_left;
591 603
592 dprintk("svc: server %p waiting for data (to = %ld)\n", 604 dprintk("svc: server %p waiting for data (to = %ld)\n",
593 rqstp, timeout); 605 rqstp, timeout);
@@ -636,6 +648,11 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
636 return -EINTR; 648 return -EINTR;
637 649
638 spin_lock_bh(&pool->sp_lock); 650 spin_lock_bh(&pool->sp_lock);
651 if (rqstp->rq_waking) {
652 rqstp->rq_waking = 0;
653 pool->sp_nwaking--;
654 BUG_ON(pool->sp_nwaking < 0);
655 }
639 xprt = svc_xprt_dequeue(pool); 656 xprt = svc_xprt_dequeue(pool);
640 if (xprt) { 657 if (xprt) {
641 rqstp->rq_xprt = xprt; 658 rqstp->rq_xprt = xprt;
@@ -668,12 +685,14 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
668 add_wait_queue(&rqstp->rq_wait, &wait); 685 add_wait_queue(&rqstp->rq_wait, &wait);
669 spin_unlock_bh(&pool->sp_lock); 686 spin_unlock_bh(&pool->sp_lock);
670 687
671 schedule_timeout(timeout); 688 time_left = schedule_timeout(timeout);
672 689
673 try_to_freeze(); 690 try_to_freeze();
674 691
675 spin_lock_bh(&pool->sp_lock); 692 spin_lock_bh(&pool->sp_lock);
676 remove_wait_queue(&rqstp->rq_wait, &wait); 693 remove_wait_queue(&rqstp->rq_wait, &wait);
694 if (!time_left)
695 pool->sp_stats.threads_timedout++;
677 696
678 xprt = rqstp->rq_xprt; 697 xprt = rqstp->rq_xprt;
679 if (!xprt) { 698 if (!xprt) {
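
The new threads_timedout counter relies on schedule_timeout() returning the number of jiffies left to sleep: a return of 0 means the thread slept the full interval without being woken by svc_xprt_enqueue(). The bare idiom, for reference (illustrative helper, not part of the patch):

#include <linux/sched.h>

/* Sleep for up to 'timeout' jiffies; return non-zero if nobody woke us. */
static int slept_full_interval(long timeout)
{
	long time_left;

	set_current_state(TASK_INTERRUPTIBLE);
	time_left = schedule_timeout(timeout);
	return time_left == 0;
}
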
@@ -958,7 +977,7 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req)
958 struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle); 977 struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
959 struct svc_deferred_req *dr; 978 struct svc_deferred_req *dr;
960 979
961 if (rqstp->rq_arg.page_len) 980 if (rqstp->rq_arg.page_len || !rqstp->rq_usedeferral)
962 return NULL; /* if more than a page, give up FIXME */ 981 return NULL; /* if more than a page, give up FIXME */
963 if (rqstp->rq_deferred) { 982 if (rqstp->rq_deferred) {
964 dr = rqstp->rq_deferred; 983 dr = rqstp->rq_deferred;
@@ -1112,3 +1131,93 @@ int svc_xprt_names(struct svc_serv *serv, char *buf, int buflen)
1112 return totlen; 1131 return totlen;
1113} 1132}
1114EXPORT_SYMBOL_GPL(svc_xprt_names); 1133EXPORT_SYMBOL_GPL(svc_xprt_names);
1134
1135
1136/*----------------------------------------------------------------------------*/
1137
1138static void *svc_pool_stats_start(struct seq_file *m, loff_t *pos)
1139{
1140 unsigned int pidx = (unsigned int)*pos;
1141 struct svc_serv *serv = m->private;
1142
1143 dprintk("svc_pool_stats_start, *pidx=%u\n", pidx);
1144
1145 lock_kernel();
1146 /* bump up the pseudo refcount while traversing */
1147 svc_get(serv);
1148 unlock_kernel();
1149
1150 if (!pidx)
1151 return SEQ_START_TOKEN;
1152 return (pidx > serv->sv_nrpools ? NULL : &serv->sv_pools[pidx-1]);
1153}
1154
1155static void *svc_pool_stats_next(struct seq_file *m, void *p, loff_t *pos)
1156{
1157 struct svc_pool *pool = p;
1158 struct svc_serv *serv = m->private;
1159
1160 dprintk("svc_pool_stats_next, *pos=%llu\n", *pos);
1161
1162 if (p == SEQ_START_TOKEN) {
1163 pool = &serv->sv_pools[0];
1164 } else {
1165 unsigned int pidx = (pool - &serv->sv_pools[0]);
1166 if (pidx < serv->sv_nrpools-1)
1167 pool = &serv->sv_pools[pidx+1];
1168 else
1169 pool = NULL;
1170 }
1171 ++*pos;
1172 return pool;
1173}
1174
1175static void svc_pool_stats_stop(struct seq_file *m, void *p)
1176{
1177 struct svc_serv *serv = m->private;
1178
1179 lock_kernel();
1180 /* this function really, really should have been called svc_put() */
1181 svc_destroy(serv);
1182 unlock_kernel();
1183}
1184
1185static int svc_pool_stats_show(struct seq_file *m, void *p)
1186{
1187 struct svc_pool *pool = p;
1188
1189 if (p == SEQ_START_TOKEN) {
1190 seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken overloads-avoided threads-timedout\n");
1191 return 0;
1192 }
1193
1194 seq_printf(m, "%u %lu %lu %lu %lu %lu\n",
1195 pool->sp_id,
1196 pool->sp_stats.packets,
1197 pool->sp_stats.sockets_queued,
1198 pool->sp_stats.threads_woken,
1199 pool->sp_stats.overloads_avoided,
1200 pool->sp_stats.threads_timedout);
1201
1202 return 0;
1203}
1204
1205static const struct seq_operations svc_pool_stats_seq_ops = {
1206 .start = svc_pool_stats_start,
1207 .next = svc_pool_stats_next,
1208 .stop = svc_pool_stats_stop,
1209 .show = svc_pool_stats_show,
1210};
1211
1212int svc_pool_stats_open(struct svc_serv *serv, struct file *file)
1213{
1214 int err;
1215
1216 err = seq_open(file, &svc_pool_stats_seq_ops);
1217 if (!err)
1218 ((struct seq_file *) file->private_data)->private = serv;
1219 return err;
1220}
1221EXPORT_SYMBOL(svc_pool_stats_open);
1222
1223/*----------------------------------------------------------------------------*/
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 9d504234af4a..af3198814c15 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -345,7 +345,6 @@ static void svc_sock_setbufsize(struct socket *sock, unsigned int snd,
345 lock_sock(sock->sk); 345 lock_sock(sock->sk);
346 sock->sk->sk_sndbuf = snd * 2; 346 sock->sk->sk_sndbuf = snd * 2;
347 sock->sk->sk_rcvbuf = rcv * 2; 347 sock->sk->sk_rcvbuf = rcv * 2;
348 sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
349 release_sock(sock->sk); 348 release_sock(sock->sk);
350#endif 349#endif
351} 350}
@@ -797,23 +796,6 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
797 test_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags), 796 test_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags),
798 test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags)); 797 test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags));
799 798
800 if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags))
801 /* sndbuf needs to have room for one request
802 * per thread, otherwise we can stall even when the
803 * network isn't a bottleneck.
804 *
805 * We count all threads rather than threads in a
806 * particular pool, which provides an upper bound
807 * on the number of threads which will access the socket.
808 *
809 * rcvbuf just needs to be able to hold a few requests.
810 * Normally they will be removed from the queue
811 * as soon as a complete request arrives.
812 */
813 svc_sock_setbufsize(svsk->sk_sock,
814 (serv->sv_nrthreads+3) * serv->sv_max_mesg,
815 3 * serv->sv_max_mesg);
816
817 clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); 799 clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
818 800
819 /* Receive data. If we haven't got the record length yet, get 801 /* Receive data. If we haven't got the record length yet, get
@@ -1061,15 +1043,6 @@ static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
1061 1043
1062 tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF; 1044 tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
1063 1045
1064 /* initial setting must have enough space to
1065 * receive and respond to one request.
1066 * svc_tcp_recvfrom will re-adjust if necessary
1067 */
1068 svc_sock_setbufsize(svsk->sk_sock,
1069 3 * svsk->sk_xprt.xpt_server->sv_max_mesg,
1070 3 * svsk->sk_xprt.xpt_server->sv_max_mesg);
1071
1072 set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
1073 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); 1046 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
1074 if (sk->sk_state != TCP_ESTABLISHED) 1047 if (sk->sk_state != TCP_ESTABLISHED)
1075 set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); 1048 set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
@@ -1139,8 +1112,14 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
1139 /* Initialize the socket */ 1112 /* Initialize the socket */
1140 if (sock->type == SOCK_DGRAM) 1113 if (sock->type == SOCK_DGRAM)
1141 svc_udp_init(svsk, serv); 1114 svc_udp_init(svsk, serv);
1142 else 1115 else {
 1116 /* initial setting must have enough space to
1117 * receive and respond to one request.
1118 */
1119 svc_sock_setbufsize(svsk->sk_sock, 4 * serv->sv_max_mesg,
1120 4 * serv->sv_max_mesg);
1143 svc_tcp_init(svsk, serv); 1121 svc_tcp_init(svsk, serv);
1122 }
1144 1123
1145 dprintk("svc: svc_setup_socket created %p (inet %p)\n", 1124 dprintk("svc: svc_setup_socket created %p (inet %p)\n",
1146 svsk, svsk->sk_sk); 1125 svsk, svsk->sk_sk);
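
With the two deletions above, TCP buffer sizes are no longer re-derived from sv_nrthreads on every receive; they are set exactly once here in svc_setup_socket(). Two details are easy to miss: svc_sock_setbufsize() (first hunk of this file) stores twice the value it is passed, so asking for 4 * sv_max_mesg yields 8 * sv_max_mesg in sk_sndbuf/sk_rcvbuf, and since the SOCK_SNDBUF_LOCK/SOCK_RCVBUF_LOCK bits are no longer set, the kernel's normal auto-tuning may adjust the buffers afterwards. The arithmetic, as a sketch (sv_max_mesg value assumed; svc_sock_setbufsize() itself is static to svcsock.c):

/* What the TCP branch above requests, with sv_max_mesg assumed at 1 MB. */
static unsigned int svc_tcp_initial_bufsize(unsigned int sv_max_mesg)
{
	unsigned int asked = 4 * sv_max_mesg;	/* passed for both snd and rcv */

	return 2 * asked;	/* svc_sock_setbufsize() stores snd * 2 / rcv * 2 */
}
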
diff --git a/net/wireless/core.h b/net/wireless/core.h
index d43daa236ef9..0a592e4295f0 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -90,7 +90,7 @@ struct cfg80211_internal_bss {
90 struct rb_node rbn; 90 struct rb_node rbn;
91 unsigned long ts; 91 unsigned long ts;
92 struct kref ref; 92 struct kref ref;
93 bool hold; 93 bool hold, ies_allocated;
94 94
95 /* must be last because of priv member */ 95 /* must be last because of priv member */
96 struct cfg80211_bss pub; 96 struct cfg80211_bss pub;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 353e1a4ece83..2456e4ee445e 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -3334,7 +3334,7 @@ static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev,
3334 struct sk_buff *msg; 3334 struct sk_buff *msg;
3335 void *hdr; 3335 void *hdr;
3336 3336
3337 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 3337 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
3338 if (!msg) 3338 if (!msg)
3339 return; 3339 return;
3340 3340
@@ -3353,7 +3353,7 @@ static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev,
3353 return; 3353 return;
3354 } 3354 }
3355 3355
3356 genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, GFP_KERNEL); 3356 genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, GFP_ATOMIC);
3357 return; 3357 return;
3358 3358
3359 nla_put_failure: 3359 nla_put_failure:
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 6327e1617acb..08265ca15785 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -907,6 +907,7 @@ EXPORT_SYMBOL(freq_reg_info);
907int freq_reg_info(struct wiphy *wiphy, u32 center_freq, u32 *bandwidth, 907int freq_reg_info(struct wiphy *wiphy, u32 center_freq, u32 *bandwidth,
908 const struct ieee80211_reg_rule **reg_rule) 908 const struct ieee80211_reg_rule **reg_rule)
909{ 909{
910 assert_cfg80211_lock();
910 return freq_reg_info_regd(wiphy, center_freq, 911 return freq_reg_info_regd(wiphy, center_freq,
911 bandwidth, reg_rule, NULL); 912 bandwidth, reg_rule, NULL);
912} 913}
@@ -1133,7 +1134,8 @@ static bool reg_is_world_roaming(struct wiphy *wiphy)
1133 if (is_world_regdom(cfg80211_regdomain->alpha2) || 1134 if (is_world_regdom(cfg80211_regdomain->alpha2) ||
1134 (wiphy->regd && is_world_regdom(wiphy->regd->alpha2))) 1135 (wiphy->regd && is_world_regdom(wiphy->regd->alpha2)))
1135 return true; 1136 return true;
1136 if (last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE && 1137 if (last_request &&
1138 last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
1137 wiphy->custom_regulatory) 1139 wiphy->custom_regulatory)
1138 return true; 1140 return true;
1139 return false; 1141 return false;
@@ -1142,6 +1144,12 @@ static bool reg_is_world_roaming(struct wiphy *wiphy)
1142/* Reap the advantages of previously found beacons */ 1144/* Reap the advantages of previously found beacons */
1143static void reg_process_beacons(struct wiphy *wiphy) 1145static void reg_process_beacons(struct wiphy *wiphy)
1144{ 1146{
1147 /*
 1148 * This means we are just firing up cfg80211, so no beacons
 1149 * have been processed yet.
1150 */
1151 if (!last_request)
1152 return;
1145 if (!reg_is_world_roaming(wiphy)) 1153 if (!reg_is_world_roaming(wiphy))
1146 return; 1154 return;
1147 wiphy_update_beacon_reg(wiphy); 1155 wiphy_update_beacon_reg(wiphy);
@@ -1176,6 +1184,8 @@ static void handle_channel_custom(struct wiphy *wiphy,
1176 struct ieee80211_supported_band *sband; 1184 struct ieee80211_supported_band *sband;
1177 struct ieee80211_channel *chan; 1185 struct ieee80211_channel *chan;
1178 1186
1187 assert_cfg80211_lock();
1188
1179 sband = wiphy->bands[band]; 1189 sband = wiphy->bands[band];
1180 BUG_ON(chan_idx >= sband->n_channels); 1190 BUG_ON(chan_idx >= sband->n_channels);
1181 chan = &sband->channels[chan_idx]; 1191 chan = &sband->channels[chan_idx];
@@ -1214,10 +1224,13 @@ void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
1214 const struct ieee80211_regdomain *regd) 1224 const struct ieee80211_regdomain *regd)
1215{ 1225{
1216 enum ieee80211_band band; 1226 enum ieee80211_band band;
1227
1228 mutex_lock(&cfg80211_mutex);
1217 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 1229 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
1218 if (wiphy->bands[band]) 1230 if (wiphy->bands[band])
1219 handle_band_custom(wiphy, band, regd); 1231 handle_band_custom(wiphy, band, regd);
1220 } 1232 }
1233 mutex_unlock(&cfg80211_mutex);
1221} 1234}
1222EXPORT_SYMBOL(wiphy_apply_custom_regulatory); 1235EXPORT_SYMBOL(wiphy_apply_custom_regulatory);
1223 1236
@@ -1423,7 +1436,7 @@ new_request:
1423 return call_crda(last_request->alpha2); 1436 return call_crda(last_request->alpha2);
1424} 1437}
1425 1438
1426/* This currently only processes user and driver regulatory hints */ 1439/* This processes *all* regulatory hints */
1427static void reg_process_hint(struct regulatory_request *reg_request) 1440static void reg_process_hint(struct regulatory_request *reg_request)
1428{ 1441{
1429 int r = 0; 1442 int r = 0;
@@ -2095,11 +2108,12 @@ int set_regdom(const struct ieee80211_regdomain *rd)
2095/* Caller must hold cfg80211_mutex */ 2108/* Caller must hold cfg80211_mutex */
2096void reg_device_remove(struct wiphy *wiphy) 2109void reg_device_remove(struct wiphy *wiphy)
2097{ 2110{
2098 struct wiphy *request_wiphy; 2111 struct wiphy *request_wiphy = NULL;
2099 2112
2100 assert_cfg80211_lock(); 2113 assert_cfg80211_lock();
2101 2114
2102 request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx); 2115 if (last_request)
2116 request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);
2103 2117
2104 kfree(wiphy->regd); 2118 kfree(wiphy->regd);
2105 if (!last_request || !request_wiphy) 2119 if (!last_request || !request_wiphy)
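
The three reg.c guards above protect the same invariant: last_request stays NULL until the first regulatory hint has been processed, so early callers (beacon processing, device removal during bring-up) must tolerate that. Named as a hypothetical helper, assuming last_request is covered by the cfg80211 lock as the surrounding assert_cfg80211_lock() calls suggest:

/* Hypothetical: the invariant the NULL checks above rely on. */
static bool reg_initialized(void)
{
	assert_cfg80211_lock();		/* last_request needs the cfg80211 lock */
	return last_request != NULL;
}
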
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 2a00e362f5fe..1f260c40b6ca 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -58,6 +58,10 @@ static void bss_release(struct kref *ref)
58 bss = container_of(ref, struct cfg80211_internal_bss, ref); 58 bss = container_of(ref, struct cfg80211_internal_bss, ref);
59 if (bss->pub.free_priv) 59 if (bss->pub.free_priv)
60 bss->pub.free_priv(&bss->pub); 60 bss->pub.free_priv(&bss->pub);
61
62 if (bss->ies_allocated)
63 kfree(bss->pub.information_elements);
64
61 kfree(bss); 65 kfree(bss);
62} 66}
63 67
@@ -360,19 +364,42 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
360 364
361 found = rb_find_bss(dev, res); 365 found = rb_find_bss(dev, res);
362 366
363 if (found && overwrite) { 367 if (found) {
364 list_replace(&found->list, &res->list);
365 rb_replace_node(&found->rbn, &res->rbn,
366 &dev->bss_tree);
367 kref_put(&found->ref, bss_release);
368 found = res;
369 } else if (found) {
370 kref_get(&found->ref); 368 kref_get(&found->ref);
371 found->pub.beacon_interval = res->pub.beacon_interval; 369 found->pub.beacon_interval = res->pub.beacon_interval;
372 found->pub.tsf = res->pub.tsf; 370 found->pub.tsf = res->pub.tsf;
373 found->pub.signal = res->pub.signal; 371 found->pub.signal = res->pub.signal;
374 found->pub.capability = res->pub.capability; 372 found->pub.capability = res->pub.capability;
375 found->ts = res->ts; 373 found->ts = res->ts;
374
375 /* overwrite IEs */
376 if (overwrite) {
377 size_t used = dev->wiphy.bss_priv_size + sizeof(*res);
378 size_t ielen = res->pub.len_information_elements;
379
380 if (ksize(found) >= used + ielen) {
381 memcpy(found->pub.information_elements,
382 res->pub.information_elements, ielen);
383 found->pub.len_information_elements = ielen;
384 } else {
385 u8 *ies = found->pub.information_elements;
386
387 if (found->ies_allocated) {
388 if (ksize(ies) < ielen)
389 ies = krealloc(ies, ielen,
390 GFP_ATOMIC);
391 } else
392 ies = kmalloc(ielen, GFP_ATOMIC);
393
394 if (ies) {
395 memcpy(ies, res->pub.information_elements, ielen);
396 found->ies_allocated = true;
397 found->pub.information_elements = ies;
398 found->pub.len_information_elements = ielen;
399 }
400 }
401 }
402
376 kref_put(&res->ref, bss_release); 403 kref_put(&res->ref, bss_release);
377 } else { 404 } else {
378 /* this "consumes" the reference */ 405 /* this "consumes" the reference */
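
The overwrite path above avoids an allocation whenever the replacement IEs fit into the slack kmalloc originally handed back: ksize() reports the usable size of an allocation, which is often larger than what was requested. The same pattern, reduced to a standalone hypothetical helper ('buf' must be NULL or itself kmalloc()ed, unlike the real code, where the initial IE buffer is carved out of the bss allocation and guarded by ies_allocated so it is never freed on its own):

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Store 'len' bytes from 'src', reusing 'buf' when ksize() says it fits. */
static u8 *store_ies(u8 *buf, size_t *buf_len, const u8 *src, size_t len)
{
	u8 *n;

	if (buf && ksize(buf) >= len) {		/* fits in existing slack */
		memcpy(buf, src, len);
		*buf_len = len;
		return buf;
	}
	n = krealloc(buf, len, GFP_ATOMIC);	/* kmallocs when buf is NULL */
	if (!n)
		return buf;			/* on failure, keep the old data */
	memcpy(n, src, len);
	*buf_len = len;
	return n;
}
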
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 82271720d970..5f1f86565f16 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -794,7 +794,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
794{ 794{
795 static xfrm_address_t saddr_wildcard = { }; 795 static xfrm_address_t saddr_wildcard = { };
796 struct net *net = xp_net(pol); 796 struct net *net = xp_net(pol);
797 unsigned int h; 797 unsigned int h, h_wildcard;
798 struct hlist_node *entry; 798 struct hlist_node *entry;
799 struct xfrm_state *x, *x0, *to_put; 799 struct xfrm_state *x, *x0, *to_put;
800 int acquire_in_progress = 0; 800 int acquire_in_progress = 0;
@@ -819,8 +819,8 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
819 if (best) 819 if (best)
820 goto found; 820 goto found;
821 821
822 h = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, family); 822 h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, family);
823 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) { 823 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h_wildcard, bydst) {
824 if (x->props.family == family && 824 if (x->props.family == family &&
825 x->props.reqid == tmpl->reqid && 825 x->props.reqid == tmpl->reqid &&
826 !(x->props.flags & XFRM_STATE_WILDRECV) && 826 !(x->props.flags & XFRM_STATE_WILDRECV) &&