Diffstat (limited to 'net')
 net/bluetooth/hci_core.c        |  2
 net/caif/caif_dev.c             | 22
 net/caif/caif_socket.c          | 10
 net/caif/cfcnfg.c               |  1
 net/caif/cfmuxl.c               | 12
 net/ceph/ceph_common.c          |  2
 net/ceph/mon_client.c           | 13
 net/core/dev.c                  | 10
 net/core/ethtool.c              |  3
 net/core/flow_dissector.c       |  1
 net/core/net_namespace.c        | 31
 net/core/netpoll.c              |  2
 net/core/netprio_cgroup.c       | 15
 net/core/pktgen.c               |  4
 net/core/rtnetlink.c            |  3
 net/core/sock.c                 | 11
 net/ipv4/Kconfig                |  2
 net/ipv4/arp.c                  |  3
 net/ipv4/inet_connection_sock.c |  7
 net/ipv4/ip_gre.c               |  4
 net/ipv4/ip_options.c           |  2
 net/ipv4/proc.c                 |  1
 net/ipv4/sysctl_net_ipv4.c      |  7
 net/ipv4/tcp.c                  | 33
 net/ipv4/tcp_bic.c              | 11
 net/ipv4/tcp_cubic.c            | 10
 net/ipv4/tcp_input.c            | 86
 net/ipv4/tcp_ipv4.c             |  7
 net/ipv4/tcp_output.c           |  6
 net/ipv4/tcp_timer.c            |  5
 net/ipv6/addrconf.c             | 61
 net/ipv6/tcp_ipv6.c             |  2
 net/l2tp/l2tp_ip.c              |  5
 net/llc/af_llc.c                |  5
 net/mac80211/debugfs_key.c      |  7
 net/mac80211/ibss.c             |  1
 net/mac80211/iface.c            |  1
 net/mac80211/main.c             |  4
 net/mac80211/mesh_hwmp.c        |  8
 net/mac80211/mesh_plink.c       |  4
 net/mac80211/mlme.c             | 38
 net/mac80211/rx.c               |  2
 net/rds/af_rds.c                | 20
 net/rxrpc/ar-key.c              |  4
 net/sched/sch_choke.c           |  3
 net/sched/sch_netem.c           |  5
 net/sched/sch_sfb.c             |  3
 net/sched/sch_sfq.c             |  5
 net/sunrpc/auth_generic.c       | 17
 net/unix/af_unix.c              | 19
 50 files changed, 298 insertions(+), 242 deletions(-)
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 845da3ee56a..9de93714213 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -55,7 +55,7 @@
 
 #define AUTO_OFF_TIMEOUT 2000
 
-int enable_hs;
+bool enable_hs;
 
 static void hci_rx_work(struct work_struct *work);
 static void hci_cmd_work(struct work_struct *work);
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 673728add60..82c57069415 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -59,8 +59,6 @@ struct cfcnfg *get_cfcnfg(struct net *net)
 {
 	struct caif_net *caifn;
 	caifn = net_generic(net, caif_net_id);
-	if (!caifn)
-		return NULL;
 	return caifn->cfg;
 }
 EXPORT_SYMBOL(get_cfcnfg);
@@ -69,8 +67,6 @@ static struct caif_device_entry_list *caif_device_list(struct net *net)
 {
 	struct caif_net *caifn;
 	caifn = net_generic(net, caif_net_id);
-	if (!caifn)
-		return NULL;
 	return &caifn->caifdevs;
 }
 
@@ -99,8 +95,6 @@ static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
 	struct caif_device_entry *caifd;
 
 	caifdevs = caif_device_list(dev_net(dev));
-	if (!caifdevs)
-		return NULL;
 
 	caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
 	if (!caifd)
@@ -120,8 +114,6 @@ static struct caif_device_entry *caif_get(struct net_device *dev)
 	struct caif_device_entry_list *caifdevs =
 	    caif_device_list(dev_net(dev));
 	struct caif_device_entry *caifd;
-	if (!caifdevs)
-		return NULL;
 
 	list_for_each_entry_rcu(caifd, &caifdevs->list, list) {
 		if (caifd->netdev == dev)
@@ -321,8 +313,6 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
 	struct caif_device_entry_list *caifdevs;
 
 	caifdevs = caif_device_list(dev_net(dev));
-	if (!cfg || !caifdevs)
-		return;
 	caifd = caif_device_alloc(dev);
 	if (!caifd)
 		return;
@@ -374,8 +364,6 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
 
 	cfg = get_cfcnfg(dev_net(dev));
 	caifdevs = caif_device_list(dev_net(dev));
-	if (!cfg || !caifdevs)
-		return 0;
 
 	caifd = caif_get(dev);
 	if (caifd == NULL && dev->type != ARPHRD_CAIF)
@@ -507,9 +495,6 @@ static struct notifier_block caif_device_notifier = {
 static int caif_init_net(struct net *net)
 {
 	struct caif_net *caifn = net_generic(net, caif_net_id);
-	if (WARN_ON(!caifn))
-		return -EINVAL;
-
 	INIT_LIST_HEAD(&caifn->caifdevs.list);
 	mutex_init(&caifn->caifdevs.lock);
 
@@ -527,9 +512,6 @@ static void caif_exit_net(struct net *net)
 		caif_device_list(net);
 	struct cfcnfg *cfg = get_cfcnfg(net);
 
-	if (!cfg || !caifdevs)
-		return;
-
 	rtnl_lock();
 	mutex_lock(&caifdevs->lock);
 
@@ -569,7 +551,7 @@ static int __init caif_device_init(void)
 {
 	int result;
 
-	result = register_pernet_device(&caif_net_ops);
+	result = register_pernet_subsys(&caif_net_ops);
 
 	if (result)
 		return result;
@@ -582,7 +564,7 @@ static int __init caif_device_init(void)
 
 static void __exit caif_device_exit(void)
 {
-	unregister_pernet_device(&caif_net_ops);
+	unregister_pernet_subsys(&caif_net_ops);
 	unregister_netdevice_notifier(&caif_device_notifier);
 	dev_remove_pack(&caif_packet_type);
 }
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index a9862808645..a97d97a3a51 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -539,8 +539,10 @@ static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk,
 	pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb);
 	memset(skb->cb, 0, sizeof(struct caif_payload_info));
 
-	if (cf_sk->layer.dn == NULL)
+	if (cf_sk->layer.dn == NULL) {
+		kfree_skb(skb);
 		return -EINVAL;
+	}
 
 	return cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt);
 }
@@ -683,10 +685,10 @@ static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
 		}
 		err = transmit_skb(skb, cf_sk,
 				msg->msg_flags&MSG_DONTWAIT, timeo);
-		if (err < 0) {
-			kfree_skb(skb);
+		if (err < 0)
+			/* skb is already freed */
 			goto pipe_err;
-		}
+
 		sent += size;
 	}
 
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index 598aafb4cb5..ba9cfd47778 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -309,7 +309,6 @@ int caif_connect_client(struct net *net, struct caif_connect_request *conn_req,
 	int err;
 	struct cfctrl_link_param param;
 	struct cfcnfg *cfg = get_cfcnfg(net);
-	caif_assert(cfg != NULL);
 
 	rcu_read_lock();
 	err = caif_connect_req_to_link_param(cfg, conn_req, &param);
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
index b36f24a4c8e..94b08612a4d 100644
--- a/net/caif/cfmuxl.c
+++ b/net/caif/cfmuxl.c
@@ -248,7 +248,6 @@ static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
 {
 	struct cfmuxl *muxl = container_obj(layr);
 	struct cflayer *layer;
-	int idx;
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(layer, &muxl->srvl_list, node) {
@@ -257,14 +256,9 @@ static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
 
 		if ((ctrl == _CAIF_CTRLCMD_PHYIF_DOWN_IND ||
 				ctrl == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND) &&
-				layer->id != 0) {
-
-			idx = layer->id % UP_CACHE_SIZE;
-			spin_lock_bh(&muxl->receive_lock);
-			RCU_INIT_POINTER(muxl->up_cache[idx], NULL);
-			list_del_rcu(&layer->node);
-			spin_unlock_bh(&muxl->receive_lock);
-		}
+				layer->id != 0)
+			cfmuxl_remove_uplayer(layr, layer->id);
+
 		/* NOTE: ctrlcmd is not allowed to block */
 		layer->ctrlcmd(layer, ctrl, phyid);
 	}
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 97f70e50ad3..761ad9d6cc3 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -85,8 +85,6 @@ int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid)
 	} else {
 		pr_info("client%lld fsid %pU\n", ceph_client_id(client), fsid);
 		memcpy(&client->fsid, fsid, sizeof(*fsid));
-		ceph_debugfs_client_init(client);
-		client->have_fsid = true;
 	}
 	return 0;
 }
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index 0b62deae42b..1845cde2622 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -8,8 +8,8 @@
 
 #include <linux/ceph/mon_client.h>
 #include <linux/ceph/libceph.h>
+#include <linux/ceph/debugfs.h>
 #include <linux/ceph/decode.h>
-
 #include <linux/ceph/auth.h>
 
 /*
@@ -340,8 +340,19 @@ static void ceph_monc_handle_map(struct ceph_mon_client *monc,
 	client->monc.monmap = monmap;
 	kfree(old);
 
+	if (!client->have_fsid) {
+		client->have_fsid = true;
+		mutex_unlock(&monc->mutex);
+		/*
+		 * do debugfs initialization without mutex to avoid
+		 * creating a locking dependency
+		 */
+		ceph_debugfs_client_init(client);
+		goto out_unlocked;
+	}
 out:
 	mutex_unlock(&monc->mutex);
+out_unlocked:
 	wake_up_all(&client->auth_wq);
 }
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 115dee1d985..6ca32f6b310 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3500,14 +3500,20 @@ static inline gro_result_t
 __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
 	struct sk_buff *p;
+	unsigned int maclen = skb->dev->hard_header_len;
 
 	for (p = napi->gro_list; p; p = p->next) {
 		unsigned long diffs;
 
 		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
 		diffs |= p->vlan_tci ^ skb->vlan_tci;
-		diffs |= compare_ether_header(skb_mac_header(p),
-					      skb_gro_mac_header(skb));
+		if (maclen == ETH_HLEN)
+			diffs |= compare_ether_header(skb_mac_header(p),
+						      skb_gro_mac_header(skb));
+		else if (!diffs)
+			diffs = memcmp(skb_mac_header(p),
+				       skb_gro_mac_header(skb),
+				       maclen);
 		NAPI_GRO_CB(p)->same_flow = !diffs;
 		NAPI_GRO_CB(p)->flush = 0;
 	}
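
The __napi_gro_receive() hunk above stops assuming every device has an Ethernet-sized link-layer header: the word-optimized compare_ether_header() fast path is kept only when hard_header_len is exactly ETH_HLEN, and longer MAC headers fall back to a full memcmp() (skipped when earlier fields already differ). A standalone userspace sketch of that dispatch, with plain buffers standing in for skb MAC headers and a simplified stand-in for the kernel's compare_ether_header():

#include <stdio.h>
#include <string.h>

#define ETH_HLEN 14

/* Simplified stand-in: the kernel helper compares the 14-byte
 * Ethernet header as words rather than byte-wise. */
static unsigned long compare_ether_header(const void *a, const void *b)
{
	return memcmp(a, b, ETH_HLEN);
}

/* Same-flow test mirroring the patched loop body: fast path for
 * Ethernet-length headers, memcmp() for anything longer. */
static int same_flow(const unsigned char *h1, const unsigned char *h2,
		     unsigned int maclen)
{
	unsigned long diffs = 0;	/* dev/vlan checks omitted here */

	if (maclen == ETH_HLEN)
		diffs |= compare_ether_header(h1, h2);
	else if (!diffs)
		diffs = memcmp(h1, h2, maclen);
	return !diffs;
}

int main(void)
{
	unsigned char a[18], b[18];

	memset(a, 0x55, sizeof(a));
	memcpy(b, a, sizeof(b));
	printf("eth same flow: %d\n", same_flow(a, b, ETH_HLEN));	/* 1 */
	b[16] ^= 1;				/* differ beyond byte 14 */
	printf("18-byte header: %d\n", same_flow(a, b, 18));		/* 0 */
	return 0;
}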
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 921aa2b4b41..3f79db1b612 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1190,6 +1190,8 @@ static noinline_for_stack int ethtool_flash_device(struct net_device *dev,
 	if (!dev->ethtool_ops->flash_device)
 		return -EOPNOTSUPP;
 
+	efl.data[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0;
+
 	return dev->ethtool_ops->flash_device(dev, &efl);
 }
 
@@ -1311,6 +1313,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
 	case ETHTOOL_GRXCSUM:
 	case ETHTOOL_GTXCSUM:
 	case ETHTOOL_GSG:
+	case ETHTOOL_GSSET_INFO:
 	case ETHTOOL_GSTRINGS:
 	case ETHTOOL_GTSO:
 	case ETHTOOL_GPERMADDR:
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 0985b9b14b8..a225089df5b 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -1,4 +1,5 @@
 #include <linux/skbuff.h>
+#include <linux/export.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/if_vlan.h>
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index aefcd7acbff..0e950fda9a0 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -30,6 +30,20 @@ EXPORT_SYMBOL(init_net);
 
 #define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */
 
+static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;
+
+static struct net_generic *net_alloc_generic(void)
+{
+	struct net_generic *ng;
+	size_t generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);
+
+	ng = kzalloc(generic_size, GFP_KERNEL);
+	if (ng)
+		ng->len = max_gen_ptrs;
+
+	return ng;
+}
+
 static int net_assign_generic(struct net *net, int id, void *data)
 {
 	struct net_generic *ng, *old_ng;
@@ -43,8 +57,7 @@ static int net_assign_generic(struct net *net, int id, void *data)
 	if (old_ng->len >= id)
 		goto assign;
 
-	ng = kzalloc(sizeof(struct net_generic) +
-		     id * sizeof(void *), GFP_KERNEL);
+	ng = net_alloc_generic();
 	if (ng == NULL)
 		return -ENOMEM;
 
@@ -59,7 +72,6 @@ static int net_assign_generic(struct net *net, int id, void *data)
 	 * the old copy for kfree after a grace period.
 	 */
 
-	ng->len = id;
 	memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*));
 
 	rcu_assign_pointer(net->gen, ng);
@@ -161,18 +173,6 @@ out_undo:
 	goto out;
 }
 
-static struct net_generic *net_alloc_generic(void)
-{
-	struct net_generic *ng;
-	size_t generic_size = sizeof(struct net_generic) +
-		INITIAL_NET_GEN_PTRS * sizeof(void *);
-
-	ng = kzalloc(generic_size, GFP_KERNEL);
-	if (ng)
-		ng->len = INITIAL_NET_GEN_PTRS;
-
-	return ng;
-}
 
 #ifdef CONFIG_NET_NS
 static struct kmem_cache *net_cachep;
@@ -483,6 +483,7 @@ again:
 			}
 			return error;
 		}
+		max_gen_ptrs = max_t(unsigned int, max_gen_ptrs, *ops->id);
 	}
 	error = __register_pernet_operations(list, ops);
 	if (error) {
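
The net_namespace.c hunks above size every per-namespace pointer array from max_gen_ptrs, the largest id any registered pernet subsystem has claimed, and compute the allocation size with offsetof() into the flexible array member so the header and slots come from one kzalloc(). A minimal userspace sketch of the same allocation pattern (generic names, not the kernel's struct net_generic):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Length-prefixed flexible array, mirroring struct net_generic. */
struct ptr_table {
	unsigned int len;
	void *ptr[];
};

static unsigned int max_gen_ptrs = 13;	/* grows as larger ids register */

/* One allocation covers the header plus max_gen_ptrs slots;
 * offsetof(ptr[n]) yields the exact size, padding included. */
static struct ptr_table *table_alloc(void)
{
	size_t size = offsetof(struct ptr_table, ptr[max_gen_ptrs]);
	struct ptr_table *t = calloc(1, size);

	if (t)
		t->len = max_gen_ptrs;
	return t;
}

int main(void)
{
	unsigned int new_id = 20;	/* a subsystem registering id 20 */

	if (new_id > max_gen_ptrs)
		max_gen_ptrs = new_id;

	struct ptr_table *t = table_alloc();
	if (!t)
		return 1;
	printf("slots: %u\n", t->len);	/* prints 20 */
	free(t);
	return 0;
}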
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 556b0829866..ddefc513b44 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -194,7 +194,7 @@ static void netpoll_poll_dev(struct net_device *dev)
 
 	poll_napi(dev);
 
-	if (dev->priv_flags & IFF_SLAVE) {
+	if (dev->flags & IFF_SLAVE) {
 		if (dev->npinfo) {
 			struct net_device *bond_dev = dev->master;
 			struct sk_buff *skb;
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index 3a9fd4826b7..4dacc44637e 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -58,11 +58,12 @@ static int get_prioidx(u32 *prio)
 
 	spin_lock_irqsave(&prioidx_map_lock, flags);
 	prioidx = find_first_zero_bit(prioidx_map, sizeof(unsigned long) * PRIOIDX_SZ);
+	if (prioidx == sizeof(unsigned long) * PRIOIDX_SZ) {
+		spin_unlock_irqrestore(&prioidx_map_lock, flags);
+		return -ENOSPC;
+	}
 	set_bit(prioidx, prioidx_map);
 	spin_unlock_irqrestore(&prioidx_map_lock, flags);
-	if (prioidx == sizeof(unsigned long) * PRIOIDX_SZ)
-		return -ENOSPC;
-
 	atomic_set(&max_prioidx, prioidx);
 	*prio = prioidx;
 	return 0;
@@ -107,7 +108,7 @@ static void extend_netdev_table(struct net_device *dev, u32 new_len)
 static void update_netdev_tables(void)
 {
 	struct net_device *dev;
-	u32 max_len = atomic_read(&max_prioidx);
+	u32 max_len = atomic_read(&max_prioidx) + 1;
 	struct netprio_map *map;
 
 	rtnl_lock();
@@ -270,7 +271,6 @@ static int netprio_device_event(struct notifier_block *unused,
 {
 	struct net_device *dev = ptr;
 	struct netprio_map *old;
-	u32 max_len = atomic_read(&max_prioidx);
 
 	/*
 	 * Note this is called with rtnl_lock held so we have update side
@@ -278,11 +278,6 @@
 	 */
 
 	switch (event) {
-
-	case NETDEV_REGISTER:
-		if (max_len)
-			extend_netdev_table(dev, max_len);
-		break;
 	case NETDEV_UNREGISTER:
 		old = rtnl_dereference(dev->priomap);
 		RCU_INIT_POINTER(dev->priomap, NULL);
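
The get_prioidx() hunk above closes a race: the "bitmap full" test now runs before set_bit() and while prioidx_map_lock is still held, instead of after the lock is dropped, where a concurrent allocator could observe a half-claimed index. A compact userspace model of the corrected ordering, using a pthread mutex in place of the kernel spinlock and a 64-bit word as the bitmap:

#include <pthread.h>
#include <stdio.h>

#define MAP_BITS 64

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long long map;		/* one bit per allocated index */

/* find-first-zero, full-check and set all under one critical section,
 * mirroring the ordering the patch enforces in get_prioidx() */
static int get_idx(unsigned int *idx)
{
	unsigned int i;

	pthread_mutex_lock(&map_lock);
	for (i = 0; i < MAP_BITS && (map & (1ULL << i)); i++)
		;
	if (i == MAP_BITS) {		/* full: fail while still locked */
		pthread_mutex_unlock(&map_lock);
		return -1;
	}
	map |= 1ULL << i;		/* claim before unlocking */
	pthread_mutex_unlock(&map_lock);
	*idx = i;
	return 0;
}

int main(void)
{
	unsigned int idx, count = 0;

	while (get_idx(&idx) == 0)
		count++;
	printf("allocated %u indices, then failed\n", count);	/* 64 */
	return 0;
}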
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 65f80c7b165..4d8ce93cd50 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -767,8 +767,8 @@ done:
 	return i;
 }
 
-static unsigned long num_arg(const char __user * user_buffer,
-			     unsigned long maxlen, unsigned long *num)
+static long num_arg(const char __user *user_buffer, unsigned long maxlen,
+		    unsigned long *num)
 {
 	int i;
 	*num = 0;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index f16444bc6cb..65aebd45002 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1509,6 +1509,9 @@ errout:
 
 	if (send_addr_notify)
 		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+	min_ifinfo_dump_size = max_t(u16, if_nlmsg_size(dev),
+				     min_ifinfo_dump_size);
+
 	return err;
 }
 
diff --git a/net/core/sock.c b/net/core/sock.c
index 5c5af9988f9..02f8dfe320b 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1171,13 +1171,10 @@ EXPORT_SYMBOL(sock_update_classid);
 
 void sock_update_netprioidx(struct sock *sk)
 {
-	struct cgroup_netprio_state *state;
 	if (in_interrupt())
 		return;
-	rcu_read_lock();
-	state = task_netprio_state(current);
-	sk->sk_cgrp_prioidx = state ? state->prioidx : 0;
-	rcu_read_unlock();
+
+	sk->sk_cgrp_prioidx = task_netprioidx(current);
 }
 EXPORT_SYMBOL_GPL(sock_update_netprioidx);
 #endif
@@ -1827,7 +1824,7 @@ suppress_allocation:
 	/* Alas. Undo changes. */
 	sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
 
-	sk_memory_allocated_sub(sk, amt, parent_status);
+	sk_memory_allocated_sub(sk, amt);
 
 	return 0;
 }
@@ -1840,7 +1837,7 @@ EXPORT_SYMBOL(__sk_mem_schedule);
 void __sk_mem_reclaim(struct sock *sk)
 {
 	sk_memory_allocated_sub(sk,
-				sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT, 0);
+				sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
 	sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
 
 	if (sk_under_memory_pressure(sk) &&
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index aa2a2c79776..d183262943d 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -409,7 +409,7 @@ config INET_TCP_DIAG
 
 config INET_UDP_DIAG
 	tristate "UDP: socket monitoring interface"
-	depends on INET_DIAG
+	depends on INET_DIAG && (IPV6 || IPV6=n)
 	default n
 	---help---
 	  Support for UDP socket monitoring interface used by the ss tool.
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 59402be133f..63e49890ad3 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -863,7 +863,8 @@ static int arp_process(struct sk_buff *skb)
 	if (addr_type == RTN_UNICAST  &&
 	    (arp_fwd_proxy(in_dev, dev, rt) ||
 	     arp_fwd_pvlan(in_dev, dev, rt, sip, tip) ||
-	     pneigh_lookup(&arp_tbl, net, &tip, dev, 0))) {
+	     (rt->dst.dev != dev &&
+	      pneigh_lookup(&arp_tbl, net, &tip, dev, 0)))) {
 		n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
 		if (n)
 			neigh_release(n);
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 2e4e24476c4..19d66cefd7d 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -123,11 +123,14 @@ again:
 				smallest_size = tb->num_owners;
 				smallest_rover = rover;
 				if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
-					spin_unlock(&head->lock);
 					snum = smallest_rover;
-					goto have_snum;
+					goto tb_found;
 				}
 			}
+			if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
+				snum = rover;
+				goto tb_found;
+			}
 			goto next;
 		}
 		break;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 2b53a1f7abf..6b3ca5ba445 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -422,6 +422,10 @@ static struct ip_tunnel *ipgre_tunnel_locate(struct net *net,
 	if (register_netdevice(dev) < 0)
 		goto failed_free;
 
+	/* Can use a lockless transmit, unless we generate output sequences */
+	if (!(nt->parms.o_flags & GRE_SEQ))
+		dev->features |= NETIF_F_LLTX;
+
 	dev_hold(dev);
 	ipgre_tunnel_link(ign, nt);
 	return nt;
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 1e60f767907..42dd1a90ede 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -573,8 +573,8 @@ void ip_forward_options(struct sk_buff *skb)
 		}
 		if (srrptr + 3 <= srrspace) {
 			opt->is_changed = 1;
-			ip_rt_get_source(&optptr[srrptr-1], skb, rt);
 			ip_hdr(skb)->daddr = opt->nexthop;
+			ip_rt_get_source(&optptr[srrptr-1], skb, rt);
 			optptr[2] = srrptr+4;
 		} else if (net_ratelimit())
 			printk(KERN_CRIT "ip_forward(): Argh! Destination lost!\n");
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 3569d8ecaea..6afc807ee2a 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -216,7 +216,6 @@ static const struct snmp_mib snmp4_net_list[] = {
 	SNMP_MIB_ITEM("TCPPartialUndo", LINUX_MIB_TCPPARTIALUNDO),
 	SNMP_MIB_ITEM("TCPDSACKUndo", LINUX_MIB_TCPDSACKUNDO),
 	SNMP_MIB_ITEM("TCPLossUndo", LINUX_MIB_TCPLOSSUNDO),
-	SNMP_MIB_ITEM("TCPLoss", LINUX_MIB_TCPLOSS),
 	SNMP_MIB_ITEM("TCPLostRetransmit", LINUX_MIB_TCPLOSTRETRANSMIT),
 	SNMP_MIB_ITEM("TCPRenoFailures", LINUX_MIB_TCPRENOFAILURES),
 	SNMP_MIB_ITEM("TCPSackFailures", LINUX_MIB_TCPSACKFAILURES),
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 4aa7e9dc0cb..7a7724da9bf 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -778,7 +778,6 @@ EXPORT_SYMBOL_GPL(net_ipv4_ctl_path);
 static __net_init int ipv4_sysctl_init_net(struct net *net)
 {
 	struct ctl_table *table;
-	unsigned long limit;
 
 	table = ipv4_net_table;
 	if (!net_eq(net, &init_net)) {
@@ -814,11 +813,7 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
 
 	net->ipv4.sysctl_rt_cache_rebuild_count = 4;
 
-	limit = nr_free_buffer_pages() / 8;
-	limit = max(limit, 128UL);
-	net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3;
-	net->ipv4.sysctl_tcp_mem[1] = limit;
-	net->ipv4.sysctl_tcp_mem[2] = net->ipv4.sysctl_tcp_mem[0] * 2;
+	tcp_init_mem(net);
 
 	net->ipv4.ipv4_hdr = register_net_sysctl_table(net,
 			net_ipv4_ctl_path, table);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 9bcdec3ad77..37755ccc0e9 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1876,6 +1876,20 @@ void tcp_shutdown(struct sock *sk, int how)
 }
 EXPORT_SYMBOL(tcp_shutdown);
 
+bool tcp_check_oom(struct sock *sk, int shift)
+{
+	bool too_many_orphans, out_of_socket_memory;
+
+	too_many_orphans = tcp_too_many_orphans(sk, shift);
+	out_of_socket_memory = tcp_out_of_memory(sk);
+
+	if (too_many_orphans && net_ratelimit())
+		pr_info("TCP: too many orphaned sockets\n");
+	if (out_of_socket_memory && net_ratelimit())
+		pr_info("TCP: out of memory -- consider tuning tcp_mem\n");
+	return too_many_orphans || out_of_socket_memory;
+}
+
 void tcp_close(struct sock *sk, long timeout)
 {
 	struct sk_buff *skb;
@@ -2015,10 +2029,7 @@ adjudge_to_death:
 	}
 	if (sk->sk_state != TCP_CLOSE) {
 		sk_mem_reclaim(sk);
-		if (tcp_too_many_orphans(sk, 0)) {
-			if (net_ratelimit())
-				printk(KERN_INFO "TCP: too many of orphaned "
-				       "sockets\n");
+		if (tcp_check_oom(sk, 0)) {
 			tcp_set_state(sk, TCP_CLOSE);
 			tcp_send_active_reset(sk, GFP_ATOMIC);
 			NET_INC_STATS_BH(sock_net(sk),
@@ -3216,6 +3227,15 @@ static int __init set_thash_entries(char *str)
 }
 __setup("thash_entries=", set_thash_entries);
 
+void tcp_init_mem(struct net *net)
+{
+	unsigned long limit = nr_free_buffer_pages() / 8;
+	limit = max(limit, 128UL);
+	net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3;
+	net->ipv4.sysctl_tcp_mem[1] = limit;
+	net->ipv4.sysctl_tcp_mem[2] = net->ipv4.sysctl_tcp_mem[0] * 2;
+}
+
 void __init tcp_init(void)
 {
 	struct sk_buff *skb = NULL;
@@ -3276,9 +3296,10 @@ void __init tcp_init(void)
 	sysctl_tcp_max_orphans = cnt / 2;
 	sysctl_max_syn_backlog = max(128, cnt / 256);
 
+	tcp_init_mem(&init_net);
 	/* Set per-socket limits to no more than 1/128 the pressure threshold */
-	limit = ((unsigned long)init_net.ipv4.sysctl_tcp_mem[1])
-		<< (PAGE_SHIFT - 7);
+	limit = nr_free_buffer_pages() << (PAGE_SHIFT - 10);
+	limit = max(limit, 128UL);
 	max_share = min(4UL*1024*1024, limit);
 
 	sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
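
tcp_init_mem() above derives the three tcp_mem thresholds from free memory: limit = nr_free_buffer_pages() / 8, floored at 128 pages, then low = 3/4 * limit, pressure = limit, high = 3/2 * limit. The arithmetic in isolation, with a made-up page count standing in for nr_free_buffer_pages():

#include <stdio.h>

int main(void)
{
	/* placeholder: ~4 GB of 4 KiB pages, not nr_free_buffer_pages() */
	unsigned long free_pages = 1UL << 20;
	unsigned long limit = free_pages / 8;
	unsigned long tcp_mem[3];

	if (limit < 128UL)
		limit = 128UL;

	tcp_mem[0] = limit / 4 * 3;	/* low: below this, no pressure */
	tcp_mem[1] = limit;		/* pressure: start reclaiming */
	tcp_mem[2] = tcp_mem[0] * 2;	/* high: refuse new allocations */

	printf("tcp_mem = %lu %lu %lu (pages)\n",
	       tcp_mem[0], tcp_mem[1], tcp_mem[2]);
	return 0;
}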
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index 6187eb4d1dc..f45e1c24244 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -63,7 +63,6 @@ static inline void bictcp_reset(struct bictcp *ca)
 {
 	ca->cnt = 0;
 	ca->last_max_cwnd = 0;
-	ca->loss_cwnd = 0;
 	ca->last_cwnd = 0;
 	ca->last_time = 0;
 	ca->epoch_start = 0;
@@ -72,7 +71,11 @@ static inline void bictcp_reset(struct bictcp *ca)
 
 static void bictcp_init(struct sock *sk)
 {
-	bictcp_reset(inet_csk_ca(sk));
+	struct bictcp *ca = inet_csk_ca(sk);
+
+	bictcp_reset(ca);
+	ca->loss_cwnd = 0;
+
 	if (initial_ssthresh)
 		tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
 }
@@ -127,7 +130,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
 	}
 
 	/* if in slow start or link utilization is very low */
-	if (ca->loss_cwnd == 0) {
+	if (ca->last_max_cwnd == 0) {
 		if (ca->cnt > 20) /* increase cwnd 5% per RTT */
 			ca->cnt = 20;
 	}
@@ -185,7 +188,7 @@ static u32 bictcp_undo_cwnd(struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	const struct bictcp *ca = inet_csk_ca(sk);
-	return max(tp->snd_cwnd, ca->last_max_cwnd);
+	return max(tp->snd_cwnd, ca->loss_cwnd);
 }
 
 static void bictcp_state(struct sock *sk, u8 new_state)
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index f376b05cca8..a9077f441cb 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -107,7 +107,6 @@ static inline void bictcp_reset(struct bictcp *ca)
 {
 	ca->cnt = 0;
 	ca->last_max_cwnd = 0;
-	ca->loss_cwnd = 0;
 	ca->last_cwnd = 0;
 	ca->last_time = 0;
 	ca->bic_origin_point = 0;
@@ -142,7 +141,10 @@ static inline void bictcp_hystart_reset(struct sock *sk)
 
 static void bictcp_init(struct sock *sk)
 {
-	bictcp_reset(inet_csk_ca(sk));
+	struct bictcp *ca = inet_csk_ca(sk);
+
+	bictcp_reset(ca);
+	ca->loss_cwnd = 0;
 
 	if (hystart)
 		bictcp_hystart_reset(sk);
@@ -275,7 +277,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
 	 * The initial growth of cubic function may be too conservative
 	 * when the available bandwidth is still unknown.
 	 */
-	if (ca->loss_cwnd == 0 && ca->cnt > 20)
+	if (ca->last_max_cwnd == 0 && ca->cnt > 20)
 		ca->cnt = 20;	/* increase cwnd 5% per RTT */
 
 	/* TCP Friendly */
@@ -342,7 +344,7 @@ static u32 bictcp_undo_cwnd(struct sock *sk)
 {
 	struct bictcp *ca = inet_csk_ca(sk);
 
-	return max(tcp_sk(sk)->snd_cwnd, ca->last_max_cwnd);
+	return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
 }
 
 static void bictcp_state(struct sock *sk, u8 new_state)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2877c3e0958..53c8ce4046b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -105,7 +105,6 @@ int sysctl_tcp_abc __read_mostly;
 #define FLAG_SYN_ACKED		0x10 /* This ACK acknowledged SYN. */
 #define FLAG_DATA_SACKED	0x20 /* New SACK. */
 #define FLAG_ECE		0x40 /* ECE in this ACK */
-#define FLAG_DATA_LOST		0x80 /* SACK detected data lossage. */
 #define FLAG_SLOWPATH		0x100 /* Do not skip RFC checks for window update.*/
 #define FLAG_ONLY_ORIG_SACKED	0x200 /* SACKs only non-rexmit sent before RTO */
 #define FLAG_SND_UNA_ADVANCED	0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
@@ -1040,13 +1039,11 @@ static void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp,
  * These 6 states form finite state machine, controlled by the following events:
  * 1. New ACK (+SACK) arrives. (tcp_sacktag_write_queue())
  * 2. Retransmission. (tcp_retransmit_skb(), tcp_xmit_retransmit_queue())
- * 3. Loss detection event of one of three flavors:
+ * 3. Loss detection event of two flavors:
  *	A. Scoreboard estimator decided the packet is lost.
  *	   A'. Reno "three dupacks" marks head of queue lost.
- *	   A''. Its FACK modfication, head until snd.fack is lost.
- * B. SACK arrives sacking data transmitted after never retransmitted
- *    hole was sent out.
- * C. SACK arrives sacking SND.NXT at the moment, when the
+ *	   A''. Its FACK modification, head until snd.fack is lost.
+ *	B. SACK arrives sacking SND.NXT at the moment, when the
  *	   segment was retransmitted.
  * 4. D-SACK added new rule: D-SACK changes any tag to S.
  *
@@ -1153,7 +1150,7 @@ static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack,
 }
 
 /* Check for lost retransmit. This superb idea is borrowed from "ratehalving".
- * Event "C". Later note: FACK people cheated me again 8), we have to account
+ * Event "B". Later note: FACK people cheated me again 8), we have to account
  * for reordering! Ugly, but should help.
  *
 * Search retransmitted skbs from write_queue that were sent when snd_nxt was
@@ -1310,25 +1307,26 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
 	return in_sack;
 }
 
-static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk,
-			  struct tcp_sacktag_state *state,
+/* Mark the given newly-SACKed range as such, adjusting counters and hints. */
+static u8 tcp_sacktag_one(struct sock *sk,
+			  struct tcp_sacktag_state *state, u8 sacked,
+			  u32 start_seq, u32 end_seq,
 			  int dup_sack, int pcount)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	u8 sacked = TCP_SKB_CB(skb)->sacked;
 	int fack_count = state->fack_count;
 
 	/* Account D-SACK for retransmitted packet. */
 	if (dup_sack && (sacked & TCPCB_RETRANS)) {
 		if (tp->undo_marker && tp->undo_retrans &&
-		    after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker))
+		    after(end_seq, tp->undo_marker))
 			tp->undo_retrans--;
 		if (sacked & TCPCB_SACKED_ACKED)
 			state->reord = min(fack_count, state->reord);
 	}
 
 	/* Nothing to do; acked frame is about to be dropped (was ACKed). */
-	if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
+	if (!after(end_seq, tp->snd_una))
 		return sacked;
 
 	if (!(sacked & TCPCB_SACKED_ACKED)) {
@@ -1347,13 +1345,13 @@ static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk,
 			/* New sack for not retransmitted frame,
 			 * which was in hole. It is reordering.
 			 */
-			if (before(TCP_SKB_CB(skb)->seq,
+			if (before(start_seq,
 				   tcp_highest_sack_seq(tp)))
 				state->reord = min(fack_count,
 						   state->reord);
 
 			/* SACK enhanced F-RTO (RFC4138; Appendix B) */
-			if (!after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark))
+			if (!after(end_seq, tp->frto_highmark))
 				state->flag |= FLAG_ONLY_ORIG_SACKED;
 		}
 
@@ -1371,8 +1369,7 @@ static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk,
 
 		/* Lost marker hint past SACKed? Tweak RFC3517 cnt */
 		if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) &&
-		    before(TCP_SKB_CB(skb)->seq,
-			   TCP_SKB_CB(tp->lost_skb_hint)->seq))
+		    before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq))
 			tp->lost_cnt_hint += pcount;
 
 		if (fack_count > tp->fackets_out)
@@ -1391,6 +1388,9 @@ static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk,
 	return sacked;
 }
 
+/* Shift newly-SACKed bytes from this skb to the immediately previous
+ * already-SACKed sk_buff. Mark the newly-SACKed bytes as such.
+ */
 static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 			   struct tcp_sacktag_state *state,
 			   unsigned int pcount, int shifted, int mss,
@@ -1398,10 +1398,13 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *prev = tcp_write_queue_prev(sk, skb);
+	u32 start_seq = TCP_SKB_CB(skb)->seq;	/* start of newly-SACKed */
+	u32 end_seq = start_seq + shifted;	/* end of newly-SACKed */
 
 	BUG_ON(!pcount);
 
-	if (skb == tp->lost_skb_hint)
+	/* Adjust hint for FACK. Non-FACK is handled in tcp_sacktag_one(). */
+	if (tcp_is_fack(tp) && (skb == tp->lost_skb_hint))
 		tp->lost_cnt_hint += pcount;
 
 	TCP_SKB_CB(prev)->end_seq += shifted;
@@ -1427,8 +1430,11 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 		skb_shinfo(skb)->gso_type = 0;
 	}
 
-	/* We discard results */
-	tcp_sacktag_one(skb, sk, state, dup_sack, pcount);
+	/* Adjust counters and hints for the newly sacked sequence range but
+	 * discard the return value since prev is already marked.
+	 */
+	tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
+			start_seq, end_seq, dup_sack, pcount);
 
 	/* Difference in this won't matter, both ACKed by the same cumul. ACK */
 	TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);
@@ -1667,10 +1673,14 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
 			break;
 
 		if (in_sack) {
-			TCP_SKB_CB(skb)->sacked = tcp_sacktag_one(skb, sk,
-								  state,
-								  dup_sack,
-								  tcp_skb_pcount(skb));
+			TCP_SKB_CB(skb)->sacked =
+				tcp_sacktag_one(sk,
+						state,
+						TCP_SKB_CB(skb)->sacked,
+						TCP_SKB_CB(skb)->seq,
+						TCP_SKB_CB(skb)->end_seq,
+						dup_sack,
+						tcp_skb_pcount(skb));
 
 			if (!before(TCP_SKB_CB(skb)->seq,
 				    tcp_highest_sack_seq(tp)))
@@ -1844,10 +1854,6 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
 		if (found_dup_sack && ((i + 1) == first_sack_index))
 			next_dup = &sp[i + 1];
 
-		/* Event "B" in the comment above. */
-		if (after(end_seq, tp->high_seq))
-			state.flag |= FLAG_DATA_LOST;
-
 		/* Skip too early cached blocks */
 		while (tcp_sack_cache_ok(tp, cache) &&
 		       !before(start_seq, cache->end_seq))
@@ -2515,8 +2521,11 @@ static void tcp_timeout_skbs(struct sock *sk)
 	tcp_verify_left_out(tp);
 }
 
-/* Mark head of queue up as lost. With RFC3517 SACK, the packets is
- * is against sacked "cnt", otherwise it's against facked "cnt"
+/* Detect loss in event "A" above by marking head of queue up as lost.
+ * For FACK or non-SACK(Reno) senders, the first "packets" number of segments
+ * are considered lost. For RFC3517 SACK, a segment is considered lost if it
+ * has at least tp->reordering SACKed seqments above it; "packets" refers to
+ * the maximum SACKed segments to pass before reaching this limit.
  */
 static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
 {
@@ -2525,6 +2534,8 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
 	int cnt, oldcnt;
 	int err;
 	unsigned int mss;
+	/* Use SACK to deduce losses of new sequences sent during recovery */
+	const u32 loss_high = tcp_is_sack(tp) ? tp->snd_nxt : tp->high_seq;
 
 	WARN_ON(packets > tp->packets_out);
 	if (tp->lost_skb_hint) {
@@ -2546,7 +2557,7 @@
 		tp->lost_skb_hint = skb;
 		tp->lost_cnt_hint = cnt;
 
-		if (after(TCP_SKB_CB(skb)->end_seq, tp->high_seq))
+		if (after(TCP_SKB_CB(skb)->end_seq, loss_high))
 			break;
 
 		oldcnt = cnt;
@@ -3033,19 +3044,10 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
 	if (tcp_check_sack_reneging(sk, flag))
 		return;
 
-	/* C. Process data loss notification, provided it is valid. */
-	if (tcp_is_fack(tp) && (flag & FLAG_DATA_LOST) &&
-	    before(tp->snd_una, tp->high_seq) &&
-	    icsk->icsk_ca_state != TCP_CA_Open &&
-	    tp->fackets_out > tp->reordering) {
-		tcp_mark_head_lost(sk, tp->fackets_out - tp->reordering, 0);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSS);
-	}
-
-	/* D. Check consistency of the current state. */
+	/* C. Check consistency of the current state. */
 	tcp_verify_left_out(tp);
 
-	/* E. Check state exit conditions. State can be terminated
+	/* D. Check state exit conditions. State can be terminated
 	 * when high_seq is ACKed. */
 	if (icsk->icsk_ca_state == TCP_CA_Open) {
 		WARN_ON(tp->retrans_out != 0);
@@ -3077,7 +3079,7 @@
 		}
 	}
 
-	/* F. Process state. */
+	/* E. Process state. */
 	switch (icsk->icsk_ca_state) {
 	case TCP_CA_Recovery:
 		if (!(flag & FLAG_SND_UNA_ADVANCED)) {
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 1eb4ad57670..94d683a61cb 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -631,7 +631,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
 	arg.iov[0].iov_len  = sizeof(rep.th);
 
 #ifdef CONFIG_TCP_MD5SIG
-	key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr) : NULL;
+	key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->saddr) : NULL;
 	if (key) {
 		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
 				   (TCPOPT_NOP << 16) |
@@ -651,6 +651,11 @@
 				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
 	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
+	/* When socket is gone, all binding information is lost.
+	 * routing might fail in this case. using iif for oif to
+	 * make sure we can deliver it
+	 */
+	arg.bound_dev_if = sk ? sk->sk_bound_dev_if : inet_iif(skb);
 
 	net = dev_net(skb_dst(skb)->dev);
 	arg.tos = ip_hdr(skb)->tos;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 8c8de2780c7..4ff3b6dc74f 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1141,11 +1141,9 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
 	sk_mem_uncharge(sk, len);
 	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
 
-	/* Any change of skb->len requires recalculation of tso
-	 * factor and mss.
-	 */
+	/* Any change of skb->len requires recalculation of tso factor. */
 	if (tcp_skb_pcount(skb) > 1)
-		tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk));
+		tcp_set_skb_tso_segs(sk, skb, tcp_skb_mss(skb));
 
 	return 0;
 }
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index a516d1e399d..cd2e0723266 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -77,10 +77,7 @@ static int tcp_out_of_resources(struct sock *sk, int do_reset)
 	if (sk->sk_err_soft)
 		shift++;
 
-	if (tcp_too_many_orphans(sk, shift)) {
-		if (net_ratelimit())
-			printk(KERN_INFO "Out of socket memory\n");
-
+	if (tcp_check_oom(sk, shift)) {
 		/* Catch exceptional cases, when connection requires reset.
 		 *      1. Last segment was sent recently. */
 		if ((s32)(tcp_time_stamp - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
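
With this hunk both remaining callers (tcp_close() and tcp_out_of_resources()) go through the tcp_check_oom() helper introduced in the tcp.c hunk above, which folds the orphan-count test and the global-memory test into one place with separate rate-limited diagnostics. A sketch of that consolidation with trivial predicates in place of the kernel's accounting (thresholds here are invented for the example):

#include <stdbool.h>
#include <stdio.h>

static bool too_many_orphans(int orphans, int shift, int max)
{
	return (orphans << shift) > max;
}

static bool out_of_socket_memory(long allocated, long hard_limit)
{
	return allocated > hard_limit;
}

/* Mirrors tcp_check_oom(): either condition triggers a reset, each
 * with its own message (rate limiting omitted in this sketch). */
static bool check_oom(int orphans, int shift, long allocated)
{
	bool orph = too_many_orphans(orphans, shift, 256);
	bool oom = out_of_socket_memory(allocated, 1024);

	if (orph)
		puts("TCP: too many orphaned sockets");
	if (oom)
		puts("TCP: out of memory -- consider tuning tcp_mem");
	return orph || oom;
}

int main(void)
{
	printf("reset? %d\n", check_oom(300, 0, 100));	/* orphan limit hit */
	return 0;
}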
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index a225d5ee3c2..c02280a4d12 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -502,29 +502,31 @@ static void addrconf_forward_change(struct net *net, __s32 newf)
 	rcu_read_unlock();
 }
 
-static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
+static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
 {
 	struct net *net;
+	int old;
+
+	if (!rtnl_trylock())
+		return restart_syscall();
 
 	net = (struct net *)table->extra2;
-	if (p == &net->ipv6.devconf_dflt->forwarding)
-		return 0;
+	old = *p;
+	*p = newf;
 
-	if (!rtnl_trylock()) {
-		/* Restore the original values before restarting */
-		*p = old;
-		return restart_syscall();
+	if (p == &net->ipv6.devconf_dflt->forwarding) {
+		rtnl_unlock();
+		return 0;
 	}
 
 	if (p == &net->ipv6.devconf_all->forwarding) {
-		__s32 newf = net->ipv6.devconf_all->forwarding;
 		net->ipv6.devconf_dflt->forwarding = newf;
 		addrconf_forward_change(net, newf);
-	} else if ((!*p) ^ (!old))
+	} else if ((!newf) ^ (!old))
 		dev_forward_change((struct inet6_dev *)table->extra1);
 	rtnl_unlock();
 
-	if (*p)
+	if (newf)
 		rt6_purge_dflt_routers(net);
 	return 1;
 }
@@ -4260,9 +4262,17 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write,
 	int *valp = ctl->data;
 	int val = *valp;
 	loff_t pos = *ppos;
+	ctl_table lctl;
 	int ret;
 
-	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+	/*
+	 * ctl->data points to idev->cnf.forwarding, we should
+	 * not modify it until we get the rtnl lock.
+	 */
+	lctl = *ctl;
+	lctl.data = &val;
+
+	ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
 
 	if (write)
 		ret = addrconf_fixup_forwarding(ctl, valp, val);
@@ -4300,26 +4310,27 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
 	rcu_read_unlock();
 }
 
-static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int old)
+static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
 {
 	struct net *net;
+	int old;
+
+	if (!rtnl_trylock())
+		return restart_syscall();
 
 	net = (struct net *)table->extra2;
+	old = *p;
+	*p = newf;
 
-	if (p == &net->ipv6.devconf_dflt->disable_ipv6)
+	if (p == &net->ipv6.devconf_dflt->disable_ipv6) {
+		rtnl_unlock();
 		return 0;
-
-	if (!rtnl_trylock()) {
-		/* Restore the original values before restarting */
-		*p = old;
-		return restart_syscall();
 	}
 
 	if (p == &net->ipv6.devconf_all->disable_ipv6) {
-		__s32 newf = net->ipv6.devconf_all->disable_ipv6;
 		net->ipv6.devconf_dflt->disable_ipv6 = newf;
 		addrconf_disable_change(net, newf);
-	} else if ((!*p) ^ (!old))
+	} else if ((!newf) ^ (!old))
 		dev_disable_change((struct inet6_dev *)table->extra1);
 
 	rtnl_unlock();
@@ -4333,9 +4344,17 @@ int addrconf_sysctl_disable(ctl_table *ctl, int write,
 	int *valp = ctl->data;
 	int val = *valp;
 	loff_t pos = *ppos;
+	ctl_table lctl;
 	int ret;
 
-	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+	/*
+	 * ctl->data points to idev->cnf.disable_ipv6, we should
+	 * not modify it until we get the rtnl lock.
+	 */
+	lctl = *ctl;
+	lctl.data = &val;
+
+	ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
 
 	if (write)
 		ret = addrconf_disable_ipv6(ctl, valp, val);
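
Both addrconf handlers above now run proc_dointvec() against a stack copy of the ctl_table whose ->data points at a local int, so the shared idev->cnf field is first written under the RTNL lock inside the fixup helper rather than racily from the parser. A userspace sketch of that shadow-copy pattern (the table and handler are simplified stand-ins, not the kernel's ctl_table API):

#include <stdio.h>

struct ctl_entry {
	const char *name;
	int *data;			/* points at the live setting */
};

static int forwarding;			/* shared state */

/* stand-in parser: writes the parsed value through entry->data */
static void parse_write(struct ctl_entry *entry, int newval)
{
	*entry->data = newval;
}

/* commit step: in the kernel this runs with rtnl_lock() held */
static void fixup_forwarding(int *p, int newf)
{
	*p = newf;			/* first touch of the shared field */
}

static void handle_write(struct ctl_entry *ctl, int newval)
{
	int val = *ctl->data;		/* local shadow of the value */
	struct ctl_entry lctl = *ctl;	/* shallow copy of the entry */

	lctl.data = &val;		/* parser mutates the shadow only */
	parse_write(&lctl, newval);
	fixup_forwarding(ctl->data, val);
}

int main(void)
{
	struct ctl_entry ctl = { "forwarding", &forwarding };

	handle_write(&ctl, 1);
	printf("forwarding = %d\n", forwarding);	/* 1 */
	return 0;
}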
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 906c7ca4354..3edd05ae438 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1083,7 +1083,7 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
 
 #ifdef CONFIG_TCP_MD5SIG
 	if (sk)
-		key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
+		key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr);
 #endif
 
 	if (th->ack)
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index d21e7ebd91c..55670ec3cd0 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -393,11 +393,6 @@ static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb)
 {
 	int rc;
 
-	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
-		goto drop;
-
-	nf_reset(skb);
-
 	/* Charge it to the socket, dropping if the queue is full. */
 	rc = sock_queue_rcv_skb(sk, skb);
 	if (rc < 0)
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index a18e6c3d36e..b9bef2c7502 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -713,6 +713,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
 	struct sk_buff *skb = NULL;
 	struct sock *sk = sock->sk;
 	struct llc_sock *llc = llc_sk(sk);
+	unsigned long cpu_flags;
 	size_t copied = 0;
 	u32 peek_seq = 0;
 	u32 *seq;
@@ -838,7 +839,9 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
 		goto copy_uaddr;
 
 	if (!(flags & MSG_PEEK)) {
+		spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
 		sk_eat_skb(sk, skb, 0);
+		spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
 		*seq = 0;
 	}
 
@@ -859,7 +862,9 @@ copy_uaddr:
 	llc_cmsg_rcv(msg, skb);
 
 	if (!(flags & MSG_PEEK)) {
+		spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
 		sk_eat_skb(sk, skb, 0);
+		spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
 		*seq = 0;
 	}
 
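
sk_eat_skb() unlinks through __skb_unlink(), which requires the caller to hold the queue lock; LLC enqueues received frames from softirq context, so the unlocked unlink could race with a concurrent enqueue and corrupt sk_receive_queue. A sketch of the two safe idioms (illustrative helper, not from the patch):

/* Either use the locked helper, or take the queue lock yourself
 * around the lockless primitives, as the fix above now does.
 */
static struct sk_buff *take_one(struct sock *sk)
{
	struct sk_buff *skb;
	unsigned long flags;

	/* skb_dequeue() takes sk_receive_queue.lock internally. */
	skb = skb_dequeue(&sk->sk_receive_queue);
	if (skb)
		return skb;

	/* __skb_unlink() (used by sk_eat_skb()) does not. */
	spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
	skb = skb_peek(&sk->sk_receive_queue);
	if (skb)
		__skb_unlink(skb, &sk->sk_receive_queue);
	spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
	return skb;
}
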
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index 38e6101190d..59edcd95a58 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -225,9 +225,9 @@ KEY_OPS(key);
 			    key, &key_##name##_ops);
 
 void ieee80211_debugfs_key_add(struct ieee80211_key *key)
- {
+{
 	static int keycount;
-	char buf[50];
+	char buf[100];
 	struct sta_info *sta;
 
 	if (!key->local->debugfs.keys)
@@ -244,7 +244,8 @@ void ieee80211_debugfs_key_add(struct ieee80211_key *key)
 
 	sta = key->sta;
 	if (sta) {
-		sprintf(buf, "../../stations/%pM", sta->sta.addr);
+		sprintf(buf, "../../netdev:%s/stations/%pM",
+			sta->sdata->name, sta->sta.addr);
 		key->debugfs.stalink =
 			debugfs_create_symlink("station", key->debugfs.dir, buf);
 	}
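
Station directories live under the per-interface netdev:<ifname> debugfs directory, so the relative symlink from the key directory needs the extra path component and a buffer large enough to hold an interface name. Since buf is fixed-size, a bounds-checked variant of the same construction would be (a sketch of the statement inside ieee80211_debugfs_key_add(); %pM is the kernel's MAC-address format specifier):

	snprintf(buf, sizeof(buf), "../../netdev:%s/stations/%pM",
		 sta->sdata->name, sta->sta.addr);
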
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index b3d76b756cd..a4643969a13 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -106,6 +106,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
 
 	sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0;
 
+	local->oper_channel = chan;
 	channel_type = ifibss->channel_type;
 	if (channel_type > NL80211_CHAN_HT20 &&
 	    !cfg80211_can_beacon_sec_chan(local->hw.wiphy, chan, channel_type))
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index e47768cb8cb..01a21c2f6ab 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1314,6 +1314,7 @@ u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
 			continue;
 		}
 		/* count everything else */
+		sdata->vif.bss_conf.idle = false;
 		count++;
 	}
 
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 0a0d94ad9b0..b142bd4c239 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -910,6 +910,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 		wiphy_debug(local->hw.wiphy, "Failed to initialize wep: %d\n",
 			    result);
 
+	ieee80211_led_init(local);
+
 	rtnl_lock();
 
 	result = ieee80211_init_rate_ctrl_alg(local,
@@ -931,8 +933,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 
 	rtnl_unlock();
 
-	ieee80211_led_init(local);
-
 	local->network_latency_notifier.notifier_call =
 		ieee80211_max_network_latency;
 	result = pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY,
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 73abb7524b2..54df1b2bafd 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -119,12 +119,12 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
 	int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.mesh_action) +
 		      sizeof(mgmt->u.action.u.mesh_action);
 
-	skb = dev_alloc_skb(local->hw.extra_tx_headroom +
+	skb = dev_alloc_skb(local->tx_headroom +
 			    hdr_len +
 			    2 + 37); /* max HWMP IE */
 	if (!skb)
 		return -1;
-	skb_reserve(skb, local->hw.extra_tx_headroom);
+	skb_reserve(skb, local->tx_headroom);
 	mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
 	memset(mgmt, 0, hdr_len);
 	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
@@ -250,12 +250,12 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
 	if (time_before(jiffies, ifmsh->next_perr))
 		return -EAGAIN;
 
-	skb = dev_alloc_skb(local->hw.extra_tx_headroom +
+	skb = dev_alloc_skb(local->tx_headroom +
 			    hdr_len +
 			    2 + 15 /* PERR IE */);
 	if (!skb)
 		return -1;
-	skb_reserve(skb, local->tx_headroom + local->hw.extra_tx_headroom);
+	skb_reserve(skb, local->tx_headroom);
 	mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
 	memset(mgmt, 0, hdr_len);
 	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 41ef1b47644..a17251730b9 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -172,7 +172,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
 	int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.self_prot) +
 		      sizeof(mgmt->u.action.u.self_prot);
 
-	skb = dev_alloc_skb(local->hw.extra_tx_headroom +
+	skb = dev_alloc_skb(local->tx_headroom +
 			    hdr_len +
 			    2 + /* capability info */
 			    2 + /* AID */
@@ -186,7 +186,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
 			    sdata->u.mesh.ie_len);
 	if (!skb)
 		return -1;
-	skb_reserve(skb, local->hw.extra_tx_headroom);
+	skb_reserve(skb, local->tx_headroom);
 	mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
 	memset(mgmt, 0, hdr_len);
 	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
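
All three mesh frame builders now allocate and reserve the same headroom value. local->tx_headroom is mac80211's upper bound and, as far as I can tell, already accounts for the driver's hw.extra_tx_headroom, so mixing the two values could under-reserve, and the old PERR path, which reserved the sum after allocating only one term of it, left too little room for the frame itself. The invariant, sketched (max_ies_len stands in for the per-frame IE budget):

	skb = dev_alloc_skb(local->tx_headroom + hdr_len + max_ies_len);
	if (!skb)
		return -1;
	skb_reserve(skb, local->tx_headroom);	/* must match the allocation */
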
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index ecb4c84c1bb..295be92f7c7 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -2750,7 +2750,6 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
-	struct ieee80211_work *wk;
 	u8 bssid[ETH_ALEN];
 	bool assoc_bss = false;
 
@@ -2763,30 +2762,47 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
 		assoc_bss = true;
 	} else {
 		bool not_auth_yet = false;
+		struct ieee80211_work *tmp, *wk = NULL;
 
 		mutex_unlock(&ifmgd->mtx);
 
 		mutex_lock(&local->mtx);
-		list_for_each_entry(wk, &local->work_list, list) {
-			if (wk->sdata != sdata)
+		list_for_each_entry(tmp, &local->work_list, list) {
+			if (tmp->sdata != sdata)
 				continue;
 
-			if (wk->type != IEEE80211_WORK_DIRECT_PROBE &&
-			    wk->type != IEEE80211_WORK_AUTH &&
-			    wk->type != IEEE80211_WORK_ASSOC &&
-			    wk->type != IEEE80211_WORK_ASSOC_BEACON_WAIT)
+			if (tmp->type != IEEE80211_WORK_DIRECT_PROBE &&
+			    tmp->type != IEEE80211_WORK_AUTH &&
+			    tmp->type != IEEE80211_WORK_ASSOC &&
+			    tmp->type != IEEE80211_WORK_ASSOC_BEACON_WAIT)
 				continue;
 
-			if (memcmp(req->bss->bssid, wk->filter_ta, ETH_ALEN))
+			if (memcmp(req->bss->bssid, tmp->filter_ta, ETH_ALEN))
 				continue;
 
-			not_auth_yet = wk->type == IEEE80211_WORK_DIRECT_PROBE;
-			list_del_rcu(&wk->list);
-			free_work(wk);
+			not_auth_yet = tmp->type == IEEE80211_WORK_DIRECT_PROBE;
+			list_del_rcu(&tmp->list);
+			synchronize_rcu();
+			wk = tmp;
 			break;
 		}
 		mutex_unlock(&local->mtx);
 
+		if (wk && wk->type == IEEE80211_WORK_ASSOC) {
+			/* clean up dummy sta & TX sync */
+			sta_info_destroy_addr(wk->sdata, wk->filter_ta);
+			if (wk->assoc.synced)
+				drv_finish_tx_sync(local, wk->sdata,
+						   wk->filter_ta,
+						   IEEE80211_TX_SYNC_ASSOC);
+		} else if (wk && wk->type == IEEE80211_WORK_AUTH) {
+			if (wk->probe_auth.synced)
+				drv_finish_tx_sync(local, wk->sdata,
+						   wk->filter_ta,
+						   IEEE80211_TX_SYNC_AUTH);
+		}
+		kfree(wk);
+
 		/*
 		 * If somebody requests authentication and we haven't
 		 * sent out an auth frame yet there's no need to send
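
The old code called free_work() on the entry while still walking the list; free_work() defers the free with RCU (an assumption, based on its pairing with list_del_rcu() here), so the work item could not be touched afterwards. The deauth path now needs exactly that, to tear down the dummy station and TX-sync state, so it unlinks with list_del_rcu(), waits out readers with synchronize_rcu(), and only then uses and kfree()s the entry. The shape of the ownership transfer (sketch, illustrative helper):

static struct ieee80211_work *claim_work(struct ieee80211_local *local,
					 struct ieee80211_work *tmp)
{
	mutex_lock(&local->mtx);
	list_del_rcu(&tmp->list);	/* RCU readers may still see it */
	synchronize_rcu();		/* sleeps; legal under a mutex */
	mutex_unlock(&local->mtx);
	return tmp;			/* exclusively owned; plain kfree() is safe */
}
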
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 75140912076..5a5e504a8ff 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -611,7 +611,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
 	index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
 						tid_agg_rx->buf_size;
 	if (!tid_agg_rx->reorder_buf[index] &&
-	    tid_agg_rx->stored_mpdu_num > 1) {
+	    tid_agg_rx->stored_mpdu_num) {
 		/*
 		 * No buffers ready to be released, but check whether any
 		 * frames in the reorder buffer have timed out.
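
With exactly one MPDU stored behind a hole, the old stored_mpdu_num > 1 test skipped this branch, so the timeout scan never ran and that lone frame could sit in the reorder buffer until further traffic arrived; any nonzero count of stored frames should trigger the check.
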
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index bb6ad81b671..424ff622ab5 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -68,7 +68,6 @@ static int rds_release(struct socket *sock)
 {
 	struct sock *sk = sock->sk;
 	struct rds_sock *rs;
-	unsigned long flags;
 
 	if (!sk)
 		goto out;
@@ -94,10 +93,10 @@ static int rds_release(struct socket *sock)
 	rds_rdma_drop_keys(rs);
 	rds_notify_queue_get(rs, NULL);
 
-	spin_lock_irqsave(&rds_sock_lock, flags);
+	spin_lock_bh(&rds_sock_lock);
 	list_del_init(&rs->rs_item);
 	rds_sock_count--;
-	spin_unlock_irqrestore(&rds_sock_lock, flags);
+	spin_unlock_bh(&rds_sock_lock);
 
 	rds_trans_put(rs->rs_transport);
 
@@ -409,7 +408,6 @@ static const struct proto_ops rds_proto_ops = {
 
 static int __rds_create(struct socket *sock, struct sock *sk, int protocol)
 {
-	unsigned long flags;
 	struct rds_sock *rs;
 
 	sock_init_data(sock, sk);
@@ -426,10 +424,10 @@ static int __rds_create(struct socket *sock, struct sock *sk, int protocol)
 	spin_lock_init(&rs->rs_rdma_lock);
 	rs->rs_rdma_keys = RB_ROOT;
 
-	spin_lock_irqsave(&rds_sock_lock, flags);
+	spin_lock_bh(&rds_sock_lock);
 	list_add_tail(&rs->rs_item, &rds_sock_list);
 	rds_sock_count++;
-	spin_unlock_irqrestore(&rds_sock_lock, flags);
+	spin_unlock_bh(&rds_sock_lock);
 
 	return 0;
 }
@@ -471,12 +469,11 @@ static void rds_sock_inc_info(struct socket *sock, unsigned int len,
 {
 	struct rds_sock *rs;
 	struct rds_incoming *inc;
-	unsigned long flags;
 	unsigned int total = 0;
 
 	len /= sizeof(struct rds_info_message);
 
-	spin_lock_irqsave(&rds_sock_lock, flags);
+	spin_lock_bh(&rds_sock_lock);
 
 	list_for_each_entry(rs, &rds_sock_list, rs_item) {
 		read_lock(&rs->rs_recv_lock);
@@ -492,7 +489,7 @@ static void rds_sock_inc_info(struct socket *sock, unsigned int len,
 		read_unlock(&rs->rs_recv_lock);
 	}
 
-	spin_unlock_irqrestore(&rds_sock_lock, flags);
+	spin_unlock_bh(&rds_sock_lock);
 
 	lens->nr = total;
 	lens->each = sizeof(struct rds_info_message);
@@ -504,11 +501,10 @@ static void rds_sock_info(struct socket *sock, unsigned int len,
 {
 	struct rds_info_socket sinfo;
 	struct rds_sock *rs;
-	unsigned long flags;
 
 	len /= sizeof(struct rds_info_socket);
 
-	spin_lock_irqsave(&rds_sock_lock, flags);
+	spin_lock_bh(&rds_sock_lock);
 
 	if (len < rds_sock_count)
 		goto out;
@@ -529,7 +525,7 @@ out:
 	lens->nr = rds_sock_count;
 	lens->each = sizeof(struct rds_info_socket);
 
-	spin_unlock_irqrestore(&rds_sock_lock, flags);
+	spin_unlock_bh(&rds_sock_lock);
 }
 
 static void rds_exit(void)
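
rds_sock_lock is apparently never taken from hard-IRQ context, so disabling bottom halves is enough to exclude the softirq users; the _bh variants also drop the flags plumbing from all four call sites, which is why the unsigned long flags locals disappear above.
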
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
index 4cba13e46ff..ae3a035f539 100644
--- a/net/rxrpc/ar-key.c
+++ b/net/rxrpc/ar-key.c
@@ -232,7 +232,7 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
 	if (toklen <= (n_parts + 1) * 4)
 		return -EINVAL;
 
-	princ->name_parts = kcalloc(sizeof(char *), n_parts, GFP_KERNEL);
+	princ->name_parts = kcalloc(n_parts, sizeof(char *), GFP_KERNEL);
 	if (!princ->name_parts)
 		return -ENOMEM;
 
@@ -355,7 +355,7 @@ static int rxrpc_krb5_decode_tagged_array(struct krb5_tagged_data **_td,
 
 	_debug("n_elem %d", n_elem);
 
-	td = kcalloc(sizeof(struct krb5_tagged_data), n_elem,
+	td = kcalloc(n_elem, sizeof(struct krb5_tagged_data),
 		     GFP_KERNEL);
 	if (!td)
 		return -ENOMEM;
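
Both call sites had the kcalloc() arguments swapped. The prototype puts the element count first:

void *kcalloc(size_t n, size_t size, gfp_t flags);

Because the product and its overflow check are effectively symmetric in the two operands, the swap was harmless at runtime; this is a convention fix that also keeps static checkers quiet.
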
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index e465064d39a..7e267d7b9c7 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -148,8 +148,7 @@ struct choke_skb_cb {
 
 static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
 {
-	BUILD_BUG_ON(sizeof(skb->cb) <
-		sizeof(struct qdisc_skb_cb) + sizeof(struct choke_skb_cb));
+	qdisc_cb_private_validate(skb, sizeof(struct choke_skb_cb));
 	return (struct choke_skb_cb *)qdisc_skb_cb(skb)->data;
 }
 
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index e7e1d0b57b3..e83d61ca78c 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -130,8 +130,7 @@ struct netem_skb_cb {
 
 static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
 {
-	BUILD_BUG_ON(sizeof(skb->cb) <
-		sizeof(struct qdisc_skb_cb) + sizeof(struct netem_skb_cb));
+	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
 	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
 }
 
@@ -419,7 +418,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	cb = netem_skb_cb(skb);
 	if (q->gap == 0 ||		/* not doing reordering */
-	    q->counter < q->gap ||	/* inside last reordering gap */
+	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
 	    q->reorder < get_crandom(&q->reorder_cor)) {
 		psched_time_t now;
 		psched_tdiff_t delay;
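
The gap parameter is meant to reorder every gap-th packet, but since q->counter counts from zero the old < q->gap test let one extra packet through, so gap = 5 reordered every 6th packet; comparing against q->gap - 1 restores the documented behaviour.
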
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index 96e42cae4c7..d7eea99333e 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -94,8 +94,7 @@ struct sfb_skb_cb {
 
 static inline struct sfb_skb_cb *sfb_skb_cb(const struct sk_buff *skb)
 {
-	BUILD_BUG_ON(sizeof(skb->cb) <
-		sizeof(struct qdisc_skb_cb) + sizeof(struct sfb_skb_cb));
+	qdisc_cb_private_validate(skb, sizeof(struct sfb_skb_cb));
 	return (struct sfb_skb_cb *)qdisc_skb_cb(skb)->data;
 }
 
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 67494aef9ac..60d47180f04 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -166,9 +166,8 @@ struct sfq_skb_cb {
 
 static inline struct sfq_skb_cb *sfq_skb_cb(const struct sk_buff *skb)
 {
-	BUILD_BUG_ON(sizeof(skb->cb) <
-		sizeof(struct qdisc_skb_cb) + sizeof(struct sfq_skb_cb));
-	return (struct sfq_skb_cb *)qdisc_skb_cb(skb)->data;
+	qdisc_cb_private_validate(skb, sizeof(struct sfq_skb_cb));
+	return (struct sfq_skb_cb *)qdisc_skb_cb(skb)->data;
 }
 
 static unsigned int sfq_hash(const struct sfq_sched_data *q,
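
The helper itself is introduced outside this diff (in include/net/sch_generic.h). Judging from the four identical open-coded checks it replaces, a plausible reconstruction, offered here only as a sketch, is:

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct qdisc_skb_cb) + sz);
}

Centralizing the BUILD_BUG_ON means the bound between skb->cb, struct qdisc_skb_cb and each qdisc's private cb area is asserted in one place instead of being restated per scheduler.
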
diff --git a/net/sunrpc/auth_generic.c b/net/sunrpc/auth_generic.c
index 1426ec3d0a5..75762f34697 100644
--- a/net/sunrpc/auth_generic.c
+++ b/net/sunrpc/auth_generic.c
@@ -92,6 +92,7 @@ generic_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
 	if (gcred->acred.group_info != NULL)
 		get_group_info(gcred->acred.group_info);
 	gcred->acred.machine_cred = acred->machine_cred;
+	gcred->acred.principal = acred->principal;
 
 	dprintk("RPC: allocated %s cred %p for uid %d gid %d\n",
 		gcred->acred.machine_cred ? "machine" : "generic",
@@ -123,6 +124,17 @@ generic_destroy_cred(struct rpc_cred *cred)
 	call_rcu(&cred->cr_rcu, generic_free_cred_callback);
 }
 
+static int
+machine_cred_match(struct auth_cred *acred, struct generic_cred *gcred, int flags)
+{
+	if (!gcred->acred.machine_cred ||
+	    gcred->acred.principal != acred->principal ||
+	    gcred->acred.uid != acred->uid ||
+	    gcred->acred.gid != acred->gid)
+		return 0;
+	return 1;
+}
+
 /*
  * Match credentials against current process creds.
  */
@@ -132,9 +144,12 @@ generic_match(struct auth_cred *acred, struct rpc_cred *cred, int flags)
 	struct generic_cred *gcred = container_of(cred, struct generic_cred, gc_base);
 	int i;
 
+	if (acred->machine_cred)
+		return machine_cred_match(acred, gcred, flags);
+
 	if (gcred->acred.uid != acred->uid ||
 	    gcred->acred.gid != acred->gid ||
-	    gcred->acred.machine_cred != acred->machine_cred)
+	    gcred->acred.machine_cred != 0)
 		goto out_nomatch;
 
 	/* Optimisation in the case where pointers are identical... */
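
Machine credentials now carry the principal and are matched on it: a machine-cred lookup must agree on principal, uid and gid, while a non-machine lookup can no longer match a cached machine cred (the machine_cred != 0 test), so the two populations stay disjoint in the credential cache.
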
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index aad8fb69998..85d3bb7490a 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1918,7 +1918,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
 		struct sk_buff *skb;
 
 		unix_state_lock(sk);
-		skb = skb_dequeue(&sk->sk_receive_queue);
+		skb = skb_peek(&sk->sk_receive_queue);
 		if (skb == NULL) {
 			unix_sk(sk)->recursion_level = 0;
 			if (copied >= target)
@@ -1958,11 +1958,8 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
 		if (check_creds) {
 			/* Never glue messages from different writers */
 			if ((UNIXCB(skb).pid != siocb->scm->pid) ||
-			    (UNIXCB(skb).cred != siocb->scm->cred)) {
-				skb_queue_head(&sk->sk_receive_queue, skb);
-				sk->sk_data_ready(sk, skb->len);
+			    (UNIXCB(skb).cred != siocb->scm->cred))
 				break;
-			}
 		} else {
 			/* Copy credentials */
 			scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
@@ -1977,8 +1974,6 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
 
 		chunk = min_t(unsigned int, skb->len, size);
 		if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
-			skb_queue_head(&sk->sk_receive_queue, skb);
-			sk->sk_data_ready(sk, skb->len);
 			if (copied == 0)
 				copied = -EFAULT;
 			break;
@@ -1993,13 +1988,10 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
 			if (UNIXCB(skb).fp)
 				unix_detach_fds(siocb->scm, skb);
 
-			/* put the skb back if we didn't use it up.. */
-			if (skb->len) {
-				skb_queue_head(&sk->sk_receive_queue, skb);
-				sk->sk_data_ready(sk, skb->len);
+			if (skb->len)
 				break;
-			}
 
+			skb_unlink(skb, &sk->sk_receive_queue);
 			consume_skb(skb);
 
 			if (siocb->scm->fp)
@@ -2010,9 +2002,6 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
 			if (UNIXCB(skb).fp)
 				siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
 
-			/* put message back and return */
-			skb_queue_head(&sk->sk_receive_queue, skb);
-			sk->sk_data_ready(sk, skb->len);
 			break;
 		}
 	} while (size);
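
Peeking instead of dequeueing keeps the skb on sk_receive_queue while its data is copied out, so the EFAULT, credential-mismatch and partial-read paths can simply break with the data still queued, rather than requeueing the skb and re-raising sk_data_ready(); the skb is unlinked and consumed only once fully used up. skb_unlink() takes the queue lock internally, so the late unlink is safe.
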