Diffstat (limited to 'net')
 net/caif/caif_socket.c     | 10
 net/caif/cfmuxl.c          | 12
 net/ceph/ceph_common.c     |  2
 net/ceph/mon_client.c      | 13
 net/core/dev.c             | 10
 net/core/ethtool.c         |  2
 net/core/netpoll.c         |  2
 net/core/netprio_cgroup.c  | 15
 net/core/sock.c            |  7
 net/ipv4/Kconfig           |  2
 net/ipv4/arp.c             |  3
 net/ipv4/ip_options.c      |  2
 net/ipv4/sysctl_net_ipv4.c |  6
 net/ipv4/tcp.c             | 23
 net/ipv4/tcp_input.c       | 45
 net/ipv4/tcp_ipv4.c        |  5
 net/ipv4/tcp_timer.c       |  5
 net/mac80211/main.c        |  4
 net/mac80211/rx.c          |  2
 net/rxrpc/ar-key.c         |  4
 net/sched/sch_choke.c      |  3
 net/sched/sch_netem.c      |  3
 net/sched/sch_sfb.c        |  3
 net/sched/sch_sfq.c        |  5
 24 files changed, 105 insertions(+), 83 deletions(-)
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index a98628086452..a97d97a3a512 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -539,8 +539,10 @@ static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk,
 	pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb);
 	memset(skb->cb, 0, sizeof(struct caif_payload_info));
 
-	if (cf_sk->layer.dn == NULL)
+	if (cf_sk->layer.dn == NULL) {
+		kfree_skb(skb);
 		return -EINVAL;
+	}
 
 	return cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt);
 }
@@ -683,10 +685,10 @@ static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
 		}
 		err = transmit_skb(skb, cf_sk,
 				msg->msg_flags&MSG_DONTWAIT, timeo);
-		if (err < 0) {
-			kfree_skb(skb);
+		if (err < 0)
+			/* skb is already freed */
 			goto pipe_err;
-		}
+
 		sent += size;
 	}
 
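
Taken together, these two hunks move ownership of the skb into transmit_skb(): it now frees the skb itself on the error path, so the caller's kfree_skb() is dropped to avoid a double free. A minimal sketch of that "callee consumes on every path" convention (illustrative only; ready_to_send() and do_transmit() are hypothetical stand-ins, not kernel functions):

/* Sketch: the callee consumes the skb on every path, so callers
 * must never free it after a failed call.
 */
static int transmit_consumes(struct sk_buff *skb)
{
        if (!ready_to_send()) {         /* hypothetical readiness check */
                kfree_skb(skb);         /* callee frees on error */
                return -EINVAL;
        }
        return do_transmit(skb);        /* hypothetical send; consumes skb */
}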
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
index b36f24a4c8e7..94b08612a4d8 100644
--- a/net/caif/cfmuxl.c
+++ b/net/caif/cfmuxl.c
@@ -248,7 +248,6 @@ static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
 {
 	struct cfmuxl *muxl = container_obj(layr);
 	struct cflayer *layer;
-	int idx;
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(layer, &muxl->srvl_list, node) {
@@ -257,14 +256,9 @@ static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
 
 		if ((ctrl == _CAIF_CTRLCMD_PHYIF_DOWN_IND ||
 		     ctrl == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND) &&
-		    layer->id != 0) {
-
-			idx = layer->id % UP_CACHE_SIZE;
-			spin_lock_bh(&muxl->receive_lock);
-			RCU_INIT_POINTER(muxl->up_cache[idx], NULL);
-			list_del_rcu(&layer->node);
-			spin_unlock_bh(&muxl->receive_lock);
-		}
+		    layer->id != 0)
+			cfmuxl_remove_uplayer(layr, layer->id);
+
 		/* NOTE: ctrlcmd is not allowed to block */
 		layer->ctrlcmd(layer, ctrl, phyid);
 	}
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 97f70e50ad3b..761ad9d6cc3b 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -85,8 +85,6 @@ int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid)
 	} else {
 		pr_info("client%lld fsid %pU\n", ceph_client_id(client), fsid);
 		memcpy(&client->fsid, fsid, sizeof(*fsid));
-		ceph_debugfs_client_init(client);
-		client->have_fsid = true;
 	}
 	return 0;
 }
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index 0b62deae42bd..1845cde26227 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -8,8 +8,8 @@
 
 #include <linux/ceph/mon_client.h>
 #include <linux/ceph/libceph.h>
+#include <linux/ceph/debugfs.h>
 #include <linux/ceph/decode.h>
-
 #include <linux/ceph/auth.h>
 
 /*
@@ -340,8 +340,19 @@ static void ceph_monc_handle_map(struct ceph_mon_client *monc,
 	client->monc.monmap = monmap;
 	kfree(old);
 
+	if (!client->have_fsid) {
+		client->have_fsid = true;
+		mutex_unlock(&monc->mutex);
+		/*
+		 * do debugfs initialization without mutex to avoid
+		 * creating a locking dependency
+		 */
+		ceph_debugfs_client_init(client);
+		goto out_unlocked;
+	}
 out:
 	mutex_unlock(&monc->mutex);
+out_unlocked:
 	wake_up_all(&client->auth_wq);
 }
 
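
This pairs with the net/ceph/ceph_common.c hunk above: setting have_fsid and initializing debugfs move into ceph_monc_handle_map(), and ceph_debugfs_client_init() now runs after monc->mutex is dropped, so no ordering is established between that mutex and whatever locks debugfs takes internally. A minimal userspace analogue of the "flip the flag under the lock, call out after dropping it" pattern (a sketch assuming POSIX threads, not the kernel code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static int initialized;

static void subsys_init(void)
{
        /* may take the subsystem's own locks internally */
        puts("one-time init done");
}

int main(void)
{
        int do_init = 0;

        pthread_mutex_lock(&state_lock);
        if (!initialized) {
                initialized = 1;        /* claim the one-time init under the lock */
                do_init = 1;
        }
        pthread_mutex_unlock(&state_lock);      /* drop the lock first... */

        if (do_init)
                subsys_init();          /* ...then call out with no locks held */
        return 0;
}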
diff --git a/net/core/dev.c b/net/core/dev.c
index 115dee1d985d..6ca32f6b3105 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3500,14 +3500,20 @@ static inline gro_result_t
 __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
 	struct sk_buff *p;
+	unsigned int maclen = skb->dev->hard_header_len;
 
 	for (p = napi->gro_list; p; p = p->next) {
 		unsigned long diffs;
 
 		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
 		diffs |= p->vlan_tci ^ skb->vlan_tci;
-		diffs |= compare_ether_header(skb_mac_header(p),
-					      skb_gro_mac_header(skb));
+		if (maclen == ETH_HLEN)
+			diffs |= compare_ether_header(skb_mac_header(p),
+						      skb_gro_mac_header(skb));
+		else if (!diffs)
+			diffs = memcmp(skb_mac_header(p),
+				       skb_gro_mac_header(skb),
+				       maclen);
 		NAPI_GRO_CB(p)->same_flow = !diffs;
 		NAPI_GRO_CB(p)->flush = 0;
 	}
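
compare_ether_header() only compares the 14-byte Ethernet header, so on devices whose hard_header_len is larger GRO could previously treat packets from different flows as the same flow. The fix keeps the fast path for plain Ethernet (maclen == ETH_HLEN) and otherwise falls back to a full memcmp() over hard_header_len; the "else if (!diffs)" guard skips that memcmp when a cheaper check has already found a difference.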
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 369b41894527..3f79db1b612a 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1190,6 +1190,8 @@ static noinline_for_stack int ethtool_flash_device(struct net_device *dev,
 	if (!dev->ethtool_ops->flash_device)
 		return -EOPNOTSUPP;
 
+	efl.data[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0;
+
 	return dev->ethtool_ops->flash_device(dev, &efl);
 }
 
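
efl.data is a fixed-size filename buffer copied in from userspace, and nothing guarantees it contains a NUL terminator, so a driver treating it as a C string could read past the end of the buffer. Writing 0 into the last byte caps the string. A runnable userspace illustration of the same defensive pattern (the buffer and names here are mine, not the kernel's):

#include <stdio.h>
#include <string.h>

int main(void)
{
        char fname[32];

        memset(fname, 'A', sizeof(fname));      /* worst case: no terminator */
        fname[sizeof(fname) - 1] = '\0';        /* the fix: force termination */
        printf("%s\n", fname);                  /* now safe to use as a C string */
        return 0;
}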
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 556b08298669..ddefc513b44a 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -194,7 +194,7 @@ static void netpoll_poll_dev(struct net_device *dev)
 
 	poll_napi(dev);
 
-	if (dev->priv_flags & IFF_SLAVE) {
+	if (dev->flags & IFF_SLAVE) {
 		if (dev->npinfo) {
 			struct net_device *bond_dev = dev->master;
 			struct sk_buff *skb;
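
IFF_SLAVE is one of the classic interface flags kept in dev->flags (the same word reported to userspace), not a priv_flags bit, so the old test examined the wrong field: it could trigger on unrelated devices and miss real bonding slaves.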
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index 3a9fd4826b75..4dacc44637ef 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -58,11 +58,12 @@ static int get_prioidx(u32 *prio)
 
 	spin_lock_irqsave(&prioidx_map_lock, flags);
 	prioidx = find_first_zero_bit(prioidx_map, sizeof(unsigned long) * PRIOIDX_SZ);
+	if (prioidx == sizeof(unsigned long) * PRIOIDX_SZ) {
+		spin_unlock_irqrestore(&prioidx_map_lock, flags);
+		return -ENOSPC;
+	}
 	set_bit(prioidx, prioidx_map);
 	spin_unlock_irqrestore(&prioidx_map_lock, flags);
-	if (prioidx == sizeof(unsigned long) * PRIOIDX_SZ)
-		return -ENOSPC;
-
 	atomic_set(&max_prioidx, prioidx);
 	*prio = prioidx;
 	return 0;
@@ -107,7 +108,7 @@ static void extend_netdev_table(struct net_device *dev, u32 new_len)
 static void update_netdev_tables(void)
 {
 	struct net_device *dev;
-	u32 max_len = atomic_read(&max_prioidx);
+	u32 max_len = atomic_read(&max_prioidx) + 1;
 	struct netprio_map *map;
 
 	rtnl_lock();
@@ -270,7 +271,6 @@ static int netprio_device_event(struct notifier_block *unused,
 {
 	struct net_device *dev = ptr;
 	struct netprio_map *old;
-	u32 max_len = atomic_read(&max_prioidx);
 
 	/*
 	 * Note this is called with rtnl_lock held so we have update side
@@ -278,11 +278,6 @@ static int netprio_device_event(struct notifier_block *unused,
 	 */
 
 	switch (event) {
-
-	case NETDEV_REGISTER:
-		if (max_len)
-			extend_netdev_table(dev, max_len);
-		break;
 	case NETDEV_UNREGISTER:
 		old = rtnl_dereference(dev->priomap);
 		RCU_INIT_POINTER(dev->priomap, NULL);
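
Three fixes in one file. In get_prioidx(), find_first_zero_bit() returns the bitmap size when every bit is set, so the old code passed an out-of-range index to set_bit() before noticing the map was full; the check now happens under the lock, before the bit is set. In update_netdev_tables(), max_prioidx is an index, so a table covering it needs max_prioidx + 1 entries. And the NETDEV_REGISTER branch of the notifier is dropped along with its max_len snapshot. A runnable model of the "returns the size when full" contract (first_zero() is a stand-in for find_first_zero_bit(), not the kernel helper):

#include <stdio.h>

#define MAP_BITS 8

/* Returns the first clear bit, or MAP_BITS when the map is full --
 * the same contract as find_first_zero_bit().
 */
static unsigned int first_zero(unsigned char map)
{
        for (unsigned int i = 0; i < MAP_BITS; i++)
                if (!(map & (1u << i)))
                        return i;
        return MAP_BITS;
}

int main(void)
{
        unsigned int idx = first_zero(0xFF);    /* a full map */

        if (idx == MAP_BITS) {                  /* check before using idx */
                puts("map full: would return -ENOSPC");
                return 1;
        }
        printf("allocated index %u\n", idx);
        return 0;
}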
diff --git a/net/core/sock.c b/net/core/sock.c
index 3e81fd2e3c75..02f8dfe320b7 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1171,13 +1171,10 @@ EXPORT_SYMBOL(sock_update_classid);
 
 void sock_update_netprioidx(struct sock *sk)
 {
-	struct cgroup_netprio_state *state;
 	if (in_interrupt())
 		return;
-	rcu_read_lock();
-	state = task_netprio_state(current);
-	sk->sk_cgrp_prioidx = state ? state->prioidx : 0;
-	rcu_read_unlock();
+
+	sk->sk_cgrp_prioidx = task_netprioidx(current);
 }
 EXPORT_SYMBOL_GPL(sock_update_netprioidx);
 #endif
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index aa2a2c79776f..d183262943d9 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -409,7 +409,7 @@ config INET_TCP_DIAG
 
 config INET_UDP_DIAG
 	tristate "UDP: socket monitoring interface"
-	depends on INET_DIAG
+	depends on INET_DIAG && (IPV6 || IPV6=n)
 	default n
 	---help---
 	  Support for UDP socket monitoring interface used by the ss tool.
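
The "(IPV6 || IPV6=n)" clause is the usual Kconfig idiom for an optional dependency: it forbids INET_UDP_DIAG=y while IPV6=m (a built-in object cannot call symbols exported by a module) but still allows the diag code when IPV6 is built in or disabled entirely.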
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 59402be133f0..63e49890ad31 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -863,7 +863,8 @@ static int arp_process(struct sk_buff *skb)
 	if (addr_type == RTN_UNICAST &&
 	    (arp_fwd_proxy(in_dev, dev, rt) ||
 	     arp_fwd_pvlan(in_dev, dev, rt, sip, tip) ||
-	     pneigh_lookup(&arp_tbl, net, &tip, dev, 0))) {
+	     (rt->dst.dev != dev &&
+	      pneigh_lookup(&arp_tbl, net, &tip, dev, 0)))) {
 		n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
 		if (n)
 			neigh_release(n);
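
With this change a proxy-ARP entry from pneigh_lookup() only produces a reply when the route to the target leaves through a different device than the one the request arrived on; replying when rt->dst.dev == dev would advertise our MAC for a host reachable on the requester's own segment.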
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 1e60f7679075..42dd1a90edea 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -573,8 +573,8 @@ void ip_forward_options(struct sk_buff *skb)
 	}
 	if (srrptr + 3 <= srrspace) {
 		opt->is_changed = 1;
-		ip_rt_get_source(&optptr[srrptr-1], skb, rt);
 		ip_hdr(skb)->daddr = opt->nexthop;
+		ip_rt_get_source(&optptr[srrptr-1], skb, rt);
 		optptr[2] = srrptr+4;
 	} else if (net_ratelimit())
 		printk(KERN_CRIT "ip_forward(): Argh! Destination lost!\n");
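
The source address that ip_rt_get_source() writes back into the SRR option depends on a route lookup keyed by the packet's current destination, so ip_hdr(skb)->daddr has to be updated to the next hop before the call; the two statements are swapped to get that ordering right.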
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 4cb9cd2f2c39..7a7724da9bff 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -778,7 +778,6 @@ EXPORT_SYMBOL_GPL(net_ipv4_ctl_path);
 static __net_init int ipv4_sysctl_init_net(struct net *net)
 {
 	struct ctl_table *table;
-	unsigned long limit;
 
 	table = ipv4_net_table;
 	if (!net_eq(net, &init_net)) {
@@ -815,11 +814,6 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
 	net->ipv4.sysctl_rt_cache_rebuild_count = 4;
 
 	tcp_init_mem(net);
-	limit = nr_free_buffer_pages() / 8;
-	limit = max(limit, 128UL);
-	net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3;
-	net->ipv4.sysctl_tcp_mem[1] = limit;
-	net->ipv4.sysctl_tcp_mem[2] = net->ipv4.sysctl_tcp_mem[0] * 2;
 
 	net->ipv4.ipv4_hdr = register_net_sysctl_table(net,
 			net_ipv4_ctl_path, table);
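
The removed block was a duplicate: tcp_init_mem(net), called one line earlier, performs exactly this sysctl_tcp_mem setup (its body is visible as context in the net/ipv4/tcp.c diff below).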
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 06373b4a449a..37755ccc0e96 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1876,6 +1876,20 @@ void tcp_shutdown(struct sock *sk, int how)
 }
 EXPORT_SYMBOL(tcp_shutdown);
 
+bool tcp_check_oom(struct sock *sk, int shift)
+{
+	bool too_many_orphans, out_of_socket_memory;
+
+	too_many_orphans = tcp_too_many_orphans(sk, shift);
+	out_of_socket_memory = tcp_out_of_memory(sk);
+
+	if (too_many_orphans && net_ratelimit())
+		pr_info("TCP: too many orphaned sockets\n");
+	if (out_of_socket_memory && net_ratelimit())
+		pr_info("TCP: out of memory -- consider tuning tcp_mem\n");
+	return too_many_orphans || out_of_socket_memory;
+}
+
 void tcp_close(struct sock *sk, long timeout)
 {
 	struct sk_buff *skb;
@@ -2015,10 +2029,7 @@ adjudge_to_death:
 	}
 	if (sk->sk_state != TCP_CLOSE) {
 		sk_mem_reclaim(sk);
-		if (tcp_too_many_orphans(sk, 0)) {
-			if (net_ratelimit())
-				printk(KERN_INFO "TCP: too many of orphaned "
-				       "sockets\n");
+		if (tcp_check_oom(sk, 0)) {
 			tcp_set_state(sk, TCP_CLOSE);
 			tcp_send_active_reset(sk, GFP_ATOMIC);
 			NET_INC_STATS_BH(sock_net(sk),
@@ -3218,7 +3229,6 @@ __setup("thash_entries=", set_thash_entries);
 
 void tcp_init_mem(struct net *net)
 {
-	/* Set per-socket limits to no more than 1/128 the pressure threshold */
 	unsigned long limit = nr_free_buffer_pages() / 8;
 	limit = max(limit, 128UL);
 	net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3;
@@ -3287,7 +3297,8 @@ void __init tcp_init(void)
 	sysctl_max_syn_backlog = max(128, cnt / 256);
 
 	tcp_init_mem(&init_net);
-	limit = nr_free_buffer_pages() / 8;
+	/* Set per-socket limits to no more than 1/128 the pressure threshold */
+	limit = nr_free_buffer_pages() << (PAGE_SHIFT - 10);
 	limit = max(limit, 128UL);
 	max_share = min(4UL*1024*1024, limit);
 
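
Two things happen in this file. First, tcp_check_oom() factors out the orphan/memory checks and their ratelimited messages for both tcp_close() here and tcp_out_of_resources() in net/ipv4/tcp_timer.c, and it now also reports when tcp_out_of_memory() is the reason. Second, in tcp_init() the per-socket limit is computed in kibibytes instead of pages: shifting a page count left by (PAGE_SHIFT - 10) multiplies by the 2^PAGE_SHIFT bytes per page and divides by the 2^10 bytes per KiB. A worked example of that conversion (assuming 4 KiB pages, i.e. PAGE_SHIFT = 12):

#include <stdio.h>

#define PAGE_SHIFT 12   /* assumption: 4 KiB pages, as on x86 */

int main(void)
{
        unsigned long pages = 1000000UL;
        unsigned long kib = pages << (PAGE_SHIFT - 10); /* pages * 4 here */

        printf("%lu pages = %lu KiB\n", pages, kib);    /* 4000000 KiB */
        return 0;
}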
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 976034f82320..53c8ce4046b2 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1307,25 +1307,26 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
 	return in_sack;
 }
 
-static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk,
-			  struct tcp_sacktag_state *state,
+/* Mark the given newly-SACKed range as such, adjusting counters and hints. */
+static u8 tcp_sacktag_one(struct sock *sk,
+			  struct tcp_sacktag_state *state, u8 sacked,
+			  u32 start_seq, u32 end_seq,
 			  int dup_sack, int pcount)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	u8 sacked = TCP_SKB_CB(skb)->sacked;
 	int fack_count = state->fack_count;
 
 	/* Account D-SACK for retransmitted packet. */
 	if (dup_sack && (sacked & TCPCB_RETRANS)) {
 		if (tp->undo_marker && tp->undo_retrans &&
-		    after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker))
+		    after(end_seq, tp->undo_marker))
 			tp->undo_retrans--;
 		if (sacked & TCPCB_SACKED_ACKED)
 			state->reord = min(fack_count, state->reord);
 	}
 
 	/* Nothing to do; acked frame is about to be dropped (was ACKed). */
-	if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
+	if (!after(end_seq, tp->snd_una))
 		return sacked;
 
 	if (!(sacked & TCPCB_SACKED_ACKED)) {
@@ -1344,13 +1345,13 @@ static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk,
 			/* New sack for not retransmitted frame,
 			 * which was in hole. It is reordering.
 			 */
-			if (before(TCP_SKB_CB(skb)->seq,
+			if (before(start_seq,
 				   tcp_highest_sack_seq(tp)))
 				state->reord = min(fack_count,
 						   state->reord);
 
 			/* SACK enhanced F-RTO (RFC4138; Appendix B) */
-			if (!after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark))
+			if (!after(end_seq, tp->frto_highmark))
 				state->flag |= FLAG_ONLY_ORIG_SACKED;
 		}
 
@@ -1368,8 +1369,7 @@ static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk,
 
 	/* Lost marker hint past SACKed? Tweak RFC3517 cnt */
 	if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) &&
-	    before(TCP_SKB_CB(skb)->seq,
-		   TCP_SKB_CB(tp->lost_skb_hint)->seq))
+	    before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq))
 		tp->lost_cnt_hint += pcount;
 
 	if (fack_count > tp->fackets_out)
@@ -1388,6 +1388,9 @@ static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk,
 	return sacked;
 }
 
+/* Shift newly-SACKed bytes from this skb to the immediately previous
+ * already-SACKed sk_buff. Mark the newly-SACKed bytes as such.
+ */
 static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 			   struct tcp_sacktag_state *state,
 			   unsigned int pcount, int shifted, int mss,
@@ -1395,10 +1398,13 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *prev = tcp_write_queue_prev(sk, skb);
+	u32 start_seq = TCP_SKB_CB(skb)->seq;	/* start of newly-SACKed */
+	u32 end_seq = start_seq + shifted;	/* end of newly-SACKed */
 
 	BUG_ON(!pcount);
 
-	if (skb == tp->lost_skb_hint)
+	/* Adjust hint for FACK. Non-FACK is handled in tcp_sacktag_one(). */
+	if (tcp_is_fack(tp) && (skb == tp->lost_skb_hint))
 		tp->lost_cnt_hint += pcount;
 
 	TCP_SKB_CB(prev)->end_seq += shifted;
@@ -1424,8 +1430,11 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 		skb_shinfo(skb)->gso_type = 0;
 	}
 
-	/* We discard results */
-	tcp_sacktag_one(skb, sk, state, dup_sack, pcount);
+	/* Adjust counters and hints for the newly sacked sequence range but
+	 * discard the return value since prev is already marked.
+	 */
+	tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
+			start_seq, end_seq, dup_sack, pcount);
 
 	/* Difference in this won't matter, both ACKed by the same cumul. ACK */
 	TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);
@@ -1664,10 +1673,14 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
 			break;
 
 		if (in_sack) {
-			TCP_SKB_CB(skb)->sacked = tcp_sacktag_one(skb, sk,
-								  state,
-								  dup_sack,
-								  tcp_skb_pcount(skb));
+			TCP_SKB_CB(skb)->sacked =
+				tcp_sacktag_one(sk,
+						state,
+						TCP_SKB_CB(skb)->sacked,
+						TCP_SKB_CB(skb)->seq,
+						TCP_SKB_CB(skb)->end_seq,
+						dup_sack,
+						tcp_skb_pcount(skb));
 
 			if (!before(TCP_SKB_CB(skb)->seq,
 				    tcp_highest_sack_seq(tp)))
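
The reason tcp_sacktag_one() now takes sacked, start_seq and end_seq explicitly is the tcp_shifted_skb() call site above: there the newly SACKed bytes have already been moved onto prev, so TCP_SKB_CB(skb) no longer describes the range being tagged. Passing the range computed before the shift lets the same function handle both whole-skb tagging (as in tcp_sacktag_walk() here) and ranges that are not aligned with skb boundaries.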
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 337ba4cca052..94d683a61cba 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -651,6 +651,11 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
 				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
 	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
+	/* When socket is gone, all binding information is lost.
+	 * routing might fail in this case. using iif for oif to
+	 * make sure we can deliver it
+	 */
+	arg.bound_dev_if = sk ? sk->sk_bound_dev_if : inet_iif(skb);
 
 	net = dev_net(skb_dst(skb)->dev);
 	arg.tos = ip_hdr(skb)->tos;
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index a516d1e399df..cd2e0723266d 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -77,10 +77,7 @@ static int tcp_out_of_resources(struct sock *sk, int do_reset)
 	if (sk->sk_err_soft)
 		shift++;
 
-	if (tcp_too_many_orphans(sk, shift)) {
-		if (net_ratelimit())
-			printk(KERN_INFO "Out of socket memory\n");
-
+	if (tcp_check_oom(sk, shift)) {
 		/* Catch exceptional cases, when connection requires reset.
 		 *      1. Last segment was sent recently. */
 		if ((s32)(tcp_time_stamp - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 0a0d94ad9b08..b142bd4c2390 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -910,6 +910,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 		wiphy_debug(local->hw.wiphy, "Failed to initialize wep: %d\n",
 			    result);
 
+	ieee80211_led_init(local);
+
 	rtnl_lock();
 
 	result = ieee80211_init_rate_ctrl_alg(local,
@@ -931,8 +933,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 
 	rtnl_unlock();
 
-	ieee80211_led_init(local);
-
 	local->network_latency_notifier.notifier_call =
 		ieee80211_max_network_latency;
 	result = pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY,
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 751409120769..5a5e504a8ffb 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -611,7 +611,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
 	index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
 						tid_agg_rx->buf_size;
 	if (!tid_agg_rx->reorder_buf[index] &&
-	    tid_agg_rx->stored_mpdu_num > 1) {
+	    tid_agg_rx->stored_mpdu_num) {
 		/*
 		 * No buffers ready to be released, but check whether any
 		 * frames in the reorder buffer have timed out.
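
With the old "> 1" test, a single MPDU sitting in the reorder buffer never reached the timeout check in this branch, so it stayed buffered until more frames arrived; testing stored_mpdu_num for non-zero lets a lone buffered frame be released once it times out.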
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
index 4cba13e46ffd..ae3a035f5390 100644
--- a/net/rxrpc/ar-key.c
+++ b/net/rxrpc/ar-key.c
@@ -232,7 +232,7 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
 	if (toklen <= (n_parts + 1) * 4)
 		return -EINVAL;
 
-	princ->name_parts = kcalloc(sizeof(char *), n_parts, GFP_KERNEL);
+	princ->name_parts = kcalloc(n_parts, sizeof(char *), GFP_KERNEL);
 	if (!princ->name_parts)
 		return -ENOMEM;
 
@@ -355,7 +355,7 @@ static int rxrpc_krb5_decode_tagged_array(struct krb5_tagged_data **_td,
 
 	_debug("n_elem %d", n_elem);
 
-	td = kcalloc(sizeof(struct krb5_tagged_data), n_elem,
+	td = kcalloc(n_elem, sizeof(struct krb5_tagged_data),
 		     GFP_KERNEL);
 	if (!td)
 		return -ENOMEM;
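
Both call sites had kcalloc()'s first two arguments swapped. The allocation happens to come out the same size either way, but the documented order is element count first, then element size -- the same as userspace calloc(3) -- which is what overflow checking and static analysis expect. For reference, a runnable userspace analogue (struct item is made up for the example):

#include <stdlib.h>

struct item {
        int tag;
        char *data;
};

int main(void)
{
        size_t n_elem = 8;
        /* count first, then per-element size, as with kcalloc() */
        struct item *td = calloc(n_elem, sizeof(*td));
        int ok = (td != NULL);

        free(td);
        return ok ? 0 : 1;
}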
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index e465064d39a3..7e267d7b9c75 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -148,8 +148,7 @@ struct choke_skb_cb {
 
 static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
 {
-	BUILD_BUG_ON(sizeof(skb->cb) <
-		sizeof(struct qdisc_skb_cb) + sizeof(struct choke_skb_cb));
+	qdisc_cb_private_validate(skb, sizeof(struct choke_skb_cb));
 	return (struct choke_skb_cb *)qdisc_skb_cb(skb)->data;
 }
 
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 2776012132ea..e83d61ca78ca 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -130,8 +130,7 @@ struct netem_skb_cb {
 
 static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
 {
-	BUILD_BUG_ON(sizeof(skb->cb) <
-		sizeof(struct qdisc_skb_cb) + sizeof(struct netem_skb_cb));
+	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
 	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
 }
 
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index 96e42cae4c7a..d7eea99333e9 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -94,8 +94,7 @@ struct sfb_skb_cb {
 
 static inline struct sfb_skb_cb *sfb_skb_cb(const struct sk_buff *skb)
 {
-	BUILD_BUG_ON(sizeof(skb->cb) <
-		sizeof(struct qdisc_skb_cb) + sizeof(struct sfb_skb_cb));
+	qdisc_cb_private_validate(skb, sizeof(struct sfb_skb_cb));
 	return (struct sfb_skb_cb *)qdisc_skb_cb(skb)->data;
 }
 
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 67494aef9acf..60d47180f043 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -166,9 +166,8 @@ struct sfq_skb_cb {
 
 static inline struct sfq_skb_cb *sfq_skb_cb(const struct sk_buff *skb)
 {
-	BUILD_BUG_ON(sizeof(skb->cb) <
-		sizeof(struct qdisc_skb_cb) + sizeof(struct sfq_skb_cb));
-	return (struct sfq_skb_cb *)qdisc_skb_cb(skb)->data;
+	qdisc_cb_private_validate(skb, sizeof(struct sfq_skb_cb));
+	return (struct sfq_skb_cb *)qdisc_skb_cb(skb)->data;
 }
 
 static unsigned int sfq_hash(const struct sfq_sched_data *q,
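
All four schedulers (choke, netem, sfb, sfq) open-coded the same BUILD_BUG_ON, so the series replaces them with one shared helper. A plausible sketch of what qdisc_cb_private_validate() checks (an assumption based on the code it replaces -- see include/net/sch_generic.h for the real definition):

/* Sketch: the qdisc cb header plus the scheduler's private area
 * must fit inside skb->cb, checked at compile time.
 */
static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
        BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct qdisc_skb_cb) + sz);
}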