author		Linus Torvalds <torvalds@linux-foundation.org>	2012-08-08 13:06:43 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-08-08 13:06:43 -0400
commit		f4ba394c1b02e7fc2179fda8d3941a5b3b65efb6 (patch)
tree		4d64ebd34792dee247983d64f6a5014124c412d1 /net
parent		bf44ce8377316071fc53a1fe07b28f99a37c4462 (diff)
parent		5d299f3d3c8a2fbc732b1bf03af36333ccec3130 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
1) Missed rcu_assign_pointer() in mac80211 scanning, from Johannes
Berg.
2) Allow devices to limit the number of segments that an individual
TCP TSO packet can use at a time, to deal with device and/or driver
specific limitations (a short driver sketch follows this list). From
Ben Hutchings.
3) Fix unexpected hard IPSEC expiration after setting the date. From
Fan Du.
4) Memory leak fix in bnx2x driver, from Jesper Juhl.
5) Fix two memory leaks in libertas driver, from Daniel Drake.
6) Fix deref of out-of-range array index in packet scheduler generic
actions layer. From Hiroaki SHIMODA.
7) Fix TX flow control errors in mlx4 driver, from Yevgeny Petrilin.
8) Fix CRIS eth_v10.c driver build, from Randy Dunlap.
9) Fix wrong SKB freeing in LLC protocol layer, from Sorin Dumitru.
10) The IP output path checks neigh lookup errors incorrectly, it needs
to use IS_ERR(). From Vasiliy Kulikov.
11) An estimator leak leads to deref of freed memory in timer handler,
fix from Hiroaki SHIMODA.
12) TCP early demux in ipv6 needs to use DST cookies in order to
validate the RX route properly. Fix from Eric Dumazet.
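For point 2, a minimal sketch of how a driver would use the new per-device
limit (the helper name and the value 100 are made up for illustration; the
field itself is initialized in the net/core/dev.c hunk below):

#include <linux/netdevice.h>

/* Hypothetical driver fragment: cap TSO packets at 100 segments. With the
 * core change below, netif_skb_features() clears the GSO feature bits for
 * any skb whose gso_segs exceeds this, so oversized packets fall back to
 * software segmentation instead of hitting the hardware limit.
 */
static void foo_limit_tso(struct net_device *dev)
{
	dev->gso_max_segs = 100;	/* assumed hardware limit, example only */
}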
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (43 commits)
net: ipv6: fix TCP early demux
net: Use PTR_RET rather than if(IS_ERR(..
net_sched: act: Delete estimator in error path.
ip: fix error handling in ip_finish_output2()
llc: free the right skb
ixp4xx_eth: fix ptp_ixp46x build failure
drivers/atm/iphase.c: fix error return code
tcp_output: fix sparse warning for tcp_wfree
drivers/net/phy/mdio-mux-gpio.c: drop devm_kfree of devm_kzalloc'd data
batman-adv: select an internet gateway if none was chosen
mISDN: Bugfix for layer2 fixed TEI mode
igb: don't break user visible strings over multiple lines in igb_ethtool.c
igb: correct hardware type (i210/i211) check in igb_loopback_test()
igb: Fix for failure to init on some 82576 devices.
cris: fix eth_v10.c build error
cdc-ncm: tag Ericsson WWAN devices (eg F5521gw) with FLAG_WWAN
isdnloop: fix and simplify isdnloop_init()
hyperv: Move wait completion msg code into rndis_filter_halt_device()
net/mlx4_core: Remove port type restrictions
net/mlx4_en: Fixing TX queue stop/wake flow
...
Diffstat (limited to 'net')
-rw-r--r--	net/batman-adv/gateway_client.c	|  6
-rw-r--r--	net/core/dev.c			|  4
-rw-r--r--	net/core/sock.c			|  1
-rw-r--r--	net/ipv4/ip_output.c		|  2
-rw-r--r--	net/ipv4/route.c		|  4
-rw-r--r--	net/ipv4/tcp.c			|  4
-rw-r--r--	net/ipv4/tcp_cong.c		|  3
-rw-r--r--	net/ipv4/tcp_input.c		|  4
-rw-r--r--	net/ipv4/tcp_ipv4.c		| 13
-rw-r--r--	net/ipv4/tcp_minisocks.c	|  2
-rw-r--r--	net/ipv4/tcp_output.c		| 23
-rw-r--r--	net/ipv6/tcp_ipv6.c		| 27
-rw-r--r--	net/llc/llc_station.c		|  6
-rw-r--r--	net/mac80211/mesh.c		|  3
-rw-r--r--	net/mac80211/mlme.c		|  2
-rw-r--r--	net/mac80211/scan.c		|  3
-rw-r--r--	net/sched/act_gact.c		| 14
-rw-r--r--	net/sched/act_ipt.c		|  7
-rw-r--r--	net/sched/act_pedit.c		|  5
-rw-r--r--	net/sched/act_simple.c		|  5
-rw-r--r--	net/wireless/reg.c		| 19
-rw-r--r--	net/xfrm/xfrm_state.c		| 21
22 files changed, 135 insertions, 43 deletions
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index b421cc49d2cd..fc866f2e4528 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -200,11 +200,11 @@ void batadv_gw_election(struct batadv_priv *bat_priv)
 	if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_CLIENT)
 		goto out;
 
-	if (!batadv_atomic_dec_not_zero(&bat_priv->gw_reselect))
-		goto out;
-
 	curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
 
+	if (!batadv_atomic_dec_not_zero(&bat_priv->gw_reselect) && curr_gw)
+		goto out;
+
 	next_gw = batadv_gw_get_best_gw_node(bat_priv);
 
 	if (curr_gw == next_gw)
diff --git a/net/core/dev.c b/net/core/dev.c
index 0cb3fe8d8e72..f91abf800161 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2134,6 +2134,9 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
 	__be16 protocol = skb->protocol;
 	netdev_features_t features = skb->dev->features;
 
+	if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
+		features &= ~NETIF_F_GSO_MASK;
+
 	if (protocol == htons(ETH_P_8021Q)) {
 		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
 		protocol = veh->h_vlan_encapsulated_proto;
@@ -5986,6 +5989,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 	dev_net_set(dev, &init_net);
 
 	dev->gso_max_size = GSO_MAX_SIZE;
+	dev->gso_max_segs = GSO_MAX_SEGS;
 
 	INIT_LIST_HEAD(&dev->napi_list);
 	INIT_LIST_HEAD(&dev->unreg_list);
diff --git a/net/core/sock.c b/net/core/sock.c
index 6b654b3ddfda..8f67ced8d6a8 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1458,6 +1458,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
 		} else {
 			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
 			sk->sk_gso_max_size = dst->dev->gso_max_size;
+			sk->sk_gso_max_segs = dst->dev->gso_max_segs;
 		}
 	}
 }
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index ba39a52d18c1..76dde25fb9a0 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -197,7 +197,7 @@ static inline int ip_finish_output2(struct sk_buff *skb)
 	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
 	if (unlikely(!neigh))
 		neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
-	if (neigh) {
+	if (!IS_ERR(neigh)) {
 		int res = dst_neigh_output(dst, neigh, skb);
 
 		rcu_read_unlock_bh();
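The ip_finish_output2() fix above depends on the kernel's ERR_PTR convention:
__neigh_create() signals failure by encoding an errno in the returned pointer
rather than returning NULL, so a plain NULL test lets error pointers through.
A minimal sketch of the convention, with a hypothetical caller:

#include <linux/err.h>
#include <net/neighbour.h>

/* Hypothetical caller, for illustration only. */
static int foo_use_neigh(struct neighbour *neigh)
{
	if (IS_ERR(neigh))		/* e.g. ERR_PTR(-ENOBUFS) from __neigh_create() */
		return PTR_ERR(neigh);	/* recover the negative errno */
	return 0;			/* a real neighbour was returned */
}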
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index c035251beb07..e4ba974f143c 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -70,7 +70,6 @@
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
-#include <linux/bootmem.h>
 #include <linux/string.h>
 #include <linux/socket.h>
 #include <linux/sockios.h>
@@ -80,7 +79,6 @@
 #include <linux/netdevice.h>
 #include <linux/proc_fs.h>
 #include <linux/init.h>
-#include <linux/workqueue.h>
 #include <linux/skbuff.h>
 #include <linux/inetdevice.h>
 #include <linux/igmp.h>
@@ -88,11 +86,9 @@
 #include <linux/mroute.h>
 #include <linux/netfilter_ipv4.h>
 #include <linux/random.h>
-#include <linux/jhash.h>
 #include <linux/rcupdate.h>
 #include <linux/times.h>
 #include <linux/slab.h>
-#include <linux/prefetch.h>
 #include <net/dst.h>
 #include <net/net_namespace.h>
 #include <net/protocol.h>
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index e7e6eeae49c0..2109ff4a1daf 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -811,7 +811,9 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
 			   old_size_goal + mss_now > xmit_size_goal)) {
 			xmit_size_goal = old_size_goal;
 		} else {
-			tp->xmit_size_goal_segs = xmit_size_goal / mss_now;
+			tp->xmit_size_goal_segs =
+				min_t(u16, xmit_size_goal / mss_now,
+				      sk->sk_gso_max_segs);
 			xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
 		}
 	}
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 4d4db16e336e..1432cdb0644c 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -291,7 +291,8 @@ bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
 	left = tp->snd_cwnd - in_flight;
 	if (sk_can_gso(sk) &&
 	    left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
-	    left * tp->mss_cache < sk->sk_gso_max_size)
+	    left * tp->mss_cache < sk->sk_gso_max_size &&
+	    left < sk->sk_gso_max_segs)
 		return true;
 	return left <= tcp_max_tso_deferred_mss(tp);
 }
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2fd2bc9e3c64..85308b90df80 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5392,6 +5392,8 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
+	if (unlikely(sk->sk_rx_dst == NULL))
+		inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb);
 	/*
 	 *	Header prediction.
 	 *	The code loosely follows the one in the famous
@@ -5605,7 +5607,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
 	tcp_set_state(sk, TCP_ESTABLISHED);
 
 	if (skb != NULL) {
-		inet_sk_rx_dst_set(sk, skb);
+		icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
 		security_inet_conn_established(sk, skb);
 	}
 
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 42b2a6a73092..272241f16fcb 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1627,9 +1627,6 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 				sk->sk_rx_dst = NULL;
 			}
 		}
-		if (unlikely(sk->sk_rx_dst == NULL))
-			inet_sk_rx_dst_set(sk, skb);
-
 		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
 			rsk = sk;
 			goto reset;
@@ -1872,10 +1869,20 @@ static struct timewait_sock_ops tcp_timewait_sock_ops = {
 	.twsk_destructor= tcp_twsk_destructor,
 };
 
+static void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
+{
+	struct dst_entry *dst = skb_dst(skb);
+
+	dst_hold(dst);
+	sk->sk_rx_dst = dst;
+	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
+}
+
 const struct inet_connection_sock_af_ops ipv4_specific = {
 	.queue_xmit = ip_queue_xmit,
 	.send_check = tcp_v4_send_check,
 	.rebuild_header = inet_sk_rebuild_header,
+	.sk_rx_dst_set = inet_sk_rx_dst_set,
 	.conn_request = tcp_v4_conn_request,
 	.syn_recv_sock = tcp_v4_syn_recv_sock,
 	.net_header_len = sizeof(struct iphdr),
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 232a90c3ec86..d9c9dcef2de3 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -387,7 +387,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 		struct tcp_sock *oldtp = tcp_sk(sk);
 		struct tcp_cookie_values *oldcvp = oldtp->cookie_values;
 
-		inet_sk_rx_dst_set(newsk, skb);
+		newicsk->icsk_af_ops->sk_rx_dst_set(newsk, skb);
 
 		/* TCP Cookie Transactions require space for the cookie pair,
 		 * as it differs for each connection. There is no need to
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 3f1bcff0b10b..20dfd892c86f 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -940,7 +940,7 @@ void __init tcp_tasklet_init(void)
  * We cant xmit new skbs from this context, as we might already
  * hold qdisc lock.
  */
-void tcp_wfree(struct sk_buff *skb)
+static void tcp_wfree(struct sk_buff *skb)
 {
 	struct sock *sk = skb->sk;
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -1522,21 +1522,21 @@ static void tcp_cwnd_validate(struct sock *sk)
  * when we would be allowed to send the split-due-to-Nagle skb fully.
  */
 static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb,
-					unsigned int mss_now, unsigned int cwnd)
+					unsigned int mss_now, unsigned int max_segs)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
-	u32 needed, window, cwnd_len;
+	u32 needed, window, max_len;
 
 	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
-	cwnd_len = mss_now * cwnd;
+	max_len = mss_now * max_segs;
 
-	if (likely(cwnd_len <= window && skb != tcp_write_queue_tail(sk)))
-		return cwnd_len;
+	if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
+		return max_len;
 
 	needed = min(skb->len, window);
 
-	if (cwnd_len <= needed)
-		return cwnd_len;
+	if (max_len <= needed)
+		return max_len;
 
 	return needed - needed % mss_now;
 }
@@ -1765,7 +1765,8 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
 	limit = min(send_win, cong_win);
 
 	/* If a full-sized TSO skb can be sent, do it. */
-	if (limit >= sk->sk_gso_max_size)
+	if (limit >= min_t(unsigned int, sk->sk_gso_max_size,
+			   sk->sk_gso_max_segs * tp->mss_cache))
 		goto send_now;
 
 	/* Middle in queue won't get any more data, full sendable already? */
@@ -1999,7 +2000,9 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 		limit = mss_now;
 		if (tso_segs > 1 && !tcp_urg_mode(tp))
 			limit = tcp_mss_split_point(sk, skb, mss_now,
-						    cwnd_quota);
+						    min_t(unsigned int,
+							  cwnd_quota,
+							  sk->sk_gso_max_segs));
 
 		if (skb->len > limit &&
 		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index c66b90f71c9b..5a439e9a4c01 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1447,7 +1447,17 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 		opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));
 
 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
+		struct dst_entry *dst = sk->sk_rx_dst;
+
 		sock_rps_save_rxhash(sk, skb);
+		if (dst) {
+			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
+			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
+				dst_release(dst);
+				sk->sk_rx_dst = NULL;
+			}
+		}
+
 		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
 			goto reset;
 		if (opt_skb)
@@ -1705,9 +1715,9 @@ static void tcp_v6_early_demux(struct sk_buff *skb)
 		struct dst_entry *dst = sk->sk_rx_dst;
 		struct inet_sock *icsk = inet_sk(sk);
 		if (dst)
-			dst = dst_check(dst, 0);
+			dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
 		if (dst &&
-		    icsk->rx_dst_ifindex == inet6_iif(skb))
+		    icsk->rx_dst_ifindex == skb->skb_iif)
 			skb_dst_set_noref(skb, dst);
 	}
 }
@@ -1719,10 +1729,23 @@ static struct timewait_sock_ops tcp6_timewait_sock_ops = {
 	.twsk_destructor= tcp_twsk_destructor,
 };
 
+static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
+{
+	struct dst_entry *dst = skb_dst(skb);
+	const struct rt6_info *rt = (const struct rt6_info *)dst;
+
+	dst_hold(dst);
+	sk->sk_rx_dst = dst;
+	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
+	if (rt->rt6i_node)
+		inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
+}
+
 static const struct inet_connection_sock_af_ops ipv6_specific = {
 	.queue_xmit = inet6_csk_xmit,
 	.send_check = tcp_v6_send_check,
 	.rebuild_header = inet6_sk_rebuild_header,
+	.sk_rx_dst_set = inet6_sk_rx_dst_set,
 	.conn_request = tcp_v6_conn_request,
 	.syn_recv_sock = tcp_v6_syn_recv_sock,
 	.net_header_len = sizeof(struct ipv6hdr),
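The IPv6 early-demux change above (point 12 in the pull message) hinges on
dst_check(): a cached rx dst is only trusted if the cookie recorded when it
was stored still matches the current routing generation, and dst_check()
returns NULL once the route is obsolete. A minimal sketch of that
revalidation pattern (the helper name is hypothetical):

#include <net/dst.h>

/* Hypothetical helper: return the cached dst only if it is still valid for
 * the saved cookie, otherwise NULL so the caller drops the stale cache.
 */
static struct dst_entry *foo_revalidate_dst(struct dst_entry *dst, u32 cookie)
{
	return dst ? dst_check(dst, cookie) : NULL;
}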
diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c
index 39a8d8924b9c..6828e39ec2ec 100644
--- a/net/llc/llc_station.c
+++ b/net/llc/llc_station.c
@@ -268,7 +268,7 @@ static int llc_station_ac_send_null_dsap_xid_c(struct sk_buff *skb)
 out:
	return rc;
 free:
-	kfree_skb(skb);
+	kfree_skb(nskb);
 	goto out;
 }
 
@@ -293,7 +293,7 @@ static int llc_station_ac_send_xid_r(struct sk_buff *skb)
 out:
	return rc;
 free:
-	kfree_skb(skb);
+	kfree_skb(nskb);
 	goto out;
 }
 
@@ -322,7 +322,7 @@ static int llc_station_ac_send_test_r(struct sk_buff *skb)
 out:
	return rc;
 free:
-	kfree_skb(skb);
+	kfree_skb(nskb);
 	goto out;
 }
 
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 6fac18c0423f..85572353a7e3 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -622,6 +622,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
 
 	del_timer_sync(&sdata->u.mesh.housekeeping_timer);
 	del_timer_sync(&sdata->u.mesh.mesh_path_root_timer);
+	del_timer_sync(&sdata->u.mesh.mesh_path_timer);
 	/*
 	 * If the timer fired while we waited for it, it will have
 	 * requeued the work. Now the work will be running again
@@ -634,6 +635,8 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
 	local->fif_other_bss--;
 	atomic_dec(&local->iff_allmultis);
 	ieee80211_configure_filter(local);
+
+	sdata->u.mesh.timers_running = 0;
 }
 
 static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index cef0c9e79aba..a4a5acdbaa4d 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1430,6 +1430,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
 	del_timer_sync(&sdata->u.mgd.bcn_mon_timer);
 	del_timer_sync(&sdata->u.mgd.timer);
 	del_timer_sync(&sdata->u.mgd.chswitch_timer);
+
+	sdata->u.mgd.timers_running = 0;
 }
 
 void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index bcaee5d12839..839dd9737989 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -299,7 +299,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
 	if (local->scan_req != local->int_scan_req)
 		cfg80211_scan_done(local->scan_req, aborted);
 	local->scan_req = NULL;
-	local->scan_sdata = NULL;
+	rcu_assign_pointer(local->scan_sdata, NULL);
 
 	local->scanning = 0;
 	local->scan_channel = NULL;
@@ -984,7 +984,6 @@ int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata)
 			kfree(local->sched_scan_ies.ie[i]);
 
 		drv_sched_scan_stop(local, sdata);
-		rcu_assign_pointer(local->sched_scan_sdata, NULL);
 	}
 out:
 	mutex_unlock(&local->mtx);
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index f10fb8256442..05d60859d8e3 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -67,6 +67,9 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est,
 	struct tcf_common *pc;
 	int ret = 0;
 	int err;
+#ifdef CONFIG_GACT_PROB
+	struct tc_gact_p *p_parm = NULL;
+#endif
 
 	if (nla == NULL)
 		return -EINVAL;
@@ -82,6 +85,12 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est,
 #ifndef CONFIG_GACT_PROB
 	if (tb[TCA_GACT_PROB] != NULL)
 		return -EOPNOTSUPP;
+#else
+	if (tb[TCA_GACT_PROB]) {
+		p_parm = nla_data(tb[TCA_GACT_PROB]);
+		if (p_parm->ptype >= MAX_RAND)
+			return -EINVAL;
+	}
 #endif
 
 	pc = tcf_hash_check(parm->index, a, bind, &gact_hash_info);
@@ -103,8 +112,7 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est,
 	spin_lock_bh(&gact->tcf_lock);
 	gact->tcf_action = parm->action;
 #ifdef CONFIG_GACT_PROB
-	if (tb[TCA_GACT_PROB] != NULL) {
-		struct tc_gact_p *p_parm = nla_data(tb[TCA_GACT_PROB]);
+	if (p_parm) {
 		gact->tcfg_paction = p_parm->paction;
 		gact->tcfg_pval = p_parm->pval;
 		gact->tcfg_ptype = p_parm->ptype;
@@ -133,7 +141,7 @@ static int tcf_gact(struct sk_buff *skb, const struct tc_action *a,
 
 	spin_lock(&gact->tcf_lock);
 #ifdef CONFIG_GACT_PROB
-	if (gact->tcfg_ptype && gact_rand[gact->tcfg_ptype] != NULL)
+	if (gact->tcfg_ptype)
 		action = gact_rand[gact->tcfg_ptype](gact);
 	else
 		action = gact->tcf_action;
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 60e281ad0f07..58fb3c7aab9e 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -185,7 +185,12 @@ err3:
 err2:
 	kfree(tname);
 err1:
-	kfree(pc);
+	if (ret == ACT_P_CREATED) {
+		if (est)
+			gen_kill_estimator(&pc->tcfc_bstats,
+					   &pc->tcfc_rate_est);
+		kfree_rcu(pc, tcfc_rcu);
+	}
 	return err;
 }
 
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 26aa2f6ce257..45c53ab067a6 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -74,7 +74,10 @@ static int tcf_pedit_init(struct nlattr *nla, struct nlattr *est,
 		p = to_pedit(pc);
 		keys = kmalloc(ksize, GFP_KERNEL);
 		if (keys == NULL) {
-			kfree(pc);
+			if (est)
+				gen_kill_estimator(&pc->tcfc_bstats,
+						   &pc->tcfc_rate_est);
+			kfree_rcu(pc, tcfc_rcu);
 			return -ENOMEM;
 		}
 		ret = ACT_P_CREATED;
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 3922f2a2821b..3714f60f0b3c 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -131,7 +131,10 @@ static int tcf_simp_init(struct nlattr *nla, struct nlattr *est,
 		d = to_defact(pc);
 		ret = alloc_defdata(d, defdata);
 		if (ret < 0) {
-			kfree(pc);
+			if (est)
+				gen_kill_estimator(&pc->tcfc_bstats,
+						   &pc->tcfc_rate_est);
+			kfree_rcu(pc, tcfc_rcu);
 			return ret;
 		}
 		d->tcf_action = parm->action;
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 2303ee73b50a..2ded3c7fad06 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -680,6 +680,8 @@ static u32 map_regdom_flags(u32 rd_flags)
 		channel_flags |= IEEE80211_CHAN_NO_IBSS;
 	if (rd_flags & NL80211_RRF_DFS)
 		channel_flags |= IEEE80211_CHAN_RADAR;
+	if (rd_flags & NL80211_RRF_NO_OFDM)
+		channel_flags |= IEEE80211_CHAN_NO_OFDM;
 	return channel_flags;
 }
 
@@ -901,7 +903,21 @@ static void handle_channel(struct wiphy *wiphy,
 	chan->max_antenna_gain = min(chan->orig_mag,
 		(int) MBI_TO_DBI(power_rule->max_antenna_gain));
 	chan->max_reg_power = (int) MBM_TO_DBM(power_rule->max_eirp);
-	chan->max_power = min(chan->max_power, chan->max_reg_power);
+	if (chan->orig_mpwr) {
+		/*
+		 * Devices that have their own custom regulatory domain
+		 * but also use WIPHY_FLAG_STRICT_REGULATORY will follow the
+		 * passed country IE power settings.
+		 */
+		if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE &&
+		    wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY &&
+		    wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY)
+			chan->max_power = chan->max_reg_power;
+		else
+			chan->max_power = min(chan->orig_mpwr,
+					      chan->max_reg_power);
+	} else
+		chan->max_power = chan->max_reg_power;
 }
 
 static void handle_band(struct wiphy *wiphy,
@@ -1885,6 +1901,7 @@ static void restore_custom_reg_settings(struct wiphy *wiphy)
 			chan->flags = chan->orig_flags;
 			chan->max_antenna_gain = chan->orig_mag;
 			chan->max_power = chan->orig_mpwr;
+			chan->beacon_found = false;
 		}
 	}
 }
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 5b228f97d4b3..87cd0e4d4282 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -415,8 +415,17 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer * me)
 	if (x->lft.hard_add_expires_seconds) {
 		long tmo = x->lft.hard_add_expires_seconds +
 			x->curlft.add_time - now;
-		if (tmo <= 0)
+		if (tmo <= 0) {
+			if (x->xflags & XFRM_SOFT_EXPIRE) {
+				/* enter hard expire without soft expire first?!
+				 * setting a new date could trigger this.
+				 * workarbound: fix x->curflt.add_time by below:
+				 */
+				x->curlft.add_time = now - x->saved_tmo - 1;
+				tmo = x->lft.hard_add_expires_seconds - x->saved_tmo;
+			} else
 			goto expired;
+		}
 		if (tmo < next)
 			next = tmo;
 	}
@@ -433,10 +442,14 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer * me)
 	if (x->lft.soft_add_expires_seconds) {
 		long tmo = x->lft.soft_add_expires_seconds +
 			x->curlft.add_time - now;
-		if (tmo <= 0)
+		if (tmo <= 0) {
 			warn = 1;
-		else if (tmo < next)
+			x->xflags &= ~XFRM_SOFT_EXPIRE;
+		} else if (tmo < next) {
 			next = tmo;
+			x->xflags |= XFRM_SOFT_EXPIRE;
+			x->saved_tmo = tmo;
+		}
 	}
 	if (x->lft.soft_use_expires_seconds) {
 		long tmo = x->lft.soft_use_expires_seconds +