path: root/net
author	Linus Torvalds <torvalds@linux-foundation.org>	2013-09-07 17:27:46 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-09-07 17:27:46 -0400
commit	0ffb01d9def22f1954e99529b7e4ded497b2e88b (patch)
tree	e18b4dd941bc0e2e34078b7b64469f5675046734 /net
parent	7b4022fa17991801e29f09c6794bbf4d1a0d6b6d (diff)
parent	4e4f1fc226816905c937f9b29dabe351075dfe0f (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
 "A quick set of fixes, some to deal with fallout from yesterday's
  net-next merge.

  1) Fix compilation of bnx2x driver with CONFIG_BNX2X_SRIOV disabled,
     from Dmitry Kravkov.

  2) Fix a bnx2x regression caused by one of Dave Jones's mistaken
     braces changes, from Eilon Greenstein.

  3) Add some protective filtering in the netlink tap code, from
     Daniel Borkmann.

  4) Fix TCP congestion window growth regression after timeouts, from
     Yuchung Cheng.

  5) Correctly adjust TCP's rcv_ssthresh for out of order packets, from
     Eric Dumazet"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  tcp: properly increase rcv_ssthresh for ofo packets
  net: add documentation for BQL helpers
  mlx5: remove unused MLX5_DEBUG param in Kconfig
  bnx2x: Restore a call to config_init
  bnx2x: fix broken compilation with CONFIG_BNX2X_SRIOV is not set
  tcp: fix no cwnd growth after timeout
  net: netlink: filter particular protocols from analyzers
Diffstat (limited to 'net')
-rw-r--r--net/ipv4/tcp_input.c11
-rw-r--r--net/netlink/af_netlink.c30
2 files changed, 36 insertions(+), 5 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 1969e16d936d..25a89eaa669d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3162,16 +3162,14 @@ static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
 
 	/* If reordering is high then always grow cwnd whenever data is
 	 * delivered regardless of its ordering. Otherwise stay conservative
-	 * and only grow cwnd on in-order delivery in Open state, and retain
-	 * cwnd in Disordered state (RFC5681). A stretched ACK with
+	 * and only grow cwnd on in-order delivery (RFC5681). A stretched ACK w/
 	 * new SACK or ECE mark may first advance cwnd here and later reduce
 	 * cwnd in tcp_fastretrans_alert() based on more states.
 	 */
 	if (tcp_sk(sk)->reordering > sysctl_tcp_reordering)
 		return flag & FLAG_FORWARD_PROGRESS;
 
-	return inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
-	       flag & FLAG_DATA_ACKED;
+	return flag & FLAG_DATA_ACKED;
 }
 
 /* Check that window update is acceptable.
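
The hunk above drops the requirement that the connection be in the TCP_CA_Open state before cwnd may grow; per the merge summary this fixes the regression where cwnd stopped growing after a timeout, since new data was being ACKed while the connection was still in a recovery state. A minimal user-space sketch of the resulting predicate follows; the FLAG_* values and the reordering threshold are illustrative stand-ins, not the kernel's definitions.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative flag bits; the kernel defines its own FLAG_* values. */
#define FLAG_DATA_ACKED		0x04
#define FLAG_FORWARD_PROGRESS	0x08

static bool may_raise_cwnd(int reordering, int reordering_thresh, int flag)
{
	/* High reordering: grow cwnd on any forward progress. */
	if (reordering > reordering_thresh)
		return flag & FLAG_FORWARD_PROGRESS;

	/* Otherwise grow whenever new data was cumulatively ACKed; the
	 * removed code additionally required the TCP_CA_Open state. */
	return flag & FLAG_DATA_ACKED;
}

int main(void)
{
	/* Data ACKed, e.g. while still recovering from a timeout. */
	printf("%d\n", may_raise_cwnd(3, 3, FLAG_DATA_ACKED));	/* prints 1 */
	return 0;
}
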
@@ -4141,6 +4139,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 		if (!tcp_try_coalesce(sk, skb1, skb, &fragstolen)) {
 			__skb_queue_after(&tp->out_of_order_queue, skb1, skb);
 		} else {
+			tcp_grow_window(sk, skb);
 			kfree_skb_partial(skb, fragstolen);
 			skb = NULL;
 		}
@@ -4216,8 +4215,10 @@ add_sack:
 	if (tcp_is_sack(tp))
 		tcp_sack_new_ofo_skb(sk, seq, end_seq);
 end:
-	if (skb)
+	if (skb) {
+		tcp_grow_window(sk, skb);
 		skb_set_owner_r(skb, sk);
+	}
 }
 
 static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen,
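
The two hunks above add tcp_grow_window() on the paths that queue or coalesce out-of-order segments, so rcv_ssthresh keeps increasing even when nothing arrives in order. The threshold matters because the window the receiver advertises is roughly bounded by it. The sketch below shows only that capping idea and is illustrative; it is not the kernel's window-selection code.

#include <stdio.h>

/* Illustrative: a stale rcv_ssthresh pins the advertised window even
 * when plenty of receive buffer is free. */
static unsigned int capped_window(unsigned int free_space,
				  unsigned int rcv_ssthresh)
{
	if (free_space > rcv_ssthresh)
		free_space = rcv_ssthresh;
	return free_space;
}

int main(void)
{
	/* 256 KB of free buffer, but a threshold stuck at 64 KB wins. */
	printf("%u\n", capped_window(262144, 65535));	/* prints 65535 */
	return 0;
}
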
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index a17dda1bbee0..8df7f64c6db3 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -168,16 +168,43 @@ int netlink_remove_tap(struct netlink_tap *nt)
 }
 EXPORT_SYMBOL_GPL(netlink_remove_tap);
 
+static bool netlink_filter_tap(const struct sk_buff *skb)
+{
+	struct sock *sk = skb->sk;
+	bool pass = false;
+
+	/* We take the more conservative approach and
+	 * whitelist socket protocols that may pass.
+	 */
+	switch (sk->sk_protocol) {
+	case NETLINK_ROUTE:
+	case NETLINK_USERSOCK:
+	case NETLINK_SOCK_DIAG:
+	case NETLINK_NFLOG:
+	case NETLINK_XFRM:
+	case NETLINK_FIB_LOOKUP:
+	case NETLINK_NETFILTER:
+	case NETLINK_GENERIC:
+		pass = true;
+		break;
+	}
+
+	return pass;
+}
+
 static int __netlink_deliver_tap_skb(struct sk_buff *skb,
 				     struct net_device *dev)
 {
 	struct sk_buff *nskb;
+	struct sock *sk = skb->sk;
 	int ret = -ENOMEM;
 
 	dev_hold(dev);
 	nskb = skb_clone(skb, GFP_ATOMIC);
 	if (nskb) {
 		nskb->dev = dev;
+		nskb->protocol = htons((u16) sk->sk_protocol);
+
 		ret = dev_queue_xmit(nskb);
 		if (unlikely(ret > 0))
 			ret = net_xmit_errno(ret);
@@ -192,6 +219,9 @@ static void __netlink_deliver_tap(struct sk_buff *skb)
 	int ret;
 	struct netlink_tap *tmp;
 
+	if (!netlink_filter_tap(skb))
+		return;
+
 	list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
 		ret = __netlink_deliver_tap_skb(skb, tmp->dev);
 		if (unlikely(ret))
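
With this filter in place, only the protocol families whitelisted in netlink_filter_tap() are cloned out to netlink taps (for example an nlmon device used by packet analyzers), and each clone records its family in nskb->protocol so captures can be demultiplexed. Below is a small user-space sketch of the same whitelist decision; it reuses the NETLINK_* constants from <linux/netlink.h>, but the function itself is illustrative and not a kernel interface.

#include <stdbool.h>
#include <stdio.h>
#include <linux/netlink.h>

/* Mirror of the whitelist above: would this netlink family be tapped? */
static bool would_be_tapped(int protocol)
{
	switch (protocol) {
	case NETLINK_ROUTE:
	case NETLINK_USERSOCK:
	case NETLINK_SOCK_DIAG:
	case NETLINK_NFLOG:
	case NETLINK_XFRM:
	case NETLINK_FIB_LOOKUP:
	case NETLINK_NETFILTER:
	case NETLINK_GENERIC:
		return true;
	default:
		return false;
	}
}

int main(void)
{
	printf("NETLINK_ROUTE: %d\n", would_be_tapped(NETLINK_ROUTE));
	printf("NETLINK_KOBJECT_UEVENT: %d\n",
	       would_be_tapped(NETLINK_KOBJECT_UEVENT));
	return 0;
}
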