Diffstat (limited to 'net')

 net/ipv4/tcp_input.c     | 11 ++++++-----
 net/netlink/af_netlink.c | 30 ++++++++++++++++++++++++++++++
 2 files changed, 36 insertions(+), 5 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 1969e16d936d..25a89eaa669d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3162,16 +3162,14 @@ static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
 
 	/* If reordering is high then always grow cwnd whenever data is
 	 * delivered regardless of its ordering. Otherwise stay conservative
-	 * and only grow cwnd on in-order delivery in Open state, and retain
-	 * cwnd in Disordered state (RFC5681). A stretched ACK with
+	 * and only grow cwnd on in-order delivery (RFC5681). A stretched ACK w/
 	 * new SACK or ECE mark may first advance cwnd here and later reduce
 	 * cwnd in tcp_fastretrans_alert() based on more states.
 	 */
 	if (tcp_sk(sk)->reordering > sysctl_tcp_reordering)
 		return flag & FLAG_FORWARD_PROGRESS;
 
-	return inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
-	       flag & FLAG_DATA_ACKED;
+	return flag & FLAG_DATA_ACKED;
 }
 
 /* Check that window update is acceptable.
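The hunk above removes the TCP_CA_Open gate: when reordering is high, cwnd already grows on any forward progress, and after this change in-order ACKed data grows cwnd in the Disordered state as well. A minimal sketch of the helper as it reads after the hunk, reassembled from the diff (any lines of the function body above the hunk are assumed unchanged):

static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
{
	/* ... earlier checks before this hunk, unchanged ... */

	/* If reordering is high then always grow cwnd whenever data is
	 * delivered regardless of its ordering. Otherwise stay conservative
	 * and only grow cwnd on in-order delivery (RFC5681). A stretched ACK w/
	 * new SACK or ECE mark may first advance cwnd here and later reduce
	 * cwnd in tcp_fastretrans_alert() based on more states.
	 */
	if (tcp_sk(sk)->reordering > sysctl_tcp_reordering)
		return flag & FLAG_FORWARD_PROGRESS;

	/* No longer conditioned on icsk_ca_state == TCP_CA_Open. */
	return flag & FLAG_DATA_ACKED;
}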
@@ -4141,6 +4139,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 		if (!tcp_try_coalesce(sk, skb1, skb, &fragstolen)) {
 			__skb_queue_after(&tp->out_of_order_queue, skb1, skb);
 		} else {
+			tcp_grow_window(sk, skb);
 			kfree_skb_partial(skb, fragstolen);
 			skb = NULL;
 		}
@@ -4216,8 +4215,10 @@ add_sack:
 	if (tcp_is_sack(tp))
 		tcp_sack_new_ofo_skb(sk, seq, end_seq);
 end:
-	if (skb)
+	if (skb) {
+		tcp_grow_window(sk, skb);
 		skb_set_owner_r(skb, sk);
+	}
 }
 
 static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen,
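The two tcp_grow_window() additions cover both paths an out-of-order segment can take in tcp_data_queue_ofo(): merged into an skb already on the queue, or kept and queued itself. A consolidated sketch of the effect, reassembled from the two hunks above (surrounding code assumed unchanged):

	if (!tcp_try_coalesce(sk, skb1, skb, &fragstolen)) {
		__skb_queue_after(&tp->out_of_order_queue, skb1, skb);
	} else {
		tcp_grow_window(sk, skb);	/* payload merged into skb1 */
		kfree_skb_partial(skb, fragstolen);
		skb = NULL;
	}
	...
end:
	if (skb) {
		tcp_grow_window(sk, skb);	/* skb queued as-is */
		skb_set_owner_r(skb, sk);
	}

Either way tcp_grow_window() can raise tp->rcv_ssthresh for the received data, so the advertised receive window keeps opening while segments arrive out of order, rather than only once the hole is filled.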
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index a17dda1bbee0..8df7f64c6db3 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -168,16 +168,43 @@ int netlink_remove_tap(struct netlink_tap *nt)
 }
 EXPORT_SYMBOL_GPL(netlink_remove_tap);
 
+static bool netlink_filter_tap(const struct sk_buff *skb)
+{
+	struct sock *sk = skb->sk;
+	bool pass = false;
+
+	/* We take the more conservative approach and
+	 * whitelist socket protocols that may pass.
+	 */
+	switch (sk->sk_protocol) {
+	case NETLINK_ROUTE:
+	case NETLINK_USERSOCK:
+	case NETLINK_SOCK_DIAG:
+	case NETLINK_NFLOG:
+	case NETLINK_XFRM:
+	case NETLINK_FIB_LOOKUP:
+	case NETLINK_NETFILTER:
+	case NETLINK_GENERIC:
+		pass = true;
+		break;
+	}
+
+	return pass;
+}
+
 static int __netlink_deliver_tap_skb(struct sk_buff *skb,
 				     struct net_device *dev)
 {
 	struct sk_buff *nskb;
+	struct sock *sk = skb->sk;
 	int ret = -ENOMEM;
 
 	dev_hold(dev);
 	nskb = skb_clone(skb, GFP_ATOMIC);
 	if (nskb) {
 		nskb->dev = dev;
+		nskb->protocol = htons((u16) sk->sk_protocol);
+
 		ret = dev_queue_xmit(nskb);
 		if (unlikely(ret > 0))
 			ret = net_xmit_errno(ret);
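netlink_filter_tap() whitelists the families that may be mirrored to a tap; anything else (for example NETLINK_KOBJECT_UEVENT, which is not in the switch above) is never cloned to analyzers. The clone is also stamped with the originating family, so a capture can tell NETLINK_ROUTE from NETLINK_NETFILTER and so on. A reader-side sketch of consuming that stamp (the helper name is illustrative, not part of this patch):

/* Hypothetical tap-side helper: recover the netlink family that
 * __netlink_deliver_tap_skb() stored in the clone's protocol field.
 */
static u16 tap_skb_nl_family(const struct sk_buff *nskb)
{
	return ntohs(nskb->protocol);	/* e.g. NETLINK_ROUTE */
}

In practice the tap end is typically an nlmon device (e.g. ip link add nlmon0 type nlmon; ip link set nlmon0 up), and a tcpdump -i nlmon0 then only ever sees whitelisted families.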
@@ -192,6 +219,9 @@ static void __netlink_deliver_tap(struct sk_buff *skb)
 	int ret;
 	struct netlink_tap *tmp;
 
+	if (!netlink_filter_tap(skb))
+		return;
+
 	list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
 		ret = __netlink_deliver_tap_skb(skb, tmp->dev);
 		if (unlikely(ret))
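With this last hunk, filtering is a single early return, so a non-whitelisted skb never reaches any registered tap. The function as it reads after the hunk, reassembled from the diff (the loop tail is cut off by the hunk context; the break below is an assumption about the unchanged remainder):

static void __netlink_deliver_tap(struct sk_buff *skb)
{
	int ret;
	struct netlink_tap *tmp;

	if (!netlink_filter_tap(skb))
		return;

	list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
		ret = __netlink_deliver_tap_skb(skb, tmp->dev);
		if (unlikely(ret))
			break;	/* assumed: give up after the first failing tap */
	}
}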