author     Mark Brown <broonie@linaro.org>  2014-04-29 13:01:28 -0400
committer  Mark Brown <broonie@linaro.org>  2014-04-29 13:01:28 -0400
commit     3e93457b45a1a8c69227ce596ee2005fa06f20dd (patch)
tree       248c27e432533b1af80a9b2240eaa8e48e3b87cc /net
parent     290414499cf94284a97cc3c33214d13ccfcd896a (diff)
parent     c42ba72ec3a7a1b6aa30122931f1f4b91b601c31 (diff)
Merge tag 'ib-mfd-regulator-3.16' of git://git.kernel.org/pub/scm/linux/kernel/git/lee/mfd into regulator-tps65090
Immutable branch between MFD and Regulator due for v3.16 merge-window.
Diffstat (limited to 'net')
48 files changed, 338 insertions, 204 deletions
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 6f142f03716d..733ec283ed1b 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -493,10 +493,48 @@ static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
         }
 }
 
+static int vlan_calculate_locking_subclass(struct net_device *real_dev)
+{
+        int subclass = 0;
+
+        while (is_vlan_dev(real_dev)) {
+                subclass++;
+                real_dev = vlan_dev_priv(real_dev)->real_dev;
+        }
+
+        return subclass;
+}
+
+static void vlan_dev_mc_sync(struct net_device *to, struct net_device *from)
+{
+        int err = 0, subclass;
+
+        subclass = vlan_calculate_locking_subclass(to);
+
+        spin_lock_nested(&to->addr_list_lock, subclass);
+        err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
+        if (!err)
+                __dev_set_rx_mode(to);
+        spin_unlock(&to->addr_list_lock);
+}
+
+static void vlan_dev_uc_sync(struct net_device *to, struct net_device *from)
+{
+        int err = 0, subclass;
+
+        subclass = vlan_calculate_locking_subclass(to);
+
+        spin_lock_nested(&to->addr_list_lock, subclass);
+        err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
+        if (!err)
+                __dev_set_rx_mode(to);
+        spin_unlock(&to->addr_list_lock);
+}
+
 static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
 {
-        dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
-        dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
+        vlan_dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
+        vlan_dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
 }
 
 /*
@@ -608,9 +646,7 @@ static int vlan_dev_init(struct net_device *dev)
 
         SET_NETDEV_DEVTYPE(dev, &vlan_type);
 
-        if (is_vlan_dev(real_dev))
-                subclass = 1;
-
+        subclass = vlan_calculate_locking_subclass(dev);
         vlan_dev_set_lockdep_class(dev, subclass);
 
         vlan_dev_priv(dev)->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
diff --git a/net/core/dev.c b/net/core/dev.c
index 14dac0654f28..d2c8a06b3a98 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2284,7 +2284,7 @@ EXPORT_SYMBOL(skb_checksum_help);
 __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
 {
         __be16 type = skb->protocol;
-        int vlan_depth = ETH_HLEN;
+        int vlan_depth = skb->mac_len;
 
         /* Tunnel gso handlers can set protocol to ethernet. */
         if (type == htons(ETH_P_TEB)) {
@@ -5238,6 +5238,7 @@ void __dev_set_rx_mode(struct net_device *dev)
         if (ops->ndo_set_rx_mode)
                 ops->ndo_set_rx_mode(dev);
 }
+EXPORT_SYMBOL(__dev_set_rx_mode);
 
 void dev_set_rx_mode(struct net_device *dev)
 {
diff --git a/net/core/dst.c b/net/core/dst.c
index ca4231ec7347..80d6286c8b62 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -142,12 +142,12 @@ loop:
         mutex_unlock(&dst_gc_mutex);
 }
 
-int dst_discard(struct sk_buff *skb)
+int dst_discard_sk(struct sock *sk, struct sk_buff *skb)
 {
         kfree_skb(skb);
         return 0;
 }
-EXPORT_SYMBOL(dst_discard);
+EXPORT_SYMBOL(dst_discard_sk);
 
 const u32 dst_default_metrics[RTAX_MAX + 1] = {
         /* This initializer is needed to force linker to place this variable
@@ -184,7 +184,7 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
         dst->xfrm = NULL;
 #endif
         dst->input = dst_discard;
-        dst->output = dst_discard;
+        dst->output = dst_discard_sk;
         dst->error = 0;
         dst->obsolete = initial_obsolete;
         dst->header_len = 0;
@@ -209,8 +209,10 @@ static void ___dst_free(struct dst_entry *dst)
         /* The first case (dev==NULL) is required, when
            protocol module is unloaded.
          */
-        if (dst->dev == NULL || !(dst->dev->flags&IFF_UP))
-                dst->input = dst->output = dst_discard;
+        if (dst->dev == NULL || !(dst->dev->flags&IFF_UP)) {
+                dst->input = dst_discard;
+                dst->output = dst_discard_sk;
+        }
         dst->obsolete = DST_OBSOLETE_DEAD;
 }
 
@@ -361,7 +363,8 @@ static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
                 return;
 
         if (!unregister) {
-                dst->input = dst->output = dst_discard;
+                dst->input = dst_discard;
+                dst->output = dst_discard_sk;
         } else {
                 dst->dev = dev_net(dst->dev)->loopback_dev;
                 dev_hold(dst->dev);
diff --git a/net/core/filter.c b/net/core/filter.c
index e08b3822c72a..cd58614660cf 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -600,6 +600,9 @@ static u64 __skb_get_nlattr(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
         if (skb_is_nonlinear(skb))
                 return 0;
 
+        if (skb->len < sizeof(struct nlattr))
+                return 0;
+
         if (A > skb->len - sizeof(struct nlattr))
                 return 0;
 
@@ -618,11 +621,14 @@ static u64 __skb_get_nlattr_nest(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
         if (skb_is_nonlinear(skb))
                 return 0;
 
+        if (skb->len < sizeof(struct nlattr))
+                return 0;
+
         if (A > skb->len - sizeof(struct nlattr))
                 return 0;
 
         nla = (struct nlattr *) &skb->data[A];
-        if (nla->nla_len > A - skb->len)
+        if (nla->nla_len > skb->len - A)
                 return 0;
 
         nla = nla_find_nested(nla, X);
@@ -1737,7 +1743,6 @@ void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to)
                 [BPF_S_ANC_RXHASH] = BPF_LD|BPF_B|BPF_ABS,
                 [BPF_S_ANC_CPU] = BPF_LD|BPF_B|BPF_ABS,
                 [BPF_S_ANC_ALU_XOR_X] = BPF_LD|BPF_B|BPF_ABS,
-                [BPF_S_ANC_SECCOMP_LD_W] = BPF_LD|BPF_B|BPF_ABS,
                 [BPF_S_ANC_VLAN_TAG] = BPF_LD|BPF_B|BPF_ABS,
                 [BPF_S_ANC_VLAN_TAG_PRESENT] = BPF_LD|BPF_B|BPF_ABS,
                 [BPF_S_ANC_PAY_OFFSET] = BPF_LD|BPF_B|BPF_ABS,
diff --git a/net/dccp/output.c b/net/dccp/output.c
index 8876078859da..0248e8a3460c 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -138,7 +138,7 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
 
                 DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
 
-                err = icsk->icsk_af_ops->queue_xmit(skb, &inet->cork.fl);
+                err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
                 return net_xmit_eval(err);
         }
         return -ENOBUFS;
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index ce0cbbfe0f43..daccc4a36d80 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -752,7 +752,7 @@ static int dn_to_neigh_output(struct sk_buff *skb)
         return n->output(n, skb);
 }
 
-static int dn_output(struct sk_buff *skb)
+static int dn_output(struct sock *sk, struct sk_buff *skb)
 {
         struct dst_entry *dst = skb_dst(skb);
         struct dn_route *rt = (struct dn_route *)dst;
@@ -838,6 +838,18 @@ drop:
  * Used to catch bugs. This should never normally get
  * called.
  */
+static int dn_rt_bug_sk(struct sock *sk, struct sk_buff *skb)
+{
+        struct dn_skb_cb *cb = DN_SKB_CB(skb);
+
+        net_dbg_ratelimited("dn_rt_bug: skb from:%04x to:%04x\n",
+                            le16_to_cpu(cb->src), le16_to_cpu(cb->dst));
+
+        kfree_skb(skb);
+
+        return NET_RX_DROP;
+}
+
 static int dn_rt_bug(struct sk_buff *skb)
 {
         struct dn_skb_cb *cb = DN_SKB_CB(skb);
@@ -1463,7 +1475,7 @@ make_route:
 
         rt->n = neigh;
         rt->dst.lastuse = jiffies;
-        rt->dst.output = dn_rt_bug;
+        rt->dst.output = dn_rt_bug_sk;
         switch (res.type) {
         case RTN_UNICAST:
                 rt->dst.input = dn_forward;
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 1a629f870274..255aa9946fe7 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -250,7 +250,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
         bool dev_match;
 
         fl4.flowi4_oif = 0;
-        fl4.flowi4_iif = oif;
+        fl4.flowi4_iif = oif ? : LOOPBACK_IFINDEX;
         fl4.daddr = src;
         fl4.saddr = dst;
         fl4.flowi4_tos = tos;
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index b53f0bf84dca..8a043f03c88e 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -631,6 +631,7 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
                         .daddr = nh->nh_gw,
                         .flowi4_scope = cfg->fc_scope + 1,
                         .flowi4_oif = nh->nh_oif,
+                        .flowi4_iif = LOOPBACK_IFINDEX,
                 };
 
                 /* It is not necessary, but requires a bit of thinking */
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 1a0755fea491..1cbeba5edff9 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -101,17 +101,17 @@ int __ip_local_out(struct sk_buff *skb)
                        skb_dst(skb)->dev, dst_output);
 }
 
-int ip_local_out(struct sk_buff *skb)
+int ip_local_out_sk(struct sock *sk, struct sk_buff *skb)
 {
         int err;
 
         err = __ip_local_out(skb);
         if (likely(err == 1))
-                err = dst_output(skb);
+                err = dst_output_sk(sk, skb);
 
         return err;
 }
-EXPORT_SYMBOL_GPL(ip_local_out);
+EXPORT_SYMBOL_GPL(ip_local_out_sk);
 
 static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
 {
@@ -226,9 +226,8 @@ static int ip_finish_output(struct sk_buff *skb)
         return ip_finish_output2(skb);
 }
 
-int ip_mc_output(struct sk_buff *skb)
+int ip_mc_output(struct sock *sk, struct sk_buff *skb)
 {
-        struct sock *sk = skb->sk;
         struct rtable *rt = skb_rtable(skb);
         struct net_device *dev = rt->dst.dev;
 
@@ -287,7 +286,7 @@ int ip_mc_output(struct sk_buff *skb)
                     !(IPCB(skb)->flags & IPSKB_REROUTED));
 }
 
-int ip_output(struct sk_buff *skb)
+int ip_output(struct sock *sk, struct sk_buff *skb)
 {
         struct net_device *dev = skb_dst(skb)->dev;
 
@@ -315,9 +314,9 @@ static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4)
                sizeof(fl4->saddr) + sizeof(fl4->daddr));
 }
 
-int ip_queue_xmit(struct sk_buff *skb, struct flowi *fl)
+/* Note: skb->sk can be different from sk, in case of tunnels */
+int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
 {
-        struct sock *sk = skb->sk;
         struct inet_sock *inet = inet_sk(sk);
         struct ip_options_rcu *inet_opt;
         struct flowi4 *fl4;
@@ -389,6 +388,7 @@ packet_routed:
         ip_select_ident_more(skb, &rt->dst, sk,
                              (skb_shinfo(skb)->gso_segs ?: 1) - 1);
 
+        /* TODO : should we use skb->sk here instead of sk ? */
         skb->priority = sk->sk_priority;
         skb->mark = sk->sk_mark;
 
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index e77381d1df9a..fa5b7519765f 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -670,7 +670,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
                 return;
         }
 
-        err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, protocol,
+        err = iptunnel_xmit(skb->sk, rt, skb, fl4.saddr, fl4.daddr, protocol,
                             tos, ttl, df, !net_eq(tunnel->net, dev_net(dev)));
         iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
 
@@ -722,19 +722,18 @@ static void ip_tunnel_update(struct ip_tunnel_net *itn,
 int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
 {
         int err = 0;
-        struct ip_tunnel *t;
-        struct net *net = dev_net(dev);
-        struct ip_tunnel *tunnel = netdev_priv(dev);
-        struct ip_tunnel_net *itn = net_generic(net, tunnel->ip_tnl_net_id);
+        struct ip_tunnel *t = netdev_priv(dev);
+        struct net *net = t->net;
+        struct ip_tunnel_net *itn = net_generic(net, t->ip_tnl_net_id);
 
         BUG_ON(!itn->fb_tunnel_dev);
         switch (cmd) {
         case SIOCGETTUNNEL:
-                t = NULL;
-                if (dev == itn->fb_tunnel_dev)
+                if (dev == itn->fb_tunnel_dev) {
                         t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
                         if (t == NULL)
                                 t = netdev_priv(dev);
+                }
                 memcpy(p, &t->parms, sizeof(*p));
                 break;
 
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index e0c2b1d2ea4e..bcf206c79005 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -46,7 +46,7 @@
 #include <net/netns/generic.h>
 #include <net/rtnetlink.h>
 
-int iptunnel_xmit(struct rtable *rt, struct sk_buff *skb,
+int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
                   __be32 src, __be32 dst, __u8 proto,
                   __u8 tos, __u8 ttl, __be16 df, bool xnet)
 {
@@ -76,7 +76,7 @@ int iptunnel_xmit(struct rtable *rt, struct sk_buff *skb,
         iph->ttl = ttl;
         __ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1);
 
-        err = ip_local_out(skb);
+        err = ip_local_out_sk(sk, skb);
         if (unlikely(net_xmit_eval(err)))
                 pkt_len = 0;
         return pkt_len;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 28863570dd60..d84dc8d4c916 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -455,7 +455,7 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
         struct mr_table *mrt;
         struct flowi4 fl4 = {
                 .flowi4_oif = dev->ifindex,
-                .flowi4_iif = skb->skb_iif,
+                .flowi4_iif = skb->skb_iif ? : LOOPBACK_IFINDEX,
                 .flowi4_mark = skb->mark,
         };
         int err;
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
index c49dcd0284a0..4bfaedf9b34e 100644
--- a/net/ipv4/netfilter/ipt_rpfilter.c
+++ b/net/ipv4/netfilter/ipt_rpfilter.c
@@ -89,11 +89,8 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
         if (ipv4_is_multicast(iph->daddr)) {
                 if (ipv4_is_zeronet(iph->saddr))
                         return ipv4_is_local_multicast(iph->daddr) ^ invert;
-                flow.flowi4_iif = 0;
-        } else {
-                flow.flowi4_iif = LOOPBACK_IFINDEX;
         }
-
+        flow.flowi4_iif = LOOPBACK_IFINDEX;
         flow.daddr = iph->saddr;
         flow.saddr = rpfilter_get_saddr(iph->daddr);
         flow.flowi4_oif = 0;
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index f4b19e5dde54..8210964a9f19 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -252,26 +252,33 @@ int ping_init_sock(struct sock *sk)
 {
         struct net *net = sock_net(sk);
         kgid_t group = current_egid();
-        struct group_info *group_info = get_current_groups();
-        int i, j, count = group_info->ngroups;
+        struct group_info *group_info;
+        int i, j, count;
         kgid_t low, high;
+        int ret = 0;
 
         inet_get_ping_group_range_net(net, &low, &high);
         if (gid_lte(low, group) && gid_lte(group, high))
                 return 0;
 
+        group_info = get_current_groups();
+        count = group_info->ngroups;
         for (i = 0; i < group_info->nblocks; i++) {
                 int cp_count = min_t(int, NGROUPS_PER_BLOCK, count);
                 for (j = 0; j < cp_count; j++) {
                         kgid_t gid = group_info->blocks[i][j];
                         if (gid_lte(low, gid) && gid_lte(gid, high))
-                                return 0;
+                                goto out_release_group;
                 }
 
                 count -= cp_count;
         }
 
-        return -EACCES;
+        ret = -EACCES;
+
+out_release_group:
+        put_group_info(group_info);
+        return ret;
 }
 EXPORT_SYMBOL_GPL(ping_init_sock);
 
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 34d094cadb11..db1e0da871f4 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1129,7 +1129,7 @@ static void ipv4_link_failure(struct sk_buff *skb)
                 dst_set_expires(&rt->dst, 0);
 }
 
-static int ip_rt_bug(struct sk_buff *skb)
+static int ip_rt_bug(struct sock *sk, struct sk_buff *skb)
 {
         pr_debug("%s: %pI4 -> %pI4, %s\n",
                  __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
@@ -1700,8 +1700,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 
         if (res.type == RTN_LOCAL) {
                 err = fib_validate_source(skb, saddr, daddr, tos,
-                                           LOOPBACK_IFINDEX,
-                                           dev, in_dev, &itag);
+                                           0, dev, in_dev, &itag);
                 if (err < 0)
                         goto martian_source_keep_err;
                 goto local_input;
@@ -2218,7 +2217,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
 
                 new->__use = 1;
                 new->input = dst_discard;
-                new->output = dst_discard;
+                new->output = dst_discard_sk;
 
                 new->dev = ort->dst.dev;
                 if (new->dev)
@@ -2357,7 +2356,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
                         }
                 } else
 #endif
-                        if (nla_put_u32(skb, RTA_IIF, rt->rt_iif))
+                        if (nla_put_u32(skb, RTA_IIF, skb->dev->ifindex))
                                 goto nla_put_failure;
         }
 
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 699fb102e971..025e25093984 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -981,7 +981,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
                 TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
                               tcp_skb_pcount(skb));
 
-        err = icsk->icsk_af_ops->queue_xmit(skb, &inet->cork.fl);
+        err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
         if (likely(err <= 0))
                 return err;
 
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index baa0f63731fd..40e701f2e1e0 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -86,7 +86,7 @@ int xfrm4_output_finish(struct sk_buff *skb)
         return xfrm_output(skb);
 }
 
-int xfrm4_output(struct sk_buff *skb)
+int xfrm4_output(struct sock *sk, struct sk_buff *skb)
 {
         struct dst_entry *dst = skb_dst(skb);
         struct xfrm_state *x = dst->xfrm;
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index c9138189415a..d4ade34ab375 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -224,9 +224,8 @@ static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
         return dst;
 }
 
-int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused)
+int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused)
 {
-        struct sock *sk = skb->sk;
         struct ipv6_pinfo *np = inet6_sk(sk);
         struct flowi6 fl6;
         struct dst_entry *dst;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index c98338b81d30..9d921462b57f 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1559,6 +1559,15 @@ static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
         return 0;
 }
 
+static void ip6gre_dellink(struct net_device *dev, struct list_head *head)
+{
+        struct net *net = dev_net(dev);
+        struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+
+        if (dev != ign->fb_tunnel_dev)
+                unregister_netdevice_queue(dev, head);
+}
+
 static size_t ip6gre_get_size(const struct net_device *dev)
 {
         return
@@ -1636,6 +1645,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
         .validate = ip6gre_tunnel_validate,
         .newlink = ip6gre_newlink,
         .changelink = ip6gre_changelink,
+        .dellink = ip6gre_dellink,
         .get_size = ip6gre_get_size,
         .fill_info = ip6gre_fill_info,
 };
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 3284d61577c0..40e7581374f7 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -132,7 +132,7 @@ static int ip6_finish_output(struct sk_buff *skb)
         return ip6_finish_output2(skb);
 }
 
-int ip6_output(struct sk_buff *skb)
+int ip6_output(struct sock *sk, struct sk_buff *skb)
 {
         struct net_device *dev = skb_dst(skb)->dev;
         struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index e1df691d78be..b05b609f69d1 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1340,8 +1340,8 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
         int err = 0;
         struct ip6_tnl_parm p;
         struct __ip6_tnl_parm p1;
-        struct ip6_tnl *t = NULL;
-        struct net *net = dev_net(dev);
+        struct ip6_tnl *t = netdev_priv(dev);
+        struct net *net = t->net;
         struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
 
         switch (cmd) {
@@ -1353,11 +1353,11 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                         }
                         ip6_tnl_parm_from_user(&p1, &p);
                         t = ip6_tnl_locate(net, &p1, 0);
+                        if (t == NULL)
+                                t = netdev_priv(dev);
                 } else {
                         memset(&p, 0, sizeof(p));
                 }
-                if (t == NULL)
-                        t = netdev_priv(dev);
                 ip6_tnl_parm_to_user(&p, &t->parms);
                 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof (p))) {
                         err = -EFAULT;
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 8737400af0a0..8659067da28e 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -700,7 +700,7 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
         struct mr6_table *mrt;
         struct flowi6 fl6 = {
                 .flowi6_oif = dev->ifindex,
-                .flowi6_iif = skb->skb_iif,
+                .flowi6_iif = skb->skb_iif ? : LOOPBACK_IFINDEX,
                 .flowi6_mark = skb->mark,
         };
         int err;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 5015c50a5ba7..4011617cca68 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -84,9 +84,9 @@ static void ip6_dst_ifdown(struct dst_entry *,
 static int ip6_dst_gc(struct dst_ops *ops);
 
 static int ip6_pkt_discard(struct sk_buff *skb);
-static int ip6_pkt_discard_out(struct sk_buff *skb);
+static int ip6_pkt_discard_out(struct sock *sk, struct sk_buff *skb);
 static int ip6_pkt_prohibit(struct sk_buff *skb);
-static int ip6_pkt_prohibit_out(struct sk_buff *skb);
+static int ip6_pkt_prohibit_out(struct sock *sk, struct sk_buff *skb);
 static void ip6_link_failure(struct sk_buff *skb);
 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
                                struct sk_buff *skb, u32 mtu);
@@ -290,7 +290,7 @@ static const struct rt6_info ip6_blk_hole_entry_template = {
                 .obsolete = DST_OBSOLETE_FORCE_CHK,
                 .error = -EINVAL,
                 .input = dst_discard,
-                .output = dst_discard,
+                .output = dst_discard_sk,
         },
         .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
         .rt6i_protocol  = RTPROT_KERNEL,
@@ -1058,7 +1058,7 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori
 
                 new->__use = 1;
                 new->input = dst_discard;
-                new->output = dst_discard;
+                new->output = dst_discard_sk;
 
                 if (dst_metrics_read_only(&ort->dst))
                         new->_metrics = ort->dst._metrics;
@@ -1338,7 +1338,7 @@ static unsigned int ip6_mtu(const struct dst_entry *dst)
         unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
 
         if (mtu)
-                return mtu;
+                goto out;
 
         mtu = IPV6_MIN_MTU;
 
@@ -1348,7 +1348,8 @@ static unsigned int ip6_mtu(const struct dst_entry *dst)
                 mtu = idev->cnf.mtu6;
         rcu_read_unlock();
 
-        return mtu;
+out:
+        return min_t(unsigned int, mtu, IP6_MAX_MTU);
 }
 
 static struct dst_entry *icmp6_dst_gc_list;
@@ -1576,7 +1577,7 @@ int ip6_route_add(struct fib6_config *cfg)
                 switch (cfg->fc_type) {
                 case RTN_BLACKHOLE:
                         rt->dst.error = -EINVAL;
-                        rt->dst.output = dst_discard;
+                        rt->dst.output = dst_discard_sk;
                         rt->dst.input = dst_discard;
                         break;
                 case RTN_PROHIBIT:
@@ -2128,7 +2129,7 @@ static int ip6_pkt_discard(struct sk_buff *skb)
         return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
 }
 
-static int ip6_pkt_discard_out(struct sk_buff *skb)
+static int ip6_pkt_discard_out(struct sock *sk, struct sk_buff *skb)
 {
         skb->dev = skb_dst(skb)->dev;
         return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
@@ -2139,7 +2140,7 @@ static int ip6_pkt_prohibit(struct sk_buff *skb)
         return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
 }
 
-static int ip6_pkt_prohibit_out(struct sk_buff *skb)
+static int ip6_pkt_prohibit_out(struct sock *sk, struct sk_buff *skb)
 {
         skb->dev = skb_dst(skb)->dev;
         return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 1693c8d885f0..e5a453ca302e 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -974,8 +974,9 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
                 goto out;
         }
 
-        err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, IPPROTO_IPV6, tos,
-                            ttl, df, !net_eq(tunnel->net, dev_net(dev)));
+        err = iptunnel_xmit(skb->sk, rt, skb, fl4.saddr, fl4.daddr,
+                            IPPROTO_IPV6, tos, ttl, df,
+                            !net_eq(tunnel->net, dev_net(dev)));
         iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
         return NETDEV_TX_OK;
 
@@ -1126,8 +1127,8 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
         int err = 0;
         struct ip_tunnel_parm p;
         struct ip_tunnel_prl prl;
-        struct ip_tunnel *t;
-        struct net *net = dev_net(dev);
+        struct ip_tunnel *t = netdev_priv(dev);
+        struct net *net = t->net;
         struct sit_net *sitn = net_generic(net, sit_net_id);
 #ifdef CONFIG_IPV6_SIT_6RD
         struct ip_tunnel_6rd ip6rd;
@@ -1138,16 +1139,15 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
 #ifdef CONFIG_IPV6_SIT_6RD
         case SIOCGET6RD:
 #endif
-                t = NULL;
                 if (dev == sitn->fb_tunnel_dev) {
                         if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
                                 err = -EFAULT;
                                 break;
                         }
                         t = ipip6_tunnel_locate(net, &p, 0);
+                        if (t == NULL)
+                                t = netdev_priv(dev);
                 }
-                if (t == NULL)
-                        t = netdev_priv(dev);
 
                 err = -EFAULT;
                 if (cmd == SIOCGETTUNNEL) {
@@ -1243,9 +1243,6 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
                 err = -EINVAL;
                 if (dev == sitn->fb_tunnel_dev)
                         goto done;
-                err = -ENOENT;
-                if (!(t = netdev_priv(dev)))
-                        goto done;
                 err = ipip6_tunnel_get_prl(t, ifr->ifr_ifru.ifru_data);
                 break;
 
@@ -1261,9 +1258,6 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
                 err = -EFAULT;
                 if (copy_from_user(&prl, ifr->ifr_ifru.ifru_data, sizeof(prl)))
                         goto done;
-                err = -ENOENT;
-                if (!(t = netdev_priv(dev)))
-                        goto done;
 
                 switch (cmd) {
                 case SIOCDELPRL:
@@ -1291,8 +1285,6 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
                                    sizeof(ip6rd)))
                         goto done;
 
-                t = netdev_priv(dev);
-
                 if (cmd != SIOCDEL6RD) {
                         err = ipip6_tunnel_update_6rd(t, &ip6rd);
                         if (err < 0)
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 6cd625e37706..19ef329bdbf8 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -163,7 +163,7 @@ static int __xfrm6_output(struct sk_buff *skb)
         return x->outer_mode->afinfo->output_finish(skb);
 }
 
-int xfrm6_output(struct sk_buff *skb)
+int xfrm6_output(struct sock *sk, struct sk_buff *skb)
 {
         return NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL,
                        skb_dst(skb)->dev, __xfrm6_output);
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 47f7a5490555..a4e37d7158dc 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1131,10 +1131,10 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
         skb->local_df = 1;
 #if IS_ENABLED(CONFIG_IPV6)
         if (tunnel->sock->sk_family == PF_INET6 && !tunnel->v4mapped)
-                error = inet6_csk_xmit(skb, NULL);
+                error = inet6_csk_xmit(tunnel->sock, skb, NULL);
         else
 #endif
-                error = ip_queue_xmit(skb, fl);
+                error = ip_queue_xmit(tunnel->sock, skb, fl);
 
         /* Update stats */
         if (error >= 0) {
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 0b44d855269c..3397fe6897c0 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -487,7 +487,7 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
 
 xmit:
         /* Queue the packet to IP for output */
-        rc = ip_queue_xmit(skb, &inet->cork.fl);
+        rc = ip_queue_xmit(sk, skb, &inet->cork.fl);
         rcu_read_unlock();
 
 error:
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index bd1fd8ea5105..75b5dd2c9267 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -249,7 +249,7 @@ ieee80211_new_chanctx(struct ieee80211_local *local,
 
         if (!local->use_chanctx) {
                 local->_oper_chandef = *chandef;
-                ieee80211_hw_config(local, 0);
+                ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
         } else {
                 err = drv_add_chanctx(local, ctx);
                 if (err) {
@@ -286,7 +286,7 @@ static void ieee80211_free_chanctx(struct ieee80211_local *local,
                         check_single_channel = true;
                 local->hw.conf.radar_enabled = false;
 
-                ieee80211_hw_config(local, 0);
+                ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
         } else {
                 drv_remove_chanctx(local, ctx);
         }
@@ -492,6 +492,13 @@ void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
                 rx_chains_static = max(rx_chains_static, needed_static);
                 rx_chains_dynamic = max(rx_chains_dynamic, needed_dynamic);
         }
+
+        /* Disable SMPS for the monitor interface */
+        sdata = rcu_dereference(local->monitor_sdata);
+        if (sdata &&
+            rcu_access_pointer(sdata->vif.chanctx_conf) == &chanctx->conf)
+                rx_chains_dynamic = rx_chains_static = local->rx_chains;
+
         rcu_read_unlock();
 
         if (!local->use_chanctx) {
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index b055f6a55c68..4c1bf61bc778 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -148,6 +148,8 @@ static u32 ieee80211_hw_conf_chan(struct ieee80211_local *local)
         list_for_each_entry_rcu(sdata, &local->interfaces, list) {
                 if (!rcu_access_pointer(sdata->vif.chanctx_conf))
                         continue;
+                if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+                        continue;
                 power = min(power, sdata->vif.bss_conf.txpower);
         }
         rcu_read_unlock();
@@ -199,7 +201,7 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
 {
         struct ieee80211_local *local = sdata->local;
 
-        if (!changed)
+        if (!changed || sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
                 return;
 
         drv_bss_info_changed(local, sdata, &sdata->vif.bss_conf, changed);
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index 0c2a29484c07..6fb38558a5e6 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -355,6 +355,7 @@ void ieee80211_sw_roc_work(struct work_struct *work)
                 struct ieee80211_roc_work *dep;
 
                 /* start this ROC */
+                ieee80211_offchannel_stop_vifs(local);
 
                 /* switch channel etc */
                 ieee80211_recalc_idle(local);
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index e6e574a307c8..00ba90b02ab2 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -618,6 +618,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
                                              sta, true, acked);
 
                 if ((local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) &&
+                    (ieee80211_is_data(hdr->frame_control)) &&
                     (rates_idx != -1))
                         sta->last_tx_rate = info->status.rates[rates_idx];
 
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 6dba48efe01e..75421f2ba8be 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1795,6 +1795,7 @@ int nf_conntrack_init_net(struct net *net)
         int cpu;
 
         atomic_set(&net->ct.count, 0);
+        seqcount_init(&net->ct.generation);
 
         net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu);
         if (!net->ct.pcpu_lists)
diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c
index 7bd03decd36c..825c3e3f8305 100644
--- a/net/netfilter/nf_conntrack_pptp.c
+++ b/net/netfilter/nf_conntrack_pptp.c
@@ -605,32 +605,14 @@ static struct nf_conntrack_helper pptp __read_mostly = {
         .expect_policy = &pptp_exp_policy,
 };
 
-static void nf_conntrack_pptp_net_exit(struct net *net)
-{
-        nf_ct_gre_keymap_flush(net);
-}
-
-static struct pernet_operations nf_conntrack_pptp_net_ops = {
-        .exit = nf_conntrack_pptp_net_exit,
-};
-
 static int __init nf_conntrack_pptp_init(void)
 {
-        int rv;
-
-        rv = nf_conntrack_helper_register(&pptp);
-        if (rv < 0)
-                return rv;
-        rv = register_pernet_subsys(&nf_conntrack_pptp_net_ops);
-        if (rv < 0)
-                nf_conntrack_helper_unregister(&pptp);
-        return rv;
+        return nf_conntrack_helper_register(&pptp);
 }
 
 static void __exit nf_conntrack_pptp_fini(void)
 {
         nf_conntrack_helper_unregister(&pptp);
-        unregister_pernet_subsys(&nf_conntrack_pptp_net_ops);
 }
 
 module_init(nf_conntrack_pptp_init);
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index 9d9c0dade602..d5665739e3b1 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -66,7 +66,7 @@ static inline struct netns_proto_gre *gre_pernet(struct net *net)
         return net_generic(net, proto_gre_net_id);
 }
 
-void nf_ct_gre_keymap_flush(struct net *net)
+static void nf_ct_gre_keymap_flush(struct net *net)
 {
         struct netns_proto_gre *net_gre = gre_pernet(net);
         struct nf_ct_gre_keymap *km, *tmp;
@@ -78,7 +78,6 @@ void nf_ct_gre_keymap_flush(struct net *net)
         }
         write_unlock_bh(&net_gre->keymap_lock);
 }
-EXPORT_SYMBOL(nf_ct_gre_keymap_flush);
 
 static inline int gre_key_cmpfn(const struct nf_ct_gre_keymap *km,
                                 const struct nf_conntrack_tuple *t)
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index 90998a6ff8b9..804105391b9a 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -25,9 +25,8 @@ static void nft_cmp_fast_eval(const struct nft_expr *expr,
                               struct nft_data data[NFT_REG_MAX + 1])
 {
         const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
-        u32 mask;
+        u32 mask = nft_cmp_fast_mask(priv->len);
 
-        mask = ~0U >> (sizeof(priv->data) * BITS_PER_BYTE - priv->len);
         if ((data[priv->sreg].data[0] & mask) == priv->data)
                 return;
         data[NFT_REG_VERDICT].verdict = NFT_BREAK;
diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c
index 954925db414d..e2b3f51c81f1 100644
--- a/net/netfilter/nft_cmp.c
+++ b/net/netfilter/nft_cmp.c
@@ -128,7 +128,7 @@ static int nft_cmp_fast_init(const struct nft_ctx *ctx,
         BUG_ON(err < 0);
         desc.len *= BITS_PER_BYTE;
 
-        mask = ~0U >> (sizeof(priv->data) * BITS_PER_BYTE - desc.len);
+        mask = nft_cmp_fast_mask(desc.len);
         priv->data = data.data[0] & mask;
         priv->len = desc.len;
         return 0;
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
index a3d6951602db..ebb6e2442554 100644
--- a/net/openvswitch/vport-gre.c
+++ b/net/openvswitch/vport-gre.c
@@ -174,7 +174,7 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
 
         skb->local_df = 1;
 
-        return iptunnel_xmit(rt, skb, fl.saddr,
+        return iptunnel_xmit(skb->sk, rt, skb, fl.saddr,
                              OVS_CB(skb)->tun_key->ipv4_dst, IPPROTO_GRE,
                              OVS_CB(skb)->tun_key->ipv4_tos,
                              OVS_CB(skb)->tun_key->ipv4_ttl, df, false);
diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 4f6d6f9d1274..39579c3e0d14 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c | |||
@@ -1395,35 +1395,44 @@ static inline bool sctp_peer_needs_update(struct sctp_association *asoc) | |||
1395 | return false; | 1395 | return false; |
1396 | } | 1396 | } |
1397 | 1397 | ||
1398 | /* Update asoc's rwnd for the approximated state in the buffer, | 1398 | /* Increase asoc's rwnd by len and send any window update SACK if needed. */ |
1399 | * and check whether SACK needs to be sent. | 1399 | void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len) |
1400 | */ | ||
1401 | void sctp_assoc_rwnd_update(struct sctp_association *asoc, bool update_peer) | ||
1402 | { | 1400 | { |
1403 | int rx_count; | ||
1404 | struct sctp_chunk *sack; | 1401 | struct sctp_chunk *sack; |
1405 | struct timer_list *timer; | 1402 | struct timer_list *timer; |
1406 | 1403 | ||
1407 | if (asoc->ep->rcvbuf_policy) | 1404 | if (asoc->rwnd_over) { |
1408 | rx_count = atomic_read(&asoc->rmem_alloc); | 1405 | if (asoc->rwnd_over >= len) { |
1409 | else | 1406 | asoc->rwnd_over -= len; |
1410 | rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc); | 1407 | } else { |
1408 | asoc->rwnd += (len - asoc->rwnd_over); | ||
1409 | asoc->rwnd_over = 0; | ||
1410 | } | ||
1411 | } else { | ||
1412 | asoc->rwnd += len; | ||
1413 | } | ||
1411 | 1414 | ||
1412 | if ((asoc->base.sk->sk_rcvbuf - rx_count) > 0) | 1415 | /* If we had window pressure, start recovering it |
1413 | asoc->rwnd = (asoc->base.sk->sk_rcvbuf - rx_count) >> 1; | 1416 | * once our rwnd had reached the accumulated pressure |
1414 | else | 1417 | * threshold. The idea is to recover slowly, but up |
1415 | asoc->rwnd = 0; | 1418 | * to the initial advertised window. |
1419 | */ | ||
1420 | if (asoc->rwnd_press && asoc->rwnd >= asoc->rwnd_press) { | ||
1421 | int change = min(asoc->pathmtu, asoc->rwnd_press); | ||
1422 | asoc->rwnd += change; | ||
1423 | asoc->rwnd_press -= change; | ||
1424 | } | ||
1416 | 1425 | ||
1417 | pr_debug("%s: asoc:%p rwnd=%u, rx_count=%d, sk_rcvbuf=%d\n", | 1426 | pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n", |
1418 | __func__, asoc, asoc->rwnd, rx_count, | 1427 | __func__, asoc, len, asoc->rwnd, asoc->rwnd_over, |
1419 | asoc->base.sk->sk_rcvbuf); | 1428 | asoc->a_rwnd); |
1420 | 1429 | ||
1421 | /* Send a window update SACK if the rwnd has increased by at least the | 1430 | /* Send a window update SACK if the rwnd has increased by at least the |
1422 | * minimum of the association's PMTU and half of the receive buffer. | 1431 | * minimum of the association's PMTU and half of the receive buffer. |
1423 | * The algorithm used is similar to the one described in | 1432 | * The algorithm used is similar to the one described in |
1424 | * Section 4.2.3.3 of RFC 1122. | 1433 | * Section 4.2.3.3 of RFC 1122. |
1425 | */ | 1434 | */ |
1426 | if (update_peer && sctp_peer_needs_update(asoc)) { | 1435 | if (sctp_peer_needs_update(asoc)) { |
1427 | asoc->a_rwnd = asoc->rwnd; | 1436 | asoc->a_rwnd = asoc->rwnd; |
1428 | 1437 | ||
1429 | pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u " | 1438 | pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u " |
@@ -1445,6 +1454,45 @@ void sctp_assoc_rwnd_update(struct sctp_association *asoc, bool update_peer) | |||
1445 | } | 1454 | } |
1446 | } | 1455 | } |
1447 | 1456 | ||
1457 | /* Decrease asoc's rwnd by len. */ | ||
1458 | void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len) | ||
1459 | { | ||
1460 | int rx_count; | ||
1461 | int over = 0; | ||
1462 | |||
1463 | if (unlikely(!asoc->rwnd || asoc->rwnd_over)) | ||
1464 | pr_debug("%s: association:%p has asoc->rwnd:%u, " | ||
1465 | "asoc->rwnd_over:%u!\n", __func__, asoc, | ||
1466 | asoc->rwnd, asoc->rwnd_over); | ||
1467 | |||
1468 | if (asoc->ep->rcvbuf_policy) | ||
1469 | rx_count = atomic_read(&asoc->rmem_alloc); | ||
1470 | else | ||
1471 | rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc); | ||
1472 | |||
1473 | /* If we've reached or overflowed our receive buffer, announce | ||
1474 | * a 0 rwnd if rwnd would still be positive. Store the | ||
1475 | * potential pressure overflow so that the window can be restored | ||
1476 | * back to original value. | ||
1477 | */ | ||
1478 | if (rx_count >= asoc->base.sk->sk_rcvbuf) | ||
1479 | over = 1; | ||
1480 | |||
1481 | if (asoc->rwnd >= len) { | ||
1482 | asoc->rwnd -= len; | ||
1483 | if (over) { | ||
1484 | asoc->rwnd_press += asoc->rwnd; | ||
1485 | asoc->rwnd = 0; | ||
1486 | } | ||
1487 | } else { | ||
1488 | asoc->rwnd_over = len - asoc->rwnd; | ||
1489 | asoc->rwnd = 0; | ||
1490 | } | ||
1491 | |||
1492 | pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n", | ||
1493 | __func__, asoc, len, asoc->rwnd, asoc->rwnd_over, | ||
1494 | asoc->rwnd_press); | ||
1495 | } | ||
1448 | 1496 | ||
1449 | /* Build the bind address list for the association based on info from the | 1497 | /* Build the bind address list for the association based on info from the |
1450 | * local endpoint and the remote peer. | 1498 | * local endpoint and the remote peer. |
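Editor's note on the associola.c hunks above: the old sctp_assoc_rwnd_update() is replaced by explicit increase/decrease helpers that juggle three counters: rwnd (the advertised window), rwnd_over (data accepted beyond the window) and rwnd_press (window withheld under receive-buffer pressure). The following is only a minimal userspace sketch of that arithmetic, assuming a made-up rwnd_state struct and a fixed pathmtu; it is not the kernel code itself.

#include <stdio.h>

/* Simplified stand-in for the three window counters used in the diff. */
struct rwnd_state {
	unsigned int rwnd;       /* currently advertised receive window   */
	unsigned int rwnd_over;  /* data accepted beyond the window       */
	unsigned int rwnd_press; /* window withheld under buffer pressure */
	unsigned int pathmtu;    /* recovery step when pressure eases     */
};

/* Mirrors the increase path: pay back any overflow first, then recover
 * pressure in pathmtu-sized steps once rwnd has grown large enough. */
static void rwnd_increase(struct rwnd_state *s, unsigned int len)
{
	if (s->rwnd_over) {
		if (s->rwnd_over >= len) {
			s->rwnd_over -= len;
		} else {
			s->rwnd += len - s->rwnd_over;
			s->rwnd_over = 0;
		}
	} else {
		s->rwnd += len;
	}

	if (s->rwnd_press && s->rwnd >= s->rwnd_press) {
		unsigned int change = s->pathmtu < s->rwnd_press ?
				      s->pathmtu : s->rwnd_press;
		s->rwnd += change;
		s->rwnd_press -= change;
	}
}

/* Mirrors the decrease path; 'over_rcvbuf' models rx_count >= sk_rcvbuf. */
static void rwnd_decrease(struct rwnd_state *s, unsigned int len,
			  int over_rcvbuf)
{
	if (s->rwnd >= len) {
		s->rwnd -= len;
		if (over_rcvbuf) {
			s->rwnd_press += s->rwnd;
			s->rwnd = 0;
		}
	} else {
		s->rwnd_over = len - s->rwnd;
		s->rwnd = 0;
	}
}

int main(void)
{
	struct rwnd_state s = { .rwnd = 4096, .pathmtu = 1500 };

	rwnd_decrease(&s, 3000, 0);	/* normal data arrival            */
	rwnd_decrease(&s, 2000, 0);	/* more data than window remains  */
	printf("rwnd=%u over=%u press=%u\n", s.rwnd, s.rwnd_over, s.rwnd_press);
	rwnd_increase(&s, 5000);	/* reader consumed the backlog    */
	printf("rwnd=%u over=%u press=%u\n", s.rwnd, s.rwnd_over, s.rwnd_press);
	return 0;
}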
diff --git a/net/sctp/auth.c b/net/sctp/auth.c index 683c7d1b1306..0e8529113dc5 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c | |||
@@ -386,14 +386,13 @@ nomem: | |||
386 | */ | 386 | */ |
387 | int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp) | 387 | int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp) |
388 | { | 388 | { |
389 | struct net *net = sock_net(asoc->base.sk); | ||
390 | struct sctp_auth_bytes *secret; | 389 | struct sctp_auth_bytes *secret; |
391 | struct sctp_shared_key *ep_key; | 390 | struct sctp_shared_key *ep_key; |
392 | 391 | ||
393 | /* If we don't support AUTH, or peer is not capable | 392 | /* If we don't support AUTH, or peer is not capable |
394 | * we don't need to do anything. | 393 | * we don't need to do anything. |
395 | */ | 394 | */ |
396 | if (!net->sctp.auth_enable || !asoc->peer.auth_capable) | 395 | if (!asoc->ep->auth_enable || !asoc->peer.auth_capable) |
397 | return 0; | 396 | return 0; |
398 | 397 | ||
399 | /* If the key_id is non-zero and we couldn't find an | 398 | /* If the key_id is non-zero and we couldn't find an |
@@ -440,16 +439,16 @@ struct sctp_shared_key *sctp_auth_get_shkey( | |||
440 | */ | 439 | */ |
441 | int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp) | 440 | int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp) |
442 | { | 441 | { |
443 | struct net *net = sock_net(ep->base.sk); | ||
444 | struct crypto_hash *tfm = NULL; | 442 | struct crypto_hash *tfm = NULL; |
445 | __u16 id; | 443 | __u16 id; |
446 | 444 | ||
447 | /* if the transforms are already allocted, we are done */ | 445 | /* If AUTH extension is disabled, we are done */ |
448 | if (!net->sctp.auth_enable) { | 446 | if (!ep->auth_enable) { |
449 | ep->auth_hmacs = NULL; | 447 | ep->auth_hmacs = NULL; |
450 | return 0; | 448 | return 0; |
451 | } | 449 | } |
452 | 450 | ||
451 | /* If the transforms are already allocated, we are done */ | ||
453 | if (ep->auth_hmacs) | 452 | if (ep->auth_hmacs) |
454 | return 0; | 453 | return 0; |
455 | 454 | ||
@@ -665,12 +664,10 @@ static int __sctp_auth_cid(sctp_cid_t chunk, struct sctp_chunks_param *param) | |||
665 | /* Check if peer requested that this chunk is authenticated */ | 664 | /* Check if peer requested that this chunk is authenticated */ |
666 | int sctp_auth_send_cid(sctp_cid_t chunk, const struct sctp_association *asoc) | 665 | int sctp_auth_send_cid(sctp_cid_t chunk, const struct sctp_association *asoc) |
667 | { | 666 | { |
668 | struct net *net; | ||
669 | if (!asoc) | 667 | if (!asoc) |
670 | return 0; | 668 | return 0; |
671 | 669 | ||
672 | net = sock_net(asoc->base.sk); | 670 | if (!asoc->ep->auth_enable || !asoc->peer.auth_capable) |
673 | if (!net->sctp.auth_enable || !asoc->peer.auth_capable) | ||
674 | return 0; | 671 | return 0; |
675 | 672 | ||
676 | return __sctp_auth_cid(chunk, asoc->peer.peer_chunks); | 673 | return __sctp_auth_cid(chunk, asoc->peer.peer_chunks); |
@@ -679,12 +676,10 @@ int sctp_auth_send_cid(sctp_cid_t chunk, const struct sctp_association *asoc) | |||
679 | /* Check if we requested that peer authenticate this chunk. */ | 676 | /* Check if we requested that peer authenticate this chunk. */ |
680 | int sctp_auth_recv_cid(sctp_cid_t chunk, const struct sctp_association *asoc) | 677 | int sctp_auth_recv_cid(sctp_cid_t chunk, const struct sctp_association *asoc) |
681 | { | 678 | { |
682 | struct net *net; | ||
683 | if (!asoc) | 679 | if (!asoc) |
684 | return 0; | 680 | return 0; |
685 | 681 | ||
686 | net = sock_net(asoc->base.sk); | 682 | if (!asoc->ep->auth_enable) |
687 | if (!net->sctp.auth_enable) | ||
688 | return 0; | 683 | return 0; |
689 | 684 | ||
690 | return __sctp_auth_cid(chunk, | 685 | return __sctp_auth_cid(chunk, |
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index 8e5fdea05216..3d9f429858dc 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c | |||
@@ -68,7 +68,8 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, | |||
68 | if (!ep->digest) | 68 | if (!ep->digest) |
69 | return NULL; | 69 | return NULL; |
70 | 70 | ||
71 | if (net->sctp.auth_enable) { | 71 | ep->auth_enable = net->sctp.auth_enable; |
72 | if (ep->auth_enable) { | ||
72 | /* Allocate space for HMACS and CHUNKS authentication | 73 | /* Allocate space for HMACS and CHUNKS authentication |
73 | * variables. There are arrays that we encode directly | 74 | * variables. There are arrays that we encode directly |
74 | * into parameters to make the rest of the operations easier. | 75 | * into parameters to make the rest of the operations easier. |
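Editor's note on the auth.c and endpointola.c hunks above: every net->sctp.auth_enable lookup becomes a per-endpoint auth_enable flag, copied from the net-wide sysctl when the endpoint is created, so a later sysctl flip no longer changes the behaviour of endpoints that already exist. A rough sketch of that snapshot-at-creation pattern, with invented struct and function names rather than the kernel's:

#include <stdlib.h>

/* Hypothetical stand-ins for the net-wide default and a per-endpoint copy. */
struct net_defaults {
	int auth_enable;		/* the sysctl-controlled default */
};

struct endpoint {
	int auth_enable;		/* snapshot taken at creation    */
};

/* Copy the current default once; afterwards the endpoint consults only
 * its own flag, just as the patched SCTP code checks ep->auth_enable. */
static struct endpoint *endpoint_create(const struct net_defaults *net)
{
	struct endpoint *ep = calloc(1, sizeof(*ep));

	if (ep)
		ep->auth_enable = net->auth_enable;
	return ep;
}

/* Guard of the kind used by the send/receive/sockopt paths in the diff. */
static int endpoint_auth_allowed(const struct endpoint *ep)
{
	return ep->auth_enable;
}

int main(void)
{
	struct net_defaults net = { .auth_enable = 1 };
	struct endpoint *ep = endpoint_create(&net);
	int ok;

	net.auth_enable = 0;	/* a later "sysctl" change...            */
	ok = ep && endpoint_auth_allowed(ep);	/* ...does not affect ep */
	free(ep);
	return ok ? 0 : 1;
}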
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 4e1d0fcb028e..c09757fbf803 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c | |||
@@ -957,7 +957,7 @@ static inline int sctp_v4_xmit(struct sk_buff *skb, | |||
957 | 957 | ||
958 | SCTP_INC_STATS(sock_net(&inet->sk), SCTP_MIB_OUTSCTPPACKS); | 958 | SCTP_INC_STATS(sock_net(&inet->sk), SCTP_MIB_OUTSCTPPACKS); |
959 | 959 | ||
960 | return ip_queue_xmit(skb, &transport->fl); | 960 | return ip_queue_xmit(&inet->sk, skb, &transport->fl); |
961 | } | 961 | } |
962 | 962 | ||
963 | static struct sctp_af sctp_af_inet; | 963 | static struct sctp_af sctp_af_inet; |
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 3a1767ef3201..fee5552ddf92 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c | |||
@@ -219,6 +219,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, | |||
219 | gfp_t gfp, int vparam_len) | 219 | gfp_t gfp, int vparam_len) |
220 | { | 220 | { |
221 | struct net *net = sock_net(asoc->base.sk); | 221 | struct net *net = sock_net(asoc->base.sk); |
222 | struct sctp_endpoint *ep = asoc->ep; | ||
222 | sctp_inithdr_t init; | 223 | sctp_inithdr_t init; |
223 | union sctp_params addrs; | 224 | union sctp_params addrs; |
224 | size_t chunksize; | 225 | size_t chunksize; |
@@ -278,7 +279,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, | |||
278 | chunksize += vparam_len; | 279 | chunksize += vparam_len; |
279 | 280 | ||
280 | /* Account for AUTH related parameters */ | 281 | /* Account for AUTH related parameters */ |
281 | if (net->sctp.auth_enable) { | 282 | if (ep->auth_enable) { |
282 | /* Add random parameter length*/ | 283 | /* Add random parameter length*/ |
283 | chunksize += sizeof(asoc->c.auth_random); | 284 | chunksize += sizeof(asoc->c.auth_random); |
284 | 285 | ||
@@ -363,7 +364,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, | |||
363 | } | 364 | } |
364 | 365 | ||
365 | /* Add SCTP-AUTH chunks to the parameter list */ | 366 | /* Add SCTP-AUTH chunks to the parameter list */ |
366 | if (net->sctp.auth_enable) { | 367 | if (ep->auth_enable) { |
367 | sctp_addto_chunk(retval, sizeof(asoc->c.auth_random), | 368 | sctp_addto_chunk(retval, sizeof(asoc->c.auth_random), |
368 | asoc->c.auth_random); | 369 | asoc->c.auth_random); |
369 | if (auth_hmacs) | 370 | if (auth_hmacs) |
@@ -2010,7 +2011,7 @@ static void sctp_process_ext_param(struct sctp_association *asoc, | |||
2010 | /* if the peer reports AUTH, assume that he | 2011 | /* if the peer reports AUTH, assume that he |
2011 | * supports AUTH. | 2012 | * supports AUTH. |
2012 | */ | 2013 | */ |
2013 | if (net->sctp.auth_enable) | 2014 | if (asoc->ep->auth_enable) |
2014 | asoc->peer.auth_capable = 1; | 2015 | asoc->peer.auth_capable = 1; |
2015 | break; | 2016 | break; |
2016 | case SCTP_CID_ASCONF: | 2017 | case SCTP_CID_ASCONF: |
@@ -2102,6 +2103,7 @@ static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc, | |||
2102 | * SCTP_IERROR_NO_ERROR - continue with the chunk | 2103 | * SCTP_IERROR_NO_ERROR - continue with the chunk |
2103 | */ | 2104 | */ |
2104 | static sctp_ierror_t sctp_verify_param(struct net *net, | 2105 | static sctp_ierror_t sctp_verify_param(struct net *net, |
2106 | const struct sctp_endpoint *ep, | ||
2105 | const struct sctp_association *asoc, | 2107 | const struct sctp_association *asoc, |
2106 | union sctp_params param, | 2108 | union sctp_params param, |
2107 | sctp_cid_t cid, | 2109 | sctp_cid_t cid, |
@@ -2152,7 +2154,7 @@ static sctp_ierror_t sctp_verify_param(struct net *net, | |||
2152 | goto fallthrough; | 2154 | goto fallthrough; |
2153 | 2155 | ||
2154 | case SCTP_PARAM_RANDOM: | 2156 | case SCTP_PARAM_RANDOM: |
2155 | if (!net->sctp.auth_enable) | 2157 | if (!ep->auth_enable) |
2156 | goto fallthrough; | 2158 | goto fallthrough; |
2157 | 2159 | ||
2158 | /* SCTP-AUTH: Section 6.1 | 2160 |
@@ -2169,7 +2171,7 @@ static sctp_ierror_t sctp_verify_param(struct net *net, | |||
2169 | break; | 2171 | break; |
2170 | 2172 | ||
2171 | case SCTP_PARAM_CHUNKS: | 2173 | case SCTP_PARAM_CHUNKS: |
2172 | if (!net->sctp.auth_enable) | 2174 | if (!ep->auth_enable) |
2173 | goto fallthrough; | 2175 | goto fallthrough; |
2174 | 2176 | ||
2175 | /* SCTP-AUTH: Section 3.2 | 2177 | /* SCTP-AUTH: Section 3.2 |
@@ -2185,7 +2187,7 @@ static sctp_ierror_t sctp_verify_param(struct net *net, | |||
2185 | break; | 2187 | break; |
2186 | 2188 | ||
2187 | case SCTP_PARAM_HMAC_ALGO: | 2189 | case SCTP_PARAM_HMAC_ALGO: |
2188 | if (!net->sctp.auth_enable) | 2190 | if (!ep->auth_enable) |
2189 | goto fallthrough; | 2191 | goto fallthrough; |
2190 | 2192 | ||
2191 | hmacs = (struct sctp_hmac_algo_param *)param.p; | 2193 | hmacs = (struct sctp_hmac_algo_param *)param.p; |
@@ -2220,10 +2222,9 @@ fallthrough: | |||
2220 | } | 2222 | } |
2221 | 2223 | ||
2222 | /* Verify the INIT packet before we process it. */ | 2224 | /* Verify the INIT packet before we process it. */ |
2223 | int sctp_verify_init(struct net *net, const struct sctp_association *asoc, | 2225 | int sctp_verify_init(struct net *net, const struct sctp_endpoint *ep, |
2224 | sctp_cid_t cid, | 2226 | const struct sctp_association *asoc, sctp_cid_t cid, |
2225 | sctp_init_chunk_t *peer_init, | 2227 | sctp_init_chunk_t *peer_init, struct sctp_chunk *chunk, |
2226 | struct sctp_chunk *chunk, | ||
2227 | struct sctp_chunk **errp) | 2228 | struct sctp_chunk **errp) |
2228 | { | 2229 | { |
2229 | union sctp_params param; | 2230 | union sctp_params param; |
@@ -2264,8 +2265,8 @@ int sctp_verify_init(struct net *net, const struct sctp_association *asoc, | |||
2264 | 2265 | ||
2265 | /* Verify all the variable length parameters */ | 2266 | /* Verify all the variable length parameters */ |
2266 | sctp_walk_params(param, peer_init, init_hdr.params) { | 2267 | sctp_walk_params(param, peer_init, init_hdr.params) { |
2267 | 2268 | result = sctp_verify_param(net, ep, asoc, param, cid, | |
2268 | result = sctp_verify_param(net, asoc, param, cid, chunk, errp); | 2269 | chunk, errp); |
2269 | switch (result) { | 2270 | switch (result) { |
2270 | case SCTP_IERROR_ABORT: | 2271 | case SCTP_IERROR_ABORT: |
2271 | case SCTP_IERROR_NOMEM: | 2272 | case SCTP_IERROR_NOMEM: |
@@ -2497,6 +2498,7 @@ static int sctp_process_param(struct sctp_association *asoc, | |||
2497 | struct sctp_af *af; | 2498 | struct sctp_af *af; |
2498 | union sctp_addr_param *addr_param; | 2499 | union sctp_addr_param *addr_param; |
2499 | struct sctp_transport *t; | 2500 | struct sctp_transport *t; |
2501 | struct sctp_endpoint *ep = asoc->ep; | ||
2500 | 2502 | ||
2501 | /* We maintain all INIT parameters in network byte order all the | 2503 | /* We maintain all INIT parameters in network byte order all the |
2502 | * time. This allows us to not worry about whether the parameters | 2504 | * time. This allows us to not worry about whether the parameters |
@@ -2636,7 +2638,7 @@ do_addr_param: | |||
2636 | goto fall_through; | 2638 | goto fall_through; |
2637 | 2639 | ||
2638 | case SCTP_PARAM_RANDOM: | 2640 | case SCTP_PARAM_RANDOM: |
2639 | if (!net->sctp.auth_enable) | 2641 | if (!ep->auth_enable) |
2640 | goto fall_through; | 2642 | goto fall_through; |
2641 | 2643 | ||
2642 | /* Save peer's random parameter */ | 2644 | /* Save peer's random parameter */ |
@@ -2649,7 +2651,7 @@ do_addr_param: | |||
2649 | break; | 2651 | break; |
2650 | 2652 | ||
2651 | case SCTP_PARAM_HMAC_ALGO: | 2653 | case SCTP_PARAM_HMAC_ALGO: |
2652 | if (!net->sctp.auth_enable) | 2654 | if (!ep->auth_enable) |
2653 | goto fall_through; | 2655 | goto fall_through; |
2654 | 2656 | ||
2655 | /* Save peer's HMAC list */ | 2657 | /* Save peer's HMAC list */ |
@@ -2665,7 +2667,7 @@ do_addr_param: | |||
2665 | break; | 2667 | break; |
2666 | 2668 | ||
2667 | case SCTP_PARAM_CHUNKS: | 2669 | case SCTP_PARAM_CHUNKS: |
2668 | if (!net->sctp.auth_enable) | 2670 | if (!ep->auth_enable) |
2669 | goto fall_through; | 2671 | goto fall_through; |
2670 | 2672 | ||
2671 | asoc->peer.peer_chunks = kmemdup(param.p, | 2673 | asoc->peer.peer_chunks = kmemdup(param.p, |
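Editor's note on the sm_make_chunk.c hunks above: the INIT verification path now threads the endpoint down to sctp_verify_param(), so RANDOM, CHUNKS and HMAC-ALGO parameters are only treated as AUTH parameters when the receiving endpoint itself has AUTH enabled; otherwise they fall through to the generic unknown-parameter handling. A small standalone model of that fall-through, with made-up parameter codes and verdict names:

#include <stdio.h>

enum param_type { PARAM_RANDOM, PARAM_CHUNKS, PARAM_HMAC_ALGO, PARAM_OTHER };
enum verdict    { VERDICT_OK, VERDICT_UNKNOWN };

/* Model of the verify loop: AUTH-specific parameters are only inspected
 * when the endpoint enabled AUTH; otherwise they take the same path as
 * any unrecognised parameter ("goto fallthrough" in the real code). */
static enum verdict verify_param(int ep_auth_enable, enum param_type p)
{
	switch (p) {
	case PARAM_RANDOM:
	case PARAM_CHUNKS:
	case PARAM_HMAC_ALGO:
		if (!ep_auth_enable)
			break;		/* fall through to unknown handling */
		return VERDICT_OK;	/* real code validates lengths here */
	default:
		break;
	}
	return VERDICT_UNKNOWN;
}

int main(void)
{
	printf("auth on : %s\n",
	       verify_param(1, PARAM_RANDOM) == VERDICT_OK ? "ok" : "unknown");
	printf("auth off: %s\n",
	       verify_param(0, PARAM_RANDOM) == VERDICT_OK ? "ok" : "unknown");
	return 0;
}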
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 01e002430c85..5170a1ff95a1 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
@@ -357,7 +357,7 @@ sctp_disposition_t sctp_sf_do_5_1B_init(struct net *net, | |||
357 | 357 | ||
358 | /* Verify the INIT chunk before processing it. */ | 358 | /* Verify the INIT chunk before processing it. */ |
359 | err_chunk = NULL; | 359 | err_chunk = NULL; |
360 | if (!sctp_verify_init(net, asoc, chunk->chunk_hdr->type, | 360 | if (!sctp_verify_init(net, ep, asoc, chunk->chunk_hdr->type, |
361 | (sctp_init_chunk_t *)chunk->chunk_hdr, chunk, | 361 | (sctp_init_chunk_t *)chunk->chunk_hdr, chunk, |
362 | &err_chunk)) { | 362 | &err_chunk)) { |
363 | /* This chunk contains fatal error. It is to be discarded. | 363 | /* This chunk contains fatal error. It is to be discarded. |
@@ -524,7 +524,7 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(struct net *net, | |||
524 | 524 | ||
525 | /* Verify the INIT chunk before processing it. */ | 525 | /* Verify the INIT chunk before processing it. */ |
526 | err_chunk = NULL; | 526 | err_chunk = NULL; |
527 | if (!sctp_verify_init(net, asoc, chunk->chunk_hdr->type, | 527 | if (!sctp_verify_init(net, ep, asoc, chunk->chunk_hdr->type, |
528 | (sctp_init_chunk_t *)chunk->chunk_hdr, chunk, | 528 | (sctp_init_chunk_t *)chunk->chunk_hdr, chunk, |
529 | &err_chunk)) { | 529 | &err_chunk)) { |
530 | 530 | ||
@@ -1430,7 +1430,7 @@ static sctp_disposition_t sctp_sf_do_unexpected_init( | |||
1430 | 1430 | ||
1431 | /* Verify the INIT chunk before processing it. */ | 1431 | /* Verify the INIT chunk before processing it. */ |
1432 | err_chunk = NULL; | 1432 | err_chunk = NULL; |
1433 | if (!sctp_verify_init(net, asoc, chunk->chunk_hdr->type, | 1433 | if (!sctp_verify_init(net, ep, asoc, chunk->chunk_hdr->type, |
1434 | (sctp_init_chunk_t *)chunk->chunk_hdr, chunk, | 1434 | (sctp_init_chunk_t *)chunk->chunk_hdr, chunk, |
1435 | &err_chunk)) { | 1435 | &err_chunk)) { |
1436 | /* This chunk contains fatal error. It is to be discarded. | 1436 | /* This chunk contains fatal error. It is to be discarded. |
@@ -6178,7 +6178,7 @@ static int sctp_eat_data(const struct sctp_association *asoc, | |||
6178 | * PMTU. In cases, such as loopback, this might be a rather | 6178 | * PMTU. In cases, such as loopback, this might be a rather |
6179 | * large spill over. | 6179 | * large spill over. |
6180 | */ | 6180 | */ |
6181 | if ((!chunk->data_accepted) && (!asoc->rwnd || | 6181 | if ((!chunk->data_accepted) && (!asoc->rwnd || asoc->rwnd_over || |
6182 | (datalen > asoc->rwnd + asoc->frag_point))) { | 6182 | (datalen > asoc->rwnd + asoc->frag_point))) { |
6183 | 6183 | ||
6184 | /* If this is the next TSN, consider reneging to make | 6184 | /* If this is the next TSN, consider reneging to make |
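Editor's note on the sm_statefuns.c hunk above: the acceptance test in sctp_eat_data() now also rejects a not-yet-accepted chunk while the association is in overflow (rwnd_over non-zero), not just when rwnd is zero or the chunk exceeds rwnd plus the fragmentation point. A small predicate sketch of the patched condition, with illustrative parameter names:

#include <stdbool.h>
#include <stdio.h>

/* Simplified acceptance test mirroring the patched condition. */
static bool data_fits(bool already_accepted, unsigned int rwnd,
		      unsigned int rwnd_over, unsigned int frag_point,
		      unsigned int datalen)
{
	if (already_accepted)
		return true;
	if (!rwnd || rwnd_over || datalen > rwnd + frag_point)
		return false;
	return true;
}

int main(void)
{
	/* Under the old test rwnd_over was ignored and this chunk slipped in. */
	printf("%d\n", data_fits(false, 1000, 500, 1452, 800));	/* now 0   */
	printf("%d\n", data_fits(false, 1000, 0, 1452, 800));	/* still 1 */
	return 0;
}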
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index e13519e9df80..fee06b99a4da 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -2115,6 +2115,12 @@ static int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, | |||
2115 | sctp_skb_pull(skb, copied); | 2115 | sctp_skb_pull(skb, copied); |
2116 | skb_queue_head(&sk->sk_receive_queue, skb); | 2116 | skb_queue_head(&sk->sk_receive_queue, skb); |
2117 | 2117 | ||
2118 | /* When only partial message is copied to the user, increase | ||
2119 | * rwnd by that amount. If all the data in the skb is read, | ||
2120 | * rwnd is updated when the event is freed. | ||
2121 | */ | ||
2122 | if (!sctp_ulpevent_is_notification(event)) | ||
2123 | sctp_assoc_rwnd_increase(event->asoc, copied); | ||
2118 | goto out; | 2124 | goto out; |
2119 | } else if ((event->msg_flags & MSG_NOTIFICATION) || | 2125 | } else if ((event->msg_flags & MSG_NOTIFICATION) || |
2120 | (event->msg_flags & MSG_EOR)) | 2126 | (event->msg_flags & MSG_EOR)) |
@@ -3315,10 +3321,10 @@ static int sctp_setsockopt_auth_chunk(struct sock *sk, | |||
3315 | char __user *optval, | 3321 | char __user *optval, |
3316 | unsigned int optlen) | 3322 | unsigned int optlen) |
3317 | { | 3323 | { |
3318 | struct net *net = sock_net(sk); | 3324 | struct sctp_endpoint *ep = sctp_sk(sk)->ep; |
3319 | struct sctp_authchunk val; | 3325 | struct sctp_authchunk val; |
3320 | 3326 | ||
3321 | if (!net->sctp.auth_enable) | 3327 | if (!ep->auth_enable) |
3322 | return -EACCES; | 3328 | return -EACCES; |
3323 | 3329 | ||
3324 | if (optlen != sizeof(struct sctp_authchunk)) | 3330 | if (optlen != sizeof(struct sctp_authchunk)) |
@@ -3335,7 +3341,7 @@ static int sctp_setsockopt_auth_chunk(struct sock *sk, | |||
3335 | } | 3341 | } |
3336 | 3342 | ||
3337 | /* add this chunk id to the endpoint */ | 3343 | /* add this chunk id to the endpoint */ |
3338 | return sctp_auth_ep_add_chunkid(sctp_sk(sk)->ep, val.sauth_chunk); | 3344 | return sctp_auth_ep_add_chunkid(ep, val.sauth_chunk); |
3339 | } | 3345 | } |
3340 | 3346 | ||
3341 | /* | 3347 | /* |
@@ -3348,12 +3354,12 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk, | |||
3348 | char __user *optval, | 3354 | char __user *optval, |
3349 | unsigned int optlen) | 3355 | unsigned int optlen) |
3350 | { | 3356 | { |
3351 | struct net *net = sock_net(sk); | 3357 | struct sctp_endpoint *ep = sctp_sk(sk)->ep; |
3352 | struct sctp_hmacalgo *hmacs; | 3358 | struct sctp_hmacalgo *hmacs; |
3353 | u32 idents; | 3359 | u32 idents; |
3354 | int err; | 3360 | int err; |
3355 | 3361 | ||
3356 | if (!net->sctp.auth_enable) | 3362 | if (!ep->auth_enable) |
3357 | return -EACCES; | 3363 | return -EACCES; |
3358 | 3364 | ||
3359 | if (optlen < sizeof(struct sctp_hmacalgo)) | 3365 | if (optlen < sizeof(struct sctp_hmacalgo)) |
@@ -3370,7 +3376,7 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk, | |||
3370 | goto out; | 3376 | goto out; |
3371 | } | 3377 | } |
3372 | 3378 | ||
3373 | err = sctp_auth_ep_set_hmacs(sctp_sk(sk)->ep, hmacs); | 3379 | err = sctp_auth_ep_set_hmacs(ep, hmacs); |
3374 | out: | 3380 | out: |
3375 | kfree(hmacs); | 3381 | kfree(hmacs); |
3376 | return err; | 3382 | return err; |
@@ -3386,12 +3392,12 @@ static int sctp_setsockopt_auth_key(struct sock *sk, | |||
3386 | char __user *optval, | 3392 | char __user *optval, |
3387 | unsigned int optlen) | 3393 | unsigned int optlen) |
3388 | { | 3394 | { |
3389 | struct net *net = sock_net(sk); | 3395 | struct sctp_endpoint *ep = sctp_sk(sk)->ep; |
3390 | struct sctp_authkey *authkey; | 3396 | struct sctp_authkey *authkey; |
3391 | struct sctp_association *asoc; | 3397 | struct sctp_association *asoc; |
3392 | int ret; | 3398 | int ret; |
3393 | 3399 | ||
3394 | if (!net->sctp.auth_enable) | 3400 | if (!ep->auth_enable) |
3395 | return -EACCES; | 3401 | return -EACCES; |
3396 | 3402 | ||
3397 | if (optlen <= sizeof(struct sctp_authkey)) | 3403 | if (optlen <= sizeof(struct sctp_authkey)) |
@@ -3412,7 +3418,7 @@ static int sctp_setsockopt_auth_key(struct sock *sk, | |||
3412 | goto out; | 3418 | goto out; |
3413 | } | 3419 | } |
3414 | 3420 | ||
3415 | ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey); | 3421 | ret = sctp_auth_set_key(ep, asoc, authkey); |
3416 | out: | 3422 | out: |
3417 | kzfree(authkey); | 3423 | kzfree(authkey); |
3418 | return ret; | 3424 | return ret; |
@@ -3428,11 +3434,11 @@ static int sctp_setsockopt_active_key(struct sock *sk, | |||
3428 | char __user *optval, | 3434 | char __user *optval, |
3429 | unsigned int optlen) | 3435 | unsigned int optlen) |
3430 | { | 3436 | { |
3431 | struct net *net = sock_net(sk); | 3437 | struct sctp_endpoint *ep = sctp_sk(sk)->ep; |
3432 | struct sctp_authkeyid val; | 3438 | struct sctp_authkeyid val; |
3433 | struct sctp_association *asoc; | 3439 | struct sctp_association *asoc; |
3434 | 3440 | ||
3435 | if (!net->sctp.auth_enable) | 3441 | if (!ep->auth_enable) |
3436 | return -EACCES; | 3442 | return -EACCES; |
3437 | 3443 | ||
3438 | if (optlen != sizeof(struct sctp_authkeyid)) | 3444 | if (optlen != sizeof(struct sctp_authkeyid)) |
@@ -3444,8 +3450,7 @@ static int sctp_setsockopt_active_key(struct sock *sk, | |||
3444 | if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) | 3450 | if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) |
3445 | return -EINVAL; | 3451 | return -EINVAL; |
3446 | 3452 | ||
3447 | return sctp_auth_set_active_key(sctp_sk(sk)->ep, asoc, | 3453 | return sctp_auth_set_active_key(ep, asoc, val.scact_keynumber); |
3448 | val.scact_keynumber); | ||
3449 | } | 3454 | } |
3450 | 3455 | ||
3451 | /* | 3456 | /* |
@@ -3457,11 +3462,11 @@ static int sctp_setsockopt_del_key(struct sock *sk, | |||
3457 | char __user *optval, | 3462 | char __user *optval, |
3458 | unsigned int optlen) | 3463 | unsigned int optlen) |
3459 | { | 3464 | { |
3460 | struct net *net = sock_net(sk); | 3465 | struct sctp_endpoint *ep = sctp_sk(sk)->ep; |
3461 | struct sctp_authkeyid val; | 3466 | struct sctp_authkeyid val; |
3462 | struct sctp_association *asoc; | 3467 | struct sctp_association *asoc; |
3463 | 3468 | ||
3464 | if (!net->sctp.auth_enable) | 3469 | if (!ep->auth_enable) |
3465 | return -EACCES; | 3470 | return -EACCES; |
3466 | 3471 | ||
3467 | if (optlen != sizeof(struct sctp_authkeyid)) | 3472 | if (optlen != sizeof(struct sctp_authkeyid)) |
@@ -3473,8 +3478,7 @@ static int sctp_setsockopt_del_key(struct sock *sk, | |||
3473 | if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) | 3478 | if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) |
3474 | return -EINVAL; | 3479 | return -EINVAL; |
3475 | 3480 | ||
3476 | return sctp_auth_del_key_id(sctp_sk(sk)->ep, asoc, | 3481 | return sctp_auth_del_key_id(ep, asoc, val.scact_keynumber); |
3477 | val.scact_keynumber); | ||
3478 | 3482 | ||
3479 | } | 3483 | } |
3480 | 3484 | ||
@@ -5381,16 +5385,16 @@ static int sctp_getsockopt_maxburst(struct sock *sk, int len, | |||
5381 | static int sctp_getsockopt_hmac_ident(struct sock *sk, int len, | 5385 | static int sctp_getsockopt_hmac_ident(struct sock *sk, int len, |
5382 | char __user *optval, int __user *optlen) | 5386 | char __user *optval, int __user *optlen) |
5383 | { | 5387 | { |
5384 | struct net *net = sock_net(sk); | 5388 | struct sctp_endpoint *ep = sctp_sk(sk)->ep; |
5385 | struct sctp_hmacalgo __user *p = (void __user *)optval; | 5389 | struct sctp_hmacalgo __user *p = (void __user *)optval; |
5386 | struct sctp_hmac_algo_param *hmacs; | 5390 | struct sctp_hmac_algo_param *hmacs; |
5387 | __u16 data_len = 0; | 5391 | __u16 data_len = 0; |
5388 | u32 num_idents; | 5392 | u32 num_idents; |
5389 | 5393 | ||
5390 | if (!net->sctp.auth_enable) | 5394 | if (!ep->auth_enable) |
5391 | return -EACCES; | 5395 | return -EACCES; |
5392 | 5396 | ||
5393 | hmacs = sctp_sk(sk)->ep->auth_hmacs_list; | 5397 | hmacs = ep->auth_hmacs_list; |
5394 | data_len = ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t); | 5398 | data_len = ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t); |
5395 | 5399 | ||
5396 | if (len < sizeof(struct sctp_hmacalgo) + data_len) | 5400 | if (len < sizeof(struct sctp_hmacalgo) + data_len) |
@@ -5411,11 +5415,11 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len, | |||
5411 | static int sctp_getsockopt_active_key(struct sock *sk, int len, | 5415 | static int sctp_getsockopt_active_key(struct sock *sk, int len, |
5412 | char __user *optval, int __user *optlen) | 5416 | char __user *optval, int __user *optlen) |
5413 | { | 5417 | { |
5414 | struct net *net = sock_net(sk); | 5418 | struct sctp_endpoint *ep = sctp_sk(sk)->ep; |
5415 | struct sctp_authkeyid val; | 5419 | struct sctp_authkeyid val; |
5416 | struct sctp_association *asoc; | 5420 | struct sctp_association *asoc; |
5417 | 5421 | ||
5418 | if (!net->sctp.auth_enable) | 5422 | if (!ep->auth_enable) |
5419 | return -EACCES; | 5423 | return -EACCES; |
5420 | 5424 | ||
5421 | if (len < sizeof(struct sctp_authkeyid)) | 5425 | if (len < sizeof(struct sctp_authkeyid)) |
@@ -5430,7 +5434,7 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len, | |||
5430 | if (asoc) | 5434 | if (asoc) |
5431 | val.scact_keynumber = asoc->active_key_id; | 5435 | val.scact_keynumber = asoc->active_key_id; |
5432 | else | 5436 | else |
5433 | val.scact_keynumber = sctp_sk(sk)->ep->active_key_id; | 5437 | val.scact_keynumber = ep->active_key_id; |
5434 | 5438 | ||
5435 | len = sizeof(struct sctp_authkeyid); | 5439 | len = sizeof(struct sctp_authkeyid); |
5436 | if (put_user(len, optlen)) | 5440 | if (put_user(len, optlen)) |
@@ -5444,7 +5448,7 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len, | |||
5444 | static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len, | 5448 | static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len, |
5445 | char __user *optval, int __user *optlen) | 5449 | char __user *optval, int __user *optlen) |
5446 | { | 5450 | { |
5447 | struct net *net = sock_net(sk); | 5451 | struct sctp_endpoint *ep = sctp_sk(sk)->ep; |
5448 | struct sctp_authchunks __user *p = (void __user *)optval; | 5452 | struct sctp_authchunks __user *p = (void __user *)optval; |
5449 | struct sctp_authchunks val; | 5453 | struct sctp_authchunks val; |
5450 | struct sctp_association *asoc; | 5454 | struct sctp_association *asoc; |
@@ -5452,7 +5456,7 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len, | |||
5452 | u32 num_chunks = 0; | 5456 | u32 num_chunks = 0; |
5453 | char __user *to; | 5457 | char __user *to; |
5454 | 5458 | ||
5455 | if (!net->sctp.auth_enable) | 5459 | if (!ep->auth_enable) |
5456 | return -EACCES; | 5460 | return -EACCES; |
5457 | 5461 | ||
5458 | if (len < sizeof(struct sctp_authchunks)) | 5462 | if (len < sizeof(struct sctp_authchunks)) |
@@ -5489,7 +5493,7 @@ num: | |||
5489 | static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len, | 5493 | static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len, |
5490 | char __user *optval, int __user *optlen) | 5494 | char __user *optval, int __user *optlen) |
5491 | { | 5495 | { |
5492 | struct net *net = sock_net(sk); | 5496 | struct sctp_endpoint *ep = sctp_sk(sk)->ep; |
5493 | struct sctp_authchunks __user *p = (void __user *)optval; | 5497 | struct sctp_authchunks __user *p = (void __user *)optval; |
5494 | struct sctp_authchunks val; | 5498 | struct sctp_authchunks val; |
5495 | struct sctp_association *asoc; | 5499 | struct sctp_association *asoc; |
@@ -5497,7 +5501,7 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len, | |||
5497 | u32 num_chunks = 0; | 5501 | u32 num_chunks = 0; |
5498 | char __user *to; | 5502 | char __user *to; |
5499 | 5503 | ||
5500 | if (!net->sctp.auth_enable) | 5504 | if (!ep->auth_enable) |
5501 | return -EACCES; | 5505 | return -EACCES; |
5502 | 5506 | ||
5503 | if (len < sizeof(struct sctp_authchunks)) | 5507 | if (len < sizeof(struct sctp_authchunks)) |
@@ -5514,7 +5518,7 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len, | |||
5514 | if (asoc) | 5518 | if (asoc) |
5515 | ch = (struct sctp_chunks_param *)asoc->c.auth_chunks; | 5519 | ch = (struct sctp_chunks_param *)asoc->c.auth_chunks; |
5516 | else | 5520 | else |
5517 | ch = sctp_sk(sk)->ep->auth_chunk_list; | 5521 | ch = ep->auth_chunk_list; |
5518 | 5522 | ||
5519 | if (!ch) | 5523 | if (!ch) |
5520 | goto num; | 5524 | goto num; |
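Editor's note on the socket.c hunks above: besides the sockopt paths switching to ep->auth_enable, sctp_recvmsg() now credits the receive window immediately for the bytes actually copied when a message is only partially read; a fully consumed skb keeps being credited when the event is freed. The following is only an illustrative accounting sketch of that split, not the recvmsg code path:

#include <stdio.h>

/* Illustrative receive-side accounting: a partial read credits the copied
 * bytes right away, while a full read defers the credit to event release
 * (modelled here as a single step). */
struct rcv_state {
	unsigned int rwnd;	/* window credited back to the peer          */
	unsigned int pending;	/* bytes still held in a partially read skb  */
};

static unsigned int do_recv(struct rcv_state *s, unsigned int msg_len,
			    unsigned int bufsize)
{
	unsigned int copied = msg_len < bufsize ? msg_len : bufsize;

	if (copied < msg_len) {
		s->rwnd += copied;		/* partial read: credit now   */
		s->pending = msg_len - copied;	/* rest credited on release  */
	} else {
		s->rwnd += msg_len;		/* full read: credit on free  */
		s->pending = 0;
	}
	return copied;
}

int main(void)
{
	struct rcv_state s = { .rwnd = 0, .pending = 0 };

	do_recv(&s, 3000, 1000);	/* user buffer smaller than message */
	printf("rwnd=%u pending=%u\n", s.rwnd, s.pending);
	return 0;
}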
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c index 35c8923b5554..c82fdc1eab7c 100644 --- a/net/sctp/sysctl.c +++ b/net/sctp/sysctl.c | |||
@@ -64,6 +64,9 @@ static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write, | |||
64 | static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write, | 64 | static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write, |
65 | void __user *buffer, size_t *lenp, | 65 | void __user *buffer, size_t *lenp, |
66 | loff_t *ppos); | 66 | loff_t *ppos); |
67 | static int proc_sctp_do_auth(struct ctl_table *ctl, int write, | ||
68 | void __user *buffer, size_t *lenp, | ||
69 | loff_t *ppos); | ||
67 | 70 | ||
68 | static struct ctl_table sctp_table[] = { | 71 | static struct ctl_table sctp_table[] = { |
69 | { | 72 | { |
@@ -266,7 +269,7 @@ static struct ctl_table sctp_net_table[] = { | |||
266 | .data = &init_net.sctp.auth_enable, | 269 | .data = &init_net.sctp.auth_enable, |
267 | .maxlen = sizeof(int), | 270 | .maxlen = sizeof(int), |
268 | .mode = 0644, | 271 | .mode = 0644, |
269 | .proc_handler = proc_dointvec, | 272 | .proc_handler = proc_sctp_do_auth, |
270 | }, | 273 | }, |
271 | { | 274 | { |
272 | .procname = "addr_scope_policy", | 275 | .procname = "addr_scope_policy", |
@@ -400,6 +403,37 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write, | |||
400 | return ret; | 403 | return ret; |
401 | } | 404 | } |
402 | 405 | ||
406 | static int proc_sctp_do_auth(struct ctl_table *ctl, int write, | ||
407 | void __user *buffer, size_t *lenp, | ||
408 | loff_t *ppos) | ||
409 | { | ||
410 | struct net *net = current->nsproxy->net_ns; | ||
411 | struct ctl_table tbl; | ||
412 | int new_value, ret; | ||
413 | |||
414 | memset(&tbl, 0, sizeof(struct ctl_table)); | ||
415 | tbl.maxlen = sizeof(unsigned int); | ||
416 | |||
417 | if (write) | ||
418 | tbl.data = &new_value; | ||
419 | else | ||
420 | tbl.data = &net->sctp.auth_enable; | ||
421 | |||
422 | ret = proc_dointvec(&tbl, write, buffer, lenp, ppos); | ||
423 | |||
424 | if (write) { | ||
425 | struct sock *sk = net->sctp.ctl_sock; | ||
426 | |||
427 | net->sctp.auth_enable = new_value; | ||
428 | /* Update the value in the control socket */ | ||
429 | lock_sock(sk); | ||
430 | sctp_sk(sk)->ep->auth_enable = new_value; | ||
431 | release_sock(sk); | ||
432 | } | ||
433 | |||
434 | return ret; | ||
435 | } | ||
436 | |||
403 | int sctp_sysctl_net_register(struct net *net) | 437 | int sctp_sysctl_net_register(struct net *net) |
404 | { | 438 | { |
405 | struct ctl_table *table = sctp_net_table; | 439 | struct ctl_table *table = sctp_net_table; |
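Editor's note on the sysctl.c hunk above: net.sctp.auth_enable moves from plain proc_dointvec to a dedicated handler. Reads still report net->sctp.auth_enable, but writes go through a temporary, update the namespace value and are also pushed to the already-created control socket's endpoint so its snapshot stays in sync. A rough userspace model of that read/write split (the locking and netns plumbing of the real handler are omitted, and the names are invented):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical model: one namespace-wide default plus the endpoint that
 * was created before any write and therefore needs an explicit update. */
static int net_auth_enable = 1;
static int ctl_ep_auth_enable = 1;

/* write != 0: parse the new value, store it, and push it to the control
 * endpoint; write == 0: report the current namespace value. */
static int do_auth_sysctl(int write, char *buffer, size_t len)
{
	if (write) {
		int new_value = atoi(buffer);

		net_auth_enable = new_value;
		ctl_ep_auth_enable = new_value;	/* keep the snapshot in sync */
		return 0;
	}
	return snprintf(buffer, len, "%d\n", net_auth_enable);
}

int main(void)
{
	char buf[16] = "0";

	do_auth_sysctl(1, buf, sizeof(buf));	/* echo 0 > .../auth_enable */
	do_auth_sysctl(0, buf, sizeof(buf));	/* cat .../auth_enable      */
	printf("%s", buf);
	printf("ctl endpoint: %d\n", ctl_ep_auth_enable);
	return 0;
}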
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c index 8d198ae03606..85c64658bd0b 100644 --- a/net/sctp/ulpevent.c +++ b/net/sctp/ulpevent.c | |||
@@ -989,7 +989,7 @@ static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event, | |||
989 | skb = sctp_event2skb(event); | 989 | skb = sctp_event2skb(event); |
990 | /* Set the owner and charge rwnd for bytes received. */ | 990 | /* Set the owner and charge rwnd for bytes received. */ |
991 | sctp_ulpevent_set_owner(event, asoc); | 991 | sctp_ulpevent_set_owner(event, asoc); |
992 | sctp_assoc_rwnd_update(asoc, false); | 992 | sctp_assoc_rwnd_decrease(asoc, skb_headlen(skb)); |
993 | 993 | ||
994 | if (!skb->data_len) | 994 | if (!skb->data_len) |
995 | return; | 995 | return; |
@@ -1011,7 +1011,6 @@ static void sctp_ulpevent_release_data(struct sctp_ulpevent *event) | |||
1011 | { | 1011 | { |
1012 | struct sk_buff *skb, *frag; | 1012 | struct sk_buff *skb, *frag; |
1013 | unsigned int len; | 1013 | unsigned int len; |
1014 | struct sctp_association *asoc; | ||
1015 | 1014 | ||
1016 | /* Current stack structures assume that the rcv buffer is | 1015 | /* Current stack structures assume that the rcv buffer is |
1017 | * per socket. For UDP style sockets this is not true as | 1016 | * per socket. For UDP style sockets this is not true as |
@@ -1036,11 +1035,8 @@ static void sctp_ulpevent_release_data(struct sctp_ulpevent *event) | |||
1036 | } | 1035 | } |
1037 | 1036 | ||
1038 | done: | 1037 | done: |
1039 | asoc = event->asoc; | 1038 | sctp_assoc_rwnd_increase(event->asoc, len); |
1040 | sctp_association_hold(asoc); | ||
1041 | sctp_ulpevent_release_owner(event); | 1039 | sctp_ulpevent_release_owner(event); |
1042 | sctp_assoc_rwnd_update(asoc, true); | ||
1043 | sctp_association_put(asoc); | ||
1044 | } | 1040 | } |
1045 | 1041 | ||
1046 | static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event) | 1042 | static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event) |
diff --git a/net/socket.c b/net/socket.c index 1b1e7e6a960f..abf56b2a14f9 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -1880,8 +1880,8 @@ out: | |||
1880 | * Receive a datagram from a socket. | 1880 | * Receive a datagram from a socket. |
1881 | */ | 1881 | */ |
1882 | 1882 | ||
1883 | asmlinkage long sys_recv(int fd, void __user *ubuf, size_t size, | 1883 | SYSCALL_DEFINE4(recv, int, fd, void __user *, ubuf, size_t, size, |
1884 | unsigned int flags) | 1884 | unsigned int, flags) |
1885 | { | 1885 | { |
1886 | return sys_recvfrom(fd, ubuf, size, flags, NULL, NULL); | 1886 | return sys_recvfrom(fd, ubuf, size, flags, NULL, NULL); |
1887 | } | 1887 | } |
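Editor's note on the net/socket.c hunk above: the hand-written sys_recv() prototype is converted to the SYSCALL_DEFINE4() macro used by the rest of the syscall table, which, among other things, generates the properly typed sys_* symbol and the argument-widening wrappers some architectures need. The toy macro below is a deliberately simplified model of the idea, not the kernel's actual expansion:

#include <stdio.h>

/* Toy version of SYSCALL_DEFINE4: builds long sys_<name>(...) from a
 * (type, name) argument list, so every syscall is declared the same way. */
#define TOY_SYSCALL_DEFINE4(name, t1, a1, t2, a2, t3, a3, t4, a4) \
	long sys_##name(t1 a1, t2 a2, t3 a3, t4 a4)

TOY_SYSCALL_DEFINE4(recv, int, fd, void *, ubuf, size_t, size,
		    unsigned int, flags)
{
	printf("recv(fd=%d, size=%zu, flags=%u)\n", fd, size, flags);
	(void)ubuf;
	return 0;
}

int main(void)
{
	return (int)sys_recv(3, NULL, 128, 0);
}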
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index f02f511b7107..c08fbd11ceff 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -1842,7 +1842,7 @@ purge_queue: | |||
1842 | xfrm_pol_put(pol); | 1842 | xfrm_pol_put(pol); |
1843 | } | 1843 | } |
1844 | 1844 | ||
1845 | static int xdst_queue_output(struct sk_buff *skb) | 1845 | static int xdst_queue_output(struct sock *sk, struct sk_buff *skb) |
1846 | { | 1846 | { |
1847 | unsigned long sched_next; | 1847 | unsigned long sched_next; |
1848 | struct dst_entry *dst = skb_dst(skb); | 1848 | struct dst_entry *dst = skb_dst(skb); |