Diffstat (limited to 'net')
 36 files changed, 345 insertions(+), 188 deletions(-)
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
index 9f481cfdf77d..e8090f099eb8 100644
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@ -352,19 +352,21 @@ out:
  */
 int batadv_v_elp_iface_enable(struct batadv_hard_iface *hard_iface)
 {
+	static const size_t tvlv_padding = sizeof(__be32);
 	struct batadv_elp_packet *elp_packet;
 	unsigned char *elp_buff;
 	u32 random_seqno;
 	size_t size;
 	int res = -ENOMEM;
 
-	size = ETH_HLEN + NET_IP_ALIGN + BATADV_ELP_HLEN;
+	size = ETH_HLEN + NET_IP_ALIGN + BATADV_ELP_HLEN + tvlv_padding;
 	hard_iface->bat_v.elp_skb = dev_alloc_skb(size);
 	if (!hard_iface->bat_v.elp_skb)
 		goto out;
 
 	skb_reserve(hard_iface->bat_v.elp_skb, ETH_HLEN + NET_IP_ALIGN);
-	elp_buff = skb_put_zero(hard_iface->bat_v.elp_skb, BATADV_ELP_HLEN);
+	elp_buff = skb_put_zero(hard_iface->bat_v.elp_skb,
+				BATADV_ELP_HLEN + tvlv_padding);
 	elp_packet = (struct batadv_elp_packet *)elp_buff;
 
 	elp_packet->packet_type = BATADV_ELP;
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index 0fddc17106bd..5b71a289d04f 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -275,7 +275,7 @@ batadv_frag_merge_packets(struct hlist_head *chain)
 	kfree(entry);
 
 	packet = (struct batadv_frag_packet *)skb_out->data;
-	size = ntohs(packet->total_size);
+	size = ntohs(packet->total_size) + hdr_size;
 
 	/* Make room for the rest of the fragments. */
 	if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) {
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 2920e06a5403..04c19a37e500 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -102,12 +102,18 @@ struct br_tunnel_info {
 	struct metadata_dst *tunnel_dst;
 };
 
+/* private vlan flags */
+enum {
+	BR_VLFLAG_PER_PORT_STATS = BIT(0),
+};
+
 /**
  * struct net_bridge_vlan - per-vlan entry
  *
  * @vnode: rhashtable member
  * @vid: VLAN id
  * @flags: bridge vlan flags
+ * @priv_flags: private (in-kernel) bridge vlan flags
  * @stats: per-cpu VLAN statistics
  * @br: if MASTER flag set, this points to a bridge struct
  * @port: if MASTER flag unset, this points to a port struct
@@ -127,6 +133,7 @@ struct net_bridge_vlan {
 	struct rhash_head tnode;
 	u16 vid;
 	u16 flags;
+	u16 priv_flags;
 	struct br_vlan_stats __percpu *stats;
 	union {
 		struct net_bridge *br;
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 8c9297a01947..e84be08b8285 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -197,7 +197,7 @@ static void nbp_vlan_rcu_free(struct rcu_head *rcu)
 	v = container_of(rcu, struct net_bridge_vlan, rcu);
 	WARN_ON(br_vlan_is_master(v));
 	/* if we had per-port stats configured then free them here */
-	if (v->brvlan->stats != v->stats)
+	if (v->priv_flags & BR_VLFLAG_PER_PORT_STATS)
 		free_percpu(v->stats);
 	v->stats = NULL;
 	kfree(v);
@@ -264,6 +264,7 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
 				err = -ENOMEM;
 				goto out_filt;
 			}
+			v->priv_flags |= BR_VLFLAG_PER_PORT_STATS;
 		} else {
 			v->stats = masterv->stats;
 		}
diff --git a/net/can/raw.c b/net/can/raw.c
index 1051eee82581..3aab7664933f 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -745,18 +745,19 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
 	} else
 		ifindex = ro->ifindex;
 
-	if (ro->fd_frames) {
+	dev = dev_get_by_index(sock_net(sk), ifindex);
+	if (!dev)
+		return -ENXIO;
+
+	err = -EINVAL;
+	if (ro->fd_frames && dev->mtu == CANFD_MTU) {
 		if (unlikely(size != CANFD_MTU && size != CAN_MTU))
-			return -EINVAL;
+			goto put_dev;
 	} else {
 		if (unlikely(size != CAN_MTU))
-			return -EINVAL;
+			goto put_dev;
 	}
 
-	dev = dev_get_by_index(sock_net(sk), ifindex);
-	if (!dev)
-		return -ENXIO;
-
 	skb = sock_alloc_send_skb(sk, size + sizeof(struct can_skb_priv),
 				  msg->msg_flags & MSG_DONTWAIT, &err);
 	if (!skb)
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 57fcc6b4bf6e..2f126eff275d 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -580,9 +580,15 @@ static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
 	struct bio_vec bvec;
 	int ret;
 
-	/* sendpage cannot properly handle pages with page_count == 0,
-	 * we need to fallback to sendmsg if that's the case */
-	if (page_count(page) >= 1)
+	/*
+	 * sendpage cannot properly handle pages with page_count == 0,
+	 * we need to fall back to sendmsg if that's the case.
+	 *
+	 * Same goes for slab pages: skb_can_coalesce() allows
+	 * coalescing neighboring slab objects into a single frag which
+	 * triggers one of hardened usercopy checks.
+	 */
+	if (page_count(page) >= 1 && !PageSlab(page))
 		return __ceph_tcp_sendpage(sock, page, offset, size, more);
 
 	bvec.bv_page = page;
diff --git a/net/core/dev.c b/net/core/dev.c
index 0ffcbdd55fa9..ddc551f24ba2 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5655,6 +5655,10 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
 	skb->vlan_tci = 0;
 	skb->dev = napi->dev;
 	skb->skb_iif = 0;
+
+	/* eth_type_trans() assumes pkt_type is PACKET_HOST */
+	skb->pkt_type = PACKET_HOST;
+
 	skb->encapsulation = 0;
 	skb_shinfo(skb)->gso_type = 0;
 	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
@@ -5966,11 +5970,14 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
 		if (work_done)
 			timeout = n->dev->gro_flush_timeout;
 
+		/* When the NAPI instance uses a timeout and keeps postponing
+		 * it, we need to bound somehow the time packets are kept in
+		 * the GRO layer
+		 */
+		napi_gro_flush(n, !!timeout);
 		if (timeout)
 			hrtimer_start(&n->timer, ns_to_ktime(timeout),
 				      HRTIMER_MODE_REL_PINNED);
-		else
-			napi_gro_flush(n, false);
 	}
 	if (unlikely(!list_empty(&n->poll_list))) {
 		/* If n->poll_list is not empty, we need to mask irqs */
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index b4ee5c8b928f..a8217e221e19 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4854,6 +4854,11 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
 	nf_reset(skb);
 	nf_reset_trace(skb);
 
+#ifdef CONFIG_NET_SWITCHDEV
+	skb->offload_fwd_mark = 0;
+	skb->offload_mr_fwd_mark = 0;
+#endif
+
 	if (!xnet)
 		return;
 
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index dde671e97829..c248e0dccbe1 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -80,7 +80,7 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
 
 	iph->version = 4;
 	iph->ihl = sizeof(struct iphdr) >> 2;
-	iph->frag_off = df;
+	iph->frag_off = ip_mtu_locked(&rt->dst) ? 0 : df;
 	iph->protocol = proto;
 	iph->tos = tos;
 	iph->daddr = dst;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2868ef28ce52..1e37c1388189 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4268,7 +4268,7 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
 	 * If the sack array is full, forget about the last one.
 	 */
 	if (this_sack >= TCP_NUM_SACKS) {
-		if (tp->compressed_ack)
+		if (tp->compressed_ack > TCP_FASTRETRANS_THRESH)
 			tcp_send_ack(sk);
 		this_sack--;
 		tp->rx_opt.num_sacks--;
@@ -4363,6 +4363,7 @@ static bool tcp_try_coalesce(struct sock *sk,
 	if (TCP_SKB_CB(from)->has_rxtstamp) {
 		TCP_SKB_CB(to)->has_rxtstamp = true;
 		to->tstamp = from->tstamp;
+		skb_hwtstamps(to)->hwtstamp = skb_hwtstamps(from)->hwtstamp;
 	}
 
 	return true;
@@ -5188,7 +5189,17 @@ send_now:
 	if (!tcp_is_sack(tp) ||
 	    tp->compressed_ack >= sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr)
 		goto send_now;
-	tp->compressed_ack++;
+
+	if (tp->compressed_ack_rcv_nxt != tp->rcv_nxt) {
+		tp->compressed_ack_rcv_nxt = tp->rcv_nxt;
+		if (tp->compressed_ack > TCP_FASTRETRANS_THRESH)
+			NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
+				      tp->compressed_ack - TCP_FASTRETRANS_THRESH);
+		tp->compressed_ack = 0;
+	}
+
+	if (++tp->compressed_ack <= TCP_FASTRETRANS_THRESH)
+		goto send_now;
 
 	if (hrtimer_is_queued(&tp->compressed_ack_timer))
 		return;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 9c34b97d365d..3f510cad0b3e 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -180,10 +180,10 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	if (unlikely(tp->compressed_ack)) {
+	if (unlikely(tp->compressed_ack > TCP_FASTRETRANS_THRESH)) {
 		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
-			      tp->compressed_ack);
-		tp->compressed_ack = 0;
+			      tp->compressed_ack - TCP_FASTRETRANS_THRESH);
+		tp->compressed_ack = TCP_FASTRETRANS_THRESH;
 		if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
 			__sock_put(sk);
 	}
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 676020663ce8..5f8b6d3cd855 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -740,7 +740,7 @@ static enum hrtimer_restart tcp_compressed_ack_kick(struct hrtimer *timer)
 
 	bh_lock_sock(sk);
 	if (!sock_owned_by_user(sk)) {
-		if (tp->compressed_ack)
+		if (tp->compressed_ack > TCP_FASTRETRANS_THRESH)
 			tcp_send_ack(sk);
 	} else {
 		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 63a808d5af15..045597b9a7c0 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -179,7 +179,7 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp);
 static void addrconf_dad_work(struct work_struct *w);
 static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
 				   bool send_na);
-static void addrconf_dad_run(struct inet6_dev *idev);
+static void addrconf_dad_run(struct inet6_dev *idev, bool restart);
 static void addrconf_rs_timer(struct timer_list *t);
 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
@@ -3439,6 +3439,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 			   void *ptr)
 {
 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	struct netdev_notifier_change_info *change_info;
 	struct netdev_notifier_changeupper_info *info;
 	struct inet6_dev *idev = __in6_dev_get(dev);
 	struct net *net = dev_net(dev);
@@ -3513,7 +3514,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 			break;
 		}
 
-		if (idev) {
+		if (!IS_ERR_OR_NULL(idev)) {
 			if (idev->if_flags & IF_READY) {
 				/* device is already configured -
 				 * but resend MLD reports, we might
@@ -3521,6 +3522,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 				 * multicast snooping switches
 				 */
 				ipv6_mc_up(idev);
+				change_info = ptr;
+				if (change_info->flags_changed & IFF_NOARP)
+					addrconf_dad_run(idev, true);
 				rt6_sync_up(dev, RTNH_F_LINKDOWN);
 				break;
 			}
@@ -3555,7 +3559,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 
 		if (!IS_ERR_OR_NULL(idev)) {
 			if (run_pending)
-				addrconf_dad_run(idev);
+				addrconf_dad_run(idev, false);
 
 			/* Device has an address by now */
 			rt6_sync_up(dev, RTNH_F_DEAD);
@@ -4173,16 +4177,19 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
 	addrconf_verify_rtnl();
 }
 
-static void addrconf_dad_run(struct inet6_dev *idev)
+static void addrconf_dad_run(struct inet6_dev *idev, bool restart)
 {
 	struct inet6_ifaddr *ifp;
 
 	read_lock_bh(&idev->lock);
 	list_for_each_entry(ifp, &idev->addr_list, if_list) {
 		spin_lock(&ifp->lock);
-		if (ifp->flags & IFA_F_TENTATIVE &&
-		    ifp->state == INET6_IFADDR_STATE_DAD)
+		if ((ifp->flags & IFA_F_TENTATIVE &&
+		     ifp->state == INET6_IFADDR_STATE_DAD) || restart) {
+			if (restart)
+				ifp->state = INET6_IFADDR_STATE_PREDAD;
 			addrconf_dad_kick(ifp);
+		}
 		spin_unlock(&ifp->lock);
 	}
 	read_unlock_bh(&idev->lock);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 2a7423c39456..059f0531f7c1 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2232,8 +2232,7 @@ static void ip6_link_failure(struct sk_buff *skb)
 	if (rt) {
 		rcu_read_lock();
 		if (rt->rt6i_flags & RTF_CACHE) {
-			if (dst_hold_safe(&rt->dst))
-				rt6_remove_exception_rt(rt);
+			rt6_remove_exception_rt(rt);
 		} else {
 			struct fib6_info *from;
 			struct fib6_node *fn;
@@ -2360,10 +2359,13 @@ EXPORT_SYMBOL_GPL(ip6_update_pmtu);
 
 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
 {
+	int oif = sk->sk_bound_dev_if;
 	struct dst_entry *dst;
 
-	ip6_update_pmtu(skb, sock_net(sk), mtu,
-			sk->sk_bound_dev_if, sk->sk_mark, sk->sk_uid);
+	if (!oif && skb->dev)
+		oif = l3mdev_master_ifindex(skb->dev);
+
+	ip6_update_pmtu(skb, sock_net(sk), mtu, oif, sk->sk_mark, sk->sk_uid);
 
 	dst = __sk_dst_get(sk);
 	if (!dst || !dst->obsolete ||
@@ -3214,8 +3216,8 @@ static int ip6_del_cached_rt(struct rt6_info *rt, struct fib6_config *cfg)
 	if (cfg->fc_flags & RTF_GATEWAY &&
 	    !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
 		goto out;
-	if (dst_hold_safe(&rt->dst))
-		rc = rt6_remove_exception_rt(rt);
+
+	rc = rt6_remove_exception_rt(rt);
 out:
 	return rc;
 }
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 82cdf9020b53..26f1d435696a 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1490,12 +1490,7 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
 		goto err_sock;
 	}
 
-	sk = sock->sk;
-
-	sock_hold(sk);
-	tunnel->sock = sk;
 	tunnel->l2tp_net = net;
-
 	pn = l2tp_pernet(net);
 
 	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
@@ -1510,6 +1505,10 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
 	list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
 	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
 
+	sk = sock->sk;
+	sock_hold(sk);
+	tunnel->sock = sk;
+
 	if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
 		struct udp_tunnel_sock_cfg udp_cfg = {
 			.sk_user_data = tunnel,
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index ec3095f13aae..a74650e98f42 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2394,7 +2394,7 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
 		void *ph;
 		__u32 ts;
 
-		ph = skb_shinfo(skb)->destructor_arg;
+		ph = skb_zcopy_get_nouarg(skb);
 		packet_dec_pending(&po->tx_ring);
 
 		ts = __packet_set_timestamp(po, ph, skb);
@@ -2461,7 +2461,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
 	skb->mark = po->sk.sk_mark;
 	skb->tstamp = sockc->transmit_time;
 	sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags);
-	skb_shinfo(skb)->destructor_arg = ph.raw;
+	skb_zcopy_set_nouarg(skb, ph.raw);
 
 	skb_reserve(skb, hlen);
 	skb_reset_network_header(skb);
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 64362d078da8..a2522f9d71e2 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -375,17 +375,36 @@ EXPORT_SYMBOL(rxrpc_kernel_end_call);
  * getting ACKs from the server. Returns a number representing the life state
  * which can be compared to that returned by a previous call.
  *
- * If this is a client call, ping ACKs will be sent to the server to find out
- * whether it's still responsive and whether the call is still alive on the
- * server.
+ * If the life state stalls, rxrpc_kernel_probe_life() should be called and
+ * then 2RTT waited.
  */
-u32 rxrpc_kernel_check_life(struct socket *sock, struct rxrpc_call *call)
+u32 rxrpc_kernel_check_life(const struct socket *sock,
+			    const struct rxrpc_call *call)
 {
 	return call->acks_latest;
 }
 EXPORT_SYMBOL(rxrpc_kernel_check_life);
 
 /**
+ * rxrpc_kernel_probe_life - Poke the peer to see if it's still alive
+ * @sock: The socket the call is on
+ * @call: The call to check
+ *
+ * In conjunction with rxrpc_kernel_check_life(), allow a kernel service to
+ * find out whether a call is still alive by pinging it. This should cause the
+ * life state to be bumped in about 2*RTT.
+ *
+ * The must be called in TASK_RUNNING state on pain of might_sleep() objecting.
+ */
+void rxrpc_kernel_probe_life(struct socket *sock, struct rxrpc_call *call)
+{
+	rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
+			  rxrpc_propose_ack_ping_for_check_life);
+	rxrpc_send_ack_packet(call, true, NULL);
+}
+EXPORT_SYMBOL(rxrpc_kernel_probe_life);
+
+/**
  * rxrpc_kernel_get_epoch - Retrieve the epoch value from a call.
  * @sock: The socket the call is on
  * @call: The call to query
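For reference, the check/probe pair above is meant to be used together: a kernel service polls rxrpc_kernel_check_life() and, when the returned life state stops changing, calls rxrpc_kernel_probe_life() and waits roughly 2*RTT before checking again. A minimal caller sketch (hypothetical helper name and state, not part of this patch), using only the two signatures shown in the hunk above:

	/* Sketch of a caller: returns true if the call still shows signs of
	 * life, otherwise pings the peer so a later check can observe a
	 * bumped life state (allow about 2*RTT before rechecking).
	 */
	static bool my_call_shows_life(struct socket *sock,
				       struct rxrpc_call *call,
				       u32 *last_life)
	{
		u32 life = rxrpc_kernel_check_life(sock, call);

		if (life != *last_life) {	/* ACKs are still arriving */
			*last_life = life;
			return true;
		}

		rxrpc_kernel_probe_life(sock, call);
		return false;
	}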
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index da3dd0f68cc2..2b372a06b432 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -201,7 +201,8 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
 			goto out_release;
 		}
 	} else {
-		return err;
+		ret = err;
+		goto out_free;
 	}
 
 	p = to_pedit(*a);
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 052855d47354..37c9b8f0e10f 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -27,10 +27,7 @@ struct tcf_police_params {
 	u32 tcfp_ewma_rate;
 	s64 tcfp_burst;
 	u32 tcfp_mtu;
-	s64 tcfp_toks;
-	s64 tcfp_ptoks;
 	s64 tcfp_mtu_ptoks;
-	s64 tcfp_t_c;
 	struct psched_ratecfg rate;
 	bool rate_present;
 	struct psched_ratecfg peak;
@@ -41,6 +38,11 @@ struct tcf_police_params {
 struct tcf_police {
 	struct tc_action common;
 	struct tcf_police_params __rcu *params;
+
+	spinlock_t tcfp_lock ____cacheline_aligned_in_smp;
+	s64 tcfp_toks;
+	s64 tcfp_ptoks;
+	s64 tcfp_t_c;
 };
 
 #define to_police(pc) ((struct tcf_police *)pc)
@@ -122,6 +124,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
 			return ret;
 		}
 		ret = ACT_P_CREATED;
+		spin_lock_init(&(to_police(*a)->tcfp_lock));
 	} else if (!ovr) {
 		tcf_idr_release(*a, bind);
 		return -EEXIST;
@@ -186,12 +189,9 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
 	}
 
 	new->tcfp_burst = PSCHED_TICKS2NS(parm->burst);
-	new->tcfp_toks = new->tcfp_burst;
-	if (new->peak_present) {
+	if (new->peak_present)
 		new->tcfp_mtu_ptoks = (s64)psched_l2t_ns(&new->peak,
 							 new->tcfp_mtu);
-		new->tcfp_ptoks = new->tcfp_mtu_ptoks;
-	}
 
 	if (tb[TCA_POLICE_AVRATE])
 		new->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]);
@@ -207,7 +207,12 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
 	}
 
 	spin_lock_bh(&police->tcf_lock);
-	new->tcfp_t_c = ktime_get_ns();
+	spin_lock_bh(&police->tcfp_lock);
+	police->tcfp_t_c = ktime_get_ns();
+	police->tcfp_toks = new->tcfp_burst;
+	if (new->peak_present)
+		police->tcfp_ptoks = new->tcfp_mtu_ptoks;
+	spin_unlock_bh(&police->tcfp_lock);
 	police->tcf_action = parm->action;
 	rcu_swap_protected(police->params,
 			   new,
@@ -257,25 +262,28 @@ static int tcf_police_act(struct sk_buff *skb, const struct tc_action *a,
 		}
 
 		now = ktime_get_ns();
-		toks = min_t(s64, now - p->tcfp_t_c, p->tcfp_burst);
+		spin_lock_bh(&police->tcfp_lock);
+		toks = min_t(s64, now - police->tcfp_t_c, p->tcfp_burst);
 		if (p->peak_present) {
-			ptoks = toks + p->tcfp_ptoks;
+			ptoks = toks + police->tcfp_ptoks;
 			if (ptoks > p->tcfp_mtu_ptoks)
 				ptoks = p->tcfp_mtu_ptoks;
 			ptoks -= (s64)psched_l2t_ns(&p->peak,
 						    qdisc_pkt_len(skb));
 		}
-		toks += p->tcfp_toks;
+		toks += police->tcfp_toks;
 		if (toks > p->tcfp_burst)
 			toks = p->tcfp_burst;
 		toks -= (s64)psched_l2t_ns(&p->rate, qdisc_pkt_len(skb));
 		if ((toks|ptoks) >= 0) {
-			p->tcfp_t_c = now;
-			p->tcfp_toks = toks;
-			p->tcfp_ptoks = ptoks;
+			police->tcfp_t_c = now;
+			police->tcfp_toks = toks;
+			police->tcfp_ptoks = ptoks;
+			spin_unlock_bh(&police->tcfp_lock);
 			ret = p->tcfp_result;
 			goto inc_drops;
 		}
+		spin_unlock_bh(&police->tcfp_lock);
 	}
 
 inc_overlimits:
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 4b1af706896c..25a7cf6d380f 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -469,22 +469,29 @@ begin:
 		goto begin;
 	}
 	prefetch(&skb->end);
-	f->credit -= qdisc_pkt_len(skb);
+	plen = qdisc_pkt_len(skb);
+	f->credit -= plen;
 
-	if (ktime_to_ns(skb->tstamp) || !q->rate_enable)
+	if (!q->rate_enable)
 		goto out;
 
 	rate = q->flow_max_rate;
-	if (skb->sk)
-		rate = min(skb->sk->sk_pacing_rate, rate);
-
-	if (rate <= q->low_rate_threshold) {
-		f->credit = 0;
-		plen = qdisc_pkt_len(skb);
-	} else {
-		plen = max(qdisc_pkt_len(skb), q->quantum);
-		if (f->credit > 0)
-			goto out;
+
+	/* If EDT time was provided for this skb, we need to
+	 * update f->time_next_packet only if this qdisc enforces
+	 * a flow max rate.
+	 */
+	if (!skb->tstamp) {
+		if (skb->sk)
+			rate = min(skb->sk->sk_pacing_rate, rate);
+
+		if (rate <= q->low_rate_threshold) {
+			f->credit = 0;
+		} else {
+			plen = max(plen, q->quantum);
+			if (f->credit > 0)
+				goto out;
+		}
 	}
 	if (rate != ~0UL) {
 		u64 len = (u64)plen * NSEC_PER_SEC;
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 67939ad99c01..b0e74a3e77ec 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -118,6 +118,9 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
 		sctp_transport_route(tp, NULL, sp);
 		if (asoc->param_flags & SPP_PMTUD_ENABLE)
 			sctp_assoc_sync_pmtu(asoc);
+	} else if (!sctp_transport_pmtu_check(tp)) {
+		if (asoc->param_flags & SPP_PMTUD_ENABLE)
+			sctp_assoc_sync_pmtu(asoc);
 	}
 
 	if (asoc->pmtu_pending) {
@@ -396,25 +399,6 @@ finish:
 	return retval;
 }
 
-static void sctp_packet_release_owner(struct sk_buff *skb)
-{
-	sk_free(skb->sk);
-}
-
-static void sctp_packet_set_owner_w(struct sk_buff *skb, struct sock *sk)
-{
-	skb_orphan(skb);
-	skb->sk = sk;
-	skb->destructor = sctp_packet_release_owner;
-
-	/*
-	 * The data chunks have already been accounted for in sctp_sendmsg(),
-	 * therefore only reserve a single byte to keep socket around until
-	 * the packet has been transmitted.
-	 */
-	refcount_inc(&sk->sk_wmem_alloc);
-}
-
 static void sctp_packet_gso_append(struct sk_buff *head, struct sk_buff *skb)
 {
 	if (SCTP_OUTPUT_CB(head)->last == head)
@@ -601,7 +585,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
 	if (!head)
 		goto out;
 	skb_reserve(head, packet->overhead + MAX_HEADER);
-	sctp_packet_set_owner_w(head, sk);
+	skb_set_owner_w(head, sk);
 
 	/* set sctp header */
 	sh = skb_push(head, sizeof(struct sctphdr));
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 739f3e50120d..bf618d1b41fd 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3940,32 +3940,16 @@ static int sctp_setsockopt_pr_supported(struct sock *sk,
 					unsigned int optlen)
 {
 	struct sctp_assoc_value params;
-	struct sctp_association *asoc;
-	int retval = -EINVAL;
 
 	if (optlen != sizeof(params))
-		goto out;
-
-	if (copy_from_user(&params, optval, optlen)) {
-		retval = -EFAULT;
-		goto out;
-	}
-
-	asoc = sctp_id2assoc(sk, params.assoc_id);
-	if (asoc) {
-		asoc->prsctp_enable = !!params.assoc_value;
-	} else if (!params.assoc_id) {
-		struct sctp_sock *sp = sctp_sk(sk);
+		return -EINVAL;
 
-		sp->ep->prsctp_enable = !!params.assoc_value;
-	} else {
-		goto out;
-	}
+	if (copy_from_user(&params, optval, optlen))
+		return -EFAULT;
 
-	retval = 0;
+	sctp_sk(sk)->ep->prsctp_enable = !!params.assoc_value;
 
-out:
-	return retval;
+	return 0;
 }
 
 static int sctp_setsockopt_default_prinfo(struct sock *sk,
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index ffb940d3b57c..3892e7630f3a 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -535,7 +535,6 @@ int sctp_send_add_streams(struct sctp_association *asoc,
 		goto out;
 	}
 
-	stream->incnt = incnt;
 	stream->outcnt = outcnt;
 
 	asoc->strreset_outstanding = !!out + !!in;
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 80e2119f1c70..5fbaf1901571 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -127,6 +127,8 @@ static int smc_release(struct socket *sock)
 	smc = smc_sk(sk);
 
 	/* cleanup for a dangling non-blocking connect */
+	if (smc->connect_info && sk->sk_state == SMC_INIT)
+		tcp_abort(smc->clcsock->sk, ECONNABORTED);
 	flush_work(&smc->connect_work);
 	kfree(smc->connect_info);
 	smc->connect_info = NULL;
@@ -547,7 +549,8 @@ static int smc_connect_rdma(struct smc_sock *smc,
 
 	mutex_lock(&smc_create_lgr_pending);
 	local_contact = smc_conn_create(smc, false, aclc->hdr.flag, ibdev,
-					ibport, &aclc->lcl, NULL, 0);
+					ibport, ntoh24(aclc->qpn), &aclc->lcl,
+					NULL, 0);
 	if (local_contact < 0) {
 		if (local_contact == -ENOMEM)
 			reason_code = SMC_CLC_DECL_MEM;/* insufficient memory*/
@@ -618,7 +621,7 @@ static int smc_connect_ism(struct smc_sock *smc,
 	int rc = 0;
 
 	mutex_lock(&smc_create_lgr_pending);
-	local_contact = smc_conn_create(smc, true, aclc->hdr.flag, NULL, 0,
+	local_contact = smc_conn_create(smc, true, aclc->hdr.flag, NULL, 0, 0,
 					NULL, ismdev, aclc->gid);
 	if (local_contact < 0)
 		return smc_connect_abort(smc, SMC_CLC_DECL_MEM, 0);
@@ -1083,7 +1086,7 @@ static int smc_listen_rdma_init(struct smc_sock *new_smc,
 				int *local_contact)
 {
 	/* allocate connection / link group */
-	*local_contact = smc_conn_create(new_smc, false, 0, ibdev, ibport,
+	*local_contact = smc_conn_create(new_smc, false, 0, ibdev, ibport, 0,
 					 &pclc->lcl, NULL, 0);
 	if (*local_contact < 0) {
 		if (*local_contact == -ENOMEM)
@@ -1107,7 +1110,7 @@ static int smc_listen_ism_init(struct smc_sock *new_smc,
 	struct smc_clc_msg_smcd *pclc_smcd;
 
 	pclc_smcd = smc_get_clc_msg_smcd(pclc);
-	*local_contact = smc_conn_create(new_smc, true, 0, NULL, 0, NULL,
+	*local_contact = smc_conn_create(new_smc, true, 0, NULL, 0, 0, NULL,
 					 ismdev, pclc_smcd->gid);
 	if (*local_contact < 0) {
 		if (*local_contact == -ENOMEM)
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
index ed5dcf03fe0b..db83332ac1c8 100644
--- a/net/smc/smc_cdc.c
+++ b/net/smc/smc_cdc.c
@@ -81,7 +81,7 @@ static inline void smc_cdc_add_pending_send(struct smc_connection *conn,
 		sizeof(struct smc_cdc_msg) > SMC_WR_BUF_SIZE,
 		"must increase SMC_WR_BUF_SIZE to at least sizeof(struct smc_cdc_msg)");
 	BUILD_BUG_ON_MSG(
-		sizeof(struct smc_cdc_msg) != SMC_WR_TX_SIZE,
+		offsetofend(struct smc_cdc_msg, reserved) > SMC_WR_TX_SIZE,
 		"must adapt SMC_WR_TX_SIZE to sizeof(struct smc_cdc_msg); if not all smc_wr upper layer protocols use the same message size any more, must start to set link->wr_tx_sges[i].length on each individual smc_wr_tx_send()");
 	BUILD_BUG_ON_MSG(
 		sizeof(struct smc_cdc_tx_pend) > SMC_WR_TX_PEND_PRIV_SIZE,
@@ -177,23 +177,24 @@ void smc_cdc_tx_dismiss_slots(struct smc_connection *conn)
 int smcd_cdc_msg_send(struct smc_connection *conn)
 {
 	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
+	union smc_host_cursor curs;
 	struct smcd_cdc_msg cdc;
 	int rc, diff;
 
 	memset(&cdc, 0, sizeof(cdc));
 	cdc.common.type = SMC_CDC_MSG_TYPE;
-	cdc.prod_wrap = conn->local_tx_ctrl.prod.wrap;
-	cdc.prod_count = conn->local_tx_ctrl.prod.count;
-
-	cdc.cons_wrap = conn->local_tx_ctrl.cons.wrap;
-	cdc.cons_count = conn->local_tx_ctrl.cons.count;
-	cdc.prod_flags = conn->local_tx_ctrl.prod_flags;
-	cdc.conn_state_flags = conn->local_tx_ctrl.conn_state_flags;
+	curs.acurs.counter = atomic64_read(&conn->local_tx_ctrl.prod.acurs);
+	cdc.prod.wrap = curs.wrap;
+	cdc.prod.count = curs.count;
+	curs.acurs.counter = atomic64_read(&conn->local_tx_ctrl.cons.acurs);
+	cdc.cons.wrap = curs.wrap;
+	cdc.cons.count = curs.count;
+	cdc.cons.prod_flags = conn->local_tx_ctrl.prod_flags;
+	cdc.cons.conn_state_flags = conn->local_tx_ctrl.conn_state_flags;
 	rc = smcd_tx_ism_write(conn, &cdc, sizeof(cdc), 0, 1);
 	if (rc)
 		return rc;
-	smc_curs_copy(&conn->rx_curs_confirmed, &conn->local_tx_ctrl.cons,
-		      conn);
+	smc_curs_copy(&conn->rx_curs_confirmed, &curs, conn);
 	/* Calculate transmitted data and increment free send buffer space */
 	diff = smc_curs_diff(conn->sndbuf_desc->len, &conn->tx_curs_fin,
 			     &conn->tx_curs_sent);
@@ -331,13 +332,16 @@ static void smc_cdc_msg_recv(struct smc_sock *smc, struct smc_cdc_msg *cdc)
 static void smcd_cdc_rx_tsklet(unsigned long data)
 {
 	struct smc_connection *conn = (struct smc_connection *)data;
+	struct smcd_cdc_msg *data_cdc;
 	struct smcd_cdc_msg cdc;
 	struct smc_sock *smc;
 
 	if (!conn)
 		return;
 
-	memcpy(&cdc, conn->rmb_desc->cpu_addr, sizeof(cdc));
+	data_cdc = (struct smcd_cdc_msg *)conn->rmb_desc->cpu_addr;
+	smcd_curs_copy(&cdc.prod, &data_cdc->prod, conn);
+	smcd_curs_copy(&cdc.cons, &data_cdc->cons, conn);
 	smc = container_of(conn, struct smc_sock, conn);
 	smc_cdc_msg_recv(smc, (struct smc_cdc_msg *)&cdc);
 }
diff --git a/net/smc/smc_cdc.h b/net/smc/smc_cdc.h
index 934df4473a7c..b5bfe38c7f9b 100644
--- a/net/smc/smc_cdc.h
+++ b/net/smc/smc_cdc.h
@@ -48,21 +48,31 @@ struct smc_cdc_msg {
 	struct smc_cdc_producer_flags prod_flags;
 	struct smc_cdc_conn_state_flags conn_state_flags;
 	u8 reserved[18];
-} __packed;	/* format defined in RFC7609 */
+};
+
+/* SMC-D cursor format */
+union smcd_cdc_cursor {
+	struct {
+		u16 wrap;
+		u32 count;
+		struct smc_cdc_producer_flags prod_flags;
+		struct smc_cdc_conn_state_flags conn_state_flags;
+	} __packed;
+#ifdef KERNEL_HAS_ATOMIC64
+	atomic64_t acurs;	/* for atomic processing */
+#else
+	u64 acurs;	/* for atomic processing */
+#endif
+} __aligned(8);
 
 /* CDC message for SMC-D */
 struct smcd_cdc_msg {
 	struct smc_wr_rx_hdr common;	/* Type = 0xFE */
 	u8 res1[7];
-	u16 prod_wrap;
-	u32 prod_count;
-	u8 res2[2];
-	u16 cons_wrap;
-	u32 cons_count;
-	struct smc_cdc_producer_flags prod_flags;
-	struct smc_cdc_conn_state_flags conn_state_flags;
+	union smcd_cdc_cursor prod;
+	union smcd_cdc_cursor cons;
 	u8 res3[8];
-} __packed;
+} __aligned(8);
 
 static inline bool smc_cdc_rxed_any_close(struct smc_connection *conn)
 {
@@ -135,6 +145,21 @@ static inline void smc_curs_copy_net(union smc_cdc_cursor *tgt,
 #endif
 }
 
+static inline void smcd_curs_copy(union smcd_cdc_cursor *tgt,
+				  union smcd_cdc_cursor *src,
+				  struct smc_connection *conn)
+{
+#ifndef KERNEL_HAS_ATOMIC64
+	unsigned long flags;
+
+	spin_lock_irqsave(&conn->acurs_lock, flags);
+	tgt->acurs = src->acurs;
+	spin_unlock_irqrestore(&conn->acurs_lock, flags);
+#else
+	atomic64_set(&tgt->acurs, atomic64_read(&src->acurs));
+#endif
+}
+
 /* calculate cursor difference between old and new, where old <= new */
 static inline int smc_curs_diff(unsigned int size,
 				union smc_host_cursor *old,
@@ -222,12 +247,17 @@ static inline void smcr_cdc_msg_to_host(struct smc_host_cdc_msg *local,
 static inline void smcd_cdc_msg_to_host(struct smc_host_cdc_msg *local,
 					struct smcd_cdc_msg *peer)
 {
-	local->prod.wrap = peer->prod_wrap;
-	local->prod.count = peer->prod_count;
-	local->cons.wrap = peer->cons_wrap;
-	local->cons.count = peer->cons_count;
-	local->prod_flags = peer->prod_flags;
-	local->conn_state_flags = peer->conn_state_flags;
+	union smc_host_cursor temp;
+
+	temp.wrap = peer->prod.wrap;
+	temp.count = peer->prod.count;
+	atomic64_set(&local->prod.acurs, atomic64_read(&temp.acurs));
+
+	temp.wrap = peer->cons.wrap;
+	temp.count = peer->cons.count;
+	atomic64_set(&local->cons.acurs, atomic64_read(&temp.acurs));
+	local->prod_flags = peer->cons.prod_flags;
+	local->conn_state_flags = peer->cons.conn_state_flags;
 }
 
 static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg *local,
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 18daebcef181..1c9fa7f0261a 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -184,6 +184,8 @@ free:
 
 		if (!lgr->is_smcd && lnk->state != SMC_LNK_INACTIVE)
 			smc_llc_link_inactive(lnk);
+		if (lgr->is_smcd)
+			smc_ism_signal_shutdown(lgr);
 		smc_lgr_free(lgr);
 	}
 }
@@ -485,7 +487,7 @@ void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport)
 }
 
 /* Called when SMC-D device is terminated or peer is lost */
-void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid)
+void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid, unsigned short vlan)
 {
 	struct smc_link_group *lgr, *l;
 	LIST_HEAD(lgr_free_list);
@@ -495,7 +497,7 @@ void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid)
 	list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) {
 		if (lgr->is_smcd && lgr->smcd == dev &&
 		    (!peer_gid || lgr->peer_gid == peer_gid) &&
-		    !list_empty(&lgr->list)) {
+		    (vlan == VLAN_VID_MASK || lgr->vlan_id == vlan)) {
 			__smc_lgr_terminate(lgr);
 			list_move(&lgr->list, &lgr_free_list);
 		}
@@ -506,6 +508,8 @@ void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid)
 	list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
 		list_del_init(&lgr->list);
 		cancel_delayed_work_sync(&lgr->free_work);
+		if (!peer_gid && vlan == VLAN_VID_MASK) /* dev terminated? */
+			smc_ism_signal_shutdown(lgr);
 		smc_lgr_free(lgr);
 	}
 }
@@ -559,7 +563,7 @@ out:
 
 static bool smcr_lgr_match(struct smc_link_group *lgr,
 			   struct smc_clc_msg_local *lcl,
-			   enum smc_lgr_role role)
+			   enum smc_lgr_role role, u32 clcqpn)
 {
 	return !memcmp(lgr->peer_systemid, lcl->id_for_peer,
 		       SMC_SYSTEMID_LEN) &&
@@ -567,7 +571,9 @@ static bool smcr_lgr_match(struct smc_link_group *lgr,
 		       SMC_GID_SIZE) &&
 		!memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_mac, lcl->mac,
 			sizeof(lcl->mac)) &&
-		lgr->role == role;
+		lgr->role == role &&
+		(lgr->role == SMC_SERV ||
+		 lgr->lnk[SMC_SINGLE_LINK].peer_qpn == clcqpn);
 }
 
 static bool smcd_lgr_match(struct smc_link_group *lgr,
@@ -578,7 +584,7 @@ static bool smcd_lgr_match(struct smc_link_group *lgr,
 
 /* create a new SMC connection (and a new link group if necessary) */
 int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact,
-		    struct smc_ib_device *smcibdev, u8 ibport,
+		    struct smc_ib_device *smcibdev, u8 ibport, u32 clcqpn,
 		    struct smc_clc_msg_local *lcl, struct smcd_dev *smcd,
 		    u64 peer_gid)
 {
@@ -603,7 +609,7 @@ int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact,
 	list_for_each_entry(lgr, &smc_lgr_list.list, list) {
 		write_lock_bh(&lgr->conns_lock);
 		if ((is_smcd ? smcd_lgr_match(lgr, smcd, peer_gid) :
-		     smcr_lgr_match(lgr, lcl, role)) &&
+		     smcr_lgr_match(lgr, lcl, role, clcqpn)) &&
 		    !lgr->sync_err &&
 		    lgr->vlan_id == vlan_id &&
 		    (role == SMC_CLNT ||
@@ -1024,6 +1030,8 @@ void smc_core_exit(void)
 			smc_llc_link_inactive(lnk);
 		}
 		cancel_delayed_work_sync(&lgr->free_work);
+		if (lgr->is_smcd)
+			smc_ism_signal_shutdown(lgr);
 		smc_lgr_free(lgr);	/* free link group */
 	}
 }
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h index c156674733c9..cf98f4d6093e 100644 --- a/net/smc/smc_core.h +++ b/net/smc/smc_core.h | |||
@@ -247,7 +247,8 @@ void smc_lgr_free(struct smc_link_group *lgr); | |||
247 | void smc_lgr_forget(struct smc_link_group *lgr); | 247 | void smc_lgr_forget(struct smc_link_group *lgr); |
248 | void smc_lgr_terminate(struct smc_link_group *lgr); | 248 | void smc_lgr_terminate(struct smc_link_group *lgr); |
249 | void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport); | 249 | void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport); |
250 | void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid); | 250 | void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid, |
251 | unsigned short vlan); | ||
251 | int smc_buf_create(struct smc_sock *smc, bool is_smcd); | 252 | int smc_buf_create(struct smc_sock *smc, bool is_smcd); |
252 | int smc_uncompress_bufsize(u8 compressed); | 253 | int smc_uncompress_bufsize(u8 compressed); |
253 | int smc_rmb_rtoken_handling(struct smc_connection *conn, | 254 | int smc_rmb_rtoken_handling(struct smc_connection *conn, |
@@ -262,7 +263,7 @@ int smc_vlan_by_tcpsk(struct socket *clcsock, unsigned short *vlan_id); | |||
262 | 263 | ||
263 | void smc_conn_free(struct smc_connection *conn); | 264 | void smc_conn_free(struct smc_connection *conn); |
264 | int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact, | 265 | int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact, |
265 | struct smc_ib_device *smcibdev, u8 ibport, | 266 | struct smc_ib_device *smcibdev, u8 ibport, u32 clcqpn, |
266 | struct smc_clc_msg_local *lcl, struct smcd_dev *smcd, | 267 | struct smc_clc_msg_local *lcl, struct smcd_dev *smcd, |
267 | u64 peer_gid); | 268 | u64 peer_gid); |
268 | void smcd_conn_free(struct smc_connection *conn); | 269 | void smcd_conn_free(struct smc_connection *conn); |
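The header change above adds a VLAN argument to smc_smcd_terminate(). Its body is not part of this diff, but the callers in smc_ism.c below pass VLAN_VID_MASK when no specific VLAN is known (device unregister, plain GID event) and 0 as peer_gid when every peer is meant, which suggests a match rule along these lines (a sketch under those assumptions; smcd_lgr_hit() is a hypothetical helper):

	/* does @lgr belong to the peer/VLAN being terminated? */
	static bool smcd_lgr_hit(struct smc_link_group *lgr, struct smcd_dev *dev,
				 u64 peer_gid, unsigned short vlan)
	{
		return lgr->is_smcd && lgr->smcd == dev &&
		       (!peer_gid || lgr->peer_gid == peer_gid) &&
		       (vlan == VLAN_VID_MASK || lgr->vlan_id == vlan);
	}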
diff --git a/net/smc/smc_ism.c b/net/smc/smc_ism.c index e36f21ce7252..2fff79db1a59 100644 --- a/net/smc/smc_ism.c +++ b/net/smc/smc_ism.c | |||
@@ -187,22 +187,28 @@ struct smc_ism_event_work { | |||
187 | #define ISM_EVENT_REQUEST 0x0001 | 187 | #define ISM_EVENT_REQUEST 0x0001 |
188 | #define ISM_EVENT_RESPONSE 0x0002 | 188 | #define ISM_EVENT_RESPONSE 0x0002 |
189 | #define ISM_EVENT_REQUEST_IR 0x00000001 | 189 | #define ISM_EVENT_REQUEST_IR 0x00000001 |
190 | #define ISM_EVENT_CODE_SHUTDOWN 0x80 | ||
190 | #define ISM_EVENT_CODE_TESTLINK 0x83 | 191 | #define ISM_EVENT_CODE_TESTLINK 0x83 |
191 | 192 | ||
193 | union smcd_sw_event_info { | ||
194 | u64 info; | ||
195 | struct { | ||
196 | u8 uid[SMC_LGR_ID_SIZE]; | ||
197 | unsigned short vlan_id; | ||
198 | u16 code; | ||
199 | }; | ||
200 | }; | ||
201 | |||
192 | static void smcd_handle_sw_event(struct smc_ism_event_work *wrk) | 202 | static void smcd_handle_sw_event(struct smc_ism_event_work *wrk) |
193 | { | 203 | { |
194 | union { | 204 | union smcd_sw_event_info ev_info; |
195 | u64 info; | ||
196 | struct { | ||
197 | u32 uid; | ||
198 | unsigned short vlanid; | ||
199 | u16 code; | ||
200 | }; | ||
201 | } ev_info; | ||
202 | 205 | ||
206 | ev_info.info = wrk->event.info; | ||
203 | switch (wrk->event.code) { | 207 | switch (wrk->event.code) { |
208 | case ISM_EVENT_CODE_SHUTDOWN: /* Peer shut down DMBs */ | ||
209 | smc_smcd_terminate(wrk->smcd, wrk->event.tok, ev_info.vlan_id); | ||
210 | break; | ||
204 | case ISM_EVENT_CODE_TESTLINK: /* Activity timer */ | 211 | case ISM_EVENT_CODE_TESTLINK: /* Activity timer */ |
205 | ev_info.info = wrk->event.info; | ||
206 | if (ev_info.code == ISM_EVENT_REQUEST) { | 212 | if (ev_info.code == ISM_EVENT_REQUEST) { |
207 | ev_info.code = ISM_EVENT_RESPONSE; | 213 | ev_info.code = ISM_EVENT_RESPONSE; |
208 | wrk->smcd->ops->signal_event(wrk->smcd, | 214 | wrk->smcd->ops->signal_event(wrk->smcd, |
@@ -215,6 +221,21 @@ static void smcd_handle_sw_event(struct smc_ism_event_work *wrk) | |||
215 | } | 221 | } |
216 | } | 222 | } |
217 | 223 | ||
224 | int smc_ism_signal_shutdown(struct smc_link_group *lgr) | ||
225 | { | ||
226 | int rc; | ||
227 | union smcd_sw_event_info ev_info; | ||
228 | |||
229 | memcpy(ev_info.uid, lgr->id, SMC_LGR_ID_SIZE); | ||
230 | ev_info.vlan_id = lgr->vlan_id; | ||
231 | ev_info.code = ISM_EVENT_REQUEST; | ||
232 | rc = lgr->smcd->ops->signal_event(lgr->smcd, lgr->peer_gid, | ||
233 | ISM_EVENT_REQUEST_IR, | ||
234 | ISM_EVENT_CODE_SHUTDOWN, | ||
235 | ev_info.info); | ||
236 | return rc; | ||
237 | } | ||
238 | |||
218 | /* worker for SMC-D events */ | 239 | /* worker for SMC-D events */ |
219 | static void smc_ism_event_work(struct work_struct *work) | 240 | static void smc_ism_event_work(struct work_struct *work) |
220 | { | 241 | { |
@@ -223,7 +244,7 @@ static void smc_ism_event_work(struct work_struct *work) | |||
223 | 244 | ||
224 | switch (wrk->event.type) { | 245 | switch (wrk->event.type) { |
225 | case ISM_EVENT_GID: /* GID event, token is peer GID */ | 246 | case ISM_EVENT_GID: /* GID event, token is peer GID */ |
226 | smc_smcd_terminate(wrk->smcd, wrk->event.tok); | 247 | smc_smcd_terminate(wrk->smcd, wrk->event.tok, VLAN_VID_MASK); |
227 | break; | 248 | break; |
228 | case ISM_EVENT_DMB: | 249 | case ISM_EVENT_DMB: |
229 | break; | 250 | break; |
@@ -289,7 +310,7 @@ void smcd_unregister_dev(struct smcd_dev *smcd) | |||
289 | spin_unlock(&smcd_dev_list.lock); | 310 | spin_unlock(&smcd_dev_list.lock); |
290 | flush_workqueue(smcd->event_wq); | 311 | flush_workqueue(smcd->event_wq); |
291 | destroy_workqueue(smcd->event_wq); | 312 | destroy_workqueue(smcd->event_wq); |
292 | smc_smcd_terminate(smcd, 0); | 313 | smc_smcd_terminate(smcd, 0, VLAN_VID_MASK); |
293 | 314 | ||
294 | device_del(&smcd->dev); | 315 | device_del(&smcd->dev); |
295 | } | 316 | } |
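The anonymous union in smcd_handle_sw_event() is replaced by the named union smcd_sw_event_info so that both the event handler and the new smc_ism_signal_shutdown() sender agree on how the 64-bit event-info word is laid out; uid becomes a byte array so that lgr->id (also a byte array) can be copied in directly. A standalone illustration of the packing, assuming SMC_LGR_ID_SIZE is 4 (the layout shown is the host CPU's byte order):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define SMC_LGR_ID_SIZE 4	/* assumption for this sketch */

	union smcd_sw_event_info {
		uint64_t info;
		struct {
			uint8_t  uid[SMC_LGR_ID_SIZE];
			uint16_t vlan_id;
			uint16_t code;
		};
	};

	int main(void)
	{
		union smcd_sw_event_info ev = { 0 };
		uint8_t lgr_id[SMC_LGR_ID_SIZE] = { 0xde, 0xad, 0xbe, 0xef };

		memcpy(ev.uid, lgr_id, SMC_LGR_ID_SIZE);
		ev.vlan_id = 100;	/* VLAN of the link group */
		ev.code = 0x0001;	/* ISM_EVENT_REQUEST */
		printf("event info word: %#llx\n",
		       (unsigned long long)ev.info);	/* what signal_event() gets */
		return 0;
	}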
diff --git a/net/smc/smc_ism.h b/net/smc/smc_ism.h index aee45b860b79..4da946cbfa29 100644 --- a/net/smc/smc_ism.h +++ b/net/smc/smc_ism.h | |||
@@ -45,4 +45,5 @@ int smc_ism_register_dmb(struct smc_link_group *lgr, int buf_size, | |||
45 | int smc_ism_unregister_dmb(struct smcd_dev *dev, struct smc_buf_desc *dmb_desc); | 45 | int smc_ism_unregister_dmb(struct smcd_dev *dev, struct smc_buf_desc *dmb_desc); |
46 | int smc_ism_write(struct smcd_dev *dev, const struct smc_ism_position *pos, | 46 | int smc_ism_write(struct smcd_dev *dev, const struct smc_ism_position *pos, |
47 | void *data, size_t len); | 47 | void *data, size_t len); |
48 | int smc_ism_signal_shutdown(struct smc_link_group *lgr); | ||
48 | #endif | 49 | #endif |
diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c index 3c458d279855..c2694750a6a8 100644 --- a/net/smc/smc_wr.c +++ b/net/smc/smc_wr.c | |||
@@ -215,12 +215,14 @@ int smc_wr_tx_put_slot(struct smc_link *link, | |||
215 | 215 | ||
216 | pend = container_of(wr_pend_priv, struct smc_wr_tx_pend, priv); | 216 | pend = container_of(wr_pend_priv, struct smc_wr_tx_pend, priv); |
217 | if (pend->idx < link->wr_tx_cnt) { | 217 | if (pend->idx < link->wr_tx_cnt) { |
218 | u32 idx = pend->idx; | ||
219 | |||
218 | /* clear the full struct smc_wr_tx_pend including .priv */ | 220 | /* clear the full struct smc_wr_tx_pend including .priv */ |
219 | memset(&link->wr_tx_pends[pend->idx], 0, | 221 | memset(&link->wr_tx_pends[pend->idx], 0, |
220 | sizeof(link->wr_tx_pends[pend->idx])); | 222 | sizeof(link->wr_tx_pends[pend->idx])); |
221 | memset(&link->wr_tx_bufs[pend->idx], 0, | 223 | memset(&link->wr_tx_bufs[pend->idx], 0, |
222 | sizeof(link->wr_tx_bufs[pend->idx])); | 224 | sizeof(link->wr_tx_bufs[pend->idx])); |
223 | test_and_clear_bit(pend->idx, link->wr_tx_mask); | 225 | test_and_clear_bit(idx, link->wr_tx_mask); |
224 | return 1; | 226 | return 1; |
225 | } | 227 | } |
226 | 228 | ||
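The smc_wr.c hunk fixes an order-of-operations bug: memset() wipes the whole smc_wr_tx_pend, including pend->idx, so the following test_and_clear_bit() always cleared bit 0 of wr_tx_mask instead of the slot that was actually released. The pattern in isolation (kernel-style sketch, not the original code):

	struct slot { unsigned int idx; /* ... per-slot state ... */ };

	static void put_slot(struct slot *s, unsigned long *busy_mask)
	{
		unsigned int idx = s->idx;	/* save before wiping the slot */

		memset(s, 0, sizeof(*s));	/* s->idx reads as 0 from here on */
		test_and_clear_bit(idx, busy_mask);	/* clears the right bit */
	}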
diff --git a/net/socket.c b/net/socket.c index 593826e11a53..334fcc617ef2 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -853,7 +853,7 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos, | |||
853 | struct socket *sock = file->private_data; | 853 | struct socket *sock = file->private_data; |
854 | 854 | ||
855 | if (unlikely(!sock->ops->splice_read)) | 855 | if (unlikely(!sock->ops->splice_read)) |
856 | return -EINVAL; | 856 | return generic_file_splice_read(file, ppos, pipe, len, flags); |
857 | 857 | ||
858 | return sock->ops->splice_read(sock, ppos, pipe, len, flags); | 858 | return sock->ops->splice_read(sock, ppos, pipe, len, flags); |
859 | } | 859 | } |
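With the net/socket.c change, splicing from a socket whose protocol has no ->splice_read no longer fails with -EINVAL; it falls back to generic_file_splice_read(), i.e. the regular ->read_iter path. From user space the only visible effect is that splice(2) now succeeds on such sockets. A minimal (hypothetical) caller:

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	/* move up to 4 KiB from a connected socket into a fresh pipe */
	static int sock_to_pipe(int sockfd)
	{
		int pipefd[2];
		ssize_t n;

		if (pipe(pipefd) < 0)
			return -1;
		/* previously -EINVAL for protocols without ->splice_read */
		n = splice(sockfd, NULL, pipefd[1], NULL, 4096, SPLICE_F_MOVE);
		if (n < 0)
			perror("splice");
		close(pipefd[0]);
		close(pipefd[1]);
		return n < 0 ? -1 : (int)n;
	}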
diff --git a/net/tipc/discover.c b/net/tipc/discover.c index 2830709957bd..c138d68e8a69 100644 --- a/net/tipc/discover.c +++ b/net/tipc/discover.c | |||
@@ -166,7 +166,8 @@ static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d, | |||
166 | 166 | ||
167 | /* Apply trial address if we just left trial period */ | 167 | /* Apply trial address if we just left trial period */ |
168 | if (!trial && !self) { | 168 | if (!trial && !self) { |
169 | tipc_net_finalize(net, tn->trial_addr); | 169 | tipc_sched_net_finalize(net, tn->trial_addr); |
170 | msg_set_prevnode(buf_msg(d->skb), tn->trial_addr); | ||
170 | msg_set_type(buf_msg(d->skb), DSC_REQ_MSG); | 171 | msg_set_type(buf_msg(d->skb), DSC_REQ_MSG); |
171 | } | 172 | } |
172 | 173 | ||
@@ -300,14 +301,12 @@ static void tipc_disc_timeout(struct timer_list *t) | |||
300 | goto exit; | 301 | goto exit; |
301 | } | 302 | } |
302 | 303 | ||
303 | /* Trial period over ? */ | 304 | /* Did we just leave trial period ? */ |
304 | if (!time_before(jiffies, tn->addr_trial_end)) { | 305 | if (!time_before(jiffies, tn->addr_trial_end) && !tipc_own_addr(net)) { |
305 | /* Did we just leave it ? */ | 306 | mod_timer(&d->timer, jiffies + TIPC_DISC_INIT); |
306 | if (!tipc_own_addr(net)) | 307 | spin_unlock_bh(&d->lock); |
307 | tipc_net_finalize(net, tn->trial_addr); | 308 | tipc_sched_net_finalize(net, tn->trial_addr); |
308 | 309 | return; | |
309 | msg_set_type(buf_msg(d->skb), DSC_REQ_MSG); | ||
310 | msg_set_prevnode(buf_msg(d->skb), tipc_own_addr(net)); | ||
311 | } | 310 | } |
312 | 311 | ||
313 | /* Adjust timeout interval according to discovery phase */ | 312 | /* Adjust timeout interval according to discovery phase */ |
@@ -319,6 +318,8 @@ static void tipc_disc_timeout(struct timer_list *t) | |||
319 | d->timer_intv = TIPC_DISC_SLOW; | 318 | d->timer_intv = TIPC_DISC_SLOW; |
320 | else if (!d->num_nodes && d->timer_intv > TIPC_DISC_FAST) | 319 | else if (!d->num_nodes && d->timer_intv > TIPC_DISC_FAST) |
321 | d->timer_intv = TIPC_DISC_FAST; | 320 | d->timer_intv = TIPC_DISC_FAST; |
321 | msg_set_type(buf_msg(d->skb), DSC_REQ_MSG); | ||
322 | msg_set_prevnode(buf_msg(d->skb), tn->trial_addr); | ||
322 | } | 323 | } |
323 | 324 | ||
324 | mod_timer(&d->timer, jiffies + d->timer_intv); | 325 | mod_timer(&d->timer, jiffies + d->timer_intv); |
diff --git a/net/tipc/net.c b/net/tipc/net.c index 62199cf5a56c..f076edb74338 100644 --- a/net/tipc/net.c +++ b/net/tipc/net.c | |||
@@ -104,6 +104,14 @@ | |||
104 | * - A local spin_lock protecting the queue of subscriber events. | 104 | * - A local spin_lock protecting the queue of subscriber events. |
105 | */ | 105 | */ |
106 | 106 | ||
107 | struct tipc_net_work { | ||
108 | struct work_struct work; | ||
109 | struct net *net; | ||
110 | u32 addr; | ||
111 | }; | ||
112 | |||
113 | static void tipc_net_finalize(struct net *net, u32 addr); | ||
114 | |||
107 | int tipc_net_init(struct net *net, u8 *node_id, u32 addr) | 115 | int tipc_net_init(struct net *net, u8 *node_id, u32 addr) |
108 | { | 116 | { |
109 | if (tipc_own_id(net)) { | 117 | if (tipc_own_id(net)) { |
@@ -119,17 +127,38 @@ int tipc_net_init(struct net *net, u8 *node_id, u32 addr) | |||
119 | return 0; | 127 | return 0; |
120 | } | 128 | } |
121 | 129 | ||
122 | void tipc_net_finalize(struct net *net, u32 addr) | 130 | static void tipc_net_finalize(struct net *net, u32 addr) |
123 | { | 131 | { |
124 | struct tipc_net *tn = tipc_net(net); | 132 | struct tipc_net *tn = tipc_net(net); |
125 | 133 | ||
126 | if (!cmpxchg(&tn->node_addr, 0, addr)) { | 134 | if (cmpxchg(&tn->node_addr, 0, addr)) |
127 | tipc_set_node_addr(net, addr); | 135 | return; |
128 | tipc_named_reinit(net); | 136 | tipc_set_node_addr(net, addr); |
129 | tipc_sk_reinit(net); | 137 | tipc_named_reinit(net); |
130 | tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr, | 138 | tipc_sk_reinit(net); |
131 | TIPC_CLUSTER_SCOPE, 0, addr); | 139 | tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr, |
132 | } | 140 | TIPC_CLUSTER_SCOPE, 0, addr); |
141 | } | ||
142 | |||
143 | static void tipc_net_finalize_work(struct work_struct *work) | ||
144 | { | ||
145 | struct tipc_net_work *fwork; | ||
146 | |||
147 | fwork = container_of(work, struct tipc_net_work, work); | ||
148 | tipc_net_finalize(fwork->net, fwork->addr); | ||
149 | kfree(fwork); | ||
150 | } | ||
151 | |||
152 | void tipc_sched_net_finalize(struct net *net, u32 addr) | ||
153 | { | ||
154 | struct tipc_net_work *fwork = kzalloc(sizeof(*fwork), GFP_ATOMIC); | ||
155 | |||
156 | if (!fwork) | ||
157 | return; | ||
158 | INIT_WORK(&fwork->work, tipc_net_finalize_work); | ||
159 | fwork->net = net; | ||
160 | fwork->addr = addr; | ||
161 | schedule_work(&fwork->work); | ||
133 | } | 162 | } |
134 | 163 | ||
135 | void tipc_net_stop(struct net *net) | 164 | void tipc_net_stop(struct net *net) |
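In the net/tipc/net.c hunk, tipc_net_finalize() becomes static and is only reached through the scheduled work item; the work struct is allocated with GFP_ATOMIC because tipc_sched_net_finalize() is called from the discovery timer/receive path shown above. The cmpxchg() guard is kept, so even if several finalize work items are queued the node address is applied at most once; the idiom in isolation (apply_addr_once() is a hypothetical helper):

	/* cmpxchg() returns the previous value, so only the caller that
	 * observes 0 performs the one-time initialization
	 */
	static void apply_addr_once(u32 *addr, u32 new_addr)
	{
		if (cmpxchg(addr, 0, new_addr))
			return;		/* already finalized */
		/* one-time setup runs here, now in worker (process) context */
	}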
diff --git a/net/tipc/net.h b/net/tipc/net.h index 09ad02b50bb1..b7f2e364eb99 100644 --- a/net/tipc/net.h +++ b/net/tipc/net.h | |||
@@ -42,7 +42,7 @@ | |||
42 | extern const struct nla_policy tipc_nl_net_policy[]; | 42 | extern const struct nla_policy tipc_nl_net_policy[]; |
43 | 43 | ||
44 | int tipc_net_init(struct net *net, u8 *node_id, u32 addr); | 44 | int tipc_net_init(struct net *net, u8 *node_id, u32 addr); |
45 | void tipc_net_finalize(struct net *net, u32 addr); | 45 | void tipc_sched_net_finalize(struct net *net, u32 addr); |
46 | void tipc_net_stop(struct net *net); | 46 | void tipc_net_stop(struct net *net); |
47 | int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb); | 47 | int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb); |
48 | int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info); | 48 | int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info); |
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 636e6131769d..b57b1be7252b 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -1555,16 +1555,17 @@ static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb) | |||
1555 | /** | 1555 | /** |
1556 | * tipc_sk_anc_data_recv - optionally capture ancillary data for received message | 1556 | * tipc_sk_anc_data_recv - optionally capture ancillary data for received message |
1557 | * @m: descriptor for message info | 1557 | * @m: descriptor for message info |
1558 | * @msg: received message header | 1558 | * @skb: received message buffer |
1559 | * @tsk: TIPC port associated with message | 1559 | * @tsk: TIPC port associated with message |
1560 | * | 1560 | * |
1561 | * Note: Ancillary data is not captured if not requested by receiver. | 1561 | * Note: Ancillary data is not captured if not requested by receiver. |
1562 | * | 1562 | * |
1563 | * Returns 0 if successful, otherwise errno | 1563 | * Returns 0 if successful, otherwise errno |
1564 | */ | 1564 | */ |
1565 | static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg, | 1565 | static int tipc_sk_anc_data_recv(struct msghdr *m, struct sk_buff *skb, |
1566 | struct tipc_sock *tsk) | 1566 | struct tipc_sock *tsk) |
1567 | { | 1567 | { |
1568 | struct tipc_msg *msg; | ||
1568 | u32 anc_data[3]; | 1569 | u32 anc_data[3]; |
1569 | u32 err; | 1570 | u32 err; |
1570 | u32 dest_type; | 1571 | u32 dest_type; |
@@ -1573,6 +1574,7 @@ static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg, | |||
1573 | 1574 | ||
1574 | if (likely(m->msg_controllen == 0)) | 1575 | if (likely(m->msg_controllen == 0)) |
1575 | return 0; | 1576 | return 0; |
1577 | msg = buf_msg(skb); | ||
1576 | 1578 | ||
1577 | /* Optionally capture errored message object(s) */ | 1579 | /* Optionally capture errored message object(s) */ |
1578 | err = msg ? msg_errcode(msg) : 0; | 1580 | err = msg ? msg_errcode(msg) : 0; |
@@ -1583,6 +1585,9 @@ static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg, | |||
1583 | if (res) | 1585 | if (res) |
1584 | return res; | 1586 | return res; |
1585 | if (anc_data[1]) { | 1587 | if (anc_data[1]) { |
1588 | if (skb_linearize(skb)) | ||
1589 | return -ENOMEM; | ||
1590 | msg = buf_msg(skb); | ||
1586 | res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1], | 1591 | res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1], |
1587 | msg_data(msg)); | 1592 | msg_data(msg)); |
1588 | if (res) | 1593 | if (res) |
@@ -1744,9 +1749,10 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m, | |||
1744 | 1749 | ||
1745 | /* Collect msg meta data, including error code and rejected data */ | 1750 | /* Collect msg meta data, including error code and rejected data */ |
1746 | tipc_sk_set_orig_addr(m, skb); | 1751 | tipc_sk_set_orig_addr(m, skb); |
1747 | rc = tipc_sk_anc_data_recv(m, hdr, tsk); | 1752 | rc = tipc_sk_anc_data_recv(m, skb, tsk); |
1748 | if (unlikely(rc)) | 1753 | if (unlikely(rc)) |
1749 | goto exit; | 1754 | goto exit; |
1755 | hdr = buf_msg(skb); | ||
1750 | 1756 | ||
1751 | /* Capture data if non-error msg, otherwise just set return value */ | 1757 | /* Capture data if non-error msg, otherwise just set return value */ |
1752 | if (likely(!err)) { | 1758 | if (likely(!err)) { |
@@ -1856,9 +1862,10 @@ static int tipc_recvstream(struct socket *sock, struct msghdr *m, | |||
1856 | /* Collect msg meta data, incl. error code and rejected data */ | 1862 | /* Collect msg meta data, incl. error code and rejected data */ |
1857 | if (!copied) { | 1863 | if (!copied) { |
1858 | tipc_sk_set_orig_addr(m, skb); | 1864 | tipc_sk_set_orig_addr(m, skb); |
1859 | rc = tipc_sk_anc_data_recv(m, hdr, tsk); | 1865 | rc = tipc_sk_anc_data_recv(m, skb, tsk); |
1860 | if (rc) | 1866 | if (rc) |
1861 | break; | 1867 | break; |
1868 | hdr = buf_msg(skb); | ||
1862 | } | 1869 | } |
1863 | 1870 | ||
1864 | /* Copy data if msg ok, otherwise return error/partial data */ | 1871 | /* Copy data if msg ok, otherwise return error/partial data */ |
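The tipc_sk_anc_data_recv() changes hand the skb (rather than a pre-fetched header pointer) into the function because skb_linearize() may reallocate skb->data, which invalidates any tipc_msg pointer taken earlier; that is also why both callers re-do hdr = buf_msg(skb) after the call. The pattern as a small sketch (copy_hdr_data() is a hypothetical helper):

	/* pointers into skb->data must be (re)taken after linearizing */
	static int copy_hdr_data(struct msghdr *m, struct sk_buff *skb, int len)
	{
		struct tipc_msg *hdr;

		if (skb_linearize(skb))		/* may reallocate skb->data */
			return -ENOMEM;
		hdr = buf_msg(skb);		/* take the pointer only now */
		return put_cmsg(m, SOL_TIPC, TIPC_RETDATA, len, msg_data(hdr));
	}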