Diffstat (limited to 'net')
-rw-r--r--   net/ceph/messenger.c    |  12
-rw-r--r--   net/core/dev.c          |   7
-rw-r--r--   net/ipv4/tcp_input.c    |  15
-rw-r--r--   net/ipv4/tcp_output.c   |   6
-rw-r--r--   net/ipv4/tcp_timer.c    |   2
-rw-r--r--   net/ipv6/addrconf.c     |  19
-rw-r--r--   net/packet/af_packet.c  |   4
-rw-r--r--   net/sched/act_police.c  |  36
-rw-r--r--   net/sctp/output.c       |  24
-rw-r--r--   net/sctp/socket.c       |  26
-rw-r--r--   net/sctp/stream.c       |   1
-rw-r--r--   net/smc/af_smc.c        |  11
-rw-r--r--   net/smc/smc_cdc.c       |  26
-rw-r--r--   net/smc/smc_cdc.h       |  60
-rw-r--r--   net/smc/smc_core.c      |  20
-rw-r--r--   net/smc/smc_core.h      |   5
-rw-r--r--   net/smc/smc_ism.c       |  43
-rw-r--r--   net/smc/smc_ism.h       |   1
-rw-r--r--   net/smc/smc_wr.c        |   4
19 files changed, 197 insertions, 125 deletions
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 57fcc6b4bf6e..2f126eff275d 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -580,9 +580,15 @@ static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
         struct bio_vec bvec;
         int ret;
 
-        /* sendpage cannot properly handle pages with page_count == 0,
-         * we need to fallback to sendmsg if that's the case */
-        if (page_count(page) >= 1)
+        /*
+         * sendpage cannot properly handle pages with page_count == 0,
+         * we need to fall back to sendmsg if that's the case.
+         *
+         * Same goes for slab pages: skb_can_coalesce() allows
+         * coalescing neighboring slab objects into a single frag which
+         * triggers one of hardened usercopy checks.
+         */
+        if (page_count(page) >= 1 && !PageSlab(page))
                 return __ceph_tcp_sendpage(sock, page, offset, size, more);
 
         bvec.bv_page = page;
diff --git a/net/core/dev.c b/net/core/dev.c
index d83582623cd7..f69b2fcdee40 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5981,11 +5981,14 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
                 if (work_done)
                         timeout = n->dev->gro_flush_timeout;
 
+                /* When the NAPI instance uses a timeout and keeps postponing
+                 * it, we need to bound somehow the time packets are kept in
+                 * the GRO layer
+                 */
+                napi_gro_flush(n, !!timeout);
                 if (timeout)
                         hrtimer_start(&n->timer, ns_to_ktime(timeout),
                                       HRTIMER_MODE_REL_PINNED);
-                else
-                        napi_gro_flush(n, false);
         }
         if (unlikely(!list_empty(&n->poll_list))) {
                 /* If n->poll_list is not empty, we need to mask irqs */
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index edaaebfbcd46..568dbf3b711a 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4269,7 +4269,7 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
          * If the sack array is full, forget about the last one.
          */
         if (this_sack >= TCP_NUM_SACKS) {
-                if (tp->compressed_ack)
+                if (tp->compressed_ack > TCP_FASTRETRANS_THRESH)
                         tcp_send_ack(sk);
                 this_sack--;
                 tp->rx_opt.num_sacks--;
@@ -4364,6 +4364,7 @@ static bool tcp_try_coalesce(struct sock *sk,
         if (TCP_SKB_CB(from)->has_rxtstamp) {
                 TCP_SKB_CB(to)->has_rxtstamp = true;
                 to->tstamp = from->tstamp;
+                skb_hwtstamps(to)->hwtstamp = skb_hwtstamps(from)->hwtstamp;
         }
 
         return true;
@@ -5189,7 +5190,17 @@ send_now:
         if (!tcp_is_sack(tp) ||
             tp->compressed_ack >= sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr)
                 goto send_now;
-        tp->compressed_ack++;
+
+        if (tp->compressed_ack_rcv_nxt != tp->rcv_nxt) {
+                tp->compressed_ack_rcv_nxt = tp->rcv_nxt;
+                if (tp->compressed_ack > TCP_FASTRETRANS_THRESH)
+                        NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
+                                      tp->compressed_ack - TCP_FASTRETRANS_THRESH);
+                tp->compressed_ack = 0;
+        }
+
+        if (++tp->compressed_ack <= TCP_FASTRETRANS_THRESH)
+                goto send_now;
 
         if (hrtimer_is_queued(&tp->compressed_ack_timer))
                 return;
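Note: with the tcp_input.c change above, the first few ACKs for the same rcv_nxt are still sent immediately and only later ones become candidates for compression; TCPAckCompressed then counts only the ACKs beyond that threshold. A userspace sketch of the counting pattern (DUP_THRESH stands in for TCP_FASTRETRANS_THRESH, conventionally 3; this is not kernel code):

#include <stdio.h>

#define DUP_THRESH 3

struct conn {
        unsigned long rcv_nxt;          /* next expected sequence       */
        unsigned long ack_rcv_nxt;      /* rcv_nxt when counting began  */
        unsigned int  ack_count;        /* ACKs seen for that rcv_nxt   */
};

/* Returns 1 if the ACK should go out immediately, 0 if it may be deferred. */
static int ack_decision(struct conn *c)
{
        if (c->ack_rcv_nxt != c->rcv_nxt) {     /* new in-order data arrived */
                c->ack_rcv_nxt = c->rcv_nxt;
                c->ack_count = 0;
        }
        if (++c->ack_count <= DUP_THRESH)       /* first dup ACKs: send now */
                return 1;
        return 0;                               /* beyond DupThresh: compress */
}

int main(void)
{
        struct conn c = { .rcv_nxt = 1000 };

        for (int i = 0; i < 5; i++)
                printf("ack %d -> %s\n", i + 1,
                       ack_decision(&c) ? "send now" : "defer/compress");
        return 0;
}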
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index d40d4cc53319..c5dc4c4fdadd 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -180,10 +180,10 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
 {
         struct tcp_sock *tp = tcp_sk(sk);
 
-        if (unlikely(tp->compressed_ack)) {
+        if (unlikely(tp->compressed_ack > TCP_FASTRETRANS_THRESH)) {
                 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
-                              tp->compressed_ack);
-                tp->compressed_ack = 0;
+                              tp->compressed_ack - TCP_FASTRETRANS_THRESH);
+                tp->compressed_ack = TCP_FASTRETRANS_THRESH;
                 if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
                         __sock_put(sk);
         }
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 676020663ce8..5f8b6d3cd855 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -740,7 +740,7 @@ static enum hrtimer_restart tcp_compressed_ack_kick(struct hrtimer *timer)
 
         bh_lock_sock(sk);
         if (!sock_owned_by_user(sk)) {
-                if (tp->compressed_ack)
+                if (tp->compressed_ack > TCP_FASTRETRANS_THRESH)
                         tcp_send_ack(sk);
         } else {
                 if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 63a808d5af15..045597b9a7c0 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -179,7 +179,7 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp);
 static void addrconf_dad_work(struct work_struct *w);
 static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
                                    bool send_na);
-static void addrconf_dad_run(struct inet6_dev *idev);
+static void addrconf_dad_run(struct inet6_dev *idev, bool restart);
 static void addrconf_rs_timer(struct timer_list *t);
 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
@@ -3439,6 +3439,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
                            void *ptr)
 {
         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+        struct netdev_notifier_change_info *change_info;
         struct netdev_notifier_changeupper_info *info;
         struct inet6_dev *idev = __in6_dev_get(dev);
         struct net *net = dev_net(dev);
@@ -3513,7 +3514,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
                         break;
                 }
 
-                if (idev) {
+                if (!IS_ERR_OR_NULL(idev)) {
                         if (idev->if_flags & IF_READY) {
                                 /* device is already configured -
                                  * but resend MLD reports, we might
@@ -3521,6 +3522,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
                                  * multicast snooping switches
                                  */
                                 ipv6_mc_up(idev);
+                                change_info = ptr;
+                                if (change_info->flags_changed & IFF_NOARP)
+                                        addrconf_dad_run(idev, true);
                                 rt6_sync_up(dev, RTNH_F_LINKDOWN);
                                 break;
                         }
@@ -3555,7 +3559,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 
                 if (!IS_ERR_OR_NULL(idev)) {
                         if (run_pending)
-                                addrconf_dad_run(idev);
+                                addrconf_dad_run(idev, false);
 
                         /* Device has an address by now */
                         rt6_sync_up(dev, RTNH_F_DEAD);
@@ -4173,16 +4177,19 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
         addrconf_verify_rtnl();
 }
 
-static void addrconf_dad_run(struct inet6_dev *idev)
+static void addrconf_dad_run(struct inet6_dev *idev, bool restart)
 {
         struct inet6_ifaddr *ifp;
 
         read_lock_bh(&idev->lock);
         list_for_each_entry(ifp, &idev->addr_list, if_list) {
                 spin_lock(&ifp->lock);
-                if (ifp->flags & IFA_F_TENTATIVE &&
-                    ifp->state == INET6_IFADDR_STATE_DAD)
+                if ((ifp->flags & IFA_F_TENTATIVE &&
+                     ifp->state == INET6_IFADDR_STATE_DAD) || restart) {
+                        if (restart)
+                                ifp->state = INET6_IFADDR_STATE_PREDAD;
                         addrconf_dad_kick(ifp);
+                }
                 spin_unlock(&ifp->lock);
         }
         read_unlock_bh(&idev->lock);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index ec3095f13aae..a74650e98f42 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2394,7 +2394,7 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
                 void *ph;
                 __u32 ts;
 
-                ph = skb_shinfo(skb)->destructor_arg;
+                ph = skb_zcopy_get_nouarg(skb);
                 packet_dec_pending(&po->tx_ring);
 
                 ts = __packet_set_timestamp(po, ph, skb);
@@ -2461,7 +2461,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
         skb->mark = po->sk.sk_mark;
         skb->tstamp = sockc->transmit_time;
         sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags);
-        skb_shinfo(skb)->destructor_arg = ph.raw;
+        skb_zcopy_set_nouarg(skb, ph.raw);
 
         skb_reserve(skb, hlen);
         skb_reset_network_header(skb);
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 052855d47354..37c9b8f0e10f 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -27,10 +27,7 @@ struct tcf_police_params {
         u32                     tcfp_ewma_rate;
         s64                     tcfp_burst;
         u32                     tcfp_mtu;
-        s64                     tcfp_toks;
-        s64                     tcfp_ptoks;
         s64                     tcfp_mtu_ptoks;
-        s64                     tcfp_t_c;
         struct psched_ratecfg   rate;
         bool                    rate_present;
         struct psched_ratecfg   peak;
@@ -41,6 +38,11 @@ struct tcf_police_params {
 struct tcf_police {
         struct tc_action        common;
         struct tcf_police_params __rcu *params;
+
+        spinlock_t              tcfp_lock ____cacheline_aligned_in_smp;
+        s64                     tcfp_toks;
+        s64                     tcfp_ptoks;
+        s64                     tcfp_t_c;
 };
 
 #define to_police(pc) ((struct tcf_police *)pc)
@@ -122,6 +124,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
                         return ret;
                 }
                 ret = ACT_P_CREATED;
+                spin_lock_init(&(to_police(*a)->tcfp_lock));
         } else if (!ovr) {
                 tcf_idr_release(*a, bind);
                 return -EEXIST;
@@ -186,12 +189,9 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
         }
 
         new->tcfp_burst = PSCHED_TICKS2NS(parm->burst);
-        new->tcfp_toks = new->tcfp_burst;
-        if (new->peak_present) {
+        if (new->peak_present)
                 new->tcfp_mtu_ptoks = (s64)psched_l2t_ns(&new->peak,
                                                          new->tcfp_mtu);
-                new->tcfp_ptoks = new->tcfp_mtu_ptoks;
-        }
 
         if (tb[TCA_POLICE_AVRATE])
                 new->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]);
@@ -207,7 +207,12 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
         }
 
         spin_lock_bh(&police->tcf_lock);
-        new->tcfp_t_c = ktime_get_ns();
+        spin_lock_bh(&police->tcfp_lock);
+        police->tcfp_t_c = ktime_get_ns();
+        police->tcfp_toks = new->tcfp_burst;
+        if (new->peak_present)
+                police->tcfp_ptoks = new->tcfp_mtu_ptoks;
+        spin_unlock_bh(&police->tcfp_lock);
         police->tcf_action = parm->action;
         rcu_swap_protected(police->params,
                            new,
@@ -257,25 +262,28 @@ static int tcf_police_act(struct sk_buff *skb, const struct tc_action *a,
                 }
 
                 now = ktime_get_ns();
-                toks = min_t(s64, now - p->tcfp_t_c, p->tcfp_burst);
+                spin_lock_bh(&police->tcfp_lock);
+                toks = min_t(s64, now - police->tcfp_t_c, p->tcfp_burst);
                 if (p->peak_present) {
-                        ptoks = toks + p->tcfp_ptoks;
+                        ptoks = toks + police->tcfp_ptoks;
                         if (ptoks > p->tcfp_mtu_ptoks)
                                 ptoks = p->tcfp_mtu_ptoks;
                         ptoks -= (s64)psched_l2t_ns(&p->peak,
                                                     qdisc_pkt_len(skb));
                 }
-                toks += p->tcfp_toks;
+                toks += police->tcfp_toks;
                 if (toks > p->tcfp_burst)
                         toks = p->tcfp_burst;
                 toks -= (s64)psched_l2t_ns(&p->rate, qdisc_pkt_len(skb));
                 if ((toks|ptoks) >= 0) {
-                        p->tcfp_t_c = now;
-                        p->tcfp_toks = toks;
-                        p->tcfp_ptoks = ptoks;
+                        police->tcfp_t_c = now;
+                        police->tcfp_toks = toks;
+                        police->tcfp_ptoks = ptoks;
+                        spin_unlock_bh(&police->tcfp_lock);
                         ret = p->tcfp_result;
                         goto inc_drops;
                 }
+                spin_unlock_bh(&police->tcfp_lock);
         }
 
 inc_overlimits:
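Note: the act_police change above moves the mutable token-bucket state (tcfp_toks, tcfp_ptoks, tcfp_t_c) out of the RCU-swapped parameter block and into the action itself, guarded by the new tcfp_lock, so concurrent datapath updates stay consistent while the configuration remains read-mostly. A userspace sketch of that split, with a pthread mutex standing in for the spinlock and illustrative names only:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct police_cfg {             /* read-mostly, replaced wholesale on reconfig */
        int64_t burst_ns;
        int64_t ns_per_byte;
};

struct police_state {           /* shared, mutable, lock-protected */
        pthread_mutex_t lock;
        int64_t toks;
        int64_t last_ns;
};

static int police_conform(const struct police_cfg *cfg,
                          struct police_state *st, int64_t now_ns, int len)
{
        int ok = 0;

        pthread_mutex_lock(&st->lock);
        int64_t toks = now_ns - st->last_ns;    /* elapsed-time credit */
        if (toks > cfg->burst_ns)
                toks = cfg->burst_ns;
        toks += st->toks;
        if (toks > cfg->burst_ns)
                toks = cfg->burst_ns;
        toks -= (int64_t)len * cfg->ns_per_byte;
        if (toks >= 0) {                        /* conforming: commit state */
                st->last_ns = now_ns;
                st->toks = toks;
                ok = 1;
        }
        pthread_mutex_unlock(&st->lock);
        return ok;
}

int main(void)
{
        struct police_cfg cfg = { .burst_ns = 20000, .ns_per_byte = 10 };
        struct police_state st = { .lock = PTHREAD_MUTEX_INITIALIZER,
                                   .toks = 20000, .last_ns = 0 };

        printf("1500B #1: %s\n", police_conform(&cfg, &st, 0, 1500) ? "pass" : "drop");
        printf("1500B #2: %s\n", police_conform(&cfg, &st, 0, 1500) ? "pass" : "drop");
        return 0;
}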
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 67939ad99c01..b0e74a3e77ec 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -118,6 +118,9 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
                 sctp_transport_route(tp, NULL, sp);
                 if (asoc->param_flags & SPP_PMTUD_ENABLE)
                         sctp_assoc_sync_pmtu(asoc);
+        } else if (!sctp_transport_pmtu_check(tp)) {
+                if (asoc->param_flags & SPP_PMTUD_ENABLE)
+                        sctp_assoc_sync_pmtu(asoc);
         }
 
         if (asoc->pmtu_pending) {
@@ -396,25 +399,6 @@ finish:
         return retval;
 }
 
-static void sctp_packet_release_owner(struct sk_buff *skb)
-{
-        sk_free(skb->sk);
-}
-
-static void sctp_packet_set_owner_w(struct sk_buff *skb, struct sock *sk)
-{
-        skb_orphan(skb);
-        skb->sk = sk;
-        skb->destructor = sctp_packet_release_owner;
-
-        /*
-         * The data chunks have already been accounted for in sctp_sendmsg(),
-         * therefore only reserve a single byte to keep socket around until
-         * the packet has been transmitted.
-         */
-        refcount_inc(&sk->sk_wmem_alloc);
-}
-
 static void sctp_packet_gso_append(struct sk_buff *head, struct sk_buff *skb)
 {
         if (SCTP_OUTPUT_CB(head)->last == head)
@@ -601,7 +585,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
         if (!head)
                 goto out;
         skb_reserve(head, packet->overhead + MAX_HEADER);
-        sctp_packet_set_owner_w(head, sk);
+        skb_set_owner_w(head, sk);
 
         /* set sctp header */
         sh = skb_push(head, sizeof(struct sctphdr));
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index e16c090e89f0..1fb2cad94597 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3951,32 +3951,16 @@ static int sctp_setsockopt_pr_supported(struct sock *sk,
                                         unsigned int optlen)
 {
         struct sctp_assoc_value params;
-        struct sctp_association *asoc;
-        int retval = -EINVAL;
 
         if (optlen != sizeof(params))
-                goto out;
-
-        if (copy_from_user(&params, optval, optlen)) {
-                retval = -EFAULT;
-                goto out;
-        }
-
-        asoc = sctp_id2assoc(sk, params.assoc_id);
-        if (asoc) {
-                asoc->prsctp_enable = !!params.assoc_value;
-        } else if (!params.assoc_id) {
-                struct sctp_sock *sp = sctp_sk(sk);
+                return -EINVAL;
 
-                sp->ep->prsctp_enable = !!params.assoc_value;
-        } else {
-                goto out;
-        }
+        if (copy_from_user(&params, optval, optlen))
+                return -EFAULT;
 
-        retval = 0;
+        sctp_sk(sk)->ep->prsctp_enable = !!params.assoc_value;
 
-out:
-        return retval;
+        return 0;
 }
 
 static int sctp_setsockopt_default_prinfo(struct sock *sk,
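Note: after the sctp_setsockopt_pr_supported() simplification above, the value is applied to the endpoint only and params.assoc_id is no longer looked up. For reference, a userspace caller of this option would look roughly like the sketch below (assuming Linux's PR-SCTP socket API, SCTP_PR_SUPPORTED with struct sctp_assoc_value; error handling kept minimal):

#include <netinet/in.h>
#include <netinet/sctp.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
        struct sctp_assoc_value av;

        if (fd < 0) {
                perror("socket");
                return 1;
        }

        memset(&av, 0, sizeof(av));
        av.assoc_value = 1;     /* enable PR-SCTP support for the endpoint */

        if (setsockopt(fd, IPPROTO_SCTP, SCTP_PR_SUPPORTED, &av, sizeof(av)))
                perror("setsockopt(SCTP_PR_SUPPORTED)");

        close(fd);
        return 0;
}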
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index ffb940d3b57c..3892e7630f3a 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -535,7 +535,6 @@ int sctp_send_add_streams(struct sctp_association *asoc,
                 goto out;
         }
 
-        stream->incnt = incnt;
         stream->outcnt = outcnt;
 
         asoc->strreset_outstanding = !!out + !!in;
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 4b865250e238..63f08b4e51d6 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -127,6 +127,8 @@ static int smc_release(struct socket *sock)
         smc = smc_sk(sk);
 
         /* cleanup for a dangling non-blocking connect */
+        if (smc->connect_info && sk->sk_state == SMC_INIT)
+                tcp_abort(smc->clcsock->sk, ECONNABORTED);
         flush_work(&smc->connect_work);
         kfree(smc->connect_info);
         smc->connect_info = NULL;
@@ -551,7 +553,8 @@ static int smc_connect_rdma(struct smc_sock *smc,
 
         mutex_lock(&smc_create_lgr_pending);
         local_contact = smc_conn_create(smc, false, aclc->hdr.flag, ibdev,
-                                        ibport, &aclc->lcl, NULL, 0);
+                                        ibport, ntoh24(aclc->qpn), &aclc->lcl,
+                                        NULL, 0);
         if (local_contact < 0) {
                 if (local_contact == -ENOMEM)
                         reason_code = SMC_CLC_DECL_MEM;/* insufficient memory*/
@@ -621,7 +624,7 @@ static int smc_connect_ism(struct smc_sock *smc,
         int rc = 0;
 
         mutex_lock(&smc_create_lgr_pending);
-        local_contact = smc_conn_create(smc, true, aclc->hdr.flag, NULL, 0,
+        local_contact = smc_conn_create(smc, true, aclc->hdr.flag, NULL, 0, 0,
                                         NULL, ismdev, aclc->gid);
         if (local_contact < 0)
                 return smc_connect_abort(smc, SMC_CLC_DECL_MEM, 0);
@@ -1086,7 +1089,7 @@ static int smc_listen_rdma_init(struct smc_sock *new_smc,
                                 int *local_contact)
 {
         /* allocate connection / link group */
-        *local_contact = smc_conn_create(new_smc, false, 0, ibdev, ibport,
+        *local_contact = smc_conn_create(new_smc, false, 0, ibdev, ibport, 0,
                                          &pclc->lcl, NULL, 0);
         if (*local_contact < 0) {
                 if (*local_contact == -ENOMEM)
@@ -1110,7 +1113,7 @@ static int smc_listen_ism_init(struct smc_sock *new_smc,
         struct smc_clc_msg_smcd *pclc_smcd;
 
         pclc_smcd = smc_get_clc_msg_smcd(pclc);
-        *local_contact = smc_conn_create(new_smc, true, 0, NULL, 0, NULL,
+        *local_contact = smc_conn_create(new_smc, true, 0, NULL, 0, 0, NULL,
                                          ismdev, pclc_smcd->gid);
         if (*local_contact < 0) {
                 if (*local_contact == -ENOMEM)
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
index ed5dcf03fe0b..db83332ac1c8 100644
--- a/net/smc/smc_cdc.c
+++ b/net/smc/smc_cdc.c
@@ -81,7 +81,7 @@ static inline void smc_cdc_add_pending_send(struct smc_connection *conn,
                 sizeof(struct smc_cdc_msg) > SMC_WR_BUF_SIZE,
                 "must increase SMC_WR_BUF_SIZE to at least sizeof(struct smc_cdc_msg)");
         BUILD_BUG_ON_MSG(
-                sizeof(struct smc_cdc_msg) != SMC_WR_TX_SIZE,
+                offsetofend(struct smc_cdc_msg, reserved) > SMC_WR_TX_SIZE,
                 "must adapt SMC_WR_TX_SIZE to sizeof(struct smc_cdc_msg); if not all smc_wr upper layer protocols use the same message size any more, must start to set link->wr_tx_sges[i].length on each individual smc_wr_tx_send()");
         BUILD_BUG_ON_MSG(
                 sizeof(struct smc_cdc_tx_pend) > SMC_WR_TX_PEND_PRIV_SIZE,
@@ -177,23 +177,24 @@ void smc_cdc_tx_dismiss_slots(struct smc_connection *conn)
 int smcd_cdc_msg_send(struct smc_connection *conn)
 {
         struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
+        union smc_host_cursor curs;
         struct smcd_cdc_msg cdc;
         int rc, diff;
 
         memset(&cdc, 0, sizeof(cdc));
         cdc.common.type = SMC_CDC_MSG_TYPE;
-        cdc.prod_wrap = conn->local_tx_ctrl.prod.wrap;
-        cdc.prod_count = conn->local_tx_ctrl.prod.count;
-
-        cdc.cons_wrap = conn->local_tx_ctrl.cons.wrap;
-        cdc.cons_count = conn->local_tx_ctrl.cons.count;
-        cdc.prod_flags = conn->local_tx_ctrl.prod_flags;
-        cdc.conn_state_flags = conn->local_tx_ctrl.conn_state_flags;
+        curs.acurs.counter = atomic64_read(&conn->local_tx_ctrl.prod.acurs);
+        cdc.prod.wrap = curs.wrap;
+        cdc.prod.count = curs.count;
+        curs.acurs.counter = atomic64_read(&conn->local_tx_ctrl.cons.acurs);
+        cdc.cons.wrap = curs.wrap;
+        cdc.cons.count = curs.count;
+        cdc.cons.prod_flags = conn->local_tx_ctrl.prod_flags;
+        cdc.cons.conn_state_flags = conn->local_tx_ctrl.conn_state_flags;
         rc = smcd_tx_ism_write(conn, &cdc, sizeof(cdc), 0, 1);
         if (rc)
                 return rc;
-        smc_curs_copy(&conn->rx_curs_confirmed, &conn->local_tx_ctrl.cons,
-                      conn);
+        smc_curs_copy(&conn->rx_curs_confirmed, &curs, conn);
         /* Calculate transmitted data and increment free send buffer space */
         diff = smc_curs_diff(conn->sndbuf_desc->len, &conn->tx_curs_fin,
                              &conn->tx_curs_sent);
@@ -331,13 +332,16 @@ static void smc_cdc_msg_recv(struct smc_sock *smc, struct smc_cdc_msg *cdc)
 static void smcd_cdc_rx_tsklet(unsigned long data)
 {
         struct smc_connection *conn = (struct smc_connection *)data;
+        struct smcd_cdc_msg *data_cdc;
         struct smcd_cdc_msg cdc;
         struct smc_sock *smc;
 
         if (!conn)
                 return;
 
-        memcpy(&cdc, conn->rmb_desc->cpu_addr, sizeof(cdc));
+        data_cdc = (struct smcd_cdc_msg *)conn->rmb_desc->cpu_addr;
+        smcd_curs_copy(&cdc.prod, &data_cdc->prod, conn);
+        smcd_curs_copy(&cdc.cons, &data_cdc->cons, conn);
         smc = container_of(conn, struct smc_sock, conn);
         smc_cdc_msg_recv(smc, (struct smc_cdc_msg *)&cdc);
 }
diff --git a/net/smc/smc_cdc.h b/net/smc/smc_cdc.h
index 934df4473a7c..b5bfe38c7f9b 100644
--- a/net/smc/smc_cdc.h
+++ b/net/smc/smc_cdc.h
@@ -48,21 +48,31 @@ struct smc_cdc_msg {
         struct smc_cdc_producer_flags   prod_flags;
         struct smc_cdc_conn_state_flags conn_state_flags;
         u8                              reserved[18];
-} __packed;                     /* format defined in RFC7609 */
+};
+
+/* SMC-D cursor format */
+union smcd_cdc_cursor {
+        struct {
+                u16     wrap;
+                u32     count;
+                struct smc_cdc_producer_flags   prod_flags;
+                struct smc_cdc_conn_state_flags conn_state_flags;
+        } __packed;
+#ifdef KERNEL_HAS_ATOMIC64
+        atomic64_t              acurs;          /* for atomic processing */
+#else
+        u64                     acurs;          /* for atomic processing */
+#endif
+} __aligned(8);
 
 /* CDC message for SMC-D */
 struct smcd_cdc_msg {
         struct smc_wr_rx_hdr common;    /* Type = 0xFE */
         u8 res1[7];
-        u16 prod_wrap;
-        u32 prod_count;
-        u8 res2[2];
-        u16 cons_wrap;
-        u32 cons_count;
-        struct smc_cdc_producer_flags   prod_flags;
-        struct smc_cdc_conn_state_flags conn_state_flags;
+        union smcd_cdc_cursor           prod;
+        union smcd_cdc_cursor           cons;
         u8 res3[8];
-} __packed;
+} __aligned(8);
 
 static inline bool smc_cdc_rxed_any_close(struct smc_connection *conn)
 {
@@ -135,6 +145,21 @@ static inline void smc_curs_copy_net(union smc_cdc_cursor *tgt,
 #endif
 }
 
+static inline void smcd_curs_copy(union smcd_cdc_cursor *tgt,
+                                  union smcd_cdc_cursor *src,
+                                  struct smc_connection *conn)
+{
+#ifndef KERNEL_HAS_ATOMIC64
+        unsigned long flags;
+
+        spin_lock_irqsave(&conn->acurs_lock, flags);
+        tgt->acurs = src->acurs;
+        spin_unlock_irqrestore(&conn->acurs_lock, flags);
+#else
+        atomic64_set(&tgt->acurs, atomic64_read(&src->acurs));
+#endif
+}
+
 /* calculate cursor difference between old and new, where old <= new */
 static inline int smc_curs_diff(unsigned int size,
                                 union smc_host_cursor *old,
@@ -222,12 +247,17 @@ static inline void smcr_cdc_msg_to_host(struct smc_host_cdc_msg *local,
 static inline void smcd_cdc_msg_to_host(struct smc_host_cdc_msg *local,
                                         struct smcd_cdc_msg *peer)
 {
-        local->prod.wrap = peer->prod_wrap;
-        local->prod.count = peer->prod_count;
-        local->cons.wrap = peer->cons_wrap;
-        local->cons.count = peer->cons_count;
-        local->prod_flags = peer->prod_flags;
-        local->conn_state_flags = peer->conn_state_flags;
+        union smc_host_cursor temp;
+
+        temp.wrap = peer->prod.wrap;
+        temp.count = peer->prod.count;
+        atomic64_set(&local->prod.acurs, atomic64_read(&temp.acurs));
+
+        temp.wrap = peer->cons.wrap;
+        temp.count = peer->cons.count;
+        atomic64_set(&local->cons.acurs, atomic64_read(&temp.acurs));
+        local->prod_flags = peer->cons.prod_flags;
+        local->conn_state_flags = peer->cons.conn_state_flags;
 }
 
 static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg *local,
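Note: union smcd_cdc_cursor above lets the wrap/count pair of an SMC-D cursor be read and written as one 8-byte value (atomic64_t where available), so smcd_curs_copy() never mixes the wrap of one update with the count of another. A userspace sketch of the same idea using C11 atomics, with illustrative names and sizes:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct cursor {
        uint16_t wrap;
        uint32_t count;
};                                      /* fits in 8 bytes including padding */

/* Shared cursor stored as a single 64-bit word so a reader always sees a
 * matching wrap/count pair.
 */
static _Atomic uint64_t shared_cursor;

static uint64_t cursor_pack(struct cursor c)
{
        uint64_t v = 0;

        memcpy(&v, &c, sizeof(c));
        return v;
}

static struct cursor cursor_unpack(uint64_t v)
{
        struct cursor c;

        memcpy(&c, &v, sizeof(c));
        return c;
}

static void cursor_store(struct cursor c)
{
        atomic_store(&shared_cursor, cursor_pack(c));
}

static struct cursor cursor_load(void)
{
        return cursor_unpack(atomic_load(&shared_cursor));
}

int main(void)
{
        cursor_store((struct cursor){ .wrap = 2, .count = 4096 });
        struct cursor snap = cursor_load();

        printf("wrap=%u count=%u\n", snap.wrap, snap.count);
        return 0;
}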
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 1382ddae591e..35c1cdc93e1c 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -189,6 +189,8 @@ free:
 
                 if (!lgr->is_smcd && lnk->state != SMC_LNK_INACTIVE)
                         smc_llc_link_inactive(lnk);
+                if (lgr->is_smcd)
+                        smc_ism_signal_shutdown(lgr);
                 smc_lgr_free(lgr);
         }
 }
@@ -495,7 +497,7 @@ void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport)
 }
 
 /* Called when SMC-D device is terminated or peer is lost */
-void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid)
+void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid, unsigned short vlan)
 {
         struct smc_link_group *lgr, *l;
         LIST_HEAD(lgr_free_list);
@@ -505,7 +507,7 @@ void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid)
         list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) {
                 if (lgr->is_smcd && lgr->smcd == dev &&
                     (!peer_gid || lgr->peer_gid == peer_gid) &&
-                    !list_empty(&lgr->list)) {
+                    (vlan == VLAN_VID_MASK || lgr->vlan_id == vlan)) {
                         __smc_lgr_terminate(lgr);
                         list_move(&lgr->list, &lgr_free_list);
                 }
@@ -516,6 +518,8 @@ void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid)
         list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
                 list_del_init(&lgr->list);
                 cancel_delayed_work_sync(&lgr->free_work);
+                if (!peer_gid && vlan == VLAN_VID_MASK) /* dev terminated? */
+                        smc_ism_signal_shutdown(lgr);
                 smc_lgr_free(lgr);
         }
 }
@@ -569,7 +573,7 @@ out:
 
 static bool smcr_lgr_match(struct smc_link_group *lgr,
                            struct smc_clc_msg_local *lcl,
-                           enum smc_lgr_role role)
+                           enum smc_lgr_role role, u32 clcqpn)
 {
         return !memcmp(lgr->peer_systemid, lcl->id_for_peer,
                        SMC_SYSTEMID_LEN) &&
@@ -577,7 +581,9 @@ static bool smcr_lgr_match(struct smc_link_group *lgr,
                        SMC_GID_SIZE) &&
                !memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_mac, lcl->mac,
                        sizeof(lcl->mac)) &&
-               lgr->role == role;
+               lgr->role == role &&
+               (lgr->role == SMC_SERV ||
+                lgr->lnk[SMC_SINGLE_LINK].peer_qpn == clcqpn);
 }
 
 static bool smcd_lgr_match(struct smc_link_group *lgr,
@@ -588,7 +594,7 @@ static bool smcd_lgr_match(struct smc_link_group *lgr,
 
 /* create a new SMC connection (and a new link group if necessary) */
 int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact,
-                    struct smc_ib_device *smcibdev, u8 ibport,
+                    struct smc_ib_device *smcibdev, u8 ibport, u32 clcqpn,
                     struct smc_clc_msg_local *lcl, struct smcd_dev *smcd,
                     u64 peer_gid)
 {
@@ -613,7 +619,7 @@ int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact,
         list_for_each_entry(lgr, &smc_lgr_list.list, list) {
                 write_lock_bh(&lgr->conns_lock);
                 if ((is_smcd ? smcd_lgr_match(lgr, smcd, peer_gid) :
-                     smcr_lgr_match(lgr, lcl, role)) &&
+                     smcr_lgr_match(lgr, lcl, role, clcqpn)) &&
                     !lgr->sync_err &&
                     lgr->vlan_id == vlan_id &&
                     (role == SMC_CLNT ||
@@ -1034,6 +1040,8 @@ void smc_core_exit(void)
                         smc_llc_link_inactive(lnk);
                 }
                 cancel_delayed_work_sync(&lgr->free_work);
+                if (lgr->is_smcd)
+                        smc_ism_signal_shutdown(lgr);
                 smc_lgr_free(lgr);      /* free link group */
         }
 }
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index e177c6675038..b00287989a3d 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -249,7 +249,8 @@ struct smc_clc_msg_local;
 void smc_lgr_forget(struct smc_link_group *lgr);
 void smc_lgr_terminate(struct smc_link_group *lgr);
 void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport);
-void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid);
+void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid,
+                        unsigned short vlan);
 int smc_buf_create(struct smc_sock *smc, bool is_smcd);
 int smc_uncompress_bufsize(u8 compressed);
 int smc_rmb_rtoken_handling(struct smc_connection *conn,
@@ -264,7 +265,7 @@ int smc_vlan_by_tcpsk(struct socket *clcsock, unsigned short *vlan_id);
 
 void smc_conn_free(struct smc_connection *conn);
 int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact,
-                    struct smc_ib_device *smcibdev, u8 ibport,
+                    struct smc_ib_device *smcibdev, u8 ibport, u32 clcqpn,
                     struct smc_clc_msg_local *lcl, struct smcd_dev *smcd,
                     u64 peer_gid);
 void smcd_conn_free(struct smc_connection *conn);
diff --git a/net/smc/smc_ism.c b/net/smc/smc_ism.c
index e36f21ce7252..2fff79db1a59 100644
--- a/net/smc/smc_ism.c
+++ b/net/smc/smc_ism.c
@@ -187,22 +187,28 @@ struct smc_ism_event_work {
 #define ISM_EVENT_REQUEST               0x0001
 #define ISM_EVENT_RESPONSE              0x0002
 #define ISM_EVENT_REQUEST_IR            0x00000001
+#define ISM_EVENT_CODE_SHUTDOWN         0x80
 #define ISM_EVENT_CODE_TESTLINK         0x83
 
+union smcd_sw_event_info {
+        u64     info;
+        struct {
+                u8              uid[SMC_LGR_ID_SIZE];
+                unsigned short  vlan_id;
+                u16             code;
+        };
+};
+
 static void smcd_handle_sw_event(struct smc_ism_event_work *wrk)
 {
-        union {
-                u64     info;
-                struct {
-                        u32             uid;
-                        unsigned short  vlanid;
-                        u16             code;
-                };
-        } ev_info;
+        union smcd_sw_event_info ev_info;
 
+        ev_info.info = wrk->event.info;
         switch (wrk->event.code) {
+        case ISM_EVENT_CODE_SHUTDOWN:   /* Peer shut down DMBs */
+                smc_smcd_terminate(wrk->smcd, wrk->event.tok, ev_info.vlan_id);
+                break;
         case ISM_EVENT_CODE_TESTLINK:   /* Activity timer */
-                ev_info.info = wrk->event.info;
                 if (ev_info.code == ISM_EVENT_REQUEST) {
                         ev_info.code = ISM_EVENT_RESPONSE;
                         wrk->smcd->ops->signal_event(wrk->smcd,
@@ -215,6 +221,21 @@ static void smcd_handle_sw_event(struct smc_ism_event_work *wrk)
         }
 }
 
+int smc_ism_signal_shutdown(struct smc_link_group *lgr)
+{
+        int rc;
+        union smcd_sw_event_info ev_info;
+
+        memcpy(ev_info.uid, lgr->id, SMC_LGR_ID_SIZE);
+        ev_info.vlan_id = lgr->vlan_id;
+        ev_info.code = ISM_EVENT_REQUEST;
+        rc = lgr->smcd->ops->signal_event(lgr->smcd, lgr->peer_gid,
+                                          ISM_EVENT_REQUEST_IR,
+                                          ISM_EVENT_CODE_SHUTDOWN,
+                                          ev_info.info);
+        return rc;
+}
+
 /* worker for SMC-D events */
 static void smc_ism_event_work(struct work_struct *work)
 {
@@ -223,7 +244,7 @@ static void smc_ism_event_work(struct work_struct *work)
 
         switch (wrk->event.type) {
         case ISM_EVENT_GID:     /* GID event, token is peer GID */
-                smc_smcd_terminate(wrk->smcd, wrk->event.tok);
+                smc_smcd_terminate(wrk->smcd, wrk->event.tok, VLAN_VID_MASK);
                 break;
         case ISM_EVENT_DMB:
                 break;
@@ -289,7 +310,7 @@ void smcd_unregister_dev(struct smcd_dev *smcd)
         spin_unlock(&smcd_dev_list.lock);
         flush_workqueue(smcd->event_wq);
         destroy_workqueue(smcd->event_wq);
-        smc_smcd_terminate(smcd, 0);
+        smc_smcd_terminate(smcd, 0, VLAN_VID_MASK);
 
         device_del(&smcd->dev);
 }
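Note: union smcd_sw_event_info above packs the link-group id, VLAN id and event code into the single u64 "info" argument that the ISM signal_event() callback accepts. A userspace sketch of that packing trick (field sizes and names are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

union sw_event_info {
        uint64_t info;
        struct {
                uint8_t  uid[4];        /* link-group id */
                uint16_t vlan_id;
                uint16_t code;
        };
};

/* Stand-in for a low-level signalling function that only takes a u64. */
static void signal_event(uint64_t info)
{
        union sw_event_info ev = { .info = info };

        printf("uid=%02x%02x%02x%02x vlan=%u code=0x%x\n",
               ev.uid[0], ev.uid[1], ev.uid[2], ev.uid[3],
               ev.vlan_id, ev.code);
}

int main(void)
{
        union sw_event_info ev = { 0 };
        uint8_t lgr_id[4] = { 0xde, 0xad, 0xbe, 0xef };

        memcpy(ev.uid, lgr_id, sizeof(lgr_id));
        ev.vlan_id = 100;
        ev.code = 0x80;                 /* e.g. a shutdown event code */
        signal_event(ev.info);
        return 0;
}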
diff --git a/net/smc/smc_ism.h b/net/smc/smc_ism.h
index aee45b860b79..4da946cbfa29 100644
--- a/net/smc/smc_ism.h
+++ b/net/smc/smc_ism.h
@@ -45,4 +45,5 @@ int smc_ism_register_dmb(struct smc_link_group *lgr, int buf_size,
 int smc_ism_unregister_dmb(struct smcd_dev *dev, struct smc_buf_desc *dmb_desc);
 int smc_ism_write(struct smcd_dev *dev, const struct smc_ism_position *pos,
                   void *data, size_t len);
+int smc_ism_signal_shutdown(struct smc_link_group *lgr);
 #endif
diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
index 3c458d279855..c2694750a6a8 100644
--- a/net/smc/smc_wr.c
+++ b/net/smc/smc_wr.c
@@ -215,12 +215,14 @@ int smc_wr_tx_put_slot(struct smc_link *link,
 
         pend = container_of(wr_pend_priv, struct smc_wr_tx_pend, priv);
         if (pend->idx < link->wr_tx_cnt) {
+                u32 idx = pend->idx;
+
                 /* clear the full struct smc_wr_tx_pend including .priv */
                 memset(&link->wr_tx_pends[pend->idx], 0,
                        sizeof(link->wr_tx_pends[pend->idx]));
                 memset(&link->wr_tx_bufs[pend->idx], 0,
                        sizeof(link->wr_tx_bufs[pend->idx]));
-                test_and_clear_bit(pend->idx, link->wr_tx_mask);
+                test_and_clear_bit(idx, link->wr_tx_mask);
                 return 1;
         }
 
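Note: the smc_wr.c hunk saves pend->idx in a local before the memset() that wipes the pend structure, so the later bitmap update no longer reads a zeroed index. A userspace sketch of that bug class and the fix (illustrative names only):

#include <stdio.h>
#include <string.h>

struct slot {
        unsigned int idx;
        char priv[16];
};

static struct slot slots[4];
static unsigned long busy_mask;

static void put_slot_buggy(struct slot *s)
{
        memset(s, 0, sizeof(*s));               /* wipes s->idx too */
        busy_mask &= ~(1UL << s->idx);          /* always clears bit 0! */
}

static void put_slot_fixed(struct slot *s)
{
        unsigned int idx = s->idx;              /* save before clearing */

        memset(s, 0, sizeof(*s));
        busy_mask &= ~(1UL << idx);             /* clears the right bit */
}

int main(void)
{
        slots[2].idx = 2;
        slots[3].idx = 3;
        busy_mask = (1UL << 2) | (1UL << 3);

        put_slot_buggy(&slots[2]);
        printf("after buggy put: mask=0x%lx (bit 2 still set)\n", busy_mask);

        put_slot_fixed(&slots[3]);
        printf("after fixed put: mask=0x%lx\n", busy_mask);
        return 0;
}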