author    Linus Torvalds <torvalds@linux-foundation.org>  2018-05-04 00:57:03 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-05-04 00:57:03 -0400
commit    e523a2562a4457d9aae9b657125d193218631681 (patch)
tree      59e27279d6c672ad50b67fb55f8ddd29dc41ef08 /net
parent    bb609316d406c6e4dc29e0219d40e70837f70f8a (diff)
parent    a8d7aa17bbc970971ccdf71988ea19230ab368b1 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Various sockmap fixes from John Fastabend (pinned map handling,
    blocking in recvmsg, double page put, error handling during
    redirect failures, etc.)

 2) Fix dead code handling in x86-64 JIT, from Gianluca Borello.

 3) Missing device put in RDS IB code, from Dag Moxnes.

 4) Don't process fast open during repair mode in TCP, from Yuchung
    Cheng.

 5) Move address/port comparison fixes in SCTP, from Xin Long.

 6) Handle adding a bond slave's master into a bridge properly, from
    Hangbin Liu.

 7) IPv6 multipath code can operate on uninitialized memory due to an
    assumption that the icmp header is in the linear SKB area. Fix from
    Eric Dumazet.

 8) Don't invoke do_tcp_sendpages() recursively via TLS, from Dave
    Watson.

 9) Fix memory leaks in x86-64 JIT, from Daniel Borkmann.

10) RDS leaks kernel memory to userspace, from Eric Dumazet.

11) DCCP can invoke a tasklet on a freed socket, take a refcount. Also
    from Eric Dumazet.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (78 commits)
  dccp: fix tasklet usage
  smc: fix sendpage() call
  net/smc: handle unregistered buffers
  net/smc: call consolidation
  qed: fix spelling mistake: "offloded" -> "offloaded"
  net/mlx5e: fix spelling mistake: "loobpack" -> "loopback"
  tcp: restore autocorking
  rds: do not leak kernel memory to user land
  qmi_wwan: do not steal interfaces from class drivers
  ipv4: fix fnhe usage by non-cached routes
  bpf: sockmap, fix error handling in redirect failures
  bpf: sockmap, zero sg_size on error when buffer is released
  bpf: sockmap, fix scatterlist update on error path in send with apply
  net_sched: fq: take care of throttled flows before reuse
  ipv6: Revert "ipv6: Allow non-gateway ECMP for IPv6"
  bpf, x64: fix memleak when not converging on calls
  bpf, x64: fix memleak when not converging after image
  net/smc: restrict non-blocking connect finish
  8139too: Use disable_irq_nosync() in rtl8139_poll_controller()
  sctp: fix the issue that the cookie-ack with auth can't get processed
  ...
Diffstat (limited to 'net')
-rw-r--r--  net/bridge/br_if.c       |   4
-rw-r--r--  net/compat.c             |   6
-rw-r--r--  net/core/ethtool.c       |   5
-rw-r--r--  net/core/filter.c        |   1
-rw-r--r--  net/dccp/ccids/ccid2.c   |  14
-rw-r--r--  net/dccp/timer.c         |   2
-rw-r--r--  net/ipv4/route.c         | 118
-rw-r--r--  net/ipv4/tcp.c           |   7
-rw-r--r--  net/ipv4/tcp_bbr.c       |   4
-rw-r--r--  net/ipv6/route.c         |   7
-rw-r--r--  net/rds/ib_cm.c          |   3
-rw-r--r--  net/rds/recv.c           |   1
-rw-r--r--  net/sched/sch_fq.c       |  37
-rw-r--r--  net/sctp/inqueue.c       |   2
-rw-r--r--  net/sctp/ipv6.c          |   3
-rw-r--r--  net/sctp/sm_statefuns.c  |   8
-rw-r--r--  net/sctp/stream.c        |   2
-rw-r--r--  net/smc/af_smc.c         |  61
-rw-r--r--  net/smc/smc_core.c       |  22
-rw-r--r--  net/smc/smc_core.h       |   3
-rw-r--r--  net/tipc/node.c          |   2
-rw-r--r--  net/tls/tls_main.c       |   7
22 files changed, 190 insertions(+), 129 deletions(-)
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 82c1a6f430b3..5bb6681fa91e 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -518,8 +518,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev,
 		return -ELOOP;
 	}
 
-	/* Device is already being bridged */
-	if (br_port_exists(dev))
+	/* Device has master upper dev */
+	if (netdev_master_upper_dev_get(dev))
 		return -EBUSY;
 
 	/* No bridging devices that dislike that (e.g. wireless) */
diff --git a/net/compat.c b/net/compat.c
index 5ae7437d3853..7242cce5631b 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -377,7 +377,8 @@ static int compat_sock_setsockopt(struct socket *sock, int level, int optname,
 	    optname == SO_ATTACH_REUSEPORT_CBPF)
 		return do_set_attach_filter(sock, level, optname,
 					    optval, optlen);
-	if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)
+	if (!COMPAT_USE_64BIT_TIME &&
+	    (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO))
 		return do_set_sock_timeout(sock, level, optname, optval, optlen);
 
 	return sock_setsockopt(sock, level, optname, optval, optlen);
@@ -448,7 +449,8 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
 static int compat_sock_getsockopt(struct socket *sock, int level, int optname,
 				  char __user *optval, int __user *optlen)
 {
-	if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)
+	if (!COMPAT_USE_64BIT_TIME &&
+	    (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO))
 		return do_get_sock_timeout(sock, level, optname, optval, optlen);
 	return sock_getsockopt(sock, level, optname, optval, optlen);
 }
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 03416e6dd5d7..ba02f0dfe85c 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1032,6 +1032,11 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
 		info_size = sizeof(info);
 		if (copy_from_user(&info, useraddr, info_size))
 			return -EFAULT;
+		/* Since malicious users may modify the original data,
+		 * we need to check whether FLOW_RSS is still requested.
+		 */
+		if (!(info.flow_type & FLOW_RSS))
+			return -EINVAL;
 	}
 
 	if (info.cmd == ETHTOOL_GRXCLSRLALL) {
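
The FLOW_RSS re-check above guards a classic double fetch: the handler first copies a short prefix of the structure from userspace, then (for FLOW_RSS requests) copies a larger version, and a malicious user can rewrite the buffer between the two copies, so any invariant checked against the first copy must be re-validated against the second. A minimal user-space sketch of that pattern follows; the struct, flag value, and fetch() helper are illustrative stand-ins, not the ethtool API:

/* Sketch of validate-after-second-fetch; all names are illustrative. */
#include <stdint.h>
#include <string.h>

struct req { uint32_t flow_type; uint32_t data[4]; };

/* stand-in for copy_from_user(): the source buffer is shared with an
 * untrusted writer and may change between calls */
static int fetch(struct req *dst, const volatile struct req *src, size_t len)
{
	memcpy(dst, (const void *)src, len);
	return 0;
}

#define FLOW_RSS_FLAG 0x80000000u

static int handle(const volatile struct req *user)
{
	struct req info;

	if (fetch(&info, user, sizeof(uint32_t)))	/* first, short fetch */
		return -1;
	if (info.flow_type & FLOW_RSS_FLAG) {
		if (fetch(&info, user, sizeof(info)))	/* second, full fetch */
			return -1;
		/* re-validate: the flag may have been cleared in between */
		if (!(info.flow_type & FLOW_RSS_FLAG))
			return -1;
	}
	return 0;
}

int main(void)
{
	struct req r = { .flow_type = FLOW_RSS_FLAG };
	return handle(&r) ? 1 : 0;
}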
diff --git a/net/core/filter.c b/net/core/filter.c
index d31aff93270d..e77c30ca491d 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3240,6 +3240,7 @@ BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb,
 	skb_dst_set(skb, (struct dst_entry *) md);
 
 	info = &md->u.tun_info;
+	memset(info, 0, sizeof(*info));
 	info->mode = IP_TUNNEL_INFO_TX;
 
 	info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 92d016e87816..385f153fe031 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -126,6 +126,16 @@ static void ccid2_change_l_seq_window(struct sock *sk, u64 val)
 						  DCCPF_SEQ_WMAX));
 }
 
+static void dccp_tasklet_schedule(struct sock *sk)
+{
+	struct tasklet_struct *t = &dccp_sk(sk)->dccps_xmitlet;
+
+	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
+		sock_hold(sk);
+		__tasklet_schedule(t);
+	}
+}
+
 static void ccid2_hc_tx_rto_expire(struct timer_list *t)
 {
 	struct ccid2_hc_tx_sock *hc = from_timer(hc, t, tx_rtotimer);
@@ -166,7 +176,7 @@ static void ccid2_hc_tx_rto_expire(struct timer_list *t)
 
 	/* if we were blocked before, we may now send cwnd=1 packet */
 	if (sender_was_blocked)
-		tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
+		dccp_tasklet_schedule(sk);
 	/* restart backed-off timer */
 	sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
 out:
@@ -706,7 +716,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
 done:
 	/* check if incoming Acks allow pending packets to be sent */
 	if (sender_was_blocked && !ccid2_cwnd_network_limited(hc))
-		tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
+		dccp_tasklet_schedule(sk);
 	dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
 }
 
diff --git a/net/dccp/timer.c b/net/dccp/timer.c
index b50a8732ff43..1501a20a94ca 100644
--- a/net/dccp/timer.c
+++ b/net/dccp/timer.c
@@ -232,6 +232,7 @@ static void dccp_write_xmitlet(unsigned long data)
 	else
 		dccp_write_xmit(sk);
 	bh_unlock_sock(sk);
+	sock_put(sk);
 }
 
 static void dccp_write_xmit_timer(struct timer_list *t)
@@ -240,7 +241,6 @@ static void dccp_write_xmit_timer(struct timer_list *t)
 	struct sock *sk = &dp->dccps_inet_connection.icsk_inet.sk;
 
 	dccp_write_xmitlet((unsigned long)sk);
-	sock_put(sk);
 }
 
 void dccp_init_xmit_timers(struct sock *sk)
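
Taken together, the DCCP changes pair every successful tasklet schedule with sock_hold() and move the matching sock_put() into the tasklet body itself, so the socket can no longer be freed while a run is pending; test_and_set_bit(TASKLET_STATE_SCHED, ...) ensures that only the caller which actually queues the tasklet takes the extra reference. A small user-space sketch of that pattern, with illustrative names and C11 atomics standing in for the kernel primitives:

/* Sketch: take a reference only when we are the ones who actually queued
 * the work, and drop it when the work runs. Names are illustrative. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int refcnt;
	atomic_bool scheduled;	/* stands in for TASKLET_STATE_SCHED */
};

static void obj_hold(struct obj *o) { atomic_fetch_add(&o->refcnt, 1); }

static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->refcnt, 1) == 1) {
		puts("freeing object");
		free(o);
	}
}

/* only the caller that flips scheduled 0->1 takes the reference,
 * mirroring test_and_set_bit() + sock_hold() in dccp_tasklet_schedule() */
static void schedule_work(struct obj *o)
{
	if (!atomic_exchange(&o->scheduled, true))
		obj_hold(o);
}

/* worker: clears the flag, does the work, then drops the reference it was
 * handed, mirroring sock_put() at the end of dccp_write_xmitlet() */
static void run_work(struct obj *o)
{
	atomic_store(&o->scheduled, false);
	puts("work ran on a still-valid object");
	obj_put(o);
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	atomic_init(&o->refcnt, 1);
	atomic_init(&o->scheduled, false);
	schedule_work(o);
	schedule_work(o);	/* already queued: no extra reference taken */
	obj_put(o);		/* owner drops its ref; object survives */
	run_work(o);		/* work still sees a live object */
	return 0;
}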
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index ccb25d80f679..1412a7baf0b9 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -709,7 +709,7 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
 	fnhe->fnhe_gw = gw;
 	fnhe->fnhe_pmtu = pmtu;
 	fnhe->fnhe_mtu_locked = lock;
-	fnhe->fnhe_expires = expires;
+	fnhe->fnhe_expires = max(1UL, expires);
 
 	/* Exception created; mark the cached routes for the nexthop
 	 * stale, so anyone caching it rechecks if this exception
@@ -1297,6 +1297,36 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
 	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
 }
 
+static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
+{
+	struct fnhe_hash_bucket *hash;
+	struct fib_nh_exception *fnhe, __rcu **fnhe_p;
+	u32 hval = fnhe_hashfun(daddr);
+
+	spin_lock_bh(&fnhe_lock);
+
+	hash = rcu_dereference_protected(nh->nh_exceptions,
+					 lockdep_is_held(&fnhe_lock));
+	hash += hval;
+
+	fnhe_p = &hash->chain;
+	fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
+	while (fnhe) {
+		if (fnhe->fnhe_daddr == daddr) {
+			rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
+				fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
+			fnhe_flush_routes(fnhe);
+			kfree_rcu(fnhe, rcu);
+			break;
+		}
+		fnhe_p = &fnhe->fnhe_next;
+		fnhe = rcu_dereference_protected(fnhe->fnhe_next,
+						 lockdep_is_held(&fnhe_lock));
+	}
+
+	spin_unlock_bh(&fnhe_lock);
+}
+
 static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
 {
 	struct fnhe_hash_bucket *hash = rcu_dereference(nh->nh_exceptions);
@@ -1310,8 +1340,14 @@ static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
 
 	for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
 	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
-		if (fnhe->fnhe_daddr == daddr)
+		if (fnhe->fnhe_daddr == daddr) {
+			if (fnhe->fnhe_expires &&
+			    time_after(jiffies, fnhe->fnhe_expires)) {
+				ip_del_fnhe(nh, daddr);
+				break;
+			}
 			return fnhe;
+		}
 	}
 	return NULL;
 }
@@ -1636,36 +1672,6 @@ static void ip_handle_martian_source(struct net_device *dev,
 #endif
 }
 
-static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
-{
-	struct fnhe_hash_bucket *hash;
-	struct fib_nh_exception *fnhe, __rcu **fnhe_p;
-	u32 hval = fnhe_hashfun(daddr);
-
-	spin_lock_bh(&fnhe_lock);
-
-	hash = rcu_dereference_protected(nh->nh_exceptions,
-					 lockdep_is_held(&fnhe_lock));
-	hash += hval;
-
-	fnhe_p = &hash->chain;
-	fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
-	while (fnhe) {
-		if (fnhe->fnhe_daddr == daddr) {
-			rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
-				fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
-			fnhe_flush_routes(fnhe);
-			kfree_rcu(fnhe, rcu);
-			break;
-		}
-		fnhe_p = &fnhe->fnhe_next;
-		fnhe = rcu_dereference_protected(fnhe->fnhe_next,
-						 lockdep_is_held(&fnhe_lock));
-	}
-
-	spin_unlock_bh(&fnhe_lock);
-}
-
 /* called in rcu_read_lock() section */
 static int __mkroute_input(struct sk_buff *skb,
 			   const struct fib_result *res,
@@ -1719,20 +1725,10 @@ static int __mkroute_input(struct sk_buff *skb,
 
 	fnhe = find_exception(&FIB_RES_NH(*res), daddr);
 	if (do_cache) {
-		if (fnhe) {
+		if (fnhe)
 			rth = rcu_dereference(fnhe->fnhe_rth_input);
-			if (rth && rth->dst.expires &&
-			    time_after(jiffies, rth->dst.expires)) {
-				ip_del_fnhe(&FIB_RES_NH(*res), daddr);
-				fnhe = NULL;
-			} else {
-				goto rt_cache;
-			}
-		}
-
-		rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
-
-rt_cache:
+		else
+			rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
 		if (rt_cache_valid(rth)) {
 			skb_dst_set_noref(skb, &rth->dst);
 			goto out;
@@ -2216,39 +2212,31 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
 	 * the loopback interface and the IP_PKTINFO ipi_ifindex will
 	 * be set to the loopback interface as well.
 	 */
-		fi = NULL;
+		do_cache = false;
 	}
 
 	fnhe = NULL;
 	do_cache &= fi != NULL;
-	if (do_cache) {
+	if (fi) {
 		struct rtable __rcu **prth;
 		struct fib_nh *nh = &FIB_RES_NH(*res);
 
 		fnhe = find_exception(nh, fl4->daddr);
+		if (!do_cache)
+			goto add;
 		if (fnhe) {
 			prth = &fnhe->fnhe_rth_output;
-			rth = rcu_dereference(*prth);
-			if (rth && rth->dst.expires &&
-			    time_after(jiffies, rth->dst.expires)) {
-				ip_del_fnhe(nh, fl4->daddr);
-				fnhe = NULL;
-			} else {
-				goto rt_cache;
-			}
+		} else {
+			if (unlikely(fl4->flowi4_flags &
+				     FLOWI_FLAG_KNOWN_NH &&
+				     !(nh->nh_gw &&
+				       nh->nh_scope == RT_SCOPE_LINK))) {
+				do_cache = false;
+				goto add;
+			}
+			prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
 		}
-
-		if (unlikely(fl4->flowi4_flags &
-			     FLOWI_FLAG_KNOWN_NH &&
-			     !(nh->nh_gw &&
-			       nh->nh_scope == RT_SCOPE_LINK))) {
-			do_cache = false;
-			goto add;
-		}
-		prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
 		rth = rcu_dereference(*prth);
-
-rt_cache:
 		if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst))
 			return rth;
 	}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 9ce1c726185e..c9d00ef54dec 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -697,7 +697,7 @@ static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
 {
 	return skb->len < size_goal &&
 	       sock_net(sk)->ipv4.sysctl_tcp_autocorking &&
-	       skb != tcp_write_queue_head(sk) &&
+	       !tcp_rtx_queue_empty(sk) &&
 	       refcount_read(&sk->sk_wmem_alloc) > skb->truesize;
 }
 
@@ -1204,7 +1204,8 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
 		uarg->zerocopy = 0;
 	}
 
-	if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect)) {
+	if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect) &&
+	    !tp->repair) {
 		err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size);
 		if (err == -EINPROGRESS && copied_syn > 0)
 			goto out;
@@ -2673,7 +2674,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
 	case TCP_REPAIR_QUEUE:
 		if (!tp->repair)
 			err = -EPERM;
-		else if (val < TCP_QUEUES_NR)
+		else if ((unsigned int)val < TCP_QUEUES_NR)
 			tp->repair_queue = val;
 		else
 			err = -EINVAL;
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 158d105e76da..58e2f479ffb4 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -806,7 +806,9 @@ static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
 			}
 		}
 	}
-	bbr->idle_restart = 0;
+	/* Restart after idle ends only once we process a new S/ACK for data */
+	if (rs->delivered > 0)
+		bbr->idle_restart = 0;
 }
 
 static void bbr_update_model(struct sock *sk, const struct rate_sample *rs)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index cde7d8251377..f4d61736c41a 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1835,11 +1835,16 @@ static void ip6_multipath_l3_keys(const struct sk_buff *skb,
 	const struct ipv6hdr *inner_iph;
 	const struct icmp6hdr *icmph;
 	struct ipv6hdr _inner_iph;
+	struct icmp6hdr _icmph;
 
 	if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6))
 		goto out;
 
-	icmph = icmp6_hdr(skb);
+	icmph = skb_header_pointer(skb, skb_transport_offset(skb),
+				   sizeof(_icmph), &_icmph);
+	if (!icmph)
+		goto out;
+
 	if (icmph->icmp6_type != ICMPV6_DEST_UNREACH &&
 	    icmph->icmp6_type != ICMPV6_PKT_TOOBIG &&
 	    icmph->icmp6_type != ICMPV6_TIME_EXCEED &&
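
The fix above replaces icmp6_hdr(), which simply casts into the linear skb area and can read uninitialized memory when the ICMPv6 header has not been pulled there, with skb_header_pointer(), which copies the header into a caller-provided buffer and returns NULL when the packet is too short. A user-space sketch of the same bounds-checked header access, with illustrative types and names:

/* Sketch: never cast into a buffer that may be shorter than the header;
 * copy into local storage and fail cleanly instead. */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct icmp_hdr_ex { uint8_t type; uint8_t code; uint16_t csum; };

/* returns a pointer to a complete header, or NULL if the packet is too
 * short to contain one at the given offset */
static const void *header_pointer(const uint8_t *pkt, size_t pkt_len,
				  size_t offset, size_t hdr_len, void *buf)
{
	if (offset > pkt_len || pkt_len - offset < hdr_len)
		return NULL;		/* would read past the packet */
	memcpy(buf, pkt + offset, hdr_len);
	return buf;
}

static int classify(const uint8_t *pkt, size_t pkt_len, size_t l4_offset)
{
	struct icmp_hdr_ex _h;
	const struct icmp_hdr_ex *h =
		header_pointer(pkt, pkt_len, l4_offset, sizeof(_h), &_h);

	if (!h)
		return -1;	/* truncated packet: don't touch stale bytes */
	return h->type;
}

int main(void)
{
	uint8_t pkt[2] = { 3, 0 };	/* too short for the header */

	return classify(pkt, sizeof(pkt), 0) == -1 ? 0 : 1;
}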
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index eea1d8611b20..13b38ad0fa4a 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -547,7 +547,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
 	rdsdebug("conn %p pd %p cq %p %p\n", conn, ic->i_pd,
 		 ic->i_send_cq, ic->i_recv_cq);
 
-	return ret;
+	goto out;
 
 sends_out:
 	vfree(ic->i_sends);
@@ -572,6 +572,7 @@ send_cq_out:
 	ic->i_send_cq = NULL;
 rds_ibdev_out:
 	rds_ib_remove_conn(rds_ibdev, conn);
+out:
 	rds_ib_dev_put(rds_ibdev);
 
 	return ret;
diff --git a/net/rds/recv.c b/net/rds/recv.c
index de50e2126e40..dc67458b52f0 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -558,6 +558,7 @@ static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg,
 	struct rds_cmsg_rx_trace t;
 	int i, j;
 
+	memset(&t, 0, sizeof(t));
 	inc->i_rx_lat_trace[RDS_MSG_RX_CMSG] = local_clock();
 	t.rx_traces = rs->rs_rx_traces;
 	for (i = 0; i < rs->rs_rx_traces; i++) {
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index a366e4c9413a..4808713c73b9 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -128,6 +128,28 @@ static bool fq_flow_is_detached(const struct fq_flow *f)
 	return f->next == &detached;
 }
 
+static bool fq_flow_is_throttled(const struct fq_flow *f)
+{
+	return f->next == &throttled;
+}
+
+static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
+{
+	if (head->first)
+		head->last->next = flow;
+	else
+		head->first = flow;
+	head->last = flow;
+	flow->next = NULL;
+}
+
+static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
+{
+	rb_erase(&f->rate_node, &q->delayed);
+	q->throttled_flows--;
+	fq_flow_add_tail(&q->old_flows, f);
+}
+
 static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
 {
 	struct rb_node **p = &q->delayed.rb_node, *parent = NULL;
@@ -155,15 +177,6 @@ static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
 
 static struct kmem_cache *fq_flow_cachep __read_mostly;
 
-static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
-{
-	if (head->first)
-		head->last->next = flow;
-	else
-		head->first = flow;
-	head->last = flow;
-	flow->next = NULL;
-}
 
 /* limit number of collected flows per round */
 #define FQ_GC_MAX 8
@@ -267,6 +280,8 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
 		     f->socket_hash != sk->sk_hash)) {
 			f->credit = q->initial_quantum;
 			f->socket_hash = sk->sk_hash;
+			if (fq_flow_is_throttled(f))
+				fq_flow_unset_throttled(q, f);
 			f->time_next_packet = 0ULL;
 		}
 		return f;
@@ -438,9 +453,7 @@ static void fq_check_throttled(struct fq_sched_data *q, u64 now)
 			q->time_next_delayed_flow = f->time_next_packet;
 			break;
 		}
-		rb_erase(p, &q->delayed);
-		q->throttled_flows--;
-		fq_flow_add_tail(&q->old_flows, f);
+		fq_flow_unset_throttled(q, f);
 	}
 }
 
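
The sch_fq changes enforce one invariant: a flow that still sits in the throttled rb-tree (keyed by time_next_packet) must be unlinked via fq_flow_unset_throttled() before fq_classify() resets that key for a reused flow, otherwise the tree ordering is corrupted. A user-space sketch of detach-before-rekey, using a sorted list in place of the rb-tree and illustrative names:

/* Sketch: an element keyed into an ordered index must be detached before
 * its key changes. All names here are illustrative. */
#include <stdbool.h>
#include <stddef.h>

struct flow {
	unsigned long time_next_packet;	/* sort key while throttled */
	struct flow *next;
	bool throttled;
};

struct sched {
	struct flow *delayed;	/* sorted by time_next_packet */
};

static void unset_throttled(struct sched *q, struct flow *f)
{
	struct flow **p = &q->delayed;

	while (*p && *p != f)
		p = &(*p)->next;
	if (*p)
		*p = f->next;	/* unlink from the index */
	f->throttled = false;
}

/* reuse a cached flow for a new connection: detach first, then it is safe
 * to reset the key, mirroring fq_flow_unset_throttled() in fq_classify() */
static void reuse_flow(struct sched *q, struct flow *f)
{
	if (f->throttled)
		unset_throttled(q, f);
	f->time_next_packet = 0;
}

int main(void)
{
	struct flow f = { .time_next_packet = 42, .next = NULL,
			  .throttled = true };
	struct sched q = { .delayed = &f };

	reuse_flow(&q, &f);
	return (q.delayed == NULL && !f.throttled) ? 0 : 1;
}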
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index 23ebc5318edc..eb93ffe2408b 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -217,7 +217,7 @@ new_skb:
 	skb_pull(chunk->skb, sizeof(*ch));
 	chunk->subh.v = NULL; /* Subheader is no longer valid. */
 
-	if (chunk->chunk_end + sizeof(*ch) < skb_tail_pointer(chunk->skb)) {
+	if (chunk->chunk_end + sizeof(*ch) <= skb_tail_pointer(chunk->skb)) {
 		/* This is not a singleton */
 		chunk->singleton = 0;
 	} else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) {
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 2e3f7b75a8ec..42247110d842 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -895,6 +895,9 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1,
 	if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2))
 		return 1;
 
+	if (addr1->sa.sa_family == AF_INET && addr2->sa.sa_family == AF_INET)
+		return addr1->v4.sin_addr.s_addr == addr2->v4.sin_addr.s_addr;
+
 	return __sctp_v6_cmp_addr(addr1, addr2);
 }
 
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index dd0594a10961..28c070e187c2 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -1794,6 +1794,9 @@ static enum sctp_disposition sctp_sf_do_dupcook_a(
 			     GFP_ATOMIC))
 		goto nomem;
 
+	if (sctp_auth_asoc_init_active_key(new_asoc, GFP_ATOMIC))
+		goto nomem;
+
 	/* Make sure no new addresses are being added during the
 	 * restart. Though this is a pretty complicated attack
 	 * since you'd have to get inside the cookie.
@@ -1906,6 +1909,9 @@ static enum sctp_disposition sctp_sf_do_dupcook_b(
 			     GFP_ATOMIC))
 		goto nomem;
 
+	if (sctp_auth_asoc_init_active_key(new_asoc, GFP_ATOMIC))
+		goto nomem;
+
 	/* Update the content of current association. */
 	sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
 	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
@@ -2050,7 +2056,7 @@ static enum sctp_disposition sctp_sf_do_dupcook_d(
 		}
 	}
 
-	repl = sctp_make_cookie_ack(new_asoc, chunk);
+	repl = sctp_make_cookie_ack(asoc, chunk);
 	if (!repl)
 		goto nomem;
 
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index f799043abec9..f1f1d1b232ba 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -240,6 +240,8 @@ void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new)
 
 	new->out = NULL;
 	new->in  = NULL;
+	new->outcnt = 0;
+	new->incnt  = 0;
 }
 
 static int sctp_send_reconf(struct sctp_association *asoc,
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index f5d4b69dbabc..544bab42f925 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -292,6 +292,17 @@ static void smc_copy_sock_settings_to_smc(struct smc_sock *smc)
 	smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC);
 }
 
+/* register a new rmb */
+static int smc_reg_rmb(struct smc_link *link, struct smc_buf_desc *rmb_desc)
+{
+	/* register memory region for new rmb */
+	if (smc_wr_reg_send(link, rmb_desc->mr_rx[SMC_SINGLE_LINK])) {
+		rmb_desc->regerr = 1;
+		return -EFAULT;
+	}
+	return 0;
+}
+
 static int smc_clnt_conf_first_link(struct smc_sock *smc)
 {
 	struct smc_link_group *lgr = smc->conn.lgr;
@@ -321,9 +332,7 @@ static int smc_clnt_conf_first_link(struct smc_sock *smc)
 
 	smc_wr_remember_qp_attr(link);
 
-	rc = smc_wr_reg_send(link,
-			     smc->conn.rmb_desc->mr_rx[SMC_SINGLE_LINK]);
-	if (rc)
+	if (smc_reg_rmb(link, smc->conn.rmb_desc))
 		return SMC_CLC_DECL_INTERR;
 
 	/* send CONFIRM LINK response over RoCE fabric */
@@ -473,13 +482,8 @@ static int smc_connect_rdma(struct smc_sock *smc)
 			goto decline_rdma_unlock;
 		}
 	} else {
-		struct smc_buf_desc *buf_desc = smc->conn.rmb_desc;
-
-		if (!buf_desc->reused) {
-			/* register memory region for new rmb */
-			rc = smc_wr_reg_send(link,
-					     buf_desc->mr_rx[SMC_SINGLE_LINK]);
-			if (rc) {
+		if (!smc->conn.rmb_desc->reused) {
+			if (smc_reg_rmb(link, smc->conn.rmb_desc)) {
 				reason_code = SMC_CLC_DECL_INTERR;
 				goto decline_rdma_unlock;
 			}
@@ -719,9 +723,7 @@ static int smc_serv_conf_first_link(struct smc_sock *smc)
 
 	link = &lgr->lnk[SMC_SINGLE_LINK];
 
-	rc = smc_wr_reg_send(link,
-			     smc->conn.rmb_desc->mr_rx[SMC_SINGLE_LINK]);
-	if (rc)
+	if (smc_reg_rmb(link, smc->conn.rmb_desc))
 		return SMC_CLC_DECL_INTERR;
 
 	/* send CONFIRM LINK request to client over the RoCE fabric */
@@ -854,13 +856,8 @@ static void smc_listen_work(struct work_struct *work)
 	smc_rx_init(new_smc);
 
 	if (local_contact != SMC_FIRST_CONTACT) {
-		struct smc_buf_desc *buf_desc = new_smc->conn.rmb_desc;
-
-		if (!buf_desc->reused) {
-			/* register memory region for new rmb */
-			rc = smc_wr_reg_send(link,
-					     buf_desc->mr_rx[SMC_SINGLE_LINK]);
-			if (rc) {
+		if (!new_smc->conn.rmb_desc->reused) {
+			if (smc_reg_rmb(link, new_smc->conn.rmb_desc)) {
 				reason_code = SMC_CLC_DECL_INTERR;
 				goto decline_rdma_unlock;
 			}
@@ -978,10 +975,6 @@ static void smc_tcp_listen_work(struct work_struct *work)
 	}
 
 out:
-	if (lsmc->clcsock) {
-		sock_release(lsmc->clcsock);
-		lsmc->clcsock = NULL;
-	}
 	release_sock(lsk);
 	sock_put(&lsmc->sk); /* sock_hold in smc_listen */
 }
@@ -1170,13 +1163,15 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
 		/* delegate to CLC child sock */
 		release_sock(sk);
 		mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
-		/* if non-blocking connect finished ... */
 		lock_sock(sk);
-		if ((sk->sk_state == SMC_INIT) && (mask & EPOLLOUT)) {
-			sk->sk_err = smc->clcsock->sk->sk_err;
-			if (sk->sk_err) {
-				mask |= EPOLLERR;
-			} else {
+		sk->sk_err = smc->clcsock->sk->sk_err;
+		if (sk->sk_err) {
+			mask |= EPOLLERR;
+		} else {
+			/* if non-blocking connect finished ... */
+			if (sk->sk_state == SMC_INIT &&
+			    mask & EPOLLOUT &&
+			    smc->clcsock->sk->sk_state != TCP_CLOSE) {
 				rc = smc_connect_rdma(smc);
 				if (rc < 0)
 					mask |= EPOLLERR;
@@ -1320,8 +1315,11 @@ static ssize_t smc_sendpage(struct socket *sock, struct page *page,
 
 	smc = smc_sk(sk);
 	lock_sock(sk);
-	if (sk->sk_state != SMC_ACTIVE)
+	if (sk->sk_state != SMC_ACTIVE) {
+		release_sock(sk);
 		goto out;
+	}
+	release_sock(sk);
 	if (smc->use_fallback)
 		rc = kernel_sendpage(smc->clcsock, page, offset,
 				     size, flags);
@@ -1329,7 +1327,6 @@ static ssize_t smc_sendpage(struct socket *sock, struct page *page,
 		rc = sock_no_sendpage(sock, page, offset, size, flags);
 
 out:
-	release_sock(sk);
 	return rc;
 }
 
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index f44f6803f7ff..d4bd01bb44e1 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -32,6 +32,9 @@
 
 static u32 smc_lgr_num;			/* unique link group number */
 
+static void smc_buf_free(struct smc_buf_desc *buf_desc, struct smc_link *lnk,
+			 bool is_rmb);
+
 static void smc_lgr_schedule_free_work(struct smc_link_group *lgr)
 {
 	/* client link group creation always follows the server link group
@@ -234,9 +237,22 @@ static void smc_buf_unuse(struct smc_connection *conn)
 		conn->sndbuf_size = 0;
 	}
 	if (conn->rmb_desc) {
-		conn->rmb_desc->reused = true;
-		conn->rmb_desc->used = 0;
-		conn->rmbe_size = 0;
+		if (!conn->rmb_desc->regerr) {
+			conn->rmb_desc->reused = 1;
+			conn->rmb_desc->used = 0;
+			conn->rmbe_size = 0;
+		} else {
+			/* buf registration failed, reuse not possible */
+			struct smc_link_group *lgr = conn->lgr;
+			struct smc_link *lnk;
+
+			write_lock_bh(&lgr->rmbs_lock);
+			list_del(&conn->rmb_desc->list);
+			write_unlock_bh(&lgr->rmbs_lock);
+
+			lnk = &lgr->lnk[SMC_SINGLE_LINK];
+			smc_buf_free(conn->rmb_desc, lnk, true);
+		}
 	}
 }
 
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index 07e2a393e6d9..5dfcb15d529f 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -123,7 +123,8 @@ struct smc_buf_desc {
 						 */
 	u32			order;		/* allocation order */
 	u32			used;		/* currently used / unused */
-	bool			reused;		/* new created / reused */
+	u8			reused	: 1;	/* new created / reused */
+	u8			regerr	: 1;	/* err during registration */
 };
 
 struct smc_rtoken {				/* address/key of remote RMB */
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 6f98b56dd48e..baaf93f12cbd 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -2244,7 +2244,7 @@ int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb)
 
 	rtnl_lock();
 	for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) {
-		err = __tipc_nl_add_monitor(net, &msg, prev_bearer);
+		err = __tipc_nl_add_monitor(net, &msg, bearer_id);
 		if (err)
 			break;
 	}
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 0d379970960e..cc03e00785c7 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -114,6 +114,7 @@ int tls_push_sg(struct sock *sk,
 	size = sg->length - offset;
 	offset += sg->offset;
 
+	ctx->in_tcp_sendpages = true;
 	while (1) {
 		if (sg_is_last(sg))
 			sendpage_flags = flags;
@@ -148,6 +149,8 @@ retry:
 	}
 
 	clear_bit(TLS_PENDING_CLOSED_RECORD, &ctx->flags);
+	ctx->in_tcp_sendpages = false;
+	ctx->sk_write_space(sk);
 
 	return 0;
 }
@@ -217,6 +220,10 @@ static void tls_write_space(struct sock *sk)
 {
 	struct tls_context *ctx = tls_get_ctx(sk);
 
+	/* We are already sending pages, ignore notification */
+	if (ctx->in_tcp_sendpages)
+		return;
+
 	if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) {
 		gfp_t sk_allocation = sk->sk_allocation;
 		int rc;
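
The TLS fix breaks a recursion: do_tcp_sendpages() can raise the socket's write-space notification, which tls_write_space() previously answered by pushing the pending record, re-entering do_tcp_sendpages(). The in_tcp_sendpages flag makes the notification a no-op while the send loop runs, and the loop re-issues it once finished. A user-space sketch of the guard, with illustrative names:

/* Sketch of a reentrancy guard: a callback that could re-enter the sender
 * is ignored while a send is in progress. Not the TLS code itself. */
#include <stdbool.h>
#include <stdio.h>

struct ctx {
	bool in_send;			/* mirrors ctx->in_tcp_sendpages */
	void (*write_space)(struct ctx *);
};

static void push_pending(struct ctx *c);

/* hooked notification, like tls_write_space() */
static void write_space(struct ctx *c)
{
	if (c->in_send)
		return;		/* already sending: avoid recursion */
	push_pending(c);
}

static void push_pending(struct ctx *c)
{
	c->in_send = true;
	/* ... the send loop would run here and may fire write_space() ... */
	write_space(c);		/* recursive notification is now a no-op */
	c->in_send = false;
	c->write_space(c);	/* deferred notification once we are done */
}

static void original_write_space(struct ctx *c)
{
	printf("write space for ctx %p\n", (void *)c);
}

int main(void)
{
	struct ctx c = { .in_send = false,
			 .write_space = original_write_space };

	push_pending(&c);	/* terminates: inner notification is ignored */
	return 0;
}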