author    Linus Torvalds <torvalds@linux-foundation.org>  2019-06-07 12:29:14 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-06-07 12:29:14 -0400
commit    1e1d926369545ea09c98c6c7f5d109aa4ee0cd0b (patch)
tree      8ece321f1b8950da023642da4b5167ccceb86862 /net
parent    6e38335dcc70f03faba26bf1260ee024d930afe1 (diff)
parent    720f1de4021f09898b8c8443f3b3e995991b6e3a (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Free AF_PACKET po->rollover properly, from Willem de Bruijn.

 2) Read SFP eeprom in max 16 byte increments to avoid problems with
    some SFP modules, from Russell King.

 3) Fix UDP socket lookup wrt. VRF, from Tim Beale.

 4) Handle route invalidation properly in s390 qeth driver, from
    Julian Wiedmann.

 5) Memory leak on unload in RDS, from Zhu Yanjun.

 6) sctp_process_init leak, from Neil Horman.

 7) Fix fib_rules rule insertion semantic change that broke Android,
    from Hangbin Liu.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (33 commits)
  pktgen: do not sleep with the thread lock held.
  net: mvpp2: Use strscpy to handle stat strings
  net: rds: fix memory leak in rds_ib_flush_mr_pool
  ipv6: fix EFAULT on sendto with icmpv6 and hdrincl
  ipv6: use READ_ONCE() for inet->hdrincl as in ipv4
  Revert "fib_rules: return 0 directly if an exactly same rule exists when NLM_F_EXCL not supplied"
  net: aquantia: fix wol configuration not applied sometimes
  ethtool: fix potential userspace buffer overflow
  Fix memory leak in sctp_process_init
  net: rds: fix memory leak when unload rds_rdma
  ipv6: fix the check before getting the cookie in rt6_get_cookie
  ipv4: not do cache for local delivery if bc_forwarding is enabled
  s390/qeth: handle error when updating TX queue count
  s390/qeth: fix VLAN attribute in bridge_hostnotify udev event
  s390/qeth: check dst entry before use
  s390/qeth: handle limited IPv4 broadcast in L3 TX path
  net: fix indirect calls helpers for ptype list hooks.
  net: ipvlan: Fix ipvlan device tso disabled while NETIF_F_IP_CSUM is set
  udp: only choose unbound UDP socket for multicast when not in a VRF
  net/tls: replace the sleeping lock around RX resync with a bit lock
  ...
Diffstat (limited to 'net')
-rw-r--r--  net/core/dev.c            6
-rw-r--r--  net/core/ethtool.c        5
-rw-r--r--  net/core/fib_rules.c      6
-rw-r--r--  net/core/pktgen.c        11
-rw-r--r--  net/dsa/tag_sja1105.c    10
-rw-r--r--  net/ipv4/route.c         24
-rw-r--r--  net/ipv4/udp.c            3
-rw-r--r--  net/ipv6/raw.c           25
-rw-r--r--  net/packet/af_packet.c    2
-rw-r--r--  net/rds/ib.c              2
-rw-r--r--  net/rds/ib_rdma.c        10
-rw-r--r--  net/rds/ib_recv.c         3
-rw-r--r--  net/sctp/sm_make_chunk.c 13
-rw-r--r--  net/sctp/sm_sideeffect.c  5
-rw-r--r--  net/tls/tls_device.c     26
15 files changed, 92 insertions, 59 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 140858d4a048..eb7fb6daa1ef 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5021,12 +5021,12 @@ static inline void __netif_receive_skb_list_ptype(struct list_head *head,
 	if (list_empty(head))
 		return;
 	if (pt_prev->list_func != NULL)
-		pt_prev->list_func(head, pt_prev, orig_dev);
+		INDIRECT_CALL_INET(pt_prev->list_func, ipv6_list_rcv,
+				   ip_list_rcv, head, pt_prev, orig_dev);
 	else
 		list_for_each_entry_safe(skb, next, head, list) {
 			skb_list_del_init(skb);
-			INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb,
-					   skb->dev, pt_prev, orig_dev);
+			pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
 		}
 }
 
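The fix swaps which call site uses the INDIRECT_CALL_INET() helper: the list hook, not the per-skb hook, is the path worth devirtualizing. The macro compares an indirect function pointer against its likely targets so the compiler can emit direct calls instead of a retpoline-guarded indirect branch. A minimal userspace sketch of the idea, with illustrative handler names and a simplified two-target macro rather than the kernel's exact definition:

#include <stdio.h>

static void ip_handler(int x)   { printf("ipv4 got %d\n", x); }
static void ipv6_handler(int x) { printf("ipv6 got %d\n", x); }

/* Simplified stand-in for the kernel's INDIRECT_CALL_2(): try the known
 * targets first, fall back to a true indirect call otherwise.
 */
#define INDIRECT_CALL_2(f, f1, f2, ...)			\
	((f) == (f1) ? (f1)(__VA_ARGS__) :		\
	 (f) == (f2) ? (f2)(__VA_ARGS__) : (f)(__VA_ARGS__))

int main(void)
{
	void (*func)(int) = ip_handler;

	/* Resolves to a direct call of ip_handler. */
	INDIRECT_CALL_2(func, ipv6_handler, ip_handler, 42);
	return 0;
}
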
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 6dadeff8d39a..d08b1e19ce9c 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1355,13 +1355,16 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
 	if (!regbuf)
 		return -ENOMEM;
 
+	if (regs.len < reglen)
+		reglen = regs.len;
+
 	ops->get_regs(dev, &regs, regbuf);
 
 	ret = -EFAULT;
 	if (copy_to_user(useraddr, &regs, sizeof(regs)))
 		goto out;
 	useraddr += offsetof(struct ethtool_regs, data);
-	if (regbuf && copy_to_user(useraddr, regbuf, regs.len))
+	if (copy_to_user(useraddr, regbuf, reglen))
 		goto out;
 	ret = 0;
 
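The overflow fixed here: the driver's get_regs() callback may modify regs.len, and the old code used that value as the copy length toward a userspace buffer sized for the original request. The patch fixes the copy length up front as the minimum of what the user asked for and what the driver reported. A userspace sketch of the min-of-both-bounds idiom, with hypothetical names (the kernel does the final copy with copy_to_user()):

#include <stdio.h>
#include <string.h>

/* Copy at most 'reglen' bytes, even if the caller asked for more. */
static size_t copy_regs(char *dst, size_t want, const char *regbuf, size_t reglen)
{
	size_t n = want < reglen ? want : reglen;	/* clamp both ways */

	memcpy(dst, regbuf, n);
	return n;
}

int main(void)
{
	char regbuf[16] = "register dump";
	char out[64];

	/* Asking for 64 bytes still copies only the 16 that exist. */
	printf("copied %zu bytes\n",
	       copy_regs(out, sizeof(out), regbuf, sizeof(regbuf)));
	return 0;
}
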
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 43f0115cce9c..18f8dd8329ed 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -757,9 +757,9 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
 	if (err)
 		goto errout;
 
-	if (rule_exists(ops, frh, tb, rule)) {
-		if (nlh->nlmsg_flags & NLM_F_EXCL)
-			err = -EEXIST;
+	if ((nlh->nlmsg_flags & NLM_F_EXCL) &&
+	    rule_exists(ops, frh, tb, rule)) {
+		err = -EEXIST;
 		goto errout_free;
 	}
 
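The revert restores the long-standing netlink rule-insertion semantics that Android's netd relies on: a duplicate rule is an error only when the caller sets NLM_F_EXCL; without the flag the request succeeds. A compilable sketch of just that check (NLM_F_EXCL is 0x200 in <linux/netlink.h>; the rest is illustrative):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define NLM_F_EXCL 0x200	/* value from <linux/netlink.h> */

static int newrule(unsigned int flags, bool exists)
{
	if ((flags & NLM_F_EXCL) && exists)
		return -EEXIST;
	return 0;	/* duplicate without NLM_F_EXCL: accepted */
}

int main(void)
{
	printf("%d\n", newrule(0, true));		/* 0 */
	printf("%d\n", newrule(NLM_F_EXCL, true));	/* -17 (-EEXIST) */
	return 0;
}
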
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 99ddc69736b2..f975c5e2a369 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3059,7 +3059,13 @@ static int pktgen_wait_thread_run(struct pktgen_thread *t)
 {
 	while (thread_is_running(t)) {
 
+		/* note: 't' will still be around even after the unlock/lock
+		 * cycle because pktgen_thread threads are only cleared at
+		 * net exit
+		 */
+		mutex_unlock(&pktgen_thread_lock);
 		msleep_interruptible(100);
+		mutex_lock(&pktgen_thread_lock);
 
 		if (signal_pending(current))
 			goto signal;
@@ -3074,6 +3080,10 @@ static int pktgen_wait_all_threads_run(struct pktgen_net *pn)
 	struct pktgen_thread *t;
 	int sig = 1;
 
+	/* prevent from racing with rmmod */
+	if (!try_module_get(THIS_MODULE))
+		return sig;
+
 	mutex_lock(&pktgen_thread_lock);
 
 	list_for_each_entry(t, &pn->pktgen_threads, th_list) {
@@ -3087,6 +3097,7 @@ static int pktgen_wait_all_threads_run(struct pktgen_net *pn)
 	t->control |= (T_STOP);
 
 	mutex_unlock(&pktgen_thread_lock);
+	module_put(THIS_MODULE);
 	return sig;
 }
 
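Two things change here: the wait loop now drops pktgen_thread_lock around its 100 ms nap instead of sleeping with the mutex held, and the whole wait pins the module with try_module_get()/module_put() so it cannot race with rmmod. A minimal pthread sketch of the unlock-sleep-relock pattern, with illustrative names:

#include <pthread.h>
#include <stdbool.h>
#include <unistd.h>

static pthread_mutex_t thread_lock = PTHREAD_MUTEX_INITIALIZER;
static bool running;	/* set true by the threads being waited on */

static void wait_thread_run(void)
{
	pthread_mutex_lock(&thread_lock);
	while (running) {
		/* Sleeping with thread_lock held would stall every other
		 * waiter; release it for the duration of the nap.
		 */
		pthread_mutex_unlock(&thread_lock);
		usleep(100 * 1000);
		pthread_mutex_lock(&thread_lock);
	}
	pthread_mutex_unlock(&thread_lock);
}

int main(void)
{
	wait_thread_run();	/* returns at once: running is false */
	return 0;
}
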
diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c
index 969402c7dbf1..d43737e6c3fb 100644
--- a/net/dsa/tag_sja1105.c
+++ b/net/dsa/tag_sja1105.c
@@ -28,14 +28,10 @@ static inline bool sja1105_is_link_local(const struct sk_buff *skb)
  */
 static bool sja1105_filter(const struct sk_buff *skb, struct net_device *dev)
 {
-	if (sja1105_is_link_local(skb)) {
-		SJA1105_SKB_CB(skb)->type = SJA1105_FRAME_TYPE_LINK_LOCAL;
+	if (sja1105_is_link_local(skb))
 		return true;
-	}
-	if (!dsa_port_is_vlan_filtering(dev->dsa_ptr)) {
-		SJA1105_SKB_CB(skb)->type = SJA1105_FRAME_TYPE_NORMAL;
+	if (!dsa_port_is_vlan_filtering(dev->dsa_ptr))
 		return true;
-	}
 	return false;
 }
 
@@ -84,7 +80,7 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
 
 	skb->offload_fwd_mark = 1;
 
-	if (SJA1105_SKB_CB(skb)->type == SJA1105_FRAME_TYPE_LINK_LOCAL) {
+	if (sja1105_is_link_local(skb)) {
 		/* Management traffic path. Switch embeds the switch ID and
 		 * port ID into bytes of the destination MAC, courtesy of
 		 * the incl_srcpt options.
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index cee640281e02..6cb7cff22db9 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1981,7 +1981,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 	u32 itag = 0;
 	struct rtable *rth;
 	struct flowi4 fl4;
-	bool do_cache;
+	bool do_cache = true;
 
 	/* IP on this device is disabled. */
 
@@ -2058,6 +2058,9 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 	if (res->type == RTN_BROADCAST) {
 		if (IN_DEV_BFORWARD(in_dev))
 			goto make_route;
+		/* not do cache if bc_forwarding is enabled */
+		if (IPV4_DEVCONF_ALL(net, BC_FORWARDING))
+			do_cache = false;
 		goto brd_input;
 	}
 
2063 2066
@@ -2095,18 +2098,15 @@ brd_input:
 	RT_CACHE_STAT_INC(in_brd);
 
 local_input:
-	do_cache = false;
-	if (res->fi) {
-		if (!itag) {
-			struct fib_nh_common *nhc = FIB_RES_NHC(*res);
+	do_cache &= res->fi && !itag;
+	if (do_cache) {
+		struct fib_nh_common *nhc = FIB_RES_NHC(*res);
 
-			rth = rcu_dereference(nhc->nhc_rth_input);
-			if (rt_cache_valid(rth)) {
-				skb_dst_set_noref(skb, &rth->dst);
-				err = 0;
-				goto out;
-			}
-			do_cache = true;
+		rth = rcu_dereference(nhc->nhc_rth_input);
+		if (rt_cache_valid(rth)) {
+			skb_dst_set_noref(skb, &rth->dst);
+			err = 0;
+			goto out;
 		}
 	}
 
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 189144346cd4..7c6228fbf5dd 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -533,8 +533,7 @@ static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
 	    (inet->inet_dport != rmt_port && inet->inet_dport) ||
 	    (inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) ||
 	    ipv6_only_sock(sk) ||
-	    (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif &&
-	     sk->sk_bound_dev_if != sdif))
+	    !udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
 		return false;
 	if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif, sdif))
 		return false;
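udp_sk_bound_dev_eq() centralizes the bound-device test and makes it VRF-aware, so an unbound socket no longer matches multicast traffic arriving inside a VRF. A simplified, hypothetical rendering of that rule; the real helper also consults the udp_l3mdev_accept sysctl and is not reproduced here:

#include <stdbool.h>
#include <stdio.h>

/* dif: receiving device; sdif: VRF master the packet came in through
 * (0 when not in a VRF). Simplified, not the kernel helper's exact logic.
 */
static bool bound_dev_eq(int bound_dev_if, int dif, int sdif)
{
	if (!bound_dev_if)
		return !sdif;	/* unbound sockets only match outside a VRF */
	return bound_dev_if == dif || bound_dev_if == sdif;
}

int main(void)
{
	printf("%d\n", bound_dev_eq(0, 3, 0));	/* 1: unbound, no VRF */
	printf("%d\n", bound_dev_eq(0, 3, 7));	/* 0: unbound, inside a VRF */
	printf("%d\n", bound_dev_eq(7, 3, 7));	/* 1: bound to the VRF master */
	return 0;
}
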
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 703c8387f102..70693bc7ad9d 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -779,6 +779,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 	struct flowi6 fl6;
 	struct ipcm6_cookie ipc6;
 	int addr_len = msg->msg_namelen;
+	int hdrincl;
 	u16 proto;
 	int err;
 
@@ -792,6 +793,13 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 	if (msg->msg_flags & MSG_OOB)
 		return -EOPNOTSUPP;
 
+	/* hdrincl should be READ_ONCE(inet->hdrincl)
+	 * but READ_ONCE() doesn't work with bit fields.
+	 * Doing this indirectly yields the same result.
+	 */
+	hdrincl = inet->hdrincl;
+	hdrincl = READ_ONCE(hdrincl);
+
 	/*
 	 *	Get and verify the address.
 	 */
@@ -883,11 +891,14 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 	opt = ipv6_fixup_options(&opt_space, opt);
 
 	fl6.flowi6_proto = proto;
-	rfv.msg = msg;
-	rfv.hlen = 0;
-	err = rawv6_probe_proto_opt(&rfv, &fl6);
-	if (err)
-		goto out;
+
+	if (!hdrincl) {
+		rfv.msg = msg;
+		rfv.hlen = 0;
+		err = rawv6_probe_proto_opt(&rfv, &fl6);
+		if (err)
+			goto out;
+	}
 
 	if (!ipv6_addr_any(daddr))
 		fl6.daddr = *daddr;
@@ -904,7 +915,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 		fl6.flowi6_oif = np->ucast_oif;
 	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 
-	if (inet->hdrincl)
+	if (hdrincl)
 		fl6.flowi6_flags |= FLOWI_FLAG_KNOWN_NH;
 
 	if (ipc6.tclass < 0)
@@ -927,7 +938,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 		goto do_confirm;
 
 back_from_confirm:
-	if (inet->hdrincl)
+	if (hdrincl)
 		err = rawv6_send_hdrinc(sk, msg, len, &fl6, &dst,
 					msg->msg_flags, &ipc6.sockc);
 	else {
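The point of the change is that inet->hdrincl is read exactly once into a local, so the protocol probe above and the two hdrincl checks below cannot observe different values if a concurrent setsockopt() flips the flag mid-sendmsg, and the compiler cannot legally re-read the field. A userspace sketch of the snapshot idiom, with a C11 atomic standing in for READ_ONCE():

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int hdrincl_flag;	/* flipped elsewhere, e.g. by setsockopt() */

static void sendmsg_path(void)
{
	/* One load; every later check uses the same snapshot. */
	int hdrincl = atomic_load_explicit(&hdrincl_flag, memory_order_relaxed);

	if (!hdrincl)
		printf("probe the protocol from the payload\n");
	/* ... routing, flow setup ... */
	if (hdrincl)	/* cannot disagree with the check above */
		printf("caller supplies the IP header\n");
}

int main(void)
{
	sendmsg_path();
	return 0;
}
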
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index fc012e801459..a29d66da7394 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3008,8 +3008,8 @@ static int packet_release(struct socket *sock)
 
 	synchronize_net();
 
+	kfree(po->rollover);
 	if (f) {
-		kfree(po->rollover);
 		fanout_release_data(f);
 		kfree(f);
 	}
diff --git a/net/rds/ib.c b/net/rds/ib.c
index 2da9b75bad16..b8d581b779b2 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -87,7 +87,7 @@ static void rds_ib_dev_shutdown(struct rds_ib_device *rds_ibdev)
 
 	spin_lock_irqsave(&rds_ibdev->spinlock, flags);
 	list_for_each_entry(ic, &rds_ibdev->conn_list, ib_node)
-		rds_conn_drop(ic->conn);
+		rds_conn_path_drop(&ic->conn->c_path[0], true);
 	spin_unlock_irqrestore(&rds_ibdev->spinlock, flags);
 }
 
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index d664e9ade74d..0b347f46b2f4 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -428,12 +428,14 @@ int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
 		wait_clean_list_grace();
 
 		list_to_llist_nodes(pool, &unmap_list, &clean_nodes, &clean_tail);
-		if (ibmr_ret)
+		if (ibmr_ret) {
 			*ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode);
-
+			clean_nodes = clean_nodes->next;
+		}
 		/* more than one entry in llist nodes */
-		if (clean_nodes->next)
-			llist_add_batch(clean_nodes->next, clean_tail, &pool->clean_list);
+		if (clean_nodes)
+			llist_add_batch(clean_nodes, clean_tail,
+					&pool->clean_list);
 
 	}
 
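The leak: the old code always re-added the batch starting at clean_nodes->next, so when no MR was handed out through ibmr_ret the head node was never returned to pool->clean_list. The fix advances past the head only when it is actually given away. A tiny userspace sketch of the corrected hand-one-out logic on a singly linked list:

#include <stddef.h>
#include <stdio.h>

struct node { struct node *next; int id; };

static struct node *take_one(struct node **list)
{
	struct node *first = *list;

	if (first)
		*list = first->next;	/* advance only past the node handed out */
	return first;
}

int main(void)
{
	struct node c = { NULL, 3 }, b = { &c, 2 }, a = { &b, 1 };
	struct node *clean = &a;
	struct node *mine = take_one(&clean);

	/* 'clean' now starts at node 2, so re-adding it cannot skip a node. */
	printf("took %d, pool head %d\n", mine->id, clean->id);
	return 0;
}
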
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index 8946c89d7392..3cae88cbdaa0 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -168,6 +168,7 @@ void rds_ib_recv_free_caches(struct rds_ib_connection *ic)
 		list_del(&inc->ii_cache_entry);
 		WARN_ON(!list_empty(&inc->ii_frags));
 		kmem_cache_free(rds_ib_incoming_slab, inc);
+		atomic_dec(&rds_ib_allocation);
 	}
 
 	rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
@@ -1057,6 +1058,8 @@ out:
 
 void rds_ib_recv_exit(void)
 {
+	WARN_ON(atomic_read(&rds_ib_allocation));
+
 	kmem_cache_destroy(rds_ib_incoming_slab);
 	kmem_cache_destroy(rds_ib_frag_slab);
 }
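rds_ib_recv_free_caches() was returning cached inc structures to the slab without decrementing rds_ib_allocation, leaving the global counter permanently elevated; the added WARN_ON turns any remaining imbalance into a loud signal at module unload. A minimal sketch of the pair-every-free-with-a-decrement idiom:

#include <assert.h>
#include <stdatomic.h>
#include <stdlib.h>

static _Atomic long nr_alloc;

static void *obj_alloc(size_t n) { atomic_fetch_add(&nr_alloc, 1); return malloc(n); }
static void  obj_free(void *p)   { atomic_fetch_sub(&nr_alloc, 1); free(p); }

int main(void)
{
	void *p = obj_alloc(64);

	obj_free(p);
	assert(atomic_load(&nr_alloc) == 0);	/* kernel: WARN_ON() at exit */
	return 0;
}
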
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 92331e1195c1..f17908f5c4f3 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2312,7 +2312,6 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
 	union sctp_addr addr;
 	struct sctp_af *af;
 	int src_match = 0;
-	char *cookie;
 
 	/* We must include the address that the INIT packet came from.
 	 * This is the only address that matters for an INIT packet.
@@ -2416,14 +2415,6 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
 	/* Peer Rwnd : Current calculated value of the peer's rwnd. */
 	asoc->peer.rwnd = asoc->peer.i.a_rwnd;
 
-	/* Copy cookie in case we need to resend COOKIE-ECHO. */
-	cookie = asoc->peer.cookie;
-	if (cookie) {
-		asoc->peer.cookie = kmemdup(cookie, asoc->peer.cookie_len, gfp);
-		if (!asoc->peer.cookie)
-			goto clean_up;
-	}
-
 	/* RFC 2960 7.2.1 The initial value of ssthresh MAY be arbitrarily
 	 * high (for example, implementations MAY use the size of the receiver
 	 * advertised window).
@@ -2592,7 +2583,9 @@ do_addr_param:
 	case SCTP_PARAM_STATE_COOKIE:
 		asoc->peer.cookie_len =
 			ntohs(param.p->length) - sizeof(struct sctp_paramhdr);
-		asoc->peer.cookie = param.cookie->body;
+		asoc->peer.cookie = kmemdup(param.cookie->body, asoc->peer.cookie_len, gfp);
+		if (!asoc->peer.cookie)
+			retval = 0;
 		break;
 
 	case SCTP_PARAM_HEARTBEAT_INFO:
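The restructuring moves ownership of the peer cookie to parse time: sctp_process_param() now kmemdup()s the cookie out of the received chunk instead of keeping a raw pointer into memory that is freed with the chunk, and the copy is released once the association reaches ESTABLISHED (see the sm_sideeffect.c hunk below). A userspace sketch of the duplicate-at-parse-time idiom, names illustrative:

#include <stdlib.h>
#include <string.h>

static char *peer_cookie;
static size_t peer_cookie_len;

static int parse_cookie(const char *body, size_t len)
{
	peer_cookie_len = len;
	peer_cookie = malloc(len);		/* kernel: kmemdup(body, len, gfp) */
	if (!peer_cookie)
		return -1;
	memcpy(peer_cookie, body, len);
	return 0;
}

int main(void)
{
	char chunk[] = "transient cookie bytes";	/* gone after parsing */

	if (parse_cookie(chunk, sizeof(chunk)))
		return 1;
	/* ... COOKIE-ECHO may be resent from peer_cookie ... */
	free(peer_cookie);	/* kernel: kfree() once ESTABLISHED */
	return 0;
}
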
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 9b50da548db2..a554d6d15d1b 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -883,6 +883,11 @@ static void sctp_cmd_new_state(struct sctp_cmd_seq *cmds,
 				asoc->rto_initial;
 	}
 
+	if (sctp_state(asoc, ESTABLISHED)) {
+		kfree(asoc->peer.cookie);
+		asoc->peer.cookie = NULL;
+	}
+
 	if (sctp_state(asoc, ESTABLISHED) ||
 	    sctp_state(asoc, CLOSED) ||
 	    sctp_state(asoc, SHUTDOWN_RECEIVED)) {
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index b95c408fd771..1f9cf57d9754 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -550,11 +550,23 @@ void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
 	}
 }
 
+static void tls_device_resync_rx(struct tls_context *tls_ctx,
+				 struct sock *sk, u32 seq, u64 rcd_sn)
+{
+	struct net_device *netdev;
+
+	if (WARN_ON(test_and_set_bit(TLS_RX_SYNC_RUNNING, &tls_ctx->flags)))
+		return;
+	netdev = READ_ONCE(tls_ctx->netdev);
+	if (netdev)
+		netdev->tlsdev_ops->tls_dev_resync_rx(netdev, sk, seq, rcd_sn);
+	clear_bit_unlock(TLS_RX_SYNC_RUNNING, &tls_ctx->flags);
+}
+
 void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_offload_context_rx *rx_ctx;
-	struct net_device *netdev;
 	u32 is_req_pending;
 	s64 resync_req;
 	u32 req_seq;
@@ -570,12 +582,7 @@ void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
 	if (unlikely(is_req_pending) && req_seq == seq &&
 	    atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0)) {
 		seq += TLS_HEADER_SIZE - 1;
-		down_read(&device_offload_lock);
-		netdev = tls_ctx->netdev;
-		if (netdev)
-			netdev->tlsdev_ops->tls_dev_resync_rx(netdev, sk, seq,
-							      rcd_sn);
-		up_read(&device_offload_lock);
+		tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
 	}
 }
 
@@ -977,7 +984,10 @@ static int tls_device_down(struct net_device *netdev)
 	if (ctx->rx_conf == TLS_HW)
 		netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
 						TLS_OFFLOAD_CTX_DIR_RX);
-	ctx->netdev = NULL;
+	WRITE_ONCE(ctx->netdev, NULL);
+	smp_mb__before_atomic(); /* pairs with test_and_set_bit() */
+	while (test_bit(TLS_RX_SYNC_RUNNING, &ctx->flags))
+		usleep_range(10, 200);
 	dev_put(netdev);
 	list_del_init(&ctx->list);
 
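The bit lock replaces the sleeping device_offload_lock on the RX resync path: resync takes TLS_RX_SYNC_RUNNING with an atomic test-and-set and never blocks, while tls_device_down() clears ctx->netdev and then polls until the bit drops before releasing the device. A userspace sketch of the same shape, with C11 atomics standing in for test_and_set_bit()/clear_bit_unlock() and without the kernel's smp_mb__before_atomic() barrier:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int rx_sync_running;
static _Atomic(const char *) netdev = "nic0";

static void resync_rx(void)
{
	const char *dev;

	if (atomic_exchange(&rx_sync_running, 1))
		return;				/* kernel: WARN_ON() and bail */
	dev = atomic_load(&netdev);		/* kernel: READ_ONCE(tls_ctx->netdev) */
	if (dev)
		printf("resync on %s\n", dev);
	atomic_store(&rx_sync_running, 0);	/* kernel: clear_bit_unlock() */
}

static void device_down(void)
{
	atomic_store(&netdev, NULL);
	while (atomic_load(&rx_sync_running))
		;	/* kernel sleeps between polls: usleep_range(10, 200) */
	/* now safe to drop the device reference */
}

int main(void)
{
	resync_rx();
	device_down();
	return 0;
}
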