Diffstat (limited to 'net')
-rw-r--r--  net/dccp/ccids/ccid2.c            6
-rw-r--r--  net/dsa/slave.c                   4
-rw-r--r--  net/ipv6/ip6_tunnel.c             8
-rw-r--r--  net/ipv6/route.c                  4
-rw-r--r--  net/llc/llc_core.c                4
-rw-r--r--  net/packet/af_packet.c           10
-rw-r--r--  net/rxrpc/ar-internal.h           8
-rw-r--r--  net/rxrpc/conn_event.c            4
-rw-r--r--  net/rxrpc/net_ns.c                6
-rw-r--r--  net/rxrpc/output.c               12
-rw-r--r--  net/rxrpc/peer_event.c          156
-rw-r--r--  net/rxrpc/peer_object.c           8
-rw-r--r--  net/rxrpc/rxkad.c                 4
-rw-r--r--  net/smc/af_smc.c                 15
-rw-r--r--  net/tipc/net.c                    4
-rw-r--r--  net/vmw_vsock/af_vsock.c         15
-rw-r--r--  net/vmw_vsock/vmci_transport.c    3
17 files changed, 145 insertions, 126 deletions
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 2b75df469220..842a9c7c73a3 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -229,14 +229,16 @@ static void ccid2_cwnd_restart(struct sock *sk, const u32 now)
         struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
         u32 cwnd = hc->tx_cwnd, restart_cwnd,
             iwnd = rfc3390_bytes_to_packets(dccp_sk(sk)->dccps_mss_cache);
+        s32 delta = now - hc->tx_lsndtime;
 
         hc->tx_ssthresh = max(hc->tx_ssthresh, (cwnd >> 1) + (cwnd >> 2));
 
         /* don't reduce cwnd below the initial window (IW) */
         restart_cwnd = min(cwnd, iwnd);
-        cwnd >>= (now - hc->tx_lsndtime) / hc->tx_rto;
-        hc->tx_cwnd = max(cwnd, restart_cwnd);
 
+        while ((delta -= hc->tx_rto) >= 0 && cwnd > restart_cwnd)
+                cwnd >>= 1;
+        hc->tx_cwnd = max(cwnd, restart_cwnd);
         hc->tx_cwnd_stamp = now;
         hc->tx_cwnd_used = 0;
 
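
The point of this change: after a long idle period, (now - hc->tx_lsndtime) / hc->tx_rto can reach 32 or more, and shifting a u32 by its width or more is undefined behaviour in C. The replacement halves cwnd once per elapsed RTO and stops as soon as the restart window is reached. A minimal userspace model of the two variants (local stand-in names, not the kernel's):

/* Model of the ccid2_cwnd_restart() change: halve once per elapsed RTO
 * instead of one shift whose count can reach 32 or more. */
#include <stdint.h>
#include <stdio.h>

static uint32_t restart_shift(uint32_t cwnd, uint32_t restart_cwnd,
                              int64_t delta, int64_t rto)
{
        cwnd >>= delta / rto;   /* undefined if delta / rto >= 32 */
        return cwnd > restart_cwnd ? cwnd : restart_cwnd;
}

static uint32_t restart_loop(uint32_t cwnd, uint32_t restart_cwnd,
                             int64_t delta, int64_t rto)
{
        while ((delta -= rto) >= 0 && cwnd > restart_cwnd)
                cwnd >>= 1;     /* halve once per RTO, bounded below */
        return cwnd > restart_cwnd ? cwnd : restart_cwnd;
}

int main(void)
{
        /* 3 RTOs idle: both agree. */
        printf("shift: %u\n", restart_shift(64, 4, 3 * 100, 100));  /* 8 */
        printf("loop:  %u\n", restart_loop(64, 4, 3 * 100, 100));   /* 8 */
        /* 40 RTOs idle: the shift count would be 40 -- undefined for a
         * u32 -- while the loop settles safely at the restart window. */
        printf("loop:  %u\n", restart_loop(64, 4, 40 * 100, 100));  /* 4 */
        return 0;
}
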
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 732369c80644..9864bcd3d317 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -639,7 +639,7 @@ static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
 	int ret;
 
 	/* Port's PHY and MAC both need to be EEE capable */
-	if (!dev->phydev)
+	if (!dev->phydev && !dp->pl)
 		return -ENODEV;
 
 	if (!ds->ops->set_mac_eee)
@@ -659,7 +659,7 @@ static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
 	int ret;
 
 	/* Port's PHY and MAC both need to be EEE capable */
-	if (!dev->phydev)
+	if (!dev->phydev && !dp->pl)
 		return -ENODEV;
 
 	if (!ds->ops->get_mac_eee)
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 00e138a44cbb..1cc9650af9fb 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1133,12 +1133,8 @@ route_lookup:
 		max_headroom += 8;
 		mtu -= 8;
 	}
-	if (skb->protocol == htons(ETH_P_IPV6)) {
-		if (mtu < IPV6_MIN_MTU)
-			mtu = IPV6_MIN_MTU;
-	} else if (mtu < 576) {
-		mtu = 576;
-	}
+	mtu = max(mtu, skb->protocol == htons(ETH_P_IPV6) ?
+		       IPV6_MIN_MTU : IPV4_MIN_MTU);
 
 	skb_dst_update_pmtu(skb, mtu);
 	if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
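
Besides folding the two branches into one max(), this swaps the hard-coded 576 for IPV4_MIN_MTU (68), so small IPv4 path MTUs between 68 and 576 are no longer inflated. A small model with the kernel's constant values inlined:

/* Userspace model of the ip6_tnl_xmit() clamp.  Constants mirror the
 * kernel's: IPV6_MIN_MTU is 1280 (RFC 8200), IPV4_MIN_MTU is 68 (RFC 791). */
#include <stdbool.h>
#include <stdio.h>

#define IPV6_MIN_MTU 1280
#define IPV4_MIN_MTU 68

static unsigned int clamp_tunnel_mtu(unsigned int mtu, bool is_ipv6)
{
        unsigned int floor = is_ipv6 ? IPV6_MIN_MTU : IPV4_MIN_MTU;

        return mtu > floor ? mtu : floor;       /* max(mtu, floor) */
}

int main(void)
{
        printf("%u\n", clamp_tunnel_mtu(100, true));    /* 1280 */
        printf("%u\n", clamp_tunnel_mtu(100, false));   /* 100; old code forced 576 */
        return 0;
}
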
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index ec18b3ce8b6d..7208c16302f6 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -978,10 +978,6 @@ static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
 	rt->rt6i_flags &= ~RTF_EXPIRES;
 	rcu_assign_pointer(rt->from, from);
 	dst_init_metrics(&rt->dst, from->fib6_metrics->metrics, true);
-	if (from->fib6_metrics != &dst_default_metrics) {
-		rt->dst._metrics |= DST_METRICS_REFCOUNTED;
-		refcount_inc(&from->fib6_metrics->refcnt);
-	}
 }
 
 /* Caller must already hold reference to @ort */
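
The dropped block took a second reference on from->fib6_metrics and tagged the dst DST_METRICS_REFCOUNTED. Since rcu_assign_pointer(rt->from, from) pins the fib6_info for the clone's lifetime, and the fib6_info owns the metrics, the per-clone reference appears to have been redundant bookkeeping; the clone can simply borrow the metrics through its pinned parent. A toy model of that ownership shape (hypothetical names, not the kernel's):

/* The clone pins its parent, and the parent owns the metrics, so the
 * clone borrows them instead of taking its own reference. */
#include <stdio.h>

struct metrics { int refcnt; };
struct parent { struct metrics *mx; int refcnt; };
struct clone  { struct parent *from; }; /* pinned for the clone's lifetime */

static void clone_init(struct clone *c, struct parent *p)
{
        p->refcnt++;            /* the only reference the clone needs */
        c->from = p;            /* metrics reachable as c->from->mx */
}

int main(void)
{
        struct metrics mx = { .refcnt = 1 };    /* owned by the parent alone */
        struct parent p = { .mx = &mx, .refcnt = 1 };
        struct clone c;

        clone_init(&c, &p);
        printf("metrics refcnt stays %d\n", mx.refcnt);
        return 0;
}
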
diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c
index 89041260784c..260b3dc1b4a2 100644
--- a/net/llc/llc_core.c
+++ b/net/llc/llc_core.c
@@ -73,8 +73,8 @@ struct llc_sap *llc_sap_find(unsigned char sap_value)
 
 	rcu_read_lock_bh();
 	sap = __llc_sap_find(sap_value);
-	if (sap)
-		llc_sap_hold(sap);
+	if (!sap || !llc_sap_hold_safe(sap))
+		sap = NULL;
 	rcu_read_unlock_bh();
 	return sap;
 }
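
llc_sap_hold() increments unconditionally, so a SAP found under RCU after its refcount had already hit zero could be revived just as it was being freed. The new helper gates the lookup on take-if-still-alive semantics -- presumably a refcount_inc_not_zero() wrapper. A runnable userspace model of that primitive:

/* Model of the get-under-RCU race llc_sap_find() now avoids: take the
 * reference only if the count is still non-zero. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool hold_safe(atomic_int *refcnt)
{
        int old = atomic_load(refcnt);

        /* Equivalent of refcount_inc_not_zero(): never revive a dead object. */
        while (old != 0)
                if (atomic_compare_exchange_weak(refcnt, &old, old + 1))
                        return true;
        return false;
}

int main(void)
{
        atomic_int live = 2, dying = 0;

        printf("live:  %d\n", hold_safe(&live));        /* 1: ref taken */
        printf("dying: %d\n", hold_safe(&dying));       /* 0: lookup returns NULL */
        return 0;
}
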
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 9b27d0cd766d..e6445d8f3f57 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -4226,6 +4226,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 	}
 
 	if (req->tp_block_nr) {
+		unsigned int min_frame_size;
+
 		/* Sanity tests and some calculations */
 		err = -EBUSY;
 		if (unlikely(rb->pg_vec))
@@ -4248,12 +4250,12 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 			goto out;
 		if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
 			goto out;
+		min_frame_size = po->tp_hdrlen + po->tp_reserve;
 		if (po->tp_version >= TPACKET_V3 &&
-		    req->tp_block_size <=
-		    BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + sizeof(struct tpacket3_hdr))
+		    req->tp_block_size <
+		    BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
 			goto out;
-		if (unlikely(req->tp_frame_size < po->tp_hdrlen +
-					po->tp_reserve))
+		if (unlikely(req->tp_frame_size < min_frame_size))
 			goto out;
 		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
 			goto out;
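
With TPACKET_V3 a block must fit its per-block private area plus at least one whole frame, and the minimum frame is tp_hdrlen + tp_reserve rather than bare sizeof(struct tpacket3_hdr). Hoisting the sum into min_frame_size lets both tests share one bound, and <= becomes < so an exactly-minimum block now passes. A userspace model of the tightened checks (BLK_PLUS_PRIV's alignment is simplified to a plain size here):

/* Model of the tightened TPACKET_V3 ring checks: a block must hold the
 * private area plus at least one minimum-sized frame. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool ring_params_ok(uint64_t block_size, uint64_t block_priv,
                           uint64_t frame_size, uint64_t hdrlen,
                           uint64_t reserve)
{
        uint64_t min_frame_size = hdrlen + reserve;

        if (block_size < block_priv + min_frame_size)
                return false;           /* block can't hold one frame */
        if (frame_size < min_frame_size)
                return false;           /* frame can't hold its own header */
        return true;
}

int main(void)
{
        /* A block exactly large enough is now accepted ('<' rather than '<='). */
        printf("%d\n", ring_params_ok(4096, 4000, 96, 48, 48));
        return 0;
}
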
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 5fb7d3254d9e..707630ab4713 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -104,9 +104,9 @@ struct rxrpc_net {
 
 #define RXRPC_KEEPALIVE_TIME 20 /* NAT keepalive time in seconds */
 	u8			peer_keepalive_cursor;
-	ktime_t			peer_keepalive_base;
-	struct hlist_head	peer_keepalive[RXRPC_KEEPALIVE_TIME + 1];
-	struct hlist_head	peer_keepalive_new;
+	time64_t		peer_keepalive_base;
+	struct list_head	peer_keepalive[32];
+	struct list_head	peer_keepalive_new;
 	struct timer_list	peer_keepalive_timer;
 	struct work_struct	peer_keepalive_work;
 };
@@ -295,7 +295,7 @@ struct rxrpc_peer {
 	struct hlist_head	error_targets;	/* targets for net error distribution */
 	struct work_struct	error_distributor;
 	struct rb_root		service_conns;	/* Service connections */
-	struct hlist_node	keepalive_link;	/* Link in net->peer_keepalive[] */
+	struct list_head	keepalive_link;	/* Link in net->peer_keepalive[] */
 	time64_t		last_tx_at;	/* Last time packet sent here */
 	seqlock_t		service_conn_lock;
 	spinlock_t		lock;		/* access lock */
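
The keepalive state moves from nanosecond ktime_t plus 21 hlist buckets to whole seconds plus a 32-bucket wheel. 32 is deliberately a power of two: a bucket index reduces with & (32 - 1), and the u8 cursor can wrap around freely. A minimal model of that indexing:

/* Model of the new keepalive wheel indexing: a power-of-two bucket count
 * lets the cursor live in a u8 and wrap with a mask. */
#include <stdint.h>
#include <stdio.h>

#define NBUCKETS 32     /* must stay a power of two for the mask trick */

int main(void)
{
        const uint8_t mask = NBUCKETS - 1;
        uint8_t cursor = 250;   /* near the u8 wrap on purpose */
        int slot = 20;          /* seconds until the peer needs a ping */

        /* slot is relative to the cursor; wrap both into the array. */
        printf("bucket %u\n", (uint8_t)(cursor + slot) & mask);
        return 0;
}
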
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index 8229a52c2acd..3fde001fcc39 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -136,7 +136,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
 	}
 
 	ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, ioc, len);
-	conn->params.peer->last_tx_at = ktime_get_real();
+	conn->params.peer->last_tx_at = ktime_get_seconds();
 	if (ret < 0)
 		trace_rxrpc_tx_fail(conn->debug_id, serial, ret,
 				    rxrpc_tx_fail_call_final_resend);
@@ -245,7 +245,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
 		return -EAGAIN;
 	}
 
-	conn->params.peer->last_tx_at = ktime_get_real();
+	conn->params.peer->last_tx_at = ktime_get_seconds();
 
 	_leave(" = 0");
 	return 0;
diff --git a/net/rxrpc/net_ns.c b/net/rxrpc/net_ns.c
index 5d6a773db973..417d80867c4f 100644
--- a/net/rxrpc/net_ns.c
+++ b/net/rxrpc/net_ns.c
@@ -85,12 +85,12 @@ static __net_init int rxrpc_init_net(struct net *net)
 	hash_init(rxnet->peer_hash);
 	spin_lock_init(&rxnet->peer_hash_lock);
 	for (i = 0; i < ARRAY_SIZE(rxnet->peer_keepalive); i++)
-		INIT_HLIST_HEAD(&rxnet->peer_keepalive[i]);
-	INIT_HLIST_HEAD(&rxnet->peer_keepalive_new);
+		INIT_LIST_HEAD(&rxnet->peer_keepalive[i]);
+	INIT_LIST_HEAD(&rxnet->peer_keepalive_new);
 	timer_setup(&rxnet->peer_keepalive_timer,
 		    rxrpc_peer_keepalive_timeout, 0);
 	INIT_WORK(&rxnet->peer_keepalive_work, rxrpc_peer_keepalive_worker);
-	rxnet->peer_keepalive_base = ktime_add(ktime_get_real(), NSEC_PER_SEC);
+	rxnet->peer_keepalive_base = ktime_get_seconds();
 
 	ret = -ENOMEM;
 	rxnet->proc_net = proc_net_mkdir(net, "rxrpc", net->proc_net);
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index f03de1c59ba3..4774c8f5634d 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -209,7 +209,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
 	now = ktime_get_real();
 	if (ping)
 		call->ping_time = now;
-	conn->params.peer->last_tx_at = ktime_get_real();
+	conn->params.peer->last_tx_at = ktime_get_seconds();
 	if (ret < 0)
 		trace_rxrpc_tx_fail(call->debug_id, serial, ret,
 				    rxrpc_tx_fail_call_ack);
@@ -296,7 +296,7 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
 
 	ret = kernel_sendmsg(conn->params.local->socket,
 			     &msg, iov, 1, sizeof(pkt));
-	conn->params.peer->last_tx_at = ktime_get_real();
+	conn->params.peer->last_tx_at = ktime_get_seconds();
 	if (ret < 0)
 		trace_rxrpc_tx_fail(call->debug_id, serial, ret,
 				    rxrpc_tx_fail_call_abort);
@@ -391,7 +391,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
 	 *   message and update the peer record
 	 */
 	ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
-	conn->params.peer->last_tx_at = ktime_get_real();
+	conn->params.peer->last_tx_at = ktime_get_seconds();
 
 	up_read(&conn->params.local->defrag_sem);
 	if (ret < 0)
@@ -457,7 +457,7 @@ send_fragmentable:
 		if (ret == 0) {
 			ret = kernel_sendmsg(conn->params.local->socket, &msg,
 					     iov, 2, len);
-			conn->params.peer->last_tx_at = ktime_get_real();
+			conn->params.peer->last_tx_at = ktime_get_seconds();
 
 			opt = IP_PMTUDISC_DO;
 			kernel_setsockopt(conn->params.local->socket, SOL_IP,
@@ -475,7 +475,7 @@ send_fragmentable:
 		if (ret == 0) {
 			ret = kernel_sendmsg(conn->params.local->socket, &msg,
 					     iov, 2, len);
-			conn->params.peer->last_tx_at = ktime_get_real();
+			conn->params.peer->last_tx_at = ktime_get_seconds();
 
 			opt = IPV6_PMTUDISC_DO;
 			kernel_setsockopt(conn->params.local->socket,
@@ -599,6 +599,6 @@ void rxrpc_send_keepalive(struct rxrpc_peer *peer)
 		trace_rxrpc_tx_fail(peer->debug_id, 0, ret,
 				    rxrpc_tx_fail_version_keepalive);
 
-	peer->last_tx_at = ktime_get_real();
+	peer->last_tx_at = ktime_get_seconds();
 	_leave("");
 }
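
All the rxrpc transmit paths (here and in conn_event.c and rxkad.c) now stamp last_tx_at with ktime_get_seconds() to match the field's new time64_t type. The keepalive deadline is second-granular anyway, and whole seconds make the due-check plain integer arithmetic, with none of the ns-to-s division or 32-bit wrap heuristics the old worker needed. A sketch of what the comparison reduces to (time(NULL) standing in for ktime_get_seconds()):

/* With plain seconds, "is a keepalive due?" is a single comparison. */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define RXRPC_KEEPALIVE_TIME 20 /* seconds, as in the patch */

int main(void)
{
        int64_t now = time(NULL);               /* stand-in for ktime_get_seconds() */
        int64_t last_tx_at = now - 25;          /* last packet 25s ago */
        int64_t keepalive_at = last_tx_at + RXRPC_KEEPALIVE_TIME;

        printf("keepalive due: %s\n", keepalive_at <= now ? "yes" : "no");
        return 0;
}
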
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index 0ed8b651cec2..4f9da2f51c69 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -350,97 +350,117 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
 }
 
 /*
- * Perform keep-alive pings with VERSION packets to keep any NAT alive.
+ * Perform keep-alive pings.
  */
-void rxrpc_peer_keepalive_worker(struct work_struct *work)
+static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
+					  struct list_head *collector,
+					  time64_t base,
+					  u8 cursor)
 {
-	struct rxrpc_net *rxnet =
-		container_of(work, struct rxrpc_net, peer_keepalive_work);
 	struct rxrpc_peer *peer;
-	unsigned long delay;
-	ktime_t base, now = ktime_get_real();
-	s64 diff;
-	u8 cursor, slot;
+	const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
+	time64_t keepalive_at;
+	int slot;
 
-	base = rxnet->peer_keepalive_base;
-	cursor = rxnet->peer_keepalive_cursor;
+	spin_lock_bh(&rxnet->peer_hash_lock);
 
-	_enter("%u,%lld", cursor, ktime_sub(now, base));
+	while (!list_empty(collector)) {
+		peer = list_entry(collector->next,
+				  struct rxrpc_peer, keepalive_link);
 
-next_bucket:
-	diff = ktime_to_ns(ktime_sub(now, base));
-	if (diff < 0)
-		goto resched;
+		list_del_init(&peer->keepalive_link);
+		if (!rxrpc_get_peer_maybe(peer))
+			continue;
 
-	_debug("at %u", cursor);
-	spin_lock_bh(&rxnet->peer_hash_lock);
-next_peer:
-	if (!rxnet->live) {
 		spin_unlock_bh(&rxnet->peer_hash_lock);
-		goto out;
-	}
 
-	/* Everything in the bucket at the cursor is processed this second; the
-	 * bucket at cursor + 1 goes now + 1s and so on...
-	 */
-	if (hlist_empty(&rxnet->peer_keepalive[cursor])) {
-		if (hlist_empty(&rxnet->peer_keepalive_new)) {
-			spin_unlock_bh(&rxnet->peer_hash_lock);
-			goto emptied_bucket;
+		keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
+		slot = keepalive_at - base;
+		_debug("%02x peer %u t=%d {%pISp}",
+		       cursor, peer->debug_id, slot, &peer->srx.transport);
+
+		if (keepalive_at <= base ||
+		    keepalive_at > base + RXRPC_KEEPALIVE_TIME) {
+			rxrpc_send_keepalive(peer);
+			slot = RXRPC_KEEPALIVE_TIME;
 		}
 
-		hlist_move_list(&rxnet->peer_keepalive_new,
-				&rxnet->peer_keepalive[cursor]);
+		/* A transmission to this peer occurred since last we examined
+		 * it so put it into the appropriate future bucket.
+		 */
+		slot += cursor;
+		slot &= mask;
+		spin_lock_bh(&rxnet->peer_hash_lock);
+		list_add_tail(&peer->keepalive_link,
+			      &rxnet->peer_keepalive[slot & mask]);
+		rxrpc_put_peer(peer);
 	}
 
-	peer = hlist_entry(rxnet->peer_keepalive[cursor].first,
-			   struct rxrpc_peer, keepalive_link);
-	hlist_del_init(&peer->keepalive_link);
-	if (!rxrpc_get_peer_maybe(peer))
-		goto next_peer;
-
 	spin_unlock_bh(&rxnet->peer_hash_lock);
+}
 
-	_debug("peer %u {%pISp}", peer->debug_id, &peer->srx.transport);
+/*
+ * Perform keep-alive pings with VERSION packets to keep any NAT alive.
+ */
+void rxrpc_peer_keepalive_worker(struct work_struct *work)
+{
+	struct rxrpc_net *rxnet =
+		container_of(work, struct rxrpc_net, peer_keepalive_work);
+	const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
+	time64_t base, now, delay;
+	u8 cursor, stop;
+	LIST_HEAD(collector);
 
-recalc:
-	diff = ktime_divns(ktime_sub(peer->last_tx_at, base), NSEC_PER_SEC);
-	if (diff < -30 || diff > 30)
-		goto send; /* LSW of 64-bit time probably wrapped on 32-bit */
-	diff += RXRPC_KEEPALIVE_TIME - 1;
-	if (diff < 0)
-		goto send;
+	now = ktime_get_seconds();
+	base = rxnet->peer_keepalive_base;
+	cursor = rxnet->peer_keepalive_cursor;
+	_enter("%lld,%u", base - now, cursor);
 
-	slot = (diff > RXRPC_KEEPALIVE_TIME - 1) ? RXRPC_KEEPALIVE_TIME - 1 : diff;
-	if (slot == 0)
-		goto send;
+	if (!rxnet->live)
+		return;
 
-	/* A transmission to this peer occurred since last we examined it so
-	 * put it into the appropriate future bucket.
+	/* Remove to a temporary list all the peers that are currently lodged
+	 * in expired buckets plus all new peers.
+	 *
+	 * Everything in the bucket at the cursor is processed this
+	 * second; the bucket at cursor + 1 goes at now + 1s and so
+	 * on...
 	 */
-	slot = (slot + cursor) % ARRAY_SIZE(rxnet->peer_keepalive);
 	spin_lock_bh(&rxnet->peer_hash_lock);
-	hlist_add_head(&peer->keepalive_link, &rxnet->peer_keepalive[slot]);
-	rxrpc_put_peer(peer);
-	goto next_peer;
+	list_splice_init(&rxnet->peer_keepalive_new, &collector);
+
+	stop = cursor + ARRAY_SIZE(rxnet->peer_keepalive);
+	while (base <= now && (s8)(cursor - stop) < 0) {
+		list_splice_tail_init(&rxnet->peer_keepalive[cursor & mask],
+				      &collector);
+		base++;
+		cursor++;
+	}
 
-send:
-	rxrpc_send_keepalive(peer);
-	now = ktime_get_real();
-	goto recalc;
+	base = now;
+	spin_unlock_bh(&rxnet->peer_hash_lock);
 
-emptied_bucket:
-	cursor++;
-	if (cursor >= ARRAY_SIZE(rxnet->peer_keepalive))
-		cursor = 0;
-	base = ktime_add_ns(base, NSEC_PER_SEC);
-	goto next_bucket;
-
-resched:
 	rxnet->peer_keepalive_base = base;
 	rxnet->peer_keepalive_cursor = cursor;
-	delay = nsecs_to_jiffies(-diff) + 1;
-	timer_reduce(&rxnet->peer_keepalive_timer, jiffies + delay);
-out:
+	rxrpc_peer_keepalive_dispatch(rxnet, &collector, base, cursor);
+	ASSERT(list_empty(&collector));
+
+	/* Schedule the timer for the next occupied timeslot. */
+	cursor = rxnet->peer_keepalive_cursor;
+	stop = cursor + RXRPC_KEEPALIVE_TIME - 1;
+	for (; (s8)(cursor - stop) < 0; cursor++) {
+		if (!list_empty(&rxnet->peer_keepalive[cursor & mask]))
+			break;
+		base++;
+	}
+
+	now = ktime_get_seconds();
+	delay = base - now;
+	if (delay < 1)
+		delay = 1;
+	delay *= HZ;
+	if (rxnet->live)
+		timer_reduce(&rxnet->peer_keepalive_timer, jiffies + delay);
+
 	_leave("");
 }
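
The rework splits collection from dispatch: under the lock, the worker splices every bucket between the old base and now -- plus all new peers -- onto a local list, then releases the lock before any packet is sent. Dispatch files each peer keepalive_at - base buckets ahead, pinging first if the deadline has already passed (or looks implausible) and parking it a full RXRPC_KEEPALIVE_TIME out. A small model of the wrap-safe collection loop:

/* Model of the collection pass: every bucket between the old cursor and
 * "now" is spliced onto one local list before any sends happen, so the
 * lock never has to be held across I/O. */
#include <stdint.h>
#include <stdio.h>

#define NBUCKETS 32

int main(void)
{
        const uint8_t mask = NBUCKETS - 1;
        int64_t base = 1000, now = 1005;        /* 5 seconds behind */
        uint8_t cursor = 30;                    /* wheel wraps during catch-up */
        uint8_t stop = cursor + NBUCKETS;       /* never collect a bucket twice */

        while (base <= now && (int8_t)(cursor - stop) < 0) {
                printf("collect bucket %u\n", cursor & mask);   /* 30,31,0,1,2,3 */
                base++;
                cursor++;
        }
        return 0;
}
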
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 1b7e8107b3ae..24ec7cdcf332 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -322,7 +322,7 @@ struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *local,
 	if (!peer) {
 		peer = prealloc;
 		hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
-		hlist_add_head(&peer->keepalive_link, &rxnet->peer_keepalive_new);
+		list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new);
 	}
 
 	spin_unlock(&rxnet->peer_hash_lock);
@@ -367,8 +367,8 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
 		if (!peer) {
 			hash_add_rcu(rxnet->peer_hash,
 				     &candidate->hash_link, hash_key);
-			hlist_add_head(&candidate->keepalive_link,
-				       &rxnet->peer_keepalive_new);
+			list_add_tail(&candidate->keepalive_link,
+				      &rxnet->peer_keepalive_new);
 		}
 
 		spin_unlock_bh(&rxnet->peer_hash_lock);
@@ -441,7 +441,7 @@ static void __rxrpc_put_peer(struct rxrpc_peer *peer)
 
 	spin_lock_bh(&rxnet->peer_hash_lock);
 	hash_del_rcu(&peer->hash_link);
-	hlist_del_init(&peer->keepalive_link);
+	list_del_init(&peer->keepalive_link);
 	spin_unlock_bh(&rxnet->peer_hash_lock);
 
 	kfree_rcu(peer, rcu);
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 278ac0807a60..47cb019c521a 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -669,7 +669,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
 		return -EAGAIN;
 	}
 
-	conn->params.peer->last_tx_at = ktime_get_real();
+	conn->params.peer->last_tx_at = ktime_get_seconds();
 	_leave(" = 0");
 	return 0;
 }
@@ -725,7 +725,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
 		return -EAGAIN;
 	}
 
-	conn->params.peer->last_tx_at = ktime_get_real();
+	conn->params.peer->last_tx_at = ktime_get_seconds();
 	_leave(" = 0");
 	return 0;
 }
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 05e4ffe5aabd..e7de5f282722 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -1122,6 +1122,8 @@ static void smc_tcp_listen_work(struct work_struct *work)
 		sock_hold(lsk); /* sock_put in smc_listen_work */
 		INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
 		smc_copy_sock_settings_to_smc(new_smc);
+		new_smc->sk.sk_sndbuf = lsmc->sk.sk_sndbuf;
+		new_smc->sk.sk_rcvbuf = lsmc->sk.sk_rcvbuf;
 		sock_hold(&new_smc->sk); /* sock_put in passive closing */
 		if (!schedule_work(&new_smc->smc_listen_work))
 			sock_put(&new_smc->sk);
@@ -1397,8 +1399,7 @@ static int smc_shutdown(struct socket *sock, int how)
 	lock_sock(sk);
 
 	rc = -ENOTCONN;
-	if ((sk->sk_state != SMC_LISTEN) &&
-	    (sk->sk_state != SMC_ACTIVE) &&
+	if ((sk->sk_state != SMC_ACTIVE) &&
 	    (sk->sk_state != SMC_PEERCLOSEWAIT1) &&
 	    (sk->sk_state != SMC_PEERCLOSEWAIT2) &&
 	    (sk->sk_state != SMC_APPCLOSEWAIT1) &&
@@ -1521,12 +1522,16 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd,
 
 	smc = smc_sk(sock->sk);
 	conn = &smc->conn;
+	lock_sock(&smc->sk);
 	if (smc->use_fallback) {
-		if (!smc->clcsock)
+		if (!smc->clcsock) {
+			release_sock(&smc->sk);
 			return -EBADF;
-		return smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
+		}
+		answ = smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
+		release_sock(&smc->sk);
+		return answ;
 	}
-	lock_sock(&smc->sk);
 	switch (cmd) {
 	case SIOCINQ: /* same as FIONREAD */
 		if (smc->sk.sk_state == SMC_LISTEN) {
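
Three separate fixes land in af_smc.c: accepted sockets now inherit the listener's sk_sndbuf/sk_rcvbuf settings, shutdown() on a listening socket now reports -ENOTCONN instead of being treated as connected, and smc_ioctl() takes the sock lock before inspecting use_fallback rather than only after the fallback branch has already returned. The shape of the last fix, modelled in userspace:

/* Shape of the smc_ioctl() fix: the lock is taken before any state is
 * inspected and released on every return path, including the early ones. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sk_lock = PTHREAD_MUTEX_INITIALIZER;
static int use_fallback, have_clcsock;

static int smc_ioctl_model(void)
{
        int ret = 0;

        pthread_mutex_lock(&sk_lock);
        if (use_fallback) {
                ret = have_clcsock ? 0 : -9 /* -EBADF */;
                pthread_mutex_unlock(&sk_lock); /* early exits unlock too */
                return ret;
        }
        /* ... non-fallback command handling ... */
        pthread_mutex_unlock(&sk_lock);
        return ret;
}

int main(void)
{
        use_fallback = 1;
        printf("%d\n", smc_ioctl_model());
        return 0;
}
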
diff --git a/net/tipc/net.c b/net/tipc/net.c
index a7f6964c3a4b..62199cf5a56c 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -123,15 +123,13 @@ void tipc_net_finalize(struct net *net, u32 addr)
 {
 	struct tipc_net *tn = tipc_net(net);
 
-	spin_lock_bh(&tn->node_list_lock);
-	if (!tipc_own_addr(net)) {
+	if (!cmpxchg(&tn->node_addr, 0, addr)) {
 		tipc_set_node_addr(net, addr);
 		tipc_named_reinit(net);
 		tipc_sk_reinit(net);
 		tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr,
 				     TIPC_CLUSTER_SCOPE, 0, addr);
 	}
-	spin_unlock_bh(&tn->node_list_lock);
 }
 
 void tipc_net_stop(struct net *net)
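
cmpxchg() makes the finalization a one-shot: only the caller that swings node_addr from 0 to addr runs the reinit-and-publish sequence, so the address test and the spinlock collapse into a single atomic operation. A runnable model of the claim-the-transition pattern:

/* One cmpxchg from 0 claims the one-shot initialisation, no lock needed. */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int node_addr;

static void net_finalize(unsigned int addr)
{
        unsigned int expected = 0;

        /* Only the thread that swings 0 -> addr runs the set-up path. */
        if (atomic_compare_exchange_strong(&node_addr, &expected, addr))
                printf("winner set address %x\n", addr);
}

int main(void)
{
        net_finalize(0x1001);   /* claims it */
        net_finalize(0x2002);   /* no-op: already finalised */
        return 0;
}
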
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index c1076c19b858..ab27a2872935 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -451,14 +451,14 @@ static int vsock_send_shutdown(struct sock *sk, int mode)
 		return transport->shutdown(vsock_sk(sk), mode);
 }
 
-void vsock_pending_work(struct work_struct *work)
+static void vsock_pending_work(struct work_struct *work)
 {
 	struct sock *sk;
 	struct sock *listener;
 	struct vsock_sock *vsk;
 	bool cleanup;
 
-	vsk = container_of(work, struct vsock_sock, dwork.work);
+	vsk = container_of(work, struct vsock_sock, pending_work.work);
 	sk = sk_vsock(vsk);
 	listener = vsk->listener;
 	cleanup = true;
@@ -498,7 +498,6 @@ out:
 	sock_put(sk);
 	sock_put(listener);
 }
-EXPORT_SYMBOL_GPL(vsock_pending_work);
 
 /**** SOCKET OPERATIONS ****/
 
@@ -597,6 +596,8 @@ static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr)
 	return retval;
 }
 
+static void vsock_connect_timeout(struct work_struct *work);
+
 struct sock *__vsock_create(struct net *net,
 			    struct socket *sock,
 			    struct sock *parent,
@@ -638,6 +639,8 @@ struct sock *__vsock_create(struct net *net,
 	vsk->sent_request = false;
 	vsk->ignore_connecting_rst = false;
 	vsk->peer_shutdown = 0;
+	INIT_DELAYED_WORK(&vsk->connect_work, vsock_connect_timeout);
+	INIT_DELAYED_WORK(&vsk->pending_work, vsock_pending_work);
 
 	psk = parent ? vsock_sk(parent) : NULL;
 	if (parent) {
@@ -1117,7 +1120,7 @@ static void vsock_connect_timeout(struct work_struct *work)
 	struct vsock_sock *vsk;
 	int cancel = 0;
 
-	vsk = container_of(work, struct vsock_sock, dwork.work);
+	vsk = container_of(work, struct vsock_sock, connect_work.work);
 	sk = sk_vsock(vsk);
 
 	lock_sock(sk);
@@ -1221,9 +1224,7 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
 	 * timeout fires.
 	 */
 	sock_hold(sk);
-	INIT_DELAYED_WORK(&vsk->dwork,
-			  vsock_connect_timeout);
-	schedule_delayed_work(&vsk->dwork, timeout);
+	schedule_delayed_work(&vsk->connect_work, timeout);
 
 	/* Skip ahead to preserve error code set above. */
 	goto out_wait;
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index a7a73ffe675b..cb332adb84cd 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -1094,8 +1094,7 @@ static int vmci_transport_recv_listen(struct sock *sk,
 	vpending->listener = sk;
 	sock_hold(sk);
 	sock_hold(pending);
-	INIT_DELAYED_WORK(&vpending->dwork, vsock_pending_work);
-	schedule_delayed_work(&vpending->dwork, HZ);
+	schedule_delayed_work(&vpending->pending_work, HZ);
 
 out:
 	return err;
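
These last two diffs are one fix: connect timeouts and pending-accept work shared the single vsk->dwork, so arming one path could clobber the other, and each handler's container_of() resolved the same member no matter who had armed it. With dedicated connect_work and pending_work members initialised once in __vsock_create(), callers only schedule, the re-INIT at schedule time disappears, and vsock_pending_work() can become static and drop its export. A toy illustration of the shared-slot hazard:

/* Two users sharing one delayed-work slot can overwrite each other's
 * handler; one slot per purpose removes the race by construction. */
#include <stdio.h>

struct work { void (*fn)(void); };

static void connect_timeout(void) { puts("connect timeout"); }
static void pending_work(void)    { puts("pending work"); }

int main(void)
{
        /* Before: one shared slot -- the second INIT overwrites the first. */
        struct work dwork = { connect_timeout };
        dwork.fn = pending_work;        /* connect_timeout silently lost */
        dwork.fn();

        /* After: one slot per purpose, initialised once at socket creation. */
        struct work connect_work = { connect_timeout };
        struct work pend_work = { pending_work };
        connect_work.fn();
        pend_work.fn();
        return 0;
}
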