about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorDavid Howells <dhowells@redhat.com>2018-03-30 16:04:43 -0400
committerDavid Howells <dhowells@redhat.com>2018-03-30 16:04:43 -0400
commitace45bec6d77bc061c3c3d8ad99e298ea9800c2b (patch)
tree3f87956bcdbccf8470eeb867ffe1760998b4bb0c
parentc0b6edef0bf0e33c12eaf80c676ff09def011518 (diff)
rxrpc: Fix firewall route keepalive
Fix the firewall route keepalive part of AF_RXRPC, which is currently functioning incorrectly by replying to VERSION REPLY packets from the server with VERSION REQUEST packets. Instead, send VERSION REPLY packets to the peers of service connections to act as keep-alives 20s after the latest packet was transmitted to that peer. Also, just discard VERSION REPLY packets rather than replying to them. Signed-off-by: David Howells <dhowells@redhat.com>
-rw-r--r--net/rxrpc/af_rxrpc.c4
-rw-r--r--net/rxrpc/ar-internal.h14
-rw-r--r--net/rxrpc/conn_event.c3
-rw-r--r--net/rxrpc/input.c2
-rw-r--r--net/rxrpc/net_ns.c21
-rw-r--r--net/rxrpc/output.c59
-rw-r--r--net/rxrpc/peer_event.c96
-rw-r--r--net/rxrpc/peer_object.c7
-rw-r--r--net/rxrpc/rxkad.c2
9 files changed, 204 insertions, 4 deletions
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index ec5ec68be1aa..0b3026b8fa40 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -762,6 +762,7 @@ static __poll_t rxrpc_poll(struct file *file, struct socket *sock,
762static int rxrpc_create(struct net *net, struct socket *sock, int protocol, 762static int rxrpc_create(struct net *net, struct socket *sock, int protocol,
763 int kern) 763 int kern)
764{ 764{
765 struct rxrpc_net *rxnet;
765 struct rxrpc_sock *rx; 766 struct rxrpc_sock *rx;
766 struct sock *sk; 767 struct sock *sk;
767 768
@@ -801,6 +802,9 @@ static int rxrpc_create(struct net *net, struct socket *sock, int protocol,
801 rwlock_init(&rx->call_lock); 802 rwlock_init(&rx->call_lock);
802 memset(&rx->srx, 0, sizeof(rx->srx)); 803 memset(&rx->srx, 0, sizeof(rx->srx));
803 804
805 rxnet = rxrpc_net(sock_net(&rx->sk));
806 timer_reduce(&rxnet->peer_keepalive_timer, jiffies + 1);
807
804 _leave(" = 0 [%p]", rx); 808 _leave(" = 0 [%p]", rx);
805 return 0; 809 return 0;
806} 810}
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 21cf164b6d85..8a348e0a9d95 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -97,8 +97,16 @@ struct rxrpc_net {
97 struct list_head local_endpoints; 97 struct list_head local_endpoints;
98 struct mutex local_mutex; /* Lock for ->local_endpoints */ 98 struct mutex local_mutex; /* Lock for ->local_endpoints */
99 99
100 spinlock_t peer_hash_lock; /* Lock for ->peer_hash */
101 DECLARE_HASHTABLE (peer_hash, 10); 100 DECLARE_HASHTABLE (peer_hash, 10);
101 spinlock_t peer_hash_lock; /* Lock for ->peer_hash */
102
103#define RXRPC_KEEPALIVE_TIME 20 /* NAT keepalive time in seconds */
104 u8 peer_keepalive_cursor;
105 ktime_t peer_keepalive_base;
106 struct hlist_head peer_keepalive[RXRPC_KEEPALIVE_TIME + 1];
107 struct hlist_head peer_keepalive_new;
108 struct timer_list peer_keepalive_timer;
109 struct work_struct peer_keepalive_work;
102}; 110};
103 111
104/* 112/*
@@ -285,6 +293,8 @@ struct rxrpc_peer {
285 struct hlist_head error_targets; /* targets for net error distribution */ 293 struct hlist_head error_targets; /* targets for net error distribution */
286 struct work_struct error_distributor; 294 struct work_struct error_distributor;
287 struct rb_root service_conns; /* Service connections */ 295 struct rb_root service_conns; /* Service connections */
296 struct hlist_node keepalive_link; /* Link in net->peer_keepalive[] */
297 time64_t last_tx_at; /* Last time packet sent here */
288 seqlock_t service_conn_lock; 298 seqlock_t service_conn_lock;
289 spinlock_t lock; /* access lock */ 299 spinlock_t lock; /* access lock */
290 unsigned int if_mtu; /* interface MTU for this peer */ 300 unsigned int if_mtu; /* interface MTU for this peer */
@@ -1026,6 +1036,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *, bool, rxrpc_serial_t *);
1026int rxrpc_send_abort_packet(struct rxrpc_call *); 1036int rxrpc_send_abort_packet(struct rxrpc_call *);
1027int rxrpc_send_data_packet(struct rxrpc_call *, struct sk_buff *, bool); 1037int rxrpc_send_data_packet(struct rxrpc_call *, struct sk_buff *, bool);
1028void rxrpc_reject_packets(struct rxrpc_local *); 1038void rxrpc_reject_packets(struct rxrpc_local *);
1039void rxrpc_send_keepalive(struct rxrpc_peer *);
1029 1040
1030/* 1041/*
1031 * peer_event.c 1042 * peer_event.c
@@ -1034,6 +1045,7 @@ void rxrpc_error_report(struct sock *);
1034void rxrpc_peer_error_distributor(struct work_struct *); 1045void rxrpc_peer_error_distributor(struct work_struct *);
1035void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace, 1046void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace,
1036 rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t); 1047 rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);
1048void rxrpc_peer_keepalive_worker(struct work_struct *);
1037 1049
1038/* 1050/*
1039 * peer_object.c 1051 * peer_object.c
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index d2ec3fd593e8..c717152070df 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -136,6 +136,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
136 } 136 }
137 137
138 kernel_sendmsg(conn->params.local->socket, &msg, iov, ioc, len); 138 kernel_sendmsg(conn->params.local->socket, &msg, iov, ioc, len);
139 conn->params.peer->last_tx_at = ktime_get_real();
139 _leave(""); 140 _leave("");
140 return; 141 return;
141} 142}
@@ -239,6 +240,8 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
239 return -EAGAIN; 240 return -EAGAIN;
240 } 241 }
241 242
243 conn->params.peer->last_tx_at = ktime_get_real();
244
242 _leave(" = 0"); 245 _leave(" = 0");
243 return 0; 246 return 0;
244} 247}
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 2a868fdab0ae..d4f2509e018b 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -1183,6 +1183,8 @@ void rxrpc_data_ready(struct sock *udp_sk)
1183 1183
1184 switch (sp->hdr.type) { 1184 switch (sp->hdr.type) {
1185 case RXRPC_PACKET_TYPE_VERSION: 1185 case RXRPC_PACKET_TYPE_VERSION:
1186 if (!(sp->hdr.flags & RXRPC_CLIENT_INITIATED))
1187 goto discard;
1186 rxrpc_post_packet_to_local(local, skb); 1188 rxrpc_post_packet_to_local(local, skb);
1187 goto out; 1189 goto out;
1188 1190
diff --git a/net/rxrpc/net_ns.c b/net/rxrpc/net_ns.c
index f18c9248e0d4..66baf2b80b6c 100644
--- a/net/rxrpc/net_ns.c
+++ b/net/rxrpc/net_ns.c
@@ -32,13 +32,22 @@ static void rxrpc_service_conn_reap_timeout(struct timer_list *timer)
32 rxrpc_queue_work(&rxnet->service_conn_reaper); 32 rxrpc_queue_work(&rxnet->service_conn_reaper);
33} 33}
34 34
35static void rxrpc_peer_keepalive_timeout(struct timer_list *timer)
36{
37 struct rxrpc_net *rxnet =
38 container_of(timer, struct rxrpc_net, peer_keepalive_timer);
39
40 if (rxnet->live)
41 rxrpc_queue_work(&rxnet->peer_keepalive_work);
42}
43
35/* 44/*
36 * Initialise a per-network namespace record. 45 * Initialise a per-network namespace record.
37 */ 46 */
38static __net_init int rxrpc_init_net(struct net *net) 47static __net_init int rxrpc_init_net(struct net *net)
39{ 48{
40 struct rxrpc_net *rxnet = rxrpc_net(net); 49 struct rxrpc_net *rxnet = rxrpc_net(net);
41 int ret; 50 int ret, i;
42 51
43 rxnet->live = true; 52 rxnet->live = true;
44 get_random_bytes(&rxnet->epoch, sizeof(rxnet->epoch)); 53 get_random_bytes(&rxnet->epoch, sizeof(rxnet->epoch));
@@ -70,8 +79,16 @@ static __net_init int rxrpc_init_net(struct net *net)
70 79
71 INIT_LIST_HEAD(&rxnet->local_endpoints); 80 INIT_LIST_HEAD(&rxnet->local_endpoints);
72 mutex_init(&rxnet->local_mutex); 81 mutex_init(&rxnet->local_mutex);
82
73 hash_init(rxnet->peer_hash); 83 hash_init(rxnet->peer_hash);
74 spin_lock_init(&rxnet->peer_hash_lock); 84 spin_lock_init(&rxnet->peer_hash_lock);
85 for (i = 0; i < ARRAY_SIZE(rxnet->peer_keepalive); i++)
86 INIT_HLIST_HEAD(&rxnet->peer_keepalive[i]);
87 INIT_HLIST_HEAD(&rxnet->peer_keepalive_new);
88 timer_setup(&rxnet->peer_keepalive_timer,
89 rxrpc_peer_keepalive_timeout, 0);
90 INIT_WORK(&rxnet->peer_keepalive_work, rxrpc_peer_keepalive_worker);
91 rxnet->peer_keepalive_base = ktime_add(ktime_get_real(), NSEC_PER_SEC);
75 92
76 ret = -ENOMEM; 93 ret = -ENOMEM;
77 rxnet->proc_net = proc_net_mkdir(net, "rxrpc", net->proc_net); 94 rxnet->proc_net = proc_net_mkdir(net, "rxrpc", net->proc_net);
@@ -95,6 +112,8 @@ static __net_exit void rxrpc_exit_net(struct net *net)
95 struct rxrpc_net *rxnet = rxrpc_net(net); 112 struct rxrpc_net *rxnet = rxrpc_net(net);
96 113
97 rxnet->live = false; 114 rxnet->live = false;
115 del_timer_sync(&rxnet->peer_keepalive_timer);
116 cancel_work_sync(&rxnet->peer_keepalive_work);
98 rxrpc_destroy_all_calls(rxnet); 117 rxrpc_destroy_all_calls(rxnet);
99 rxrpc_destroy_all_connections(rxnet); 118 rxrpc_destroy_all_connections(rxnet);
100 rxrpc_destroy_all_locals(rxnet); 119 rxrpc_destroy_all_locals(rxnet);
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index cf73dc006c3b..7f1fc04775b3 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -32,6 +32,8 @@ struct rxrpc_abort_buffer {
32 __be32 abort_code; 32 __be32 abort_code;
33}; 33};
34 34
35static const char rxrpc_keepalive_string[] = "";
36
35/* 37/*
36 * Arrange for a keepalive ping a certain time after we last transmitted. This 38 * Arrange for a keepalive ping a certain time after we last transmitted. This
37 * lets the far side know we're still interested in this call and helps keep 39 * lets the far side know we're still interested in this call and helps keep
@@ -122,6 +124,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
122 struct kvec iov[2]; 124 struct kvec iov[2];
123 rxrpc_serial_t serial; 125 rxrpc_serial_t serial;
124 rxrpc_seq_t hard_ack, top; 126 rxrpc_seq_t hard_ack, top;
127 ktime_t now;
125 size_t len, n; 128 size_t len, n;
126 int ret; 129 int ret;
127 u8 reason; 130 u8 reason;
@@ -203,8 +206,10 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
203 } 206 }
204 207
205 ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len); 208 ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
209 now = ktime_get_real();
206 if (ping) 210 if (ping)
207 call->ping_time = ktime_get_real(); 211 call->ping_time = now;
212 conn->params.peer->last_tx_at = ktime_get_real();
208 213
209 if (call->state < RXRPC_CALL_COMPLETE) { 214 if (call->state < RXRPC_CALL_COMPLETE) {
210 if (ret < 0) { 215 if (ret < 0) {
@@ -288,6 +293,7 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
288 293
289 ret = kernel_sendmsg(conn->params.local->socket, 294 ret = kernel_sendmsg(conn->params.local->socket,
290 &msg, iov, 1, sizeof(pkt)); 295 &msg, iov, 1, sizeof(pkt));
296 conn->params.peer->last_tx_at = ktime_get_real();
291 297
292 rxrpc_put_connection(conn); 298 rxrpc_put_connection(conn);
293 return ret; 299 return ret;
@@ -378,6 +384,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
378 * message and update the peer record 384 * message and update the peer record
379 */ 385 */
380 ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len); 386 ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
387 conn->params.peer->last_tx_at = ktime_get_real();
381 388
382 up_read(&conn->params.local->defrag_sem); 389 up_read(&conn->params.local->defrag_sem);
383 if (ret == -EMSGSIZE) 390 if (ret == -EMSGSIZE)
@@ -429,6 +436,7 @@ send_fragmentable:
429 if (ret == 0) { 436 if (ret == 0) {
430 ret = kernel_sendmsg(conn->params.local->socket, &msg, 437 ret = kernel_sendmsg(conn->params.local->socket, &msg,
431 iov, 2, len); 438 iov, 2, len);
439 conn->params.peer->last_tx_at = ktime_get_real();
432 440
433 opt = IP_PMTUDISC_DO; 441 opt = IP_PMTUDISC_DO;
434 kernel_setsockopt(conn->params.local->socket, SOL_IP, 442 kernel_setsockopt(conn->params.local->socket, SOL_IP,
@@ -446,6 +454,7 @@ send_fragmentable:
446 if (ret == 0) { 454 if (ret == 0) {
447 ret = kernel_sendmsg(conn->params.local->socket, &msg, 455 ret = kernel_sendmsg(conn->params.local->socket, &msg,
448 iov, 2, len); 456 iov, 2, len);
457 conn->params.peer->last_tx_at = ktime_get_real();
449 458
450 opt = IPV6_PMTUDISC_DO; 459 opt = IPV6_PMTUDISC_DO;
451 kernel_setsockopt(conn->params.local->socket, 460 kernel_setsockopt(conn->params.local->socket,
@@ -515,3 +524,51 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
515 524
516 _leave(""); 525 _leave("");
517} 526}
527
528/*
529 * Send a VERSION reply to a peer as a keepalive.
530 */
531void rxrpc_send_keepalive(struct rxrpc_peer *peer)
532{
533 struct rxrpc_wire_header whdr;
534 struct msghdr msg;
535 struct kvec iov[2];
536 size_t len;
537 int ret;
538
539 _enter("");
540
541 msg.msg_name = &peer->srx.transport;
542 msg.msg_namelen = peer->srx.transport_len;
543 msg.msg_control = NULL;
544 msg.msg_controllen = 0;
545 msg.msg_flags = 0;
546
547 whdr.epoch = htonl(peer->local->rxnet->epoch);
548 whdr.cid = 0;
549 whdr.callNumber = 0;
550 whdr.seq = 0;
551 whdr.serial = 0;
552 whdr.type = RXRPC_PACKET_TYPE_VERSION; /* Not client-initiated */
553 whdr.flags = RXRPC_LAST_PACKET;
554 whdr.userStatus = 0;
555 whdr.securityIndex = 0;
556 whdr._rsvd = 0;
557 whdr.serviceId = 0;
558
559 iov[0].iov_base = &whdr;
560 iov[0].iov_len = sizeof(whdr);
561 iov[1].iov_base = (char *)rxrpc_keepalive_string;
562 iov[1].iov_len = sizeof(rxrpc_keepalive_string);
563
564 len = iov[0].iov_len + iov[1].iov_len;
565
566 _proto("Tx VERSION (keepalive)");
567
568 ret = kernel_sendmsg(peer->local->socket, &msg, iov, 2, len);
569 if (ret < 0)
570 _debug("sendmsg failed: %d", ret);
571
572 peer->last_tx_at = ktime_get_real();
573 _leave("");
574}
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index 7f749505e699..d01eb9a06448 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -348,3 +348,99 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
348 trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial, rtt, 348 trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial, rtt,
349 usage, avg); 349 usage, avg);
350} 350}
351
352/*
353 * Perform keep-alive pings with VERSION packets to keep any NAT alive.
354 */
355void rxrpc_peer_keepalive_worker(struct work_struct *work)
356{
357 struct rxrpc_net *rxnet =
358 container_of(work, struct rxrpc_net, peer_keepalive_work);
359 struct rxrpc_peer *peer;
360 unsigned long delay;
361 ktime_t base, now = ktime_get_real();
362 s64 diff;
363 u8 cursor, slot;
364
365 base = rxnet->peer_keepalive_base;
366 cursor = rxnet->peer_keepalive_cursor;
367
368 _enter("%u,%lld", cursor, ktime_sub(now, base));
369
370next_bucket:
371 diff = ktime_to_ns(ktime_sub(now, base));
372 if (diff < 0)
373 goto resched;
374
375 _debug("at %u", cursor);
376 spin_lock_bh(&rxnet->peer_hash_lock);
377next_peer:
378 if (!rxnet->live) {
379 spin_unlock_bh(&rxnet->peer_hash_lock);
380 goto out;
381 }
382
383 /* Everything in the bucket at the cursor is processed this second; the
384 * bucket at cursor + 1 goes now + 1s and so on...
385 */
386 if (hlist_empty(&rxnet->peer_keepalive[cursor])) {
387 if (hlist_empty(&rxnet->peer_keepalive_new)) {
388 spin_unlock_bh(&rxnet->peer_hash_lock);
389 goto emptied_bucket;
390 }
391
392 hlist_move_list(&rxnet->peer_keepalive_new,
393 &rxnet->peer_keepalive[cursor]);
394 }
395
396 peer = hlist_entry(rxnet->peer_keepalive[cursor].first,
397 struct rxrpc_peer, keepalive_link);
398 hlist_del_init(&peer->keepalive_link);
399 if (!rxrpc_get_peer_maybe(peer))
400 goto next_peer;
401
402 spin_unlock_bh(&rxnet->peer_hash_lock);
403
404 _debug("peer %u {%pISp}", peer->debug_id, &peer->srx.transport);
405
406recalc:
407 diff = ktime_divns(ktime_sub(peer->last_tx_at, base), NSEC_PER_SEC);
408 if (diff < -30 || diff > 30)
409 goto send; /* LSW of 64-bit time probably wrapped on 32-bit */
410 diff += RXRPC_KEEPALIVE_TIME - 1;
411 if (diff < 0)
412 goto send;
413
414 slot = (diff > RXRPC_KEEPALIVE_TIME - 1) ? RXRPC_KEEPALIVE_TIME - 1 : diff;
415 if (slot == 0)
416 goto send;
417
418 /* A transmission to this peer occurred since last we examined it so
419 * put it into the appropriate future bucket.
420 */
421 slot = (slot + cursor) % ARRAY_SIZE(rxnet->peer_keepalive);
422 spin_lock_bh(&rxnet->peer_hash_lock);
423 hlist_add_head(&peer->keepalive_link, &rxnet->peer_keepalive[slot]);
424 rxrpc_put_peer(peer);
425 goto next_peer;
426
427send:
428 rxrpc_send_keepalive(peer);
429 now = ktime_get_real();
430 goto recalc;
431
432emptied_bucket:
433 cursor++;
434 if (cursor >= ARRAY_SIZE(rxnet->peer_keepalive))
435 cursor = 0;
436 base = ktime_add_ns(base, NSEC_PER_SEC);
437 goto next_bucket;
438
439resched:
440 rxnet->peer_keepalive_base = base;
441 rxnet->peer_keepalive_cursor = cursor;
442 delay = nsecs_to_jiffies(-diff) + 1;
443 timer_reduce(&rxnet->peer_keepalive_timer, jiffies + delay);
444out:
445 _leave("");
446}
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index d02a99f37f5f..94a6dbfcf129 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -322,6 +322,7 @@ struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *local,
322 if (!peer) { 322 if (!peer) {
323 peer = prealloc; 323 peer = prealloc;
324 hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key); 324 hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
325 hlist_add_head(&peer->keepalive_link, &rxnet->peer_keepalive_new);
325 } 326 }
326 327
327 spin_unlock(&rxnet->peer_hash_lock); 328 spin_unlock(&rxnet->peer_hash_lock);
@@ -363,9 +364,12 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
363 peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key); 364 peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
364 if (peer && !rxrpc_get_peer_maybe(peer)) 365 if (peer && !rxrpc_get_peer_maybe(peer))
365 peer = NULL; 366 peer = NULL;
366 if (!peer) 367 if (!peer) {
367 hash_add_rcu(rxnet->peer_hash, 368 hash_add_rcu(rxnet->peer_hash,
368 &candidate->hash_link, hash_key); 369 &candidate->hash_link, hash_key);
370 hlist_add_head(&candidate->keepalive_link,
371 &rxnet->peer_keepalive_new);
372 }
369 373
370 spin_unlock_bh(&rxnet->peer_hash_lock); 374 spin_unlock_bh(&rxnet->peer_hash_lock);
371 375
@@ -392,6 +396,7 @@ void __rxrpc_put_peer(struct rxrpc_peer *peer)
392 396
393 spin_lock_bh(&rxnet->peer_hash_lock); 397 spin_lock_bh(&rxnet->peer_hash_lock);
394 hash_del_rcu(&peer->hash_link); 398 hash_del_rcu(&peer->hash_link);
399 hlist_del_init(&peer->keepalive_link);
395 spin_unlock_bh(&rxnet->peer_hash_lock); 400 spin_unlock_bh(&rxnet->peer_hash_lock);
396 401
397 kfree_rcu(peer, rcu); 402 kfree_rcu(peer, rcu);
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 77cb23c7bd0a..588fea0dd362 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -668,6 +668,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
668 return -EAGAIN; 668 return -EAGAIN;
669 } 669 }
670 670
671 conn->params.peer->last_tx_at = ktime_get_real();
671 _leave(" = 0"); 672 _leave(" = 0");
672 return 0; 673 return 0;
673} 674}
@@ -722,6 +723,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
722 return -EAGAIN; 723 return -EAGAIN;
723 } 724 }
724 725
726 conn->params.peer->last_tx_at = ktime_get_real();
725 _leave(" = 0"); 727 _leave(" = 0");
726 return 0; 728 return 0;
727} 729}