about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author	David Howells <dhowells@redhat.com>	2018-08-08 06:30:02 -0400
committer	David S. Miller <davem@davemloft.net>	2018-08-08 22:10:26 -0400
commit	330bdcfadceea5e9a1526d731711e163f9a90975 (patch)
tree	bbe3790891058a942b1cc833888f7f9bec6e1a9a
parent	f39cc1c7f32dd93d19fb14dbf973fd2b5c0f7103 (diff)
rxrpc: Fix the keepalive generator [ver #2]
AF_RXRPC has a keepalive message generator that generates a message for a peer ~20s after the last transmission to that peer to keep firewall ports open. The implementation is incorrect in the following ways: (1) It mixes up ktime_t and time64_t types. (2) It uses ktime_get_real(), the output of which may jump forward or backward due to adjustments to the time of day. (3) If the current time jumps forward too much or jumps backwards, the generator function will crank the base of the time ring round one slot at a time (ie. a 1s period) until it catches up, spewing out VERSION packets as it goes. Fix the problem by: (1) Only using time64_t. There's no need for sub-second resolution. (2) Use ktime_get_seconds() rather than ktime_get_real() so that time isn't perceived to go backwards. (3) Simplifying rxrpc_peer_keepalive_worker() by splitting it into two parts: (a) The "worker" function that manages the buckets and the timer. (b) The "dispatch" function that takes the pending peers and potentially transmits a keepalive packet before putting them back in the ring into the slot appropriate to the revised last-Tx time. (4) Taking everything that's pending out of the ring and splicing it into a temporary collector list for processing. In the case that there's been a significant jump forward, the ring gets entirely emptied and then the time base can be warped forward before the peers are processed. The warping can't happen if the ring isn't empty because the slot a peer is in is keepalive-time dependent, relative to the base time. (5) Limit the number of iterations of the bucket array when scanning it. (6) Set the timer to skip any empty slots as there's no point waking up if there's nothing to do yet. This can be triggered by an incoming call from a server after a reboot with AF_RXRPC and AFS built into the kernel causing a peer record to be set up before userspace is started. 
The system clock is then adjusted by userspace, thereby potentially causing the keepalive generator to have a meltdown - which leads to a message like: watchdog: BUG: soft lockup - CPU#0 stuck for 23s! [kworker/0:1:23] ... Workqueue: krxrpcd rxrpc_peer_keepalive_worker EIP: lock_acquire+0x69/0x80 ... Call Trace: ? rxrpc_peer_keepalive_worker+0x5e/0x350 ? _raw_spin_lock_bh+0x29/0x60 ? rxrpc_peer_keepalive_worker+0x5e/0x350 ? rxrpc_peer_keepalive_worker+0x5e/0x350 ? __lock_acquire+0x3d3/0x870 ? process_one_work+0x110/0x340 ? process_one_work+0x166/0x340 ? process_one_work+0x110/0x340 ? worker_thread+0x39/0x3c0 ? kthread+0xdb/0x110 ? cancel_delayed_work+0x90/0x90 ? kthread_stop+0x70/0x70 ? ret_from_fork+0x19/0x24 Fixes: ace45bec6d77 ("rxrpc: Fix firewall route keepalive") Reported-by: kernel test robot <lkp@intel.com> Signed-off-by: David Howells <dhowells@redhat.com> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--net/rxrpc/ar-internal.h8
-rw-r--r--net/rxrpc/conn_event.c4
-rw-r--r--net/rxrpc/net_ns.c6
-rw-r--r--net/rxrpc/output.c12
-rw-r--r--net/rxrpc/peer_event.c156
-rw-r--r--net/rxrpc/peer_object.c8
-rw-r--r--net/rxrpc/rxkad.c4
7 files changed, 109 insertions, 89 deletions
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 5fb7d3254d9e..707630ab4713 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -104,9 +104,9 @@ struct rxrpc_net {
104 104
105#define RXRPC_KEEPALIVE_TIME 20 /* NAT keepalive time in seconds */ 105#define RXRPC_KEEPALIVE_TIME 20 /* NAT keepalive time in seconds */
106 u8 peer_keepalive_cursor; 106 u8 peer_keepalive_cursor;
107 ktime_t peer_keepalive_base; 107 time64_t peer_keepalive_base;
108 struct hlist_head peer_keepalive[RXRPC_KEEPALIVE_TIME + 1]; 108 struct list_head peer_keepalive[32];
109 struct hlist_head peer_keepalive_new; 109 struct list_head peer_keepalive_new;
110 struct timer_list peer_keepalive_timer; 110 struct timer_list peer_keepalive_timer;
111 struct work_struct peer_keepalive_work; 111 struct work_struct peer_keepalive_work;
112}; 112};
@@ -295,7 +295,7 @@ struct rxrpc_peer {
295 struct hlist_head error_targets; /* targets for net error distribution */ 295 struct hlist_head error_targets; /* targets for net error distribution */
296 struct work_struct error_distributor; 296 struct work_struct error_distributor;
297 struct rb_root service_conns; /* Service connections */ 297 struct rb_root service_conns; /* Service connections */
298 struct hlist_node keepalive_link; /* Link in net->peer_keepalive[] */ 298 struct list_head keepalive_link; /* Link in net->peer_keepalive[] */
299 time64_t last_tx_at; /* Last time packet sent here */ 299 time64_t last_tx_at; /* Last time packet sent here */
300 seqlock_t service_conn_lock; 300 seqlock_t service_conn_lock;
301 spinlock_t lock; /* access lock */ 301 spinlock_t lock; /* access lock */
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index 8229a52c2acd..3fde001fcc39 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -136,7 +136,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
136 } 136 }
137 137
138 ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, ioc, len); 138 ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, ioc, len);
139 conn->params.peer->last_tx_at = ktime_get_real(); 139 conn->params.peer->last_tx_at = ktime_get_seconds();
140 if (ret < 0) 140 if (ret < 0)
141 trace_rxrpc_tx_fail(conn->debug_id, serial, ret, 141 trace_rxrpc_tx_fail(conn->debug_id, serial, ret,
142 rxrpc_tx_fail_call_final_resend); 142 rxrpc_tx_fail_call_final_resend);
@@ -245,7 +245,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
245 return -EAGAIN; 245 return -EAGAIN;
246 } 246 }
247 247
248 conn->params.peer->last_tx_at = ktime_get_real(); 248 conn->params.peer->last_tx_at = ktime_get_seconds();
249 249
250 _leave(" = 0"); 250 _leave(" = 0");
251 return 0; 251 return 0;
diff --git a/net/rxrpc/net_ns.c b/net/rxrpc/net_ns.c
index 5d6a773db973..417d80867c4f 100644
--- a/net/rxrpc/net_ns.c
+++ b/net/rxrpc/net_ns.c
@@ -85,12 +85,12 @@ static __net_init int rxrpc_init_net(struct net *net)
85 hash_init(rxnet->peer_hash); 85 hash_init(rxnet->peer_hash);
86 spin_lock_init(&rxnet->peer_hash_lock); 86 spin_lock_init(&rxnet->peer_hash_lock);
87 for (i = 0; i < ARRAY_SIZE(rxnet->peer_keepalive); i++) 87 for (i = 0; i < ARRAY_SIZE(rxnet->peer_keepalive); i++)
88 INIT_HLIST_HEAD(&rxnet->peer_keepalive[i]); 88 INIT_LIST_HEAD(&rxnet->peer_keepalive[i]);
89 INIT_HLIST_HEAD(&rxnet->peer_keepalive_new); 89 INIT_LIST_HEAD(&rxnet->peer_keepalive_new);
90 timer_setup(&rxnet->peer_keepalive_timer, 90 timer_setup(&rxnet->peer_keepalive_timer,
91 rxrpc_peer_keepalive_timeout, 0); 91 rxrpc_peer_keepalive_timeout, 0);
92 INIT_WORK(&rxnet->peer_keepalive_work, rxrpc_peer_keepalive_worker); 92 INIT_WORK(&rxnet->peer_keepalive_work, rxrpc_peer_keepalive_worker);
93 rxnet->peer_keepalive_base = ktime_add(ktime_get_real(), NSEC_PER_SEC); 93 rxnet->peer_keepalive_base = ktime_get_seconds();
94 94
95 ret = -ENOMEM; 95 ret = -ENOMEM;
96 rxnet->proc_net = proc_net_mkdir(net, "rxrpc", net->proc_net); 96 rxnet->proc_net = proc_net_mkdir(net, "rxrpc", net->proc_net);
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index f03de1c59ba3..4774c8f5634d 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -209,7 +209,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
209 now = ktime_get_real(); 209 now = ktime_get_real();
210 if (ping) 210 if (ping)
211 call->ping_time = now; 211 call->ping_time = now;
212 conn->params.peer->last_tx_at = ktime_get_real(); 212 conn->params.peer->last_tx_at = ktime_get_seconds();
213 if (ret < 0) 213 if (ret < 0)
214 trace_rxrpc_tx_fail(call->debug_id, serial, ret, 214 trace_rxrpc_tx_fail(call->debug_id, serial, ret,
215 rxrpc_tx_fail_call_ack); 215 rxrpc_tx_fail_call_ack);
@@ -296,7 +296,7 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
296 296
297 ret = kernel_sendmsg(conn->params.local->socket, 297 ret = kernel_sendmsg(conn->params.local->socket,
298 &msg, iov, 1, sizeof(pkt)); 298 &msg, iov, 1, sizeof(pkt));
299 conn->params.peer->last_tx_at = ktime_get_real(); 299 conn->params.peer->last_tx_at = ktime_get_seconds();
300 if (ret < 0) 300 if (ret < 0)
301 trace_rxrpc_tx_fail(call->debug_id, serial, ret, 301 trace_rxrpc_tx_fail(call->debug_id, serial, ret,
302 rxrpc_tx_fail_call_abort); 302 rxrpc_tx_fail_call_abort);
@@ -391,7 +391,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
391 * message and update the peer record 391 * message and update the peer record
392 */ 392 */
393 ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len); 393 ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
394 conn->params.peer->last_tx_at = ktime_get_real(); 394 conn->params.peer->last_tx_at = ktime_get_seconds();
395 395
396 up_read(&conn->params.local->defrag_sem); 396 up_read(&conn->params.local->defrag_sem);
397 if (ret < 0) 397 if (ret < 0)
@@ -457,7 +457,7 @@ send_fragmentable:
457 if (ret == 0) { 457 if (ret == 0) {
458 ret = kernel_sendmsg(conn->params.local->socket, &msg, 458 ret = kernel_sendmsg(conn->params.local->socket, &msg,
459 iov, 2, len); 459 iov, 2, len);
460 conn->params.peer->last_tx_at = ktime_get_real(); 460 conn->params.peer->last_tx_at = ktime_get_seconds();
461 461
462 opt = IP_PMTUDISC_DO; 462 opt = IP_PMTUDISC_DO;
463 kernel_setsockopt(conn->params.local->socket, SOL_IP, 463 kernel_setsockopt(conn->params.local->socket, SOL_IP,
@@ -475,7 +475,7 @@ send_fragmentable:
475 if (ret == 0) { 475 if (ret == 0) {
476 ret = kernel_sendmsg(conn->params.local->socket, &msg, 476 ret = kernel_sendmsg(conn->params.local->socket, &msg,
477 iov, 2, len); 477 iov, 2, len);
478 conn->params.peer->last_tx_at = ktime_get_real(); 478 conn->params.peer->last_tx_at = ktime_get_seconds();
479 479
480 opt = IPV6_PMTUDISC_DO; 480 opt = IPV6_PMTUDISC_DO;
481 kernel_setsockopt(conn->params.local->socket, 481 kernel_setsockopt(conn->params.local->socket,
@@ -599,6 +599,6 @@ void rxrpc_send_keepalive(struct rxrpc_peer *peer)
599 trace_rxrpc_tx_fail(peer->debug_id, 0, ret, 599 trace_rxrpc_tx_fail(peer->debug_id, 0, ret,
600 rxrpc_tx_fail_version_keepalive); 600 rxrpc_tx_fail_version_keepalive);
601 601
602 peer->last_tx_at = ktime_get_real(); 602 peer->last_tx_at = ktime_get_seconds();
603 _leave(""); 603 _leave("");
604} 604}
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index 0ed8b651cec2..4f9da2f51c69 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -350,97 +350,117 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
350} 350}
351 351
352/* 352/*
353 * Perform keep-alive pings with VERSION packets to keep any NAT alive. 353 * Perform keep-alive pings.
354 */ 354 */
355void rxrpc_peer_keepalive_worker(struct work_struct *work) 355static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
356 struct list_head *collector,
357 time64_t base,
358 u8 cursor)
356{ 359{
357 struct rxrpc_net *rxnet =
358 container_of(work, struct rxrpc_net, peer_keepalive_work);
359 struct rxrpc_peer *peer; 360 struct rxrpc_peer *peer;
360 unsigned long delay; 361 const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
361 ktime_t base, now = ktime_get_real(); 362 time64_t keepalive_at;
362 s64 diff; 363 int slot;
363 u8 cursor, slot;
364 364
365 base = rxnet->peer_keepalive_base; 365 spin_lock_bh(&rxnet->peer_hash_lock);
366 cursor = rxnet->peer_keepalive_cursor;
367 366
368 _enter("%u,%lld", cursor, ktime_sub(now, base)); 367 while (!list_empty(collector)) {
368 peer = list_entry(collector->next,
369 struct rxrpc_peer, keepalive_link);
369 370
370next_bucket: 371 list_del_init(&peer->keepalive_link);
371 diff = ktime_to_ns(ktime_sub(now, base)); 372 if (!rxrpc_get_peer_maybe(peer))
372 if (diff < 0) 373 continue;
373 goto resched;
374 374
375 _debug("at %u", cursor);
376 spin_lock_bh(&rxnet->peer_hash_lock);
377next_peer:
378 if (!rxnet->live) {
379 spin_unlock_bh(&rxnet->peer_hash_lock); 375 spin_unlock_bh(&rxnet->peer_hash_lock);
380 goto out;
381 }
382 376
383 /* Everything in the bucket at the cursor is processed this second; the 377 keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
384 * bucket at cursor + 1 goes now + 1s and so on... 378 slot = keepalive_at - base;
385 */ 379 _debug("%02x peer %u t=%d {%pISp}",
386 if (hlist_empty(&rxnet->peer_keepalive[cursor])) { 380 cursor, peer->debug_id, slot, &peer->srx.transport);
387 if (hlist_empty(&rxnet->peer_keepalive_new)) { 381
388 spin_unlock_bh(&rxnet->peer_hash_lock); 382 if (keepalive_at <= base ||
389 goto emptied_bucket; 383 keepalive_at > base + RXRPC_KEEPALIVE_TIME) {
384 rxrpc_send_keepalive(peer);
385 slot = RXRPC_KEEPALIVE_TIME;
390 } 386 }
391 387
392 hlist_move_list(&rxnet->peer_keepalive_new, 388 /* A transmission to this peer occurred since last we examined
393 &rxnet->peer_keepalive[cursor]); 389 * it so put it into the appropriate future bucket.
390 */
391 slot += cursor;
392 slot &= mask;
393 spin_lock_bh(&rxnet->peer_hash_lock);
394 list_add_tail(&peer->keepalive_link,
395 &rxnet->peer_keepalive[slot & mask]);
396 rxrpc_put_peer(peer);
394 } 397 }
395 398
396 peer = hlist_entry(rxnet->peer_keepalive[cursor].first,
397 struct rxrpc_peer, keepalive_link);
398 hlist_del_init(&peer->keepalive_link);
399 if (!rxrpc_get_peer_maybe(peer))
400 goto next_peer;
401
402 spin_unlock_bh(&rxnet->peer_hash_lock); 399 spin_unlock_bh(&rxnet->peer_hash_lock);
400}
403 401
404 _debug("peer %u {%pISp}", peer->debug_id, &peer->srx.transport); 402/*
403 * Perform keep-alive pings with VERSION packets to keep any NAT alive.
404 */
405void rxrpc_peer_keepalive_worker(struct work_struct *work)
406{
407 struct rxrpc_net *rxnet =
408 container_of(work, struct rxrpc_net, peer_keepalive_work);
409 const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
410 time64_t base, now, delay;
411 u8 cursor, stop;
412 LIST_HEAD(collector);
405 413
406recalc: 414 now = ktime_get_seconds();
407 diff = ktime_divns(ktime_sub(peer->last_tx_at, base), NSEC_PER_SEC); 415 base = rxnet->peer_keepalive_base;
408 if (diff < -30 || diff > 30) 416 cursor = rxnet->peer_keepalive_cursor;
409 goto send; /* LSW of 64-bit time probably wrapped on 32-bit */ 417 _enter("%lld,%u", base - now, cursor);
410 diff += RXRPC_KEEPALIVE_TIME - 1;
411 if (diff < 0)
412 goto send;
413 418
414 slot = (diff > RXRPC_KEEPALIVE_TIME - 1) ? RXRPC_KEEPALIVE_TIME - 1 : diff; 419 if (!rxnet->live)
415 if (slot == 0) 420 return;
416 goto send;
417 421
418 /* A transmission to this peer occurred since last we examined it so 422 /* Remove to a temporary list all the peers that are currently lodged
419 * put it into the appropriate future bucket. 423 * in expired buckets plus all new peers.
424 *
425 * Everything in the bucket at the cursor is processed this
426 * second; the bucket at cursor + 1 goes at now + 1s and so
427 * on...
420 */ 428 */
421 slot = (slot + cursor) % ARRAY_SIZE(rxnet->peer_keepalive);
422 spin_lock_bh(&rxnet->peer_hash_lock); 429 spin_lock_bh(&rxnet->peer_hash_lock);
423 hlist_add_head(&peer->keepalive_link, &rxnet->peer_keepalive[slot]); 430 list_splice_init(&rxnet->peer_keepalive_new, &collector);
424 rxrpc_put_peer(peer); 431
425 goto next_peer; 432 stop = cursor + ARRAY_SIZE(rxnet->peer_keepalive);
426 433 while (base <= now && (s8)(cursor - stop) < 0) {
427send: 434 list_splice_tail_init(&rxnet->peer_keepalive[cursor & mask],
428 rxrpc_send_keepalive(peer); 435 &collector);
429 now = ktime_get_real(); 436 base++;
430 goto recalc; 437 cursor++;
438 }
431 439
432emptied_bucket: 440 base = now;
433 cursor++; 441 spin_unlock_bh(&rxnet->peer_hash_lock);
434 if (cursor >= ARRAY_SIZE(rxnet->peer_keepalive))
435 cursor = 0;
436 base = ktime_add_ns(base, NSEC_PER_SEC);
437 goto next_bucket;
438 442
439resched:
440 rxnet->peer_keepalive_base = base; 443 rxnet->peer_keepalive_base = base;
441 rxnet->peer_keepalive_cursor = cursor; 444 rxnet->peer_keepalive_cursor = cursor;
442 delay = nsecs_to_jiffies(-diff) + 1; 445 rxrpc_peer_keepalive_dispatch(rxnet, &collector, base, cursor);
443 timer_reduce(&rxnet->peer_keepalive_timer, jiffies + delay); 446 ASSERT(list_empty(&collector));
444out: 447
448 /* Schedule the timer for the next occupied timeslot. */
449 cursor = rxnet->peer_keepalive_cursor;
450 stop = cursor + RXRPC_KEEPALIVE_TIME - 1;
451 for (; (s8)(cursor - stop) < 0; cursor++) {
452 if (!list_empty(&rxnet->peer_keepalive[cursor & mask]))
453 break;
454 base++;
455 }
456
457 now = ktime_get_seconds();
458 delay = base - now;
459 if (delay < 1)
460 delay = 1;
461 delay *= HZ;
462 if (rxnet->live)
463 timer_reduce(&rxnet->peer_keepalive_timer, jiffies + delay);
464
445 _leave(""); 465 _leave("");
446} 466}
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 1b7e8107b3ae..24ec7cdcf332 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -322,7 +322,7 @@ struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *local,
322 if (!peer) { 322 if (!peer) {
323 peer = prealloc; 323 peer = prealloc;
324 hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key); 324 hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
325 hlist_add_head(&peer->keepalive_link, &rxnet->peer_keepalive_new); 325 list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new);
326 } 326 }
327 327
328 spin_unlock(&rxnet->peer_hash_lock); 328 spin_unlock(&rxnet->peer_hash_lock);
@@ -367,8 +367,8 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
367 if (!peer) { 367 if (!peer) {
368 hash_add_rcu(rxnet->peer_hash, 368 hash_add_rcu(rxnet->peer_hash,
369 &candidate->hash_link, hash_key); 369 &candidate->hash_link, hash_key);
370 hlist_add_head(&candidate->keepalive_link, 370 list_add_tail(&candidate->keepalive_link,
371 &rxnet->peer_keepalive_new); 371 &rxnet->peer_keepalive_new);
372 } 372 }
373 373
374 spin_unlock_bh(&rxnet->peer_hash_lock); 374 spin_unlock_bh(&rxnet->peer_hash_lock);
@@ -441,7 +441,7 @@ static void __rxrpc_put_peer(struct rxrpc_peer *peer)
441 441
442 spin_lock_bh(&rxnet->peer_hash_lock); 442 spin_lock_bh(&rxnet->peer_hash_lock);
443 hash_del_rcu(&peer->hash_link); 443 hash_del_rcu(&peer->hash_link);
444 hlist_del_init(&peer->keepalive_link); 444 list_del_init(&peer->keepalive_link);
445 spin_unlock_bh(&rxnet->peer_hash_lock); 445 spin_unlock_bh(&rxnet->peer_hash_lock);
446 446
447 kfree_rcu(peer, rcu); 447 kfree_rcu(peer, rcu);
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 278ac0807a60..47cb019c521a 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -669,7 +669,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
669 return -EAGAIN; 669 return -EAGAIN;
670 } 670 }
671 671
672 conn->params.peer->last_tx_at = ktime_get_real(); 672 conn->params.peer->last_tx_at = ktime_get_seconds();
673 _leave(" = 0"); 673 _leave(" = 0");
674 return 0; 674 return 0;
675} 675}
@@ -725,7 +725,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
725 return -EAGAIN; 725 return -EAGAIN;
726 } 726 }
727 727
728 conn->params.peer->last_tx_at = ktime_get_real(); 728 conn->params.peer->last_tx_at = ktime_get_seconds();
729 _leave(" = 0"); 729 _leave(" = 0");
730 return 0; 730 return 0;
731} 731}