Diffstat (limited to 'net/rxrpc/peer_event.c')
 -rw-r--r--  net/rxrpc/peer_event.c | 156
 1 file changed, 88 insertions(+), 68 deletions(-)
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index 0ed8b651cec2..4f9da2f51c69 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -350,97 +350,117 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
 }
 
 /*
- * Perform keep-alive pings with VERSION packets to keep any NAT alive.
+ * Perform keep-alive pings.
  */
-void rxrpc_peer_keepalive_worker(struct work_struct *work)
+static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
+                                          struct list_head *collector,
+                                          time64_t base,
+                                          u8 cursor)
 {
-        struct rxrpc_net *rxnet =
-                container_of(work, struct rxrpc_net, peer_keepalive_work);
         struct rxrpc_peer *peer;
-        unsigned long delay;
-        ktime_t base, now = ktime_get_real();
-        s64 diff;
-        u8 cursor, slot;
+        const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
+        time64_t keepalive_at;
+        int slot;
 
-        base = rxnet->peer_keepalive_base;
-        cursor = rxnet->peer_keepalive_cursor;
+        spin_lock_bh(&rxnet->peer_hash_lock);
 
-        _enter("%u,%lld", cursor, ktime_sub(now, base));
+        while (!list_empty(collector)) {
+                peer = list_entry(collector->next,
+                                  struct rxrpc_peer, keepalive_link);
 
-next_bucket:
-        diff = ktime_to_ns(ktime_sub(now, base));
-        if (diff < 0)
-                goto resched;
+                list_del_init(&peer->keepalive_link);
+                if (!rxrpc_get_peer_maybe(peer))
+                        continue;
 
-        _debug("at %u", cursor);
-        spin_lock_bh(&rxnet->peer_hash_lock);
-next_peer:
-        if (!rxnet->live) {
                 spin_unlock_bh(&rxnet->peer_hash_lock);
-                goto out;
-        }
 
-        /* Everything in the bucket at the cursor is processed this second; the
-         * bucket at cursor + 1 goes now + 1s and so on...
-         */
-        if (hlist_empty(&rxnet->peer_keepalive[cursor])) {
-                if (hlist_empty(&rxnet->peer_keepalive_new)) {
-                        spin_unlock_bh(&rxnet->peer_hash_lock);
-                        goto emptied_bucket;
+                keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
+                slot = keepalive_at - base;
+                _debug("%02x peer %u t=%d {%pISp}",
+                       cursor, peer->debug_id, slot, &peer->srx.transport);
+
+                if (keepalive_at <= base ||
+                    keepalive_at > base + RXRPC_KEEPALIVE_TIME) {
+                        rxrpc_send_keepalive(peer);
+                        slot = RXRPC_KEEPALIVE_TIME;
                 }
 
-                hlist_move_list(&rxnet->peer_keepalive_new,
-                                &rxnet->peer_keepalive[cursor]);
+                /* A transmission to this peer occurred since last we examined
+                 * it so put it into the appropriate future bucket.
+                 */
+                slot += cursor;
+                slot &= mask;
+                spin_lock_bh(&rxnet->peer_hash_lock);
+                list_add_tail(&peer->keepalive_link,
+                              &rxnet->peer_keepalive[slot & mask]);
+                rxrpc_put_peer(peer);
         }
 
-        peer = hlist_entry(rxnet->peer_keepalive[cursor].first,
-                           struct rxrpc_peer, keepalive_link);
-        hlist_del_init(&peer->keepalive_link);
-        if (!rxrpc_get_peer_maybe(peer))
-                goto next_peer;
-
         spin_unlock_bh(&rxnet->peer_hash_lock);
+}
 
-        _debug("peer %u {%pISp}", peer->debug_id, &peer->srx.transport);
+/*
+ * Perform keep-alive pings with VERSION packets to keep any NAT alive.
+ */
+void rxrpc_peer_keepalive_worker(struct work_struct *work)
+{
+        struct rxrpc_net *rxnet =
+                container_of(work, struct rxrpc_net, peer_keepalive_work);
+        const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
+        time64_t base, now, delay;
+        u8 cursor, stop;
+        LIST_HEAD(collector);
 
-recalc:
-        diff = ktime_divns(ktime_sub(peer->last_tx_at, base), NSEC_PER_SEC);
-        if (diff < -30 || diff > 30)
-                goto send; /* LSW of 64-bit time probably wrapped on 32-bit */
-        diff += RXRPC_KEEPALIVE_TIME - 1;
-        if (diff < 0)
-                goto send;
+        now = ktime_get_seconds();
+        base = rxnet->peer_keepalive_base;
+        cursor = rxnet->peer_keepalive_cursor;
+        _enter("%lld,%u", base - now, cursor);
 
-        slot = (diff > RXRPC_KEEPALIVE_TIME - 1) ? RXRPC_KEEPALIVE_TIME - 1 : diff;
-        if (slot == 0)
-                goto send;
+        if (!rxnet->live)
+                return;
 
-        /* A transmission to this peer occurred since last we examined it so
-         * put it into the appropriate future bucket.
+        /* Remove to a temporary list all the peers that are currently lodged
+         * in expired buckets plus all new peers.
+         *
+         * Everything in the bucket at the cursor is processed this
+         * second; the bucket at cursor + 1 goes at now + 1s and so
+         * on...
          */
-        slot = (slot + cursor) % ARRAY_SIZE(rxnet->peer_keepalive);
         spin_lock_bh(&rxnet->peer_hash_lock);
-        hlist_add_head(&peer->keepalive_link, &rxnet->peer_keepalive[slot]);
-        rxrpc_put_peer(peer);
-        goto next_peer;
+        list_splice_init(&rxnet->peer_keepalive_new, &collector);
+
+        stop = cursor + ARRAY_SIZE(rxnet->peer_keepalive);
+        while (base <= now && (s8)(cursor - stop) < 0) {
+                list_splice_tail_init(&rxnet->peer_keepalive[cursor & mask],
+                                      &collector);
+                base++;
+                cursor++;
+        }
 
-send:
-        rxrpc_send_keepalive(peer);
-        now = ktime_get_real();
-        goto recalc;
+        base = now;
+        spin_unlock_bh(&rxnet->peer_hash_lock);
 
-emptied_bucket:
-        cursor++;
-        if (cursor >= ARRAY_SIZE(rxnet->peer_keepalive))
-                cursor = 0;
-        base = ktime_add_ns(base, NSEC_PER_SEC);
-        goto next_bucket;
-
-resched:
         rxnet->peer_keepalive_base = base;
         rxnet->peer_keepalive_cursor = cursor;
-        delay = nsecs_to_jiffies(-diff) + 1;
-        timer_reduce(&rxnet->peer_keepalive_timer, jiffies + delay);
-out:
+        rxrpc_peer_keepalive_dispatch(rxnet, &collector, base, cursor);
+        ASSERT(list_empty(&collector));
+
+        /* Schedule the timer for the next occupied timeslot. */
+        cursor = rxnet->peer_keepalive_cursor;
+        stop = cursor + RXRPC_KEEPALIVE_TIME - 1;
+        for (; (s8)(cursor - stop) < 0; cursor++) {
+                if (!list_empty(&rxnet->peer_keepalive[cursor & mask]))
+                        break;
+                base++;
+        }
+
+        now = ktime_get_seconds();
+        delay = base - now;
+        if (delay < 1)
+                delay = 1;
+        delay *= HZ;
+        if (rxnet->live)
+                timer_reduce(&rxnet->peer_keepalive_timer, jiffies + delay);
+
        _leave("");
 }
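
Note on the bucket arithmetic: the patch treats rxnet->peer_keepalive as a time wheel with one bucket per second, the cursor advancing one bucket per elapsed second, and "& mask" standing in for the old "%" reduction, which requires the array size to be a power of two. A minimal userspace sketch of the slot calculation done in rxrpc_peer_keepalive_dispatch(); WHEEL_SIZE, KEEPALIVE_TIME and wheel_slot() are illustrative stand-ins, not the kernel's definitions:

#include <stdio.h>

#define WHEEL_SIZE      32      /* must be a power of two for the mask trick */
#define KEEPALIVE_TIME  20      /* seconds between keepalive pings */

/* Pick the bucket a peer should sit in, given when its next keepalive
 * is due, the wheel's base second, and the current cursor position.
 */
static unsigned int wheel_slot(long long keepalive_at, long long base,
                               unsigned char cursor)
{
        const unsigned char mask = WHEEL_SIZE - 1;
        long long slot = keepalive_at - base;

        /* Already due, or implausibly far out: ping now and requeue a
         * full keepalive period ahead, as the dispatch loop does.
         */
        if (keepalive_at <= base || keepalive_at > base + KEEPALIVE_TIME)
                slot = KEEPALIVE_TIME;

        return (slot + cursor) & mask;
}

int main(void)
{
        long long base = 1000;

        printf("overdue peer -> bucket %u\n", wheel_slot(995, base, 3));
        printf("due in 7s    -> bucket %u\n", wheel_slot(1007, base, 3));
        return 0;
}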
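
The loop bounds in the worker ("stop = cursor + ARRAY_SIZE(...)" followed by "(s8)(cursor - stop) < 0") rely on u8 arithmetic wrapping modulo 256: viewed as a signed byte, the difference stays negative for exactly the intended number of steps even when cursor wraps past 0xff. A standalone demonstration; it depends on two's-complement behaviour for out-of-range signed casts, which the kernel assumes of its compilers:

#include <stdio.h>

typedef unsigned char u8;
typedef signed char s8;

int main(void)
{
        u8 cursor = 0xfa;       /* about to wrap */
        u8 stop = cursor + 32;  /* wraps around to 0x1a */
        int visited = 0;

        /* The cast to s8 makes the wrapped difference read as a small
         * negative number until cursor catches up with stop.
         */
        while ((s8)(cursor - stop) < 0) {
                visited++;
                cursor++;
        }
        printf("visited %d buckets, cursor ended at %#x\n", visited, cursor);
        return 0;
}

This prints "visited 32 buckets, cursor ended at 0x1a", confirming the walk covers the whole wheel once across the wrap.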
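
Rescheduling now happens in whole seconds: the worker scans forward for the next occupied bucket, clamps the delay to at least one second, converts it to jiffies with "delay *= HZ", and hands it to timer_reduce(), which only ever pulls a pending timer earlier, never pushes it later, so concurrent callers cannot postpone each other's keepalives. A toy model of that reduce-only contract; struct simple_timer and timer_reduce_sketch() are hypothetical, not the kernel's implementation:

#include <stdio.h>

struct simple_timer {
        int pending;            /* is a deadline armed? */
        unsigned long expires;  /* absolute deadline, in jiffies */
};

/* Mirror of timer_reduce()'s contract: arm the timer, or pull an
 * already-armed deadline earlier; never push it later.
 */
static void timer_reduce_sketch(struct simple_timer *t, unsigned long expires)
{
        if (!t->pending || expires < t->expires)
                t->expires = expires;
        t->pending = 1;
}

int main(void)
{
        struct simple_timer t = { 0, 0 };

        timer_reduce_sketch(&t, 500);   /* arms the timer at 500 */
        timer_reduce_sketch(&t, 700);   /* ignored: later than 500 */
        timer_reduce_sketch(&t, 300);   /* pulled earlier to 300 */
        printf("expires at %lu\n", t.expires);
        return 0;
}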
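
Finally, the collector pattern is what lets peer_hash_lock be dropped while pings are sent: every expired bucket plus peer_keepalive_new is spliced onto a local list under the lock, and dispatch then works through that private list, retaking the lock only to requeue each peer. A userspace sketch of what list_splice_tail_init() does to the intrusive list heads; the kernel's own version lives in <linux/list.h>, and this reimplementation is purely illustrative:

#include <stdio.h>

struct list_head {
        struct list_head *next, *prev;
};

#define LIST_HEAD_SKETCH(name) struct list_head name = { &(name), &(name) }

/* Move every entry on @list to the tail of @head and leave @list
 * empty, mirroring the kernel's list_splice_tail_init().
 */
static void splice_tail_init(struct list_head *list, struct list_head *head)
{
        if (list->next == list)         /* source already empty */
                return;

        list->next->prev = head->prev;  /* first source entry ... */
        head->prev->next = list->next;  /* ... follows the old tail */
        list->prev->next = head;        /* last source entry ... */
        head->prev = list->prev;        /* ... becomes the new tail */

        list->next = list->prev = list; /* reinitialise the source */
}

int main(void)
{
        LIST_HEAD_SKETCH(bucket);
        LIST_HEAD_SKETCH(collector);
        struct list_head peer_a, peer_b;

        /* Queue two "peers" in a bucket, then collect the whole bucket. */
        peer_a.prev = &bucket;  peer_a.next = &peer_b;
        peer_b.prev = &peer_a;  peer_b.next = &bucket;
        bucket.next = &peer_a;  bucket.prev = &peer_b;

        splice_tail_init(&bucket, &collector);
        printf("bucket empty: %d, collector populated: %d\n",
               bucket.next == &bucket, collector.next != &collector);
        return 0;
}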