author     David Howells <dhowells@redhat.com>   2016-09-30 04:13:50 -0400
committer  David Howells <dhowells@redhat.com>   2016-09-30 09:40:11 -0400
commit     405dea1debeb9956684de342903bba9ddd52f1cb
tree       156258ace668125e5c2f120be62710ec8c21dfa8
parent     df0adc788ae74e35ab1a79f3db878df7fdc7db55
rxrpc: Fix the call timer handling
The call timer's concept of an inactive call timeout (of which there are
three) is that the timeout has the same expiration time as the call
expiration timeout (the expiration timeout itself is never inactive).
However, the timeouts aren't reset when they expire, leading to repeated
processing of expired timeouts when other timeout events occur.

Fix this by:

 (1) Move the timer expiry detection into rxrpc_set_timer() inside the
     locked section.  This means that if a timeout is set that will expire
     immediately, we deal with it immediately.

 (2) If a timeout is at or before now then it has expired.  When an expiry
     is detected, an event is raised, the timeout is automatically
     inactivated and the event processor is queued.

 (3) If a timeout is at or after the expiry timeout then it is inactive.
     Inactive timeouts do not contribute to the timer setting.

 (4) The call timer callback can now just call rxrpc_set_timer() to handle
     things.

 (5) The call processor work function now checks the event flags rather
     than checking the timeouts directly.

Signed-off-by: David Howells <dhowells@redhat.com>
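For illustration only, a minimal userspace sketch of the timeout convention
adopted above: a timeout is inactive when it equals the call expiration
time, a timeout at or before now has expired and raises its event, and only
active timeouts shorten the timer.  The struct, the set_timer_sketch()
helper and the plain int64_t timestamps are stand-ins for the kernel's
rxrpc_call and ktime_t handling, not the actual implementation.

/* Sketch only: plain integers stand in for ktime_t, and the names below
 * (call_sketch, set_timer_sketch) are illustrative, not the kernel's.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct call_sketch {
        int64_t expire_at;      /* hard call expiry; never inactive */
        int64_t ack_at;         /* inactive when equal to expire_at */
        int64_t resend_at;      /* inactive when equal to expire_at */
        bool    ev_ack;         /* event flags checked by the processor */
        bool    ev_resend;
};

/* Work out the next firing time.  An expired timeout (at or before now)
 * raises its event and is pegged back to expire_at; an inactive timeout
 * does not contribute to the timer setting.
 */
static int64_t set_timer_sketch(struct call_sketch *c, int64_t now, bool *queue)
{
        int64_t t = c->expire_at;

        *queue = false;

        if (c->resend_at <= now) {
                c->resend_at = c->expire_at;
                if (!c->ev_resend) {
                        c->ev_resend = true;
                        *queue = true;  /* queue the event processor once */
                }
        } else if (c->resend_at < t) {
                t = c->resend_at;
        }

        if (c->ack_at <= now) {
                c->ack_at = c->expire_at;
                if (!c->ev_ack) {
                        c->ev_ack = true;
                        *queue = true;
                }
        } else if (c->ack_at < t) {
                t = c->ack_at;
        }

        return t;
}

int main(void)
{
        struct call_sketch c = {
                .expire_at = 1000, .ack_at = 50, .resend_at = 1000,
        };
        bool queue;

        /* At now=100 the ACK timeout (50) has already passed: the event is
         * raised, ack_at is pegged to expire_at and the work item would be
         * queued.  The resend timeout equals expire_at, so it is inactive
         * and the timer falls back to the hard expiry.
         */
        int64_t t = set_timer_sketch(&c, 100, &queue);
        printf("next timer at %lld, queue=%d, ack_at=%lld\n",
               (long long)t, queue, (long long)c.ack_at);
        return 0;
}

Note how ack_at lands back on expire_at once it fires, so a later pass
through the function doesn't reprocess the same expiry; that mirrors
point (2) above.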
-rw-r--r--   net/rxrpc/call_event.c    26
-rw-r--r--   net/rxrpc/call_object.c    7
2 files changed, 20 insertions, 13 deletions
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index 9ff3bb3ffb41..4f00476630b9 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -29,6 +29,7 @@ void rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why,
 {
         unsigned long t_j, now_j = jiffies;
         ktime_t t;
+        bool queue = false;
 
         read_lock_bh(&call->state_lock);
 
@@ -37,13 +38,21 @@ void rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why,
                 if (!ktime_after(t, now))
                         goto out;
 
-                if (ktime_after(call->resend_at, now) &&
-                    ktime_before(call->resend_at, t))
+                if (!ktime_after(call->resend_at, now)) {
+                        call->resend_at = call->expire_at;
+                        if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
+                                queue = true;
+                } else if (ktime_before(call->resend_at, t)) {
                         t = call->resend_at;
+                }
 
-                if (ktime_after(call->ack_at, now) &&
-                    ktime_before(call->ack_at, t))
+                if (!ktime_after(call->ack_at, now)) {
+                        call->ack_at = call->expire_at;
+                        if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
+                                queue = true;
+                } else if (ktime_before(call->ack_at, t)) {
                         t = call->ack_at;
+                }
 
                 t_j = nsecs_to_jiffies(ktime_to_ns(ktime_sub(t, now)));
                 t_j += jiffies;
@@ -59,6 +68,9 @@ void rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why,
                         mod_timer(&call->timer, t_j);
                         trace_rxrpc_timer(call, why, now, now_j);
                 }
+
+                if (queue)
+                        rxrpc_queue_call(call);
         }
 
 out:
@@ -332,8 +344,7 @@ recheck_state:
                 goto recheck_state;
         }
 
-        if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events) ||
-            ktime_before(call->ack_at, now)) {
+        if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events)) {
                 call->ack_at = call->expire_at;
                 if (call->ackr_reason) {
                         rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ACK);
@@ -341,8 +352,7 @@ recheck_state:
                 }
         }
 
-        if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events) ||
-            ktime_before(call->resend_at, now)) {
+        if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events)) {
                 rxrpc_resend(call, now);
                 goto recheck_state;
         }
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 456ab752d473..364b42dc3dce 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -71,11 +71,8 @@ static void rxrpc_call_timer_expired(unsigned long _call)
 
         _enter("%d", call->debug_id);
 
-        if (call->state < RXRPC_CALL_COMPLETE) {
-                trace_rxrpc_timer(call, rxrpc_timer_expired,
-                                  ktime_get_real(), jiffies);
-                rxrpc_queue_call(call);
-        }
+        if (call->state < RXRPC_CALL_COMPLETE)
+                rxrpc_set_timer(call, rxrpc_timer_expired, ktime_get_real());
 }
 
 /*