diff options
Diffstat (limited to 'net/rxrpc/ar-call.c')
-rw-r--r-- | net/rxrpc/ar-call.c | 75 |
1 file changed, 46 insertions(+), 29 deletions(-)
diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
index ac31cceda2f1..4d92d88ff1fc 100644
--- a/net/rxrpc/ar-call.c
+++ b/net/rxrpc/ar-call.c
@@ -19,7 +19,7 @@ struct kmem_cache *rxrpc_call_jar;
19 | LIST_HEAD(rxrpc_calls); | 19 | LIST_HEAD(rxrpc_calls); |
20 | DEFINE_RWLOCK(rxrpc_call_lock); | 20 | DEFINE_RWLOCK(rxrpc_call_lock); |
21 | static unsigned rxrpc_call_max_lifetime = 60; | 21 | static unsigned rxrpc_call_max_lifetime = 60; |
22 | static unsigned rxrpc_dead_call_timeout = 10; | 22 | static unsigned rxrpc_dead_call_timeout = 2; |
23 | 23 | ||
24 | static void rxrpc_destroy_call(struct work_struct *work); | 24 | static void rxrpc_destroy_call(struct work_struct *work); |
25 | static void rxrpc_call_life_expired(unsigned long _call); | 25 | static void rxrpc_call_life_expired(unsigned long _call); |
@@ -264,7 +264,7 @@ struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
264 | switch (call->state) { | 264 | switch (call->state) { |
265 | case RXRPC_CALL_LOCALLY_ABORTED: | 265 | case RXRPC_CALL_LOCALLY_ABORTED: |
266 | if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events)) | 266 | if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events)) |
267 | schedule_work(&call->processor); | 267 | rxrpc_queue_call(call); |
268 | case RXRPC_CALL_REMOTELY_ABORTED: | 268 | case RXRPC_CALL_REMOTELY_ABORTED: |
269 | read_unlock(&call->state_lock); | 269 | read_unlock(&call->state_lock); |
270 | goto aborted_call; | 270 | goto aborted_call; |
@@ -398,6 +398,7 @@ found_extant_call:
398 | */ | 398 | */ |
399 | void rxrpc_release_call(struct rxrpc_call *call) | 399 | void rxrpc_release_call(struct rxrpc_call *call) |
400 | { | 400 | { |
401 | struct rxrpc_connection *conn = call->conn; | ||
401 | struct rxrpc_sock *rx = call->socket; | 402 | struct rxrpc_sock *rx = call->socket; |
402 | 403 | ||
403 | _enter("{%d,%d,%d,%d}", | 404 | _enter("{%d,%d,%d,%d}", |
@@ -413,8 +414,7 @@ void rxrpc_release_call(struct rxrpc_call *call)
413 | /* dissociate from the socket | 414 | /* dissociate from the socket |
414 | * - the socket's ref on the call is passed to the death timer | 415 | * - the socket's ref on the call is passed to the death timer |
415 | */ | 416 | */ |
416 | _debug("RELEASE CALL %p (%d CONN %p)", | 417 | _debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn); |
417 | call, call->debug_id, call->conn); | ||
418 | 418 | ||
419 | write_lock_bh(&rx->call_lock); | 419 | write_lock_bh(&rx->call_lock); |
420 | if (!list_empty(&call->accept_link)) { | 420 | if (!list_empty(&call->accept_link)) { |
@@ -430,24 +430,42 @@ void rxrpc_release_call(struct rxrpc_call *call)
430 | } | 430 | } |
431 | write_unlock_bh(&rx->call_lock); | 431 | write_unlock_bh(&rx->call_lock); |
432 | 432 | ||
433 | if (call->conn->out_clientflag) | ||
434 | spin_lock(&call->conn->trans->client_lock); | ||
435 | write_lock_bh(&call->conn->lock); | ||
436 | |||
437 | /* free up the channel for reuse */ | 433 | /* free up the channel for reuse */ |
438 | if (call->conn->out_clientflag) { | 434 | spin_lock(&conn->trans->client_lock); |
439 | call->conn->avail_calls++; | 435 | write_lock_bh(&conn->lock); |
440 | if (call->conn->avail_calls == RXRPC_MAXCALLS) | 436 | write_lock(&call->state_lock); |
441 | list_move_tail(&call->conn->bundle_link, | 437 | |
442 | &call->conn->bundle->unused_conns); | 438 | if (conn->channels[call->channel] == call) |
443 | else if (call->conn->avail_calls == 1) | 439 | conn->channels[call->channel] = NULL; |
444 | list_move_tail(&call->conn->bundle_link, | 440 | |
445 | &call->conn->bundle->avail_conns); | 441 | if (conn->out_clientflag && conn->bundle) { |
442 | conn->avail_calls++; | ||
443 | switch (conn->avail_calls) { | ||
444 | case 1: | ||
445 | list_move_tail(&conn->bundle_link, | ||
446 | &conn->bundle->avail_conns); | ||
447 | case 2 ... RXRPC_MAXCALLS - 1: | ||
448 | ASSERT(conn->channels[0] == NULL || | ||
449 | conn->channels[1] == NULL || | ||
450 | conn->channels[2] == NULL || | ||
451 | conn->channels[3] == NULL); | ||
452 | break; | ||
453 | case RXRPC_MAXCALLS: | ||
454 | list_move_tail(&conn->bundle_link, | ||
455 | &conn->bundle->unused_conns); | ||
456 | ASSERT(conn->channels[0] == NULL && | ||
457 | conn->channels[1] == NULL && | ||
458 | conn->channels[2] == NULL && | ||
459 | conn->channels[3] == NULL); | ||
460 | break; | ||
461 | default: | ||
462 | printk(KERN_ERR "RxRPC: conn->avail_calls=%d\n", | ||
463 | conn->avail_calls); | ||
464 | BUG(); | ||
465 | } | ||
446 | } | 466 | } |
447 | 467 | ||
448 | write_lock(&call->state_lock); | 468 | spin_unlock(&conn->trans->client_lock); |
449 | if (call->conn->channels[call->channel] == call) | ||
450 | call->conn->channels[call->channel] = NULL; | ||
451 | 469 | ||
452 | if (call->state < RXRPC_CALL_COMPLETE && | 470 | if (call->state < RXRPC_CALL_COMPLETE && |
453 | call->state != RXRPC_CALL_CLIENT_FINAL_ACK) { | 471 | call->state != RXRPC_CALL_CLIENT_FINAL_ACK) { |
@@ -455,13 +473,12 @@ void rxrpc_release_call(struct rxrpc_call *call)
455 | call->state = RXRPC_CALL_LOCALLY_ABORTED; | 473 | call->state = RXRPC_CALL_LOCALLY_ABORTED; |
456 | call->abort_code = RX_CALL_DEAD; | 474 | call->abort_code = RX_CALL_DEAD; |
457 | set_bit(RXRPC_CALL_ABORT, &call->events); | 475 | set_bit(RXRPC_CALL_ABORT, &call->events); |
458 | schedule_work(&call->processor); | 476 | rxrpc_queue_call(call); |
459 | } | 477 | } |
460 | write_unlock(&call->state_lock); | 478 | write_unlock(&call->state_lock); |
461 | write_unlock_bh(&call->conn->lock); | 479 | write_unlock_bh(&conn->lock); |
462 | if (call->conn->out_clientflag) | ||
463 | spin_unlock(&call->conn->trans->client_lock); | ||
464 | 480 | ||
481 | /* clean up the Rx queue */ | ||
465 | if (!skb_queue_empty(&call->rx_queue) || | 482 | if (!skb_queue_empty(&call->rx_queue) || |
466 | !skb_queue_empty(&call->rx_oos_queue)) { | 483 | !skb_queue_empty(&call->rx_oos_queue)) { |
467 | struct rxrpc_skb_priv *sp; | 484 | struct rxrpc_skb_priv *sp; |
@@ -538,7 +555,7 @@ static void rxrpc_mark_call_released(struct rxrpc_call *call)
538 | if (!test_and_set_bit(RXRPC_CALL_RELEASE, &call->events)) | 555 | if (!test_and_set_bit(RXRPC_CALL_RELEASE, &call->events)) |
539 | sched = true; | 556 | sched = true; |
540 | if (sched) | 557 | if (sched) |
541 | schedule_work(&call->processor); | 558 | rxrpc_queue_call(call); |
542 | } | 559 | } |
543 | write_unlock(&call->state_lock); | 560 | write_unlock(&call->state_lock); |
544 | } | 561 | } |
@@ -588,7 +605,7 @@ void __rxrpc_put_call(struct rxrpc_call *call)
588 | if (atomic_dec_and_test(&call->usage)) { | 605 | if (atomic_dec_and_test(&call->usage)) { |
589 | _debug("call %d dead", call->debug_id); | 606 | _debug("call %d dead", call->debug_id); |
590 | ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD); | 607 | ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD); |
591 | schedule_work(&call->destroyer); | 608 | rxrpc_queue_work(&call->destroyer); |
592 | } | 609 | } |
593 | _leave(""); | 610 | _leave(""); |
594 | } | 611 | } |
@@ -613,7 +630,7 @@ static void rxrpc_cleanup_call(struct rxrpc_call *call)
613 | ASSERTCMP(call->events, ==, 0); | 630 | ASSERTCMP(call->events, ==, 0); |
614 | if (work_pending(&call->processor)) { | 631 | if (work_pending(&call->processor)) { |
615 | _debug("defer destroy"); | 632 | _debug("defer destroy"); |
616 | schedule_work(&call->destroyer); | 633 | rxrpc_queue_work(&call->destroyer); |
617 | return; | 634 | return; |
618 | } | 635 | } |
619 | 636 | ||
@@ -742,7 +759,7 @@ static void rxrpc_call_life_expired(unsigned long _call)
742 | read_lock_bh(&call->state_lock); | 759 | read_lock_bh(&call->state_lock); |
743 | if (call->state < RXRPC_CALL_COMPLETE) { | 760 | if (call->state < RXRPC_CALL_COMPLETE) { |
744 | set_bit(RXRPC_CALL_LIFE_TIMER, &call->events); | 761 | set_bit(RXRPC_CALL_LIFE_TIMER, &call->events); |
745 | schedule_work(&call->processor); | 762 | rxrpc_queue_call(call); |
746 | } | 763 | } |
747 | read_unlock_bh(&call->state_lock); | 764 | read_unlock_bh(&call->state_lock); |
748 | } | 765 | } |
@@ -763,7 +780,7 @@ static void rxrpc_resend_time_expired(unsigned long _call)
763 | clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags); | 780 | clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags); |
764 | if (call->state < RXRPC_CALL_COMPLETE && | 781 | if (call->state < RXRPC_CALL_COMPLETE && |
765 | !test_and_set_bit(RXRPC_CALL_RESEND_TIMER, &call->events)) | 782 | !test_and_set_bit(RXRPC_CALL_RESEND_TIMER, &call->events)) |
766 | schedule_work(&call->processor); | 783 | rxrpc_queue_call(call); |
767 | read_unlock_bh(&call->state_lock); | 784 | read_unlock_bh(&call->state_lock); |
768 | } | 785 | } |
769 | 786 | ||
@@ -782,6 +799,6 @@ static void rxrpc_ack_time_expired(unsigned long _call)
782 | read_lock_bh(&call->state_lock); | 799 | read_lock_bh(&call->state_lock); |
783 | if (call->state < RXRPC_CALL_COMPLETE && | 800 | if (call->state < RXRPC_CALL_COMPLETE && |
784 | !test_and_set_bit(RXRPC_CALL_ACK, &call->events)) | 801 | !test_and_set_bit(RXRPC_CALL_ACK, &call->events)) |
785 | schedule_work(&call->processor); | 802 | rxrpc_queue_call(call); |
786 | read_unlock_bh(&call->state_lock); | 803 | read_unlock_bh(&call->state_lock); |
787 | } | 804 | } |