Diffstat (limited to 'net/rxrpc/ar-input.c')
 net/rxrpc/ar-input.c | 60 +++++++++++++++++++++++++++++++++---------------------------
 1 file changed, 33 insertions(+), 27 deletions(-)
diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
index 323c3454561c..ceb5d619a1d4 100644
--- a/net/rxrpc/ar-input.c
+++ b/net/rxrpc/ar-input.c
@@ -42,6 +42,7 @@ int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb,
 			bool force, bool terminal)
 {
 	struct rxrpc_skb_priv *sp;
+	struct rxrpc_sock *rx = call->socket;
 	struct sock *sk;
 	int skb_len, ret;
 
@@ -64,7 +65,7 @@ int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb,
 		return 0;
 	}
 
-	sk = &call->socket->sk;
+	sk = &rx->sk;
 
 	if (!force) {
 		/* cast skb->rcvbuf to unsigned... It's pointless, but
@@ -89,25 +90,30 @@ int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb,
 		skb->sk = sk;
 		atomic_add(skb->truesize, &sk->sk_rmem_alloc);
 
-		/* Cache the SKB length before we tack it onto the receive
-		 * queue. Once it is added it no longer belongs to us and
-		 * may be freed by other threads of control pulling packets
-		 * from the queue.
-		 */
-		skb_len = skb->len;
-
-		_net("post skb %p", skb);
-		__skb_queue_tail(&sk->sk_receive_queue, skb);
-		spin_unlock_bh(&sk->sk_receive_queue.lock);
-
-		if (!sock_flag(sk, SOCK_DEAD))
-			sk->sk_data_ready(sk, skb_len);
-
 		if (terminal) {
 			_debug("<<<< TERMINAL MESSAGE >>>>");
 			set_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags);
 		}
 
+		/* allow interception by a kernel service */
+		if (rx->interceptor) {
+			rx->interceptor(sk, call->user_call_ID, skb);
+			spin_unlock_bh(&sk->sk_receive_queue.lock);
+		} else {
+
+			/* Cache the SKB length before we tack it onto the
+			 * receive queue. Once it is added it no longer
+			 * belongs to us and may be freed by other threads of
+			 * control pulling packets from the queue */
+			skb_len = skb->len;
+
+			_net("post skb %p", skb);
+			__skb_queue_tail(&sk->sk_receive_queue, skb);
+			spin_unlock_bh(&sk->sk_receive_queue.lock);
+
+			if (!sock_flag(sk, SOCK_DEAD))
+				sk->sk_data_ready(sk, skb_len);
+		}
 		skb = NULL;
 	} else {
 		spin_unlock_bh(&sk->sk_receive_queue.lock);
@@ -232,7 +238,7 @@ static int rxrpc_fast_process_data(struct rxrpc_call *call,
 		read_lock(&call->state_lock);
 		if (call->state < RXRPC_CALL_COMPLETE &&
 		    !test_and_set_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events))
-			schedule_work(&call->processor);
+			rxrpc_queue_call(call);
 		read_unlock(&call->state_lock);
 	}
 
@@ -267,7 +273,7 @@ enqueue_packet:
 	atomic_inc(&call->ackr_not_idle);
 	read_lock(&call->state_lock);
 	if (call->state < RXRPC_CALL_DEAD)
-		schedule_work(&call->processor);
+		rxrpc_queue_call(call);
 	read_unlock(&call->state_lock);
 	_leave(" = 0 [queued]");
 	return 0;
@@ -360,7 +366,7 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
 			call->state = RXRPC_CALL_REMOTELY_ABORTED;
 			call->abort_code = abort_code;
 			set_bit(RXRPC_CALL_RCVD_ABORT, &call->events);
-			schedule_work(&call->processor);
+			rxrpc_queue_call(call);
 		}
 		goto free_packet_unlock;
 
@@ -375,7 +381,7 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
 		case RXRPC_CALL_CLIENT_SEND_REQUEST:
 			call->state = RXRPC_CALL_SERVER_BUSY;
 			set_bit(RXRPC_CALL_RCVD_BUSY, &call->events);
-			schedule_work(&call->processor);
+			rxrpc_queue_call(call);
 		case RXRPC_CALL_SERVER_BUSY:
 			goto free_packet_unlock;
 		default:
@@ -419,7 +425,7 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
 		read_lock_bh(&call->state_lock);
 		if (call->state < RXRPC_CALL_DEAD) {
 			skb_queue_tail(&call->rx_queue, skb);
-			schedule_work(&call->processor);
+			rxrpc_queue_call(call);
 			skb = NULL;
 		}
 		read_unlock_bh(&call->state_lock);
@@ -434,7 +440,7 @@ protocol_error_locked:
 		call->state = RXRPC_CALL_LOCALLY_ABORTED;
 		call->abort_code = RX_PROTOCOL_ERROR;
 		set_bit(RXRPC_CALL_ABORT, &call->events);
-		schedule_work(&call->processor);
+		rxrpc_queue_call(call);
 	}
 free_packet_unlock:
 	write_unlock_bh(&call->state_lock);
@@ -506,7 +512,7 @@ protocol_error:
 		call->state = RXRPC_CALL_LOCALLY_ABORTED;
 		call->abort_code = RX_PROTOCOL_ERROR;
 		set_bit(RXRPC_CALL_ABORT, &call->events);
-		schedule_work(&call->processor);
+		rxrpc_queue_call(call);
 	}
 	write_unlock_bh(&call->state_lock);
 	_leave("");
@@ -542,7 +548,7 @@ static void rxrpc_post_packet_to_call(struct rxrpc_connection *conn,
 	switch (call->state) {
 	case RXRPC_CALL_LOCALLY_ABORTED:
 		if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events))
-			schedule_work(&call->processor);
+			rxrpc_queue_call(call);
 	case RXRPC_CALL_REMOTELY_ABORTED:
 	case RXRPC_CALL_NETWORK_ERROR:
 	case RXRPC_CALL_DEAD:
@@ -591,7 +597,7 @@ dead_call:
 	    sp->hdr.seq == __constant_cpu_to_be32(1)) {
 		_debug("incoming call");
 		skb_queue_tail(&conn->trans->local->accept_queue, skb);
-		schedule_work(&conn->trans->local->acceptor);
+		rxrpc_queue_work(&conn->trans->local->acceptor);
 		goto done;
 	}
 
@@ -630,7 +636,7 @@ found_completed_call:
 	_debug("final ack again");
 	rxrpc_get_call(call);
 	set_bit(RXRPC_CALL_ACK_FINAL, &call->events);
-	schedule_work(&call->processor);
+	rxrpc_queue_call(call);
 
 free_unlock:
 	read_unlock(&call->state_lock);
@@ -651,7 +657,7 @@ static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn,
 
 	atomic_inc(&conn->usage);
 	skb_queue_tail(&conn->rx_queue, skb);
-	schedule_work(&conn->processor);
+	rxrpc_queue_conn(conn);
 }
 
 /*
@@ -767,7 +773,7 @@ cant_route_call:
 	if (sp->hdr.seq == __constant_cpu_to_be32(1)) {
 		_debug("first packet");
 		skb_queue_tail(&local->accept_queue, skb);
-		schedule_work(&local->acceptor);
+		rxrpc_queue_work(&local->acceptor);
 		rxrpc_put_local(local);
 		_leave(" [incoming]");
 		return;