diff options
author | David S. Miller <davem@davemloft.net> | 2018-10-13 00:38:46 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2018-10-13 00:38:46 -0400 |
commit | d864991b220b7c62e81d21209e1fd978fd67352c (patch) | |
tree | b570a1ad6fc1b959c5bcda6ceca0b321319c01e0 /net/rxrpc | |
parent | a688c53a0277d8ea21d86a5c56884892e3442c5e (diff) | |
parent | bab5c80b211035739997ebd361a679fa85b39465 (diff) |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts were easy to resolve using immediate context mostly,
except the cls_u32.c one where I simply took the entire HEAD
chunk.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/rxrpc')
-rw-r--r-- | net/rxrpc/ar-internal.h | 23 | ||||
-rw-r--r-- | net/rxrpc/call_accept.c | 27 | ||||
-rw-r--r-- | net/rxrpc/call_object.c | 5 | ||||
-rw-r--r-- | net/rxrpc/conn_client.c | 10 | ||||
-rw-r--r-- | net/rxrpc/conn_event.c | 26 | ||||
-rw-r--r-- | net/rxrpc/input.c | 251 | ||||
-rw-r--r-- | net/rxrpc/local_object.c | 30 | ||||
-rw-r--r-- | net/rxrpc/peer_event.c | 5 | ||||
-rw-r--r-- | net/rxrpc/peer_object.c | 29 |
9 files changed, 233 insertions, 173 deletions
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 76569c178915..8cee7644965c 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h | |||
@@ -302,6 +302,7 @@ struct rxrpc_peer { | |||
302 | 302 | ||
303 | /* calculated RTT cache */ | 303 | /* calculated RTT cache */ |
304 | #define RXRPC_RTT_CACHE_SIZE 32 | 304 | #define RXRPC_RTT_CACHE_SIZE 32 |
305 | spinlock_t rtt_input_lock; /* RTT lock for input routine */ | ||
305 | ktime_t rtt_last_req; /* Time of last RTT request */ | 306 | ktime_t rtt_last_req; /* Time of last RTT request */ |
306 | u64 rtt; /* Current RTT estimate (in nS) */ | 307 | u64 rtt; /* Current RTT estimate (in nS) */ |
307 | u64 rtt_sum; /* Sum of cache contents */ | 308 | u64 rtt_sum; /* Sum of cache contents */ |
@@ -442,17 +443,17 @@ struct rxrpc_connection { | |||
442 | spinlock_t state_lock; /* state-change lock */ | 443 | spinlock_t state_lock; /* state-change lock */ |
443 | enum rxrpc_conn_cache_state cache_state; | 444 | enum rxrpc_conn_cache_state cache_state; |
444 | enum rxrpc_conn_proto_state state; /* current state of connection */ | 445 | enum rxrpc_conn_proto_state state; /* current state of connection */ |
445 | u32 local_abort; /* local abort code */ | 446 | u32 abort_code; /* Abort code of connection abort */ |
446 | u32 remote_abort; /* remote abort code */ | ||
447 | int debug_id; /* debug ID for printks */ | 447 | int debug_id; /* debug ID for printks */ |
448 | atomic_t serial; /* packet serial number counter */ | 448 | atomic_t serial; /* packet serial number counter */ |
449 | unsigned int hi_serial; /* highest serial number received */ | 449 | unsigned int hi_serial; /* highest serial number received */ |
450 | u32 security_nonce; /* response re-use preventer */ | 450 | u32 security_nonce; /* response re-use preventer */ |
451 | u16 service_id; /* Service ID, possibly upgraded */ | 451 | u32 service_id; /* Service ID, possibly upgraded */ |
452 | u8 size_align; /* data size alignment (for security) */ | 452 | u8 size_align; /* data size alignment (for security) */ |
453 | u8 security_size; /* security header size */ | 453 | u8 security_size; /* security header size */ |
454 | u8 security_ix; /* security type */ | 454 | u8 security_ix; /* security type */ |
455 | u8 out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */ | 455 | u8 out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */ |
456 | short error; /* Local error code */ | ||
456 | }; | 457 | }; |
457 | 458 | ||
458 | static inline bool rxrpc_to_server(const struct rxrpc_skb_priv *sp) | 459 | static inline bool rxrpc_to_server(const struct rxrpc_skb_priv *sp) |
@@ -635,6 +636,8 @@ struct rxrpc_call { | |||
635 | bool tx_phase; /* T if transmission phase, F if receive phase */ | 636 | bool tx_phase; /* T if transmission phase, F if receive phase */ |
636 | u8 nr_jumbo_bad; /* Number of jumbo dups/exceeds-windows */ | 637 | u8 nr_jumbo_bad; /* Number of jumbo dups/exceeds-windows */ |
637 | 638 | ||
639 | spinlock_t input_lock; /* Lock for packet input to this call */ | ||
640 | |||
638 | /* receive-phase ACK management */ | 641 | /* receive-phase ACK management */ |
639 | u8 ackr_reason; /* reason to ACK */ | 642 | u8 ackr_reason; /* reason to ACK */ |
640 | u16 ackr_skew; /* skew on packet being ACK'd */ | 643 | u16 ackr_skew; /* skew on packet being ACK'd */ |
@@ -720,8 +723,6 @@ int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t); | |||
720 | void rxrpc_discard_prealloc(struct rxrpc_sock *); | 723 | void rxrpc_discard_prealloc(struct rxrpc_sock *); |
721 | struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *, | 724 | struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *, |
722 | struct rxrpc_sock *, | 725 | struct rxrpc_sock *, |
723 | struct rxrpc_peer *, | ||
724 | struct rxrpc_connection *, | ||
725 | struct sk_buff *); | 726 | struct sk_buff *); |
726 | void rxrpc_accept_incoming_calls(struct rxrpc_local *); | 727 | void rxrpc_accept_incoming_calls(struct rxrpc_local *); |
727 | struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long, | 728 | struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long, |
@@ -891,8 +892,9 @@ extern unsigned long rxrpc_conn_idle_client_fast_expiry; | |||
891 | extern struct idr rxrpc_client_conn_ids; | 892 | extern struct idr rxrpc_client_conn_ids; |
892 | 893 | ||
893 | void rxrpc_destroy_client_conn_ids(void); | 894 | void rxrpc_destroy_client_conn_ids(void); |
894 | int rxrpc_connect_call(struct rxrpc_call *, struct rxrpc_conn_parameters *, | 895 | int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_call *, |
895 | struct sockaddr_rxrpc *, gfp_t); | 896 | struct rxrpc_conn_parameters *, struct sockaddr_rxrpc *, |
897 | gfp_t); | ||
896 | void rxrpc_expose_client_call(struct rxrpc_call *); | 898 | void rxrpc_expose_client_call(struct rxrpc_call *); |
897 | void rxrpc_disconnect_client_call(struct rxrpc_call *); | 899 | void rxrpc_disconnect_client_call(struct rxrpc_call *); |
898 | void rxrpc_put_client_conn(struct rxrpc_connection *); | 900 | void rxrpc_put_client_conn(struct rxrpc_connection *); |
@@ -965,7 +967,7 @@ void rxrpc_unpublish_service_conn(struct rxrpc_connection *); | |||
965 | /* | 967 | /* |
966 | * input.c | 968 | * input.c |
967 | */ | 969 | */ |
968 | void rxrpc_data_ready(struct sock *); | 970 | int rxrpc_input_packet(struct sock *, struct sk_buff *); |
969 | 971 | ||
970 | /* | 972 | /* |
971 | * insecure.c | 973 | * insecure.c |
@@ -1045,10 +1047,11 @@ void rxrpc_peer_keepalive_worker(struct work_struct *); | |||
1045 | */ | 1047 | */ |
1046 | struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *, | 1048 | struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *, |
1047 | const struct sockaddr_rxrpc *); | 1049 | const struct sockaddr_rxrpc *); |
1048 | struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *, | 1050 | struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *, struct rxrpc_local *, |
1049 | struct sockaddr_rxrpc *, gfp_t); | 1051 | struct sockaddr_rxrpc *, gfp_t); |
1050 | struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t); | 1052 | struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t); |
1051 | void rxrpc_new_incoming_peer(struct rxrpc_local *, struct rxrpc_peer *); | 1053 | void rxrpc_new_incoming_peer(struct rxrpc_sock *, struct rxrpc_local *, |
1054 | struct rxrpc_peer *); | ||
1052 | void rxrpc_destroy_all_peers(struct rxrpc_net *); | 1055 | void rxrpc_destroy_all_peers(struct rxrpc_net *); |
1053 | struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *); | 1056 | struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *); |
1054 | struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *); | 1057 | struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *); |
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c index 8354cadbb839..e0d8ca03169a 100644 --- a/net/rxrpc/call_accept.c +++ b/net/rxrpc/call_accept.c | |||
@@ -287,7 +287,7 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx, | |||
287 | (peer_tail + 1) & | 287 | (peer_tail + 1) & |
288 | (RXRPC_BACKLOG_MAX - 1)); | 288 | (RXRPC_BACKLOG_MAX - 1)); |
289 | 289 | ||
290 | rxrpc_new_incoming_peer(local, peer); | 290 | rxrpc_new_incoming_peer(rx, local, peer); |
291 | } | 291 | } |
292 | 292 | ||
293 | /* Now allocate and set up the connection */ | 293 | /* Now allocate and set up the connection */ |
@@ -333,11 +333,11 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx, | |||
333 | */ | 333 | */ |
334 | struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local, | 334 | struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local, |
335 | struct rxrpc_sock *rx, | 335 | struct rxrpc_sock *rx, |
336 | struct rxrpc_peer *peer, | ||
337 | struct rxrpc_connection *conn, | ||
338 | struct sk_buff *skb) | 336 | struct sk_buff *skb) |
339 | { | 337 | { |
340 | struct rxrpc_skb_priv *sp = rxrpc_skb(skb); | 338 | struct rxrpc_skb_priv *sp = rxrpc_skb(skb); |
339 | struct rxrpc_connection *conn; | ||
340 | struct rxrpc_peer *peer; | ||
341 | struct rxrpc_call *call; | 341 | struct rxrpc_call *call; |
342 | 342 | ||
343 | _enter(""); | 343 | _enter(""); |
@@ -354,6 +354,13 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local, | |||
354 | goto out; | 354 | goto out; |
355 | } | 355 | } |
356 | 356 | ||
357 | /* The peer, connection and call may all have sprung into existence due | ||
358 | * to a duplicate packet being handled on another CPU in parallel, so | ||
359 | * we have to recheck the routing. However, we're now holding | ||
360 | * rx->incoming_lock, so the values should remain stable. | ||
361 | */ | ||
362 | conn = rxrpc_find_connection_rcu(local, skb, &peer); | ||
363 | |||
357 | call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb); | 364 | call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb); |
358 | if (!call) { | 365 | if (!call) { |
359 | skb->mark = RXRPC_SKB_MARK_REJECT_BUSY; | 366 | skb->mark = RXRPC_SKB_MARK_REJECT_BUSY; |
@@ -396,20 +403,22 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local, | |||
396 | 403 | ||
397 | case RXRPC_CONN_SERVICE: | 404 | case RXRPC_CONN_SERVICE: |
398 | write_lock(&call->state_lock); | 405 | write_lock(&call->state_lock); |
399 | if (rx->discard_new_call) | 406 | if (call->state < RXRPC_CALL_COMPLETE) { |
400 | call->state = RXRPC_CALL_SERVER_RECV_REQUEST; | 407 | if (rx->discard_new_call) |
401 | else | 408 | call->state = RXRPC_CALL_SERVER_RECV_REQUEST; |
402 | call->state = RXRPC_CALL_SERVER_ACCEPTING; | 409 | else |
410 | call->state = RXRPC_CALL_SERVER_ACCEPTING; | ||
411 | } | ||
403 | write_unlock(&call->state_lock); | 412 | write_unlock(&call->state_lock); |
404 | break; | 413 | break; |
405 | 414 | ||
406 | case RXRPC_CONN_REMOTELY_ABORTED: | 415 | case RXRPC_CONN_REMOTELY_ABORTED: |
407 | rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED, | 416 | rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED, |
408 | conn->remote_abort, -ECONNABORTED); | 417 | conn->abort_code, conn->error); |
409 | break; | 418 | break; |
410 | case RXRPC_CONN_LOCALLY_ABORTED: | 419 | case RXRPC_CONN_LOCALLY_ABORTED: |
411 | rxrpc_abort_call("CON", call, sp->hdr.seq, | 420 | rxrpc_abort_call("CON", call, sp->hdr.seq, |
412 | conn->local_abort, -ECONNABORTED); | 421 | conn->abort_code, conn->error); |
413 | break; | 422 | break; |
414 | default: | 423 | default: |
415 | BUG(); | 424 | BUG(); |
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index 799f75b6900d..8f1a8f85b1f9 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c | |||
@@ -138,6 +138,7 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp, | |||
138 | init_waitqueue_head(&call->waitq); | 138 | init_waitqueue_head(&call->waitq); |
139 | spin_lock_init(&call->lock); | 139 | spin_lock_init(&call->lock); |
140 | spin_lock_init(&call->notify_lock); | 140 | spin_lock_init(&call->notify_lock); |
141 | spin_lock_init(&call->input_lock); | ||
141 | rwlock_init(&call->state_lock); | 142 | rwlock_init(&call->state_lock); |
142 | atomic_set(&call->usage, 1); | 143 | atomic_set(&call->usage, 1); |
143 | call->debug_id = debug_id; | 144 | call->debug_id = debug_id; |
@@ -287,7 +288,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, | |||
287 | /* Set up or get a connection record and set the protocol parameters, | 288 | /* Set up or get a connection record and set the protocol parameters, |
288 | * including channel number and call ID. | 289 | * including channel number and call ID. |
289 | */ | 290 | */ |
290 | ret = rxrpc_connect_call(call, cp, srx, gfp); | 291 | ret = rxrpc_connect_call(rx, call, cp, srx, gfp); |
291 | if (ret < 0) | 292 | if (ret < 0) |
292 | goto error; | 293 | goto error; |
293 | 294 | ||
@@ -339,7 +340,7 @@ int rxrpc_retry_client_call(struct rxrpc_sock *rx, | |||
339 | /* Set up or get a connection record and set the protocol parameters, | 340 | /* Set up or get a connection record and set the protocol parameters, |
340 | * including channel number and call ID. | 341 | * including channel number and call ID. |
341 | */ | 342 | */ |
342 | ret = rxrpc_connect_call(call, cp, srx, gfp); | 343 | ret = rxrpc_connect_call(rx, call, cp, srx, gfp); |
343 | if (ret < 0) | 344 | if (ret < 0) |
344 | goto error; | 345 | goto error; |
345 | 346 | ||
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c index 8acf74fe24c0..521189f4b666 100644 --- a/net/rxrpc/conn_client.c +++ b/net/rxrpc/conn_client.c | |||
@@ -276,7 +276,8 @@ dont_reuse: | |||
276 | * If we return with a connection, the call will be on its waiting list. It's | 276 | * If we return with a connection, the call will be on its waiting list. It's |
277 | * left to the caller to assign a channel and wake up the call. | 277 | * left to the caller to assign a channel and wake up the call. |
278 | */ | 278 | */ |
279 | static int rxrpc_get_client_conn(struct rxrpc_call *call, | 279 | static int rxrpc_get_client_conn(struct rxrpc_sock *rx, |
280 | struct rxrpc_call *call, | ||
280 | struct rxrpc_conn_parameters *cp, | 281 | struct rxrpc_conn_parameters *cp, |
281 | struct sockaddr_rxrpc *srx, | 282 | struct sockaddr_rxrpc *srx, |
282 | gfp_t gfp) | 283 | gfp_t gfp) |
@@ -289,7 +290,7 @@ static int rxrpc_get_client_conn(struct rxrpc_call *call, | |||
289 | 290 | ||
290 | _enter("{%d,%lx},", call->debug_id, call->user_call_ID); | 291 | _enter("{%d,%lx},", call->debug_id, call->user_call_ID); |
291 | 292 | ||
292 | cp->peer = rxrpc_lookup_peer(cp->local, srx, gfp); | 293 | cp->peer = rxrpc_lookup_peer(rx, cp->local, srx, gfp); |
293 | if (!cp->peer) | 294 | if (!cp->peer) |
294 | goto error; | 295 | goto error; |
295 | 296 | ||
@@ -683,7 +684,8 @@ out: | |||
683 | * find a connection for a call | 684 | * find a connection for a call |
684 | * - called in process context with IRQs enabled | 685 | * - called in process context with IRQs enabled |
685 | */ | 686 | */ |
686 | int rxrpc_connect_call(struct rxrpc_call *call, | 687 | int rxrpc_connect_call(struct rxrpc_sock *rx, |
688 | struct rxrpc_call *call, | ||
687 | struct rxrpc_conn_parameters *cp, | 689 | struct rxrpc_conn_parameters *cp, |
688 | struct sockaddr_rxrpc *srx, | 690 | struct sockaddr_rxrpc *srx, |
689 | gfp_t gfp) | 691 | gfp_t gfp) |
@@ -696,7 +698,7 @@ int rxrpc_connect_call(struct rxrpc_call *call, | |||
696 | rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper); | 698 | rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper); |
697 | rxrpc_cull_active_client_conns(rxnet); | 699 | rxrpc_cull_active_client_conns(rxnet); |
698 | 700 | ||
699 | ret = rxrpc_get_client_conn(call, cp, srx, gfp); | 701 | ret = rxrpc_get_client_conn(rx, call, cp, srx, gfp); |
700 | if (ret < 0) | 702 | if (ret < 0) |
701 | goto out; | 703 | goto out; |
702 | 704 | ||
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c index 6df56ce68861..b6fca8ebb117 100644 --- a/net/rxrpc/conn_event.c +++ b/net/rxrpc/conn_event.c | |||
@@ -126,7 +126,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn, | |||
126 | 126 | ||
127 | switch (chan->last_type) { | 127 | switch (chan->last_type) { |
128 | case RXRPC_PACKET_TYPE_ABORT: | 128 | case RXRPC_PACKET_TYPE_ABORT: |
129 | _proto("Tx ABORT %%%u { %d } [re]", serial, conn->local_abort); | 129 | _proto("Tx ABORT %%%u { %d } [re]", serial, conn->abort_code); |
130 | break; | 130 | break; |
131 | case RXRPC_PACKET_TYPE_ACK: | 131 | case RXRPC_PACKET_TYPE_ACK: |
132 | trace_rxrpc_tx_ack(chan->call_debug_id, serial, | 132 | trace_rxrpc_tx_ack(chan->call_debug_id, serial, |
@@ -153,13 +153,12 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn, | |||
153 | * pass a connection-level abort onto all calls on that connection | 153 | * pass a connection-level abort onto all calls on that connection |
154 | */ | 154 | */ |
155 | static void rxrpc_abort_calls(struct rxrpc_connection *conn, | 155 | static void rxrpc_abort_calls(struct rxrpc_connection *conn, |
156 | enum rxrpc_call_completion compl, | 156 | enum rxrpc_call_completion compl) |
157 | u32 abort_code, int error) | ||
158 | { | 157 | { |
159 | struct rxrpc_call *call; | 158 | struct rxrpc_call *call; |
160 | int i; | 159 | int i; |
161 | 160 | ||
162 | _enter("{%d},%x", conn->debug_id, abort_code); | 161 | _enter("{%d},%x", conn->debug_id, conn->abort_code); |
163 | 162 | ||
164 | spin_lock(&conn->channel_lock); | 163 | spin_lock(&conn->channel_lock); |
165 | 164 | ||
@@ -172,9 +171,11 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn, | |||
172 | trace_rxrpc_abort(call->debug_id, | 171 | trace_rxrpc_abort(call->debug_id, |
173 | "CON", call->cid, | 172 | "CON", call->cid, |
174 | call->call_id, 0, | 173 | call->call_id, 0, |
175 | abort_code, error); | 174 | conn->abort_code, |
175 | conn->error); | ||
176 | if (rxrpc_set_call_completion(call, compl, | 176 | if (rxrpc_set_call_completion(call, compl, |
177 | abort_code, error)) | 177 | conn->abort_code, |
178 | conn->error)) | ||
178 | rxrpc_notify_socket(call); | 179 | rxrpc_notify_socket(call); |
179 | } | 180 | } |
180 | } | 181 | } |
@@ -207,10 +208,12 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn, | |||
207 | return 0; | 208 | return 0; |
208 | } | 209 | } |
209 | 210 | ||
211 | conn->error = error; | ||
212 | conn->abort_code = abort_code; | ||
210 | conn->state = RXRPC_CONN_LOCALLY_ABORTED; | 213 | conn->state = RXRPC_CONN_LOCALLY_ABORTED; |
211 | spin_unlock_bh(&conn->state_lock); | 214 | spin_unlock_bh(&conn->state_lock); |
212 | 215 | ||
213 | rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, abort_code, error); | 216 | rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED); |
214 | 217 | ||
215 | msg.msg_name = &conn->params.peer->srx.transport; | 218 | msg.msg_name = &conn->params.peer->srx.transport; |
216 | msg.msg_namelen = conn->params.peer->srx.transport_len; | 219 | msg.msg_namelen = conn->params.peer->srx.transport_len; |
@@ -229,7 +232,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn, | |||
229 | whdr._rsvd = 0; | 232 | whdr._rsvd = 0; |
230 | whdr.serviceId = htons(conn->service_id); | 233 | whdr.serviceId = htons(conn->service_id); |
231 | 234 | ||
232 | word = htonl(conn->local_abort); | 235 | word = htonl(conn->abort_code); |
233 | 236 | ||
234 | iov[0].iov_base = &whdr; | 237 | iov[0].iov_base = &whdr; |
235 | iov[0].iov_len = sizeof(whdr); | 238 | iov[0].iov_len = sizeof(whdr); |
@@ -240,7 +243,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn, | |||
240 | 243 | ||
241 | serial = atomic_inc_return(&conn->serial); | 244 | serial = atomic_inc_return(&conn->serial); |
242 | whdr.serial = htonl(serial); | 245 | whdr.serial = htonl(serial); |
243 | _proto("Tx CONN ABORT %%%u { %d }", serial, conn->local_abort); | 246 | _proto("Tx CONN ABORT %%%u { %d }", serial, conn->abort_code); |
244 | 247 | ||
245 | ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len); | 248 | ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len); |
246 | if (ret < 0) { | 249 | if (ret < 0) { |
@@ -315,9 +318,10 @@ static int rxrpc_process_event(struct rxrpc_connection *conn, | |||
315 | abort_code = ntohl(wtmp); | 318 | abort_code = ntohl(wtmp); |
316 | _proto("Rx ABORT %%%u { ac=%d }", sp->hdr.serial, abort_code); | 319 | _proto("Rx ABORT %%%u { ac=%d }", sp->hdr.serial, abort_code); |
317 | 320 | ||
321 | conn->error = -ECONNABORTED; | ||
322 | conn->abort_code = abort_code; | ||
318 | conn->state = RXRPC_CONN_REMOTELY_ABORTED; | 323 | conn->state = RXRPC_CONN_REMOTELY_ABORTED; |
319 | rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED, | 324 | rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED); |
320 | abort_code, -ECONNABORTED); | ||
321 | return -ECONNABORTED; | 325 | return -ECONNABORTED; |
322 | 326 | ||
323 | case RXRPC_PACKET_TYPE_CHALLENGE: | 327 | case RXRPC_PACKET_TYPE_CHALLENGE: |
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 5b2626929822..9128aa0e40aa 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c | |||
@@ -216,10 +216,11 @@ static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb, | |||
216 | /* | 216 | /* |
217 | * Apply a hard ACK by advancing the Tx window. | 217 | * Apply a hard ACK by advancing the Tx window. |
218 | */ | 218 | */ |
219 | static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to, | 219 | static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to, |
220 | struct rxrpc_ack_summary *summary) | 220 | struct rxrpc_ack_summary *summary) |
221 | { | 221 | { |
222 | struct sk_buff *skb, *list = NULL; | 222 | struct sk_buff *skb, *list = NULL; |
223 | bool rot_last = false; | ||
223 | int ix; | 224 | int ix; |
224 | u8 annotation; | 225 | u8 annotation; |
225 | 226 | ||
@@ -243,15 +244,17 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to, | |||
243 | skb->next = list; | 244 | skb->next = list; |
244 | list = skb; | 245 | list = skb; |
245 | 246 | ||
246 | if (annotation & RXRPC_TX_ANNO_LAST) | 247 | if (annotation & RXRPC_TX_ANNO_LAST) { |
247 | set_bit(RXRPC_CALL_TX_LAST, &call->flags); | 248 | set_bit(RXRPC_CALL_TX_LAST, &call->flags); |
249 | rot_last = true; | ||
250 | } | ||
248 | if ((annotation & RXRPC_TX_ANNO_MASK) != RXRPC_TX_ANNO_ACK) | 251 | if ((annotation & RXRPC_TX_ANNO_MASK) != RXRPC_TX_ANNO_ACK) |
249 | summary->nr_rot_new_acks++; | 252 | summary->nr_rot_new_acks++; |
250 | } | 253 | } |
251 | 254 | ||
252 | spin_unlock(&call->lock); | 255 | spin_unlock(&call->lock); |
253 | 256 | ||
254 | trace_rxrpc_transmit(call, (test_bit(RXRPC_CALL_TX_LAST, &call->flags) ? | 257 | trace_rxrpc_transmit(call, (rot_last ? |
255 | rxrpc_transmit_rotate_last : | 258 | rxrpc_transmit_rotate_last : |
256 | rxrpc_transmit_rotate)); | 259 | rxrpc_transmit_rotate)); |
257 | wake_up(&call->waitq); | 260 | wake_up(&call->waitq); |
@@ -262,6 +265,8 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to, | |||
262 | skb_mark_not_on_list(skb); | 265 | skb_mark_not_on_list(skb); |
263 | rxrpc_free_skb(skb, rxrpc_skb_tx_freed); | 266 | rxrpc_free_skb(skb, rxrpc_skb_tx_freed); |
264 | } | 267 | } |
268 | |||
269 | return rot_last; | ||
265 | } | 270 | } |
266 | 271 | ||
267 | /* | 272 | /* |
@@ -273,23 +278,26 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to, | |||
273 | static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun, | 278 | static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun, |
274 | const char *abort_why) | 279 | const char *abort_why) |
275 | { | 280 | { |
281 | unsigned int state; | ||
276 | 282 | ||
277 | ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags)); | 283 | ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags)); |
278 | 284 | ||
279 | write_lock(&call->state_lock); | 285 | write_lock(&call->state_lock); |
280 | 286 | ||
281 | switch (call->state) { | 287 | state = call->state; |
288 | switch (state) { | ||
282 | case RXRPC_CALL_CLIENT_SEND_REQUEST: | 289 | case RXRPC_CALL_CLIENT_SEND_REQUEST: |
283 | case RXRPC_CALL_CLIENT_AWAIT_REPLY: | 290 | case RXRPC_CALL_CLIENT_AWAIT_REPLY: |
284 | if (reply_begun) | 291 | if (reply_begun) |
285 | call->state = RXRPC_CALL_CLIENT_RECV_REPLY; | 292 | call->state = state = RXRPC_CALL_CLIENT_RECV_REPLY; |
286 | else | 293 | else |
287 | call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY; | 294 | call->state = state = RXRPC_CALL_CLIENT_AWAIT_REPLY; |
288 | break; | 295 | break; |
289 | 296 | ||
290 | case RXRPC_CALL_SERVER_AWAIT_ACK: | 297 | case RXRPC_CALL_SERVER_AWAIT_ACK: |
291 | __rxrpc_call_completed(call); | 298 | __rxrpc_call_completed(call); |
292 | rxrpc_notify_socket(call); | 299 | rxrpc_notify_socket(call); |
300 | state = call->state; | ||
293 | break; | 301 | break; |
294 | 302 | ||
295 | default: | 303 | default: |
@@ -297,11 +305,10 @@ static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun, | |||
297 | } | 305 | } |
298 | 306 | ||
299 | write_unlock(&call->state_lock); | 307 | write_unlock(&call->state_lock); |
300 | if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY) { | 308 | if (state == RXRPC_CALL_CLIENT_AWAIT_REPLY) |
301 | trace_rxrpc_transmit(call, rxrpc_transmit_await_reply); | 309 | trace_rxrpc_transmit(call, rxrpc_transmit_await_reply); |
302 | } else { | 310 | else |
303 | trace_rxrpc_transmit(call, rxrpc_transmit_end); | 311 | trace_rxrpc_transmit(call, rxrpc_transmit_end); |
304 | } | ||
305 | _leave(" = ok"); | 312 | _leave(" = ok"); |
306 | return true; | 313 | return true; |
307 | 314 | ||
@@ -332,11 +339,11 @@ static bool rxrpc_receiving_reply(struct rxrpc_call *call) | |||
332 | trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now); | 339 | trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now); |
333 | } | 340 | } |
334 | 341 | ||
335 | if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) | ||
336 | rxrpc_rotate_tx_window(call, top, &summary); | ||
337 | if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) { | 342 | if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) { |
338 | rxrpc_proto_abort("TXL", call, top); | 343 | if (!rxrpc_rotate_tx_window(call, top, &summary)) { |
339 | return false; | 344 | rxrpc_proto_abort("TXL", call, top); |
345 | return false; | ||
346 | } | ||
340 | } | 347 | } |
341 | if (!rxrpc_end_tx_phase(call, true, "ETD")) | 348 | if (!rxrpc_end_tx_phase(call, true, "ETD")) |
342 | return false; | 349 | return false; |
@@ -452,13 +459,15 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb, | |||
452 | } | 459 | } |
453 | } | 460 | } |
454 | 461 | ||
462 | spin_lock(&call->input_lock); | ||
463 | |||
455 | /* Received data implicitly ACKs all of the request packets we sent | 464 | /* Received data implicitly ACKs all of the request packets we sent |
456 | * when we're acting as a client. | 465 | * when we're acting as a client. |
457 | */ | 466 | */ |
458 | if ((state == RXRPC_CALL_CLIENT_SEND_REQUEST || | 467 | if ((state == RXRPC_CALL_CLIENT_SEND_REQUEST || |
459 | state == RXRPC_CALL_CLIENT_AWAIT_REPLY) && | 468 | state == RXRPC_CALL_CLIENT_AWAIT_REPLY) && |
460 | !rxrpc_receiving_reply(call)) | 469 | !rxrpc_receiving_reply(call)) |
461 | return; | 470 | goto unlock; |
462 | 471 | ||
463 | call->ackr_prev_seq = seq; | 472 | call->ackr_prev_seq = seq; |
464 | 473 | ||
@@ -488,12 +497,16 @@ next_subpacket: | |||
488 | 497 | ||
489 | if (flags & RXRPC_LAST_PACKET) { | 498 | if (flags & RXRPC_LAST_PACKET) { |
490 | if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) && | 499 | if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) && |
491 | seq != call->rx_top) | 500 | seq != call->rx_top) { |
492 | return rxrpc_proto_abort("LSN", call, seq); | 501 | rxrpc_proto_abort("LSN", call, seq); |
502 | goto unlock; | ||
503 | } | ||
493 | } else { | 504 | } else { |
494 | if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) && | 505 | if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) && |
495 | after_eq(seq, call->rx_top)) | 506 | after_eq(seq, call->rx_top)) { |
496 | return rxrpc_proto_abort("LSA", call, seq); | 507 | rxrpc_proto_abort("LSA", call, seq); |
508 | goto unlock; | ||
509 | } | ||
497 | } | 510 | } |
498 | 511 | ||
499 | trace_rxrpc_rx_data(call->debug_id, seq, serial, flags, annotation); | 512 | trace_rxrpc_rx_data(call->debug_id, seq, serial, flags, annotation); |
@@ -560,8 +573,10 @@ next_subpacket: | |||
560 | skip: | 573 | skip: |
561 | offset += len; | 574 | offset += len; |
562 | if (flags & RXRPC_JUMBO_PACKET) { | 575 | if (flags & RXRPC_JUMBO_PACKET) { |
563 | if (skb_copy_bits(skb, offset, &flags, 1) < 0) | 576 | if (skb_copy_bits(skb, offset, &flags, 1) < 0) { |
564 | return rxrpc_proto_abort("XJF", call, seq); | 577 | rxrpc_proto_abort("XJF", call, seq); |
578 | goto unlock; | ||
579 | } | ||
565 | offset += sizeof(struct rxrpc_jumbo_header); | 580 | offset += sizeof(struct rxrpc_jumbo_header); |
566 | seq++; | 581 | seq++; |
567 | serial++; | 582 | serial++; |
@@ -601,6 +616,9 @@ ack: | |||
601 | trace_rxrpc_notify_socket(call->debug_id, serial); | 616 | trace_rxrpc_notify_socket(call->debug_id, serial); |
602 | rxrpc_notify_socket(call); | 617 | rxrpc_notify_socket(call); |
603 | } | 618 | } |
619 | |||
620 | unlock: | ||
621 | spin_unlock(&call->input_lock); | ||
604 | _leave(" [queued]"); | 622 | _leave(" [queued]"); |
605 | } | 623 | } |
606 | 624 | ||
@@ -687,15 +705,14 @@ static void rxrpc_input_ping_response(struct rxrpc_call *call, | |||
687 | 705 | ||
688 | ping_time = call->ping_time; | 706 | ping_time = call->ping_time; |
689 | smp_rmb(); | 707 | smp_rmb(); |
690 | ping_serial = call->ping_serial; | 708 | ping_serial = READ_ONCE(call->ping_serial); |
691 | 709 | ||
692 | if (orig_serial == call->acks_lost_ping) | 710 | if (orig_serial == call->acks_lost_ping) |
693 | rxrpc_input_check_for_lost_ack(call); | 711 | rxrpc_input_check_for_lost_ack(call); |
694 | 712 | ||
695 | if (!test_bit(RXRPC_CALL_PINGING, &call->flags) || | 713 | if (before(orig_serial, ping_serial) || |
696 | before(orig_serial, ping_serial)) | 714 | !test_and_clear_bit(RXRPC_CALL_PINGING, &call->flags)) |
697 | return; | 715 | return; |
698 | clear_bit(RXRPC_CALL_PINGING, &call->flags); | ||
699 | if (after(orig_serial, ping_serial)) | 716 | if (after(orig_serial, ping_serial)) |
700 | return; | 717 | return; |
701 | 718 | ||
@@ -861,15 +878,32 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb, | |||
861 | rxrpc_propose_ack_respond_to_ack); | 878 | rxrpc_propose_ack_respond_to_ack); |
862 | } | 879 | } |
863 | 880 | ||
881 | /* Discard any out-of-order or duplicate ACKs. */ | ||
882 | if (before_eq(sp->hdr.serial, call->acks_latest)) | ||
883 | return; | ||
884 | |||
885 | buf.info.rxMTU = 0; | ||
864 | ioffset = offset + nr_acks + 3; | 886 | ioffset = offset + nr_acks + 3; |
865 | if (skb->len >= ioffset + sizeof(buf.info)) { | 887 | if (skb->len >= ioffset + sizeof(buf.info) && |
866 | if (skb_copy_bits(skb, ioffset, &buf.info, sizeof(buf.info)) < 0) | 888 | skb_copy_bits(skb, ioffset, &buf.info, sizeof(buf.info)) < 0) |
867 | return rxrpc_proto_abort("XAI", call, 0); | 889 | return rxrpc_proto_abort("XAI", call, 0); |
890 | |||
891 | spin_lock(&call->input_lock); | ||
892 | |||
893 | /* Discard any out-of-order or duplicate ACKs. */ | ||
894 | if (before_eq(sp->hdr.serial, call->acks_latest)) | ||
895 | goto out; | ||
896 | call->acks_latest_ts = skb->tstamp; | ||
897 | call->acks_latest = sp->hdr.serial; | ||
898 | |||
899 | /* Parse rwind and mtu sizes if provided. */ | ||
900 | if (buf.info.rxMTU) | ||
868 | rxrpc_input_ackinfo(call, skb, &buf.info); | 901 | rxrpc_input_ackinfo(call, skb, &buf.info); |
869 | } | ||
870 | 902 | ||
871 | if (first_soft_ack == 0) | 903 | if (first_soft_ack == 0) { |
872 | return rxrpc_proto_abort("AK0", call, 0); | 904 | rxrpc_proto_abort("AK0", call, 0); |
905 | goto out; | ||
906 | } | ||
873 | 907 | ||
874 | /* Ignore ACKs unless we are or have just been transmitting. */ | 908 | /* Ignore ACKs unless we are or have just been transmitting. */ |
875 | switch (READ_ONCE(call->state)) { | 909 | switch (READ_ONCE(call->state)) { |
@@ -879,39 +913,35 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb, | |||
879 | case RXRPC_CALL_SERVER_AWAIT_ACK: | 913 | case RXRPC_CALL_SERVER_AWAIT_ACK: |
880 | break; | 914 | break; |
881 | default: | 915 | default: |
882 | return; | 916 | goto out; |
883 | } | ||
884 | |||
885 | /* Discard any out-of-order or duplicate ACKs. */ | ||
886 | if (before_eq(sp->hdr.serial, call->acks_latest)) { | ||
887 | _debug("discard ACK %d <= %d", | ||
888 | sp->hdr.serial, call->acks_latest); | ||
889 | return; | ||
890 | } | 917 | } |
891 | call->acks_latest_ts = skb->tstamp; | ||
892 | call->acks_latest = sp->hdr.serial; | ||
893 | 918 | ||
894 | if (before(hard_ack, call->tx_hard_ack) || | 919 | if (before(hard_ack, call->tx_hard_ack) || |
895 | after(hard_ack, call->tx_top)) | 920 | after(hard_ack, call->tx_top)) { |
896 | return rxrpc_proto_abort("AKW", call, 0); | 921 | rxrpc_proto_abort("AKW", call, 0); |
897 | if (nr_acks > call->tx_top - hard_ack) | 922 | goto out; |
898 | return rxrpc_proto_abort("AKN", call, 0); | 923 | } |
924 | if (nr_acks > call->tx_top - hard_ack) { | ||
925 | rxrpc_proto_abort("AKN", call, 0); | ||
926 | goto out; | ||
927 | } | ||
899 | 928 | ||
900 | if (after(hard_ack, call->tx_hard_ack)) | 929 | if (after(hard_ack, call->tx_hard_ack)) { |
901 | rxrpc_rotate_tx_window(call, hard_ack, &summary); | 930 | if (rxrpc_rotate_tx_window(call, hard_ack, &summary)) { |
931 | rxrpc_end_tx_phase(call, false, "ETA"); | ||
932 | goto out; | ||
933 | } | ||
934 | } | ||
902 | 935 | ||
903 | if (nr_acks > 0) { | 936 | if (nr_acks > 0) { |
904 | if (skb_copy_bits(skb, offset, buf.acks, nr_acks) < 0) | 937 | if (skb_copy_bits(skb, offset, buf.acks, nr_acks) < 0) { |
905 | return rxrpc_proto_abort("XSA", call, 0); | 938 | rxrpc_proto_abort("XSA", call, 0); |
939 | goto out; | ||
940 | } | ||
906 | rxrpc_input_soft_acks(call, buf.acks, first_soft_ack, nr_acks, | 941 | rxrpc_input_soft_acks(call, buf.acks, first_soft_ack, nr_acks, |
907 | &summary); | 942 | &summary); |
908 | } | 943 | } |
909 | 944 | ||
910 | if (test_bit(RXRPC_CALL_TX_LAST, &call->flags)) { | ||
911 | rxrpc_end_tx_phase(call, false, "ETA"); | ||
912 | return; | ||
913 | } | ||
914 | |||
915 | if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] & | 945 | if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] & |
916 | RXRPC_TX_ANNO_LAST && | 946 | RXRPC_TX_ANNO_LAST && |
917 | summary.nr_acks == call->tx_top - hard_ack && | 947 | summary.nr_acks == call->tx_top - hard_ack && |
@@ -920,7 +950,9 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb, | |||
920 | false, true, | 950 | false, true, |
921 | rxrpc_propose_ack_ping_for_lost_reply); | 951 | rxrpc_propose_ack_ping_for_lost_reply); |
922 | 952 | ||
923 | return rxrpc_congestion_management(call, skb, &summary, acked_serial); | 953 | rxrpc_congestion_management(call, skb, &summary, acked_serial); |
954 | out: | ||
955 | spin_unlock(&call->input_lock); | ||
924 | } | 956 | } |
925 | 957 | ||
926 | /* | 958 | /* |
@@ -933,9 +965,12 @@ static void rxrpc_input_ackall(struct rxrpc_call *call, struct sk_buff *skb) | |||
933 | 965 | ||
934 | _proto("Rx ACKALL %%%u", sp->hdr.serial); | 966 | _proto("Rx ACKALL %%%u", sp->hdr.serial); |
935 | 967 | ||
936 | rxrpc_rotate_tx_window(call, call->tx_top, &summary); | 968 | spin_lock(&call->input_lock); |
937 | if (test_bit(RXRPC_CALL_TX_LAST, &call->flags)) | 969 | |
970 | if (rxrpc_rotate_tx_window(call, call->tx_top, &summary)) | ||
938 | rxrpc_end_tx_phase(call, false, "ETL"); | 971 | rxrpc_end_tx_phase(call, false, "ETL"); |
972 | |||
973 | spin_unlock(&call->input_lock); | ||
939 | } | 974 | } |
940 | 975 | ||
941 | /* | 976 | /* |
@@ -1018,18 +1053,19 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call, | |||
1018 | } | 1053 | } |
1019 | 1054 | ||
1020 | /* | 1055 | /* |
1021 | * Handle a new call on a channel implicitly completing the preceding call on | 1056 | * Handle a new service call on a channel implicitly completing the preceding |
1022 | * that channel. | 1057 | * call on that channel. This does not apply to client conns. |
1023 | * | 1058 | * |
1024 | * TODO: If callNumber > call_id + 1, renegotiate security. | 1059 | * TODO: If callNumber > call_id + 1, renegotiate security. |
1025 | */ | 1060 | */ |
1026 | static void rxrpc_input_implicit_end_call(struct rxrpc_connection *conn, | 1061 | static void rxrpc_input_implicit_end_call(struct rxrpc_sock *rx, |
1062 | struct rxrpc_connection *conn, | ||
1027 | struct rxrpc_call *call) | 1063 | struct rxrpc_call *call) |
1028 | { | 1064 | { |
1029 | switch (READ_ONCE(call->state)) { | 1065 | switch (READ_ONCE(call->state)) { |
1030 | case RXRPC_CALL_SERVER_AWAIT_ACK: | 1066 | case RXRPC_CALL_SERVER_AWAIT_ACK: |
1031 | rxrpc_call_completed(call); | 1067 | rxrpc_call_completed(call); |
1032 | break; | 1068 | /* Fall through */ |
1033 | case RXRPC_CALL_COMPLETE: | 1069 | case RXRPC_CALL_COMPLETE: |
1034 | break; | 1070 | break; |
1035 | default: | 1071 | default: |
@@ -1037,11 +1073,13 @@ static void rxrpc_input_implicit_end_call(struct rxrpc_connection *conn, | |||
1037 | set_bit(RXRPC_CALL_EV_ABORT, &call->events); | 1073 | set_bit(RXRPC_CALL_EV_ABORT, &call->events); |
1038 | rxrpc_queue_call(call); | 1074 | rxrpc_queue_call(call); |
1039 | } | 1075 | } |
1076 | trace_rxrpc_improper_term(call); | ||
1040 | break; | 1077 | break; |
1041 | } | 1078 | } |
1042 | 1079 | ||
1043 | trace_rxrpc_improper_term(call); | 1080 | spin_lock(&rx->incoming_lock); |
1044 | __rxrpc_disconnect_call(conn, call); | 1081 | __rxrpc_disconnect_call(conn, call); |
1082 | spin_unlock(&rx->incoming_lock); | ||
1045 | rxrpc_notify_socket(call); | 1083 | rxrpc_notify_socket(call); |
1046 | } | 1084 | } |
1047 | 1085 | ||
@@ -1120,8 +1158,10 @@ int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb) | |||
1120 | * The socket is locked by the caller and this prevents the socket from being | 1158 | * The socket is locked by the caller and this prevents the socket from being |
1121 | * shut down and the local endpoint from going away, thus sk_user_data will not | 1159 | * shut down and the local endpoint from going away, thus sk_user_data will not |
1122 | * be cleared until this function returns. | 1160 | * be cleared until this function returns. |
1161 | * | ||
1162 | * Called with the RCU read lock held from the IP layer via UDP. | ||
1123 | */ | 1163 | */ |
1124 | void rxrpc_data_ready(struct sock *udp_sk) | 1164 | int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb) |
1125 | { | 1165 | { |
1126 | struct rxrpc_connection *conn; | 1166 | struct rxrpc_connection *conn; |
1127 | struct rxrpc_channel *chan; | 1167 | struct rxrpc_channel *chan; |
@@ -1130,38 +1170,17 @@ void rxrpc_data_ready(struct sock *udp_sk) | |||
1130 | struct rxrpc_local *local = udp_sk->sk_user_data; | 1170 | struct rxrpc_local *local = udp_sk->sk_user_data; |
1131 | struct rxrpc_peer *peer = NULL; | 1171 | struct rxrpc_peer *peer = NULL; |
1132 | struct rxrpc_sock *rx = NULL; | 1172 | struct rxrpc_sock *rx = NULL; |
1133 | struct sk_buff *skb; | ||
1134 | unsigned int channel; | 1173 | unsigned int channel; |
1135 | int ret, skew = 0; | 1174 | int skew = 0; |
1136 | 1175 | ||
1137 | _enter("%p", udp_sk); | 1176 | _enter("%p", udp_sk); |
1138 | 1177 | ||
1139 | ASSERT(!irqs_disabled()); | ||
1140 | |||
1141 | skb = skb_recv_udp(udp_sk, 0, 1, &ret); | ||
1142 | if (!skb) { | ||
1143 | if (ret == -EAGAIN) | ||
1144 | return; | ||
1145 | _debug("UDP socket error %d", ret); | ||
1146 | return; | ||
1147 | } | ||
1148 | |||
1149 | if (skb->tstamp == 0) | 1178 | if (skb->tstamp == 0) |
1150 | skb->tstamp = ktime_get_real(); | 1179 | skb->tstamp = ktime_get_real(); |
1151 | 1180 | ||
1152 | rxrpc_new_skb(skb, rxrpc_skb_rx_received); | 1181 | rxrpc_new_skb(skb, rxrpc_skb_rx_received); |
1153 | 1182 | ||
1154 | _net("recv skb %p", skb); | 1183 | skb_pull(skb, sizeof(struct udphdr)); |
1155 | |||
1156 | /* we'll probably need to checksum it (didn't call sock_recvmsg) */ | ||
1157 | if (skb_checksum_complete(skb)) { | ||
1158 | rxrpc_free_skb(skb, rxrpc_skb_rx_freed); | ||
1159 | __UDP_INC_STATS(&init_net, UDP_MIB_INERRORS, 0); | ||
1160 | _leave(" [CSUM failed]"); | ||
1161 | return; | ||
1162 | } | ||
1163 | |||
1164 | __UDP_INC_STATS(&init_net, UDP_MIB_INDATAGRAMS, 0); | ||
1165 | 1184 | ||
1166 | /* The UDP protocol already released all skb resources; | 1185 | /* The UDP protocol already released all skb resources; |
1167 | * we are free to add our own data there. | 1186 | * we are free to add our own data there. |
@@ -1177,10 +1196,12 @@ void rxrpc_data_ready(struct sock *udp_sk) | |||
1177 | if ((lose++ & 7) == 7) { | 1196 | if ((lose++ & 7) == 7) { |
1178 | trace_rxrpc_rx_lose(sp); | 1197 | trace_rxrpc_rx_lose(sp); |
1179 | rxrpc_free_skb(skb, rxrpc_skb_rx_lost); | 1198 | rxrpc_free_skb(skb, rxrpc_skb_rx_lost); |
1180 | return; | 1199 | return 0; |
1181 | } | 1200 | } |
1182 | } | 1201 | } |
1183 | 1202 | ||
1203 | if (skb->tstamp == 0) | ||
1204 | skb->tstamp = ktime_get_real(); | ||
1184 | trace_rxrpc_rx_packet(sp); | 1205 | trace_rxrpc_rx_packet(sp); |
1185 | 1206 | ||
1186 | switch (sp->hdr.type) { | 1207 | switch (sp->hdr.type) { |
@@ -1234,8 +1255,6 @@ void rxrpc_data_ready(struct sock *udp_sk) | |||
1234 | if (sp->hdr.serviceId == 0) | 1255 | if (sp->hdr.serviceId == 0) |
1235 | goto bad_message; | 1256 | goto bad_message; |
1236 | 1257 | ||
1237 | rcu_read_lock(); | ||
1238 | |||
1239 | if (rxrpc_to_server(sp)) { | 1258 | if (rxrpc_to_server(sp)) { |
1240 | /* Weed out packets to services we're not offering. Packets | 1259 | /* Weed out packets to services we're not offering. Packets |
1241 | * that would begin a call are explicitly rejected and the rest | 1260 | * that would begin a call are explicitly rejected and the rest |
@@ -1247,7 +1266,7 @@ void rxrpc_data_ready(struct sock *udp_sk) | |||
1247 | if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA && | 1266 | if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA && |
1248 | sp->hdr.seq == 1) | 1267 | sp->hdr.seq == 1) |
1249 | goto unsupported_service; | 1268 | goto unsupported_service; |
1250 | goto discard_unlock; | 1269 | goto discard; |
1251 | } | 1270 | } |
1252 | } | 1271 | } |
1253 | 1272 | ||
@@ -1257,17 +1276,23 @@ void rxrpc_data_ready(struct sock *udp_sk) | |||
1257 | goto wrong_security; | 1276 | goto wrong_security; |
1258 | 1277 | ||
1259 | if (sp->hdr.serviceId != conn->service_id) { | 1278 | if (sp->hdr.serviceId != conn->service_id) { |
1260 | if (!test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags) || | 1279 | int old_id; |
1261 | conn->service_id != conn->params.service_id) | 1280 | |
1281 | if (!test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags)) | ||
1282 | goto reupgrade; | ||
1283 | old_id = cmpxchg(&conn->service_id, conn->params.service_id, | ||
1284 | sp->hdr.serviceId); | ||
1285 | |||
1286 | if (old_id != conn->params.service_id && | ||
1287 | old_id != sp->hdr.serviceId) | ||
1262 | goto reupgrade; | 1288 | goto reupgrade; |
1263 | conn->service_id = sp->hdr.serviceId; | ||
1264 | } | 1289 | } |
1265 | 1290 | ||
1266 | if (sp->hdr.callNumber == 0) { | 1291 | if (sp->hdr.callNumber == 0) { |
1267 | /* Connection-level packet */ | 1292 | /* Connection-level packet */ |
1268 | _debug("CONN %p {%d}", conn, conn->debug_id); | 1293 | _debug("CONN %p {%d}", conn, conn->debug_id); |
1269 | rxrpc_post_packet_to_conn(conn, skb); | 1294 | rxrpc_post_packet_to_conn(conn, skb); |
1270 | goto out_unlock; | 1295 | goto out; |
1271 | } | 1296 | } |
1272 | 1297 | ||
1273 | /* Note the serial number skew here */ | 1298 | /* Note the serial number skew here */ |
@@ -1286,19 +1311,19 @@ void rxrpc_data_ready(struct sock *udp_sk) | |||
1286 | 1311 | ||
1287 | /* Ignore really old calls */ | 1312 | /* Ignore really old calls */ |
1288 | if (sp->hdr.callNumber < chan->last_call) | 1313 | if (sp->hdr.callNumber < chan->last_call) |
1289 | goto discard_unlock; | 1314 | goto discard; |
1290 | 1315 | ||
1291 | if (sp->hdr.callNumber == chan->last_call) { | 1316 | if (sp->hdr.callNumber == chan->last_call) { |
1292 | if (chan->call || | 1317 | if (chan->call || |
1293 | sp->hdr.type == RXRPC_PACKET_TYPE_ABORT) | 1318 | sp->hdr.type == RXRPC_PACKET_TYPE_ABORT) |
1294 | goto discard_unlock; | 1319 | goto discard; |
1295 | 1320 | ||
1296 | /* For the previous service call, if completed | 1321 | /* For the previous service call, if completed |
1297 | * successfully, we discard all further packets. | 1322 | * successfully, we discard all further packets. |
1298 | */ | 1323 | */ |
1299 | if (rxrpc_conn_is_service(conn) && | 1324 | if (rxrpc_conn_is_service(conn) && |
1300 | chan->last_type == RXRPC_PACKET_TYPE_ACK) | 1325 | chan->last_type == RXRPC_PACKET_TYPE_ACK) |
1301 | goto discard_unlock; | 1326 | goto discard; |
1302 | 1327 | ||
1303 | /* But otherwise we need to retransmit the final packet | 1328 | /* But otherwise we need to retransmit the final packet |
1304 | * from data cached in the connection record. | 1329 | * from data cached in the connection record. |
@@ -1309,18 +1334,16 @@ void rxrpc_data_ready(struct sock *udp_sk) | |||
1309 | sp->hdr.serial, | 1334 | sp->hdr.serial, |
1310 | sp->hdr.flags, 0); | 1335 | sp->hdr.flags, 0); |
1311 | rxrpc_post_packet_to_conn(conn, skb); | 1336 | rxrpc_post_packet_to_conn(conn, skb); |
1312 | goto out_unlock; | 1337 | goto out; |
1313 | } | 1338 | } |
1314 | 1339 | ||
1315 | call = rcu_dereference(chan->call); | 1340 | call = rcu_dereference(chan->call); |
1316 | 1341 | ||
1317 | if (sp->hdr.callNumber > chan->call_id) { | 1342 | if (sp->hdr.callNumber > chan->call_id) { |
1318 | if (rxrpc_to_client(sp)) { | 1343 | if (rxrpc_to_client(sp)) |
1319 | rcu_read_unlock(); | ||
1320 | goto reject_packet; | 1344 | goto reject_packet; |
1321 | } | ||
1322 | if (call) | 1345 | if (call) |
1323 | rxrpc_input_implicit_end_call(conn, call); | 1346 | rxrpc_input_implicit_end_call(rx, conn, call); |
1324 | call = NULL; | 1347 | call = NULL; |
1325 | } | 1348 | } |
1326 | 1349 | ||
@@ -1337,55 +1360,42 @@ void rxrpc_data_ready(struct sock *udp_sk) | |||
1337 | if (!call || atomic_read(&call->usage) == 0) { | 1360 | if (!call || atomic_read(&call->usage) == 0) { |
1338 | if (rxrpc_to_client(sp) || | 1361 | if (rxrpc_to_client(sp) || |
1339 | sp->hdr.type != RXRPC_PACKET_TYPE_DATA) | 1362 | sp->hdr.type != RXRPC_PACKET_TYPE_DATA) |
1340 | goto bad_message_unlock; | 1363 | goto bad_message; |
1341 | if (sp->hdr.seq != 1) | 1364 | if (sp->hdr.seq != 1) |
1342 | goto discard_unlock; | 1365 | goto discard; |
1343 | call = rxrpc_new_incoming_call(local, rx, peer, conn, skb); | 1366 | call = rxrpc_new_incoming_call(local, rx, skb); |
1344 | if (!call) { | 1367 | if (!call) |
1345 | rcu_read_unlock(); | ||
1346 | goto reject_packet; | 1368 | goto reject_packet; |
1347 | } | ||
1348 | rxrpc_send_ping(call, skb, skew); | 1369 | rxrpc_send_ping(call, skb, skew); |
1349 | mutex_unlock(&call->user_mutex); | 1370 | mutex_unlock(&call->user_mutex); |
1350 | } | 1371 | } |
1351 | 1372 | ||
1352 | rxrpc_input_call_packet(call, skb, skew); | 1373 | rxrpc_input_call_packet(call, skb, skew); |
1353 | goto discard_unlock; | 1374 | goto discard; |
1354 | 1375 | ||
1355 | discard_unlock: | ||
1356 | rcu_read_unlock(); | ||
1357 | discard: | 1376 | discard: |
1358 | rxrpc_free_skb(skb, rxrpc_skb_rx_freed); | 1377 | rxrpc_free_skb(skb, rxrpc_skb_rx_freed); |
1359 | out: | 1378 | out: |
1360 | trace_rxrpc_rx_done(0, 0); | 1379 | trace_rxrpc_rx_done(0, 0); |
1361 | return; | 1380 | return 0; |
1362 | |||
1363 | out_unlock: | ||
1364 | rcu_read_unlock(); | ||
1365 | goto out; | ||
1366 | 1381 | ||
1367 | wrong_security: | 1382 | wrong_security: |
1368 | rcu_read_unlock(); | ||
1369 | trace_rxrpc_abort(0, "SEC", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, | 1383 | trace_rxrpc_abort(0, "SEC", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, |
1370 | RXKADINCONSISTENCY, EBADMSG); | 1384 | RXKADINCONSISTENCY, EBADMSG); |
1371 | skb->priority = RXKADINCONSISTENCY; | 1385 | skb->priority = RXKADINCONSISTENCY; |
1372 | goto post_abort; | 1386 | goto post_abort; |
1373 | 1387 | ||
1374 | unsupported_service: | 1388 | unsupported_service: |
1375 | rcu_read_unlock(); | ||
1376 | trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, | 1389 | trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, |
1377 | RX_INVALID_OPERATION, EOPNOTSUPP); | 1390 | RX_INVALID_OPERATION, EOPNOTSUPP); |
1378 | skb->priority = RX_INVALID_OPERATION; | 1391 | skb->priority = RX_INVALID_OPERATION; |
1379 | goto post_abort; | 1392 | goto post_abort; |
1380 | 1393 | ||
1381 | reupgrade: | 1394 | reupgrade: |
1382 | rcu_read_unlock(); | ||
1383 | trace_rxrpc_abort(0, "UPG", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, | 1395 | trace_rxrpc_abort(0, "UPG", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, |
1384 | RX_PROTOCOL_ERROR, EBADMSG); | 1396 | RX_PROTOCOL_ERROR, EBADMSG); |
1385 | goto protocol_error; | 1397 | goto protocol_error; |
1386 | 1398 | ||
1387 | bad_message_unlock: | ||
1388 | rcu_read_unlock(); | ||
1389 | bad_message: | 1399 | bad_message: |
1390 | trace_rxrpc_abort(0, "BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, | 1400 | trace_rxrpc_abort(0, "BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, |
1391 | RX_PROTOCOL_ERROR, EBADMSG); | 1401 | RX_PROTOCOL_ERROR, EBADMSG); |
@@ -1397,4 +1407,5 @@ reject_packet: | |||
1397 | trace_rxrpc_rx_done(skb->mark, skb->priority); | 1407 | trace_rxrpc_rx_done(skb->mark, skb->priority); |
1398 | rxrpc_reject_packet(local, skb); | 1408 | rxrpc_reject_packet(local, skb); |
1399 | _leave(" [badmsg]"); | 1409 | _leave(" [badmsg]"); |
1410 | return 0; | ||
1400 | } | 1411 | } |
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c index 94d234e9c685..cad0691c2bb4 100644 --- a/net/rxrpc/local_object.c +++ b/net/rxrpc/local_object.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/ip.h> | 19 | #include <linux/ip.h> |
20 | #include <linux/hashtable.h> | 20 | #include <linux/hashtable.h> |
21 | #include <net/sock.h> | 21 | #include <net/sock.h> |
22 | #include <net/udp.h> | ||
22 | #include <net/af_rxrpc.h> | 23 | #include <net/af_rxrpc.h> |
23 | #include "ar-internal.h" | 24 | #include "ar-internal.h" |
24 | 25 | ||
@@ -108,7 +109,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet, | |||
108 | */ | 109 | */ |
109 | static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net) | 110 | static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net) |
110 | { | 111 | { |
111 | struct sock *sock; | 112 | struct sock *usk; |
112 | int ret, opt; | 113 | int ret, opt; |
113 | 114 | ||
114 | _enter("%p{%d,%d}", | 115 | _enter("%p{%d,%d}", |
@@ -122,6 +123,28 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net) | |||
122 | return ret; | 123 | return ret; |
123 | } | 124 | } |
124 | 125 | ||
126 | /* set the socket up */ | ||
127 | usk = local->socket->sk; | ||
128 | inet_sk(usk)->mc_loop = 0; | ||
129 | |||
130 | /* Enable CHECKSUM_UNNECESSARY to CHECKSUM_COMPLETE conversion */ | ||
131 | inet_inc_convert_csum(usk); | ||
132 | |||
133 | rcu_assign_sk_user_data(usk, local); | ||
134 | |||
135 | udp_sk(usk)->encap_type = UDP_ENCAP_RXRPC; | ||
136 | udp_sk(usk)->encap_rcv = rxrpc_input_packet; | ||
137 | udp_sk(usk)->encap_destroy = NULL; | ||
138 | udp_sk(usk)->gro_receive = NULL; | ||
139 | udp_sk(usk)->gro_complete = NULL; | ||
140 | |||
141 | udp_encap_enable(); | ||
142 | #if IS_ENABLED(CONFIG_IPV6) | ||
143 | if (local->srx.transport.family == AF_INET6) | ||
144 | udpv6_encap_enable(); | ||
145 | #endif | ||
146 | usk->sk_error_report = rxrpc_error_report; | ||
147 | |||
125 | /* if a local address was supplied then bind it */ | 148 | /* if a local address was supplied then bind it */ |
126 | if (local->srx.transport_len > sizeof(sa_family_t)) { | 149 | if (local->srx.transport_len > sizeof(sa_family_t)) { |
127 | _debug("bind"); | 150 | _debug("bind"); |
@@ -191,11 +214,6 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net) | |||
191 | BUG(); | 214 | BUG(); |
192 | } | 215 | } |
193 | 216 | ||
194 | /* set the socket up */ | ||
195 | sock = local->socket->sk; | ||
196 | sock->sk_user_data = local; | ||
197 | sock->sk_data_ready = rxrpc_data_ready; | ||
198 | sock->sk_error_report = rxrpc_error_report; | ||
199 | _leave(" = 0"); | 217 | _leave(" = 0"); |
200 | return 0; | 218 | return 0; |
201 | 219 | ||
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c index 81a7869325a6..7feb611c7258 100644 --- a/net/rxrpc/peer_event.c +++ b/net/rxrpc/peer_event.c | |||
@@ -303,6 +303,8 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why, | |||
303 | if (rtt < 0) | 303 | if (rtt < 0) |
304 | return; | 304 | return; |
305 | 305 | ||
306 | spin_lock(&peer->rtt_input_lock); | ||
307 | |||
306 | /* Replace the oldest datum in the RTT buffer */ | 308 | /* Replace the oldest datum in the RTT buffer */ |
307 | sum -= peer->rtt_cache[cursor]; | 309 | sum -= peer->rtt_cache[cursor]; |
308 | sum += rtt; | 310 | sum += rtt; |
@@ -314,6 +316,8 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why, | |||
314 | peer->rtt_usage = usage; | 316 | peer->rtt_usage = usage; |
315 | } | 317 | } |
316 | 318 | ||
319 | spin_unlock(&peer->rtt_input_lock); | ||
320 | |||
317 | /* Now recalculate the average */ | 321 | /* Now recalculate the average */ |
318 | if (usage == RXRPC_RTT_CACHE_SIZE) { | 322 | if (usage == RXRPC_RTT_CACHE_SIZE) { |
319 | avg = sum / RXRPC_RTT_CACHE_SIZE; | 323 | avg = sum / RXRPC_RTT_CACHE_SIZE; |
@@ -322,6 +326,7 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why, | |||
322 | do_div(avg, usage); | 326 | do_div(avg, usage); |
323 | } | 327 | } |
324 | 328 | ||
329 | /* Don't need to update this under lock */ | ||
325 | peer->rtt = avg; | 330 | peer->rtt = avg; |
326 | trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial, rtt, | 331 | trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial, rtt, |
327 | usage, avg); | 332 | usage, avg); |
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c index 01a9febfa367..5691b7d266ca 100644 --- a/net/rxrpc/peer_object.c +++ b/net/rxrpc/peer_object.c | |||
@@ -153,8 +153,10 @@ struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *local, | |||
153 | * assess the MTU size for the network interface through which this peer is | 153 | * assess the MTU size for the network interface through which this peer is |
154 | * reached | 154 | * reached |
155 | */ | 155 | */ |
156 | static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer) | 156 | static void rxrpc_assess_MTU_size(struct rxrpc_sock *rx, |
157 | struct rxrpc_peer *peer) | ||
157 | { | 158 | { |
159 | struct net *net = sock_net(&rx->sk); | ||
158 | struct dst_entry *dst; | 160 | struct dst_entry *dst; |
159 | struct rtable *rt; | 161 | struct rtable *rt; |
160 | struct flowi fl; | 162 | struct flowi fl; |
@@ -169,7 +171,7 @@ static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer) | |||
169 | switch (peer->srx.transport.family) { | 171 | switch (peer->srx.transport.family) { |
170 | case AF_INET: | 172 | case AF_INET: |
171 | rt = ip_route_output_ports( | 173 | rt = ip_route_output_ports( |
172 | &init_net, fl4, NULL, | 174 | net, fl4, NULL, |
173 | peer->srx.transport.sin.sin_addr.s_addr, 0, | 175 | peer->srx.transport.sin.sin_addr.s_addr, 0, |
174 | htons(7000), htons(7001), IPPROTO_UDP, 0, 0); | 176 | htons(7000), htons(7001), IPPROTO_UDP, 0, 0); |
175 | if (IS_ERR(rt)) { | 177 | if (IS_ERR(rt)) { |
@@ -188,7 +190,7 @@ static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer) | |||
188 | sizeof(struct in6_addr)); | 190 | sizeof(struct in6_addr)); |
189 | fl6->fl6_dport = htons(7001); | 191 | fl6->fl6_dport = htons(7001); |
190 | fl6->fl6_sport = htons(7000); | 192 | fl6->fl6_sport = htons(7000); |
191 | dst = ip6_route_output(&init_net, NULL, fl6); | 193 | dst = ip6_route_output(net, NULL, fl6); |
192 | if (dst->error) { | 194 | if (dst->error) { |
193 | _leave(" [route err %d]", dst->error); | 195 | _leave(" [route err %d]", dst->error); |
194 | return; | 196 | return; |
@@ -223,6 +225,7 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp) | |||
223 | peer->service_conns = RB_ROOT; | 225 | peer->service_conns = RB_ROOT; |
224 | seqlock_init(&peer->service_conn_lock); | 226 | seqlock_init(&peer->service_conn_lock); |
225 | spin_lock_init(&peer->lock); | 227 | spin_lock_init(&peer->lock); |
228 | spin_lock_init(&peer->rtt_input_lock); | ||
226 | peer->debug_id = atomic_inc_return(&rxrpc_debug_id); | 229 | peer->debug_id = atomic_inc_return(&rxrpc_debug_id); |
227 | 230 | ||
228 | if (RXRPC_TX_SMSS > 2190) | 231 | if (RXRPC_TX_SMSS > 2190) |
@@ -240,10 +243,11 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp) | |||
240 | /* | 243 | /* |
241 | * Initialise peer record. | 244 | * Initialise peer record. |
242 | */ | 245 | */ |
243 | static void rxrpc_init_peer(struct rxrpc_peer *peer, unsigned long hash_key) | 246 | static void rxrpc_init_peer(struct rxrpc_sock *rx, struct rxrpc_peer *peer, |
247 | unsigned long hash_key) | ||
244 | { | 248 | { |
245 | peer->hash_key = hash_key; | 249 | peer->hash_key = hash_key; |
246 | rxrpc_assess_MTU_size(peer); | 250 | rxrpc_assess_MTU_size(rx, peer); |
247 | peer->mtu = peer->if_mtu; | 251 | peer->mtu = peer->if_mtu; |
248 | peer->rtt_last_req = ktime_get_real(); | 252 | peer->rtt_last_req = ktime_get_real(); |
249 | 253 | ||
@@ -275,7 +279,8 @@ static void rxrpc_init_peer(struct rxrpc_peer *peer, unsigned long hash_key) | |||
275 | /* | 279 | /* |
276 | * Set up a new peer. | 280 | * Set up a new peer. |
277 | */ | 281 | */ |
278 | static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local, | 282 | static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_sock *rx, |
283 | struct rxrpc_local *local, | ||
279 | struct sockaddr_rxrpc *srx, | 284 | struct sockaddr_rxrpc *srx, |
280 | unsigned long hash_key, | 285 | unsigned long hash_key, |
281 | gfp_t gfp) | 286 | gfp_t gfp) |
@@ -287,7 +292,7 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local, | |||
287 | peer = rxrpc_alloc_peer(local, gfp); | 292 | peer = rxrpc_alloc_peer(local, gfp); |
288 | if (peer) { | 293 | if (peer) { |
289 | memcpy(&peer->srx, srx, sizeof(*srx)); | 294 | memcpy(&peer->srx, srx, sizeof(*srx)); |
290 | rxrpc_init_peer(peer, hash_key); | 295 | rxrpc_init_peer(rx, peer, hash_key); |
291 | } | 296 | } |
292 | 297 | ||
293 | _leave(" = %p", peer); | 298 | _leave(" = %p", peer); |
@@ -299,14 +304,15 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local, | |||
299 | * since we've already done a search in the list from the non-reentrant context | 304 | * since we've already done a search in the list from the non-reentrant context |
300 | * (the data_ready handler) that is the only place we can add new peers. | 305 | * (the data_ready handler) that is the only place we can add new peers. |
301 | */ | 306 | */ |
302 | void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer) | 307 | void rxrpc_new_incoming_peer(struct rxrpc_sock *rx, struct rxrpc_local *local, |
308 | struct rxrpc_peer *peer) | ||
303 | { | 309 | { |
304 | struct rxrpc_net *rxnet = local->rxnet; | 310 | struct rxrpc_net *rxnet = local->rxnet; |
305 | unsigned long hash_key; | 311 | unsigned long hash_key; |
306 | 312 | ||
307 | hash_key = rxrpc_peer_hash_key(local, &peer->srx); | 313 | hash_key = rxrpc_peer_hash_key(local, &peer->srx); |
308 | peer->local = local; | 314 | peer->local = local; |
309 | rxrpc_init_peer(peer, hash_key); | 315 | rxrpc_init_peer(rx, peer, hash_key); |
310 | 316 | ||
311 | spin_lock(&rxnet->peer_hash_lock); | 317 | spin_lock(&rxnet->peer_hash_lock); |
312 | hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key); | 318 | hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key); |
@@ -317,7 +323,8 @@ void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer) | |||
317 | /* | 323 | /* |
318 | * obtain a remote transport endpoint for the specified address | 324 | * obtain a remote transport endpoint for the specified address |
319 | */ | 325 | */ |
320 | struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local, | 326 | struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx, |
327 | struct rxrpc_local *local, | ||
321 | struct sockaddr_rxrpc *srx, gfp_t gfp) | 328 | struct sockaddr_rxrpc *srx, gfp_t gfp) |
322 | { | 329 | { |
323 | struct rxrpc_peer *peer, *candidate; | 330 | struct rxrpc_peer *peer, *candidate; |
@@ -337,7 +344,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local, | |||
337 | /* The peer is not yet present in hash - create a candidate | 344 | /* The peer is not yet present in hash - create a candidate |
338 | * for a new record and then redo the search. | 345 | * for a new record and then redo the search. |
339 | */ | 346 | */ |
340 | candidate = rxrpc_create_peer(local, srx, hash_key, gfp); | 347 | candidate = rxrpc_create_peer(rx, local, srx, hash_key, gfp); |
341 | if (!candidate) { | 348 | if (!candidate) { |
342 | _leave(" = NULL [nomem]"); | 349 | _leave(" = NULL [nomem]"); |
343 | return NULL; | 350 | return NULL; |