Diffstat (limited to 'net/rxrpc')
-rw-r--r--  net/rxrpc/af_rxrpc.c     |   9
-rw-r--r--  net/rxrpc/ar-internal.h  |  26
-rw-r--r--  net/rxrpc/call_event.c   |  23
-rw-r--r--  net/rxrpc/call_object.c  |  33
-rw-r--r--  net/rxrpc/conn_client.c  |  44
-rw-r--r--  net/rxrpc/conn_event.c   |   6
-rw-r--r--  net/rxrpc/conn_object.c  |   2
-rw-r--r--  net/rxrpc/input.c        | 359
-rw-r--r--  net/rxrpc/local_event.c  |   4
-rw-r--r--  net/rxrpc/local_object.c | 104
-rw-r--r--  net/rxrpc/output.c       |   9
-rw-r--r--  net/rxrpc/peer_event.c   |  12
-rw-r--r--  net/rxrpc/peer_object.c  |  18
-rw-r--r--  net/rxrpc/protocol.h     |   9
-rw-r--r--  net/rxrpc/recvmsg.c      |  53
-rw-r--r--  net/rxrpc/rxkad.c        |  32
-rw-r--r--  net/rxrpc/sendmsg.c      |  14
-rw-r--r--  net/rxrpc/skbuff.c       |  40
18 files changed, 468 insertions, 329 deletions
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index d09eaf153544..d72ddb67bb74 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -193,7 +193,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
 
 service_in_use:
 	write_unlock(&local->services_lock);
-	rxrpc_put_local(local);
+	rxrpc_unuse_local(local);
 	ret = -EADDRINUSE;
 error_unlock:
 	release_sock(&rx->sk);
@@ -402,7 +402,7 @@ EXPORT_SYMBOL(rxrpc_kernel_check_life);
  */
 void rxrpc_kernel_probe_life(struct socket *sock, struct rxrpc_call *call)
 {
-	rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
+	rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false,
			  rxrpc_propose_ack_ping_for_check_life);
 	rxrpc_send_ack_packet(call, true, NULL);
 }
@@ -862,7 +862,6 @@ static void rxrpc_sock_destructor(struct sock *sk)
 static int rxrpc_release_sock(struct sock *sk)
 {
 	struct rxrpc_sock *rx = rxrpc_sk(sk);
-	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
 
 	_enter("%p{%d,%d}", sk, sk->sk_state, refcount_read(&sk->sk_refcnt));
 
@@ -898,10 +897,8 @@ static int rxrpc_release_sock(struct sock *sk)
 	rxrpc_release_calls_on_socket(rx);
 	flush_workqueue(rxrpc_workqueue);
 	rxrpc_purge_queue(&sk->sk_receive_queue);
-	rxrpc_queue_work(&rxnet->service_conn_reaper);
-	rxrpc_queue_work(&rxnet->client_conn_reaper);
 
-	rxrpc_put_local(rx->local);
+	rxrpc_unuse_local(rx->local);
 	rx->local = NULL;
 	key_put(rx->key);
 	rx->key = NULL;
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 80335b4ee4fd..8051dfdcf26d 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -185,11 +185,17 @@ struct rxrpc_host_header {
  * - max 48 bytes (struct sk_buff::cb)
  */
 struct rxrpc_skb_priv {
-	union {
-		u8	nr_jumbo;	/* Number of jumbo subpackets */
-	};
+	atomic_t	nr_ring_pins;	/* Number of rxtx ring pins */
+	u8		nr_subpackets;	/* Number of subpackets */
+	u8		rx_flags;	/* Received packet flags */
+#define RXRPC_SKB_INCL_LAST	0x01	/* - Includes last packet */
+#define RXRPC_SKB_TX_BUFFER	0x02	/* - Is transmit buffer */
 	union {
 		int		remain;		/* amount of space remaining for next write */
+
+		/* List of requested ACKs on subpackets */
+		unsigned long	rx_req_ack[(RXRPC_MAX_NR_JUMBO + BITS_PER_LONG - 1) /
+					   BITS_PER_LONG];
 	};
 
 	struct rxrpc_host_header hdr;	/* RxRPC packet header from this packet */
@@ -254,7 +260,8 @@ struct rxrpc_security {
  */
 struct rxrpc_local {
 	struct rcu_head	rcu;
-	atomic_t	usage;
+	atomic_t	active_users;	/* Number of users of the local endpoint */
+	atomic_t	usage;		/* Number of references to the structure */
 	struct rxrpc_net *rxnet;	/* The network ns in which this resides */
 	struct list_head link;
 	struct socket	*socket;	/* my UDP socket */
@@ -612,8 +619,7 @@ struct rxrpc_call {
#define RXRPC_TX_ANNO_LAST	0x04
#define RXRPC_TX_ANNO_RESENT	0x08
 
-#define RXRPC_RX_ANNO_JUMBO	0x3f		/* Jumbo subpacket number + 1 if not zero */
-#define RXRPC_RX_ANNO_JLAST	0x40		/* Set if last element of a jumbo packet */
+#define RXRPC_RX_ANNO_SUBPACKET	0x3f		/* Subpacket number in jumbogram */
#define RXRPC_RX_ANNO_VERIFIED	0x80		/* Set if verified and decrypted */
 	rxrpc_seq_t		tx_hard_ack;	/* Dead slot in buffer; the first transmitted but
 						 * not hard-ACK'd packet follows this.
@@ -649,7 +655,6 @@ struct rxrpc_call {
 
 	/* receive-phase ACK management */
 	u8			ackr_reason;	/* reason to ACK */
-	u16			ackr_skew;	/* skew on packet being ACK'd */
 	rxrpc_serial_t		ackr_serial;	/* serial of packet being ACK'd */
 	rxrpc_serial_t		ackr_first_seq;	/* first sequence number received */
 	rxrpc_seq_t		ackr_prev_seq;	/* previous sequence number received */
@@ -743,7 +748,7 @@ int rxrpc_reject_call(struct rxrpc_sock *);
/*
 * call_event.c
 */
-void rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool, bool,
+void rxrpc_propose_ACK(struct rxrpc_call *, u8, u32, bool, bool,
		       enum rxrpc_propose_ack_trace);
void rxrpc_process_call(struct work_struct *);
 
@@ -905,6 +910,7 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *);
void rxrpc_put_client_conn(struct rxrpc_connection *);
void rxrpc_discard_expired_client_conns(struct work_struct *);
void rxrpc_destroy_all_client_connections(struct rxrpc_net *);
+void rxrpc_clean_up_local_conns(struct rxrpc_local *);
 
/*
 * conn_event.c
@@ -1002,6 +1008,8 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *, const struct sockaddr_rxrpc
struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *);
struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *);
void rxrpc_put_local(struct rxrpc_local *);
+struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *);
+void rxrpc_unuse_local(struct rxrpc_local *);
void rxrpc_queue_local(struct rxrpc_local *);
void rxrpc_destroy_all_locals(struct rxrpc_net *);
 
@@ -1061,6 +1069,7 @@ void rxrpc_destroy_all_peers(struct rxrpc_net *);
struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *);
struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *);
void rxrpc_put_peer(struct rxrpc_peer *);
+void rxrpc_put_peer_locked(struct rxrpc_peer *);
 
/*
 * proc.c
@@ -1102,6 +1111,7 @@ void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *);
void rxrpc_packet_destructor(struct sk_buff *);
void rxrpc_new_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_see_skb(struct sk_buff *, enum rxrpc_skb_trace);
+void rxrpc_eaten_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_get_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_free_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_purge_queue(struct sk_buff_head *);
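[Editorial note: the ar-internal.h hunks above split the old single refcount on struct rxrpc_local into two counters: "usage" keeps the structure's memory alive, while "active_users" counts users of the open transport and gates the new rxrpc_use_local()/rxrpc_unuse_local() API. What follows is an illustrative, user-space sketch of that two-counter pattern only; the endpoint struct, function names and printf teardown are invented for the example, and the kernel instead uses atomic_t, atomic_fetch_add_unless() and call_rcu().]

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct endpoint {
	atomic_int usage;        /* references to the structure itself */
	atomic_int active_users; /* users of the open transport */
};

static struct endpoint *endpoint_get(struct endpoint *ep)
{
	atomic_fetch_add(&ep->usage, 1);
	return ep;
}

static void endpoint_put(struct endpoint *ep)
{
	/* Last structure ref gone: memory may be freed (kernel: via RCU). */
	if (atomic_fetch_sub(&ep->usage, 1) == 1)
		free(ep);
}

static struct endpoint *endpoint_use(struct endpoint *ep)
{
	/* Mirror atomic_fetch_add_unless(): only bump active_users while it
	 * is still non-zero, i.e. while the transport is still open.
	 */
	int au = atomic_load(&ep->active_users);

	do {
		if (au == 0)
			return NULL;
	} while (!atomic_compare_exchange_weak(&ep->active_users, &au, au + 1));
	return endpoint_get(ep);
}

static void endpoint_unuse(struct endpoint *ep)
{
	/* Simplified: the kernel instead queues work to close the transport
	 * and hands it the caller's structure ref.
	 */
	if (atomic_fetch_sub(&ep->active_users, 1) == 1)
		printf("last active user gone: shut the transport down\n");
	endpoint_put(ep);
}

int main(void)
{
	struct endpoint *ep = calloc(1, sizeof(*ep));

	atomic_init(&ep->usage, 1);        /* creator's structure ref */
	atomic_init(&ep->active_users, 1); /* creator is an active user */

	endpoint_use(ep);   /* a second opener */
	endpoint_unuse(ep); /* ...closes again */
	endpoint_unuse(ep); /* creator closes: transport shuts down, ref freed */
	return 0;
}

The point of the split is visible in the af_rxrpc.c hunks: release paths now call rxrpc_unuse_local() (stop using the transport) rather than rxrpc_put_local() (drop a memory ref), so the endpoint can be shut down exactly once, when its last active user goes away.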
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index bc2adeb3acb9..cedbbb3a7c2e 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -43,8 +43,7 @@ static void rxrpc_propose_ping(struct rxrpc_call *call,
 * propose an ACK be sent
 */
static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
-				u16 skew, u32 serial, bool immediate,
-				bool background,
+				u32 serial, bool immediate, bool background,
				enum rxrpc_propose_ack_trace why)
{
	enum rxrpc_propose_ack_outcome outcome = rxrpc_propose_ack_use;
@@ -69,14 +68,12 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
 		if (RXRPC_ACK_UPDATEABLE & (1 << ack_reason)) {
 			outcome = rxrpc_propose_ack_update;
 			call->ackr_serial = serial;
-			call->ackr_skew = skew;
 		}
 		if (!immediate)
 			goto trace;
 	} else if (prior > rxrpc_ack_priority[call->ackr_reason]) {
 		call->ackr_reason = ack_reason;
 		call->ackr_serial = serial;
-		call->ackr_skew = skew;
 	} else {
 		outcome = rxrpc_propose_ack_subsume;
 	}
@@ -137,11 +134,11 @@ trace:
 * propose an ACK be sent, locking the call structure
 */
void rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
-		       u16 skew, u32 serial, bool immediate, bool background,
+		       u32 serial, bool immediate, bool background,
		       enum rxrpc_propose_ack_trace why)
{
	spin_lock_bh(&call->lock);
-	__rxrpc_propose_ACK(call, ack_reason, skew, serial,
+	__rxrpc_propose_ACK(call, ack_reason, serial,
			    immediate, background, why);
	spin_unlock_bh(&call->lock);
}
@@ -202,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
 			continue;
 
 		skb = call->rxtx_buffer[ix];
-		rxrpc_see_skb(skb, rxrpc_skb_tx_seen);
+		rxrpc_see_skb(skb, rxrpc_skb_seen);
 
 		if (anno_type == RXRPC_TX_ANNO_UNACK) {
 			if (ktime_after(skb->tstamp, max_age)) {
@@ -239,7 +236,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
 		ack_ts = ktime_sub(now, call->acks_latest_ts);
 		if (ktime_to_ns(ack_ts) < call->peer->rtt)
 			goto out;
-		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
+		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false,
				  rxrpc_propose_ack_ping_for_lost_ack);
 		rxrpc_send_ack_packet(call, true, NULL);
 		goto out;
@@ -258,18 +255,18 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
 			continue;
 
 		skb = call->rxtx_buffer[ix];
-		rxrpc_get_skb(skb, rxrpc_skb_tx_got);
+		rxrpc_get_skb(skb, rxrpc_skb_got);
 		spin_unlock_bh(&call->lock);
 
 		if (rxrpc_send_data_packet(call, skb, true) < 0) {
-			rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
+			rxrpc_free_skb(skb, rxrpc_skb_freed);
 			return;
 		}
 
 		if (rxrpc_is_client_call(call))
 			rxrpc_expose_client_call(call);
 
-		rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
+		rxrpc_free_skb(skb, rxrpc_skb_freed);
 		spin_lock_bh(&call->lock);
 
 		/* We need to clear the retransmit state, but there are two
@@ -372,7 +369,7 @@ recheck_state:
 	if (time_after_eq(now, t)) {
 		trace_rxrpc_timer(call, rxrpc_timer_exp_keepalive, now);
 		cmpxchg(&call->keepalive_at, t, now + MAX_JIFFY_OFFSET);
-		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, true,
+		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, true,
				  rxrpc_propose_ack_ping_for_keepalive);
 		set_bit(RXRPC_CALL_EV_PING, &call->events);
 	}
@@ -407,7 +404,7 @@ recheck_state:
 	send_ack = NULL;
 	if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events)) {
 		call->acks_lost_top = call->tx_top;
-		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
+		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false,
				  rxrpc_propose_ack_ping_for_lost_ack);
 		send_ack = &call->acks_lost_ping;
 	}
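[Editorial note: the __rxrpc_propose_ACK() hunks above keep the existing single-slot proposal scheme: a pending ACK has a reason and a serial, and a new proposal either updates the slot in place (same reason, if that reason is in the "updateable" set), replaces it (strictly higher priority), or is subsumed. The following stand-alone sketch uses invented reason values and priorities; only the set-membership test via a bitmask, mirroring RXRPC_ACK_UPDATEABLE & (1 << ack_reason), is taken from the code above.]

#include <stdbool.h>
#include <stdio.h>

enum ack_reason { ACK_DELAY = 1, ACK_DUPLICATE = 2, ACK_PING = 3 }; /* invented */

/* Reasons whose pending proposal may simply take the newer serial. */
#define ACK_UPDATEABLE ((1 << ACK_DELAY) | (1 << ACK_DUPLICATE))

static const int ack_priority[] = {
	[0] = 0, [ACK_DELAY] = 1, [ACK_DUPLICATE] = 2, [ACK_PING] = 3,
};

struct pending_ack { enum ack_reason reason; unsigned int serial; };

/* Returns true if the pending slot changed. */
static bool propose_ack(struct pending_ack *p, enum ack_reason reason,
			unsigned int serial)
{
	if (reason == p->reason) {
		if (ACK_UPDATEABLE & (1 << reason)) {
			p->serial = serial;	/* update in place */
			return true;
		}
		return false;
	}
	if (ack_priority[reason] > ack_priority[p->reason]) {
		p->reason = reason;		/* override lower priority */
		p->serial = serial;
		return true;
	}
	return false;		/* subsumed by the pending proposal */
}

int main(void)
{
	struct pending_ack p = { ACK_DELAY, 100 };

	propose_ack(&p, ACK_DELAY, 101);     /* updates the serial */
	propose_ack(&p, ACK_PING, 102);      /* overrides: higher priority */
	propose_ack(&p, ACK_DUPLICATE, 103); /* subsumed */
	printf("pending: reason=%d serial=%u\n", p.reason, p.serial);
	return 0;
}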
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 217b12be9e08..014548c259ce 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -422,6 +422,19 @@ void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
}
 
/*
+ * Clean up the RxTx skb ring.
+ */
+static void rxrpc_cleanup_ring(struct rxrpc_call *call)
+{
+	int i;
+
+	for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
+		rxrpc_free_skb(call->rxtx_buffer[i], rxrpc_skb_cleaned);
+		call->rxtx_buffer[i] = NULL;
+	}
+}
+
+/*
 * Detach a call from its owning socket.
 */
void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
@@ -429,7 +442,6 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
	const void *here = __builtin_return_address(0);
	struct rxrpc_connection *conn = call->conn;
	bool put = false;
-	int i;
 
	_enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));
 
@@ -479,13 +491,7 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
	if (conn)
		rxrpc_disconnect_call(call);
 
-	for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
-		rxrpc_free_skb(call->rxtx_buffer[i],
-			       (call->tx_phase ? rxrpc_skb_tx_cleaned :
-				rxrpc_skb_rx_cleaned));
-		call->rxtx_buffer[i] = NULL;
-	}
-
+	rxrpc_cleanup_ring(call);
	_leave("");
}
 
@@ -568,8 +574,6 @@ static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
 */
void rxrpc_cleanup_call(struct rxrpc_call *call)
{
-	int i;
-
	_net("DESTROY CALL %d", call->debug_id);
 
	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));
@@ -580,13 +584,8 @@ void rxrpc_cleanup_call(struct rxrpc_call *call)
	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
	ASSERTCMP(call->conn, ==, NULL);
 
-	/* Clean up the Rx/Tx buffer */
-	for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++)
-		rxrpc_free_skb(call->rxtx_buffer[i],
-			       (call->tx_phase ? rxrpc_skb_tx_cleaned :
-				rxrpc_skb_rx_cleaned));
-
-	rxrpc_free_skb(call->tx_pending, rxrpc_skb_tx_cleaned);
+	rxrpc_cleanup_ring(call);
+	rxrpc_free_skb(call->tx_pending, rxrpc_skb_cleaned);
 
	call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
}
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index aea82f909c60..3f1da1b49f69 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -1162,3 +1162,47 @@ void rxrpc_destroy_all_client_connections(struct rxrpc_net *rxnet)
 
	_leave("");
}
+
+/*
+ * Clean up the client connections on a local endpoint.
+ */
+void rxrpc_clean_up_local_conns(struct rxrpc_local *local)
+{
+	struct rxrpc_connection *conn, *tmp;
+	struct rxrpc_net *rxnet = local->rxnet;
+	unsigned int nr_active;
+	LIST_HEAD(graveyard);
+
+	_enter("");
+
+	spin_lock(&rxnet->client_conn_cache_lock);
+	nr_active = rxnet->nr_active_client_conns;
+
+	list_for_each_entry_safe(conn, tmp, &rxnet->idle_client_conns,
+				 cache_link) {
+		if (conn->params.local == local) {
+			ASSERTCMP(conn->cache_state, ==, RXRPC_CONN_CLIENT_IDLE);
+
+			trace_rxrpc_client(conn, -1, rxrpc_client_discard);
+			if (!test_and_clear_bit(RXRPC_CONN_EXPOSED, &conn->flags))
+				BUG();
+			conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
+			list_move(&conn->cache_link, &graveyard);
+			nr_active--;
+		}
+	}
+
+	rxnet->nr_active_client_conns = nr_active;
+	spin_unlock(&rxnet->client_conn_cache_lock);
+	ASSERTCMP(nr_active, >=, 0);
+
+	while (!list_empty(&graveyard)) {
+		conn = list_entry(graveyard.next,
+				  struct rxrpc_connection, cache_link);
+		list_del_init(&conn->cache_link);
+
+		rxrpc_put_connection(conn);
+	}
+
+	_leave(" [culled]");
+}
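[Editorial note: rxrpc_clean_up_local_conns() above is an instance of a common kernel idiom: move the victims onto a private "graveyard" list while the cache lock is held, then drop the lock before doing the actual teardown, so slow destructors never run under the spinlock. The sketch below reduces the idiom to a generic doubly-linked list; the node type, pthread mutex and teardown callback are invented stand-ins for the kernel's list_head, spinlock and rxrpc_put_connection().]

#include <pthread.h>
#include <stddef.h>

struct node {
	struct node *prev, *next;
	int owner;
};

/* "head" is a sentinel node that points at itself when the list is empty. */
static void cull_owner(struct node *head, pthread_mutex_t *lock, int owner,
		       void (*teardown)(struct node *))
{
	struct node graveyard = { &graveyard, &graveyard, 0 };
	struct node *n, *next;

	pthread_mutex_lock(lock);
	for (n = head->next; n != head; n = next) {
		next = n->next;
		if (n->owner != owner)
			continue;
		/* Unhook from the shared list... */
		n->prev->next = n->next;
		n->next->prev = n->prev;
		/* ...and park on the private graveyard. */
		n->next = graveyard.next;
		n->prev = &graveyard;
		graveyard.next->prev = n;
		graveyard.next = n;
	}
	pthread_mutex_unlock(lock);

	/* Lock no longer held: safe to run expensive destructors. */
	for (n = graveyard.next; n != &graveyard; n = next) {
		next = n->next;
		teardown(n);
	}
}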
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index df6624c140be..a1ceef4f5cd0 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -472,7 +472,7 @@ void rxrpc_process_connection(struct work_struct *work)
	/* go through the conn-level event packets, releasing the ref on this
	 * connection that each one has when we've finished with it */
	while ((skb = skb_dequeue(&conn->rx_queue))) {
-		rxrpc_see_skb(skb, rxrpc_skb_rx_seen);
+		rxrpc_see_skb(skb, rxrpc_skb_seen);
		ret = rxrpc_process_event(conn, skb, &abort_code);
		switch (ret) {
		case -EPROTO:
@@ -484,7 +484,7 @@ void rxrpc_process_connection(struct work_struct *work)
			goto requeue_and_leave;
		case -ECONNABORTED:
		default:
-			rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+			rxrpc_free_skb(skb, rxrpc_skb_freed);
			break;
		}
	}
@@ -501,6 +501,6 @@ requeue_and_leave:
protocol_error:
	if (rxrpc_abort_connection(conn, ret, abort_code) < 0)
		goto requeue_and_leave;
-	rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+	rxrpc_free_skb(skb, rxrpc_skb_freed);
	goto out;
}
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index 434ef392212b..ed05b6922132 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -398,7 +398,7 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
		if (conn->state == RXRPC_CONN_SERVICE_PREALLOC)
			continue;
 
-		if (rxnet->live) {
+		if (rxnet->live && !conn->params.local->dead) {
			idle_timestamp = READ_ONCE(conn->idle_timestamp);
			expire_at = idle_timestamp + rxrpc_connection_expiry * HZ;
			if (conn->params.local->service_closed)
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 5bd6f1546e5c..d122c53c8697 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -196,15 +196,14 @@ send_extra_data:
 * Ping the other end to fill our RTT cache and to retrieve the rwind
 * and MTU parameters.
 */
-static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb,
-			    int skew)
+static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	ktime_t now = skb->tstamp;
 
	if (call->peer->rtt_usage < 3 ||
	    ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
-		rxrpc_propose_ACK(call, RXRPC_ACK_PING, skew, sp->hdr.serial,
+		rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
				  true, true,
				  rxrpc_propose_ack_ping_for_params);
}
@@ -234,7 +233,7 @@ static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
		ix = call->tx_hard_ack & RXRPC_RXTX_BUFF_MASK;
		skb = call->rxtx_buffer[ix];
		annotation = call->rxtx_annotations[ix];
-		rxrpc_see_skb(skb, rxrpc_skb_tx_rotated);
+		rxrpc_see_skb(skb, rxrpc_skb_rotated);
		call->rxtx_buffer[ix] = NULL;
		call->rxtx_annotations[ix] = 0;
		skb->next = list;
@@ -259,7 +258,7 @@ static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
		skb = list;
		list = skb->next;
		skb_mark_not_on_list(skb);
-		rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
+		rxrpc_free_skb(skb, rxrpc_skb_freed);
	}
 
	return rot_last;
@@ -348,7 +347,7 @@ static bool rxrpc_receiving_reply(struct rxrpc_call *call)
}
 
/*
- * Scan a jumbo packet to validate its structure and to work out how many
+ * Scan a data packet to validate its structure and to work out how many
 * subpackets it contains.
 *
 * A jumbo packet is a collection of consecutive packets glued together with
@@ -359,16 +358,21 @@ static bool rxrpc_receiving_reply(struct rxrpc_call *call)
 * the last are RXRPC_JUMBO_DATALEN in size.  The last subpacket may be of any
 * size.
 */
-static bool rxrpc_validate_jumbo(struct sk_buff *skb)
+static bool rxrpc_validate_data(struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	unsigned int offset = sizeof(struct rxrpc_wire_header);
	unsigned int len = skb->len;
-	int nr_jumbo = 1;
	u8 flags = sp->hdr.flags;
 
-	do {
-		nr_jumbo++;
+	for (;;) {
+		if (flags & RXRPC_REQUEST_ACK)
+			__set_bit(sp->nr_subpackets, sp->rx_req_ack);
+		sp->nr_subpackets++;
+
+		if (!(flags & RXRPC_JUMBO_PACKET))
+			break;
+
		if (len - offset < RXRPC_JUMBO_SUBPKTLEN)
			goto protocol_error;
		if (flags & RXRPC_LAST_PACKET)
@@ -377,9 +381,10 @@ static bool rxrpc_validate_jumbo(struct sk_buff *skb)
		if (skb_copy_bits(skb, offset, &flags, 1) < 0)
			goto protocol_error;
		offset += sizeof(struct rxrpc_jumbo_header);
-	} while (flags & RXRPC_JUMBO_PACKET);
+	}
 
-	sp->nr_jumbo = nr_jumbo;
+	if (flags & RXRPC_LAST_PACKET)
+		sp->rx_flags |= RXRPC_SKB_INCL_LAST;
	return true;
 
protocol_error:
@@ -400,10 +405,10 @@ protocol_error:
 * (that information is encoded in the ACK packet).
 */
static void rxrpc_input_dup_data(struct rxrpc_call *call, rxrpc_seq_t seq,
-				 u8 annotation, bool *_jumbo_bad)
+				 bool is_jumbo, bool *_jumbo_bad)
{
	/* Discard normal packets that are duplicates. */
-	if (annotation == 0)
+	if (is_jumbo)
		return;
 
	/* Skip jumbo subpackets that are duplicates.  When we've had three or
@@ -417,30 +422,30 @@ static void rxrpc_input_dup_data(struct rxrpc_call *call, rxrpc_seq_t seq,
}
 
/*
- * Process a DATA packet, adding the packet to the Rx ring.
+ * Process a DATA packet, adding the packet to the Rx ring.  The caller's
+ * packet ref must be passed on or discarded.
 */
-static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
-			     u16 skew)
+static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	enum rxrpc_call_state state;
-	unsigned int offset = sizeof(struct rxrpc_wire_header);
-	unsigned int ix;
+	unsigned int j;
	rxrpc_serial_t serial = sp->hdr.serial, ack_serial = 0;
-	rxrpc_seq_t seq = sp->hdr.seq, hard_ack;
-	bool immediate_ack = false, jumbo_bad = false, queued;
-	u16 len;
-	u8 ack = 0, flags, annotation = 0;
+	rxrpc_seq_t seq0 = sp->hdr.seq, hard_ack;
+	bool immediate_ack = false, jumbo_bad = false;
+	u8 ack = 0;
 
	_enter("{%u,%u},{%u,%u}",
-	       call->rx_hard_ack, call->rx_top, skb->len, seq);
+	       call->rx_hard_ack, call->rx_top, skb->len, seq0);
 
-	_proto("Rx DATA %%%u { #%u f=%02x }",
-	       sp->hdr.serial, seq, sp->hdr.flags);
+	_proto("Rx DATA %%%u { #%u f=%02x n=%u }",
+	       sp->hdr.serial, seq0, sp->hdr.flags, sp->nr_subpackets);
 
	state = READ_ONCE(call->state);
-	if (state >= RXRPC_CALL_COMPLETE)
+	if (state >= RXRPC_CALL_COMPLETE) {
+		rxrpc_free_skb(skb, rxrpc_skb_freed);
		return;
+	}
 
	if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST) {
		unsigned long timo = READ_ONCE(call->next_req_timo);
@@ -465,156 +470,157 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
	    !rxrpc_receiving_reply(call))
		goto unlock;
 
-	call->ackr_prev_seq = seq;
-
+	call->ackr_prev_seq = seq0;
	hard_ack = READ_ONCE(call->rx_hard_ack);
-	if (after(seq, hard_ack + call->rx_winsize)) {
-		ack = RXRPC_ACK_EXCEEDS_WINDOW;
-		ack_serial = serial;
-		goto ack;
-	}
 
-	flags = sp->hdr.flags;
-	if (flags & RXRPC_JUMBO_PACKET) {
+	if (sp->nr_subpackets > 1) {
		if (call->nr_jumbo_bad > 3) {
			ack = RXRPC_ACK_NOSPACE;
			ack_serial = serial;
			goto ack;
		}
-		annotation = 1;
	}
 
-next_subpacket:
-	queued = false;
-	ix = seq & RXRPC_RXTX_BUFF_MASK;
-	len = skb->len;
-	if (flags & RXRPC_JUMBO_PACKET)
-		len = RXRPC_JUMBO_DATALEN;
-
-	if (flags & RXRPC_LAST_PACKET) {
-		if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
-		    seq != call->rx_top) {
-			rxrpc_proto_abort("LSN", call, seq);
-			goto unlock;
-		}
-	} else {
-		if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
-		    after_eq(seq, call->rx_top)) {
-			rxrpc_proto_abort("LSA", call, seq);
-			goto unlock;
+	for (j = 0; j < sp->nr_subpackets; j++) {
+		rxrpc_serial_t serial = sp->hdr.serial + j;
+		rxrpc_seq_t seq = seq0 + j;
+		unsigned int ix = seq & RXRPC_RXTX_BUFF_MASK;
+		bool terminal = (j == sp->nr_subpackets - 1);
+		bool last = terminal && (sp->rx_flags & RXRPC_SKB_INCL_LAST);
+		u8 flags, annotation = j;
+
+		_proto("Rx DATA+%u %%%u { #%x t=%u l=%u }",
+		       j, serial, seq, terminal, last);
+
+		if (last) {
+			if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
+			    seq != call->rx_top) {
+				rxrpc_proto_abort("LSN", call, seq);
+				goto unlock;
+			}
+		} else {
+			if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
+			    after_eq(seq, call->rx_top)) {
+				rxrpc_proto_abort("LSA", call, seq);
+				goto unlock;
+			}
		}
-	}
-
-	trace_rxrpc_rx_data(call->debug_id, seq, serial, flags, annotation);
-	if (before_eq(seq, hard_ack)) {
-		ack = RXRPC_ACK_DUPLICATE;
-		ack_serial = serial;
-		goto skip;
-	}
 
-	if (flags & RXRPC_REQUEST_ACK && !ack) {
-		ack = RXRPC_ACK_REQUESTED;
-		ack_serial = serial;
-	}
+		flags = 0;
+		if (last)
+			flags |= RXRPC_LAST_PACKET;
+		if (!terminal)
+			flags |= RXRPC_JUMBO_PACKET;
+		if (test_bit(j, sp->rx_req_ack))
+			flags |= RXRPC_REQUEST_ACK;
+		trace_rxrpc_rx_data(call->debug_id, seq, serial, flags, annotation);
 
-	if (call->rxtx_buffer[ix]) {
-		rxrpc_input_dup_data(call, seq, annotation, &jumbo_bad);
-		if (ack != RXRPC_ACK_DUPLICATE) {
+		if (before_eq(seq, hard_ack)) {
			ack = RXRPC_ACK_DUPLICATE;
			ack_serial = serial;
+			continue;
		}
-		immediate_ack = true;
-		goto skip;
-	}
-
-	/* Queue the packet.  We use a couple of memory barriers here as need
-	 * to make sure that rx_top is perceived to be set after the buffer
-	 * pointer and that the buffer pointer is set after the annotation and
-	 * the skb data.
-	 *
-	 * Barriers against rxrpc_recvmsg_data() and rxrpc_rotate_rx_window()
-	 * and also rxrpc_fill_out_ack().
-	 */
-	rxrpc_get_skb(skb, rxrpc_skb_rx_got);
-	call->rxtx_annotations[ix] = annotation;
-	smp_wmb();
-	call->rxtx_buffer[ix] = skb;
-	if (after(seq, call->rx_top)) {
-		smp_store_release(&call->rx_top, seq);
-	} else if (before(seq, call->rx_top)) {
-		/* Send an immediate ACK if we fill in a hole */
-		if (!ack) {
-			ack = RXRPC_ACK_DELAY;
-			ack_serial = serial;
-		}
-		immediate_ack = true;
-	}
-	if (flags & RXRPC_LAST_PACKET) {
-		set_bit(RXRPC_CALL_RX_LAST, &call->flags);
-		trace_rxrpc_receive(call, rxrpc_receive_queue_last, serial, seq);
-	} else {
-		trace_rxrpc_receive(call, rxrpc_receive_queue, serial, seq);
-	}
-	queued = true;
 
-	if (after_eq(seq, call->rx_expect_next)) {
-		if (after(seq, call->rx_expect_next)) {
-			_net("OOS %u > %u", seq, call->rx_expect_next);
-			ack = RXRPC_ACK_OUT_OF_SEQUENCE;
-			ack_serial = serial;
+		if (call->rxtx_buffer[ix]) {
+			rxrpc_input_dup_data(call, seq, sp->nr_subpackets > 1,
+					     &jumbo_bad);
+			if (ack != RXRPC_ACK_DUPLICATE) {
+				ack = RXRPC_ACK_DUPLICATE;
+				ack_serial = serial;
+			}
+			immediate_ack = true;
+			continue;
		}
-		call->rx_expect_next = seq + 1;
-	}
 
-skip:
-	offset += len;
-	if (flags & RXRPC_JUMBO_PACKET) {
-		if (skb_copy_bits(skb, offset, &flags, 1) < 0) {
-			rxrpc_proto_abort("XJF", call, seq);
-			goto unlock;
-		}
-		offset += sizeof(struct rxrpc_jumbo_header);
-		seq++;
-		serial++;
-		annotation++;
-		if (flags & RXRPC_JUMBO_PACKET)
-			annotation |= RXRPC_RX_ANNO_JLAST;
		if (after(seq, hard_ack + call->rx_winsize)) {
			ack = RXRPC_ACK_EXCEEDS_WINDOW;
			ack_serial = serial;
-			if (!jumbo_bad) {
-				call->nr_jumbo_bad++;
-				jumbo_bad = true;
+			if (flags & RXRPC_JUMBO_PACKET) {
+				if (!jumbo_bad) {
+					call->nr_jumbo_bad++;
+					jumbo_bad = true;
+				}
			}
+
			goto ack;
		}
 
-		_proto("Rx DATA Jumbo %%%u", serial);
-		goto next_subpacket;
-	}
+		if (flags & RXRPC_REQUEST_ACK && !ack) {
+			ack = RXRPC_ACK_REQUESTED;
+			ack_serial = serial;
+		}
+
+		/* Queue the packet.  We use a couple of memory barriers here as need
+		 * to make sure that rx_top is perceived to be set after the buffer
+		 * pointer and that the buffer pointer is set after the annotation and
+		 * the skb data.
+		 *
+		 * Barriers against rxrpc_recvmsg_data() and rxrpc_rotate_rx_window()
+		 * and also rxrpc_fill_out_ack().
+		 */
+		if (!terminal)
+			rxrpc_get_skb(skb, rxrpc_skb_got);
+		call->rxtx_annotations[ix] = annotation;
+		smp_wmb();
+		call->rxtx_buffer[ix] = skb;
+		if (after(seq, call->rx_top)) {
+			smp_store_release(&call->rx_top, seq);
+		} else if (before(seq, call->rx_top)) {
+			/* Send an immediate ACK if we fill in a hole */
+			if (!ack) {
+				ack = RXRPC_ACK_DELAY;
+				ack_serial = serial;
+			}
+			immediate_ack = true;
+		}
 
-	if (queued && flags & RXRPC_LAST_PACKET && !ack) {
-		ack = RXRPC_ACK_DELAY;
-		ack_serial = serial;
+		if (terminal) {
+			/* From this point on, we're not allowed to touch the
+			 * packet any longer as its ref now belongs to the Rx
+			 * ring.
+			 */
+			skb = NULL;
+		}
+
+		if (last) {
+			set_bit(RXRPC_CALL_RX_LAST, &call->flags);
+			if (!ack) {
+				ack = RXRPC_ACK_DELAY;
+				ack_serial = serial;
+			}
+			trace_rxrpc_receive(call, rxrpc_receive_queue_last, serial, seq);
+		} else {
+			trace_rxrpc_receive(call, rxrpc_receive_queue, serial, seq);
+		}
+
+		if (after_eq(seq, call->rx_expect_next)) {
+			if (after(seq, call->rx_expect_next)) {
+				_net("OOS %u > %u", seq, call->rx_expect_next);
+				ack = RXRPC_ACK_OUT_OF_SEQUENCE;
+				ack_serial = serial;
+			}
+			call->rx_expect_next = seq + 1;
+		}
	}
 
ack:
	if (ack)
-		rxrpc_propose_ACK(call, ack, skew, ack_serial,
+		rxrpc_propose_ACK(call, ack, ack_serial,
				  immediate_ack, true,
				  rxrpc_propose_ack_input_data);
	else
-		rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, skew, serial,
+		rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial,
				  false, true,
				  rxrpc_propose_ack_input_data);
 
-	if (sp->hdr.seq == READ_ONCE(call->rx_hard_ack) + 1) {
+	if (seq0 == READ_ONCE(call->rx_hard_ack) + 1) {
		trace_rxrpc_notify_socket(call->debug_id, serial);
		rxrpc_notify_socket(call);
	}
 
unlock:
	spin_unlock(&call->input_lock);
+	rxrpc_free_skb(skb, rxrpc_skb_freed);
	_leave(" [queued]");
}
 
@@ -822,8 +828,7 @@ static void rxrpc_input_soft_acks(struct rxrpc_call *call, u8 *acks,
 * soft-ACK means that the packet may be discarded and retransmission
 * requested.  A phase is complete when all packets are hard-ACK'd.
 */
-static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
-			    u16 skew)
+static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_ack_summary summary = { 0 };
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
@@ -867,11 +872,11 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
	if (buf.ack.reason == RXRPC_ACK_PING) {
		_proto("Rx ACK %%%u PING Request", sp->hdr.serial);
		rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE,
-				  skew, sp->hdr.serial, true, true,
+				  sp->hdr.serial, true, true,
				  rxrpc_propose_ack_respond_to_ping);
	} else if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
		rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED,
-				  skew, sp->hdr.serial, true, true,
+				  sp->hdr.serial, true, true,
				  rxrpc_propose_ack_respond_to_ack);
	}
 
@@ -948,7 +953,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
	    RXRPC_TX_ANNO_LAST &&
	    summary.nr_acks == call->tx_top - hard_ack &&
	    rxrpc_is_client_call(call))
-		rxrpc_propose_ACK(call, RXRPC_ACK_PING, skew, sp->hdr.serial,
+		rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
				  false, true,
				  rxrpc_propose_ack_ping_for_lost_reply);
 
@@ -1004,7 +1009,7 @@ static void rxrpc_input_abort(struct rxrpc_call *call, struct sk_buff *skb)
 * Process an incoming call packet.
 */
static void rxrpc_input_call_packet(struct rxrpc_call *call,
-				    struct sk_buff *skb, u16 skew)
+				    struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	unsigned long timo;
@@ -1023,11 +1028,11 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
 
	switch (sp->hdr.type) {
	case RXRPC_PACKET_TYPE_DATA:
-		rxrpc_input_data(call, skb, skew);
-		break;
+		rxrpc_input_data(call, skb);
+		goto no_free;
 
	case RXRPC_PACKET_TYPE_ACK:
-		rxrpc_input_ack(call, skb, skew);
+		rxrpc_input_ack(call, skb);
		break;
 
	case RXRPC_PACKET_TYPE_BUSY:
@@ -1051,6 +1056,8 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
		break;
	}
 
+	rxrpc_free_skb(skb, rxrpc_skb_freed);
+no_free:
	_leave("");
}
 
@@ -1108,8 +1115,12 @@ static void rxrpc_post_packet_to_local(struct rxrpc_local *local,
{
	_enter("%p,%p", local, skb);
 
-	skb_queue_tail(&local->event_queue, skb);
-	rxrpc_queue_local(local);
+	if (rxrpc_get_local_maybe(local)) {
+		skb_queue_tail(&local->event_queue, skb);
+		rxrpc_queue_local(local);
+	} else {
+		rxrpc_free_skb(skb, rxrpc_skb_freed);
+	}
}
 
/*
@@ -1119,8 +1130,12 @@ static void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
{
	CHECK_SLAB_OKAY(&local->usage);
 
-	skb_queue_tail(&local->reject_queue, skb);
-	rxrpc_queue_local(local);
+	if (rxrpc_get_local_maybe(local)) {
+		skb_queue_tail(&local->reject_queue, skb);
+		rxrpc_queue_local(local);
+	} else {
+		rxrpc_free_skb(skb, rxrpc_skb_freed);
+	}
}
 
/*
@@ -1173,7 +1188,6 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
	struct rxrpc_peer *peer = NULL;
	struct rxrpc_sock *rx = NULL;
	unsigned int channel;
-	int skew = 0;
 
	_enter("%p", udp_sk);
 
@@ -1184,7 +1198,7 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
	if (skb->tstamp == 0)
		skb->tstamp = ktime_get_real();
 
-	rxrpc_new_skb(skb, rxrpc_skb_rx_received);
+	rxrpc_new_skb(skb, rxrpc_skb_received);
 
	skb_pull(skb, sizeof(struct udphdr));
 
@@ -1201,7 +1215,7 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
		static int lose;
		if ((lose++ & 7) == 7) {
			trace_rxrpc_rx_lose(sp);
-			rxrpc_free_skb(skb, rxrpc_skb_rx_lost);
+			rxrpc_free_skb(skb, rxrpc_skb_lost);
			return 0;
		}
	}
@@ -1233,9 +1247,26 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
		if (sp->hdr.callNumber == 0 ||
		    sp->hdr.seq == 0)
			goto bad_message;
-		if (sp->hdr.flags & RXRPC_JUMBO_PACKET &&
-		    !rxrpc_validate_jumbo(skb))
+		if (!rxrpc_validate_data(skb))
			goto bad_message;
+
+		/* Unshare the packet so that it can be modified for in-place
+		 * decryption.
+		 */
+		if (sp->hdr.securityIndex != 0) {
+			struct sk_buff *nskb = skb_unshare(skb, GFP_ATOMIC);
+			if (!nskb) {
+				rxrpc_eaten_skb(skb, rxrpc_skb_unshared_nomem);
+				goto out;
+			}
+
+			if (nskb != skb) {
+				rxrpc_eaten_skb(skb, rxrpc_skb_received);
+				rxrpc_new_skb(skb, rxrpc_skb_unshared);
+				skb = nskb;
+				sp = rxrpc_skb(skb);
+			}
+		}
		break;
 
	case RXRPC_PACKET_TYPE_CHALLENGE:
@@ -1301,15 +1332,8 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
		goto out;
	}
 
-	/* Note the serial number skew here */
-	skew = (int)sp->hdr.serial - (int)conn->hi_serial;
-	if (skew >= 0) {
-		if (skew > 0)
-			conn->hi_serial = sp->hdr.serial;
-	} else {
-		skew = -skew;
-		skew = min(skew, 65535);
-	}
+	if ((int)sp->hdr.serial - (int)conn->hi_serial > 0)
+		conn->hi_serial = sp->hdr.serial;
 
	/* Call-bound packets are routed by connection channel. */
	channel = sp->hdr.cid & RXRPC_CHANNELMASK;
@@ -1372,15 +1396,18 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
		call = rxrpc_new_incoming_call(local, rx, skb);
		if (!call)
			goto reject_packet;
-		rxrpc_send_ping(call, skb, skew);
+		rxrpc_send_ping(call, skb);
		mutex_unlock(&call->user_mutex);
	}
 
-	rxrpc_input_call_packet(call, skb, skew);
-	goto discard;
+	/* Process a call packet; this either discards or passes on the ref
+	 * elsewhere.
+	 */
+	rxrpc_input_call_packet(call, skb);
+	goto out;
 
discard:
-	rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+	rxrpc_free_skb(skb, rxrpc_skb_freed);
out:
	trace_rxrpc_rx_done(0, 0);
	return 0;
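[Editorial note: in the input.c changes above, rxrpc_validate_data() now walks a jumbo packet exactly once, counting subpackets into sp->nr_subpackets and recording which of them carried RXRPC_REQUEST_ACK in the sp->rx_req_ack bitmap; rxrpc_input_data() then rebuilds each subpacket's flags from that record instead of reparsing the wire data, and the terminal subpacket consumes the caller's skb ref rather than taking a new one. The following stand-alone sketch shows only the record-then-replay bitmap; the constants and helper names are invented (the kernel sizes the array from RXRPC_MAX_NR_JUMBO and uses __set_bit()/test_bit()).]

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_NR_JUMBO	47	/* stand-in for RXRPC_MAX_NR_JUMBO */
#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)
#define BITMAP_LONGS	((MAX_NR_JUMBO + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct pkt_priv {
	unsigned char	nr_subpackets;
	unsigned long	rx_req_ack[BITMAP_LONGS];
};

/* Validation pass: remember that subpacket j asked for an ACK. */
static void mark_req_ack(struct pkt_priv *sp, unsigned int j)
{
	sp->rx_req_ack[j / BITS_PER_LONG] |= 1UL << (j % BITS_PER_LONG);
}

/* Input pass: recover the flag without touching the packet data again. */
static bool wants_ack(const struct pkt_priv *sp, unsigned int j)
{
	return sp->rx_req_ack[j / BITS_PER_LONG] & (1UL << (j % BITS_PER_LONG));
}

int main(void)
{
	struct pkt_priv sp = { .nr_subpackets = 3 };
	unsigned int j;

	mark_req_ack(&sp, 1);	/* pretend subpacket 1 had REQUEST_ACK set */

	for (j = 0; j < sp.nr_subpackets; j++)
		printf("subpacket %u: %s\n", j,
		       wants_ack(&sp, j) ? "ack requested" : "no ack");
	return 0;
}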
diff --git a/net/rxrpc/local_event.c b/net/rxrpc/local_event.c
index e93a78f7c05e..3ce6d628cd75 100644
--- a/net/rxrpc/local_event.c
+++ b/net/rxrpc/local_event.c
@@ -90,7 +90,7 @@ void rxrpc_process_local_events(struct rxrpc_local *local)
	if (skb) {
		struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
 
-		rxrpc_see_skb(skb, rxrpc_skb_rx_seen);
+		rxrpc_see_skb(skb, rxrpc_skb_seen);
		_debug("{%d},{%u}", local->debug_id, sp->hdr.type);
 
		switch (sp->hdr.type) {
@@ -108,7 +108,7 @@ void rxrpc_process_local_events(struct rxrpc_local *local)
			break;
		}
 
-		rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+		rxrpc_free_skb(skb, rxrpc_skb_freed);
	}
 
	_leave("");
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c index b1c71bad510b..36587260cabd 100644 --- a/net/rxrpc/local_object.c +++ b/net/rxrpc/local_object.c | |||
@@ -79,6 +79,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet, | |||
79 | local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL); | 79 | local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL); |
80 | if (local) { | 80 | if (local) { |
81 | atomic_set(&local->usage, 1); | 81 | atomic_set(&local->usage, 1); |
82 | atomic_set(&local->active_users, 1); | ||
82 | local->rxnet = rxnet; | 83 | local->rxnet = rxnet; |
83 | INIT_LIST_HEAD(&local->link); | 84 | INIT_LIST_HEAD(&local->link); |
84 | INIT_WORK(&local->processor, rxrpc_local_processor); | 85 | INIT_WORK(&local->processor, rxrpc_local_processor); |
@@ -92,7 +93,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet, | |||
92 | local->debug_id = atomic_inc_return(&rxrpc_debug_id); | 93 | local->debug_id = atomic_inc_return(&rxrpc_debug_id); |
93 | memcpy(&local->srx, srx, sizeof(*srx)); | 94 | memcpy(&local->srx, srx, sizeof(*srx)); |
94 | local->srx.srx_service = 0; | 95 | local->srx.srx_service = 0; |
95 | trace_rxrpc_local(local, rxrpc_local_new, 1, NULL); | 96 | trace_rxrpc_local(local->debug_id, rxrpc_local_new, 1, NULL); |
96 | } | 97 | } |
97 | 98 | ||
98 | _leave(" = %p", local); | 99 | _leave(" = %p", local); |
@@ -266,11 +267,8 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net, | |||
266 | * bind the transport socket may still fail if we're attempting | 267 | * bind the transport socket may still fail if we're attempting |
267 | * to use a local address that the dying object is still using. | 268 | * to use a local address that the dying object is still using. |
268 | */ | 269 | */ |
269 | if (!rxrpc_get_local_maybe(local)) { | 270 | if (!rxrpc_use_local(local)) |
270 | cursor = cursor->next; | ||
271 | list_del_init(&local->link); | ||
272 | break; | 271 | break; |
273 | } | ||
274 | 272 | ||
275 | age = "old"; | 273 | age = "old"; |
276 | goto found; | 274 | goto found; |
@@ -284,7 +282,10 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net, | |||
284 | if (ret < 0) | 282 | if (ret < 0) |
285 | goto sock_error; | 283 | goto sock_error; |
286 | 284 | ||
287 | list_add_tail(&local->link, cursor); | 285 | if (cursor != &rxnet->local_endpoints) |
286 | list_replace_init(cursor, &local->link); | ||
287 | else | ||
288 | list_add_tail(&local->link, cursor); | ||
288 | age = "new"; | 289 | age = "new"; |
289 | 290 | ||
290 | found: | 291 | found: |
@@ -320,7 +321,7 @@ struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local) | |||
320 | int n; | 321 | int n; |
321 | 322 | ||
322 | n = atomic_inc_return(&local->usage); | 323 | n = atomic_inc_return(&local->usage); |
323 | trace_rxrpc_local(local, rxrpc_local_got, n, here); | 324 | trace_rxrpc_local(local->debug_id, rxrpc_local_got, n, here); |
324 | return local; | 325 | return local; |
325 | } | 326 | } |
326 | 327 | ||
@@ -334,7 +335,8 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local) | |||
334 | if (local) { | 335 | if (local) { |
335 | int n = atomic_fetch_add_unless(&local->usage, 1, 0); | 336 | int n = atomic_fetch_add_unless(&local->usage, 1, 0); |
336 | if (n > 0) | 337 | if (n > 0) |
337 | trace_rxrpc_local(local, rxrpc_local_got, n + 1, here); | 338 | trace_rxrpc_local(local->debug_id, rxrpc_local_got, |
339 | n + 1, here); | ||
338 | else | 340 | else |
339 | local = NULL; | 341 | local = NULL; |
340 | } | 342 | } |
@@ -342,24 +344,18 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local) | |||
342 | } | 344 | } |
343 | 345 | ||
344 | /* | 346 | /* |
345 | * Queue a local endpoint. | 347 | * Queue a local endpoint and pass the caller's reference to the work item. |
346 | */ | 348 | */ |
347 | void rxrpc_queue_local(struct rxrpc_local *local) | 349 | void rxrpc_queue_local(struct rxrpc_local *local) |
348 | { | 350 | { |
349 | const void *here = __builtin_return_address(0); | 351 | const void *here = __builtin_return_address(0); |
352 | unsigned int debug_id = local->debug_id; | ||
353 | int n = atomic_read(&local->usage); | ||
350 | 354 | ||
351 | if (rxrpc_queue_work(&local->processor)) | 355 | if (rxrpc_queue_work(&local->processor)) |
352 | trace_rxrpc_local(local, rxrpc_local_queued, | 356 | trace_rxrpc_local(debug_id, rxrpc_local_queued, n, here); |
353 | atomic_read(&local->usage), here); | 357 | else |
354 | } | 358 | rxrpc_put_local(local); |
355 | |||
356 | /* | ||
357 | * A local endpoint reached its end of life. | ||
358 | */ | ||
359 | static void __rxrpc_put_local(struct rxrpc_local *local) | ||
360 | { | ||
361 | _enter("%d", local->debug_id); | ||
362 | rxrpc_queue_work(&local->processor); | ||
363 | } | 359 | } |
364 | 360 | ||
365 | /* | 361 | /* |
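Sampling debug_id and the usage count into locals before rxrpc_queue_work() is what makes the tracepoint safe here: once the work item owns the reference, it may run and free the endpoint before the caller reaches the trace call. A sketch of the race the sampling avoids (the worker column is illustrative):

    /*
     *   CPU 0                            CPU 1 (worker)
     *   -----------------------------    ----------------------------
     *   queue_work(&local->processor)
     *                                    runs, drops the final ref,
     *                                    frees *local
     *   trace(local->debug_id, ...)      <-- read-after-free
     *
     * Reading debug_id and usage first means nothing dereferences
     * *local after ownership of the ref has passed to the work item.
     */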
@@ -372,10 +368,47 @@ void rxrpc_put_local(struct rxrpc_local *local) | |||
372 | 368 | ||
373 | if (local) { | 369 | if (local) { |
374 | n = atomic_dec_return(&local->usage); | 370 | n = atomic_dec_return(&local->usage); |
375 | trace_rxrpc_local(local, rxrpc_local_put, n, here); | 371 | trace_rxrpc_local(local->debug_id, rxrpc_local_put, n, here); |
376 | 372 | ||
377 | if (n == 0) | 373 | if (n == 0) |
378 | __rxrpc_put_local(local); | 374 | call_rcu(&local->rcu, rxrpc_local_rcu); |
375 | } | ||
376 | } | ||
377 | |||
378 | /* | ||
379 | * Start using a local endpoint. | ||
380 | */ | ||
381 | struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local) | ||
382 | { | ||
383 | unsigned int au; | ||
384 | |||
385 | local = rxrpc_get_local_maybe(local); | ||
386 | if (!local) | ||
387 | return NULL; | ||
388 | |||
389 | au = atomic_fetch_add_unless(&local->active_users, 1, 0); | ||
390 | if (au == 0) { | ||
391 | rxrpc_put_local(local); | ||
392 | return NULL; | ||
393 | } | ||
394 | |||
395 | return local; | ||
396 | } | ||
397 | |||
398 | /* | ||
399 | * Cease using a local endpoint. Once the number of active users reaches 0, we | ||
400 | * start the closure of the transport in the work processor. | ||
401 | */ | ||
402 | void rxrpc_unuse_local(struct rxrpc_local *local) | ||
403 | { | ||
404 | unsigned int au; | ||
405 | |||
406 | if (local) { | ||
407 | au = atomic_dec_return(&local->active_users); | ||
408 | if (au == 0) | ||
409 | rxrpc_queue_local(local); | ||
410 | else | ||
411 | rxrpc_put_local(local); | ||
379 | } | 412 | } |
380 | } | 413 | } |
381 | 414 | ||
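Taken together, these hunks split the endpoint's lifetime across two counters: usage keeps the rxrpc_local object itself alive (the final put now frees it directly via call_rcu()), while active_users keeps the transport usable, with the work processor tearing it down once that count drains. A minimal userspace model of the pattern, assuming only C11 atomics; all names are illustrative, and the printf calls stand in for the kernel's tracepoints, work queue, and RCU:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct local {
            atomic_int usage;        /* object lifetime */
            atomic_int active_users; /* transport lifetime */
    };

    /* Take a ref unless the count has already hit zero: the moral
     * equivalent of atomic_fetch_add_unless(v, 1, 0). */
    static int add_unless_zero(atomic_int *v)
    {
            int n = atomic_load(v);

            while (n > 0)
                    if (atomic_compare_exchange_weak(v, &n, n + 1))
                            return 1;
            return 0;
    }

    static void put_local(struct local *l)
    {
            if (atomic_fetch_sub(&l->usage, 1) == 1) {
                    printf("free object\n");      /* kernel: call_rcu() */
                    free(l);
            }
    }

    static struct local *use_local(struct local *l)
    {
            if (!add_unless_zero(&l->usage))
                    return NULL;                  /* object already dying */
            if (!add_unless_zero(&l->active_users)) {
                    put_local(l);                 /* transport closing */
                    return NULL;
            }
            return l;
    }

    static void unuse_local(struct local *l)
    {
            if (atomic_fetch_sub(&l->active_users, 1) == 1)
                    printf("shut down transport\n");
            /* The kernel defers this put to the queued processor;
             * the model does it inline. */
            put_local(l);
    }

    int main(void)
    {
            struct local *l = calloc(1, sizeof(*l));

            atomic_init(&l->usage, 1);
            atomic_init(&l->active_users, 1);

            use_local(l);    /* a second user appears... */
            unuse_local(l);  /* ...and goes away again */
            unuse_local(l);  /* last user: transport closes, object freed */
            return 0;
    }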
@@ -393,21 +426,14 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local) | |||
393 | 426 | ||
394 | _enter("%d", local->debug_id); | 427 | _enter("%d", local->debug_id); |
395 | 428 | ||
396 | /* We can get a race between an incoming call packet queueing the | ||
397 | * processor again and the work processor starting the destruction | ||
398 | * process which will shut down the UDP socket. | ||
399 | */ | ||
400 | if (local->dead) { | ||
401 | _leave(" [already dead]"); | ||
402 | return; | ||
403 | } | ||
404 | local->dead = true; | 429 | local->dead = true; |
405 | 430 | ||
406 | mutex_lock(&rxnet->local_mutex); | 431 | mutex_lock(&rxnet->local_mutex); |
407 | list_del_init(&local->link); | 432 | list_del_init(&local->link); |
408 | mutex_unlock(&rxnet->local_mutex); | 433 | mutex_unlock(&rxnet->local_mutex); |
409 | 434 | ||
410 | ASSERT(RB_EMPTY_ROOT(&local->client_conns)); | 435 | rxrpc_clean_up_local_conns(local); |
436 | rxrpc_service_connection_reaper(&rxnet->service_conn_reaper); | ||
411 | ASSERT(!local->service); | 437 | ASSERT(!local->service); |
412 | 438 | ||
413 | if (socket) { | 439 | if (socket) { |
@@ -422,13 +448,11 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local) | |||
422 | */ | 448 | */ |
423 | rxrpc_purge_queue(&local->reject_queue); | 449 | rxrpc_purge_queue(&local->reject_queue); |
424 | rxrpc_purge_queue(&local->event_queue); | 450 | rxrpc_purge_queue(&local->event_queue); |
425 | |||
426 | _debug("rcu local %d", local->debug_id); | ||
427 | call_rcu(&local->rcu, rxrpc_local_rcu); | ||
428 | } | 451 | } |
429 | 452 | ||
430 | /* | 453 | /* |
431 | * Process events on an endpoint | 454 | * Process events on an endpoint. The work item carries a ref which |
455 | * we must release. | ||
432 | */ | 456 | */ |
433 | static void rxrpc_local_processor(struct work_struct *work) | 457 | static void rxrpc_local_processor(struct work_struct *work) |
434 | { | 458 | { |
@@ -436,13 +460,15 @@ static void rxrpc_local_processor(struct work_struct *work) | |||
436 | container_of(work, struct rxrpc_local, processor); | 460 | container_of(work, struct rxrpc_local, processor); |
437 | bool again; | 461 | bool again; |
438 | 462 | ||
439 | trace_rxrpc_local(local, rxrpc_local_processing, | 463 | trace_rxrpc_local(local->debug_id, rxrpc_local_processing, |
440 | atomic_read(&local->usage), NULL); | 464 | atomic_read(&local->usage), NULL); |
441 | 465 | ||
442 | do { | 466 | do { |
443 | again = false; | 467 | again = false; |
444 | if (atomic_read(&local->usage) == 0) | 468 | if (atomic_read(&local->active_users) == 0) { |
445 | return rxrpc_local_destroyer(local); | 469 | rxrpc_local_destroyer(local); |
470 | break; | ||
471 | } | ||
446 | 472 | ||
447 | if (!skb_queue_empty(&local->reject_queue)) { | 473 | if (!skb_queue_empty(&local->reject_queue)) { |
448 | rxrpc_reject_packets(local); | 474 | rxrpc_reject_packets(local); |
@@ -454,6 +480,8 @@ static void rxrpc_local_processor(struct work_struct *work) | |||
454 | again = true; | 480 | again = true; |
455 | } | 481 | } |
456 | } while (again); | 482 | } while (again); |
483 | |||
484 | rxrpc_put_local(local); | ||
457 | } | 485 | } |
458 | 486 | ||
459 | /* | 487 | /* |
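With the destroyer keyed to active_users, the transport teardown now also cleans up client connections and runs the service connection reaper synchronously, and the work item always enters holding a reference of its own, which presumably is why the old "already dead" re-check could go. An informal summary of the resulting states:

    /*
     *   active_users > 0               endpoint in normal service
     *   active_users == 0, usage > 0   processor tears down the
     *                                  transport; object still exists
     *   usage == 0                     rxrpc_put_local() frees the
     *                                  object via call_rcu()
     *
     * The processor enters holding the ref handed over by
     * rxrpc_queue_local() and drops it on exit, so the endpoint cannot
     * be freed out from under a running work item.
     */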
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c index 948e3fe249ec..935bb60fff56 100644 --- a/net/rxrpc/output.c +++ b/net/rxrpc/output.c | |||
@@ -87,7 +87,7 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn, | |||
87 | *_top = top; | 87 | *_top = top; |
88 | 88 | ||
89 | pkt->ack.bufferSpace = htons(8); | 89 | pkt->ack.bufferSpace = htons(8); |
90 | pkt->ack.maxSkew = htons(call->ackr_skew); | 90 | pkt->ack.maxSkew = htons(0); |
91 | pkt->ack.firstPacket = htonl(hard_ack + 1); | 91 | pkt->ack.firstPacket = htonl(hard_ack + 1); |
92 | pkt->ack.previousPacket = htonl(call->ackr_prev_seq); | 92 | pkt->ack.previousPacket = htonl(call->ackr_prev_seq); |
93 | pkt->ack.serial = htonl(serial); | 93 | pkt->ack.serial = htonl(serial); |
@@ -228,7 +228,6 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping, | |||
228 | if (ping) | 228 | if (ping) |
229 | clear_bit(RXRPC_CALL_PINGING, &call->flags); | 229 | clear_bit(RXRPC_CALL_PINGING, &call->flags); |
230 | rxrpc_propose_ACK(call, pkt->ack.reason, | 230 | rxrpc_propose_ACK(call, pkt->ack.reason, |
231 | ntohs(pkt->ack.maxSkew), | ||
232 | ntohl(pkt->ack.serial), | 231 | ntohl(pkt->ack.serial), |
233 | false, true, | 232 | false, true, |
234 | rxrpc_propose_ack_retry_tx); | 233 | rxrpc_propose_ack_retry_tx); |
@@ -566,7 +565,7 @@ void rxrpc_reject_packets(struct rxrpc_local *local) | |||
566 | memset(&whdr, 0, sizeof(whdr)); | 565 | memset(&whdr, 0, sizeof(whdr)); |
567 | 566 | ||
568 | while ((skb = skb_dequeue(&local->reject_queue))) { | 567 | while ((skb = skb_dequeue(&local->reject_queue))) { |
569 | rxrpc_see_skb(skb, rxrpc_skb_rx_seen); | 568 | rxrpc_see_skb(skb, rxrpc_skb_seen); |
570 | sp = rxrpc_skb(skb); | 569 | sp = rxrpc_skb(skb); |
571 | 570 | ||
572 | switch (skb->mark) { | 571 | switch (skb->mark) { |
@@ -582,7 +581,7 @@ void rxrpc_reject_packets(struct rxrpc_local *local) | |||
582 | ioc = 2; | 581 | ioc = 2; |
583 | break; | 582 | break; |
584 | default: | 583 | default: |
585 | rxrpc_free_skb(skb, rxrpc_skb_rx_freed); | 584 | rxrpc_free_skb(skb, rxrpc_skb_freed); |
586 | continue; | 585 | continue; |
587 | } | 586 | } |
588 | 587 | ||
@@ -607,7 +606,7 @@ void rxrpc_reject_packets(struct rxrpc_local *local) | |||
607 | rxrpc_tx_point_reject); | 606 | rxrpc_tx_point_reject); |
608 | } | 607 | } |
609 | 608 | ||
610 | rxrpc_free_skb(skb, rxrpc_skb_rx_freed); | 609 | rxrpc_free_skb(skb, rxrpc_skb_freed); |
611 | } | 610 | } |
612 | 611 | ||
613 | _leave(""); | 612 | _leave(""); |
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c index 9f2f45c09e58..c97ebdc043e4 100644 --- a/net/rxrpc/peer_event.c +++ b/net/rxrpc/peer_event.c | |||
@@ -163,11 +163,11 @@ void rxrpc_error_report(struct sock *sk) | |||
163 | _leave("UDP socket errqueue empty"); | 163 | _leave("UDP socket errqueue empty"); |
164 | return; | 164 | return; |
165 | } | 165 | } |
166 | rxrpc_new_skb(skb, rxrpc_skb_rx_received); | 166 | rxrpc_new_skb(skb, rxrpc_skb_received); |
167 | serr = SKB_EXT_ERR(skb); | 167 | serr = SKB_EXT_ERR(skb); |
168 | if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) { | 168 | if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) { |
169 | _leave("UDP empty message"); | 169 | _leave("UDP empty message"); |
170 | rxrpc_free_skb(skb, rxrpc_skb_rx_freed); | 170 | rxrpc_free_skb(skb, rxrpc_skb_freed); |
171 | return; | 171 | return; |
172 | } | 172 | } |
173 | 173 | ||
@@ -177,7 +177,7 @@ void rxrpc_error_report(struct sock *sk) | |||
177 | peer = NULL; | 177 | peer = NULL; |
178 | if (!peer) { | 178 | if (!peer) { |
179 | rcu_read_unlock(); | 179 | rcu_read_unlock(); |
180 | rxrpc_free_skb(skb, rxrpc_skb_rx_freed); | 180 | rxrpc_free_skb(skb, rxrpc_skb_freed); |
181 | _leave(" [no peer]"); | 181 | _leave(" [no peer]"); |
182 | return; | 182 | return; |
183 | } | 183 | } |
@@ -189,7 +189,7 @@ void rxrpc_error_report(struct sock *sk) | |||
189 | serr->ee.ee_code == ICMP_FRAG_NEEDED)) { | 189 | serr->ee.ee_code == ICMP_FRAG_NEEDED)) { |
190 | rxrpc_adjust_mtu(peer, serr); | 190 | rxrpc_adjust_mtu(peer, serr); |
191 | rcu_read_unlock(); | 191 | rcu_read_unlock(); |
192 | rxrpc_free_skb(skb, rxrpc_skb_rx_freed); | 192 | rxrpc_free_skb(skb, rxrpc_skb_freed); |
193 | rxrpc_put_peer(peer); | 193 | rxrpc_put_peer(peer); |
194 | _leave(" [MTU update]"); | 194 | _leave(" [MTU update]"); |
195 | return; | 195 | return; |
@@ -197,7 +197,7 @@ void rxrpc_error_report(struct sock *sk) | |||
197 | 197 | ||
198 | rxrpc_store_error(peer, serr); | 198 | rxrpc_store_error(peer, serr); |
199 | rcu_read_unlock(); | 199 | rcu_read_unlock(); |
200 | rxrpc_free_skb(skb, rxrpc_skb_rx_freed); | 200 | rxrpc_free_skb(skb, rxrpc_skb_freed); |
201 | rxrpc_put_peer(peer); | 201 | rxrpc_put_peer(peer); |
202 | 202 | ||
203 | _leave(""); | 203 | _leave(""); |
@@ -378,7 +378,7 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet, | |||
378 | spin_lock_bh(&rxnet->peer_hash_lock); | 378 | spin_lock_bh(&rxnet->peer_hash_lock); |
379 | list_add_tail(&peer->keepalive_link, | 379 | list_add_tail(&peer->keepalive_link, |
380 | &rxnet->peer_keepalive[slot & mask]); | 380 | &rxnet->peer_keepalive[slot & mask]); |
381 | rxrpc_put_peer(peer); | 381 | rxrpc_put_peer_locked(peer); |
382 | } | 382 | } |
383 | 383 | ||
384 | spin_unlock_bh(&rxnet->peer_hash_lock); | 384 | spin_unlock_bh(&rxnet->peer_hash_lock); |
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c index 9d3ce81cf8ae..9c3ac96f71cb 100644 --- a/net/rxrpc/peer_object.c +++ b/net/rxrpc/peer_object.c | |||
@@ -437,6 +437,24 @@ void rxrpc_put_peer(struct rxrpc_peer *peer) | |||
437 | } | 437 | } |
438 | 438 | ||
439 | /* | 439 | /* |
440 | * Drop a ref on a peer record where the caller already holds the | ||
441 | * peer_hash_lock. | ||
442 | */ | ||
443 | void rxrpc_put_peer_locked(struct rxrpc_peer *peer) | ||
444 | { | ||
445 | const void *here = __builtin_return_address(0); | ||
446 | int n; | ||
447 | |||
448 | n = atomic_dec_return(&peer->usage); | ||
449 | trace_rxrpc_peer(peer, rxrpc_peer_put, n, here); | ||
450 | if (n == 0) { | ||
451 | hash_del_rcu(&peer->hash_link); | ||
452 | list_del_init(&peer->keepalive_link); | ||
453 | kfree_rcu(peer, rcu); | ||
454 | } | ||
455 | } | ||
456 | |||
457 | /* | ||
440 | * Make sure all peer records have been discarded. | 458 | * Make sure all peer records have been discarded. |
441 | */ | 459 | */ |
442 | void rxrpc_destroy_all_peers(struct rxrpc_net *rxnet) | 460 | void rxrpc_destroy_all_peers(struct rxrpc_net *rxnet) |
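The new helper exists for callers already inside peer_hash_lock, such as the keepalive dispatcher changed above: the plain put would have to take that lock itself to unhash a dying peer, which would deadlock. A sketch of the intended calling pattern, paraphrasing the dispatch loop:

    /*
     *   spin_lock_bh(&rxnet->peer_hash_lock);
     *   ...
     *   list_add_tail(&peer->keepalive_link, ...);
     *   rxrpc_put_peer_locked(peer);   // may unhash and kfree_rcu()
     *   ...
     *   spin_unlock_bh(&rxnet->peer_hash_lock);
     *
     * kfree_rcu() is safe under the spinlock, where waiting
     * synchronously for an RCU grace period would not be.
     */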
diff --git a/net/rxrpc/protocol.h b/net/rxrpc/protocol.h index 99ce322d7caa..49bb972539aa 100644 --- a/net/rxrpc/protocol.h +++ b/net/rxrpc/protocol.h | |||
@@ -89,6 +89,15 @@ struct rxrpc_jumbo_header { | |||
89 | #define RXRPC_JUMBO_DATALEN 1412 /* non-terminal jumbo packet data length */ | 89 | #define RXRPC_JUMBO_DATALEN 1412 /* non-terminal jumbo packet data length */ |
90 | #define RXRPC_JUMBO_SUBPKTLEN (RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header)) | 90 | #define RXRPC_JUMBO_SUBPKTLEN (RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header)) |
91 | 91 | ||
92 | /* | ||
93 | * The maximum number of subpackets that can possibly fit in a UDP packet is: | ||
94 | * | ||
95 | * ((max_IP - IP_hdr - UDP_hdr) / RXRPC_JUMBO_SUBPKTLEN) + 1 | ||
96 | * = ((65535 - 28 - 28) / 1416) + 1 | ||
97 | * = 46 non-terminal packets and 1 terminal packet. | ||
98 | */ | ||
99 | #define RXRPC_MAX_NR_JUMBO 47 | ||
100 | |||
92 | /*****************************************************************************/ | 101 | /*****************************************************************************/ |
93 | /* | 102 | /* |
94 | * on-the-wire Rx ACK packet data payload | 103 | * on-the-wire Rx ACK packet data payload |
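The figure in the new comment is easy to check standalone; the constants below mirror the comment, 1416 being RXRPC_JUMBO_SUBPKTLEN:

    #include <assert.h>

    int main(void)
    {
            int payload = 65535 - 28 - 28;   /* max IP datagram minus headers */
            int nr      = payload / 1416 + 1; /* 65479 / 1416 = 46, rem 343 */

            assert(nr == 47);                /* RXRPC_MAX_NR_JUMBO */
            return 0;
    }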
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c index 5abf46cf9e6c..3b0becb12041 100644 --- a/net/rxrpc/recvmsg.c +++ b/net/rxrpc/recvmsg.c | |||
@@ -141,7 +141,7 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial) | |||
141 | ASSERTCMP(call->rx_hard_ack, ==, call->rx_top); | 141 | ASSERTCMP(call->rx_hard_ack, ==, call->rx_top); |
142 | 142 | ||
143 | if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) { | 143 | if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) { |
144 | rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, 0, serial, false, true, | 144 | rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, serial, false, true, |
145 | rxrpc_propose_ack_terminal_ack); | 145 | rxrpc_propose_ack_terminal_ack); |
146 | //rxrpc_send_ack_packet(call, false, NULL); | 146 | //rxrpc_send_ack_packet(call, false, NULL); |
147 | } | 147 | } |
@@ -159,7 +159,7 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial) | |||
159 | call->state = RXRPC_CALL_SERVER_ACK_REQUEST; | 159 | call->state = RXRPC_CALL_SERVER_ACK_REQUEST; |
160 | call->expect_req_by = jiffies + MAX_JIFFY_OFFSET; | 160 | call->expect_req_by = jiffies + MAX_JIFFY_OFFSET; |
161 | write_unlock_bh(&call->state_lock); | 161 | write_unlock_bh(&call->state_lock); |
162 | rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial, false, true, | 162 | rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial, false, true, |
163 | rxrpc_propose_ack_processing_op); | 163 | rxrpc_propose_ack_processing_op); |
164 | break; | 164 | break; |
165 | default: | 165 | default: |
@@ -177,7 +177,8 @@ static void rxrpc_rotate_rx_window(struct rxrpc_call *call) | |||
177 | struct sk_buff *skb; | 177 | struct sk_buff *skb; |
178 | rxrpc_serial_t serial; | 178 | rxrpc_serial_t serial; |
179 | rxrpc_seq_t hard_ack, top; | 179 | rxrpc_seq_t hard_ack, top; |
180 | u8 flags; | 180 | bool last = false; |
181 | u8 subpacket; | ||
181 | int ix; | 182 | int ix; |
182 | 183 | ||
183 | _enter("%d", call->debug_id); | 184 | _enter("%d", call->debug_id); |
@@ -189,30 +190,32 @@ static void rxrpc_rotate_rx_window(struct rxrpc_call *call) | |||
189 | hard_ack++; | 190 | hard_ack++; |
190 | ix = hard_ack & RXRPC_RXTX_BUFF_MASK; | 191 | ix = hard_ack & RXRPC_RXTX_BUFF_MASK; |
191 | skb = call->rxtx_buffer[ix]; | 192 | skb = call->rxtx_buffer[ix]; |
192 | rxrpc_see_skb(skb, rxrpc_skb_rx_rotated); | 193 | rxrpc_see_skb(skb, rxrpc_skb_rotated); |
193 | sp = rxrpc_skb(skb); | 194 | sp = rxrpc_skb(skb); |
194 | flags = sp->hdr.flags; | 195 | |
195 | serial = sp->hdr.serial; | 196 | subpacket = call->rxtx_annotations[ix] & RXRPC_RX_ANNO_SUBPACKET; |
196 | if (call->rxtx_annotations[ix] & RXRPC_RX_ANNO_JUMBO) | 197 | serial = sp->hdr.serial + subpacket; |
197 | serial += (call->rxtx_annotations[ix] & RXRPC_RX_ANNO_JUMBO) - 1; | 198 | |
199 | if (subpacket == sp->nr_subpackets - 1 && | ||
200 | sp->rx_flags & RXRPC_SKB_INCL_LAST) | ||
201 | last = true; | ||
198 | 202 | ||
199 | call->rxtx_buffer[ix] = NULL; | 203 | call->rxtx_buffer[ix] = NULL; |
200 | call->rxtx_annotations[ix] = 0; | 204 | call->rxtx_annotations[ix] = 0; |
201 | /* Barrier against rxrpc_input_data(). */ | 205 | /* Barrier against rxrpc_input_data(). */ |
202 | smp_store_release(&call->rx_hard_ack, hard_ack); | 206 | smp_store_release(&call->rx_hard_ack, hard_ack); |
203 | 207 | ||
204 | rxrpc_free_skb(skb, rxrpc_skb_rx_freed); | 208 | rxrpc_free_skb(skb, rxrpc_skb_freed); |
205 | 209 | ||
206 | _debug("%u,%u,%02x", hard_ack, top, flags); | ||
207 | trace_rxrpc_receive(call, rxrpc_receive_rotate, serial, hard_ack); | 210 | trace_rxrpc_receive(call, rxrpc_receive_rotate, serial, hard_ack); |
208 | if (flags & RXRPC_LAST_PACKET) { | 211 | if (last) { |
209 | rxrpc_end_rx_phase(call, serial); | 212 | rxrpc_end_rx_phase(call, serial); |
210 | } else { | 213 | } else { |
211 | /* Check to see if there's an ACK that needs sending. */ | 214 | /* Check to see if there's an ACK that needs sending. */ |
212 | if (after_eq(hard_ack, call->ackr_consumed + 2) || | 215 | if (after_eq(hard_ack, call->ackr_consumed + 2) || |
213 | after_eq(top, call->ackr_seen + 2) || | 216 | after_eq(top, call->ackr_seen + 2) || |
214 | (hard_ack == top && after(hard_ack, call->ackr_consumed))) | 217 | (hard_ack == top && after(hard_ack, call->ackr_consumed))) |
215 | rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial, | 218 | rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial, |
216 | true, true, | 219 | true, true, |
217 | rxrpc_propose_ack_rotate_rx); | 220 | rxrpc_propose_ack_rotate_rx); |
218 | if (call->ackr_reason && call->ackr_reason != RXRPC_ACK_DELAY) | 221 | if (call->ackr_reason && call->ackr_reason != RXRPC_ACK_DELAY) |
@@ -233,18 +236,19 @@ static int rxrpc_verify_packet(struct rxrpc_call *call, struct sk_buff *skb, | |||
233 | struct rxrpc_skb_priv *sp = rxrpc_skb(skb); | 236 | struct rxrpc_skb_priv *sp = rxrpc_skb(skb); |
234 | rxrpc_seq_t seq = sp->hdr.seq; | 237 | rxrpc_seq_t seq = sp->hdr.seq; |
235 | u16 cksum = sp->hdr.cksum; | 238 | u16 cksum = sp->hdr.cksum; |
239 | u8 subpacket = annotation & RXRPC_RX_ANNO_SUBPACKET; | ||
236 | 240 | ||
237 | _enter(""); | 241 | _enter(""); |
238 | 242 | ||
239 | /* For all but the head jumbo subpacket, the security checksum is in a | 243 | /* For all but the head jumbo subpacket, the security checksum is in a |
240 | * jumbo header immediately prior to the data. | 244 | * jumbo header immediately prior to the data. |
241 | */ | 245 | */ |
242 | if ((annotation & RXRPC_RX_ANNO_JUMBO) > 1) { | 246 | if (subpacket > 0) { |
243 | __be16 tmp; | 247 | __be16 tmp; |
244 | if (skb_copy_bits(skb, offset - 2, &tmp, 2) < 0) | 248 | if (skb_copy_bits(skb, offset - 2, &tmp, 2) < 0) |
245 | BUG(); | 249 | BUG(); |
246 | cksum = ntohs(tmp); | 250 | cksum = ntohs(tmp); |
247 | seq += (annotation & RXRPC_RX_ANNO_JUMBO) - 1; | 251 | seq += subpacket; |
248 | } | 252 | } |
249 | 253 | ||
250 | return call->conn->security->verify_packet(call, skb, offset, len, | 254 | return call->conn->security->verify_packet(call, skb, offset, len, |
@@ -265,19 +269,18 @@ static int rxrpc_locate_data(struct rxrpc_call *call, struct sk_buff *skb, | |||
265 | u8 *_annotation, | 269 | u8 *_annotation, |
266 | unsigned int *_offset, unsigned int *_len) | 270 | unsigned int *_offset, unsigned int *_len) |
267 | { | 271 | { |
272 | struct rxrpc_skb_priv *sp = rxrpc_skb(skb); | ||
268 | unsigned int offset = sizeof(struct rxrpc_wire_header); | 273 | unsigned int offset = sizeof(struct rxrpc_wire_header); |
269 | unsigned int len; | 274 | unsigned int len; |
270 | int ret; | 275 | int ret; |
271 | u8 annotation = *_annotation; | 276 | u8 annotation = *_annotation; |
277 | u8 subpacket = annotation & RXRPC_RX_ANNO_SUBPACKET; | ||
272 | 278 | ||
273 | /* Locate the subpacket */ | 279 | /* Locate the subpacket */ |
280 | offset += subpacket * RXRPC_JUMBO_SUBPKTLEN; | ||
274 | len = skb->len - offset; | 281 | len = skb->len - offset; |
275 | if ((annotation & RXRPC_RX_ANNO_JUMBO) > 0) { | 282 | if (subpacket < sp->nr_subpackets - 1) |
276 | offset += (((annotation & RXRPC_RX_ANNO_JUMBO) - 1) * | 283 | len = RXRPC_JUMBO_DATALEN; |
277 | RXRPC_JUMBO_SUBPKTLEN); | ||
278 | len = (annotation & RXRPC_RX_ANNO_JLAST) ? | ||
279 | skb->len - offset : RXRPC_JUMBO_SUBPKTLEN; | ||
280 | } | ||
281 | 284 | ||
282 | if (!(annotation & RXRPC_RX_ANNO_VERIFIED)) { | 285 | if (!(annotation & RXRPC_RX_ANNO_VERIFIED)) { |
283 | ret = rxrpc_verify_packet(call, skb, annotation, offset, len); | 286 | ret = rxrpc_verify_packet(call, skb, annotation, offset, len); |
@@ -303,6 +306,7 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call, | |||
303 | { | 306 | { |
304 | struct rxrpc_skb_priv *sp; | 307 | struct rxrpc_skb_priv *sp; |
305 | struct sk_buff *skb; | 308 | struct sk_buff *skb; |
309 | rxrpc_serial_t serial; | ||
306 | rxrpc_seq_t hard_ack, top, seq; | 310 | rxrpc_seq_t hard_ack, top, seq; |
307 | size_t remain; | 311 | size_t remain; |
308 | bool last; | 312 | bool last; |
@@ -336,12 +340,15 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call, | |||
336 | break; | 340 | break; |
337 | } | 341 | } |
338 | smp_rmb(); | 342 | smp_rmb(); |
339 | rxrpc_see_skb(skb, rxrpc_skb_rx_seen); | 343 | rxrpc_see_skb(skb, rxrpc_skb_seen); |
340 | sp = rxrpc_skb(skb); | 344 | sp = rxrpc_skb(skb); |
341 | 345 | ||
342 | if (!(flags & MSG_PEEK)) | 346 | if (!(flags & MSG_PEEK)) { |
347 | serial = sp->hdr.serial; | ||
348 | serial += call->rxtx_annotations[ix] & RXRPC_RX_ANNO_SUBPACKET; | ||
343 | trace_rxrpc_receive(call, rxrpc_receive_front, | 349 | trace_rxrpc_receive(call, rxrpc_receive_front, |
344 | sp->hdr.serial, seq); | 350 | serial, seq); |
351 | } | ||
345 | 352 | ||
346 | if (msg) | 353 | if (msg) |
347 | sock_recv_timestamp(msg, sock->sk, skb); | 354 | sock_recv_timestamp(msg, sock->sk, skb); |
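The recvmsg changes all follow from one convention: a jumbo packet stays a single skb, and a subpacket is addressed by the low bits of its ring annotation. Sequence number, serial, data offset and length, and whether this is the call's last packet are all derived from that index. A standalone sketch of the derivation; WIRE_HDRLEN matches sizeof(struct rxrpc_wire_header), the annotation mask is an assumption, and the struct is a simplified stand-in:

    #include <assert.h>
    #include <stdbool.h>

    #define JUMBO_DATALEN   1412
    #define JUMBO_SUBPKTLEN 1416
    #define WIRE_HDRLEN     28     /* sizeof(struct rxrpc_wire_header) */
    #define ANNO_SUBPACKET  0x3f   /* assumed low-bits annotation mask */

    struct pkt {
            unsigned int seq, serial;   /* from the wire header */
            unsigned int nr_subpackets;
            unsigned int skb_len;
            bool incl_last;             /* models RXRPC_SKB_INCL_LAST */
    };

    static void locate(const struct pkt *p, unsigned char annotation,
                       unsigned int *seq, unsigned int *serial,
                       unsigned int *offset, unsigned int *len, bool *last)
    {
            unsigned int sub = annotation & ANNO_SUBPACKET;

            *seq    = p->seq + sub;
            *serial = p->serial + sub;
            *offset = WIRE_HDRLEN + sub * JUMBO_SUBPKTLEN;
            *len    = sub < p->nr_subpackets - 1 ? JUMBO_DATALEN
                                                 : p->skb_len - *offset;
            *last   = sub == p->nr_subpackets - 1 && p->incl_last;
    }

    int main(void)
    {
            /* A 3-subpacket jumbo whose final data segment is 200 bytes. */
            struct pkt p = { .seq = 5, .serial = 100, .nr_subpackets = 3,
                             .skb_len = WIRE_HDRLEN + 2 * JUMBO_SUBPKTLEN + 200,
                             .incl_last = true };
            unsigned int seq, serial, offset, len;
            bool last;

            locate(&p, 1, &seq, &serial, &offset, &len, &last);
            assert(len == JUMBO_DATALEN && !last);   /* middle subpacket */

            locate(&p, 2, &seq, &serial, &offset, &len, &last);
            assert(seq == 7 && serial == 102 && len == 200 && last);
            return 0;
    }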
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c index ae8cd8926456..c60c520fde7c 100644 --- a/net/rxrpc/rxkad.c +++ b/net/rxrpc/rxkad.c | |||
@@ -187,10 +187,8 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call, | |||
187 | struct rxrpc_skb_priv *sp; | 187 | struct rxrpc_skb_priv *sp; |
188 | struct rxrpc_crypt iv; | 188 | struct rxrpc_crypt iv; |
189 | struct scatterlist sg[16]; | 189 | struct scatterlist sg[16]; |
190 | struct sk_buff *trailer; | ||
191 | unsigned int len; | 190 | unsigned int len; |
192 | u16 check; | 191 | u16 check; |
193 | int nsg; | ||
194 | int err; | 192 | int err; |
195 | 193 | ||
196 | sp = rxrpc_skb(skb); | 194 | sp = rxrpc_skb(skb); |
@@ -214,15 +212,14 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call, | |||
214 | crypto_skcipher_encrypt(req); | 212 | crypto_skcipher_encrypt(req); |
215 | 213 | ||
216 | /* we want to encrypt the skbuff in-place */ | 214 | /* we want to encrypt the skbuff in-place */ |
217 | nsg = skb_cow_data(skb, 0, &trailer); | 215 | err = -EMSGSIZE; |
218 | err = -ENOMEM; | 216 | if (skb_shinfo(skb)->nr_frags > 16) |
219 | if (nsg < 0 || nsg > 16) | ||
220 | goto out; | 217 | goto out; |
221 | 218 | ||
222 | len = data_size + call->conn->size_align - 1; | 219 | len = data_size + call->conn->size_align - 1; |
223 | len &= ~(call->conn->size_align - 1); | 220 | len &= ~(call->conn->size_align - 1); |
224 | 221 | ||
225 | sg_init_table(sg, nsg); | 222 | sg_init_table(sg, ARRAY_SIZE(sg)); |
226 | err = skb_to_sgvec(skb, sg, 0, len); | 223 | err = skb_to_sgvec(skb, sg, 0, len); |
227 | if (unlikely(err < 0)) | 224 | if (unlikely(err < 0)) |
228 | goto out; | 225 | goto out; |
@@ -319,11 +316,10 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb, | |||
319 | struct rxkad_level1_hdr sechdr; | 316 | struct rxkad_level1_hdr sechdr; |
320 | struct rxrpc_crypt iv; | 317 | struct rxrpc_crypt iv; |
321 | struct scatterlist sg[16]; | 318 | struct scatterlist sg[16]; |
322 | struct sk_buff *trailer; | ||
323 | bool aborted; | 319 | bool aborted; |
324 | u32 data_size, buf; | 320 | u32 data_size, buf; |
325 | u16 check; | 321 | u16 check; |
326 | int nsg, ret; | 322 | int ret; |
327 | 323 | ||
328 | _enter(""); | 324 | _enter(""); |
329 | 325 | ||
@@ -336,11 +332,7 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb, | |||
336 | /* Decrypt the skbuff in-place. TODO: We really want to decrypt | 332 | /* Decrypt the skbuff in-place. TODO: We really want to decrypt |
337 | * directly into the target buffer. | 333 | * directly into the target buffer. |
338 | */ | 334 | */ |
339 | nsg = skb_cow_data(skb, 0, &trailer); | 335 | sg_init_table(sg, ARRAY_SIZE(sg)); |
340 | if (nsg < 0 || nsg > 16) | ||
341 | goto nomem; | ||
342 | |||
343 | sg_init_table(sg, nsg); | ||
344 | ret = skb_to_sgvec(skb, sg, offset, 8); | 336 | ret = skb_to_sgvec(skb, sg, offset, 8); |
345 | if (unlikely(ret < 0)) | 337 | if (unlikely(ret < 0)) |
346 | return ret; | 338 | return ret; |
@@ -388,10 +380,6 @@ protocol_error: | |||
388 | if (aborted) | 380 | if (aborted) |
389 | rxrpc_send_abort_packet(call); | 381 | rxrpc_send_abort_packet(call); |
390 | return -EPROTO; | 382 | return -EPROTO; |
391 | |||
392 | nomem: | ||
393 | _leave(" = -ENOMEM"); | ||
394 | return -ENOMEM; | ||
395 | } | 383 | } |
396 | 384 | ||
397 | /* | 385 | /* |
@@ -406,7 +394,6 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb, | |||
406 | struct rxkad_level2_hdr sechdr; | 394 | struct rxkad_level2_hdr sechdr; |
407 | struct rxrpc_crypt iv; | 395 | struct rxrpc_crypt iv; |
408 | struct scatterlist _sg[4], *sg; | 396 | struct scatterlist _sg[4], *sg; |
409 | struct sk_buff *trailer; | ||
410 | bool aborted; | 397 | bool aborted; |
411 | u32 data_size, buf; | 398 | u32 data_size, buf; |
412 | u16 check; | 399 | u16 check; |
@@ -423,12 +410,11 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb, | |||
423 | /* Decrypt the skbuff in-place. TODO: We really want to decrypt | 410 | /* Decrypt the skbuff in-place. TODO: We really want to decrypt |
424 | * directly into the target buffer. | 411 | * directly into the target buffer. |
425 | */ | 412 | */ |
426 | nsg = skb_cow_data(skb, 0, &trailer); | ||
427 | if (nsg < 0) | ||
428 | goto nomem; | ||
429 | |||
430 | sg = _sg; | 413 | sg = _sg; |
431 | if (unlikely(nsg > 4)) { | 414 | nsg = skb_shinfo(skb)->nr_frags; |
415 | if (nsg <= 4) { | ||
416 | nsg = 4; | ||
417 | } else { | ||
432 | sg = kmalloc_array(nsg, sizeof(*sg), GFP_NOIO); | 418 | sg = kmalloc_array(nsg, sizeof(*sg), GFP_NOIO); |
433 | if (!sg) | 419 | if (!sg) |
434 | goto nomem; | 420 | goto nomem; |
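Dropping skb_cow_data() throughout rxkad appears to lean on the rest of this series, which keeps received packets in private, unshared buffers, so in-place crypto can build its scatterlist straight over the existing fragments. A summary of how the bounds shift (not kernel code):

    /*
     * secure (encrypt):  > 16 fragments       -> reject, -EMSGSIZE
     * verify level 1:    fixed sg[16] table   -> sized by ARRAY_SIZE()
     * verify level 2:    <= 4 frags on-stack _sg[4],
     *                    else kmalloc_array() -> -ENOMEM only there
     *
     * skb_cow_data() previously unshared/reallocated the skb at each
     * of these sites; done once on input, it is pure overhead here.
     */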
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c index 5d3f33ce6d41..6a1547b270fe 100644 --- a/net/rxrpc/sendmsg.c +++ b/net/rxrpc/sendmsg.c | |||
@@ -176,7 +176,7 @@ static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call, | |||
176 | skb->tstamp = ktime_get_real(); | 176 | skb->tstamp = ktime_get_real(); |
177 | 177 | ||
178 | ix = seq & RXRPC_RXTX_BUFF_MASK; | 178 | ix = seq & RXRPC_RXTX_BUFF_MASK; |
179 | rxrpc_get_skb(skb, rxrpc_skb_tx_got); | 179 | rxrpc_get_skb(skb, rxrpc_skb_got); |
180 | call->rxtx_annotations[ix] = annotation; | 180 | call->rxtx_annotations[ix] = annotation; |
181 | smp_wmb(); | 181 | smp_wmb(); |
182 | call->rxtx_buffer[ix] = skb; | 182 | call->rxtx_buffer[ix] = skb; |
@@ -226,6 +226,7 @@ static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call, | |||
226 | rxrpc_set_call_completion(call, | 226 | rxrpc_set_call_completion(call, |
227 | RXRPC_CALL_LOCAL_ERROR, | 227 | RXRPC_CALL_LOCAL_ERROR, |
228 | 0, ret); | 228 | 0, ret); |
229 | rxrpc_notify_socket(call); | ||
229 | goto out; | 230 | goto out; |
230 | } | 231 | } |
231 | _debug("need instant resend %d", ret); | 232 | _debug("need instant resend %d", ret); |
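The added rxrpc_notify_socket() is the substance of this hunk: completing a call with a local error changes its state but, by itself, wakes nobody. A sketch of the consumer this unblocks (hypothetical userspace shape):

    /*
     *   sendmsg(fd, ...)   -> transmission fails; call completed
     *                         with RXRPC_CALL_LOCAL_ERROR
     *   recvmsg(fd, ...)   -> sleeps on the socket...
     *
     * Without the notification, the recvmsg() sleeper is never woken
     * to collect the error; with it, the completion is signalled like
     * any other call event.
     */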
@@ -247,7 +248,7 @@ static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call, | |||
247 | } | 248 | } |
248 | 249 | ||
249 | out: | 250 | out: |
250 | rxrpc_free_skb(skb, rxrpc_skb_tx_freed); | 251 | rxrpc_free_skb(skb, rxrpc_skb_freed); |
251 | _leave(" = %d", ret); | 252 | _leave(" = %d", ret); |
252 | return ret; | 253 | return ret; |
253 | } | 254 | } |
@@ -288,7 +289,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx, | |||
288 | 289 | ||
289 | skb = call->tx_pending; | 290 | skb = call->tx_pending; |
290 | call->tx_pending = NULL; | 291 | call->tx_pending = NULL; |
291 | rxrpc_see_skb(skb, rxrpc_skb_tx_seen); | 292 | rxrpc_see_skb(skb, rxrpc_skb_seen); |
292 | 293 | ||
293 | copied = 0; | 294 | copied = 0; |
294 | do { | 295 | do { |
@@ -335,7 +336,9 @@ static int rxrpc_send_data(struct rxrpc_sock *rx, | |||
335 | if (!skb) | 336 | if (!skb) |
336 | goto maybe_error; | 337 | goto maybe_error; |
337 | 338 | ||
338 | rxrpc_new_skb(skb, rxrpc_skb_tx_new); | 339 | sp = rxrpc_skb(skb); |
340 | sp->rx_flags |= RXRPC_SKB_TX_BUFFER; | ||
341 | rxrpc_new_skb(skb, rxrpc_skb_new); | ||
339 | 342 | ||
340 | _debug("ALLOC SEND %p", skb); | 343 | _debug("ALLOC SEND %p", skb); |
341 | 344 | ||
@@ -345,7 +348,6 @@ static int rxrpc_send_data(struct rxrpc_sock *rx, | |||
345 | skb_reserve(skb, call->conn->security_size); | 348 | skb_reserve(skb, call->conn->security_size); |
346 | skb->len += call->conn->security_size; | 349 | skb->len += call->conn->security_size; |
347 | 350 | ||
348 | sp = rxrpc_skb(skb); | ||
349 | sp->remain = chunk; | 351 | sp->remain = chunk; |
350 | if (sp->remain > skb_tailroom(skb)) | 352 | if (sp->remain > skb_tailroom(skb)) |
351 | sp->remain = skb_tailroom(skb); | 353 | sp->remain = skb_tailroom(skb); |
@@ -438,7 +440,7 @@ out: | |||
438 | return ret; | 440 | return ret; |
439 | 441 | ||
440 | call_terminated: | 442 | call_terminated: |
441 | rxrpc_free_skb(skb, rxrpc_skb_tx_freed); | 443 | rxrpc_free_skb(skb, rxrpc_skb_freed); |
442 | _leave(" = %d", call->error); | 444 | _leave(" = %d", call->error); |
443 | return call->error; | 445 | return call->error; |
444 | 446 | ||
diff --git a/net/rxrpc/skbuff.c b/net/rxrpc/skbuff.c index 9ad5045b7c2f..0348d2bf6f7d 100644 --- a/net/rxrpc/skbuff.c +++ b/net/rxrpc/skbuff.c | |||
@@ -14,7 +14,8 @@ | |||
14 | #include <net/af_rxrpc.h> | 14 | #include <net/af_rxrpc.h> |
15 | #include "ar-internal.h" | 15 | #include "ar-internal.h" |
16 | 16 | ||
17 | #define select_skb_count(op) (op >= rxrpc_skb_tx_cleaned ? &rxrpc_n_tx_skbs : &rxrpc_n_rx_skbs) | 17 | #define is_tx_skb(skb) (rxrpc_skb(skb)->rx_flags & RXRPC_SKB_TX_BUFFER) |
18 | #define select_skb_count(skb) (is_tx_skb(skb) ? &rxrpc_n_tx_skbs : &rxrpc_n_rx_skbs) | ||
18 | 19 | ||
19 | /* | 20 | /* |
20 | * Note the allocation or reception of a socket buffer. | 21 | * Note the allocation or reception of a socket buffer. |
@@ -22,8 +23,9 @@ | |||
22 | void rxrpc_new_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) | 23 | void rxrpc_new_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) |
23 | { | 24 | { |
24 | const void *here = __builtin_return_address(0); | 25 | const void *here = __builtin_return_address(0); |
25 | int n = atomic_inc_return(select_skb_count(op)); | 26 | int n = atomic_inc_return(select_skb_count(skb)); |
26 | trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here); | 27 | trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, |
28 | rxrpc_skb(skb)->rx_flags, here); | ||
27 | } | 29 | } |
28 | 30 | ||
29 | /* | 31 | /* |
@@ -33,8 +35,9 @@ void rxrpc_see_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) | |||
33 | { | 35 | { |
34 | const void *here = __builtin_return_address(0); | 36 | const void *here = __builtin_return_address(0); |
35 | if (skb) { | 37 | if (skb) { |
36 | int n = atomic_read(select_skb_count(op)); | 38 | int n = atomic_read(select_skb_count(skb)); |
37 | trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here); | 39 | trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, |
40 | rxrpc_skb(skb)->rx_flags, here); | ||
38 | } | 41 | } |
39 | } | 42 | } |
40 | 43 | ||
@@ -44,12 +47,23 @@ void rxrpc_see_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) | |||
44 | void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) | 47 | void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) |
45 | { | 48 | { |
46 | const void *here = __builtin_return_address(0); | 49 | const void *here = __builtin_return_address(0); |
47 | int n = atomic_inc_return(select_skb_count(op)); | 50 | int n = atomic_inc_return(select_skb_count(skb)); |
48 | trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here); | 51 | trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, |
52 | rxrpc_skb(skb)->rx_flags, here); | ||
49 | skb_get(skb); | 53 | skb_get(skb); |
50 | } | 54 | } |
51 | 55 | ||
52 | /* | 56 | /* |
57 | * Note the dropping of a ref on a socket buffer by the core. | ||
58 | */ | ||
59 | void rxrpc_eaten_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) | ||
60 | { | ||
61 | const void *here = __builtin_return_address(0); | ||
62 | int n = atomic_inc_return(&rxrpc_n_rx_skbs); | ||
63 | trace_rxrpc_skb(skb, op, 0, n, 0, here); | ||
64 | } | ||
65 | |||
66 | /* | ||
53 | * Note the destruction of a socket buffer. | 67 | * Note the destruction of a socket buffer. |
54 | */ | 68 | */ |
55 | void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) | 69 | void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) |
@@ -58,8 +72,9 @@ void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) | |||
58 | if (skb) { | 72 | if (skb) { |
59 | int n; | 73 | int n; |
60 | CHECK_SLAB_OKAY(&skb->users); | 74 | CHECK_SLAB_OKAY(&skb->users); |
61 | n = atomic_dec_return(select_skb_count(op)); | 75 | n = atomic_dec_return(select_skb_count(skb)); |
62 | trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here); | 76 | trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, |
77 | rxrpc_skb(skb)->rx_flags, here); | ||
63 | kfree_skb(skb); | 78 | kfree_skb(skb); |
64 | } | 79 | } |
65 | } | 80 | } |
@@ -72,9 +87,10 @@ void rxrpc_purge_queue(struct sk_buff_head *list) | |||
72 | const void *here = __builtin_return_address(0); | 87 | const void *here = __builtin_return_address(0); |
73 | struct sk_buff *skb; | 88 | struct sk_buff *skb; |
74 | while ((skb = skb_dequeue((list))) != NULL) { | 89 | while ((skb = skb_dequeue((list))) != NULL) { |
75 | int n = atomic_dec_return(select_skb_count(rxrpc_skb_rx_purged)); | 90 | int n = atomic_dec_return(select_skb_count(skb)); |
76 | trace_rxrpc_skb(skb, rxrpc_skb_rx_purged, | 91 | trace_rxrpc_skb(skb, rxrpc_skb_purged, |
77 | refcount_read(&skb->users), n, here); | 92 | refcount_read(&skb->users), n, |
93 | rxrpc_skb(skb)->rx_flags, here); | ||
78 | kfree_skb(skb); | 94 | kfree_skb(skb); |
79 | } | 95 | } |
80 | } | 96 | } |
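The macro rewrite is forced by the enum merge visible throughout this patch: once the rxrpc_skb_rx_* and rxrpc_skb_tx_* trace values collapse into one set, there is no ordering test like op >= rxrpc_skb_tx_cleaned left to dispatch on, so the direction has to live on the buffer itself. A userspace model of the new selection, with illustrative names only:

    #include <stdatomic.h>
    #include <stdio.h>

    #define SKB_TX_BUFFER 0x01              /* models RXRPC_SKB_TX_BUFFER */

    struct skb { unsigned char flags; };

    static atomic_int n_tx_skbs, n_rx_skbs;

    /* The counter now comes from the buffer, not from the trace op. */
    static atomic_int *select_skb_count(const struct skb *skb)
    {
            return (skb->flags & SKB_TX_BUFFER) ? &n_tx_skbs : &n_rx_skbs;
    }

    int main(void)
    {
            struct skb tx = { .flags = SKB_TX_BUFFER };
            struct skb rx = { 0 };

            atomic_fetch_add(select_skb_count(&tx), 1); /* cf. rxrpc_new_skb */
            atomic_fetch_add(select_skb_count(&rx), 1);
            atomic_fetch_sub(select_skb_count(&rx), 1); /* cf. rxrpc_free_skb */

            printf("tx=%d rx=%d\n", atomic_load(&n_tx_skbs),
                   atomic_load(&n_rx_skbs));
            return 0;
    }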