author    David S. Miller <davem@davemloft.net>    2018-10-13 00:38:46 -0400
committer David S. Miller <davem@davemloft.net>    2018-10-13 00:38:46 -0400
commit    d864991b220b7c62e81d21209e1fd978fd67352c (patch)
tree      b570a1ad6fc1b959c5bcda6ceca0b321319c01e0    /net/rxrpc/input.c
parent    a688c53a0277d8ea21d86a5c56884892e3442c5e (diff)
parent    bab5c80b211035739997ebd361a679fa85b39465 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts were mostly easy to resolve using immediate context, except the cls_u32.c one, where I simply took the entire HEAD chunk.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/rxrpc/input.c')
-rw-r--r--  net/rxrpc/input.c | 251
1 file changed, 131 insertions(+), 120 deletions(-)
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 5b2626929822..9128aa0e40aa 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -216,10 +216,11 @@ static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb,
216 /* 216 /*
217 * Apply a hard ACK by advancing the Tx window. 217 * Apply a hard ACK by advancing the Tx window.
218 */ 218 */
219static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to, 219static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
220 struct rxrpc_ack_summary *summary) 220 struct rxrpc_ack_summary *summary)
221{ 221{
222 struct sk_buff *skb, *list = NULL; 222 struct sk_buff *skb, *list = NULL;
223 bool rot_last = false;
223 int ix; 224 int ix;
224 u8 annotation; 225 u8 annotation;
225 226
@@ -243,15 +244,17 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
243 skb->next = list; 244 skb->next = list;
244 list = skb; 245 list = skb;
245 246
246 if (annotation & RXRPC_TX_ANNO_LAST) 247 if (annotation & RXRPC_TX_ANNO_LAST) {
247 set_bit(RXRPC_CALL_TX_LAST, &call->flags); 248 set_bit(RXRPC_CALL_TX_LAST, &call->flags);
249 rot_last = true;
250 }
248 if ((annotation & RXRPC_TX_ANNO_MASK) != RXRPC_TX_ANNO_ACK) 251 if ((annotation & RXRPC_TX_ANNO_MASK) != RXRPC_TX_ANNO_ACK)
249 summary->nr_rot_new_acks++; 252 summary->nr_rot_new_acks++;
250 } 253 }
251 254
252 spin_unlock(&call->lock); 255 spin_unlock(&call->lock);
253 256
254 trace_rxrpc_transmit(call, (test_bit(RXRPC_CALL_TX_LAST, &call->flags) ? 257 trace_rxrpc_transmit(call, (rot_last ?
255 rxrpc_transmit_rotate_last : 258 rxrpc_transmit_rotate_last :
256 rxrpc_transmit_rotate)); 259 rxrpc_transmit_rotate));
257 wake_up(&call->waitq); 260 wake_up(&call->waitq);
@@ -262,6 +265,8 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
262 skb_mark_not_on_list(skb); 265 skb_mark_not_on_list(skb);
263 rxrpc_free_skb(skb, rxrpc_skb_tx_freed); 266 rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
264 } 267 }
268
269 return rot_last;
265} 270}
266 271
267/* 272/*
@@ -273,23 +278,26 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
273static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun, 278static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
274 const char *abort_why) 279 const char *abort_why)
275{ 280{
281 unsigned int state;
276 282
277 ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags)); 283 ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags));
278 284
279 write_lock(&call->state_lock); 285 write_lock(&call->state_lock);
280 286
281 switch (call->state) { 287 state = call->state;
288 switch (state) {
282 case RXRPC_CALL_CLIENT_SEND_REQUEST: 289 case RXRPC_CALL_CLIENT_SEND_REQUEST:
283 case RXRPC_CALL_CLIENT_AWAIT_REPLY: 290 case RXRPC_CALL_CLIENT_AWAIT_REPLY:
284 if (reply_begun) 291 if (reply_begun)
285 call->state = RXRPC_CALL_CLIENT_RECV_REPLY; 292 call->state = state = RXRPC_CALL_CLIENT_RECV_REPLY;
286 else 293 else
287 call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY; 294 call->state = state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
288 break; 295 break;
289 296
290 case RXRPC_CALL_SERVER_AWAIT_ACK: 297 case RXRPC_CALL_SERVER_AWAIT_ACK:
291 __rxrpc_call_completed(call); 298 __rxrpc_call_completed(call);
292 rxrpc_notify_socket(call); 299 rxrpc_notify_socket(call);
300 state = call->state;
293 break; 301 break;
294 302
295 default: 303 default:
@@ -297,11 +305,10 @@ static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
297 } 305 }
298 306
299 write_unlock(&call->state_lock); 307 write_unlock(&call->state_lock);
300 if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY) { 308 if (state == RXRPC_CALL_CLIENT_AWAIT_REPLY)
301 trace_rxrpc_transmit(call, rxrpc_transmit_await_reply); 309 trace_rxrpc_transmit(call, rxrpc_transmit_await_reply);
302 } else { 310 else
303 trace_rxrpc_transmit(call, rxrpc_transmit_end); 311 trace_rxrpc_transmit(call, rxrpc_transmit_end);
304 }
305 _leave(" = ok"); 312 _leave(" = ok");
306 return true; 313 return true;
307 314
@@ -332,11 +339,11 @@ static bool rxrpc_receiving_reply(struct rxrpc_call *call)
332 trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now); 339 trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now);
333 } 340 }
334 341
335 if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags))
336 rxrpc_rotate_tx_window(call, top, &summary);
337 if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) { 342 if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
338 rxrpc_proto_abort("TXL", call, top); 343 if (!rxrpc_rotate_tx_window(call, top, &summary)) {
339 return false; 344 rxrpc_proto_abort("TXL", call, top);
345 return false;
346 }
340 } 347 }
341 if (!rxrpc_end_tx_phase(call, true, "ETD")) 348 if (!rxrpc_end_tx_phase(call, true, "ETD"))
342 return false; 349 return false;
@@ -452,13 +459,15 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
452 } 459 }
453 } 460 }
454 461
462 spin_lock(&call->input_lock);
463
455 /* Received data implicitly ACKs all of the request packets we sent 464 /* Received data implicitly ACKs all of the request packets we sent
456 * when we're acting as a client. 465 * when we're acting as a client.
457 */ 466 */
458 if ((state == RXRPC_CALL_CLIENT_SEND_REQUEST || 467 if ((state == RXRPC_CALL_CLIENT_SEND_REQUEST ||
459 state == RXRPC_CALL_CLIENT_AWAIT_REPLY) && 468 state == RXRPC_CALL_CLIENT_AWAIT_REPLY) &&
460 !rxrpc_receiving_reply(call)) 469 !rxrpc_receiving_reply(call))
461 return; 470 goto unlock;
462 471
463 call->ackr_prev_seq = seq; 472 call->ackr_prev_seq = seq;
464 473
@@ -488,12 +497,16 @@ next_subpacket:
488 497
489 if (flags & RXRPC_LAST_PACKET) { 498 if (flags & RXRPC_LAST_PACKET) {
490 if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) && 499 if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
491 seq != call->rx_top) 500 seq != call->rx_top) {
492 return rxrpc_proto_abort("LSN", call, seq); 501 rxrpc_proto_abort("LSN", call, seq);
502 goto unlock;
503 }
493 } else { 504 } else {
494 if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) && 505 if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
495 after_eq(seq, call->rx_top)) 506 after_eq(seq, call->rx_top)) {
496 return rxrpc_proto_abort("LSA", call, seq); 507 rxrpc_proto_abort("LSA", call, seq);
508 goto unlock;
509 }
497 } 510 }
498 511
499 trace_rxrpc_rx_data(call->debug_id, seq, serial, flags, annotation); 512 trace_rxrpc_rx_data(call->debug_id, seq, serial, flags, annotation);
@@ -560,8 +573,10 @@ next_subpacket:
560skip: 573skip:
561 offset += len; 574 offset += len;
562 if (flags & RXRPC_JUMBO_PACKET) { 575 if (flags & RXRPC_JUMBO_PACKET) {
563 if (skb_copy_bits(skb, offset, &flags, 1) < 0) 576 if (skb_copy_bits(skb, offset, &flags, 1) < 0) {
564 return rxrpc_proto_abort("XJF", call, seq); 577 rxrpc_proto_abort("XJF", call, seq);
578 goto unlock;
579 }
565 offset += sizeof(struct rxrpc_jumbo_header); 580 offset += sizeof(struct rxrpc_jumbo_header);
566 seq++; 581 seq++;
567 serial++; 582 serial++;
@@ -601,6 +616,9 @@ ack:
601 trace_rxrpc_notify_socket(call->debug_id, serial); 616 trace_rxrpc_notify_socket(call->debug_id, serial);
602 rxrpc_notify_socket(call); 617 rxrpc_notify_socket(call);
603 } 618 }
619
620unlock:
621 spin_unlock(&call->input_lock);
604 _leave(" [queued]"); 622 _leave(" [queued]");
605} 623}
606 624
@@ -687,15 +705,14 @@ static void rxrpc_input_ping_response(struct rxrpc_call *call,
687 705
688 ping_time = call->ping_time; 706 ping_time = call->ping_time;
689 smp_rmb(); 707 smp_rmb();
690 ping_serial = call->ping_serial; 708 ping_serial = READ_ONCE(call->ping_serial);
691 709
692 if (orig_serial == call->acks_lost_ping) 710 if (orig_serial == call->acks_lost_ping)
693 rxrpc_input_check_for_lost_ack(call); 711 rxrpc_input_check_for_lost_ack(call);
694 712
695 if (!test_bit(RXRPC_CALL_PINGING, &call->flags) || 713 if (before(orig_serial, ping_serial) ||
696 before(orig_serial, ping_serial)) 714 !test_and_clear_bit(RXRPC_CALL_PINGING, &call->flags))
697 return; 715 return;
698 clear_bit(RXRPC_CALL_PINGING, &call->flags);
699 if (after(orig_serial, ping_serial)) 716 if (after(orig_serial, ping_serial))
700 return; 717 return;
701 718
@@ -861,15 +878,32 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
861 rxrpc_propose_ack_respond_to_ack); 878 rxrpc_propose_ack_respond_to_ack);
862 } 879 }
863 880
881 /* Discard any out-of-order or duplicate ACKs. */
882 if (before_eq(sp->hdr.serial, call->acks_latest))
883 return;
884
885 buf.info.rxMTU = 0;
864 ioffset = offset + nr_acks + 3; 886 ioffset = offset + nr_acks + 3;
865 if (skb->len >= ioffset + sizeof(buf.info)) { 887 if (skb->len >= ioffset + sizeof(buf.info) &&
866 if (skb_copy_bits(skb, ioffset, &buf.info, sizeof(buf.info)) < 0) 888 skb_copy_bits(skb, ioffset, &buf.info, sizeof(buf.info)) < 0)
867 return rxrpc_proto_abort("XAI", call, 0); 889 return rxrpc_proto_abort("XAI", call, 0);
890
891 spin_lock(&call->input_lock);
892
893 /* Discard any out-of-order or duplicate ACKs. */
894 if (before_eq(sp->hdr.serial, call->acks_latest))
895 goto out;
896 call->acks_latest_ts = skb->tstamp;
897 call->acks_latest = sp->hdr.serial;
898
899 /* Parse rwind and mtu sizes if provided. */
900 if (buf.info.rxMTU)
868 rxrpc_input_ackinfo(call, skb, &buf.info); 901 rxrpc_input_ackinfo(call, skb, &buf.info);
869 }
870 902
871 if (first_soft_ack == 0) 903 if (first_soft_ack == 0) {
872 return rxrpc_proto_abort("AK0", call, 0); 904 rxrpc_proto_abort("AK0", call, 0);
905 goto out;
906 }
873 907
874 /* Ignore ACKs unless we are or have just been transmitting. */ 908 /* Ignore ACKs unless we are or have just been transmitting. */
875 switch (READ_ONCE(call->state)) { 909 switch (READ_ONCE(call->state)) {
@@ -879,39 +913,35 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
879 case RXRPC_CALL_SERVER_AWAIT_ACK: 913 case RXRPC_CALL_SERVER_AWAIT_ACK:
880 break; 914 break;
881 default: 915 default:
882 return; 916 goto out;
883 }
884
885 /* Discard any out-of-order or duplicate ACKs. */
886 if (before_eq(sp->hdr.serial, call->acks_latest)) {
887 _debug("discard ACK %d <= %d",
888 sp->hdr.serial, call->acks_latest);
889 return;
890 } 917 }
891 call->acks_latest_ts = skb->tstamp;
892 call->acks_latest = sp->hdr.serial;
893 918
894 if (before(hard_ack, call->tx_hard_ack) || 919 if (before(hard_ack, call->tx_hard_ack) ||
895 after(hard_ack, call->tx_top)) 920 after(hard_ack, call->tx_top)) {
896 return rxrpc_proto_abort("AKW", call, 0); 921 rxrpc_proto_abort("AKW", call, 0);
897 if (nr_acks > call->tx_top - hard_ack) 922 goto out;
898 return rxrpc_proto_abort("AKN", call, 0); 923 }
924 if (nr_acks > call->tx_top - hard_ack) {
925 rxrpc_proto_abort("AKN", call, 0);
926 goto out;
927 }
899 928
900 if (after(hard_ack, call->tx_hard_ack)) 929 if (after(hard_ack, call->tx_hard_ack)) {
901 rxrpc_rotate_tx_window(call, hard_ack, &summary); 930 if (rxrpc_rotate_tx_window(call, hard_ack, &summary)) {
931 rxrpc_end_tx_phase(call, false, "ETA");
932 goto out;
933 }
934 }
902 935
903 if (nr_acks > 0) { 936 if (nr_acks > 0) {
904 if (skb_copy_bits(skb, offset, buf.acks, nr_acks) < 0) 937 if (skb_copy_bits(skb, offset, buf.acks, nr_acks) < 0) {
905 return rxrpc_proto_abort("XSA", call, 0); 938 rxrpc_proto_abort("XSA", call, 0);
939 goto out;
940 }
906 rxrpc_input_soft_acks(call, buf.acks, first_soft_ack, nr_acks, 941 rxrpc_input_soft_acks(call, buf.acks, first_soft_ack, nr_acks,
907 &summary); 942 &summary);
908 } 943 }
909 944
910 if (test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
911 rxrpc_end_tx_phase(call, false, "ETA");
912 return;
913 }
914
915 if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] & 945 if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] &
916 RXRPC_TX_ANNO_LAST && 946 RXRPC_TX_ANNO_LAST &&
917 summary.nr_acks == call->tx_top - hard_ack && 947 summary.nr_acks == call->tx_top - hard_ack &&
@@ -920,7 +950,9 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
920 false, true, 950 false, true,
921 rxrpc_propose_ack_ping_for_lost_reply); 951 rxrpc_propose_ack_ping_for_lost_reply);
922 952
923 return rxrpc_congestion_management(call, skb, &summary, acked_serial); 953 rxrpc_congestion_management(call, skb, &summary, acked_serial);
954out:
955 spin_unlock(&call->input_lock);
924} 956}
925 957
926/* 958/*
@@ -933,9 +965,12 @@ static void rxrpc_input_ackall(struct rxrpc_call *call, struct sk_buff *skb)
933 965
934 _proto("Rx ACKALL %%%u", sp->hdr.serial); 966 _proto("Rx ACKALL %%%u", sp->hdr.serial);
935 967
936 rxrpc_rotate_tx_window(call, call->tx_top, &summary); 968 spin_lock(&call->input_lock);
937 if (test_bit(RXRPC_CALL_TX_LAST, &call->flags)) 969
970 if (rxrpc_rotate_tx_window(call, call->tx_top, &summary))
938 rxrpc_end_tx_phase(call, false, "ETL"); 971 rxrpc_end_tx_phase(call, false, "ETL");
972
973 spin_unlock(&call->input_lock);
939} 974}
940 975
941/* 976/*
@@ -1018,18 +1053,19 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
1018} 1053}
1019 1054
1020/* 1055/*
1021 * Handle a new call on a channel implicitly completing the preceding call on 1056 * Handle a new service call on a channel implicitly completing the preceding
1022 * that channel. 1057 * call on that channel. This does not apply to client conns.
1023 * 1058 *
1024 * TODO: If callNumber > call_id + 1, renegotiate security. 1059 * TODO: If callNumber > call_id + 1, renegotiate security.
1025 */ 1060 */
1026static void rxrpc_input_implicit_end_call(struct rxrpc_connection *conn, 1061static void rxrpc_input_implicit_end_call(struct rxrpc_sock *rx,
1062 struct rxrpc_connection *conn,
1027 struct rxrpc_call *call) 1063 struct rxrpc_call *call)
1028{ 1064{
1029 switch (READ_ONCE(call->state)) { 1065 switch (READ_ONCE(call->state)) {
1030 case RXRPC_CALL_SERVER_AWAIT_ACK: 1066 case RXRPC_CALL_SERVER_AWAIT_ACK:
1031 rxrpc_call_completed(call); 1067 rxrpc_call_completed(call);
1032 break; 1068 /* Fall through */
1033 case RXRPC_CALL_COMPLETE: 1069 case RXRPC_CALL_COMPLETE:
1034 break; 1070 break;
1035 default: 1071 default:
@@ -1037,11 +1073,13 @@ static void rxrpc_input_implicit_end_call(struct rxrpc_connection *conn,
1037 set_bit(RXRPC_CALL_EV_ABORT, &call->events); 1073 set_bit(RXRPC_CALL_EV_ABORT, &call->events);
1038 rxrpc_queue_call(call); 1074 rxrpc_queue_call(call);
1039 } 1075 }
1076 trace_rxrpc_improper_term(call);
1040 break; 1077 break;
1041 } 1078 }
1042 1079
1043 trace_rxrpc_improper_term(call); 1080 spin_lock(&rx->incoming_lock);
1044 __rxrpc_disconnect_call(conn, call); 1081 __rxrpc_disconnect_call(conn, call);
1082 spin_unlock(&rx->incoming_lock);
1045 rxrpc_notify_socket(call); 1083 rxrpc_notify_socket(call);
1046} 1084}
1047 1085
@@ -1120,8 +1158,10 @@ int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
1120 * The socket is locked by the caller and this prevents the socket from being 1158 * The socket is locked by the caller and this prevents the socket from being
1121 * shut down and the local endpoint from going away, thus sk_user_data will not 1159 * shut down and the local endpoint from going away, thus sk_user_data will not
1122 * be cleared until this function returns. 1160 * be cleared until this function returns.
1161 *
1162 * Called with the RCU read lock held from the IP layer via UDP.
1123 */ 1163 */
1124void rxrpc_data_ready(struct sock *udp_sk) 1164int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
1125{ 1165{
1126 struct rxrpc_connection *conn; 1166 struct rxrpc_connection *conn;
1127 struct rxrpc_channel *chan; 1167 struct rxrpc_channel *chan;
@@ -1130,38 +1170,17 @@ void rxrpc_data_ready(struct sock *udp_sk)
1130 struct rxrpc_local *local = udp_sk->sk_user_data; 1170 struct rxrpc_local *local = udp_sk->sk_user_data;
1131 struct rxrpc_peer *peer = NULL; 1171 struct rxrpc_peer *peer = NULL;
1132 struct rxrpc_sock *rx = NULL; 1172 struct rxrpc_sock *rx = NULL;
1133 struct sk_buff *skb;
1134 unsigned int channel; 1173 unsigned int channel;
1135 int ret, skew = 0; 1174 int skew = 0;
1136 1175
1137 _enter("%p", udp_sk); 1176 _enter("%p", udp_sk);
1138 1177
1139 ASSERT(!irqs_disabled());
1140
1141 skb = skb_recv_udp(udp_sk, 0, 1, &ret);
1142 if (!skb) {
1143 if (ret == -EAGAIN)
1144 return;
1145 _debug("UDP socket error %d", ret);
1146 return;
1147 }
1148
1149 if (skb->tstamp == 0) 1178 if (skb->tstamp == 0)
1150 skb->tstamp = ktime_get_real(); 1179 skb->tstamp = ktime_get_real();
1151 1180
1152 rxrpc_new_skb(skb, rxrpc_skb_rx_received); 1181 rxrpc_new_skb(skb, rxrpc_skb_rx_received);
1153 1182
1154 _net("recv skb %p", skb); 1183 skb_pull(skb, sizeof(struct udphdr));
1155
1156 /* we'll probably need to checksum it (didn't call sock_recvmsg) */
1157 if (skb_checksum_complete(skb)) {
1158 rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
1159 __UDP_INC_STATS(&init_net, UDP_MIB_INERRORS, 0);
1160 _leave(" [CSUM failed]");
1161 return;
1162 }
1163
1164 __UDP_INC_STATS(&init_net, UDP_MIB_INDATAGRAMS, 0);
1165 1184
1166 /* The UDP protocol already released all skb resources; 1185 /* The UDP protocol already released all skb resources;
1167 * we are free to add our own data there. 1186 * we are free to add our own data there.
@@ -1177,10 +1196,12 @@ void rxrpc_data_ready(struct sock *udp_sk)
1177 if ((lose++ & 7) == 7) { 1196 if ((lose++ & 7) == 7) {
1178 trace_rxrpc_rx_lose(sp); 1197 trace_rxrpc_rx_lose(sp);
1179 rxrpc_free_skb(skb, rxrpc_skb_rx_lost); 1198 rxrpc_free_skb(skb, rxrpc_skb_rx_lost);
1180 return; 1199 return 0;
1181 } 1200 }
1182 } 1201 }
1183 1202
1203 if (skb->tstamp == 0)
1204 skb->tstamp = ktime_get_real();
1184 trace_rxrpc_rx_packet(sp); 1205 trace_rxrpc_rx_packet(sp);
1185 1206
1186 switch (sp->hdr.type) { 1207 switch (sp->hdr.type) {
@@ -1234,8 +1255,6 @@ void rxrpc_data_ready(struct sock *udp_sk)
1234 if (sp->hdr.serviceId == 0) 1255 if (sp->hdr.serviceId == 0)
1235 goto bad_message; 1256 goto bad_message;
1236 1257
1237 rcu_read_lock();
1238
1239 if (rxrpc_to_server(sp)) { 1258 if (rxrpc_to_server(sp)) {
1240 /* Weed out packets to services we're not offering. Packets 1259 /* Weed out packets to services we're not offering. Packets
1241 * that would begin a call are explicitly rejected and the rest 1260 * that would begin a call are explicitly rejected and the rest
@@ -1247,7 +1266,7 @@ void rxrpc_data_ready(struct sock *udp_sk)
1247 if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA && 1266 if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
1248 sp->hdr.seq == 1) 1267 sp->hdr.seq == 1)
1249 goto unsupported_service; 1268 goto unsupported_service;
1250 goto discard_unlock; 1269 goto discard;
1251 } 1270 }
1252 } 1271 }
1253 1272
@@ -1257,17 +1276,23 @@ void rxrpc_data_ready(struct sock *udp_sk)
1257 goto wrong_security; 1276 goto wrong_security;
1258 1277
1259 if (sp->hdr.serviceId != conn->service_id) { 1278 if (sp->hdr.serviceId != conn->service_id) {
1260 if (!test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags) || 1279 int old_id;
1261 conn->service_id != conn->params.service_id) 1280
1281 if (!test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags))
1282 goto reupgrade;
1283 old_id = cmpxchg(&conn->service_id, conn->params.service_id,
1284 sp->hdr.serviceId);
1285
1286 if (old_id != conn->params.service_id &&
1287 old_id != sp->hdr.serviceId)
1262 goto reupgrade; 1288 goto reupgrade;
1263 conn->service_id = sp->hdr.serviceId;
1264 } 1289 }
1265 1290
1266 if (sp->hdr.callNumber == 0) { 1291 if (sp->hdr.callNumber == 0) {
1267 /* Connection-level packet */ 1292 /* Connection-level packet */
1268 _debug("CONN %p {%d}", conn, conn->debug_id); 1293 _debug("CONN %p {%d}", conn, conn->debug_id);
1269 rxrpc_post_packet_to_conn(conn, skb); 1294 rxrpc_post_packet_to_conn(conn, skb);
1270 goto out_unlock; 1295 goto out;
1271 } 1296 }
1272 1297
1273 /* Note the serial number skew here */ 1298 /* Note the serial number skew here */
@@ -1286,19 +1311,19 @@ void rxrpc_data_ready(struct sock *udp_sk)
1286 1311
1287 /* Ignore really old calls */ 1312 /* Ignore really old calls */
1288 if (sp->hdr.callNumber < chan->last_call) 1313 if (sp->hdr.callNumber < chan->last_call)
1289 goto discard_unlock; 1314 goto discard;
1290 1315
1291 if (sp->hdr.callNumber == chan->last_call) { 1316 if (sp->hdr.callNumber == chan->last_call) {
1292 if (chan->call || 1317 if (chan->call ||
1293 sp->hdr.type == RXRPC_PACKET_TYPE_ABORT) 1318 sp->hdr.type == RXRPC_PACKET_TYPE_ABORT)
1294 goto discard_unlock; 1319 goto discard;
1295 1320
1296 /* For the previous service call, if completed 1321 /* For the previous service call, if completed
1297 * successfully, we discard all further packets. 1322 * successfully, we discard all further packets.
1298 */ 1323 */
1299 if (rxrpc_conn_is_service(conn) && 1324 if (rxrpc_conn_is_service(conn) &&
1300 chan->last_type == RXRPC_PACKET_TYPE_ACK) 1325 chan->last_type == RXRPC_PACKET_TYPE_ACK)
1301 goto discard_unlock; 1326 goto discard;
1302 1327
1303 /* But otherwise we need to retransmit the final packet 1328 /* But otherwise we need to retransmit the final packet
1304 * from data cached in the connection record. 1329 * from data cached in the connection record.
@@ -1309,18 +1334,16 @@ void rxrpc_data_ready(struct sock *udp_sk)
1309 sp->hdr.serial, 1334 sp->hdr.serial,
1310 sp->hdr.flags, 0); 1335 sp->hdr.flags, 0);
1311 rxrpc_post_packet_to_conn(conn, skb); 1336 rxrpc_post_packet_to_conn(conn, skb);
1312 goto out_unlock; 1337 goto out;
1313 } 1338 }
1314 1339
1315 call = rcu_dereference(chan->call); 1340 call = rcu_dereference(chan->call);
1316 1341
1317 if (sp->hdr.callNumber > chan->call_id) { 1342 if (sp->hdr.callNumber > chan->call_id) {
1318 if (rxrpc_to_client(sp)) { 1343 if (rxrpc_to_client(sp))
1319 rcu_read_unlock();
1320 goto reject_packet; 1344 goto reject_packet;
1321 }
1322 if (call) 1345 if (call)
1323 rxrpc_input_implicit_end_call(conn, call); 1346 rxrpc_input_implicit_end_call(rx, conn, call);
1324 call = NULL; 1347 call = NULL;
1325 } 1348 }
1326 1349
@@ -1337,55 +1360,42 @@ void rxrpc_data_ready(struct sock *udp_sk)
1337 if (!call || atomic_read(&call->usage) == 0) { 1360 if (!call || atomic_read(&call->usage) == 0) {
1338 if (rxrpc_to_client(sp) || 1361 if (rxrpc_to_client(sp) ||
1339 sp->hdr.type != RXRPC_PACKET_TYPE_DATA) 1362 sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
1340 goto bad_message_unlock; 1363 goto bad_message;
1341 if (sp->hdr.seq != 1) 1364 if (sp->hdr.seq != 1)
1342 goto discard_unlock; 1365 goto discard;
1343 call = rxrpc_new_incoming_call(local, rx, peer, conn, skb); 1366 call = rxrpc_new_incoming_call(local, rx, skb);
1344 if (!call) { 1367 if (!call)
1345 rcu_read_unlock();
1346 goto reject_packet; 1368 goto reject_packet;
1347 }
1348 rxrpc_send_ping(call, skb, skew); 1369 rxrpc_send_ping(call, skb, skew);
1349 mutex_unlock(&call->user_mutex); 1370 mutex_unlock(&call->user_mutex);
1350 } 1371 }
1351 1372
1352 rxrpc_input_call_packet(call, skb, skew); 1373 rxrpc_input_call_packet(call, skb, skew);
1353 goto discard_unlock; 1374 goto discard;
1354 1375
1355discard_unlock:
1356 rcu_read_unlock();
1357discard: 1376discard:
1358 rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 1377 rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
1359out: 1378out:
1360 trace_rxrpc_rx_done(0, 0); 1379 trace_rxrpc_rx_done(0, 0);
1361 return; 1380 return 0;
1362
1363out_unlock:
1364 rcu_read_unlock();
1365 goto out;
1366 1381
1367wrong_security: 1382wrong_security:
1368 rcu_read_unlock();
1369 trace_rxrpc_abort(0, "SEC", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, 1383 trace_rxrpc_abort(0, "SEC", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
1370 RXKADINCONSISTENCY, EBADMSG); 1384 RXKADINCONSISTENCY, EBADMSG);
1371 skb->priority = RXKADINCONSISTENCY; 1385 skb->priority = RXKADINCONSISTENCY;
1372 goto post_abort; 1386 goto post_abort;
1373 1387
1374unsupported_service: 1388unsupported_service:
1375 rcu_read_unlock();
1376 trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, 1389 trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
1377 RX_INVALID_OPERATION, EOPNOTSUPP); 1390 RX_INVALID_OPERATION, EOPNOTSUPP);
1378 skb->priority = RX_INVALID_OPERATION; 1391 skb->priority = RX_INVALID_OPERATION;
1379 goto post_abort; 1392 goto post_abort;
1380 1393
1381reupgrade: 1394reupgrade:
1382 rcu_read_unlock();
1383 trace_rxrpc_abort(0, "UPG", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, 1395 trace_rxrpc_abort(0, "UPG", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
1384 RX_PROTOCOL_ERROR, EBADMSG); 1396 RX_PROTOCOL_ERROR, EBADMSG);
1385 goto protocol_error; 1397 goto protocol_error;
1386 1398
1387bad_message_unlock:
1388 rcu_read_unlock();
1389bad_message: 1399bad_message:
1390 trace_rxrpc_abort(0, "BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, 1400 trace_rxrpc_abort(0, "BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
1391 RX_PROTOCOL_ERROR, EBADMSG); 1401 RX_PROTOCOL_ERROR, EBADMSG);
@@ -1397,4 +1407,5 @@ reject_packet:
1397 trace_rxrpc_rx_done(skb->mark, skb->priority); 1407 trace_rxrpc_rx_done(skb->mark, skb->priority);
1398 rxrpc_reject_packet(local, skb); 1408 rxrpc_reject_packet(local, skb);
1399 _leave(" [badmsg]"); 1409 _leave(" [badmsg]");
1410 return 0;
1400} 1411}