author	Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>	2018-05-14 13:35:19 -0400
committer	David S. Miller <davem@davemloft.net>	2018-05-14 23:15:27 -0400
commit	e136e965df596d8e4fffa4ae0b202fd4c388568f (patch)
tree	66de45bfbb5d985bc9778ae51e2576d85d7d6ead /net/sctp/outqueue.c
parent	bb543847a9c1b3904180b22add5e522f1f6c11c7 (diff)

sctp: add asoc and packet to sctp_flush_ctx
Pre-compute these pointers so the compiler won't keep reloading them
(the kernel is built with -fno-strict-aliasing, so it must assume they
may have changed across intervening stores).
Changes since v2:
- Do not replace a return with a break in sctp_outq_flush_data
Signed-off-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
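
For readers unfamiliar with the motivation, here is a minimal stand-alone C sketch of the pattern the patch applies; the types and names are illustrative toys, not the kernel's real SCTP structures. With strict aliasing disabled, the compiler cannot assume that q->asoc is unchanged across stores it cannot track, so dereferencing ctx->q->asoc at every use forces repeated loads, while a pointer computed once into the context can stay in a register.

#include <stdio.h>

/* Illustrative stand-ins only; these are not the kernel's SCTP types. */
struct toy_assoc {
	unsigned long octrlchunks;
};

struct toy_outq {
	struct toy_assoc *asoc;
};

struct toy_flush_ctx {
	struct toy_outq *q;
	struct toy_assoc *asoc;	/* cached once, as the patch does with ctx->asoc */
};

/* Before: each use re-derives the association through two loads
 * (ctx->q, then q->asoc), and with -fno-strict-aliasing the compiler
 * must redo them after any store it cannot prove is unrelated.
 */
static void bump_via_queue(struct toy_flush_ctx *ctx)
{
	ctx->q->asoc->octrlchunks++;
}

/* After: the pointer is computed once when the context is built, so
 * later uses are a single load from the context.
 */
static void bump_via_cache(struct toy_flush_ctx *ctx)
{
	ctx->asoc->octrlchunks++;
}

int main(void)
{
	struct toy_assoc assoc = { .octrlchunks = 0 };
	struct toy_outq q = { .asoc = &assoc };
	struct toy_flush_ctx ctx = { .q = &q, .asoc = q.asoc };

	bump_via_queue(&ctx);
	bump_via_cache(&ctx);
	printf("octrlchunks = %lu\n", assoc.octrlchunks);
	return 0;
}
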
Diffstat (limited to 'net/sctp/outqueue.c')
-rw-r--r--	net/sctp/outqueue.c	97
1 file changed, 44 insertions, 53 deletions
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index db94a2513dd8..68b7baea3fea 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -798,16 +798,17 @@ struct sctp_flush_ctx {
 	struct sctp_transport *transport;
 	/* These transports have chunks to send. */
 	struct list_head transport_list;
+	struct sctp_association *asoc;
+	/* Packet on the current transport above */
+	struct sctp_packet *packet;
 	gfp_t gfp;
 };
 
 /* transport: current transport */
-static bool sctp_outq_select_transport(struct sctp_flush_ctx *ctx,
+static void sctp_outq_select_transport(struct sctp_flush_ctx *ctx,
 				       struct sctp_chunk *chunk)
 {
 	struct sctp_transport *new_transport = chunk->transport;
-	struct sctp_association *asoc = ctx->q->asoc;
-	bool changed = false;
 
 	if (!new_transport) {
 		if (!sctp_chunk_is_data(chunk)) {
@@ -825,7 +826,7 @@ static bool sctp_outq_select_transport(struct sctp_flush_ctx *ctx,
 						&ctx->transport->ipaddr))
 				new_transport = ctx->transport;
 			else
-				new_transport = sctp_assoc_lookup_paddr(asoc,
+				new_transport = sctp_assoc_lookup_paddr(ctx->asoc,
 							&chunk->dest);
 		}
 
@@ -833,7 +834,7 @@ static bool sctp_outq_select_transport(struct sctp_flush_ctx *ctx,
 		 * use the current active path.
 		 */
 		if (!new_transport)
-			new_transport = asoc->peer.active_path;
+			new_transport = ctx->asoc->peer.active_path;
 	} else {
 		__u8 type;
 
@@ -858,7 +859,7 @@ static bool sctp_outq_select_transport(struct sctp_flush_ctx *ctx,
 			if (type != SCTP_CID_HEARTBEAT &&
 			    type != SCTP_CID_HEARTBEAT_ACK &&
 			    type != SCTP_CID_ASCONF_ACK)
-				new_transport = asoc->peer.active_path;
+				new_transport = ctx->asoc->peer.active_path;
 			break;
 		default:
 			break;
@@ -867,27 +868,25 @@ static bool sctp_outq_select_transport(struct sctp_flush_ctx *ctx,
 
 	/* Are we switching transports? Take care of transport locks. */
 	if (new_transport != ctx->transport) {
-		changed = true;
 		ctx->transport = new_transport;
+		ctx->packet = &ctx->transport->packet;
+
 		if (list_empty(&ctx->transport->send_ready))
 			list_add_tail(&ctx->transport->send_ready,
 				      &ctx->transport_list);
 
-		sctp_packet_config(&ctx->transport->packet, asoc->peer.i.init_tag,
-				   asoc->peer.ecn_capable);
+		sctp_packet_config(ctx->packet,
+				   ctx->asoc->peer.i.init_tag,
+				   ctx->asoc->peer.ecn_capable);
 		/* We've switched transports, so apply the
 		 * Burst limit to the new transport.
 		 */
 		sctp_transport_burst_limited(ctx->transport);
 	}
-
-	return changed;
 }
 
 static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
 {
-	struct sctp_association *asoc = ctx->q->asoc;
-	struct sctp_packet *packet = NULL;
 	struct sctp_chunk *chunk, *tmp;
 	enum sctp_xmit status;
 	int one_packet, error;
@@ -901,7 +900,7 @@ static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
 		 * NOT use the new IP address as a source for ANY SCTP
 		 * packet except on carrying an ASCONF Chunk.
 		 */
-		if (asoc->src_out_of_asoc_ok &&
+		if (ctx->asoc->src_out_of_asoc_ok &&
 		    chunk->chunk_hdr->type != SCTP_CID_ASCONF)
 			continue;
 
@@ -910,8 +909,7 @@ static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
 		/* Pick the right transport to use. Should always be true for
 		 * the first chunk as we don't have a transport by then.
 		 */
-		if (sctp_outq_select_transport(ctx, chunk))
-			packet = &ctx->transport->packet;
+		sctp_outq_select_transport(ctx, chunk);
 
 		switch (chunk->chunk_hdr->type) {
 		/*
@@ -926,14 +924,14 @@ static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
 			error = sctp_packet_singleton(ctx->transport, chunk,
 						      ctx->gfp);
 			if (error < 0) {
-				asoc->base.sk->sk_err = -error;
+				ctx->asoc->base.sk->sk_err = -error;
 				return;
 			}
 			break;
 
 		case SCTP_CID_ABORT:
 			if (sctp_test_T_bit(chunk))
-				packet->vtag = asoc->c.my_vtag;
+				ctx->packet->vtag = ctx->asoc->c.my_vtag;
 			/* fallthru */
 
 		/* The following chunks are "response" chunks, i.e.
@@ -959,7 +957,7 @@ static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
 		case SCTP_CID_FWD_TSN:
 		case SCTP_CID_I_FWD_TSN:
 		case SCTP_CID_RECONF:
-			status = sctp_packet_transmit_chunk(packet, chunk,
+			status = sctp_packet_transmit_chunk(ctx->packet, chunk,
 							    one_packet, ctx->gfp);
 			if (status != SCTP_XMIT_OK) {
 				/* put the chunk back */
@@ -967,7 +965,7 @@ static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
 				break;
 			}
 
-			asoc->stats.octrlchunks++;
+			ctx->asoc->stats.octrlchunks++;
 			/* PR-SCTP C5) If a FORWARD TSN is sent, the
 			 * sender MUST assure that at least one T3-rtx
 			 * timer is running.
@@ -978,7 +976,7 @@ static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
 				ctx->transport->last_time_sent = jiffies;
 			}
 
-			if (chunk == asoc->strreset_chunk)
+			if (chunk == ctx->asoc->strreset_chunk)
 				sctp_transport_reset_reconf_timer(ctx->transport);
 
 			break;
@@ -994,31 +992,28 @@ static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
 static bool sctp_outq_flush_rtx(struct sctp_flush_ctx *ctx,
 				int rtx_timeout)
 {
-	struct sctp_packet *packet = ctx->transport ? &ctx->transport->packet :
-			NULL;
-	struct sctp_association *asoc = ctx->q->asoc;
 	int error, start_timer = 0;
 
-	if (asoc->peer.retran_path->state == SCTP_UNCONFIRMED)
+	if (ctx->asoc->peer.retran_path->state == SCTP_UNCONFIRMED)
 		return false;
 
-	if (ctx->transport != asoc->peer.retran_path) {
+	if (ctx->transport != ctx->asoc->peer.retran_path) {
 		/* Switch transports & prepare the packet. */
-		ctx->transport = asoc->peer.retran_path;
+		ctx->transport = ctx->asoc->peer.retran_path;
+		ctx->packet = &ctx->transport->packet;
 
 		if (list_empty(&ctx->transport->send_ready))
 			list_add_tail(&ctx->transport->send_ready,
 				      &ctx->transport_list);
 
-		packet = &ctx->transport->packet;
-		sctp_packet_config(packet, asoc->peer.i.init_tag,
-				   asoc->peer.ecn_capable);
+		sctp_packet_config(ctx->packet, ctx->asoc->peer.i.init_tag,
+				   ctx->asoc->peer.ecn_capable);
 	}
 
-	error = __sctp_outq_flush_rtx(ctx->q, packet, rtx_timeout, &start_timer,
-				      ctx->gfp);
+	error = __sctp_outq_flush_rtx(ctx->q, ctx->packet, rtx_timeout,
+				      &start_timer, ctx->gfp);
 	if (error < 0)
-		asoc->base.sk->sk_err = -error;
+		ctx->asoc->base.sk->sk_err = -error;
 
 	if (start_timer) {
 		sctp_transport_reset_t3_rtx(ctx->transport);
@@ -1028,7 +1023,7 @@ static bool sctp_outq_flush_rtx(struct sctp_flush_ctx *ctx,
 	/* This can happen on COOKIE-ECHO resend. Only
 	 * one chunk can get bundled with a COOKIE-ECHO.
 	 */
-	if (packet->has_cookie_echo)
+	if (ctx->packet->has_cookie_echo)
 		return false;
 
 	/* Don't send new data if there is still data
@@ -1043,19 +1038,16 @@ static bool sctp_outq_flush_rtx(struct sctp_flush_ctx *ctx,
 static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx,
 				 int rtx_timeout)
 {
-	struct sctp_packet *packet = ctx->transport ? &ctx->transport->packet :
-			NULL;
-	struct sctp_association *asoc = ctx->q->asoc;
 	struct sctp_chunk *chunk;
 	enum sctp_xmit status;
 
 	/* Is it OK to send data chunks? */
-	switch (asoc->state) {
+	switch (ctx->asoc->state) {
 	case SCTP_STATE_COOKIE_ECHOED:
 		/* Only allow bundling when this packet has a COOKIE-ECHO
 		 * chunk.
 		 */
-		if (!packet || !packet->has_cookie_echo)
+		if (!ctx->packet || !ctx->packet->has_cookie_echo)
 			return;
 
 		/* fallthru */
@@ -1078,12 +1070,9 @@ static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx,
 	 * are marked for retransmission (limited by the
 	 * current cwnd).
 	 */
-	if (!list_empty(&ctx->q->retransmit)) {
-		if (!sctp_outq_flush_rtx(ctx, rtx_timeout))
-			return;
-		/* We may have switched current transport */
-		packet = &ctx->transport->packet;
-	}
+	if (!list_empty(&ctx->q->retransmit) &&
+	    !sctp_outq_flush_rtx(ctx, rtx_timeout))
+		return;
 
 	/* Apply Max.Burst limitation to the current transport in
 	 * case it will be used for new data.  We are going to
@@ -1105,13 +1094,12 @@ static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx,
 			continue;
 		}
 
-		if (asoc->stream.out[sid].state == SCTP_STREAM_CLOSED) {
+		if (ctx->asoc->stream.out[sid].state == SCTP_STREAM_CLOSED) {
 			sctp_outq_head_data(ctx->q, chunk);
 			break;
 		}
 
-		if (sctp_outq_select_transport(ctx, chunk))
-			packet = &ctx->transport->packet;
+		sctp_outq_select_transport(ctx, chunk);
 
 		pr_debug("%s: outq:%p, chunk:%p[%s], tx-tsn:0x%x skb->head:%p "
 			 "skb->users:%d\n",
@@ -1122,7 +1110,8 @@ static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx,
 			 refcount_read(&chunk->skb->users) : -1);
 
 		/* Add the chunk to the packet.  */
-		status = sctp_packet_transmit_chunk(packet, chunk, 0, ctx->gfp);
+		status = sctp_packet_transmit_chunk(ctx->packet, chunk, 0,
+						    ctx->gfp);
 		if (status != SCTP_XMIT_OK) {
 			/* We could not append this chunk, so put
 			 * the chunk back on the output queue.
@@ -1139,12 +1128,12 @@ static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx,
 		 * The sender MAY set the I-bit in the DATA
 		 * chunk header.
 		 */
-		if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING)
+		if (ctx->asoc->state == SCTP_STATE_SHUTDOWN_PENDING)
 			chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM;
 		if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
-			asoc->stats.ouodchunks++;
+			ctx->asoc->stats.ouodchunks++;
 		else
-			asoc->stats.oodchunks++;
+			ctx->asoc->stats.oodchunks++;
 
 		/* Only now it's safe to consider this
 		 * chunk as sent, sched-wise.
@@ -1160,7 +1149,7 @@ static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx,
 		/* Only let one DATA chunk get bundled with a
 		 * COOKIE-ECHO chunk.
 		 */
-		if (packet->has_cookie_echo)
+		if (ctx->packet->has_cookie_echo)
 			break;
 	}
 }
@@ -1202,6 +1191,8 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
 		.q = q,
 		.transport = NULL,
 		.transport_list = LIST_HEAD_INIT(ctx.transport_list),
+		.asoc = q->asoc,
+		.packet = NULL,
 		.gfp = gfp,
 	};
 