diff options
author | David S. Miller <davem@davemloft.net> | 2018-05-14 23:15:27 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2018-05-14 23:15:27 -0400 |
commit | 961423f9fcbcfebee5b7a5d6cc0f1069835f25c0 (patch) | |
tree | df6722edf2d4fd6b1c238a5d605f651f86c6e2cd | |
parent | ab619905508817e62f62d64fb7f2e82bfcb759b9 (diff) | |
parent | 5884f35f0d8924a1937c040e420255800c45ef0e (diff) |
Merge branch 'sctp-Introduce-sctp_flush_ctx'
Marcelo Ricardo Leitner says:
====================
sctp: Introduce sctp_flush_ctx
This struct will hold all the context used during the outq flush, so we
don't have to pass lots of pointers all around.
Checked on x86_64, the compiler inlines all these functions and there is no
dereference added because of the struct.
This patchset depends on 'sctp: refactor sctp_outq_flush'
Changes since v1:
- updated to build on top of v2 of 'sctp: refactor sctp_outq_flush'
Changes since v2:
- fixed a rebase issue which reverted a change in patch 2.
- rebased on v3 of 'sctp: refactor sctp_outq_flush'
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | net/sctp/outqueue.c | 257 |
1 files changed, 118 insertions, 139 deletions
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index e9c22b3db11c..d68aa33485a9 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c | |||
@@ -791,19 +791,28 @@ static int sctp_packet_singleton(struct sctp_transport *transport, | |||
791 | return sctp_packet_transmit(&singleton, gfp); | 791 | return sctp_packet_transmit(&singleton, gfp); |
792 | } | 792 | } |
793 | 793 | ||
794 | static bool sctp_outq_select_transport(struct sctp_chunk *chunk, | 794 | /* Struct to hold the context during sctp outq flush */ |
795 | struct sctp_association *asoc, | 795 | struct sctp_flush_ctx { |
796 | struct sctp_transport **transport, | 796 | struct sctp_outq *q; |
797 | struct list_head *transport_list) | 797 | /* Current transport being used. It's NOT the same as curr active one */ |
798 | struct sctp_transport *transport; | ||
799 | /* These transports have chunks to send. */ | ||
800 | struct list_head transport_list; | ||
801 | struct sctp_association *asoc; | ||
802 | /* Packet on the current transport above */ | ||
803 | struct sctp_packet *packet; | ||
804 | gfp_t gfp; | ||
805 | }; | ||
806 | |||
807 | /* transport: current transport */ | ||
808 | static void sctp_outq_select_transport(struct sctp_flush_ctx *ctx, | ||
809 | struct sctp_chunk *chunk) | ||
798 | { | 810 | { |
799 | struct sctp_transport *new_transport = chunk->transport; | 811 | struct sctp_transport *new_transport = chunk->transport; |
800 | struct sctp_transport *curr = *transport; | ||
801 | bool changed = false; | ||
802 | 812 | ||
803 | if (!new_transport) { | 813 | if (!new_transport) { |
804 | if (!sctp_chunk_is_data(chunk)) { | 814 | if (!sctp_chunk_is_data(chunk)) { |
805 | /* | 815 | /* If we have a prior transport pointer, see if |
806 | * If we have a prior transport pointer, see if | ||
807 | * the destination address of the chunk | 816 | * the destination address of the chunk |
808 | * matches the destination address of the | 817 | * matches the destination address of the |
809 | * current transport. If not a match, then | 818 | * current transport. If not a match, then |
@@ -812,11 +821,11 @@ static bool sctp_outq_select_transport(struct sctp_chunk *chunk, | |||
812 | * after processing ASCONFs, we may have new | 821 | * after processing ASCONFs, we may have new |
813 | * transports created. | 822 | * transports created. |
814 | */ | 823 | */ |
815 | if (curr && sctp_cmp_addr_exact(&chunk->dest, | 824 | if (ctx->transport && sctp_cmp_addr_exact(&chunk->dest, |
816 | &curr->ipaddr)) | 825 | &ctx->transport->ipaddr)) |
817 | new_transport = curr; | 826 | new_transport = ctx->transport; |
818 | else | 827 | else |
819 | new_transport = sctp_assoc_lookup_paddr(asoc, | 828 | new_transport = sctp_assoc_lookup_paddr(ctx->asoc, |
820 | &chunk->dest); | 829 | &chunk->dest); |
821 | } | 830 | } |
822 | 831 | ||
@@ -824,7 +833,7 @@ static bool sctp_outq_select_transport(struct sctp_chunk *chunk, | |||
824 | * use the current active path. | 833 | * use the current active path. |
825 | */ | 834 | */ |
826 | if (!new_transport) | 835 | if (!new_transport) |
827 | new_transport = asoc->peer.active_path; | 836 | new_transport = ctx->asoc->peer.active_path; |
828 | } else { | 837 | } else { |
829 | __u8 type; | 838 | __u8 type; |
830 | 839 | ||
@@ -849,7 +858,7 @@ static bool sctp_outq_select_transport(struct sctp_chunk *chunk, | |||
849 | if (type != SCTP_CID_HEARTBEAT && | 858 | if (type != SCTP_CID_HEARTBEAT && |
850 | type != SCTP_CID_HEARTBEAT_ACK && | 859 | type != SCTP_CID_HEARTBEAT_ACK && |
851 | type != SCTP_CID_ASCONF_ACK) | 860 | type != SCTP_CID_ASCONF_ACK) |
852 | new_transport = asoc->peer.active_path; | 861 | new_transport = ctx->asoc->peer.active_path; |
853 | break; | 862 | break; |
854 | default: | 863 | default: |
855 | break; | 864 | break; |
@@ -857,37 +866,31 @@ static bool sctp_outq_select_transport(struct sctp_chunk *chunk, | |||
857 | } | 866 | } |
858 | 867 | ||
859 | /* Are we switching transports? Take care of transport locks. */ | 868 | /* Are we switching transports? Take care of transport locks. */ |
860 | if (new_transport != curr) { | 869 | if (new_transport != ctx->transport) { |
861 | changed = true; | 870 | ctx->transport = new_transport; |
862 | curr = new_transport; | 871 | ctx->packet = &ctx->transport->packet; |
863 | *transport = curr; | 872 | |
864 | if (list_empty(&curr->send_ready)) | 873 | if (list_empty(&ctx->transport->send_ready)) |
865 | list_add_tail(&curr->send_ready, transport_list); | 874 | list_add_tail(&ctx->transport->send_ready, |
866 | 875 | &ctx->transport_list); | |
867 | sctp_packet_config(&curr->packet, asoc->peer.i.init_tag, | 876 | |
868 | asoc->peer.ecn_capable); | 877 | sctp_packet_config(ctx->packet, |
878 | ctx->asoc->peer.i.init_tag, | ||
879 | ctx->asoc->peer.ecn_capable); | ||
869 | /* We've switched transports, so apply the | 880 | /* We've switched transports, so apply the |
870 | * Burst limit to the new transport. | 881 | * Burst limit to the new transport. |
871 | */ | 882 | */ |
872 | sctp_transport_burst_limited(curr); | 883 | sctp_transport_burst_limited(ctx->transport); |
873 | } | 884 | } |
874 | |||
875 | return changed; | ||
876 | } | 885 | } |
877 | 886 | ||
878 | static void sctp_outq_flush_ctrl(struct sctp_outq *q, | 887 | static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx) |
879 | struct sctp_transport **_transport, | ||
880 | struct list_head *transport_list, | ||
881 | gfp_t gfp) | ||
882 | { | 888 | { |
883 | struct sctp_transport *transport = *_transport; | ||
884 | struct sctp_association *asoc = q->asoc; | ||
885 | struct sctp_packet *packet = NULL; | ||
886 | struct sctp_chunk *chunk, *tmp; | 889 | struct sctp_chunk *chunk, *tmp; |
887 | enum sctp_xmit status; | 890 | enum sctp_xmit status; |
888 | int one_packet, error; | 891 | int one_packet, error; |
889 | 892 | ||
890 | list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) { | 893 | list_for_each_entry_safe(chunk, tmp, &ctx->q->control_chunk_list, list) { |
891 | one_packet = 0; | 894 | one_packet = 0; |
892 | 895 | ||
893 | /* RFC 5061, 5.3 | 896 | /* RFC 5061, 5.3 |
@@ -896,7 +899,7 @@ static void sctp_outq_flush_ctrl(struct sctp_outq *q, | |||
896 | * NOT use the new IP address as a source for ANY SCTP | 899 | * NOT use the new IP address as a source for ANY SCTP |
897 | * packet except on carrying an ASCONF Chunk. | 900 | * packet except on carrying an ASCONF Chunk. |
898 | */ | 901 | */ |
899 | if (asoc->src_out_of_asoc_ok && | 902 | if (ctx->asoc->src_out_of_asoc_ok && |
900 | chunk->chunk_hdr->type != SCTP_CID_ASCONF) | 903 | chunk->chunk_hdr->type != SCTP_CID_ASCONF) |
901 | continue; | 904 | continue; |
902 | 905 | ||
@@ -905,15 +908,10 @@ static void sctp_outq_flush_ctrl(struct sctp_outq *q, | |||
905 | /* Pick the right transport to use. Should always be true for | 908 | /* Pick the right transport to use. Should always be true for |
906 | * the first chunk as we don't have a transport by then. | 909 | * the first chunk as we don't have a transport by then. |
907 | */ | 910 | */ |
908 | if (sctp_outq_select_transport(chunk, asoc, _transport, | 911 | sctp_outq_select_transport(ctx, chunk); |
909 | transport_list)) { | ||
910 | transport = *_transport; | ||
911 | packet = &transport->packet; | ||
912 | } | ||
913 | 912 | ||
914 | switch (chunk->chunk_hdr->type) { | 913 | switch (chunk->chunk_hdr->type) { |
915 | /* | 914 | /* 6.10 Bundling |
916 | * 6.10 Bundling | ||
917 | * ... | 915 | * ... |
918 | * An endpoint MUST NOT bundle INIT, INIT ACK or SHUTDOWN | 916 | * An endpoint MUST NOT bundle INIT, INIT ACK or SHUTDOWN |
919 | * COMPLETE with any other chunks. [Send them immediately.] | 917 | * COMPLETE with any other chunks. [Send them immediately.] |
@@ -921,16 +919,17 @@ static void sctp_outq_flush_ctrl(struct sctp_outq *q, | |||
921 | case SCTP_CID_INIT: | 919 | case SCTP_CID_INIT: |
922 | case SCTP_CID_INIT_ACK: | 920 | case SCTP_CID_INIT_ACK: |
923 | case SCTP_CID_SHUTDOWN_COMPLETE: | 921 | case SCTP_CID_SHUTDOWN_COMPLETE: |
924 | error = sctp_packet_singleton(transport, chunk, gfp); | 922 | error = sctp_packet_singleton(ctx->transport, chunk, |
923 | ctx->gfp); | ||
925 | if (error < 0) { | 924 | if (error < 0) { |
926 | asoc->base.sk->sk_err = -error; | 925 | ctx->asoc->base.sk->sk_err = -error; |
927 | return; | 926 | return; |
928 | } | 927 | } |
929 | break; | 928 | break; |
930 | 929 | ||
931 | case SCTP_CID_ABORT: | 930 | case SCTP_CID_ABORT: |
932 | if (sctp_test_T_bit(chunk)) | 931 | if (sctp_test_T_bit(chunk)) |
933 | packet->vtag = asoc->c.my_vtag; | 932 | ctx->packet->vtag = ctx->asoc->c.my_vtag; |
934 | /* fallthru */ | 933 | /* fallthru */ |
935 | 934 | ||
936 | /* The following chunks are "response" chunks, i.e. | 935 | /* The following chunks are "response" chunks, i.e. |
@@ -956,27 +955,27 @@ static void sctp_outq_flush_ctrl(struct sctp_outq *q, | |||
956 | case SCTP_CID_FWD_TSN: | 955 | case SCTP_CID_FWD_TSN: |
957 | case SCTP_CID_I_FWD_TSN: | 956 | case SCTP_CID_I_FWD_TSN: |
958 | case SCTP_CID_RECONF: | 957 | case SCTP_CID_RECONF: |
959 | status = sctp_packet_transmit_chunk(packet, chunk, | 958 | status = sctp_packet_transmit_chunk(ctx->packet, chunk, |
960 | one_packet, gfp); | 959 | one_packet, ctx->gfp); |
961 | if (status != SCTP_XMIT_OK) { | 960 | if (status != SCTP_XMIT_OK) { |
962 | /* put the chunk back */ | 961 | /* put the chunk back */ |
963 | list_add(&chunk->list, &q->control_chunk_list); | 962 | list_add(&chunk->list, &ctx->q->control_chunk_list); |
964 | break; | 963 | break; |
965 | } | 964 | } |
966 | 965 | ||
967 | asoc->stats.octrlchunks++; | 966 | ctx->asoc->stats.octrlchunks++; |
968 | /* PR-SCTP C5) If a FORWARD TSN is sent, the | 967 | /* PR-SCTP C5) If a FORWARD TSN is sent, the |
969 | * sender MUST assure that at least one T3-rtx | 968 | * sender MUST assure that at least one T3-rtx |
970 | * timer is running. | 969 | * timer is running. |
971 | */ | 970 | */ |
972 | if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN || | 971 | if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN || |
973 | chunk->chunk_hdr->type == SCTP_CID_I_FWD_TSN) { | 972 | chunk->chunk_hdr->type == SCTP_CID_I_FWD_TSN) { |
974 | sctp_transport_reset_t3_rtx(transport); | 973 | sctp_transport_reset_t3_rtx(ctx->transport); |
975 | transport->last_time_sent = jiffies; | 974 | ctx->transport->last_time_sent = jiffies; |
976 | } | 975 | } |
977 | 976 | ||
978 | if (chunk == asoc->strreset_chunk) | 977 | if (chunk == ctx->asoc->strreset_chunk) |
979 | sctp_transport_reset_reconf_timer(transport); | 978 | sctp_transport_reset_reconf_timer(ctx->transport); |
980 | 979 | ||
981 | break; | 980 | break; |
982 | 981 | ||
@@ -988,76 +987,65 @@ static void sctp_outq_flush_ctrl(struct sctp_outq *q, | |||
988 | } | 987 | } |
989 | 988 | ||
990 | /* Returns false if new data shouldn't be sent */ | 989 | /* Returns false if new data shouldn't be sent */ |
991 | static bool sctp_outq_flush_rtx(struct sctp_outq *q, | 990 | static bool sctp_outq_flush_rtx(struct sctp_flush_ctx *ctx, |
992 | struct sctp_transport **_transport, | 991 | int rtx_timeout) |
993 | struct list_head *transport_list, | ||
994 | int rtx_timeout, gfp_t gfp) | ||
995 | { | 992 | { |
996 | struct sctp_transport *transport = *_transport; | ||
997 | struct sctp_packet *packet = transport ? &transport->packet : NULL; | ||
998 | struct sctp_association *asoc = q->asoc; | ||
999 | int error, start_timer = 0; | 993 | int error, start_timer = 0; |
1000 | 994 | ||
1001 | if (asoc->peer.retran_path->state == SCTP_UNCONFIRMED) | 995 | if (ctx->asoc->peer.retran_path->state == SCTP_UNCONFIRMED) |
1002 | return false; | 996 | return false; |
1003 | 997 | ||
1004 | if (transport != asoc->peer.retran_path) { | 998 | if (ctx->transport != ctx->asoc->peer.retran_path) { |
1005 | /* Switch transports & prepare the packet. */ | 999 | /* Switch transports & prepare the packet. */ |
1006 | transport = asoc->peer.retran_path; | 1000 | ctx->transport = ctx->asoc->peer.retran_path; |
1007 | *_transport = transport; | 1001 | ctx->packet = &ctx->transport->packet; |
1008 | 1002 | ||
1009 | if (list_empty(&transport->send_ready)) | 1003 | if (list_empty(&ctx->transport->send_ready)) |
1010 | list_add_tail(&transport->send_ready, | 1004 | list_add_tail(&ctx->transport->send_ready, |
1011 | transport_list); | 1005 | &ctx->transport_list); |
1012 | 1006 | ||
1013 | packet = &transport->packet; | 1007 | sctp_packet_config(ctx->packet, ctx->asoc->peer.i.init_tag, |
1014 | sctp_packet_config(packet, asoc->peer.i.init_tag, | 1008 | ctx->asoc->peer.ecn_capable); |
1015 | asoc->peer.ecn_capable); | ||
1016 | } | 1009 | } |
1017 | 1010 | ||
1018 | error = __sctp_outq_flush_rtx(q, packet, rtx_timeout, &start_timer, | 1011 | error = __sctp_outq_flush_rtx(ctx->q, ctx->packet, rtx_timeout, |
1019 | gfp); | 1012 | &start_timer, ctx->gfp); |
1020 | if (error < 0) | 1013 | if (error < 0) |
1021 | asoc->base.sk->sk_err = -error; | 1014 | ctx->asoc->base.sk->sk_err = -error; |
1022 | 1015 | ||
1023 | if (start_timer) { | 1016 | if (start_timer) { |
1024 | sctp_transport_reset_t3_rtx(transport); | 1017 | sctp_transport_reset_t3_rtx(ctx->transport); |
1025 | transport->last_time_sent = jiffies; | 1018 | ctx->transport->last_time_sent = jiffies; |
1026 | } | 1019 | } |
1027 | 1020 | ||
1028 | /* This can happen on COOKIE-ECHO resend. Only | 1021 | /* This can happen on COOKIE-ECHO resend. Only |
1029 | * one chunk can get bundled with a COOKIE-ECHO. | 1022 | * one chunk can get bundled with a COOKIE-ECHO. |
1030 | */ | 1023 | */ |
1031 | if (packet->has_cookie_echo) | 1024 | if (ctx->packet->has_cookie_echo) |
1032 | return false; | 1025 | return false; |
1033 | 1026 | ||
1034 | /* Don't send new data if there is still data | 1027 | /* Don't send new data if there is still data |
1035 | * waiting to retransmit. | 1028 | * waiting to retransmit. |
1036 | */ | 1029 | */ |
1037 | if (!list_empty(&q->retransmit)) | 1030 | if (!list_empty(&ctx->q->retransmit)) |
1038 | return false; | 1031 | return false; |
1039 | 1032 | ||
1040 | return true; | 1033 | return true; |
1041 | } | 1034 | } |
1042 | 1035 | ||
1043 | static void sctp_outq_flush_data(struct sctp_outq *q, | 1036 | static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx, |
1044 | struct sctp_transport **_transport, | 1037 | int rtx_timeout) |
1045 | struct list_head *transport_list, | ||
1046 | int rtx_timeout, gfp_t gfp) | ||
1047 | { | 1038 | { |
1048 | struct sctp_transport *transport = *_transport; | ||
1049 | struct sctp_packet *packet = transport ? &transport->packet : NULL; | ||
1050 | struct sctp_association *asoc = q->asoc; | ||
1051 | struct sctp_chunk *chunk; | 1039 | struct sctp_chunk *chunk; |
1052 | enum sctp_xmit status; | 1040 | enum sctp_xmit status; |
1053 | 1041 | ||
1054 | /* Is it OK to send data chunks? */ | 1042 | /* Is it OK to send data chunks? */ |
1055 | switch (asoc->state) { | 1043 | switch (ctx->asoc->state) { |
1056 | case SCTP_STATE_COOKIE_ECHOED: | 1044 | case SCTP_STATE_COOKIE_ECHOED: |
1057 | /* Only allow bundling when this packet has a COOKIE-ECHO | 1045 | /* Only allow bundling when this packet has a COOKIE-ECHO |
1058 | * chunk. | 1046 | * chunk. |
1059 | */ | 1047 | */ |
1060 | if (!packet || !packet->has_cookie_echo) | 1048 | if (!ctx->packet || !ctx->packet->has_cookie_echo) |
1061 | return; | 1049 | return; |
1062 | 1050 | ||
1063 | /* fallthru */ | 1051 | /* fallthru */ |
@@ -1071,8 +1059,7 @@ static void sctp_outq_flush_data(struct sctp_outq *q, | |||
1071 | return; | 1059 | return; |
1072 | } | 1060 | } |
1073 | 1061 | ||
1074 | /* | 1062 | /* RFC 2960 6.1 Transmission of DATA Chunks |
1075 | * RFC 2960 6.1 Transmission of DATA Chunks | ||
1076 | * | 1063 | * |
1077 | * C) When the time comes for the sender to transmit, | 1064 | * C) When the time comes for the sender to transmit, |
1078 | * before sending new DATA chunks, the sender MUST | 1065 | * before sending new DATA chunks, the sender MUST |
@@ -1080,56 +1067,47 @@ static void sctp_outq_flush_data(struct sctp_outq *q, | |||
1080 | * are marked for retransmission (limited by the | 1067 | * are marked for retransmission (limited by the |
1081 | * current cwnd). | 1068 | * current cwnd). |
1082 | */ | 1069 | */ |
1083 | if (!list_empty(&q->retransmit)) { | 1070 | if (!list_empty(&ctx->q->retransmit) && |
1084 | if (!sctp_outq_flush_rtx(q, _transport, transport_list, | 1071 | !sctp_outq_flush_rtx(ctx, rtx_timeout)) |
1085 | rtx_timeout, gfp)) | 1072 | return; |
1086 | return; | ||
1087 | /* We may have switched current transport */ | ||
1088 | transport = *_transport; | ||
1089 | packet = &transport->packet; | ||
1090 | } | ||
1091 | 1073 | ||
1092 | /* Apply Max.Burst limitation to the current transport in | 1074 | /* Apply Max.Burst limitation to the current transport in |
1093 | * case it will be used for new data. We are going to | 1075 | * case it will be used for new data. We are going to |
1094 | * rest it before we return, but we want to apply the limit | 1076 | * rest it before we return, but we want to apply the limit |
1095 | * to the currently queued data. | 1077 | * to the currently queued data. |
1096 | */ | 1078 | */ |
1097 | if (transport) | 1079 | if (ctx->transport) |
1098 | sctp_transport_burst_limited(transport); | 1080 | sctp_transport_burst_limited(ctx->transport); |
1099 | 1081 | ||
1100 | /* Finally, transmit new packets. */ | 1082 | /* Finally, transmit new packets. */ |
1101 | while ((chunk = sctp_outq_dequeue_data(q)) != NULL) { | 1083 | while ((chunk = sctp_outq_dequeue_data(ctx->q)) != NULL) { |
1102 | __u32 sid = ntohs(chunk->subh.data_hdr->stream); | 1084 | __u32 sid = ntohs(chunk->subh.data_hdr->stream); |
1103 | 1085 | ||
1104 | /* Has this chunk expired? */ | 1086 | /* Has this chunk expired? */ |
1105 | if (sctp_chunk_abandoned(chunk)) { | 1087 | if (sctp_chunk_abandoned(chunk)) { |
1106 | sctp_sched_dequeue_done(q, chunk); | 1088 | sctp_sched_dequeue_done(ctx->q, chunk); |
1107 | sctp_chunk_fail(chunk, 0); | 1089 | sctp_chunk_fail(chunk, 0); |
1108 | sctp_chunk_free(chunk); | 1090 | sctp_chunk_free(chunk); |
1109 | continue; | 1091 | continue; |
1110 | } | 1092 | } |
1111 | 1093 | ||
1112 | if (asoc->stream.out[sid].state == SCTP_STREAM_CLOSED) { | 1094 | if (ctx->asoc->stream.out[sid].state == SCTP_STREAM_CLOSED) { |
1113 | sctp_outq_head_data(q, chunk); | 1095 | sctp_outq_head_data(ctx->q, chunk); |
1114 | break; | 1096 | break; |
1115 | } | 1097 | } |
1116 | 1098 | ||
1117 | if (sctp_outq_select_transport(chunk, asoc, _transport, | 1099 | sctp_outq_select_transport(ctx, chunk); |
1118 | transport_list)) { | ||
1119 | transport = *_transport; | ||
1120 | packet = &transport->packet; | ||
1121 | } | ||
1122 | 1100 | ||
1123 | pr_debug("%s: outq:%p, chunk:%p[%s], tx-tsn:0x%x skb->head:%p " | 1101 | pr_debug("%s: outq:%p, chunk:%p[%s], tx-tsn:0x%x skb->head:%p skb->users:%d\n", |
1124 | "skb->users:%d\n", | 1102 | __func__, ctx->q, chunk, chunk && chunk->chunk_hdr ? |
1125 | __func__, q, chunk, chunk && chunk->chunk_hdr ? | ||
1126 | sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) : | 1103 | sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) : |
1127 | "illegal chunk", ntohl(chunk->subh.data_hdr->tsn), | 1104 | "illegal chunk", ntohl(chunk->subh.data_hdr->tsn), |
1128 | chunk->skb ? chunk->skb->head : NULL, chunk->skb ? | 1105 | chunk->skb ? chunk->skb->head : NULL, chunk->skb ? |
1129 | refcount_read(&chunk->skb->users) : -1); | 1106 | refcount_read(&chunk->skb->users) : -1); |
1130 | 1107 | ||
1131 | /* Add the chunk to the packet. */ | 1108 | /* Add the chunk to the packet. */ |
1132 | status = sctp_packet_transmit_chunk(packet, chunk, 0, gfp); | 1109 | status = sctp_packet_transmit_chunk(ctx->packet, chunk, 0, |
1110 | ctx->gfp); | ||
1133 | if (status != SCTP_XMIT_OK) { | 1111 | if (status != SCTP_XMIT_OK) { |
1134 | /* We could not append this chunk, so put | 1112 | /* We could not append this chunk, so put |
1135 | * the chunk back on the output queue. | 1113 | * the chunk back on the output queue. |
@@ -1138,7 +1116,7 @@ static void sctp_outq_flush_data(struct sctp_outq *q, | |||
1138 | __func__, ntohl(chunk->subh.data_hdr->tsn), | 1116 | __func__, ntohl(chunk->subh.data_hdr->tsn), |
1139 | status); | 1117 | status); |
1140 | 1118 | ||
1141 | sctp_outq_head_data(q, chunk); | 1119 | sctp_outq_head_data(ctx->q, chunk); |
1142 | break; | 1120 | break; |
1143 | } | 1121 | } |
1144 | 1122 | ||
@@ -1146,48 +1124,46 @@ static void sctp_outq_flush_data(struct sctp_outq *q, | |||
1146 | * The sender MAY set the I-bit in the DATA | 1124 | * The sender MAY set the I-bit in the DATA |
1147 | * chunk header. | 1125 | * chunk header. |
1148 | */ | 1126 | */ |
1149 | if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING) | 1127 | if (ctx->asoc->state == SCTP_STATE_SHUTDOWN_PENDING) |
1150 | chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM; | 1128 | chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM; |
1151 | if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) | 1129 | if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) |
1152 | asoc->stats.ouodchunks++; | 1130 | ctx->asoc->stats.ouodchunks++; |
1153 | else | 1131 | else |
1154 | asoc->stats.oodchunks++; | 1132 | ctx->asoc->stats.oodchunks++; |
1155 | 1133 | ||
1156 | /* Only now it's safe to consider this | 1134 | /* Only now it's safe to consider this |
1157 | * chunk as sent, sched-wise. | 1135 | * chunk as sent, sched-wise. |
1158 | */ | 1136 | */ |
1159 | sctp_sched_dequeue_done(q, chunk); | 1137 | sctp_sched_dequeue_done(ctx->q, chunk); |
1160 | 1138 | ||
1161 | list_add_tail(&chunk->transmitted_list, | 1139 | list_add_tail(&chunk->transmitted_list, |
1162 | &transport->transmitted); | 1140 | &ctx->transport->transmitted); |
1163 | 1141 | ||
1164 | sctp_transport_reset_t3_rtx(transport); | 1142 | sctp_transport_reset_t3_rtx(ctx->transport); |
1165 | transport->last_time_sent = jiffies; | 1143 | ctx->transport->last_time_sent = jiffies; |
1166 | 1144 | ||
1167 | /* Only let one DATA chunk get bundled with a | 1145 | /* Only let one DATA chunk get bundled with a |
1168 | * COOKIE-ECHO chunk. | 1146 | * COOKIE-ECHO chunk. |
1169 | */ | 1147 | */ |
1170 | if (packet->has_cookie_echo) | 1148 | if (ctx->packet->has_cookie_echo) |
1171 | break; | 1149 | break; |
1172 | } | 1150 | } |
1173 | } | 1151 | } |
1174 | 1152 | ||
1175 | static void sctp_outq_flush_transports(struct sctp_outq *q, | 1153 | static void sctp_outq_flush_transports(struct sctp_flush_ctx *ctx) |
1176 | struct list_head *transport_list, | ||
1177 | gfp_t gfp) | ||
1178 | { | 1154 | { |
1179 | struct list_head *ltransport; | 1155 | struct list_head *ltransport; |
1180 | struct sctp_packet *packet; | 1156 | struct sctp_packet *packet; |
1181 | struct sctp_transport *t; | 1157 | struct sctp_transport *t; |
1182 | int error = 0; | 1158 | int error = 0; |
1183 | 1159 | ||
1184 | while ((ltransport = sctp_list_dequeue(transport_list)) != NULL) { | 1160 | while ((ltransport = sctp_list_dequeue(&ctx->transport_list)) != NULL) { |
1185 | t = list_entry(ltransport, struct sctp_transport, send_ready); | 1161 | t = list_entry(ltransport, struct sctp_transport, send_ready); |
1186 | packet = &t->packet; | 1162 | packet = &t->packet; |
1187 | if (!sctp_packet_empty(packet)) { | 1163 | if (!sctp_packet_empty(packet)) { |
1188 | error = sctp_packet_transmit(packet, gfp); | 1164 | error = sctp_packet_transmit(packet, ctx->gfp); |
1189 | if (error < 0) | 1165 | if (error < 0) |
1190 | q->asoc->base.sk->sk_err = -error; | 1166 | ctx->q->asoc->base.sk->sk_err = -error; |
1191 | } | 1167 | } |
1192 | 1168 | ||
1193 | /* Clear the burst limited state, if any */ | 1169 | /* Clear the burst limited state, if any */ |
@@ -1195,8 +1171,7 @@ static void sctp_outq_flush_transports(struct sctp_outq *q, | |||
1195 | } | 1171 | } |
1196 | } | 1172 | } |
1197 | 1173 | ||
1198 | /* | 1174 | /* Try to flush an outqueue. |
1199 | * Try to flush an outqueue. | ||
1200 | * | 1175 | * |
1201 | * Description: Send everything in q which we legally can, subject to | 1176 | * Description: Send everything in q which we legally can, subject to |
1202 | * congestion limitations. | 1177 | * congestion limitations. |
@@ -1204,15 +1179,19 @@ static void sctp_outq_flush_transports(struct sctp_outq *q, | |||
1204 | * locking concerns must be made. Today we use the sock lock to protect | 1179 | * locking concerns must be made. Today we use the sock lock to protect |
1205 | * this function. | 1180 | * this function. |
1206 | */ | 1181 | */ |
1182 | |||
1207 | static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp) | 1183 | static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp) |
1208 | { | 1184 | { |
1209 | /* Current transport being used. It's NOT the same as curr active one */ | 1185 | struct sctp_flush_ctx ctx = { |
1210 | struct sctp_transport *transport = NULL; | 1186 | .q = q, |
1211 | /* These transports have chunks to send. */ | 1187 | .transport = NULL, |
1212 | LIST_HEAD(transport_list); | 1188 | .transport_list = LIST_HEAD_INIT(ctx.transport_list), |
1213 | 1189 | .asoc = q->asoc, | |
1214 | /* | 1190 | .packet = NULL, |
1215 | * 6.10 Bundling | 1191 | .gfp = gfp, |
1192 | }; | ||
1193 | |||
1194 | /* 6.10 Bundling | ||
1216 | * ... | 1195 | * ... |
1217 | * When bundling control chunks with DATA chunks, an | 1196 | * When bundling control chunks with DATA chunks, an |
1218 | * endpoint MUST place control chunks first in the outbound | 1197 | * endpoint MUST place control chunks first in the outbound |
@@ -1221,16 +1200,16 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp) | |||
1221 | * ... | 1200 | * ... |
1222 | */ | 1201 | */ |
1223 | 1202 | ||
1224 | sctp_outq_flush_ctrl(q, &transport, &transport_list, gfp); | 1203 | sctp_outq_flush_ctrl(&ctx); |
1225 | 1204 | ||
1226 | if (q->asoc->src_out_of_asoc_ok) | 1205 | if (q->asoc->src_out_of_asoc_ok) |
1227 | goto sctp_flush_out; | 1206 | goto sctp_flush_out; |
1228 | 1207 | ||
1229 | sctp_outq_flush_data(q, &transport, &transport_list, rtx_timeout, gfp); | 1208 | sctp_outq_flush_data(&ctx, rtx_timeout); |
1230 | 1209 | ||
1231 | sctp_flush_out: | 1210 | sctp_flush_out: |
1232 | 1211 | ||
1233 | sctp_outq_flush_transports(q, &transport_list, gfp); | 1212 | sctp_outq_flush_transports(&ctx); |
1234 | } | 1213 | } |
1235 | 1214 | ||
1236 | /* Update unack_data based on the incoming SACK chunk */ | 1215 | /* Update unack_data based on the incoming SACK chunk */ |
@@ -1783,7 +1762,7 @@ static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn) | |||
1783 | if (TSN_lte(tsn, ctsn)) | 1762 | if (TSN_lte(tsn, ctsn)) |
1784 | goto pass; | 1763 | goto pass; |
1785 | 1764 | ||
1786 | /* 3.3.4 Selective Acknowledgement (SACK) (3): | 1765 | /* 3.3.4 Selective Acknowledgment (SACK) (3): |
1787 | * | 1766 | * |
1788 | * Gap Ack Blocks: | 1767 | * Gap Ack Blocks: |
1789 | * These fields contain the Gap Ack Blocks. They are repeated | 1768 | * These fields contain the Gap Ack Blocks. They are repeated |