path: root/net/sctp/outqueue.c
Diffstat (limited to 'net/sctp/outqueue.c')
 net/sctp/outqueue.c | 92
 1 file changed, 56 insertions(+), 36 deletions(-)
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index bda50596d4bf..92f14f51edf2 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -875,45 +875,21 @@ static bool sctp_outq_select_transport(struct sctp_chunk *chunk,
 	return changed;
 }
 
-/*
- * Try to flush an outqueue.
- *
- * Description: Send everything in q which we legally can, subject to
- * congestion limitations.
- * * Note: This function can be called from multiple contexts so appropriate
- * locking concerns must be made. Today we use the sock lock to protect
- * this function.
- */
-static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
+static void sctp_outq_flush_ctrl(struct sctp_outq *q,
+                                 struct sctp_transport **_transport,
+                                 struct list_head *transport_list,
+                                 gfp_t gfp)
 {
-	struct sctp_packet *packet;
+	struct sctp_transport *transport = *_transport;
 	struct sctp_association *asoc = q->asoc;
-	__u32 vtag = asoc->peer.i.init_tag;
-	struct sctp_transport *transport = NULL;
+	struct sctp_packet *packet = NULL;
 	struct sctp_chunk *chunk, *tmp;
 	enum sctp_xmit status;
-	int error = 0;
-	int start_timer = 0;
-	int one_packet = 0;
-
-	/* These transports have chunks to send. */
-	struct list_head transport_list;
-	struct list_head *ltransport;
-
-	INIT_LIST_HEAD(&transport_list);
-	packet = NULL;
-
-	/*
-	 * 6.10 Bundling
-	 *   ...
-	 *   When bundling control chunks with DATA chunks, an
-	 *   endpoint MUST place control chunks first in the outbound
-	 *   SCTP packet. The transmitter MUST transmit DATA chunks
-	 *   within a SCTP packet in increasing order of TSN.
-	 *   ...
-	 */
+	int one_packet, error;
 
 	list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
+		one_packet = 0;
+
 		/* RFC 5061, 5.3
 		 * F1) This means that until such time as the ASCONF
 		 * containing the add is acknowledged, the sender MUST
@@ -929,9 +905,11 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
 		/* Pick the right transport to use. Should always be true for
 		 * the first chunk as we don't have a transport by then.
 		 */
-		if (sctp_outq_select_transport(chunk, asoc, &transport,
-					       &transport_list))
+		if (sctp_outq_select_transport(chunk, asoc, _transport,
+					       transport_list)) {
+			transport = *_transport;
 			packet = &transport->packet;
+		}
 
 		switch (chunk->chunk_hdr->type) {
 		/*
@@ -954,6 +932,7 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
 			if (sctp_test_T_bit(chunk))
 				packet->vtag = asoc->c.my_vtag;
 			/* fallthru */
+
 		/* The following chunks are "response" chunks, i.e.
 		 * they are generated in response to something we
 		 * received. If we are sending these, then we can
@@ -979,7 +958,7 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
 		case SCTP_CID_RECONF:
 			status = sctp_packet_transmit_chunk(packet, chunk,
 							    one_packet, gfp);
-			if (status  != SCTP_XMIT_OK) {
+			if (status != SCTP_XMIT_OK) {
 				/* put the chunk back */
 				list_add(&chunk->list, &q->control_chunk_list);
 				break;
@@ -1006,6 +985,47 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
 			BUG();
 		}
 	}
+}
+
+/*
+ * Try to flush an outqueue.
+ *
+ * Description: Send everything in q which we legally can, subject to
+ * congestion limitations.
+ * * Note: This function can be called from multiple contexts so appropriate
+ * locking concerns must be made. Today we use the sock lock to protect
+ * this function.
+ */
+static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
+{
+	struct sctp_packet *packet;
+	struct sctp_association *asoc = q->asoc;
+	__u32 vtag = asoc->peer.i.init_tag;
+	struct sctp_transport *transport = NULL;
+	struct sctp_chunk *chunk;
+	enum sctp_xmit status;
+	int error = 0;
+	int start_timer = 0;
+
+	/* These transports have chunks to send. */
+	struct list_head transport_list;
+	struct list_head *ltransport;
+
+	INIT_LIST_HEAD(&transport_list);
+	packet = NULL;
+
+	/*
+	 * 6.10 Bundling
+	 *   ...
+	 *   When bundling control chunks with DATA chunks, an
+	 *   endpoint MUST place control chunks first in the outbound
+	 *   SCTP packet. The transmitter MUST transmit DATA chunks
+	 *   within a SCTP packet in increasing order of TSN.
+	 *   ...
+	 */
+
+	sctp_outq_flush_ctrl(q, &transport, &transport_list, gfp);
+	packet = &transport->packet;
 
 	if (q->asoc->src_out_of_asoc_ok)
 		goto sctp_flush_out;
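
The shape of the refactor above: the new sctp_outq_flush_ctrl() receives the caller's current transport by reference (struct sctp_transport **_transport) together with the caller-owned transport_list, so whichever transport it selects while sending control chunks is handed back to sctp_outq_flush(), which then keeps using it (packet = &transport->packet). The standalone userspace program below is only a sketch of that out-parameter pattern under those assumptions; toy_transport and toy_flush_ctrl are made-up names for illustration and none of this is kernel code.

/* Minimal sketch of the double-pointer out-parameter pattern used by the
 * refactor. Compile with: cc -Wall toy.c -o toy
 */
#include <stdio.h>
#include <stddef.h>

struct toy_transport {
	const char *name;
	int packets;            /* stand-in for per-transport packet state */
};

/* Like sctp_outq_flush_ctrl(), the helper may change which transport the
 * caller is working with; the change is visible through *_transport.
 */
static void toy_flush_ctrl(struct toy_transport **_transport,
			   struct toy_transport *candidate)
{
	if (*_transport == NULL)
		*_transport = candidate;   /* caller sees the new selection */
	(*_transport)->packets++;          /* "bundle" one control chunk */
}

int main(void)
{
	struct toy_transport primary = { .name = "primary", .packets = 0 };
	struct toy_transport *transport = NULL;

	/* The caller passes &transport, the way sctp_outq_flush() passes
	 * &transport and &transport_list down to sctp_outq_flush_ctrl().
	 */
	toy_flush_ctrl(&transport, &primary);

	if (transport)
		printf("control chunks flushed on %s, packets=%d\n",
		       transport->name, transport->packets);
	return 0;
}

Keeping transport and transport_list declared in sctp_outq_flush() and only lending them to the helper means the later stages of the flush (data chunks, retransmissions) continue to operate on the same selection state the control-chunk pass established.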