diff options
-rw-r--r--  include/net/sctp/structs.h  | 20
-rw-r--r--  net/sctp/associola.c        |  2
-rw-r--r--  net/sctp/input.c            | 26
-rw-r--r--  net/sctp/inqueue.c          | 18
-rw-r--r--  net/sctp/output.c           | 22
-rw-r--r--  net/sctp/outqueue.c         | 50
-rw-r--r--  net/sctp/sm_make_chunk.c    | 12
-rw-r--r--  net/sctp/socket.c           |  2
8 files changed, 86 insertions, 66 deletions
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 47727c7cc628..7435528a1747 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h | |||
| @@ -582,7 +582,6 @@ void sctp_datamsg_track(struct sctp_chunk *); | |||
| 582 | void sctp_chunk_fail(struct sctp_chunk *, int error); | 582 | void sctp_chunk_fail(struct sctp_chunk *, int error); |
| 583 | int sctp_chunk_abandoned(struct sctp_chunk *); | 583 | int sctp_chunk_abandoned(struct sctp_chunk *); |
| 584 | 584 | ||
| 585 | |||
| 586 | /* RFC2960 1.4 Key Terms | 585 | /* RFC2960 1.4 Key Terms |
| 587 | * | 586 | * |
| 588 | * o Chunk: A unit of information within an SCTP packet, consisting of | 587 | * o Chunk: A unit of information within an SCTP packet, consisting of |
| @@ -592,13 +591,8 @@ int sctp_chunk_abandoned(struct sctp_chunk *); | |||
| 592 | * each chunk as well as a few other header pointers... | 591 | * each chunk as well as a few other header pointers... |
| 593 | */ | 592 | */ |
| 594 | struct sctp_chunk { | 593 | struct sctp_chunk { |
| 595 | /* These first three elements MUST PRECISELY match the first | 594 | struct list_head list; |
| 596 | * three elements of struct sk_buff. This allows us to reuse | 595 | |
| 597 | * all the skb_* queue management functions. | ||
| 598 | */ | ||
| 599 | struct sctp_chunk *next; | ||
| 600 | struct sctp_chunk *prev; | ||
| 601 | struct sk_buff_head *list; | ||
| 602 | atomic_t refcnt; | 596 | atomic_t refcnt; |
| 603 | 597 | ||
| 604 | /* This is our link to the per-transport transmitted list. */ | 598 | /* This is our link to the per-transport transmitted list. */ |
| @@ -717,7 +711,7 @@ struct sctp_packet { | |||
| 717 | __u32 vtag; | 711 | __u32 vtag; |
| 718 | 712 | ||
| 719 | /* This contains the payload chunks. */ | 713 | /* This contains the payload chunks. */ |
| 720 | struct sk_buff_head chunks; | 714 | struct list_head chunk_list; |
| 721 | 715 | ||
| 722 | /* This is the overhead of the sctp and ip headers. */ | 716 | /* This is the overhead of the sctp and ip headers. */ |
| 723 | size_t overhead; | 717 | size_t overhead; |
| @@ -974,7 +968,7 @@ struct sctp_inq { | |||
| 974 | /* This is actually a queue of sctp_chunk each | 968 | /* This is actually a queue of sctp_chunk each |
| 975 | * containing a partially decoded packet. | 969 | * containing a partially decoded packet. |
| 976 | */ | 970 | */ |
| 977 | struct sk_buff_head in; | 971 | struct list_head in_chunk_list; |
| 978 | /* This is the packet which is currently off the in queue and is | 972 | /* This is the packet which is currently off the in queue and is |
| 979 | * being worked on through the inbound chunk processing. | 973 | * being worked on through the inbound chunk processing. |
| 980 | */ | 974 | */ |
| @@ -1017,7 +1011,7 @@ struct sctp_outq { | |||
| 1017 | struct sctp_association *asoc; | 1011 | struct sctp_association *asoc; |
| 1018 | 1012 | ||
| 1019 | /* Data pending that has never been transmitted. */ | 1013 | /* Data pending that has never been transmitted. */ |
| 1020 | struct sk_buff_head out; | 1014 | struct list_head out_chunk_list; |
| 1021 | 1015 | ||
| 1022 | unsigned out_qlen; /* Total length of queued data chunks. */ | 1016 | unsigned out_qlen; /* Total length of queued data chunks. */ |
| 1023 | 1017 | ||
| @@ -1025,7 +1019,7 @@ struct sctp_outq { | |||
| 1025 | unsigned error; | 1019 | unsigned error; |
| 1026 | 1020 | ||
| 1027 | /* These are control chunks we want to send. */ | 1021 | /* These are control chunks we want to send. */ |
| 1028 | struct sk_buff_head control; | 1022 | struct list_head control_chunk_list; |
| 1029 | 1023 | ||
| 1030 | /* These are chunks that have been sacked but are above the | 1024 | /* These are chunks that have been sacked but are above the |
| 1031 | * CTSN, or cumulative tsn ack point. | 1025 | * CTSN, or cumulative tsn ack point. |
| @@ -1672,7 +1666,7 @@ struct sctp_association { | |||
| 1672 | * which already resides in sctp_outq. Please move this | 1666 | * which already resides in sctp_outq. Please move this |
| 1673 | * queue and its supporting logic down there. --piggy] | 1667 | * queue and its supporting logic down there. --piggy] |
| 1674 | */ | 1668 | */ |
| 1675 | struct sk_buff_head addip_chunks; | 1669 | struct list_head addip_chunk_list; |
| 1676 | 1670 | ||
| 1677 | /* ADDIP Section 4.1 ASCONF Chunk Procedures | 1671 | /* ADDIP Section 4.1 ASCONF Chunk Procedures |
| 1678 | * | 1672 | * |
diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 7ae6aa772dab..4b47dd6f2485 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c | |||
| @@ -203,7 +203,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a | |||
| 203 | */ | 203 | */ |
| 204 | asoc->addip_serial = asoc->c.initial_tsn; | 204 | asoc->addip_serial = asoc->c.initial_tsn; |
| 205 | 205 | ||
| 206 | skb_queue_head_init(&asoc->addip_chunks); | 206 | INIT_LIST_HEAD(&asoc->addip_chunk_list); |
| 207 | 207 | ||
| 208 | /* Make an empty list of remote transport addresses. */ | 208 | /* Make an empty list of remote transport addresses. */ |
| 209 | INIT_LIST_HEAD(&asoc->peer.transport_addr_list); | 209 | INIT_LIST_HEAD(&asoc->peer.transport_addr_list); |
diff --git a/net/sctp/input.c b/net/sctp/input.c index 339f7acfdb64..5e085e041a6e 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c | |||
| @@ -115,6 +115,17 @@ static void sctp_rcv_set_owner_r(struct sk_buff *skb, struct sock *sk) | |||
| 115 | atomic_add(sizeof(struct sctp_chunk),&sk->sk_rmem_alloc); | 115 | atomic_add(sizeof(struct sctp_chunk),&sk->sk_rmem_alloc); |
| 116 | } | 116 | } |
| 117 | 117 | ||
| 118 | struct sctp_input_cb { | ||
| 119 | union { | ||
| 120 | struct inet_skb_parm h4; | ||
| 121 | #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) | ||
| 122 | struct inet6_skb_parm h6; | ||
| 123 | #endif | ||
| 124 | } header; | ||
| 125 | struct sctp_chunk *chunk; | ||
| 126 | }; | ||
| 127 | #define SCTP_INPUT_CB(__skb) ((struct sctp_input_cb *)&((__skb)->cb[0])) | ||
| 128 | |||
| 118 | /* | 129 | /* |
| 119 | * This is the routine which IP calls when receiving an SCTP packet. | 130 | * This is the routine which IP calls when receiving an SCTP packet. |
| 120 | */ | 131 | */ |
| @@ -243,6 +254,7 @@ int sctp_rcv(struct sk_buff *skb) | |||
| 243 | ret = -ENOMEM; | 254 | ret = -ENOMEM; |
| 244 | goto discard_release; | 255 | goto discard_release; |
| 245 | } | 256 | } |
| 257 | SCTP_INPUT_CB(skb)->chunk = chunk; | ||
| 246 | 258 | ||
| 247 | sctp_rcv_set_owner_r(skb,sk); | 259 | sctp_rcv_set_owner_r(skb,sk); |
| 248 | 260 | ||
| @@ -265,9 +277,9 @@ int sctp_rcv(struct sk_buff *skb) | |||
| 265 | sctp_bh_lock_sock(sk); | 277 | sctp_bh_lock_sock(sk); |
| 266 | 278 | ||
| 267 | if (sock_owned_by_user(sk)) | 279 | if (sock_owned_by_user(sk)) |
| 268 | sk_add_backlog(sk, (struct sk_buff *) chunk); | 280 | sk_add_backlog(sk, skb); |
| 269 | else | 281 | else |
| 270 | sctp_backlog_rcv(sk, (struct sk_buff *) chunk); | 282 | sctp_backlog_rcv(sk, skb); |
| 271 | 283 | ||
| 272 | /* Release the sock and any reference counts we took in the | 284 | /* Release the sock and any reference counts we took in the |
| 273 | * lookup calls. | 285 | * lookup calls. |
| @@ -302,14 +314,8 @@ discard_release: | |||
| 302 | */ | 314 | */ |
| 303 | int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb) | 315 | int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb) |
| 304 | { | 316 | { |
| 305 | struct sctp_chunk *chunk; | 317 | struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; |
| 306 | struct sctp_inq *inqueue; | 318 | struct sctp_inq *inqueue = &chunk->rcvr->inqueue; |
| 307 | |||
| 308 | /* One day chunk will live inside the skb, but for | ||
| 309 | * now this works. | ||
| 310 | */ | ||
| 311 | chunk = (struct sctp_chunk *) skb; | ||
| 312 | inqueue = &chunk->rcvr->inqueue; | ||
| 313 | 319 | ||
| 314 | sctp_inq_push(inqueue, chunk); | 320 | sctp_inq_push(inqueue, chunk); |
| 315 | return 0; | 321 | return 0; |
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c index cedf4351556c..2d33922c044b 100644 --- a/net/sctp/inqueue.c +++ b/net/sctp/inqueue.c | |||
| @@ -50,7 +50,7 @@ | |||
| 50 | /* Initialize an SCTP inqueue. */ | 50 | /* Initialize an SCTP inqueue. */ |
| 51 | void sctp_inq_init(struct sctp_inq *queue) | 51 | void sctp_inq_init(struct sctp_inq *queue) |
| 52 | { | 52 | { |
| 53 | skb_queue_head_init(&queue->in); | 53 | INIT_LIST_HEAD(&queue->in_chunk_list); |
| 54 | queue->in_progress = NULL; | 54 | queue->in_progress = NULL; |
| 55 | 55 | ||
| 56 | /* Create a task for delivering data. */ | 56 | /* Create a task for delivering data. */ |
| @@ -62,11 +62,13 @@ void sctp_inq_init(struct sctp_inq *queue) | |||
| 62 | /* Release the memory associated with an SCTP inqueue. */ | 62 | /* Release the memory associated with an SCTP inqueue. */ |
| 63 | void sctp_inq_free(struct sctp_inq *queue) | 63 | void sctp_inq_free(struct sctp_inq *queue) |
| 64 | { | 64 | { |
| 65 | struct sctp_chunk *chunk; | 65 | struct sctp_chunk *chunk, *tmp; |
| 66 | 66 | ||
| 67 | /* Empty the queue. */ | 67 | /* Empty the queue. */ |
| 68 | while ((chunk = (struct sctp_chunk *) skb_dequeue(&queue->in)) != NULL) | 68 | list_for_each_entry_safe(chunk, tmp, &queue->in_chunk_list, list) { |
| 69 | list_del_init(&chunk->list); | ||
| 69 | sctp_chunk_free(chunk); | 70 | sctp_chunk_free(chunk); |
| 71 | } | ||
| 70 | 72 | ||
| 71 | /* If there is a packet which is currently being worked on, | 73 | /* If there is a packet which is currently being worked on, |
| 72 | * free it as well. | 74 | * free it as well. |
| @@ -92,7 +94,7 @@ void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *packet) | |||
| 92 | * Eventually, we should clean up inqueue to not rely | 94 | * Eventually, we should clean up inqueue to not rely |
| 93 | * on the BH related data structures. | 95 | * on the BH related data structures. |
| 94 | */ | 96 | */ |
| 95 | skb_queue_tail(&(q->in), (struct sk_buff *) packet); | 97 | list_add_tail(&packet->list, &q->in_chunk_list); |
| 96 | q->immediate.func(q->immediate.data); | 98 | q->immediate.func(q->immediate.data); |
| 97 | } | 99 | } |
| 98 | 100 | ||
| @@ -131,12 +133,16 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue) | |||
| 131 | 133 | ||
| 132 | /* Do we need to take the next packet out of the queue to process? */ | 134 | /* Do we need to take the next packet out of the queue to process? */ |
| 133 | if (!chunk) { | 135 | if (!chunk) { |
| 136 | struct list_head *entry; | ||
| 137 | |||
| 134 | /* Is the queue empty? */ | 138 | /* Is the queue empty? */ |
| 135 | if (skb_queue_empty(&queue->in)) | 139 | if (list_empty(&queue->in_chunk_list)) |
| 136 | return NULL; | 140 | return NULL; |
| 137 | 141 | ||
| 142 | entry = queue->in_chunk_list.next; | ||
| 138 | chunk = queue->in_progress = | 143 | chunk = queue->in_progress = |
| 139 | (struct sctp_chunk *) skb_dequeue(&queue->in); | 144 | list_entry(entry, struct sctp_chunk, list); |
| 145 | list_del_init(entry); | ||
| 140 | 146 | ||
| 141 | /* This is the first chunk in the packet. */ | 147 | /* This is the first chunk in the packet. */ |
| 142 | chunk->singleton = 1; | 148 | chunk->singleton = 1; |
diff --git a/net/sctp/output.c b/net/sctp/output.c index 84b5b370b09d..931371633464 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c | |||
| @@ -108,7 +108,7 @@ struct sctp_packet *sctp_packet_init(struct sctp_packet *packet, | |||
| 108 | packet->transport = transport; | 108 | packet->transport = transport; |
| 109 | packet->source_port = sport; | 109 | packet->source_port = sport; |
| 110 | packet->destination_port = dport; | 110 | packet->destination_port = dport; |
| 111 | skb_queue_head_init(&packet->chunks); | 111 | INIT_LIST_HEAD(&packet->chunk_list); |
| 112 | if (asoc) { | 112 | if (asoc) { |
| 113 | struct sctp_sock *sp = sctp_sk(asoc->base.sk); | 113 | struct sctp_sock *sp = sctp_sk(asoc->base.sk); |
| 114 | overhead = sp->pf->af->net_header_len; | 114 | overhead = sp->pf->af->net_header_len; |
| @@ -129,12 +129,14 @@ struct sctp_packet *sctp_packet_init(struct sctp_packet *packet, | |||
| 129 | /* Free a packet. */ | 129 | /* Free a packet. */ |
| 130 | void sctp_packet_free(struct sctp_packet *packet) | 130 | void sctp_packet_free(struct sctp_packet *packet) |
| 131 | { | 131 | { |
| 132 | struct sctp_chunk *chunk; | 132 | struct sctp_chunk *chunk, *tmp; |
| 133 | 133 | ||
| 134 | SCTP_DEBUG_PRINTK("%s: packet:%p\n", __FUNCTION__, packet); | 134 | SCTP_DEBUG_PRINTK("%s: packet:%p\n", __FUNCTION__, packet); |
| 135 | 135 | ||
| 136 | while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks)) != NULL) | 136 | list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) { |
| 137 | list_del_init(&chunk->list); | ||
| 137 | sctp_chunk_free(chunk); | 138 | sctp_chunk_free(chunk); |
| 139 | } | ||
| 138 | 140 | ||
| 139 | if (packet->malloced) | 141 | if (packet->malloced) |
| 140 | kfree(packet); | 142 | kfree(packet); |
| @@ -276,7 +278,7 @@ append: | |||
| 276 | packet->has_sack = 1; | 278 | packet->has_sack = 1; |
| 277 | 279 | ||
| 278 | /* It is OK to send this chunk. */ | 280 | /* It is OK to send this chunk. */ |
| 279 | __skb_queue_tail(&packet->chunks, (struct sk_buff *)chunk); | 281 | list_add_tail(&chunk->list, &packet->chunk_list); |
| 280 | packet->size += chunk_len; | 282 | packet->size += chunk_len; |
| 281 | chunk->transport = packet->transport; | 283 | chunk->transport = packet->transport; |
| 282 | finish: | 284 | finish: |
| @@ -295,7 +297,7 @@ int sctp_packet_transmit(struct sctp_packet *packet) | |||
| 295 | struct sctphdr *sh; | 297 | struct sctphdr *sh; |
| 296 | __u32 crc32; | 298 | __u32 crc32; |
| 297 | struct sk_buff *nskb; | 299 | struct sk_buff *nskb; |
| 298 | struct sctp_chunk *chunk; | 300 | struct sctp_chunk *chunk, *tmp; |
| 299 | struct sock *sk; | 301 | struct sock *sk; |
| 300 | int err = 0; | 302 | int err = 0; |
| 301 | int padding; /* How much padding do we need? */ | 303 | int padding; /* How much padding do we need? */ |
| @@ -305,11 +307,11 @@ int sctp_packet_transmit(struct sctp_packet *packet) | |||
| 305 | SCTP_DEBUG_PRINTK("%s: packet:%p\n", __FUNCTION__, packet); | 307 | SCTP_DEBUG_PRINTK("%s: packet:%p\n", __FUNCTION__, packet); |
| 306 | 308 | ||
| 307 | /* Do NOT generate a chunkless packet. */ | 309 | /* Do NOT generate a chunkless packet. */ |
| 308 | chunk = (struct sctp_chunk *)skb_peek(&packet->chunks); | 310 | if (list_empty(&packet->chunk_list)) |
| 309 | if (unlikely(!chunk)) | ||
| 310 | return err; | 311 | return err; |
| 311 | 312 | ||
| 312 | /* Set up convenience variables... */ | 313 | /* Set up convenience variables... */ |
| 314 | chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list); | ||
| 313 | sk = chunk->skb->sk; | 315 | sk = chunk->skb->sk; |
| 314 | 316 | ||
| 315 | /* Allocate the new skb. */ | 317 | /* Allocate the new skb. */ |
| @@ -370,7 +372,8 @@ int sctp_packet_transmit(struct sctp_packet *packet) | |||
| 370 | * [This whole comment explains WORD_ROUND() below.] | 372 | * [This whole comment explains WORD_ROUND() below.] |
| 371 | */ | 373 | */ |
| 372 | SCTP_DEBUG_PRINTK("***sctp_transmit_packet***\n"); | 374 | SCTP_DEBUG_PRINTK("***sctp_transmit_packet***\n"); |
| 373 | while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks)) != NULL) { | 375 | list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) { |
| 376 | list_del_init(&chunk->list); | ||
| 374 | if (sctp_chunk_is_data(chunk)) { | 377 | if (sctp_chunk_is_data(chunk)) { |
| 375 | 378 | ||
| 376 | if (!chunk->has_tsn) { | 379 | if (!chunk->has_tsn) { |
| @@ -511,7 +514,8 @@ err: | |||
| 511 | * will get resent or dropped later. | 514 | * will get resent or dropped later. |
| 512 | */ | 515 | */ |
| 513 | 516 | ||
| 514 | while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks)) != NULL) { | 517 | list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) { |
| 518 | list_del_init(&chunk->list); | ||
| 515 | if (!sctp_chunk_is_data(chunk)) | 519 | if (!sctp_chunk_is_data(chunk)) |
| 516 | sctp_chunk_free(chunk); | 520 | sctp_chunk_free(chunk); |
| 517 | } | 521 | } |
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index 4eb81a1407b7..efb72faba20c 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c | |||
| @@ -75,7 +75,7 @@ static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn); | |||
| 75 | static inline void sctp_outq_head_data(struct sctp_outq *q, | 75 | static inline void sctp_outq_head_data(struct sctp_outq *q, |
| 76 | struct sctp_chunk *ch) | 76 | struct sctp_chunk *ch) |
| 77 | { | 77 | { |
| 78 | __skb_queue_head(&q->out, (struct sk_buff *)ch); | 78 | list_add(&ch->list, &q->out_chunk_list); |
| 79 | q->out_qlen += ch->skb->len; | 79 | q->out_qlen += ch->skb->len; |
| 80 | return; | 80 | return; |
| 81 | } | 81 | } |
| @@ -83,17 +83,22 @@ static inline void sctp_outq_head_data(struct sctp_outq *q, | |||
| 83 | /* Take data from the front of the queue. */ | 83 | /* Take data from the front of the queue. */ |
| 84 | static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q) | 84 | static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q) |
| 85 | { | 85 | { |
| 86 | struct sctp_chunk *ch; | 86 | struct sctp_chunk *ch = NULL; |
| 87 | ch = (struct sctp_chunk *)__skb_dequeue(&q->out); | 87 | |
| 88 | if (ch) | 88 | if (!list_empty(&q->out_chunk_list)) { |
| 89 | struct list_head *entry = q->out_chunk_list.next; | ||
| 90 | |||
| 91 | ch = list_entry(entry, struct sctp_chunk, list); | ||
| 92 | list_del_init(entry); | ||
| 89 | q->out_qlen -= ch->skb->len; | 93 | q->out_qlen -= ch->skb->len; |
| 94 | } | ||
| 90 | return ch; | 95 | return ch; |
| 91 | } | 96 | } |
| 92 | /* Add data chunk to the end of the queue. */ | 97 | /* Add data chunk to the end of the queue. */ |
| 93 | static inline void sctp_outq_tail_data(struct sctp_outq *q, | 98 | static inline void sctp_outq_tail_data(struct sctp_outq *q, |
| 94 | struct sctp_chunk *ch) | 99 | struct sctp_chunk *ch) |
| 95 | { | 100 | { |
| 96 | __skb_queue_tail(&q->out, (struct sk_buff *)ch); | 101 | list_add_tail(&ch->list, &q->out_chunk_list); |
| 97 | q->out_qlen += ch->skb->len; | 102 | q->out_qlen += ch->skb->len; |
| 98 | return; | 103 | return; |
| 99 | } | 104 | } |
| @@ -197,8 +202,8 @@ static inline int sctp_cacc_skip(struct sctp_transport *primary, | |||
| 197 | void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q) | 202 | void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q) |
| 198 | { | 203 | { |
| 199 | q->asoc = asoc; | 204 | q->asoc = asoc; |
| 200 | skb_queue_head_init(&q->out); | 205 | INIT_LIST_HEAD(&q->out_chunk_list); |
| 201 | skb_queue_head_init(&q->control); | 206 | INIT_LIST_HEAD(&q->control_chunk_list); |
| 202 | INIT_LIST_HEAD(&q->retransmit); | 207 | INIT_LIST_HEAD(&q->retransmit); |
| 203 | INIT_LIST_HEAD(&q->sacked); | 208 | INIT_LIST_HEAD(&q->sacked); |
| 204 | INIT_LIST_HEAD(&q->abandoned); | 209 | INIT_LIST_HEAD(&q->abandoned); |
| @@ -217,7 +222,7 @@ void sctp_outq_teardown(struct sctp_outq *q) | |||
| 217 | { | 222 | { |
| 218 | struct sctp_transport *transport; | 223 | struct sctp_transport *transport; |
| 219 | struct list_head *lchunk, *pos, *temp; | 224 | struct list_head *lchunk, *pos, *temp; |
| 220 | struct sctp_chunk *chunk; | 225 | struct sctp_chunk *chunk, *tmp; |
| 221 | 226 | ||
| 222 | /* Throw away unacknowledged chunks. */ | 227 | /* Throw away unacknowledged chunks. */ |
| 223 | list_for_each(pos, &q->asoc->peer.transport_addr_list) { | 228 | list_for_each(pos, &q->asoc->peer.transport_addr_list) { |
| @@ -269,8 +274,10 @@ void sctp_outq_teardown(struct sctp_outq *q) | |||
| 269 | q->error = 0; | 274 | q->error = 0; |
| 270 | 275 | ||
| 271 | /* Throw away any leftover control chunks. */ | 276 | /* Throw away any leftover control chunks. */ |
| 272 | while ((chunk = (struct sctp_chunk *) skb_dequeue(&q->control)) != NULL) | 277 | list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) { |
| 278 | list_del_init(&chunk->list); | ||
| 273 | sctp_chunk_free(chunk); | 279 | sctp_chunk_free(chunk); |
| 280 | } | ||
| 274 | } | 281 | } |
| 275 | 282 | ||
| 276 | /* Free the outqueue structure and any related pending chunks. */ | 283 | /* Free the outqueue structure and any related pending chunks. */ |
| @@ -333,7 +340,7 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk) | |||
| 333 | break; | 340 | break; |
| 334 | }; | 341 | }; |
| 335 | } else { | 342 | } else { |
| 336 | __skb_queue_tail(&q->control, (struct sk_buff *) chunk); | 343 | list_add_tail(&chunk->list, &q->control_chunk_list); |
| 337 | SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); | 344 | SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); |
| 338 | } | 345 | } |
| 339 | 346 | ||
| @@ -650,10 +657,9 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout) | |||
| 650 | __u16 sport = asoc->base.bind_addr.port; | 657 | __u16 sport = asoc->base.bind_addr.port; |
| 651 | __u16 dport = asoc->peer.port; | 658 | __u16 dport = asoc->peer.port; |
| 652 | __u32 vtag = asoc->peer.i.init_tag; | 659 | __u32 vtag = asoc->peer.i.init_tag; |
| 653 | struct sk_buff_head *queue; | ||
| 654 | struct sctp_transport *transport = NULL; | 660 | struct sctp_transport *transport = NULL; |
| 655 | struct sctp_transport *new_transport; | 661 | struct sctp_transport *new_transport; |
| 656 | struct sctp_chunk *chunk; | 662 | struct sctp_chunk *chunk, *tmp; |
| 657 | sctp_xmit_t status; | 663 | sctp_xmit_t status; |
| 658 | int error = 0; | 664 | int error = 0; |
| 659 | int start_timer = 0; | 665 | int start_timer = 0; |
| @@ -675,8 +681,9 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout) | |||
| 675 | * ... | 681 | * ... |
| 676 | */ | 682 | */ |
| 677 | 683 | ||
| 678 | queue = &q->control; | 684 | list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) { |
| 679 | while ((chunk = (struct sctp_chunk *)skb_dequeue(queue)) != NULL) { | 685 | list_del_init(&chunk->list); |
| 686 | |||
| 680 | /* Pick the right transport to use. */ | 687 | /* Pick the right transport to use. */ |
| 681 | new_transport = chunk->transport; | 688 | new_transport = chunk->transport; |
| 682 | 689 | ||
| @@ -814,8 +821,6 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout) | |||
| 814 | 821 | ||
| 815 | /* Finally, transmit new packets. */ | 822 | /* Finally, transmit new packets. */ |
| 816 | start_timer = 0; | 823 | start_timer = 0; |
| 817 | queue = &q->out; | ||
| 818 | |||
| 819 | while ((chunk = sctp_outq_dequeue_data(q)) != NULL) { | 824 | while ((chunk = sctp_outq_dequeue_data(q)) != NULL) { |
| 820 | /* RFC 2960 6.5 Every DATA chunk MUST carry a valid | 825 | /* RFC 2960 6.5 Every DATA chunk MUST carry a valid |
| 821 | * stream identifier. | 826 | * stream identifier. |
| @@ -1149,8 +1154,9 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack) | |||
| 1149 | /* See if all chunks are acked. | 1154 | /* See if all chunks are acked. |
| 1150 | * Make sure the empty queue handler will get run later. | 1155 | * Make sure the empty queue handler will get run later. |
| 1151 | */ | 1156 | */ |
| 1152 | q->empty = skb_queue_empty(&q->out) && skb_queue_empty(&q->control) && | 1157 | q->empty = (list_empty(&q->out_chunk_list) && |
| 1153 | list_empty(&q->retransmit); | 1158 | list_empty(&q->control_chunk_list) && |
| 1159 | list_empty(&q->retransmit)); | ||
| 1154 | if (!q->empty) | 1160 | if (!q->empty) |
| 1155 | goto finish; | 1161 | goto finish; |
| 1156 | 1162 | ||
| @@ -1679,9 +1685,9 @@ static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn) | |||
| 1679 | if (TSN_lte(tsn, ctsn)) { | 1685 | if (TSN_lte(tsn, ctsn)) { |
| 1680 | list_del_init(lchunk); | 1686 | list_del_init(lchunk); |
| 1681 | if (!chunk->tsn_gap_acked) { | 1687 | if (!chunk->tsn_gap_acked) { |
| 1682 | chunk->transport->flight_size -= | 1688 | chunk->transport->flight_size -= |
| 1683 | sctp_data_size(chunk); | 1689 | sctp_data_size(chunk); |
| 1684 | q->outstanding_bytes -= sctp_data_size(chunk); | 1690 | q->outstanding_bytes -= sctp_data_size(chunk); |
| 1685 | } | 1691 | } |
| 1686 | sctp_chunk_free(chunk); | 1692 | sctp_chunk_free(chunk); |
| 1687 | } else { | 1693 | } else { |
| @@ -1729,7 +1735,7 @@ static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn) | |||
| 1729 | nskips, &ftsn_skip_arr[0]); | 1735 | nskips, &ftsn_skip_arr[0]); |
| 1730 | 1736 | ||
| 1731 | if (ftsn_chunk) { | 1737 | if (ftsn_chunk) { |
| 1732 | __skb_queue_tail(&q->control, (struct sk_buff *)ftsn_chunk); | 1738 | list_add_tail(&ftsn_chunk->list, &q->control_chunk_list); |
| 1733 | SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); | 1739 | SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); |
| 1734 | } | 1740 | } |
| 1735 | } | 1741 | } |
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 5baed9bb7de5..773cd93fa3d0 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c | |||
| @@ -1003,6 +1003,7 @@ struct sctp_chunk *sctp_chunkify(struct sk_buff *skb, | |||
| 1003 | SCTP_DEBUG_PRINTK("chunkifying skb %p w/o an sk\n", skb); | 1003 | SCTP_DEBUG_PRINTK("chunkifying skb %p w/o an sk\n", skb); |
| 1004 | } | 1004 | } |
| 1005 | 1005 | ||
| 1006 | INIT_LIST_HEAD(&retval->list); | ||
| 1006 | retval->skb = skb; | 1007 | retval->skb = skb; |
| 1007 | retval->asoc = (struct sctp_association *)asoc; | 1008 | retval->asoc = (struct sctp_association *)asoc; |
| 1008 | retval->resent = 0; | 1009 | retval->resent = 0; |
| @@ -1116,8 +1117,7 @@ static void sctp_chunk_destroy(struct sctp_chunk *chunk) | |||
| 1116 | /* Possibly, free the chunk. */ | 1117 | /* Possibly, free the chunk. */ |
| 1117 | void sctp_chunk_free(struct sctp_chunk *chunk) | 1118 | void sctp_chunk_free(struct sctp_chunk *chunk) |
| 1118 | { | 1119 | { |
| 1119 | /* Make sure that we are not on any list. */ | 1120 | BUG_ON(!list_empty(&chunk->list)); |
| 1120 | skb_unlink((struct sk_buff *) chunk); | ||
| 1121 | list_del_init(&chunk->transmitted_list); | 1121 | list_del_init(&chunk->transmitted_list); |
| 1122 | 1122 | ||
| 1123 | /* Release our reference on the message tracker. */ | 1123 | /* Release our reference on the message tracker. */ |
| @@ -2739,8 +2739,12 @@ int sctp_process_asconf_ack(struct sctp_association *asoc, | |||
| 2739 | asoc->addip_last_asconf = NULL; | 2739 | asoc->addip_last_asconf = NULL; |
| 2740 | 2740 | ||
| 2741 | /* Send the next asconf chunk from the addip chunk queue. */ | 2741 | /* Send the next asconf chunk from the addip chunk queue. */ |
| 2742 | asconf = (struct sctp_chunk *)__skb_dequeue(&asoc->addip_chunks); | 2742 | if (!list_empty(&asoc->addip_chunk_list)) { |
| 2743 | if (asconf) { | 2743 | struct list_head *entry = asoc->addip_chunk_list.next; |
| 2744 | asconf = list_entry(entry, struct sctp_chunk, list); | ||
| 2745 | |||
| 2746 | list_del_init(entry); | ||
| 2747 | |||
| 2744 | /* Hold the chunk until an ASCONF_ACK is received. */ | 2748 | /* Hold the chunk until an ASCONF_ACK is received. */ |
| 2745 | sctp_chunk_hold(asconf); | 2749 | sctp_chunk_hold(asconf); |
| 2746 | if (sctp_primitive_ASCONF(asoc, asconf)) | 2750 | if (sctp_primitive_ASCONF(asoc, asconf)) |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index aad55dc3792b..091a66f06a35 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
| @@ -406,7 +406,7 @@ static int sctp_send_asconf(struct sctp_association *asoc, | |||
| 406 | * transmission. | 406 | * transmission. |
| 407 | */ | 407 | */ |
| 408 | if (asoc->addip_last_asconf) { | 408 | if (asoc->addip_last_asconf) { |
| 409 | __skb_queue_tail(&asoc->addip_chunks, (struct sk_buff *)chunk); | 409 | list_add_tail(&chunk->list, &asoc->addip_chunk_list); |
| 410 | goto out; | 410 | goto out; |
| 411 | } | 411 | } |
| 412 | 412 | ||
