Diffstat (limited to 'net/sctp')

-rw-r--r--  net/sctp/endpointola.c |  2
-rw-r--r--  net/sctp/socket.c      |  6
-rw-r--r--  net/sctp/ssnmap.c      |  8
-rw-r--r--  net/sctp/tsnmap.c      | 13
-rw-r--r--  net/sctp/ulpqueue.c    | 87

5 files changed, 85 insertions, 31 deletions
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 2b3ef03c6098..12ed45dbe75d 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -155,7 +155,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
 
 	/* SCTP-AUTH extensions*/
 	INIT_LIST_HEAD(&ep->endpoint_shared_keys);
-	null_key = sctp_auth_shkey_create(0, GFP_KERNEL);
+	null_key = sctp_auth_shkey_create(0, gfp);
 	if (!null_key)
 		goto nomem;
 
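The endpointola.c change threads the caller-supplied gfp through instead of hardcoding GFP_KERNEL: sctp_endpoint_init() already receives a gfp_t, and GFP_KERNEL may sleep, which is unsafe if the endpoint is ever initialized from a context that forbids blocking. A minimal sketch of the pattern, with hypothetical names standing in for the SCTP-AUTH helpers:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

struct shkey {
	struct list_head list;
	__u16 id;
};

/* An allocation helper should honor the caller's gfp flags; a
 * hardcoded GFP_KERNEL here could sleep in atomic context.
 */
static struct shkey *shkey_create(__u16 id, gfp_t gfp)
{
	struct shkey *key = kzalloc(sizeof(*key), gfp);

	if (key) {
		INIT_LIST_HEAD(&key->list);
		key->id = id;
	}
	return key;
}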
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index c99458df3f3f..b9070736b8d9 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -5653,6 +5653,9 @@ static int sctp_getsockopt_assoc_stats(struct sock *sk, int len,
 	if (len < sizeof(sctp_assoc_t))
 		return -EINVAL;
 
+	/* Allow the struct to grow and fill in as much as possible */
+	len = min_t(size_t, len, sizeof(sas));
+
 	if (copy_from_user(&sas, optval, len))
 		return -EFAULT;
 
@@ -5686,9 +5689,6 @@ static int sctp_getsockopt_assoc_stats(struct sock *sk, int len,
 	/* Mark beginning of a new observation period */
 	asoc->stats.max_obs_rto = asoc->rto_min;
 
-	/* Allow the struct to grow and fill in as much as possible */
-	len = min_t(size_t, len, sizeof(sas));
-
 	if (put_user(len, optlen))
 		return -EFAULT;
 
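Moving the min_t() clamp ahead of copy_from_user() is the substantive fix here: previously a user could pass an optlen larger than sizeof(sas) and the kernel would copy that many bytes into the on-stack struct, overrunning it; the clamp only affected the length reported back through optlen. A condensed sketch of the safe ordering (error paths trimmed; sas is the on-stack struct sctp_assoc_stats from the patch context):

#include <linux/kernel.h>
#include <linux/sctp.h>
#include <linux/uaccess.h>

/* Clamp the user-supplied length to the kernel buffer size *before*
 * copying; copying first with an unclamped len overruns 'sas'.
 */
static int getsockopt_stats_sketch(char __user *optval, int len)
{
	struct sctp_assoc_stats sas;

	len = min_t(size_t, len, sizeof(sas));	/* must precede the copy */

	if (copy_from_user(&sas, optval, len))
		return -EFAULT;

	/* ... fill in sas, then copy len bytes back to userspace ... */
	return 0;
}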
diff --git a/net/sctp/ssnmap.c b/net/sctp/ssnmap.c
index 442ad4ed6315..825ea94415b3 100644
--- a/net/sctp/ssnmap.c
+++ b/net/sctp/ssnmap.c
@@ -41,8 +41,6 @@
 #include <net/sctp/sctp.h>
 #include <net/sctp/sm.h>
 
-#define MAX_KMALLOC_SIZE	131072
-
 static struct sctp_ssnmap *sctp_ssnmap_init(struct sctp_ssnmap *map, __u16 in,
 					    __u16 out);
 
@@ -65,7 +63,7 @@ struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out,
 	int size;
 
 	size = sctp_ssnmap_size(in, out);
-	if (size <= MAX_KMALLOC_SIZE)
+	if (size <= KMALLOC_MAX_SIZE)
 		retval = kmalloc(size, gfp);
 	else
 		retval = (struct sctp_ssnmap *)
@@ -82,7 +80,7 @@ struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out,
 	return retval;
 
 fail_map:
-	if (size <= MAX_KMALLOC_SIZE)
+	if (size <= KMALLOC_MAX_SIZE)
 		kfree(retval);
 	else
 		free_pages((unsigned long)retval, get_order(size));
@@ -124,7 +122,7 @@ void sctp_ssnmap_free(struct sctp_ssnmap *map)
 	int size;
 
 	size = sctp_ssnmap_size(map->in.len, map->out.len);
-	if (size <= MAX_KMALLOC_SIZE)
+	if (size <= KMALLOC_MAX_SIZE)
 		kfree(map);
 	else
 		free_pages((unsigned long)map, get_order(size));
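All three ssnmap sites switch from a private MAX_KMALLOC_SIZE (131072) to the kernel-wide KMALLOC_MAX_SIZE, so the kmalloc()/__get_free_pages() split tracks what the slab allocator actually supports on the running configuration. The essential invariant is that allocation and free take the same branch for the same size; a compact sketch of the pattern:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/slab.h>

/* Large maps fall back to the page allocator.  The free path must
 * apply the same KMALLOC_MAX_SIZE threshold as the alloc path, or a
 * kmalloc'd buffer ends up in free_pages() (and vice versa).
 */
static void *map_alloc(size_t size, gfp_t gfp)
{
	if (size <= KMALLOC_MAX_SIZE)
		return kmalloc(size, gfp);
	return (void *)__get_free_pages(gfp, get_order(size));
}

static void map_free(void *p, size_t size)
{
	if (size <= KMALLOC_MAX_SIZE)
		kfree(p);
	else
		free_pages((unsigned long)p, get_order(size));
}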
diff --git a/net/sctp/tsnmap.c b/net/sctp/tsnmap.c
index 5f25e0c92c31..396c45174e5b 100644
--- a/net/sctp/tsnmap.c
+++ b/net/sctp/tsnmap.c
@@ -51,7 +51,7 @@
 static void sctp_tsnmap_update(struct sctp_tsnmap *map);
 static void sctp_tsnmap_find_gap_ack(unsigned long *map, __u16 off,
 				     __u16 len, __u16 *start, __u16 *end);
-static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 gap);
+static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 size);
 
 /* Initialize a block of memory as a tsnmap.  */
 struct sctp_tsnmap *sctp_tsnmap_init(struct sctp_tsnmap *map, __u16 len,
@@ -124,7 +124,7 @@ int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn,
 
 	gap = tsn - map->base_tsn;
 
-	if (gap >= map->len && !sctp_tsnmap_grow(map, gap))
+	if (gap >= map->len && !sctp_tsnmap_grow(map, gap + 1))
 		return -ENOMEM;
 
 	if (!sctp_tsnmap_has_gap(map) && gap == 0) {
@@ -360,23 +360,24 @@ __u16 sctp_tsnmap_num_gabs(struct sctp_tsnmap *map,
 	return ngaps;
 }
 
-static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 gap)
+static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 size)
 {
 	unsigned long *new;
 	unsigned long inc;
 	u16 len;
 
-	if (gap >= SCTP_TSN_MAP_SIZE)
+	if (size > SCTP_TSN_MAP_SIZE)
 		return 0;
 
-	inc = ALIGN((gap - map->len),BITS_PER_LONG) + SCTP_TSN_MAP_INCREMENT;
+	inc = ALIGN((size - map->len), BITS_PER_LONG) + SCTP_TSN_MAP_INCREMENT;
 	len = min_t(u16, map->len + inc, SCTP_TSN_MAP_SIZE);
 
 	new = kzalloc(len>>3, GFP_ATOMIC);
 	if (!new)
 		return 0;
 
-	bitmap_copy(new, map->tsn_map, map->max_tsn_seen - map->base_tsn);
+	bitmap_copy(new, map->tsn_map,
+		    map->max_tsn_seen - map->cumulative_tsn_ack_point);
 	kfree(map->tsn_map);
 	map->tsn_map = new;
 	map->len = len;
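Two related off-by-one hazards are addressed here. First, sctp_tsnmap_grow() now takes the required size in bits (gap + 1) rather than a zero-based bit index, so the growth arithmetic computes room for the bit actually being marked. Second, the bitmap_copy() length is measured from the cumulative TSN ACK point, which is what bit 0 of the map represents, rather than from base_tsn; copying too few bits silently dropped mark state for TSNs above the ACK point. A self-contained sketch of the size-based growth pattern (generic names, not the kernel function):

#include <linux/bitmap.h>
#include <linux/kernel.h>
#include <linux/slab.h>

/* Grow a bitmap so that 'size' bits fit (i.e. highest index size - 1).
 * Taking a size rather than an index keeps the "do we fit?" test and
 * the growth arithmetic free of +1/-1 adjustments at the call sites.
 */
static int bitmap_grow_sketch(unsigned long **bits, u16 *len,
			      u16 size, u16 cap)
{
	unsigned long *new;
	u16 newlen;

	if (size > cap)
		return 0;			/* hard upper bound reached */

	newlen = min_t(u16, ALIGN(size, BITS_PER_LONG), cap);
	new = kzalloc(BITS_TO_LONGS(newlen) * sizeof(unsigned long),
		      GFP_ATOMIC);
	if (!new)
		return 0;

	bitmap_copy(new, *bits, *len);		/* keep every valid old bit */
	kfree(*bits);
	*bits = new;
	*len = newlen;
	return 1;
}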
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index ada17464b65b..0fd5b3d2df03 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -106,6 +106,7 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 {
 	struct sk_buff_head temp;
 	struct sctp_ulpevent *event;
+	int event_eor = 0;
 
 	/* Create an event from the incoming chunk. */
 	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
@@ -127,10 +128,12 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 	/* Send event to the ULP.  'event' is the sctp_ulpevent for
 	 * very first SKB on the 'temp' list.
 	 */
-	if (event)
+	if (event) {
+		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
 		sctp_ulpq_tail_event(ulpq, event);
+	}
 
-	return 0;
+	return event_eor;
 }
 
 /* Add a new event for propagation to the ULP.  */
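sctp_ulpq_tail_data() now reports whether the event it handed up completed a message: MSG_EOR set on the event means the user will see a full message, its absence means only a fragment run went up. Combined with the existing error paths, that gives the function a three-way return contract, sketched below as a small helper (hypothetical name):

#include <linux/socket.h>	/* MSG_EOR */

/* Return contract of sctp_ulpq_tail_data() after this change:
 *   < 0  error, nothing was delivered
 *     0  data queued, but no complete message (no MSG_EOR) yet
 *     1  a complete message was delivered to the socket queue
 *
 * 'msg_flags' mirrors sctp_ulpevent->msg_flags on the delivered event.
 */
static int delivery_result(int delivered, int msg_flags)
{
	if (!delivered)
		return 0;			/* nothing handed to the ULP */
	return (msg_flags & MSG_EOR) ? 1 : 0;	/* 1 => complete message */
}

The caller-side use of this value appears in the sctp_ulpq_renege() hunk at the end of this patch.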
@@ -540,14 +543,19 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
 		ctsn = cevent->tsn;
 
 		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+		case SCTP_DATA_FIRST_FRAG:
+			if (!first_frag)
+				return NULL;
+			goto done;
 		case SCTP_DATA_MIDDLE_FRAG:
 			if (!first_frag) {
 				first_frag = pos;
 				next_tsn = ctsn + 1;
 				last_frag = pos;
-			} else if (next_tsn == ctsn)
+			} else if (next_tsn == ctsn) {
 				next_tsn++;
-			else
+				last_frag = pos;
+			} else
 				goto done;
 			break;
 		case SCTP_DATA_LAST_FRAG:
@@ -651,6 +659,14 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
 		} else
 			goto done;
 		break;
+
+	case SCTP_DATA_LAST_FRAG:
+		if (!first_frag)
+			return NULL;
+		else
+			goto done;
+		break;
+
 	default:
 		return NULL;
 	}
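Both reassembly walkers gain bail-outs for fragment types that cannot extend the current run: sctp_ulpq_retrieve_partial() stops when it meets a FIRST fragment (a new message begins there), and sctp_ulpq_retrieve_first() returns NULL on a LAST fragment seen before any FIRST. The MIDDLE_FRAG branch also now advances last_frag as the run grows, so the delivered span is not truncated to its first fragment. A simplified, array-based model of the "deliverable run" test (not the kernel code):

#include <linux/types.h>

enum frag_type { FRAG_FIRST, FRAG_MIDDLE, FRAG_LAST };

/* Simplified model: scan fragments queued in TSN order and decide
 * whether a partial run is deliverable.  A run is MIDDLE fragments
 * with consecutive TSNs; a FIRST fragment always ends the scan, and
 * a LAST fragment ends the run it closes.
 */
static bool partial_run_deliverable(const enum frag_type *type,
				    const u32 *tsn, int n)
{
	bool started = false;
	u32 next_tsn = 0;
	int i;

	for (i = 0; i < n; i++) {
		switch (type[i]) {
		case FRAG_FIRST:
			return started;	/* new message: deliver the run, if any */
		case FRAG_MIDDLE:
			if (!started) {
				started = true;
				next_tsn = tsn[i] + 1;
			} else if (tsn[i] == next_tsn) {
				next_tsn++;	/* run extends; last_frag advances */
			} else {
				return started;	/* TSN gap ends the run */
			}
			break;
		case FRAG_LAST:
			return started;	/* the run (if any) ends here */
		}
	}
	return started;
}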
@@ -962,20 +978,43 @@ static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
 		struct sk_buff_head *list, __u16 needed)
 {
 	__u16 freed = 0;
-	__u32 tsn;
-	struct sk_buff *skb;
+	__u32 tsn, last_tsn;
+	struct sk_buff *skb, *flist, *last;
 	struct sctp_ulpevent *event;
 	struct sctp_tsnmap *tsnmap;
 
 	tsnmap = &ulpq->asoc->peer.tsn_map;
 
-	while ((skb = __skb_dequeue_tail(list)) != NULL) {
-		freed += skb_headlen(skb);
+	while ((skb = skb_peek_tail(list)) != NULL) {
 		event = sctp_skb2event(skb);
 		tsn = event->tsn;
 
+		/* Don't renege below the Cumulative TSN ACK Point. */
+		if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
+			break;
+
+		/* Events in ordering queue may have multiple fragments
+		 * corresponding to additional TSNs.  Sum the total
+		 * freed space; find the last TSN.
+		 */
+		freed += skb_headlen(skb);
+		flist = skb_shinfo(skb)->frag_list;
+		for (last = flist; flist; flist = flist->next) {
+			last = flist;
+			freed += skb_headlen(last);
+		}
+		if (last)
+			last_tsn = sctp_skb2event(last)->tsn;
+		else
+			last_tsn = tsn;
+
+		/* Unlink the event, then renege all applicable TSNs. */
+		__skb_unlink(skb, list);
 		sctp_ulpevent_free(event);
-		sctp_tsnmap_renege(tsnmap, tsn);
+		while (TSN_lte(tsn, last_tsn)) {
+			sctp_tsnmap_renege(tsnmap, tsn);
+			tsn++;
+		}
 		if (freed >= needed)
 			return freed;
 	}
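The old renege path dequeued one skb, counted only its linear head, and reneged a single TSN. But an event parked on the ordering queue may be a reassembled message whose remaining fragments hang off skb_shinfo(skb)->frag_list, each carrying its own TSN; freeing the event therefore has to credit all of that memory and renege every TSN in the span. It also now peeks before unlinking, so it can refuse to renege anything at or below the cumulative TSN ACK point. A sketch of the span accounting (sctp_skb2event() is the real accessor from the SCTP headers):

#include <linux/skbuff.h>
#include <net/sctp/sctp.h>

/* Walk an event's fragment list: sum the freed bytes across the head
 * skb and every fragment, and report the first and last TSN so the
 * caller can renege the whole range, not just the head TSN.
 */
static unsigned int event_renege_span(struct sk_buff *skb,
				      __u32 *first_tsn, __u32 *last_tsn)
{
	struct sk_buff *frag;
	unsigned int freed = skb_headlen(skb);

	*first_tsn = *last_tsn = sctp_skb2event(skb)->tsn;

	for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
		freed += skb_headlen(frag);
		*last_tsn = sctp_skb2event(frag)->tsn;
	}
	return freed;
}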
@@ -1002,16 +1041,28 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
 	struct sctp_ulpevent *event;
 	struct sctp_association *asoc;
 	struct sctp_sock *sp;
+	__u32 ctsn;
+	struct sk_buff *skb;
 
 	asoc = ulpq->asoc;
 	sp = sctp_sk(asoc->base.sk);
 
 	/* If the association is already in Partial Delivery mode
-	 * we have noting to do.
+	 * we have nothing to do.
 	 */
 	if (ulpq->pd_mode)
 		return;
 
+	/* Data must be at or below the Cumulative TSN ACK Point to
+	 * start partial delivery.
+	 */
+	skb = skb_peek(&asoc->ulpq.reasm);
+	if (skb != NULL) {
+		ctsn = sctp_skb2event(skb)->tsn;
+		if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
+			return;
+	}
+
 	/* If the user enabled fragment interleave socket option,
 	 * multiple associations can enter partial delivery.
 	 * Otherwise, we can only enter partial delivery if the
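Partial delivery is now gated on the head of the reassembly queue sitting at or below the cumulative TSN ACK point: by the renege change above, such data can no longer be reneged, so the stack can safely commit to streaming it to the user. The TSN_lte() test is serial-number arithmetic, since TSNs wrap modulo 2^32; a sketch of the idiom, mirroring the kernel's TSN_lt/TSN_lte macros:

#include <linux/types.h>

/* Serial-number comparison for 32-bit TSNs: the subtraction wraps,
 * and the sign of the result decides the order, so the test stays
 * correct across the 2^32 rollover.
 */
static inline int tsn_lt(__u32 s, __u32 t)
{
	return (__s32)(s - t) < 0;
}

static inline int tsn_lte(__u32 s, __u32 t)
{
	return s == t || tsn_lt(s, t);
}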
@@ -1054,12 +1105,16 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 	}
 	/* If able to free enough room, accept this chunk. */
 	if (chunk && (freed >= needed)) {
-		__u32 tsn;
-		tsn = ntohl(chunk->subh.data_hdr->tsn);
-		sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn, chunk->transport);
-		sctp_ulpq_tail_data(ulpq, chunk, gfp);
-
-		sctp_ulpq_partial_delivery(ulpq, gfp);
+		int retval;
+		retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
+		/*
+		 * Enter partial delivery if chunk has not been
+		 * delivered; otherwise, drain the reassembly queue.
+		 */
+		if (retval <= 0)
+			sctp_ulpq_partial_delivery(ulpq, gfp);
+		else if (retval == 1)
+			sctp_ulpq_reasm_drain(ulpq);
 	}
 
 	sk_mem_reclaim(asoc->base.sk);