author     Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-04-27 12:26:46 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-04-27 12:26:46 -0400
commit     15c54033964a943de7b0763efd3bd0ede7326395 (patch)
tree       840b292612d1b5396d5bab5bde537a9013db3ceb /net/sctp/ulpqueue.c
parent     ad5da3cf39a5b11a198929be1f2644e17ecd767e (diff)
parent     912a41a4ab935ce8c4308428ec13fc7f8b1f18f4 (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6: (448 commits)
  [IPV4] nl_fib_lookup: Initialise res.r before fib_res_put(&res)
  [IPV6]: Fix thinko in ipv6_rthdr_rcv() changes.
  [IPV4]: Add multipath cached to feature-removal-schedule.txt
  [WIRELESS] cfg80211: Clarify locking comment.
  [WIRELESS] cfg80211: Fix locking in wiphy_new.
  [WEXT] net_device: Don't include wext bits if not required.
  [WEXT]: Misc code cleanups.
  [WEXT]: Reduce inline abuse.
  [WEXT]: Move EXPORT_SYMBOL statements where they belong.
  [WEXT]: Cleanup early ioctl call path.
  [WEXT]: Remove options.
  [WEXT]: Remove dead debug code.
  [WEXT]: Clean up how wext is called.
  [WEXT]: Move to net/wireless
  [AFS]: Eliminate cmpxchg() usage in vlocation code.
  [RXRPC]: Fix pointers passed to bitops.
  [RXRPC]: Remove bogus atomic_* overrides.
  [AFS]: Fix u64 printing in debug logging.
  [AFS]: Add "directory write" support.
  [AFS]: Implement the CB.InitCallBackState3 operation.
  ...
Diffstat (limited to 'net/sctp/ulpqueue.c')
-rw-r--r--  net/sctp/ulpqueue.c | 173
1 file changed, 137 insertions(+), 36 deletions(-)
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index b29e3e4b72c9..34eb977a204d 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -138,26 +138,59 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 /* Clear the partial delivery mode for this socket. Note: This
  * assumes that no association is currently in partial delivery mode.
  */
-int sctp_clear_pd(struct sock *sk)
+int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
 {
         struct sctp_sock *sp = sctp_sk(sk);
 
-        sp->pd_mode = 0;
-        if (!skb_queue_empty(&sp->pd_lobby)) {
-                struct list_head *list;
-                sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue);
-                list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
-                INIT_LIST_HEAD(list);
-                return 1;
+        if (atomic_dec_and_test(&sp->pd_mode)) {
+                /* This means there are no other associations in PD, so
+                 * we can go ahead and clear out the lobby in one shot
+                 */
+                if (!skb_queue_empty(&sp->pd_lobby)) {
+                        struct list_head *list;
+                        sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue);
+                        list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
+                        INIT_LIST_HEAD(list);
+                        return 1;
+                }
+        } else {
+                /* There are other associations in PD, so we only need to
+                 * pull stuff out of the lobby that belongs to the
+                 * associations that is exiting PD (all of its notifications
+                 * are posted here).
+                 */
+                if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
+                        struct sk_buff *skb, *tmp;
+                        struct sctp_ulpevent *event;
+
+                        sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
+                                event = sctp_skb2event(skb);
+                                if (event->asoc == asoc) {
+                                        __skb_unlink(skb, &sp->pd_lobby);
+                                        __skb_queue_tail(&sk->sk_receive_queue,
+                                                         skb);
+                                }
+                        }
+                }
         }
+
         return 0;
 }
 
+/* Set the pd_mode on the socket and ulpq */
+static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
+{
+        struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);
+
+        atomic_inc(&sp->pd_mode);
+        ulpq->pd_mode = 1;
+}
+
 /* Clear the pd_mode and restart any pending messages waiting for delivery. */
 static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
 {
         ulpq->pd_mode = 0;
-        return sctp_clear_pd(ulpq->asoc->base.sk);
+        return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
 }
 
 /* If the SKB of 'event' is on a list, it is the first such member
@@ -187,25 +220,35 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
          * the association the cause of the partial delivery.
          */
 
-        if (!sctp_sk(sk)->pd_mode) {
+        if (atomic_read(&sctp_sk(sk)->pd_mode) == 0) {
                 queue = &sk->sk_receive_queue;
-        } else if (ulpq->pd_mode) {
-                /* If the association is in partial delivery, we
-                 * need to finish delivering the partially processed
-                 * packet before passing any other data. This is
-                 * because we don't truly support stream interleaving.
-                 */
-                if ((event->msg_flags & MSG_NOTIFICATION) ||
-                    (SCTP_DATA_NOT_FRAG ==
-                    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
-                        queue = &sctp_sk(sk)->pd_lobby;
-                else {
-                        clear_pd = event->msg_flags & MSG_EOR;
-                        queue = &sk->sk_receive_queue;
+        } else {
+                if (ulpq->pd_mode) {
+                        /* If the association is in partial delivery, we
+                         * need to finish delivering the partially processed
+                         * packet before passing any other data. This is
+                         * because we don't truly support stream interleaving.
+                         */
+                        if ((event->msg_flags & MSG_NOTIFICATION) ||
+                            (SCTP_DATA_NOT_FRAG ==
+                            (event->msg_flags & SCTP_DATA_FRAG_MASK)))
+                                queue = &sctp_sk(sk)->pd_lobby;
+                        else {
+                                clear_pd = event->msg_flags & MSG_EOR;
+                                queue = &sk->sk_receive_queue;
+                        }
+                } else {
+                        /*
+                         * If fragment interleave is enabled, we
+                         * can queue this to the recieve queue instead
+                         * of the lobby.
+                         */
+                        if (sctp_sk(sk)->frag_interleave)
+                                queue = &sk->sk_receive_queue;
+                        else
+                                queue = &sctp_sk(sk)->pd_lobby;
                 }
-        } else
-                queue = &sctp_sk(sk)->pd_lobby;
-
+        }
 
         /* If we are harvesting multiple skbs they will be
          * collected on a list.
@@ -348,7 +391,7 @@ static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *qu
                         break;
                 pos->next = pnext;
                 pos = pnext;
-        };
+        }
 
         event = sctp_skb2event(f_frag);
         SCTP_INC_STATS(SCTP_MIB_REASMUSRMSGS);
@@ -367,6 +410,11 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_u
         struct sk_buff *first_frag = NULL;
         __u32 ctsn, next_tsn;
         struct sctp_ulpevent *retval = NULL;
+        struct sk_buff *pd_first = NULL;
+        struct sk_buff *pd_last = NULL;
+        size_t pd_len = 0;
+        struct sctp_association *asoc;
+        u32 pd_point;
 
         /* Initialized to 0 just to avoid compiler warning message. Will
          * never be used with this value. It is referenced only after it
@@ -382,6 +430,10 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_u
          * we expect to find the remaining middle fragments and the last
          * fragment in order. If not, first_frag is reset to NULL and we
          * start the next pass when we find another first fragment.
+         *
+         * There is a potential to do partial delivery if user sets
+         * SCTP_PARTIAL_DELIVERY_POINT option. Lets count some things here
+         * to see if can do PD.
          */
         skb_queue_walk(&ulpq->reasm, pos) {
                 cevent = sctp_skb2event(pos);
@@ -389,14 +441,32 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_u
 
                 switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                 case SCTP_DATA_FIRST_FRAG:
+                        /* If this "FIRST_FRAG" is the first
+                         * element in the queue, then count it towards
+                         * possible PD.
+                         */
+                        if (pos == ulpq->reasm.next) {
+                                pd_first = pos;
+                                pd_last = pos;
+                                pd_len = pos->len;
+                        } else {
+                                pd_first = NULL;
+                                pd_last = NULL;
+                                pd_len = 0;
+                        }
+
                         first_frag = pos;
                         next_tsn = ctsn + 1;
                         break;
 
                 case SCTP_DATA_MIDDLE_FRAG:
-                        if ((first_frag) && (ctsn == next_tsn))
+                        if ((first_frag) && (ctsn == next_tsn)) {
                                 next_tsn++;
-                        else
+                                if (pd_first) {
+                                        pd_last = pos;
+                                        pd_len += pos->len;
+                                }
+                        } else
                                 first_frag = NULL;
                         break;
 
@@ -406,8 +476,29 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_u
                         else
                                 first_frag = NULL;
                         break;
-                };
+                }
+        }
 
+        asoc = ulpq->asoc;
+        if (pd_first) {
+                /* Make sure we can enter partial deliver.
+                 * We can trigger partial delivery only if framgent
+                 * interleave is set, or the socket is not already
+                 * in partial delivery.
+                 */
+                if (!sctp_sk(asoc->base.sk)->frag_interleave &&
+                    atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
+                        goto done;
+
+                cevent = sctp_skb2event(pd_first);
+                pd_point = sctp_sk(asoc->base.sk)->pd_point;
+                if (pd_point && pd_point <= pd_len) {
+                        retval = sctp_make_reassembled_event(&ulpq->reasm,
+                                                             pd_first,
+                                                             pd_last);
+                        if (retval)
+                                sctp_ulpq_set_pd(ulpq);
+                }
         }
 done:
         return retval;
@@ -465,7 +556,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq
                         goto done;
                 default:
                         return NULL;
-                };
+                }
         }
 
         /* We have the reassembled event. There is no need to look
@@ -557,7 +648,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *u
                         break;
                 default:
                         return NULL;
-                };
+                }
         }
 
         /* We have the reassembled event. There is no need to look
@@ -826,19 +917,29 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
 {
         struct sctp_ulpevent *event;
         struct sctp_association *asoc;
+        struct sctp_sock *sp;
 
         asoc = ulpq->asoc;
+        sp = sctp_sk(asoc->base.sk);
 
-        /* Are we already in partial delivery mode? */
-        if (!sctp_sk(asoc->base.sk)->pd_mode) {
+        /* If the association is already in Partial Delivery mode
+         * we have noting to do.
+         */
+        if (ulpq->pd_mode)
+                return;
 
+        /* If the user enabled fragment interleave socket option,
+         * multiple associations can enter partial delivery.
+         * Otherwise, we can only enter partial delivery if the
+         * socket is not in partial deliver mode.
+         */
+        if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
                 /* Is partial delivery possible? */
                 event = sctp_ulpq_retrieve_first(ulpq);
                 /* Send event to the ULP. */
                 if (event) {
                         sctp_ulpq_tail_event(ulpq, event);
-                        sctp_sk(asoc->base.sk)->pd_mode = 1;
-                        ulpq->pd_mode = 1;
+                        sctp_ulpq_set_pd(ulpq);
                         return;
                 }
         }
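
With this patch applied, sp->pd_mode is no longer a simple flag: it counts how many associations on the socket are currently in partial delivery (PD), while each ulpq keeps its own pd_mode flag. sctp_ulpq_set_pd() increments the counter, and sctp_clear_pd() either flushes the whole pd_lobby (when the last association leaves PD) or unlinks only that association's events. The standalone C sketch below models just that counting scheme; the names pd_socket, pd_assoc, pd_enter, pd_leave and pd_allowed are hypothetical stand-ins for illustration only, and C11 <stdatomic.h> is used here as a loose user-space analogue of the kernel's atomic_inc()/atomic_dec_and_test().

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical user-space model of the PD accounting above: the socket
 * counts how many associations are in partial delivery, and each
 * association keeps its own flag. */
struct pd_socket {
        atomic_int pd_mode;      /* number of associations currently in PD */
        bool frag_interleave;    /* models the fragment interleave option */
};

struct pd_assoc {
        struct pd_socket *sk;
        bool pd_mode;            /* this association is in PD */
};

/* Roughly what sctp_ulpq_set_pd() does: flag the association and bump
 * the socket-wide counter. */
static void pd_enter(struct pd_assoc *a)
{
        a->pd_mode = true;
        atomic_fetch_add(&a->sk->pd_mode, 1);
}

/* Roughly what sctp_ulpq_clear_pd()/sctp_clear_pd() do: clear the flag and
 * report whether this was the last association in PD, i.e. the case where
 * the real code may flush the whole pd_lobby in one shot. */
static bool pd_leave(struct pd_assoc *a)
{
        a->pd_mode = false;
        return atomic_fetch_sub(&a->sk->pd_mode, 1) == 1;
}

/* May this association start PD?  With fragment interleave enabled several
 * associations may be in PD at once; otherwise only one at a time. */
static bool pd_allowed(const struct pd_assoc *a)
{
        return a->sk->frag_interleave || atomic_load(&a->sk->pd_mode) == 0;
}

int main(void)
{
        struct pd_socket sk = { .frag_interleave = true };
        struct pd_assoc a1 = { .sk = &sk }, a2 = { .sk = &sk };

        atomic_init(&sk.pd_mode, 0);

        if (pd_allowed(&a1))
                pd_enter(&a1);
        if (pd_allowed(&a2))   /* allowed only because interleave is on */
                pd_enter(&a2);

        printf("a1 last out? %d\n", pd_leave(&a1));  /* 0: a2 still in PD */
        printf("a2 last out? %d\n", pd_leave(&a2));  /* 1: lobby could be flushed */
        return 0;
}

Compiled on its own, the demo prints 0 for the first pd_leave() call (the other association is still in PD) and 1 for the second, which corresponds to the point where the kernel code is free to flush the whole lobby at once.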