author		Vlad Yasevich <vladislav.yasevich@hp.com>	2007-03-23 14:32:00 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2007-04-26 01:28:00 -0400
commit		d49d91d79a8dc5e85108a5ae1c8eef23dec135c1 (patch)
tree		e98b53a69b310128a03a06fcc1dd9f94f7aa34b2 /net/sctp/ulpqueue.c
parent		b6e1331f3ce25a56edb956054eaf8011654686cb (diff)
[SCTP]: Implement SCTP_PARTIAL_DELIVERY_POINT option.
This option causes partial delivery to run as soon as the
specified amount of data has accumulated on the association.
However, we still give preference to fully reassembled
messages over partially delivered ones. In either case,
receive window and buffer space are freed up.
Signed-off-by: Vlad Yasevich <vladislav.yasevich@hp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
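For context, SCTP_PARTIAL_DELIVERY_POINT is configured from user space as a
byte count via setsockopt(). The sketch below shows one plausible way to set
it; it assumes the lksctp-tools <netinet/sctp.h> header and a one-to-one
style socket, and it trims association setup and most error handling. The
8192-byte threshold is an arbitrary example value, not something this patch
prescribes.

	#include <stdio.h>
	#include <stdint.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <netinet/sctp.h>

	int main(void)
	{
		int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
		uint32_t pd_point = 8192;	/* begin PD once 8 KB has accumulated */

		if (fd < 0 || setsockopt(fd, IPPROTO_SCTP,
					 SCTP_PARTIAL_DELIVERY_POINT,
					 &pd_point, sizeof(pd_point)) < 0) {
			perror("SCTP_PARTIAL_DELIVERY_POINT");
			return 1;
		}
		return 0;
	}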
Diffstat (limited to 'net/sctp/ulpqueue.c')
-rw-r--r--	net/sctp/ulpqueue.c | 64
1 file changed, 60 insertions(+), 4 deletions(-)
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index ac80c34f6c2c..0fa4d4d4df17 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -177,6 +177,15 @@ int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
 	return 0;
 }
 
+/* Set the pd_mode on the socket and ulpq */
+static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
+{
+	struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);
+
+	atomic_inc(&sp->pd_mode);
+	ulpq->pd_mode = 1;
+}
+
 /* Clear the pd_mode and restart any pending messages waiting for delivery. */
 static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
 {
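The helper above pairs a per-ulpq flag with a socket-wide atomic counter;
with fragment interleave enabled, several associations on one socket can be
in partial delivery at once, so the socket needs a count rather than a
boolean. A minimal user-space model of that bookkeeping, with illustrative
names rather than the kernel's:

	#include <stdatomic.h>

	struct pd_sock  { atomic_int pd_mode; };	/* # of assocs in PD mode */
	struct pd_assoc { struct pd_sock *sk; int pd_mode; };

	static void pd_enter(struct pd_assoc *a)	/* cf. sctp_ulpq_set_pd */
	{
		atomic_fetch_add(&a->sk->pd_mode, 1);
		a->pd_mode = 1;
	}

	static void pd_leave(struct pd_assoc *a)	/* cf. sctp_ulpq_clear_pd */
	{
		if (a->pd_mode) {
			a->pd_mode = 0;
			atomic_fetch_sub(&a->sk->pd_mode, 1);
		}
	}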
@@ -401,6 +410,11 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_u
 	struct sk_buff *first_frag = NULL;
 	__u32 ctsn, next_tsn;
 	struct sctp_ulpevent *retval = NULL;
+	struct sk_buff *pd_first = NULL;
+	struct sk_buff *pd_last = NULL;
+	size_t pd_len = 0;
+	struct sctp_association *asoc;
+	u32 pd_point;
 
 	/* Initialized to 0 just to avoid compiler warning message. Will
 	 * never be used with this value. It is referenced only after it
@@ -416,6 +430,10 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_u
 	 * we expect to find the remaining middle fragments and the last
 	 * fragment in order. If not, first_frag is reset to NULL and we
 	 * start the next pass when we find another first fragment.
+	 *
+	 * There is a potential to do partial delivery if the user sets
+	 * the SCTP_PARTIAL_DELIVERY_POINT option. Let's count some things
+	 * here to see if we can do PD.
 	 */
 	skb_queue_walk(&ulpq->reasm, pos) {
 		cevent = sctp_skb2event(pos);
@@ -423,14 +441,32 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_u
 
 		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
 		case SCTP_DATA_FIRST_FRAG:
+			/* If this "FIRST_FRAG" is the first
+			 * element in the queue, then count it towards
+			 * possible PD.
+			 */
+			if (pos == ulpq->reasm.next) {
+				pd_first = pos;
+				pd_last = pos;
+				pd_len = pos->len;
+			} else {
+				pd_first = NULL;
+				pd_last = NULL;
+				pd_len = 0;
+			}
+
 			first_frag = pos;
 			next_tsn = ctsn + 1;
 			break;
 
 		case SCTP_DATA_MIDDLE_FRAG:
-			if ((first_frag) && (ctsn == next_tsn))
+			if ((first_frag) && (ctsn == next_tsn)) {
 				next_tsn++;
-			else
+				if (pd_first) {
+					pd_last = pos;
+					pd_len += pos->len;
+				}
+			} else
 				first_frag = NULL;
 			break;
 
@@ -441,7 +477,28 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_u
 			first_frag = NULL;
 			break;
 		};
+	}
+
+	asoc = ulpq->asoc;
+	if (pd_first) {
+		/* Make sure we can enter partial delivery.
+		 * We can trigger partial delivery only if fragment
+		 * interleave is set, or the socket is not already
+		 * in partial delivery.
+		 */
+		if (!sctp_sk(asoc->base.sk)->frag_interleave &&
+		    atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
+			goto done;
 
+		cevent = sctp_skb2event(pd_first);
+		pd_point = sctp_sk(asoc->base.sk)->pd_point;
+		if (pd_point && pd_point <= pd_len) {
+			retval = sctp_make_reassembled_event(&ulpq->reasm,
+							     pd_first,
+							     pd_last);
+			if (retval)
+				sctp_ulpq_set_pd(ulpq);
+		}
 	}
 done:
 	return retval;
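Taken together, the walk above accumulates pd_len only while the queue starts
with a first fragment and the TSNs stay contiguous, and the block after the
walk triggers partial delivery once pd_len reaches the configured pd_point.
Below is a rough stand-alone model of that check, using a hypothetical
fragment list in place of the kernel's sk_buff queue:

	#include <stddef.h>
	#include <stdint.h>

	struct frag {
		uint32_t tsn;		/* transmission sequence number */
		size_t   len;
		int      is_first;	/* SCTP_DATA_FIRST_FRAG analogue */
		struct frag *next;
	};

	/* Nonzero if partial delivery may start for this reassembly queue. */
	static int pd_ready(const struct frag *head, size_t pd_point)
	{
		size_t pd_len;
		uint32_t next_tsn;
		const struct frag *f;

		if (!head || !head->is_first || !pd_point)
			return 0;

		pd_len = head->len;
		next_tsn = head->tsn + 1;
		for (f = head->next; f; f = f->next) {
			if (f->tsn != next_tsn)
				break;		/* gap: stop accumulating */
			pd_len += f->len;
			next_tsn++;
		}
		return pd_len >= pd_point;
	}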
@@ -882,8 +939,7 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
 	/* Send event to the ULP. */
 	if (event) {
 		sctp_ulpq_tail_event(ulpq, event);
-		atomic_inc(&sp->pd_mode);
-		ulpq->pd_mode = 1;
+		sctp_ulpq_set_pd(ulpq);
 		return;
 	}
 }