Diffstat (limited to 'net/sctp')
 -rw-r--r--  net/sctp/associola.c      |  29
 -rw-r--r--  net/sctp/debug.c          |   5
 -rw-r--r--  net/sctp/input.c          |  51
 -rw-r--r--  net/sctp/inqueue.c        |   8
 -rw-r--r--  net/sctp/ipv6.c           |  40
 -rw-r--r--  net/sctp/output.c         |   2
 -rw-r--r--  net/sctp/outqueue.c       |  39
 -rw-r--r--  net/sctp/protocol.c       |  20
 -rw-r--r--  net/sctp/sm_make_chunk.c  |  12
 -rw-r--r--  net/sctp/sm_sideeffect.c  |  16
 -rw-r--r--  net/sctp/sm_statefuns.c   |  56
 -rw-r--r--  net/sctp/sm_statetable.c  |   2
 -rw-r--r--  net/sctp/socket.c         | 417
 -rw-r--r--  net/sctp/transport.c      |  34
 -rw-r--r--  net/sctp/ulpevent.c       |  49
 -rw-r--r--  net/sctp/ulpqueue.c       | 168
 16 files changed, 735 insertions, 213 deletions
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index fa82b73c965b..db73ef97485a 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -143,7 +143,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
 	/* Initialize the maximum mumber of new data packets that can be sent
 	 * in a burst.
 	 */
-	asoc->max_burst = sctp_max_burst;
+	asoc->max_burst = sp->max_burst;
 
 	/* initialize association timers */
 	asoc->timeouts[SCTP_EVENT_TIMEOUT_NONE] = 0;
@@ -714,8 +714,16 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
 	/* Record the transition on the transport. */
 	switch (command) {
 	case SCTP_TRANSPORT_UP:
+		/* If we are moving from UNCONFIRMED state due
+		 * to heartbeat success, report the SCTP_ADDR_CONFIRMED
+		 * state to the user, otherwise report SCTP_ADDR_AVAILABLE.
+		 */
+		if (SCTP_UNCONFIRMED == transport->state &&
+		    SCTP_HEARTBEAT_SUCCESS == error)
+			spc_state = SCTP_ADDR_CONFIRMED;
+		else
+			spc_state = SCTP_ADDR_AVAILABLE;
 		transport->state = SCTP_ACTIVE;
-		spc_state = SCTP_ADDR_AVAILABLE;
 		break;
 
 	case SCTP_TRANSPORT_DOWN:
@@ -725,7 +733,7 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
 
 	default:
 		return;
-	};
+	}
 
 	/* Generate and send a SCTP_PEER_ADDR_CHANGE notification to the
 	 * user.
@@ -1046,6 +1054,9 @@ void sctp_assoc_update(struct sctp_association *asoc,
 		trans = list_entry(pos, struct sctp_transport, transports);
 		if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr))
 			sctp_assoc_del_peer(asoc, &trans->ipaddr);
+
+		if (asoc->state >= SCTP_STATE_ESTABLISHED)
+			sctp_transport_reset(trans);
 	}
 
 	/* If the case is A (association restart), use
@@ -1063,6 +1074,18 @@ void sctp_assoc_update(struct sctp_association *asoc,
 		 */
 		sctp_ssnmap_clear(asoc->ssnmap);
 
+		/* Flush the ULP reassembly and ordered queue.
+		 * Any data there will now be stale and will
+		 * cause problems.
+		 */
+		sctp_ulpq_flush(&asoc->ulpq);
+
+		/* reset the overall association error count so
+		 * that the restarted association doesn't get torn
+		 * down on the next retransmission timer.
+		 */
+		asoc->overall_error_count = 0;
+
 	} else {
 		/* Add any peer addresses from the new association. */
 		list_for_each(pos, &new->peer.transport_addr_list) {
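
Editor's note (not part of the patch): the SCTP_TRANSPORT_UP branch above now reports SCTP_ADDR_CONFIRMED when an unconfirmed path is confirmed by a heartbeat, and SCTP_ADDR_AVAILABLE otherwise. A minimal userspace sketch of observing that distinction through the SCTP_PEER_ADDR_CHANGE notification follows; it assumes the <netinet/sctp.h> definitions from lksctp-tools and an already connected SCTP socket "fd", none of which are introduced by this patch.

/* Hedged sketch: subscribe to peer-address events and report when a
 * destination address is confirmed.
 */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

static void watch_addr_events(int fd)
{
	struct sctp_event_subscribe events;
	char buf[1024];
	struct iovec iov = { buf, sizeof(buf) };
	struct msghdr msg;

	memset(&events, 0, sizeof(events));
	events.sctp_data_io_event = 1;
	events.sctp_address_event = 1;	/* deliver SCTP_PEER_ADDR_CHANGE */
	setsockopt(fd, IPPROTO_SCTP, SCTP_EVENTS, &events, sizeof(events));

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	while (recvmsg(fd, &msg, 0) > 0) {
		union sctp_notification *sn = (union sctp_notification *)buf;

		if (!(msg.msg_flags & MSG_NOTIFICATION))
			continue;	/* ordinary user data */
		if (sn->sn_header.sn_type != SCTP_PEER_ADDR_CHANGE)
			continue;
		if (sn->sn_paddr_change.spc_state == SCTP_ADDR_CONFIRMED)
			printf("peer address confirmed by heartbeat\n");
		else if (sn->sn_paddr_change.spc_state == SCTP_ADDR_AVAILABLE)
			printf("peer address available again\n");
	}
}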
diff --git a/net/sctp/debug.c b/net/sctp/debug.c
index 5f5ab28977c9..e8c0f7435d7f 100644
--- a/net/sctp/debug.c
+++ b/net/sctp/debug.c
@@ -93,8 +93,9 @@ const char *sctp_cname(const sctp_subtype_t cid)
 		return "FWD_TSN";
 
 	default:
-		return "unknown chunk";
-	};
+		break;
+	}
+
 	return "unknown chunk";
 }
 
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 71db66873695..885109fb3dda 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -79,14 +79,10 @@ static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
 /* Calculate the SCTP checksum of an SCTP packet. */
 static inline int sctp_rcv_checksum(struct sk_buff *skb)
 {
-	struct sctphdr *sh;
-	__u32 cmp, val;
 	struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
-	sh = (struct sctphdr *) skb->h.raw;
-	cmp = ntohl(sh->checksum);
-
-	val = sctp_start_cksum((__u8 *)sh, skb_headlen(skb));
+	struct sctphdr *sh = sctp_hdr(skb);
+	__u32 cmp = ntohl(sh->checksum);
+	__u32 val = sctp_start_cksum((__u8 *)sh, skb_headlen(skb));
 
 	for (; list; list = list->next)
 		val = sctp_update_cksum((__u8 *)list->data, skb_headlen(list),
@@ -138,14 +134,13 @@ int sctp_rcv(struct sk_buff *skb)
 	if (skb_linearize(skb))
 		goto discard_it;
 
-	sh = (struct sctphdr *) skb->h.raw;
+	sh = sctp_hdr(skb);
 
 	/* Pull up the IP and SCTP headers. */
-	__skb_pull(skb, skb->h.raw - skb->data);
+	__skb_pull(skb, skb_transport_offset(skb));
 	if (skb->len < sizeof(struct sctphdr))
 		goto discard_it;
-	if ((skb->ip_summed != CHECKSUM_UNNECESSARY) &&
-	    (sctp_rcv_checksum(skb) < 0))
+	if (!skb_csum_unnecessary(skb) && sctp_rcv_checksum(skb) < 0)
 		goto discard_it;
 
 	skb_pull(skb, sizeof(struct sctphdr));
@@ -154,7 +149,7 @@ int sctp_rcv(struct sk_buff *skb)
 	if (skb->len < sizeof(struct sctp_chunkhdr))
 		goto discard_it;
 
-	family = ipver2af(skb->nh.iph->version);
+	family = ipver2af(ip_hdr(skb)->version);
 	af = sctp_get_af_specific(family);
 	if (unlikely(!af))
 		goto discard_it;
@@ -510,30 +505,30 @@ void sctp_err_finish(struct sock *sk, struct sctp_association *asoc)
 void sctp_v4_err(struct sk_buff *skb, __u32 info)
 {
 	struct iphdr *iph = (struct iphdr *)skb->data;
-	struct sctphdr *sh = (struct sctphdr *)(skb->data + (iph->ihl <<2));
-	int type = skb->h.icmph->type;
-	int code = skb->h.icmph->code;
+	const int ihlen = iph->ihl * 4;
+	const int type = icmp_hdr(skb)->type;
+	const int code = icmp_hdr(skb)->code;
 	struct sock *sk;
 	struct sctp_association *asoc = NULL;
 	struct sctp_transport *transport;
 	struct inet_sock *inet;
-	char *saveip, *savesctp;
+	sk_buff_data_t saveip, savesctp;
 	int err;
 
-	if (skb->len < ((iph->ihl << 2) + 8)) {
+	if (skb->len < ihlen + 8) {
 		ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
 		return;
 	}
 
 	/* Fix up skb to look at the embedded net header. */
-	saveip = skb->nh.raw;
-	savesctp = skb->h.raw;
-	skb->nh.iph = iph;
-	skb->h.raw = (char *)sh;
-	sk = sctp_err_lookup(AF_INET, skb, sh, &asoc, &transport);
-	/* Put back, the original pointers. */
-	skb->nh.raw = saveip;
-	skb->h.raw = savesctp;
+	saveip = skb->network_header;
+	savesctp = skb->transport_header;
+	skb_reset_network_header(skb);
+	skb_set_transport_header(skb, ihlen);
+	sk = sctp_err_lookup(AF_INET, skb, sctp_hdr(skb), &asoc, &transport);
+	/* Put back, the original values. */
+	skb->network_header = saveip;
+	skb->transport_header = savesctp;
 	if (!sk) {
 		ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
 		return;
@@ -616,7 +611,7 @@ int sctp_rcv_ootb(struct sk_buff *skb)
 			break;
 
 		ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
-		if (ch_end > skb->tail)
+		if (ch_end > skb_tail_pointer(skb))
 			break;
 
 		/* RFC 8.4, 2) If the OOTB packet contains an ABORT chunk, the
@@ -648,7 +643,7 @@ int sctp_rcv_ootb(struct sk_buff *skb)
 		}
 
 		ch = (sctp_chunkhdr_t *) ch_end;
-	} while (ch_end < skb->tail);
+	} while (ch_end < skb_tail_pointer(skb));
 
 	return 0;
 
@@ -905,7 +900,7 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct sk_buff *skb,
 	struct sctp_association *asoc;
 	union sctp_addr addr;
 	union sctp_addr *paddr = &addr;
-	struct sctphdr *sh = (struct sctphdr *) skb->h.raw;
+	struct sctphdr *sh = sctp_hdr(skb);
 	sctp_chunkhdr_t *ch;
 	union sctp_params params;
 	sctp_init_chunk_t *init;
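
Editor's note (not part of the patch): these hunks replace open-coded casts of skb->h.raw / skb->nh.iph with the typed header accessors that accompany the sk_buff_data_t conversion. As a rough, illustrative sketch only (the real helpers such as sctp_hdr(), ip_hdr(), icmp_hdr() and skb_transport_header() are defined in the kernel headers; the example_* names below are hypothetical), the equivalences boil down to:

#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/sctp.h>

/* Illustrative only: what the new accessors return, assuming the
 * 2.6.22-era sk_buff layout used by this patch series.
 */
static inline struct sctphdr *example_sctp_hdr(const struct sk_buff *skb)
{
	/* old code:  (struct sctphdr *)skb->h.raw */
	return (struct sctphdr *)skb_transport_header(skb);
}

static inline struct iphdr *example_ip_hdr(const struct sk_buff *skb)
{
	/* old code:  skb->nh.iph */
	return (struct iphdr *)skb_network_header(skb);
}

static inline struct icmphdr *example_icmp_hdr(const struct sk_buff *skb)
{
	/* old code:  skb->h.icmph */
	return (struct icmphdr *)skb_transport_header(skb);
}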
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index c30629e17781..88aa22407549 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -159,16 +159,16 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
 	 * the skb->tail.
 	 */
 	if (unlikely(skb_is_nonlinear(chunk->skb))) {
-		if (chunk->chunk_end > chunk->skb->tail)
-			chunk->chunk_end = chunk->skb->tail;
+		if (chunk->chunk_end > skb_tail_pointer(chunk->skb))
+			chunk->chunk_end = skb_tail_pointer(chunk->skb);
 	}
 	skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t));
 	chunk->subh.v = NULL; /* Subheader is no longer valid. */
 
-	if (chunk->chunk_end < chunk->skb->tail) {
+	if (chunk->chunk_end < skb_tail_pointer(chunk->skb)) {
 		/* This is not a singleton */
 		chunk->singleton = 0;
-	} else if (chunk->chunk_end > chunk->skb->tail) {
+	} else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) {
 		/* RFC 2960, Section 6.10 Bundling
 		 *
 		 * Partial chunks MUST NOT be placed in an SCTP packet.
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 63fe1093b616..ca527a27dd05 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -122,26 +122,24 @@ SCTP_STATIC void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 			     int type, int code, int offset, __be32 info)
 {
 	struct inet6_dev *idev;
-	struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
-	struct sctphdr *sh = (struct sctphdr *)(skb->data + offset);
 	struct sock *sk;
 	struct sctp_association *asoc;
 	struct sctp_transport *transport;
 	struct ipv6_pinfo *np;
-	char *saveip, *savesctp;
+	sk_buff_data_t saveip, savesctp;
 	int err;
 
 	idev = in6_dev_get(skb->dev);
 
 	/* Fix up skb to look at the embedded net header. */
-	saveip = skb->nh.raw;
-	savesctp = skb->h.raw;
-	skb->nh.ipv6h = iph;
-	skb->h.raw = (char *)sh;
-	sk = sctp_err_lookup(AF_INET6, skb, sh, &asoc, &transport);
+	saveip = skb->network_header;
+	savesctp = skb->transport_header;
+	skb_reset_network_header(skb);
+	skb_set_transport_header(skb, offset);
+	sk = sctp_err_lookup(AF_INET6, skb, sctp_hdr(skb), &asoc, &transport);
 	/* Put back, the original pointers. */
-	skb->nh.raw = saveip;
-	skb->h.raw = savesctp;
+	skb->network_header = saveip;
+	skb->transport_header = savesctp;
 	if (!sk) {
 		ICMP6_INC_STATS_BH(idev, ICMP6_MIB_INERRORS);
 		goto out;
@@ -360,7 +358,7 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
 		return;
 	}
 
-	read_lock(&in6_dev->lock);
+	read_lock_bh(&in6_dev->lock);
 	for (ifp = in6_dev->addr_list; ifp; ifp = ifp->if_next) {
 		/* Add the address to the local list. */
 		addr = t_new(struct sctp_sockaddr_entry, GFP_ATOMIC);
@@ -374,7 +372,7 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
 		}
 	}
 
-	read_unlock(&in6_dev->lock);
+	read_unlock_bh(&in6_dev->lock);
 	rcu_read_unlock();
 }
 
@@ -391,13 +389,13 @@ static void sctp_v6_from_skb(union sctp_addr *addr,struct sk_buff *skb,
 	addr->v6.sin6_flowinfo = 0; /* FIXME */
 	addr->v6.sin6_scope_id = ((struct inet6_skb_parm *)skb->cb)->iif;
 
-	sh = (struct sctphdr *) skb->h.raw;
+	sh = sctp_hdr(skb);
 	if (is_saddr) {
 		*port = sh->source;
-		from = &skb->nh.ipv6h->saddr;
+		from = &ipv6_hdr(skb)->saddr;
 	} else {
 		*port = sh->dest;
-		from = &skb->nh.ipv6h->daddr;
+		from = &ipv6_hdr(skb)->daddr;
 	}
 	ipv6_addr_copy(&addr->v6.sin6_addr, from);
 }
@@ -606,7 +604,7 @@ static sctp_scope_t sctp_v6_scope(union sctp_addr *addr)
 	default:
 		retval = SCTP_SCOPE_GLOBAL;
 		break;
-	};
+	}
 
 	return retval;
 }
@@ -699,7 +697,7 @@ static int sctp_v6_skb_iif(const struct sk_buff *skb)
 /* Was this packet marked by Explicit Congestion Notification? */
 static int sctp_v6_is_ce(const struct sk_buff *skb)
 {
-	return *((__u32 *)(skb->nh.ipv6h)) & htonl(1<<20);
+	return *((__u32 *)(ipv6_hdr(skb))) & htonl(1 << 20);
 }
 
 /* Dump the v6 addr to the seq file. */
@@ -766,19 +764,19 @@ static void sctp_inet6_skb_msgname(struct sk_buff *skb, char *msgname,
 	if (msgname) {
 		sctp_inet6_msgname(msgname, addr_len);
 		sin6 = (struct sockaddr_in6 *)msgname;
-		sh = (struct sctphdr *)skb->h.raw;
+		sh = sctp_hdr(skb);
 		sin6->sin6_port = sh->source;
 
 		/* Map ipv4 address into v4-mapped-on-v6 address. */
 		if (sctp_sk(skb->sk)->v4mapped &&
-		    skb->nh.iph->version == 4) {
+		    ip_hdr(skb)->version == 4) {
 			sctp_v4_map_v6((union sctp_addr *)sin6);
-			sin6->sin6_addr.s6_addr32[3] = skb->nh.iph->saddr;
+			sin6->sin6_addr.s6_addr32[3] = ip_hdr(skb)->saddr;
 			return;
 		}
 
 		/* Otherwise, just copy the v6 address. */
-		ipv6_addr_copy(&sin6->sin6_addr, &skb->nh.ipv6h->saddr);
+		ipv6_addr_copy(&sin6->sin6_addr, &ipv6_hdr(skb)->saddr);
 		if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) {
 			struct sctp_ulpevent *ev = sctp_skb2event(skb);
 			sin6->sin6_scope_id = ev->iif;
diff --git a/net/sctp/output.c b/net/sctp/output.c
index f875fc3ced54..d85543def754 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -176,7 +176,7 @@ sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet,
 	case SCTP_XMIT_OK:
 	case SCTP_XMIT_NAGLE_DELAY:
 		break;
-	};
+	}
 
 	return retval;
 }
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 5c2ddd10db06..992f361084b7 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -338,7 +338,7 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
 			SCTP_INC_STATS(SCTP_MIB_OUTORDERCHUNKS);
 			q->empty = 0;
 			break;
-		};
+		}
 	} else {
 		list_add_tail(&chunk->list, &q->control_chunk_list);
 		SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
@@ -396,6 +396,19 @@ void sctp_retransmit_mark(struct sctp_outq *q,
 		if (sctp_chunk_abandoned(chunk)) {
 			list_del_init(lchunk);
 			sctp_insert_list(&q->abandoned, lchunk);
+
+			/* If this chunk has not been previousely acked,
+			 * stop considering it 'outstanding'.  Our peer
+			 * will most likely never see it since it will
+			 * not be retransmitted
+			 */
+			if (!chunk->tsn_gap_acked) {
+				chunk->transport->flight_size -=
+						sctp_data_size(chunk);
+				q->outstanding_bytes -= sctp_data_size(chunk);
+				q->asoc->peer.rwnd += (sctp_data_size(chunk) +
+							sizeof(struct sk_buff));
+			}
 			continue;
 		}
 
@@ -617,7 +630,7 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
 			/* Retrieve a new chunk to bundle. */
 			lchunk = sctp_list_dequeue(lqueue);
 			break;
-		};
+		}
 
 		/* If we are here due to a retransmit timeout or a fast
 		 * retransmit and if there are any chunks left in the retransmit
@@ -766,7 +779,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
 		default:
 			/* We built a chunk with an illegal type! */
 			BUG();
-		};
+		}
 	}
 
 	/* Is it OK to send data chunks? */
@@ -1244,6 +1257,15 @@ static void sctp_check_transmitted(struct sctp_outq *q,
 		if (sctp_chunk_abandoned(tchunk)) {
 			/* Move the chunk to abandoned list. */
 			sctp_insert_list(&q->abandoned, lchunk);
+
+			/* If this chunk has not been acked, stop
+			 * considering it as 'outstanding'.
+			 */
+			if (!tchunk->tsn_gap_acked) {
+				tchunk->transport->flight_size -=
+						sctp_data_size(tchunk);
+				q->outstanding_bytes -= sctp_data_size(tchunk);
+			}
 			continue;
 		}
 
@@ -1375,7 +1397,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
 				SCTP_DEBUG_PRINTK("ACKed: %08x", tsn);
 				dbg_prt_state = 0;
 				dbg_ack_tsn = tsn;
-			};
+			}
 
 			dbg_last_ack_tsn = tsn;
 #endif /* SCTP_DEBUG */
@@ -1430,7 +1452,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
 				SCTP_DEBUG_PRINTK("KEPT: %08x",tsn);
 				dbg_prt_state = 1;
 				dbg_kept_tsn = tsn;
-			};
+			}
 
 			dbg_last_kept_tsn = tsn;
 #endif /* SCTP_DEBUG */
@@ -1454,7 +1476,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
 		} else {
 			SCTP_DEBUG_PRINTK("\n");
 		}
-	};
+	}
 #endif /* SCTP_DEBUG */
 	if (transport) {
 		if (bytes_acked) {
@@ -1695,11 +1717,6 @@ static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
 		 */
 		if (TSN_lte(tsn, ctsn)) {
 			list_del_init(lchunk);
-			if (!chunk->tsn_gap_acked) {
-				chunk->transport->flight_size -=
-						sctp_data_size(chunk);
-				q->outstanding_bytes -= sctp_data_size(chunk);
-			}
 			sctp_chunk_free(chunk);
 		} else {
 			if (TSN_lte(tsn, asoc->adv_peer_ack_point+1)) {
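
Editor's note (not part of the patch): the new blocks in sctp_retransmit_mark() and sctp_check_transmitted() stop charging abandoned-but-never-acked chunks against the transport and the association, and the equivalent late adjustment is dropped from sctp_generate_fwdtsn(). A purely hypothetical, simplified model of that bookkeeping (names and types are illustrative, not the kernel's; it mirrors the sctp_retransmit_mark() case, which also returns receive-window credit) is:

#define TOY_SKB_OVERHEAD 256	/* stands in for sizeof(struct sk_buff) */

struct toy_transport { unsigned int flight_size; };
struct toy_outq {
	unsigned int outstanding_bytes;
	unsigned int peer_rwnd;
};

static void toy_abandon_chunk(struct toy_outq *q, struct toy_transport *t,
			      unsigned int chunk_bytes, int tsn_gap_acked)
{
	if (tsn_gap_acked)
		return;	/* already acknowledged; nothing left in flight */

	t->flight_size -= chunk_bytes;
	q->outstanding_bytes -= chunk_bytes;
	/* Return the window credit charged when the chunk was queued,
	 * including the per-buffer overhead.
	 */
	q->peer_rwnd += chunk_bytes + TOY_SKB_OVERHEAD;
}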
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index e17a823ca90f..c361deb6cea9 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -235,13 +235,13 @@ static void sctp_v4_from_skb(union sctp_addr *addr, struct sk_buff *skb,
 	port = &addr->v4.sin_port;
 	addr->v4.sin_family = AF_INET;
 
-	sh = (struct sctphdr *) skb->h.raw;
+	sh = sctp_hdr(skb);
 	if (is_saddr) {
 		*port = sh->source;
-		from = &skb->nh.iph->saddr;
+		from = &ip_hdr(skb)->saddr;
 	} else {
 		*port = sh->dest;
-		from = &skb->nh.iph->daddr;
+		from = &ip_hdr(skb)->daddr;
 	}
 	memcpy(&addr->v4.sin_addr.s_addr, from, sizeof(struct in_addr));
 }
@@ -530,7 +530,7 @@ static int sctp_v4_skb_iif(const struct sk_buff *skb)
 /* Was this packet marked by Explicit Congestion Notification? */
 static int sctp_v4_is_ce(const struct sk_buff *skb)
 {
-	return INET_ECN_is_ce(skb->nh.iph->tos);
+	return INET_ECN_is_ce(ip_hdr(skb)->tos);
 }
 
 /* Create and initialize a new sk for the socket returned by accept(). */
@@ -731,15 +731,13 @@ static void sctp_inet_event_msgname(struct sctp_ulpevent *event, char *msgname,
 /* Initialize and copy out a msgname from an inbound skb. */
 static void sctp_inet_skb_msgname(struct sk_buff *skb, char *msgname, int *len)
 {
-	struct sctphdr *sh;
-	struct sockaddr_in *sin;
-
 	if (msgname) {
+		struct sctphdr *sh = sctp_hdr(skb);
+		struct sockaddr_in *sin = (struct sockaddr_in *)msgname;
+
 		sctp_inet_msgname(msgname, len);
-		sin = (struct sockaddr_in *)msgname;
-		sh = (struct sctphdr *)skb->h.raw;
 		sin->sin_port = sh->source;
-		sin->sin_addr.s_addr = skb->nh.iph->saddr;
+		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
 	}
 }
 
@@ -1044,7 +1042,7 @@ SCTP_STATIC __init int sctp_init(void)
 	sctp_cookie_preserve_enable = 1;
 
 	/* Max.Burst - 4 */
-	sctp_max_burst = SCTP_MAX_BURST;
+	sctp_max_burst = SCTP_DEFAULT_MAX_BURST;
 
 	/* Association.Max.Retrans - 10 attempts
 	 * Path.Max.Retrans - 5 attempts (per destination address)
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index f7fb29d5a0c7..be783a3761c4 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -86,7 +86,7 @@ int sctp_chunk_iif(const struct sctp_chunk *chunk)
 	struct sctp_af *af;
 	int iif = 0;
 
-	af = sctp_get_af_specific(ipver2af(chunk->skb->nh.iph->version));
+	af = sctp_get_af_specific(ipver2af(ip_hdr(chunk->skb)->version));
 	if (af)
 		iif = af->skb_iif(chunk->skb);
 
@@ -1143,7 +1143,7 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data)
 
 	/* Adjust the chunk length field. */
 	chunk->chunk_hdr->length = htons(chunklen + padlen + len);
-	chunk->chunk_end = chunk->skb->tail;
+	chunk->chunk_end = skb_tail_pointer(chunk->skb);
 
 	return target;
 }
@@ -1168,7 +1168,7 @@ int sctp_user_addto_chunk(struct sctp_chunk *chunk, int off, int len,
 	/* Adjust the chunk length field. */
 	chunk->chunk_hdr->length =
 		htons(ntohs(chunk->chunk_hdr->length) + len);
-	chunk->chunk_end = chunk->skb->tail;
+	chunk->chunk_end = skb_tail_pointer(chunk->skb);
 
 out:
 	return err;
@@ -1233,7 +1233,7 @@ struct sctp_association *sctp_make_temp_asoc(const struct sctp_endpoint *ep,
 	asoc->temp = 1;
 	skb = chunk->skb;
 	/* Create an entry for the source address of the packet. */
-	af = sctp_get_af_specific(ipver2af(skb->nh.iph->version));
+	af = sctp_get_af_specific(ipver2af(ip_hdr(skb)->version));
 	if (unlikely(!af))
 		goto fail;
 	af->from_skb(&asoc->c.peer_addr, skb, 1);
@@ -2077,7 +2077,7 @@ static int sctp_process_param(struct sctp_association *asoc,
 
 		default: /* Just ignore anything else. */
 			break;
-		};
+		}
 	}
 	break;
 
@@ -2118,7 +2118,7 @@ static int sctp_process_param(struct sctp_association *asoc,
 		SCTP_DEBUG_PRINTK("Ignoring param: %d for association %p.\n",
 				  ntohs(param.p->type), asoc);
 		break;
-	};
+	}
 
 	return retval;
 }
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 135567493119..b37a7adeb150 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -464,7 +464,7 @@ static void sctp_cmd_init_failed(sctp_cmd_seq_t *commands,
 	struct sctp_ulpevent *event;
 
 	event = sctp_ulpevent_make_assoc_change(asoc,0, SCTP_CANT_STR_ASSOC,
-						(__u16)error, 0, 0,
+						(__u16)error, 0, 0, NULL,
 						GFP_ATOMIC);
 
 	if (event)
@@ -492,8 +492,13 @@ static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands,
 	/* Cancel any partial delivery in progress. */
 	sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
 
-	event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
-						(__u16)error, 0, 0,
+	if (event_type == SCTP_EVENT_T_CHUNK && subtype.chunk == SCTP_CID_ABORT)
+		event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
+						(__u16)error, 0, 0, chunk,
+						GFP_ATOMIC);
+	else
+		event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
+						(__u16)error, 0, 0, NULL,
 						GFP_ATOMIC);
 	if (event)
 		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
@@ -1004,7 +1009,7 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
 		       status, state, event_type, subtype.chunk);
 		BUG();
 		break;
-	};
+	}
 
 bail:
 	return error;
@@ -1484,7 +1489,8 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
 			printk(KERN_WARNING "Impossible command: %u, %p\n",
 			       cmd->verb, cmd->obj.ptr);
 			break;
-		};
+		}
+
 		if (error)
 			break;
 	}
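
Editor's note (not part of the patch): with the sctp_cmd_assoc_failed() change above, an SCTP_COMM_LOST association-change event generated for a received ABORT now carries the ABORT chunk itself. A hedged userspace sketch of reading it follows; it assumes <netinet/sctp.h> and a buffer "buf" that was filled by recvmsg() with MSG_NOTIFICATION set, neither of which is defined by this patch.

#include <stdio.h>
#include <netinet/sctp.h>

static void dump_comm_lost(const char *buf)
{
	const union sctp_notification *sn = (const union sctp_notification *)buf;
	const struct sctp_assoc_change *sac = &sn->sn_assoc_change;
	size_t info_len;

	if (sn->sn_header.sn_type != SCTP_ASSOC_CHANGE ||
	    sac->sac_state != SCTP_COMM_LOST)
		return;

	/* Anything beyond the fixed-size header is the peer's ABORT chunk. */
	info_len = sac->sac_length - sizeof(*sac);
	printf("association lost, %zu bytes of ABORT info attached\n", info_len);
}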
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index b3cad8a03736..9e28a5d51200 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -186,7 +186,7 @@ sctp_disposition_t sctp_sf_do_4_C(const struct sctp_endpoint *ep,
 	 * notification is passed to the upper layer.
 	 */
 	ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_SHUTDOWN_COMP,
-					     0, 0, 0, GFP_ATOMIC);
+					     0, 0, 0, NULL, GFP_ATOMIC);
 	if (ev)
 		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
 				SCTP_ULPEVENT(ev));
@@ -629,7 +629,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
 	case -SCTP_IERROR_BAD_SIG:
 	default:
 		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
-	};
+	}
 	}
 
 
@@ -661,7 +661,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
 	ev = sctp_ulpevent_make_assoc_change(new_asoc, 0, SCTP_COMM_UP, 0,
 					     new_asoc->c.sinit_num_ostreams,
 					     new_asoc->c.sinit_max_instreams,
-					     GFP_ATOMIC);
+					     NULL, GFP_ATOMIC);
 	if (!ev)
 		goto nomem_ev;
 
@@ -790,7 +790,7 @@ sctp_disposition_t sctp_sf_do_5_1E_ca(const struct sctp_endpoint *ep,
 	ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_UP,
 					     0, asoc->c.sinit_num_ostreams,
 					     asoc->c.sinit_max_instreams,
-					     GFP_ATOMIC);
+					     NULL, GFP_ATOMIC);
 
 	if (!ev)
 		goto nomem;
@@ -1195,7 +1195,7 @@ static void sctp_tietags_populate(struct sctp_association *new_asoc,
 		new_asoc->c.my_ttag = asoc->c.my_vtag;
 		new_asoc->c.peer_ttag = asoc->c.peer_vtag;
 		break;
-	};
+	}
 
 	/* Other parameters for the endpoint SHOULD be copied from the
 	 * existing parameters of the association (e.g. number of
@@ -1625,7 +1625,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(const struct sctp_endpoint *ep,
 	ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_RESTART, 0,
 					     new_asoc->c.sinit_num_ostreams,
 					     new_asoc->c.sinit_max_instreams,
-					     GFP_ATOMIC);
+					     NULL, GFP_ATOMIC);
 	if (!ev)
 		goto nomem_ev;
 
@@ -1691,7 +1691,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_b(const struct sctp_endpoint *ep,
 	ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_UP, 0,
 					     new_asoc->c.sinit_num_ostreams,
 					     new_asoc->c.sinit_max_instreams,
-					     GFP_ATOMIC);
+					     NULL, GFP_ATOMIC);
 	if (!ev)
 		goto nomem_ev;
 
@@ -1786,7 +1786,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_d(const struct sctp_endpoint *ep,
 						     SCTP_COMM_UP, 0,
 						     asoc->c.sinit_num_ostreams,
 						     asoc->c.sinit_max_instreams,
-						     GFP_ATOMIC);
+						     NULL, GFP_ATOMIC);
 		if (!ev)
 			goto nomem;
 
@@ -1904,7 +1904,7 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,
 	case -SCTP_IERROR_BAD_SIG:
 	default:
 		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
-	};
+	}
 	}
 
 	/* Compare the tie_tag in cookie with the verification tag of
@@ -1936,7 +1936,7 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,
 	default: /* Discard packet for all others. */
 		retval = sctp_sf_pdiscard(ep, asoc, type, arg, commands);
 		break;
-	};
+	}
 
 	/* Delete the tempory new association. */
 	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
@@ -3035,7 +3035,7 @@ sctp_disposition_t sctp_sf_do_9_2_final(const struct sctp_endpoint *ep,
 	 * notification is passed to the upper layer.
 	 */
 	ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_SHUTDOWN_COMP,
-					     0, 0, 0, GFP_ATOMIC);
+					     0, 0, 0, NULL, GFP_ATOMIC);
 	if (!ev)
 		goto nomem;
 
@@ -3115,7 +3115,7 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
 			break;
 
 		ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
-		if (ch_end > skb->tail)
+		if (ch_end > skb_tail_pointer(skb))
 			break;
 
 		if (SCTP_CID_SHUTDOWN_ACK == ch->type)
@@ -3130,7 +3130,7 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
 			return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
 
 		ch = (sctp_chunkhdr_t *) ch_end;
-	} while (ch_end < skb->tail);
+	} while (ch_end < skb_tail_pointer(skb));
 
 	if (ootb_shut_ack)
 		sctp_sf_shut_8_4_5(ep, asoc, type, arg, commands);
@@ -4342,8 +4342,24 @@ sctp_disposition_t sctp_sf_do_prm_requestheartbeat(
 	void *arg,
 	sctp_cmd_seq_t *commands)
 {
-	return sctp_sf_heartbeat(ep, asoc, type, (struct sctp_transport *)arg,
-				 commands);
+	if (SCTP_DISPOSITION_NOMEM == sctp_sf_heartbeat(ep, asoc, type,
+				      (struct sctp_transport *)arg, commands))
+		return SCTP_DISPOSITION_NOMEM;
+
+	/*
+	 * RFC 2960 (bis), section 8.3
+	 *
+	 *    D) Request an on-demand HEARTBEAT on a specific destination
+	 *    transport address of a given association.
+	 *
+	 *    The endpoint should increment the respective error counter of
+	 *    the destination transport address each time a HEARTBEAT is sent
+	 *    to that address and not acknowledged within one RTO.
+	 *
+	 */
+	sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_RESET,
+			SCTP_TRANSPORT(arg));
+	return SCTP_DISPOSITION_CONSUME;
 }
 
 /*
@@ -4605,12 +4621,12 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(const struct sctp_endpoint *ep,
 	 * sent as soon as cwnd allows (normally when a SACK arrives).
 	 */
 
-	/* NB: Rules E4 and F1 are implicit in R1. */
-	sctp_add_cmd_sf(commands, SCTP_CMD_RETRAN, SCTP_TRANSPORT(transport));
-
 	/* Do some failure management (Section 8.2). */
 	sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE, SCTP_TRANSPORT(transport));
 
+	/* NB: Rules E4 and F1 are implicit in R1. */
+	sctp_add_cmd_sf(commands, SCTP_CMD_RETRAN, SCTP_TRANSPORT(transport));
+
 	return SCTP_DISPOSITION_CONSUME;
 }
 
@@ -4800,7 +4816,7 @@ sctp_disposition_t sctp_sf_t2_timer_expire(const struct sctp_endpoint *ep,
 	default:
 		BUG();
 		break;
-	};
+	}
 
 	if (!reply)
 		goto nomem;
@@ -5270,7 +5286,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 		chunk->ecn_ce_done = 1;
 
 		af = sctp_get_af_specific(
-			ipver2af(chunk->skb->nh.iph->version));
+			ipver2af(ip_hdr(chunk->skb)->version));
 
 		if (af && af->is_ce(chunk->skb) && asoc->peer.ecn_capable) {
 			/* Do real work as sideffect. */
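
Editor's note (not part of the patch): the sctp_sf_do_prm_requestheartbeat() change above now also queues SCTP_CMD_TRANSPORT_RESET after sending the requested HEARTBEAT. A hedged userspace sketch of triggering that path follows; it assumes <netinet/sctp.h>, and "fd", "assoc_id" and "peer" identify an existing association and are not part of this patch.

#include <string.h>
#include <sys/socket.h>
#include <netinet/sctp.h>

static int demand_heartbeat(int fd, sctp_assoc_t assoc_id,
			    const struct sockaddr_storage *peer)
{
	struct sctp_paddrparams params;

	memset(&params, 0, sizeof(params));
	params.spp_assoc_id = assoc_id;
	memcpy(&params.spp_address, peer, sizeof(*peer));
	params.spp_flags = SPP_HB_DEMAND;	/* send one HEARTBEAT now */

	return setsockopt(fd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
			  &params, sizeof(params));
}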
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c
index 5e54b17377f4..523071c7902f 100644
--- a/net/sctp/sm_statetable.c
+++ b/net/sctp/sm_statetable.c
@@ -101,7 +101,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
 	default:
 		/* Yikes! We got an illegal event type. */
 		return &bug;
-	};
+	}
 }
 
 #define TYPE_SCTP_FUNC(func) {.fn = func, .name = #func}
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 536298c2eda2..2fc0a92caa78 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -627,6 +627,12 @@ int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
 			retval = -EINVAL;
 			goto err_bindx_rem;
 		}
+
+		if (!af->addr_valid(sa_addr, sp, NULL)) {
+			retval = -EADDRNOTAVAIL;
+			goto err_bindx_rem;
+		}
+
 		if (sa_addr->v4.sin_port != htons(bp->port)) {
 			retval = -EINVAL;
 			goto err_bindx_rem;
@@ -935,7 +941,7 @@ SCTP_STATIC int sctp_setsockopt_bindx(struct sock* sk,
 	default:
 		err = -EINVAL;
 		break;
-	};
+	}
 
 out:
 	kfree(kaddrs);
@@ -2033,6 +2039,10 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
  * SPP_HB_DEMAND - Request a user initiated heartbeat
  * to be made immediately.
  *
+ * SPP_HB_TIME_IS_ZERO - Specify's that the time for
+ * heartbeat delayis to be set to the value of 0
+ * milliseconds.
+ *
  * SPP_PMTUD_ENABLE - This field will enable PMTU
  * discovery upon the specified address. Note that
  * if the address feild is empty then all addresses
@@ -2075,13 +2085,30 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
 			return error;
 	}
 
-	if (params->spp_hbinterval) {
-		if (trans) {
-			trans->hbinterval = msecs_to_jiffies(params->spp_hbinterval);
-		} else if (asoc) {
-			asoc->hbinterval = msecs_to_jiffies(params->spp_hbinterval);
-		} else {
-			sp->hbinterval = params->spp_hbinterval;
+	/* Note that unless the spp_flag is set to SPP_HB_ENABLE the value of
+	 * this field is ignored. Note also that a value of zero indicates
+	 * the current setting should be left unchanged.
+	 */
+	if (params->spp_flags & SPP_HB_ENABLE) {
+
+		/* Re-zero the interval if the SPP_HB_TIME_IS_ZERO is
+		 * set. This lets us use 0 value when this flag
+		 * is set.
+		 */
+		if (params->spp_flags & SPP_HB_TIME_IS_ZERO)
+			params->spp_hbinterval = 0;
+
+		if (params->spp_hbinterval ||
+		    (params->spp_flags & SPP_HB_TIME_IS_ZERO)) {
+			if (trans) {
+				trans->hbinterval =
+				    msecs_to_jiffies(params->spp_hbinterval);
+			} else if (asoc) {
+				asoc->hbinterval =
+				    msecs_to_jiffies(params->spp_hbinterval);
+			} else {
+				sp->hbinterval = params->spp_hbinterval;
+			}
 		}
 	}
 
@@ -2098,7 +2125,12 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
 		}
 	}
 
-	if (params->spp_pathmtu) {
+	/* When Path MTU discovery is disabled the value specified here will
+	 * be the "fixed" path mtu (i.e. the value of the spp_flags field must
+	 * include the flag SPP_PMTUD_DISABLE for this field to have any
+	 * effect).
+	 */
+	if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) {
 		if (trans) {
 			trans->pathmtu = params->spp_pathmtu;
 			sctp_assoc_sync_pmtu(asoc);
@@ -2129,7 +2161,11 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
 		}
 	}
 
-	if (params->spp_sackdelay) {
+	/* Note that unless the spp_flag is set to SPP_SACKDELAY_ENABLE the
+	 * value of this field is ignored. Note also that a value of zero
+	 * indicates the current setting should be left unchanged.
+	 */
+	if ((params->spp_flags & SPP_SACKDELAY_ENABLE) && params->spp_sackdelay) {
 		if (trans) {
 			trans->sackdelay =
 				msecs_to_jiffies(params->spp_sackdelay);
@@ -2157,7 +2193,11 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
 		}
 	}
 
-	if (params->spp_pathmaxrxt) {
+	/* Note that unless the spp_flag is set to SPP_PMTUD_ENABLE the value
+	 * of this field is ignored. Note also that a value of zero
+	 * indicates the current setting should be left unchanged.
+	 */
+	if ((params->spp_flags & SPP_PMTUD_ENABLE) && params->spp_pathmaxrxt) {
 		if (trans) {
 			trans->pathmaxrxt = params->spp_pathmaxrxt;
 		} else if (asoc) {
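
Editor's note (not part of the patch): with the checks added above, the numeric fields of struct sctp_paddrparams only take effect when the matching spp_flags bit is set. A hedged userspace sketch of that contract follows; it assumes <netinet/sctp.h> and an existing SCTP socket "fd".

#include <string.h>
#include <sys/socket.h>
#include <netinet/sctp.h>

static int tune_paths(int fd)
{
	struct sctp_paddrparams params;

	memset(&params, 0, sizeof(params));	/* assoc_id/address 0: socket defaults */

	/* 30 s heartbeat interval -- ignored unless SPP_HB_ENABLE is set. */
	params.spp_flags = SPP_HB_ENABLE;
	params.spp_hbinterval = 30000;

	/* Fixed 1400-byte path MTU -- only honoured with SPP_PMTUD_DISABLE. */
	params.spp_flags |= SPP_PMTUD_DISABLE;
	params.spp_pathmtu = 1400;

	return setsockopt(fd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
			  &params, sizeof(params));
}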
| @@ -2249,7 +2289,7 @@ static int sctp_setsockopt_peer_addr_params(struct sock *sk, | |||
| 2249 | return 0; | 2289 | return 0; |
| 2250 | } | 2290 | } |
| 2251 | 2291 | ||
| 2252 | /* 7.1.24. Delayed Ack Timer (SCTP_DELAYED_ACK_TIME) | 2292 | /* 7.1.23. Delayed Ack Timer (SCTP_DELAYED_ACK_TIME) |
| 2253 | * | 2293 | * |
| 2254 | * This options will get or set the delayed ack timer. The time is set | 2294 | * This options will get or set the delayed ack timer. The time is set |
| 2255 | * in milliseconds. If the assoc_id is 0, then this sets or gets the | 2295 | * in milliseconds. If the assoc_id is 0, then this sets or gets the |
| @@ -2786,6 +2826,102 @@ static int sctp_setsockopt_context(struct sock *sk, char __user *optval, | |||
| 2786 | return 0; | 2826 | return 0; |
| 2787 | } | 2827 | } |
| 2788 | 2828 | ||
| 2829 | /* | ||
| 2830 | * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE) | ||
| 2831 | * | ||
| 2832 | * This options will at a minimum specify if the implementation is doing | ||
| 2833 | * fragmented interleave. Fragmented interleave, for a one to many | ||
| 2834 | * socket, is when subsequent calls to receive a message may return | ||
| 2835 | * parts of messages from different associations. Some implementations | ||
| 2836 | * may allow you to turn this value on or off. If so, when turned off, | ||
| 2837 | * no fragment interleave will occur (which will cause a head of line | ||
| 2838 | * blocking amongst multiple associations sharing the same one to many | ||
| 2839 | * socket). When this option is turned on, then each receive call may | ||
| 2840 | * come from a different association (thus the user must receive data | ||
| 2841 | * with the extended calls (e.g. sctp_recvmsg) to keep track of which | ||
| 2842 | * association each receive belongs to. | ||
| 2843 | * | ||
| 2844 | * This option takes a boolean value. A non-zero value indicates that | ||
| 2845 | * fragmented interleave is on. A value of zero indicates that | ||
| 2846 | * fragmented interleave is off. | ||
| 2847 | * | ||
| 2848 | * Note that it is important that an implementation that allows this | ||
| 2849 | * option to be turned on, have it off by default. Otherwise an unaware | ||
| 2850 | * application using the one to many model may become confused and act | ||
| 2851 | * incorrectly. | ||
| 2852 | */ | ||
| 2853 | static int sctp_setsockopt_fragment_interleave(struct sock *sk, | ||
| 2854 | char __user *optval, | ||
| 2855 | int optlen) | ||
| 2856 | { | ||
| 2857 | int val; | ||
| 2858 | |||
| 2859 | if (optlen != sizeof(int)) | ||
| 2860 | return -EINVAL; | ||
| 2861 | if (get_user(val, (int __user *)optval)) | ||
| 2862 | return -EFAULT; | ||
| 2863 | |||
| 2864 | sctp_sk(sk)->frag_interleave = (val == 0) ? 0 : 1; | ||
| 2865 | |||
| 2866 | return 0; | ||
| 2867 | } | ||
| 2868 | |||
| 2869 | /* | ||
| 2870 | * 7.1.25. Set or Get the sctp partial delivery point | ||
| 2871 | * (SCTP_PARTIAL_DELIVERY_POINT) | ||
| 2872 | * This option will set or get the SCTP partial delivery point. This | ||
| 2873 | * point is the size of a message where the partial delivery API will be | ||
| 2874 | * invoked to help free up rwnd space for the peer. Setting this to a | ||
| 2875 | * lower value will cause partial delivery's to happen more often. The | ||
| 2876 | * calls argument is an integer that sets or gets the partial delivery | ||
| 2877 | * point. | ||
| 2878 | */ | ||
| 2879 | static int sctp_setsockopt_partial_delivery_point(struct sock *sk, | ||
| 2880 | char __user *optval, | ||
| 2881 | int optlen) | ||
| 2882 | { | ||
| 2883 | u32 val; | ||
| 2884 | |||
| 2885 | if (optlen != sizeof(u32)) | ||
| 2886 | return -EINVAL; | ||
| 2887 | if (get_user(val, (int __user *)optval)) | ||
| 2888 | return -EFAULT; | ||
| 2889 | |||
| 2890 | sctp_sk(sk)->pd_point = val; | ||
| 2891 | |||
| 2892 | return 0; /* is this the right error code? */ | ||
| 2893 | } | ||
| 2894 | |||
| 2895 | /* | ||
| 2896 | * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST) | ||
| 2897 | * | ||
| 2898 | * This option will allow a user to change the maximum burst of packets | ||
| 2899 | * that can be emitted by this association. Note that the default value | ||
| 2900 | * is 4, and some implementations may restrict this setting so that it | ||
| 2901 | * can only be lowered. | ||
| 2902 | * | ||
| 2903 | * NOTE: This text doesn't seem right. Do this on a socket basis with | ||
| 2904 | * future associations inheriting the socket value. | ||
| 2905 | */ | ||
| 2906 | static int sctp_setsockopt_maxburst(struct sock *sk, | ||
| 2907 | char __user *optval, | ||
| 2908 | int optlen) | ||
| 2909 | { | ||
| 2910 | int val; | ||
| 2911 | |||
| 2912 | if (optlen != sizeof(int)) | ||
| 2913 | return -EINVAL; | ||
| 2914 | if (get_user(val, (int __user *)optval)) | ||
| 2915 | return -EFAULT; | ||
| 2916 | |||
| 2917 | if (val < 0) | ||
| 2918 | return -EINVAL; | ||
| 2919 | |||
| 2920 | sctp_sk(sk)->max_burst = val; | ||
| 2921 | |||
| 2922 | return 0; | ||
| 2923 | } | ||
| 2924 | |||
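[ The same pattern works for the burst limit; sketch only, assuming the
  userspace SCTP_MAX_BURST constant.  The setter above rejects negative
  values with -EINVAL, so only non-negative ints make sense here. ]

#include <netinet/in.h>
#include <netinet/sctp.h>
#include <sys/socket.h>

/* Change the socket-wide limit on packets emitted in one burst;
 * associations created afterwards inherit this value. */
int set_max_burst(int fd, int burst)
{
        return setsockopt(fd, IPPROTO_SCTP, SCTP_MAX_BURST,
                          &burst, sizeof(burst));
}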
| 2789 | /* API 6.2 setsockopt(), getsockopt() | 2925 | /* API 6.2 setsockopt(), getsockopt() |
| 2790 | * | 2926 | * |
| 2791 | * Applications use setsockopt() and getsockopt() to set or retrieve | 2927 | * Applications use setsockopt() and getsockopt() to set or retrieve |
| @@ -2865,6 +3001,9 @@ SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname, | |||
| 2865 | case SCTP_DELAYED_ACK_TIME: | 3001 | case SCTP_DELAYED_ACK_TIME: |
| 2866 | retval = sctp_setsockopt_delayed_ack_time(sk, optval, optlen); | 3002 | retval = sctp_setsockopt_delayed_ack_time(sk, optval, optlen); |
| 2867 | break; | 3003 | break; |
| 3004 | case SCTP_PARTIAL_DELIVERY_POINT: | ||
| 3005 | retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen); | ||
| 3006 | break; | ||
| 2868 | 3007 | ||
| 2869 | case SCTP_INITMSG: | 3008 | case SCTP_INITMSG: |
| 2870 | retval = sctp_setsockopt_initmsg(sk, optval, optlen); | 3009 | retval = sctp_setsockopt_initmsg(sk, optval, optlen); |
| @@ -2900,11 +3039,16 @@ SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname, | |||
| 2900 | case SCTP_CONTEXT: | 3039 | case SCTP_CONTEXT: |
| 2901 | retval = sctp_setsockopt_context(sk, optval, optlen); | 3040 | retval = sctp_setsockopt_context(sk, optval, optlen); |
| 2902 | break; | 3041 | break; |
| 2903 | 3042 | case SCTP_FRAGMENT_INTERLEAVE: | |
| 3043 | retval = sctp_setsockopt_fragment_interleave(sk, optval, optlen); | ||
| 3044 | break; | ||
| 3045 | case SCTP_MAX_BURST: | ||
| 3046 | retval = sctp_setsockopt_maxburst(sk, optval, optlen); | ||
| 3047 | break; | ||
| 2904 | default: | 3048 | default: |
| 2905 | retval = -ENOPROTOOPT; | 3049 | retval = -ENOPROTOOPT; |
| 2906 | break; | 3050 | break; |
| 2907 | }; | 3051 | } |
| 2908 | 3052 | ||
| 2909 | sctp_release_sock(sk); | 3053 | sctp_release_sock(sk); |
| 2910 | 3054 | ||
| @@ -3060,6 +3204,7 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk) | |||
| 3060 | sp->default_timetolive = 0; | 3204 | sp->default_timetolive = 0; |
| 3061 | 3205 | ||
| 3062 | sp->default_rcv_context = 0; | 3206 | sp->default_rcv_context = 0; |
| 3207 | sp->max_burst = sctp_max_burst; | ||
| 3063 | 3208 | ||
| 3064 | /* Initialize default setup parameters. These parameters | 3209 | /* Initialize default setup parameters. These parameters |
| 3065 | * can be modified with the SCTP_INITMSG socket option or | 3210 | * can be modified with the SCTP_INITMSG socket option or |
| @@ -3128,8 +3273,9 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk) | |||
| 3128 | sp->pf = sctp_get_pf_specific(sk->sk_family); | 3273 | sp->pf = sctp_get_pf_specific(sk->sk_family); |
| 3129 | 3274 | ||
| 3130 | /* Control variables for partial data delivery. */ | 3275 | /* Control variables for partial data delivery. */ |
| 3131 | sp->pd_mode = 0; | 3276 | atomic_set(&sp->pd_mode, 0); |
| 3132 | skb_queue_head_init(&sp->pd_lobby); | 3277 | skb_queue_head_init(&sp->pd_lobby); |
| 3278 | sp->frag_interleave = 0; | ||
| 3133 | 3279 | ||
| 3134 | /* Create a per socket endpoint structure. Even if we | 3280 | /* Create a per socket endpoint structure. Even if we |
| 3135 | * change the data structure relationships, this may still | 3281 | * change the data structure relationships, this may still |
| @@ -3636,7 +3782,7 @@ static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len, | |||
| 3636 | return 0; | 3782 | return 0; |
| 3637 | } | 3783 | } |
| 3638 | 3784 | ||
| 3639 | /* 7.1.24. Delayed Ack Timer (SCTP_DELAYED_ACK_TIME) | 3785 | /* 7.1.23. Delayed Ack Timer (SCTP_DELAYED_ACK_TIME) |
| 3640 | * | 3786 | * |
| 3641 | * This options will get or set the delayed ack timer. The time is set | 3787 | * This options will get or set the delayed ack timer. The time is set |
| 3642 | * in milliseconds. If the assoc_id is 0, then this sets or gets the | 3788 | * in milliseconds. If the assoc_id is 0, then this sets or gets the |
| @@ -3841,7 +3987,7 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len, | |||
| 3841 | memcpy(&temp, &from->ipaddr, sizeof(temp)); | 3987 | memcpy(&temp, &from->ipaddr, sizeof(temp)); |
| 3842 | sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp); | 3988 | sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp); |
| 3843 | addrlen = sctp_get_af_specific(sk->sk_family)->sockaddr_len; | 3989 | addrlen = sctp_get_af_specific(sk->sk_family)->sockaddr_len; |
| 3844 | if(space_left < addrlen) | 3990 | if (space_left < addrlen) |
| 3845 | return -ENOMEM; | 3991 | return -ENOMEM; |
| 3846 | if (copy_to_user(to, &temp, addrlen)) | 3992 | if (copy_to_user(to, &temp, addrlen)) |
| 3847 | return -EFAULT; | 3993 | return -EFAULT; |
| @@ -3930,8 +4076,9 @@ done: | |||
| 3930 | /* Helper function that copies local addresses to user and returns the number | 4076 | /* Helper function that copies local addresses to user and returns the number |
| 3931 | * of addresses copied. | 4077 | * of addresses copied. |
| 3932 | */ | 4078 | */ |
| 3933 | static int sctp_copy_laddrs_to_user_old(struct sock *sk, __u16 port, int max_addrs, | 4079 | static int sctp_copy_laddrs_old(struct sock *sk, __u16 port, |
| 3934 | void __user *to) | 4080 | int max_addrs, void *to, |
| 4081 | int *bytes_copied) | ||
| 3935 | { | 4082 | { |
| 3936 | struct list_head *pos, *next; | 4083 | struct list_head *pos, *next; |
| 3937 | struct sctp_sockaddr_entry *addr; | 4084 | struct sctp_sockaddr_entry *addr; |
| @@ -3948,10 +4095,10 @@ static int sctp_copy_laddrs_to_user_old(struct sock *sk, __u16 port, int max_add | |||
| 3948 | sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk), | 4095 | sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk), |
| 3949 | &temp); | 4096 | &temp); |
| 3950 | addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; | 4097 | addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; |
| 3951 | if (copy_to_user(to, &temp, addrlen)) | 4098 | memcpy(to, &temp, addrlen); |
| 3952 | return -EFAULT; | ||
| 3953 | 4099 | ||
| 3954 | to += addrlen; | 4100 | to += addrlen; |
| 4101 | *bytes_copied += addrlen; | ||
| 3955 | cnt ++; | 4102 | cnt ++; |
| 3956 | if (cnt >= max_addrs) break; | 4103 | if (cnt >= max_addrs) break; |
| 3957 | } | 4104 | } |
| @@ -3959,8 +4106,8 @@ static int sctp_copy_laddrs_to_user_old(struct sock *sk, __u16 port, int max_add | |||
| 3959 | return cnt; | 4106 | return cnt; |
| 3960 | } | 4107 | } |
| 3961 | 4108 | ||
| 3962 | static int sctp_copy_laddrs_to_user(struct sock *sk, __u16 port, | 4109 | static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to, |
| 3963 | void __user **to, size_t space_left) | 4110 | size_t space_left, int *bytes_copied) |
| 3964 | { | 4111 | { |
| 3965 | struct list_head *pos, *next; | 4112 | struct list_head *pos, *next; |
| 3966 | struct sctp_sockaddr_entry *addr; | 4113 | struct sctp_sockaddr_entry *addr; |
| @@ -3977,14 +4124,14 @@ static int sctp_copy_laddrs_to_user(struct sock *sk, __u16 port, | |||
| 3977 | sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk), | 4124 | sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk), |
| 3978 | &temp); | 4125 | &temp); |
| 3979 | addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; | 4126 | addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; |
| 3980 | if(space_left<addrlen) | 4127 | if (space_left < addrlen) |
| 3981 | return -ENOMEM; | 4128 | return -ENOMEM; |
| 3982 | if (copy_to_user(*to, &temp, addrlen)) | 4129 | memcpy(to, &temp, addrlen); |
| 3983 | return -EFAULT; | ||
| 3984 | 4130 | ||
| 3985 | *to += addrlen; | 4131 | to += addrlen; |
| 3986 | cnt ++; | 4132 | cnt ++; |
| 3987 | space_left -= addrlen; | 4133 | space_left -= addrlen; |
| 4134 | *bytes_copied += addrlen; | ||
| 3988 | } | 4135 | } |
| 3989 | 4136 | ||
| 3990 | return cnt; | 4137 | return cnt; |
| @@ -4008,6 +4155,8 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len, | |||
| 4008 | int addrlen; | 4155 | int addrlen; |
| 4009 | rwlock_t *addr_lock; | 4156 | rwlock_t *addr_lock; |
| 4010 | int err = 0; | 4157 | int err = 0; |
| 4158 | void *addrs; | ||
| 4159 | int bytes_copied = 0; | ||
| 4011 | 4160 | ||
| 4012 | if (len != sizeof(struct sctp_getaddrs_old)) | 4161 | if (len != sizeof(struct sctp_getaddrs_old)) |
| 4013 | return -EINVAL; | 4162 | return -EINVAL; |
| @@ -4035,6 +4184,15 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len, | |||
| 4035 | 4184 | ||
| 4036 | to = getaddrs.addrs; | 4185 | to = getaddrs.addrs; |
| 4037 | 4186 | ||
| 4187 | /* Allocate space for a local instance of a packed array to hold all | ||
| 4188 | * the data. We store addresses here first and then write them | ||
| 4189 | * to the user in one shot. | ||
| 4190 | */ | ||
| 4191 | addrs = kmalloc(sizeof(union sctp_addr) * getaddrs.addr_num, | ||
| 4192 | GFP_KERNEL); | ||
| 4193 | if (!addrs) | ||
| 4194 | return -ENOMEM; | ||
| 4195 | |||
| 4038 | sctp_read_lock(addr_lock); | 4196 | sctp_read_lock(addr_lock); |
| 4039 | 4197 | ||
| 4040 | /* If the endpoint is bound to 0.0.0.0 or ::0, get the valid | 4198 | /* If the endpoint is bound to 0.0.0.0 or ::0, get the valid |
| @@ -4044,13 +4202,9 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len, | |||
| 4044 | addr = list_entry(bp->address_list.next, | 4202 | addr = list_entry(bp->address_list.next, |
| 4045 | struct sctp_sockaddr_entry, list); | 4203 | struct sctp_sockaddr_entry, list); |
| 4046 | if (sctp_is_any(&addr->a)) { | 4204 | if (sctp_is_any(&addr->a)) { |
| 4047 | cnt = sctp_copy_laddrs_to_user_old(sk, bp->port, | 4205 | cnt = sctp_copy_laddrs_old(sk, bp->port, |
| 4048 | getaddrs.addr_num, | 4206 | getaddrs.addr_num, |
| 4049 | to); | 4207 | addrs, &bytes_copied); |
| 4050 | if (cnt < 0) { | ||
| 4051 | err = cnt; | ||
| 4052 | goto unlock; | ||
| 4053 | } | ||
| 4054 | goto copy_getaddrs; | 4208 | goto copy_getaddrs; |
| 4055 | } | 4209 | } |
| 4056 | } | 4210 | } |
| @@ -4060,22 +4214,29 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len, | |||
| 4060 | memcpy(&temp, &addr->a, sizeof(temp)); | 4214 | memcpy(&temp, &addr->a, sizeof(temp)); |
| 4061 | sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp); | 4215 | sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp); |
| 4062 | addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; | 4216 | addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; |
| 4063 | if (copy_to_user(to, &temp, addrlen)) { | 4217 | memcpy(addrs, &temp, addrlen); |
| 4064 | err = -EFAULT; | ||
| 4065 | goto unlock; | ||
| 4066 | } | ||
| 4067 | to += addrlen; | 4218 | to += addrlen; |
| 4219 | bytes_copied += addrlen; | ||
| 4068 | cnt ++; | 4220 | cnt ++; |
| 4069 | if (cnt >= getaddrs.addr_num) break; | 4221 | if (cnt >= getaddrs.addr_num) break; |
| 4070 | } | 4222 | } |
| 4071 | 4223 | ||
| 4072 | copy_getaddrs: | 4224 | copy_getaddrs: |
| 4225 | sctp_read_unlock(addr_lock); | ||
| 4226 | |||
| 4227 | /* copy the entire address list into the user provided space */ | ||
| 4228 | if (copy_to_user(to, addrs, bytes_copied)) { | ||
| 4229 | err = -EFAULT; | ||
| 4230 | goto error; | ||
| 4231 | } | ||
| 4232 | |||
| 4233 | /* copy the leading structure back to user */ | ||
| 4073 | getaddrs.addr_num = cnt; | 4234 | getaddrs.addr_num = cnt; |
| 4074 | if (copy_to_user(optval, &getaddrs, sizeof(struct sctp_getaddrs_old))) | 4235 | if (copy_to_user(optval, &getaddrs, sizeof(struct sctp_getaddrs_old))) |
| 4075 | err = -EFAULT; | 4236 | err = -EFAULT; |
| 4076 | 4237 | ||
| 4077 | unlock: | 4238 | error: |
| 4078 | sctp_read_unlock(addr_lock); | 4239 | kfree(addrs); |
| 4079 | return err; | 4240 | return err; |
| 4080 | } | 4241 | } |
| 4081 | 4242 | ||
| @@ -4095,7 +4256,8 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len, | |||
| 4095 | rwlock_t *addr_lock; | 4256 | rwlock_t *addr_lock; |
| 4096 | int err = 0; | 4257 | int err = 0; |
| 4097 | size_t space_left; | 4258 | size_t space_left; |
| 4098 | int bytes_copied; | 4259 | int bytes_copied = 0; |
| 4260 | void *addrs; | ||
| 4099 | 4261 | ||
| 4100 | if (len <= sizeof(struct sctp_getaddrs)) | 4262 | if (len <= sizeof(struct sctp_getaddrs)) |
| 4101 | return -EINVAL; | 4263 | return -EINVAL; |
| @@ -4123,6 +4285,9 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len, | |||
| 4123 | to = optval + offsetof(struct sctp_getaddrs,addrs); | 4285 | to = optval + offsetof(struct sctp_getaddrs,addrs); |
| 4124 | space_left = len - sizeof(struct sctp_getaddrs) - | 4286 | space_left = len - sizeof(struct sctp_getaddrs) - |
| 4125 | offsetof(struct sctp_getaddrs,addrs); | 4287 | offsetof(struct sctp_getaddrs,addrs); |
| 4288 | addrs = kmalloc(space_left, GFP_KERNEL); | ||
| 4289 | if (!addrs) | ||
| 4290 | return -ENOMEM; | ||
| 4126 | 4291 | ||
| 4127 | sctp_read_lock(addr_lock); | 4292 | sctp_read_lock(addr_lock); |
| 4128 | 4293 | ||
| @@ -4133,11 +4298,11 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len, | |||
| 4133 | addr = list_entry(bp->address_list.next, | 4298 | addr = list_entry(bp->address_list.next, |
| 4134 | struct sctp_sockaddr_entry, list); | 4299 | struct sctp_sockaddr_entry, list); |
| 4135 | if (sctp_is_any(&addr->a)) { | 4300 | if (sctp_is_any(&addr->a)) { |
| 4136 | cnt = sctp_copy_laddrs_to_user(sk, bp->port, | 4301 | cnt = sctp_copy_laddrs(sk, bp->port, addrs, |
| 4137 | &to, space_left); | 4302 | space_left, &bytes_copied); |
| 4138 | if (cnt < 0) { | 4303 | if (cnt < 0) { |
| 4139 | err = cnt; | 4304 | err = cnt; |
| 4140 | goto unlock; | 4305 | goto error; |
| 4141 | } | 4306 | } |
| 4142 | goto copy_getaddrs; | 4307 | goto copy_getaddrs; |
| 4143 | } | 4308 | } |
| @@ -4148,26 +4313,31 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len, | |||
| 4148 | memcpy(&temp, &addr->a, sizeof(temp)); | 4313 | memcpy(&temp, &addr->a, sizeof(temp)); |
| 4149 | sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp); | 4314 | sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp); |
| 4150 | addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; | 4315 | addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; |
| 4151 | if(space_left < addrlen) | 4316 | if (space_left < addrlen) { |
| 4152 | return -ENOMEM; /*fixme: right error?*/ | 4317 | err = -ENOMEM; /*fixme: right error?*/ |
| 4153 | if (copy_to_user(to, &temp, addrlen)) { | 4318 | goto error; |
| 4154 | err = -EFAULT; | ||
| 4155 | goto unlock; | ||
| 4156 | } | 4319 | } |
| 4320 | memcpy(addrs, &temp, addrlen); | ||
| 4157 | to += addrlen; | 4321 | to += addrlen; |
| 4322 | bytes_copied += addrlen; | ||
| 4158 | cnt ++; | 4323 | cnt ++; |
| 4159 | space_left -= addrlen; | 4324 | space_left -= addrlen; |
| 4160 | } | 4325 | } |
| 4161 | 4326 | ||
| 4162 | copy_getaddrs: | 4327 | copy_getaddrs: |
| 4328 | sctp_read_unlock(addr_lock); | ||
| 4329 | |||
| 4330 | if (copy_to_user(to, addrs, bytes_copied)) { | ||
| 4331 | err = -EFAULT; | ||
| 4332 | goto error; | ||
| 4333 | } | ||
| 4163 | if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) | 4334 | if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) |
| 4164 | return -EFAULT; | 4335 | return -EFAULT; |
| 4165 | bytes_copied = ((char __user *)to) - optval; | ||
| 4166 | if (put_user(bytes_copied, optlen)) | 4336 | if (put_user(bytes_copied, optlen)) |
| 4167 | return -EFAULT; | 4337 | return -EFAULT; |
| 4168 | 4338 | ||
| 4169 | unlock: | 4339 | error: |
| 4170 | sctp_read_unlock(addr_lock); | 4340 | kfree(addrs); |
| 4171 | return err; | 4341 | return err; |
| 4172 | } | 4342 | } |
| 4173 | 4343 | ||
| @@ -4530,6 +4700,77 @@ static int sctp_getsockopt_maxseg(struct sock *sk, int len, | |||
| 4530 | return 0; | 4700 | return 0; |
| 4531 | } | 4701 | } |
| 4532 | 4702 | ||
| 4703 | /* | ||
| 4704 | * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE) | ||
| 4705 | * (chapter and verse is quoted at sctp_setsockopt_fragment_interleave()) | ||
| 4706 | */ | ||
| 4707 | static int sctp_getsockopt_fragment_interleave(struct sock *sk, int len, | ||
| 4708 | char __user *optval, int __user *optlen) | ||
| 4709 | { | ||
| 4710 | int val; | ||
| 4711 | |||
| 4712 | if (len < sizeof(int)) | ||
| 4713 | return -EINVAL; | ||
| 4714 | |||
| 4715 | len = sizeof(int); | ||
| 4716 | |||
| 4717 | val = sctp_sk(sk)->frag_interleave; | ||
| 4718 | if (put_user(len, optlen)) | ||
| 4719 | return -EFAULT; | ||
| 4720 | if (copy_to_user(optval, &val, len)) | ||
| 4721 | return -EFAULT; | ||
| 4722 | |||
| 4723 | return 0; | ||
| 4724 | } | ||
| 4725 | |||
| 4726 | /* | ||
| 4727 | * 7.1.25. Set or Get the sctp partial delivery point | ||
| 4728 | * (chapter and verse is quoted at sctp_setsockopt_partial_delivery_point()) | ||
| 4729 | */ | ||
| 4730 | static int sctp_getsockopt_partial_delivery_point(struct sock *sk, int len, | ||
| 4731 | char __user *optval, | ||
| 4732 | int __user *optlen) | ||
| 4733 | { | ||
| 4734 | u32 val; | ||
| 4735 | |||
| 4736 | if (len < sizeof(u32)) | ||
| 4737 | return -EINVAL; | ||
| 4738 | |||
| 4739 | len = sizeof(u32); | ||
| 4740 | |||
| 4741 | val = sctp_sk(sk)->pd_point; | ||
| 4742 | if (put_user(len, optlen)) | ||
| 4743 | return -EFAULT; | ||
| 4744 | if (copy_to_user(optval, &val, len)) | ||
| 4745 | return -EFAULT; | ||
| 4746 | |||
| 4747 | return 0; | ||
| 4748 | } | ||
| 4749 | |||
| 4750 | /* | ||
| 4751 | * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST) | ||
| 4752 | * (chapter and verse is quoted at sctp_setsockopt_maxburst()) | ||
| 4753 | */ | ||
| 4754 | static int sctp_getsockopt_maxburst(struct sock *sk, int len, | ||
| 4755 | char __user *optval, | ||
| 4756 | int __user *optlen) | ||
| 4757 | { | ||
| 4758 | int val; | ||
| 4759 | |||
| 4760 | if (len < sizeof(int)) | ||
| 4761 | return -EINVAL; | ||
| 4762 | |||
| 4763 | len = sizeof(int); | ||
| 4764 | |||
| 4765 | val = sctp_sk(sk)->max_burst; | ||
| 4766 | if (put_user(len, optlen)) | ||
| 4767 | return -EFAULT; | ||
| 4768 | if (copy_to_user(optval, &val, len)) | ||
| 4769 | return -EFAULT; | ||
| 4770 | |||
| 4771 | return 0; | ||
| 4772 | } | ||
| 4773 | |||
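[ Reading a value back is the usual getsockopt() dance; brief sketch with
  the same assumed userspace constants as above. ]

#include <netinet/in.h>
#include <netinet/sctp.h>
#include <sys/socket.h>

/* Fetch the current socket-wide burst limit into *burst. */
int get_max_burst(int fd, int *burst)
{
        socklen_t len = sizeof(*burst);

        return getsockopt(fd, IPPROTO_SCTP, SCTP_MAX_BURST, burst, &len);
}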
| 4533 | SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname, | 4774 | SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname, |
| 4534 | char __user *optval, int __user *optlen) | 4775 | char __user *optval, int __user *optlen) |
| 4535 | { | 4776 | { |
| @@ -4642,10 +4883,21 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname, | |||
| 4642 | case SCTP_CONTEXT: | 4883 | case SCTP_CONTEXT: |
| 4643 | retval = sctp_getsockopt_context(sk, len, optval, optlen); | 4884 | retval = sctp_getsockopt_context(sk, len, optval, optlen); |
| 4644 | break; | 4885 | break; |
| 4886 | case SCTP_FRAGMENT_INTERLEAVE: | ||
| 4887 | retval = sctp_getsockopt_fragment_interleave(sk, len, optval, | ||
| 4888 | optlen); | ||
| 4889 | break; | ||
| 4890 | case SCTP_PARTIAL_DELIVERY_POINT: | ||
| 4891 | retval = sctp_getsockopt_partial_delivery_point(sk, len, optval, | ||
| 4892 | optlen); | ||
| 4893 | break; | ||
| 4894 | case SCTP_MAX_BURST: | ||
| 4895 | retval = sctp_getsockopt_maxburst(sk, len, optval, optlen); | ||
| 4896 | break; | ||
| 4645 | default: | 4897 | default: |
| 4646 | retval = -ENOPROTOOPT; | 4898 | retval = -ENOPROTOOPT; |
| 4647 | break; | 4899 | break; |
| 4648 | }; | 4900 | } |
| 4649 | 4901 | ||
| 4650 | sctp_release_sock(sk); | 4902 | sctp_release_sock(sk); |
| 4651 | return retval; | 4903 | return retval; |
| @@ -4970,7 +5222,8 @@ int sctp_inet_listen(struct socket *sock, int backlog) | |||
| 4970 | break; | 5222 | break; |
| 4971 | default: | 5223 | default: |
| 4972 | break; | 5224 | break; |
| 4973 | }; | 5225 | } |
| 5226 | |||
| 4974 | if (err) | 5227 | if (err) |
| 4975 | goto cleanup; | 5228 | goto cleanup; |
| 4976 | 5229 | ||
| @@ -5233,7 +5486,7 @@ SCTP_STATIC int sctp_msghdr_parse(const struct msghdr *msg, | |||
| 5233 | 5486 | ||
| 5234 | default: | 5487 | default: |
| 5235 | return -EINVAL; | 5488 | return -EINVAL; |
| 5236 | }; | 5489 | } |
| 5237 | } | 5490 | } |
| 5238 | return 0; | 5491 | return 0; |
| 5239 | } | 5492 | } |
| @@ -5638,6 +5891,36 @@ void sctp_wait_for_close(struct sock *sk, long timeout) | |||
| 5638 | finish_wait(sk->sk_sleep, &wait); | 5891 | finish_wait(sk->sk_sleep, &wait); |
| 5639 | } | 5892 | } |
| 5640 | 5893 | ||
| 5894 | static void sctp_sock_rfree_frag(struct sk_buff *skb) | ||
| 5895 | { | ||
| 5896 | struct sk_buff *frag; | ||
| 5897 | |||
| 5898 | if (!skb->data_len) | ||
| 5899 | goto done; | ||
| 5900 | |||
| 5901 | /* Don't forget the fragments. */ | ||
| 5902 | for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) | ||
| 5903 | sctp_sock_rfree_frag(frag); | ||
| 5904 | |||
| 5905 | done: | ||
| 5906 | sctp_sock_rfree(skb); | ||
| 5907 | } | ||
| 5908 | |||
| 5909 | static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk) | ||
| 5910 | { | ||
| 5911 | struct sk_buff *frag; | ||
| 5912 | |||
| 5913 | if (!skb->data_len) | ||
| 5914 | goto done; | ||
| 5915 | |||
| 5916 | /* Don't forget the fragments. */ | ||
| 5917 | for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) | ||
| 5918 | sctp_skb_set_owner_r_frag(frag, sk); | ||
| 5919 | |||
| 5920 | done: | ||
| 5921 | sctp_skb_set_owner_r(skb, sk); | ||
| 5922 | } | ||
| 5923 | |||
| 5641 | /* Populate the fields of the newsk from the oldsk and migrate the assoc | 5924 | /* Populate the fields of the newsk from the oldsk and migrate the assoc |
| 5642 | * and its messages to the newsk. | 5925 | * and its messages to the newsk. |
| 5643 | */ | 5926 | */ |
| @@ -5692,10 +5975,10 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, | |||
| 5692 | sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) { | 5975 | sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) { |
| 5693 | event = sctp_skb2event(skb); | 5976 | event = sctp_skb2event(skb); |
| 5694 | if (event->asoc == assoc) { | 5977 | if (event->asoc == assoc) { |
| 5695 | sctp_sock_rfree(skb); | 5978 | sctp_sock_rfree_frag(skb); |
| 5696 | __skb_unlink(skb, &oldsk->sk_receive_queue); | 5979 | __skb_unlink(skb, &oldsk->sk_receive_queue); |
| 5697 | __skb_queue_tail(&newsk->sk_receive_queue, skb); | 5980 | __skb_queue_tail(&newsk->sk_receive_queue, skb); |
| 5698 | sctp_skb_set_owner_r(skb, newsk); | 5981 | sctp_skb_set_owner_r_frag(skb, newsk); |
| 5699 | } | 5982 | } |
| 5700 | } | 5983 | } |
| 5701 | 5984 | ||
| @@ -5706,9 +5989,9 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, | |||
| 5706 | * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue. | 5989 | * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue. |
| 5707 | */ | 5990 | */ |
| 5708 | skb_queue_head_init(&newsp->pd_lobby); | 5991 | skb_queue_head_init(&newsp->pd_lobby); |
| 5709 | sctp_sk(newsk)->pd_mode = assoc->ulpq.pd_mode; | 5992 | atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode); |
| 5710 | 5993 | ||
| 5711 | if (sctp_sk(oldsk)->pd_mode) { | 5994 | if (atomic_read(&sctp_sk(oldsk)->pd_mode)) { |
| 5712 | struct sk_buff_head *queue; | 5995 | struct sk_buff_head *queue; |
| 5713 | 5996 | ||
| 5714 | /* Decide which queue to move pd_lobby skbs to. */ | 5997 | /* Decide which queue to move pd_lobby skbs to. */ |
| @@ -5723,10 +6006,10 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, | |||
| 5723 | sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) { | 6006 | sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) { |
| 5724 | event = sctp_skb2event(skb); | 6007 | event = sctp_skb2event(skb); |
| 5725 | if (event->asoc == assoc) { | 6008 | if (event->asoc == assoc) { |
| 5726 | sctp_sock_rfree(skb); | 6009 | sctp_sock_rfree_frag(skb); |
| 5727 | __skb_unlink(skb, &oldsp->pd_lobby); | 6010 | __skb_unlink(skb, &oldsp->pd_lobby); |
| 5728 | __skb_queue_tail(queue, skb); | 6011 | __skb_queue_tail(queue, skb); |
| 5729 | sctp_skb_set_owner_r(skb, newsk); | 6012 | sctp_skb_set_owner_r_frag(skb, newsk); |
| 5730 | } | 6013 | } |
| 5731 | } | 6014 | } |
| 5732 | 6015 | ||
| @@ -5734,8 +6017,18 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, | |||
| 5734 | * delivery to finish. | 6017 | * delivery to finish. |
| 5735 | */ | 6018 | */ |
| 5736 | if (assoc->ulpq.pd_mode) | 6019 | if (assoc->ulpq.pd_mode) |
| 5737 | sctp_clear_pd(oldsk); | 6020 | sctp_clear_pd(oldsk, NULL); |
| 6021 | |||
| 6022 | } | ||
| 6023 | |||
| 6024 | sctp_skb_for_each(skb, &assoc->ulpq.reasm, tmp) { | ||
| 6025 | sctp_sock_rfree_frag(skb); | ||
| 6026 | sctp_skb_set_owner_r_frag(skb, newsk); | ||
| 6027 | } | ||
| 5738 | 6028 | ||
| 6029 | sctp_skb_for_each(skb, &assoc->ulpq.lobby, tmp) { | ||
| 6030 | sctp_sock_rfree_frag(skb); | ||
| 6031 | sctp_skb_set_owner_r_frag(skb, newsk); | ||
| 5739 | } | 6032 | } |
| 5740 | 6033 | ||
| 5741 | /* Set the type of socket to indicate that it is peeled off from the | 6034 | /* Set the type of socket to indicate that it is peeled off from the |
diff --git a/net/sctp/transport.c b/net/sctp/transport.c index a596f5308cb1..961df275d5b9 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c | |||
| @@ -507,7 +507,7 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport, | |||
| 507 | transport->cwnd = max(transport->cwnd/2, | 507 | transport->cwnd = max(transport->cwnd/2, |
| 508 | 4*transport->asoc->pathmtu); | 508 | 4*transport->asoc->pathmtu); |
| 509 | break; | 509 | break; |
| 510 | }; | 510 | } |
| 511 | 511 | ||
| 512 | transport->partial_bytes_acked = 0; | 512 | transport->partial_bytes_acked = 0; |
| 513 | SCTP_DEBUG_PRINTK("%s: transport: %p reason: %d cwnd: " | 513 | SCTP_DEBUG_PRINTK("%s: transport: %p reason: %d cwnd: " |
| @@ -526,3 +526,35 @@ unsigned long sctp_transport_timeout(struct sctp_transport *t) | |||
| 526 | timeout += jiffies; | 526 | timeout += jiffies; |
| 527 | return timeout; | 527 | return timeout; |
| 528 | } | 528 | } |
| 529 | |||
| 530 | /* Reset transport variables to their initial values */ | ||
| 531 | void sctp_transport_reset(struct sctp_transport *t) | ||
| 532 | { | ||
| 533 | struct sctp_association *asoc = t->asoc; | ||
| 534 | |||
| 535 | /* RFC 2960 (bis), Section 5.2.4 | ||
| 536 | * All the congestion control parameters (e.g., cwnd, ssthresh) | ||
| 537 | * related to this peer MUST be reset to their initial values | ||
| 538 | * (see Section 6.2.1) | ||
| 539 | */ | ||
| 540 | t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380)); | ||
| 541 | t->ssthresh = asoc->peer.i.a_rwnd; | ||
| 542 | t->rto = asoc->rto_initial; | ||
| 543 | t->rtt = 0; | ||
| 544 | t->srtt = 0; | ||
| 545 | t->rttvar = 0; | ||
| 546 | |||
| 547 | /* Reset these additional variables so that we have a clean | ||
| 548 | * slate. | ||
| 549 | */ | ||
| 550 | t->partial_bytes_acked = 0; | ||
| 551 | t->flight_size = 0; | ||
| 552 | t->error_count = 0; | ||
| 553 | t->rto_pending = 0; | ||
| 554 | |||
| 555 | /* Initialize the state information for SFR-CACC */ | ||
| 556 | t->cacc.changeover_active = 0; | ||
| 557 | t->cacc.cycling_changeover = 0; | ||
| 558 | t->cacc.next_tsn_at_change = 0; | ||
| 559 | t->cacc.cacc_saw_newack = 0; | ||
| 560 | } | ||
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c index 2e11bc8d5d35..661ea2dd78ba 100644 --- a/net/sctp/ulpevent.c +++ b/net/sctp/ulpevent.c | |||
| @@ -131,19 +131,54 @@ static inline void sctp_ulpevent_release_owner(struct sctp_ulpevent *event) | |||
| 131 | struct sctp_ulpevent *sctp_ulpevent_make_assoc_change( | 131 | struct sctp_ulpevent *sctp_ulpevent_make_assoc_change( |
| 132 | const struct sctp_association *asoc, | 132 | const struct sctp_association *asoc, |
| 133 | __u16 flags, __u16 state, __u16 error, __u16 outbound, | 133 | __u16 flags, __u16 state, __u16 error, __u16 outbound, |
| 134 | __u16 inbound, gfp_t gfp) | 134 | __u16 inbound, struct sctp_chunk *chunk, gfp_t gfp) |
| 135 | { | 135 | { |
| 136 | struct sctp_ulpevent *event; | 136 | struct sctp_ulpevent *event; |
| 137 | struct sctp_assoc_change *sac; | 137 | struct sctp_assoc_change *sac; |
| 138 | struct sk_buff *skb; | 138 | struct sk_buff *skb; |
| 139 | 139 | ||
| 140 | event = sctp_ulpevent_new(sizeof(struct sctp_assoc_change), | 140 | /* If the lower layer passed in the chunk, it will be |
| 141 | * an ABORT, so we need to include it in the sac_info. | ||
| 142 | */ | ||
| 143 | if (chunk) { | ||
| 144 | /* sctp_inq_pop() has already pulled off the chunk | ||
| 145 | * header. We need to put it back temporarily. | ||
| 146 | */ | ||
| 147 | skb_push(chunk->skb, sizeof(sctp_chunkhdr_t)); | ||
| 148 | |||
| 149 | /* Copy the chunk data to a new skb and reserve enough | ||
| 150 | * head room to use as notification. | ||
| 151 | */ | ||
| 152 | skb = skb_copy_expand(chunk->skb, | ||
| 153 | sizeof(struct sctp_assoc_change), 0, gfp); | ||
| 154 | |||
| 155 | if (!skb) | ||
| 156 | goto fail; | ||
| 157 | |||
| 158 | /* put back the chunk header now that we have a copy */ | ||
| 159 | skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t)); | ||
| 160 | |||
| 161 | /* Embed the event fields inside the cloned skb. */ | ||
| 162 | event = sctp_skb2event(skb); | ||
| 163 | sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize); | ||
| 164 | |||
| 165 | /* Include the notification structure */ | ||
| 166 | sac = (struct sctp_assoc_change *) | ||
| 167 | skb_push(skb, sizeof(struct sctp_assoc_change)); | ||
| 168 | |||
| 169 | /* Trim the buffer to the right length. */ | ||
| 170 | skb_trim(skb, sizeof(struct sctp_assoc_change) + | ||
| 171 | ntohs(chunk->chunk_hdr->length)); | ||
| 172 | } else { | ||
| 173 | event = sctp_ulpevent_new(sizeof(struct sctp_assoc_change), | ||
| 141 | MSG_NOTIFICATION, gfp); | 174 | MSG_NOTIFICATION, gfp); |
| 142 | if (!event) | 175 | if (!event) |
| 143 | goto fail; | 176 | goto fail; |
| 144 | skb = sctp_event2skb(event); | 177 | |
| 145 | sac = (struct sctp_assoc_change *) | 178 | skb = sctp_event2skb(event); |
| 146 | skb_put(skb, sizeof(struct sctp_assoc_change)); | 179 | sac = (struct sctp_assoc_change *) skb_put(skb, |
| 180 | sizeof(struct sctp_assoc_change)); | ||
| 181 | } | ||
| 147 | 182 | ||
| 148 | /* Socket Extensions for SCTP | 183 | /* Socket Extensions for SCTP |
| 149 | * 5.3.1.1 SCTP_ASSOC_CHANGE | 184 | * 5.3.1.1 SCTP_ASSOC_CHANGE |
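[ Receive-side sketch, not part of this diff: consuming the ABORT data that
  sctp_ulpevent_make_assoc_change() now appends.  It assumes the userspace
  union sctp_notification / struct sctp_assoc_change definitions from
  <netinet/sctp.h>, including the trailing sac_info[] member. ]

#include <netinet/sctp.h>
#include <stdio.h>

/* 'sn' points at a buffer that recvmsg() flagged with MSG_NOTIFICATION and
 * whose sn_header.sn_type is SCTP_ASSOC_CHANGE. */
void handle_assoc_change(const union sctp_notification *sn)
{
        const struct sctp_assoc_change *sac = &sn->sn_assoc_change;
        size_t info_len = sac->sac_length - sizeof(*sac);

        printf("assoc %d: state %u, error %u\n", (int)sac->sac_assoc_id,
               (unsigned)sac->sac_state, (unsigned)sac->sac_error);

        /* When the association was torn down by a peer ABORT, the raw
         * chunk is carried verbatim in sac_info[]. */
        if (info_len > 0)
                printf("  %zu bytes of ABORT data in sac_info\n", info_len);
}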
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c index f4759a9bdaee..34eb977a204d 100644 --- a/net/sctp/ulpqueue.c +++ b/net/sctp/ulpqueue.c | |||
| @@ -73,7 +73,7 @@ struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq, | |||
| 73 | 73 | ||
| 74 | 74 | ||
| 75 | /* Flush the reassembly and ordering queues. */ | 75 | /* Flush the reassembly and ordering queues. */ |
| 76 | static void sctp_ulpq_flush(struct sctp_ulpq *ulpq) | 76 | void sctp_ulpq_flush(struct sctp_ulpq *ulpq) |
| 77 | { | 77 | { |
| 78 | struct sk_buff *skb; | 78 | struct sk_buff *skb; |
| 79 | struct sctp_ulpevent *event; | 79 | struct sctp_ulpevent *event; |
| @@ -138,26 +138,59 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, | |||
| 138 | /* Clear the partial delivery mode for this socket. Note: This | 138 | /* Clear the partial delivery mode for this socket. Note: This |
| 139 | * assumes that no association is currently in partial delivery mode. | 139 | * assumes that no association is currently in partial delivery mode. |
| 140 | */ | 140 | */ |
| 141 | int sctp_clear_pd(struct sock *sk) | 141 | int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc) |
| 142 | { | 142 | { |
| 143 | struct sctp_sock *sp = sctp_sk(sk); | 143 | struct sctp_sock *sp = sctp_sk(sk); |
| 144 | 144 | ||
| 145 | sp->pd_mode = 0; | 145 | if (atomic_dec_and_test(&sp->pd_mode)) { |
| 146 | if (!skb_queue_empty(&sp->pd_lobby)) { | 146 | /* This means there are no other associations in PD, so |
| 147 | struct list_head *list; | 147 | * we can go ahead and clear out the lobby in one shot |
| 148 | sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue); | 148 | */ |
| 149 | list = (struct list_head *)&sctp_sk(sk)->pd_lobby; | 149 | if (!skb_queue_empty(&sp->pd_lobby)) { |
| 150 | INIT_LIST_HEAD(list); | 150 | struct list_head *list; |
| 151 | return 1; | 151 | sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue); |
| 152 | list = (struct list_head *)&sctp_sk(sk)->pd_lobby; | ||
| 153 | INIT_LIST_HEAD(list); | ||
| 154 | return 1; | ||
| 155 | } | ||
| 156 | } else { | ||
| 157 | /* There are other associations in PD, so we only need to | ||
| 158 | * pull stuff out of the lobby that belongs to the | ||
| 159 | * association that is exiting PD (all of its notifications | ||
| 160 | * are posted here). | ||
| 161 | */ | ||
| 162 | if (!skb_queue_empty(&sp->pd_lobby) && asoc) { | ||
| 163 | struct sk_buff *skb, *tmp; | ||
| 164 | struct sctp_ulpevent *event; | ||
| 165 | |||
| 166 | sctp_skb_for_each(skb, &sp->pd_lobby, tmp) { | ||
| 167 | event = sctp_skb2event(skb); | ||
| 168 | if (event->asoc == asoc) { | ||
| 169 | __skb_unlink(skb, &sp->pd_lobby); | ||
| 170 | __skb_queue_tail(&sk->sk_receive_queue, | ||
| 171 | skb); | ||
| 172 | } | ||
| 173 | } | ||
| 174 | } | ||
| 152 | } | 175 | } |
| 176 | |||
| 153 | return 0; | 177 | return 0; |
| 154 | } | 178 | } |
| 155 | 179 | ||
| 180 | /* Set the pd_mode on the socket and ulpq */ | ||
| 181 | static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq) | ||
| 182 | { | ||
| 183 | struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk); | ||
| 184 | |||
| 185 | atomic_inc(&sp->pd_mode); | ||
| 186 | ulpq->pd_mode = 1; | ||
| 187 | } | ||
| 188 | |||
| 156 | /* Clear the pd_mode and restart any pending messages waiting for delivery. */ | 189 | /* Clear the pd_mode and restart any pending messages waiting for delivery. */ |
| 157 | static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq) | 190 | static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq) |
| 158 | { | 191 | { |
| 159 | ulpq->pd_mode = 0; | 192 | ulpq->pd_mode = 0; |
| 160 | return sctp_clear_pd(ulpq->asoc->base.sk); | 193 | return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc); |
| 161 | } | 194 | } |
| 162 | 195 | ||
| 163 | /* If the SKB of 'event' is on a list, it is the first such member | 196 | /* If the SKB of 'event' is on a list, it is the first such member |
| @@ -187,18 +220,35 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) | |||
| 187 | * the association the cause of the partial delivery. | 220 | * the association the cause of the partial delivery. |
| 188 | */ | 221 | */ |
| 189 | 222 | ||
| 190 | if (!sctp_sk(sk)->pd_mode) { | 223 | if (atomic_read(&sctp_sk(sk)->pd_mode) == 0) { |
| 191 | queue = &sk->sk_receive_queue; | 224 | queue = &sk->sk_receive_queue; |
| 192 | } else if (ulpq->pd_mode) { | 225 | } else { |
| 193 | if (event->msg_flags & MSG_NOTIFICATION) | 226 | if (ulpq->pd_mode) { |
| 194 | queue = &sctp_sk(sk)->pd_lobby; | 227 | /* If the association is in partial delivery, we |
| 195 | else { | 228 | * need to finish delivering the partially processed |
| 196 | clear_pd = event->msg_flags & MSG_EOR; | 229 | * packet before passing any other data. This is |
| 197 | queue = &sk->sk_receive_queue; | 230 | * because we don't truly support stream interleaving. |
| 231 | */ | ||
| 232 | if ((event->msg_flags & MSG_NOTIFICATION) || | ||
| 233 | (SCTP_DATA_NOT_FRAG == | ||
| 234 | (event->msg_flags & SCTP_DATA_FRAG_MASK))) | ||
| 235 | queue = &sctp_sk(sk)->pd_lobby; | ||
| 236 | else { | ||
| 237 | clear_pd = event->msg_flags & MSG_EOR; | ||
| 238 | queue = &sk->sk_receive_queue; | ||
| 239 | } | ||
| 240 | } else { | ||
| 241 | /* | ||
| 242 | * If fragment interleave is enabled, we | ||
| 243 | * can queue this to the receive queue instead | ||
| 244 | * of the lobby. | ||
| 245 | */ | ||
| 246 | if (sctp_sk(sk)->frag_interleave) | ||
| 247 | queue = &sk->sk_receive_queue; | ||
| 248 | else | ||
| 249 | queue = &sctp_sk(sk)->pd_lobby; | ||
| 198 | } | 250 | } |
| 199 | } else | 251 | } |
| 200 | queue = &sctp_sk(sk)->pd_lobby; | ||
| 201 | |||
| 202 | 252 | ||
| 203 | /* If we are harvesting multiple skbs they will be | 253 | /* If we are harvesting multiple skbs they will be |
| 204 | * collected on a list. | 254 | * collected on a list. |
| @@ -341,7 +391,7 @@ static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *qu | |||
| 341 | break; | 391 | break; |
| 342 | pos->next = pnext; | 392 | pos->next = pnext; |
| 343 | pos = pnext; | 393 | pos = pnext; |
| 344 | }; | 394 | } |
| 345 | 395 | ||
| 346 | event = sctp_skb2event(f_frag); | 396 | event = sctp_skb2event(f_frag); |
| 347 | SCTP_INC_STATS(SCTP_MIB_REASMUSRMSGS); | 397 | SCTP_INC_STATS(SCTP_MIB_REASMUSRMSGS); |
| @@ -360,6 +410,11 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_u | |||
| 360 | struct sk_buff *first_frag = NULL; | 410 | struct sk_buff *first_frag = NULL; |
| 361 | __u32 ctsn, next_tsn; | 411 | __u32 ctsn, next_tsn; |
| 362 | struct sctp_ulpevent *retval = NULL; | 412 | struct sctp_ulpevent *retval = NULL; |
| 413 | struct sk_buff *pd_first = NULL; | ||
| 414 | struct sk_buff *pd_last = NULL; | ||
| 415 | size_t pd_len = 0; | ||
| 416 | struct sctp_association *asoc; | ||
| 417 | u32 pd_point; | ||
| 363 | 418 | ||
| 364 | /* Initialized to 0 just to avoid compiler warning message. Will | 419 | /* Initialized to 0 just to avoid compiler warning message. Will |
| 365 | * never be used with this value. It is referenced only after it | 420 | * never be used with this value. It is referenced only after it |
| @@ -375,6 +430,10 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_u | |||
| 375 | * we expect to find the remaining middle fragments and the last | 430 | * we expect to find the remaining middle fragments and the last |
| 376 | * fragment in order. If not, first_frag is reset to NULL and we | 431 | * fragment in order. If not, first_frag is reset to NULL and we |
| 377 | * start the next pass when we find another first fragment. | 432 | * start the next pass when we find another first fragment. |
| 433 | * | ||
| 434 | * There is a potential to do partial delivery if the user sets | ||
| 435 | * the SCTP_PARTIAL_DELIVERY_POINT option. Let's count some things here | ||
| 436 | * to see if we can do PD. | ||
| 378 | */ | 437 | */ |
| 379 | skb_queue_walk(&ulpq->reasm, pos) { | 438 | skb_queue_walk(&ulpq->reasm, pos) { |
| 380 | cevent = sctp_skb2event(pos); | 439 | cevent = sctp_skb2event(pos); |
| @@ -382,14 +441,32 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_u | |||
| 382 | 441 | ||
| 383 | switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) { | 442 | switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) { |
| 384 | case SCTP_DATA_FIRST_FRAG: | 443 | case SCTP_DATA_FIRST_FRAG: |
| 444 | /* If this "FIRST_FRAG" is the first | ||
| 445 | * element in the queue, then count it towards | ||
| 446 | * possible PD. | ||
| 447 | */ | ||
| 448 | if (pos == ulpq->reasm.next) { | ||
| 449 | pd_first = pos; | ||
| 450 | pd_last = pos; | ||
| 451 | pd_len = pos->len; | ||
| 452 | } else { | ||
| 453 | pd_first = NULL; | ||
| 454 | pd_last = NULL; | ||
| 455 | pd_len = 0; | ||
| 456 | } | ||
| 457 | |||
| 385 | first_frag = pos; | 458 | first_frag = pos; |
| 386 | next_tsn = ctsn + 1; | 459 | next_tsn = ctsn + 1; |
| 387 | break; | 460 | break; |
| 388 | 461 | ||
| 389 | case SCTP_DATA_MIDDLE_FRAG: | 462 | case SCTP_DATA_MIDDLE_FRAG: |
| 390 | if ((first_frag) && (ctsn == next_tsn)) | 463 | if ((first_frag) && (ctsn == next_tsn)) { |
| 391 | next_tsn++; | 464 | next_tsn++; |
| 392 | else | 465 | if (pd_first) { |
| 466 | pd_last = pos; | ||
| 467 | pd_len += pos->len; | ||
| 468 | } | ||
| 469 | } else | ||
| 393 | first_frag = NULL; | 470 | first_frag = NULL; |
| 394 | break; | 471 | break; |
| 395 | 472 | ||
| @@ -399,8 +476,29 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_u | |||
| 399 | else | 476 | else |
| 400 | first_frag = NULL; | 477 | first_frag = NULL; |
| 401 | break; | 478 | break; |
| 402 | }; | 479 | } |
| 480 | } | ||
| 403 | 481 | ||
| 482 | asoc = ulpq->asoc; | ||
| 483 | if (pd_first) { | ||
| 484 | /* Make sure we can enter partial delivery. | ||
| 485 | * We can trigger partial delivery only if fragment | ||
| 486 | * interleave is set, or the socket is not already | ||
| 487 | * in partial delivery. | ||
| 488 | */ | ||
| 489 | if (!sctp_sk(asoc->base.sk)->frag_interleave && | ||
| 490 | atomic_read(&sctp_sk(asoc->base.sk)->pd_mode)) | ||
| 491 | goto done; | ||
| 492 | |||
| 493 | cevent = sctp_skb2event(pd_first); | ||
| 494 | pd_point = sctp_sk(asoc->base.sk)->pd_point; | ||
| 495 | if (pd_point && pd_point <= pd_len) { | ||
| 496 | retval = sctp_make_reassembled_event(&ulpq->reasm, | ||
| 497 | pd_first, | ||
| 498 | pd_last); | ||
| 499 | if (retval) | ||
| 500 | sctp_ulpq_set_pd(ulpq); | ||
| 501 | } | ||
| 404 | } | 502 | } |
| 405 | done: | 503 | done: |
| 406 | return retval; | 504 | return retval; |
| @@ -458,7 +556,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq | |||
| 458 | goto done; | 556 | goto done; |
| 459 | default: | 557 | default: |
| 460 | return NULL; | 558 | return NULL; |
| 461 | }; | 559 | } |
| 462 | } | 560 | } |
| 463 | 561 | ||
| 464 | /* We have the reassembled event. There is no need to look | 562 | /* We have the reassembled event. There is no need to look |
| @@ -550,7 +648,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *u | |||
| 550 | break; | 648 | break; |
| 551 | default: | 649 | default: |
| 552 | return NULL; | 650 | return NULL; |
| 553 | }; | 651 | } |
| 554 | } | 652 | } |
| 555 | 653 | ||
| 556 | /* We have the reassembled event. There is no need to look | 654 | /* We have the reassembled event. There is no need to look |
| @@ -819,19 +917,29 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq, | |||
| 819 | { | 917 | { |
| 820 | struct sctp_ulpevent *event; | 918 | struct sctp_ulpevent *event; |
| 821 | struct sctp_association *asoc; | 919 | struct sctp_association *asoc; |
| 920 | struct sctp_sock *sp; | ||
| 822 | 921 | ||
| 823 | asoc = ulpq->asoc; | 922 | asoc = ulpq->asoc; |
| 923 | sp = sctp_sk(asoc->base.sk); | ||
| 824 | 924 | ||
| 825 | /* Are we already in partial delivery mode? */ | 925 | /* If the association is already in Partial Delivery mode |
| 826 | if (!sctp_sk(asoc->base.sk)->pd_mode) { | 926 | * we have nothing to do. |
| 927 | */ | ||
| 928 | if (ulpq->pd_mode) | ||
| 929 | return; | ||
| 827 | 930 | ||
| 931 | /* If the user enabled fragment interleave socket option, | ||
| 932 | * multiple associations can enter partial delivery. | ||
| 933 | * Otherwise, we can only enter partial delivery if the | ||
| 934 | * socket is not in partial deliver mode. | ||
| 935 | */ | ||
| 936 | if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) { | ||
| 828 | /* Is partial delivery possible? */ | 937 | /* Is partial delivery possible? */ |
| 829 | event = sctp_ulpq_retrieve_first(ulpq); | 938 | event = sctp_ulpq_retrieve_first(ulpq); |
| 830 | /* Send event to the ULP. */ | 939 | /* Send event to the ULP. */ |
| 831 | if (event) { | 940 | if (event) { |
| 832 | sctp_ulpq_tail_event(ulpq, event); | 941 | sctp_ulpq_tail_event(ulpq, event); |
| 833 | sctp_sk(asoc->base.sk)->pd_mode = 1; | 942 | sctp_ulpq_set_pd(ulpq); |
| 834 | ulpq->pd_mode = 1; | ||
| 835 | return; | 943 | return; |
| 836 | } | 944 | } |
| 837 | } | 945 | } |
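[ From the application's side a partially delivered message simply arrives
  as reads without MSG_EOR until the final piece.  A minimal sketch using
  sctp_recvmsg() from lksctp-tools (link with -lsctp); error handling,
  notifications and the per-association bookkeeping that fragment interleave
  requires are omitted. ]

#include <netinet/sctp.h>
#include <sys/socket.h>
#include <sys/types.h>

/* Read one user message into buf, accumulating partial deliveries until
 * the kernel sets MSG_EOR on the final fragment. */
ssize_t recv_whole_msg(int fd, char *buf, size_t buflen)
{
        size_t off = 0;
        int flags = 0;

        do {
                ssize_t n;

                flags = 0;
                n = sctp_recvmsg(fd, buf + off, buflen - off,
                                 NULL, NULL, NULL, &flags);
                if (n <= 0)
                        return n;
                off += (size_t)n;
        } while (!(flags & MSG_EOR) && off < buflen);

        return (ssize_t)off;
}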
