path: root/net/sctp/sm_statefuns.c
author    Linus Torvalds <torvalds@linux-foundation.org>  2009-09-14 13:37:28 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-09-14 13:37:28 -0400
commit    d7e9660ad9d5e0845f52848bce31bcf5cdcdea6b (patch)
tree      c6c67d145771187b194d79d603742b31090a59d6 /net/sctp/sm_statefuns.c
parent    b8cb48aae1b8c50b37dcb7710363aa69a7a0d9ca (diff)
parent    13af7a6ea502fcdd4c0e3d7de6e332b102309491 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6: (1623 commits)
  netxen: update copyright
  netxen: fix tx timeout recovery
  netxen: fix file firmware leak
  netxen: improve pci memory access
  netxen: change firmware write size
  tg3: Fix return ring size breakage
  netxen: build fix for INET=n
  cdc-phonet: autoconfigure Phonet address
  Phonet: back-end for autoconfigured addresses
  Phonet: fix netlink address dump error handling
  ipv6: Add IFA_F_DADFAILED flag
  net: Add DEVTYPE support for Ethernet based devices
  mv643xx_eth.c: remove unused txq_set_wrr()
  ucc_geth: Fix hangs after switching from full to half duplex
  ucc_geth: Rearrange some code to avoid forward declarations
  phy/marvell: Make non-aneg speed/duplex forcing work for 88E1111 PHYs
  drivers/net/phy: introduce missing kfree
  drivers/net/wan: introduce missing kfree
  net: force bridge module(s) to be GPL
  Subject: [PATCH] appletalk: Fix skb leak when ipddp interface is not loaded
  ...

Fixed up trivial conflicts:

 - arch/x86/include/asm/socket.h converted to <asm-generic/socket.h> in the
   x86 tree. The generic header has the same new #define's, so that works
   out fine.

 - drivers/net/tun.c fix conflict between 89f56d1e9 ("tun: reuse struct sock
   fields") that switched over to using 'tun->socket.sk' instead of the
   redundantly available (and thus removed) 'tun->sk', and 2b980dbd ("lsm:
   Add hooks to the TUN driver") which added a new 'tun->sk' use.

   Noted in 'next' by Stephen Rothwell.
Diffstat (limited to 'net/sctp/sm_statefuns.c')
-rw-r--r--  net/sctp/sm_statefuns.c | 68
1 file changed, 61 insertions(+), 7 deletions(-)
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 7288192f7df5..c8fae1983dd1 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -334,6 +334,15 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
 	if (!sctp_chunk_length_valid(chunk, sizeof(sctp_init_chunk_t)))
 		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
 
+	/* If the INIT is coming toward a closing socket, we'll send back
+	 * an ABORT.  Essentially, this catches the race of INIT being
+	 * backlogged to the socket at the same time as the user issues close().
+	 * Since the socket and all its associations are going away, we
+	 * can treat this as OOTB.
+	 */
+	if (sctp_sstate(ep->base.sk, CLOSING))
+		return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+
 	/* Verify the INIT chunk before processing it. */
 	err_chunk = NULL;
 	if (!sctp_verify_init(asoc, chunk->chunk_hdr->type,
@@ -962,7 +971,7 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep,
 {
 	struct sctp_transport *transport = (struct sctp_transport *) arg;
 
-	if (asoc->overall_error_count > asoc->max_retrans) {
+	if (asoc->overall_error_count >= asoc->max_retrans) {
 		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
 				SCTP_ERROR(ETIMEDOUT));
 		/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
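The one-character change above tightens the heartbeat failure threshold: with ">=", the association is declared failed once the overall error count reaches max_retrans, rather than one heartbeat interval later. A minimal, self-contained sketch of that predicate (association_past_max_retrans is a hypothetical name, not the kernel function):

/* Illustrative only: the timeout path now treats reaching the limit,
 * not just exceeding it, as association failure. */
static int association_past_max_retrans(unsigned int overall_error_count,
					unsigned int max_retrans)
{
	return overall_error_count >= max_retrans;
}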
@@ -1106,7 +1115,8 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
 		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
 
 	/* Make sure that the HEARTBEAT-ACK chunk has a valid length. */
-	if (!sctp_chunk_length_valid(chunk, sizeof(sctp_heartbeat_chunk_t)))
+	if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t) +
+					    sizeof(sctp_sender_hb_info_t)))
 		return sctp_sf_violation_chunklen(ep, asoc, type, arg,
 						  commands);
 
@@ -2561,6 +2571,12 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown(const struct sctp_endpoint *ep,
 	chunk->subh.shutdown_hdr = sdh;
 	ctsn = ntohl(sdh->cum_tsn_ack);
 
+	if (TSN_lt(ctsn, asoc->ctsn_ack_point)) {
+		SCTP_DEBUG_PRINTK("ctsn %x\n", ctsn);
+		SCTP_DEBUG_PRINTK("ctsn_ack_point %x\n", asoc->ctsn_ack_point);
+		return SCTP_DISPOSITION_DISCARD;
+	}
+
 	/* If Cumulative TSN Ack beyond the max tsn currently
 	 * send, terminating the association and respond to the
 	 * sender with an ABORT.
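The new guard relies on serial-number ordering of 32-bit TSNs. As a hedged, self-contained sketch of that comparison (tsn_before is a hypothetical helper written in the spirit of the kernel's TSN_lt macro, not a copy of it):

#include <stdint.h>

/* Sketch: 32-bit serial-number comparison.  a is "before" b when the
 * unsigned difference a - b lands in the upper half of the 32-bit
 * space, i.e. the signed difference would be negative; the ordering
 * therefore stays correct when TSNs wrap past 0xffffffff. */
static int tsn_before(uint32_t a, uint32_t b)
{
	return (a - b) > 0x7fffffffU;
}

In that spirit, a SHUTDOWN whose Cumulative TSN Ack is before the association's ctsn_ack_point is stale and is simply discarded rather than acted on.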
@@ -2624,6 +2640,7 @@ sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(const struct sctp_endpoint *ep,
 {
 	struct sctp_chunk *chunk = arg;
 	sctp_shutdownhdr_t *sdh;
+	__u32 ctsn;
 
 	if (!sctp_vtag_verify(chunk, asoc))
 		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
@@ -2635,12 +2652,19 @@ sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(const struct sctp_endpoint *ep,
 						  commands);
 
 	sdh = (sctp_shutdownhdr_t *)chunk->skb->data;
+	ctsn = ntohl(sdh->cum_tsn_ack);
+
+	if (TSN_lt(ctsn, asoc->ctsn_ack_point)) {
+		SCTP_DEBUG_PRINTK("ctsn %x\n", ctsn);
+		SCTP_DEBUG_PRINTK("ctsn_ack_point %x\n", asoc->ctsn_ack_point);
+		return SCTP_DISPOSITION_DISCARD;
+	}
 
 	/* If Cumulative TSN Ack beyond the max tsn currently
 	 * send, terminating the association and respond to the
 	 * sender with an ABORT.
 	 */
-	if (!TSN_lt(ntohl(sdh->cum_tsn_ack), asoc->next_tsn))
+	if (!TSN_lt(ctsn, asoc->next_tsn))
 		return sctp_sf_violation_ctsn(ep, asoc, type, arg, commands);
 
 	/* verify, by checking the Cumulative TSN Ack field of the
@@ -2867,6 +2891,9 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const struct sctp_endpoint *ep,
 		goto discard_force;
 	case SCTP_IERROR_NO_DATA:
 		goto consume;
+	case SCTP_IERROR_PROTO_VIOLATION:
+		return sctp_sf_abort_violation(ep, asoc, chunk, commands,
+			(u8 *)chunk->subh.data_hdr, sizeof(sctp_datahdr_t));
 	default:
 		BUG();
 	}
@@ -2977,6 +3004,9 @@ sctp_disposition_t sctp_sf_eat_data_fast_4_4(const struct sctp_endpoint *ep,
 		break;
 	case SCTP_IERROR_NO_DATA:
 		goto consume;
+	case SCTP_IERROR_PROTO_VIOLATION:
+		return sctp_sf_abort_violation(ep, asoc, chunk, commands,
+			(u8 *)chunk->subh.data_hdr, sizeof(sctp_datahdr_t));
 	default:
 		BUG();
 	}
@@ -3519,6 +3549,12 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
 		asconf_ack = sctp_assoc_lookup_asconf_ack(asoc, hdr->serial);
 		if (!asconf_ack)
 			return SCTP_DISPOSITION_DISCARD;
+
+		/* Reset the transport so that we select the correct one
+		 * this time around.  This is to make sure that we don't
+		 * accidentally use a stale transport that's been removed.
+		 */
+		asconf_ack->transport = NULL;
 	} else {
 		/* ADDIP 5.2 E5) Otherwise, the ASCONF Chunk is discarded since
 		 * it must be either a stale packet or from an attacker.
@@ -4546,9 +4582,9 @@ sctp_disposition_t sctp_sf_do_prm_send(const struct sctp_endpoint *ep,
 				       void *arg,
 				       sctp_cmd_seq_t *commands)
 {
-	struct sctp_chunk *chunk = arg;
+	struct sctp_datamsg *msg = arg;
 
-	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(chunk));
+	sctp_add_cmd_sf(commands, SCTP_CMD_SEND_MSG, SCTP_DATAMSG(msg));
 	return SCTP_DISPOSITION_CONSUME;
 }
 
@@ -5847,6 +5883,9 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 	__u32 tsn;
 	struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;
 	struct sock *sk = asoc->base.sk;
+	u16 ssn;
+	u16 sid;
+	u8 ordered = 0;
 
 	data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data;
 	skb_pull(chunk->skb, sizeof(sctp_datahdr_t));
@@ -5986,8 +6025,10 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 	 */
 	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
 		SCTP_INC_STATS(SCTP_MIB_INUNORDERCHUNKS);
-	else
+	else {
 		SCTP_INC_STATS(SCTP_MIB_INORDERCHUNKS);
+		ordered = 1;
+	}
 
 	/* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
 	 *
@@ -5997,7 +6038,8 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 	 * with cause set to "Invalid Stream Identifier" (See Section 3.3.10)
 	 * and discard the DATA chunk.
 	 */
-	if (ntohs(data_hdr->stream) >= asoc->c.sinit_max_instreams) {
+	sid = ntohs(data_hdr->stream);
+	if (sid >= asoc->c.sinit_max_instreams) {
 		/* Mark tsn as received even though we drop it */
 		sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
 
@@ -6010,6 +6052,18 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 		return SCTP_IERROR_BAD_STREAM;
 	}
 
+	/* Check to see if the SSN is possible for this TSN.
+	 * The biggest gap we can record is 4K wide.  Since SSNs wrap
+	 * at an unsigned short, there is no way that an SSN can
+	 * wrap for a valid TSN.  We can simply check if the current
+	 * SSN is smaller than the next expected one.  If it is, it
+	 * wrapped and is invalid.
+	 */
+	ssn = ntohs(data_hdr->ssn);
+	if (ordered && SSN_lt(ssn, sctp_ssn_peek(&asoc->ssnmap->in, sid))) {
+		return SCTP_IERROR_PROTO_VIOLATION;
+	}
+
 	/* Send the data up to the user.  Note:  Schedule the
 	 * SCTP_CMD_CHUNK_ULP cmd before the SCTP_CMD_GEN_SACK, as the SACK
 	 * chunk needs the updated rwnd.
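The wrap check above is the same serial-number idea applied to 16-bit SSNs. A hedged, self-contained sketch of the comparison and of how the guard uses it (ssn_before and ssn_is_bogus are hypothetical names; the kernel uses SSN_lt and sctp_ssn_peek):

#include <stdint.h>

/* Sketch: 16-bit serial-number comparison.  a is "before" b when the
 * unsigned difference a - b lands in the upper half of the 16-bit
 * space, the portable equivalent of a negative signed 16-bit
 * difference; this stays correct across the 0xffff -> 0 wrap. */
static int ssn_before(uint16_t a, uint16_t b)
{
	return (uint16_t)(a - b) > 0x7fff;
}

/* Sketch of the guard: an ordered DATA chunk whose SSN is before the
 * stream's next expected SSN can only have wrapped, which a valid TSN
 * within the recordable gap cannot do, so it is rejected. */
static int ssn_is_bogus(int ordered, uint16_t ssn, uint16_t next_expected_ssn)
{
	return ordered && ssn_before(ssn, next_expected_ssn);
}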