diff options
author | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-08-31 03:52:22 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-08-31 03:52:22 -0400 |
commit | 0ee13079906f0e0b185b13a1fbdaf24d853baa8d (patch) | |
tree | f9f81bc64f623c59fe8f806a131e5866c570b762 | |
parent | 2d8348b429b4ae5cc47449c787881221fe43af4b (diff) | |
parent | 88282c6ecf0dacefda6090b0289d35e8a3cc6221 (diff) |
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
* 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6:
[PKTGEN]: Remove write-only variable.
[NETFILTER]: xt_tcpudp: fix wrong struct in udp_checkentry
[NET_SCHED] sch_prio.c: remove duplicate call of tc_classify()
[BRIDGE]: Fix OOPS when bridging device without ethtool.
[BRIDGE]: Packets leaking out of disabled/blocked ports.
[TCP]: Allow minimum RTO to be configurable via routing metrics.
SCTP: Fix to handle invalid parameter length correctly
SCTP: Abort on COOKIE-ECHO if backlog is exceeded.
SCTP: Correctly disable listening when backlog is 0.
SCTP: Do not retransmit chunks that are newer than rtt.
SCTP: Unconfirmed transports can't become Inactive
SCTP: Pick the correct port when binding to 0.
SCTP: Use net_ratelimit to suppress error messages print too fast
SCTP: Fix to encode PROTOCOL VIOLATION error cause correctly
SCTP: Fix sctp_addto_chunk() to add pad with correct length
SCTP: Assign stream sequence numbers to the entire message
SCTP: properly clean up fragment and ordering queues during FWD-TSN.
[PKTGEN]: Fix multiqueue oops.
[BNX2]: Add write posting comment.
[BNX2]: Use msleep().
-rw-r--r-- | drivers/net/bnx2.c | 10 | ||||
-rw-r--r-- | include/linux/rtnetlink.h | 2 | ||||
-rw-r--r-- | include/net/sctp/sm.h | 2 | ||||
-rw-r--r-- | include/net/sctp/structs.h | 1 | ||||
-rw-r--r-- | include/net/sctp/ulpqueue.h | 1 | ||||
-rw-r--r-- | net/bridge/br_fdb.c | 5 | ||||
-rw-r--r-- | net/bridge/br_if.c | 16 | ||||
-rw-r--r-- | net/bridge/br_input.c | 3 | ||||
-rw-r--r-- | net/core/pktgen.c | 8 | ||||
-rw-r--r-- | net/ipv4/tcp_input.c | 14 | ||||
-rw-r--r-- | net/netfilter/xt_tcpudp.c | 2 | ||||
-rw-r--r-- | net/sched/sch_prio.c | 2 | ||||
-rw-r--r-- | net/sctp/associola.c | 7 | ||||
-rw-r--r-- | net/sctp/outqueue.c | 7 | ||||
-rw-r--r-- | net/sctp/sm_make_chunk.c | 112 | ||||
-rw-r--r-- | net/sctp/sm_sideeffect.c | 8 | ||||
-rw-r--r-- | net/sctp/sm_statefuns.c | 51 | ||||
-rw-r--r-- | net/sctp/socket.c | 3 | ||||
-rw-r--r-- | net/sctp/ulpqueue.c | 75 |
19 files changed, 229 insertions, 100 deletions
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index 24e7f9ab3f5a..854d80c330ec 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c | |||
@@ -3934,11 +3934,13 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) | |||
3934 | /* Chip reset. */ | 3934 | /* Chip reset. */ |
3935 | REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val); | 3935 | REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val); |
3936 | 3936 | ||
3937 | /* Reading back any register after chip reset will hang the | ||
3938 | * bus on 5706 A0 and A1. The msleep below provides plenty | ||
3939 | * of margin for write posting. | ||
3940 | */ | ||
3937 | if ((CHIP_ID(bp) == CHIP_ID_5706_A0) || | 3941 | if ((CHIP_ID(bp) == CHIP_ID_5706_A0) || |
3938 | (CHIP_ID(bp) == CHIP_ID_5706_A1)) { | 3942 | (CHIP_ID(bp) == CHIP_ID_5706_A1)) |
3939 | current->state = TASK_UNINTERRUPTIBLE; | 3943 | msleep(20); |
3940 | schedule_timeout(HZ / 50); | ||
3941 | } | ||
3942 | 3944 | ||
3943 | /* Reset takes approximate 30 usec */ | 3945 | /* Reset takes approximate 30 usec */ |
3944 | for (i = 0; i < 10; i++) { | 3946 | for (i = 0; i < 10; i++) { |
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index c91476ce314a..dff3192374f8 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h | |||
@@ -351,6 +351,8 @@ enum | |||
351 | #define RTAX_INITCWND RTAX_INITCWND | 351 | #define RTAX_INITCWND RTAX_INITCWND |
352 | RTAX_FEATURES, | 352 | RTAX_FEATURES, |
353 | #define RTAX_FEATURES RTAX_FEATURES | 353 | #define RTAX_FEATURES RTAX_FEATURES |
354 | RTAX_RTO_MIN, | ||
355 | #define RTAX_RTO_MIN RTAX_RTO_MIN | ||
354 | __RTAX_MAX | 356 | __RTAX_MAX |
355 | }; | 357 | }; |
356 | 358 | ||
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index 73cb9943c8a8..991c85bb9e36 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h | |||
@@ -214,7 +214,7 @@ struct sctp_chunk *sctp_make_shutdown_ack(const struct sctp_association *asoc, | |||
214 | const struct sctp_chunk *); | 214 | const struct sctp_chunk *); |
215 | struct sctp_chunk *sctp_make_shutdown_complete(const struct sctp_association *, | 215 | struct sctp_chunk *sctp_make_shutdown_complete(const struct sctp_association *, |
216 | const struct sctp_chunk *); | 216 | const struct sctp_chunk *); |
217 | void sctp_init_cause(struct sctp_chunk *, __be16 cause, const void *, size_t); | 217 | void sctp_init_cause(struct sctp_chunk *, __be16 cause, size_t); |
218 | struct sctp_chunk *sctp_make_abort(const struct sctp_association *, | 218 | struct sctp_chunk *sctp_make_abort(const struct sctp_association *, |
219 | const struct sctp_chunk *, | 219 | const struct sctp_chunk *, |
220 | const size_t hint); | 220 | const size_t hint); |
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index ee4559b11302..c0d5848c33dc 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h | |||
@@ -726,6 +726,7 @@ int sctp_user_addto_chunk(struct sctp_chunk *chunk, int off, int len, | |||
726 | struct iovec *data); | 726 | struct iovec *data); |
727 | void sctp_chunk_free(struct sctp_chunk *); | 727 | void sctp_chunk_free(struct sctp_chunk *); |
728 | void *sctp_addto_chunk(struct sctp_chunk *, int len, const void *data); | 728 | void *sctp_addto_chunk(struct sctp_chunk *, int len, const void *data); |
729 | void *sctp_addto_param(struct sctp_chunk *, int len, const void *data); | ||
729 | struct sctp_chunk *sctp_chunkify(struct sk_buff *, | 730 | struct sctp_chunk *sctp_chunkify(struct sk_buff *, |
730 | const struct sctp_association *, | 731 | const struct sctp_association *, |
731 | struct sock *); | 732 | struct sock *); |
diff --git a/include/net/sctp/ulpqueue.h b/include/net/sctp/ulpqueue.h index 39ea3f442b47..cd33270e86dd 100644 --- a/include/net/sctp/ulpqueue.h +++ b/include/net/sctp/ulpqueue.h | |||
@@ -83,6 +83,7 @@ int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc); | |||
83 | /* Skip over an SSN. */ | 83 | /* Skip over an SSN. */ |
84 | void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn); | 84 | void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn); |
85 | 85 | ||
86 | void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *, __u32); | ||
86 | #endif /* __sctp_ulpqueue_h__ */ | 87 | #endif /* __sctp_ulpqueue_h__ */ |
87 | 88 | ||
88 | 89 | ||
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index 69b70977f000..eb57502bb264 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c | |||
@@ -384,6 +384,11 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source, | |||
384 | if (hold_time(br) == 0) | 384 | if (hold_time(br) == 0) |
385 | return; | 385 | return; |
386 | 386 | ||
387 | /* ignore packets unless we are using this port */ | ||
388 | if (!(source->state == BR_STATE_LEARNING || | ||
389 | source->state == BR_STATE_FORWARDING)) | ||
390 | return; | ||
391 | |||
387 | fdb = fdb_find(head, addr); | 392 | fdb = fdb_find(head, addr); |
388 | if (likely(fdb)) { | 393 | if (likely(fdb)) { |
389 | /* attempt to update an entry for a local interface */ | 394 | /* attempt to update an entry for a local interface */ |
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index 749f0e8f541d..9272f12f664c 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c | |||
@@ -33,17 +33,17 @@ | |||
33 | */ | 33 | */ |
34 | static int port_cost(struct net_device *dev) | 34 | static int port_cost(struct net_device *dev) |
35 | { | 35 | { |
36 | if (dev->ethtool_ops->get_settings) { | 36 | if (dev->ethtool_ops && dev->ethtool_ops->get_settings) { |
37 | struct ethtool_cmd ecmd = { ETHTOOL_GSET }; | 37 | struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET, }; |
38 | int err = dev->ethtool_ops->get_settings(dev, &ecmd); | 38 | |
39 | if (!err) { | 39 | if (!dev->ethtool_ops->get_settings(dev, &ecmd)) { |
40 | switch(ecmd.speed) { | 40 | switch(ecmd.speed) { |
41 | case SPEED_100: | ||
42 | return 19; | ||
43 | case SPEED_1000: | ||
44 | return 4; | ||
45 | case SPEED_10000: | 41 | case SPEED_10000: |
46 | return 2; | 42 | return 2; |
43 | case SPEED_1000: | ||
44 | return 4; | ||
45 | case SPEED_100: | ||
46 | return 19; | ||
47 | case SPEED_10: | 47 | case SPEED_10: |
48 | return 100; | 48 | return 100; |
49 | } | 49 | } |
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 5c18595b7616..6f468fc3357a 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c | |||
@@ -101,9 +101,8 @@ static int br_handle_local_finish(struct sk_buff *skb) | |||
101 | { | 101 | { |
102 | struct net_bridge_port *p = rcu_dereference(skb->dev->br_port); | 102 | struct net_bridge_port *p = rcu_dereference(skb->dev->br_port); |
103 | 103 | ||
104 | if (p && p->state != BR_STATE_DISABLED) | 104 | if (p) |
105 | br_fdb_update(p->br, p, eth_hdr(skb)->h_source); | 105 | br_fdb_update(p->br, p, eth_hdr(skb)->h_source); |
106 | |||
107 | return 0; /* process further */ | 106 | return 0; /* process further */ |
108 | } | 107 | } |
109 | 108 | ||
diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 7bae576ac115..36fdea71d742 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c | |||
@@ -380,7 +380,6 @@ struct pktgen_thread { | |||
380 | /* Field for thread to receive "posted" events terminate, stop ifs etc. */ | 380 | /* Field for thread to receive "posted" events terminate, stop ifs etc. */ |
381 | 381 | ||
382 | u32 control; | 382 | u32 control; |
383 | int pid; | ||
384 | int cpu; | 383 | int cpu; |
385 | 384 | ||
386 | wait_queue_head_t queue; | 385 | wait_queue_head_t queue; |
@@ -3331,8 +3330,9 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev) | |||
3331 | } | 3330 | } |
3332 | 3331 | ||
3333 | if ((netif_queue_stopped(odev) || | 3332 | if ((netif_queue_stopped(odev) || |
3334 | netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) || | 3333 | (pkt_dev->skb && |
3335 | need_resched()) { | 3334 | netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping))) || |
3335 | need_resched()) { | ||
3336 | idle_start = getCurUs(); | 3336 | idle_start = getCurUs(); |
3337 | 3337 | ||
3338 | if (!netif_running(odev)) { | 3338 | if (!netif_running(odev)) { |
@@ -3462,8 +3462,6 @@ static int pktgen_thread_worker(void *arg) | |||
3462 | 3462 | ||
3463 | init_waitqueue_head(&t->queue); | 3463 | init_waitqueue_head(&t->queue); |
3464 | 3464 | ||
3465 | t->pid = current->pid; | ||
3466 | |||
3467 | pr_debug("pktgen: starting pktgen/%d: pid=%d\n", cpu, current->pid); | 3465 | pr_debug("pktgen: starting pktgen/%d: pid=%d\n", cpu, current->pid); |
3468 | 3466 | ||
3469 | max_before_softirq = t->max_before_softirq; | 3467 | max_before_softirq = t->max_before_softirq; |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 9785df37a65f..1ee72127462b 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -555,6 +555,16 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb) | |||
555 | tcp_grow_window(sk, skb); | 555 | tcp_grow_window(sk, skb); |
556 | } | 556 | } |
557 | 557 | ||
558 | static u32 tcp_rto_min(struct sock *sk) | ||
559 | { | ||
560 | struct dst_entry *dst = __sk_dst_get(sk); | ||
561 | u32 rto_min = TCP_RTO_MIN; | ||
562 | |||
563 | if (dst_metric_locked(dst, RTAX_RTO_MIN)) | ||
564 | rto_min = dst->metrics[RTAX_RTO_MIN-1]; | ||
565 | return rto_min; | ||
566 | } | ||
567 | |||
558 | /* Called to compute a smoothed rtt estimate. The data fed to this | 568 | /* Called to compute a smoothed rtt estimate. The data fed to this |
559 | * routine either comes from timestamps, or from segments that were | 569 | * routine either comes from timestamps, or from segments that were |
560 | * known _not_ to have been retransmitted [see Karn/Partridge | 570 | * known _not_ to have been retransmitted [see Karn/Partridge |
@@ -616,13 +626,13 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt) | |||
616 | if (tp->mdev_max < tp->rttvar) | 626 | if (tp->mdev_max < tp->rttvar) |
617 | tp->rttvar -= (tp->rttvar-tp->mdev_max)>>2; | 627 | tp->rttvar -= (tp->rttvar-tp->mdev_max)>>2; |
618 | tp->rtt_seq = tp->snd_nxt; | 628 | tp->rtt_seq = tp->snd_nxt; |
619 | tp->mdev_max = TCP_RTO_MIN; | 629 | tp->mdev_max = tcp_rto_min(sk); |
620 | } | 630 | } |
621 | } else { | 631 | } else { |
622 | /* no previous measure. */ | 632 | /* no previous measure. */ |
623 | tp->srtt = m<<3; /* take the measured time to be rtt */ | 633 | tp->srtt = m<<3; /* take the measured time to be rtt */ |
624 | tp->mdev = m<<1; /* make sure rto = 3*rtt */ | 634 | tp->mdev = m<<1; /* make sure rto = 3*rtt */ |
625 | tp->mdev_max = tp->rttvar = max(tp->mdev, TCP_RTO_MIN); | 635 | tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk)); |
626 | tp->rtt_seq = tp->snd_nxt; | 636 | tp->rtt_seq = tp->snd_nxt; |
627 | } | 637 | } |
628 | } | 638 | } |
diff --git a/net/netfilter/xt_tcpudp.c b/net/netfilter/xt_tcpudp.c index ab7d845224fc..223f9bded672 100644 --- a/net/netfilter/xt_tcpudp.c +++ b/net/netfilter/xt_tcpudp.c | |||
@@ -188,7 +188,7 @@ udp_checkentry(const char *tablename, | |||
188 | void *matchinfo, | 188 | void *matchinfo, |
189 | unsigned int hook_mask) | 189 | unsigned int hook_mask) |
190 | { | 190 | { |
191 | const struct xt_tcp *udpinfo = matchinfo; | 191 | const struct xt_udp *udpinfo = matchinfo; |
192 | 192 | ||
193 | /* Must specify no unknown invflags */ | 193 | /* Must specify no unknown invflags */ |
194 | return !(udpinfo->invflags & ~XT_UDP_INV_MASK); | 194 | return !(udpinfo->invflags & ~XT_UDP_INV_MASK); |
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index 4a49db65772e..abd82fc3ec60 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c | |||
@@ -44,7 +44,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) | |||
44 | if (TC_H_MAJ(skb->priority) != sch->handle) { | 44 | if (TC_H_MAJ(skb->priority) != sch->handle) { |
45 | err = tc_classify(skb, q->filter_list, &res); | 45 | err = tc_classify(skb, q->filter_list, &res); |
46 | #ifdef CONFIG_NET_CLS_ACT | 46 | #ifdef CONFIG_NET_CLS_ACT |
47 | switch (tc_classify(skb, q->filter_list, &res)) { | 47 | switch (err) { |
48 | case TC_ACT_STOLEN: | 48 | case TC_ACT_STOLEN: |
49 | case TC_ACT_QUEUED: | 49 | case TC_ACT_QUEUED: |
50 | *qerr = NET_XMIT_SUCCESS; | 50 | *qerr = NET_XMIT_SUCCESS; |
diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 498edb0cd4e5..2ad1caf1ea42 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c | |||
@@ -727,7 +727,12 @@ void sctp_assoc_control_transport(struct sctp_association *asoc, | |||
727 | break; | 727 | break; |
728 | 728 | ||
729 | case SCTP_TRANSPORT_DOWN: | 729 | case SCTP_TRANSPORT_DOWN: |
730 | transport->state = SCTP_INACTIVE; | 730 | /* if the transort was never confirmed, do not transition it |
731 | * to inactive state. | ||
732 | */ | ||
733 | if (transport->state != SCTP_UNCONFIRMED) | ||
734 | transport->state = SCTP_INACTIVE; | ||
735 | |||
731 | spc_state = SCTP_ADDR_UNREACHABLE; | 736 | spc_state = SCTP_ADDR_UNREACHABLE; |
732 | break; | 737 | break; |
733 | 738 | ||
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index 992f361084b7..28f4fe77ceee 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c | |||
@@ -421,6 +421,13 @@ void sctp_retransmit_mark(struct sctp_outq *q, | |||
421 | */ | 421 | */ |
422 | if ((fast_retransmit && (chunk->fast_retransmit > 0)) || | 422 | if ((fast_retransmit && (chunk->fast_retransmit > 0)) || |
423 | (!fast_retransmit && !chunk->tsn_gap_acked)) { | 423 | (!fast_retransmit && !chunk->tsn_gap_acked)) { |
424 | /* If this chunk was sent less then 1 rto ago, do not | ||
425 | * retransmit this chunk, but give the peer time | ||
426 | * to acknowlege it. | ||
427 | */ | ||
428 | if ((jiffies - chunk->sent_at) < transport->rto) | ||
429 | continue; | ||
430 | |||
424 | /* RFC 2960 6.2.1 Processing a Received SACK | 431 | /* RFC 2960 6.2.1 Processing a Received SACK |
425 | * | 432 | * |
426 | * C) Any time a DATA chunk is marked for | 433 | * C) Any time a DATA chunk is marked for |
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 51c4d7fef1d2..79856c924525 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c | |||
@@ -110,7 +110,7 @@ static const struct sctp_paramhdr prsctp_param = { | |||
110 | * abort chunk. | 110 | * abort chunk. |
111 | */ | 111 | */ |
112 | void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code, | 112 | void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code, |
113 | const void *payload, size_t paylen) | 113 | size_t paylen) |
114 | { | 114 | { |
115 | sctp_errhdr_t err; | 115 | sctp_errhdr_t err; |
116 | __u16 len; | 116 | __u16 len; |
@@ -120,7 +120,6 @@ void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code, | |||
120 | len = sizeof(sctp_errhdr_t) + paylen; | 120 | len = sizeof(sctp_errhdr_t) + paylen; |
121 | err.length = htons(len); | 121 | err.length = htons(len); |
122 | chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err); | 122 | chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err); |
123 | sctp_addto_chunk(chunk, paylen, payload); | ||
124 | } | 123 | } |
125 | 124 | ||
126 | /* 3.3.2 Initiation (INIT) (1) | 125 | /* 3.3.2 Initiation (INIT) (1) |
@@ -780,8 +779,8 @@ struct sctp_chunk *sctp_make_abort_no_data( | |||
780 | 779 | ||
781 | /* Put the tsn back into network byte order. */ | 780 | /* Put the tsn back into network byte order. */ |
782 | payload = htonl(tsn); | 781 | payload = htonl(tsn); |
783 | sctp_init_cause(retval, SCTP_ERROR_NO_DATA, (const void *)&payload, | 782 | sctp_init_cause(retval, SCTP_ERROR_NO_DATA, sizeof(payload)); |
784 | sizeof(payload)); | 783 | sctp_addto_chunk(retval, sizeof(payload), (const void *)&payload); |
785 | 784 | ||
786 | /* RFC 2960 6.4 Multi-homed SCTP Endpoints | 785 | /* RFC 2960 6.4 Multi-homed SCTP Endpoints |
787 | * | 786 | * |
@@ -823,7 +822,8 @@ struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc, | |||
823 | goto err_copy; | 822 | goto err_copy; |
824 | } | 823 | } |
825 | 824 | ||
826 | sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, payload, paylen); | 825 | sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, paylen); |
826 | sctp_addto_chunk(retval, paylen, payload); | ||
827 | 827 | ||
828 | if (paylen) | 828 | if (paylen) |
829 | kfree(payload); | 829 | kfree(payload); |
@@ -850,15 +850,17 @@ struct sctp_chunk *sctp_make_abort_violation( | |||
850 | struct sctp_paramhdr phdr; | 850 | struct sctp_paramhdr phdr; |
851 | 851 | ||
852 | retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) + paylen | 852 | retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) + paylen |
853 | + sizeof(sctp_chunkhdr_t)); | 853 | + sizeof(sctp_paramhdr_t)); |
854 | if (!retval) | 854 | if (!retval) |
855 | goto end; | 855 | goto end; |
856 | 856 | ||
857 | sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, payload, paylen); | 857 | sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, paylen |
858 | + sizeof(sctp_paramhdr_t)); | ||
858 | 859 | ||
859 | phdr.type = htons(chunk->chunk_hdr->type); | 860 | phdr.type = htons(chunk->chunk_hdr->type); |
860 | phdr.length = chunk->chunk_hdr->length; | 861 | phdr.length = chunk->chunk_hdr->length; |
861 | sctp_addto_chunk(retval, sizeof(sctp_paramhdr_t), &phdr); | 862 | sctp_addto_chunk(retval, paylen, payload); |
863 | sctp_addto_param(retval, sizeof(sctp_paramhdr_t), &phdr); | ||
862 | 864 | ||
863 | end: | 865 | end: |
864 | return retval; | 866 | return retval; |
@@ -955,7 +957,8 @@ struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc, | |||
955 | if (!retval) | 957 | if (!retval) |
956 | goto nodata; | 958 | goto nodata; |
957 | 959 | ||
958 | sctp_init_cause(retval, cause_code, payload, paylen); | 960 | sctp_init_cause(retval, cause_code, paylen); |
961 | sctp_addto_chunk(retval, paylen, payload); | ||
959 | 962 | ||
960 | nodata: | 963 | nodata: |
961 | return retval; | 964 | return retval; |
@@ -1128,7 +1131,7 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data) | |||
1128 | void *target; | 1131 | void *target; |
1129 | void *padding; | 1132 | void *padding; |
1130 | int chunklen = ntohs(chunk->chunk_hdr->length); | 1133 | int chunklen = ntohs(chunk->chunk_hdr->length); |
1131 | int padlen = chunklen % 4; | 1134 | int padlen = WORD_ROUND(chunklen) - chunklen; |
1132 | 1135 | ||
1133 | padding = skb_put(chunk->skb, padlen); | 1136 | padding = skb_put(chunk->skb, padlen); |
1134 | target = skb_put(chunk->skb, len); | 1137 | target = skb_put(chunk->skb, len); |
@@ -1143,6 +1146,25 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data) | |||
1143 | return target; | 1146 | return target; |
1144 | } | 1147 | } |
1145 | 1148 | ||
1149 | /* Append bytes to the end of a parameter. Will panic if chunk is not big | ||
1150 | * enough. | ||
1151 | */ | ||
1152 | void *sctp_addto_param(struct sctp_chunk *chunk, int len, const void *data) | ||
1153 | { | ||
1154 | void *target; | ||
1155 | int chunklen = ntohs(chunk->chunk_hdr->length); | ||
1156 | |||
1157 | target = skb_put(chunk->skb, len); | ||
1158 | |||
1159 | memcpy(target, data, len); | ||
1160 | |||
1161 | /* Adjust the chunk length field. */ | ||
1162 | chunk->chunk_hdr->length = htons(chunklen + len); | ||
1163 | chunk->chunk_end = skb_tail_pointer(chunk->skb); | ||
1164 | |||
1165 | return target; | ||
1166 | } | ||
1167 | |||
1146 | /* Append bytes from user space to the end of a chunk. Will panic if | 1168 | /* Append bytes from user space to the end of a chunk. Will panic if |
1147 | * chunk is not big enough. | 1169 | * chunk is not big enough. |
1148 | * Returns a kernel err value. | 1170 | * Returns a kernel err value. |
@@ -1174,25 +1196,36 @@ out: | |||
1174 | */ | 1196 | */ |
1175 | void sctp_chunk_assign_ssn(struct sctp_chunk *chunk) | 1197 | void sctp_chunk_assign_ssn(struct sctp_chunk *chunk) |
1176 | { | 1198 | { |
1199 | struct sctp_datamsg *msg; | ||
1200 | struct sctp_chunk *lchunk; | ||
1201 | struct sctp_stream *stream; | ||
1177 | __u16 ssn; | 1202 | __u16 ssn; |
1178 | __u16 sid; | 1203 | __u16 sid; |
1179 | 1204 | ||
1180 | if (chunk->has_ssn) | 1205 | if (chunk->has_ssn) |
1181 | return; | 1206 | return; |
1182 | 1207 | ||
1183 | /* This is the last possible instant to assign a SSN. */ | 1208 | /* All fragments will be on the same stream */ |
1184 | if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) { | 1209 | sid = ntohs(chunk->subh.data_hdr->stream); |
1185 | ssn = 0; | 1210 | stream = &chunk->asoc->ssnmap->out; |
1186 | } else { | ||
1187 | sid = ntohs(chunk->subh.data_hdr->stream); | ||
1188 | if (chunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG) | ||
1189 | ssn = sctp_ssn_next(&chunk->asoc->ssnmap->out, sid); | ||
1190 | else | ||
1191 | ssn = sctp_ssn_peek(&chunk->asoc->ssnmap->out, sid); | ||
1192 | } | ||
1193 | 1211 | ||
1194 | chunk->subh.data_hdr->ssn = htons(ssn); | 1212 | /* Now assign the sequence number to the entire message. |
1195 | chunk->has_ssn = 1; | 1213 | * All fragments must have the same stream sequence number. |
1214 | */ | ||
1215 | msg = chunk->msg; | ||
1216 | list_for_each_entry(lchunk, &msg->chunks, frag_list) { | ||
1217 | if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) { | ||
1218 | ssn = 0; | ||
1219 | } else { | ||
1220 | if (lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG) | ||
1221 | ssn = sctp_ssn_next(stream, sid); | ||
1222 | else | ||
1223 | ssn = sctp_ssn_peek(stream, sid); | ||
1224 | } | ||
1225 | |||
1226 | lchunk->subh.data_hdr->ssn = htons(ssn); | ||
1227 | lchunk->has_ssn = 1; | ||
1228 | } | ||
1196 | } | 1229 | } |
1197 | 1230 | ||
1198 | /* Helper function to assign a TSN if needed. This assumes that both | 1231 | /* Helper function to assign a TSN if needed. This assumes that both |
@@ -1466,7 +1499,8 @@ no_hmac: | |||
1466 | __be32 n = htonl(usecs); | 1499 | __be32 n = htonl(usecs); |
1467 | 1500 | ||
1468 | sctp_init_cause(*errp, SCTP_ERROR_STALE_COOKIE, | 1501 | sctp_init_cause(*errp, SCTP_ERROR_STALE_COOKIE, |
1469 | &n, sizeof(n)); | 1502 | sizeof(n)); |
1503 | sctp_addto_chunk(*errp, sizeof(n), &n); | ||
1470 | *error = -SCTP_IERROR_STALE_COOKIE; | 1504 | *error = -SCTP_IERROR_STALE_COOKIE; |
1471 | } else | 1505 | } else |
1472 | *error = -SCTP_IERROR_NOMEM; | 1506 | *error = -SCTP_IERROR_NOMEM; |
@@ -1556,7 +1590,8 @@ static int sctp_process_missing_param(const struct sctp_association *asoc, | |||
1556 | report.num_missing = htonl(1); | 1590 | report.num_missing = htonl(1); |
1557 | report.type = paramtype; | 1591 | report.type = paramtype; |
1558 | sctp_init_cause(*errp, SCTP_ERROR_MISS_PARAM, | 1592 | sctp_init_cause(*errp, SCTP_ERROR_MISS_PARAM, |
1559 | &report, sizeof(report)); | 1593 | sizeof(report)); |
1594 | sctp_addto_chunk(*errp, sizeof(report), &report); | ||
1560 | } | 1595 | } |
1561 | 1596 | ||
1562 | /* Stop processing this chunk. */ | 1597 | /* Stop processing this chunk. */ |
@@ -1574,7 +1609,7 @@ static int sctp_process_inv_mandatory(const struct sctp_association *asoc, | |||
1574 | *errp = sctp_make_op_error_space(asoc, chunk, 0); | 1609 | *errp = sctp_make_op_error_space(asoc, chunk, 0); |
1575 | 1610 | ||
1576 | if (*errp) | 1611 | if (*errp) |
1577 | sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM, NULL, 0); | 1612 | sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM, 0); |
1578 | 1613 | ||
1579 | /* Stop processing this chunk. */ | 1614 | /* Stop processing this chunk. */ |
1580 | return 0; | 1615 | return 0; |
@@ -1595,9 +1630,10 @@ static int sctp_process_inv_paramlength(const struct sctp_association *asoc, | |||
1595 | *errp = sctp_make_op_error_space(asoc, chunk, payload_len); | 1630 | *errp = sctp_make_op_error_space(asoc, chunk, payload_len); |
1596 | 1631 | ||
1597 | if (*errp) { | 1632 | if (*errp) { |
1598 | sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION, error, | 1633 | sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION, |
1599 | sizeof(error)); | 1634 | sizeof(error) + sizeof(sctp_paramhdr_t)); |
1600 | sctp_addto_chunk(*errp, sizeof(sctp_paramhdr_t), param); | 1635 | sctp_addto_chunk(*errp, sizeof(error), error); |
1636 | sctp_addto_param(*errp, sizeof(sctp_paramhdr_t), param); | ||
1601 | } | 1637 | } |
1602 | 1638 | ||
1603 | return 0; | 1639 | return 0; |
@@ -1618,9 +1654,10 @@ static int sctp_process_hn_param(const struct sctp_association *asoc, | |||
1618 | if (!*errp) | 1654 | if (!*errp) |
1619 | *errp = sctp_make_op_error_space(asoc, chunk, len); | 1655 | *errp = sctp_make_op_error_space(asoc, chunk, len); |
1620 | 1656 | ||
1621 | if (*errp) | 1657 | if (*errp) { |
1622 | sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED, | 1658 | sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED, len); |
1623 | param.v, len); | 1659 | sctp_addto_chunk(*errp, len, param.v); |
1660 | } | ||
1624 | 1661 | ||
1625 | /* Stop processing this chunk. */ | 1662 | /* Stop processing this chunk. */ |
1626 | return 0; | 1663 | return 0; |
@@ -1672,10 +1709,13 @@ static int sctp_process_unk_param(const struct sctp_association *asoc, | |||
1672 | *errp = sctp_make_op_error_space(asoc, chunk, | 1709 | *errp = sctp_make_op_error_space(asoc, chunk, |
1673 | ntohs(chunk->chunk_hdr->length)); | 1710 | ntohs(chunk->chunk_hdr->length)); |
1674 | 1711 | ||
1675 | if (*errp) | 1712 | if (*errp) { |
1676 | sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM, | 1713 | sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM, |
1677 | param.v, | ||
1678 | WORD_ROUND(ntohs(param.p->length))); | 1714 | WORD_ROUND(ntohs(param.p->length))); |
1715 | sctp_addto_chunk(*errp, | ||
1716 | WORD_ROUND(ntohs(param.p->length)), | ||
1717 | param.v); | ||
1718 | } | ||
1679 | 1719 | ||
1680 | break; | 1720 | break; |
1681 | case SCTP_PARAM_ACTION_SKIP: | 1721 | case SCTP_PARAM_ACTION_SKIP: |
@@ -1690,8 +1730,10 @@ static int sctp_process_unk_param(const struct sctp_association *asoc, | |||
1690 | 1730 | ||
1691 | if (*errp) { | 1731 | if (*errp) { |
1692 | sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM, | 1732 | sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM, |
1693 | param.v, | ||
1694 | WORD_ROUND(ntohs(param.p->length))); | 1733 | WORD_ROUND(ntohs(param.p->length))); |
1734 | sctp_addto_chunk(*errp, | ||
1735 | WORD_ROUND(ntohs(param.p->length)), | ||
1736 | param.v); | ||
1695 | } else { | 1737 | } else { |
1696 | /* If there is no memory for generating the ERROR | 1738 | /* If there is no memory for generating the ERROR |
1697 | * report as specified, an ABORT will be triggered | 1739 | * report as specified, an ABORT will be triggered |
@@ -1791,7 +1833,7 @@ int sctp_verify_init(const struct sctp_association *asoc, | |||
1791 | * VIOLATION error. We build the ERROR chunk here and let the normal | 1833 | * VIOLATION error. We build the ERROR chunk here and let the normal |
1792 | * error handling code build and send the packet. | 1834 | * error handling code build and send the packet. |
1793 | */ | 1835 | */ |
1794 | if (param.v < (void*)chunk->chunk_end - sizeof(sctp_paramhdr_t)) { | 1836 | if (param.v != (void*)chunk->chunk_end) { |
1795 | sctp_process_inv_paramlength(asoc, param.p, chunk, errp); | 1837 | sctp_process_inv_paramlength(asoc, param.p, chunk, errp); |
1796 | return 0; | 1838 | return 0; |
1797 | } | 1839 | } |
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index d9fad4f6ffc3..8d7890083493 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c | |||
@@ -1013,8 +1013,9 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype, | |||
1013 | break; | 1013 | break; |
1014 | 1014 | ||
1015 | case SCTP_DISPOSITION_VIOLATION: | 1015 | case SCTP_DISPOSITION_VIOLATION: |
1016 | printk(KERN_ERR "sctp protocol violation state %d " | 1016 | if (net_ratelimit()) |
1017 | "chunkid %d\n", state, subtype.chunk); | 1017 | printk(KERN_ERR "sctp protocol violation state %d " |
1018 | "chunkid %d\n", state, subtype.chunk); | ||
1018 | break; | 1019 | break; |
1019 | 1020 | ||
1020 | case SCTP_DISPOSITION_NOT_IMPL: | 1021 | case SCTP_DISPOSITION_NOT_IMPL: |
@@ -1130,6 +1131,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, | |||
1130 | /* Move the Cumulattive TSN Ack ahead. */ | 1131 | /* Move the Cumulattive TSN Ack ahead. */ |
1131 | sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32); | 1132 | sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32); |
1132 | 1133 | ||
1134 | /* purge the fragmentation queue */ | ||
1135 | sctp_ulpq_reasm_flushtsn(&asoc->ulpq, cmd->obj.u32); | ||
1136 | |||
1133 | /* Abort any in progress partial delivery. */ | 1137 | /* Abort any in progress partial delivery. */ |
1134 | sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC); | 1138 | sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC); |
1135 | break; | 1139 | break; |
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 71cad56dd73f..177528ed3e1b 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
@@ -264,7 +264,6 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep, | |||
264 | struct sctp_chunk *err_chunk; | 264 | struct sctp_chunk *err_chunk; |
265 | struct sctp_packet *packet; | 265 | struct sctp_packet *packet; |
266 | sctp_unrecognized_param_t *unk_param; | 266 | sctp_unrecognized_param_t *unk_param; |
267 | struct sock *sk; | ||
268 | int len; | 267 | int len; |
269 | 268 | ||
270 | /* 6.10 Bundling | 269 | /* 6.10 Bundling |
@@ -285,16 +284,6 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep, | |||
285 | if (ep == sctp_sk((sctp_get_ctl_sock()))->ep) | 284 | if (ep == sctp_sk((sctp_get_ctl_sock()))->ep) |
286 | return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands); | 285 | return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands); |
287 | 286 | ||
288 | sk = ep->base.sk; | ||
289 | /* If the endpoint is not listening or if the number of associations | ||
290 | * on the TCP-style socket exceed the max backlog, respond with an | ||
291 | * ABORT. | ||
292 | */ | ||
293 | if (!sctp_sstate(sk, LISTENING) || | ||
294 | (sctp_style(sk, TCP) && | ||
295 | sk_acceptq_is_full(sk))) | ||
296 | return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands); | ||
297 | |||
298 | /* 3.1 A packet containing an INIT chunk MUST have a zero Verification | 287 | /* 3.1 A packet containing an INIT chunk MUST have a zero Verification |
299 | * Tag. | 288 | * Tag. |
300 | */ | 289 | */ |
@@ -590,6 +579,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep, | |||
590 | struct sctp_ulpevent *ev, *ai_ev = NULL; | 579 | struct sctp_ulpevent *ev, *ai_ev = NULL; |
591 | int error = 0; | 580 | int error = 0; |
592 | struct sctp_chunk *err_chk_p; | 581 | struct sctp_chunk *err_chk_p; |
582 | struct sock *sk; | ||
593 | 583 | ||
594 | /* If the packet is an OOTB packet which is temporarily on the | 584 | /* If the packet is an OOTB packet which is temporarily on the |
595 | * control endpoint, respond with an ABORT. | 585 | * control endpoint, respond with an ABORT. |
@@ -605,6 +595,15 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep, | |||
605 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) | 595 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) |
606 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | 596 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); |
607 | 597 | ||
598 | /* If the endpoint is not listening or if the number of associations | ||
599 | * on the TCP-style socket exceed the max backlog, respond with an | ||
600 | * ABORT. | ||
601 | */ | ||
602 | sk = ep->base.sk; | ||
603 | if (!sctp_sstate(sk, LISTENING) || | ||
604 | (sctp_style(sk, TCP) && sk_acceptq_is_full(sk))) | ||
605 | return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands); | ||
606 | |||
608 | /* "Decode" the chunk. We have no optional parameters so we | 607 | /* "Decode" the chunk. We have no optional parameters so we |
609 | * are in good shape. | 608 | * are in good shape. |
610 | */ | 609 | */ |
@@ -1032,19 +1031,21 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep, | |||
1032 | /* This should never happen, but lets log it if so. */ | 1031 | /* This should never happen, but lets log it if so. */ |
1033 | if (unlikely(!link)) { | 1032 | if (unlikely(!link)) { |
1034 | if (from_addr.sa.sa_family == AF_INET6) { | 1033 | if (from_addr.sa.sa_family == AF_INET6) { |
1035 | printk(KERN_WARNING | 1034 | if (net_ratelimit()) |
1036 | "%s association %p could not find address " | 1035 | printk(KERN_WARNING |
1037 | NIP6_FMT "\n", | 1036 | "%s association %p could not find address " |
1038 | __FUNCTION__, | 1037 | NIP6_FMT "\n", |
1039 | asoc, | 1038 | __FUNCTION__, |
1040 | NIP6(from_addr.v6.sin6_addr)); | 1039 | asoc, |
1040 | NIP6(from_addr.v6.sin6_addr)); | ||
1041 | } else { | 1041 | } else { |
1042 | printk(KERN_WARNING | 1042 | if (net_ratelimit()) |
1043 | "%s association %p could not find address " | 1043 | printk(KERN_WARNING |
1044 | NIPQUAD_FMT "\n", | 1044 | "%s association %p could not find address " |
1045 | __FUNCTION__, | 1045 | NIPQUAD_FMT "\n", |
1046 | asoc, | 1046 | __FUNCTION__, |
1047 | NIPQUAD(from_addr.v4.sin_addr.s_addr)); | 1047 | asoc, |
1048 | NIPQUAD(from_addr.v4.sin_addr.s_addr)); | ||
1048 | } | 1049 | } |
1049 | return SCTP_DISPOSITION_DISCARD; | 1050 | return SCTP_DISPOSITION_DISCARD; |
1050 | } | 1051 | } |
@@ -3362,7 +3363,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep, | |||
3362 | abort = sctp_make_abort(asoc, asconf_ack, | 3363 | abort = sctp_make_abort(asoc, asconf_ack, |
3363 | sizeof(sctp_errhdr_t)); | 3364 | sizeof(sctp_errhdr_t)); |
3364 | if (abort) { | 3365 | if (abort) { |
3365 | sctp_init_cause(abort, SCTP_ERROR_ASCONF_ACK, NULL, 0); | 3366 | sctp_init_cause(abort, SCTP_ERROR_ASCONF_ACK, 0); |
3366 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, | 3367 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, |
3367 | SCTP_CHUNK(abort)); | 3368 | SCTP_CHUNK(abort)); |
3368 | } | 3369 | } |
@@ -3392,7 +3393,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep, | |||
3392 | abort = sctp_make_abort(asoc, asconf_ack, | 3393 | abort = sctp_make_abort(asoc, asconf_ack, |
3393 | sizeof(sctp_errhdr_t)); | 3394 | sizeof(sctp_errhdr_t)); |
3394 | if (abort) { | 3395 | if (abort) { |
3395 | sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, NULL, 0); | 3396 | sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0); |
3396 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, | 3397 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, |
3397 | SCTP_CHUNK(abort)); | 3398 | SCTP_CHUNK(abort)); |
3398 | } | 3399 | } |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 01c6364245b7..33354602ae86 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -353,6 +353,7 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len) | |||
353 | * The function sctp_get_port_local() does duplicate address | 353 | * The function sctp_get_port_local() does duplicate address |
354 | * detection. | 354 | * detection. |
355 | */ | 355 | */ |
356 | addr->v4.sin_port = htons(snum); | ||
356 | if ((ret = sctp_get_port_local(sk, addr))) { | 357 | if ((ret = sctp_get_port_local(sk, addr))) { |
357 | if (ret == (long) sk) { | 358 | if (ret == (long) sk) { |
358 | /* This endpoint has a conflicting address. */ | 359 | /* This endpoint has a conflicting address. */ |
@@ -5202,6 +5203,7 @@ SCTP_STATIC int sctp_seqpacket_listen(struct sock *sk, int backlog) | |||
5202 | 5203 | ||
5203 | sctp_unhash_endpoint(ep); | 5204 | sctp_unhash_endpoint(ep); |
5204 | sk->sk_state = SCTP_SS_CLOSED; | 5205 | sk->sk_state = SCTP_SS_CLOSED; |
5206 | return 0; | ||
5205 | } | 5207 | } |
5206 | 5208 | ||
5207 | /* Return if we are already listening. */ | 5209 | /* Return if we are already listening. */ |
@@ -5249,6 +5251,7 @@ SCTP_STATIC int sctp_stream_listen(struct sock *sk, int backlog) | |||
5249 | 5251 | ||
5250 | sctp_unhash_endpoint(ep); | 5252 | sctp_unhash_endpoint(ep); |
5251 | sk->sk_state = SCTP_SS_CLOSED; | 5253 | sk->sk_state = SCTP_SS_CLOSED; |
5254 | return 0; | ||
5252 | } | 5255 | } |
5253 | 5256 | ||
5254 | if (sctp_sstate(sk, LISTENING)) | 5257 | if (sctp_sstate(sk, LISTENING)) |
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c index 34eb977a204d..fa0ba2a5564e 100644 --- a/net/sctp/ulpqueue.c +++ b/net/sctp/ulpqueue.c | |||
@@ -659,6 +659,46 @@ done: | |||
659 | return retval; | 659 | return retval; |
660 | } | 660 | } |
661 | 661 | ||
662 | /* | ||
663 | * Flush out stale fragments from the reassembly queue when processing | ||
664 | * a Forward TSN. | ||
665 | * | ||
666 | * RFC 3758, Section 3.6 | ||
667 | * | ||
668 | * After receiving and processing a FORWARD TSN, the data receiver MUST | ||
669 | * take cautions in updating its re-assembly queue. The receiver MUST | ||
670 | * remove any partially reassembled message, which is still missing one | ||
671 | * or more TSNs earlier than or equal to the new cumulative TSN point. | ||
672 | * In the event that the receiver has invoked the partial delivery API, | ||
673 | * a notification SHOULD also be generated to inform the upper layer API | ||
674 | * that the message being partially delivered will NOT be completed. | ||
675 | */ | ||
676 | void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn) | ||
677 | { | ||
678 | struct sk_buff *pos, *tmp; | ||
679 | struct sctp_ulpevent *event; | ||
680 | __u32 tsn; | ||
681 | |||
682 | if (skb_queue_empty(&ulpq->reasm)) | ||
683 | return; | ||
684 | |||
685 | skb_queue_walk_safe(&ulpq->reasm, pos, tmp) { | ||
686 | event = sctp_skb2event(pos); | ||
687 | tsn = event->tsn; | ||
688 | |||
689 | /* Since the entire message must be abandoned by the | ||
690 | * sender (item A3 in Section 3.5, RFC 3758), we can | ||
691 | * free all fragments on the list that are less then | ||
692 | * or equal to ctsn_point | ||
693 | */ | ||
694 | if (TSN_lte(tsn, fwd_tsn)) { | ||
695 | __skb_unlink(pos, &ulpq->reasm); | ||
696 | sctp_ulpevent_free(event); | ||
697 | } else | ||
698 | break; | ||
699 | } | ||
700 | } | ||
701 | |||
662 | /* Helper function to gather skbs that have possibly become | 702 | /* Helper function to gather skbs that have possibly become |
663 | * ordered by an an incoming chunk. | 703 | * ordered by an an incoming chunk. |
664 | */ | 704 | */ |
@@ -794,7 +834,7 @@ static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq, | |||
794 | /* Helper function to gather skbs that have possibly become | 834 | /* Helper function to gather skbs that have possibly become |
795 | * ordered by forward tsn skipping their dependencies. | 835 | * ordered by forward tsn skipping their dependencies. |
796 | */ | 836 | */ |
797 | static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq) | 837 | static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid) |
798 | { | 838 | { |
799 | struct sk_buff *pos, *tmp; | 839 | struct sk_buff *pos, *tmp; |
800 | struct sctp_ulpevent *cevent; | 840 | struct sctp_ulpevent *cevent; |
@@ -813,31 +853,40 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq) | |||
813 | csid = cevent->stream; | 853 | csid = cevent->stream; |
814 | cssn = cevent->ssn; | 854 | cssn = cevent->ssn; |
815 | 855 | ||
816 | if (cssn != sctp_ssn_peek(in, csid)) | 856 | /* Have we gone too far? */ |
857 | if (csid > sid) | ||
817 | break; | 858 | break; |
818 | 859 | ||
819 | /* Found it, so mark in the ssnmap. */ | 860 | /* Have we not gone far enough? */ |
820 | sctp_ssn_next(in, csid); | 861 | if (csid < sid) |
862 | continue; | ||
863 | |||
864 | /* see if this ssn has been marked by skipping */ | ||
865 | if (!SSN_lt(cssn, sctp_ssn_peek(in, csid))) | ||
866 | break; | ||
821 | 867 | ||
822 | __skb_unlink(pos, &ulpq->lobby); | 868 | __skb_unlink(pos, &ulpq->lobby); |
823 | if (!event) { | 869 | if (!event) |
824 | /* Create a temporary list to collect chunks on. */ | 870 | /* Create a temporary list to collect chunks on. */ |
825 | event = sctp_skb2event(pos); | 871 | event = sctp_skb2event(pos); |
826 | __skb_queue_tail(&temp, sctp_event2skb(event)); | 872 | |
827 | } else { | 873 | /* Attach all gathered skbs to the event. */ |
828 | /* Attach all gathered skbs to the event. */ | 874 | __skb_queue_tail(&temp, pos); |
829 | __skb_queue_tail(&temp, pos); | ||
830 | } | ||
831 | } | 875 | } |
832 | 876 | ||
833 | /* Send event to the ULP. 'event' is the sctp_ulpevent for | 877 | /* Send event to the ULP. 'event' is the sctp_ulpevent for |
834 | * very first SKB on the 'temp' list. | 878 | * very first SKB on the 'temp' list. |
835 | */ | 879 | */ |
836 | if (event) | 880 | if (event) { |
881 | /* see if we have more ordered that we can deliver */ | ||
882 | sctp_ulpq_retrieve_ordered(ulpq, event); | ||
837 | sctp_ulpq_tail_event(ulpq, event); | 883 | sctp_ulpq_tail_event(ulpq, event); |
884 | } | ||
838 | } | 885 | } |
839 | 886 | ||
840 | /* Skip over an SSN. */ | 887 | /* Skip over an SSN. This is used during the processing of |
888 | * Forwared TSN chunk to skip over the abandoned ordered data | ||
889 | */ | ||
841 | void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn) | 890 | void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn) |
842 | { | 891 | { |
843 | struct sctp_stream *in; | 892 | struct sctp_stream *in; |
@@ -855,7 +904,7 @@ void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn) | |||
855 | /* Go find any other chunks that were waiting for | 904 | /* Go find any other chunks that were waiting for |
856 | * ordering and deliver them if needed. | 905 | * ordering and deliver them if needed. |
857 | */ | 906 | */ |
858 | sctp_ulpq_reap_ordered(ulpq); | 907 | sctp_ulpq_reap_ordered(ulpq, sid); |
859 | return; | 908 | return; |
860 | } | 909 | } |
861 | 910 | ||