Diffstat (limited to 'net')
-rw-r--r--  net/802/psnap.c            |  17
-rw-r--r--  net/8021q/vlan_dev.c       |  12
-rw-r--r--  net/9p/conv.c              |   1
-rw-r--r--  net/9p/mux.c               |   9
-rw-r--r--  net/bridge/br_fdb.c        |   5
-rw-r--r--  net/bridge/br_if.c         |  16
-rw-r--r--  net/bridge/br_input.c      |   3
-rw-r--r--  net/bridge/br_netfilter.c  |  12
-rw-r--r--  net/bridge/br_sysfs_br.c   |   1
-rw-r--r--  net/core/dev_mcast.c       |  14
-rw-r--r--  net/core/neighbour.c       |   3
-rw-r--r--  net/core/pktgen.c          |   8
-rw-r--r--  net/dccp/ccids/ccid2.c     |   2
-rw-r--r--  net/ipv4/ah4.c             |   2
-rw-r--r--  net/ipv4/ip_sockglue.c     |   4
-rw-r--r--  net/ipv4/tcp_input.c       |  24
-rw-r--r--  net/ipv6/ip6_output.c      |   2
-rw-r--r--  net/ipv6/ipv6_sockglue.c   |   4
-rw-r--r--  net/irda/irmod.c           |   2
-rw-r--r--  net/irda/irnetlink.c       |   2
-rw-r--r--  net/netfilter/xt_tcpudp.c  |   2
-rw-r--r--  net/sched/sch_prio.c       |   2
-rw-r--r--  net/sctp/associola.c       |   7
-rw-r--r--  net/sctp/outqueue.c        |   7
-rw-r--r--  net/sctp/sm_make_chunk.c   | 112
-rw-r--r--  net/sctp/sm_sideeffect.c   |   8
-rw-r--r--  net/sctp/sm_statefuns.c    |  51
-rw-r--r--  net/sctp/socket.c          |   3
-rw-r--r--  net/sctp/ulpqueue.c        |  75
29 files changed, 290 insertions(+), 120 deletions(-)
diff --git a/net/802/psnap.c b/net/802/psnap.c
index 04ee43e7538f..31128cb92a23 100644
--- a/net/802/psnap.c
+++ b/net/802/psnap.c
@@ -55,6 +55,9 @@ static int snap_rcv(struct sk_buff *skb, struct net_device *dev,
 		.type = __constant_htons(ETH_P_SNAP),
 	};
 
+	if (unlikely(!pskb_may_pull(skb, 5)))
+		goto drop;
+
 	rcu_read_lock();
 	proto = find_snap_client(skb_transport_header(skb));
 	if (proto) {
@@ -62,14 +65,18 @@ static int snap_rcv(struct sk_buff *skb, struct net_device *dev,
 		skb->transport_header += 5;
 		skb_pull_rcsum(skb, 5);
 		rc = proto->rcvfunc(skb, dev, &snap_packet_type, orig_dev);
-	} else {
-		skb->sk = NULL;
-		kfree_skb(skb);
-		rc = 1;
 	}
-
 	rcu_read_unlock();
+
+	if (unlikely(!proto))
+		goto drop;
+
+out:
 	return rc;
+
+drop:
+	kfree_skb(skb);
+	goto out;
 }
 
 /*
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 4bab322c9f8f..328759c32d61 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -116,12 +116,22 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
 		  struct packet_type* ptype, struct net_device *orig_dev)
 {
 	unsigned char *rawp = NULL;
-	struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data);
+	struct vlan_hdr *vhdr;
 	unsigned short vid;
 	struct net_device_stats *stats;
 	unsigned short vlan_TCI;
 	__be16 proto;
 
+	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
+		return -1;
+
+	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN))) {
+		kfree_skb(skb);
+		return -1;
+	}
+
+	vhdr = (struct vlan_hdr *)(skb->data);
+
 	/* vlan_TCI = ntohs(get_unaligned(&vhdr->h_vlan_TCI)); */
 	vlan_TCI = ntohs(vhdr->h_vlan_TCI);
 
diff --git a/net/9p/conv.c b/net/9p/conv.c
index f2a041cb508a..d979d958ea19 100644
--- a/net/9p/conv.c
+++ b/net/9p/conv.c
@@ -796,6 +796,7 @@ struct p9_fcall *p9_create_twrite_u(u32 fid, u64 offset, u32 count,
 	if (err) {
 		kfree(fc);
 		fc = ERR_PTR(err);
+		goto error;
 	}
 
 	if (buf_check_overflow(bufp)) {
diff --git a/net/9p/mux.c b/net/9p/mux.c
index acb038810f39..5d70558c4c61 100644
--- a/net/9p/mux.c
+++ b/net/9p/mux.c
@@ -288,9 +288,10 @@ struct p9_conn *p9_conn_create(struct p9_transport *trans, int msize,
 	m->extended = extended;
 	m->trans = trans;
 	m->tagpool = p9_idpool_create();
-	if (!m->tagpool) {
+	if (IS_ERR(m->tagpool)) {
+		mtmp = ERR_PTR(-ENOMEM);
 		kfree(m);
-		return ERR_PTR(PTR_ERR(m->tagpool));
+		return mtmp;
 	}
 
 	m->err = 0;
@@ -308,8 +309,10 @@ struct p9_conn *p9_conn_create(struct p9_transport *trans, int msize,
 	memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
 	m->poll_task = NULL;
 	n = p9_mux_poll_start(m);
-	if (n)
+	if (n) {
+		kfree(m);
 		return ERR_PTR(n);
+	}
 
 	n = trans->poll(trans, &m->pt);
 	if (n & POLLIN) {
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 69b70977f000..eb57502bb264 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -384,6 +384,11 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
 	if (hold_time(br) == 0)
 		return;
 
+	/* ignore packets unless we are using this port */
+	if (!(source->state == BR_STATE_LEARNING ||
+	      source->state == BR_STATE_FORWARDING))
+		return;
+
 	fdb = fdb_find(head, addr);
 	if (likely(fdb)) {
 		/* attempt to update an entry for a local interface */
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 749f0e8f541d..9272f12f664c 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -33,17 +33,17 @@
  */
 static int port_cost(struct net_device *dev)
 {
-	if (dev->ethtool_ops->get_settings) {
-		struct ethtool_cmd ecmd = { ETHTOOL_GSET };
-		int err = dev->ethtool_ops->get_settings(dev, &ecmd);
-		if (!err) {
+	if (dev->ethtool_ops && dev->ethtool_ops->get_settings) {
+		struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET, };
+
+		if (!dev->ethtool_ops->get_settings(dev, &ecmd)) {
 			switch(ecmd.speed) {
-			case SPEED_100:
-				return 19;
-			case SPEED_1000:
-				return 4;
 			case SPEED_10000:
 				return 2;
+			case SPEED_1000:
+				return 4;
+			case SPEED_100:
+				return 19;
 			case SPEED_10:
 				return 100;
 			}
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 5c18595b7616..6f468fc3357a 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -101,9 +101,8 @@ static int br_handle_local_finish(struct sk_buff *skb)
 {
 	struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
 
-	if (p && p->state != BR_STATE_DISABLED)
+	if (p)
 		br_fdb_update(p->br, p, eth_hdr(skb)->h_source);
-
 	return 0;	/* process further */
 }
 
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index fa779874b9dd..3ee2022928e3 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -509,8 +509,14 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb,
 				      int (*okfn)(struct sk_buff *))
 {
 	struct iphdr *iph;
-	__u32 len;
 	struct sk_buff *skb = *pskb;
+	__u32 len = nf_bridge_encap_header_len(skb);
+
+	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
+		return NF_STOLEN;
+
+	if (unlikely(!pskb_may_pull(skb, len)))
+		goto out;
 
 	if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
 	    IS_PPPOE_IPV6(skb)) {
@@ -518,8 +524,6 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb,
 		if (!brnf_call_ip6tables)
 			return NF_ACCEPT;
 #endif
-		if ((skb = skb_share_check(*pskb, GFP_ATOMIC)) == NULL)
-			goto out;
 		nf_bridge_pull_encap_header_rcsum(skb);
 		return br_nf_pre_routing_ipv6(hook, skb, in, out, okfn);
 	}
@@ -532,8 +536,6 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb,
 	    !IS_PPPOE_IP(skb))
 		return NF_ACCEPT;
 
-	if ((skb = skb_share_check(*pskb, GFP_ATOMIC)) == NULL)
-		goto out;
 	nf_bridge_pull_encap_header_rcsum(skb);
 
 	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 88f43003b193..c65f54e0e27f 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -167,6 +167,7 @@ static ssize_t store_stp_state(struct device *d,
 		br_stp_set_enabled(br, val);
 	rtnl_unlock();
 
+	return len;
 }
 static DEVICE_ATTR(stp_state, S_IRUGO | S_IWUSR, show_stp_state,
 		   store_stp_state);
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
index 99aece1aeccf..20330c572610 100644
--- a/net/core/dev_mcast.c
+++ b/net/core/dev_mcast.c
@@ -116,11 +116,13 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
  */
 int dev_mc_sync(struct net_device *to, struct net_device *from)
 {
-	struct dev_addr_list *da;
+	struct dev_addr_list *da, *next;
 	int err = 0;
 
 	netif_tx_lock_bh(to);
-	for (da = from->mc_list; da != NULL; da = da->next) {
+	da = from->mc_list;
+	while (da != NULL) {
+		next = da->next;
 		if (!da->da_synced) {
 			err = __dev_addr_add(&to->mc_list, &to->mc_count,
 					     da->da_addr, da->da_addrlen, 0);
@@ -134,6 +136,7 @@ int dev_mc_sync(struct net_device *to, struct net_device *from)
 			__dev_addr_delete(&from->mc_list, &from->mc_count,
 					  da->da_addr, da->da_addrlen, 0);
 		}
+		da = next;
 	}
 	if (!err)
 		__dev_set_rx_mode(to);
@@ -156,12 +159,14 @@ EXPORT_SYMBOL(dev_mc_sync);
  */
 void dev_mc_unsync(struct net_device *to, struct net_device *from)
 {
-	struct dev_addr_list *da;
+	struct dev_addr_list *da, *next;
 
 	netif_tx_lock_bh(from);
 	netif_tx_lock_bh(to);
 
-	for (da = from->mc_list; da != NULL; da = da->next) {
+	da = from->mc_list;
+	while (da != NULL) {
+		next = da->next;
 		if (!da->da_synced)
 			continue;
 		__dev_addr_delete(&to->mc_list, &to->mc_count,
@@ -169,6 +174,7 @@ void dev_mc_unsync(struct net_device *to, struct net_device *from)
 		da->da_synced = 0;
 		__dev_addr_delete(&from->mc_list, &from->mc_count,
 				  da->da_addr, da->da_addrlen, 0);
+		da = next;
 	}
 	__dev_set_rx_mode(to);
 
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index ca2a1533138a..f7de8f24d8dd 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -33,6 +33,7 @@
 #include <linux/rtnetlink.h>
 #include <linux/random.h>
 #include <linux/string.h>
+#include <linux/log2.h>
 
 #define NEIGH_DEBUG 1
 
@@ -311,7 +312,7 @@ static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
 
 	NEIGH_CACHE_STAT_INC(tbl, hash_grows);
 
-	BUG_ON(new_entries & (new_entries - 1));
+	BUG_ON(!is_power_of_2(new_entries));
 	new_hash = neigh_hash_alloc(new_entries);
 	if (!new_hash)
 		return;
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 7bae576ac115..36fdea71d742 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -380,7 +380,6 @@ struct pktgen_thread {
 	/* Field for thread to receive "posted" events terminate, stop ifs etc. */
 
 	u32 control;
-	int pid;
 	int cpu;
 
 	wait_queue_head_t queue;
@@ -3331,8 +3330,9 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 	}
 
 	if ((netif_queue_stopped(odev) ||
-	     netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) ||
-	     need_resched()) {
+	     (pkt_dev->skb &&
+	      netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping))) ||
+	    need_resched()) {
 		idle_start = getCurUs();
 
 		if (!netif_running(odev)) {
@@ -3462,8 +3462,6 @@ static int pktgen_thread_worker(void *arg)
 
 	init_waitqueue_head(&t->queue);
 
-	t->pid = current->pid;
-
 	pr_debug("pktgen: starting pktgen/%d: pid=%d\n", cpu, current->pid);
 
 	max_before_softirq = t->max_before_softirq;
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 248d20f4c7c4..d29b88fe723c 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -298,7 +298,7 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len)
 		int rc;
 
 		ccid2_pr_debug("allocating more space in history\n");
-		rc = ccid2_hc_tx_alloc_seq(hctx, CCID2_SEQBUF_LEN, GFP_KERNEL);
+		rc = ccid2_hc_tx_alloc_seq(hctx, CCID2_SEQBUF_LEN, gfp_any());
 		BUG_ON(rc);	/* XXX what do we do? */
 
 		next = hctx->ccid2hctx_seqh->ccid2s_next;
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 7a23e59c374a..39f6211f1496 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -46,7 +46,7 @@ static int ip_clear_mutable_options(struct iphdr *iph, __be32 *daddr)
 			memcpy(daddr, optptr+optlen-4, 4);
 			/* Fall through */
 		default:
-			memset(optptr+2, 0, optlen-2);
+			memset(optptr, 0, optlen);
 		}
 		l -= optlen;
 		optptr += optlen;
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 4d544573f48a..6b420aedcdcf 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -625,6 +625,10 @@ static int do_ip_setsockopt(struct sock *sk, int level,
 	{
 		struct ip_mreqn mreq;
 
+		err = -EPROTO;
+		if (inet_sk(sk)->is_icsk)
+			break;
+
 		if (optlen < sizeof(struct ip_mreq))
 			goto e_inval;
 		err = -EFAULT;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index f030435e0eb4..1ee72127462b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -555,6 +555,16 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
 		tcp_grow_window(sk, skb);
 }
 
+static u32 tcp_rto_min(struct sock *sk)
+{
+	struct dst_entry *dst = __sk_dst_get(sk);
+	u32 rto_min = TCP_RTO_MIN;
+
+	if (dst_metric_locked(dst, RTAX_RTO_MIN))
+		rto_min = dst->metrics[RTAX_RTO_MIN-1];
+	return rto_min;
+}
+
 /* Called to compute a smoothed rtt estimate. The data fed to this
  * routine either comes from timestamps, or from segments that were
  * known _not_ to have been retransmitted [see Karn/Partridge
@@ -616,13 +626,13 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
 			if (tp->mdev_max < tp->rttvar)
 				tp->rttvar -= (tp->rttvar-tp->mdev_max)>>2;
 			tp->rtt_seq = tp->snd_nxt;
-			tp->mdev_max = TCP_RTO_MIN;
+			tp->mdev_max = tcp_rto_min(sk);
 		}
 	} else {
 		/* no previous measure. */
 		tp->srtt = m<<3;	/* take the measured time to be rtt */
 		tp->mdev = m<<1;	/* make sure rto = 3*rtt */
-		tp->mdev_max = tp->rttvar = max(tp->mdev, TCP_RTO_MIN);
+		tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
 		tp->rtt_seq = tp->snd_nxt;
 	}
 }
@@ -755,7 +765,15 @@ void tcp_update_metrics(struct sock *sk)
 	}
 }
 
-/* Numbers are taken from RFC2414. */
+/* Numbers are taken from RFC3390.
+ *
+ * John Heffner states:
+ *
+ *	The RFC specifies a window of no more than 4380 bytes
+ *	unless 2*MSS > 4380. Reading the pseudocode in the RFC
+ *	is a bit misleading because they use a clamp at 4380 bytes
+ *	rather than use a multiplier in the relevant range.
+ */
 __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst)
 {
 	__u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 50d86e94d9ed..5dead399fe64 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -794,7 +794,7 @@ slow_path:
 		/*
 		 *	Copy a block of the IP datagram.
 		 */
-		if (skb_copy_bits(skb, ptr, skb_transport_header(skb), len))
+		if (skb_copy_bits(skb, ptr, skb_transport_header(frag), len))
 			BUG();
 		left -= len;
 
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 761a910f4f97..6b038aa72e88 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -554,6 +554,10 @@ done:
 	{
 		struct ipv6_mreq mreq;
 
+		retv = -EPROTO;
+		if (inet_sk(sk)->is_icsk)
+			break;
+
 		retv = -EFAULT;
 		if (copy_from_user(&mreq, optval, sizeof(struct ipv6_mreq)))
 			break;
diff --git a/net/irda/irmod.c b/net/irda/irmod.c
index 1900937b3328..8ba703da2797 100644
--- a/net/irda/irmod.c
+++ b/net/irda/irmod.c
@@ -128,8 +128,8 @@ static int __init irda_init(void)
  out_err_3:
 #ifdef CONFIG_SYSCTL
 	irda_sysctl_unregister();
-#endif
  out_err_2:
+#endif
 #ifdef CONFIG_PROC_FS
 	irda_proc_unregister();
 #endif
diff --git a/net/irda/irnetlink.c b/net/irda/irnetlink.c
index 694ea4d92fa8..1e429c929739 100644
--- a/net/irda/irnetlink.c
+++ b/net/irda/irnetlink.c
@@ -106,7 +106,7 @@ static int irda_nl_get_mode(struct sk_buff *skb, struct genl_info *info)
 	}
 
 	if(nla_put_string(msg, IRDA_NL_ATTR_IFNAME,
-			  dev->name));
+			  dev->name))
 		goto err_out;
 
 	if(nla_put_u32(msg, IRDA_NL_ATTR_MODE, irlap->mode))
diff --git a/net/netfilter/xt_tcpudp.c b/net/netfilter/xt_tcpudp.c
index ab7d845224fc..223f9bded672 100644
--- a/net/netfilter/xt_tcpudp.c
+++ b/net/netfilter/xt_tcpudp.c
@@ -188,7 +188,7 @@ udp_checkentry(const char *tablename,
 	       void *matchinfo,
 	       unsigned int hook_mask)
 {
-	const struct xt_tcp *udpinfo = matchinfo;
+	const struct xt_udp *udpinfo = matchinfo;
 
 	/* Must specify no unknown invflags */
 	return !(udpinfo->invflags & ~XT_UDP_INV_MASK);
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 4a49db65772e..abd82fc3ec60 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -44,7 +44,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 	if (TC_H_MAJ(skb->priority) != sch->handle) {
 		err = tc_classify(skb, q->filter_list, &res);
 #ifdef CONFIG_NET_CLS_ACT
-		switch (tc_classify(skb, q->filter_list, &res)) {
+		switch (err) {
 		case TC_ACT_STOLEN:
 		case TC_ACT_QUEUED:
 			*qerr = NET_XMIT_SUCCESS;
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 498edb0cd4e5..2ad1caf1ea42 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -727,7 +727,12 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
 		break;
 
 	case SCTP_TRANSPORT_DOWN:
-		transport->state = SCTP_INACTIVE;
+		/* if the transport was never confirmed, do not transition it
+		 * to inactive state.
+		 */
+		if (transport->state != SCTP_UNCONFIRMED)
+			transport->state = SCTP_INACTIVE;
+
 		spc_state = SCTP_ADDR_UNREACHABLE;
 		break;
 
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 992f361084b7..28f4fe77ceee 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -421,6 +421,13 @@ void sctp_retransmit_mark(struct sctp_outq *q,
 		 */
 		if ((fast_retransmit && (chunk->fast_retransmit > 0)) ||
 		    (!fast_retransmit && !chunk->tsn_gap_acked)) {
+			/* If this chunk was sent less than 1 rto ago, do not
+			 * retransmit this chunk, but give the peer time
+			 * to acknowledge it.
+			 */
+			if ((jiffies - chunk->sent_at) < transport->rto)
+				continue;
+
 			/* RFC 2960 6.2.1 Processing a Received SACK
 			 *
 			 * C) Any time a DATA chunk is marked for
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 51c4d7fef1d2..79856c924525 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -110,7 +110,7 @@ static const struct sctp_paramhdr prsctp_param = {
  * abort chunk.
  */
 void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
-		     const void *payload, size_t paylen)
+		     size_t paylen)
 {
 	sctp_errhdr_t err;
 	__u16 len;
@@ -120,7 +120,6 @@ void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
 	len = sizeof(sctp_errhdr_t) + paylen;
 	err.length = htons(len);
 	chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err);
-	sctp_addto_chunk(chunk, paylen, payload);
 }
 
 /* 3.3.2 Initiation (INIT) (1)
@@ -780,8 +779,8 @@ struct sctp_chunk *sctp_make_abort_no_data(
 
 	/* Put the tsn back into network byte order. */
 	payload = htonl(tsn);
-	sctp_init_cause(retval, SCTP_ERROR_NO_DATA, (const void *)&payload,
-			sizeof(payload));
+	sctp_init_cause(retval, SCTP_ERROR_NO_DATA, sizeof(payload));
+	sctp_addto_chunk(retval, sizeof(payload), (const void *)&payload);
 
 	/* RFC 2960 6.4 Multi-homed SCTP Endpoints
 	 *
@@ -823,7 +822,8 @@ struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc,
 			goto err_copy;
 	}
 
-	sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, payload, paylen);
+	sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, paylen);
+	sctp_addto_chunk(retval, paylen, payload);
 
 	if (paylen)
 		kfree(payload);
@@ -850,15 +850,17 @@ struct sctp_chunk *sctp_make_abort_violation(
 	struct sctp_paramhdr phdr;
 
 	retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) + paylen
-					+ sizeof(sctp_chunkhdr_t));
+					+ sizeof(sctp_paramhdr_t));
 	if (!retval)
 		goto end;
 
-	sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, payload, paylen);
+	sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, paylen
+					+ sizeof(sctp_paramhdr_t));
 
 	phdr.type = htons(chunk->chunk_hdr->type);
 	phdr.length = chunk->chunk_hdr->length;
-	sctp_addto_chunk(retval, sizeof(sctp_paramhdr_t), &phdr);
+	sctp_addto_chunk(retval, paylen, payload);
+	sctp_addto_param(retval, sizeof(sctp_paramhdr_t), &phdr);
 
 end:
 	return retval;
@@ -955,7 +957,8 @@ struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc,
 	if (!retval)
 		goto nodata;
 
-	sctp_init_cause(retval, cause_code, payload, paylen);
+	sctp_init_cause(retval, cause_code, paylen);
+	sctp_addto_chunk(retval, paylen, payload);
 
 nodata:
 	return retval;
@@ -1128,7 +1131,7 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data)
 	void *target;
 	void *padding;
 	int chunklen = ntohs(chunk->chunk_hdr->length);
-	int padlen = chunklen % 4;
+	int padlen = WORD_ROUND(chunklen) - chunklen;
 
 	padding = skb_put(chunk->skb, padlen);
 	target = skb_put(chunk->skb, len);
@@ -1143,6 +1146,25 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data)
 	return target;
 }
 
+/* Append bytes to the end of a parameter. Will panic if chunk is not big
+ * enough.
+ */
+void *sctp_addto_param(struct sctp_chunk *chunk, int len, const void *data)
+{
+	void *target;
+	int chunklen = ntohs(chunk->chunk_hdr->length);
+
+	target = skb_put(chunk->skb, len);
+
+	memcpy(target, data, len);
+
+	/* Adjust the chunk length field. */
+	chunk->chunk_hdr->length = htons(chunklen + len);
+	chunk->chunk_end = skb_tail_pointer(chunk->skb);
+
+	return target;
+}
+
 /* Append bytes from user space to the end of a chunk. Will panic if
  * chunk is not big enough.
  * Returns a kernel err value.
@@ -1174,25 +1196,36 @@ out:
  */
 void sctp_chunk_assign_ssn(struct sctp_chunk *chunk)
 {
+	struct sctp_datamsg *msg;
+	struct sctp_chunk *lchunk;
+	struct sctp_stream *stream;
 	__u16 ssn;
 	__u16 sid;
 
 	if (chunk->has_ssn)
 		return;
 
-	/* This is the last possible instant to assign a SSN. */
-	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
-		ssn = 0;
-	} else {
-		sid = ntohs(chunk->subh.data_hdr->stream);
-		if (chunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG)
-			ssn = sctp_ssn_next(&chunk->asoc->ssnmap->out, sid);
-		else
-			ssn = sctp_ssn_peek(&chunk->asoc->ssnmap->out, sid);
-	}
+	/* All fragments will be on the same stream */
+	sid = ntohs(chunk->subh.data_hdr->stream);
+	stream = &chunk->asoc->ssnmap->out;
 
-	chunk->subh.data_hdr->ssn = htons(ssn);
-	chunk->has_ssn = 1;
+	/* Now assign the sequence number to the entire message.
+	 * All fragments must have the same stream sequence number.
+	 */
+	msg = chunk->msg;
+	list_for_each_entry(lchunk, &msg->chunks, frag_list) {
+		if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
+			ssn = 0;
+		} else {
+			if (lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG)
+				ssn = sctp_ssn_next(stream, sid);
+			else
+				ssn = sctp_ssn_peek(stream, sid);
+		}
+
+		lchunk->subh.data_hdr->ssn = htons(ssn);
+		lchunk->has_ssn = 1;
+	}
 }
 
 /* Helper function to assign a TSN if needed. This assumes that both
@@ -1466,7 +1499,8 @@ no_hmac:
 			__be32 n = htonl(usecs);
 
 			sctp_init_cause(*errp, SCTP_ERROR_STALE_COOKIE,
-					&n, sizeof(n));
+					sizeof(n));
+			sctp_addto_chunk(*errp, sizeof(n), &n);
 			*error = -SCTP_IERROR_STALE_COOKIE;
 		} else
 			*error = -SCTP_IERROR_NOMEM;
@@ -1556,7 +1590,8 @@ static int sctp_process_missing_param(const struct sctp_association *asoc,
 		report.num_missing = htonl(1);
 		report.type = paramtype;
 		sctp_init_cause(*errp, SCTP_ERROR_MISS_PARAM,
-				&report, sizeof(report));
+				sizeof(report));
+		sctp_addto_chunk(*errp, sizeof(report), &report);
 	}
 
 	/* Stop processing this chunk. */
@@ -1574,7 +1609,7 @@ static int sctp_process_inv_mandatory(const struct sctp_association *asoc,
 	*errp = sctp_make_op_error_space(asoc, chunk, 0);
 
 	if (*errp)
-		sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM, NULL, 0);
+		sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM, 0);
 
 	/* Stop processing this chunk. */
 	return 0;
@@ -1595,9 +1630,10 @@ static int sctp_process_inv_paramlength(const struct sctp_association *asoc,
 	*errp = sctp_make_op_error_space(asoc, chunk, payload_len);
 
 	if (*errp) {
-		sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION, error,
-				sizeof(error));
-		sctp_addto_chunk(*errp, sizeof(sctp_paramhdr_t), param);
+		sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION,
+				sizeof(error) + sizeof(sctp_paramhdr_t));
+		sctp_addto_chunk(*errp, sizeof(error), error);
+		sctp_addto_param(*errp, sizeof(sctp_paramhdr_t), param);
 	}
 
 	return 0;
@@ -1618,9 +1654,10 @@ static int sctp_process_hn_param(const struct sctp_association *asoc,
 	if (!*errp)
 		*errp = sctp_make_op_error_space(asoc, chunk, len);
 
-	if (*errp)
-		sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED,
-				param.v, len);
+	if (*errp) {
+		sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED, len);
+		sctp_addto_chunk(*errp, len, param.v);
+	}
 
 	/* Stop processing this chunk. */
 	return 0;
@@ -1672,10 +1709,13 @@ static int sctp_process_unk_param(const struct sctp_association *asoc,
 		*errp = sctp_make_op_error_space(asoc, chunk,
 					ntohs(chunk->chunk_hdr->length));
 
-		if (*errp)
+		if (*errp) {
 			sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
-					param.v,
 					WORD_ROUND(ntohs(param.p->length)));
+			sctp_addto_chunk(*errp,
+					WORD_ROUND(ntohs(param.p->length)),
+					param.v);
+		}
 
 		break;
 	case SCTP_PARAM_ACTION_SKIP:
@@ -1690,8 +1730,10 @@ static int sctp_process_unk_param(const struct sctp_association *asoc,
 
 		if (*errp) {
 			sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
-					param.v,
 					WORD_ROUND(ntohs(param.p->length)));
+			sctp_addto_chunk(*errp,
+					WORD_ROUND(ntohs(param.p->length)),
+					param.v);
 		} else {
 			/* If there is no memory for generating the ERROR
 			 * report as specified, an ABORT will be triggered
@@ -1791,7 +1833,7 @@ int sctp_verify_init(const struct sctp_association *asoc,
 	 * VIOLATION error. We build the ERROR chunk here and let the normal
 	 * error handling code build and send the packet.
 	 */
-	if (param.v < (void*)chunk->chunk_end - sizeof(sctp_paramhdr_t)) {
+	if (param.v != (void*)chunk->chunk_end) {
 		sctp_process_inv_paramlength(asoc, param.p, chunk, errp);
 		return 0;
 	}
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index d9fad4f6ffc3..8d7890083493 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1013,8 +1013,9 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
 		break;
 
 	case SCTP_DISPOSITION_VIOLATION:
-		printk(KERN_ERR "sctp protocol violation state %d "
-		       "chunkid %d\n", state, subtype.chunk);
+		if (net_ratelimit())
+			printk(KERN_ERR "sctp protocol violation state %d "
+			       "chunkid %d\n", state, subtype.chunk);
 		break;
 
 	case SCTP_DISPOSITION_NOT_IMPL:
@@ -1130,6 +1131,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
 		/* Move the Cumulative TSN Ack ahead. */
 		sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32);
 
+		/* purge the fragmentation queue */
+		sctp_ulpq_reasm_flushtsn(&asoc->ulpq, cmd->obj.u32);
+
 		/* Abort any in progress partial delivery. */
 		sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
 		break;
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 71cad56dd73f..177528ed3e1b 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -264,7 +264,6 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
 	struct sctp_chunk *err_chunk;
 	struct sctp_packet *packet;
 	sctp_unrecognized_param_t *unk_param;
-	struct sock *sk;
 	int len;
 
 	/* 6.10 Bundling
@@ -285,16 +284,6 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
 	if (ep == sctp_sk((sctp_get_ctl_sock()))->ep)
 		return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
 
-	sk = ep->base.sk;
-	/* If the endpoint is not listening or if the number of associations
-	 * on the TCP-style socket exceed the max backlog, respond with an
-	 * ABORT.
-	 */
-	if (!sctp_sstate(sk, LISTENING) ||
-	    (sctp_style(sk, TCP) &&
-	     sk_acceptq_is_full(sk)))
-		return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
-
 	/* 3.1 A packet containing an INIT chunk MUST have a zero Verification
 	 * Tag.
 	 */
@@ -590,6 +579,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
 	struct sctp_ulpevent *ev, *ai_ev = NULL;
 	int error = 0;
 	struct sctp_chunk *err_chk_p;
+	struct sock *sk;
 
 	/* If the packet is an OOTB packet which is temporarily on the
 	 * control endpoint, respond with an ABORT.
@@ -605,6 +595,15 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
 	if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
 		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
 
+	/* If the endpoint is not listening or if the number of associations
+	 * on the TCP-style socket exceed the max backlog, respond with an
+	 * ABORT.
+	 */
+	sk = ep->base.sk;
+	if (!sctp_sstate(sk, LISTENING) ||
+	    (sctp_style(sk, TCP) && sk_acceptq_is_full(sk)))
+		return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+
 	/* "Decode" the chunk. We have no optional parameters so we
 	 * are in good shape.
 	 */
@@ -1032,19 +1031,21 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
 	/* This should never happen, but lets log it if so. */
 	if (unlikely(!link)) {
 		if (from_addr.sa.sa_family == AF_INET6) {
-			printk(KERN_WARNING
-			       "%s association %p could not find address "
-			       NIP6_FMT "\n",
-			       __FUNCTION__,
-			       asoc,
-			       NIP6(from_addr.v6.sin6_addr));
+			if (net_ratelimit())
+				printk(KERN_WARNING
+				       "%s association %p could not find address "
+				       NIP6_FMT "\n",
+				       __FUNCTION__,
+				       asoc,
+				       NIP6(from_addr.v6.sin6_addr));
 		} else {
-			printk(KERN_WARNING
-			       "%s association %p could not find address "
-			       NIPQUAD_FMT "\n",
-			       __FUNCTION__,
-			       asoc,
-			       NIPQUAD(from_addr.v4.sin_addr.s_addr));
+			if (net_ratelimit())
+				printk(KERN_WARNING
+				       "%s association %p could not find address "
+				       NIPQUAD_FMT "\n",
+				       __FUNCTION__,
+				       asoc,
+				       NIPQUAD(from_addr.v4.sin_addr.s_addr));
 		}
 		return SCTP_DISPOSITION_DISCARD;
 	}
@@ -3362,7 +3363,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
 		abort = sctp_make_abort(asoc, asconf_ack,
 					sizeof(sctp_errhdr_t));
 		if (abort) {
-			sctp_init_cause(abort, SCTP_ERROR_ASCONF_ACK, NULL, 0);
+			sctp_init_cause(abort, SCTP_ERROR_ASCONF_ACK, 0);
 			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
 					SCTP_CHUNK(abort));
 		}
@@ -3392,7 +3393,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
 		abort = sctp_make_abort(asoc, asconf_ack,
 					sizeof(sctp_errhdr_t));
 		if (abort) {
-			sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, NULL, 0);
+			sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0);
 			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
 					SCTP_CHUNK(abort));
 		}
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 01c6364245b7..33354602ae86 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -353,6 +353,7 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
 	 * The function sctp_get_port_local() does duplicate address
 	 * detection.
 	 */
+	addr->v4.sin_port = htons(snum);
 	if ((ret = sctp_get_port_local(sk, addr))) {
 		if (ret == (long) sk) {
 			/* This endpoint has a conflicting address. */
@@ -5202,6 +5203,7 @@ SCTP_STATIC int sctp_seqpacket_listen(struct sock *sk, int backlog)
 
 		sctp_unhash_endpoint(ep);
 		sk->sk_state = SCTP_SS_CLOSED;
+		return 0;
 	}
 
 	/* Return if we are already listening. */
@@ -5249,6 +5251,7 @@ SCTP_STATIC int sctp_stream_listen(struct sock *sk, int backlog)
 
 		sctp_unhash_endpoint(ep);
 		sk->sk_state = SCTP_SS_CLOSED;
+		return 0;
 	}
 
 	if (sctp_sstate(sk, LISTENING))
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 34eb977a204d..fa0ba2a5564e 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -659,6 +659,46 @@ done:
 	return retval;
 }
 
+/*
+ * Flush out stale fragments from the reassembly queue when processing
+ * a Forward TSN.
+ *
+ * RFC 3758, Section 3.6
+ *
+ * After receiving and processing a FORWARD TSN, the data receiver MUST
+ * take cautions in updating its re-assembly queue. The receiver MUST
+ * remove any partially reassembled message, which is still missing one
+ * or more TSNs earlier than or equal to the new cumulative TSN point.
+ * In the event that the receiver has invoked the partial delivery API,
+ * a notification SHOULD also be generated to inform the upper layer API
+ * that the message being partially delivered will NOT be completed.
+ */
+void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
+{
+	struct sk_buff *pos, *tmp;
+	struct sctp_ulpevent *event;
+	__u32 tsn;
+
+	if (skb_queue_empty(&ulpq->reasm))
+		return;
+
+	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
+		event = sctp_skb2event(pos);
+		tsn = event->tsn;
+
+		/* Since the entire message must be abandoned by the
+		 * sender (item A3 in Section 3.5, RFC 3758), we can
+		 * free all fragments on the list that are less than
+		 * or equal to ctsn_point
+		 */
+		if (TSN_lte(tsn, fwd_tsn)) {
+			__skb_unlink(pos, &ulpq->reasm);
+			sctp_ulpevent_free(event);
+		} else
+			break;
+	}
+}
+
 /* Helper function to gather skbs that have possibly become
  * ordered by an incoming chunk.
  */
@@ -794,7 +834,7 @@ static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
 /* Helper function to gather skbs that have possibly become
  * ordered by forward tsn skipping their dependencies.
  */
-static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
+static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
 {
 	struct sk_buff *pos, *tmp;
 	struct sctp_ulpevent *cevent;
@@ -813,31 +853,40 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
 		csid = cevent->stream;
 		cssn = cevent->ssn;
 
-		if (cssn != sctp_ssn_peek(in, csid))
+		/* Have we gone too far? */
+		if (csid > sid)
 			break;
 
-		/* Found it, so mark in the ssnmap. */
-		sctp_ssn_next(in, csid);
+		/* Have we not gone far enough? */
+		if (csid < sid)
+			continue;
+
+		/* see if this ssn has been marked by skipping */
+		if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
+			break;
 
 		__skb_unlink(pos, &ulpq->lobby);
-		if (!event) {
+		if (!event)
 			/* Create a temporary list to collect chunks on. */
 			event = sctp_skb2event(pos);
-			__skb_queue_tail(&temp, sctp_event2skb(event));
-		} else {
-			/* Attach all gathered skbs to the event. */
-			__skb_queue_tail(&temp, pos);
-		}
+
+		/* Attach all gathered skbs to the event. */
+		__skb_queue_tail(&temp, pos);
 	}
 
 	/* Send event to the ULP. 'event' is the sctp_ulpevent for
 	 * very first SKB on the 'temp' list.
 	 */
-	if (event)
+	if (event) {
+		/* see if we have more ordered that we can deliver */
+		sctp_ulpq_retrieve_ordered(ulpq, event);
 		sctp_ulpq_tail_event(ulpq, event);
+	}
 }
 
-/* Skip over an SSN. */
+/* Skip over an SSN. This is used during the processing of
+ * Forward TSN chunk to skip over the abandoned ordered data
+ */
 void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
 {
 	struct sctp_stream *in;
@@ -855,7 +904,7 @@ void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
 	/* Go find any other chunks that were waiting for
 	 * ordering and deliver them if needed.
 	 */
-	sctp_ulpq_reap_ordered(ulpq);
+	sctp_ulpq_reap_ordered(ulpq, sid);
 	return;
 }
 