Diffstat (limited to 'net')
-rw-r--r--  net/bridge/br.c                                        3
-rw-r--r--  net/bridge/netfilter/ebt_log.c                         2
-rw-r--r--  net/core/dev.c                                        20
-rw-r--r--  net/ipv4/ipcomp.c                                      2
-rw-r--r--  net/ipv4/netfilter/Kconfig                             4
-rw-r--r--  net/ipv4/netfilter/arp_tables.c                        2
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_core.c                 1
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_helper_h323_asn1.c   36
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_helper_pptp.c          4
-rw-r--r--  net/ipv4/netfilter/ip_nat_proto_gre.c                 12
-rw-r--r--  net/ipv4/netfilter/ip_nat_snmp_basic.c                16
-rw-r--r--  net/ipv4/netfilter/ipt_LOG.c                           2
-rw-r--r--  net/ipv4/netfilter/ipt_recent.c                        2
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c         1
-rw-r--r--  net/ipv4/xfrm4_policy.c                                2
-rw-r--r--  net/ipv6/ipcomp6.c                                     2
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c                        2
-rw-r--r--  net/ipv6/netfilter/ip6t_LOG.c                          2
-rw-r--r--  net/ipv6/route.c                                      16
-rw-r--r--  net/irda/iriap.c                                       3
-rw-r--r--  net/netfilter/nfnetlink_log.c                          4
-rw-r--r--  net/sctp/input.c                                     144
-rw-r--r--  net/sctp/sm_sideeffect.c                              16
-rw-r--r--  net/sctp/sm_statefuns.c                               81
-rw-r--r--  net/sctp/socket.c                                     29
-rw-r--r--  net/sunrpc/cache.c                                     1
-rw-r--r--  net/xfrm/xfrm_input.c                                  2
27 files changed, 243 insertions, 168 deletions
diff --git a/net/bridge/br.c b/net/bridge/br.c
index 22d806cf40ca..12da21afb9ca 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -55,7 +55,7 @@ static int __init br_init(void)
 
 static void __exit br_deinit(void)
 {
-	llc_sap_close(br_stp_sap);
+	rcu_assign_pointer(br_stp_sap->rcv_func, NULL);
 
 #ifdef CONFIG_BRIDGE_NETFILTER
 	br_netfilter_fini();
@@ -67,6 +67,7 @@ static void __exit br_deinit(void)
 
 	synchronize_net();
 
+	llc_sap_put(br_stp_sap);
 	br_fdb_get_hook = NULL;
 	br_fdb_put_hook = NULL;
 
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
index d159c92cca84..466ed3440b74 100644
--- a/net/bridge/netfilter/ebt_log.c
+++ b/net/bridge/netfilter/ebt_log.c
@@ -168,7 +168,7 @@ static void ebt_log(const struct sk_buff *skb, unsigned int hooknr,
 
 	if (info->bitmask & EBT_LOG_NFLOG)
 		nf_log_packet(PF_BRIDGE, hooknr, skb, in, out, &li,
-			      info->prefix);
+			      "%s", info->prefix);
 	else
 		ebt_log_packet(PF_BRIDGE, hooknr, skb, in, out, &li,
 			       info->prefix);
diff --git a/net/core/dev.c b/net/core/dev.c
index 2dce673a039b..4fba549caf29 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -127,7 +127,7 @@
  *		sure which should go first, but I bet it won't make much
  *		difference if we are running VLANs.  The good news is that
  *		this protocol won't be in the list unless compiled in, so
- *		the average user (w/out VLANs) will not be adversly affected.
+ *		the average user (w/out VLANs) will not be adversely affected.
  *	--BLG
  *
  *		0800	IP
@@ -149,7 +149,7 @@ static struct list_head ptype_base[16]; /* 16 way hashed list */
 static struct list_head ptype_all;		/* Taps */
 
 /*
- * The @dev_base list is protected by @dev_base_lock and the rtln
+ * The @dev_base list is protected by @dev_base_lock and the rtnl
  * semaphore.
  *
  * Pure readers hold dev_base_lock for reading.
@@ -641,10 +641,12 @@ int dev_valid_name(const char *name)
  *	@name: name format string
  *
  *	Passed a format string - eg "lt%d" it will try and find a suitable
- *	id. Not efficient for many devices, not called a lot. The caller
- *	must hold the dev_base or rtnl lock while allocating the name and
- *	adding the device in order to avoid duplicates. Returns the number
- *	of the unit assigned or a negative errno code.
+ *	id. It scans list of devices to build up a free map, then chooses
+ *	the first empty slot. The caller must hold the dev_base or rtnl lock
+ *	while allocating the name and adding the device in order to avoid
+ *	duplicates.
+ *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
+ *	Returns the number of the unit assigned or a negative errno code.
  */
 
 int dev_alloc_name(struct net_device *dev, const char *name)
@@ -744,7 +746,7 @@ int dev_change_name(struct net_device *dev, char *newname)
 }
 
 /**
- *	netdev_features_change - device changes fatures
+ *	netdev_features_change - device changes features
  *	@dev: device to cause notification
  *
  *	Called to indicate a device has changed features.
@@ -2196,7 +2198,7 @@ int netdev_set_master(struct net_device *slave, struct net_device *master)
  *	@dev: device
  *	@inc: modifier
  *
- *	Add or remove promsicuity from a device. While the count in the device
+ *	Add or remove promiscuity from a device. While the count in the device
  *	remains above zero the interface remains promiscuous. Once it hits zero
  *	the device reverts back to normal filtering operation. A negative inc
  *	value is used to drop promiscuity on the device.
@@ -3122,7 +3124,7 @@ EXPORT_SYMBOL(alloc_netdev);
 void free_netdev(struct net_device *dev)
 {
 #ifdef CONFIG_SYSFS
-	/*  Compatiablity with error handling in drivers */
+	/*  Compatibility with error handling in drivers */
 	if (dev->reg_state == NETREG_UNINITIALIZED) {
 		kfree((char *)dev - dev->padded);
 		return;
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index cd810f41af1a..95278b22b669 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -210,7 +210,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
 	    skb->h.icmph->code != ICMP_FRAG_NEEDED)
 		return;
 
-	spi = ntohl(ntohs(ipch->cpi));
+	spi = htonl(ntohs(ipch->cpi));
 	x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr,
 	                      spi, IPPROTO_COMP, AF_INET);
 	if (!x)
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 3d560dec63ab..d4072533da21 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -170,8 +170,8 @@ config IP_NF_PPTP
 	  Documentation/modules.txt.  If unsure, say `N'.
 
 config IP_NF_H323
-	tristate 'H.323 protocol support'
-	depends on IP_NF_CONNTRACK
+	tristate 'H.323 protocol support (EXPERIMENTAL)'
+	depends on IP_NF_CONNTRACK && EXPERIMENTAL
 	help
 	  H.323 is a VoIP signalling protocol from ITU-T. As one of the most
 	  important VoIP protocols, it is widely used by voice hardware and
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index c2d92f99a2b8..d0d19192026d 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -948,7 +948,7 @@ static int do_add_counters(void __user *user, unsigned int len)
 
 	write_lock_bh(&t->lock);
 	private = t->private;
-	if (private->number != paddc->num_counters) {
+	if (private->number != tmp.num_counters) {
 		ret = -EINVAL;
 		goto unlock_up_free;
 	}
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
index 979a2eac6f00..a297da7bbef5 100644
--- a/net/ipv4/netfilter/ip_conntrack_core.c
+++ b/net/ipv4/netfilter/ip_conntrack_core.c
@@ -1318,6 +1318,7 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)
 			.tuple.dst.u.tcp.port;
 		sin.sin_addr.s_addr = ct->tuplehash[IP_CT_DIR_ORIGINAL]
 			.tuple.dst.ip;
+		memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
 
 		DEBUGP("SO_ORIGINAL_DST: %u.%u.%u.%u %u\n",
 		       NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));
diff --git a/net/ipv4/netfilter/ip_conntrack_helper_h323_asn1.c b/net/ipv4/netfilter/ip_conntrack_helper_h323_asn1.c
index 355a53a5b6cd..26dfecadb335 100644
--- a/net/ipv4/netfilter/ip_conntrack_helper_h323_asn1.c
+++ b/net/ipv4/netfilter/ip_conntrack_helper_h323_asn1.c
@@ -528,14 +528,15 @@ int decode_seq(bitstr_t * bs, field_t * f, char *base, int level)
 
 			/* Decode */
 			if ((err = (Decoders[son->type]) (bs, son, base,
-							  level + 1)) >
-			    H323_ERROR_STOP)
+							  level + 1)) <
+			    H323_ERROR_NONE)
 				return err;
 
 			bs->cur = beg + len;
 			bs->bit = 0;
 		} else if ((err = (Decoders[son->type]) (bs, son, base,
-							 level + 1)))
+							 level + 1)) <
+			   H323_ERROR_NONE)
 			return err;
 	}
 
@@ -554,7 +555,7 @@ int decode_seq(bitstr_t * bs, field_t * f, char *base, int level)
 
 		/* Decode the extension components */
 		for (opt = 0; opt < bmp2_len; opt++, i++, son++) {
-			if (son->attr & STOP) {
+			if (i < f->ub && son->attr & STOP) {
 				PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, " ",
 				      son->name);
 				return H323_ERROR_STOP;
@@ -584,8 +585,8 @@ int decode_seq(bitstr_t * bs, field_t * f, char *base, int level)
 			beg = bs->cur;
 
 			if ((err = (Decoders[son->type]) (bs, son, base,
-							  level + 1)) >
-			    H323_ERROR_STOP)
+							  level + 1)) <
+			    H323_ERROR_NONE)
 				return err;
 
 			bs->cur = beg + len;
@@ -660,18 +661,20 @@ int decode_seqof(bitstr_t * bs, field_t * f, char *base, int level)
 							  i <
 							  effective_count ?
 							  base : NULL,
-							  level + 1)) >
-			    H323_ERROR_STOP)
+							  level + 1)) <
+			    H323_ERROR_NONE)
 				return err;
 
 			bs->cur = beg + len;
 			bs->bit = 0;
 		} else
 			if ((err = (Decoders[son->type]) (bs, son,
-							  i < effective_count ?
-							  base : NULL,
-							  level + 1)))
+							  i <
+							  effective_count ?
+							  base : NULL,
+							  level + 1)) <
+			    H323_ERROR_NONE)
 				return err;
 
 		if (base)
 			base += son->offset;
@@ -735,13 +738,14 @@ int decode_choice(bitstr_t * bs, field_t * f, char *base, int level)
 		}
 		beg = bs->cur;
 
-		if ((err = (Decoders[son->type]) (bs, son, base, level + 1)) >
-		    H323_ERROR_STOP)
+		if ((err = (Decoders[son->type]) (bs, son, base, level + 1)) <
+		    H323_ERROR_NONE)
 			return err;
 
 		bs->cur = beg + len;
 		bs->bit = 0;
-	} else if ((err = (Decoders[son->type]) (bs, son, base, level + 1)))
+	} else if ((err = (Decoders[son->type]) (bs, son, base, level + 1)) <
+		   H323_ERROR_NONE)
 		return err;
 
 	return H323_ERROR_NONE;
diff --git a/net/ipv4/netfilter/ip_conntrack_helper_pptp.c b/net/ipv4/netfilter/ip_conntrack_helper_pptp.c
index 7d3ba4302e9e..8ccfe17bb253 100644
--- a/net/ipv4/netfilter/ip_conntrack_helper_pptp.c
+++ b/net/ipv4/netfilter/ip_conntrack_helper_pptp.c
@@ -469,8 +469,8 @@ pptp_inbound_pkt(struct sk_buff **pskb,
 			DEBUGP("%s but no session\n", pptp_msg_name[msg]);
 			break;
 		}
-		if (info->sstate != PPTP_CALL_IN_REP
-		    && info->sstate != PPTP_CALL_IN_CONF) {
+		if (info->cstate != PPTP_CALL_IN_REP
+		    && info->cstate != PPTP_CALL_IN_CONF) {
 			DEBUGP("%s but never sent IN_CALL_REPLY\n",
 			       pptp_msg_name[msg]);
 			break;
diff --git a/net/ipv4/netfilter/ip_nat_proto_gre.c b/net/ipv4/netfilter/ip_nat_proto_gre.c
index 6c4899d8046a..96ceabaec402 100644
--- a/net/ipv4/netfilter/ip_nat_proto_gre.c
+++ b/net/ipv4/netfilter/ip_nat_proto_gre.c
@@ -49,15 +49,15 @@ gre_in_range(const struct ip_conntrack_tuple *tuple,
 	     const union ip_conntrack_manip_proto *min,
 	     const union ip_conntrack_manip_proto *max)
 {
-	u_int32_t key;
+	__be16 key;
 
 	if (maniptype == IP_NAT_MANIP_SRC)
 		key = tuple->src.u.gre.key;
 	else
 		key = tuple->dst.u.gre.key;
 
-	return ntohl(key) >= ntohl(min->gre.key)
-		&& ntohl(key) <= ntohl(max->gre.key);
+	return ntohs(key) >= ntohs(min->gre.key)
+		&& ntohs(key) <= ntohs(max->gre.key);
 }
 
 /* generate unique tuple ... */
@@ -81,14 +81,14 @@ gre_unique_tuple(struct ip_conntrack_tuple *tuple,
 		min = 1;
 		range_size = 0xffff;
 	} else {
-		min = ntohl(range->min.gre.key);
-		range_size = ntohl(range->max.gre.key) - min + 1;
+		min = ntohs(range->min.gre.key);
+		range_size = ntohs(range->max.gre.key) - min + 1;
 	}
 
 	DEBUGP("min = %u, range_size = %u\n", min, range_size);
 
 	for (i = 0; i < range_size; i++, key++) {
-		*keyptr = htonl(min + key % range_size);
+		*keyptr = htons(min + key % range_size);
 		if (!ip_nat_used_tuple(tuple, conntrack))
 			return 1;
 	}
diff --git a/net/ipv4/netfilter/ip_nat_snmp_basic.c b/net/ipv4/netfilter/ip_nat_snmp_basic.c
index c62253845538..c33244263b90 100644
--- a/net/ipv4/netfilter/ip_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/ip_nat_snmp_basic.c
@@ -768,6 +768,7 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
 		len *= sizeof(unsigned long);
 		*obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC);
 		if (*obj == NULL) {
+			kfree(lp);
 			kfree(id);
 			if (net_ratelimit())
 				printk("OOM in bsalg (%d)\n", __LINE__);
@@ -1003,12 +1004,12 @@ static unsigned char snmp_trap_decode(struct asn1_ctx *ctx,
 
 	return 1;
 
+err_addr_free:
+	kfree((unsigned long *)trap->ip_address);
+
 err_id_free:
 	kfree(trap->id);
 
-err_addr_free:
-	kfree((unsigned long *)trap->ip_address);
-
 	return 0;
 }
 
@@ -1126,11 +1127,10 @@ static int snmp_parse_mangle(unsigned char *msg,
 		struct snmp_v1_trap trap;
 		unsigned char ret = snmp_trap_decode(&ctx, &trap, map, check);
 
-		/* Discard trap allocations regardless */
-		kfree(trap.id);
-		kfree((unsigned long *)trap.ip_address);
-
-		if (!ret)
+		if (ret) {
+			kfree(trap.id);
+			kfree((unsigned long *)trap.ip_address);
+		} else
 			return ret;
 
 	} else {
diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c
index 39fd4c2a2386..b98f7b08b084 100644
--- a/net/ipv4/netfilter/ipt_LOG.c
+++ b/net/ipv4/netfilter/ipt_LOG.c
@@ -428,7 +428,7 @@ ipt_log_target(struct sk_buff **pskb,
 
 	if (loginfo->logflags & IPT_LOG_NFLOG)
 		nf_log_packet(PF_INET, hooknum, *pskb, in, out, &li,
-			      loginfo->prefix);
+			      "%s", loginfo->prefix);
 	else
 		ipt_log_packet(PF_INET, hooknum, *pskb, in, out, &li,
 			       loginfo->prefix);
diff --git a/net/ipv4/netfilter/ipt_recent.c b/net/ipv4/netfilter/ipt_recent.c
index 143843285702..b847ee409efb 100644
--- a/net/ipv4/netfilter/ipt_recent.c
+++ b/net/ipv4/netfilter/ipt_recent.c
@@ -821,6 +821,7 @@ checkentry(const char *tablename,
 	/* Create our proc 'status' entry. */
 	curr_table->status_proc = create_proc_entry(curr_table->name, ip_list_perms, proc_net_ipt_recent);
 	if (!curr_table->status_proc) {
+		vfree(hold);
 		printk(KERN_INFO RECENT_NAME ": checkentry: unable to allocate for /proc entry.\n");
 		/* Destroy the created table */
 		spin_lock_bh(&recent_lock);
@@ -845,7 +846,6 @@ checkentry(const char *tablename,
 		spin_unlock_bh(&recent_lock);
 		vfree(curr_table->time_info);
 		vfree(curr_table->hash_table);
-		vfree(hold);
 		vfree(curr_table->table);
 		vfree(curr_table);
 		return 0;
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 5bc9f64d7b5b..77d974443c7b 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -348,6 +348,7 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)
 			.tuple.dst.u.tcp.port;
 		sin.sin_addr.s_addr = ct->tuplehash[IP_CT_DIR_ORIGINAL]
 			.tuple.dst.u3.ip;
+		memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
 
 		DEBUGP("SO_ORIGINAL_DST: %u.%u.%u.%u %u\n",
 		       NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index f285bbf296e2..8604c747bca5 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -221,7 +221,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl)
 			if (pskb_may_pull(skb, xprth + 4 - skb->data)) {
 				u16 *ipcomp_hdr = (u16 *)xprth;
 
-				fl->fl_ipsec_spi = ntohl(ntohs(ipcomp_hdr[1]));
+				fl->fl_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
 			}
 			break;
 		default:
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 05eb67def39f..48636436028a 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -208,7 +208,7 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	if (type != ICMPV6_DEST_UNREACH && type != ICMPV6_PKT_TOOBIG)
 		return;
 
-	spi = ntohl(ntohs(ipcomph->cpi));
+	spi = htonl(ntohs(ipcomph->cpi));
 	x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, spi, IPPROTO_COMP, AF_INET6);
 	if (!x)
 		return;
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 0a673038344f..2e72f89a7019 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1103,7 +1103,7 @@ do_add_counters(void __user *user, unsigned int len)
 
 	write_lock_bh(&t->lock);
 	private = t->private;
-	if (private->number != paddc->num_counters) {
+	if (private->number != tmp.num_counters) {
 		ret = -EINVAL;
 		goto unlock_up_free;
 	}
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c
index a96c0de14b00..73c6300109d6 100644
--- a/net/ipv6/netfilter/ip6t_LOG.c
+++ b/net/ipv6/netfilter/ip6t_LOG.c
@@ -439,7 +439,7 @@ ip6t_log_target(struct sk_buff **pskb,
 
 	if (loginfo->logflags & IP6T_LOG_NFLOG)
 		nf_log_packet(PF_INET6, hooknum, *pskb, in, out, &li,
-			      loginfo->prefix);
+			      "%s", loginfo->prefix);
 	else
 		ip6t_log_packet(PF_INET6, hooknum, *pskb, in, out, &li,
 				loginfo->prefix);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 0190e39096b9..8a777932786d 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -280,10 +280,13 @@ static int inline rt6_check_neigh(struct rt6_info *rt)
 {
 	struct neighbour *neigh = rt->rt6i_nexthop;
 	int m = 0;
-	if (neigh) {
+	if (rt->rt6i_flags & RTF_NONEXTHOP ||
+	    !(rt->rt6i_flags & RTF_GATEWAY))
+		m = 1;
+	else if (neigh) {
 		read_lock_bh(&neigh->lock);
 		if (neigh->nud_state & NUD_VALID)
-			m = 1;
+			m = 2;
 		read_unlock_bh(&neigh->lock);
 	}
 	return m;
@@ -292,15 +295,18 @@ static int inline rt6_check_neigh(struct rt6_info *rt)
 static int rt6_score_route(struct rt6_info *rt, int oif,
 			   int strict)
 {
-	int m = rt6_check_dev(rt, oif);
+	int m, n;
+
+	m = rt6_check_dev(rt, oif);
 	if (!m && (strict & RT6_SELECT_F_IFACE))
 		return -1;
 #ifdef CONFIG_IPV6_ROUTER_PREF
 	m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
 #endif
-	if (rt6_check_neigh(rt))
+	n = rt6_check_neigh(rt);
+	if (n > 1)
 		m |= 16;
-	else if (strict & RT6_SELECT_F_REACHABLE)
+	else if (!n && strict & RT6_SELECT_F_REACHABLE)
 		return -1;
 	return m;
 }
diff --git a/net/irda/iriap.c b/net/irda/iriap.c
index 254f90746900..2d2e2b1919f4 100644
--- a/net/irda/iriap.c
+++ b/net/irda/iriap.c
@@ -544,7 +544,8 @@ static void iriap_getvaluebyclass_response(struct iriap_cb *self,
 {
 	struct sk_buff *tx_skb;
 	int n;
-	__u32 tmp_be32, tmp_be16;
+	__u32 tmp_be32;
+	__be16 tmp_be16;
 	__u8 *fp;
 
 	IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index c60273cad778..61cdda4e5d3b 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -321,7 +321,7 @@ static int
 nfulnl_set_flags(struct nfulnl_instance *inst, u_int16_t flags)
 {
 	spin_lock_bh(&inst->lock);
-	inst->flags = ntohs(flags);
+	inst->flags = flags;
 	spin_unlock_bh(&inst->lock);
 
 	return 0;
@@ -902,7 +902,7 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
 	if (nfula[NFULA_CFG_FLAGS-1]) {
 		u_int16_t flags =
 			*(u_int16_t *)NFA_DATA(nfula[NFULA_CFG_FLAGS-1]);
-		nfulnl_set_flags(inst, ntohl(flags));
+		nfulnl_set_flags(inst, ntohs(flags));
 	}
 
 out_put:
diff --git a/net/sctp/input.c b/net/sctp/input.c
index d117ebc75cf8..1662f9cc869e 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -73,6 +73,8 @@ static struct sctp_association *__sctp_lookup_association(
 					const union sctp_addr *peer,
 					struct sctp_transport **pt);
 
+static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
+
 
 /* Calculate the SCTP checksum of an SCTP packet.  */
 static inline int sctp_rcv_checksum(struct sk_buff *skb)
@@ -186,7 +188,6 @@ int sctp_rcv(struct sk_buff *skb)
 	 */
 	if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb)))
 	{
-		sock_put(sk);
 		if (asoc) {
 			sctp_association_put(asoc);
 			asoc = NULL;
@@ -197,7 +198,6 @@ int sctp_rcv(struct sk_buff *skb)
 		sk = sctp_get_ctl_sock();
 		ep = sctp_sk(sk)->ep;
 		sctp_endpoint_hold(ep);
-		sock_hold(sk);
 		rcvr = &ep->base;
 	}
 
@@ -253,25 +253,18 @@ int sctp_rcv(struct sk_buff *skb)
 	 */
 	sctp_bh_lock_sock(sk);
 
-	/* It is possible that the association could have moved to a different
-	 * socket if it is peeled off. If so, update the sk.
-	 */
-	if (sk != rcvr->sk) {
-		sctp_bh_lock_sock(rcvr->sk);
-		sctp_bh_unlock_sock(sk);
-		sk = rcvr->sk;
-	}
-
 	if (sock_owned_by_user(sk))
-		sk_add_backlog(sk, skb);
+		sctp_add_backlog(sk, skb);
 	else
-		sctp_backlog_rcv(sk, skb);
+		sctp_inq_push(&chunk->rcvr->inqueue, chunk);
 
-	/* Release the sock and the sock ref we took in the lookup calls.
-	 * The asoc/ep ref will be released in sctp_backlog_rcv.
-	 */
 	sctp_bh_unlock_sock(sk);
-	sock_put(sk);
+
+	/* Release the asoc/ep ref we took in the lookup calls. */
+	if (asoc)
+		sctp_association_put(asoc);
+	else
+		sctp_endpoint_put(ep);
 
 	return 0;
 
@@ -280,8 +273,7 @@ discard_it:
 	return 0;
 
 discard_release:
-	/* Release any structures we may be holding. */
-	sock_put(sk);
+	/* Release the asoc/ep ref we took in the lookup calls. */
 	if (asoc)
 		sctp_association_put(asoc);
 	else
@@ -290,56 +282,87 @@ discard_release:
 	goto discard_it;
 }
 
-/* Handle second half of inbound skb processing.  If the sock was busy,
- * we may have need to delay processing until later when the sock is
- * released (on the backlog).  If not busy, we call this routine
- * directly from the bottom half.
+/* Process the backlog queue of the socket.  Every skb on
+ * the backlog holds a ref on an association or endpoint.
+ * We hold this ref throughout the state machine to make
+ * sure that the structure we need is still around.
  */
 int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 {
 	struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
-	struct sctp_inq *inqueue = NULL;
+	struct sctp_inq *inqueue = &chunk->rcvr->inqueue;
 	struct sctp_ep_common *rcvr = NULL;
+	int backloged = 0;
 
 	rcvr = chunk->rcvr;
 
-	BUG_TRAP(rcvr->sk == sk);
-
-	if (rcvr->dead) {
-		sctp_chunk_free(chunk);
-	} else {
-		inqueue = &chunk->rcvr->inqueue;
-		sctp_inq_push(inqueue, chunk);
-	}
-
-	/* Release the asoc/ep ref we took in the lookup calls in sctp_rcv. */
-	if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
-		sctp_association_put(sctp_assoc(rcvr));
-	else
-		sctp_endpoint_put(sctp_ep(rcvr));
-
+	/* If the rcvr is dead then the association or endpoint
+	 * has been deleted and we can safely drop the chunk
+	 * and refs that we are holding.
+	 */
+	if (rcvr->dead) {
+		sctp_chunk_free(chunk);
+		goto done;
+	}
+
+	if (unlikely(rcvr->sk != sk)) {
+		/* In this case, the association moved from one socket to
+		 * another.  We are currently sitting on the backlog of the
+		 * old socket, so we need to move.
+		 * However, since we are here in the process context we
+		 * need to take make sure that the user doesn't own
+		 * the new socket when we process the packet.
+		 * If the new socket is user-owned, queue the chunk to the
+		 * backlog of the new socket without dropping any refs.
+		 * Otherwise, we can safely push the chunk on the inqueue.
+		 */
+
+		sk = rcvr->sk;
+		sctp_bh_lock_sock(sk);
+
+		if (sock_owned_by_user(sk)) {
+			sk_add_backlog(sk, skb);
+			backloged = 1;
+		} else
+			sctp_inq_push(inqueue, chunk);
+
+		sctp_bh_unlock_sock(sk);
+
+		/* If the chunk was backloged again, don't drop refs */
+		if (backloged)
+			return 0;
+	} else {
+		sctp_inq_push(inqueue, chunk);
+	}
+
+done:
+	/* Release the refs we took in sctp_add_backlog */
+	if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
+		sctp_association_put(sctp_assoc(rcvr));
+	else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
+		sctp_endpoint_put(sctp_ep(rcvr));
+	else
+		BUG();
+
 	return 0;
 }
 
-void sctp_backlog_migrate(struct sctp_association *assoc,
-			  struct sock *oldsk, struct sock *newsk)
+static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
 {
-	struct sk_buff *skb;
-	struct sctp_chunk *chunk;
+	struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
+	struct sctp_ep_common *rcvr = chunk->rcvr;
 
-	skb = oldsk->sk_backlog.head;
-	oldsk->sk_backlog.head = oldsk->sk_backlog.tail = NULL;
-	while (skb != NULL) {
-		struct sk_buff *next = skb->next;
-
-		chunk = SCTP_INPUT_CB(skb)->chunk;
-		skb->next = NULL;
-		if (&assoc->base == chunk->rcvr)
-			sk_add_backlog(newsk, skb);
-		else
-			sk_add_backlog(oldsk, skb);
-		skb = next;
-	}
+	/* Hold the assoc/ep while hanging on the backlog queue.
+	 * This way, we know structures we need will not disappear from us
+	 */
+	if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
+		sctp_association_hold(sctp_assoc(rcvr));
+	else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
+		sctp_endpoint_hold(sctp_ep(rcvr));
+	else
+		BUG();
+
+	sk_add_backlog(sk, skb);
 }
 
 /* Handle icmp frag needed error. */
@@ -412,7 +435,7 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
 	union sctp_addr daddr;
 	struct sctp_af *af;
 	struct sock *sk = NULL;
-	struct sctp_association *asoc = NULL;
+	struct sctp_association *asoc;
 	struct sctp_transport *transport = NULL;
 
 	*app = NULL; *tpp = NULL;
@@ -453,7 +476,6 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
 	return sk;
 
 out:
-	sock_put(sk);
 	if (asoc)
 		sctp_association_put(asoc);
 	return NULL;
@@ -463,7 +485,6 @@ out:
 void sctp_err_finish(struct sock *sk, struct sctp_association *asoc)
 {
 	sctp_bh_unlock_sock(sk);
-	sock_put(sk);
 	if (asoc)
 		sctp_association_put(asoc);
 }
@@ -490,7 +511,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
 	int type = skb->h.icmph->type;
 	int code = skb->h.icmph->code;
 	struct sock *sk;
-	struct sctp_association *asoc;
+	struct sctp_association *asoc = NULL;
 	struct sctp_transport *transport;
 	struct inet_sock *inet;
 	char *saveip, *savesctp;
@@ -716,7 +737,6 @@ static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *l
 
 hit:
 	sctp_endpoint_hold(ep);
-	sock_hold(epb->sk);
 	read_unlock(&head->lock);
 	return ep;
 }
@@ -818,7 +838,6 @@ static struct sctp_association *__sctp_lookup_association(
 hit:
 	*pt = transport;
 	sctp_association_hold(asoc);
-	sock_hold(epb->sk);
 	read_unlock(&head->lock);
 	return asoc;
 }
@@ -846,7 +865,6 @@ int sctp_has_association(const union sctp_addr *laddr,
 	struct sctp_transport *transport;
 
 	if ((asoc = sctp_lookup_association(laddr, paddr, &transport))) {
-		sock_put(asoc->base.sk);
 		sctp_association_put(asoc);
 		return 1;
 	}
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 8d1dc24bab4c..c5beb2ad7ef7 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -498,10 +498,6 @@ static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands,
 	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
 			SCTP_STATE(SCTP_STATE_CLOSED));
 
-	/* Set sk_err to ECONNRESET on a 1-1 style socket. */
-	if (!sctp_style(asoc->base.sk, UDP))
-		asoc->base.sk->sk_err = ECONNRESET;
-
 	/* SEND_FAILED sent later when cleaning up the association. */
 	asoc->outqueue.error = error;
 	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
@@ -838,6 +834,15 @@ static void sctp_cmd_del_non_primary(struct sctp_association *asoc)
 	return;
 }
 
+/* Helper function to set sk_err on a 1-1 style socket. */
+static void sctp_cmd_set_sk_err(struct sctp_association *asoc, int error)
+{
+	struct sock *sk = asoc->base.sk;
+
+	if (!sctp_style(sk, UDP))
+		sk->sk_err = error;
+}
+
 /* These three macros allow us to pull the debugging code out of the
  * main flow of sctp_do_sm() to keep attention focused on the real
  * functionality there.
@@ -1458,6 +1463,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
 			local_cork = 0;
 			asoc->peer.retran_path = t;
 			break;
+		case SCTP_CMD_SET_SK_ERR:
+			sctp_cmd_set_sk_err(asoc, cmd->obj.error);
+			break;
 		default:
 			printk(KERN_WARNING "Impossible command: %u, %p\n",
 			       cmd->verb, cmd->obj.ptr);
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 8cdba51ec076..8bc279219a72 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -93,7 +93,7 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
 static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk);
 
 static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
-				     __u16 error,
+				     __u16 error, int sk_err,
 				     const struct sctp_association *asoc,
 				     struct sctp_transport *transport);
 
@@ -448,7 +448,7 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
 	__u32 init_tag;
 	struct sctp_chunk *err_chunk;
 	struct sctp_packet *packet;
-	sctp_disposition_t ret;
+	__u16 error;
 
 	if (!sctp_vtag_verify(chunk, asoc))
 		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
@@ -480,11 +480,9 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
 			goto nomem;
 
 		sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
-		sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
-				SCTP_STATE(SCTP_STATE_CLOSED));
-		SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-		sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
-		return SCTP_DISPOSITION_DELETE_TCB;
+		return sctp_stop_t1_and_abort(commands, SCTP_ERROR_INV_PARAM,
+					      ECONNREFUSED, asoc,
+					      chunk->transport);
 	}
 
 	/* Verify the INIT chunk before processing it. */
@@ -511,27 +509,16 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
 				sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
 						SCTP_PACKET(packet));
 				SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
-				sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
-						SCTP_STATE(SCTP_STATE_CLOSED));
-				sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB,
-						SCTP_NULL());
-				return SCTP_DISPOSITION_CONSUME;
+				error = SCTP_ERROR_INV_PARAM;
 			} else {
-				sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
-						SCTP_STATE(SCTP_STATE_CLOSED));
-				sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB,
-						SCTP_NULL());
-				return SCTP_DISPOSITION_NOMEM;
+				error = SCTP_ERROR_NO_RESOURCE;
 			}
 		} else {
-			ret = sctp_sf_tabort_8_4_8(ep, asoc, type, arg,
-						   commands);
-			sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
-					SCTP_STATE(SCTP_STATE_CLOSED));
-			sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB,
-					SCTP_NULL());
-			return ret;
+			sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+			error = SCTP_ERROR_INV_PARAM;
 		}
+		return sctp_stop_t1_and_abort(commands, error, ECONNREFUSED,
+					      asoc, chunk->transport);
 	}
 
 	/* Tag the variable length parameters.  Note that we never
@@ -886,6 +873,8 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep,
 	struct sctp_transport *transport = (struct sctp_transport *) arg;
 
 	if (asoc->overall_error_count >= asoc->max_retrans) {
+		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+				SCTP_ERROR(ETIMEDOUT));
 		/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 				SCTP_U32(SCTP_ERROR_NO_ERROR));
@@ -1030,6 +1019,12 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
 						  commands);
 
 	hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data;
+	/* Make sure that the length of the parameter is what we expect */
+	if (ntohs(hbinfo->param_hdr.length) !=
+	    sizeof(sctp_sender_hb_info_t)) {
+		return SCTP_DISPOSITION_DISCARD;
+	}
+
 	from_addr = hbinfo->daddr;
 	link = sctp_assoc_lookup_paddr(asoc, &from_addr);
 
@@ -2126,6 +2121,8 @@ static sctp_disposition_t sctp_sf_do_5_2_6_stale(const struct sctp_endpoint *ep,
 	int attempts = asoc->init_err_counter + 1;
 
 	if (attempts > asoc->max_init_attempts) {
+		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+				SCTP_ERROR(ETIMEDOUT));
 		sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
 				SCTP_U32(SCTP_ERROR_STALE_COOKIE));
 		return SCTP_DISPOSITION_DELETE_TCB;
@@ -2262,6 +2259,7 @@ sctp_disposition_t sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
 	if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr))
 		error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
 
+	sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET));
 	/* ASSOC_FAILED will DELETE_TCB. */
 	sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_U32(error));
 	SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
@@ -2306,7 +2304,8 @@ sctp_disposition_t sctp_sf_cookie_wait_abort(const struct sctp_endpoint *ep,
 	if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr))
 		error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
 
-	return sctp_stop_t1_and_abort(commands, error, asoc, chunk->transport);
+	return sctp_stop_t1_and_abort(commands, error, ECONNREFUSED, asoc,
+				      chunk->transport);
 }
 
 /*
@@ -2318,7 +2317,8 @@ sctp_disposition_t sctp_sf_cookie_wait_icmp_abort(const struct sctp_endpoint *ep
 					void *arg,
 					sctp_cmd_seq_t *commands)
 {
-	return sctp_stop_t1_and_abort(commands, SCTP_ERROR_NO_ERROR, asoc,
+	return sctp_stop_t1_and_abort(commands, SCTP_ERROR_NO_ERROR,
+				      ENOPROTOOPT, asoc,
 				      (struct sctp_transport *)arg);
 }
 
@@ -2343,7 +2343,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_abort(const struct sctp_endpoint *ep,
  * This is common code called by several sctp_sf_*_abort() functions above.
  */
 static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
-				     __u16 error,
+				     __u16 error, int sk_err,
 				     const struct sctp_association *asoc,
 				     struct sctp_transport *transport)
 {
@@ -2353,6 +2353,7 @@ static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
 	SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
 	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
 			SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
+	sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(sk_err));
 	/* CMD_INIT_FAILED will DELETE_TCB. */
 	sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
 			SCTP_U32(error));
@@ -3336,6 +3337,8 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
 		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
 				SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
 		sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
+		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+				SCTP_ERROR(ECONNABORTED));
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 				SCTP_U32(SCTP_ERROR_ASCONF_ACK));
 		SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
@@ -3362,6 +3365,8 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
 		 * processing the rest of the chunks in the packet.
 		 */
 		sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
+		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+				SCTP_ERROR(ECONNABORTED));
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 				SCTP_U32(SCTP_ERROR_ASCONF_ACK));
 		SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
@@ -3714,9 +3719,13 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
 	if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) {
 		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
 				SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
+		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+				SCTP_ERROR(ECONNREFUSED));
 		sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
 				SCTP_U32(SCTP_ERROR_PROTO_VIOLATION));
 	} else {
+		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+				SCTP_ERROR(ECONNABORTED));
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 				SCTP_U32(SCTP_ERROR_PROTO_VIOLATION));
 		SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
@@ -4034,6 +4043,8 @@ sctp_disposition_t sctp_sf_do_9_1_prm_abort(
 	 * TCB.  This is a departure from our typical NOMEM handling.
 	 */
 
+	sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+			SCTP_ERROR(ECONNABORTED));
 	/* Delete the established association. */
 	sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 			SCTP_U32(SCTP_ERROR_USER_ABORT));
@@ -4175,6 +4186,8 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort(
 	 * TCB.  This is a departure from our typical NOMEM handling.
 	 */
 
+	sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+			SCTP_ERROR(ECONNREFUSED));
 	/* Delete the established association. */
 	sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
 			SCTP_U32(SCTP_ERROR_USER_ABORT));
@@ -4543,6 +4556,8 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(const struct sctp_endpoint *ep,
 	struct sctp_transport *transport = arg;
 
 	if (asoc->overall_error_count >= asoc->max_retrans) {
+		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+				SCTP_ERROR(ETIMEDOUT));
 		/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 				SCTP_U32(SCTP_ERROR_NO_ERROR));
@@ -4662,6 +4677,8 @@ sctp_disposition_t sctp_sf_t1_init_timer_expire(const struct sctp_endpoint *ep,
 		SCTP_DEBUG_PRINTK("Giving up on INIT, attempts: %d"
 				  " max_init_attempts: %d\n",
 				  attempts, asoc->max_init_attempts);
+		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+				SCTP_ERROR(ETIMEDOUT));
 		sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
 				SCTP_U32(SCTP_ERROR_NO_ERROR));
 		return SCTP_DISPOSITION_DELETE_TCB;
@@ -4711,6 +4728,8 @@ sctp_disposition_t sctp_sf_t1_cookie_timer_expire(const struct sctp_endpoint *ep
 
 		sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
 	} else {
+		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+				SCTP_ERROR(ETIMEDOUT));
 		sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
 				SCTP_U32(SCTP_ERROR_NO_ERROR));
 		return SCTP_DISPOSITION_DELETE_TCB;
@@ -4742,6 +4761,8 @@ sctp_disposition_t sctp_sf_t2_timer_expire(const struct sctp_endpoint *ep,
 
 	SCTP_DEBUG_PRINTK("Timer T2 expired.\n");
 	if (asoc->overall_error_count >= asoc->max_retrans) {
+		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+				SCTP_ERROR(ETIMEDOUT));
 		/* Note:  CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 				SCTP_U32(SCTP_ERROR_NO_ERROR));
@@ -4817,6 +4838,8 @@ sctp_disposition_t sctp_sf_t4_timer_expire(
 	if (asoc->overall_error_count >= asoc->max_retrans) {
 		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
 				SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
+		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+				SCTP_ERROR(ETIMEDOUT));
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 				SCTP_U32(SCTP_ERROR_NO_ERROR));
 		SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
@@ -4870,6 +4893,8 @@ sctp_disposition_t sctp_sf_t5_timer_expire(const struct sctp_endpoint *ep,
 		goto nomem;
 
 	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
+	sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+			SCTP_ERROR(ETIMEDOUT));
 	sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 			SCTP_U32(SCTP_ERROR_NO_ERROR));
 
@@ -5309,6 +5334,8 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 		 * processing the rest of the chunks in the packet.
 		 */
 		sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
+		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+				SCTP_ERROR(ECONNABORTED));
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 				SCTP_U32(SCTP_ERROR_NO_DATA));
 		SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index b6e4b89539b3..174d4d35e951 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1057,6 +1057,7 @@ static int __sctp_connect(struct sock* sk,
 	inet_sk(sk)->dport = htons(asoc->peer.port);
 	af = sctp_get_af_specific(to.sa.sa_family);
 	af->to_sk_daddr(&to, sk);
+	sk->sk_err = 0;
 
 	timeo = sock_sndtimeo(sk, sk->sk_socket->file->f_flags & O_NONBLOCK);
 	err = sctp_wait_for_connect(asoc, &timeo);
@@ -1228,7 +1229,7 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
 
 	ep = sctp_sk(sk)->ep;
 
-	/* Walk all associations on a socket, not on an endpoint. */
+	/* Walk all associations on an endpoint. */
 	list_for_each_safe(pos, temp, &ep->asocs) {
 		asoc = list_entry(pos, struct sctp_association, asocs);
 
@@ -1241,13 +1242,13 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
 			if (sctp_state(asoc, CLOSED)) {
 				sctp_unhash_established(asoc);
 				sctp_association_free(asoc);
+				continue;
+			}
+		}
 
-			} else if (sock_flag(sk, SOCK_LINGER) &&
-				   !sk->sk_lingertime)
-				sctp_primitive_ABORT(asoc, NULL);
-			else
-				sctp_primitive_SHUTDOWN(asoc, NULL);
-		} else
+		if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)
+			sctp_primitive_ABORT(asoc, NULL);
+		else
 			sctp_primitive_SHUTDOWN(asoc, NULL);
 	}
 
@@ -5317,6 +5318,7 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
 		 */
 		sctp_release_sock(sk);
 		current_timeo = schedule_timeout(current_timeo);
+		BUG_ON(sk != asoc->base.sk);
 		sctp_lock_sock(sk);
 
 		*timeo_p = current_timeo;
@@ -5604,12 +5606,14 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
 	 */
 	newsp->type = type;
 
-	spin_lock_bh(&oldsk->sk_lock.slock);
-	/* Migrate the backlog from oldsk to newsk. */
-	sctp_backlog_migrate(assoc, oldsk, newsk);
-	/* Migrate the association to the new socket. */
+	/* Mark the new socket "in-use" by the user so that any packets
+	 * that may arrive on the association after we've moved it are
+	 * queued to the backlog.  This prevents a potential race between
+	 * backlog processing on the old socket and new-packet processing
+	 * on the new socket.
+	 */
+	sctp_lock_sock(newsk);
 	sctp_assoc_migrate(assoc, newsk);
-	spin_unlock_bh(&oldsk->sk_lock.slock);
 
 	/* If the association on the newsk is already closed before accept()
 	 * is called, set RCV_SHUTDOWN flag.
@@ -5618,6 +5622,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
 		newsk->sk_shutdown |= RCV_SHUTDOWN;
 
 	newsk->sk_state = SCTP_SS_ESTABLISHED;
+	sctp_release_sock(newsk);
 }
 
 /* This proto struct describes the ULP interface for SCTP. */
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 3ac4193a78ed..7026b0866b7b 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -159,6 +159,7 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
 		detail->update(tmp, new);
 		tmp->next = *head;
 		*head = tmp;
+		detail->entries++;
 		cache_get(tmp);
 		is_new = cache_fresh_locked(tmp, new->expiry_time);
 		cache_fresh_locked(old, 0);
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index b54971059f16..891a6090cc09 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -62,7 +62,7 @@ int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, u32 *spi, u32 *seq)
 	case IPPROTO_COMP:
 		if (!pskb_may_pull(skb, sizeof(struct ip_comp_hdr)))
 			return -EINVAL;
-		*spi = ntohl(ntohs(*(u16*)(skb->h.raw + 2)));
+		*spi = htonl(ntohs(*(u16*)(skb->h.raw + 2)));
 		*seq = 0;
 		return 0;
 	default: