Diffstat (limited to 'net')
-rw-r--r--   net/bridge/br_netfilter.c         |    3
-rw-r--r--   net/core/dev.c                    |    6
-rw-r--r--   net/core/dev_mcast.c              |    2
-rw-r--r--   net/core/net_namespace.c          |   18
-rw-r--r--   net/core/sock.c                   |    4
-rw-r--r--   net/ipv4/inetpeer.c               |   42
-rw-r--r--   net/ipv4/ipvs/ip_vs_conn.c        |    1
-rw-r--r--   net/ipv4/ipvs/ip_vs_ctl.c         |    1
-rw-r--r--   net/ipv4/netfilter/nf_nat_core.c  |    2
-rw-r--r--   net/ipv6/ndisc.c                  |    1
-rw-r--r--   net/netfilter/nf_sockopt.c        |  106
-rw-r--r--   net/netfilter/xt_time.c           |    2
-rw-r--r--   net/netlink/af_netlink.c          |    2
-rw-r--r--   net/packet/af_packet.c            |   16
-rw-r--r--   net/rxrpc/ar-local.c              |    4
-rw-r--r--   net/sctp/associola.c              |   10
-rw-r--r--   net/sctp/bind_addr.c              |   13
-rw-r--r--   net/sctp/endpointola.c            |   35
-rw-r--r--   net/sctp/input.c                  |   43
-rw-r--r--   net/sctp/inqueue.c                |    4
-rw-r--r--   net/sctp/outqueue.c               |   41
-rw-r--r--   net/sctp/proc.c                   |    6
-rw-r--r--   net/sctp/protocol.c               |    7
-rw-r--r--   net/sctp/sm_make_chunk.c          |  170
-rw-r--r--   net/sctp/sm_sideeffect.c          |   10
-rw-r--r--   net/sctp/sm_statefuns.c           |   12
-rw-r--r--   net/sctp/socket.c                 |   16
-rw-r--r--   net/sctp/sysctl.c                 |    9
-rw-r--r--   net/sctp/transport.c              |    5
-rw-r--r--   net/sctp/ulpqueue.c               |    2
-rw-r--r--   net/socket.c                      |    6
31 files changed, 303 insertions, 296 deletions
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index da22f900e89d..c1757c79dfbb 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -766,6 +766,9 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
 	if (!nf_bridge)
 		return NF_ACCEPT;
 
+	if (!(nf_bridge->mask & (BRNF_BRIDGED | BRNF_BRIDGED_DNAT)))
+		return NF_ACCEPT;
+
 	if (!realoutdev)
 		return NF_DROP;
 
diff --git a/net/core/dev.c b/net/core/dev.c
index dd7e30754cbc..dd40b35bb006 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2688,7 +2688,7 @@ static void __net_exit dev_proc_net_exit(struct net *net)
 	proc_net_remove(net, "dev");
 }
 
-static struct pernet_operations dev_proc_ops = {
+static struct pernet_operations __net_initdata dev_proc_ops = {
 	.init = dev_proc_net_init,
 	.exit = dev_proc_net_exit,
 };
@@ -4353,7 +4353,7 @@ static void __net_exit netdev_exit(struct net *net)
 	kfree(net->dev_index_head);
 }
 
-static struct pernet_operations netdev_net_ops = {
+static struct pernet_operations __net_initdata netdev_net_ops = {
 	.init = netdev_init,
 	.exit = netdev_exit,
 };
@@ -4384,7 +4384,7 @@ static void __net_exit default_device_exit(struct net *net)
 	rtnl_unlock();
 }
 
-static struct pernet_operations default_device_ops = {
+static struct pernet_operations __net_initdata default_device_ops = {
 	.exit = default_device_exit,
 };
 
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
index 647973daca2b..69fff16ece10 100644
--- a/net/core/dev_mcast.c
+++ b/net/core/dev_mcast.c
@@ -285,7 +285,7 @@ static void __net_exit dev_mc_net_exit(struct net *net)
 	proc_net_remove(net, "dev_mcast");
 }
 
-static struct pernet_operations dev_mc_net_ops = {
+static struct pernet_operations __net_initdata dev_mc_net_ops = {
 	.init = dev_mc_net_init,
 	.exit = dev_mc_net_exit,
 };
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 3f6d37deac45..383252b50411 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -188,6 +188,7 @@ static int __init net_ns_init(void)
 
 pure_initcall(net_ns_init);
 
+#ifdef CONFIG_NET_NS
 static int register_pernet_operations(struct list_head *list,
 				      struct pernet_operations *ops)
 {
@@ -228,6 +229,23 @@ static void unregister_pernet_operations(struct pernet_operations *ops)
 		ops->exit(net);
 }
 
+#else
+
+static int register_pernet_operations(struct list_head *list,
+				      struct pernet_operations *ops)
+{
+	if (ops->init == NULL)
+		return 0;
+	return ops->init(&init_net);
+}
+
+static void unregister_pernet_operations(struct pernet_operations *ops)
+{
+	if (ops->exit)
+		ops->exit(&init_net);
+}
+#endif
+
 /**
  *      register_pernet_subsys - register a network namespace subsystem
  *	@ops:  pernet operations structure for the subsystem
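Reviewer note: with CONFIG_NET_NS disabled, the new fallback above simply applies a subsystem's init/exit once to init_net instead of iterating namespaces. The following is a standalone, user-space sketch of the same compile-time fallback idea; FEATURE_MULTI, struct ctx and the helper names are made up for illustration and are not kernel interfaces.

/* Build with or without -DFEATURE_MULTI; either way the ops are applied,
 * but only the multi build walks more than the default context. */
#include <stdio.h>
#include <stddef.h>

struct ctx { const char *name; };
struct ops {
	int  (*init)(struct ctx *);
	void (*exit)(struct ctx *);
};

static struct ctx default_ctx = { "default" };

#ifdef FEATURE_MULTI
static struct ctx *contexts[] = { &default_ctx /* , more contexts ... */ };

static int register_ops(struct ops *ops)
{
	size_t i;
	for (i = 0; i < sizeof(contexts) / sizeof(contexts[0]); i++) {
		if (ops->init) {
			int err = ops->init(contexts[i]);
			if (err)
				return err;
		}
	}
	return 0;
}

static void unregister_ops(struct ops *ops)
{
	size_t i;
	for (i = 0; i < sizeof(contexts) / sizeof(contexts[0]); i++)
		if (ops->exit)
			ops->exit(contexts[i]);
}
#else
/* Single-context fallback, mirroring the !CONFIG_NET_NS branch above. */
static int register_ops(struct ops *ops)
{
	if (ops->init == NULL)
		return 0;
	return ops->init(&default_ctx);
}

static void unregister_ops(struct ops *ops)
{
	if (ops->exit)
		ops->exit(&default_ctx);
}
#endif

static int hello_init(struct ctx *c) { printf("init %s\n", c->name); return 0; }
static void hello_exit(struct ctx *c) { printf("exit %s\n", c->name); }

int main(void)
{
	struct ops hello = { hello_init, hello_exit };
	register_ops(&hello);
	unregister_ops(&hello);
	return 0;
}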
diff --git a/net/core/sock.c b/net/core/sock.c
index 8fc2f84209e4..c519b439b8b1 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2097,7 +2097,3 @@ EXPORT_SYMBOL(sock_wmalloc);
 EXPORT_SYMBOL(sock_i_uid);
 EXPORT_SYMBOL(sock_i_ino);
 EXPORT_SYMBOL(sysctl_optmem_max);
-#ifdef CONFIG_SYSCTL
-EXPORT_SYMBOL(sysctl_rmem_max);
-EXPORT_SYMBOL(sysctl_wmem_max);
-#endif
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 771031dfbd0f..af995198f643 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -61,7 +61,7 @@
  * 4.  Global variable peer_total is modified under the pool lock.
  * 5.  struct inet_peer fields modification:
  *		avl_left, avl_right, avl_parent, avl_height: pool lock
- *		unused_next, unused_prevp: unused node list lock
+ *		unused: unused node list lock
  *		refcnt: atomically against modifications on other CPU;
  *		   usually under some other lock to prevent node disappearing
  *		dtime: unused node list lock
@@ -94,8 +94,7 @@ int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min
 int inet_peer_gc_mintime __read_mostly = 10 * HZ;
 int inet_peer_gc_maxtime __read_mostly = 120 * HZ;
 
-static struct inet_peer *inet_peer_unused_head;
-static struct inet_peer **inet_peer_unused_tailp = &inet_peer_unused_head;
+static LIST_HEAD(unused_peers);
 static DEFINE_SPINLOCK(inet_peer_unused_lock);
 
 static void peer_check_expire(unsigned long dummy);
@@ -138,15 +137,7 @@ void __init inet_initpeers(void)
 static void unlink_from_unused(struct inet_peer *p)
 {
 	spin_lock_bh(&inet_peer_unused_lock);
-	if (p->unused_prevp != NULL) {
-		/* On unused list. */
-		*p->unused_prevp = p->unused_next;
-		if (p->unused_next != NULL)
-			p->unused_next->unused_prevp = p->unused_prevp;
-		else
-			inet_peer_unused_tailp = p->unused_prevp;
-		p->unused_prevp = NULL;	/* mark it as removed */
-	}
+	list_del_init(&p->unused);
 	spin_unlock_bh(&inet_peer_unused_lock);
 }
 
@@ -337,24 +328,24 @@ static void unlink_from_pool(struct inet_peer *p)
 /* May be called with local BH enabled. */
 static int cleanup_once(unsigned long ttl)
 {
-	struct inet_peer *p;
+	struct inet_peer *p = NULL;
 
 	/* Remove the first entry from the list of unused nodes. */
 	spin_lock_bh(&inet_peer_unused_lock);
-	p = inet_peer_unused_head;
-	if (p != NULL) {
-		__u32 delta = (__u32)jiffies - p->dtime;
+	if (!list_empty(&unused_peers)) {
+		__u32 delta;
+
+		p = list_first_entry(&unused_peers, struct inet_peer, unused);
+		delta = (__u32)jiffies - p->dtime;
+
 		if (delta < ttl) {
 			/* Do not prune fresh entries. */
 			spin_unlock_bh(&inet_peer_unused_lock);
 			return -1;
 		}
-		inet_peer_unused_head = p->unused_next;
-		if (p->unused_next != NULL)
-			p->unused_next->unused_prevp = p->unused_prevp;
-		else
-			inet_peer_unused_tailp = p->unused_prevp;
-		p->unused_prevp = NULL;	/* mark as not on the list */
+
+		list_del_init(&p->unused);
+
 		/* Grab an extra reference to prevent node disappearing
 		 * before unlink_from_pool() call. */
 		atomic_inc(&p->refcnt);
@@ -412,7 +403,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
 
 	/* Link the node. */
 	link_to_pool(n);
-	n->unused_prevp = NULL;	/* not on the list */
+	INIT_LIST_HEAD(&n->unused);
 	peer_total++;
 	write_unlock_bh(&peer_pool_lock);
 
@@ -467,10 +458,7 @@ void inet_putpeer(struct inet_peer *p)
 {
 	spin_lock_bh(&inet_peer_unused_lock);
 	if (atomic_dec_and_test(&p->refcnt)) {
-		p->unused_prevp = inet_peer_unused_tailp;
-		p->unused_next = NULL;
-		*inet_peer_unused_tailp = p;
-		inet_peer_unused_tailp = &p->unused_next;
+		list_add_tail(&p->unused, &unused_peers);
 		p->dtime = (__u32)jiffies;
 	}
 	spin_unlock_bh(&inet_peer_unused_lock);
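Reviewer note: the inetpeer change above swaps an open-coded singly linked "unused" list (separate head and tail-pointer variables) for an embedded node managed by the generic doubly linked list helpers. The sketch below mimics that pattern in plain user-space C with simplified stand-ins for list_add_tail(), list_del_init() and list_empty(); struct peer and the function bodies are illustrative, not the kernel's implementation.

#include <stdio.h>
#include <stddef.h>

struct list_node {
	struct list_node *prev, *next;
};

#define LIST_INIT(name) { &(name), &(name) }

static void list_init(struct list_node *n) { n->prev = n->next = n; }
static int  list_empty(const struct list_node *h) { return h->next == h; }

static void list_add_tail(struct list_node *n, struct list_node *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

/* Safe even when the node is not on any list: a detached node points at
 * itself, so the unlink is a harmless no-op plus re-initialisation. */
static void list_del_init(struct list_node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct peer {
	int addr;
	struct list_node unused;	/* embedded node, as in struct inet_peer */
};

static struct list_node unused_peers = LIST_INIT(unused_peers);

int main(void)
{
	struct peer a = { 1 }, b = { 2 };

	list_init(&a.unused);
	list_init(&b.unused);
	list_add_tail(&a.unused, &unused_peers);	/* like inet_putpeer() */
	list_add_tail(&b.unused, &unused_peers);

	while (!list_empty(&unused_peers)) {		/* like cleanup_once() */
		struct peer *p = container_of(unused_peers.next, struct peer, unused);
		list_del_init(&p->unused);
		printf("reclaiming peer %d\n", p->addr);
	}
	return 0;
}

Because a detached node is self-linked, unlink needs no head/tail bookkeeping and may be repeated, which is what lets unlink_from_unused() collapse to a single list_del_init() call in the patch.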
diff --git a/net/ipv4/ipvs/ip_vs_conn.c b/net/ipv4/ipvs/ip_vs_conn.c
index b7eeae622d9b..0a9f3c37e18d 100644
--- a/net/ipv4/ipvs/ip_vs_conn.c
+++ b/net/ipv4/ipvs/ip_vs_conn.c
@@ -441,7 +441,6 @@ struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp)
 	} else
 		return NULL;
 }
-EXPORT_SYMBOL(ip_vs_try_bind_dest);
 
 
 /*
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index 3c4d22a468ec..b64cf45a9ead 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -604,7 +604,6 @@ struct ip_vs_dest *ip_vs_find_dest(__be32 daddr, __be16 dport,
 	ip_vs_service_put(svc);
 	return dest;
 }
-EXPORT_SYMBOL(ip_vs_find_dest);
 
 /*
  *	Lookup dest by {svc,addr,port} in the destination trash.
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 56e93f692e82..70e7997ea284 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -681,7 +681,7 @@ static int clean_nat(struct nf_conn *i, void *data)
 
 	if (!nat)
 		return 0;
-	memset(nat, 0, sizeof(nat));
+	memset(nat, 0, sizeof(*nat));
 	i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK | IPS_SEQ_ADJUST);
 	return 0;
 }
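Reviewer note: the one-character fix above addresses a classic C pitfall: sizeof(nat) measures the pointer, not the structure it points to, so the memset only cleared the first 4 or 8 bytes of the NAT state. A minimal standalone illustration with a hypothetical structure (not the netfilter one):

#include <stdio.h>
#include <string.h>

struct nat_state { unsigned char data[64]; };

int main(void)
{
	struct nat_state s;
	struct nat_state *nat = &s;

	memset(&s, 0xff, sizeof(s));

	memset(nat, 0, sizeof(nat));	/* wrong: clears only sizeof(pointer) bytes */
	printf("after sizeof(nat):  data[32] = 0x%02x\n", s.data[32]);

	memset(nat, 0, sizeof(*nat));	/* right: clears the whole structure */
	printf("after sizeof(*nat): data[32] = 0x%02x\n", s.data[32]);
	return 0;
}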
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 36f7dbfb6dbb..67997a74ddce 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1037,6 +1037,7 @@ static void ndisc_ra_useropt(struct sk_buff *ra, struct nd_opt_hdr *opt)
 
 	ndmsg = nlmsg_data(nlh);
 	ndmsg->nduseropt_family = AF_INET6;
+	ndmsg->nduseropt_ifindex = ra->dev->ifindex;
 	ndmsg->nduseropt_icmp_type = icmp6h->icmp6_type;
 	ndmsg->nduseropt_icmp_code = icmp6h->icmp6_code;
 	ndmsg->nduseropt_opts_len = opt->nd_opt_len << 3;
diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
index 2dfac3253569..87bc1443c520 100644
--- a/net/netfilter/nf_sockopt.c
+++ b/net/netfilter/nf_sockopt.c
@@ -60,46 +60,57 @@ void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
 }
 EXPORT_SYMBOL(nf_unregister_sockopt);
 
-/* Call get/setsockopt() */
-static int nf_sockopt(struct sock *sk, int pf, int val,
-		      char __user *opt, int *len, int get)
+static struct nf_sockopt_ops *nf_sockopt_find(struct sock *sk, int pf,
+		int val, int get)
 {
 	struct nf_sockopt_ops *ops;
-	int ret;
 
 	if (sk->sk_net != &init_net)
-		return -ENOPROTOOPT;
+		return ERR_PTR(-ENOPROTOOPT);
 
 	if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0)
-		return -EINTR;
+		return ERR_PTR(-EINTR);
 
 	list_for_each_entry(ops, &nf_sockopts, list) {
 		if (ops->pf == pf) {
 			if (!try_module_get(ops->owner))
 				goto out_nosup;
+
 			if (get) {
-				if (val >= ops->get_optmin
-				    && val < ops->get_optmax) {
-					mutex_unlock(&nf_sockopt_mutex);
-					ret = ops->get(sk, val, opt, len);
+				if (val >= ops->get_optmin &&
+				    val < ops->get_optmax)
 					goto out;
-				}
 			} else {
-				if (val >= ops->set_optmin
-				    && val < ops->set_optmax) {
-					mutex_unlock(&nf_sockopt_mutex);
-					ret = ops->set(sk, val, opt, *len);
+				if (val >= ops->set_optmin &&
+				    val < ops->set_optmax)
 					goto out;
-				}
 			}
 			module_put(ops->owner);
 		}
 	}
- out_nosup:
+out_nosup:
+	ops = ERR_PTR(-ENOPROTOOPT);
+out:
 	mutex_unlock(&nf_sockopt_mutex);
-	return -ENOPROTOOPT;
+	return ops;
+}
+
+/* Call get/setsockopt() */
+static int nf_sockopt(struct sock *sk, int pf, int val,
+		      char __user *opt, int *len, int get)
+{
+	struct nf_sockopt_ops *ops;
+	int ret;
+
+	ops = nf_sockopt_find(sk, pf, val, get);
+	if (IS_ERR(ops))
+		return PTR_ERR(ops);
+
+	if (get)
+		ret = ops->get(sk, val, opt, len);
+	else
+		ret = ops->set(sk, val, opt, *len);
 
- out:
 	module_put(ops->owner);
 	return ret;
 }
@@ -124,51 +135,22 @@ static int compat_nf_sockopt(struct sock *sk, int pf, int val,
 	struct nf_sockopt_ops *ops;
 	int ret;
 
-	if (sk->sk_net != &init_net)
-		return -ENOPROTOOPT;
-
-
-	if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0)
-		return -EINTR;
-
-	list_for_each_entry(ops, &nf_sockopts, list) {
-		if (ops->pf == pf) {
-			if (!try_module_get(ops->owner))
-				goto out_nosup;
-
-			if (get) {
-				if (val >= ops->get_optmin
-				    && val < ops->get_optmax) {
-					mutex_unlock(&nf_sockopt_mutex);
-					if (ops->compat_get)
-						ret = ops->compat_get(sk,
-							val, opt, len);
-					else
-						ret = ops->get(sk,
-							val, opt, len);
-					goto out;
-				}
-			} else {
-				if (val >= ops->set_optmin
-				    && val < ops->set_optmax) {
-					mutex_unlock(&nf_sockopt_mutex);
-					if (ops->compat_set)
-						ret = ops->compat_set(sk,
-							val, opt, *len);
-					else
-						ret = ops->set(sk,
-							val, opt, *len);
-					goto out;
-				}
-			}
-			module_put(ops->owner);
-		}
+	ops = nf_sockopt_find(sk, pf, val, get);
+	if (IS_ERR(ops))
+		return PTR_ERR(ops);
+
+	if (get) {
+		if (ops->compat_get)
+			ret = ops->compat_get(sk, val, opt, len);
+		else
+			ret = ops->get(sk, val, ops, len);
+	} else {
+		if (ops->compat_set)
+			ret = ops->compat_set(sk, val, ops, *len);
+		else
+			ret = ops->set(sk, val, ops, *len);
 	}
- out_nosup:
-	mutex_unlock(&nf_sockopt_mutex);
-	return -ENOPROTOOPT;
 
- out:
 	module_put(ops->owner);
 	return ret;
 }
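Reviewer note: the refactor above splits the lookup out into nf_sockopt_find(), which returns either a valid ops pointer or an errno encoded with ERR_PTR() that callers unpack with IS_ERR()/PTR_ERR(). The following user-space sketch shows that convention with simplified stand-in macros (the kernel's real ones live in include/linux/err.h); ops_find() and its registered table are invented for illustration.

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define PTR_ERR(ptr)	((long)(intptr_t)(ptr))
#define IS_ERR(ptr)	((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

struct ops { const char *name; };

static struct ops registered = { "dummy_ops" };

static struct ops *ops_find(int pf)
{
	if (pf != 2)			/* pretend only pf == 2 is registered */
		return ERR_PTR(-ENOPROTOOPT);
	return &registered;
}

int main(void)
{
	struct ops *ops = ops_find(10);

	if (IS_ERR(ops))
		printf("lookup failed: %ld\n", PTR_ERR(ops));

	ops = ops_find(2);
	if (!IS_ERR(ops))
		printf("found %s\n", ops->name);
	return 0;
}

Encoding the error in the pointer keeps the helper to a single return value, which is what lets both nf_sockopt() and compat_nf_sockopt() share it.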
diff --git a/net/netfilter/xt_time.c b/net/netfilter/xt_time.c
index ff44f86c24ce..f9c55dcd894b 100644
--- a/net/netfilter/xt_time.c
+++ b/net/netfilter/xt_time.c
@@ -170,7 +170,7 @@ static bool xt_time_match(const struct sk_buff *skb,
 	if (skb->tstamp.tv64 == 0)
 		__net_timestamp((struct sk_buff *)skb);
 
-	stamp = skb->tstamp.tv64;
+	stamp = ktime_to_ns(skb->tstamp);
 	do_div(stamp, NSEC_PER_SEC);
 
 	if (info->flags & XT_TIME_LOCAL_TZ)
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 415c97236f63..de3988ba1f46 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1888,7 +1888,7 @@ static void __net_exit netlink_net_exit(struct net *net)
 #endif
 }
 
-static struct pernet_operations netlink_net_ops = {
+static struct pernet_operations __net_initdata netlink_net_ops = {
 	.init = netlink_net_init,
 	.exit = netlink_net_exit,
 };
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index eb6be5030c70..8a7807dbba01 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -881,20 +881,14 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protoc
 	if (protocol == 0)
 		goto out_unlock;
 
-	if (dev) {
-		if (dev->flags&IFF_UP) {
-			dev_add_pack(&po->prot_hook);
-			sock_hold(sk);
-			po->running = 1;
-		} else {
-			sk->sk_err = ENETDOWN;
-			if (!sock_flag(sk, SOCK_DEAD))
-				sk->sk_error_report(sk);
-		}
-	} else {
+	if (!dev || (dev->flags & IFF_UP)) {
 		dev_add_pack(&po->prot_hook);
 		sock_hold(sk);
 		po->running = 1;
+	} else {
+		sk->sk_err = ENETDOWN;
+		if (!sock_flag(sk, SOCK_DEAD))
+			sk->sk_error_report(sk);
 	}
 
 out_unlock:
diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
index fe03f71f17da..f3a2bd747a8f 100644
--- a/net/rxrpc/ar-local.c
+++ b/net/rxrpc/ar-local.c
@@ -114,7 +114,7 @@ static int rxrpc_create_local(struct rxrpc_local *local)
 	return 0;
 
 error:
-	local->socket->ops->shutdown(local->socket, 2);
+	kernel_sock_shutdown(local->socket, SHUT_RDWR);
 	local->socket->sk->sk_user_data = NULL;
 	sock_release(local->socket);
 	local->socket = NULL;
@@ -267,7 +267,7 @@ static void rxrpc_destroy_local(struct work_struct *work)
 	/* finish cleaning up the local descriptor */
 	rxrpc_purge_queue(&local->accept_queue);
 	rxrpc_purge_queue(&local->reject_queue);
-	local->socket->ops->shutdown(local->socket, 2);
+	kernel_sock_shutdown(local->socket, SHUT_RDWR);
 	sock_release(local->socket);
 
 	up_read(&rxrpc_local_sem);
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 03158e3665da..013e3d3ab0f1 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -262,10 +262,14 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
 	 */
 	asoc->peer.sack_needed = 1;
 
-	/* Assume that the peer recongizes ASCONF until reported otherwise
-	 * via an ERROR chunk.
+	/* Assume that the peer will tell us if he recognizes ASCONF
+	 * as part of INIT exchange.
+	 * The sctp_addip_noauth option is there for backward compatibilty
+	 * and will revert old behavior.
 	 */
-	asoc->peer.asconf_capable = 1;
+	asoc->peer.asconf_capable = 0;
+	if (sctp_addip_noauth)
+		asoc->peer.asconf_capable = 1;
 
 	/* Create an input queue. */
 	sctp_inq_init(&asoc->base.inqueue);
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index dfffa94fb9f6..cae95af9a8cc 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -180,9 +180,7 @@ int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,
 /* Delete an address from the bind address list in the SCTP_bind_addr
  * structure.
  */
-int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr,
-		       void fastcall (*rcu_call)(struct rcu_head *head,
-				      void (*func)(struct rcu_head *head)))
+int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr)
 {
 	struct sctp_sockaddr_entry *addr, *temp;
 
@@ -198,15 +196,10 @@ int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr,
 		}
 	}
 
-	/* Call the rcu callback provided in the args.  This function is
-	 * called by both BH packet processing and user side socket option
-	 * processing, but it works on different lists in those 2 contexts.
-	 * Each context provides it's own callback, whether call_rcu_bh()
-	 * or call_rcu(), to make sure that we wait for an appropriate time.
-	 */
 	if (addr && !addr->valid) {
-		rcu_call(&addr->rcu, sctp_local_addr_free);
+		call_rcu(&addr->rcu, sctp_local_addr_free);
 		SCTP_DBG_OBJCNT_DEC(addr);
+		return 0;
 	}
 
 	return -EINVAL;
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 2d2d81ef4a69..de6f505d6ff8 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -328,24 +328,35 @@ static struct sctp_association *__sctp_endpoint_lookup_assoc(
 					const union sctp_addr *paddr,
 					struct sctp_transport **transport)
 {
+	struct sctp_association *asoc = NULL;
+	struct sctp_transport *t = NULL;
+	struct sctp_hashbucket *head;
+	struct sctp_ep_common *epb;
+	struct hlist_node *node;
+	int hash;
 	int rport;
-	struct sctp_association *asoc;
-	struct list_head *pos;
 
+	*transport = NULL;
 	rport = ntohs(paddr->v4.sin_port);
 
-	list_for_each(pos, &ep->asocs) {
-		asoc = list_entry(pos, struct sctp_association, asocs);
-		if (rport == asoc->peer.port) {
-			*transport = sctp_assoc_lookup_paddr(asoc, paddr);
-
-			if (*transport)
-				return asoc;
+	hash = sctp_assoc_hashfn(ep->base.bind_addr.port, rport);
+	head = &sctp_assoc_hashtable[hash];
+	read_lock(&head->lock);
+	sctp_for_each_hentry(epb, node, &head->chain) {
+		asoc = sctp_assoc(epb);
+		if (asoc->ep != ep || rport != asoc->peer.port)
+			goto next;
+
+		t = sctp_assoc_lookup_paddr(asoc, paddr);
+		if (t) {
+			*transport = t;
+			break;
 		}
+next:
+		asoc = NULL;
 	}
-
-	*transport = NULL;
-	return NULL;
+	read_unlock(&head->lock);
+	return asoc;
 }
 
 /* Lookup association on an endpoint based on a peer address. BH-safe. */
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 86503e7fa21e..91ae463b079b 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -656,7 +656,6 @@ discard:
 /* Insert endpoint into the hash table.  */
 static void __sctp_hash_endpoint(struct sctp_endpoint *ep)
 {
-	struct sctp_ep_common **epp;
 	struct sctp_ep_common *epb;
 	struct sctp_hashbucket *head;
 
@@ -666,12 +665,7 @@ static void __sctp_hash_endpoint(struct sctp_endpoint *ep)
 	head = &sctp_ep_hashtable[epb->hashent];
 
 	sctp_write_lock(&head->lock);
-	epp = &head->chain;
-	epb->next = *epp;
-	if (epb->next)
-		(*epp)->pprev = &epb->next;
-	*epp = epb;
-	epb->pprev = epp;
+	hlist_add_head(&epb->node, &head->chain);
 	sctp_write_unlock(&head->lock);
 }
 
@@ -691,19 +685,15 @@ static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
 
 	epb = &ep->base;
 
+	if (hlist_unhashed(&epb->node))
+		return;
+
 	epb->hashent = sctp_ep_hashfn(epb->bind_addr.port);
 
 	head = &sctp_ep_hashtable[epb->hashent];
 
 	sctp_write_lock(&head->lock);
-
-	if (epb->pprev) {
-		if (epb->next)
-			epb->next->pprev = epb->pprev;
-		*epb->pprev = epb->next;
-		epb->pprev = NULL;
-	}
-
+	__hlist_del(&epb->node);
 	sctp_write_unlock(&head->lock);
 }
 
@@ -721,12 +711,13 @@ static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *l
 	struct sctp_hashbucket *head;
 	struct sctp_ep_common *epb;
 	struct sctp_endpoint *ep;
+	struct hlist_node *node;
 	int hash;
 
 	hash = sctp_ep_hashfn(ntohs(laddr->v4.sin_port));
 	head = &sctp_ep_hashtable[hash];
 	read_lock(&head->lock);
-	for (epb = head->chain; epb; epb = epb->next) {
+	sctp_for_each_hentry(epb, node, &head->chain) {
 		ep = sctp_ep(epb);
 		if (sctp_endpoint_is_match(ep, laddr))
 			goto hit;
@@ -744,7 +735,6 @@ hit:
 /* Insert association into the hash table.  */
 static void __sctp_hash_established(struct sctp_association *asoc)
 {
-	struct sctp_ep_common **epp;
 	struct sctp_ep_common *epb;
 	struct sctp_hashbucket *head;
 
@@ -756,12 +746,7 @@ static void __sctp_hash_established(struct sctp_association *asoc)
 	head = &sctp_assoc_hashtable[epb->hashent];
 
 	sctp_write_lock(&head->lock);
-	epp = &head->chain;
-	epb->next = *epp;
-	if (epb->next)
-		(*epp)->pprev = &epb->next;
-	*epp = epb;
-	epb->pprev = epp;
+	hlist_add_head(&epb->node, &head->chain);
 	sctp_write_unlock(&head->lock);
 }
 
@@ -790,14 +775,7 @@ static void __sctp_unhash_established(struct sctp_association *asoc)
 	head = &sctp_assoc_hashtable[epb->hashent];
 
 	sctp_write_lock(&head->lock);
-
-	if (epb->pprev) {
-		if (epb->next)
-			epb->next->pprev = epb->pprev;
-		*epb->pprev = epb->next;
-		epb->pprev = NULL;
-	}
-
+	__hlist_del(&epb->node);
 	sctp_write_unlock(&head->lock);
 }
 
@@ -822,6 +800,7 @@ static struct sctp_association *__sctp_lookup_association(
 	struct sctp_ep_common *epb;
 	struct sctp_association *asoc;
 	struct sctp_transport *transport;
+	struct hlist_node *node;
 	int hash;
 
 	/* Optimize here for direct hit, only listening connections can
@@ -830,7 +809,7 @@ static struct sctp_association *__sctp_lookup_association(
 	hash = sctp_assoc_hashfn(ntohs(local->v4.sin_port), ntohs(peer->v4.sin_port));
 	head = &sctp_assoc_hashtable[hash];
 	read_lock(&head->lock);
-	for (epb = head->chain; epb; epb = epb->next) {
+	sctp_for_each_hentry(epb, node, &head->chain) {
 		asoc = sctp_assoc(epb);
 		transport = sctp_assoc_is_match(asoc, local, peer);
 		if (transport)
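Reviewer note: this file and the related SCTP changes replace hand-rolled next/pprev chain surgery with hlist_head/hlist_node buckets and the hlist_add_head()/__hlist_del() helpers. The sketch below is a self-contained user-space rendition of that data structure, using simplified stand-ins rather than the kernel's own definitions; struct endpoint is invented for illustration.

#include <stdio.h>
#include <stddef.h>

struct hlist_head { struct hlist_node *first; };
struct hlist_node { struct hlist_node *next, **pprev; };

static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	n->next = h->first;
	if (h->first)
		h->first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}

/* The pprev back-pointer lets a node be unlinked without walking the
 * bucket, which is exactly what __sctp_unhash_endpoint() now relies on. */
static void __hlist_del(struct hlist_node *n)
{
	*n->pprev = n->next;
	if (n->next)
		n->next->pprev = n->pprev;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct endpoint {
	int port;
	struct hlist_node node;		/* embedded, like sctp_ep_common.node */
};

int main(void)
{
	struct hlist_head bucket = { NULL };
	struct endpoint a = { 7000 }, b = { 9000 };
	struct hlist_node *pos;

	hlist_add_head(&a.node, &bucket);
	hlist_add_head(&b.node, &bucket);
	__hlist_del(&a.node);		/* unhash without scanning the chain */

	for (pos = bucket.first; pos; pos = pos->next)
		printf("port %d still hashed\n",
		       container_of(pos, struct endpoint, node)->port);
	return 0;
}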
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index f10fe7fbf24c..cf4b7eb023b3 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -90,6 +90,10 @@ void sctp_inq_free(struct sctp_inq *queue)
 void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk)
 {
 	/* Directly call the packet handling routine. */
+	if (chunk->rcvr->dead) {
+		sctp_chunk_free(chunk);
+		return;
+	}
 
 	/* We are now calling this either from the soft interrupt
 	 * or from the backlog processing.
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 28f4fe77ceee..fa76f235169b 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -382,7 +382,7 @@ static void sctp_insert_list(struct list_head *head, struct list_head *new)
 /* Mark all the eligible packets on a transport for retransmission.  */
 void sctp_retransmit_mark(struct sctp_outq *q,
 			  struct sctp_transport *transport,
-			  __u8 fast_retransmit)
+			  __u8 reason)
 {
 	struct list_head *lchunk, *ltemp;
 	struct sctp_chunk *chunk;
@@ -412,20 +412,20 @@ void sctp_retransmit_mark(struct sctp_outq *q,
 			continue;
 		}
 
-		/* If we are doing retransmission due to a fast retransmit,
-		 * only the chunk's that are marked for fast retransmit
-		 * should be added to the retransmit queue.  If we are doing
-		 * retransmission due to a timeout or pmtu discovery, only the
-		 * chunks that are not yet acked should be added to the
-		 * retransmit queue.
+		/* If we are doing retransmission due to a timeout or pmtu
+		 * discovery, only the chunks that are not yet acked should
+		 * be added to the retransmit queue.
 		 */
-		if ((fast_retransmit && (chunk->fast_retransmit > 0)) ||
-		    (!fast_retransmit && !chunk->tsn_gap_acked)) {
+		if ((reason == SCTP_RTXR_FAST_RTX &&
+		     (chunk->fast_retransmit > 0)) ||
+		    (reason != SCTP_RTXR_FAST_RTX && !chunk->tsn_gap_acked)) {
 			/* If this chunk was sent less then 1 rto ago, do not
 			 * retransmit this chunk, but give the peer time
-			 * to acknowlege it.
+			 * to acknowlege it.  Do this only when
+			 * retransmitting due to T3 timeout.
 			 */
-			if ((jiffies - chunk->sent_at) < transport->rto)
+			if (reason == SCTP_RTXR_T3_RTX &&
+			    (jiffies - chunk->sent_at) < transport->last_rto)
 				continue;
 
 			/* RFC 2960 6.2.1 Processing a Received SACK
@@ -467,10 +467,10 @@ void sctp_retransmit_mark(struct sctp_outq *q,
 		}
 	}
 
-	SCTP_DEBUG_PRINTK("%s: transport: %p, fast_retransmit: %d, "
+	SCTP_DEBUG_PRINTK("%s: transport: %p, reason: %d, "
 			  "cwnd: %d, ssthresh: %d, flight_size: %d, "
 			  "pba: %d\n", __FUNCTION__,
-			  transport, fast_retransmit,
+			  transport, reason,
 			  transport->cwnd, transport->ssthresh,
 			  transport->flight_size,
 			  transport->partial_bytes_acked);
@@ -484,7 +484,6 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
 		     sctp_retransmit_reason_t reason)
 {
 	int error = 0;
-	__u8 fast_retransmit = 0;
 
 	switch(reason) {
 	case SCTP_RTXR_T3_RTX:
@@ -499,16 +498,18 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
 	case SCTP_RTXR_FAST_RTX:
 		SCTP_INC_STATS(SCTP_MIB_FAST_RETRANSMITS);
 		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
-		fast_retransmit = 1;
 		break;
 	case SCTP_RTXR_PMTUD:
 		SCTP_INC_STATS(SCTP_MIB_PMTUD_RETRANSMITS);
 		break;
+	case SCTP_RTXR_T1_RTX:
+		SCTP_INC_STATS(SCTP_MIB_T1_RETRANSMITS);
+		break;
 	default:
 		BUG();
 	}
 
-	sctp_retransmit_mark(q, transport, fast_retransmit);
+	sctp_retransmit_mark(q, transport, reason);
 
 	/* PR-SCTP A5) Any time the T3-rtx timer expires, on any destination,
 	 * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by
@@ -641,7 +642,8 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
 
 	/* If we are here due to a retransmit timeout or a fast
 	 * retransmit and if there are any chunks left in the retransmit
-	 * queue that could not fit in the PMTU sized packet, they need	 * to be marked as ineligible for a subsequent fast retransmit.
+	 * queue that could not fit in the PMTU sized packet, they need
+	 * to be marked as ineligible for a subsequent fast retransmit.
 	 */
 	if (rtx_timeout && !lchunk) {
 		list_for_each(lchunk1, lqueue) {
@@ -660,10 +662,9 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
 int sctp_outq_uncork(struct sctp_outq *q)
 {
 	int error = 0;
-	if (q->cork) {
+	if (q->cork)
 		q->cork = 0;
-		error = sctp_outq_flush(q, 0);
-	}
+	error = sctp_outq_flush(q, 0);
 	return error;
 }
 
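Reviewer note: sctp_retransmit_mark() now receives the retransmission reason itself instead of a collapsed fast_retransmit boolean, so the eligibility rules can differ per reason (for example, the one-RTO grace period now applies only to T3 timeouts, and T1 retransmissions get their own counter). A simplified, hypothetical sketch of that boolean-to-enum refactor; the names and the struct chunk fields below are illustrative, not the SCTP code:

#include <stdio.h>

enum rtx_reason { RTX_T3_TIMEOUT, RTX_FAST_RTX, RTX_PMTUD, RTX_T1_TIMEOUT };

struct chunk {
	int fast_retransmit;	/* marked by SACK processing */
	int tsn_gap_acked;	/* already acknowledged via a gap report */
	long sent_ago;		/* time since transmission */
};

static int should_retransmit(const struct chunk *c, enum rtx_reason reason, long rto)
{
	int eligible;

	if (reason == RTX_FAST_RTX)
		eligible = c->fast_retransmit > 0;
	else
		eligible = !c->tsn_gap_acked;

	/* Give the peer a full RTO before re-sending, but only on T3. */
	if (eligible && reason == RTX_T3_TIMEOUT && c->sent_ago < rto)
		eligible = 0;

	return eligible;
}

int main(void)
{
	struct chunk c = { 0, 0, 10 };

	printf("T3, fresh chunk: %d\n", should_retransmit(&c, RTX_T3_TIMEOUT, 100));
	printf("T1, fresh chunk: %d\n", should_retransmit(&c, RTX_T1_TIMEOUT, 100));
	c.fast_retransmit = 1;
	printf("fast retransmit: %d\n", should_retransmit(&c, RTX_FAST_RTX, 100));
	return 0;
}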
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index e4cd841a22e4..249973204070 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -225,6 +225,7 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
 	struct sctp_ep_common *epb;
 	struct sctp_endpoint *ep;
 	struct sock *sk;
+	struct hlist_node *node;
 	int    hash = *(loff_t *)v;
 
 	if (hash >= sctp_ep_hashsize)
@@ -233,7 +234,7 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
 	head = &sctp_ep_hashtable[hash];
 	sctp_local_bh_disable();
 	read_lock(&head->lock);
-	for (epb = head->chain; epb; epb = epb->next) {
+	sctp_for_each_hentry(epb, node, &head->chain) {
 		ep = sctp_ep(epb);
 		sk = epb->sk;
 		seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
@@ -328,6 +329,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
 	struct sctp_ep_common *epb;
 	struct sctp_association *assoc;
 	struct sock *sk;
+	struct hlist_node *node;
 	int    hash = *(loff_t *)v;
 
 	if (hash >= sctp_assoc_hashsize)
@@ -336,7 +338,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
 	head = &sctp_assoc_hashtable[hash];
 	sctp_local_bh_disable();
 	read_lock(&head->lock);
-	for (epb = head->chain; epb; epb = epb->next) {
+	sctp_for_each_hentry(epb, node, &head->chain) {
 		assoc = sctp_assoc(epb);
 		sk = epb->sk;
 		seq_printf(seq,
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 40c1a47d1b8d..d50f610d1b02 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1137,7 +1137,7 @@ SCTP_STATIC __init int sctp_init(void)
 	}
 	for (i = 0; i < sctp_assoc_hashsize; i++) {
 		rwlock_init(&sctp_assoc_hashtable[i].lock);
-		sctp_assoc_hashtable[i].chain = NULL;
+		INIT_HLIST_HEAD(&sctp_assoc_hashtable[i].chain);
 	}
 
 	/* Allocate and initialize the endpoint hash table.  */
@@ -1151,7 +1151,7 @@ SCTP_STATIC __init int sctp_init(void)
 	}
 	for (i = 0; i < sctp_ep_hashsize; i++) {
 		rwlock_init(&sctp_ep_hashtable[i].lock);
-		sctp_ep_hashtable[i].chain = NULL;
+		INIT_HLIST_HEAD(&sctp_ep_hashtable[i].chain);
 	}
 
 	/* Allocate and initialize the SCTP port hash table.  */
@@ -1170,7 +1170,7 @@ SCTP_STATIC __init int sctp_init(void)
 	}
 	for (i = 0; i < sctp_port_hashsize; i++) {
 		spin_lock_init(&sctp_port_hashtable[i].lock);
-		sctp_port_hashtable[i].chain = NULL;
+		INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain);
 	}
 
 	printk(KERN_INFO "SCTP: Hash tables configured "
@@ -1179,6 +1179,7 @@ SCTP_STATIC __init int sctp_init(void)
 
 	/* Disable ADDIP by default. */
 	sctp_addip_enable = 0;
+	sctp_addip_noauth = 0;
 
 	/* Enable PR-SCTP by default. */
 	sctp_prsctp_enable = 1;
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index c377e4e8f653..5a9783c38de1 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1788,9 +1788,14 @@ static int sctp_process_inv_paramlength(const struct sctp_association *asoc,
 		sizeof(sctp_paramhdr_t);
 
 
+	/* This is a fatal error.  Any accumulated non-fatal errors are
+	 * not reported.
+	 */
+	if (*errp)
+		sctp_chunk_free(*errp);
+
 	/* Create an error chunk and fill it in with our payload. */
-	if (!*errp)
-		*errp = sctp_make_op_error_space(asoc, chunk, payload_len);
+	*errp = sctp_make_op_error_space(asoc, chunk, payload_len);
 
 	if (*errp) {
 		sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION,
@@ -1813,9 +1818,15 @@ static int sctp_process_hn_param(const struct sctp_association *asoc,
 {
 	__u16 len = ntohs(param.p->length);
 
-	/* Make an ERROR chunk. */
-	if (!*errp)
-		*errp = sctp_make_op_error_space(asoc, chunk, len);
+	/* Processing of the HOST_NAME parameter will generate an
+	 * ABORT.  If we've accumulated any non-fatal errors, they
+	 * would be unrecognized parameters and we should not include
+	 * them in the ABORT.
+	 */
+	if (*errp)
+		sctp_chunk_free(*errp);
+
+	*errp = sctp_make_op_error_space(asoc, chunk, len);
 
 	if (*errp) {
 		sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED, len);
@@ -1847,7 +1858,7 @@ static void sctp_process_ext_param(struct sctp_association *asoc,
 			break;
 		case SCTP_CID_ASCONF:
 		case SCTP_CID_ASCONF_ACK:
-			asoc->peer.addip_capable = 1;
+			asoc->peer.asconf_capable = 1;
 			break;
 		default:
 			break;
@@ -1862,56 +1873,40 @@ static void sctp_process_ext_param(struct sctp_association *asoc,
  * taken if the processing endpoint does not recognize the
  * Parameter Type.
  *
- * 00 - Stop processing this SCTP chunk and discard it,
- *	do not process any further chunks within it.
+ * 00 - Stop processing this parameter; do not process any further
+ * 	parameters within this chunk
  *
- * 01 - Stop processing this SCTP chunk and discard it,
- *	do not process any further chunks within it, and report
- *	the unrecognized parameter in an 'Unrecognized
- *	Parameter Type' (in either an ERROR or in the INIT ACK).
+ * 01 - Stop processing this parameter, do not process any further
+ *	parameters within this chunk, and report the unrecognized
+ *	parameter in an 'Unrecognized Parameter' ERROR chunk.
  *
  * 10 - Skip this parameter and continue processing.
  *
  * 11 - Skip this parameter and continue processing but
  *	report the unrecognized parameter in an
- *	'Unrecognized Parameter Type' (in either an ERROR or in
- *	the INIT ACK).
+ *	'Unrecognized Parameter' ERROR chunk.
  *
  * Return value:
- * 	0 - discard the chunk
- * 	1 - continue with the chunk
+ * 	SCTP_IERROR_NO_ERROR - continue with the chunk
+ * 	SCTP_IERROR_ERROR    - stop and report an error.
+ * 	SCTP_IERROR_NOMEME   - out of memory.
  */
-static int sctp_process_unk_param(const struct sctp_association *asoc,
-				  union sctp_params param,
-				  struct sctp_chunk *chunk,
-				  struct sctp_chunk **errp)
+static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc,
+					    union sctp_params param,
+					    struct sctp_chunk *chunk,
+					    struct sctp_chunk **errp)
 {
-	int retval = 1;
+	int retval = SCTP_IERROR_NO_ERROR;
 
 	switch (param.p->type & SCTP_PARAM_ACTION_MASK) {
 	case SCTP_PARAM_ACTION_DISCARD:
-		retval = 0;
-		break;
-	case SCTP_PARAM_ACTION_DISCARD_ERR:
-		retval = 0;
-		/* Make an ERROR chunk, preparing enough room for
-		 * returning multiple unknown parameters.
-		 */
-		if (NULL == *errp)
-			*errp = sctp_make_op_error_space(asoc, chunk,
-					ntohs(chunk->chunk_hdr->length));
-
-		if (*errp) {
-			sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
-					WORD_ROUND(ntohs(param.p->length)));
-			sctp_addto_chunk(*errp,
-					WORD_ROUND(ntohs(param.p->length)),
-					param.v);
-		}
-
+		retval = SCTP_IERROR_ERROR;
 		break;
 	case SCTP_PARAM_ACTION_SKIP:
 		break;
+	case SCTP_PARAM_ACTION_DISCARD_ERR:
+		retval = SCTP_IERROR_ERROR;
+		/* Fall through */
 	case SCTP_PARAM_ACTION_SKIP_ERR:
 		/* Make an ERROR chunk, preparing enough room for
 		 * returning multiple unknown parameters.
@@ -1932,9 +1927,8 @@ static int sctp_process_unk_param(const struct sctp_association *asoc,
 			 * to the peer and the association won't be
 			 * established.
 			 */
-			retval = 0;
+			retval = SCTP_IERROR_NOMEM;
 		}
-
 		break;
 	default:
 		break;
@@ -1943,18 +1937,20 @@ static int sctp_process_unk_param(const struct sctp_association *asoc,
 	return retval;
 }
 
-/* Find unrecognized parameters in the chunk.
+/* Verify variable length parameters
  * Return values:
- * 	0 - discard the chunk
- * 	1 - continue with the chunk
+ * 	SCTP_IERROR_ABORT - trigger an ABORT
+ * 	SCTP_IERROR_NOMEM - out of memory (abort)
+ *	SCTP_IERROR_ERROR - stop processing, trigger an ERROR
+ * 	SCTP_IERROR_NO_ERROR - continue with the chunk
  */
-static int sctp_verify_param(const struct sctp_association *asoc,
-			     union sctp_params param,
-			     sctp_cid_t cid,
-			     struct sctp_chunk *chunk,
-			     struct sctp_chunk **err_chunk)
+static sctp_ierror_t sctp_verify_param(const struct sctp_association *asoc,
+			     union sctp_params param,
+			     sctp_cid_t cid,
+			     struct sctp_chunk *chunk,
+			     struct sctp_chunk **err_chunk)
 {
-	int retval = 1;
+	int retval = SCTP_IERROR_NO_ERROR;
 
 	/* FIXME - This routine is not looking at each parameter per the
 	 * chunk type, i.e., unrecognized parameters should be further
@@ -1976,7 +1972,9 @@ static int sctp_verify_param(const struct sctp_association *asoc,
 
 	case SCTP_PARAM_HOST_NAME_ADDRESS:
 		/* Tell the peer, we won't support this param.  */
-		return sctp_process_hn_param(asoc, param, chunk, err_chunk);
+		sctp_process_hn_param(asoc, param, chunk, err_chunk);
+		retval = SCTP_IERROR_ABORT;
+		break;
 
 	case SCTP_PARAM_FWD_TSN_SUPPORT:
 		if (sctp_prsctp_enable)
@@ -1993,9 +1991,11 @@ static int sctp_verify_param(const struct sctp_association *asoc,
 		 * cause 'Protocol Violation'.
 		 */
 		if (SCTP_AUTH_RANDOM_LENGTH !=
-			ntohs(param.p->length) - sizeof(sctp_paramhdr_t))
-			return sctp_process_inv_paramlength(asoc, param.p,
+			ntohs(param.p->length) - sizeof(sctp_paramhdr_t)) {
+			sctp_process_inv_paramlength(asoc, param.p,
 							chunk, err_chunk);
+			retval = SCTP_IERROR_ABORT;
+		}
 		break;
 
 	case SCTP_PARAM_CHUNKS:
@@ -2007,9 +2007,11 @@ static int sctp_verify_param(const struct sctp_association *asoc,
 		 * INIT-ACK chunk if the sender wants to receive authenticated
 		 * chunks.  Its maximum length is 260 bytes.
 		 */
-		if (260 < ntohs(param.p->length))
-			return sctp_process_inv_paramlength(asoc, param.p,
+		if (260 < ntohs(param.p->length)) {
+			sctp_process_inv_paramlength(asoc, param.p,
 						chunk, err_chunk);
+			retval = SCTP_IERROR_ABORT;
+		}
 		break;
 
 	case SCTP_PARAM_HMAC_ALGO:
@@ -2020,8 +2022,7 @@ fallthrough:
 	default:
 		SCTP_DEBUG_PRINTK("Unrecognized param: %d for chunk %d.\n",
 				ntohs(param.p->type), cid);
-		return sctp_process_unk_param(asoc, param, chunk, err_chunk);
-
+		retval = sctp_process_unk_param(asoc, param, chunk, err_chunk);
 		break;
 	}
 	return retval;
@@ -2036,6 +2037,7 @@ int sctp_verify_init(const struct sctp_association *asoc,
 {
 	union sctp_params param;
 	int has_cookie = 0;
+	int result;
 
 	/* Verify stream values are non-zero. */
 	if ((0 == peer_init->init_hdr.num_outbound_streams) ||
@@ -2043,8 +2045,7 @@ int sctp_verify_init(const struct sctp_association *asoc,
 	    (0 == peer_init->init_hdr.init_tag) ||
 	    (SCTP_DEFAULT_MINWINDOW > ntohl(peer_init->init_hdr.a_rwnd))) {
 
-		sctp_process_inv_mandatory(asoc, chunk, errp);
-		return 0;
+		return sctp_process_inv_mandatory(asoc, chunk, errp);
 	}
 
 	/* Check for missing mandatory parameters.  */
@@ -2062,29 +2063,29 @@ int sctp_verify_init(const struct sctp_association *asoc,
 	 * VIOLATION error.  We build the ERROR chunk here and let the normal
 	 * error handling code build and send the packet.
 	 */
-	if (param.v != (void*)chunk->chunk_end) {
-		sctp_process_inv_paramlength(asoc, param.p, chunk, errp);
-		return 0;
-	}
+	if (param.v != (void*)chunk->chunk_end)
+		return sctp_process_inv_paramlength(asoc, param.p, chunk, errp);
 
 	/* The only missing mandatory param possible today is
 	 * the state cookie for an INIT-ACK chunk.
 	 */
-	if ((SCTP_CID_INIT_ACK == cid) && !has_cookie) {
-		sctp_process_missing_param(asoc, SCTP_PARAM_STATE_COOKIE,
+	if ((SCTP_CID_INIT_ACK == cid) && !has_cookie)
+		return sctp_process_missing_param(asoc, SCTP_PARAM_STATE_COOKIE,
 					   chunk, errp);
-		return 0;
-	}
-
-	/* Find unrecognized parameters. */
 
+	/* Verify all the variable length parameters */
 	sctp_walk_params(param, peer_init, init_hdr.params) {
 
-		if (!sctp_verify_param(asoc, param, cid, chunk, errp)) {
-			if (SCTP_PARAM_HOST_NAME_ADDRESS == param.p->type)
+		result = sctp_verify_param(asoc, param, cid, chunk, errp);
+		switch (result) {
+		    case SCTP_IERROR_ABORT:
+		    case SCTP_IERROR_NOMEM:
 				return 0;
-			else
+		    case SCTP_IERROR_ERROR:
 				return 1;
+		    case SCTP_IERROR_NO_ERROR:
+		    default:
+				break;
 		}
 
 	} /* for (loop through all parameters) */
@@ -2137,11 +2138,14 @@ int sctp_process_init(struct sctp_association *asoc, sctp_cid_t cid,
 
 	/* If the peer claims support for ADD-IP without support
 	 * for AUTH, disable support for ADD-IP.
+	 * Do this only if backward compatible mode is turned off.
 	 */
-	if (asoc->peer.addip_capable && !asoc->peer.auth_capable) {
+	if (!sctp_addip_noauth &&
+	     (asoc->peer.asconf_capable && !asoc->peer.auth_capable)) {
 		asoc->peer.addip_disabled_mask |= (SCTP_PARAM_ADD_IP |
 						  SCTP_PARAM_DEL_IP |
 						  SCTP_PARAM_SET_PRIMARY);
+		asoc->peer.asconf_capable = 0;
 	}
 
 	/* Walk list of transports, removing transports in the UNKNOWN state. */
@@ -2848,10 +2852,11 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
 
 	__be16	err_code;
 	int	length = 0;
-	int	chunk_len = asconf->skb->len;
+	int	chunk_len;
 	__u32	serial;
 	int	all_param_pass = 1;
 
+	chunk_len = ntohs(asconf->chunk_hdr->length) - sizeof(sctp_chunkhdr_t);
 	hdr = (sctp_addiphdr_t *)asconf->skb->data;
 	serial = ntohl(hdr->serial);
 
@@ -2952,13 +2957,17 @@ static int sctp_asconf_param_success(struct sctp_association *asoc,
 		/* This is always done in BH context with a socket lock
 		 * held, so the list can not change.
 		 */
+		local_bh_disable();
 		list_for_each_entry(saddr, &bp->address_list, list) {
 			if (sctp_cmp_addr_exact(&saddr->a, &addr))
 				saddr->use_as_src = 1;
 		}
+		local_bh_enable();
 		break;
 	case SCTP_PARAM_DEL_IP:
-		retval = sctp_del_bind_addr(bp, &addr, call_rcu_bh);
+		local_bh_disable();
+		retval = sctp_del_bind_addr(bp, &addr);
+		local_bh_enable();
 		list_for_each(pos, &asoc->peer.transport_addr_list) {
 			transport = list_entry(pos, struct sctp_transport,
 						 transports);
@@ -2990,7 +2999,7 @@ static __be16 sctp_get_asconf_response(struct sctp_chunk *asconf_ack,
 	sctp_addip_param_t	*asconf_ack_param;
 	sctp_errhdr_t		*err_param;
 	int			length;
-	int			asconf_ack_len = asconf_ack->skb->len;
+	int			asconf_ack_len;
 	__be16			err_code;
 
 	if (no_err)
@@ -2998,6 +3007,9 @@ static __be16 sctp_get_asconf_response(struct sctp_chunk *asconf_ack,
 	else
 		err_code = SCTP_ERROR_REQ_REFUSED;
 
+	asconf_ack_len = ntohs(asconf_ack->chunk_hdr->length) -
+			 sizeof(sctp_chunkhdr_t);
+
 	/* Skip the addiphdr from the asconf_ack chunk and store a pointer to
 	 * the first asconf_ack parameter.
 	 */
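Reviewer note: sctp_verify_param() and sctp_process_unk_param() now return sctp_ierror_t status codes instead of a bare 0/1, and sctp_verify_init() dispatches on the result with a switch. The following is a small, self-contained sketch of that 0/1-to-status-enum refactor; the enum values, verify_param() and verify_init() are illustrative stand-ins, not the SCTP functions.

#include <stdio.h>

enum verify_status { V_NO_ERROR, V_ERROR, V_ABORT, V_NOMEM };

static enum verify_status verify_param(int param_type)
{
	switch (param_type) {
	case 1:				/* unsupported but reportable */
		return V_ERROR;
	case 2:				/* fatal, e.g. malformed length */
		return V_ABORT;
	default:
		return V_NO_ERROR;
	}
}

/* Returns 0 to drop the packet, 1 to continue processing it. */
static int verify_init(const int *params, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		switch (verify_param(params[i])) {
		case V_ABORT:
		case V_NOMEM:
			return 0;
		case V_ERROR:
			return 1;
		case V_NO_ERROR:
		default:
			break;
		}
	}
	return 1;
}

int main(void)
{
	int ok[]  = { 0, 0, 0 };
	int bad[] = { 0, 2, 0 };

	printf("clean INIT:  %d\n", verify_init(ok, 3));
	printf("fatal param: %d\n", verify_init(bad, 3));
	return 0;
}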
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index bbdc938da86f..78d1a8a49bd0 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -453,6 +453,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_association *asoc,
453 * maximum value discussed in rule C7 above (RTO.max) may be 453 * maximum value discussed in rule C7 above (RTO.max) may be
454 * used to provide an upper bound to this doubling operation. 454 * used to provide an upper bound to this doubling operation.
455 */ 455 */
456 transport->last_rto = transport->rto;
456 transport->rto = min((transport->rto * 2), transport->asoc->rto_max); 457 transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
457} 458}
458 459
@@ -1267,6 +1268,12 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1267 sctp_ootb_pkt_free(packet); 1268 sctp_ootb_pkt_free(packet);
1268 break; 1269 break;
1269 1270
1271 case SCTP_CMD_T1_RETRAN:
1272 /* Mark a transport for retransmission. */
1273 sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
1274 SCTP_RTXR_T1_RTX);
1275 break;
1276
1270 case SCTP_CMD_RETRAN: 1277 case SCTP_CMD_RETRAN:
1271 /* Mark a transport for retransmission. */ 1278 /* Mark a transport for retransmission. */
1272 sctp_retransmit(&asoc->outqueue, cmd->obj.transport, 1279 sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
@@ -1393,7 +1400,8 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1393 list_for_each(pos, &asoc->peer.transport_addr_list) { 1400 list_for_each(pos, &asoc->peer.transport_addr_list) {
1394 t = list_entry(pos, struct sctp_transport, 1401 t = list_entry(pos, struct sctp_transport,
1395 transports); 1402 transports);
1396 sctp_retransmit_mark(&asoc->outqueue, t, 0); 1403 sctp_retransmit_mark(&asoc->outqueue, t,
1404 SCTP_RTXR_T1_RTX);
1397 } 1405 }
1398 1406
1399 sctp_add_cmd_sf(commands, 1407 sctp_add_cmd_sf(commands,
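[Editor's sketch] The sm_sideeffect.c hunks remember the pre-backoff timeout in last_rto before doubling it on a transport strike, and give COOKIE-ECHO retransmissions their own SCTP_CMD_T1_RETRAN / SCTP_RTXR_T1_RTX reason. A small userspace sketch of the backoff bookkeeping; the struct and field names only mirror the patch, they are not the kernel's sctp_transport.

#include <stdint.h>
#include <stdio.h>

struct xport {
	uint32_t rto;		/* current retransmission timeout (ms) */
	uint32_t last_rto;	/* value before the most recent backoff */
	uint32_t rto_max;	/* upper bound from rule C7 (RTO.max) */
};

/* On a retransmission strike: remember the pre-backoff value, then double
 * the RTO, clamped to RTO.max, as sctp_do_8_2_transport_strike() now does. */
static void strike(struct xport *t)
{
	t->last_rto = t->rto;
	t->rto = t->rto * 2 < t->rto_max ? t->rto * 2 : t->rto_max;
}

int main(void)
{
	struct xport t = { .rto = 3000, .last_rto = 3000, .rto_max = 60000 };
	int i;

	for (i = 0; i < 6; i++) {
		strike(&t);
		printf("strike %d: rto=%u last_rto=%u\n", i + 1, t.rto, t.last_rto);
	}
	return 0;	/* rto: 6000, 12000, 24000, 48000, 60000, 60000 */
}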
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index f01b408508ff..5ebbe808d801 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -2305,7 +2305,7 @@ static sctp_disposition_t sctp_sf_do_5_2_6_stale(const struct sctp_endpoint *ep,
2305 /* If we've sent any data bundled with COOKIE-ECHO we will need to 2305 /* If we've sent any data bundled with COOKIE-ECHO we will need to
2306 * resend 2306 * resend
2307 */ 2307 */
2308 sctp_add_cmd_sf(commands, SCTP_CMD_RETRAN, 2308 sctp_add_cmd_sf(commands, SCTP_CMD_T1_RETRAN,
2309 SCTP_TRANSPORT(asoc->peer.primary_path)); 2309 SCTP_TRANSPORT(asoc->peer.primary_path));
2310 2310
2311 /* Cast away the const modifier, as we want to just 2311 /* Cast away the const modifier, as we want to just
@@ -4064,11 +4064,6 @@ static sctp_disposition_t sctp_sf_abort_violation(
4064 struct sctp_chunk *chunk = arg; 4064 struct sctp_chunk *chunk = arg;
4065 struct sctp_chunk *abort = NULL; 4065 struct sctp_chunk *abort = NULL;
4066 4066
4067 /* Make the abort chunk. */
4068 abort = sctp_make_abort_violation(asoc, chunk, payload, paylen);
4069 if (!abort)
4070 goto nomem;
4071
4072 /* SCTP-AUTH, Section 6.3: 4067 /* SCTP-AUTH, Section 6.3:
4073 * It should be noted that if the receiver wants to tear 4068 * It should be noted that if the receiver wants to tear
4074 * down an association in an authenticated way only, the 4069 * down an association in an authenticated way only, the
@@ -4083,6 +4078,11 @@ static sctp_disposition_t sctp_sf_abort_violation(
4083 if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc)) 4078 if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc))
4084 goto discard; 4079 goto discard;
4085 4080
4081 /* Make the abort chunk. */
4082 abort = sctp_make_abort_violation(asoc, chunk, payload, paylen);
4083 if (!abort)
4084 goto nomem;
4085
4086 if (asoc) { 4086 if (asoc) {
4087 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); 4087 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
4088 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); 4088 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
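[Editor's sketch] The sm_statefuns.c hunk moves sctp_make_abort_violation() below the SCTP-AUTH check, so the early goto discard can no longer leak a freshly built abort chunk. A hedged userspace sketch of the same allocate-after-validate ordering; the types and helper are hypothetical.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical reply object standing in for the abort chunk. */
struct reply { char body[64]; };

/* Returns 0 on success, -1 if the input must be discarded.  Allocating the
 * reply only after the discard check means the discard path has nothing to
 * free - the ordering the hunk restores. */
static int handle(const char *input, struct reply **out)
{
	struct reply *r;

	if (input == NULL || input[0] == '\0')	/* "must be discarded" check */
		return -1;			/* nothing allocated, nothing leaked */

	r = malloc(sizeof(*r));			/* allocate only when we will reply */
	if (!r)
		return -1;
	snprintf(r->body, sizeof(r->body), "abort: %s", input);
	*out = r;
	return 0;
}

int main(void)
{
	struct reply *r = NULL;

	if (handle("protocol violation", &r) == 0) {
		puts(r->body);
		free(r);
	}
	return 0;
}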
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index a7ecf3159e53..ff8bc95670ed 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -660,7 +660,7 @@ static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
660 * socket routing and failover schemes. Refer to comments in 660 * socket routing and failover schemes. Refer to comments in
661 * sctp_do_bind(). -daisy 661 * sctp_do_bind(). -daisy
662 */ 662 */
663 retval = sctp_del_bind_addr(bp, sa_addr, call_rcu); 663 retval = sctp_del_bind_addr(bp, sa_addr);
664 664
665 addr_buf += af->sockaddr_len; 665 addr_buf += af->sockaddr_len;
666err_bindx_rem: 666err_bindx_rem:
@@ -5307,6 +5307,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
5307{ 5307{
5308 struct sctp_bind_hashbucket *head; /* hash list */ 5308 struct sctp_bind_hashbucket *head; /* hash list */
5309 struct sctp_bind_bucket *pp; /* hash list port iterator */ 5309 struct sctp_bind_bucket *pp; /* hash list port iterator */
5310 struct hlist_node *node;
5310 unsigned short snum; 5311 unsigned short snum;
5311 int ret; 5312 int ret;
5312 5313
@@ -5331,7 +5332,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
5331 index = sctp_phashfn(rover); 5332 index = sctp_phashfn(rover);
5332 head = &sctp_port_hashtable[index]; 5333 head = &sctp_port_hashtable[index];
5333 sctp_spin_lock(&head->lock); 5334 sctp_spin_lock(&head->lock);
5334 for (pp = head->chain; pp; pp = pp->next) 5335 sctp_for_each_hentry(pp, node, &head->chain)
5335 if (pp->port == rover) 5336 if (pp->port == rover)
5336 goto next; 5337 goto next;
5337 break; 5338 break;
@@ -5358,7 +5359,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
5358 */ 5359 */
5359 head = &sctp_port_hashtable[sctp_phashfn(snum)]; 5360 head = &sctp_port_hashtable[sctp_phashfn(snum)];
5360 sctp_spin_lock(&head->lock); 5361 sctp_spin_lock(&head->lock);
5361 for (pp = head->chain; pp; pp = pp->next) { 5362 sctp_for_each_hentry(pp, node, &head->chain) {
5362 if (pp->port == snum) 5363 if (pp->port == snum)
5363 goto pp_found; 5364 goto pp_found;
5364 } 5365 }
@@ -5702,10 +5703,7 @@ static struct sctp_bind_bucket *sctp_bucket_create(
5702 pp->port = snum; 5703 pp->port = snum;
5703 pp->fastreuse = 0; 5704 pp->fastreuse = 0;
5704 INIT_HLIST_HEAD(&pp->owner); 5705 INIT_HLIST_HEAD(&pp->owner);
5705 if ((pp->next = head->chain) != NULL) 5706 hlist_add_head(&pp->node, &head->chain);
5706 pp->next->pprev = &pp->next;
5707 head->chain = pp;
5708 pp->pprev = &head->chain;
5709 } 5707 }
5710 return pp; 5708 return pp;
5711} 5709}
@@ -5714,9 +5712,7 @@ static struct sctp_bind_bucket *sctp_bucket_create(
5714static void sctp_bucket_destroy(struct sctp_bind_bucket *pp) 5712static void sctp_bucket_destroy(struct sctp_bind_bucket *pp)
5715{ 5713{
5716 if (pp && hlist_empty(&pp->owner)) { 5714 if (pp && hlist_empty(&pp->owner)) {
5717 if (pp->next) 5715 __hlist_del(&pp->node);
5718 pp->next->pprev = pp->pprev;
5719 *(pp->pprev) = pp->next;
5720 kmem_cache_free(sctp_bucket_cachep, pp); 5716 kmem_cache_free(sctp_bucket_cachep, pp);
5721 SCTP_DBG_OBJCNT_DEC(bind_bucket); 5717 SCTP_DBG_OBJCNT_DEC(bind_bucket);
5722 } 5718 }
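[Editor's sketch] The net/sctp/socket.c hunks retire the open-coded next/pprev chaining of the port hash buckets in favour of the generic hlist helpers (hlist_add_head(), __hlist_del(), and iteration via sctp_for_each_hentry). Below is a self-contained userspace re-implementation of that intrusive-list pattern, written from scratch rather than copied from <linux/list.h>.

#include <stddef.h>
#include <stdio.h>

/* Minimal intrusive hlist, just enough to show what the helpers replace. */
struct hnode { struct hnode *next, **pprev; };
struct hhead { struct hnode *first; };

static void my_hlist_add_head(struct hnode *n, struct hhead *h)
{
	n->next = h->first;
	if (h->first)
		h->first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}

static void my_hlist_del(struct hnode *n)
{
	*n->pprev = n->next;
	if (n->next)
		n->next->pprev = n->pprev;
}

/* A bind bucket keyed by port, in the spirit of sctp_bucket_create(). */
struct bucket { unsigned short port; struct hnode node; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct hhead chain = { NULL };
	struct bucket a = { .port = 9 }, b = { .port = 5000 };
	struct hnode *pos;

	my_hlist_add_head(&a.node, &chain);
	my_hlist_add_head(&b.node, &chain);

	/* Equivalent of sctp_for_each_hentry() over one hash chain. */
	for (pos = chain.first; pos; pos = pos->next)
		printf("port %u\n", container_of(pos, struct bucket, node)->port);

	my_hlist_del(&a.node);
	return 0;
}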
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 0669778e4335..da4f15734fb1 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -263,6 +263,15 @@ static ctl_table sctp_table[] = {
263 .proc_handler = &proc_dointvec, 263 .proc_handler = &proc_dointvec,
264 .strategy = &sysctl_intvec 264 .strategy = &sysctl_intvec
265 }, 265 },
266 {
267 .ctl_name = CTL_UNNUMBERED,
268 .procname = "addip_noauth_enable",
269 .data = &sctp_addip_noauth,
270 .maxlen = sizeof(int),
271 .mode = 0644,
272 .proc_handler = &proc_dointvec,
273 .strategy = &sysctl_intvec
274 },
266 { .ctl_name = 0 } 275 { .ctl_name = 0 }
267}; 276};
268 277
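[Editor's sketch] The new ctl_table entry exposes sctp_addip_noauth as a writable integer. Assuming it is registered under the same net.sctp root as the neighbouring entries, it should surface as /proc/sys/net/sctp/addip_noauth_enable; that path is an inference, not something the patch states. A tiny userspace reader for it:

#include <stdio.h>

int main(void)
{
	int val = -1;
	FILE *f = fopen("/proc/sys/net/sctp/addip_noauth_enable", "r");

	if (!f || fscanf(f, "%d", &val) != 1)
		perror("addip_noauth_enable");
	else
		printf("addip_noauth_enable = %d\n", val);
	if (f)
		fclose(f);
	return 0;
}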
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 5f467c914f80..d55ce83a020b 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -74,8 +74,8 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
74 * given destination transport address, set RTO to the protocol 74 * given destination transport address, set RTO to the protocol
75 * parameter 'RTO.Initial'. 75 * parameter 'RTO.Initial'.
76 */ 76 */
77 peer->last_rto = peer->rto = msecs_to_jiffies(sctp_rto_initial);
77 peer->rtt = 0; 78 peer->rtt = 0;
78 peer->rto = msecs_to_jiffies(sctp_rto_initial);
79 peer->rttvar = 0; 79 peer->rttvar = 0;
80 peer->srtt = 0; 80 peer->srtt = 0;
81 peer->rto_pending = 0; 81 peer->rto_pending = 0;
@@ -385,6 +385,7 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
385 tp->rto = tp->asoc->rto_max; 385 tp->rto = tp->asoc->rto_max;
386 386
387 tp->rtt = rtt; 387 tp->rtt = rtt;
388 tp->last_rto = tp->rto;
388 389
389 /* Reset rto_pending so that a new RTT measurement is started when a 390 /* Reset rto_pending so that a new RTT measurement is started when a
390 * new data chunk is sent. 391 * new data chunk is sent.
@@ -578,7 +579,7 @@ void sctp_transport_reset(struct sctp_transport *t)
578 */ 579 */
579 t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380)); 580 t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
580 t->ssthresh = asoc->peer.i.a_rwnd; 581 t->ssthresh = asoc->peer.i.a_rwnd;
581 t->rto = asoc->rto_initial; 582 t->last_rto = t->rto = asoc->rto_initial;
582 t->rtt = 0; 583 t->rtt = 0;
583 t->srtt = 0; 584 t->srtt = 0;
584 t->rttvar = 0; 585 t->rttvar = 0;
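[Editor's sketch] The transport.c hunks keep last_rto in lock-step with rto everywhere the timeout is (re)computed: at init from RTO.Initial, after each RTT-driven update, and on transport reset. A userspace sketch of an RFC 2988-style RTO update with that extra bookkeeping; the struct, constants and clamping are illustrative, not the kernel's sctp_transport_update_rto().

#include <stdint.h>
#include <stdio.h>

struct rto_state {
	uint32_t srtt, rttvar, rto, last_rto;	/* all in milliseconds */
	uint32_t rto_min, rto_max;
	int have_rtt;
};

static void update_rto(struct rto_state *s, uint32_t rtt)
{
	if (!s->have_rtt) {			/* first measurement */
		s->srtt = rtt;
		s->rttvar = rtt / 2;
		s->have_rtt = 1;
	} else {
		uint32_t delta = s->srtt > rtt ? s->srtt - rtt : rtt - s->srtt;
		s->rttvar = s->rttvar - s->rttvar / 4 + delta / 4;	/* beta = 1/4 */
		s->srtt = s->srtt - s->srtt / 8 + rtt / 8;		/* alpha = 1/8 */
	}
	s->rto = s->srtt + 4 * s->rttvar;
	if (s->rto < s->rto_min)
		s->rto = s->rto_min;
	if (s->rto > s->rto_max)
		s->rto = s->rto_max;
	s->last_rto = s->rto;		/* mirrors the added tp->last_rto = tp->rto */
}

int main(void)
{
	struct rto_state s = { .rto = 3000, .last_rto = 3000,
			       .rto_min = 1000, .rto_max = 60000 };

	update_rto(&s, 120);
	update_rto(&s, 180);
	printf("srtt=%u rttvar=%u rto=%u last_rto=%u\n",
	       s.srtt, s.rttvar, s.rto, s.last_rto);
	return 0;
}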
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 4be92d0a2cab..4908041ffb31 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -862,7 +862,7 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
862 continue; 862 continue;
863 863
864 /* see if this ssn has been marked by skipping */ 864 /* see if this ssn has been marked by skipping */
865 if (!SSN_lt(cssn, sctp_ssn_peek(in, csid))) 865 if (!SSN_lte(cssn, sctp_ssn_peek(in, csid)))
866 break; 866 break;
867 867
868 __skb_unlink(pos, &ulpq->lobby); 868 __skb_unlink(pos, &ulpq->lobby);
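[Editor's sketch] The ulpqueue.c hunk relaxes the reap test from "strictly less than" to "less than or equal", so an SSN that exactly matches the skipped-ahead value is still reaped; the loop now breaks only when the queued SSN is strictly greater. A sketch of wrap-safe 16-bit SSN comparison in the spirit of SSN_lt()/SSN_lte(); this definition is my own, not copied from the kernel headers.

#include <stdint.h>
#include <stdio.h>

#define SSN_lt(a, b)	((int16_t)((uint16_t)(a) - (uint16_t)(b)) < 0)
#define SSN_lte(a, b)	((int16_t)((uint16_t)(a) - (uint16_t)(b)) <= 0)

int main(void)
{
	/* Equal SSNs: lt is false, lte is true - exactly the case the hunk
	 * cares about when an SSN has been marked by skipping. */
	printf("SSN_lt(5, 5)  = %d\n", SSN_lt(5, 5));		/* 0 */
	printf("SSN_lte(5, 5) = %d\n", SSN_lte(5, 5));		/* 1 */

	/* Comparison still works across the 16-bit wrap. */
	printf("SSN_lt(65535, 2) = %d\n", SSN_lt(65535, 2));	/* 1 */
	return 0;
}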
diff --git a/net/socket.c b/net/socket.c
index 5d879fd3d01d..74784dfe8e5b 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2319,6 +2319,11 @@ int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg)
2319 return err; 2319 return err;
2320} 2320}
2321 2321
2322int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how)
2323{
2324 return sock->ops->shutdown(sock, how);
2325}
2326
2322/* ABI emulation layers need these two */ 2327/* ABI emulation layers need these two */
2323EXPORT_SYMBOL(move_addr_to_kernel); 2328EXPORT_SYMBOL(move_addr_to_kernel);
2324EXPORT_SYMBOL(move_addr_to_user); 2329EXPORT_SYMBOL(move_addr_to_user);
@@ -2345,3 +2350,4 @@ EXPORT_SYMBOL(kernel_getsockopt);
2345EXPORT_SYMBOL(kernel_setsockopt); 2350EXPORT_SYMBOL(kernel_setsockopt);
2346EXPORT_SYMBOL(kernel_sendpage); 2351EXPORT_SYMBOL(kernel_sendpage);
2347EXPORT_SYMBOL(kernel_sock_ioctl); 2352EXPORT_SYMBOL(kernel_sock_ioctl);
2353EXPORT_SYMBOL(kernel_sock_shutdown);
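[Editor's sketch] kernel_sock_shutdown() gives in-kernel socket users a counterpart to sys_shutdown alongside the other exported kernel_* helpers. A rough sketch of how a kernel-side caller might use it; this is module code, not buildable on its own, error handling is trimmed, and the sock_create_kern() call assumes the signature of this era (no struct net argument).

#include <linux/net.h>
#include <linux/in.h>
#include <net/sock.h>

static int demo_shutdown(void)
{
	struct socket *sock;
	struct sockaddr_in sin = {
		.sin_family = AF_INET,
		.sin_port   = htons(80),
		.sin_addr   = { .s_addr = htonl(INADDR_LOOPBACK) },
	};
	int err;

	err = sock_create_kern(AF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0)
		return err;

	err = kernel_connect(sock, (struct sockaddr *)&sin, sizeof(sin), 0);
	if (!err)
		err = kernel_sock_shutdown(sock, SHUT_RDWR);	/* new helper */

	sock_release(sock);
	return err;
}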