author    David Woodhouse <dwmw2@infradead.org>    2007-10-13 09:43:54 -0400
committer David Woodhouse <dwmw2@infradead.org>    2007-10-13 09:43:54 -0400
commit    b160292cc216a50fd0cd386b0bda2cd48352c73b (patch)
tree      ef07cf98f91353ee4c9ec1e1ca7a2a5d9d4b538a /net
parent    b37bde147890c8fea8369a5a4e230dabdea4ebfb (diff)
parent    bbf25010f1a6b761914430f5fca081ec8c7accd1 (diff)
Merge Linux 2.6.23
Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan.c  |    2
-rw-r--r--  net/8021q/vlan_dev.c  |   12
-rw-r--r--  net/9p/conv.c  |    1
-rw-r--r--  net/9p/mux.c  |    9
-rw-r--r--  net/bluetooth/hci_core.c  |    8
-rw-r--r--  net/bluetooth/hci_sock.c  |   28
-rw-r--r--  net/bridge/br_device.c  |    4
-rw-r--r--  net/bridge/br_fdb.c  |    5
-rw-r--r--  net/bridge/br_forward.c  |   21
-rw-r--r--  net/bridge/br_if.c  |   16
-rw-r--r--  net/bridge/br_input.c  |   51
-rw-r--r--  net/bridge/br_netfilter.c  |   14
-rw-r--r--  net/bridge/br_private.h  |    8
-rw-r--r--  net/bridge/netfilter/ebtables.c  |    1
-rw-r--r--  net/core/datagram.c  |    3
-rw-r--r--  net/core/dev_mcast.c  |   14
-rw-r--r--  net/core/neighbour.c  |    3
-rw-r--r--  net/core/pktgen.c  |   18
-rw-r--r--  net/core/sock.c  |  106
-rw-r--r--  net/decnet/dn_dev.c  |    2
-rw-r--r--  net/ieee80211/ieee80211_rx.c  |    6
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_assoc.c  |    2
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_wx.c  |   56
-rw-r--r--  net/ipv4/ah4.c  |    2
-rw-r--r--  net/ipv4/devinet.c  |    2
-rw-r--r--  net/ipv4/inet_diag.c  |    4
-rw-r--r--  net/ipv4/ip_sockglue.c  |    4
-rw-r--r--  net/ipv4/ipvs/ip_vs_ctl.c  |    1
-rw-r--r--  net/ipv4/netfilter/arp_tables.c  |    1
-rw-r--r--  net/ipv4/netfilter/ip_tables.c  |    1
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c  |   11
-rw-r--r--  net/ipv4/tcp_input.c  |   27
-rw-r--r--  net/ipv4/tcp_ipv4.c  |   19
-rw-r--r--  net/ipv4/udp.c  |    6
-rw-r--r--  net/ipv6/addrconf.c  |    2
-rw-r--r--  net/ipv6/ip6_output.c  |    5
-rw-r--r--  net/ipv6/ipv6_sockglue.c  |    4
-rw-r--r--  net/ipv6/ndisc.c  |   11
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c  |    1
-rw-r--r--  net/ipv6/raw.c  |    3
-rw-r--r--  net/ipv6/tcp_ipv6.c  |   18
-rw-r--r--  net/ipv6/udp.c  |    6
-rw-r--r--  net/mac80211/ieee80211.c  |    2
-rw-r--r--  net/mac80211/rc80211_simple.c  |    2
-rw-r--r--  net/mac80211/wme.c  |    2
-rw-r--r--  net/netfilter/nf_sockopt.c  |   36
-rw-r--r--  net/netfilter/nfnetlink_log.c  |   13
-rw-r--r--  net/netfilter/xt_tcpudp.c  |    2
-rw-r--r--  net/rose/rose_loopback.c  |    4
-rw-r--r--  net/rose/rose_route.c  |   15
-rw-r--r--  net/sched/act_api.c  |    8
-rw-r--r--  net/sched/act_police.c  |    4
-rw-r--r--  net/sched/cls_u32.c  |    2
-rw-r--r--  net/sched/sch_cbq.c  |    2
-rw-r--r--  net/sched/sch_prio.c  |    2
-rw-r--r--  net/sched/sch_sfq.c  |   53
-rw-r--r--  net/sctp/associola.c  |   21
-rw-r--r--  net/sctp/bind_addr.c  |   70
-rw-r--r--  net/sctp/endpointola.c  |   27
-rw-r--r--  net/sctp/input.c  |    8
-rw-r--r--  net/sctp/inqueue.c  |    8
-rw-r--r--  net/sctp/ipv6.c  |   46
-rw-r--r--  net/sctp/outqueue.c  |    7
-rw-r--r--  net/sctp/protocol.c  |   79
-rw-r--r--  net/sctp/sm_make_chunk.c  |  176
-rw-r--r--  net/sctp/sm_sideeffect.c  |    8
-rw-r--r--  net/sctp/sm_statefuns.c  |  294
-rw-r--r--  net/sctp/sm_statetable.c  |   16
-rw-r--r--  net/sctp/socket.c  |  137
-rw-r--r--  net/sctp/ulpqueue.c  |   75
-rw-r--r--  net/socket.c  |    3
-rw-r--r--  net/sunrpc/svcsock.c  |    5
-rw-r--r--  net/wireless/core.c  |    2
-rw-r--r--  net/wireless/sysfs.c  |    2
74 files changed, 1028 insertions(+), 621 deletions(-)
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 1583c5ef963f..2a546919d6fb 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -562,8 +562,6 @@ static int register_vlan_device(struct net_device *real_dev,
 	if (err < 0)
 		goto out_free_newdev;
 
-	/* Account for reference in struct vlan_dev_info */
-	dev_hold(real_dev);
 #ifdef VLAN_DEBUG
 	printk(VLAN_DBG "Allocated new device successfully, returning.\n");
 #endif
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 4bab322c9f8f..328759c32d61 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -116,12 +116,22 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
 		  struct packet_type* ptype, struct net_device *orig_dev)
 {
 	unsigned char *rawp = NULL;
-	struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data);
+	struct vlan_hdr *vhdr;
 	unsigned short vid;
 	struct net_device_stats *stats;
 	unsigned short vlan_TCI;
 	__be16 proto;
 
+	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
+		return -1;
+
+	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN))) {
+		kfree_skb(skb);
+		return -1;
+	}
+
+	vhdr = (struct vlan_hdr *)(skb->data);
+
 	/* vlan_TCI = ntohs(get_unaligned(&vhdr->h_vlan_TCI)); */
 	vlan_TCI = ntohs(vhdr->h_vlan_TCI);
 
diff --git a/net/9p/conv.c b/net/9p/conv.c
index f2a041cb508a..d979d958ea19 100644
--- a/net/9p/conv.c
+++ b/net/9p/conv.c
@@ -796,6 +796,7 @@ struct p9_fcall *p9_create_twrite_u(u32 fid, u64 offset, u32 count,
 	if (err) {
 		kfree(fc);
 		fc = ERR_PTR(err);
+		goto error;
 	}
 
 	if (buf_check_overflow(bufp)) {
diff --git a/net/9p/mux.c b/net/9p/mux.c
index acb038810f39..5d70558c4c61 100644
--- a/net/9p/mux.c
+++ b/net/9p/mux.c
@@ -288,9 +288,10 @@ struct p9_conn *p9_conn_create(struct p9_transport *trans, int msize,
 	m->extended = extended;
 	m->trans = trans;
 	m->tagpool = p9_idpool_create();
-	if (!m->tagpool) {
+	if (IS_ERR(m->tagpool)) {
+		mtmp = ERR_PTR(-ENOMEM);
 		kfree(m);
-		return ERR_PTR(PTR_ERR(m->tagpool));
+		return mtmp;
 	}
 
 	m->err = 0;
@@ -308,8 +309,10 @@ struct p9_conn *p9_conn_create(struct p9_transport *trans, int msize,
 	memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
 	m->poll_task = NULL;
 	n = p9_mux_poll_start(m);
-	if (n)
+	if (n) {
+		kfree(m);
 		return ERR_PTR(n);
+	}
 
 	n = trans->poll(trans, &m->pt);
 	if (n & POLLIN) {
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 63caa414945d..18e3afc964df 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -183,6 +183,7 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
 {
 	struct sk_buff *skb;
 	__le16 param;
+	__u8 flt_type;
 
 	BT_DBG("%s %ld", hdev->name, opt);
 
@@ -233,11 +234,8 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
 	/* Optional initialization */
 
 	/* Clear Event Filters */
-	{
-		struct hci_cp_set_event_flt cp;
-		cp.flt_type = HCI_FLT_CLEAR_ALL;
-		hci_send_cmd(hdev, OGF_HOST_CTL, OCF_SET_EVENT_FLT, sizeof(cp), &cp);
-	}
+	flt_type = HCI_FLT_CLEAR_ALL;
+	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_SET_EVENT_FLT, 1, &flt_type);
 
 	/* Page timeout ~20 secs */
 	param = cpu_to_le16(0x8000);
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 1dae3dfc66a9..5ccea5fbd236 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -37,6 +37,7 @@
 #include <linux/skbuff.h>
 #include <linux/workqueue.h>
 #include <linux/interrupt.h>
+#include <linux/compat.h>
 #include <linux/socket.h>
 #include <linux/ioctl.h>
 #include <net/sock.h>
@@ -70,15 +71,15 @@ static struct hci_sec_filter hci_sec_filter = {
 	{
 		{ 0x0 },
 		/* OGF_LINK_CTL */
-		{ 0xbe000006, 0x00000001, 0x000000, 0x00 },
+		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
 		/* OGF_LINK_POLICY */
-		{ 0x00005200, 0x00000000, 0x000000, 0x00 },
+		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
 		/* OGF_HOST_CTL */
-		{ 0xaab00200, 0x2b402aaa, 0x020154, 0x00 },
+		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
 		/* OGF_INFO_PARAM */
-		{ 0x000002be, 0x00000000, 0x000000, 0x00 },
+		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
 		/* OGF_STATUS_PARAM */
-		{ 0x000000ea, 0x00000000, 0x000000, 0x00 }
+		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
 	}
 };
 
@@ -342,9 +343,24 @@ static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_
 
 	if (mask & HCI_CMSG_TSTAMP) {
 		struct timeval tv;
+		void *data;
+		int len;
 
 		skb_get_timestamp(skb, &tv);
-		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, sizeof(tv), &tv);
+
+		data = &tv;
+		len = sizeof(tv);
+#ifdef CONFIG_COMPAT
+		if (msg->msg_flags & MSG_CMSG_COMPAT) {
+			struct compat_timeval ctv;
+			ctv.tv_sec = tv.tv_sec;
+			ctv.tv_usec = tv.tv_usec;
+			data = &ctv;
+			len = sizeof(ctv);
+		}
+#endif
+
+		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
 	}
 }
 
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 0eded176ce99..99292e8e1d0f 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -41,11 +41,11 @@ int br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 	skb_pull(skb, ETH_HLEN);
 
 	if (dest[0] & 1)
-		br_flood_deliver(br, skb, 0);
+		br_flood_deliver(br, skb);
 	else if ((dst = __br_fdb_get(br, dest)) != NULL)
 		br_deliver(dst->dst, skb);
 	else
-		br_flood_deliver(br, skb, 0);
+		br_flood_deliver(br, skb);
 
 	return 0;
 }
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 69b70977f000..eb57502bb264 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -384,6 +384,11 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
 	if (hold_time(br) == 0)
 		return;
 
+	/* ignore packets unless we are using this port */
+	if (!(source->state == BR_STATE_LEARNING ||
+	      source->state == BR_STATE_FORWARDING))
+		return;
+
 	fdb = fdb_find(head, addr);
 	if (likely(fdb)) {
 		/* attempt to update an entry for a local interface */
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index ada7f495445c..bdd7c35c3c7b 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -100,24 +100,13 @@ void br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
 }
 
 /* called under bridge lock */
-static void br_flood(struct net_bridge *br, struct sk_buff *skb, int clone,
+static void br_flood(struct net_bridge *br, struct sk_buff *skb,
 	void (*__packet_hook)(const struct net_bridge_port *p,
 			      struct sk_buff *skb))
 {
 	struct net_bridge_port *p;
 	struct net_bridge_port *prev;
 
-	if (clone) {
-		struct sk_buff *skb2;
-
-		if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL) {
-			br->statistics.tx_dropped++;
-			return;
-		}
-
-		skb = skb2;
-	}
-
 	prev = NULL;
 
 	list_for_each_entry_rcu(p, &br->port_list, list) {
@@ -148,13 +137,13 @@ static void br_flood(struct net_bridge *br, struct sk_buff *skb, int clone,
 
 
 /* called with rcu_read_lock */
-void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb, int clone)
+void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb)
 {
-	br_flood(br, skb, clone, __br_deliver);
+	br_flood(br, skb, __br_deliver);
 }
 
 /* called under bridge lock */
-void br_flood_forward(struct net_bridge *br, struct sk_buff *skb, int clone)
+void br_flood_forward(struct net_bridge *br, struct sk_buff *skb)
 {
-	br_flood(br, skb, clone, __br_forward);
+	br_flood(br, skb, __br_forward);
 }
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 749f0e8f541d..9272f12f664c 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -33,17 +33,17 @@
  */
 static int port_cost(struct net_device *dev)
 {
-	if (dev->ethtool_ops->get_settings) {
-		struct ethtool_cmd ecmd = { ETHTOOL_GSET };
-		int err = dev->ethtool_ops->get_settings(dev, &ecmd);
-		if (!err) {
+	if (dev->ethtool_ops && dev->ethtool_ops->get_settings) {
+		struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET, };
+
+		if (!dev->ethtool_ops->get_settings(dev, &ecmd)) {
 			switch(ecmd.speed) {
-			case SPEED_100:
-				return 19;
-			case SPEED_1000:
-				return 4;
 			case SPEED_10000:
 				return 2;
+			case SPEED_1000:
+				return 4;
+			case SPEED_100:
+				return 19;
 			case SPEED_10:
 				return 100;
 			}
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 5c18595b7616..3a8a015c92e0 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -43,7 +43,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
 	struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
 	struct net_bridge *br;
 	struct net_bridge_fdb_entry *dst;
-	int passedup = 0;
+	struct sk_buff *skb2;
 
 	if (!p || p->state == BR_STATE_DISABLED)
 		goto drop;
@@ -55,39 +55,35 @@ int br_handle_frame_finish(struct sk_buff *skb)
 	if (p->state == BR_STATE_LEARNING)
 		goto drop;
 
-	if (br->dev->flags & IFF_PROMISC) {
-		struct sk_buff *skb2;
-
-		skb2 = skb_clone(skb, GFP_ATOMIC);
-		if (skb2 != NULL) {
-			passedup = 1;
-			br_pass_frame_up(br, skb2);
-		}
-	}
+	/* The packet skb2 goes to the local host (NULL to skip). */
+	skb2 = NULL;
+
+	if (br->dev->flags & IFF_PROMISC)
+		skb2 = skb;
+
+	dst = NULL;
 
 	if (is_multicast_ether_addr(dest)) {
 		br->statistics.multicast++;
-		br_flood_forward(br, skb, !passedup);
-		if (!passedup)
-			br_pass_frame_up(br, skb);
-		goto out;
+		skb2 = skb;
+	} else if ((dst = __br_fdb_get(br, dest)) && dst->is_local) {
+		skb2 = skb;
+		/* Do not forward the packet since it's local. */
+		skb = NULL;
 	}
 
-	dst = __br_fdb_get(br, dest);
-	if (dst != NULL && dst->is_local) {
-		if (!passedup)
-			br_pass_frame_up(br, skb);
-		else
-			kfree_skb(skb);
-		goto out;
-	}
+	if (skb2 == skb)
+		skb2 = skb_clone(skb, GFP_ATOMIC);
 
-	if (dst != NULL) {
-		br_forward(dst->dst, skb);
-		goto out;
-	}
+	if (skb2)
+		br_pass_frame_up(br, skb2);
 
-	br_flood_forward(br, skb, 0);
+	if (skb) {
+		if (dst)
+			br_forward(dst->dst, skb);
+		else
+			br_flood_forward(br, skb);
+	}
 
 out:
 	return 0;
@@ -101,9 +97,8 @@ static int br_handle_local_finish(struct sk_buff *skb)
 {
 	struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
 
-	if (p && p->state != BR_STATE_DISABLED)
+	if (p)
 		br_fdb_update(p->br, p, eth_hdr(skb)->h_source);
-
 	return 0;	/* process further */
 }
 
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index fa779874b9dd..fc13130035e7 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -183,7 +183,7 @@ int nf_bridge_copy_header(struct sk_buff *skb)
 	int err;
 	int header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
 
-	err = skb_cow(skb, header_size);
+	err = skb_cow_head(skb, header_size);
 	if (err)
 		return err;
 
@@ -509,8 +509,14 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb,
 				      int (*okfn)(struct sk_buff *))
 {
 	struct iphdr *iph;
-	__u32 len;
 	struct sk_buff *skb = *pskb;
+	__u32 len = nf_bridge_encap_header_len(skb);
+
+	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
+		return NF_STOLEN;
+
+	if (unlikely(!pskb_may_pull(skb, len)))
+		goto out;
 
 	if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
 	    IS_PPPOE_IPV6(skb)) {
@@ -518,8 +524,6 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb,
 		if (!brnf_call_ip6tables)
 			return NF_ACCEPT;
 #endif
-		if ((skb = skb_share_check(*pskb, GFP_ATOMIC)) == NULL)
-			goto out;
 		nf_bridge_pull_encap_header_rcsum(skb);
 		return br_nf_pre_routing_ipv6(hook, skb, in, out, okfn);
 	}
@@ -532,8 +536,6 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb,
 	    !IS_PPPOE_IP(skb))
 		return NF_ACCEPT;
 
-	if ((skb = skb_share_check(*pskb, GFP_ATOMIC)) == NULL)
-		goto out;
 	nf_bridge_pull_encap_header_rcsum(skb);
 
 	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 21bf3a9a03fd..e6dc6f52990d 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -170,12 +170,8 @@ extern int br_dev_queue_push_xmit(struct sk_buff *skb);
 extern void br_forward(const struct net_bridge_port *to,
 		struct sk_buff *skb);
 extern int br_forward_finish(struct sk_buff *skb);
-extern void br_flood_deliver(struct net_bridge *br,
-		      struct sk_buff *skb,
-		      int clone);
-extern void br_flood_forward(struct net_bridge *br,
-		      struct sk_buff *skb,
-		      int clone);
+extern void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb);
+extern void br_flood_forward(struct net_bridge *br, struct sk_buff *skb);
 
 /* br_if.c */
 extern void br_port_carrier_check(struct net_bridge_port *p);
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 4169a2a89a39..6018d0e51938 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1513,6 +1513,7 @@ static struct nf_sockopt_ops ebt_sockopts =
 	.get_optmin = EBT_BASE_CTL,
 	.get_optmax = EBT_SO_GET_MAX + 1,
 	.get        = do_ebt_get_ctl,
+	.owner      = THIS_MODULE,
 };
 
 static int __init ebtables_init(void)
diff --git a/net/core/datagram.c b/net/core/datagram.c
index cb056f476126..029b93e246b4 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -450,6 +450,9 @@ int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
 	__wsum csum;
 	int chunk = skb->len - hlen;
 
+	if (!chunk)
+		return 0;
+
 	/* Skip filled elements.
 	 * Pretty silly, look at memcpy_toiovec, though 8)
 	 */
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
index 99aece1aeccf..20330c572610 100644
--- a/net/core/dev_mcast.c
+++ b/net/core/dev_mcast.c
@@ -116,11 +116,13 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
  */
 int dev_mc_sync(struct net_device *to, struct net_device *from)
 {
-	struct dev_addr_list *da;
+	struct dev_addr_list *da, *next;
 	int err = 0;
 
 	netif_tx_lock_bh(to);
-	for (da = from->mc_list; da != NULL; da = da->next) {
+	da = from->mc_list;
+	while (da != NULL) {
+		next = da->next;
 		if (!da->da_synced) {
 			err = __dev_addr_add(&to->mc_list, &to->mc_count,
 					     da->da_addr, da->da_addrlen, 0);
@@ -134,6 +136,7 @@ int dev_mc_sync(struct net_device *to, struct net_device *from)
 			__dev_addr_delete(&from->mc_list, &from->mc_count,
 					  da->da_addr, da->da_addrlen, 0);
 		}
+		da = next;
 	}
 	if (!err)
 		__dev_set_rx_mode(to);
@@ -156,12 +159,14 @@ EXPORT_SYMBOL(dev_mc_sync);
  */
 void dev_mc_unsync(struct net_device *to, struct net_device *from)
 {
-	struct dev_addr_list *da;
+	struct dev_addr_list *da, *next;
 
 	netif_tx_lock_bh(from);
 	netif_tx_lock_bh(to);
 
-	for (da = from->mc_list; da != NULL; da = da->next) {
+	da = from->mc_list;
+	while (da != NULL) {
+		next = da->next;
 		if (!da->da_synced)
 			continue;
 		__dev_addr_delete(&to->mc_list, &to->mc_count,
@@ -169,6 +174,7 @@ void dev_mc_unsync(struct net_device *to, struct net_device *from)
 		da->da_synced = 0;
 		__dev_addr_delete(&from->mc_list, &from->mc_count,
 				  da->da_addr, da->da_addrlen, 0);
+		da = next;
 	}
 	__dev_set_rx_mode(to);
 
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index ca2a1533138a..f7de8f24d8dd 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -33,6 +33,7 @@
 #include <linux/rtnetlink.h>
 #include <linux/random.h>
 #include <linux/string.h>
+#include <linux/log2.h>
 
 #define NEIGH_DEBUG 1
 
@@ -311,7 +312,7 @@ static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
 
 	NEIGH_CACHE_STAT_INC(tbl, hash_grows);
 
-	BUG_ON(new_entries & (new_entries - 1));
+	BUG_ON(!is_power_of_2(new_entries));
 	new_hash = neigh_hash_alloc(new_entries);
 	if (!new_hash)
 		return;
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 7bae576ac115..803d0c8826af 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -111,6 +111,9 @@
  *
  * 802.1Q/Q-in-Q support by Francesco Fondelli (FF) <francesco.fondelli@gmail.com>
  *
+ * Fixed src_mac command to set source mac of packet to value specified in
+ * command by Adit Ranadive <adit.262@gmail.com>
+ *
  */
 #include <linux/sys.h>
 #include <linux/types.h>
@@ -380,7 +383,6 @@ struct pktgen_thread {
 	/* Field for thread to receive "posted" events terminate, stop ifs etc. */
 
 	u32 control;
-	int pid;
 	int cpu;
 
 	wait_queue_head_t queue;
@@ -1452,8 +1454,11 @@ static ssize_t pktgen_if_write(struct file *file,
 	}
 	if (!strcmp(name, "src_mac")) {
 		char *v = valstr;
+		unsigned char old_smac[ETH_ALEN];
 		unsigned char *m = pkt_dev->src_mac;
 
+		memcpy(old_smac, pkt_dev->src_mac, ETH_ALEN);
+
 		len = strn_len(&user_buffer[i], sizeof(valstr) - 1);
 		if (len < 0) {
 			return len;
@@ -1482,6 +1487,10 @@ static ssize_t pktgen_if_write(struct file *file,
 			}
 		}
 
+		/* Set up Src MAC */
+		if (compare_ether_addr(old_smac, pkt_dev->src_mac))
+			memcpy(&(pkt_dev->hh[6]), pkt_dev->src_mac, ETH_ALEN);
+
 		sprintf(pg_result, "OK: srcmac");
 		return count;
 	}
@@ -3331,8 +3340,9 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 	}
 
 	if ((netif_queue_stopped(odev) ||
-	     netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) ||
-	    need_resched()) {
+	     (pkt_dev->skb &&
+	      netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping))) ||
+	    need_resched()) {
 		idle_start = getCurUs();
 
 		if (!netif_running(odev)) {
@@ -3462,8 +3472,6 @@ static int pktgen_thread_worker(void *arg)
 
 	init_waitqueue_head(&t->queue);
 
-	t->pid = current->pid;
-
 	pr_debug("pktgen: starting pktgen/%d: pid=%d\n", cpu, current->pid);
 
 	max_before_softirq = t->max_before_softirq;
diff --git a/net/core/sock.c b/net/core/sock.c
index cfed7d42c485..190de61cd648 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -362,6 +362,61 @@ struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
 }
 EXPORT_SYMBOL(sk_dst_check);
 
+static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
+{
+	int ret = -ENOPROTOOPT;
+#ifdef CONFIG_NETDEVICES
+	char devname[IFNAMSIZ];
+	int index;
+
+	/* Sorry... */
+	ret = -EPERM;
+	if (!capable(CAP_NET_RAW))
+		goto out;
+
+	ret = -EINVAL;
+	if (optlen < 0)
+		goto out;
+
+	/* Bind this socket to a particular device like "eth0",
+	 * as specified in the passed interface name. If the
+	 * name is "" or the option length is zero the socket
+	 * is not bound.
+	 */
+	if (optlen > IFNAMSIZ - 1)
+		optlen = IFNAMSIZ - 1;
+	memset(devname, 0, sizeof(devname));
+
+	ret = -EFAULT;
+	if (copy_from_user(devname, optval, optlen))
+		goto out;
+
+	if (devname[0] == '\0') {
+		index = 0;
+	} else {
+		struct net_device *dev = dev_get_by_name(devname);
+
+		ret = -ENODEV;
+		if (!dev)
+			goto out;
+
+		index = dev->ifindex;
+		dev_put(dev);
+	}
+
+	lock_sock(sk);
+	sk->sk_bound_dev_if = index;
+	sk_dst_reset(sk);
+	release_sock(sk);
+
+	ret = 0;
+
+out:
+#endif
+
+	return ret;
+}
+
 /*
  *	This is meant for all protocols to use and covers goings on
  *	at the socket level. Everything here is generic.
@@ -390,6 +445,9 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
 	}
 #endif
 
+	if (optname == SO_BINDTODEVICE)
+		return sock_bindtodevice(sk, optval, optlen);
+
 	if (optlen < sizeof(int))
 		return -EINVAL;
 
@@ -578,54 +636,6 @@ set_rcvbuf:
 		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
 		break;
 
-#ifdef CONFIG_NETDEVICES
-	case SO_BINDTODEVICE:
-	{
-		char devname[IFNAMSIZ];
-
-		/* Sorry... */
-		if (!capable(CAP_NET_RAW)) {
-			ret = -EPERM;
-			break;
-		}
-
-		/* Bind this socket to a particular device like "eth0",
-		 * as specified in the passed interface name. If the
-		 * name is "" or the option length is zero the socket
-		 * is not bound.
-		 */
-
-		if (!valbool) {
-			sk->sk_bound_dev_if = 0;
-		} else {
-			if (optlen > IFNAMSIZ - 1)
-				optlen = IFNAMSIZ - 1;
-			memset(devname, 0, sizeof(devname));
-			if (copy_from_user(devname, optval, optlen)) {
-				ret = -EFAULT;
-				break;
-			}
-
-			/* Remove any cached route for this socket. */
-			sk_dst_reset(sk);
-
-			if (devname[0] == '\0') {
-				sk->sk_bound_dev_if = 0;
-			} else {
-				struct net_device *dev = dev_get_by_name(devname);
-				if (!dev) {
-					ret = -ENODEV;
-					break;
-				}
-				sk->sk_bound_dev_if = dev->ifindex;
-				dev_put(dev);
-			}
-		}
-		break;
-	}
-#endif
-
-
 	case SO_ATTACH_FILTER:
 		ret = -EINVAL;
 		if (optlen == sizeof(struct sock_fprog)) {
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index fa6604fcf0e7..8def68209edd 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -814,7 +814,7 @@ static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
 	for (ifa = dn_db->ifa_list, dn_idx = 0; ifa;
 	     ifa = ifa->ifa_next, dn_idx++) {
 		if (dn_idx < skip_naddr)
-			goto cont;
+			continue;
 
 		if (dn_nl_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid,
 				      cb->nlh->nlmsg_seq, RTM_NEWADDR,
diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c
index f2de2e48b021..6284c99b456e 100644
--- a/net/ieee80211/ieee80211_rx.c
+++ b/net/ieee80211/ieee80211_rx.c
@@ -366,6 +366,12 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
 	frag = WLAN_GET_SEQ_FRAG(sc);
 	hdrlen = ieee80211_get_hdrlen(fc);
 
+	if (skb->len < hdrlen) {
+		printk(KERN_INFO "%s: invalid SKB length %d\n",
+		       dev->name, skb->len);
+		goto rx_dropped;
+	}
+
 	/* Put this code here so that we avoid duplicating it in all
 	 * Rx paths. - Jean II */
 #ifdef CONFIG_WIRELESS_EXT
diff --git a/net/ieee80211/softmac/ieee80211softmac_assoc.c b/net/ieee80211/softmac/ieee80211softmac_assoc.c
index afb6c6698b27..e475f2e1be13 100644
--- a/net/ieee80211/softmac/ieee80211softmac_assoc.c
+++ b/net/ieee80211/softmac/ieee80211softmac_assoc.c
@@ -273,8 +273,6 @@ ieee80211softmac_assoc_work(struct work_struct *work)
 		ieee80211softmac_notify(mac->dev, IEEE80211SOFTMAC_EVENT_SCAN_FINISHED, ieee80211softmac_assoc_notify_scan, NULL);
 		if (ieee80211softmac_start_scan(mac)) {
 			dprintk(KERN_INFO PFX "Associate: failed to initiate scan. Is device up?\n");
-			mac->associnfo.associating = 0;
-			mac->associnfo.associated = 0;
 		}
 		goto out;
 	} else {
diff --git a/net/ieee80211/softmac/ieee80211softmac_wx.c b/net/ieee80211/softmac/ieee80211softmac_wx.c
index d054e9224b3e..5742dc803b79 100644
--- a/net/ieee80211/softmac/ieee80211softmac_wx.c
+++ b/net/ieee80211/softmac/ieee80211softmac_wx.c
@@ -70,44 +70,30 @@ ieee80211softmac_wx_set_essid(struct net_device *net_dev,
 			char *extra)
 {
 	struct ieee80211softmac_device *sm = ieee80211_priv(net_dev);
-	struct ieee80211softmac_network *n;
 	struct ieee80211softmac_auth_queue_item *authptr;
 	int length = 0;
 
 check_assoc_again:
 	mutex_lock(&sm->associnfo.mutex);
-	/* Check if we're already associating to this or another network
-	 * If it's another network, cancel and start over with our new network
-	 * If it's our network, ignore the change, we're already doing it!
-	 */
 	if((sm->associnfo.associating || sm->associnfo.associated) &&
 	   (data->essid.flags && data->essid.length)) {
-		/* Get the associating network */
-		n = ieee80211softmac_get_network_by_bssid(sm, sm->associnfo.bssid);
-		if(n && n->essid.len == data->essid.length &&
-		   !memcmp(n->essid.data, extra, n->essid.len)) {
-			dprintk(KERN_INFO PFX "Already associating or associated to "MAC_FMT"\n",
-				MAC_ARG(sm->associnfo.bssid));
-			goto out;
-		} else {
-			dprintk(KERN_INFO PFX "Canceling existing associate request!\n");
-			/* Cancel assoc work */
-			cancel_delayed_work(&sm->associnfo.work);
-			/* We don't have to do this, but it's a little cleaner */
-			list_for_each_entry(authptr, &sm->auth_queue, list)
-				cancel_delayed_work(&authptr->work);
-			sm->associnfo.bssvalid = 0;
-			sm->associnfo.bssfixed = 0;
-			sm->associnfo.associating = 0;
-			sm->associnfo.associated = 0;
-			/* We must unlock to avoid deadlocks with the assoc workqueue
-			 * on the associnfo.mutex */
-			mutex_unlock(&sm->associnfo.mutex);
-			flush_scheduled_work();
-			/* Avoid race! Check assoc status again. Maybe someone started an
-			 * association while we flushed. */
-			goto check_assoc_again;
-		}
+		dprintk(KERN_INFO PFX "Canceling existing associate request!\n");
+		/* Cancel assoc work */
+		cancel_delayed_work(&sm->associnfo.work);
+		/* We don't have to do this, but it's a little cleaner */
+		list_for_each_entry(authptr, &sm->auth_queue, list)
+			cancel_delayed_work(&authptr->work);
+		sm->associnfo.bssvalid = 0;
+		sm->associnfo.bssfixed = 0;
+		sm->associnfo.associating = 0;
+		sm->associnfo.associated = 0;
+		/* We must unlock to avoid deadlocks with the assoc workqueue
+		 * on the associnfo.mutex */
+		mutex_unlock(&sm->associnfo.mutex);
+		flush_scheduled_work();
+		/* Avoid race! Check assoc status again. Maybe someone started an
+		 * association while we flushed. */
+		goto check_assoc_again;
 	}
 
 	sm->associnfo.static_essid = 0;
@@ -128,7 +114,7 @@ check_assoc_again:
 	sm->associnfo.associating = 1;
 	/* queue lower level code to do work (if necessary) */
 	schedule_delayed_work(&sm->associnfo.work, 0);
-out:
+
 	mutex_unlock(&sm->associnfo.mutex);
 
 	return 0;
@@ -153,13 +139,13 @@ ieee80211softmac_wx_get_essid(struct net_device *net_dev,
 		data->essid.length = sm->associnfo.req_essid.len;
 		data->essid.flags = 1;  /* active */
 		memcpy(extra, sm->associnfo.req_essid.data, sm->associnfo.req_essid.len);
-	}
-
+		dprintk(KERN_INFO PFX "Getting essid from req_essid\n");
+	} else if (sm->associnfo.associated || sm->associnfo.associating) {
 	/* If we're associating/associated, return that */
-	if (sm->associnfo.associated || sm->associnfo.associating) {
 		data->essid.length = sm->associnfo.associate_essid.len;
 		data->essid.flags = 1;  /* active */
 		memcpy(extra, sm->associnfo.associate_essid.data, sm->associnfo.associate_essid.len);
+		dprintk(KERN_INFO PFX "Getting essid from associate_essid\n");
 	}
 	mutex_unlock(&sm->associnfo.mutex);
 
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 7a23e59c374a..39f6211f1496 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -46,7 +46,7 @@ static int ip_clear_mutable_options(struct iphdr *iph, __be32 *daddr)
 			memcpy(daddr, optptr+optlen-4, 4);
 			/* Fall through */
 		default:
-			memset(optptr+2, 0, optlen-2);
+			memset(optptr, 0, optlen);
 		}
 		l -= optlen;
 		optptr += optlen;
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 5b77bdaa57dd..5dbe5803b7d5 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1193,7 +1193,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
 		for (ifa = in_dev->ifa_list, ip_idx = 0; ifa;
 		     ifa = ifa->ifa_next, ip_idx++) {
 			if (ip_idx < s_ip_idx)
-				goto cont;
+				continue;
 			if (inet_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid,
 					     cb->nlh->nlmsg_seq,
 					     RTM_NEWADDR, NLM_F_MULTI) <= 0)
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index dbeacd8b0f90..def007ec1d6f 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -836,12 +836,16 @@ static int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 	return inet_diag_get_exact(skb, nlh);
 }
 
+static DEFINE_MUTEX(inet_diag_mutex);
+
 static void inet_diag_rcv(struct sock *sk, int len)
 {
 	unsigned int qlen = 0;
 
 	do {
+		mutex_lock(&inet_diag_mutex);
 		netlink_run_queue(sk, &qlen, &inet_diag_rcv_msg);
+		mutex_unlock(&inet_diag_mutex);
 	} while (qlen);
 }
 
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 4d544573f48a..6b420aedcdcf 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -625,6 +625,10 @@ static int do_ip_setsockopt(struct sock *sk, int level,
 	{
 		struct ip_mreqn mreq;
 
+		err = -EPROTO;
+		if (inet_sk(sk)->is_icsk)
+			break;
+
 		if (optlen < sizeof(struct ip_mreq))
 			goto e_inval;
 		err = -EFAULT;
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index 902fd578aa3c..f656d41d8d41 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -2339,6 +2339,7 @@ static struct nf_sockopt_ops ip_vs_sockopts = {
 	.get_optmin = IP_VS_BASE_CTL,
 	.get_optmax = IP_VS_SO_GET_MAX+1,
 	.get        = do_ip_vs_get_ctl,
+	.owner      = THIS_MODULE,
 };
 
 
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index d1149aba9351..29114a9ccd1d 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -1161,6 +1161,7 @@ static struct nf_sockopt_ops arpt_sockopts = {
 	.get_optmin = ARPT_BASE_CTL,
 	.get_optmax = ARPT_SO_GET_MAX+1,
 	.get        = do_arpt_get_ctl,
+	.owner      = THIS_MODULE,
 };
 
 static int __init arp_tables_init(void)
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index e1b402c6b855..6486894f450c 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -2296,6 +2296,7 @@ static struct nf_sockopt_ops ipt_sockopts = {
 #ifdef CONFIG_COMPAT
 	.compat_get = compat_do_ipt_get_ctl,
 #endif
+	.owner      = THIS_MODULE,
 };
 
 static struct xt_match icmp_matchstruct __read_mostly = {
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index d9b5177989c6..f813e02aab30 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -87,14 +87,10 @@ static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
 	if (iph == NULL)
 		return -NF_DROP;
 
-	/* Never happen */
-	if (iph->frag_off & htons(IP_OFFSET)) {
-		if (net_ratelimit()) {
-			printk(KERN_ERR "ipv4_get_l4proto: Frag of proto %u\n",
-			       iph->protocol);
-		}
+	/* Conntrack defragments packets, we might still see fragments
+	 * inside ICMP packets though. */
+	if (iph->frag_off & htons(IP_OFFSET))
 		return -NF_DROP;
-	}
 
 	*dataoff = nhoff + (iph->ihl << 2);
 	*protonum = iph->protocol;
@@ -403,6 +399,7 @@ static struct nf_sockopt_ops so_getorigdst = {
 	.get_optmin = SO_ORIGINAL_DST,
 	.get_optmax = SO_ORIGINAL_DST+1,
 	.get        = &getorigdst,
+	.owner      = THIS_MODULE,
 };
 
 struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 __read_mostly = {
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index f030435e0eb4..f893e90061eb 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -555,6 +555,16 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
 		tcp_grow_window(sk, skb);
 }
 
+static u32 tcp_rto_min(struct sock *sk)
+{
+	struct dst_entry *dst = __sk_dst_get(sk);
+	u32 rto_min = TCP_RTO_MIN;
+
+	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
+		rto_min = dst->metrics[RTAX_RTO_MIN-1];
+	return rto_min;
+}
+
 /* Called to compute a smoothed rtt estimate. The data fed to this
  * routine either comes from timestamps, or from segments that were
  * known _not_ to have been retransmitted [see Karn/Partridge
@@ -616,13 +626,13 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
 			if (tp->mdev_max < tp->rttvar)
 				tp->rttvar -= (tp->rttvar-tp->mdev_max)>>2;
 			tp->rtt_seq = tp->snd_nxt;
-			tp->mdev_max = TCP_RTO_MIN;
+			tp->mdev_max = tcp_rto_min(sk);
 		}
 	} else {
 		/* no previous measure. */
 		tp->srtt = m<<3;	/* take the measured time to be rtt */
 		tp->mdev = m<<1;	/* make sure rto = 3*rtt */
-		tp->mdev_max = tp->rttvar = max(tp->mdev, TCP_RTO_MIN);
+		tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
 		tp->rtt_seq = tp->snd_nxt;
 	}
 }
@@ -755,7 +765,15 @@ void tcp_update_metrics(struct sock *sk)
 	}
 }
 
-/* Numbers are taken from RFC2414. */
+/* Numbers are taken from RFC3390.
+ *
+ * John Heffner states:
+ *
+ *	The RFC specifies a window of no more than 4380 bytes
+ *	unless 2*MSS > 4380. Reading the pseudocode in the RFC
+ *	is a bit misleading because they use a clamp at 4380 bytes
+ *	rather than use a multiplier in the relevant range.
+ */
 __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst)
 {
 	__u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);
@@ -2402,6 +2420,9 @@ static int tcp_tso_acked(struct sock *sk, struct sk_buff *skb,
 		__u32 dval = min(tp->fackets_out, packets_acked);
 		tp->fackets_out -= dval;
 	}
+	/* hint's skb might be NULL but we don't need to care */
+	tp->fastpath_cnt_hint -= min_t(u32, packets_acked,
+				       tp->fastpath_cnt_hint);
 	tp->packets_out -= packets_acked;
 
 	BUG_ON(tcp_skb_pcount(skb) == 0);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 9c94627c8c7e..e089a978e128 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -833,8 +833,7 @@ static struct tcp_md5sig_key *
 		return NULL;
 	for (i = 0; i < tp->md5sig_info->entries4; i++) {
 		if (tp->md5sig_info->keys4[i].addr == addr)
-			return (struct tcp_md5sig_key *)
-						&tp->md5sig_info->keys4[i];
+			return &tp->md5sig_info->keys4[i].base;
 	}
 	return NULL;
 }
@@ -865,9 +864,9 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
 	key = (struct tcp4_md5sig_key *)tcp_v4_md5_do_lookup(sk, addr);
 	if (key) {
 		/* Pre-existing entry - just update that one. */
-		kfree(key->key);
-		key->key = newkey;
-		key->keylen = newkeylen;
+		kfree(key->base.key);
+		key->base.key = newkey;
+		key->base.keylen = newkeylen;
 	} else {
 		struct tcp_md5sig_info *md5sig;
 
@@ -906,9 +905,9 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
 			md5sig->alloced4++;
 		}
 		md5sig->entries4++;
 		md5sig->keys4[md5sig->entries4 - 1].addr = addr;
-		md5sig->keys4[md5sig->entries4 - 1].key = newkey;
-		md5sig->keys4[md5sig->entries4 - 1].keylen = newkeylen;
+		md5sig->keys4[md5sig->entries4 - 1].base.key = newkey;
+		md5sig->keys4[md5sig->entries4 - 1].base.keylen = newkeylen;
 	}
 	return 0;
 }
@@ -930,7 +929,7 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
 	for (i = 0; i < tp->md5sig_info->entries4; i++) {
 		if (tp->md5sig_info->keys4[i].addr == addr) {
 			/* Free the key */
-			kfree(tp->md5sig_info->keys4[i].key);
+			kfree(tp->md5sig_info->keys4[i].base.key);
 			tp->md5sig_info->entries4--;
 
 			if (tp->md5sig_info->entries4 == 0) {
@@ -964,7 +963,7 @@ static void tcp_v4_clear_md5_list(struct sock *sk)
 	if (tp->md5sig_info->entries4) {
 		int i;
 		for (i = 0; i < tp->md5sig_info->entries4; i++)
-			kfree(tp->md5sig_info->keys4[i].key);
+			kfree(tp->md5sig_info->keys4[i].base.key);
 		tp->md5sig_info->entries4 = 0;
 		tcp_free_md5sig_pool();
 	}
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 28355350fb62..69d4bd10f9c6 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -505,6 +505,8 @@ send:
 out:
 	up->len = 0;
 	up->pending = 0;
+	if (!err)
+		UDP_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS, up->pcflag);
 	return err;
 }
 
@@ -693,10 +695,8 @@ out:
 	ip_rt_put(rt);
 	if (free)
 		kfree(ipc.opt);
-	if (!err) {
-		UDP_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS, is_udplite);
+	if (!err)
 		return len;
-	}
 	/*
 	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting
 	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 91ef3be5abad..45b4c82148a0 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1021,7 +1021,7 @@ int ipv6_dev_get_saddr(struct net_device *daddr_dev,
 			hiscore.rule++;
 		}
 		if (ipv6_saddr_preferred(score.addr_type) ||
-		    (((ifa_result->flags &
+		    (((ifa->flags &
 		       (IFA_F_DEPRECATED|IFA_F_OPTIMISTIC)) == 0))) {
 			score.attrs |= IPV6_SADDR_SCORE_PREFERRED;
 			if (!(hiscore.attrs & IPV6_SADDR_SCORE_PREFERRED)) {
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 5dead399fe64..26de3c0ea31e 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1427,8 +1427,9 @@ void ip6_flush_pending_frames(struct sock *sk)
 	struct sk_buff *skb;
 
 	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
-		IP6_INC_STATS(ip6_dst_idev(skb->dst),
-			      IPSTATS_MIB_OUTDISCARDS);
+		if (skb->dst)
+			IP6_INC_STATS(ip6_dst_idev(skb->dst),
+				      IPSTATS_MIB_OUTDISCARDS);
 		kfree_skb(skb);
 	}
 
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 761a910f4f97..6b038aa72e88 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -554,6 +554,10 @@ done:
 	{
 		struct ipv6_mreq mreq;
 
+		retv = -EPROTO;
+		if (inet_sk(sk)->is_icsk)
+			break;
+
 		retv = -EFAULT;
 		if (copy_from_user(&mreq, optval, sizeof(struct ipv6_mreq)))
 			break;
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 0358e6066a4e..5b596659177c 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -736,7 +736,7 @@ static void ndisc_recv_ns(struct sk_buff *skb)
 			 * so fail our DAD process
 			 */
 			addrconf_dad_failure(ifp);
-			goto out;
+			return;
 		} else {
 			/*
 			 * This is not a dad solicitation.
@@ -1268,9 +1268,10 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
 
 	if (ipv6_addr_equal(dest, target)) {
 		on_link = 1;
-	} else if (!(ipv6_addr_type(target) & IPV6_ADDR_LINKLOCAL)) {
+	} else if (ipv6_addr_type(target) !=
+		   (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
 		ND_PRINTK2(KERN_WARNING
-			   "ICMPv6 Redirect: target address is not link-local.\n");
+			   "ICMPv6 Redirect: target address is not link-local unicast.\n");
 		return;
 	}
 
@@ -1344,9 +1345,9 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
 	}
 
 	if (!ipv6_addr_equal(&ipv6_hdr(skb)->daddr, target) &&
-	    !(ipv6_addr_type(target) & IPV6_ADDR_LINKLOCAL)) {
+	    ipv6_addr_type(target) != (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
 		ND_PRINTK2(KERN_WARNING
-			   "ICMPv6 Redirect: target address is not link-local.\n");
+			   "ICMPv6 Redirect: target address is not link-local unicast.\n");
 		return;
 	}
 
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index aeda617246b7..cd9df02bb85c 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1462,6 +1462,7 @@ static struct nf_sockopt_ops ip6t_sockopts = {
 	.get_optmin = IP6T_BASE_CTL,
 	.get_optmax = IP6T_SO_GET_MAX+1,
 	.get        = do_ip6t_get_ctl,
+	.owner      = THIS_MODULE,
 };
 
 static struct xt_match icmp6_matchstruct __read_mostly = {
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index e27383d855de..77167afa3455 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -882,11 +882,10 @@ back_from_confirm:
 			ip6_flush_pending_frames(sk);
 		else if (!(msg->msg_flags & MSG_MORE))
 			err = rawv6_push_pending_frames(sk, &fl, rp);
+		release_sock(sk);
 	}
 done:
 	dst_release(dst);
-	if (!inet->hdrincl)
-		release_sock(sk);
 out:
 	fl6_sock_release(flowlabel);
 	return err<0?err:len;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 0f7defb482e9..3e06799b37a6 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -539,7 +539,7 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
 
 	for (i = 0; i < tp->md5sig_info->entries6; i++) {
 		if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, addr) == 0)
-			return (struct tcp_md5sig_key *)&tp->md5sig_info->keys6[i];
+			return &tp->md5sig_info->keys6[i].base;
 	}
 	return NULL;
 }
@@ -567,9 +567,9 @@ static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
 	key = (struct tcp6_md5sig_key*) tcp_v6_md5_do_lookup(sk, peer);
 	if (key) {
 		/* modify existing entry - just update that one */
-		kfree(key->key);
-		key->key = newkey;
-		key->keylen = newkeylen;
+		kfree(key->base.key);
+		key->base.key = newkey;
+		key->base.keylen = newkeylen;
 	} else {
 		/* reallocate new list if current one is full. */
 		if (!tp->md5sig_info) {
@@ -603,8 +603,8 @@ static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
603 603
604 ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr, 604 ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
605 peer); 605 peer);
606 tp->md5sig_info->keys6[tp->md5sig_info->entries6].key = newkey; 606 tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
607 tp->md5sig_info->keys6[tp->md5sig_info->entries6].keylen = newkeylen; 607 tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;
608 608
609 tp->md5sig_info->entries6++; 609 tp->md5sig_info->entries6++;
610 } 610 }
@@ -626,7 +626,7 @@ static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
626 for (i = 0; i < tp->md5sig_info->entries6; i++) { 626 for (i = 0; i < tp->md5sig_info->entries6; i++) {
627 if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, peer) == 0) { 627 if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, peer) == 0) {
628 /* Free the key */ 628 /* Free the key */
629 kfree(tp->md5sig_info->keys6[i].key); 629 kfree(tp->md5sig_info->keys6[i].base.key);
630 tp->md5sig_info->entries6--; 630 tp->md5sig_info->entries6--;
631 631
632 if (tp->md5sig_info->entries6 == 0) { 632 if (tp->md5sig_info->entries6 == 0) {
@@ -657,7 +657,7 @@ static void tcp_v6_clear_md5_list (struct sock *sk)
657 657
658 if (tp->md5sig_info->entries6) { 658 if (tp->md5sig_info->entries6) {
659 for (i = 0; i < tp->md5sig_info->entries6; i++) 659 for (i = 0; i < tp->md5sig_info->entries6; i++)
660 kfree(tp->md5sig_info->keys6[i].key); 660 kfree(tp->md5sig_info->keys6[i].base.key);
661 tp->md5sig_info->entries6 = 0; 661 tp->md5sig_info->entries6 = 0;
662 tcp_free_md5sig_pool(); 662 tcp_free_md5sig_pool();
663 } 663 }
@@ -668,7 +668,7 @@ static void tcp_v6_clear_md5_list (struct sock *sk)
668 668
669 if (tp->md5sig_info->entries4) { 669 if (tp->md5sig_info->entries4) {
670 for (i = 0; i < tp->md5sig_info->entries4; i++) 670 for (i = 0; i < tp->md5sig_info->entries4; i++)
671 kfree(tp->md5sig_info->keys4[i].key); 671 kfree(tp->md5sig_info->keys4[i].base.key);
672 tp->md5sig_info->entries4 = 0; 672 tp->md5sig_info->entries4 = 0;
673 tcp_free_md5sig_pool(); 673 tcp_free_md5sig_pool();
674 } 674 }
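The tcp_ipv6.c hunks above all track one layout change: the IPv6-specific MD5 key entry now embeds the protocol-independent key as a member named "base", so shared code takes &entry.base instead of casting the whole struct. A minimal standalone sketch of that pattern (the structs here are trimmed illustrations, not the kernel's full definitions):

#include <stddef.h>

struct tcp_md5sig_key {                 /* common, address-family independent part */
	unsigned char *key;
	unsigned int keylen;
};

struct tcp6_md5sig_key {
	struct tcp_md5sig_key base;     /* first member, so the old cast also worked */
	unsigned char addr[16];         /* stand-in for struct in6_addr */
};

/* What the lookup path now returns: a pointer to the embedded base. */
static struct tcp_md5sig_key *tcp6_key_common(struct tcp6_md5sig_key *k)
{
	return &k->base;
}

Keeping the common part as the first member preserves the old cast's behaviour while letting the compiler type-check every access.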
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 4210951edb6e..c347f3e30e2e 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -555,6 +555,8 @@ static int udp_v6_push_pending_frames(struct sock *sk)
555out: 555out:
556 up->len = 0; 556 up->len = 0;
557 up->pending = 0; 557 up->pending = 0;
558 if (!err)
559 UDP6_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS, up->pcflag);
558 return err; 560 return err;
559} 561}
560 562
@@ -823,10 +825,8 @@ do_append_data:
823 release_sock(sk); 825 release_sock(sk);
824out: 826out:
825 fl6_sock_release(flowlabel); 827 fl6_sock_release(flowlabel);
826 if (!err) { 828 if (!err)
827 UDP6_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS, is_udplite);
828 return len; 829 return len;
829 }
830 /* 830 /*
831 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting 831 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting
832 * ENOBUFS might not be good (it's not tunable per se), but otherwise 832 * ENOBUFS might not be good (it's not tunable per se), but otherwise
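The udp.c change above moves the UDP_MIB_OUTDATAGRAMS bump from the sendmsg path into udp_v6_push_pending_frames(), so a corked datagram built by several sendmsg() calls is counted exactly once, when it is actually pushed. A toy sketch of that accounting rule (names are illustrative, not the kernel API):

#include <stdio.h>

static unsigned long out_datagrams;     /* stands in for the SNMP MIB counter */

static int push_pending_frames(int err)
{
	if (!err)
		out_datagrams++;        /* count once per datagram that leaves */
	return err;
}

int main(void)
{
	push_pending_frames(0);         /* successful flush of a corked datagram */
	push_pending_frames(-1);        /* failed flush: nothing was sent */
	printf("OutDatagrams=%lu\n", out_datagrams);
	return 0;
}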
diff --git a/net/mac80211/ieee80211.c b/net/mac80211/ieee80211.c
index 7286c389a4d0..ff2172ffd861 100644
--- a/net/mac80211/ieee80211.c
+++ b/net/mac80211/ieee80211.c
@@ -5259,7 +5259,7 @@ static void __exit ieee80211_exit(void)
5259} 5259}
5260 5260
5261 5261
5262module_init(ieee80211_init); 5262subsys_initcall(ieee80211_init);
5263module_exit(ieee80211_exit); 5263module_exit(ieee80211_exit);
5264 5264
5265MODULE_DESCRIPTION("IEEE 802.11 subsystem"); 5265MODULE_DESCRIPTION("IEEE 802.11 subsystem");
diff --git a/net/mac80211/rc80211_simple.c b/net/mac80211/rc80211_simple.c
index f6780d63b342..17b9f46bbf2b 100644
--- a/net/mac80211/rc80211_simple.c
+++ b/net/mac80211/rc80211_simple.c
@@ -431,7 +431,7 @@ static void __exit rate_control_simple_exit(void)
431} 431}
432 432
433 433
434module_init(rate_control_simple_init); 434subsys_initcall(rate_control_simple_init);
435module_exit(rate_control_simple_exit); 435module_exit(rate_control_simple_exit);
436 436
437MODULE_DESCRIPTION("Simple rate control algorithm for ieee80211"); 437MODULE_DESCRIPTION("Simple rate control algorithm for ieee80211");
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 89ce81529694..7ab82b376e1b 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -424,7 +424,7 @@ static int wme_qdiscop_init(struct Qdisc *qd, struct rtattr *opt)
424 skb_queue_head_init(&q->requeued[i]); 424 skb_queue_head_init(&q->requeued[i]);
425 q->queues[i] = qdisc_create_dflt(qd->dev, &pfifo_qdisc_ops, 425 q->queues[i] = qdisc_create_dflt(qd->dev, &pfifo_qdisc_ops,
426 qd->handle); 426 qd->handle);
427 if (q->queues[i] == 0) { 427 if (!q->queues[i]) {
428 q->queues[i] = &noop_qdisc; 428 q->queues[i] = &noop_qdisc;
429 printk(KERN_ERR "%s child qdisc %i creation failed", dev->name, i); 429 printk(KERN_ERR "%s child qdisc %i creation failed", dev->name, i);
430 } 430 }
diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
index 8b8ece750313..e32761ce260c 100644
--- a/net/netfilter/nf_sockopt.c
+++ b/net/netfilter/nf_sockopt.c
@@ -55,18 +55,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
55 55
56void nf_unregister_sockopt(struct nf_sockopt_ops *reg) 56void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
57{ 57{
58 /* No point being interruptible: we're probably in cleanup_module() */
59 restart:
60 mutex_lock(&nf_sockopt_mutex); 58 mutex_lock(&nf_sockopt_mutex);
61 if (reg->use != 0) {
62 /* To be woken by nf_sockopt call... */
 63 /* FIXME: Stuart McClure's rules. Rumour source: Alan. */
64 set_current_state(TASK_UNINTERRUPTIBLE);
65 reg->cleanup_task = current;
66 mutex_unlock(&nf_sockopt_mutex);
67 schedule();
68 goto restart;
69 }
70 list_del(&reg->list); 59 list_del(&reg->list);
71 mutex_unlock(&nf_sockopt_mutex); 60 mutex_unlock(&nf_sockopt_mutex);
72} 61}
@@ -86,10 +75,11 @@ static int nf_sockopt(struct sock *sk, int pf, int val,
86 list_for_each(i, &nf_sockopts) { 75 list_for_each(i, &nf_sockopts) {
87 ops = (struct nf_sockopt_ops *)i; 76 ops = (struct nf_sockopt_ops *)i;
88 if (ops->pf == pf) { 77 if (ops->pf == pf) {
78 if (!try_module_get(ops->owner))
79 goto out_nosup;
89 if (get) { 80 if (get) {
90 if (val >= ops->get_optmin 81 if (val >= ops->get_optmin
91 && val < ops->get_optmax) { 82 && val < ops->get_optmax) {
92 ops->use++;
93 mutex_unlock(&nf_sockopt_mutex); 83 mutex_unlock(&nf_sockopt_mutex);
94 ret = ops->get(sk, val, opt, len); 84 ret = ops->get(sk, val, opt, len);
95 goto out; 85 goto out;
@@ -97,23 +87,20 @@ static int nf_sockopt(struct sock *sk, int pf, int val,
97 } else { 87 } else {
98 if (val >= ops->set_optmin 88 if (val >= ops->set_optmin
99 && val < ops->set_optmax) { 89 && val < ops->set_optmax) {
100 ops->use++;
101 mutex_unlock(&nf_sockopt_mutex); 90 mutex_unlock(&nf_sockopt_mutex);
102 ret = ops->set(sk, val, opt, *len); 91 ret = ops->set(sk, val, opt, *len);
103 goto out; 92 goto out;
104 } 93 }
105 } 94 }
95 module_put(ops->owner);
106 } 96 }
107 } 97 }
98 out_nosup:
108 mutex_unlock(&nf_sockopt_mutex); 99 mutex_unlock(&nf_sockopt_mutex);
109 return -ENOPROTOOPT; 100 return -ENOPROTOOPT;
110 101
111 out: 102 out:
112 mutex_lock(&nf_sockopt_mutex); 103 module_put(ops->owner);
113 ops->use--;
114 if (ops->cleanup_task)
115 wake_up_process(ops->cleanup_task);
116 mutex_unlock(&nf_sockopt_mutex);
117 return ret; 104 return ret;
118} 105}
119 106
@@ -144,10 +131,12 @@ static int compat_nf_sockopt(struct sock *sk, int pf, int val,
144 list_for_each(i, &nf_sockopts) { 131 list_for_each(i, &nf_sockopts) {
145 ops = (struct nf_sockopt_ops *)i; 132 ops = (struct nf_sockopt_ops *)i;
146 if (ops->pf == pf) { 133 if (ops->pf == pf) {
134 if (!try_module_get(ops->owner))
135 goto out_nosup;
136
147 if (get) { 137 if (get) {
148 if (val >= ops->get_optmin 138 if (val >= ops->get_optmin
149 && val < ops->get_optmax) { 139 && val < ops->get_optmax) {
150 ops->use++;
151 mutex_unlock(&nf_sockopt_mutex); 140 mutex_unlock(&nf_sockopt_mutex);
152 if (ops->compat_get) 141 if (ops->compat_get)
153 ret = ops->compat_get(sk, 142 ret = ops->compat_get(sk,
@@ -160,7 +149,6 @@ static int compat_nf_sockopt(struct sock *sk, int pf, int val,
160 } else { 149 } else {
161 if (val >= ops->set_optmin 150 if (val >= ops->set_optmin
162 && val < ops->set_optmax) { 151 && val < ops->set_optmax) {
163 ops->use++;
164 mutex_unlock(&nf_sockopt_mutex); 152 mutex_unlock(&nf_sockopt_mutex);
165 if (ops->compat_set) 153 if (ops->compat_set)
166 ret = ops->compat_set(sk, 154 ret = ops->compat_set(sk,
@@ -171,17 +159,15 @@ static int compat_nf_sockopt(struct sock *sk, int pf, int val,
171 goto out; 159 goto out;
172 } 160 }
173 } 161 }
162 module_put(ops->owner);
174 } 163 }
175 } 164 }
165 out_nosup:
176 mutex_unlock(&nf_sockopt_mutex); 166 mutex_unlock(&nf_sockopt_mutex);
177 return -ENOPROTOOPT; 167 return -ENOPROTOOPT;
178 168
179 out: 169 out:
180 mutex_lock(&nf_sockopt_mutex); 170 module_put(ops->owner);
181 ops->use--;
182 if (ops->cleanup_task)
183 wake_up_process(ops->cleanup_task);
184 mutex_unlock(&nf_sockopt_mutex);
185 return ret; 171 return ret;
186} 172}
187 173
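The nf_sockopt.c rework above swaps a hand-rolled "use" counter -- where nf_unregister_sockopt() parked itself in TASK_UNINTERRUPTIBLE until the count dropped -- for module reference counting: callers try_module_get() the owner and simply fail with -ENOPROTOOPT once the module is going away, so unregister never has to sleep. A userspace sketch of that try-get-or-fail pattern (a stdatomic stand-in, not the kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct sockopt_ops {
	atomic_int refs;        /* >= 0 while live, -1 once teardown begins */
};

/* Analogue of try_module_get(): take a reference or fail, never block. */
static bool ops_tryget(struct sockopt_ops *o)
{
	int old = atomic_load(&o->refs);

	while (old >= 0)
		if (atomic_compare_exchange_weak(&o->refs, &old, old + 1))
			return true;
	return false;           /* owner is unregistering */
}

static void ops_put(struct sockopt_ops *o)      /* analogue of module_put() */
{
	atomic_fetch_sub(&o->refs, 1);
}

int main(void)
{
	struct sockopt_ops ops = { .refs = 0 };

	if (ops_tryget(&ops)) {         /* the get/setsockopt fast path */
		/* ... dispatch ops->get()/ops->set() here ... */
		ops_put(&ops);
	}
	atomic_store(&ops.refs, -1);    /* unregister: new lookups now fail */
	printf("tryget after unregister: %d\n", ops_tryget(&ops));
	return 0;
}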
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index e185a5b55913..2351533a8507 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -58,7 +58,6 @@ struct nfulnl_instance {
58 58
59 unsigned int qlen; /* number of nlmsgs in skb */ 59 unsigned int qlen; /* number of nlmsgs in skb */
 60 struct sk_buff *skb; /* pre-allocated skb */ 60 struct sk_buff *skb; /* pre-allocated skb */
61 struct nlmsghdr *lastnlh; /* netlink header of last msg in skb */
62 struct timer_list timer; 61 struct timer_list timer;
63 int peer_pid; /* PID of the peer process */ 62 int peer_pid; /* PID of the peer process */
64 63
@@ -345,10 +344,12 @@ static struct sk_buff *nfulnl_alloc_skb(unsigned int inst_size,
345static int 344static int
346__nfulnl_send(struct nfulnl_instance *inst) 345__nfulnl_send(struct nfulnl_instance *inst)
347{ 346{
348 int status; 347 int status = -1;
349 348
350 if (inst->qlen > 1) 349 if (inst->qlen > 1)
351 inst->lastnlh->nlmsg_type = NLMSG_DONE; 350 NLMSG_PUT(inst->skb, 0, 0,
351 NLMSG_DONE,
352 sizeof(struct nfgenmsg));
352 353
353 status = nfnetlink_unicast(inst->skb, inst->peer_pid, MSG_DONTWAIT); 354 status = nfnetlink_unicast(inst->skb, inst->peer_pid, MSG_DONTWAIT);
354 if (status < 0) { 355 if (status < 0) {
@@ -358,8 +359,8 @@ __nfulnl_send(struct nfulnl_instance *inst)
358 359
359 inst->qlen = 0; 360 inst->qlen = 0;
360 inst->skb = NULL; 361 inst->skb = NULL;
361 inst->lastnlh = NULL;
362 362
363nlmsg_failure:
363 return status; 364 return status;
364} 365}
365 366
@@ -538,7 +539,6 @@ __build_packet_message(struct nfulnl_instance *inst,
538 } 539 }
539 540
540 nlh->nlmsg_len = inst->skb->tail - old_tail; 541 nlh->nlmsg_len = inst->skb->tail - old_tail;
541 inst->lastnlh = nlh;
542 return 0; 542 return 0;
543 543
544nlmsg_failure: 544nlmsg_failure:
@@ -644,7 +644,8 @@ nfulnl_log_packet(unsigned int pf,
644 } 644 }
645 645
646 if (inst->qlen >= qthreshold || 646 if (inst->qlen >= qthreshold ||
647 (inst->skb && size > skb_tailroom(inst->skb))) { 647 (inst->skb && size >
648 skb_tailroom(inst->skb) - sizeof(struct nfgenmsg))) {
648 /* either the queue len is too high or we don't have 649 /* either the queue len is too high or we don't have
649 * enough room in the skb left. flush to userspace. */ 650 * enough room in the skb left. flush to userspace. */
650 UDEBUG("flushing old skb\n"); 651 UDEBUG("flushing old skb\n");
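The nfnetlink_log.c hunks above stop rewriting the last queued header in place (the removed "lastnlh" bookkeeping) and instead append a fresh NLMSG_DONE record at flush time; to guarantee that record always fits, the flush test now reserves sizeof(struct nfgenmsg) of tailroom up front. A sketch of that reserve-the-trailer check (simplified sizes, illustrative names):

#include <stdbool.h>
#include <stddef.h>

struct batch {
	size_t cap;             /* total buffer size */
	size_t used;            /* bytes already queued */
	unsigned int qlen;      /* messages already queued */
};

static bool must_flush(const struct batch *b, size_t next_msg,
		       size_t done_trailer, unsigned int qthreshold)
{
	/* Flush early enough that the DONE trailer can still be appended. */
	return b->qlen >= qthreshold ||
	       b->used + next_msg + done_trailer > b->cap;
}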
diff --git a/net/netfilter/xt_tcpudp.c b/net/netfilter/xt_tcpudp.c
index ab7d845224fc..223f9bded672 100644
--- a/net/netfilter/xt_tcpudp.c
+++ b/net/netfilter/xt_tcpudp.c
@@ -188,7 +188,7 @@ udp_checkentry(const char *tablename,
188 void *matchinfo, 188 void *matchinfo,
189 unsigned int hook_mask) 189 unsigned int hook_mask)
190{ 190{
191 const struct xt_tcp *udpinfo = matchinfo; 191 const struct xt_udp *udpinfo = matchinfo;
192 192
193 /* Must specify no unknown invflags */ 193 /* Must specify no unknown invflags */
194 return !(udpinfo->invflags & ~XT_UDP_INV_MASK); 194 return !(udpinfo->invflags & ~XT_UDP_INV_MASK);
diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c
index cd01642f0491..114df6eec8c3 100644
--- a/net/rose/rose_loopback.c
+++ b/net/rose/rose_loopback.c
@@ -79,7 +79,7 @@ static void rose_loopback_timer(unsigned long param)
79 79
80 skb_reset_transport_header(skb); 80 skb_reset_transport_header(skb);
81 81
82 sk = rose_find_socket(lci_o, &rose_loopback_neigh); 82 sk = rose_find_socket(lci_o, rose_loopback_neigh);
83 if (sk) { 83 if (sk) {
84 if (rose_process_rx_frame(sk, skb) == 0) 84 if (rose_process_rx_frame(sk, skb) == 0)
85 kfree_skb(skb); 85 kfree_skb(skb);
@@ -88,7 +88,7 @@ static void rose_loopback_timer(unsigned long param)
88 88
89 if (frametype == ROSE_CALL_REQUEST) { 89 if (frametype == ROSE_CALL_REQUEST) {
90 if ((dev = rose_dev_get(dest)) != NULL) { 90 if ((dev = rose_dev_get(dest)) != NULL) {
91 if (rose_rx_call_request(skb, dev, &rose_loopback_neigh, lci_o) == 0) 91 if (rose_rx_call_request(skb, dev, rose_loopback_neigh, lci_o) == 0)
92 kfree_skb(skb); 92 kfree_skb(skb);
93 } else { 93 } else {
94 kfree_skb(skb); 94 kfree_skb(skb);
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index bbcbad1da0d0..96f61a71b252 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -45,7 +45,7 @@ static DEFINE_SPINLOCK(rose_neigh_list_lock);
45static struct rose_route *rose_route_list; 45static struct rose_route *rose_route_list;
46static DEFINE_SPINLOCK(rose_route_list_lock); 46static DEFINE_SPINLOCK(rose_route_list_lock);
47 47
48struct rose_neigh rose_loopback_neigh; 48struct rose_neigh *rose_loopback_neigh;
49 49
50/* 50/*
51 * Add a new route to a node, and in the process add the node and the 51 * Add a new route to a node, and in the process add the node and the
@@ -362,7 +362,12 @@ out:
362 */ 362 */
363void rose_add_loopback_neigh(void) 363void rose_add_loopback_neigh(void)
364{ 364{
365 struct rose_neigh *sn = &rose_loopback_neigh; 365 struct rose_neigh *sn;
366
367 rose_loopback_neigh = kmalloc(sizeof(struct rose_neigh), GFP_KERNEL);
368 if (!rose_loopback_neigh)
369 return;
370 sn = rose_loopback_neigh;
366 371
367 sn->callsign = null_ax25_address; 372 sn->callsign = null_ax25_address;
368 sn->digipeat = NULL; 373 sn->digipeat = NULL;
@@ -417,13 +422,13 @@ int rose_add_loopback_node(rose_address *address)
417 rose_node->mask = 10; 422 rose_node->mask = 10;
418 rose_node->count = 1; 423 rose_node->count = 1;
419 rose_node->loopback = 1; 424 rose_node->loopback = 1;
420 rose_node->neighbour[0] = &rose_loopback_neigh; 425 rose_node->neighbour[0] = rose_loopback_neigh;
421 426
422 /* Insert at the head of list. Address is always mask=10 */ 427 /* Insert at the head of list. Address is always mask=10 */
423 rose_node->next = rose_node_list; 428 rose_node->next = rose_node_list;
424 rose_node_list = rose_node; 429 rose_node_list = rose_node;
425 430
426 rose_loopback_neigh.count++; 431 rose_loopback_neigh->count++;
427 432
428out: 433out:
429 spin_unlock_bh(&rose_node_list_lock); 434 spin_unlock_bh(&rose_node_list_lock);
@@ -454,7 +459,7 @@ void rose_del_loopback_node(rose_address *address)
454 459
455 rose_remove_node(rose_node); 460 rose_remove_node(rose_node);
456 461
457 rose_loopback_neigh.count--; 462 rose_loopback_neigh->count--;
458 463
459out: 464out:
460 spin_unlock_bh(&rose_node_list_lock); 465 spin_unlock_bh(&rose_node_list_lock);
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index feef366cad5d..72cdb0fade20 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -68,7 +68,7 @@ static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
68 int err = 0, index = -1,i = 0, s_i = 0, n_i = 0; 68 int err = 0, index = -1,i = 0, s_i = 0, n_i = 0;
69 struct rtattr *r ; 69 struct rtattr *r ;
70 70
71 read_lock(hinfo->lock); 71 read_lock_bh(hinfo->lock);
72 72
73 s_i = cb->args[0]; 73 s_i = cb->args[0];
74 74
@@ -96,7 +96,7 @@ static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
96 } 96 }
97 } 97 }
98done: 98done:
99 read_unlock(hinfo->lock); 99 read_unlock_bh(hinfo->lock);
100 if (n_i) 100 if (n_i)
101 cb->args[0] += n_i; 101 cb->args[0] += n_i;
102 return n_i; 102 return n_i;
@@ -156,13 +156,13 @@ struct tcf_common *tcf_hash_lookup(u32 index, struct tcf_hashinfo *hinfo)
156{ 156{
157 struct tcf_common *p; 157 struct tcf_common *p;
158 158
159 read_lock(hinfo->lock); 159 read_lock_bh(hinfo->lock);
160 for (p = hinfo->htab[tcf_hash(index, hinfo->hmask)]; p; 160 for (p = hinfo->htab[tcf_hash(index, hinfo->hmask)]; p;
161 p = p->tcfc_next) { 161 p = p->tcfc_next) {
162 if (p->tcfc_index == index) 162 if (p->tcfc_index == index)
163 break; 163 break;
164 } 164 }
165 read_unlock(hinfo->lock); 165 read_unlock_bh(hinfo->lock);
166 166
167 return p; 167 return p;
168} 168}
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 6085be578459..17f6f27e28a2 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -56,7 +56,7 @@ static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *c
56 int err = 0, index = -1, i = 0, s_i = 0, n_i = 0; 56 int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
57 struct rtattr *r; 57 struct rtattr *r;
58 58
59 read_lock(&police_lock); 59 read_lock_bh(&police_lock);
60 60
61 s_i = cb->args[0]; 61 s_i = cb->args[0];
62 62
@@ -85,7 +85,7 @@ static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *c
85 } 85 }
86 } 86 }
87done: 87done:
88 read_unlock(&police_lock); 88 read_unlock_bh(&police_lock);
89 if (n_i) 89 if (n_i)
90 cb->args[0] += n_i; 90 cb->args[0] += n_i;
91 return n_i; 91 return n_i;
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 8dbe36912ecb..d4d5d2f271d2 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -502,7 +502,7 @@ static int u32_set_parms(struct tcf_proto *tp, unsigned long base,
502 502
503#ifdef CONFIG_NET_CLS_IND 503#ifdef CONFIG_NET_CLS_IND
504 if (tb[TCA_U32_INDEV-1]) { 504 if (tb[TCA_U32_INDEV-1]) {
505 int err = tcf_change_indev(tp, n->indev, tb[TCA_U32_INDEV-1]); 505 err = tcf_change_indev(tp, n->indev, tb[TCA_U32_INDEV-1]);
506 if (err < 0) 506 if (err < 0)
507 goto errout; 507 goto errout;
508 } 508 }
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index e38c2839b25c..cbef3bbfc20f 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -380,7 +380,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
380{ 380{
381 struct cbq_sched_data *q = qdisc_priv(sch); 381 struct cbq_sched_data *q = qdisc_priv(sch);
382 int len = skb->len; 382 int len = skb->len;
383 int ret; 383 int uninitialized_var(ret);
384 struct cbq_class *cl = cbq_classify(skb, sch, &ret); 384 struct cbq_class *cl = cbq_classify(skb, sch, &ret);
385 385
386#ifdef CONFIG_NET_CLS_ACT 386#ifdef CONFIG_NET_CLS_ACT
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 4a49db65772e..abd82fc3ec60 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -44,7 +44,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
44 if (TC_H_MAJ(skb->priority) != sch->handle) { 44 if (TC_H_MAJ(skb->priority) != sch->handle) {
45 err = tc_classify(skb, q->filter_list, &res); 45 err = tc_classify(skb, q->filter_list, &res);
46#ifdef CONFIG_NET_CLS_ACT 46#ifdef CONFIG_NET_CLS_ACT
47 switch (tc_classify(skb, q->filter_list, &res)) { 47 switch (err) {
48 case TC_ACT_STOLEN: 48 case TC_ACT_STOLEN:
49 case TC_ACT_QUEUED: 49 case TC_ACT_QUEUED:
50 *qerr = NET_XMIT_SUCCESS; 50 *qerr = NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 957957309859..b542c875e154 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -19,6 +19,7 @@
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/ipv6.h> 20#include <linux/ipv6.h>
21#include <linux/skbuff.h> 21#include <linux/skbuff.h>
22#include <linux/jhash.h>
22#include <net/ip.h> 23#include <net/ip.h>
23#include <net/netlink.h> 24#include <net/netlink.h>
24#include <net/pkt_sched.h> 25#include <net/pkt_sched.h>
@@ -95,7 +96,7 @@ struct sfq_sched_data
95 96
96/* Variables */ 97/* Variables */
97 struct timer_list perturb_timer; 98 struct timer_list perturb_timer;
98 int perturbation; 99 u32 perturbation;
99 sfq_index tail; /* Index of current slot in round */ 100 sfq_index tail; /* Index of current slot in round */
100 sfq_index max_depth; /* Maximal depth */ 101 sfq_index max_depth; /* Maximal depth */
101 102
@@ -109,12 +110,7 @@ struct sfq_sched_data
109 110
110static __inline__ unsigned sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1) 111static __inline__ unsigned sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1)
111{ 112{
112 int pert = q->perturbation; 113 return jhash_2words(h, h1, q->perturbation) & (SFQ_HASH_DIVISOR - 1);
113
114 /* Have we any rotation primitives? If not, WHY? */
115 h ^= (h1<<pert) ^ (h1>>(0x1F - pert));
116 h ^= h>>10;
117 return h & 0x3FF;
118} 114}
119 115
120static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb) 116static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
@@ -256,6 +252,13 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc* sch)
256 q->ht[hash] = x = q->dep[SFQ_DEPTH].next; 252 q->ht[hash] = x = q->dep[SFQ_DEPTH].next;
257 q->hash[x] = hash; 253 q->hash[x] = hash;
258 } 254 }
 255 /* If the selected queue has length q->limit, this means that
 256 * all the other queues are empty and that we do a simple tail drop,
 257 * i.e. drop _this_ packet.
258 */
259 if (q->qs[x].qlen >= q->limit)
260 return qdisc_drop(skb, sch);
261
259 sch->qstats.backlog += skb->len; 262 sch->qstats.backlog += skb->len;
260 __skb_queue_tail(&q->qs[x], skb); 263 __skb_queue_tail(&q->qs[x], skb);
261 sfq_inc(q, x); 264 sfq_inc(q, x);
@@ -270,7 +273,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc* sch)
270 q->tail = x; 273 q->tail = x;
271 } 274 }
272 } 275 }
273 if (++sch->q.qlen < q->limit-1) { 276 if (++sch->q.qlen <= q->limit) {
274 sch->bstats.bytes += skb->len; 277 sch->bstats.bytes += skb->len;
275 sch->bstats.packets++; 278 sch->bstats.packets++;
276 return 0; 279 return 0;
@@ -294,6 +297,19 @@ sfq_requeue(struct sk_buff *skb, struct Qdisc* sch)
294 } 297 }
295 sch->qstats.backlog += skb->len; 298 sch->qstats.backlog += skb->len;
296 __skb_queue_head(&q->qs[x], skb); 299 __skb_queue_head(&q->qs[x], skb);
 300 /* If the selected queue has length q->limit+1, this means that
 301 * all the other queues are empty and we do a simple tail drop.
 302 * This packet is still requeued at the head of the queue; the
 303 * tail packet is dropped.
304 */
305 if (q->qs[x].qlen > q->limit) {
306 skb = q->qs[x].prev;
307 __skb_unlink(skb, &q->qs[x]);
308 sch->qstats.drops++;
309 sch->qstats.backlog -= skb->len;
310 kfree_skb(skb);
311 return NET_XMIT_CN;
312 }
297 sfq_inc(q, x); 313 sfq_inc(q, x);
298 if (q->qs[x].qlen == 1) { /* The flow is new */ 314 if (q->qs[x].qlen == 1) { /* The flow is new */
299 if (q->tail == SFQ_DEPTH) { /* It is the first flow */ 315 if (q->tail == SFQ_DEPTH) { /* It is the first flow */
@@ -306,7 +322,7 @@ sfq_requeue(struct sk_buff *skb, struct Qdisc* sch)
306 q->tail = x; 322 q->tail = x;
307 } 323 }
308 } 324 }
309 if (++sch->q.qlen < q->limit - 1) { 325 if (++sch->q.qlen <= q->limit) {
310 sch->qstats.requeues++; 326 sch->qstats.requeues++;
311 return 0; 327 return 0;
312 } 328 }
@@ -370,12 +386,10 @@ static void sfq_perturbation(unsigned long arg)
370 struct Qdisc *sch = (struct Qdisc*)arg; 386 struct Qdisc *sch = (struct Qdisc*)arg;
371 struct sfq_sched_data *q = qdisc_priv(sch); 387 struct sfq_sched_data *q = qdisc_priv(sch);
372 388
373 q->perturbation = net_random()&0x1F; 389 get_random_bytes(&q->perturbation, 4);
374 390
375 if (q->perturb_period) { 391 if (q->perturb_period)
376 q->perturb_timer.expires = jiffies + q->perturb_period; 392 mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
377 add_timer(&q->perturb_timer);
378 }
379} 393}
380 394
381static int sfq_change(struct Qdisc *sch, struct rtattr *opt) 395static int sfq_change(struct Qdisc *sch, struct rtattr *opt)
@@ -391,17 +405,17 @@ static int sfq_change(struct Qdisc *sch, struct rtattr *opt)
391 q->quantum = ctl->quantum ? : psched_mtu(sch->dev); 405 q->quantum = ctl->quantum ? : psched_mtu(sch->dev);
392 q->perturb_period = ctl->perturb_period*HZ; 406 q->perturb_period = ctl->perturb_period*HZ;
393 if (ctl->limit) 407 if (ctl->limit)
394 q->limit = min_t(u32, ctl->limit, SFQ_DEPTH); 408 q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1);
395 409
396 qlen = sch->q.qlen; 410 qlen = sch->q.qlen;
397 while (sch->q.qlen >= q->limit-1) 411 while (sch->q.qlen > q->limit)
398 sfq_drop(sch); 412 sfq_drop(sch);
399 qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen); 413 qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
400 414
401 del_timer(&q->perturb_timer); 415 del_timer(&q->perturb_timer);
402 if (q->perturb_period) { 416 if (q->perturb_period) {
403 q->perturb_timer.expires = jiffies + q->perturb_period; 417 mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
404 add_timer(&q->perturb_timer); 418 get_random_bytes(&q->perturbation, 4);
405 } 419 }
406 sch_tree_unlock(sch); 420 sch_tree_unlock(sch);
407 return 0; 421 return 0;
@@ -423,12 +437,13 @@ static int sfq_init(struct Qdisc *sch, struct rtattr *opt)
423 q->dep[i+SFQ_DEPTH].next = i+SFQ_DEPTH; 437 q->dep[i+SFQ_DEPTH].next = i+SFQ_DEPTH;
424 q->dep[i+SFQ_DEPTH].prev = i+SFQ_DEPTH; 438 q->dep[i+SFQ_DEPTH].prev = i+SFQ_DEPTH;
425 } 439 }
426 q->limit = SFQ_DEPTH; 440 q->limit = SFQ_DEPTH - 1;
427 q->max_depth = 0; 441 q->max_depth = 0;
428 q->tail = SFQ_DEPTH; 442 q->tail = SFQ_DEPTH;
429 if (opt == NULL) { 443 if (opt == NULL) {
430 q->quantum = psched_mtu(sch->dev); 444 q->quantum = psched_mtu(sch->dev);
431 q->perturb_period = 0; 445 q->perturb_period = 0;
446 get_random_bytes(&q->perturbation, 4);
432 } else { 447 } else {
433 int err = sfq_change(sch, opt); 448 int err = sfq_change(sch, opt);
434 if (err) 449 if (err)
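Two behavioural changes meet in the sch_sfq.c hunks above: the ad-hoc shift/XOR fold (with its "have we any rotation primitives?" lament) becomes a jhash_2words() of the two flow words keyed by a full 32-bit random perturbation, and the limit arithmetic is tightened so a flow at q->limit tail-drops instead of overflowing. A standalone sketch of the new hash fold -- mix32() is a stand-in avalanche mix, not the kernel's jhash:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SFQ_HASH_DIVISOR 1024   /* bucket count; must stay a power of two */

static uint32_t mix32(uint32_t a, uint32_t b, uint32_t seed)
{
	uint32_t h = seed ^ a;

	h = (h ^ (h >> 16)) * 0x45d9f3bu;       /* cheap avalanche rounds */
	h ^= b;
	h = (h ^ (h >> 16)) * 0x45d9f3bu;
	return h ^ (h >> 16);
}

static unsigned int sfq_fold_hash(uint32_t h, uint32_t h1, uint32_t perturbation)
{
	return mix32(h, h1, perturbation) & (SFQ_HASH_DIVISOR - 1);
}

int main(void)
{
	uint32_t perturbation = (uint32_t)random();     /* re-drawn every perturb_period */

	printf("bucket=%u\n", sfq_fold_hash(0xc0a80001u, 0x1f90u, perturbation));
	return 0;
}

Keying the fold with an unpredictable 32-bit value makes it much harder for a sender to craft flows that all collide into one bucket.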
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 498edb0cd4e5..9bad8ba0feda 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -99,7 +99,6 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
99 99
100 /* Initialize the bind addr area. */ 100 /* Initialize the bind addr area. */
101 sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port); 101 sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port);
102 rwlock_init(&asoc->base.addr_lock);
103 102
104 asoc->state = SCTP_STATE_CLOSED; 103 asoc->state = SCTP_STATE_CLOSED;
105 104
@@ -727,7 +726,12 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
727 break; 726 break;
728 727
729 case SCTP_TRANSPORT_DOWN: 728 case SCTP_TRANSPORT_DOWN:
 730 transport->state = SCTP_INACTIVE; 729 /* If the transport was never confirmed, do not transition it
730 * to inactive state.
731 */
732 if (transport->state != SCTP_UNCONFIRMED)
733 transport->state = SCTP_INACTIVE;
734
731 spc_state = SCTP_ADDR_UNREACHABLE; 735 spc_state = SCTP_ADDR_UNREACHABLE;
732 break; 736 break;
733 737
@@ -932,8 +936,6 @@ struct sctp_transport *sctp_assoc_is_match(struct sctp_association *asoc,
932{ 936{
933 struct sctp_transport *transport; 937 struct sctp_transport *transport;
934 938
935 sctp_read_lock(&asoc->base.addr_lock);
936
937 if ((htons(asoc->base.bind_addr.port) == laddr->v4.sin_port) && 939 if ((htons(asoc->base.bind_addr.port) == laddr->v4.sin_port) &&
938 (htons(asoc->peer.port) == paddr->v4.sin_port)) { 940 (htons(asoc->peer.port) == paddr->v4.sin_port)) {
939 transport = sctp_assoc_lookup_paddr(asoc, paddr); 941 transport = sctp_assoc_lookup_paddr(asoc, paddr);
@@ -947,7 +949,6 @@ struct sctp_transport *sctp_assoc_is_match(struct sctp_association *asoc,
947 transport = NULL; 949 transport = NULL;
948 950
949out: 951out:
950 sctp_read_unlock(&asoc->base.addr_lock);
951 return transport; 952 return transport;
952} 953}
953 954
@@ -1371,19 +1372,13 @@ int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc,
1371int sctp_assoc_lookup_laddr(struct sctp_association *asoc, 1372int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
1372 const union sctp_addr *laddr) 1373 const union sctp_addr *laddr)
1373{ 1374{
1374 int found; 1375 int found = 0;
1375 1376
1376 sctp_read_lock(&asoc->base.addr_lock);
1377 if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) && 1377 if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) &&
1378 sctp_bind_addr_match(&asoc->base.bind_addr, laddr, 1378 sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
1379 sctp_sk(asoc->base.sk))) { 1379 sctp_sk(asoc->base.sk)))
1380 found = 1; 1380 found = 1;
1381 goto out;
1382 }
1383 1381
1384 found = 0;
1385out:
1386 sctp_read_unlock(&asoc->base.addr_lock);
1387 return found; 1382 return found;
1388} 1383}
1389 1384
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index fdb287a9e2e2..dfffa94fb9f6 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -163,9 +163,15 @@ int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,
163 addr->a.v4.sin_port = htons(bp->port); 163 addr->a.v4.sin_port = htons(bp->port);
164 164
165 addr->use_as_src = use_as_src; 165 addr->use_as_src = use_as_src;
166 addr->valid = 1;
166 167
167 INIT_LIST_HEAD(&addr->list); 168 INIT_LIST_HEAD(&addr->list);
168 list_add_tail(&addr->list, &bp->address_list); 169 INIT_RCU_HEAD(&addr->rcu);
170
 171 /* We always hold the socket lock when calling this function,
172 * and that acts as a writer synchronizing lock.
173 */
174 list_add_tail_rcu(&addr->list, &bp->address_list);
169 SCTP_DBG_OBJCNT_INC(addr); 175 SCTP_DBG_OBJCNT_INC(addr);
170 176
171 return 0; 177 return 0;
@@ -174,23 +180,35 @@ int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,
174/* Delete an address from the bind address list in the SCTP_bind_addr 180/* Delete an address from the bind address list in the SCTP_bind_addr
175 * structure. 181 * structure.
176 */ 182 */
177int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr) 183int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr,
184 void fastcall (*rcu_call)(struct rcu_head *head,
185 void (*func)(struct rcu_head *head)))
178{ 186{
179 struct list_head *pos, *temp; 187 struct sctp_sockaddr_entry *addr, *temp;
180 struct sctp_sockaddr_entry *addr;
181 188
182 list_for_each_safe(pos, temp, &bp->address_list) { 189 /* We hold the socket lock when calling this function,
183 addr = list_entry(pos, struct sctp_sockaddr_entry, list); 190 * and that acts as a writer synchronizing lock.
191 */
192 list_for_each_entry_safe(addr, temp, &bp->address_list, list) {
184 if (sctp_cmp_addr_exact(&addr->a, del_addr)) { 193 if (sctp_cmp_addr_exact(&addr->a, del_addr)) {
185 /* Found the exact match. */ 194 /* Found the exact match. */
186 list_del(pos); 195 addr->valid = 0;
187 kfree(addr); 196 list_del_rcu(&addr->list);
188 SCTP_DBG_OBJCNT_DEC(addr); 197 break;
189
190 return 0;
191 } 198 }
192 } 199 }
193 200
201 /* Call the rcu callback provided in the args. This function is
202 * called by both BH packet processing and user side socket option
 203 * processing, but it works on different lists in those two contexts.
 204 * Each context provides its own callback, whether call_rcu_bh()
205 * or call_rcu(), to make sure that we wait for an appropriate time.
206 */
207 if (addr && !addr->valid) {
208 rcu_call(&addr->rcu, sctp_local_addr_free);
209 SCTP_DBG_OBJCNT_DEC(addr);
210 }
211
194 return -EINVAL; 212 return -EINVAL;
195} 213}
196 214
@@ -300,15 +318,20 @@ int sctp_bind_addr_match(struct sctp_bind_addr *bp,
300 struct sctp_sock *opt) 318 struct sctp_sock *opt)
301{ 319{
302 struct sctp_sockaddr_entry *laddr; 320 struct sctp_sockaddr_entry *laddr;
303 struct list_head *pos; 321 int match = 0;
304 322
305 list_for_each(pos, &bp->address_list) { 323 rcu_read_lock();
306 laddr = list_entry(pos, struct sctp_sockaddr_entry, list); 324 list_for_each_entry_rcu(laddr, &bp->address_list, list) {
307 if (opt->pf->cmp_addr(&laddr->a, addr, opt)) 325 if (!laddr->valid)
308 return 1; 326 continue;
327 if (opt->pf->cmp_addr(&laddr->a, addr, opt)) {
328 match = 1;
329 break;
330 }
309 } 331 }
332 rcu_read_unlock();
310 333
311 return 0; 334 return match;
312} 335}
313 336
314/* Find the first address in the bind address list that is not present in 337/* Find the first address in the bind address list that is not present in
@@ -323,18 +346,19 @@ union sctp_addr *sctp_find_unmatch_addr(struct sctp_bind_addr *bp,
323 union sctp_addr *addr; 346 union sctp_addr *addr;
324 void *addr_buf; 347 void *addr_buf;
325 struct sctp_af *af; 348 struct sctp_af *af;
326 struct list_head *pos;
327 int i; 349 int i;
328 350
 329 list_for_each(pos, &bp->address_list) { 351 /* This is only called by sctp_send_asconf_del_ip() and we hold
 330 laddr = list_entry(pos, struct sctp_sockaddr_entry, list); 352 * the socket lock in that code path, so the address list
 331 353 * can't change.
354 */
355 list_for_each_entry(laddr, &bp->address_list, list) {
332 addr_buf = (union sctp_addr *)addrs; 356 addr_buf = (union sctp_addr *)addrs;
333 for (i = 0; i < addrcnt; i++) { 357 for (i = 0; i < addrcnt; i++) {
334 addr = (union sctp_addr *)addr_buf; 358 addr = (union sctp_addr *)addr_buf;
335 af = sctp_get_af_specific(addr->v4.sin_family); 359 af = sctp_get_af_specific(addr->v4.sin_family);
336 if (!af) 360 if (!af)
337 return NULL; 361 break;
338 362
339 if (opt->pf->cmp_addr(&laddr->a, addr, opt)) 363 if (opt->pf->cmp_addr(&laddr->a, addr, opt))
340 break; 364 break;
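sctp_del_bind_addr() above now implements the classic RCU removal protocol: under the writer lock it clears ->valid (so concurrent readers start skipping the entry), unlinks with list_del_rcu(), and defers the kfree() through whichever callback the caller passes (call_rcu() from process context, call_rcu_bh() from packet processing). A single-threaded sketch of that shape -- free_now() stands in for the deferred RCU callback and is only safe here because there are no concurrent readers:

#include <stdlib.h>
#include <string.h>

struct addr_entry {
	struct addr_entry *next;
	int valid;              /* readers ignore entries with valid == 0 */
	char addr[16];
};

static void free_now(struct addr_entry *e)
{
	free(e);                /* kernel: kfree() runs after a grace period */
}

static int del_addr(struct addr_entry **head, const char *addr,
		    void (*defer_free)(struct addr_entry *))
{
	struct addr_entry **pp, *e;

	for (pp = head; (e = *pp) != NULL; pp = &e->next) {
		if (strcmp(e->addr, addr) == 0) {
			e->valid = 0;           /* step 1: hide from readers */
			*pp = e->next;          /* step 2: list_del_rcu analogue */
			defer_free(e);          /* step 3: free after readers drain */
			return 0;
		}
	}
	return -1;      /* the kernel version returns -EINVAL */
}

A call like del_addr(&head, "192.0.2.1", free_now) mirrors the kernel call sites, which pass call_rcu() or call_rcu_bh() as the hook depending on context.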
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 1404a9e2e78f..8f485a0d14bd 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -92,7 +92,6 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
92 92
93 /* Initialize the bind addr area */ 93 /* Initialize the bind addr area */
94 sctp_bind_addr_init(&ep->base.bind_addr, 0); 94 sctp_bind_addr_init(&ep->base.bind_addr, 0);
95 rwlock_init(&ep->base.addr_lock);
96 95
97 /* Remember who we are attached to. */ 96 /* Remember who we are attached to. */
98 ep->base.sk = sk; 97 ep->base.sk = sk;
@@ -225,21 +224,14 @@ void sctp_endpoint_put(struct sctp_endpoint *ep)
225struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *ep, 224struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *ep,
226 const union sctp_addr *laddr) 225 const union sctp_addr *laddr)
227{ 226{
228 struct sctp_endpoint *retval; 227 struct sctp_endpoint *retval = NULL;
229 228
230 sctp_read_lock(&ep->base.addr_lock);
231 if (htons(ep->base.bind_addr.port) == laddr->v4.sin_port) { 229 if (htons(ep->base.bind_addr.port) == laddr->v4.sin_port) {
232 if (sctp_bind_addr_match(&ep->base.bind_addr, laddr, 230 if (sctp_bind_addr_match(&ep->base.bind_addr, laddr,
233 sctp_sk(ep->base.sk))) { 231 sctp_sk(ep->base.sk)))
234 retval = ep; 232 retval = ep;
235 goto out;
236 }
237 } 233 }
238 234
239 retval = NULL;
240
241out:
242 sctp_read_unlock(&ep->base.addr_lock);
243 return retval; 235 return retval;
244} 236}
245 237
@@ -261,9 +253,7 @@ static struct sctp_association *__sctp_endpoint_lookup_assoc(
261 list_for_each(pos, &ep->asocs) { 253 list_for_each(pos, &ep->asocs) {
262 asoc = list_entry(pos, struct sctp_association, asocs); 254 asoc = list_entry(pos, struct sctp_association, asocs);
263 if (rport == asoc->peer.port) { 255 if (rport == asoc->peer.port) {
264 sctp_read_lock(&asoc->base.addr_lock);
265 *transport = sctp_assoc_lookup_paddr(asoc, paddr); 256 *transport = sctp_assoc_lookup_paddr(asoc, paddr);
266 sctp_read_unlock(&asoc->base.addr_lock);
267 257
268 if (*transport) 258 if (*transport)
269 return asoc; 259 return asoc;
@@ -295,20 +285,17 @@ struct sctp_association *sctp_endpoint_lookup_assoc(
295int sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep, 285int sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep,
296 const union sctp_addr *paddr) 286 const union sctp_addr *paddr)
297{ 287{
298 struct list_head *pos;
299 struct sctp_sockaddr_entry *addr; 288 struct sctp_sockaddr_entry *addr;
300 struct sctp_bind_addr *bp; 289 struct sctp_bind_addr *bp;
301 290
302 sctp_read_lock(&ep->base.addr_lock);
303 bp = &ep->base.bind_addr; 291 bp = &ep->base.bind_addr;
304 list_for_each(pos, &bp->address_list) { 292 /* This function is called with the socket lock held,
 305 addr = list_entry(pos, struct sctp_sockaddr_entry, list); 293 * so the address_list cannot change.
306 if (sctp_has_association(&addr->a, paddr)) { 294 */
307 sctp_read_unlock(&ep->base.addr_lock); 295 list_for_each_entry(addr, &bp->address_list, list) {
296 if (sctp_has_association(&addr->a, paddr))
308 return 1; 297 return 1;
309 }
310 } 298 }
311 sctp_read_unlock(&ep->base.addr_lock);
312 299
313 return 0; 300 return 0;
314} 301}
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 47e56017f4ce..f9a0c9276e3b 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -622,6 +622,14 @@ static int sctp_rcv_ootb(struct sk_buff *skb)
622 if (SCTP_CID_SHUTDOWN_COMPLETE == ch->type) 622 if (SCTP_CID_SHUTDOWN_COMPLETE == ch->type)
623 goto discard; 623 goto discard;
624 624
625 /* RFC 4460, 2.11.2
 626 * This discards packets with an INIT chunk bundled as a
 627 * subsequent chunk in the packet. When INIT comes first,
 628 * the normal INIT processing will discard the chunk.
629 */
630 if (SCTP_CID_INIT == ch->type && (void *)ch != skb->data)
631 goto discard;
632
625 /* RFC 8.4, 7) If the packet contains a "Stale cookie" ERROR 633 /* RFC 8.4, 7) If the packet contains a "Stale cookie" ERROR
626 * or a COOKIE ACK the SCTP Packet should be silently 634 * or a COOKIE ACK the SCTP Packet should be silently
627 * discarded. 635 * discarded.
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index 88aa22407549..e4ea7fdf36ed 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -130,6 +130,14 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
130 /* Force chunk->skb->data to chunk->chunk_end. */ 130 /* Force chunk->skb->data to chunk->chunk_end. */
131 skb_pull(chunk->skb, 131 skb_pull(chunk->skb,
132 chunk->chunk_end - chunk->skb->data); 132 chunk->chunk_end - chunk->skb->data);
133
 134 /* Verify that we have at least a chunk header's
 135 * worth of buffer left.
136 */
137 if (skb_headlen(chunk->skb) < sizeof(sctp_chunkhdr_t)) {
138 sctp_chunk_free(chunk);
139 chunk = queue->in_progress = NULL;
140 }
133 } 141 }
134 } 142 }
135 143
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index f8aa23dda1c1..670fd2740b89 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -77,13 +77,18 @@
77 77
78#include <asm/uaccess.h> 78#include <asm/uaccess.h>
79 79
80/* Event handler for inet6 address addition/deletion events. */ 80/* Event handler for inet6 address addition/deletion events.
 81 * The sctp_local_addr_list needs to be protected by a spin lock since
82 * multiple notifiers (say IPv4 and IPv6) may be running at the same
83 * time and thus corrupt the list.
84 * The reader side is protected with RCU.
85 */
81static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev, 86static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
82 void *ptr) 87 void *ptr)
83{ 88{
84 struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr; 89 struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
85 struct sctp_sockaddr_entry *addr; 90 struct sctp_sockaddr_entry *addr = NULL;
86 struct list_head *pos, *temp; 91 struct sctp_sockaddr_entry *temp;
87 92
88 switch (ev) { 93 switch (ev) {
89 case NETDEV_UP: 94 case NETDEV_UP:
@@ -94,19 +99,26 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
94 memcpy(&addr->a.v6.sin6_addr, &ifa->addr, 99 memcpy(&addr->a.v6.sin6_addr, &ifa->addr,
95 sizeof(struct in6_addr)); 100 sizeof(struct in6_addr));
96 addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex; 101 addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
97 list_add_tail(&addr->list, &sctp_local_addr_list); 102 addr->valid = 1;
103 spin_lock_bh(&sctp_local_addr_lock);
104 list_add_tail_rcu(&addr->list, &sctp_local_addr_list);
105 spin_unlock_bh(&sctp_local_addr_lock);
98 } 106 }
99 break; 107 break;
100 case NETDEV_DOWN: 108 case NETDEV_DOWN:
101 list_for_each_safe(pos, temp, &sctp_local_addr_list) { 109 spin_lock_bh(&sctp_local_addr_lock);
102 addr = list_entry(pos, struct sctp_sockaddr_entry, list); 110 list_for_each_entry_safe(addr, temp,
103 if (ipv6_addr_equal(&addr->a.v6.sin6_addr, &ifa->addr)) { 111 &sctp_local_addr_list, list) {
104 list_del(pos); 112 if (ipv6_addr_equal(&addr->a.v6.sin6_addr,
105 kfree(addr); 113 &ifa->addr)) {
114 addr->valid = 0;
115 list_del_rcu(&addr->list);
106 break; 116 break;
107 } 117 }
108 } 118 }
109 119 spin_unlock_bh(&sctp_local_addr_lock);
120 if (addr && !addr->valid)
121 call_rcu(&addr->rcu, sctp_local_addr_free);
110 break; 122 break;
111 } 123 }
112 124
@@ -290,9 +302,7 @@ static void sctp_v6_get_saddr(struct sctp_association *asoc,
290 union sctp_addr *saddr) 302 union sctp_addr *saddr)
291{ 303{
292 struct sctp_bind_addr *bp; 304 struct sctp_bind_addr *bp;
293 rwlock_t *addr_lock;
294 struct sctp_sockaddr_entry *laddr; 305 struct sctp_sockaddr_entry *laddr;
295 struct list_head *pos;
296 sctp_scope_t scope; 306 sctp_scope_t scope;
297 union sctp_addr *baddr = NULL; 307 union sctp_addr *baddr = NULL;
298 __u8 matchlen = 0; 308 __u8 matchlen = 0;
@@ -312,14 +322,14 @@ static void sctp_v6_get_saddr(struct sctp_association *asoc,
312 scope = sctp_scope(daddr); 322 scope = sctp_scope(daddr);
313 323
314 bp = &asoc->base.bind_addr; 324 bp = &asoc->base.bind_addr;
315 addr_lock = &asoc->base.addr_lock;
316 325
317 /* Go through the bind address list and find the best source address 326 /* Go through the bind address list and find the best source address
318 * that matches the scope of the destination address. 327 * that matches the scope of the destination address.
319 */ 328 */
320 sctp_read_lock(addr_lock); 329 rcu_read_lock();
321 list_for_each(pos, &bp->address_list) { 330 list_for_each_entry_rcu(laddr, &bp->address_list, list) {
322 laddr = list_entry(pos, struct sctp_sockaddr_entry, list); 331 if (!laddr->valid)
332 continue;
323 if ((laddr->use_as_src) && 333 if ((laddr->use_as_src) &&
324 (laddr->a.sa.sa_family == AF_INET6) && 334 (laddr->a.sa.sa_family == AF_INET6) &&
325 (scope <= sctp_scope(&laddr->a))) { 335 (scope <= sctp_scope(&laddr->a))) {
@@ -341,7 +351,7 @@ static void sctp_v6_get_saddr(struct sctp_association *asoc,
341 __FUNCTION__, asoc, NIP6(daddr->v6.sin6_addr)); 351 __FUNCTION__, asoc, NIP6(daddr->v6.sin6_addr));
342 } 352 }
343 353
344 sctp_read_unlock(addr_lock); 354 rcu_read_unlock();
345} 355}
346 356
347/* Make a copy of all potential local addresses. */ 357/* Make a copy of all potential local addresses. */
@@ -367,7 +377,9 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
367 addr->a.v6.sin6_port = 0; 377 addr->a.v6.sin6_port = 0;
368 addr->a.v6.sin6_addr = ifp->addr; 378 addr->a.v6.sin6_addr = ifp->addr;
369 addr->a.v6.sin6_scope_id = dev->ifindex; 379 addr->a.v6.sin6_scope_id = dev->ifindex;
380 addr->valid = 1;
370 INIT_LIST_HEAD(&addr->list); 381 INIT_LIST_HEAD(&addr->list);
382 INIT_RCU_HEAD(&addr->rcu);
371 list_add_tail(&addr->list, addrlist); 383 list_add_tail(&addr->list, addrlist);
372 } 384 }
373 } 385 }
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 992f361084b7..28f4fe77ceee 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -421,6 +421,13 @@ void sctp_retransmit_mark(struct sctp_outq *q,
421 */ 421 */
422 if ((fast_retransmit && (chunk->fast_retransmit > 0)) || 422 if ((fast_retransmit && (chunk->fast_retransmit > 0)) ||
423 (!fast_retransmit && !chunk->tsn_gap_acked)) { 423 (!fast_retransmit && !chunk->tsn_gap_acked)) {
 424 /* If this chunk was sent less than 1 RTO ago, do not
 425 * retransmit this chunk, but give the peer time
 426 * to acknowledge it.
427 */
428 if ((jiffies - chunk->sent_at) < transport->rto)
429 continue;
430
424 /* RFC 2960 6.2.1 Processing a Received SACK 431 /* RFC 2960 6.2.1 Processing a Received SACK
425 * 432 *
426 * C) Any time a DATA chunk is marked for 433 * C) Any time a DATA chunk is marked for
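The new early-continue in sctp_retransmit_mark() above compares chunk age against the transport RTO using free-running jiffies arithmetic, which stays correct across counter wraparound as long as the subtraction is done in unsigned math. A minimal sketch of that wrap-safe age test (the 32-bit tick type is an assumption for illustration, not the kernel's jiffies type):

#include <stdbool.h>
#include <stdint.h>

/* True while fewer than rto_ticks have elapsed since sent_at, even if
 * the tick counter wrapped between the two samples.
 */
static bool too_recent(uint32_t now, uint32_t sent_at, uint32_t rto_ticks)
{
	return (uint32_t)(now - sent_at) < rto_ticks;
}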
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index e98579b788b8..3d036cdfae41 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -153,6 +153,9 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist,
153 addr->a.v4.sin_family = AF_INET; 153 addr->a.v4.sin_family = AF_INET;
154 addr->a.v4.sin_port = 0; 154 addr->a.v4.sin_port = 0;
155 addr->a.v4.sin_addr.s_addr = ifa->ifa_local; 155 addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
156 addr->valid = 1;
157 INIT_LIST_HEAD(&addr->list);
158 INIT_RCU_HEAD(&addr->rcu);
156 list_add_tail(&addr->list, addrlist); 159 list_add_tail(&addr->list, addrlist);
157 } 160 }
158 } 161 }
@@ -192,16 +195,24 @@ static void sctp_free_local_addr_list(void)
192 } 195 }
193} 196}
194 197
198void sctp_local_addr_free(struct rcu_head *head)
199{
200 struct sctp_sockaddr_entry *e = container_of(head,
201 struct sctp_sockaddr_entry, rcu);
202 kfree(e);
203}
204
195/* Copy the local addresses which are valid for 'scope' into 'bp'. */ 205/* Copy the local addresses which are valid for 'scope' into 'bp'. */
196int sctp_copy_local_addr_list(struct sctp_bind_addr *bp, sctp_scope_t scope, 206int sctp_copy_local_addr_list(struct sctp_bind_addr *bp, sctp_scope_t scope,
197 gfp_t gfp, int copy_flags) 207 gfp_t gfp, int copy_flags)
198{ 208{
199 struct sctp_sockaddr_entry *addr; 209 struct sctp_sockaddr_entry *addr;
200 int error = 0; 210 int error = 0;
201 struct list_head *pos, *temp;
202 211
203 list_for_each_safe(pos, temp, &sctp_local_addr_list) { 212 rcu_read_lock();
204 addr = list_entry(pos, struct sctp_sockaddr_entry, list); 213 list_for_each_entry_rcu(addr, &sctp_local_addr_list, list) {
214 if (!addr->valid)
215 continue;
205 if (sctp_in_scope(&addr->a, scope)) { 216 if (sctp_in_scope(&addr->a, scope)) {
206 /* Now that the address is in scope, check to see if 217 /* Now that the address is in scope, check to see if
207 * the address type is really supported by the local 218 * the address type is really supported by the local
@@ -213,7 +224,7 @@ int sctp_copy_local_addr_list(struct sctp_bind_addr *bp, sctp_scope_t scope,
213 (copy_flags & SCTP_ADDR6_ALLOWED) && 224 (copy_flags & SCTP_ADDR6_ALLOWED) &&
214 (copy_flags & SCTP_ADDR6_PEERSUPP)))) { 225 (copy_flags & SCTP_ADDR6_PEERSUPP)))) {
215 error = sctp_add_bind_addr(bp, &addr->a, 1, 226 error = sctp_add_bind_addr(bp, &addr->a, 1,
216 GFP_ATOMIC); 227 GFP_ATOMIC);
217 if (error) 228 if (error)
218 goto end_copy; 229 goto end_copy;
219 } 230 }
@@ -221,6 +232,7 @@ int sctp_copy_local_addr_list(struct sctp_bind_addr *bp, sctp_scope_t scope,
221 } 232 }
222 233
223end_copy: 234end_copy:
235 rcu_read_unlock();
224 return error; 236 return error;
225} 237}
226 238
@@ -416,9 +428,7 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
416 struct rtable *rt; 428 struct rtable *rt;
417 struct flowi fl; 429 struct flowi fl;
418 struct sctp_bind_addr *bp; 430 struct sctp_bind_addr *bp;
419 rwlock_t *addr_lock;
420 struct sctp_sockaddr_entry *laddr; 431 struct sctp_sockaddr_entry *laddr;
421 struct list_head *pos;
422 struct dst_entry *dst = NULL; 432 struct dst_entry *dst = NULL;
423 union sctp_addr dst_saddr; 433 union sctp_addr dst_saddr;
424 434
@@ -447,23 +457,20 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
447 goto out; 457 goto out;
448 458
449 bp = &asoc->base.bind_addr; 459 bp = &asoc->base.bind_addr;
450 addr_lock = &asoc->base.addr_lock;
451 460
452 if (dst) { 461 if (dst) {
453 /* Walk through the bind address list and look for a bind 462 /* Walk through the bind address list and look for a bind
454 * address that matches the source address of the returned dst. 463 * address that matches the source address of the returned dst.
455 */ 464 */
456 sctp_read_lock(addr_lock); 465 rcu_read_lock();
457 list_for_each(pos, &bp->address_list) { 466 list_for_each_entry_rcu(laddr, &bp->address_list, list) {
458 laddr = list_entry(pos, struct sctp_sockaddr_entry, 467 if (!laddr->valid || !laddr->use_as_src)
459 list);
460 if (!laddr->use_as_src)
461 continue; 468 continue;
462 sctp_v4_dst_saddr(&dst_saddr, dst, htons(bp->port)); 469 sctp_v4_dst_saddr(&dst_saddr, dst, htons(bp->port));
463 if (sctp_v4_cmp_addr(&dst_saddr, &laddr->a)) 470 if (sctp_v4_cmp_addr(&dst_saddr, &laddr->a))
464 goto out_unlock; 471 goto out_unlock;
465 } 472 }
466 sctp_read_unlock(addr_lock); 473 rcu_read_unlock();
467 474
468 /* None of the bound addresses match the source address of the 475 /* None of the bound addresses match the source address of the
469 * dst. So release it. 476 * dst. So release it.
@@ -475,10 +482,10 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
475 /* Walk through the bind address list and try to get a dst that 482 /* Walk through the bind address list and try to get a dst that
476 * matches a bind address as the source address. 483 * matches a bind address as the source address.
477 */ 484 */
478 sctp_read_lock(addr_lock); 485 rcu_read_lock();
479 list_for_each(pos, &bp->address_list) { 486 list_for_each_entry_rcu(laddr, &bp->address_list, list) {
480 laddr = list_entry(pos, struct sctp_sockaddr_entry, list); 487 if (!laddr->valid)
481 488 continue;
482 if ((laddr->use_as_src) && 489 if ((laddr->use_as_src) &&
483 (AF_INET == laddr->a.sa.sa_family)) { 490 (AF_INET == laddr->a.sa.sa_family)) {
484 fl.fl4_src = laddr->a.v4.sin_addr.s_addr; 491 fl.fl4_src = laddr->a.v4.sin_addr.s_addr;
@@ -490,7 +497,7 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
490 } 497 }
491 498
492out_unlock: 499out_unlock:
493 sctp_read_unlock(addr_lock); 500 rcu_read_unlock();
494out: 501out:
495 if (dst) 502 if (dst)
496 SCTP_DEBUG_PRINTK("rt_dst:%u.%u.%u.%u, rt_src:%u.%u.%u.%u\n", 503 SCTP_DEBUG_PRINTK("rt_dst:%u.%u.%u.%u, rt_src:%u.%u.%u.%u\n",
@@ -600,13 +607,18 @@ static void sctp_v4_seq_dump_addr(struct seq_file *seq, union sctp_addr *addr)
600 seq_printf(seq, "%d.%d.%d.%d ", NIPQUAD(addr->v4.sin_addr)); 607 seq_printf(seq, "%d.%d.%d.%d ", NIPQUAD(addr->v4.sin_addr));
601} 608}
602 609
603/* Event handler for inet address addition/deletion events. */ 610/* Event handler for inet address addition/deletion events.
 611 * The sctp_local_addr_list needs to be protected by a spin lock since
612 * multiple notifiers (say IPv4 and IPv6) may be running at the same
613 * time and thus corrupt the list.
614 * The reader side is protected with RCU.
615 */
604static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev, 616static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
605 void *ptr) 617 void *ptr)
606{ 618{
607 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; 619 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
608 struct sctp_sockaddr_entry *addr; 620 struct sctp_sockaddr_entry *addr = NULL;
609 struct list_head *pos, *temp; 621 struct sctp_sockaddr_entry *temp;
610 622
611 switch (ev) { 623 switch (ev) {
612 case NETDEV_UP: 624 case NETDEV_UP:
@@ -615,19 +627,25 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
615 addr->a.v4.sin_family = AF_INET; 627 addr->a.v4.sin_family = AF_INET;
616 addr->a.v4.sin_port = 0; 628 addr->a.v4.sin_port = 0;
617 addr->a.v4.sin_addr.s_addr = ifa->ifa_local; 629 addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
618 list_add_tail(&addr->list, &sctp_local_addr_list); 630 addr->valid = 1;
631 spin_lock_bh(&sctp_local_addr_lock);
632 list_add_tail_rcu(&addr->list, &sctp_local_addr_list);
633 spin_unlock_bh(&sctp_local_addr_lock);
619 } 634 }
620 break; 635 break;
621 case NETDEV_DOWN: 636 case NETDEV_DOWN:
622 list_for_each_safe(pos, temp, &sctp_local_addr_list) { 637 spin_lock_bh(&sctp_local_addr_lock);
623 addr = list_entry(pos, struct sctp_sockaddr_entry, list); 638 list_for_each_entry_safe(addr, temp,
639 &sctp_local_addr_list, list) {
624 if (addr->a.v4.sin_addr.s_addr == ifa->ifa_local) { 640 if (addr->a.v4.sin_addr.s_addr == ifa->ifa_local) {
625 list_del(pos); 641 addr->valid = 0;
626 kfree(addr); 642 list_del_rcu(&addr->list);
627 break; 643 break;
628 } 644 }
629 } 645 }
630 646 spin_unlock_bh(&sctp_local_addr_lock);
647 if (addr && !addr->valid)
648 call_rcu(&addr->rcu, sctp_local_addr_free);
631 break; 649 break;
632 } 650 }
633 651
@@ -1160,6 +1178,7 @@ SCTP_STATIC __init int sctp_init(void)
1160 1178
1161 /* Initialize the local address list. */ 1179 /* Initialize the local address list. */
1162 INIT_LIST_HEAD(&sctp_local_addr_list); 1180 INIT_LIST_HEAD(&sctp_local_addr_list);
1181 spin_lock_init(&sctp_local_addr_lock);
1163 sctp_get_local_addr_list(); 1182 sctp_get_local_addr_list();
1164 1183
1165 /* Register notifier for inet address additions/deletions. */ 1184 /* Register notifier for inet address additions/deletions. */
@@ -1227,6 +1246,9 @@ SCTP_STATIC __exit void sctp_exit(void)
1227 sctp_v6_del_protocol(); 1246 sctp_v6_del_protocol();
1228 inet_del_protocol(&sctp_protocol, IPPROTO_SCTP); 1247 inet_del_protocol(&sctp_protocol, IPPROTO_SCTP);
1229 1248
1249 /* Unregister notifier for inet address additions/deletions. */
1250 unregister_inetaddr_notifier(&sctp_inetaddr_notifier);
1251
1230 /* Free the local address list. */ 1252 /* Free the local address list. */
1231 sctp_free_local_addr_list(); 1253 sctp_free_local_addr_list();
1232 1254
@@ -1240,9 +1262,6 @@ SCTP_STATIC __exit void sctp_exit(void)
1240 inet_unregister_protosw(&sctp_stream_protosw); 1262 inet_unregister_protosw(&sctp_stream_protosw);
1241 inet_unregister_protosw(&sctp_seqpacket_protosw); 1263 inet_unregister_protosw(&sctp_seqpacket_protosw);
1242 1264
1243 /* Unregister notifier for inet address additions/deletions. */
1244 unregister_inetaddr_notifier(&sctp_inetaddr_notifier);
1245
1246 sctp_sysctl_unregister(); 1265 sctp_sysctl_unregister();
1247 list_del(&sctp_ipv4_specific.list); 1266 list_del(&sctp_ipv4_specific.list);
1248 1267
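Note the ordering fix at the tail of sctp_exit() above: the inetaddr notifier is now unregistered before sctp_free_local_addr_list() runs, closing the window where a late NETDEV event could walk a list that is already being freed. The rule generalizes: tear down event sources before the data their handlers touch. A toy single-threaded illustration (a plain function pointer standing in for the notifier chain):

#include <stdio.h>

static void (*inetaddr_notifier)(void); /* may fire until unregistered */

static void addr_event(void)
{
	puts("handler walks the local address list");
}

static void sctp_exit_path(void)
{
	inetaddr_notifier = NULL;               /* 1: no new events can fire */
	puts("local address list freed");       /* 2: only now safe to free */
}

int main(void)
{
	inetaddr_notifier = addr_event;
	if (inetaddr_notifier)
		inetaddr_notifier();            /* an event before teardown */
	sctp_exit_path();
	return 0;
}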
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 51c4d7fef1d2..23ae37ec8711 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -110,7 +110,7 @@ static const struct sctp_paramhdr prsctp_param = {
110 * abort chunk. 110 * abort chunk.
111 */ 111 */
112void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code, 112void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
113 const void *payload, size_t paylen) 113 size_t paylen)
114{ 114{
115 sctp_errhdr_t err; 115 sctp_errhdr_t err;
116 __u16 len; 116 __u16 len;
@@ -120,7 +120,6 @@ void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
120 len = sizeof(sctp_errhdr_t) + paylen; 120 len = sizeof(sctp_errhdr_t) + paylen;
121 err.length = htons(len); 121 err.length = htons(len);
122 chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err); 122 chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err);
123 sctp_addto_chunk(chunk, paylen, payload);
124} 123}
125 124
126/* 3.3.2 Initiation (INIT) (1) 125/* 3.3.2 Initiation (INIT) (1)
@@ -780,8 +779,8 @@ struct sctp_chunk *sctp_make_abort_no_data(
780 779
781 /* Put the tsn back into network byte order. */ 780 /* Put the tsn back into network byte order. */
782 payload = htonl(tsn); 781 payload = htonl(tsn);
783 sctp_init_cause(retval, SCTP_ERROR_NO_DATA, (const void *)&payload, 782 sctp_init_cause(retval, SCTP_ERROR_NO_DATA, sizeof(payload));
784 sizeof(payload)); 783 sctp_addto_chunk(retval, sizeof(payload), (const void *)&payload);
785 784
786 /* RFC 2960 6.4 Multi-homed SCTP Endpoints 785 /* RFC 2960 6.4 Multi-homed SCTP Endpoints
787 * 786 *
@@ -823,7 +822,8 @@ struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc,
823 goto err_copy; 822 goto err_copy;
824 } 823 }
825 824
826 sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, payload, paylen); 825 sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, paylen);
826 sctp_addto_chunk(retval, paylen, payload);
827 827
828 if (paylen) 828 if (paylen)
829 kfree(payload); 829 kfree(payload);
@@ -850,15 +850,17 @@ struct sctp_chunk *sctp_make_abort_violation(
850 struct sctp_paramhdr phdr; 850 struct sctp_paramhdr phdr;
851 851
852 retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) + paylen 852 retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) + paylen
853 + sizeof(sctp_chunkhdr_t)); 853 + sizeof(sctp_paramhdr_t));
854 if (!retval) 854 if (!retval)
855 goto end; 855 goto end;
856 856
857 sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, payload, paylen); 857 sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, paylen
858 + sizeof(sctp_paramhdr_t));
858 859
859 phdr.type = htons(chunk->chunk_hdr->type); 860 phdr.type = htons(chunk->chunk_hdr->type);
860 phdr.length = chunk->chunk_hdr->length; 861 phdr.length = chunk->chunk_hdr->length;
861 sctp_addto_chunk(retval, sizeof(sctp_paramhdr_t), &phdr); 862 sctp_addto_chunk(retval, paylen, payload);
863 sctp_addto_param(retval, sizeof(sctp_paramhdr_t), &phdr);
862 864
863end: 865end:
864 return retval; 866 return retval;
@@ -955,7 +957,8 @@ struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc,
955 if (!retval) 957 if (!retval)
956 goto nodata; 958 goto nodata;
957 959
958 sctp_init_cause(retval, cause_code, payload, paylen); 960 sctp_init_cause(retval, cause_code, paylen);
961 sctp_addto_chunk(retval, paylen, payload);
959 962
960nodata: 963nodata:
961 return retval; 964 return retval;
@@ -1128,7 +1131,7 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data)
1128 void *target; 1131 void *target;
1129 void *padding; 1132 void *padding;
1130 int chunklen = ntohs(chunk->chunk_hdr->length); 1133 int chunklen = ntohs(chunk->chunk_hdr->length);
1131 int padlen = chunklen % 4; 1134 int padlen = WORD_ROUND(chunklen) - chunklen;
1132 1135
1133 padding = skb_put(chunk->skb, padlen); 1136 padding = skb_put(chunk->skb, padlen);
1134 target = skb_put(chunk->skb, len); 1137 target = skb_put(chunk->skb, len);
@@ -1143,6 +1146,25 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data)
1143 return target; 1146 return target;
1144} 1147}
1145 1148
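The one-line padlen change above is a real correctness fix, not a cleanup: chunklen % 4 is the number of bytes past the last word boundary, not the number of pad bytes needed to reach the next one, so a 17-byte chunk was padded with 1 byte where RFC 2960 requires 3. A minimal userspace sketch of the corrected arithmetic, assuming WORD_ROUND(s) behaves like the kernel macro (((s) + 3) & ~3):

#include <stdio.h>

#define WORD_ROUND(s) (((s) + 3) & ~3)

int main(void)
{
	int chunklen;

	for (chunklen = 16; chunklen <= 20; chunklen++) {
		int old_padlen = chunklen % 4;                    /* buggy */
		int new_padlen = WORD_ROUND(chunklen) - chunklen; /* fixed */

		printf("len=%2d old_pad=%d new_pad=%d\n",
		       chunklen, old_padlen, new_padlen);
	}
	return 0;
}

Only lengths with remainder 1 or 3 disagree, which is how the bug could hide behind commonly word-aligned parameters.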
1149/* Append bytes to the end of a parameter. Will panic if chunk is not big
1150 * enough.
1151 */
1152void *sctp_addto_param(struct sctp_chunk *chunk, int len, const void *data)
1153{
1154 void *target;
1155 int chunklen = ntohs(chunk->chunk_hdr->length);
1156
1157 target = skb_put(chunk->skb, len);
1158
1159 memcpy(target, data, len);
1160
1161 /* Adjust the chunk length field. */
1162 chunk->chunk_hdr->length = htons(chunklen + len);
1163 chunk->chunk_end = skb_tail_pointer(chunk->skb);
1164
1165 return target;
1166}
1167
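sctp_addto_param() intentionally differs from sctp_addto_chunk() in one respect: it never inserts pad bytes before the data, because its callers glue a parameter header directly onto a variable-length error cause, and padding in the middle would corrupt the cause. A rough offset-only sketch of the difference (userspace, same WORD_ROUND assumption as above):

#include <stdio.h>

#define WORD_ROUND(s) (((s) + 3) & ~3)

int main(void)
{
	/* chunk header (4) + error cause header (4) + 6-byte payload */
	int chunklen = 4 + 4 + 6;

	/* sctp_addto_chunk() pads to a word boundary first, so the next
	 * bytes would land two past the payload ... */
	printf("padded append lands at offset %d\n", WORD_ROUND(chunklen));

	/* ... while sctp_addto_param() appends immediately, keeping the
	 * trailing parameter header contiguous with the cause payload. */
	printf("unpadded append lands at offset %d\n", chunklen);
	return 0;
}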
1146/* Append bytes from user space to the end of a chunk. Will panic if 1168/* Append bytes from user space to the end of a chunk. Will panic if
1147 * chunk is not big enough. 1169 * chunk is not big enough.
1148 * Returns a kernel err value. 1170 * Returns a kernel err value.
@@ -1174,25 +1196,36 @@ out:
1174 */ 1196 */
1175void sctp_chunk_assign_ssn(struct sctp_chunk *chunk) 1197void sctp_chunk_assign_ssn(struct sctp_chunk *chunk)
1176{ 1198{
1199 struct sctp_datamsg *msg;
1200 struct sctp_chunk *lchunk;
1201 struct sctp_stream *stream;
1177 __u16 ssn; 1202 __u16 ssn;
1178 __u16 sid; 1203 __u16 sid;
1179 1204
1180 if (chunk->has_ssn) 1205 if (chunk->has_ssn)
1181 return; 1206 return;
1182 1207
1183 /* This is the last possible instant to assign an SSN. */ 1208 /* All fragments will be on the same stream */
1184 if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) { 1209 sid = ntohs(chunk->subh.data_hdr->stream);
1185 ssn = 0; 1210 stream = &chunk->asoc->ssnmap->out;
1186 } else { 1211
1187 sid = ntohs(chunk->subh.data_hdr->stream); 1212 /* Now assign the sequence number to the entire message.
1188 if (chunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG) 1213 * All fragments must have the same stream sequence number.
1189 ssn = sctp_ssn_next(&chunk->asoc->ssnmap->out, sid); 1214 */
1190 else 1215 msg = chunk->msg;
1191 ssn = sctp_ssn_peek(&chunk->asoc->ssnmap->out, sid); 1216 list_for_each_entry(lchunk, &msg->chunks, frag_list) {
1192 } 1217 if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
1218 ssn = 0;
1219 } else {
1220 if (lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG)
1221 ssn = sctp_ssn_next(stream, sid);
1222 else
1223 ssn = sctp_ssn_peek(stream, sid);
1224 }
1193 1225
1194 chunk->subh.data_hdr->ssn = htons(ssn); 1226 lchunk->subh.data_hdr->ssn = htons(ssn);
1195 chunk->has_ssn = 1; 1227 lchunk->has_ssn = 1;
1228 }
1196} 1229}
1197 1230
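The rewrite changes when SSNs are stamped: instead of each DATA fragment being numbered as it happens to pass through, the whole fragment list of a message is walked once, so every fragment carries the same stream sequence number and the per-stream counter advances only when the last fragment is stamped. A toy single-stream model of the assumed peek/next semantics (peek reads the counter, next post-increments it, matching how sctp_ssn_peek()/sctp_ssn_next() appear to be used here):

#include <stdio.h>

static unsigned short ssn_counter;

static unsigned short ssn_peek(void) { return ssn_counter; }
static unsigned short ssn_next(void) { return ssn_counter++; }

int main(void)
{
	int i, nfrags = 3;

	for (i = 0; i < nfrags; i++) {
		int last_frag = (i == nfrags - 1);
		unsigned short ssn = last_frag ? ssn_next() : ssn_peek();

		printf("frag %d: ssn %u\n", i, ssn);
	}
	printf("next message will start at ssn %u\n", ssn_peek());
	return 0;
}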
1198/* Helper function to assign a TSN if needed. This assumes that both 1231/* Helper function to assign a TSN if needed. This assumes that both
@@ -1466,7 +1499,8 @@ no_hmac:
1466 __be32 n = htonl(usecs); 1499 __be32 n = htonl(usecs);
1467 1500
1468 sctp_init_cause(*errp, SCTP_ERROR_STALE_COOKIE, 1501 sctp_init_cause(*errp, SCTP_ERROR_STALE_COOKIE,
1469 &n, sizeof(n)); 1502 sizeof(n));
1503 sctp_addto_chunk(*errp, sizeof(n), &n);
1470 *error = -SCTP_IERROR_STALE_COOKIE; 1504 *error = -SCTP_IERROR_STALE_COOKIE;
1471 } else 1505 } else
1472 *error = -SCTP_IERROR_NOMEM; 1506 *error = -SCTP_IERROR_NOMEM;
@@ -1497,7 +1531,7 @@ no_hmac:
1497 /* Also, add the destination address. */ 1531 /* Also, add the destination address. */
1498 if (list_empty(&retval->base.bind_addr.address_list)) { 1532 if (list_empty(&retval->base.bind_addr.address_list)) {
1499 sctp_add_bind_addr(&retval->base.bind_addr, &chunk->dest, 1, 1533 sctp_add_bind_addr(&retval->base.bind_addr, &chunk->dest, 1,
1500 GFP_ATOMIC); 1534 GFP_ATOMIC);
1501 } 1535 }
1502 1536
1503 retval->next_tsn = retval->c.initial_tsn; 1537 retval->next_tsn = retval->c.initial_tsn;
@@ -1556,7 +1590,8 @@ static int sctp_process_missing_param(const struct sctp_association *asoc,
1556 report.num_missing = htonl(1); 1590 report.num_missing = htonl(1);
1557 report.type = paramtype; 1591 report.type = paramtype;
1558 sctp_init_cause(*errp, SCTP_ERROR_MISS_PARAM, 1592 sctp_init_cause(*errp, SCTP_ERROR_MISS_PARAM,
1559 &report, sizeof(report)); 1593 sizeof(report));
1594 sctp_addto_chunk(*errp, sizeof(report), &report);
1560 } 1595 }
1561 1596
1562 /* Stop processing this chunk. */ 1597 /* Stop processing this chunk. */
@@ -1574,7 +1609,7 @@ static int sctp_process_inv_mandatory(const struct sctp_association *asoc,
1574 *errp = sctp_make_op_error_space(asoc, chunk, 0); 1609 *errp = sctp_make_op_error_space(asoc, chunk, 0);
1575 1610
1576 if (*errp) 1611 if (*errp)
1577 sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM, NULL, 0); 1612 sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM, 0);
1578 1613
1579 /* Stop processing this chunk. */ 1614 /* Stop processing this chunk. */
1580 return 0; 1615 return 0;
@@ -1595,9 +1630,10 @@ static int sctp_process_inv_paramlength(const struct sctp_association *asoc,
1595 *errp = sctp_make_op_error_space(asoc, chunk, payload_len); 1630 *errp = sctp_make_op_error_space(asoc, chunk, payload_len);
1596 1631
1597 if (*errp) { 1632 if (*errp) {
1598 sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION, error, 1633 sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION,
1599 sizeof(error)); 1634 sizeof(error) + sizeof(sctp_paramhdr_t));
1600 sctp_addto_chunk(*errp, sizeof(sctp_paramhdr_t), param); 1635 sctp_addto_chunk(*errp, sizeof(error), error);
1636 sctp_addto_param(*errp, sizeof(sctp_paramhdr_t), param);
1601 } 1637 }
1602 1638
1603 return 0; 1639 return 0;
@@ -1618,9 +1654,10 @@ static int sctp_process_hn_param(const struct sctp_association *asoc,
1618 if (!*errp) 1654 if (!*errp)
1619 *errp = sctp_make_op_error_space(asoc, chunk, len); 1655 *errp = sctp_make_op_error_space(asoc, chunk, len);
1620 1656
1621 if (*errp) 1657 if (*errp) {
1622 sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED, 1658 sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED, len);
1623 param.v, len); 1659 sctp_addto_chunk(*errp, len, param.v);
1660 }
1624 1661
1625 /* Stop processing this chunk. */ 1662 /* Stop processing this chunk. */
1626 return 0; 1663 return 0;
@@ -1672,10 +1709,13 @@ static int sctp_process_unk_param(const struct sctp_association *asoc,
1672 *errp = sctp_make_op_error_space(asoc, chunk, 1709 *errp = sctp_make_op_error_space(asoc, chunk,
1673 ntohs(chunk->chunk_hdr->length)); 1710 ntohs(chunk->chunk_hdr->length));
1674 1711
1675 if (*errp) 1712 if (*errp) {
1676 sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM, 1713 sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
1677 param.v,
1678 WORD_ROUND(ntohs(param.p->length))); 1714 WORD_ROUND(ntohs(param.p->length)));
1715 sctp_addto_chunk(*errp,
1716 WORD_ROUND(ntohs(param.p->length)),
1717 param.v);
1718 }
1679 1719
1680 break; 1720 break;
1681 case SCTP_PARAM_ACTION_SKIP: 1721 case SCTP_PARAM_ACTION_SKIP:
@@ -1690,8 +1730,10 @@ static int sctp_process_unk_param(const struct sctp_association *asoc,
1690 1730
1691 if (*errp) { 1731 if (*errp) {
1692 sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM, 1732 sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
1693 param.v,
1694 WORD_ROUND(ntohs(param.p->length))); 1733 WORD_ROUND(ntohs(param.p->length)));
1734 sctp_addto_chunk(*errp,
1735 WORD_ROUND(ntohs(param.p->length)),
1736 param.v);
1695 } else { 1737 } else {
1696 /* If there is no memory for generating the ERROR 1738 /* If there is no memory for generating the ERROR
1697 * report as specified, an ABORT will be triggered 1739 * report as specified, an ABORT will be triggered
@@ -1791,7 +1833,7 @@ int sctp_verify_init(const struct sctp_association *asoc,
1791 * VIOLATION error. We build the ERROR chunk here and let the normal 1833 * VIOLATION error. We build the ERROR chunk here and let the normal
1792 * error handling code build and send the packet. 1834 * error handling code build and send the packet.
1793 */ 1835 */
1794 if (param.v < (void*)chunk->chunk_end - sizeof(sctp_paramhdr_t)) { 1836 if (param.v != (void*)chunk->chunk_end) {
1795 sctp_process_inv_paramlength(asoc, param.p, chunk, errp); 1837 sctp_process_inv_paramlength(asoc, param.p, chunk, errp);
1796 return 0; 1838 return 0;
1797 } 1839 }
@@ -2457,6 +2499,52 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
2457 return SCTP_ERROR_NO_ERROR; 2499 return SCTP_ERROR_NO_ERROR;
2458} 2500}
2459 2501
2502/* Verify the ASCONF packet before we process it. */
2503int sctp_verify_asconf(const struct sctp_association *asoc,
2504 struct sctp_paramhdr *param_hdr, void *chunk_end,
2505 struct sctp_paramhdr **errp) {
2506 sctp_addip_param_t *asconf_param;
2507 union sctp_params param;
2508 int length, plen;
2509
2510 param.v = (sctp_paramhdr_t *) param_hdr;
2511 while (param.v <= chunk_end - sizeof(sctp_paramhdr_t)) {
2512 length = ntohs(param.p->length);
2513 *errp = param.p;
2514
2515 if (param.v > chunk_end - length ||
2516 length < sizeof(sctp_paramhdr_t))
2517 return 0;
2518
2519 switch (param.p->type) {
2520 case SCTP_PARAM_ADD_IP:
2521 case SCTP_PARAM_DEL_IP:
2522 case SCTP_PARAM_SET_PRIMARY:
2523 asconf_param = (sctp_addip_param_t *)param.v;
2524 plen = ntohs(asconf_param->param_hdr.length);
2525 if (plen < sizeof(sctp_addip_param_t) +
2526 sizeof(sctp_paramhdr_t))
2527 return 0;
2528 break;
2529 case SCTP_PARAM_SUCCESS_REPORT:
2530 case SCTP_PARAM_ADAPTATION_LAYER_IND:
2531 if (length != sizeof(sctp_addip_param_t))
2532 return 0;
2533
2534 break;
2535 default:
2536 break;
2537 }
2538
2539 param.v += WORD_ROUND(length);
2540 }
2541
2542 if (param.v != chunk_end)
2543 return 0;
2544
2545 return 1;
2546}
2547
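sctp_verify_asconf() is a standard defensive TLV walk: every parameter header must fit in the remaining buffer, every declared length must neither undershoot the header size nor overrun the chunk, and after the last parameter the cursor must land exactly on chunk_end. A self-contained sketch of the same pattern (host byte order and a hypothetical struct tlv, purely for illustration):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv {
	uint16_t type;
	uint16_t length;	/* includes this header */
};

static int verify_tlvs(const uint8_t *p, const uint8_t *end)
{
	while (p + sizeof(struct tlv) <= end) {
		const struct tlv *t = (const struct tlv *)p;
		size_t len = t->length;

		if (len < sizeof(struct tlv) || len > (size_t)(end - p))
			return 0;		/* truncated or overlong */
		p += (len + 3) & ~3;		/* padded advance */
	}
	return p == end;			/* no trailing garbage */
}

int main(void)
{
	uint8_t buf[8] = { 0 };
	struct tlv t = { 1, 8 };		/* one 8-byte parameter */

	memcpy(buf, &t, sizeof(t));
	printf("%d\n", verify_tlvs(buf, buf + sizeof(buf)));	/* 1 */
	printf("%d\n", verify_tlvs(buf, buf + 6));		/* 0 */
	return 0;
}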
2460/* Process an incoming ASCONF chunk with the next expected serial no. and 2548/* Process an incoming ASCONF chunk with the next expected serial no. and
2461 * return an ASCONF_ACK chunk to be sent in response. 2549 * return an ASCONF_ACK chunk to be sent in response.
2462 */ 2550 */
@@ -2571,22 +2659,16 @@ static int sctp_asconf_param_success(struct sctp_association *asoc,
2571 2659
2572 switch (asconf_param->param_hdr.type) { 2660 switch (asconf_param->param_hdr.type) {
2573 case SCTP_PARAM_ADD_IP: 2661 case SCTP_PARAM_ADD_IP:
2574 sctp_local_bh_disable(); 2662 /* This is always done in BH context with a socket lock
2575 sctp_write_lock(&asoc->base.addr_lock); 2663 * held, so the list cannot change.
2576 list_for_each(pos, &bp->address_list) { 2664 */
2577 saddr = list_entry(pos, struct sctp_sockaddr_entry, list); 2665 list_for_each_entry(saddr, &bp->address_list, list) {
2578 if (sctp_cmp_addr_exact(&saddr->a, &addr)) 2666 if (sctp_cmp_addr_exact(&saddr->a, &addr))
2579 saddr->use_as_src = 1; 2667 saddr->use_as_src = 1;
2580 } 2668 }
2581 sctp_write_unlock(&asoc->base.addr_lock);
2582 sctp_local_bh_enable();
2583 break; 2669 break;
2584 case SCTP_PARAM_DEL_IP: 2670 case SCTP_PARAM_DEL_IP:
2585 sctp_local_bh_disable(); 2671 retval = sctp_del_bind_addr(bp, &addr, call_rcu_bh);
2586 sctp_write_lock(&asoc->base.addr_lock);
2587 retval = sctp_del_bind_addr(bp, &addr);
2588 sctp_write_unlock(&asoc->base.addr_lock);
2589 sctp_local_bh_enable();
2590 list_for_each(pos, &asoc->peer.transport_addr_list) { 2672 list_for_each(pos, &asoc->peer.transport_addr_list) {
2591 transport = list_entry(pos, struct sctp_transport, 2673 transport = list_entry(pos, struct sctp_transport,
2592 transports); 2674 transports);
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index d9fad4f6ffc3..8d7890083493 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1013,8 +1013,9 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
1013 break; 1013 break;
1014 1014
1015 case SCTP_DISPOSITION_VIOLATION: 1015 case SCTP_DISPOSITION_VIOLATION:
1016 printk(KERN_ERR "sctp protocol violation state %d " 1016 if (net_ratelimit())
1017 "chunkid %d\n", state, subtype.chunk); 1017 printk(KERN_ERR "sctp protocol violation state %d "
1018 "chunkid %d\n", state, subtype.chunk);
1018 break; 1019 break;
1019 1020
1020 case SCTP_DISPOSITION_NOT_IMPL: 1021 case SCTP_DISPOSITION_NOT_IMPL:
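Gating the printk behind net_ratelimit() matters because this path is reachable with arbitrary remote packets, so an unthrottled message is a free log-flooding primitive. The guard pattern is generic; a crude userspace stand-in (the real net_ratelimit() is a jiffies-based throttle, this sketch just budgets five messages per wall-clock second):

#include <stdio.h>
#include <time.h>

static int my_ratelimit(void)
{
	static time_t window;
	static int count;
	time_t now = time(NULL);

	if (now != window) {
		window = now;
		count = 0;
	}
	return ++count <= 5;	/* allow at most 5 messages per second */
}

int main(void)
{
	int i;

	for (i = 0; i < 20; i++)
		if (my_ratelimit())
			printf("sctp protocol violation %d\n", i);
	return 0;
}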
@@ -1130,6 +1131,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1130 /* Move the Cumulative TSN Ack ahead. */ 1131
1131 sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32); 1132 sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32);
1132 1133
1134 /* purge the fragmentation queue */
1135 sctp_ulpq_reasm_flushtsn(&asoc->ulpq, cmd->obj.u32);
1136
1133 /* Abort any in progress partial delivery. */ 1137 /* Abort any in progress partial delivery. */
1134 sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC); 1138 sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
1135 break; 1139 break;
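The new flush closes a leak-and-confusion hole: once the cumulative TSN is skipped forward, fragments queued for reassembly below the new point can never be completed, so they would pin memory and stall partial delivery. A toy model of the assumed rule, dropping everything at or below the skip point (plain integer compare here; the kernel uses serial-number arithmetic to cope with TSN wraparound):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t queued[] = { 100, 101, 103, 110 };
	uint32_t fwd_tsn = 105;		/* cumulative TSN ack moved here */
	unsigned int i;

	for (i = 0; i < sizeof(queued) / sizeof(queued[0]); i++) {
		if (queued[i] <= fwd_tsn)
			printf("flush tsn %u\n", queued[i]);
		else
			printf("keep  tsn %u\n", queued[i]);
	}
	return 0;
}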
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 71cad56dd73f..a583d67cab63 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -90,6 +90,11 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
90 const sctp_subtype_t type, 90 const sctp_subtype_t type,
91 void *arg, 91 void *arg,
92 sctp_cmd_seq_t *commands); 92 sctp_cmd_seq_t *commands);
93static sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
94 const struct sctp_association *asoc,
95 const sctp_subtype_t type,
96 void *arg,
97 sctp_cmd_seq_t *commands);
93static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk); 98static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk);
94 99
95static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands, 100static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
@@ -98,6 +103,7 @@ static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
98 struct sctp_transport *transport); 103 struct sctp_transport *transport);
99 104
100static sctp_disposition_t sctp_sf_abort_violation( 105static sctp_disposition_t sctp_sf_abort_violation(
106 const struct sctp_endpoint *ep,
101 const struct sctp_association *asoc, 107 const struct sctp_association *asoc,
102 void *arg, 108 void *arg,
103 sctp_cmd_seq_t *commands, 109 sctp_cmd_seq_t *commands,
@@ -111,6 +117,13 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
111 void *arg, 117 void *arg,
112 sctp_cmd_seq_t *commands); 118 sctp_cmd_seq_t *commands);
113 119
120static sctp_disposition_t sctp_sf_violation_paramlen(
121 const struct sctp_endpoint *ep,
122 const struct sctp_association *asoc,
123 const sctp_subtype_t type,
124 void *arg,
125 sctp_cmd_seq_t *commands);
126
114static sctp_disposition_t sctp_sf_violation_ctsn( 127static sctp_disposition_t sctp_sf_violation_ctsn(
115 const struct sctp_endpoint *ep, 128 const struct sctp_endpoint *ep,
116 const struct sctp_association *asoc, 129 const struct sctp_association *asoc,
@@ -118,6 +131,13 @@ static sctp_disposition_t sctp_sf_violation_ctsn(
118 void *arg, 131 void *arg,
119 sctp_cmd_seq_t *commands); 132 sctp_cmd_seq_t *commands);
120 133
134static sctp_disposition_t sctp_sf_violation_chunk(
135 const struct sctp_endpoint *ep,
136 const struct sctp_association *asoc,
137 const sctp_subtype_t type,
138 void *arg,
139 sctp_cmd_seq_t *commands);
140
121/* Small helper function that checks if the chunk length 141/* Small helper function that checks if the chunk length
122 * is of the appropriate length. The 'required_length' argument 142 * is of the appropriate length. The 'required_length' argument
123 * is set to be the size of a specific chunk we are testing. 143 * is set to be the size of a specific chunk we are testing.
@@ -181,16 +201,21 @@ sctp_disposition_t sctp_sf_do_4_C(const struct sctp_endpoint *ep,
181 struct sctp_chunk *chunk = arg; 201 struct sctp_chunk *chunk = arg;
182 struct sctp_ulpevent *ev; 202 struct sctp_ulpevent *ev;
183 203
204 if (!sctp_vtag_verify_either(chunk, asoc))
205 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
206
184 /* RFC 2960 6.10 Bundling 207 /* RFC 2960 6.10 Bundling
185 * 208 *
186 * An endpoint MUST NOT bundle INIT, INIT ACK or 209 * An endpoint MUST NOT bundle INIT, INIT ACK or
187 * SHUTDOWN COMPLETE with any other chunks. 210 * SHUTDOWN COMPLETE with any other chunks.
188 */ 211 */
189 if (!chunk->singleton) 212 if (!chunk->singleton)
190 return SCTP_DISPOSITION_VIOLATION; 213 return sctp_sf_violation_chunk(ep, asoc, type, arg, commands);
191 214
192 if (!sctp_vtag_verify_either(chunk, asoc)) 215 /* Make sure that the SHUTDOWN_COMPLETE chunk has a valid length. */
193 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 216 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
217 return sctp_sf_violation_chunklen(ep, asoc, type, arg,
218 commands);
194 219
195 /* RFC 2960 10.2 SCTP-to-ULP 220 /* RFC 2960 10.2 SCTP-to-ULP
196 * 221 *
@@ -264,7 +289,6 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
264 struct sctp_chunk *err_chunk; 289 struct sctp_chunk *err_chunk;
265 struct sctp_packet *packet; 290 struct sctp_packet *packet;
266 sctp_unrecognized_param_t *unk_param; 291 sctp_unrecognized_param_t *unk_param;
267 struct sock *sk;
268 int len; 292 int len;
269 293
270 /* 6.10 Bundling 294 /* 6.10 Bundling
@@ -285,16 +309,6 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
285 if (ep == sctp_sk((sctp_get_ctl_sock()))->ep) 309 if (ep == sctp_sk((sctp_get_ctl_sock()))->ep)
286 return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands); 310 return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
287 311
288 sk = ep->base.sk;
289 /* If the endpoint is not listening or if the number of associations
290 * on the TCP-style socket exceed the max backlog, respond with an
291 * ABORT.
292 */
293 if (!sctp_sstate(sk, LISTENING) ||
294 (sctp_style(sk, TCP) &&
295 sk_acceptq_is_full(sk)))
296 return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
297
298 /* 3.1 A packet containing an INIT chunk MUST have a zero Verification 312 /* 3.1 A packet containing an INIT chunk MUST have a zero Verification
299 * Tag. 313 * Tag.
300 */ 314 */
@@ -461,17 +475,17 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
461 if (!sctp_vtag_verify(chunk, asoc)) 475 if (!sctp_vtag_verify(chunk, asoc))
462 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 476 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
463 477
464 /* Make sure that the INIT-ACK chunk has a valid length */
465 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_initack_chunk_t)))
466 return sctp_sf_violation_chunklen(ep, asoc, type, arg,
467 commands);
468 /* 6.10 Bundling 478 /* 6.10 Bundling
469 * An endpoint MUST NOT bundle INIT, INIT ACK or 479 * An endpoint MUST NOT bundle INIT, INIT ACK or
470 * SHUTDOWN COMPLETE with any other chunks. 480 * SHUTDOWN COMPLETE with any other chunks.
471 */ 481 */
472 if (!chunk->singleton) 482 if (!chunk->singleton)
473 return SCTP_DISPOSITION_VIOLATION; 483 return sctp_sf_violation_chunk(ep, asoc, type, arg, commands);
474 484
485 /* Make sure that the INIT-ACK chunk has a valid length */
486 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_initack_chunk_t)))
487 return sctp_sf_violation_chunklen(ep, asoc, type, arg,
488 commands);
475 /* Grab the INIT header. */ 489 /* Grab the INIT header. */
476 chunk->subh.init_hdr = (sctp_inithdr_t *) chunk->skb->data; 490 chunk->subh.init_hdr = (sctp_inithdr_t *) chunk->skb->data;
477 491
@@ -590,12 +604,13 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
590 struct sctp_ulpevent *ev, *ai_ev = NULL; 604 struct sctp_ulpevent *ev, *ai_ev = NULL;
591 int error = 0; 605 int error = 0;
592 struct sctp_chunk *err_chk_p; 606 struct sctp_chunk *err_chk_p;
607 struct sock *sk;
593 608
594 /* If the packet is an OOTB packet which is temporarily on the 609 /* If the packet is an OOTB packet which is temporarily on the
595 * control endpoint, respond with an ABORT. 610 * control endpoint, respond with an ABORT.
596 */ 611 */
597 if (ep == sctp_sk((sctp_get_ctl_sock()))->ep) 612 if (ep == sctp_sk((sctp_get_ctl_sock()))->ep)
598 return sctp_sf_ootb(ep, asoc, type, arg, commands); 613 return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
599 614
600 /* Make sure that the COOKIE_ECHO chunk has a valid length. 615 /* Make sure that the COOKIE_ECHO chunk has a valid length.
601 * In this case, we check that we have enough for at least a 616 * In this case, we check that we have enough for at least a
@@ -605,6 +620,15 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
605 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) 620 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
606 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 621 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
607 622
623 /* If the endpoint is not listening or if the number of associations
624 * on the TCP-style socket exceed the max backlog, respond with an
625 * ABORT.
626 */
627 sk = ep->base.sk;
628 if (!sctp_sstate(sk, LISTENING) ||
629 (sctp_style(sk, TCP) && sk_acceptq_is_full(sk)))
630 return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
631
608 /* "Decode" the chunk. We have no optional parameters so we 632 /* "Decode" the chunk. We have no optional parameters so we
609 * are in good shape. 633 * are in good shape.
610 */ 634 */
@@ -1032,19 +1056,21 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
1032 /* This should never happen, but lets log it if so. */ 1056 /* This should never happen, but lets log it if so. */
1033 if (unlikely(!link)) { 1057 if (unlikely(!link)) {
1034 if (from_addr.sa.sa_family == AF_INET6) { 1058 if (from_addr.sa.sa_family == AF_INET6) {
1035 printk(KERN_WARNING 1059 if (net_ratelimit())
1036 "%s association %p could not find address " 1060 printk(KERN_WARNING
1037 NIP6_FMT "\n", 1061 "%s association %p could not find address "
1038 __FUNCTION__, 1062 NIP6_FMT "\n",
1039 asoc, 1063 __FUNCTION__,
1040 NIP6(from_addr.v6.sin6_addr)); 1064 asoc,
1065 NIP6(from_addr.v6.sin6_addr));
1041 } else { 1066 } else {
1042 printk(KERN_WARNING 1067 if (net_ratelimit())
1043 "%s association %p could not find address " 1068 printk(KERN_WARNING
1044 NIPQUAD_FMT "\n", 1069 "%s association %p could not find address "
1045 __FUNCTION__, 1070 NIPQUAD_FMT "\n",
1046 asoc, 1071 __FUNCTION__,
1047 NIPQUAD(from_addr.v4.sin_addr.s_addr)); 1072 asoc,
1073 NIPQUAD(from_addr.v4.sin_addr.s_addr));
1048 } 1074 }
1049 return SCTP_DISPOSITION_DISCARD; 1075 return SCTP_DISPOSITION_DISCARD;
1050 } 1076 }
@@ -2495,6 +2521,11 @@ sctp_disposition_t sctp_sf_do_9_2_reshutack(const struct sctp_endpoint *ep,
2495 struct sctp_chunk *chunk = (struct sctp_chunk *) arg; 2521 struct sctp_chunk *chunk = (struct sctp_chunk *) arg;
2496 struct sctp_chunk *reply; 2522 struct sctp_chunk *reply;
2497 2523
2524 /* Make sure that the chunk has a valid length */
2525 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
2526 return sctp_sf_violation_chunklen(ep, asoc, type, arg,
2527 commands);
2528
2498 /* Since we are not going to really process this INIT, there 2529 /* Since we are not going to really process this INIT, there
2499 * is no point in verifying chunk boundaries. Just generate 2530
2500 * the SHUTDOWN ACK. 2531 * the SHUTDOWN ACK.
@@ -2928,7 +2959,7 @@ sctp_disposition_t sctp_sf_eat_sack_6_2(const struct sctp_endpoint *ep,
2928 * 2959 *
2929 * The return value is the disposition of the chunk. 2960 * The return value is the disposition of the chunk.
2930*/ 2961*/
2931sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep, 2962static sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
2932 const struct sctp_association *asoc, 2963 const struct sctp_association *asoc,
2933 const sctp_subtype_t type, 2964 const sctp_subtype_t type,
2934 void *arg, 2965 void *arg,
@@ -2964,6 +2995,7 @@ sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
2964 2995
2965 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); 2996 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
2966 2997
2998 sctp_sf_pdiscard(ep, asoc, type, arg, commands);
2967 return SCTP_DISPOSITION_CONSUME; 2999 return SCTP_DISPOSITION_CONSUME;
2968 } 3000 }
2969 3001
@@ -3124,14 +3156,14 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
3124 3156
3125 ch = (sctp_chunkhdr_t *) chunk->chunk_hdr; 3157 ch = (sctp_chunkhdr_t *) chunk->chunk_hdr;
3126 do { 3158 do {
3127 /* Break out if chunk length is less than minimal. */ 3159 /* Report violation if the chunk is less than minimal */
3128 if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t)) 3160 if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
3129 break; 3161 return sctp_sf_violation_chunklen(ep, asoc, type, arg,
3130 3162 commands);
3131 ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
3132 if (ch_end > skb_tail_pointer(skb))
3133 break;
3134 3163
3164 /* Now that we know we at least have a chunk header,
3165 * do things that are type appropriate.
3166 */
3135 if (SCTP_CID_SHUTDOWN_ACK == ch->type) 3167 if (SCTP_CID_SHUTDOWN_ACK == ch->type)
3136 ootb_shut_ack = 1; 3168 ootb_shut_ack = 1;
3137 3169
@@ -3143,15 +3175,19 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
3143 if (SCTP_CID_ABORT == ch->type) 3175 if (SCTP_CID_ABORT == ch->type)
3144 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 3176 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
3145 3177
3178 /* Report violation if chunk len overflows */
3179 ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
3180 if (ch_end > skb_tail_pointer(skb))
3181 return sctp_sf_violation_chunklen(ep, asoc, type, arg,
3182 commands);
3183
3146 ch = (sctp_chunkhdr_t *) ch_end; 3184 ch = (sctp_chunkhdr_t *) ch_end;
3147 } while (ch_end < skb_tail_pointer(skb)); 3185 } while (ch_end < skb_tail_pointer(skb));
3148 3186
3149 if (ootb_shut_ack) 3187 if (ootb_shut_ack)
3150 sctp_sf_shut_8_4_5(ep, asoc, type, arg, commands); 3188 return sctp_sf_shut_8_4_5(ep, asoc, type, arg, commands);
3151 else 3189 else
3152 sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands); 3190 return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
3153
3154 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
3155} 3191}
3156 3192
3157/* 3193/*
@@ -3217,7 +3253,11 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
3217 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) 3253 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
3218 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 3254 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
3219 3255
3220 return SCTP_DISPOSITION_CONSUME; 3256 /* We need to discard the rest of the packet to prevent
3257 * potential bombing attacks from additional bundled chunks.
3258 * This is documented in the SCTP Threats Internet-Draft.
3259 */
3260 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
3221 } 3261 }
3222 3262
3223 return SCTP_DISPOSITION_NOMEM; 3263 return SCTP_DISPOSITION_NOMEM;
@@ -3240,6 +3280,13 @@ sctp_disposition_t sctp_sf_do_8_5_1_E_sa(const struct sctp_endpoint *ep,
3240 void *arg, 3280 void *arg,
3241 sctp_cmd_seq_t *commands) 3281 sctp_cmd_seq_t *commands)
3242{ 3282{
3283 struct sctp_chunk *chunk = arg;
3284
3285 /* Make sure that the SHUTDOWN_ACK chunk has a valid length. */
3286 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
3287 return sctp_sf_violation_chunklen(ep, asoc, type, arg,
3288 commands);
3289
3243 /* Although we do have an association in this case, it corresponds 3290 /* Although we do have an association in this case, it corresponds
3244 * to a restarted association. So the packet is treated as an OOTB 3291 * to a restarted association. So the packet is treated as an OOTB
3245 * packet and the state function that handles OOTB SHUTDOWN_ACK is 3292 * packet and the state function that handles OOTB SHUTDOWN_ACK is
@@ -3256,8 +3303,11 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
3256{ 3303{
3257 struct sctp_chunk *chunk = arg; 3304 struct sctp_chunk *chunk = arg;
3258 struct sctp_chunk *asconf_ack = NULL; 3305 struct sctp_chunk *asconf_ack = NULL;
3306 struct sctp_paramhdr *err_param = NULL;
3259 sctp_addiphdr_t *hdr; 3307 sctp_addiphdr_t *hdr;
3308 union sctp_addr_param *addr_param;
3260 __u32 serial; 3309 __u32 serial;
3310 int length;
3261 3311
3262 if (!sctp_vtag_verify(chunk, asoc)) { 3312 if (!sctp_vtag_verify(chunk, asoc)) {
3263 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, 3313 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
@@ -3273,6 +3323,20 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
3273 hdr = (sctp_addiphdr_t *)chunk->skb->data; 3323 hdr = (sctp_addiphdr_t *)chunk->skb->data;
3274 serial = ntohl(hdr->serial); 3324 serial = ntohl(hdr->serial);
3275 3325
3326 addr_param = (union sctp_addr_param *)hdr->params;
3327 length = ntohs(addr_param->p.length);
3328 if (length < sizeof(sctp_paramhdr_t))
3329 return sctp_sf_violation_paramlen(ep, asoc, type,
3330 (void *)addr_param, commands);
3331
3332 /* Verify the ASCONF chunk before processing it. */
3333 if (!sctp_verify_asconf(asoc,
3334 (sctp_paramhdr_t *)((void *)addr_param + length),
3335 (void *)chunk->chunk_end,
3336 &err_param))
3337 return sctp_sf_violation_paramlen(ep, asoc, type,
3338 (void *)&err_param, commands);
3339
3276 /* ADDIP 4.2 C1) Compare the value of the serial number to the value 3340 /* ADDIP 4.2 C1) Compare the value of the serial number to the value
3277 * the endpoint stored in a new association variable 3341 * the endpoint stored in a new association variable
3278 * 'Peer-Serial-Number'. 3342 * 'Peer-Serial-Number'.
@@ -3327,6 +3391,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
3327 struct sctp_chunk *asconf_ack = arg; 3391 struct sctp_chunk *asconf_ack = arg;
3328 struct sctp_chunk *last_asconf = asoc->addip_last_asconf; 3392 struct sctp_chunk *last_asconf = asoc->addip_last_asconf;
3329 struct sctp_chunk *abort; 3393 struct sctp_chunk *abort;
3394 struct sctp_paramhdr *err_param = NULL;
3330 sctp_addiphdr_t *addip_hdr; 3395 sctp_addiphdr_t *addip_hdr;
3331 __u32 sent_serial, rcvd_serial; 3396 __u32 sent_serial, rcvd_serial;
3332 3397
@@ -3344,6 +3409,14 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
3344 addip_hdr = (sctp_addiphdr_t *)asconf_ack->skb->data; 3409 addip_hdr = (sctp_addiphdr_t *)asconf_ack->skb->data;
3345 rcvd_serial = ntohl(addip_hdr->serial); 3410 rcvd_serial = ntohl(addip_hdr->serial);
3346 3411
3412 /* Verify the ASCONF-ACK chunk before processing it. */
3413 if (!sctp_verify_asconf(asoc,
3414 (sctp_paramhdr_t *)addip_hdr->params,
3415 (void *)asconf_ack->chunk_end,
3416 &err_param))
3417 return sctp_sf_violation_paramlen(ep, asoc, type,
3418 (void *)&err_param, commands);
3419
3347 if (last_asconf) { 3420 if (last_asconf) {
3348 addip_hdr = (sctp_addiphdr_t *)last_asconf->subh.addip_hdr; 3421 addip_hdr = (sctp_addiphdr_t *)last_asconf->subh.addip_hdr;
3349 sent_serial = ntohl(addip_hdr->serial); 3422 sent_serial = ntohl(addip_hdr->serial);
@@ -3362,7 +3435,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
3362 abort = sctp_make_abort(asoc, asconf_ack, 3435 abort = sctp_make_abort(asoc, asconf_ack,
3363 sizeof(sctp_errhdr_t)); 3436 sizeof(sctp_errhdr_t));
3364 if (abort) { 3437 if (abort) {
3365 sctp_init_cause(abort, SCTP_ERROR_ASCONF_ACK, NULL, 0); 3438 sctp_init_cause(abort, SCTP_ERROR_ASCONF_ACK, 0);
3366 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, 3439 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
3367 SCTP_CHUNK(abort)); 3440 SCTP_CHUNK(abort));
3368 } 3441 }
@@ -3392,7 +3465,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
3392 abort = sctp_make_abort(asoc, asconf_ack, 3465 abort = sctp_make_abort(asoc, asconf_ack,
3393 sizeof(sctp_errhdr_t)); 3466 sizeof(sctp_errhdr_t));
3394 if (abort) { 3467 if (abort) {
3395 sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, NULL, 0); 3468 sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0);
3396 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, 3469 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
3397 SCTP_CHUNK(abort)); 3470 SCTP_CHUNK(abort));
3398 } 3471 }
@@ -3654,6 +3727,16 @@ sctp_disposition_t sctp_sf_discard_chunk(const struct sctp_endpoint *ep,
3654 void *arg, 3727 void *arg,
3655 sctp_cmd_seq_t *commands) 3728 sctp_cmd_seq_t *commands)
3656{ 3729{
3730 struct sctp_chunk *chunk = arg;
3731
3732 /* Make sure that the chunk has a valid length.
3733 * Since we don't know the chunk type, we use a general
3734 * chunkhdr structure to make a comparison.
3735 */
3736 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
3737 return sctp_sf_violation_chunklen(ep, asoc, type, arg,
3738 commands);
3739
3657 SCTP_DEBUG_PRINTK("Chunk %d is discarded\n", type.chunk); 3740 SCTP_DEBUG_PRINTK("Chunk %d is discarded\n", type.chunk);
3658 return SCTP_DISPOSITION_DISCARD; 3741 return SCTP_DISPOSITION_DISCARD;
3659} 3742}
@@ -3709,6 +3792,13 @@ sctp_disposition_t sctp_sf_violation(const struct sctp_endpoint *ep,
3709 void *arg, 3792 void *arg,
3710 sctp_cmd_seq_t *commands) 3793 sctp_cmd_seq_t *commands)
3711{ 3794{
3795 struct sctp_chunk *chunk = arg;
3796
3797 /* Make sure that the chunk has a valid length. */
3798 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
3799 return sctp_sf_violation_chunklen(ep, asoc, type, arg,
3800 commands);
3801
3712 return SCTP_DISPOSITION_VIOLATION; 3802 return SCTP_DISPOSITION_VIOLATION;
3713} 3803}
3714 3804
@@ -3716,12 +3806,14 @@ sctp_disposition_t sctp_sf_violation(const struct sctp_endpoint *ep,
3716 * Common function to handle a protocol violation. 3806 * Common function to handle a protocol violation.
3717 */ 3807 */
3718static sctp_disposition_t sctp_sf_abort_violation( 3808static sctp_disposition_t sctp_sf_abort_violation(
3809 const struct sctp_endpoint *ep,
3719 const struct sctp_association *asoc, 3810 const struct sctp_association *asoc,
3720 void *arg, 3811 void *arg,
3721 sctp_cmd_seq_t *commands, 3812 sctp_cmd_seq_t *commands,
3722 const __u8 *payload, 3813 const __u8 *payload,
3723 const size_t paylen) 3814 const size_t paylen)
3724{ 3815{
3816 struct sctp_packet *packet = NULL;
3725 struct sctp_chunk *chunk = arg; 3817 struct sctp_chunk *chunk = arg;
3726 struct sctp_chunk *abort = NULL; 3818 struct sctp_chunk *abort = NULL;
3727 3819
@@ -3730,30 +3822,51 @@ static sctp_disposition_t sctp_sf_abort_violation(
3730 if (!abort) 3822 if (!abort)
3731 goto nomem; 3823 goto nomem;
3732 3824
3733 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); 3825 if (asoc) {
3734 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); 3826 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
3827 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
3735 3828
3736 if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) { 3829 if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) {
3737 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, 3830 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
3738 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); 3831 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
3739 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, 3832 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
3740 SCTP_ERROR(ECONNREFUSED)); 3833 SCTP_ERROR(ECONNREFUSED));
3741 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, 3834 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
3742 SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION)); 3835 SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
3836 } else {
3837 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
3838 SCTP_ERROR(ECONNABORTED));
3839 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
3840 SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
3841 SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
3842 }
3743 } else { 3843 } else {
3744 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, 3844 packet = sctp_ootb_pkt_new(asoc, chunk);
3745 SCTP_ERROR(ECONNABORTED)); 3845
3746 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, 3846 if (!packet)
3747 SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION)); 3847 goto nomem_pkt;
3748 SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); 3848
3849 if (sctp_test_T_bit(abort))
3850 packet->vtag = ntohl(chunk->sctp_hdr->vtag);
3851
3852 abort->skb->sk = ep->base.sk;
3853
3854 sctp_packet_append_chunk(packet, abort);
3855
3856 sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
3857 SCTP_PACKET(packet));
3858
3859 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
3749 } 3860 }
3750 3861
3751 sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL()); 3862 sctp_sf_pdiscard(ep, asoc, SCTP_ST_CHUNK(0), arg, commands);
3752 3863
3753 SCTP_INC_STATS(SCTP_MIB_ABORTEDS); 3864 SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
3754 3865
3755 return SCTP_DISPOSITION_ABORT; 3866 return SCTP_DISPOSITION_ABORT;
3756 3867
3868nomem_pkt:
3869 sctp_chunk_free(abort);
3757nomem: 3870nomem:
3758 return SCTP_DISPOSITION_NOMEM; 3871 return SCTP_DISPOSITION_NOMEM;
3759} 3872}
@@ -3786,7 +3899,24 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
3786{ 3899{
3787 char err_str[]="The following chunk had invalid length:"; 3900 char err_str[]="The following chunk had invalid length:";
3788 3901
3789 return sctp_sf_abort_violation(asoc, arg, commands, err_str, 3902 return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
3903 sizeof(err_str));
3904}
3905
3906/*
3907 * Handle a protocol violation when the parameter length is invalid.
3908 * "Invalid" length is identified as smaller then the minimal length a
3909 * given parameter can be.
3910 */
3911static sctp_disposition_t sctp_sf_violation_paramlen(
3912 const struct sctp_endpoint *ep,
3913 const struct sctp_association *asoc,
3914 const sctp_subtype_t type,
3915 void *arg,
3916 sctp_cmd_seq_t *commands) {
3917 char err_str[] = "The following parameter had invalid length:";
3918
3919 return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
3790 sizeof(err_str)); 3920 sizeof(err_str));
3791} 3921}
3792 3922
@@ -3805,10 +3935,31 @@ static sctp_disposition_t sctp_sf_violation_ctsn(
3805{ 3935{
3806 char err_str[]="The cumulative tsn ack beyond the max tsn currently sent:"; 3936 char err_str[]="The cumulative tsn ack beyond the max tsn currently sent:";
3807 3937
3808 return sctp_sf_abort_violation(asoc, arg, commands, err_str, 3938 return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
3809 sizeof(err_str)); 3939 sizeof(err_str));
3810} 3940}
3811 3941
3942/* Handle a protocol violation due to invalid chunk bundling. For example,
3943 * when we have an association and we receive a bundled INIT-ACK or
3944 * SHUTDOWN-COMPLETE, our peer is clearly violating the "MUST NOT bundle"
3945 * statement from the specs. Additionally, there might be an attacker
3946 * on the path and we may not want to continue this communication.
3947 */
3948static sctp_disposition_t sctp_sf_violation_chunk(
3949 const struct sctp_endpoint *ep,
3950 const struct sctp_association *asoc,
3951 const sctp_subtype_t type,
3952 void *arg,
3953 sctp_cmd_seq_t *commands)
3954{
3955 char err_str[]="The following chunk violates protocol:";
3956
3957 if (!asoc)
3958 return sctp_sf_violation(ep, asoc, type, arg, commands);
3959
3960 return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
3961 sizeof(err_str));
3962}
3812/*************************************************************************** 3963/***************************************************************************
3813 * These are the state functions for handling primitive (Section 10) events. 3964 * These are the state functions for handling primitive (Section 10) events.
3814 ***************************************************************************/ 3965 ***************************************************************************/
@@ -5175,7 +5326,22 @@ static struct sctp_packet *sctp_ootb_pkt_new(const struct sctp_association *asoc
5175 * association exists, otherwise, use the peer's vtag. 5326 * association exists, otherwise, use the peer's vtag.
5176 */ 5327 */
5177 if (asoc) { 5328 if (asoc) {
5178 vtag = asoc->peer.i.init_tag; 5329 /* Special case the INIT-ACK as there is no peer's vtag
5330 * yet.
5331 */
5332 switch(chunk->chunk_hdr->type) {
5333 case SCTP_CID_INIT_ACK:
5334 {
5335 sctp_initack_chunk_t *initack;
5336
5337 initack = (sctp_initack_chunk_t *)chunk->chunk_hdr;
5338 vtag = ntohl(initack->init_hdr.init_tag);
5339 break;
5340 }
5341 default:
5342 vtag = asoc->peer.i.init_tag;
5343 break;
5344 }
5179 } else { 5345 } else {
5180 /* Special case the INIT and stale COOKIE_ECHO as there is no 5346 /* Special case the INIT and stale COOKIE_ECHO as there is no
5181 * vtag yet. 5347 * vtag yet.
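The INIT-ACK special case exists because in COOKIE-WAIT the association has not yet recorded a peer verification tag (per the comment above, there is no peer vtag yet), so an ABORT stamped with it would simply be dropped by the remote end; the only usable tag is the initiate tag carried inside the offending INIT-ACK itself. A userspace model of the selection rule (chunk IDs per RFC 2960, helper and field names hypothetical):

#include <stdint.h>
#include <stdio.h>

enum { CID_DATA = 0, CID_INIT = 1, CID_INIT_ACK = 2 };

static uint32_t pick_vtag(int chunk_type, uint32_t peer_init_tag,
			  uint32_t initack_init_tag)
{
	switch (chunk_type) {
	case CID_INIT_ACK:
		/* No peer tag learned yet; echo the tag the INIT-ACK
		 * itself carries so the reply will be accepted. */
		return initack_init_tag;
	default:
		return peer_init_tag;
	}
}

int main(void)
{
	printf("0x%x\n", pick_vtag(CID_INIT_ACK, 0, 0xdeadbeef));
	printf("0x%x\n", pick_vtag(CID_DATA, 0x12345678, 0));
	return 0;
}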
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c
index 70a91ece3c49..ddb0ba3974b0 100644
--- a/net/sctp/sm_statetable.c
+++ b/net/sctp/sm_statetable.c
@@ -110,7 +110,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
110 /* SCTP_STATE_EMPTY */ \ 110 /* SCTP_STATE_EMPTY */ \
111 TYPE_SCTP_FUNC(sctp_sf_ootb), \ 111 TYPE_SCTP_FUNC(sctp_sf_ootb), \
112 /* SCTP_STATE_CLOSED */ \ 112 /* SCTP_STATE_CLOSED */ \
113 TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \ 113 TYPE_SCTP_FUNC(sctp_sf_ootb), \
114 /* SCTP_STATE_COOKIE_WAIT */ \ 114 /* SCTP_STATE_COOKIE_WAIT */ \
115 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ 115 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
116 /* SCTP_STATE_COOKIE_ECHOED */ \ 116 /* SCTP_STATE_COOKIE_ECHOED */ \
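Each of these sm_statetable.c hunks edits one cell of the chunk/state dispatch table: for the CLOSED state the handler becomes sctp_sf_ootb(), so every out-of-the-blue packet funnels through one policy function (which, as seen above, picks SHUTDOWN-COMPLETE or ABORT itself) instead of hardwiring an ABORT per cell. The table is the classic array-of-function-pointers pattern, roughly (a sketch, not the kernel types):

#include <stdio.h>

enum state { ST_CLOSED, ST_ESTABLISHED, ST_NUM };
enum event { EV_DATA, EV_HEARTBEAT, EV_NUM };

typedef const char *(*handler_t)(void);

static const char *do_ootb(void)    { return "ootb"; }
static const char *do_deliver(void) { return "deliver"; }
static const char *do_beat(void)    { return "heartbeat-ack"; }

/* one handler per (event, state) cell, mirroring the kernel layout */
static const handler_t table[EV_NUM][ST_NUM] = {
	[EV_DATA]      = { [ST_CLOSED] = do_ootb, [ST_ESTABLISHED] = do_deliver },
	[EV_HEARTBEAT] = { [ST_CLOSED] = do_ootb, [ST_ESTABLISHED] = do_beat },
};

int main(void)
{
	printf("%s\n", table[EV_DATA][ST_CLOSED]());	/* "ootb" */
	return 0;
}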
@@ -173,7 +173,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
173 /* SCTP_STATE_EMPTY */ \ 173 /* SCTP_STATE_EMPTY */ \
174 TYPE_SCTP_FUNC(sctp_sf_ootb), \ 174 TYPE_SCTP_FUNC(sctp_sf_ootb), \
175 /* SCTP_STATE_CLOSED */ \ 175 /* SCTP_STATE_CLOSED */ \
176 TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \ 176 TYPE_SCTP_FUNC(sctp_sf_ootb), \
177 /* SCTP_STATE_COOKIE_WAIT */ \ 177 /* SCTP_STATE_COOKIE_WAIT */ \
178 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ 178 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
179 /* SCTP_STATE_COOKIE_ECHOED */ \ 179 /* SCTP_STATE_COOKIE_ECHOED */ \
@@ -194,7 +194,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
194 /* SCTP_STATE_EMPTY */ \ 194 /* SCTP_STATE_EMPTY */ \
195 TYPE_SCTP_FUNC(sctp_sf_ootb), \ 195 TYPE_SCTP_FUNC(sctp_sf_ootb), \
196 /* SCTP_STATE_CLOSED */ \ 196 /* SCTP_STATE_CLOSED */ \
197 TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \ 197 TYPE_SCTP_FUNC(sctp_sf_ootb), \
198 /* SCTP_STATE_COOKIE_WAIT */ \ 198 /* SCTP_STATE_COOKIE_WAIT */ \
199 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ 199 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
200 /* SCTP_STATE_COOKIE_ECHOED */ \ 200 /* SCTP_STATE_COOKIE_ECHOED */ \
@@ -216,7 +216,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
216 /* SCTP_STATE_EMPTY */ \ 216 /* SCTP_STATE_EMPTY */ \
217 TYPE_SCTP_FUNC(sctp_sf_ootb), \ 217 TYPE_SCTP_FUNC(sctp_sf_ootb), \
218 /* SCTP_STATE_CLOSED */ \ 218 /* SCTP_STATE_CLOSED */ \
219 TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \ 219 TYPE_SCTP_FUNC(sctp_sf_ootb), \
220 /* SCTP_STATE_COOKIE_WAIT */ \ 220 /* SCTP_STATE_COOKIE_WAIT */ \
221 TYPE_SCTP_FUNC(sctp_sf_violation), \ 221 TYPE_SCTP_FUNC(sctp_sf_violation), \
222 /* SCTP_STATE_COOKIE_ECHOED */ \ 222 /* SCTP_STATE_COOKIE_ECHOED */ \
@@ -258,7 +258,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
258 /* SCTP_STATE_EMPTY */ \ 258 /* SCTP_STATE_EMPTY */ \
259 TYPE_SCTP_FUNC(sctp_sf_ootb), \ 259 TYPE_SCTP_FUNC(sctp_sf_ootb), \
260 /* SCTP_STATE_CLOSED */ \ 260 /* SCTP_STATE_CLOSED */ \
261 TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \ 261 TYPE_SCTP_FUNC(sctp_sf_ootb), \
262 /* SCTP_STATE_COOKIE_WAIT */ \ 262 /* SCTP_STATE_COOKIE_WAIT */ \
263 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ 263 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
264 /* SCTP_STATE_COOKIE_ECHOED */ \ 264 /* SCTP_STATE_COOKIE_ECHOED */ \
@@ -300,7 +300,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
300 /* SCTP_STATE_EMPTY */ \ 300 /* SCTP_STATE_EMPTY */ \
301 TYPE_SCTP_FUNC(sctp_sf_ootb), \ 301 TYPE_SCTP_FUNC(sctp_sf_ootb), \
302 /* SCTP_STATE_CLOSED */ \ 302 /* SCTP_STATE_CLOSED */ \
303 TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \ 303 TYPE_SCTP_FUNC(sctp_sf_ootb), \
304 /* SCTP_STATE_COOKIE_WAIT */ \ 304 /* SCTP_STATE_COOKIE_WAIT */ \
305 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ 305 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
306 /* SCTP_STATE_COOKIE_ECHOED */ \ 306 /* SCTP_STATE_COOKIE_ECHOED */ \
@@ -499,7 +499,7 @@ static const sctp_sm_table_entry_t addip_chunk_event_table[SCTP_NUM_ADDIP_CHUNK_
499 /* SCTP_STATE_EMPTY */ \ 499 /* SCTP_STATE_EMPTY */ \
500 TYPE_SCTP_FUNC(sctp_sf_ootb), \ 500 TYPE_SCTP_FUNC(sctp_sf_ootb), \
501 /* SCTP_STATE_CLOSED */ \ 501 /* SCTP_STATE_CLOSED */ \
502 TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \ 502 TYPE_SCTP_FUNC(sctp_sf_ootb), \
503 /* SCTP_STATE_COOKIE_WAIT */ \ 503 /* SCTP_STATE_COOKIE_WAIT */ \
504 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ 504 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
505 /* SCTP_STATE_COOKIE_ECHOED */ \ 505 /* SCTP_STATE_COOKIE_ECHOED */ \
@@ -528,7 +528,7 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
528 /* SCTP_STATE_EMPTY */ 528 /* SCTP_STATE_EMPTY */
529 TYPE_SCTP_FUNC(sctp_sf_ootb), 529 TYPE_SCTP_FUNC(sctp_sf_ootb),
530 /* SCTP_STATE_CLOSED */ 530 /* SCTP_STATE_CLOSED */
531 TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), 531 TYPE_SCTP_FUNC(sctp_sf_ootb),
532 /* SCTP_STATE_COOKIE_WAIT */ 532 /* SCTP_STATE_COOKIE_WAIT */
533 TYPE_SCTP_FUNC(sctp_sf_unk_chunk), 533 TYPE_SCTP_FUNC(sctp_sf_unk_chunk),
534 /* SCTP_STATE_COOKIE_ECHOED */ 534 /* SCTP_STATE_COOKIE_ECHOED */
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 01c6364245b7..772fbfb4bfda 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -353,6 +353,7 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
353 * The function sctp_get_port_local() does duplicate address 353 * The function sctp_get_port_local() does duplicate address
354 * detection. 354 * detection.
355 */ 355 */
356 addr->v4.sin_port = htons(snum);
356 if ((ret = sctp_get_port_local(sk, addr))) { 357 if ((ret = sctp_get_port_local(sk, addr))) {
357 if (ret == (long) sk) { 358 if (ret == (long) sk) {
358 /* This endpoint has a conflicting address. */ 359 /* This endpoint has a conflicting address. */
@@ -366,14 +367,10 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
366 if (!bp->port) 367 if (!bp->port)
367 bp->port = inet_sk(sk)->num; 368 bp->port = inet_sk(sk)->num;
368 369
369 /* Add the address to the bind address list. */ 370 /* Add the address to the bind address list.
370 sctp_local_bh_disable(); 371 * Use GFP_ATOMIC since BHs will be disabled.
371 sctp_write_lock(&ep->base.addr_lock); 372 */
372
373 /* Use GFP_ATOMIC since BHs are disabled. */
374 ret = sctp_add_bind_addr(bp, addr, 1, GFP_ATOMIC); 373 ret = sctp_add_bind_addr(bp, addr, 1, GFP_ATOMIC);
375 sctp_write_unlock(&ep->base.addr_lock);
376 sctp_local_bh_enable();
377 374
378 /* Copy back into socket for getsockname() use. */ 375 /* Copy back into socket for getsockname() use. */
379 if (!ret) { 376 if (!ret) {
@@ -543,15 +540,12 @@ static int sctp_send_asconf_add_ip(struct sock *sk,
543 if (i < addrcnt) 540 if (i < addrcnt)
544 continue; 541 continue;
545 542
546 /* Use the first address in bind addr list of association as 543 /* Use the first valid address in bind addr list of
547 * Address Parameter of ASCONF CHUNK. 544 * association as Address Parameter of ASCONF CHUNK.
548 */ 545 */
549 sctp_read_lock(&asoc->base.addr_lock);
550 bp = &asoc->base.bind_addr; 546 bp = &asoc->base.bind_addr;
551 p = bp->address_list.next; 547 p = bp->address_list.next;
552 laddr = list_entry(p, struct sctp_sockaddr_entry, list); 548 laddr = list_entry(p, struct sctp_sockaddr_entry, list);
553 sctp_read_unlock(&asoc->base.addr_lock);
554
555 chunk = sctp_make_asconf_update_ip(asoc, &laddr->a, addrs, 549 chunk = sctp_make_asconf_update_ip(asoc, &laddr->a, addrs,
556 addrcnt, SCTP_PARAM_ADD_IP); 550 addrcnt, SCTP_PARAM_ADD_IP);
557 if (!chunk) { 551 if (!chunk) {
@@ -566,8 +560,6 @@ static int sctp_send_asconf_add_ip(struct sock *sk,
566 /* Add the new addresses to the bind address list with 560 /* Add the new addresses to the bind address list with
567 * use_as_src set to 0. 561 * use_as_src set to 0.
568 */ 562 */
569 sctp_local_bh_disable();
570 sctp_write_lock(&asoc->base.addr_lock);
571 addr_buf = addrs; 563 addr_buf = addrs;
572 for (i = 0; i < addrcnt; i++) { 564 for (i = 0; i < addrcnt; i++) {
573 addr = (union sctp_addr *)addr_buf; 565 addr = (union sctp_addr *)addr_buf;
@@ -577,8 +569,6 @@ static int sctp_send_asconf_add_ip(struct sock *sk,
577 GFP_ATOMIC); 569 GFP_ATOMIC);
578 addr_buf += af->sockaddr_len; 570 addr_buf += af->sockaddr_len;
579 } 571 }
580 sctp_write_unlock(&asoc->base.addr_lock);
581 sctp_local_bh_enable();
582 } 572 }
583 573
584out: 574out:
@@ -650,13 +640,7 @@ static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
650 * socket routing and failover schemes. Refer to comments in 640 * socket routing and failover schemes. Refer to comments in
651 * sctp_do_bind(). -daisy 641 * sctp_do_bind(). -daisy
652 */ 642 */
653 sctp_local_bh_disable(); 643 retval = sctp_del_bind_addr(bp, sa_addr, call_rcu);
654 sctp_write_lock(&ep->base.addr_lock);
655
656 retval = sctp_del_bind_addr(bp, sa_addr);
657
658 sctp_write_unlock(&ep->base.addr_lock);
659 sctp_local_bh_enable();
660 644
661 addr_buf += af->sockaddr_len; 645 addr_buf += af->sockaddr_len;
662err_bindx_rem: 646err_bindx_rem:
@@ -747,14 +731,16 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
747 * make sure that we do not delete all the addresses in the 731 * make sure that we do not delete all the addresses in the
748 * association. 732 * association.
749 */ 733 */
750 sctp_read_lock(&asoc->base.addr_lock);
751 bp = &asoc->base.bind_addr; 734 bp = &asoc->base.bind_addr;
752 laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs, 735 laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs,
753 addrcnt, sp); 736 addrcnt, sp);
754 sctp_read_unlock(&asoc->base.addr_lock);
755 if (!laddr) 737 if (!laddr)
756 continue; 738 continue;
757 739
740 /* We do not need RCU protection throughout this loop
741 * because this is done under a socket lock from the
742 * setsockopt call.
743 */
758 chunk = sctp_make_asconf_update_ip(asoc, laddr, addrs, addrcnt, 744 chunk = sctp_make_asconf_update_ip(asoc, laddr, addrs, addrcnt,
759 SCTP_PARAM_DEL_IP); 745 SCTP_PARAM_DEL_IP);
760 if (!chunk) { 746 if (!chunk) {
@@ -765,23 +751,16 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
765 /* Reset use_as_src flag for the addresses in the bind address 751 /* Reset use_as_src flag for the addresses in the bind address
766 * list that are to be deleted. 752 * list that are to be deleted.
767 */ 753 */
768 sctp_local_bh_disable();
769 sctp_write_lock(&asoc->base.addr_lock);
770 addr_buf = addrs; 754 addr_buf = addrs;
771 for (i = 0; i < addrcnt; i++) { 755 for (i = 0; i < addrcnt; i++) {
772 laddr = (union sctp_addr *)addr_buf; 756 laddr = (union sctp_addr *)addr_buf;
773 af = sctp_get_af_specific(laddr->v4.sin_family); 757 af = sctp_get_af_specific(laddr->v4.sin_family);
774 list_for_each(pos1, &bp->address_list) { 758 list_for_each_entry(saddr, &bp->address_list, list) {
775 saddr = list_entry(pos1,
776 struct sctp_sockaddr_entry,
777 list);
778 if (sctp_cmp_addr_exact(&saddr->a, laddr)) 759 if (sctp_cmp_addr_exact(&saddr->a, laddr))
779 saddr->use_as_src = 0; 760 saddr->use_as_src = 0;
780 } 761 }
781 addr_buf += af->sockaddr_len; 762 addr_buf += af->sockaddr_len;
782 } 763 }
783 sctp_write_unlock(&asoc->base.addr_lock);
784 sctp_local_bh_enable();
785 764
786 /* Update the route and saddr entries for all the transports 765 /* Update the route and saddr entries for all the transports
787 * as some of the addresses in the bind address list are 766 * as some of the addresses in the bind address list are
@@ -4058,9 +4037,7 @@ static int sctp_getsockopt_local_addrs_num_old(struct sock *sk, int len,
4058 sctp_assoc_t id; 4037 sctp_assoc_t id;
4059 struct sctp_bind_addr *bp; 4038 struct sctp_bind_addr *bp;
4060 struct sctp_association *asoc; 4039 struct sctp_association *asoc;
4061 struct list_head *pos, *temp;
4062 struct sctp_sockaddr_entry *addr; 4040 struct sctp_sockaddr_entry *addr;
4063 rwlock_t *addr_lock;
4064 int cnt = 0; 4041 int cnt = 0;
4065 4042
4066 if (len < sizeof(sctp_assoc_t)) 4043 if (len < sizeof(sctp_assoc_t))
@@ -4077,17 +4054,13 @@ static int sctp_getsockopt_local_addrs_num_old(struct sock *sk, int len,
4077 */ 4054 */
4078 if (0 == id) { 4055 if (0 == id) {
4079 bp = &sctp_sk(sk)->ep->base.bind_addr; 4056 bp = &sctp_sk(sk)->ep->base.bind_addr;
4080 addr_lock = &sctp_sk(sk)->ep->base.addr_lock;
4081 } else { 4057 } else {
4082 asoc = sctp_id2assoc(sk, id); 4058 asoc = sctp_id2assoc(sk, id);
4083 if (!asoc) 4059 if (!asoc)
4084 return -EINVAL; 4060 return -EINVAL;
4085 bp = &asoc->base.bind_addr; 4061 bp = &asoc->base.bind_addr;
4086 addr_lock = &asoc->base.addr_lock;
4087 } 4062 }
4088 4063
4089 sctp_read_lock(addr_lock);
4090
4091 /* If the endpoint is bound to 0.0.0.0 or ::0, count the valid 4064 /* If the endpoint is bound to 0.0.0.0 or ::0, count the valid
4092 * addresses from the global local address list. 4065 * addresses from the global local address list.
4093 */ 4066 */
@@ -4095,27 +4068,33 @@ static int sctp_getsockopt_local_addrs_num_old(struct sock *sk, int len,
4095 addr = list_entry(bp->address_list.next, 4068 addr = list_entry(bp->address_list.next,
4096 struct sctp_sockaddr_entry, list); 4069 struct sctp_sockaddr_entry, list);
4097 if (sctp_is_any(&addr->a)) { 4070 if (sctp_is_any(&addr->a)) {
4098 list_for_each_safe(pos, temp, &sctp_local_addr_list) { 4071 rcu_read_lock();
4099 addr = list_entry(pos, 4072 list_for_each_entry_rcu(addr,
4100 struct sctp_sockaddr_entry, 4073 &sctp_local_addr_list, list) {
4101 list); 4074 if (!addr->valid)
4075 continue;
4076
4102 if ((PF_INET == sk->sk_family) && 4077 if ((PF_INET == sk->sk_family) &&
4103 (AF_INET6 == addr->a.sa.sa_family)) 4078 (AF_INET6 == addr->a.sa.sa_family))
4104 continue; 4079 continue;
4080
4105 cnt++; 4081 cnt++;
4106 } 4082 }
4083 rcu_read_unlock();
4107 } else { 4084 } else {
4108 cnt = 1; 4085 cnt = 1;
4109 } 4086 }
4110 goto done; 4087 goto done;
4111 } 4088 }
4112 4089
4113 list_for_each(pos, &bp->address_list) { 4090 /* Protection on the bound address list is not needed,
4091 * since in the socket option context we hold the socket lock,
4092 * so there is no way that the bound address list can change.
4093 */
4094 list_for_each_entry(addr, &bp->address_list, list) {
4114 cnt ++; 4095 cnt ++;
4115 } 4096 }
4116
4117done: 4097done:
4118 sctp_read_unlock(addr_lock);
4119 return cnt; 4098 return cnt;
4120} 4099}
4121 4100
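Two things replace the old reader/writer lock in these getsockopt paths: the socket's bound address list is only ever modified under the socket lock (so no extra protection is needed there), and the global sctp_local_addr_list is now traversed with rcu_read_lock()/list_for_each_entry_rcu(), where a writer clears an entry's valid flag first and frees the entry via RCU later. Readers therefore must skip invalid entries rather than assume the list holds only live ones; a single-threaded model of just that reader-side rule:

#include <stdio.h>

struct entry {
	const char *addr;
	int valid;	/* cleared by the writer before RCU-deferred free */
};

int main(void)
{
	struct entry list[] = {
		{ "10.0.0.1", 1 },
		{ "10.0.0.2", 0 },	/* concurrently being deleted */
		{ "10.0.0.3", 1 },
	};
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < sizeof(list) / sizeof(list[0]); i++) {
		if (!list[i].valid)
			continue;	/* may still be linked, but dead */
		cnt++;
	}
	printf("cnt=%d\n", cnt);
	return 0;
}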
@@ -4126,14 +4105,16 @@ static int sctp_copy_laddrs_old(struct sock *sk, __u16 port,
4126 int max_addrs, void *to, 4105 int max_addrs, void *to,
4127 int *bytes_copied) 4106 int *bytes_copied)
4128{ 4107{
4129 struct list_head *pos, *next;
4130 struct sctp_sockaddr_entry *addr; 4108 struct sctp_sockaddr_entry *addr;
4131 union sctp_addr temp; 4109 union sctp_addr temp;
4132 int cnt = 0; 4110 int cnt = 0;
4133 int addrlen; 4111 int addrlen;
4134 4112
4135 list_for_each_safe(pos, next, &sctp_local_addr_list) { 4113 rcu_read_lock();
4136 addr = list_entry(pos, struct sctp_sockaddr_entry, list); 4114 list_for_each_entry_rcu(addr, &sctp_local_addr_list, list) {
4115 if (!addr->valid)
4116 continue;
4117
4137 if ((PF_INET == sk->sk_family) && 4118 if ((PF_INET == sk->sk_family) &&
4138 (AF_INET6 == addr->a.sa.sa_family)) 4119 (AF_INET6 == addr->a.sa.sa_family))
4139 continue; 4120 continue;
@@ -4148,6 +4129,7 @@ static int sctp_copy_laddrs_old(struct sock *sk, __u16 port,
 		cnt ++;
 		if (cnt >= max_addrs) break;
 	}
+	rcu_read_unlock();
 
 	return cnt;
 }
@@ -4155,14 +4137,16 @@ static int sctp_copy_laddrs_old(struct sock *sk, __u16 port,
 static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
 			    size_t space_left, int *bytes_copied)
 {
-	struct list_head *pos, *next;
 	struct sctp_sockaddr_entry *addr;
 	union sctp_addr temp;
 	int cnt = 0;
 	int addrlen;
 
-	list_for_each_safe(pos, next, &sctp_local_addr_list) {
-		addr = list_entry(pos, struct sctp_sockaddr_entry, list);
+	rcu_read_lock();
+	list_for_each_entry_rcu(addr, &sctp_local_addr_list, list) {
+		if (!addr->valid)
+			continue;
+
 		if ((PF_INET == sk->sk_family) &&
 		    (AF_INET6 == addr->a.sa.sa_family))
 			continue;
@@ -4170,8 +4154,10 @@ static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
 		sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk),
 								&temp);
 		addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
-		if (space_left < addrlen)
-			return -ENOMEM;
+		if (space_left < addrlen) {
+			cnt = -ENOMEM;
+			break;
+		}
 		memcpy(to, &temp, addrlen);
 
 		to += addrlen;
@@ -4179,6 +4165,7 @@ static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
 		space_left -= addrlen;
 		*bytes_copied += addrlen;
 	}
+	rcu_read_unlock();
 
 	return cnt;
 }
@@ -4191,7 +4178,6 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len,
 {
 	struct sctp_bind_addr *bp;
 	struct sctp_association *asoc;
-	struct list_head *pos;
 	int cnt = 0;
 	struct sctp_getaddrs_old getaddrs;
 	struct sctp_sockaddr_entry *addr;
@@ -4199,7 +4185,6 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len,
 	union sctp_addr temp;
 	struct sctp_sock *sp = sctp_sk(sk);
 	int addrlen;
-	rwlock_t *addr_lock;
 	int err = 0;
 	void *addrs;
 	void *buf;
@@ -4221,13 +4206,11 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len,
 	 */
 	if (0 == getaddrs.assoc_id) {
 		bp = &sctp_sk(sk)->ep->base.bind_addr;
-		addr_lock = &sctp_sk(sk)->ep->base.addr_lock;
 	} else {
 		asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
 		if (!asoc)
 			return -EINVAL;
 		bp = &asoc->base.bind_addr;
-		addr_lock = &asoc->base.addr_lock;
 	}
 
 	to = getaddrs.addrs;
@@ -4241,8 +4224,6 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len,
 	if (!addrs)
 		return -ENOMEM;
 
-	sctp_read_lock(addr_lock);
-
 	/* If the endpoint is bound to 0.0.0.0 or ::0, get the valid
 	 * addresses from the global local address list.
 	 */
@@ -4258,8 +4239,11 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len,
 	}
 
 	buf = addrs;
-	list_for_each(pos, &bp->address_list) {
-		addr = list_entry(pos, struct sctp_sockaddr_entry, list);
+	/* Protection on the bound address list is not needed since
+	 * in the socket option context we hold a socket lock and
+	 * thus the bound address list can't change.
+	 */
+	list_for_each_entry(addr, &bp->address_list, list) {
 		memcpy(&temp, &addr->a, sizeof(temp));
 		sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
 		addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
@@ -4271,8 +4255,6 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len,
 	}
 
 copy_getaddrs:
-	sctp_read_unlock(addr_lock);
-
 	/* copy the entire address list into the user provided space */
 	if (copy_to_user(to, addrs, bytes_copied)) {
 		err = -EFAULT;
@@ -4294,7 +4276,6 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
 {
 	struct sctp_bind_addr *bp;
 	struct sctp_association *asoc;
-	struct list_head *pos;
 	int cnt = 0;
 	struct sctp_getaddrs getaddrs;
 	struct sctp_sockaddr_entry *addr;
@@ -4302,7 +4283,6 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
 	union sctp_addr temp;
 	struct sctp_sock *sp = sctp_sk(sk);
 	int addrlen;
-	rwlock_t *addr_lock;
 	int err = 0;
 	size_t space_left;
 	int bytes_copied = 0;
@@ -4323,13 +4303,11 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
 	 */
 	if (0 == getaddrs.assoc_id) {
 		bp = &sctp_sk(sk)->ep->base.bind_addr;
-		addr_lock = &sctp_sk(sk)->ep->base.addr_lock;
 	} else {
 		asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
 		if (!asoc)
 			return -EINVAL;
 		bp = &asoc->base.bind_addr;
-		addr_lock = &asoc->base.addr_lock;
 	}
 
 	to = optval + offsetof(struct sctp_getaddrs,addrs);
@@ -4339,8 +4317,6 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
 	if (!addrs)
 		return -ENOMEM;
 
-	sctp_read_lock(addr_lock);
-
 	/* If the endpoint is bound to 0.0.0.0 or ::0, get the valid
 	 * addresses from the global local address list.
 	 */
@@ -4352,21 +4328,24 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
 					       space_left, &bytes_copied);
 			if (cnt < 0) {
 				err = cnt;
-				goto error_lock;
+				goto out;
 			}
 			goto copy_getaddrs;
 		}
 	}
 
 	buf = addrs;
-	list_for_each(pos, &bp->address_list) {
-		addr = list_entry(pos, struct sctp_sockaddr_entry, list);
+	/* Protection on the bound address list is not needed since
+	 * in the socket option context we hold a socket lock and
+	 * thus the bound address list can't change.
+	 */
+	list_for_each_entry(addr, &bp->address_list, list) {
 		memcpy(&temp, &addr->a, sizeof(temp));
 		sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
 		addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
 		if (space_left < addrlen) {
 			err = -ENOMEM; /*fixme: right error?*/
-			goto error_lock;
+			goto out;
 		}
 		memcpy(buf, &temp, addrlen);
 		buf += addrlen;
@@ -4376,8 +4355,6 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
 	}
 
 copy_getaddrs:
-	sctp_read_unlock(addr_lock);
-
 	if (copy_to_user(to, addrs, bytes_copied)) {
 		err = -EFAULT;
 		goto out;
@@ -4388,12 +4365,6 @@ copy_getaddrs:
 	}
 	if (put_user(bytes_copied, optlen))
 		err = -EFAULT;
-
-	goto out;
-
-error_lock:
-	sctp_read_unlock(addr_lock);
-
 out:
 	kfree(addrs);
 	return err;
@@ -5202,6 +5173,7 @@ SCTP_STATIC int sctp_seqpacket_listen(struct sock *sk, int backlog)
 
 		sctp_unhash_endpoint(ep);
 		sk->sk_state = SCTP_SS_CLOSED;
+		return 0;
 	}
 
 	/* Return if we are already listening. */
@@ -5249,6 +5221,7 @@ SCTP_STATIC int sctp_stream_listen(struct sock *sk, int backlog)
 
 		sctp_unhash_endpoint(ep);
 		sk->sk_state = SCTP_SS_CLOSED;
+		return 0;
 	}
 
 	if (sctp_sstate(sk, LISTENING))
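
The net/sctp/socket.c hunks above drop the per-endpoint addr_lock rwlock and instead walk the global sctp_local_addr_list under rcu_read_lock(), skipping entries whose valid flag a writer has already cleared. A minimal sketch of that read-side pattern, using a hypothetical my_addr entry type rather than the real SCTP structures:

#include <linux/list.h>
#include <linux/rcupdate.h>

/* Hypothetical entry type mirroring the pattern above: writers clear
 * ->valid and defer the actual free via RCU; readers take the RCU read
 * lock, walk the list, and skip entries that are being removed.
 */
struct my_addr {
	struct list_head list;
	int valid;
	struct rcu_head rcu;
};

static LIST_HEAD(my_addr_list);

static int count_valid_addrs(void)
{
	struct my_addr *a;
	int cnt = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(a, &my_addr_list, list) {
		if (!a->valid)	/* a writer is unlinking this entry */
			continue;
		cnt++;
	}
	rcu_read_unlock();
	return cnt;
}

The bound-address walks, by contrast, drop locking entirely; as the comments added in the hunks note, the socket lock already serializes changes to that list in the socket-option path.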
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 34eb977a204d..fa0ba2a5564e 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -659,6 +659,46 @@ done:
 	return retval;
 }
 
+/*
+ * Flush out stale fragments from the reassembly queue when processing
+ * a Forward TSN.
+ *
+ * RFC 3758, Section 3.6
+ *
+ * After receiving and processing a FORWARD TSN, the data receiver MUST
+ * take cautions in updating its re-assembly queue.  The receiver MUST
+ * remove any partially reassembled message, which is still missing one
+ * or more TSNs earlier than or equal to the new cumulative TSN point.
+ * In the event that the receiver has invoked the partial delivery API,
+ * a notification SHOULD also be generated to inform the upper layer API
+ * that the message being partially delivered will NOT be completed.
+ */
+void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
+{
+	struct sk_buff *pos, *tmp;
+	struct sctp_ulpevent *event;
+	__u32 tsn;
+
+	if (skb_queue_empty(&ulpq->reasm))
+		return;
+
+	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
+		event = sctp_skb2event(pos);
+		tsn = event->tsn;
+
+		/* Since the entire message must be abandoned by the
+		 * sender (item A3 in Section 3.5, RFC 3758), we can
+		 * free all fragments on the list that are less than
+		 * or equal to ctsn_point
+		 */
+		if (TSN_lte(tsn, fwd_tsn)) {
+			__skb_unlink(pos, &ulpq->reasm);
+			sctp_ulpevent_free(event);
+		} else
+			break;
+	}
+}
+
 /* Helper function to gather skbs that have possibly become
  * ordered by an incoming chunk.
  */
@@ -794,7 +834,7 @@ static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
 /* Helper function to gather skbs that have possibly become
  * ordered by forward tsn skipping their dependencies.
  */
-static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
+static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
 {
 	struct sk_buff *pos, *tmp;
 	struct sctp_ulpevent *cevent;
@@ -813,31 +853,40 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
 		csid = cevent->stream;
 		cssn = cevent->ssn;
 
-		if (cssn != sctp_ssn_peek(in, csid))
+		/* Have we gone too far? */
+		if (csid > sid)
 			break;
 
-		/* Found it, so mark in the ssnmap. */
-		sctp_ssn_next(in, csid);
+		/* Have we not gone far enough? */
+		if (csid < sid)
+			continue;
+
+		/* see if this ssn has been marked by skipping */
+		if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
+			break;
 
 		__skb_unlink(pos, &ulpq->lobby);
-		if (!event) {
+		if (!event)
 			/* Create a temporary list to collect chunks on. */
 			event = sctp_skb2event(pos);
-			__skb_queue_tail(&temp, sctp_event2skb(event));
-		} else {
-			/* Attach all gathered skbs to the event. */
-			__skb_queue_tail(&temp, pos);
-		}
+
+		/* Attach all gathered skbs to the event. */
+		__skb_queue_tail(&temp, pos);
 	}
 
 	/* Send event to the ULP. 'event' is the sctp_ulpevent for
 	 * very first SKB on the 'temp' list.
 	 */
-	if (event)
+	if (event) {
+		/* see if we have more ordered data that we can deliver */
+		sctp_ulpq_retrieve_ordered(ulpq, event);
 		sctp_ulpq_tail_event(ulpq, event);
+	}
 }
 
-/* Skip over an SSN. */
+/* Skip over an SSN. This is used during the processing of
+ * Forward TSN chunk to skip over the abandoned ordered data
+ */
 void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
 {
 	struct sctp_stream *in;
@@ -855,7 +904,7 @@ void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
 	/* Go find any other chunks that were waiting for
 	 * ordering and deliver them if needed.
 	 */
-	sctp_ulpq_reap_ordered(ulpq);
+	sctp_ulpq_reap_ordered(ulpq, sid);
 	return;
 }
 
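
The new sctp_ulpq_reasm_flushtsn() and the reworked sctp_ulpq_reap_ordered() both lean on wraparound-safe comparisons (TSN_lte, SSN_lt) so that 32-bit TSNs and 16-bit SSNs order correctly across rollover. A sketch in the spirit of those helpers (not their literal kernel definitions):

#include <stdint.h>

/* Serial-number style comparison: a precedes b when the signed
 * difference is negative, which stays correct across wraparound.
 */
static inline int tsn_lt(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

static inline int tsn_lte(uint32_t a, uint32_t b)
{
	return a == b || tsn_lt(a, b);
}

/* For example, tsn_lt(0xfffffffeU, 1) is true: TSN 1 comes after the
 * 32-bit counter wraps, so fragments up to the Forward TSN point are
 * still flushed correctly near rollover.
 */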
diff --git a/net/socket.c b/net/socket.c
index 7d44453dfae1..b09eb9036a17 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -777,9 +777,6 @@ static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov,
 	if (pos != 0)
 		return -ESPIPE;
 
-	if (iocb->ki_left == 0)	/* Match SYS5 behaviour */
-		return 0;
-
 	x = alloc_sock_iocb(iocb, &siocb);
 	if (!x)
 		return -ENOMEM;
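
With the SYS5-compatibility shortcut removed, a zero-byte write on a socket now travels down to the protocol's sendmsg path instead of returning 0 early; for datagram sockets that plausibly matters, since a zero-length UDP datagram is valid on the wire. A userspace sketch of the affected case (illustrative only):

#include <unistd.h>

/* With the early return gone, a zero-byte write() on a connected
 * datagram socket reaches the protocol layer and can emit a valid
 * empty datagram instead of silently doing nothing.
 */
static ssize_t send_empty_datagram(int connected_udp_fd)
{
	return write(connected_udp_fd, "", 0);
}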
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 12ff5da8160e..036ab520df21 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1110,7 +1110,8 @@ svc_tcp_accept(struct svc_sock *svsk)
 			       serv->sv_name);
 			printk(KERN_NOTICE
 			       "%s: last TCP connect from %s\n",
-			       serv->sv_name, buf);
+			       serv->sv_name, __svc_print_addr(sin,
+						       buf, sizeof(buf)));
 		}
 		/*
 		 * Always select the oldest socket. It's not fair,
@@ -1592,7 +1593,7 @@ svc_age_temp_sockets(unsigned long closure)
 
 		if (!test_and_set_bit(SK_OLD, &svsk->sk_flags))
 			continue;
-		if (atomic_read(&svsk->sk_inuse) || test_bit(SK_BUSY, &svsk->sk_flags))
+		if (atomic_read(&svsk->sk_inuse) > 1 || test_bit(SK_BUSY, &svsk->sk_flags))
 			continue;
 		atomic_inc(&svsk->sk_inuse);
 		list_move(le, &to_be_aged);
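
The aging test changes from "any reference" to "more than one reference", presumably because sk_inuse holds a baseline reference for a socket sitting on the temp-sockets list, so the old nonzero test skipped every socket. A sketch of the idle check implied here (the helper name is hypothetical, and the baseline-reference reading is our assumption):

/* Assumption: list membership accounts for one sk_inuse reference,
 * so a count of exactly 1 means nobody else is using the socket.
 */
static int svsk_is_idle(struct svc_sock *svsk)
{
	return atomic_read(&svsk->sk_inuse) == 1 &&
	       !test_bit(SK_BUSY, &svsk->sk_flags);
}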
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 7eabd55417a5..9771451eae21 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -213,7 +213,7 @@ out_fail_notifier:
 out_fail_sysfs:
 	return err;
 }
-module_init(cfg80211_init);
+subsys_initcall(cfg80211_init);
 
 static void cfg80211_exit(void)
 {
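
Moving cfg80211_init from module_init to subsys_initcall changes when the wireless core comes up in a built-in kernel: subsys-level initcalls run before device-level ones, so cfg80211 is ready before the drivers that register with it. A small illustration of that ordering, with hypothetical functions:

#include <linux/init.h>

/* When built in, subsys_initcall() functions run at an earlier
 * initcall level than module_init() (device-level) functions.
 */
static int __init my_core_init(void)
{
	/* set up state that drivers depend on */
	return 0;
}
subsys_initcall(my_core_init);

static int __init my_driver_init(void)
{
	/* safe to use the core initialised above */
	return 0;
}
module_init(my_driver_init);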
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index 88aaacd9f822..2d5d2255a27c 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -52,12 +52,14 @@ static void wiphy_dev_release(struct device *dev)
 	cfg80211_dev_free(rdev);
 }
 
+#ifdef CONFIG_HOTPLUG
 static int wiphy_uevent(struct device *dev, char **envp,
 			int num_envp, char *buf, int size)
 {
 	/* TODO, we probably need stuff here */
 	return 0;
 }
+#endif
 
 struct class ieee80211_class = {
 	.name = "ieee80211",